From b10177cb6e50b83f386a1a0f2b51815d62af5e28 Mon Sep 17 00:00:00 2001 From: Johannes 'josch' Schauer Date: Wed, 8 Jan 2020 15:41:49 +0100 Subject: [PATCH] use spaces instead of tabs --- mmdebstrap | 5198 ++++++++++++++++++++++++++-------------------------- 1 file changed, 2599 insertions(+), 2599 deletions(-) diff --git a/mmdebstrap b/mmdebstrap index bc31c80..c6493f9 100755 --- a/mmdebstrap +++ b/mmdebstrap @@ -88,48 +88,48 @@ my $is_covering = !!(eval 'Devel::Cover::get_coverage()'); sub debug { if ($verbosity_level < 3) { - return; + return; } my $msg = shift; my ($package, $filename, $line) = caller; $msg = "D: $PID $line $msg"; if ( -t STDERR ) { - $msg = colored($msg, 'clear') + $msg = colored($msg, 'clear') } print STDERR "$msg\n"; } sub info { if ($verbosity_level == 0) { - return; + return; } my $msg = shift; if ($verbosity_level >= 3) { - my ($package, $filename, $line) = caller; - $msg = "$PID $line $msg" + my ($package, $filename, $line) = caller; + $msg = "$PID $line $msg" } $msg = "I: $msg"; if ( -t STDERR ) { - $msg = colored($msg, 'green') + $msg = colored($msg, 'green') } print STDERR "$msg\n"; } sub warning { if ($verbosity_level == 0) { - return; + return; } my $msg = shift; $msg = "W: $msg"; if ( -t STDERR ) { - $msg = colored($msg, 'bold yellow') + $msg = colored($msg, 'bold yellow') } print STDERR "$msg\n"; } sub error { if ($verbosity_level == 0) { - return; + return; } # if error() is called with the string from a previous error() that was # caught inside an eval(), then the string will have a newline which we @@ -137,12 +137,12 @@ sub error { chomp (my $msg = shift); $msg = "E: $msg"; if ( -t STDERR ) { - $msg = colored($msg, 'bold red') + $msg = colored($msg, 'bold red') } if ($verbosity_level == 3) { - croak $msg; # produces a backtrace + croak $msg; # produces a backtrace } else { - die "$msg\n"; + die "$msg\n"; } } @@ -151,17 +151,17 @@ sub error { sub is_mountpoint($) { my $dir = shift; if (! 
-e $dir) { - return 0; + return 0; } my @a = stat "$dir/."; my @b = stat "$dir/.."; # if the device number is different, then the directory must be mounted if ($a[0] != $b[0]) { - return 1; + return 1; } # if the inode number is the same, then the directory must be mounted if ($a[1] == $b[1]) { - return 1; + return 1; } return 0; } @@ -172,27 +172,27 @@ sub is_mountpoint($) { sub get_tar_compressor($) { my $filename = shift; if ($filename eq '-') { - return undef + return undef } elsif ($filename =~ /\.tar$/) { - return undef + return undef } elsif ($filename =~ /\.(gz|tgz|taz)$/) { - return ['gzip']; + return ['gzip']; } elsif ($filename =~ /\.(Z|taZ)$/) { - return ['compress']; + return ['compress']; } elsif ($filename =~ /\.(bz2|tbz|tbz2|tz2)$/) { - return ['bzip2']; + return ['bzip2']; } elsif ($filename =~ /\.lz$/) { - return ['lzip']; + return ['lzip']; } elsif ($filename =~ /\.(lzma|tlz)$/) { - return ['lzma']; + return ['lzma']; } elsif ($filename =~ /\.lzo$/) { - return ['lzop']; + return ['lzop']; } elsif ($filename =~ /\.lz4$/) { - return ['lz4']; + return ['lz4']; } elsif ($filename =~ /\.(xz|txz)$/) { - return ['xz', '--threads=0']; + return ['xz', '--threads=0']; } elsif ($filename =~ /\.zst$/) { - return ['zstd']; + return ['zstd']; } return undef } @@ -200,13 +200,13 @@ sub get_tar_compressor($) { sub test_unshare($) { my $verbose = shift; if ($EFFECTIVE_USER_ID == 0) { - my $msg = "cannot use unshare mode when executing as root"; - if ($verbose) { - warning $msg; - } else { - debug $msg; - } - return 0; + my $msg = "cannot use unshare mode when executing as root"; + if ($verbose) { + warning $msg; + } else { + debug $msg; + } + return 0; } # arguments to syscalls have to be stored in their own variable or # otherwise we will get "Modification of a read-only value attempted" @@ -215,62 +215,62 @@ sub test_unshare($) { # otherwise have unshared the mmdebstrap process itself which we don't want my $pid = fork() // error "fork() failed: $!"; if ($pid == 0) { - my $ret = syscall &SYS_unshare, $unshare_flags; - if ($ret == 0) { - exit 0; - } else { - my $msg = "unshare syscall failed: $!"; - if ($verbose) { - warning $msg; - } else { - debug $msg; - } - exit 1; - } + my $ret = syscall &SYS_unshare, $unshare_flags; + if ($ret == 0) { + exit 0; + } else { + my $msg = "unshare syscall failed: $!"; + if ($verbose) { + warning $msg; + } else { + debug $msg; + } + exit 1; + } } waitpid($pid, 0); if (($? >> 8) != 0) { - return 0; + return 0; } # if newuidmap and newgidmap exist, the exit status will be 1 when # executed without parameters system "newuidmap 2>/dev/null"; if (($? >> 8) != 1) { - if (($? >> 8) == 127) { - my $msg = "cannot find newuidmap"; - if ($verbose) { - warning $msg; - } else { - debug $msg; - } - } else { - my $msg = "newuidmap returned unknown exit status: $?"; - if ($verbose) { - warning $msg; - } else { - debug $msg; - } - } - return 0; + if (($? >> 8) == 127) { + my $msg = "cannot find newuidmap"; + if ($verbose) { + warning $msg; + } else { + debug $msg; + } + } else { + my $msg = "newuidmap returned unknown exit status: $?"; + if ($verbose) { + warning $msg; + } else { + debug $msg; + } + } + return 0; } system "newgidmap 2>/dev/null"; if (($? >> 8) != 1) { - if (($? >> 8) == 127) { - my $msg = "cannot find newgidmap"; - if ($verbose) { - warning $msg; - } else { - debug $msg; - } - } else { - my $msg = "newgidmap returned unknown exit status: $?"; - if ($verbose) { - warning $msg; - } else { - debug $msg; - } - } - return 0; + if (($? 
>> 8) == 127) { + my $msg = "cannot find newgidmap"; + if ($verbose) { + warning $msg; + } else { + debug $msg; + } + } else { + my $msg = "newgidmap returned unknown exit status: $?"; + if ($verbose) { + warning $msg; + } else { + debug $msg; + } + } + return 0; } return 1; } @@ -281,46 +281,46 @@ sub read_subuid_subgid() { my @result = (); if (! -e "/etc/subuid") { - warning "/etc/subuid doesn't exist"; - return; + warning "/etc/subuid doesn't exist"; + return; } if (! -r "/etc/subuid") { - warning "/etc/subuid is not readable"; - return; + warning "/etc/subuid is not readable"; + return; } open $fh, "<", "/etc/subuid" or error "cannot open /etc/subuid for reading: $!"; while (my $line = <$fh>) { - ($n, $subid, $num_subid) = split(/:/, $line, 3); - last if ($n eq $username); + ($n, $subid, $num_subid) = split(/:/, $line, 3); + last if ($n eq $username); } close $fh; push @result, ["u", 0, $subid, $num_subid]; if (scalar(@result) < 1) { - warning "/etc/subuid does not contain an entry for $username"; - return; + warning "/etc/subuid does not contain an entry for $username"; + return; } if (scalar(@result) > 1) { - warning "/etc/subuid contains multiple entries for $username"; - return; + warning "/etc/subuid contains multiple entries for $username"; + return; } open $fh, "<", "/etc/subgid" or error "cannot open /etc/subgid for reading: $!"; while (my $line = <$fh>) { - ($n, $subid, $num_subid) = split(/:/, $line, 3); - last if ($n eq $username); + ($n, $subid, $num_subid) = split(/:/, $line, 3); + last if ($n eq $username); } close $fh; push @result, ["g", 0, $subid, $num_subid]; if (scalar(@result) < 2) { - warning "/etc/subgid does not contain an entry for $username"; - return; + warning "/etc/subgid does not contain an entry for $username"; + return; } if (scalar(@result) > 2) { - warning "/etc/subgid contains multiple entries for $username"; - return; + warning "/etc/subgid contains multiple entries for $username"; + return; } return @result; @@ -376,138 +376,138 @@ sub get_unshare_cmd(&$) { my $unshare_flags = CLONE_NEWUSER | CLONE_NEWNS | CLONE_NEWPID | CLONE_NEWUTS | CLONE_NEWIPC; if (0) { - $unshare_flags |= CLONE_NEWNET; + $unshare_flags |= CLONE_NEWNET; } # fork a new process and let the child get unshare()ed # we don't want to unshare the parent process my $gcpid = fork() // error "fork() failed: $!"; if ($gcpid == 0) { - # Create a pipe for the parent process to signal the child process that it is - # done with calling unshare() so that the child can go ahead setting up - # uid_map and gid_map. - pipe my $rfh, my $wfh; - - # We have to do this dance with forking a process and then modifying the - # parent from the child because: - # - new[ug]idmap can only be called on a process id after that process has - # unshared the user namespace - # - a process looses its capabilities if it performs an execve() with nonzero - # user ids see the capabilities(7) man page for details. - # - a process that unshared the user namespace by default does not have the - # privileges to call new[ug]idmap on itself - # - # this also works the other way around (the child setting up a user namespace - # and being modified from the parent) but that way, the parent would have to - # stay around until the child exited (so a pid would be wasted). Additionally, - # that variant would require an additional pipe to let the parent signal the - # child that it is done with calling new[ug]idmap. The way it is done here, - # this signaling can instead be done by wait()-ing for the exit of the child. 
- my $ppid = $$; - my $cpid = fork() // error "fork() failed: $!"; - if ($cpid == 0) { - # child - - # Close the writing descriptor at our end of the pipe so that we - # see EOF when parent closes its descriptor. - close $wfh; - - # Wait for the parent process to finish its unshare() call by - # waiting for an EOF. - 0 == sysread $rfh, my $c, 1 or error "read() did not receive EOF"; - - # The program's new[ug]idmap have to be used because they are - # setuid root. These privileges are needed to map the ids from - # /etc/sub[ug]id to the user namespace set up by the parent. - # Without these privileges, only the id of the user itself can be - # mapped into the new namespace. - # - # Since new[ug]idmap is setuid root we also don't need to write - # "deny" to /proc/$$/setgroups beforehand (this is otherwise - # required for unprivileged processes trying to write to - # /proc/$$/gid_map since kernel version 3.19 for security reasons) - # and therefore the parent process keeps its ability to change its - # own group here. - # - # Since /proc/$ppid/[ug]id_map can only be written to once, - # respectively, instead of making multiple calls to new[ug]idmap, - # we assemble a command line that makes one call each. - my $uidmapcmd = ""; - my $gidmapcmd = ""; - foreach (@{$idmap}) { - my ($t, $hostid, $nsid, $range) = @{$_}; - if ($t ne "u" and $t ne "g" and $t ne "b") { - error "invalid idmap type: $t"; - } - if ($t eq "u" or $t eq "b") { - $uidmapcmd .= " $hostid $nsid $range"; - } - if ($t eq "g" or $t eq "b") { - $gidmapcmd .= " $hostid $nsid $range"; - } - } - my $idmapcmd = ''; - if ($uidmapcmd ne "") { - 0 == system "newuidmap $ppid $uidmapcmd" or error "newuidmap $ppid $uidmapcmd failed: $!"; - } - if ($gidmapcmd ne "") { - 0 == system "newgidmap $ppid $gidmapcmd" or error "newgidmap $ppid $gidmapcmd failed: $!"; - } - exit 0; - } - - # parent - - # After fork()-ing, the parent immediately calls unshare... - 0 == syscall &SYS_unshare, $unshare_flags or error "unshare() failed: $!"; - - # .. and then signals the child process that we are done with the - # unshare() call by sending an EOF. - close $wfh; - - # Wait for the child process to finish its setup by waiting for its - # exit. - $cpid == waitpid $cpid, 0 or error "waitpid() failed: $!"; - my $exit = $? >> 8; - if ($exit != 0) { - error "child had a non-zero exit status: $exit"; - } - - # Currently we are nobody (uid and gid are 65534). So we become root - # user and group instead. - # - # We are using direct syscalls instead of setting $(, $), $< and $> - # because then perl would do additional stuff which we don't need or - # want here, like checking /proc/sys/kernel/ngroups_max (which might - # not exist). It would also also call setgroups() in a way that makes - # the root user be part of the group unknown. - 0 == syscall &SYS_setgid, 0 or error "setgid failed: $!"; - 0 == syscall &SYS_setuid, 0 or error "setuid failed: $!"; - 0 == syscall &SYS_setgroups, 0, 0 or error "setgroups failed: $!"; - - if (1) { - # When the pid namespace is also unshared, then processes expect a - # master pid to always be alive within the namespace. To achieve - # this, we fork() here instead of exec() to always have one dummy - # process running as pid 1 inside the namespace. This is also what - # the unshare tool does when used with the --fork option. - # - # Otherwise, without a pid 1, new processes cannot be forked - # anymore after pid 1 finished. 
- my $cpid = fork() // error "fork() failed: $!"; - if ($cpid != 0) { - # The parent process will stay alive as pid 1 in this - # namespace until the child finishes executing. This is - # important because pid 1 must never die or otherwise nothing - # new can be forked. - $cpid == waitpid $cpid, 0 or error "waitpid() failed: $!"; - exit ($? >> 8); - } - } - - &{$cmd}(); - - exit 0; + # Create a pipe for the parent process to signal the child process that it is + # done with calling unshare() so that the child can go ahead setting up + # uid_map and gid_map. + pipe my $rfh, my $wfh; + + # We have to do this dance with forking a process and then modifying the + # parent from the child because: + # - new[ug]idmap can only be called on a process id after that process has + # unshared the user namespace + # - a process looses its capabilities if it performs an execve() with nonzero + # user ids see the capabilities(7) man page for details. + # - a process that unshared the user namespace by default does not have the + # privileges to call new[ug]idmap on itself + # + # this also works the other way around (the child setting up a user namespace + # and being modified from the parent) but that way, the parent would have to + # stay around until the child exited (so a pid would be wasted). Additionally, + # that variant would require an additional pipe to let the parent signal the + # child that it is done with calling new[ug]idmap. The way it is done here, + # this signaling can instead be done by wait()-ing for the exit of the child. + my $ppid = $$; + my $cpid = fork() // error "fork() failed: $!"; + if ($cpid == 0) { + # child + + # Close the writing descriptor at our end of the pipe so that we + # see EOF when parent closes its descriptor. + close $wfh; + + # Wait for the parent process to finish its unshare() call by + # waiting for an EOF. + 0 == sysread $rfh, my $c, 1 or error "read() did not receive EOF"; + + # The program's new[ug]idmap have to be used because they are + # setuid root. These privileges are needed to map the ids from + # /etc/sub[ug]id to the user namespace set up by the parent. + # Without these privileges, only the id of the user itself can be + # mapped into the new namespace. + # + # Since new[ug]idmap is setuid root we also don't need to write + # "deny" to /proc/$$/setgroups beforehand (this is otherwise + # required for unprivileged processes trying to write to + # /proc/$$/gid_map since kernel version 3.19 for security reasons) + # and therefore the parent process keeps its ability to change its + # own group here. + # + # Since /proc/$ppid/[ug]id_map can only be written to once, + # respectively, instead of making multiple calls to new[ug]idmap, + # we assemble a command line that makes one call each. + my $uidmapcmd = ""; + my $gidmapcmd = ""; + foreach (@{$idmap}) { + my ($t, $hostid, $nsid, $range) = @{$_}; + if ($t ne "u" and $t ne "g" and $t ne "b") { + error "invalid idmap type: $t"; + } + if ($t eq "u" or $t eq "b") { + $uidmapcmd .= " $hostid $nsid $range"; + } + if ($t eq "g" or $t eq "b") { + $gidmapcmd .= " $hostid $nsid $range"; + } + } + my $idmapcmd = ''; + if ($uidmapcmd ne "") { + 0 == system "newuidmap $ppid $uidmapcmd" or error "newuidmap $ppid $uidmapcmd failed: $!"; + } + if ($gidmapcmd ne "") { + 0 == system "newgidmap $ppid $gidmapcmd" or error "newgidmap $ppid $gidmapcmd failed: $!"; + } + exit 0; + } + + # parent + + # After fork()-ing, the parent immediately calls unshare... 
+ 0 == syscall &SYS_unshare, $unshare_flags or error "unshare() failed: $!"; + + # .. and then signals the child process that we are done with the + # unshare() call by sending an EOF. + close $wfh; + + # Wait for the child process to finish its setup by waiting for its + # exit. + $cpid == waitpid $cpid, 0 or error "waitpid() failed: $!"; + my $exit = $? >> 8; + if ($exit != 0) { + error "child had a non-zero exit status: $exit"; + } + + # Currently we are nobody (uid and gid are 65534). So we become root + # user and group instead. + # + # We are using direct syscalls instead of setting $(, $), $< and $> + # because then perl would do additional stuff which we don't need or + # want here, like checking /proc/sys/kernel/ngroups_max (which might + # not exist). It would also also call setgroups() in a way that makes + # the root user be part of the group unknown. + 0 == syscall &SYS_setgid, 0 or error "setgid failed: $!"; + 0 == syscall &SYS_setuid, 0 or error "setuid failed: $!"; + 0 == syscall &SYS_setgroups, 0, 0 or error "setgroups failed: $!"; + + if (1) { + # When the pid namespace is also unshared, then processes expect a + # master pid to always be alive within the namespace. To achieve + # this, we fork() here instead of exec() to always have one dummy + # process running as pid 1 inside the namespace. This is also what + # the unshare tool does when used with the --fork option. + # + # Otherwise, without a pid 1, new processes cannot be forked + # anymore after pid 1 finished. + my $cpid = fork() // error "fork() failed: $!"; + if ($cpid != 0) { + # The parent process will stay alive as pid 1 in this + # namespace until the child finishes executing. This is + # important because pid 1 must never die or otherwise nothing + # new can be forked. + $cpid == waitpid $cpid, 0 or error "waitpid() failed: $!"; + exit ($? >> 8); + } + } + + &{$cmd}(); + + exit 0; } # parent @@ -518,55 +518,55 @@ sub havemknod($) { my $root = shift; my $havemknod = 0; if (-e "$root/test-dev-null") { - error "/test-dev-null already exists"; + error "/test-dev-null already exists"; } TEST: { - # we fork so that we can read STDERR - my $pid = open my $fh, '-|' // error "failed to fork(): $!"; - if ($pid == 0) { - open(STDERR, '>&', STDOUT) or error "cannot open STDERR: $!"; - # we use mknod(1) instead of the system call because creating the - # right dev_t argument requires makedev(3) - exec 'mknod', "$root/test-dev-null", 'c', '1', '3'; - } - chomp (my $content = do { local $/; <$fh> }); - close $fh; - { - last TEST unless $? == 0 and $content eq ''; - last TEST unless -c "$root/test-dev-null"; - last TEST unless open my $fh, '>', "$root/test-dev-null"; - last TEST unless print $fh 'test'; - } - $havemknod = 1; + # we fork so that we can read STDERR + my $pid = open my $fh, '-|' // error "failed to fork(): $!"; + if ($pid == 0) { + open(STDERR, '>&', STDOUT) or error "cannot open STDERR: $!"; + # we use mknod(1) instead of the system call because creating the + # right dev_t argument requires makedev(3) + exec 'mknod', "$root/test-dev-null", 'c', '1', '3'; + } + chomp (my $content = do { local $/; <$fh> }); + close $fh; + { + last TEST unless $? 
== 0 and $content eq ''; + last TEST unless -c "$root/test-dev-null"; + last TEST unless open my $fh, '>', "$root/test-dev-null"; + last TEST unless print $fh 'test'; + } + $havemknod = 1; } if (-e "$root/test-dev-null") { - unlink "$root/test-dev-null" or error "cannot unlink /test-dev-null: $!"; + unlink "$root/test-dev-null" or error "cannot unlink /test-dev-null: $!"; } return $havemknod; } sub print_progress { if ($verbosity_level != 1) { - return; + return; } my $perc = shift; if (! -t STDERR) { - return; + return; } if ($perc eq "done") { - # \e[2K clears everything on the current line (i.e. the progress bar) - print STDERR "\e[2Kdone\n"; - return; + # \e[2K clears everything on the current line (i.e. the progress bar) + print STDERR "\e[2Kdone\n"; + return; } if ($perc >= 100) { - $perc = 100; + $perc = 100; } my $width = 50; my $num_x = int($perc*$width/100); my $bar = '=' x $num_x; if ($num_x != $width) { - $bar .= '>'; - $bar .= ' ' x ($width - $num_x - 1); + $bar .= '>'; + $bar .= ' ' x ($width - $num_x - 1); } printf STDERR "%6.2f [%s]\r", $perc, $bar; } @@ -576,7 +576,7 @@ sub run_progress { pipe my $rfh, my $wfh; my $got_signal = 0; my $ignore = sub { - info "run_progress() received signal $_[0]: waiting for child..."; + info "run_progress() received signal $_[0]: waiting for child..."; }; # delay signals so that we can fork and change behaviour of the signal @@ -587,33 +587,33 @@ sub run_progress { my $pid1 = open(my $pipe, '-|') // error "failed to fork(): $!"; if ($pid1 == 0) { - # child: default signal handlers - $SIG{'INT'} = 'DEFAULT'; - $SIG{'HUP'} = 'DEFAULT'; - $SIG{'PIPE'} = 'DEFAULT'; - $SIG{'TERM'} = 'DEFAULT'; - - # unblock all delayed signals (and possibly handle them) - POSIX::sigprocmask(SIG_UNBLOCK, $sigset) or error "Can't unblock signals: $!"; - - close $rfh; - # Unset the close-on-exec flag, so that the file descriptor does not - # get closed when we exec - my $flags = fcntl( $wfh, F_GETFD, 0 ) or error "fcntl F_GETFD: $!"; - fcntl($wfh, F_SETFD, $flags & ~FD_CLOEXEC ) or error "fcntl F_SETFD: $!"; - my $fd = fileno $wfh; - # redirect stderr to stdout so that we can capture it - open(STDERR, '>&', STDOUT) or error "cannot open STDOUT: $!"; - my @execargs = $get_exec->($fd); - # before apt 1.5, "apt-get update" attempted to chdir() into the - # working directory. This will fail if the current working directory - # is not accessible by the user (for example in unshare mode). See - # Debian bug #860738 - if (defined $chdir) { - chdir $chdir or error "failed chdir() to $chdir: $!"; - } - eval 'Devel::Cover::set_coverage("none")' if $is_covering; - exec { $execargs[0] } @execargs or error 'cannot exec() ' . (join ' ', @execargs); + # child: default signal handlers + $SIG{'INT'} = 'DEFAULT'; + $SIG{'HUP'} = 'DEFAULT'; + $SIG{'PIPE'} = 'DEFAULT'; + $SIG{'TERM'} = 'DEFAULT'; + + # unblock all delayed signals (and possibly handle them) + POSIX::sigprocmask(SIG_UNBLOCK, $sigset) or error "Can't unblock signals: $!"; + + close $rfh; + # Unset the close-on-exec flag, so that the file descriptor does not + # get closed when we exec + my $flags = fcntl( $wfh, F_GETFD, 0 ) or error "fcntl F_GETFD: $!"; + fcntl($wfh, F_SETFD, $flags & ~FD_CLOEXEC ) or error "fcntl F_SETFD: $!"; + my $fd = fileno $wfh; + # redirect stderr to stdout so that we can capture it + open(STDERR, '>&', STDOUT) or error "cannot open STDOUT: $!"; + my @execargs = $get_exec->($fd); + # before apt 1.5, "apt-get update" attempted to chdir() into the + # working directory. 
This will fail if the current working directory + # is not accessible by the user (for example in unshare mode). See + # Debian bug #860738 + if (defined $chdir) { + chdir $chdir or error "failed chdir() to $chdir: $!"; + } + eval 'Devel::Cover::set_coverage("none")' if $is_covering; + exec { $execargs[0] } @execargs or error 'cannot exec() ' . (join ' ', @execargs); } close $wfh; @@ -622,24 +622,24 @@ sub run_progress { # child will parse $rfh for the progress meter my $pid2 = fork() // error "failed to fork(): $!"; if ($pid2 == 0) { - # child: default signal handlers - $SIG{'INT'} = 'IGNORE'; - $SIG{'HUP'} = 'IGNORE'; - $SIG{'PIPE'} = 'IGNORE'; - $SIG{'TERM'} = 'IGNORE'; - - # unblock all delayed signals (and possibly handle them) - POSIX::sigprocmask(SIG_UNBLOCK, $sigset) or error "Can't unblock signals: $!"; - - print_progress 0.0; - while (my $line = <$rfh>) { - my $output = $line_handler->($line); - next unless $output; - print_progress $output; - } - print_progress "done"; - - exit 0; + # child: default signal handlers + $SIG{'INT'} = 'IGNORE'; + $SIG{'HUP'} = 'IGNORE'; + $SIG{'PIPE'} = 'IGNORE'; + $SIG{'TERM'} = 'IGNORE'; + + # unblock all delayed signals (and possibly handle them) + POSIX::sigprocmask(SIG_UNBLOCK, $sigset) or error "Can't unblock signals: $!"; + + print_progress 0.0; + while (my $line = <$rfh>) { + my $output = $line_handler->($line); + next unless $output; + print_progress $output; + } + print_progress "done"; + + exit 0; } # parent: ignore signals @@ -656,35 +656,35 @@ sub run_progress { my $output = ''; my $has_error = 0; while (my $line = <$pipe>) { - $has_error = $line_has_error->($line); - if ($verbosity_level >= 2) { - print STDERR $line; - } else { - # forward captured apt output - $output .= $line; - } + $has_error = $line_has_error->($line); + if ($verbosity_level >= 2) { + print STDERR $line; + } else { + # forward captured apt output + $output .= $line; + } } close($pipe); my $fail = 0; if ($? != 0 or $has_error) { - $fail = 1; + $fail = 1; } waitpid $pid2, 0; $? == 0 or error "progress parsing failed"; if ($got_signal) { - error "run_progress() received signal: $got_signal"; + error "run_progress() received signal: $got_signal"; } # only print failure after progress output finished or otherwise it # might interfere with the remaining output if ($fail) { - if ($verbosity_level >= 1) { - print STDERR $output; - } - error ((join ' ', $get_exec->('<$fd>')) . ' failed'); + if ($verbosity_level >= 1) { + print STDERR $output; + } + error ((join ' ', $get_exec->('<$fd>')) . 
' failed'); } } @@ -698,10 +698,10 @@ sub run_dpkg_progress { # number is twice the number of packages my $total = (scalar @debs) * 2; my $line_handler = sub { - if ($_[0] =~ /^processing: (install|configure): /) { - $num += 1; - } - return $num/$total*100; + if ($_[0] =~ /^processing: (install|configure): /) { + $num += 1; + } + return $num/$total*100; }; run_progress $get_exec, $line_handler, $line_has_error; } @@ -710,31 +710,31 @@ sub run_apt_progress { my $options = shift; my @debs = @{$options->{PKGS} // []}; my $get_exec = sub { - return ( - @{$options->{ARGV}}, - "-oAPT::Status-Fd=$_[0]", - # prevent apt from messing up the terminal and allow dpkg to - # receive SIGINT and quit immediately without waiting for - # maintainer script to finish - '-oDpkg::Use-Pty=false', - @debs - )}; + return ( + @{$options->{ARGV}}, + "-oAPT::Status-Fd=$_[0]", + # prevent apt from messing up the terminal and allow dpkg to + # receive SIGINT and quit immediately without waiting for + # maintainer script to finish + '-oDpkg::Use-Pty=false', + @debs + )}; my $line_has_error = sub { return 0; }; if ($options->{FIND_APT_WARNINGS}) { - $line_has_error = sub { - # apt-get doesn't report a non-zero exit if the update failed. - # Thus, we have to parse its output. See #778357, #776152, #696335 - # and #745735 - if ($_[0] =~ /^(W: |Err:)/) { - return 1; - } - return 0; - }; + $line_has_error = sub { + # apt-get doesn't report a non-zero exit if the update failed. + # Thus, we have to parse its output. See #778357, #776152, #696335 + # and #745735 + if ($_[0] =~ /^(W: |Err:)/) { + return 1; + } + return 0; + }; } my $line_handler = sub { - if ($_[0] =~ /(pmstatus|dlstatus):[^:]+:(\d+\.\d+):.*/) { - return $2; - } + if ($_[0] =~ /(pmstatus|dlstatus):[^:]+:(\d+\.\d+):.*/) { + return $2; + } }; run_progress $get_exec, $line_handler, $line_has_error, $options->{CHDIR}; } @@ -746,14 +746,14 @@ sub run_chroot(&$) { my @cleanup_tasks = (); my $cleanup = sub { - my $signal = $_[0]; - while (my $task = pop @cleanup_tasks) { - $task->(); - } - if ($signal) { - warning "pid $PID cought signal: $signal"; - exit 1; - } + my $signal = $_[0]; + while (my $task = pop @cleanup_tasks) { + $task->(); + } + if ($signal) { + warning "pid $PID cought signal: $signal"; + exit 1; + } }; local $SIG{INT} = $cleanup; @@ -762,182 +762,182 @@ sub run_chroot(&$) { local $SIG{TERM} = $cleanup; eval { - if (any { $_ eq $options->{mode} } ('root', 'unshare')) { - # if more than essential should be installed, make the system look - # more like a real one by creating or bind-mounting the device nodes - foreach my $file (@devfiles) { - my ($fname, $mode, $type, $linkname, $devmajor, $devminor) = @{$file}; - next if $fname eq './dev/'; - if ($type == 0) { # normal file - error "type 0 not implemented"; - } elsif ($type == 1) { # hardlink - error "type 1 not implemented"; - } elsif ($type == 2) { # symlink - if (!$options->{havemknod}) { - if ($options->{mode} eq 'fakechroot' and $linkname =~ /^\/proc/) { - # there is no /proc in fakechroot mode - next; - } - if (any { $_ eq $options->{mode} } ('root', 'unshare')) { - push @cleanup_tasks, sub { - unlink "$options->{root}/$fname" or warn "cannot unlink $fname: $!"; - } - } - symlink $linkname, "$options->{root}/$fname" or error "cannot create symlink $fname"; - } - } elsif ($type == 3 or $type == 4) { # character/block special - if (!$options->{havemknod}) { - open my $fh, '>', "$options->{root}/$fname" or error "cannot open $options->{root}/$fname: $!"; - close $fh; - if ($options->{mode} eq 
'unshare') { - push @cleanup_tasks, sub { - 0 == system('umount', '--no-mtab', "$options->{root}/$fname") or warn "umount $fname failed: $?"; - unlink "$options->{root}/$fname" or warn "cannot unlink $fname: $!"; - }; - } elsif ($options->{mode} eq 'root') { - push @cleanup_tasks, sub { - 0 == system('umount', "$options->{root}/$fname") or warn "umount failed: $?"; - unlink "$options->{root}/$fname" or warn "cannot unlink $fname: $!"; - }; - } else { - error "unknown mode: $options->{mode}"; - } - 0 == system('mount', '-o', 'bind', "/$fname", "$options->{root}/$fname") or error "mount $fname failed: $?"; - } - } elsif ($type == 5) { # directory - if (!$options->{havemknod}) { - if (any { $_ eq $options->{mode} } ('root', 'unshare')) { - push @cleanup_tasks, sub { - rmdir "$options->{root}/$fname" or warn "cannot rmdir $fname: $!"; - } - } - if (-e "$options->{root}/$fname") { - if (! -d "$options->{root}/$fname") { - error "$fname already exists but is not a directory"; - } - } else { - my $num_created = make_path "$options->{root}/$fname", {error => \my $err}; - if ($err && @$err) { - error (join "; ", (map {"cannot create " . (join ": ", %{$_})} @$err)); - } elsif ($num_created == 0) { - error "cannot create $options->{root}/$fname"; - } - } - chmod $mode, "$options->{root}/$fname" or error "cannot chmod $fname: $!"; - } - if ($options->{mode} eq 'unshare') { - push @cleanup_tasks, sub { - 0 == system('umount', '--no-mtab', "$options->{root}/$fname") or warn "umount $fname failed: $?"; - }; - } elsif ($options->{mode} eq 'root') { - push @cleanup_tasks, sub { - 0 == system('umount', "$options->{root}/$fname") or warn "umount $fname failed: $?"; - }; - } else { - error "unknown mode: $options->{mode}"; - } - 0 == system('mount', '-o', 'bind', "/$fname", "$options->{root}/$fname") or error "mount $fname failed: $?"; - } else { - error "unsupported type: $type"; - } - } - } elsif (any { $_ eq $options->{mode} } ('proot', 'fakechroot', 'chrootless')) { - # we cannot mount in fakechroot and proot mode - # in proot mode we have /dev bind-mounted already through --bind=/dev - } else { - error "unknown mode: $options->{mode}"; - } - # We can only mount /proc and /sys after extracting the essential - # set because if we mount it before, then base-files will not be able - # to extract those - if ($options->{mode} eq 'root') { - push @cleanup_tasks, sub { - 0 == system('umount', "$options->{root}/sys") or warn "umount /sys failed: $?"; - }; - 0 == system('mount', '-t', 'sysfs', '-o', 'nosuid,nodev,noexec', 'sys', "$options->{root}/sys") or error "mount /sys failed: $?"; - } elsif ($options->{mode} eq 'unshare') { - # naturally we have to clean up after ourselves in sudo mode where we - # do a real mount. But we also need to unmount in unshare mode because - # otherwise, even with the --one-file-system tar option, the - # permissions of the mount source will be stored and not the mount - # target (the directory) - push @cleanup_tasks, sub { - # since we cannot write to /etc/mtab we need --no-mtab - # unmounting /sys only seems to be successful with --lazy - 0 == system('umount', '--no-mtab', '--lazy', "$options->{root}/sys") or warn "umount /sys failed: $?"; - }; - # without the network namespace unshared, we cannot mount a new - # sysfs. Since we need network, we just bind-mount. 
- # - # we have to rbind because just using bind results in "wrong fs - # type, bad option, bad superblock" error - 0 == system('mount', '-o', 'rbind', '/sys', "$options->{root}/sys") or error "mount /sys failed: $?"; - } elsif (any { $_ eq $options->{mode} } ('proot', 'fakechroot', 'chrootless')) { - # we cannot mount in fakechroot and proot mode - # in proot mode we have /proc bind-mounted already through --bind=/proc - } else { - error "unknown mode: $options->{mode}"; - } - if ($options->{mode} eq 'root') { - push @cleanup_tasks, sub { - # some maintainer scripts mount additional stuff into /proc - # which we need to unmount beforehand - if (is_mountpoint("$options->{root}/proc/sys/fs/binfmt_misc")) { - 0 == system('umount', "$options->{root}/proc/sys/fs/binfmt_misc") or error "umount /proc/sys/fs/binfmt_misc failed: $?"; - } - 0 == system('umount', "$options->{root}/proc") or error "umount /proc failed: $?"; - }; - 0 == system('mount', '-t', 'proc', 'proc', "$options->{root}/proc") or error "mount /proc failed: $?"; - } elsif ($options->{mode} eq 'unshare') { - # naturally we have to clean up after ourselves in sudo mode where we - # do a real mount. But we also need to unmount in unshare mode because - # otherwise, even with the --one-file-system tar option, the - # permissions of the mount source will be stored and not the mount - # target (the directory) - push @cleanup_tasks, sub { - # since we cannot write to /etc/mtab we need --no-mtab - 0 == system('umount', '--no-mtab', "$options->{root}/proc") or error "umount /proc failed: $?"; - }; - 0 == system('mount', '-t', 'proc', 'proc', "$options->{root}/proc") or error "mount /proc failed: $?"; - } elsif (any { $_ eq $options->{mode} } ('proot', 'fakechroot', 'chrootless')) { - # we cannot mount in fakechroot and proot mode - # in proot mode we have /sys bind-mounted already through --bind=/sys - } else { - error "unknown mode: $options->{mode}"; - } - - # prevent daemons from starting - # the directory might not exist in custom variant, for example - if (-d "$options->{root}/usr/sbin/") { - open my $fh, '>', "$options->{root}/usr/sbin/policy-rc.d" or error "cannot open policy-rc.d: $!"; - print $fh "#!/bin/sh\n"; - print $fh "exit 101\n"; - close $fh; - chmod 0755, "$options->{root}/usr/sbin/policy-rc.d" or error "cannot chmod policy-rc.d: $!"; - } - - # the file might not exist if it was removed in a hook - if (-e "$options->{root}/sbin/start-stop-daemon") { - if (-e "$options->{root}/sbin/start-stop-daemon.REAL") { - error "$options->{root}/sbin/start-stop-daemon.REAL already exists"; - } - move("$options->{root}/sbin/start-stop-daemon", "$options->{root}/sbin/start-stop-daemon.REAL") or error "cannot move start-stop-daemon: $!"; - open my $fh, '>', "$options->{root}/sbin/start-stop-daemon" or error "cannot open start-stop-daemon: $!"; - print $fh "#!/bin/sh\n"; - print $fh "echo \"Warning: Fake start-stop-daemon called, doing nothing\">&2\n"; - close $fh; - chmod 0755, "$options->{root}/sbin/start-stop-daemon" or error "cannot chmod start-stop-daemon: $!"; - } - - &{$cmd}(); - - # cleanup - if (-e "$options->{root}/sbin/start-stop-daemon.REAL") { - move("$options->{root}/sbin/start-stop-daemon.REAL", "$options->{root}/sbin/start-stop-daemon") or error "cannot move start-stop-daemon: $!"; - } - if (-e "$options->{root}/usr/sbin/policy-rc.d") { - unlink "$options->{root}/usr/sbin/policy-rc.d" or error "cannot unlink policy-rc.d: $!"; - } + if (any { $_ eq $options->{mode} } ('root', 'unshare')) { + # if more than essential should 
be installed, make the system look + # more like a real one by creating or bind-mounting the device nodes + foreach my $file (@devfiles) { + my ($fname, $mode, $type, $linkname, $devmajor, $devminor) = @{$file}; + next if $fname eq './dev/'; + if ($type == 0) { # normal file + error "type 0 not implemented"; + } elsif ($type == 1) { # hardlink + error "type 1 not implemented"; + } elsif ($type == 2) { # symlink + if (!$options->{havemknod}) { + if ($options->{mode} eq 'fakechroot' and $linkname =~ /^\/proc/) { + # there is no /proc in fakechroot mode + next; + } + if (any { $_ eq $options->{mode} } ('root', 'unshare')) { + push @cleanup_tasks, sub { + unlink "$options->{root}/$fname" or warn "cannot unlink $fname: $!"; + } + } + symlink $linkname, "$options->{root}/$fname" or error "cannot create symlink $fname"; + } + } elsif ($type == 3 or $type == 4) { # character/block special + if (!$options->{havemknod}) { + open my $fh, '>', "$options->{root}/$fname" or error "cannot open $options->{root}/$fname: $!"; + close $fh; + if ($options->{mode} eq 'unshare') { + push @cleanup_tasks, sub { + 0 == system('umount', '--no-mtab', "$options->{root}/$fname") or warn "umount $fname failed: $?"; + unlink "$options->{root}/$fname" or warn "cannot unlink $fname: $!"; + }; + } elsif ($options->{mode} eq 'root') { + push @cleanup_tasks, sub { + 0 == system('umount', "$options->{root}/$fname") or warn "umount failed: $?"; + unlink "$options->{root}/$fname" or warn "cannot unlink $fname: $!"; + }; + } else { + error "unknown mode: $options->{mode}"; + } + 0 == system('mount', '-o', 'bind', "/$fname", "$options->{root}/$fname") or error "mount $fname failed: $?"; + } + } elsif ($type == 5) { # directory + if (!$options->{havemknod}) { + if (any { $_ eq $options->{mode} } ('root', 'unshare')) { + push @cleanup_tasks, sub { + rmdir "$options->{root}/$fname" or warn "cannot rmdir $fname: $!"; + } + } + if (-e "$options->{root}/$fname") { + if (! -d "$options->{root}/$fname") { + error "$fname already exists but is not a directory"; + } + } else { + my $num_created = make_path "$options->{root}/$fname", {error => \my $err}; + if ($err && @$err) { + error (join "; ", (map {"cannot create " . 
(join ": ", %{$_})} @$err)); + } elsif ($num_created == 0) { + error "cannot create $options->{root}/$fname"; + } + } + chmod $mode, "$options->{root}/$fname" or error "cannot chmod $fname: $!"; + } + if ($options->{mode} eq 'unshare') { + push @cleanup_tasks, sub { + 0 == system('umount', '--no-mtab', "$options->{root}/$fname") or warn "umount $fname failed: $?"; + }; + } elsif ($options->{mode} eq 'root') { + push @cleanup_tasks, sub { + 0 == system('umount', "$options->{root}/$fname") or warn "umount $fname failed: $?"; + }; + } else { + error "unknown mode: $options->{mode}"; + } + 0 == system('mount', '-o', 'bind', "/$fname", "$options->{root}/$fname") or error "mount $fname failed: $?"; + } else { + error "unsupported type: $type"; + } + } + } elsif (any { $_ eq $options->{mode} } ('proot', 'fakechroot', 'chrootless')) { + # we cannot mount in fakechroot and proot mode + # in proot mode we have /dev bind-mounted already through --bind=/dev + } else { + error "unknown mode: $options->{mode}"; + } + # We can only mount /proc and /sys after extracting the essential + # set because if we mount it before, then base-files will not be able + # to extract those + if ($options->{mode} eq 'root') { + push @cleanup_tasks, sub { + 0 == system('umount', "$options->{root}/sys") or warn "umount /sys failed: $?"; + }; + 0 == system('mount', '-t', 'sysfs', '-o', 'nosuid,nodev,noexec', 'sys', "$options->{root}/sys") or error "mount /sys failed: $?"; + } elsif ($options->{mode} eq 'unshare') { + # naturally we have to clean up after ourselves in sudo mode where we + # do a real mount. But we also need to unmount in unshare mode because + # otherwise, even with the --one-file-system tar option, the + # permissions of the mount source will be stored and not the mount + # target (the directory) + push @cleanup_tasks, sub { + # since we cannot write to /etc/mtab we need --no-mtab + # unmounting /sys only seems to be successful with --lazy + 0 == system('umount', '--no-mtab', '--lazy', "$options->{root}/sys") or warn "umount /sys failed: $?"; + }; + # without the network namespace unshared, we cannot mount a new + # sysfs. Since we need network, we just bind-mount. + # + # we have to rbind because just using bind results in "wrong fs + # type, bad option, bad superblock" error + 0 == system('mount', '-o', 'rbind', '/sys', "$options->{root}/sys") or error "mount /sys failed: $?"; + } elsif (any { $_ eq $options->{mode} } ('proot', 'fakechroot', 'chrootless')) { + # we cannot mount in fakechroot and proot mode + # in proot mode we have /proc bind-mounted already through --bind=/proc + } else { + error "unknown mode: $options->{mode}"; + } + if ($options->{mode} eq 'root') { + push @cleanup_tasks, sub { + # some maintainer scripts mount additional stuff into /proc + # which we need to unmount beforehand + if (is_mountpoint("$options->{root}/proc/sys/fs/binfmt_misc")) { + 0 == system('umount', "$options->{root}/proc/sys/fs/binfmt_misc") or error "umount /proc/sys/fs/binfmt_misc failed: $?"; + } + 0 == system('umount', "$options->{root}/proc") or error "umount /proc failed: $?"; + }; + 0 == system('mount', '-t', 'proc', 'proc', "$options->{root}/proc") or error "mount /proc failed: $?"; + } elsif ($options->{mode} eq 'unshare') { + # naturally we have to clean up after ourselves in sudo mode where we + # do a real mount. 
But we also need to unmount in unshare mode because + # otherwise, even with the --one-file-system tar option, the + # permissions of the mount source will be stored and not the mount + # target (the directory) + push @cleanup_tasks, sub { + # since we cannot write to /etc/mtab we need --no-mtab + 0 == system('umount', '--no-mtab', "$options->{root}/proc") or error "umount /proc failed: $?"; + }; + 0 == system('mount', '-t', 'proc', 'proc', "$options->{root}/proc") or error "mount /proc failed: $?"; + } elsif (any { $_ eq $options->{mode} } ('proot', 'fakechroot', 'chrootless')) { + # we cannot mount in fakechroot and proot mode + # in proot mode we have /sys bind-mounted already through --bind=/sys + } else { + error "unknown mode: $options->{mode}"; + } + + # prevent daemons from starting + # the directory might not exist in custom variant, for example + if (-d "$options->{root}/usr/sbin/") { + open my $fh, '>', "$options->{root}/usr/sbin/policy-rc.d" or error "cannot open policy-rc.d: $!"; + print $fh "#!/bin/sh\n"; + print $fh "exit 101\n"; + close $fh; + chmod 0755, "$options->{root}/usr/sbin/policy-rc.d" or error "cannot chmod policy-rc.d: $!"; + } + + # the file might not exist if it was removed in a hook + if (-e "$options->{root}/sbin/start-stop-daemon") { + if (-e "$options->{root}/sbin/start-stop-daemon.REAL") { + error "$options->{root}/sbin/start-stop-daemon.REAL already exists"; + } + move("$options->{root}/sbin/start-stop-daemon", "$options->{root}/sbin/start-stop-daemon.REAL") or error "cannot move start-stop-daemon: $!"; + open my $fh, '>', "$options->{root}/sbin/start-stop-daemon" or error "cannot open start-stop-daemon: $!"; + print $fh "#!/bin/sh\n"; + print $fh "echo \"Warning: Fake start-stop-daemon called, doing nothing\">&2\n"; + close $fh; + chmod 0755, "$options->{root}/sbin/start-stop-daemon" or error "cannot chmod start-stop-daemon: $!"; + } + + &{$cmd}(); + + # cleanup + if (-e "$options->{root}/sbin/start-stop-daemon.REAL") { + move("$options->{root}/sbin/start-stop-daemon.REAL", "$options->{root}/sbin/start-stop-daemon") or error "cannot move start-stop-daemon: $!"; + } + if (-e "$options->{root}/usr/sbin/policy-rc.d") { + unlink "$options->{root}/usr/sbin/policy-rc.d" or error "cannot unlink policy-rc.d: $!"; + } }; @@ -947,7 +947,7 @@ sub run_chroot(&$) { $cleanup->(0); if ($error) { - error "run_chroot failed: $error"; + error "run_chroot failed: $error"; } } @@ -956,59 +956,59 @@ sub run_hooks($$) { my $options = shift; if (scalar @{$options->{"${name}_hook"}} == 0) { - return; + return; } my $runner = sub { - foreach my $script (@{$options->{"${name}_hook"}}) { - if ($script =~ /^(copy-in|copy-out|tar-in|tar-out|upload|download) /) { - info "running special hook: $script"; - if (any { $_ eq $options->{variant} } ('extract', 'custom') - and any { $_ eq $options->{mode} } ('fakechroot', 'proot') - and $name ne 'setup') { - info "the copy-in, copy-out, tar-in and tar-out commands in fakechroot mode or proot mode might fail in extract and custom variants because there might be no tar inside the chroot"; - } - - my $pid = fork() // error "fork() failed: $!"; - if ($pid == 0) { - # whatever the script writes on stdout is sent to the - # socket - # whatever is written to the socket, send to stdin - open(STDOUT, '>&', $options->{hooksock}) or error "cannot open STDOUT: $!"; - open(STDIN, '<&', $options->{hooksock}) or error "cannot open STDIN: $!"; - - # we execute ourselves under sh to avoid having to - # implement a clever parser of the quoting used in $script 
- # for the filenames - my $prefix = ""; - if($is_covering) { - $prefix = "$EXECUTABLE_NAME -MDevel::Cover=-silent,-nogcov "; - } - exec 'sh', '-c', "$prefix$PROGRAM_NAME --hook-helper \"\$1\" \"\$2\" \"\$3\" \"\$4\" \"\$5\" $script", - 'exec', $options->{root}, $options->{mode}, $name, (defined $options->{qemu} ? "qemu-$options->{qemu}" : 'env', $verbosity_level); - } - waitpid($pid, 0); - $? == 0 or error "special hook failed with exit code $?"; - } elsif ( -x $script || $script !~ m/[^\w@\%+=:,.\/-]/a) { - info "running --$name-hook directly: $script $options->{root}"; - # execute it directly if it's an executable file - # or if it there are no shell metacharacters - # (the /a regex modifier makes \w match only ASCII) - 0 == system($script, $options->{root}) or error "command failed: $script"; - } else { - info "running --$name-hook in shell: sh -c '$script' exec $options->{root}"; - # otherwise, wrap everything in sh -c - 0 == system('sh', '-c', $script, 'exec', $options->{root}) or error "command failed: $script"; - } - } + foreach my $script (@{$options->{"${name}_hook"}}) { + if ($script =~ /^(copy-in|copy-out|tar-in|tar-out|upload|download) /) { + info "running special hook: $script"; + if (any { $_ eq $options->{variant} } ('extract', 'custom') + and any { $_ eq $options->{mode} } ('fakechroot', 'proot') + and $name ne 'setup') { + info "the copy-in, copy-out, tar-in and tar-out commands in fakechroot mode or proot mode might fail in extract and custom variants because there might be no tar inside the chroot"; + } + + my $pid = fork() // error "fork() failed: $!"; + if ($pid == 0) { + # whatever the script writes on stdout is sent to the + # socket + # whatever is written to the socket, send to stdin + open(STDOUT, '>&', $options->{hooksock}) or error "cannot open STDOUT: $!"; + open(STDIN, '<&', $options->{hooksock}) or error "cannot open STDIN: $!"; + + # we execute ourselves under sh to avoid having to + # implement a clever parser of the quoting used in $script + # for the filenames + my $prefix = ""; + if($is_covering) { + $prefix = "$EXECUTABLE_NAME -MDevel::Cover=-silent,-nogcov "; + } + exec 'sh', '-c', "$prefix$PROGRAM_NAME --hook-helper \"\$1\" \"\$2\" \"\$3\" \"\$4\" \"\$5\" $script", + 'exec', $options->{root}, $options->{mode}, $name, (defined $options->{qemu} ? "qemu-$options->{qemu}" : 'env', $verbosity_level); + } + waitpid($pid, 0); + $? 
== 0 or error "special hook failed with exit code $?"; + } elsif ( -x $script || $script !~ m/[^\w@\%+=:,.\/-]/a) { + info "running --$name-hook directly: $script $options->{root}"; + # execute it directly if it's an executable file + # or if it there are no shell metacharacters + # (the /a regex modifier makes \w match only ASCII) + 0 == system($script, $options->{root}) or error "command failed: $script"; + } else { + info "running --$name-hook in shell: sh -c '$script' exec $options->{root}"; + # otherwise, wrap everything in sh -c + 0 == system('sh', '-c', $script, 'exec', $options->{root}) or error "command failed: $script"; + } + } }; if ($name eq 'setup') { - # execute directly without mounting anything (the mount points do not - # exist yet) - &{$runner}(); + # execute directly without mounting anything (the mount points do not + # exist yet) + &{$runner}(); } else { - run_chroot \&$runner, $options; + run_chroot \&$runner, $options; } } @@ -1017,19 +1017,19 @@ sub setup { my $options = shift; foreach my $key (sort keys %{$options}) { - my $value = $options->{$key}; - if (!defined $value) { - next; - } - if (ref $value eq '') { - debug "$key: $options->{$key}"; - } elsif (ref $value eq 'ARRAY') { - debug "$key: [" . (join ', ', @{$value}) . "]"; - } elsif (ref $value eq 'GLOB') { - debug "$key: GLOB"; - } else { - error "unknown type for key $key: " . (ref $value); - } + my $value = $options->{$key}; + if (!defined $value) { + next; + } + if (ref $value eq '') { + debug "$key: $options->{$key}"; + } elsif (ref $value eq 'ARRAY') { + debug "$key: [" . (join ', ', @{$value}) . "]"; + } elsif (ref $value eq 'GLOB') { + debug "$key: GLOB"; + } else { + error "unknown type for key $key: " . (ref $value); + } } my ($conf, $tmpfile) = tempfile(UNLINK => 1) or error "cannot open apt.conf: $!"; @@ -1037,13 +1037,13 @@ sub setup { # the host system might have configured additional architectures # force only the native architecture if (scalar @{$options->{foreignarchs}} > 0) { - print $conf "Apt::Architectures { \"$options->{nativearch}\"; "; - foreach my $arch (@{$options->{foreignarchs}}) { - print $conf "\"$arch\"; "; - } - print $conf "};\n"; + print $conf "Apt::Architectures { \"$options->{nativearch}\"; "; + foreach my $arch (@{$options->{foreignarchs}}) { + print $conf "\"$arch\"; "; + } + print $conf "};\n"; } else { - print $conf "Apt::Architectures \"$options->{nativearch}\";\n"; + print $conf "Apt::Architectures \"$options->{nativearch}\";\n"; } print $conf "Dir \"$options->{root}\";\n"; # not needed anymore for apt 1.3 and newer @@ -1052,47 +1052,47 @@ sub setup { print $conf "Dir::Etc::Trusted \"$options->{apttrusted}\";\n"; print $conf "Dir::Etc::TrustedParts \"$options->{apttrustedparts}\";\n"; if ($options->{variant} ne 'apt') { - # apt considers itself essential. Thus, when generating an EDSP - # document for an external solver, it will add the Essential:yes field - # to the apt package stanza. This is unnecessary for any other variant - # than 'apt' because in all other variants we compile the set of - # packages we consider essential ourselves and for the 'essential' - # variant it would even be wrong to add apt. This workaround is only - # needed when apt is used with an external solver but doesn't hurt - # otherwise and we don't have a good way to figure out whether apt is - # using an external solver or not short of parsing the --aptopt - # options. - print $conf "pkgCacheGen::ForceEssential \",\";\n"; + # apt considers itself essential. 
Thus, when generating an EDSP + # document for an external solver, it will add the Essential:yes field + # to the apt package stanza. This is unnecessary for any other variant + # than 'apt' because in all other variants we compile the set of + # packages we consider essential ourselves and for the 'essential' + # variant it would even be wrong to add apt. This workaround is only + # needed when apt is used with an external solver but doesn't hurt + # otherwise and we don't have a good way to figure out whether apt is + # using an external solver or not short of parsing the --aptopt + # options. + print $conf "pkgCacheGen::ForceEssential \",\";\n"; } close $conf; { - my @directories = ('/etc/apt/apt.conf.d', '/etc/apt/sources.list.d', - '/etc/apt/preferences.d', '/var/cache/apt', - '/var/lib/apt/lists/partial', '/var/lib/dpkg', - '/etc/dpkg/dpkg.cfg.d/'); - # if dpkg and apt operate from the outside we need some more - # directories because dpkg and apt might not even be installed inside - # the chroot - if ($options->{mode} eq 'chrootless') { - push @directories, ('/var/log/apt', '/var/lib/dpkg/triggers', - '/var/lib/dpkg/info', '/var/lib/dpkg/alternatives', - '/var/lib/dpkg/updates'); - } - foreach my $dir (@directories) { - if (-e "$options->{root}/$dir") { - if (! -d "$options->{root}/$dir") { - error "$dir already exists but is not a directory"; - } - } else { - my $num_created = make_path "$options->{root}/$dir", {error => \my $err}; - if ($err && @$err) { - error (join "; ", (map {"cannot create " . (join ": ", %{$_})} @$err)); - } elsif ($num_created == 0) { - error "cannot create $options->{root}/$dir"; - } - } - } + my @directories = ('/etc/apt/apt.conf.d', '/etc/apt/sources.list.d', + '/etc/apt/preferences.d', '/var/cache/apt', + '/var/lib/apt/lists/partial', '/var/lib/dpkg', + '/etc/dpkg/dpkg.cfg.d/'); + # if dpkg and apt operate from the outside we need some more + # directories because dpkg and apt might not even be installed inside + # the chroot + if ($options->{mode} eq 'chrootless') { + push @directories, ('/var/log/apt', '/var/lib/dpkg/triggers', + '/var/lib/dpkg/info', '/var/lib/dpkg/alternatives', + '/var/lib/dpkg/updates'); + } + foreach my $dir (@directories) { + if (-e "$options->{root}/$dir") { + if (! -d "$options->{root}/$dir") { + error "$dir already exists but is not a directory"; + } + } else { + my $num_created = make_path "$options->{root}/$dir", {error => \my $err}; + if ($err && @$err) { + error (join "; ", (map {"cannot create " . (join ": ", %{$_})} @$err)); + } elsif ($num_created == 0) { + error "cannot create $options->{root}/$dir"; + } + } + } } # We put certain configuration items in their own configuration file @@ -1101,30 +1101,30 @@ sub setup { # The config filename is chosen such that any settings in it will be # overridden by what the user specified with --aptopt. 
{ - open my $fh, '>', "$options->{root}/etc/apt/apt.conf.d/00mmdebstrap" or error "cannot open /etc/apt/apt.conf.d/00mmdebstrap: $!"; - print $fh "Apt::Install-Recommends false;\n"; - print $fh "Acquire::Languages \"none\";\n"; - close $fh; + open my $fh, '>', "$options->{root}/etc/apt/apt.conf.d/00mmdebstrap" or error "cannot open /etc/apt/apt.conf.d/00mmdebstrap: $!"; + print $fh "Apt::Install-Recommends false;\n"; + print $fh "Acquire::Languages \"none\";\n"; + close $fh; } { - open my $fh, '>', "$options->{root}/var/lib/dpkg/status" or error "failed to open(): $!"; - close $fh; + open my $fh, '>', "$options->{root}/var/lib/dpkg/status" or error "failed to open(): $!"; + close $fh; } # /var/lib/dpkg/available is required to exist or otherwise package # removals will fail { - open my $fh, '>', "$options->{root}/var/lib/dpkg/available" or error "failed to open(): $!"; - close $fh; + open my $fh, '>', "$options->{root}/var/lib/dpkg/available" or error "failed to open(): $!"; + close $fh; } # /var/lib/dpkg/cmethopt is used by dselect # see #930788 { - open my $fh, '>', "$options->{root}/var/lib/dpkg/cmethopt" or error "failed to open(): $!"; - print $fh "apt apt\n"; - close $fh; + open my $fh, '>', "$options->{root}/var/lib/dpkg/cmethopt" or error "failed to open(): $!"; + print $fh "apt apt\n"; + close $fh; } # we create /var/lib/dpkg/arch inside the chroot either if there is more @@ -1133,54 +1133,54 @@ sub setup { # architecture outside the chroot. chomp (my $hostarch = `dpkg --print-architecture`); if (scalar @{$options->{foreignarchs}} > 0 or ( - $options->{mode} eq 'chrootless' and $hostarch ne $options->{nativearch})) { - open my $fh, '>', "$options->{root}/var/lib/dpkg/arch" or error "cannot open /var/lib/dpkg/arch: $!"; - print $fh "$options->{nativearch}\n"; - foreach my $arch (@{$options->{foreignarchs}}) { - print $fh "$arch\n"; - } - close $fh; + $options->{mode} eq 'chrootless' and $hostarch ne $options->{nativearch})) { + open my $fh, '>', "$options->{root}/var/lib/dpkg/arch" or error "cannot open /var/lib/dpkg/arch: $!"; + print $fh "$options->{nativearch}\n"; + foreach my $arch (@{$options->{foreignarchs}}) { + print $fh "$arch\n"; + } + close $fh; } if (scalar @{$options->{aptopts}} > 0) { - open my $fh, '>', "$options->{root}/etc/apt/apt.conf.d/99mmdebstrap" or error "cannot open /etc/apt/apt.conf.d/99mmdebstrap: $!"; - foreach my $opt (@{$options->{aptopts}}) { - if (-r $opt) { - # flush handle because copy() uses syswrite() which bypasses - # buffered IO - $fh->flush(); - copy $opt, $fh or error "cannot copy $opt: $!"; - } else { - print $fh $opt; - if ($opt !~ /;$/) { - print $fh ';'; - } - if ($opt !~ /\n$/) { - print $fh "\n"; - } - } - } - close $fh; + open my $fh, '>', "$options->{root}/etc/apt/apt.conf.d/99mmdebstrap" or error "cannot open /etc/apt/apt.conf.d/99mmdebstrap: $!"; + foreach my $opt (@{$options->{aptopts}}) { + if (-r $opt) { + # flush handle because copy() uses syswrite() which bypasses + # buffered IO + $fh->flush(); + copy $opt, $fh or error "cannot copy $opt: $!"; + } else { + print $fh $opt; + if ($opt !~ /;$/) { + print $fh ';'; + } + if ($opt !~ /\n$/) { + print $fh "\n"; + } + } + } + close $fh; } if (scalar @{$options->{dpkgopts}} > 0) { - # FIXME: in chrootless mode, dpkg will only read the configuration - # from the host - open my $fh, '>', "$options->{root}/etc/dpkg/dpkg.cfg.d/99mmdebstrap" or error "cannot open /etc/dpkg/dpkg.cfg.d/99mmdebstrap: $!"; - foreach my $opt (@{$options->{dpkgopts}}) { - if (-r $opt) { - # flush handle because 
copy() uses syswrite() which bypasses - # buffered IO - $fh->flush(); - copy $opt, $fh or error "cannot copy $opt: $!"; - } else { - print $fh $opt; - if ($opt !~ /\n$/) { - print $fh "\n"; - } - } - } - close $fh; + # FIXME: in chrootless mode, dpkg will only read the configuration + # from the host + open my $fh, '>', "$options->{root}/etc/dpkg/dpkg.cfg.d/99mmdebstrap" or error "cannot open /etc/dpkg/dpkg.cfg.d/99mmdebstrap: $!"; + foreach my $opt (@{$options->{dpkgopts}}) { + if (-r $opt) { + # flush handle because copy() uses syswrite() which bypasses + # buffered IO + $fh->flush(); + copy $opt, $fh or error "cannot copy $opt: $!"; + } else { + print $fh $opt; + if ($opt !~ /\n$/) { + print $fh "\n"; + } + } + } + close $fh; } ## setup merged usr @@ -1191,67 +1191,67 @@ sub setup { #} { - open my $fh, '>', "$options->{root}/etc/fstab" or error "cannot open fstab: $!"; - print $fh "# UNCONFIGURED FSTAB FOR BASE SYSTEM\n"; - close $fh; - chmod 0644, "$options->{root}/etc/fstab" or error "cannot chmod fstab: $!"; + open my $fh, '>', "$options->{root}/etc/fstab" or error "cannot open fstab: $!"; + print $fh "# UNCONFIGURED FSTAB FOR BASE SYSTEM\n"; + close $fh; + chmod 0644, "$options->{root}/etc/fstab" or error "cannot chmod fstab: $!"; } # write /etc/apt/sources.list { - open my $fh, '>', "$options->{root}/etc/apt/sources.list" or error "cannot open /etc/apt/sources.list: $!"; - print $fh $options->{sourceslist}; - close $fh; + open my $fh, '>', "$options->{root}/etc/apt/sources.list" or error "cannot open /etc/apt/sources.list: $!"; + print $fh $options->{sourceslist}; + close $fh; } # allow network access from within if (-e "/etc/resolv.conf") { - copy("/etc/resolv.conf", "$options->{root}/etc/resolv.conf") or error "cannot copy /etc/resolv.conf: $!"; + copy("/etc/resolv.conf", "$options->{root}/etc/resolv.conf") or error "cannot copy /etc/resolv.conf: $!"; } else { - warning("Host system does not have a /etc/resolv.conf to copy into the rootfs.") + warning("Host system does not have a /etc/resolv.conf to copy into the rootfs.") } if (-e "/etc/hostname") { - copy("/etc/hostname", "$options->{root}/etc/hostname") or error "cannot copy /etc/hostname: $!"; + copy("/etc/hostname", "$options->{root}/etc/hostname") or error "cannot copy /etc/hostname: $!"; } else { - warning("Host system does not have a /etc/hostname to copy into the rootfs.") + warning("Host system does not have a /etc/hostname to copy into the rootfs.") } if ($options->{havemknod}) { - foreach my $file (@devfiles) { - my ($fname, $mode, $type, $linkname, $devmajor, $devminor) = @{$file}; - if ($type == 0) { # normal file - error "type 0 not implemented"; - } elsif ($type == 1) { # hardlink - error "type 1 not implemented"; - } elsif ($type == 2) { # symlink - if ($options->{mode} eq 'fakechroot' and $linkname =~ /^\/proc/) { - # there is no /proc in fakechroot mode - next; - } - symlink $linkname, "$options->{root}/$fname" or error "cannot create symlink $fname"; - next; # chmod cannot work on symlinks - } elsif ($type == 3) { # character special - 0 == system('mknod', "$options->{root}/$fname", 'c', $devmajor, $devminor) or error "mknod failed: $?"; - } elsif ($type == 4) { # block special - 0 == system('mknod', "$options->{root}/$fname", 'b', $devmajor, $devminor) or error "mknod failed: $?"; - } elsif ($type == 5) { # directory - if (-e "$options->{root}/$fname") { - if (! 
-d "$options->{root}/$fname") { - error "$fname already exists but is not a directory"; - } - } else { - my $num_created = make_path "$options->{root}/$fname", {error => \my $err}; - if ($err && @$err) { - error (join "; ", (map {"cannot create " . (join ": ", %{$_})} @$err)); - } elsif ($num_created == 0) { - error "cannot create $options->{root}/$fname"; - } - } - } else { - error "unsupported type: $type"; - } - chmod $mode, "$options->{root}/$fname" or error "cannot chmod $fname: $!"; - } + foreach my $file (@devfiles) { + my ($fname, $mode, $type, $linkname, $devmajor, $devminor) = @{$file}; + if ($type == 0) { # normal file + error "type 0 not implemented"; + } elsif ($type == 1) { # hardlink + error "type 1 not implemented"; + } elsif ($type == 2) { # symlink + if ($options->{mode} eq 'fakechroot' and $linkname =~ /^\/proc/) { + # there is no /proc in fakechroot mode + next; + } + symlink $linkname, "$options->{root}/$fname" or error "cannot create symlink $fname"; + next; # chmod cannot work on symlinks + } elsif ($type == 3) { # character special + 0 == system('mknod', "$options->{root}/$fname", 'c', $devmajor, $devminor) or error "mknod failed: $?"; + } elsif ($type == 4) { # block special + 0 == system('mknod', "$options->{root}/$fname", 'b', $devmajor, $devminor) or error "mknod failed: $?"; + } elsif ($type == 5) { # directory + if (-e "$options->{root}/$fname") { + if (! -d "$options->{root}/$fname") { + error "$fname already exists but is not a directory"; + } + } else { + my $num_created = make_path "$options->{root}/$fname", {error => \my $err}; + if ($err && @$err) { + error (join "; ", (map {"cannot create " . (join ": ", %{$_})} @$err)); + } elsif ($num_created == 0) { + error "cannot create $options->{root}/$fname"; + } + } + } else { + error "unsupported type: $type"; + } + chmod $mode, "$options->{root}/$fname" or error "cannot chmod $fname: $!"; + } } # we tell apt about the configuration via a config file passed via the @@ -1266,20 +1266,20 @@ sub setup { # not have permissions to read the root directory. In that case, we have # to disable apt sandboxing. if ($options->{mode} eq 'root') { - my $partial = '/var/lib/apt/lists/partial'; - if (system('/usr/lib/apt/apt-helper', 'drop-privs', '--', 'test', '-r', "$options->{root}$partial") != 0) { - warning "Download is performed unsandboxed as root as file $options->{root}$partial couldn't be accessed by user _apt"; - open my $fh, '>>', $tmpfile or error "cannot open $tmpfile for appending: $!"; - print $fh "APT::Sandbox::User \"root\";\n"; - close $fh; - } + my $partial = '/var/lib/apt/lists/partial'; + if (system('/usr/lib/apt/apt-helper', 'drop-privs', '--', 'test', '-r', "$options->{root}$partial") != 0) { + warning "Download is performed unsandboxed as root as file $options->{root}$partial couldn't be accessed by user _apt"; + open my $fh, '>>', $tmpfile or error "cannot open $tmpfile for appending: $!"; + print $fh "APT::Sandbox::User \"root\";\n"; + close $fh; + } } # setting PATH for chroot, ldconfig, start-stop-daemon... 
if (defined $ENV{PATH} && $ENV{PATH} ne "") { - $ENV{PATH} = "$ENV{PATH}:/usr/sbin:/usr/bin:/sbin:/bin"; + $ENV{PATH} = "$ENV{PATH}:/usr/sbin:/usr/bin:/sbin:/bin"; } else { - $ENV{PATH} = "/usr/sbin:/usr/bin:/sbin:/bin"; + $ENV{PATH} = "/usr/sbin:/usr/bin:/sbin:/bin"; } # run setup hooks @@ -1287,41 +1287,41 @@ sub setup { info "running apt-get update..."; run_apt_progress({ ARGV => ['apt-get', 'update'], - CHDIR => $options->{root}, - FIND_APT_WARNINGS => 1 }); + CHDIR => $options->{root}, + FIND_APT_WARNINGS => 1 }); # check if anything was downloaded at all { - open my $fh, '-|', 'apt-get', 'indextargets' // error "failed to fork(): $!"; - chomp (my $indextargets = do { local $/; <$fh> }); - close $fh; - if ($indextargets eq '') { - info "content of /etc/apt/sources.list:"; - if ($verbosity_level >= 1) { - copy("$options->{root}/etc/apt/sources.list", *STDERR); - } - error "apt-get update didn't download anything"; - } + open my $fh, '-|', 'apt-get', 'indextargets' // error "failed to fork(): $!"; + chomp (my $indextargets = do { local $/; <$fh> }); + close $fh; + if ($indextargets eq '') { + info "content of /etc/apt/sources.list:"; + if ($verbosity_level >= 1) { + copy("$options->{root}/etc/apt/sources.list", *STDERR); + } + error "apt-get update didn't download anything"; + } } my @pkgs_to_install; for my $incl (@{$options->{include}}) { - for my $pkg (split /[,\s]+/, $incl) { - # strip leading and trailing whitespace - $pkg =~ s/^\s+|\s+$//g; - # skip if the remainder is an empty string - if ($pkg eq '') { - next; - } - # do not append component if it's already in the list - if (any {$_ eq $pkg} @pkgs_to_install) { - next; - } - push @pkgs_to_install, $pkg; - } + for my $pkg (split /[,\s]+/, $incl) { + # strip leading and trailing whitespace + $pkg =~ s/^\s+|\s+$//g; + # skip if the remainder is an empty string + if ($pkg eq '') { + next; + } + # do not append component if it's already in the list + if (any {$_ eq $pkg} @pkgs_to_install) { + next; + } + push @pkgs_to_install, $pkg; + } } if ($options->{variant} eq 'buildd') { - push @pkgs_to_install, 'build-essential'; + push @pkgs_to_install, 'build-essential'; } # To figure out the right package set for the apt variant we can use: # $ apt-get dist-upgrade -o dir::state::status=/dev/null @@ -1330,149 +1330,149 @@ sub setup { # (essential variant) then we have to compute the package set ourselves. # Same if we want to install priority based variants. if (any { $_ eq $options->{variant} } ('extract', 'custom')) { - info "downloading packages with apt..."; - run_apt_progress({ - ARGV => ['apt-get', '--yes', - '-oApt::Get::Download-Only=true', - 'install'], - PKGS => [@pkgs_to_install], - }); + info "downloading packages with apt..."; + run_apt_progress({ + ARGV => ['apt-get', '--yes', + '-oApt::Get::Download-Only=true', + 'install'], + PKGS => [@pkgs_to_install], + }); } elsif ($options->{variant} eq 'apt') { - # if we just want to install Essential:yes packages, apt and their - # dependencies then we can make use of libapt treating apt as - # implicitly essential. An upgrade with the (currently) empty status - # file will trigger an installation of the essential packages plus apt. - # - # 2018-09-02, #debian-dpkg on OFTC, times in UTC+2 - # 23:39 < josch> I'll just put it in my script and if it starts - # breaking some time I just say it's apt's fault. 
:P - # 23:42 < DonKult> that is how it usually works, so yes, do that :P (<- - # and please add that line next to it so you can - # remind me in 5+ years that I said that after I wrote - # in the bugreport: "Are you crazy?!? Nobody in his - # right mind would even suggest depending on it!") - info "downloading packages with apt..."; - run_apt_progress({ - ARGV => ['apt-get', '--yes', - '-oApt::Get::Download-Only=true', - 'dist-upgrade'], - }); + # if we just want to install Essential:yes packages, apt and their + # dependencies then we can make use of libapt treating apt as + # implicitly essential. An upgrade with the (currently) empty status + # file will trigger an installation of the essential packages plus apt. + # + # 2018-09-02, #debian-dpkg on OFTC, times in UTC+2 + # 23:39 < josch> I'll just put it in my script and if it starts + # breaking some time I just say it's apt's fault. :P + # 23:42 < DonKult> that is how it usually works, so yes, do that :P (<- + # and please add that line next to it so you can + # remind me in 5+ years that I said that after I wrote + # in the bugreport: "Are you crazy?!? Nobody in his + # right mind would even suggest depending on it!") + info "downloading packages with apt..."; + run_apt_progress({ + ARGV => ['apt-get', '--yes', + '-oApt::Get::Download-Only=true', + 'dist-upgrade'], + }); } elsif (any { $_ eq $options->{variant} } ('essential', 'standard', 'important', 'required', 'buildd', 'minbase')) { - my %ess_pkgs; - open(my $pipe_apt, '-|', 'apt-get', 'indextargets', '--format', '$(FILENAME)', 'Created-By: Packages') or error "cannot start apt-get indextargets: $!"; - while (my $fname = <$pipe_apt>) { - chomp $fname; - open (my $pipe_cat, '-|', '/usr/lib/apt/apt-helper', 'cat-file', $fname) or error "cannot start apt-helper cat-file: $!"; - - my $pkgname; - my $ess = ''; - my $prio = 'optional'; - my $arch = ''; - while (my $line = <$pipe_cat>) { - chomp $line; - # Dpkg::Index takes 10 seconds to parse a typical Packages - # file. Thus we instead use a simple parser that just retrieve - # the information we need. 
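The comment above describes scanning the Packages stanzas by hand instead of going through Dpkg::Index. Reduced to a standalone sketch that reads a plain Packages file (the path and architecture below are placeholders), the approach looks roughly like this:

    use strict;
    use warnings;

    my $packages   = '/tmp/example/Packages';  # placeholder path
    my $nativearch = 'amd64';                  # placeholder architecture

    my %ess_pkgs;
    my ($pkgname, $ess, $arch) = (undef, '', '');

    open my $fh, '<', $packages or die "cannot open $packages: $!";
    while (my $line = <$fh>) {
        chomp $line;
        if ($line ne '') {
            # only pick out the fields needed later (the real code also
            # tracks Priority: to build the priority-based variants)
            if    ($line =~ /^Package: (.*)/)      { $pkgname = $1 }
            elsif ($line =~ /^Essential: yes$/)    { $ess  = 'yes' }
            elsif ($line =~ /^Architecture: (.*)/) { $arch = $1 }
            next;
        }
        # an empty line ends a stanza; a trailing blank line is assumed
        if (defined $pkgname and ($arch eq $nativearch or $arch eq 'all')) {
            $ess_pkgs{$pkgname} = 1 if $ess eq 'yes';
        }
        ($pkgname, $ess, $arch) = (undef, '', '');
    }
    close $fh;

    print "$_\n" for sort keys %ess_pkgs;

Keying a hash on the package name mirrors how the Essential:yes set is collected before being handed to apt for download.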
- if ($line ne "") { - if ($line =~ /^Package: (.*)/) { - $pkgname = $1; - } elsif ($line =~ /^Essential: yes$/) { - $ess = 'yes' - } elsif ($line =~ /^Priority: (.*)/) { - $prio = $1; - } elsif ($line =~ /^Architecture: (.*)/) { - $arch = $1; - } - next; - } - # we are only interested of packages of native architecture or - # Architecture:all - if ($arch eq $options->{nativearch} or $arch eq 'all') { - # the line is empty, thus a package stanza just finished - # processing and we can handle it now - if ($ess eq 'yes') { - $ess_pkgs{$pkgname} = (); - } elsif ($options->{variant} eq 'essential') { - # for this variant we are only interested in the - # essential packages - } elsif (any { $_ eq $options->{variant} } ('standard', 'important', 'required', 'buildd', 'minbase')) { - if ($prio eq 'optional' or $prio eq 'extra') { - # always ignore packages of priority optional and extra - } elsif ($prio eq 'standard') { - if (none { $_ eq $options->{variant} } ('important', 'required', 'buildd', 'minbase')) { - push @pkgs_to_install, $pkgname; - } - } elsif ($prio eq 'important') { - if (none { $_ eq $options->{variant} } ('required', 'buildd', 'minbase')) { - push @pkgs_to_install, $pkgname; - } - } elsif ($prio eq 'required') { - # required packages are part of all sets except - # essential and apt - push @pkgs_to_install, $pkgname; - } else { - error "unknown priority: $prio"; - } - } else { - error "unknown variant: $options->{variant}"; - } - } - # reset values - undef $pkgname; - $ess = ''; - $prio = 'optional'; - $arch = ''; - } - - close $pipe_cat; - $? == 0 or error "apt-helper cat-file failed: $?"; - } - close $pipe_apt; - $? == 0 or error "apt-get indextargets failed: $?"; - - debug "Identified the following Essential:yes packages:"; - foreach my $pkg (sort keys %ess_pkgs) { - debug " $pkg"; - } - - info "downloading packages with apt..."; - run_apt_progress({ - ARGV => ['apt-get', '--yes', - '-oApt::Get::Download-Only=true', - 'install'], - PKGS => [keys %ess_pkgs], - }); + my %ess_pkgs; + open(my $pipe_apt, '-|', 'apt-get', 'indextargets', '--format', '$(FILENAME)', 'Created-By: Packages') or error "cannot start apt-get indextargets: $!"; + while (my $fname = <$pipe_apt>) { + chomp $fname; + open (my $pipe_cat, '-|', '/usr/lib/apt/apt-helper', 'cat-file', $fname) or error "cannot start apt-helper cat-file: $!"; + + my $pkgname; + my $ess = ''; + my $prio = 'optional'; + my $arch = ''; + while (my $line = <$pipe_cat>) { + chomp $line; + # Dpkg::Index takes 10 seconds to parse a typical Packages + # file. Thus we instead use a simple parser that just retrieve + # the information we need. 
+ if ($line ne "") { + if ($line =~ /^Package: (.*)/) { + $pkgname = $1; + } elsif ($line =~ /^Essential: yes$/) { + $ess = 'yes' + } elsif ($line =~ /^Priority: (.*)/) { + $prio = $1; + } elsif ($line =~ /^Architecture: (.*)/) { + $arch = $1; + } + next; + } + # we are only interested of packages of native architecture or + # Architecture:all + if ($arch eq $options->{nativearch} or $arch eq 'all') { + # the line is empty, thus a package stanza just finished + # processing and we can handle it now + if ($ess eq 'yes') { + $ess_pkgs{$pkgname} = (); + } elsif ($options->{variant} eq 'essential') { + # for this variant we are only interested in the + # essential packages + } elsif (any { $_ eq $options->{variant} } ('standard', 'important', 'required', 'buildd', 'minbase')) { + if ($prio eq 'optional' or $prio eq 'extra') { + # always ignore packages of priority optional and extra + } elsif ($prio eq 'standard') { + if (none { $_ eq $options->{variant} } ('important', 'required', 'buildd', 'minbase')) { + push @pkgs_to_install, $pkgname; + } + } elsif ($prio eq 'important') { + if (none { $_ eq $options->{variant} } ('required', 'buildd', 'minbase')) { + push @pkgs_to_install, $pkgname; + } + } elsif ($prio eq 'required') { + # required packages are part of all sets except + # essential and apt + push @pkgs_to_install, $pkgname; + } else { + error "unknown priority: $prio"; + } + } else { + error "unknown variant: $options->{variant}"; + } + } + # reset values + undef $pkgname; + $ess = ''; + $prio = 'optional'; + $arch = ''; + } + + close $pipe_cat; + $? == 0 or error "apt-helper cat-file failed: $?"; + } + close $pipe_apt; + $? == 0 or error "apt-get indextargets failed: $?"; + + debug "Identified the following Essential:yes packages:"; + foreach my $pkg (sort keys %ess_pkgs) { + debug " $pkg"; + } + + info "downloading packages with apt..."; + run_apt_progress({ + ARGV => ['apt-get', '--yes', + '-oApt::Get::Download-Only=true', + 'install'], + PKGS => [keys %ess_pkgs], + }); } else { - error "unknown variant: $options->{variant}"; + error "unknown variant: $options->{variant}"; } # extract the downloaded packages my @essential_pkgs; { - my $apt_archives = "/var/cache/apt/archives/"; - opendir my $dh, "$options->{root}/$apt_archives" or error "cannot read $apt_archives"; - while (my $deb = readdir $dh) { - if ($deb !~ /\.deb$/) { - next; - } - $deb = "$apt_archives/$deb"; - if (! -f "$options->{root}/$deb") { - next; - } - push @essential_pkgs, $deb; - } - close $dh; + my $apt_archives = "/var/cache/apt/archives/"; + opendir my $dh, "$options->{root}/$apt_archives" or error "cannot read $apt_archives"; + while (my $deb = readdir $dh) { + if ($deb !~ /\.deb$/) { + next; + } + $deb = "$apt_archives/$deb"; + if (! 
-f "$options->{root}/$deb") { + next; + } + push @essential_pkgs, $deb; + } + close $dh; } if (scalar @essential_pkgs == 0) { - # check if a file:// URI was used - open(my $pipe_apt, '-|', 'apt-get', 'indextargets', '--format', '$(URI)', 'Created-By: Packages') or error "cannot start apt-get indextargets: $!"; - while (my $uri = <$pipe_apt>) { - if ($uri =~ /^file:\/\//) { - error "nothing got downloaded -- use copy:// instead of file://"; - } - } - error "nothing got downloaded"; + # check if a file:// URI was used + open(my $pipe_apt, '-|', 'apt-get', 'indextargets', '--format', '$(URI)', 'Created-By: Packages') or error "cannot start apt-get indextargets: $!"; + while (my $uri = <$pipe_apt>) { + if ($uri =~ /^file:\/\//) { + error "nothing got downloaded -- use copy:// instead of file://"; + } + } + error "nothing got downloaded"; } # We have to extract the packages from @essential_pkgs either if we run in @@ -1480,378 +1480,378 @@ sub setup { # In other words, the only scenario in which the @essential_pkgs are not # extracted are in chrootless mode in any other than the extract variant. if ($options->{mode} eq 'chrootless' and $options->{variant} ne 'extract') { - # nothing to do + # nothing to do } else { - info "extracting archives..."; - print_progress 0.0; - my $counter = 0; - my $total = scalar @essential_pkgs; - foreach my $deb (@essential_pkgs) { - $counter += 1; - # not using dpkg-deb --extract as that would replace the - # merged-usr symlinks with plain directories - pipe my $rfh, my $wfh; - my $pid1 = fork() // error "fork() failed: $!"; - if ($pid1 == 0) { - open(STDOUT, '>&', $wfh) or error "cannot open STDOUT: $!"; - debug("running dpkg-deb --fsys-tarfile $options->{root}/$deb"); - eval 'Devel::Cover::set_coverage("none")' if $is_covering; - exec 'dpkg-deb', '--fsys-tarfile', "$options->{root}/$deb"; - } - my $pid2 = fork() // error "fork() failed: $!"; - if ($pid2 == 0) { - open(STDIN, '<&', $rfh) or error "cannot open STDIN: $!"; - debug("running tar -C $options->{root} --keep-directory-symlink --extract --file -"); - eval 'Devel::Cover::set_coverage("none")' if $is_covering; - exec 'tar', '-C', $options->{root}, '--keep-directory-symlink', '--extract', '--file', '-'; - } - waitpid($pid1, 0); - $? == 0 or error "dpkg-deb --fsys-tarfile failed: $?"; - waitpid($pid2, 0); - $? == 0 or error "tar --extract failed: $?"; - print_progress ($counter/$total*100); - } - print_progress "done"; + info "extracting archives..."; + print_progress 0.0; + my $counter = 0; + my $total = scalar @essential_pkgs; + foreach my $deb (@essential_pkgs) { + $counter += 1; + # not using dpkg-deb --extract as that would replace the + # merged-usr symlinks with plain directories + pipe my $rfh, my $wfh; + my $pid1 = fork() // error "fork() failed: $!"; + if ($pid1 == 0) { + open(STDOUT, '>&', $wfh) or error "cannot open STDOUT: $!"; + debug("running dpkg-deb --fsys-tarfile $options->{root}/$deb"); + eval 'Devel::Cover::set_coverage("none")' if $is_covering; + exec 'dpkg-deb', '--fsys-tarfile', "$options->{root}/$deb"; + } + my $pid2 = fork() // error "fork() failed: $!"; + if ($pid2 == 0) { + open(STDIN, '<&', $rfh) or error "cannot open STDIN: $!"; + debug("running tar -C $options->{root} --keep-directory-symlink --extract --file -"); + eval 'Devel::Cover::set_coverage("none")' if $is_covering; + exec 'tar', '-C', $options->{root}, '--keep-directory-symlink', '--extract', '--file', '-'; + } + waitpid($pid1, 0); + $? == 0 or error "dpkg-deb --fsys-tarfile failed: $?"; + waitpid($pid2, 0); + $? 
== 0 or error "tar --extract failed: $?"; + print_progress ($counter/$total*100); + } + print_progress "done"; } if ($options->{mode} eq 'chrootless') { - info "installing packages..."; - # FIXME: the dpkg config from the host is parsed before the command - # line arguments are parsed and might break this mode - # Example: if the host has --path-exclude set, then this will also - # affect the chroot. - my @chrootless_opts = ( - '-oDPkg::Options::=--force-not-root', - '-oDPkg::Options::=--force-script-chrootless', - '-oDPkg::Options::=--root=' . $options->{root}, - '-oDPkg::Options::=--log=' . "$options->{root}/var/log/dpkg.log"); - if (defined $options->{qemu}) { - # The binfmt support on the outside is used, so qemu needs to know - # where it has to look for shared libraries - if (defined $ENV{QEMU_LD_PREFIX} - && $ENV{QEMU_LD_PREFIX} ne "") { - $ENV{QEMU_LD_PREFIX} = "$ENV{QEMU_LD_PREFIX}:$options->{root}"; - } else { - $ENV{QEMU_LD_PREFIX} = $options->{root}; - } - } - if ($options->{variant} eq 'extract') { - # nothing to do - } else { - run_apt_progress({ - ARGV => ['apt-get', '--yes', - @chrootless_opts, - 'install'], - PKGS => [map { "$options->{root}/$_" } @essential_pkgs], - }); - } - if (any { $_ eq $options->{variant} } ('extract', 'custom')) { - # nothing to do - } elsif (any { $_ eq $options->{variant} } ('essential', 'apt', 'standard', 'important', 'required', 'buildd', 'minbase')) { - # run essential hooks - run_hooks('essential', $options); - - if (scalar @pkgs_to_install > 0) { - run_apt_progress({ - ARGV => ['apt-get', '--yes', - @chrootless_opts, - 'install'], - PKGS => [@pkgs_to_install], - }); - } - } else { - error "unknown variant: $options->{variant}"; - } + info "installing packages..."; + # FIXME: the dpkg config from the host is parsed before the command + # line arguments are parsed and might break this mode + # Example: if the host has --path-exclude set, then this will also + # affect the chroot. + my @chrootless_opts = ( + '-oDPkg::Options::=--force-not-root', + '-oDPkg::Options::=--force-script-chrootless', + '-oDPkg::Options::=--root=' . $options->{root}, + '-oDPkg::Options::=--log=' . 
"$options->{root}/var/log/dpkg.log"); + if (defined $options->{qemu}) { + # The binfmt support on the outside is used, so qemu needs to know + # where it has to look for shared libraries + if (defined $ENV{QEMU_LD_PREFIX} + && $ENV{QEMU_LD_PREFIX} ne "") { + $ENV{QEMU_LD_PREFIX} = "$ENV{QEMU_LD_PREFIX}:$options->{root}"; + } else { + $ENV{QEMU_LD_PREFIX} = $options->{root}; + } + } + if ($options->{variant} eq 'extract') { + # nothing to do + } else { + run_apt_progress({ + ARGV => ['apt-get', '--yes', + @chrootless_opts, + 'install'], + PKGS => [map { "$options->{root}/$_" } @essential_pkgs], + }); + } + if (any { $_ eq $options->{variant} } ('extract', 'custom')) { + # nothing to do + } elsif (any { $_ eq $options->{variant} } ('essential', 'apt', 'standard', 'important', 'required', 'buildd', 'minbase')) { + # run essential hooks + run_hooks('essential', $options); + + if (scalar @pkgs_to_install > 0) { + run_apt_progress({ + ARGV => ['apt-get', '--yes', + @chrootless_opts, + 'install'], + PKGS => [@pkgs_to_install], + }); + } + } else { + error "unknown variant: $options->{variant}"; + } } elsif (any { $_ eq $options->{mode} } ('root', 'unshare', 'fakechroot', 'proot')) { - if (any { $_ eq $options->{variant} } ('extract')) { - # nothing to do - } elsif (any { $_ eq $options->{variant} } ('custom', 'essential', 'apt', 'standard', 'important', 'required', 'buildd', 'minbase')) { - if ($options->{mode} eq 'fakechroot') { - # this borrows from and extends - # /etc/fakechroot/debootstrap.env and /etc/fakechroot/chroot.env - { - my @fakechrootsubst = (); - foreach my $dir ('/usr/sbin', '/usr/bin', '/sbin', '/bin') { - push @fakechrootsubst, "$dir/chroot=/usr/sbin/chroot.fakechroot"; - push @fakechrootsubst, "$dir/mkfifo=/bin/true"; - push @fakechrootsubst, "$dir/ldconfig=/bin/true"; - push @fakechrootsubst, "$dir/ldd=/usr/bin/ldd.fakechroot"; - push @fakechrootsubst, "$dir/ischroot=/bin/true"; - } - if (defined $ENV{FAKECHROOT_CMD_SUBST} - && $ENV{FAKECHROOT_CMD_SUBST} ne "") { - push @fakechrootsubst, split /:/, $ENV{FAKECHROOT_CMD_SUBST}; - } - $ENV{FAKECHROOT_CMD_SUBST} = join ':', @fakechrootsubst; - } - if (defined $ENV{FAKECHROOT_EXCLUDE_PATH} - && $ENV{FAKECHROOT_EXCLUDE_PATH} ne "") { - $ENV{FAKECHROOT_EXCLUDE_PATH} = "$ENV{FAKECHROOT_EXCLUDE_PATH}:/dev:/proc:/sys"; - } else { - $ENV{FAKECHROOT_EXCLUDE_PATH} = '/dev:/proc:/sys'; - } - # workaround for long unix socket path if FAKECHROOT_BASE - # exceeds the limit of 108 bytes - $ENV{FAKECHROOT_AF_UNIX_PATH} = "/tmp"; - { - my @ldsoconf = ('/etc/ld.so.conf'); - opendir(my $dh, '/etc/ld.so.conf.d') or error "Can't opendir(/etc/ld.so.conf.d): $!"; - while (my $entry = readdir $dh) { - # skip the "." and ".." 
entries - next if $entry eq "."; - next if $entry eq ".."; - next if $entry !~ /\.conf$/; - push @ldsoconf, "/etc/ld.so.conf.d/$entry"; - } - closedir($dh); - my @ldlibpath = (); - if (defined $ENV{LD_LIBRARY_PATH} - && $ENV{LD_LIBRARY_PATH} ne "") { - push @ldlibpath, (split /:/, $ENV{LD_LIBRARY_PATH}); - } - # FIXME: workaround allowing installation of systemd should - # live in fakechroot, see #917920 - push @ldlibpath, "/lib/systemd"; - foreach my $fname (@ldsoconf) { - open my $fh, "<", $fname or error "cannot open $fname for reading: $!"; - while (my $line = <$fh>) { - next if $line !~ /^\//; - push @ldlibpath, $line; - } - close $fh; - } - $ENV{LD_LIBRARY_PATH} = join ':', @ldlibpath; - } - } - - # make sure that APT_CONFIG is not set when executing anything inside the - # chroot - my @chrootcmd = (); - if ($options->{mode} eq 'proot') { - push @chrootcmd, ( - 'proot', - '--root-id', - '--bind=/dev', - '--bind=/proc', - '--bind=/sys', - "--rootfs=$options->{root}", - '--cwd=/'); - } elsif (any { $_ eq $options->{mode} } ('root', 'unshare', 'fakechroot')) { - push @chrootcmd, ('/usr/sbin/chroot', $options->{root}); - } else { - error "unknown mode: $options->{mode}"; - } - - # copy qemu-user-static binary into chroot or setup proot with --qemu - if (defined $options->{qemu}) { - if ($options->{mode} eq 'proot') { - push @chrootcmd, "--qemu=qemu-$options->{qemu}"; - } elsif ($options->{mode} eq 'fakechroot') { - # The binfmt support on the outside is used, so qemu needs to know - # where it has to look for shared libraries - if (defined $ENV{QEMU_LD_PREFIX} - && $ENV{QEMU_LD_PREFIX} ne "") { - $ENV{QEMU_LD_PREFIX} = "$ENV{QEMU_LD_PREFIX}:$options->{root}"; - } else { - $ENV{QEMU_LD_PREFIX} = $options->{root}; - } - # Make sure that the fakeroot and fakechroot shared libraries - # exist for the right architecture - open my $fh, '-|', 'dpkg-architecture', '-a', $options->{nativearch}, '-qDEB_HOST_MULTIARCH' // error "failed to fork(): $!"; - chomp (my $deb_host_multiarch = do { local $/; <$fh> }); - close $fh; - if ($? != 0 or !$deb_host_multiarch) { - error "dpkg-architecture failed: $?"; - } - my $fakechrootdir = "/usr/lib/$deb_host_multiarch/fakechroot"; - if (! -e "$fakechrootdir/libfakechroot.so") { - error "$fakechrootdir/libfakechroot.so doesn't exist. Install libfakechroot:$options->{nativearch} outside the chroot"; - } - my $fakerootdir = "/usr/lib/$deb_host_multiarch/libfakeroot"; - if (! -e "$fakerootdir/libfakeroot-sysv.so") { - error "$fakerootdir/libfakeroot-sysv.so doesn't exist. Install libfakeroot:$options->{nativearch} outside the chroot"; - } - # fakechroot only fills LD_LIBRARY_PATH with the directories of - # the host's architecture. We append the directories of the chroot - # architecture. - $ENV{LD_LIBRARY_PATH} .= ":$fakechrootdir:$fakerootdir"; - } elsif (any { $_ eq $options->{mode} } ('root', 'unshare')) { - # other modes require a static qemu-user binary - my $qemubin = "/usr/bin/qemu-$options->{qemu}-static"; - if (! -e $qemubin) { - error "cannot find $qemubin"; - } - copy $qemubin, "$options->{root}/$qemubin" or error "cannot copy $qemubin: $!"; - # File::Copy does not retain permissions but on some - # platforms (like Travis CI) the binfmt interpreter must - # have the executable bit set or otherwise execve will - # fail with EACCES - chmod 0755, "$options->{root}/$qemubin" or error "cannot chmod $qemubin: $!"; - } else { - error "unknown mode: $options->{mode}"; - } - } - - # some versions of coreutils use the renameat2 system call in mv. 
- # This breaks certain versions of fakechroot and proot. Here we do - # a sanity check and warn the user in case things might break. - if (any { $_ eq $options->{mode} } ('fakechroot', 'proot') and -e "$options->{root}/bin/mv") { - mkdir "$options->{root}/000-move-me" or error "cannot create directory: $!"; - my $ret = system @chrootcmd, '/bin/mv', '/000-move-me', '/001-delete-me'; - if ($ret != 0) { - if ($options->{mode} eq 'proot') { - info "the /bin/mv binary inside the chroot doesn't work under proot"; - info "this is likely due to missing support for renameat2 in proot"; - info "see https://github.com/proot-me/PRoot/issues/147"; - } else { - info "the /bin/mv binary inside the chroot doesn't work under fakechroot"; - info "with certain versions of coreutils and glibc, this is due to missing support for renameat2 in fakechroot"; - info "see https://github.com/dex4er/fakechroot/issues/60"; - } - info "expect package post installation scripts not to work"; - rmdir "$options->{root}/000-move-me" or error "cannot rmdir: $!"; - } else { - rmdir "$options->{root}/001-delete-me" or error "cannot rmdir: $!"; - } - } - - # install the extracted packages properly - # we need --force-depends because dpkg does not take Pre-Depends into - # account and thus doesn't install them in the right order - # And the --predep-package option is broken: #539133 - info "installing packages..."; - run_chroot { - run_dpkg_progress({ - ARGV => [@chrootcmd, 'env', '--unset=TMPDIR', - 'dpkg', '--install', '--force-depends'], - PKGS => \@essential_pkgs, - }); - } $options; - - # if the path-excluded option was added to the dpkg config, reinstall all - # packages - if (-e "$options->{root}/etc/dpkg/dpkg.cfg.d/99mmdebstrap") { - open(my $fh, '<', "$options->{root}/etc/dpkg/dpkg.cfg.d/99mmdebstrap") or error "cannot open /etc/dpkg/dpkg.cfg.d/99mmdebstrap: $!"; - my $num_matches = grep /^path-exclude=/, <$fh>; - close $fh; - if ($num_matches > 0) { - # without --skip-same-version, dpkg will install the given - # packages even though they are already installed - info "re-installing packages because of path-exclude..."; - run_dpkg_progress({ - ARGV => [@chrootcmd, 'env', '--unset=TMPDIR', - 'dpkg', '--install', '--force-depends'], - PKGS => \@essential_pkgs, - }); - } - } - - foreach my $deb (@essential_pkgs) { - unlink "$options->{root}/$deb" or error "cannot unlink $deb: $!"; - } - - # run essential hooks - if ($options->{variant} ne 'custom') { - run_hooks('essential', $options); - } - - if ($options->{variant} ne 'custom' and scalar @pkgs_to_install > 0) { - # some packages have to be installed from the outside before anything - # can be installed from the inside. - # - # we do not need to install any *-archive-keyring packages inside the - # chroot prior to installing the packages, because the keyring is only - # used when doing "apt-get update" and that was already done at the - # beginning using key material from the outside. Since the apt cache - # is already filled and we are not calling "apt-get update" again, the - # keyring can be installed later during installation. But: if it's not - # installed during installation, then we might end up with a fully - # installed system without keyrings that are valid for its - # sources.list. 
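The renameat2 sanity check above boils down to creating a scratch directory and attempting a /bin/mv through the chroot wrapper. A minimal sketch of that probe, with $root and @chrootcmd as placeholders for the chroot path and wrapper command:

    use strict;
    use warnings;

    my $root      = '/tmp/example-root';            # placeholder
    my @chrootcmd = ('/usr/sbin/chroot', $root);    # placeholder wrapper

    # mv can fail under fakechroot/proot when coreutils uses renameat2
    mkdir "$root/000-move-me" or die "cannot create directory: $!";
    my $ret = system @chrootcmd, '/bin/mv', '/000-move-me', '/001-delete-me';
    if ($ret != 0) {
        warn "mv inside the chroot does not work; "
           . "expect maintainer scripts that move files to fail\n";
        rmdir "$root/000-move-me" or die "cannot rmdir: $!";
    } else {
        rmdir "$root/001-delete-me" or die "cannot rmdir: $!";
    }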
- my @pkgs_to_install_from_outside; - - # install apt if necessary - if ($options->{variant} ne 'apt') { - push @pkgs_to_install_from_outside, 'apt'; - } - - # since apt will be run inside the chroot, make sure that - # apt-transport-https and ca-certificates gets installed first if any - # mirror is a https URI - open(my $pipe_apt, '-|', 'apt-get', 'indextargets', '--format', '$(URI)', 'Created-By: Packages') or error "cannot start apt-get indextargets: $!"; - while (my $uri = <$pipe_apt>) { - if ($uri =~ /^https:\/\//) { - # FIXME: support for https is part of apt >= 1.5 - push @pkgs_to_install_from_outside, 'apt-transport-https'; - push @pkgs_to_install_from_outside, 'ca-certificates'; - last; - } elsif ($uri =~ /^tor(\+[a-z]+)*:\/\//) { - # tor URIs can be tor+http://, tor+https:// or even - # tor+mirror+file:// - push @pkgs_to_install_from_outside, 'apt-transport-tor'; - last; - } - } - close $pipe_apt; - $? == 0 or error "apt-get indextargets failed"; - - if (scalar @pkgs_to_install_from_outside > 0) { - info 'downloading ' . (join ', ', @pkgs_to_install_from_outside) . "..."; - run_apt_progress({ - ARGV => ['apt-get', '--yes', - '-oApt::Get::Download-Only=true', - 'install'], - PKGS => [@pkgs_to_install_from_outside], - }); - my @debs_to_install; - my $apt_archives = "/var/cache/apt/archives/"; - opendir my $dh, "$options->{root}/$apt_archives" or error "cannot read $apt_archives"; - while (my $deb = readdir $dh) { - if ($deb !~ /\.deb$/) { - next; - } - $deb = "$apt_archives/$deb"; - if (! -f "$options->{root}/$deb") { - next; - } - push @debs_to_install, $deb; - } - close $dh; - if (scalar @debs_to_install == 0) { - warning "nothing got downloaded -- maybe the packages were already installed?"; - } else { - # we need --force-depends because dpkg does not take Pre-Depends - # into account and thus doesn't install them in the right order - info 'installing ' . (join ', ', @pkgs_to_install_from_outside) . 
"..."; - run_dpkg_progress({ - ARGV => [@chrootcmd, 'env', '--unset=TMPDIR', - 'dpkg', '--install', '--force-depends'], - PKGS => \@debs_to_install, - }); - foreach my $deb (@debs_to_install) { - unlink "$options->{root}/$deb" or error "cannot unlink $deb: $!"; - } - } - } - - run_chroot { - info "installing remaining packages inside the chroot..."; - run_apt_progress({ - ARGV => [@chrootcmd, 'env', - '--unset=APT_CONFIG', - '--unset=TMPDIR', - 'apt-get', '--yes', 'install'], - PKGS => [@pkgs_to_install], - }); - } $options; - - } - } else { - error "unknown variant: $options->{variant}"; - } + if (any { $_ eq $options->{variant} } ('extract')) { + # nothing to do + } elsif (any { $_ eq $options->{variant} } ('custom', 'essential', 'apt', 'standard', 'important', 'required', 'buildd', 'minbase')) { + if ($options->{mode} eq 'fakechroot') { + # this borrows from and extends + # /etc/fakechroot/debootstrap.env and /etc/fakechroot/chroot.env + { + my @fakechrootsubst = (); + foreach my $dir ('/usr/sbin', '/usr/bin', '/sbin', '/bin') { + push @fakechrootsubst, "$dir/chroot=/usr/sbin/chroot.fakechroot"; + push @fakechrootsubst, "$dir/mkfifo=/bin/true"; + push @fakechrootsubst, "$dir/ldconfig=/bin/true"; + push @fakechrootsubst, "$dir/ldd=/usr/bin/ldd.fakechroot"; + push @fakechrootsubst, "$dir/ischroot=/bin/true"; + } + if (defined $ENV{FAKECHROOT_CMD_SUBST} + && $ENV{FAKECHROOT_CMD_SUBST} ne "") { + push @fakechrootsubst, split /:/, $ENV{FAKECHROOT_CMD_SUBST}; + } + $ENV{FAKECHROOT_CMD_SUBST} = join ':', @fakechrootsubst; + } + if (defined $ENV{FAKECHROOT_EXCLUDE_PATH} + && $ENV{FAKECHROOT_EXCLUDE_PATH} ne "") { + $ENV{FAKECHROOT_EXCLUDE_PATH} = "$ENV{FAKECHROOT_EXCLUDE_PATH}:/dev:/proc:/sys"; + } else { + $ENV{FAKECHROOT_EXCLUDE_PATH} = '/dev:/proc:/sys'; + } + # workaround for long unix socket path if FAKECHROOT_BASE + # exceeds the limit of 108 bytes + $ENV{FAKECHROOT_AF_UNIX_PATH} = "/tmp"; + { + my @ldsoconf = ('/etc/ld.so.conf'); + opendir(my $dh, '/etc/ld.so.conf.d') or error "Can't opendir(/etc/ld.so.conf.d): $!"; + while (my $entry = readdir $dh) { + # skip the "." and ".." 
entries + next if $entry eq "."; + next if $entry eq ".."; + next if $entry !~ /\.conf$/; + push @ldsoconf, "/etc/ld.so.conf.d/$entry"; + } + closedir($dh); + my @ldlibpath = (); + if (defined $ENV{LD_LIBRARY_PATH} + && $ENV{LD_LIBRARY_PATH} ne "") { + push @ldlibpath, (split /:/, $ENV{LD_LIBRARY_PATH}); + } + # FIXME: workaround allowing installation of systemd should + # live in fakechroot, see #917920 + push @ldlibpath, "/lib/systemd"; + foreach my $fname (@ldsoconf) { + open my $fh, "<", $fname or error "cannot open $fname for reading: $!"; + while (my $line = <$fh>) { + next if $line !~ /^\//; + push @ldlibpath, $line; + } + close $fh; + } + $ENV{LD_LIBRARY_PATH} = join ':', @ldlibpath; + } + } + + # make sure that APT_CONFIG is not set when executing anything inside the + # chroot + my @chrootcmd = (); + if ($options->{mode} eq 'proot') { + push @chrootcmd, ( + 'proot', + '--root-id', + '--bind=/dev', + '--bind=/proc', + '--bind=/sys', + "--rootfs=$options->{root}", + '--cwd=/'); + } elsif (any { $_ eq $options->{mode} } ('root', 'unshare', 'fakechroot')) { + push @chrootcmd, ('/usr/sbin/chroot', $options->{root}); + } else { + error "unknown mode: $options->{mode}"; + } + + # copy qemu-user-static binary into chroot or setup proot with --qemu + if (defined $options->{qemu}) { + if ($options->{mode} eq 'proot') { + push @chrootcmd, "--qemu=qemu-$options->{qemu}"; + } elsif ($options->{mode} eq 'fakechroot') { + # The binfmt support on the outside is used, so qemu needs to know + # where it has to look for shared libraries + if (defined $ENV{QEMU_LD_PREFIX} + && $ENV{QEMU_LD_PREFIX} ne "") { + $ENV{QEMU_LD_PREFIX} = "$ENV{QEMU_LD_PREFIX}:$options->{root}"; + } else { + $ENV{QEMU_LD_PREFIX} = $options->{root}; + } + # Make sure that the fakeroot and fakechroot shared libraries + # exist for the right architecture + open my $fh, '-|', 'dpkg-architecture', '-a', $options->{nativearch}, '-qDEB_HOST_MULTIARCH' // error "failed to fork(): $!"; + chomp (my $deb_host_multiarch = do { local $/; <$fh> }); + close $fh; + if ($? != 0 or !$deb_host_multiarch) { + error "dpkg-architecture failed: $?"; + } + my $fakechrootdir = "/usr/lib/$deb_host_multiarch/fakechroot"; + if (! -e "$fakechrootdir/libfakechroot.so") { + error "$fakechrootdir/libfakechroot.so doesn't exist. Install libfakechroot:$options->{nativearch} outside the chroot"; + } + my $fakerootdir = "/usr/lib/$deb_host_multiarch/libfakeroot"; + if (! -e "$fakerootdir/libfakeroot-sysv.so") { + error "$fakerootdir/libfakeroot-sysv.so doesn't exist. Install libfakeroot:$options->{nativearch} outside the chroot"; + } + # fakechroot only fills LD_LIBRARY_PATH with the directories of + # the host's architecture. We append the directories of the chroot + # architecture. + $ENV{LD_LIBRARY_PATH} .= ":$fakechrootdir:$fakerootdir"; + } elsif (any { $_ eq $options->{mode} } ('root', 'unshare')) { + # other modes require a static qemu-user binary + my $qemubin = "/usr/bin/qemu-$options->{qemu}-static"; + if (! -e $qemubin) { + error "cannot find $qemubin"; + } + copy $qemubin, "$options->{root}/$qemubin" or error "cannot copy $qemubin: $!"; + # File::Copy does not retain permissions but on some + # platforms (like Travis CI) the binfmt interpreter must + # have the executable bit set or otherwise execve will + # fail with EACCES + chmod 0755, "$options->{root}/$qemubin" or error "cannot chmod $qemubin: $!"; + } else { + error "unknown mode: $options->{mode}"; + } + } + + # some versions of coreutils use the renameat2 system call in mv. 
+ # This breaks certain versions of fakechroot and proot. Here we do + # a sanity check and warn the user in case things might break. + if (any { $_ eq $options->{mode} } ('fakechroot', 'proot') and -e "$options->{root}/bin/mv") { + mkdir "$options->{root}/000-move-me" or error "cannot create directory: $!"; + my $ret = system @chrootcmd, '/bin/mv', '/000-move-me', '/001-delete-me'; + if ($ret != 0) { + if ($options->{mode} eq 'proot') { + info "the /bin/mv binary inside the chroot doesn't work under proot"; + info "this is likely due to missing support for renameat2 in proot"; + info "see https://github.com/proot-me/PRoot/issues/147"; + } else { + info "the /bin/mv binary inside the chroot doesn't work under fakechroot"; + info "with certain versions of coreutils and glibc, this is due to missing support for renameat2 in fakechroot"; + info "see https://github.com/dex4er/fakechroot/issues/60"; + } + info "expect package post installation scripts not to work"; + rmdir "$options->{root}/000-move-me" or error "cannot rmdir: $!"; + } else { + rmdir "$options->{root}/001-delete-me" or error "cannot rmdir: $!"; + } + } + + # install the extracted packages properly + # we need --force-depends because dpkg does not take Pre-Depends into + # account and thus doesn't install them in the right order + # And the --predep-package option is broken: #539133 + info "installing packages..."; + run_chroot { + run_dpkg_progress({ + ARGV => [@chrootcmd, 'env', '--unset=TMPDIR', + 'dpkg', '--install', '--force-depends'], + PKGS => \@essential_pkgs, + }); + } $options; + + # if the path-excluded option was added to the dpkg config, reinstall all + # packages + if (-e "$options->{root}/etc/dpkg/dpkg.cfg.d/99mmdebstrap") { + open(my $fh, '<', "$options->{root}/etc/dpkg/dpkg.cfg.d/99mmdebstrap") or error "cannot open /etc/dpkg/dpkg.cfg.d/99mmdebstrap: $!"; + my $num_matches = grep /^path-exclude=/, <$fh>; + close $fh; + if ($num_matches > 0) { + # without --skip-same-version, dpkg will install the given + # packages even though they are already installed + info "re-installing packages because of path-exclude..."; + run_dpkg_progress({ + ARGV => [@chrootcmd, 'env', '--unset=TMPDIR', + 'dpkg', '--install', '--force-depends'], + PKGS => \@essential_pkgs, + }); + } + } + + foreach my $deb (@essential_pkgs) { + unlink "$options->{root}/$deb" or error "cannot unlink $deb: $!"; + } + + # run essential hooks + if ($options->{variant} ne 'custom') { + run_hooks('essential', $options); + } + + if ($options->{variant} ne 'custom' and scalar @pkgs_to_install > 0) { + # some packages have to be installed from the outside before anything + # can be installed from the inside. + # + # we do not need to install any *-archive-keyring packages inside the + # chroot prior to installing the packages, because the keyring is only + # used when doing "apt-get update" and that was already done at the + # beginning using key material from the outside. Since the apt cache + # is already filled and we are not calling "apt-get update" again, the + # keyring can be installed later during installation. But: if it's not + # installed during installation, then we might end up with a fully + # installed system without keyrings that are valid for its + # sources.list. 
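Immediately after this, the configured mirror URIs are scanned to decide which additional apt transports have to be installed from the outside. As a standalone sketch, with the URI list standing in for the output of apt-get indextargets --format '$(URI)':

    use strict;
    use warnings;

    # placeholder URIs; in mmdebstrap they come from apt-get indextargets
    my @uris = (
        'https://deb.debian.org/debian/dists/unstable/main/binary-amd64/Packages.xz',
    );

    my @extra_transports;
    for my $uri (@uris) {
        if ($uri =~ m{^https://}) {
            # https needs apt-transport-https (apt < 1.5) plus CA certificates
            push @extra_transports, 'apt-transport-https', 'ca-certificates';
            last;
        } elsif ($uri =~ m{^tor(\+[a-z]+)*://}) {
            # covers tor+http://, tor+https:// and tor+mirror+file://
            push @extra_transports, 'apt-transport-tor';
            last;
        }
    }
    print "extra packages needed: @extra_transports\n" if @extra_transports;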
+ my @pkgs_to_install_from_outside; + + # install apt if necessary + if ($options->{variant} ne 'apt') { + push @pkgs_to_install_from_outside, 'apt'; + } + + # since apt will be run inside the chroot, make sure that + # apt-transport-https and ca-certificates gets installed first if any + # mirror is a https URI + open(my $pipe_apt, '-|', 'apt-get', 'indextargets', '--format', '$(URI)', 'Created-By: Packages') or error "cannot start apt-get indextargets: $!"; + while (my $uri = <$pipe_apt>) { + if ($uri =~ /^https:\/\//) { + # FIXME: support for https is part of apt >= 1.5 + push @pkgs_to_install_from_outside, 'apt-transport-https'; + push @pkgs_to_install_from_outside, 'ca-certificates'; + last; + } elsif ($uri =~ /^tor(\+[a-z]+)*:\/\//) { + # tor URIs can be tor+http://, tor+https:// or even + # tor+mirror+file:// + push @pkgs_to_install_from_outside, 'apt-transport-tor'; + last; + } + } + close $pipe_apt; + $? == 0 or error "apt-get indextargets failed"; + + if (scalar @pkgs_to_install_from_outside > 0) { + info 'downloading ' . (join ', ', @pkgs_to_install_from_outside) . "..."; + run_apt_progress({ + ARGV => ['apt-get', '--yes', + '-oApt::Get::Download-Only=true', + 'install'], + PKGS => [@pkgs_to_install_from_outside], + }); + my @debs_to_install; + my $apt_archives = "/var/cache/apt/archives/"; + opendir my $dh, "$options->{root}/$apt_archives" or error "cannot read $apt_archives"; + while (my $deb = readdir $dh) { + if ($deb !~ /\.deb$/) { + next; + } + $deb = "$apt_archives/$deb"; + if (! -f "$options->{root}/$deb") { + next; + } + push @debs_to_install, $deb; + } + close $dh; + if (scalar @debs_to_install == 0) { + warning "nothing got downloaded -- maybe the packages were already installed?"; + } else { + # we need --force-depends because dpkg does not take Pre-Depends + # into account and thus doesn't install them in the right order + info 'installing ' . (join ', ', @pkgs_to_install_from_outside) . "..."; + run_dpkg_progress({ + ARGV => [@chrootcmd, 'env', '--unset=TMPDIR', + 'dpkg', '--install', '--force-depends'], + PKGS => \@debs_to_install, + }); + foreach my $deb (@debs_to_install) { + unlink "$options->{root}/$deb" or error "cannot unlink $deb: $!"; + } + } + } + + run_chroot { + info "installing remaining packages inside the chroot..."; + run_apt_progress({ + ARGV => [@chrootcmd, 'env', + '--unset=APT_CONFIG', + '--unset=TMPDIR', + 'apt-get', '--yes', 'install'], + PKGS => [@pkgs_to_install], + }); + } $options; + + } + } else { + error "unknown variant: $options->{variant}"; + } } else { - error "unknown mode: $options->{mode}"; + error "unknown mode: $options->{mode}"; } run_hooks('customize', $options); @@ -1860,51 +1860,51 @@ sub setup { unlink "$options->{root}/etc/apt/apt.conf.d/00mmdebstrap" or error "failed to unlink /etc/apt/apt.conf.d/00mmdebstrap: $!"; info "cleaning package lists and apt cache..."; run_apt_progress({ - ARGV => ['apt-get', - '--option', 'Dir::Etc::SourceList=/dev/null', - '--option', 'Dir::Etc::SourceParts=/dev/null', - 'update'], - CHDIR => $options->{root}, - }); + ARGV => ['apt-get', + '--option', 'Dir::Etc::SourceList=/dev/null', + '--option', 'Dir::Etc::SourceParts=/dev/null', + 'update'], + CHDIR => $options->{root}, + }); run_apt_progress({ ARGV => ['apt-get', 'clean'], CHDIR => $options->{root} }); # apt since 1.6 creates the auxfiles directory. If apt inside the chroot # is older than that, then it will not know how to clean it. 
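Pointing both sources.list and sources.list.d at /dev/null makes the subsequent apt-get update run against an empty source configuration, which lets apt discard the previously downloaded package lists; apt-get clean then empties the archive cache. A rough standalone equivalent of the two calls above (run from inside the chroot; the progress and chdir handling used in mmdebstrap is omitted here):

    use strict;
    use warnings;

    # forget all package lists by updating against no configured sources
    system('apt-get',
           '--option', 'Dir::Etc::SourceList=/dev/null',
           '--option', 'Dir::Etc::SourceParts=/dev/null',
           'update') == 0 or die "apt-get update failed: $?";

    # then drop the downloaded .deb files from the archive cache
    system('apt-get', 'clean') == 0 or die "apt-get clean failed: $?";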
if (-e "$options->{root}/var/lib/apt/lists/auxfiles") { - rmdir "$options->{root}/var/lib/apt/lists/auxfiles" or die "cannot rmdir /var/lib/apt/lists/auxfiles: $!"; + rmdir "$options->{root}/var/lib/apt/lists/auxfiles" or die "cannot rmdir /var/lib/apt/lists/auxfiles: $!"; } if (defined $options->{qemu} and any { $_ eq $options->{mode} } ('root', 'unshare')) { - unlink "$options->{root}/usr/bin/qemu-$options->{qemu}-static" or error "cannot unlink /usr/bin/qemu-$options->{qemu}-static: $!"; + unlink "$options->{root}/usr/bin/qemu-$options->{qemu}-static" or error "cannot unlink /usr/bin/qemu-$options->{qemu}-static: $!"; } # clean up certain files to make output reproducible foreach my $fname ('/var/log/dpkg.log', '/var/log/apt/history.log', '/var/log/apt/term.log', '/var/log/alternatives.log', '/var/cache/ldconfig/aux-cache', '/var/log/apt/eipp.log.xz') { - my $path = "$options->{root}$fname"; - if (! -e $path) { - next; - } - unlink $path or error "cannot unlink $path: $!"; + my $path = "$options->{root}$fname"; + if (! -e $path) { + next; + } + unlink $path or error "cannot unlink $path: $!"; } # remove any possible leftovers in /tmp but warn about it if (-d "$options->{root}/tmp") { - opendir(my $dh, "$options->{root}/tmp") or error "Can't opendir($options->{root}/tmp): $!"; - while (my $entry = readdir $dh) { - # skip the "." and ".." entries - next if $entry eq "."; - next if $entry eq ".."; - warning "deleting files in /tmp: $entry"; - remove_tree("$options->{root}/tmp/$entry", {error => \my $err}); - if (@$err) { - for my $diag (@$err) { - my ($file, $message) = %$diag; - if ($file eq '') { warning "general error: $message"; } - else { warning "problem unlinking $file: $message"; } - } - } - } - closedir($dh); + opendir(my $dh, "$options->{root}/tmp") or error "Can't opendir($options->{root}/tmp): $!"; + while (my $entry = readdir $dh) { + # skip the "." and ".." entries + next if $entry eq "."; + next if $entry eq ".."; + warning "deleting files in /tmp: $entry"; + remove_tree("$options->{root}/tmp/$entry", {error => \my $err}); + if (@$err) { + for my $diag (@$err) { + my ($file, $message) = %$diag; + if ($file eq '') { warning "general error: $message"; } + else { warning "problem unlinking $file: $message"; } + } + } + } + closedir($dh); } } @@ -1929,235 +1929,235 @@ sub main() { umask 022; if (scalar @ARGV >= 7 && $ARGV[0] eq "--hook-helper") { - my $root = $ARGV[1]; - my $mode = $ARGV[2]; - my $hook = $ARGV[3]; - my $qemu = $ARGV[4]; - $verbosity_level = $ARGV[5]; - my $command = $ARGV[6]; - - # unless we are in the setup hook (where there is no tar inside the - # chroot) we need to run tar on the inside because otherwise, possible - # absolute symlinks in the path given via --directory are not - # correctly resolved - # - # FIXME: the issue above can be fixed by a function that is able to - # resolve absolute symlinks even inside the chroot directory to a full - # path that is valid on the outside -- fakechroot and proot have their - # own reasons, see below - my @cmdprefix = (); - my @tarcmd = ('tar'); - if ($hook eq 'setup') { - if ($mode eq 'proot') { - # since we cannot run tar inside the chroot under proot during - # the setup hook because the chroot is empty, we have to run - # tar from the outside, which leads to all files being owned - # by the user running mmdebstrap. To let the ownership - # information not be completely off, we force all files be - # owned by the root user. 
- push @tarcmd, '--owner=root', '--group=root'; - } - } elsif (any { $_ eq $hook} ('essential', 'customize')) { - if ($mode eq 'fakechroot') { - # Fakechroot requires tar to run inside the chroot or - # otherwise absolute symlinks will include the path to the - # root directory - push @cmdprefix, '/usr/sbin/chroot', $root; - } elsif ($mode eq 'proot') { - # proot requires tar to run inside proot or otherwise - # permissions will be completely off - push @cmdprefix, 'proot', '--root-id', "--rootfs=$root", '--cwd=/', "--qemu=$qemu"; - } elsif (any { $_ eq $mode } ('root', 'chrootless', 'unshare')) { - push @cmdprefix, '/usr/sbin/chroot', $root; - } else { - error "unknown mode: $mode"; - } - } else { - error "unknown hook: $hook"; - } - - if (any { $_ eq $command} ('copy-in', 'tar-in', 'upload')) { - if (scalar @ARGV < 9) { - error "copy-in and tar-in need at least one path on the outside and the output path inside the chroot" - } - my $outpath = $ARGV[-1]; - for (my $i = 7; $i < $#ARGV; $i++) { - # the right argument for tar's --directory argument depends on - # whether tar is called from inside the chroot or from the - # outside - my $directory; - if ($hook eq 'setup') { - $directory = "$root/$outpath"; - } elsif (any { $_ eq $hook} ('essential', 'customize')) { - $directory = $outpath; - } else { - error "unknown hook: $hook"; - } - - # FIXME: here we would like to check if the path inside the - # chroot given by $directory actually exists but we cannot - # because we are missing a function that can resolve even - # paths including absolute symlinks to paths that are valid - # outside the chroot - - my $fh; - if ($command eq 'upload') { - # open the requested file for writing - open $fh, '|-', @cmdprefix, 'sh', '-c', 'cat > "$1"', 'exec', $directory // error "failed to fork(): $!"; - } else { - # open a tar process that extracts the tarfile that we supply - # it with on stdin to the output directory inside the chroot - open $fh, '|-', @cmdprefix, @tarcmd, '--directory', $directory, '--extract', '--file', '-' // error "failed to fork(): $!"; - } - - if ($command eq 'copy-in') { - # instruct the parent process to create a tarball of the - # requested path outside the chroot - debug "sending mktar"; - print STDOUT (pack("n", length $ARGV[$i]) . "mktar" . $ARGV[$i]); - } else { - # instruct parent process to open a tarball of the - # requested path outside the chroot for reading - debug "sending openr"; - print STDOUT (pack("n", length $ARGV[$i]) . "openr" . $ARGV[$i]); - } - STDOUT->flush(); - debug "waiting for okthx"; - checkokthx \*STDIN; - - # handle "write" messages from the parent process and feed - # their payload into the tar process until a "close" message - # is encountered - while(1) { - # receive the next message - my $ret = read (STDIN, my $buf, 2+5) // error "cannot read from socket: $!"; - if ($ret == 0) { - error "received eof on socket"; - } - my ($len, $msg) = unpack("nA5", $buf); - debug "received message: $msg"; - if ($msg eq "close") { - # finish the loop - if ($len != 0) { - error "expected no payload but got $len bytes"; - } - debug "sending okthx"; - print STDOUT (pack("n", 0) . "okthx") or error "cannot write to socket: $!"; - STDOUT->flush(); - last; - } elsif ($msg ne "write") { - # we should not receive this message at this point - print STDOUT (pack("n", 0) . 
"error") or error "cannot write to socket: $!"; - STDOUT->flush(); - error "expected write but got: $msg"; - } - # read the payload - my $content; - { - my $ret = read (STDIN, $content, $len) // error "error cannot read from socket: $!"; - if ($ret == 0) { - error "received eof on socket"; - } - } - # write the payload to the tar process - print $fh $content or error "cannot write to tar process: $!"; - debug "sending okthx"; - print STDOUT (pack("n", 0) . "okthx") or error "cannot write to socket: $!"; - STDOUT->flush(); - } - close $fh; - if ($command ne 'upload' and $? != 0) { - error "tar failed"; - } - } - } elsif (any { $_ eq $command} ('copy-out', 'tar-out', 'download')) { - if (scalar @ARGV < 9) { - error "copy-out needs at least one path inside the chroot and the output path on the outside" - } - my $outpath = $ARGV[-1]; - for (my $i = 7; $i < $#ARGV; $i++) { - # the right argument for tar's --directory argument depends on - # whether tar is called from inside the chroot or from the - # outside - my $directory; - if ($hook eq 'setup') { - $directory = "$root/$ARGV[$i]"; - } elsif (any { $_ eq $hook} ('essential', 'customize')) { - $directory = $ARGV[$i]; - } else { - error "unknown hook: $hook"; - } - - # FIXME: here we would like to check if the path inside the - # chroot given by $directory actually exists but we cannot - # because we are missing a function that can resolve even - # paths including absolute symlinks to paths that are valid - # outside the chroot - - my $fh; - if ($command eq 'download') { - # open the requested file for reading - open $fh, '-|', @cmdprefix, 'sh', '-c', 'cat "$1"', 'exec', $directory // error "failed to fork(): $!"; - } else { - # Open a tar process that creates a tarfile of everything in - # the requested directory inside the chroot and writes it to - # stdout. To emulate the behaviour of cp, change to the - # dirname of the requested path first. - open $fh, '-|', @cmdprefix, @tarcmd, '--directory', dirname($directory), '--create', '--file', '-', basename($directory) // error "failed to fork(): $!"; - } - - if ($command eq 'copy-out') { - # instruct the parent process to extract a tarball to a - # certain path outside the chroot - debug "sending untar"; - print STDOUT (pack("n", length $outpath) . "untar" . $outpath); - } else { - # instruct parent process to open a tarball of the - # requested path outside the chroot for writing - debug "sending openw"; - print STDOUT (pack("n", length $outpath) . "openw" . $outpath); - } - STDOUT->flush(); - debug "waiting for okthx"; - checkokthx \*STDIN; - - # read from the tar process and send as payload to the parent - # process - while (1) { - # read from tar - my $ret = read ($fh, my $cont, 4096) // error "cannot read from pipe: $!"; - if ($ret == 0) { last; } - debug "sending write"; - # send to parent - print STDOUT pack("n", $ret) . "write" . $cont; - STDOUT->flush(); - debug "waiting for okthx"; - checkokthx \*STDIN; - if ($ret < 4096) { last; } - } - - # signal to the parent process that we are done - debug "sending close"; - print STDOUT pack("n", 0) . "close"; - STDOUT->flush(); - debug "waiting for okthx"; - checkokthx \*STDIN; - - close $fh; - if ($command ne 'download' and $? 
!= 0) { - error "tar failed"; - } - } - } else { - error "unknown command: $command"; - } - - exit 0; + my $root = $ARGV[1]; + my $mode = $ARGV[2]; + my $hook = $ARGV[3]; + my $qemu = $ARGV[4]; + $verbosity_level = $ARGV[5]; + my $command = $ARGV[6]; + + # unless we are in the setup hook (where there is no tar inside the + # chroot) we need to run tar on the inside because otherwise, possible + # absolute symlinks in the path given via --directory are not + # correctly resolved + # + # FIXME: the issue above can be fixed by a function that is able to + # resolve absolute symlinks even inside the chroot directory to a full + # path that is valid on the outside -- fakechroot and proot have their + # own reasons, see below + my @cmdprefix = (); + my @tarcmd = ('tar'); + if ($hook eq 'setup') { + if ($mode eq 'proot') { + # since we cannot run tar inside the chroot under proot during + # the setup hook because the chroot is empty, we have to run + # tar from the outside, which leads to all files being owned + # by the user running mmdebstrap. To let the ownership + # information not be completely off, we force all files be + # owned by the root user. + push @tarcmd, '--owner=root', '--group=root'; + } + } elsif (any { $_ eq $hook} ('essential', 'customize')) { + if ($mode eq 'fakechroot') { + # Fakechroot requires tar to run inside the chroot or + # otherwise absolute symlinks will include the path to the + # root directory + push @cmdprefix, '/usr/sbin/chroot', $root; + } elsif ($mode eq 'proot') { + # proot requires tar to run inside proot or otherwise + # permissions will be completely off + push @cmdprefix, 'proot', '--root-id', "--rootfs=$root", '--cwd=/', "--qemu=$qemu"; + } elsif (any { $_ eq $mode } ('root', 'chrootless', 'unshare')) { + push @cmdprefix, '/usr/sbin/chroot', $root; + } else { + error "unknown mode: $mode"; + } + } else { + error "unknown hook: $hook"; + } + + if (any { $_ eq $command} ('copy-in', 'tar-in', 'upload')) { + if (scalar @ARGV < 9) { + error "copy-in and tar-in need at least one path on the outside and the output path inside the chroot" + } + my $outpath = $ARGV[-1]; + for (my $i = 7; $i < $#ARGV; $i++) { + # the right argument for tar's --directory argument depends on + # whether tar is called from inside the chroot or from the + # outside + my $directory; + if ($hook eq 'setup') { + $directory = "$root/$outpath"; + } elsif (any { $_ eq $hook} ('essential', 'customize')) { + $directory = $outpath; + } else { + error "unknown hook: $hook"; + } + + # FIXME: here we would like to check if the path inside the + # chroot given by $directory actually exists but we cannot + # because we are missing a function that can resolve even + # paths including absolute symlinks to paths that are valid + # outside the chroot + + my $fh; + if ($command eq 'upload') { + # open the requested file for writing + open $fh, '|-', @cmdprefix, 'sh', '-c', 'cat > "$1"', 'exec', $directory // error "failed to fork(): $!"; + } else { + # open a tar process that extracts the tarfile that we supply + # it with on stdin to the output directory inside the chroot + open $fh, '|-', @cmdprefix, @tarcmd, '--directory', $directory, '--extract', '--file', '-' // error "failed to fork(): $!"; + } + + if ($command eq 'copy-in') { + # instruct the parent process to create a tarball of the + # requested path outside the chroot + debug "sending mktar"; + print STDOUT (pack("n", length $ARGV[$i]) . "mktar" . 
$ARGV[$i]); + } else { + # instruct parent process to open a tarball of the + # requested path outside the chroot for reading + debug "sending openr"; + print STDOUT (pack("n", length $ARGV[$i]) . "openr" . $ARGV[$i]); + } + STDOUT->flush(); + debug "waiting for okthx"; + checkokthx \*STDIN; + + # handle "write" messages from the parent process and feed + # their payload into the tar process until a "close" message + # is encountered + while(1) { + # receive the next message + my $ret = read (STDIN, my $buf, 2+5) // error "cannot read from socket: $!"; + if ($ret == 0) { + error "received eof on socket"; + } + my ($len, $msg) = unpack("nA5", $buf); + debug "received message: $msg"; + if ($msg eq "close") { + # finish the loop + if ($len != 0) { + error "expected no payload but got $len bytes"; + } + debug "sending okthx"; + print STDOUT (pack("n", 0) . "okthx") or error "cannot write to socket: $!"; + STDOUT->flush(); + last; + } elsif ($msg ne "write") { + # we should not receive this message at this point + print STDOUT (pack("n", 0) . "error") or error "cannot write to socket: $!"; + STDOUT->flush(); + error "expected write but got: $msg"; + } + # read the payload + my $content; + { + my $ret = read (STDIN, $content, $len) // error "error cannot read from socket: $!"; + if ($ret == 0) { + error "received eof on socket"; + } + } + # write the payload to the tar process + print $fh $content or error "cannot write to tar process: $!"; + debug "sending okthx"; + print STDOUT (pack("n", 0) . "okthx") or error "cannot write to socket: $!"; + STDOUT->flush(); + } + close $fh; + if ($command ne 'upload' and $? != 0) { + error "tar failed"; + } + } + } elsif (any { $_ eq $command} ('copy-out', 'tar-out', 'download')) { + if (scalar @ARGV < 9) { + error "copy-out needs at least one path inside the chroot and the output path on the outside" + } + my $outpath = $ARGV[-1]; + for (my $i = 7; $i < $#ARGV; $i++) { + # the right argument for tar's --directory argument depends on + # whether tar is called from inside the chroot or from the + # outside + my $directory; + if ($hook eq 'setup') { + $directory = "$root/$ARGV[$i]"; + } elsif (any { $_ eq $hook} ('essential', 'customize')) { + $directory = $ARGV[$i]; + } else { + error "unknown hook: $hook"; + } + + # FIXME: here we would like to check if the path inside the + # chroot given by $directory actually exists but we cannot + # because we are missing a function that can resolve even + # paths including absolute symlinks to paths that are valid + # outside the chroot + + my $fh; + if ($command eq 'download') { + # open the requested file for reading + open $fh, '-|', @cmdprefix, 'sh', '-c', 'cat "$1"', 'exec', $directory // error "failed to fork(): $!"; + } else { + # Open a tar process that creates a tarfile of everything in + # the requested directory inside the chroot and writes it to + # stdout. To emulate the behaviour of cp, change to the + # dirname of the requested path first. + open $fh, '-|', @cmdprefix, @tarcmd, '--directory', dirname($directory), '--create', '--file', '-', basename($directory) // error "failed to fork(): $!"; + } + + if ($command eq 'copy-out') { + # instruct the parent process to extract a tarball to a + # certain path outside the chroot + debug "sending untar"; + print STDOUT (pack("n", length $outpath) . "untar" . $outpath); + } else { + # instruct parent process to open a tarball of the + # requested path outside the chroot for writing + debug "sending openw"; + print STDOUT (pack("n", length $outpath) . "openw" . 
$outpath); + } + STDOUT->flush(); + debug "waiting for okthx"; + checkokthx \*STDIN; + + # read from the tar process and send as payload to the parent + # process + while (1) { + # read from tar + my $ret = read ($fh, my $cont, 4096) // error "cannot read from pipe: $!"; + if ($ret == 0) { last; } + debug "sending write"; + # send to parent + print STDOUT pack("n", $ret) . "write" . $cont; + STDOUT->flush(); + debug "waiting for okthx"; + checkokthx \*STDIN; + if ($ret < 4096) { last; } + } + + # signal to the parent process that we are done + debug "sending close"; + print STDOUT pack("n", 0) . "close"; + STDOUT->flush(); + debug "waiting for okthx"; + checkokthx \*STDIN; + + close $fh; + if ($command ne 'download' and $? != 0) { + error "tar failed"; + } + } + } else { + error "unknown command: $command"; + } + + exit 0; } my $mtime = time; if (exists $ENV{SOURCE_DATE_EPOCH}) { - $mtime = $ENV{SOURCE_DATE_EPOCH}+0; + $mtime = $ENV{SOURCE_DATE_EPOCH}+0; } $ENV{DEBIAN_FRONTEND} = 'noninteractive'; @@ -2171,222 +2171,222 @@ sub main() { chomp (my $hostarch = `dpkg --print-architecture`); my $options = { - components => ["main"], - variant => "important", - include => [], - architectures => [$hostarch], - mode => 'auto', - dpkgopts => [], - aptopts => [], - apttrusted => "/etc/apt/trusted.gpg", - apttrustedparts => "/etc/apt/trusted.gpg.d", - noop => [], - setup_hook => [], - essential_hook => [], - customize_hook => [], + components => ["main"], + variant => "important", + include => [], + architectures => [$hostarch], + mode => 'auto', + dpkgopts => [], + aptopts => [], + apttrusted => "/etc/apt/trusted.gpg", + apttrustedparts => "/etc/apt/trusted.gpg.d", + noop => [], + setup_hook => [], + essential_hook => [], + customize_hook => [], }; my $logfile = undef; Getopt::Long::Configure ('default', 'bundling', 'auto_abbrev', 'ignore_case_always'); GetOptions( - 'h|help' => sub { pod2usage(-exitval => 0, -verbose => 1) }, - 'man' => sub { pod2usage(-exitval => 0, -verbose => 2) }, - 'version' => sub { print STDOUT "mmdebstrap $VERSION\n"; exit 0; }, - 'components=s@' => \$options->{components}, - 'variant=s' => \$options->{variant}, - 'include=s@' => \$options->{include}, - 'architectures=s@' => \$options->{architectures}, - 'mode=s' => \$options->{mode}, - 'dpkgopt=s@' => \$options->{dpkgopts}, - 'aptopt=s@' => \$options->{aptopts}, - 'keyring=s' => sub { - my ($opt_name, $opt_value) = @_; - if ($opt_value =~ /"/) { - error "--keyring: apt cannot handle paths with double quotes: $opt_value"; - } - if (! 
-e $opt_value) { - error "keyring \"$opt_value\" does not exist"; - } - my $abs_path = abs_path($opt_value); - if (!defined $abs_path) { - error "unable to get absolute path of --keyring: $opt_value"; - } - # since abs_path resolved all symlinks for us, we can now test - # what the actual target actually is - if (-d $opt_value) { - $options->{apttrustedparts} = $opt_value; - } else { - $options->{apttrusted} = $opt_value; - } - }, - 's|silent' => sub { $verbosity_level = 0; }, - 'q|quiet' => sub { $verbosity_level = 0; }, - 'v|verbose' => sub { $verbosity_level = 2; }, - 'd|debug' => sub { $verbosity_level = 3; }, - 'logfile=s' => \$logfile, - # no-op options so that mmdebstrap can be used with - # sbuild-createchroot --debootstrap=mmdebstrap - 'resolve-deps' => sub { push @{$options->{noop}}, 'resolve-deps'; }, - 'merged-usr' => sub { push @{$options->{noop}}, 'merged-usr'; }, - 'no-merged-usr' => sub { push @{$options->{noop}}, 'no-merged-usr'; }, - 'force-check-gpg' => sub { push @{$options->{noop}}, 'force-check-gpg'; }, - # hook options are hidden until I'm happy with them - 'setup-hook=s@' => \$options->{setup_hook}, - 'essential-hook=s@' => \$options->{essential_hook}, - 'customize-hook=s@' => \$options->{customize_hook}, + 'h|help' => sub { pod2usage(-exitval => 0, -verbose => 1) }, + 'man' => sub { pod2usage(-exitval => 0, -verbose => 2) }, + 'version' => sub { print STDOUT "mmdebstrap $VERSION\n"; exit 0; }, + 'components=s@' => \$options->{components}, + 'variant=s' => \$options->{variant}, + 'include=s@' => \$options->{include}, + 'architectures=s@' => \$options->{architectures}, + 'mode=s' => \$options->{mode}, + 'dpkgopt=s@' => \$options->{dpkgopts}, + 'aptopt=s@' => \$options->{aptopts}, + 'keyring=s' => sub { + my ($opt_name, $opt_value) = @_; + if ($opt_value =~ /"/) { + error "--keyring: apt cannot handle paths with double quotes: $opt_value"; + } + if (! -e $opt_value) { + error "keyring \"$opt_value\" does not exist"; + } + my $abs_path = abs_path($opt_value); + if (!defined $abs_path) { + error "unable to get absolute path of --keyring: $opt_value"; + } + # since abs_path resolved all symlinks for us, we can now test + # what the actual target actually is + if (-d $opt_value) { + $options->{apttrustedparts} = $opt_value; + } else { + $options->{apttrusted} = $opt_value; + } + }, + 's|silent' => sub { $verbosity_level = 0; }, + 'q|quiet' => sub { $verbosity_level = 0; }, + 'v|verbose' => sub { $verbosity_level = 2; }, + 'd|debug' => sub { $verbosity_level = 3; }, + 'logfile=s' => \$logfile, + # no-op options so that mmdebstrap can be used with + # sbuild-createchroot --debootstrap=mmdebstrap + 'resolve-deps' => sub { push @{$options->{noop}}, 'resolve-deps'; }, + 'merged-usr' => sub { push @{$options->{noop}}, 'merged-usr'; }, + 'no-merged-usr' => sub { push @{$options->{noop}}, 'no-merged-usr'; }, + 'force-check-gpg' => sub { push @{$options->{noop}}, 'force-check-gpg'; }, + # hook options are hidden until I'm happy with them + 'setup-hook=s@' => \$options->{setup_hook}, + 'essential-hook=s@' => \$options->{essential_hook}, + 'customize-hook=s@' => \$options->{customize_hook}, ) or pod2usage(-exitval => 2, -verbose => 1); if (defined($logfile)) { - open(STDERR, '>', $logfile) or error "cannot open $logfile: $!"; + open(STDERR, '>', $logfile) or error "cannot open $logfile: $!"; } foreach my $arg (@{$options->{noop}}) { - info "The option --$arg is a no-op. It only exists for compatibility with some debootstrap wrappers."; + info "The option --$arg is a no-op. 
It only exists for compatibility with some debootstrap wrappers."; } my @valid_variants = ('extract', 'custom', 'essential', 'apt', 'required', - 'minbase', 'buildd', 'important', 'debootstrap', '-', 'standard'); + 'minbase', 'buildd', 'important', 'debootstrap', '-', 'standard'); if (none { $_ eq $options->{variant}} @valid_variants) { - error "invalid variant. Choose from " . (join ', ', @valid_variants); + error "invalid variant. Choose from " . (join ', ', @valid_variants); } # debootstrap and - are an alias for important if (any { $_ eq $options->{variant} } ('-', 'debootstrap')) { - $options->{variant} = 'important'; + $options->{variant} = 'important'; } if ($options->{variant} eq 'essential' and scalar @{$options->{include}} > 0) { - warning "cannot install extra packages with variant essential because apt is missing"; + warning "cannot install extra packages with variant essential because apt is missing"; } # fakeroot is an alias for fakechroot if ($options->{mode} eq 'fakeroot') { - $options->{mode} = 'fakechroot'; + $options->{mode} = 'fakechroot'; } # sudo is an alias for root if ($options->{mode} eq 'sudo') { - $options->{mode} = 'root'; + $options->{mode} = 'root'; } my @valid_modes = ('auto', 'root', 'unshare', 'fakechroot', 'proot', - 'chrootless'); + 'chrootless'); if (none { $_ eq $options->{mode} } @valid_modes) { - error "invalid mode. Choose from " . (join ', ', @valid_modes); + error "invalid mode. Choose from " . (join ', ', @valid_modes); } my $check_fakechroot_running = sub { - # test if we are inside fakechroot already - # We fork a child process because setting FAKECHROOT_DETECT seems to - # be an irreversible operation for fakechroot. - my $pid = open my $rfh, '-|' // error "failed to fork(): $!"; - if ($pid == 0) { - # with the FAKECHROOT_DETECT environment variable set, any program - # execution will be replaced with the output "fakeroot [version]" - $ENV{FAKECHROOT_DETECT} = 0; - exec 'echo', 'If fakechroot is running, this will not be printed'; - } - my $content = do { local $/; <$rfh> }; - waitpid $pid, 0; - my $result = 0; - if ($? == 0 and $content =~ /^fakechroot \d\.\d+$/) { - $result = 1; - } - return $result; + # test if we are inside fakechroot already + # We fork a child process because setting FAKECHROOT_DETECT seems to + # be an irreversible operation for fakechroot. + my $pid = open my $rfh, '-|' // error "failed to fork(): $!"; + if ($pid == 0) { + # with the FAKECHROOT_DETECT environment variable set, any program + # execution will be replaced with the output "fakeroot [version]" + $ENV{FAKECHROOT_DETECT} = 0; + exec 'echo', 'If fakechroot is running, this will not be printed'; + } + my $content = do { local $/; <$rfh> }; + waitpid $pid, 0; + my $result = 0; + if ($? 
== 0 and $content =~ /^fakechroot \d\.\d+$/) { + $result = 1; + } + return $result; }; # figure out the mode to use or test whether the chosen mode is legal if ($options->{mode} eq 'auto') { - if (&{$check_fakechroot_running}()) { - # if mmdebstrap is executed inside fakechroot, then we assume the - # user expects fakechroot mode - $options->{mode} = 'fakechroot'; - } elsif ($EFFECTIVE_USER_ID == 0) { - # if mmdebstrap is executed as root, we assume the user wants root - # mode - $options->{mode} = 'root'; - } elsif (test_unshare(0)) { - # otherwise, unshare mode is our best option if test_unshare() - # succeeds - $options->{mode} = 'unshare'; - } elsif (system('fakechroot --version>/dev/null') == 0) { - # the next fallback is fakechroot - # exec ourselves again but within fakechroot - my @prefix = (); - if($is_covering) { - @prefix = ($EXECUTABLE_NAME, '-MDevel::Cover=-silent,-nogcov'); - } - exec 'fakechroot', 'fakeroot', @prefix, $PROGRAM_NAME, @ARGVORIG; - } elsif (system('proot --version>/dev/null') == 0) { - # and lastly, proot - $options->{mode} = 'proot'; - } else { - error "unable to pick chroot mode automatically"; - } - info "automatically chosen mode: $options->{mode}"; + if (&{$check_fakechroot_running}()) { + # if mmdebstrap is executed inside fakechroot, then we assume the + # user expects fakechroot mode + $options->{mode} = 'fakechroot'; + } elsif ($EFFECTIVE_USER_ID == 0) { + # if mmdebstrap is executed as root, we assume the user wants root + # mode + $options->{mode} = 'root'; + } elsif (test_unshare(0)) { + # otherwise, unshare mode is our best option if test_unshare() + # succeeds + $options->{mode} = 'unshare'; + } elsif (system('fakechroot --version>/dev/null') == 0) { + # the next fallback is fakechroot + # exec ourselves again but within fakechroot + my @prefix = (); + if($is_covering) { + @prefix = ($EXECUTABLE_NAME, '-MDevel::Cover=-silent,-nogcov'); + } + exec 'fakechroot', 'fakeroot', @prefix, $PROGRAM_NAME, @ARGVORIG; + } elsif (system('proot --version>/dev/null') == 0) { + # and lastly, proot + $options->{mode} = 'proot'; + } else { + error "unable to pick chroot mode automatically"; + } + info "automatically chosen mode: $options->{mode}"; } elsif ($options->{mode} eq 'root') { - if ($EFFECTIVE_USER_ID != 0) { - error "need to be root"; - } + if ($EFFECTIVE_USER_ID != 0) { + error "need to be root"; + } } elsif ($options->{mode} eq 'proot') { - if (system('proot --version>/dev/null') != 0) { - error "need working proot binary"; - } + if (system('proot --version>/dev/null') != 0) { + error "need working proot binary"; + } } elsif ($options->{mode} eq 'fakechroot') { - if (&{$check_fakechroot_running}()) { - # fakechroot is already running - } elsif (system('fakechroot --version>/dev/null') != 0) { - error "need working fakechroot binary"; - } else { - # exec ourselves again but within fakechroot - my @prefix = (); - if($is_covering) { - @prefix = ($EXECUTABLE_NAME, '-MDevel::Cover=-silent,-nogcov'); - } - exec 'fakechroot', 'fakeroot', @prefix, $PROGRAM_NAME, @ARGVORIG; - } + if (&{$check_fakechroot_running}()) { + # fakechroot is already running + } elsif (system('fakechroot --version>/dev/null') != 0) { + error "need working fakechroot binary"; + } else { + # exec ourselves again but within fakechroot + my @prefix = (); + if($is_covering) { + @prefix = ($EXECUTABLE_NAME, '-MDevel::Cover=-silent,-nogcov'); + } + exec 'fakechroot', 'fakeroot', @prefix, $PROGRAM_NAME, @ARGVORIG; + } } elsif ($options->{mode} eq 'unshare') { - if (!test_unshare(1)) { - my 
$procfile = '/proc/sys/kernel/unprivileged_userns_clone'; - open(my $fh, '<', $procfile) or error "failed to open $procfile: $!"; - chomp(my $content = do { local $/; <$fh> }); - close($fh); - if ($content ne "1") { - info "/proc/sys/kernel/unprivileged_userns_clone is set to $content"; - info "try running: sudo sysctl -w kernel.unprivileged_userns_clone=1"; - info "or permanently enable unprivileged usernamespaces by putting the setting into /etc/sysctl.d/"; - info "see https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=898446"; - } - exit 1; - } + if (!test_unshare(1)) { + my $procfile = '/proc/sys/kernel/unprivileged_userns_clone'; + open(my $fh, '<', $procfile) or error "failed to open $procfile: $!"; + chomp(my $content = do { local $/; <$fh> }); + close($fh); + if ($content ne "1") { + info "/proc/sys/kernel/unprivileged_userns_clone is set to $content"; + info "try running: sudo sysctl -w kernel.unprivileged_userns_clone=1"; + info "or permanently enable unprivileged usernamespaces by putting the setting into /etc/sysctl.d/"; + info "see https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=898446"; + } + exit 1; + } } elsif ($options->{mode} eq 'chrootless') { - # nothing to do + # nothing to do } else { - error "unknown mode: $options->{mode}"; + error "unknown mode: $options->{mode}"; } my @architectures = (); foreach my $archs (@{$options->{architectures}}) { - foreach my $arch (split /[,\s]+/, $archs) { - # strip leading and trailing whitespace - $arch =~ s/^\s+|\s+$//g; - # skip if the remainder is an empty string - if ($arch eq '') { - next; - } - # do not append component if it's already in the list - if (any {$_ eq $arch} @architectures) { - next; - } - push @architectures, $arch; - } + foreach my $arch (split /[,\s]+/, $archs) { + # strip leading and trailing whitespace + $arch =~ s/^\s+|\s+$//g; + # skip if the remainder is an empty string + if ($arch eq '') { + next; + } + # do not append component if it's already in the list + if (any {$_ eq $arch} @architectures) { + next; + } + push @architectures, $arch; + } } $options->{nativearch} = $hostarch; $options->{foreignarchs} = []; if (scalar @architectures == 0) { - warning "empty architecture list: falling back to native architecture $hostarch"; + warning "empty architecture list: falling back to native architecture $hostarch"; } elsif (scalar @architectures == 1) { - $options->{nativearch} = $architectures[0]; + $options->{nativearch} = $architectures[0]; } else { - $options->{nativearch} = $architectures[0]; - push @{$options->{foreignarchs}}, @architectures[1..$#architectures]; + $options->{nativearch} = $architectures[0]; + push @{$options->{foreignarchs}}, @architectures[1..$#architectures]; } debug "Native architecture (outside): $hostarch"; @@ -2394,330 +2394,330 @@ sub main() { debug ("Foreign architectures (inside): " . 
(join ', ', @{$options->{foreignarchs}})); { - # FIXME: autogenerate this list - my $deb2qemu = { - alpha => 'alpha', - amd64 => 'x86_64', - arm => 'arm', - arm64 => 'aarch64', - armel => 'arm', - armhf => 'arm', - hppa => 'hppa', - i386 => 'i386', - m68k => 'm68k', - mips => 'mips', - mips64 => 'mips64', - mips64el => 'mips64el', - mipsel => 'mipsel', - powerpc => 'ppc', - ppc64 => 'ppc64', - ppc64el => 'ppc64le', - riscv64 => 'riscv64', - s390x => 's390x', - sh4 => 'sh4', - sparc => 'sparc', - sparc64 => 'sparc64', - }; - if ($hostarch ne $options->{nativearch}) { - my $withemu = 0; - my $noemu = 0; - { - my $pid = open my $fh, '-|' // error "failed to fork(): $!"; - if ($pid == 0) { - { - no warnings; # don't print a warning if the following fails - exec 'arch-test', $options->{nativearch}; - } - # if exec didn't work (for example because the arch-test program is - # missing) prepare for the worst and assume that the architecture - # cannot be executed - print "$options->{nativearch}: not supported on this machine/kernel\n"; - exit 1; - } - chomp (my $content = do { local $/; <$fh> }); - close $fh; - if ($? == 0 and $content eq "$options->{nativearch}: ok") { - $withemu = 1; - } - } - { - my $pid = open my $fh, '-|' // error "failed to fork(): $!"; - if ($pid == 0) { - { - no warnings; # don't print a warning if the following fails - exec 'arch-test', '-n', $options->{nativearch}; - } - # if exec didn't work (for example because the arch-test program is - # missing) prepare for the worst and assume that the architecture - # cannot be executed - print "$options->{nativearch}: not supported on this machine/kernel\n"; - exit 1; - } - chomp (my $content = do { local $/; <$fh> }); - close $fh; - if ($? == 0 and $content eq "$options->{nativearch}: ok") { - $noemu = 1; - } - } - # four different outcomes, depending on whether arch-test - # succeeded with or without emulation - # - # withemu | noemu | - # --------+-------+----------------- - # 0 | 0 | test why emu doesn't work and quit - # 0 | 1 | should never happen - # 1 | 0 | use qemu emulation - # 1 | 1 | don't use qemu emulation - if ($withemu == 0 and $noemu == 0) { - { - open my $fh, '<', '/proc/filesystems' or error "failed to open /proc/filesystems: $!"; - unless (grep /^nodev\tbinfmt_misc$/, (<$fh>)) { - warning "binfmt_misc not found in /proc/filesystems -- is the module loaded?"; - } - close $fh; - } - { - open my $fh, '<', '/proc/mounts' or error "failed to open /proc/mounts: $!"; - unless (grep /^binfmt_misc \/proc\/sys\/fs\/binfmt_misc binfmt_misc/, (<$fh>)) { - warning "binfmt_misc not found in /proc/mounts -- not mounted?"; - } - close $fh; - } - { - if (!exists $deb2qemu->{$options->{nativearch}}) { - warning "no mapping from $options->{nativearch} to qemu-user binary"; - } else { - my $binfmt_identifier = 'qemu-' . 
$deb2qemu->{$options->{nativearch}}; - open my $fh, '-|', '/usr/sbin/update-binfmts', '--display', $binfmt_identifier // error "failed to fork(): $!"; - chomp (my $binfmts = do { local $/; <$fh> }); - close $fh; - if ($binfmts eq '') { - warning "$binfmt_identifier is not a supported binfmt name"; - } - } - } - error "$options->{nativearch} can neither be executed natively nor via qemu user emulation with binfmt_misc"; - } elsif ($withemu == 0 and $noemu == 1) { - error "arch-test succeeded without emu but not with emu"; - } elsif ($withemu == 1 and $noemu == 0) { - info "$options->{nativearch} cannot be executed, falling back to qemu-user"; - if (!exists $deb2qemu->{$options->{nativearch}}) { - error "no mapping from $options->{nativearch} to qemu-user binary"; - } - $options->{qemu} = $deb2qemu->{$options->{nativearch}}; - } elsif ($withemu == 1 and $noemu == 1) { - info "$options->{nativearch} is different from $hostarch but can be executed natively"; - } else { - error "logic error"; - } - } else { - info "chroot architecture $options->{nativearch} is equal to the host's architecture"; - } + # FIXME: autogenerate this list + my $deb2qemu = { + alpha => 'alpha', + amd64 => 'x86_64', + arm => 'arm', + arm64 => 'aarch64', + armel => 'arm', + armhf => 'arm', + hppa => 'hppa', + i386 => 'i386', + m68k => 'm68k', + mips => 'mips', + mips64 => 'mips64', + mips64el => 'mips64el', + mipsel => 'mipsel', + powerpc => 'ppc', + ppc64 => 'ppc64', + ppc64el => 'ppc64le', + riscv64 => 'riscv64', + s390x => 's390x', + sh4 => 'sh4', + sparc => 'sparc', + sparc64 => 'sparc64', + }; + if ($hostarch ne $options->{nativearch}) { + my $withemu = 0; + my $noemu = 0; + { + my $pid = open my $fh, '-|' // error "failed to fork(): $!"; + if ($pid == 0) { + { + no warnings; # don't print a warning if the following fails + exec 'arch-test', $options->{nativearch}; + } + # if exec didn't work (for example because the arch-test program is + # missing) prepare for the worst and assume that the architecture + # cannot be executed + print "$options->{nativearch}: not supported on this machine/kernel\n"; + exit 1; + } + chomp (my $content = do { local $/; <$fh> }); + close $fh; + if ($? == 0 and $content eq "$options->{nativearch}: ok") { + $withemu = 1; + } + } + { + my $pid = open my $fh, '-|' // error "failed to fork(): $!"; + if ($pid == 0) { + { + no warnings; # don't print a warning if the following fails + exec 'arch-test', '-n', $options->{nativearch}; + } + # if exec didn't work (for example because the arch-test program is + # missing) prepare for the worst and assume that the architecture + # cannot be executed + print "$options->{nativearch}: not supported on this machine/kernel\n"; + exit 1; + } + chomp (my $content = do { local $/; <$fh> }); + close $fh; + if ($? 
== 0 and $content eq "$options->{nativearch}: ok") { + $noemu = 1; + } + } + # four different outcomes, depending on whether arch-test + # succeeded with or without emulation + # + # withemu | noemu | + # --------+-------+----------------- + # 0 | 0 | test why emu doesn't work and quit + # 0 | 1 | should never happen + # 1 | 0 | use qemu emulation + # 1 | 1 | don't use qemu emulation + if ($withemu == 0 and $noemu == 0) { + { + open my $fh, '<', '/proc/filesystems' or error "failed to open /proc/filesystems: $!"; + unless (grep /^nodev\tbinfmt_misc$/, (<$fh>)) { + warning "binfmt_misc not found in /proc/filesystems -- is the module loaded?"; + } + close $fh; + } + { + open my $fh, '<', '/proc/mounts' or error "failed to open /proc/mounts: $!"; + unless (grep /^binfmt_misc \/proc\/sys\/fs\/binfmt_misc binfmt_misc/, (<$fh>)) { + warning "binfmt_misc not found in /proc/mounts -- not mounted?"; + } + close $fh; + } + { + if (!exists $deb2qemu->{$options->{nativearch}}) { + warning "no mapping from $options->{nativearch} to qemu-user binary"; + } else { + my $binfmt_identifier = 'qemu-' . $deb2qemu->{$options->{nativearch}}; + open my $fh, '-|', '/usr/sbin/update-binfmts', '--display', $binfmt_identifier // error "failed to fork(): $!"; + chomp (my $binfmts = do { local $/; <$fh> }); + close $fh; + if ($binfmts eq '') { + warning "$binfmt_identifier is not a supported binfmt name"; + } + } + } + error "$options->{nativearch} can neither be executed natively nor via qemu user emulation with binfmt_misc"; + } elsif ($withemu == 0 and $noemu == 1) { + error "arch-test succeeded without emu but not with emu"; + } elsif ($withemu == 1 and $noemu == 0) { + info "$options->{nativearch} cannot be executed, falling back to qemu-user"; + if (!exists $deb2qemu->{$options->{nativearch}}) { + error "no mapping from $options->{nativearch} to qemu-user binary"; + } + $options->{qemu} = $deb2qemu->{$options->{nativearch}}; + } elsif ($withemu == 1 and $noemu == 1) { + info "$options->{nativearch} is different from $hostarch but can be executed natively"; + } else { + error "logic error"; + } + } else { + info "chroot architecture $options->{nativearch} is equal to the host's architecture"; + } } { - my $suite; - if (scalar @ARGV > 0) { - $suite = shift @ARGV; - if (scalar @ARGV > 0) { - $options->{target} = shift @ARGV; - } else { - $options->{target} = '-'; - } - } else { - info "No SUITE specified, expecting sources.list on standard input"; - $options->{target} = '-'; - } - - my $sourceslist = ''; - if (! 
defined $suite) { - # If no suite was specified, then the whole sources.list has to - # come from standard input - info "Reading sources.list from standard input..."; - $sourceslist = do { local $/; }; - } else { - my @components = (); - foreach my $comp (@{$options->{components}}) { - my @comps = split /[,\s]+/, $comp; - foreach my $c (@comps) { - # strip leading and trailing whitespace - $c =~ s/^\s+|\s+$//g; - # skip if the remainder is an empty string - if ($c eq "") { - next; - } - # do not append component if it's already in the list - if (any {$_ eq $c} @components) { - next; - } - push @components, $c; - } - } - my $compstr = join " ", @components; - # if the currently selected apt keyrings do not contain the - # necessary key material for the chosen suite, then attempt adding - # a signed-by option - my $signedby = ''; - { - # try to guess the right keyring path for the given suite - my $keyring; - if (any {$_ eq $suite} ('potato', 'woody', 'sarge', 'etch', 'lenny', 'squeeze', 'wheezy')) { - $keyring = '/usr/share/keyrings/debian-archive-removed-keys.gpg'; - } elsif (any {$_ eq $suite} ('aequorea', 'bartholomea', 'chromodoris', 'dasyatis')) { - $keyring = '/usr/share/keyrings/tanglu-archive-keyring.gpg'; - } elsif (any {$_ eq $suite} ('kali-dev', 'kali-rolling', 'kali-bleeding-edge')) { - $keyring = '/usr/share/keyrings/kali-archive-keyring.gpg'; - } elsif (any {$_ eq $suite} ('trusty', 'xenial', 'zesty', 'artful', 'bionic', 'cosmic')) { - $keyring = '/usr/share/keyrings/ubuntu-archive-keyring.gpg'; - } elsif (any {$_ eq $suite} ('unstable', 'stable', 'oldstable', 'jessie', 'stretch', 'buster', 'bullseye', 'bookworm')) { - $keyring = '/usr/share/keyrings/debian-archive-keyring.gpg'; - } - # we can only check if we need the signed-by entry if we u - # automatically chosen keyring exists - if (defined $keyring && -e $keyring) { - # we can only check key material if gpg is installed - my $gpghome = tempdir("mmdebstrap.gpghome.XXXXXXXXXXXX", TMPDIR => 1, CLEANUP => 1); - my @gpgcmd = ('gpg', '--quiet', '--ignore-time-conflict', '--no-options', '--no-default-keyring', '--homedir', $gpghome, '--no-auto-check-trustdb', '--trust-model', 'always'); - my ($ret, $fh, $message); - { - # change warning handler to prevent message - # Can't exec "gpg": No such file or directory - local $SIG{__WARN__} = sub { $message = shift; }; - $ret = open $fh, '-|', @gpgcmd, '--version'; - } - close $fh; # we only want to check if the gpg command exists - if ($? 
== 0 && defined $ret && !defined $message) { - # find all the fingerprints of the keys apt currently - # knows about - my @aptfingerprints = (); - my $collect_fingerprints = sub { - my $filename = shift; - open my $fh, '-|', @gpgcmd, '--keyring', $filename, '--with-colons', '--list-keys' // error "failed to fork(): $!"; - while (my $line = <$fh>) { - if ($line !~ /^fpr:::::::::([^:]+):/) { - next; - } - push @aptfingerprints, $1; - } - close $fh; - }; - opendir my $dh, "$options->{apttrustedparts}" or error "cannot read $options->{apttrustedparts}"; - while (my $filename = readdir $dh) { - if ($filename !~ /\.(asc|gpg)$/) { - next; - } - $collect_fingerprints->("$options->{apttrustedparts}/$filename"); - } - if (-e $options->{apttrusted}) { - $collect_fingerprints->($options->{apttrusted}); - } - # check if all fingerprints from the keyring that we - # guessed are known by apt and only add signed-by - # option if that's not the case - my @suitefingerprints = (); - open my $suitefh, '-|', @gpgcmd, '--keyring', $keyring, '--with-colons', '--list-keys' // error "failed to fork(): $!"; - while (my $line = <$suitefh>) { - if ($line !~ /^fpr:::::::::([^:]+):/) { - next; - } - # if this fingerprint is not known by apt, then we - # need to add the signed-by option - if (none { $_ eq $1 } @aptfingerprints) { - $signedby = " [signed-by=\"$keyring\"]"; - last; - } - } - close $suitefh; - if ($? != 0) { - error "gpg failed"; - } - } else { - info "gpg --version failed: cannot determine the right signed-by value" - } - remove_tree($gpghome, {error => \my $err}); - if (@$err) { - for my $diag (@$err) { - my ($file, $message) = %$diag; - if ($file eq '') { warning "general error: $message"; } - else { warning "problem unlinking $file: $message"; } - } - } - } - } - if (scalar @ARGV > 0) { - for my $arg (@ARGV) { - if ($arg eq '-') { - info "Reading sources.list from standard input..."; - $sourceslist .= do { local $/; }; - } elsif ($arg =~ /^deb(-src)? 
/) { - $sourceslist .= "$arg\n"; - } elsif ($arg =~ /:\/\//) { - $sourceslist .= "deb$signedby $arg $suite $compstr\n"; - } elsif (-f $arg) { - open my $fh, '<', $arg or error "cannot open $arg: $!"; - while (my $line = <$fh>) { - $sourceslist .= $line; - } - close $fh; - } else { - error "invalid mirror: $arg"; - } - } - } else { - my @debstable = ('oldoldstable', 'oldstable', 'stable', 'jessie', 'stretch', 'buster', 'bullseye', 'bookworm'); - my @ubuntustable = ('trusty', 'xenial', 'zesty', 'artful', 'bionic', 'cosmic'); - my @tanglustable = ('aequorea', 'bartholomea', 'chromodoris', 'dasyatis'); - my @kali = ('kali-dev', 'kali-rolling', 'kali-bleeding-edge'); - - my $mirror = 'http://deb.debian.org/debian'; - my $secmirror = 'http://security.debian.org/debian-security'; - if (any {$_ eq $suite} @ubuntustable) { - if (any {$_ eq $options->{nativearch}} ('amd64', 'i386')) { - $mirror = 'http://archive.ubuntu.com/ubuntu'; - $secmirror = 'http://security.ubuntu.com/ubuntu'; - } else { - $mirror = 'http://ports.ubuntu.com/ubuntu-ports'; - $secmirror = 'http://ports.ubuntu.com/ubuntu-ports'; - } - } elsif (any {$_ eq $suite} @tanglustable) { - $mirror = 'http://archive.tanglu.org/tanglu' - } elsif (any {$_ eq $suite} @kali) { - $mirror = 'https://http.kali.org/kali' - } - $sourceslist .= "deb$signedby $mirror $suite $compstr\n"; - if (any {$_ eq $suite} @ubuntustable) { - $sourceslist .= "deb$signedby $mirror $suite-updates $compstr\n"; - $sourceslist .= "deb$signedby $secmirror $suite-security $compstr\n"; - } elsif (any {$_ eq $suite} @tanglustable) { - $sourceslist .= "deb$signedby $secmirror $suite-updates $compstr\n"; - } elsif (any {$_ eq $suite} @debstable) { - $sourceslist .= "deb$signedby $mirror $suite-updates $compstr\n"; - if (any {$_ eq $suite} ('oldoldstable', 'oldstable', 'stable', 'jessie', 'stretch', 'buster')) { - $sourceslist .= "deb$signedby $secmirror $suite/updates $compstr\n"; - } else { - # starting from bullseye use - # https://lists.debian.org/87r26wqr2a.fsf@43-1.org - $sourceslist .= "deb$signedby $secmirror $suite-security $compstr\n"; - } - } - } - } - if ($sourceslist eq '') { - error "empty apt sources.list"; - } - $options->{sourceslist} = $sourceslist; + my $suite; + if (scalar @ARGV > 0) { + $suite = shift @ARGV; + if (scalar @ARGV > 0) { + $options->{target} = shift @ARGV; + } else { + $options->{target} = '-'; + } + } else { + info "No SUITE specified, expecting sources.list on standard input"; + $options->{target} = '-'; + } + + my $sourceslist = ''; + if (! 
defined $suite) { + # If no suite was specified, then the whole sources.list has to + # come from standard input + info "Reading sources.list from standard input..."; + $sourceslist = do { local $/; }; + } else { + my @components = (); + foreach my $comp (@{$options->{components}}) { + my @comps = split /[,\s]+/, $comp; + foreach my $c (@comps) { + # strip leading and trailing whitespace + $c =~ s/^\s+|\s+$//g; + # skip if the remainder is an empty string + if ($c eq "") { + next; + } + # do not append component if it's already in the list + if (any {$_ eq $c} @components) { + next; + } + push @components, $c; + } + } + my $compstr = join " ", @components; + # if the currently selected apt keyrings do not contain the + # necessary key material for the chosen suite, then attempt adding + # a signed-by option + my $signedby = ''; + { + # try to guess the right keyring path for the given suite + my $keyring; + if (any {$_ eq $suite} ('potato', 'woody', 'sarge', 'etch', 'lenny', 'squeeze', 'wheezy')) { + $keyring = '/usr/share/keyrings/debian-archive-removed-keys.gpg'; + } elsif (any {$_ eq $suite} ('aequorea', 'bartholomea', 'chromodoris', 'dasyatis')) { + $keyring = '/usr/share/keyrings/tanglu-archive-keyring.gpg'; + } elsif (any {$_ eq $suite} ('kali-dev', 'kali-rolling', 'kali-bleeding-edge')) { + $keyring = '/usr/share/keyrings/kali-archive-keyring.gpg'; + } elsif (any {$_ eq $suite} ('trusty', 'xenial', 'zesty', 'artful', 'bionic', 'cosmic')) { + $keyring = '/usr/share/keyrings/ubuntu-archive-keyring.gpg'; + } elsif (any {$_ eq $suite} ('unstable', 'stable', 'oldstable', 'jessie', 'stretch', 'buster', 'bullseye', 'bookworm')) { + $keyring = '/usr/share/keyrings/debian-archive-keyring.gpg'; + } + # we can only check if we need the signed-by entry if we u + # automatically chosen keyring exists + if (defined $keyring && -e $keyring) { + # we can only check key material if gpg is installed + my $gpghome = tempdir("mmdebstrap.gpghome.XXXXXXXXXXXX", TMPDIR => 1, CLEANUP => 1); + my @gpgcmd = ('gpg', '--quiet', '--ignore-time-conflict', '--no-options', '--no-default-keyring', '--homedir', $gpghome, '--no-auto-check-trustdb', '--trust-model', 'always'); + my ($ret, $fh, $message); + { + # change warning handler to prevent message + # Can't exec "gpg": No such file or directory + local $SIG{__WARN__} = sub { $message = shift; }; + $ret = open $fh, '-|', @gpgcmd, '--version'; + } + close $fh; # we only want to check if the gpg command exists + if ($? 
== 0 && defined $ret && !defined $message) { + # find all the fingerprints of the keys apt currently + # knows about + my @aptfingerprints = (); + my $collect_fingerprints = sub { + my $filename = shift; + open my $fh, '-|', @gpgcmd, '--keyring', $filename, '--with-colons', '--list-keys' // error "failed to fork(): $!"; + while (my $line = <$fh>) { + if ($line !~ /^fpr:::::::::([^:]+):/) { + next; + } + push @aptfingerprints, $1; + } + close $fh; + }; + opendir my $dh, "$options->{apttrustedparts}" or error "cannot read $options->{apttrustedparts}"; + while (my $filename = readdir $dh) { + if ($filename !~ /\.(asc|gpg)$/) { + next; + } + $collect_fingerprints->("$options->{apttrustedparts}/$filename"); + } + if (-e $options->{apttrusted}) { + $collect_fingerprints->($options->{apttrusted}); + } + # check if all fingerprints from the keyring that we + # guessed are known by apt and only add signed-by + # option if that's not the case + my @suitefingerprints = (); + open my $suitefh, '-|', @gpgcmd, '--keyring', $keyring, '--with-colons', '--list-keys' // error "failed to fork(): $!"; + while (my $line = <$suitefh>) { + if ($line !~ /^fpr:::::::::([^:]+):/) { + next; + } + # if this fingerprint is not known by apt, then we + # need to add the signed-by option + if (none { $_ eq $1 } @aptfingerprints) { + $signedby = " [signed-by=\"$keyring\"]"; + last; + } + } + close $suitefh; + if ($? != 0) { + error "gpg failed"; + } + } else { + info "gpg --version failed: cannot determine the right signed-by value" + } + remove_tree($gpghome, {error => \my $err}); + if (@$err) { + for my $diag (@$err) { + my ($file, $message) = %$diag; + if ($file eq '') { warning "general error: $message"; } + else { warning "problem unlinking $file: $message"; } + } + } + } + } + if (scalar @ARGV > 0) { + for my $arg (@ARGV) { + if ($arg eq '-') { + info "Reading sources.list from standard input..."; + $sourceslist .= do { local $/; }; + } elsif ($arg =~ /^deb(-src)? 
/) { + $sourceslist .= "$arg\n"; + } elsif ($arg =~ /:\/\//) { + $sourceslist .= "deb$signedby $arg $suite $compstr\n"; + } elsif (-f $arg) { + open my $fh, '<', $arg or error "cannot open $arg: $!"; + while (my $line = <$fh>) { + $sourceslist .= $line; + } + close $fh; + } else { + error "invalid mirror: $arg"; + } + } + } else { + my @debstable = ('oldoldstable', 'oldstable', 'stable', 'jessie', 'stretch', 'buster', 'bullseye', 'bookworm'); + my @ubuntustable = ('trusty', 'xenial', 'zesty', 'artful', 'bionic', 'cosmic'); + my @tanglustable = ('aequorea', 'bartholomea', 'chromodoris', 'dasyatis'); + my @kali = ('kali-dev', 'kali-rolling', 'kali-bleeding-edge'); + + my $mirror = 'http://deb.debian.org/debian'; + my $secmirror = 'http://security.debian.org/debian-security'; + if (any {$_ eq $suite} @ubuntustable) { + if (any {$_ eq $options->{nativearch}} ('amd64', 'i386')) { + $mirror = 'http://archive.ubuntu.com/ubuntu'; + $secmirror = 'http://security.ubuntu.com/ubuntu'; + } else { + $mirror = 'http://ports.ubuntu.com/ubuntu-ports'; + $secmirror = 'http://ports.ubuntu.com/ubuntu-ports'; + } + } elsif (any {$_ eq $suite} @tanglustable) { + $mirror = 'http://archive.tanglu.org/tanglu' + } elsif (any {$_ eq $suite} @kali) { + $mirror = 'https://http.kali.org/kali' + } + $sourceslist .= "deb$signedby $mirror $suite $compstr\n"; + if (any {$_ eq $suite} @ubuntustable) { + $sourceslist .= "deb$signedby $mirror $suite-updates $compstr\n"; + $sourceslist .= "deb$signedby $secmirror $suite-security $compstr\n"; + } elsif (any {$_ eq $suite} @tanglustable) { + $sourceslist .= "deb$signedby $secmirror $suite-updates $compstr\n"; + } elsif (any {$_ eq $suite} @debstable) { + $sourceslist .= "deb$signedby $mirror $suite-updates $compstr\n"; + if (any {$_ eq $suite} ('oldoldstable', 'oldstable', 'stable', 'jessie', 'stretch', 'buster')) { + $sourceslist .= "deb$signedby $secmirror $suite/updates $compstr\n"; + } else { + # starting from bullseye use + # https://lists.debian.org/87r26wqr2a.fsf@43-1.org + $sourceslist .= "deb$signedby $secmirror $suite-security $compstr\n"; + } + } + } + } + if ($sourceslist eq '') { + error "empty apt sources.list"; + } + $options->{sourceslist} = $sourceslist; } if ($options->{target} ne '-') { - my $abs_path = abs_path($options->{target}); - if (!defined $abs_path) { - error "unable to get absolute path of target directory $options->{target}"; - } - $options->{target} = $abs_path; + my $abs_path = abs_path($options->{target}); + if (!defined $abs_path) { + error "unable to get absolute path of target directory $options->{target}"; + } + $options->{target} = $abs_path; } if ($options->{target} eq '/') { - error "refusing to use the filesystem root as output directory"; + error "refusing to use the filesystem root as output directory"; } my $tar_compressor = get_tar_compressor($options->{target}); @@ -2726,173 +2726,173 @@ sub main() { $options->{maketar} = 0; $options->{makesqfs} = 0; if (defined $tar_compressor or $options->{target} =~ /\.tar$/ or $options->{target} eq '-') { - $options->{maketar} = 1; - # check if the compressor is installed - if (defined $tar_compressor) { - my $pid = fork() // error "fork() failed: $!"; - if ($pid == 0) { - open(STDOUT, '>', '/dev/null') or error "cannot open /dev/null for writing: $!"; - open(STDIN, '<', '/dev/null') or error "cannot open /dev/null for reading: $!"; - exec { $tar_compressor->[0] } @{$tar_compressor} or error ("cannot exec " . (join " ", @{$tar_compressor}) . ": $!"); - } - waitpid $pid, 0; - if ($? 
!= 0) { - error ("failed to start " . (join " ", @{$tar_compressor})); - } - } + $options->{maketar} = 1; + # check if the compressor is installed + if (defined $tar_compressor) { + my $pid = fork() // error "fork() failed: $!"; + if ($pid == 0) { + open(STDOUT, '>', '/dev/null') or error "cannot open /dev/null for writing: $!"; + open(STDIN, '<', '/dev/null') or error "cannot open /dev/null for reading: $!"; + exec { $tar_compressor->[0] } @{$tar_compressor} or error ("cannot exec " . (join " ", @{$tar_compressor}) . ": $!"); + } + waitpid $pid, 0; + if ($? != 0) { + error ("failed to start " . (join " ", @{$tar_compressor})); + } + } } elsif ($options->{target} =~ /\.(squashfs|sqfs)$/) { - $options->{makesqfs} = 1; - # check if tar2sqfs is installed - my $pid = fork() // error "fork() failed: $!"; - if ($pid == 0) { - open(STDOUT, '>', '/dev/null') or error "cannot open /dev/null for writing: $!"; - open(STDIN, '<', '/dev/null') or error "cannot open /dev/null for reading: $!"; - exec ('tar2sqfs', '--version') or error ("cannot exec tar2sqfs --version: $!"); - } - waitpid $pid, 0; - if ($? != 0) { - error ("failed to start tar2sqfs --version"); - } + $options->{makesqfs} = 1; + # check if tar2sqfs is installed + my $pid = fork() // error "fork() failed: $!"; + if ($pid == 0) { + open(STDOUT, '>', '/dev/null') or error "cannot open /dev/null for writing: $!"; + open(STDIN, '<', '/dev/null') or error "cannot open /dev/null for reading: $!"; + exec ('tar2sqfs', '--version') or error ("cannot exec tar2sqfs --version: $!"); + } + waitpid $pid, 0; + if ($? != 0) { + error ("failed to start tar2sqfs --version"); + } } if ($options->{maketar} or $options->{makesqfs}) { - if (any { $_ eq $options->{variant} } ('extract', 'custom') and any { $_ eq $options->{mode} } ('fakechroot', 'proot')) { - info "creating a tarball or squashfs image in fakechroot mode or proot mode might fail in extract and custom variants because there might be no tar inside the chroot"; - } - # try to fail early if target tarball or squashfs image cannot be - # opened for writing - if ($options->{target} ne '-') { - open my $fh, '>', $options->{target} or error "cannot open $options->{target} for writing: $!"; - close $fh; - } - # since the output is a tarball, we create the rootfs in a temporary - # directory - $options->{root} = tempdir( - 'mmdebstrap.XXXXXXXXXX', - DIR => File::Spec->tmpdir - ); - info "using $options->{root} as tempdir"; - # in unshare and root mode, other users than the current user need to - # access the rootfs, most prominently, the _apt user. Thus, make the - # temporary directory world readable. 
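# A minimal sketch of the availability probe used above for the tar
# compressor and for tar2sqfs: fork, silence the child's stdio, exec the
# tool and inspect its exit status. This assumes die() in place of the
# script's own error() helper and is only illustrative.
use strict;
use warnings;

sub command_works {
    my @cmd = @_;
    my $pid = fork() // die "fork() failed: $!";
    if ($pid == 0) {
        # keep the probe quiet: both stdout and stdin go to /dev/null
        open(STDOUT, '>', '/dev/null') or die "cannot open /dev/null: $!";
        open(STDIN, '<', '/dev/null') or die "cannot open /dev/null: $!";
        exec { $cmd[0] } @cmd or die "cannot exec @cmd: $!";
    }
    waitpid $pid, 0;
    # a zero exit status means the tool could be started
    return $? == 0;
}

# example (hypothetical invocations): command_works('xz', '--version');
#                                     command_works('tar2sqfs', '--version');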
- if (any { $_ eq $options->{mode} } ('unshare', 'root')) { - chmod 0755, $options->{root} or error "cannot chmod root: $!"; - } + if (any { $_ eq $options->{variant} } ('extract', 'custom') and any { $_ eq $options->{mode} } ('fakechroot', 'proot')) { + info "creating a tarball or squashfs image in fakechroot mode or proot mode might fail in extract and custom variants because there might be no tar inside the chroot"; + } + # try to fail early if target tarball or squashfs image cannot be + # opened for writing + if ($options->{target} ne '-') { + open my $fh, '>', $options->{target} or error "cannot open $options->{target} for writing: $!"; + close $fh; + } + # since the output is a tarball, we create the rootfs in a temporary + # directory + $options->{root} = tempdir( + 'mmdebstrap.XXXXXXXXXX', + DIR => File::Spec->tmpdir + ); + info "using $options->{root} as tempdir"; + # in unshare and root mode, other users than the current user need to + # access the rootfs, most prominently, the _apt user. Thus, make the + # temporary directory world readable. + if (any { $_ eq $options->{mode} } ('unshare', 'root')) { + chmod 0755, $options->{root} or error "cannot chmod root: $!"; + } } else { - # user does not seem to have specified a tarball as output, thus work - # directly in the supplied directory - $options->{root} = $options->{target}; - if (-e $options->{root}) { - if (! -d $options->{root}) { - error "$options->{root} exists and is not a directory"; - } - # check if the directory is empty or contains nothing more than an - # empty lost+found directory. The latter exists on freshly created - # ext3 and ext4 partitions. - # rationale for requiring an empty directory: https://bugs.debian.org/833525 - opendir(my $dh, $options->{root}) or error "Can't opendir($options->{root}): $!"; - while (my $entry = readdir $dh) { - # skip the "." and ".." entries - next if $entry eq "."; - next if $entry eq ".."; - # if the entry is a directory named "lost+found" then skip it - # if it's empty - if ($entry eq "lost+found" and -d "$options->{root}/$entry") { - opendir(my $dh2, "$options->{root}/$entry"); - # Attempt reading the directory thrice. If the third time - # succeeds, then it has more entries than just "." and ".." - # and must thus not be empty. - readdir $dh2; - readdir $dh2; - # rationale for requiring an empty directory: - # https://bugs.debian.org/833525 - if (readdir $dh2) { - error "$options->{root} contains a non-empty lost+found directory"; - } - closedir($dh2); - } else { - error "$options->{root} is not empty"; - } - } - closedir($dh); - } else { - my $num_created = make_path "$options->{root}", {error => \my $err}; - if ($err && @$err) { - error (join "; ", (map {"cannot create " . (join ": ", %{$_})} @$err)); - } elsif ($num_created == 0) { - error "cannot create $options->{root}"; - } - } + # user does not seem to have specified a tarball as output, thus work + # directly in the supplied directory + $options->{root} = $options->{target}; + if (-e $options->{root}) { + if (! -d $options->{root}) { + error "$options->{root} exists and is not a directory"; + } + # check if the directory is empty or contains nothing more than an + # empty lost+found directory. The latter exists on freshly created + # ext3 and ext4 partitions. + # rationale for requiring an empty directory: https://bugs.debian.org/833525 + opendir(my $dh, $options->{root}) or error "Can't opendir($options->{root}): $!"; + while (my $entry = readdir $dh) { + # skip the "." and ".." 
entries + next if $entry eq "."; + next if $entry eq ".."; + # if the entry is a directory named "lost+found" then skip it + # if it's empty + if ($entry eq "lost+found" and -d "$options->{root}/$entry") { + opendir(my $dh2, "$options->{root}/$entry"); + # Attempt reading the directory thrice. If the third time + # succeeds, then it has more entries than just "." and ".." + # and must thus not be empty. + readdir $dh2; + readdir $dh2; + # rationale for requiring an empty directory: + # https://bugs.debian.org/833525 + if (readdir $dh2) { + error "$options->{root} contains a non-empty lost+found directory"; + } + closedir($dh2); + } else { + error "$options->{root} is not empty"; + } + } + closedir($dh); + } else { + my $num_created = make_path "$options->{root}", {error => \my $err}; + if ($err && @$err) { + error (join "; ", (map {"cannot create " . (join ": ", %{$_})} @$err)); + } elsif ($num_created == 0) { + error "cannot create $options->{root}"; + } + } } # check for double quotes because apt doesn't allow to escape them and # thus paths with double quotes are invalid in the apt config if ($options->{root} =~ /"/) { - error "apt cannot handle paths with double quotes"; + error "apt cannot handle paths with double quotes"; } my @idmap; # for unshare mode the rootfs directory has to have appropriate # permissions if ($options->{mode} eq 'unshare') { - @idmap = read_subuid_subgid; - # sanity check - if (scalar(@idmap) != 2 || $idmap[0][0] ne 'u' || $idmap[1][0] ne 'g') { - error "invalid idmap"; - } - - my $outer_gid = $REAL_GROUP_ID+0; - - my $pid = get_unshare_cmd { chown 1, 1, $options->{root} } - [ - ['u', '0', $REAL_USER_ID, '1'], - ['g', '0', $outer_gid, '1'], - ['u', '1', $idmap[0][2], '1'], - ['g', '1', $idmap[1][2], '1']]; - waitpid $pid, 0; - $? == 0 or error "chown failed"; + @idmap = read_subuid_subgid; + # sanity check + if (scalar(@idmap) != 2 || $idmap[0][0] ne 'u' || $idmap[1][0] ne 'g') { + error "invalid idmap"; + } + + my $outer_gid = $REAL_GROUP_ID+0; + + my $pid = get_unshare_cmd { chown 1, 1, $options->{root} } + [ + ['u', '0', $REAL_USER_ID, '1'], + ['g', '0', $outer_gid, '1'], + ['u', '1', $idmap[0][2], '1'], + ['g', '1', $idmap[1][2], '1']]; + waitpid $pid, 0; + $? == 0 or error "chown failed"; } # figure out whether we have mknod $options->{havemknod} = 0; if ($options->{mode} eq 'unshare') { - my $pid = get_unshare_cmd { - $options->{havemknod} = havemknod($options->{root}); - } \@idmap; - waitpid $pid, 0; - $? == 0 or error "havemknod failed"; + my $pid = get_unshare_cmd { + $options->{havemknod} = havemknod($options->{root}); + } \@idmap; + waitpid $pid, 0; + $? 
== 0 or error "havemknod failed"; } elsif (any { $_ eq $options->{mode} } ('root', 'fakechroot', 'proot', 'chrootless')) { - $options->{havemknod} = havemknod($options->{root}); + $options->{havemknod} = havemknod($options->{root}); } else { - error "unknown mode: $options->{mode}"; + error "unknown mode: $options->{mode}"; } my $devtar = ''; # We always craft the /dev entries ourselves if a tarball is to be created if ($options->{maketar} or $options->{makesqfs}) { - foreach my $file (@devfiles) { - my ($fname, $mode, $type, $linkname, $devmajor, $devminor) = @{$file}; - my $entry = pack('a100 a8 a8 a8 a12 a12 A8 a1 a100 a8 a32 a32 a8 a8 a155 x12', - $fname, - sprintf('%07o', $mode), - sprintf('%07o', 0), # uid - sprintf('%07o', 0), # gid - sprintf('%011o', 0), # size - sprintf('%011o', $mtime), - '', # checksum - $type, - $linkname, - "ustar ", - '', # username - '', # groupname - defined($devmajor) ? sprintf('%07o', $devmajor) : '', - defined($devminor) ? sprintf('%07o', $devminor) : '', - '', # prefix - ); - # compute and insert checksum - substr($entry,148,7) = sprintf("%06o\0", unpack("%16C*",$entry)); - $devtar .= $entry; - } + foreach my $file (@devfiles) { + my ($fname, $mode, $type, $linkname, $devmajor, $devminor) = @{$file}; + my $entry = pack('a100 a8 a8 a8 a12 a12 A8 a1 a100 a8 a32 a32 a8 a8 a155 x12', + $fname, + sprintf('%07o', $mode), + sprintf('%07o', 0), # uid + sprintf('%07o', 0), # gid + sprintf('%011o', 0), # size + sprintf('%011o', $mtime), + '', # checksum + $type, + $linkname, + "ustar ", + '', # username + '', # groupname + defined($devmajor) ? sprintf('%07o', $devmajor) : '', + defined($devminor) ? sprintf('%07o', $devminor) : '', + '', # prefix + ); + # compute and insert checksum + substr($entry,148,7) = sprintf("%06o\0", unpack("%16C*",$entry)); + $devtar .= $entry; + } } my $exitstatus = 0; @@ -2912,116 +2912,116 @@ sub main() { socketpair my $childsock, my $parentsock, AF_UNIX, SOCK_STREAM, PF_UNSPEC or error "socketpair failed: $!"; $options->{hooksock} = $childsock; if ($options->{mode} eq 'unshare') { - $pid = get_unshare_cmd { - # child - $SIG{'INT'} = 'DEFAULT'; - $SIG{'HUP'} = 'DEFAULT'; - $SIG{'PIPE'} = 'DEFAULT'; - $SIG{'TERM'} = 'DEFAULT'; + $pid = get_unshare_cmd { + # child + $SIG{'INT'} = 'DEFAULT'; + $SIG{'HUP'} = 'DEFAULT'; + $SIG{'PIPE'} = 'DEFAULT'; + $SIG{'TERM'} = 'DEFAULT'; - # unblock all delayed signals (and possibly handle them) - POSIX::sigprocmask(SIG_UNBLOCK, $sigset) or error "Can't unblock signals: $!"; + # unblock all delayed signals (and possibly handle them) + POSIX::sigprocmask(SIG_UNBLOCK, $sigset) or error "Can't unblock signals: $!"; - close $rfh; - close $parentsock; - open(STDOUT, '>&', STDERR) or error "cannot open STDOUT: $!"; + close $rfh; + close $parentsock; + open(STDOUT, '>&', STDERR) or error "cannot open STDOUT: $!"; - setup($options); + setup($options); - print $childsock (pack('n', 0) . 'adios'); - $childsock->flush(); + print $childsock (pack('n', 0) . 
'adios'); + $childsock->flush(); - close $childsock; + close $childsock; - if ($options->{maketar} or $options->{makesqfs}) { - info "creating tarball..."; + if ($options->{maketar} or $options->{makesqfs}) { + info "creating tarball..."; - # redirect tar output to the writing end of the pipe so that the - # parent process can capture the output - open(STDOUT, '>&', $wfh) or error "cannot open STDOUT: $!"; + # redirect tar output to the writing end of the pipe so that the + # parent process can capture the output + open(STDOUT, '>&', $wfh) or error "cannot open STDOUT: $!"; - # Add ./dev as the first entries of the tar file. - # We cannot add them after calling tar, because there is no way to - # prevent tar from writing NULL entries at the end. - print $devtar; + # Add ./dev as the first entries of the tar file. + # We cannot add them after calling tar, because there is no way to + # prevent tar from writing NULL entries at the end. + print $devtar; - # pack everything except ./dev - 0 == system('tar', @taropts, '-C', $options->{root}, '.') or error "tar failed: $?"; + # pack everything except ./dev + 0 == system('tar', @taropts, '-C', $options->{root}, '.') or error "tar failed: $?"; - info "done"; - } + info "done"; + } - exit 0; - } \@idmap; + exit 0; + } \@idmap; } elsif (any { $_ eq $options->{mode} } ('root', 'fakechroot', 'proot', 'chrootless')) { - $pid = fork() // error "fork() failed: $!"; - if ($pid == 0) { - $SIG{'INT'} = 'DEFAULT'; - $SIG{'HUP'} = 'DEFAULT'; - $SIG{'PIPE'} = 'DEFAULT'; - $SIG{'TERM'} = 'DEFAULT'; - - # unblock all delayed signals (and possibly handle them) - POSIX::sigprocmask(SIG_UNBLOCK, $sigset) or error "Can't unblock signals: $!"; - - close $rfh; - close $parentsock; - open(STDOUT, '>&', STDERR) or error "cannot open STDOUT: $!"; - - setup($options); - - print $childsock (pack('n', 0) . 'adios'); - $childsock->flush(); - - close $childsock; - - if ($options->{maketar} or $options->{makesqfs}) { - info "creating tarball..."; - - # redirect tar output to the writing end of the pipe so that the - # parent process can capture the output - open(STDOUT, '>&', $wfh) or error "cannot open STDOUT: $!"; - - # Add ./dev as the first entries of the tar file. - # We cannot add them after calling tar, because there is no way to - # prevent tar from writing NULL entries at the end. - print $devtar; - - if ($options->{mode} eq 'fakechroot') { - # Fakechroot requires tar to run inside the chroot or - # otherwise absolute symlinks will include the path to the - # root directory - 0 == system('/usr/sbin/chroot', $options->{root}, 'tar', @taropts, '-C', '/', '.') or error "tar failed: $?"; - } elsif ($options->{mode} eq 'proot') { - # proot requires tar to run inside proot or otherwise - # permissions will be completely off - my @qemuopt = (); - if (defined $options->{qemu}) { - push @qemuopt, "--qemu=qemu-$options->{qemu}"; - push @taropts, "--exclude=./host-rootfs" - } - 0 == system('proot', '--root-id', "--rootfs=$options->{root}", '--cwd=/', @qemuopt, 'tar', @taropts, '-C', '/', '.') or error "tar failed: $?"; - } elsif (any { $_ eq $options->{mode} } ('root', 'chrootless')) { - # If the chroot directory is not owned by the root user, - # then we assume that no measure was taken to fake root - # permissions. Since the final tarball should contain - # entries with root ownership, we instruct tar to do so. 
- my @owneropts = (); - if ((stat $options->{root})[4] != 0) { - push @owneropts, '--owner=0', '--group=0', '--numeric-owner'; - } - 0 == system('tar', @taropts, @owneropts, '-C', $options->{root}, '.') or error "tar failed: $?"; - } else { - error "unknown mode: $options->{mode}"; - } - - info "done"; - } - - exit 0; - } + $pid = fork() // error "fork() failed: $!"; + if ($pid == 0) { + $SIG{'INT'} = 'DEFAULT'; + $SIG{'HUP'} = 'DEFAULT'; + $SIG{'PIPE'} = 'DEFAULT'; + $SIG{'TERM'} = 'DEFAULT'; + + # unblock all delayed signals (and possibly handle them) + POSIX::sigprocmask(SIG_UNBLOCK, $sigset) or error "Can't unblock signals: $!"; + + close $rfh; + close $parentsock; + open(STDOUT, '>&', STDERR) or error "cannot open STDOUT: $!"; + + setup($options); + + print $childsock (pack('n', 0) . 'adios'); + $childsock->flush(); + + close $childsock; + + if ($options->{maketar} or $options->{makesqfs}) { + info "creating tarball..."; + + # redirect tar output to the writing end of the pipe so that the + # parent process can capture the output + open(STDOUT, '>&', $wfh) or error "cannot open STDOUT: $!"; + + # Add ./dev as the first entries of the tar file. + # We cannot add them after calling tar, because there is no way to + # prevent tar from writing NULL entries at the end. + print $devtar; + + if ($options->{mode} eq 'fakechroot') { + # Fakechroot requires tar to run inside the chroot or + # otherwise absolute symlinks will include the path to the + # root directory + 0 == system('/usr/sbin/chroot', $options->{root}, 'tar', @taropts, '-C', '/', '.') or error "tar failed: $?"; + } elsif ($options->{mode} eq 'proot') { + # proot requires tar to run inside proot or otherwise + # permissions will be completely off + my @qemuopt = (); + if (defined $options->{qemu}) { + push @qemuopt, "--qemu=qemu-$options->{qemu}"; + push @taropts, "--exclude=./host-rootfs" + } + 0 == system('proot', '--root-id', "--rootfs=$options->{root}", '--cwd=/', @qemuopt, 'tar', @taropts, '-C', '/', '.') or error "tar failed: $?"; + } elsif (any { $_ eq $options->{mode} } ('root', 'chrootless')) { + # If the chroot directory is not owned by the root user, + # then we assume that no measure was taken to fake root + # permissions. Since the final tarball should contain + # entries with root ownership, we instruct tar to do so. 
+ my @owneropts = (); + if ((stat $options->{root})[4] != 0) { + push @owneropts, '--owner=0', '--group=0', '--numeric-owner'; + } + 0 == system('tar', @taropts, @owneropts, '-C', $options->{root}, '.') or error "tar failed: $?"; + } else { + error "unknown mode: $options->{mode}"; + } + + info "done"; + } + + exit 0; + } } else { - error "unknown mode: $options->{mode}"; + error "unknown mode: $options->{mode}"; } # parent @@ -3029,8 +3029,8 @@ sub main() { my $got_signal = 0; my $waiting_for = "setup"; my $ignore = sub { - $got_signal = shift; - info "main() received signal $got_signal: waiting for $waiting_for..."; + $got_signal = shift; + info "main() received signal $got_signal: waiting for $waiting_for..."; }; $SIG{'INT'} = $ignore; @@ -3049,408 +3049,408 @@ sub main() { # we use eval() so that error() doesn't take this process down and # thus leaves the setup() process without a parent eval { - while (1) { - # get the next message - my $msg = "error"; - my $len = -1; - { - debug "reading from parentsock"; - my $ret = read ($parentsock, my $buf, 2+5) // error "cannot read from socket: $!"; - debug "finished reading from parentsock"; - if ($ret == 0) { - error "received eof on socket"; - } - ($len, $msg) = unpack("nA5", $buf); - } - if ($msg eq "adios") { - # setup finished, so we break out of the loop - if ($len != 0) { - error "expected no payload but got $len bytes"; - } - last; - } elsif ($msg eq "openr") { - # handle the openr message - debug "received message: openr"; - my $infile; - { - my $ret = read ($parentsock, $infile, $len) // error "cannot read from socket: $!"; - if ($ret == 0) { - error "received eof on socket"; - } - } - # make sure that the requested path exists outside the chroot - if (! -e $infile) { - print $parentsock (pack("n", 0) . "error") or error "cannot write to socket: $!"; - $parentsock->flush(); - error "$infile does not exist"; - } - debug "sending okthx"; - print $parentsock (pack("n", 0) . "okthx") or error "cannot write to socket: $!"; - $parentsock->flush(); - - open my $fh, '<', $infile or error "failed to open $infile for reading: $!"; - - # read from the file and send as payload to the child process - while (1) { - # read from file - my $ret = read ($fh, my $cont, 4096) // error "cannot read from pipe: $!"; - if ($ret == 0) { last; } - debug "sending write"; - # send to child - print $parentsock pack("n", $ret) . "write" . $cont; - $parentsock->flush(); - debug "waiting for okthx"; - checkokthx $parentsock; - if ($ret < 4096) { last; } - } - - # signal to the child process that we are done - debug "sending close"; - print $parentsock pack("n", 0) . "close"; - $parentsock->flush(); - debug "waiting for okthx"; - checkokthx $parentsock; - - close $fh; - } elsif ($msg eq "openw") { - debug "received message: openw"; - # payload is the output directory - my $outfile; - { - my $ret = read ($parentsock, $outfile, $len) // error "cannot read from socket: $!"; - if ($ret == 0) { - error "received eof on socket"; - } - } - # make sure that the directory exists - my $outdir = dirname($outfile); - if (-e $outdir) { - if (! -d $outdir) { - print $parentsock (pack("n", 0) . "error") or error "cannot write to socket: $!"; - $parentsock->flush(); - error "$outdir already exists but is not a directory"; - } - } else { - my $num_created = make_path $outdir, {error => \my $err}; - if ($err && @$err) { - print $parentsock (pack("n", 0) . "error") or error "cannot write to socket: $!"; - $parentsock->flush(); - error (join "; ", (map {"cannot create " . 
(join ": ", %{$_})} @$err)); - } elsif ($num_created == 0) { - print $parentsock (pack("n", 0) . "error") or error "cannot write to socket: $!"; - $parentsock->flush(); - error "cannot create $outdir"; - } - } - debug "sending okthx"; - print $parentsock (pack("n", 0) . "okthx") or error "cannot write to socket: $!"; - $parentsock->flush(); - - # now we expect one or more "write" messages containing the - # tarball to write - open my $fh, '>', $outfile or error "failed to open $outfile for writing: $!"; - - # handle "write" messages from the child process and feed - # their payload into the file handle until a "close" message - # is encountered - while(1) { - # receive the next message - my $ret = read ($parentsock, my $buf, 2+5) // error "cannot read from socket: $!"; - if ($ret == 0) { - error "received eof on socket"; - } - my ($len, $msg) = unpack("nA5", $buf); - debug "received message: $msg"; - if ($msg eq "close") { - # finish the loop - if ($len != 0) { - error "expected no payload but got $len bytes"; - } - debug "sending okthx"; - print $parentsock (pack("n", 0) . "okthx") or error "cannot write to socket: $!"; - $parentsock->flush(); - last; - } elsif ($msg ne "write") { - # we should not receive this message at this point - print $parentsock (pack("n", 0) . "error") or error "cannot write to socket: $!"; - $parentsock->flush(); - error "expected write but got: $msg"; - } - # read the payload - my $content; - { - my $ret = read ($parentsock, $content, $len) // error "error cannot read from socket: $!"; - if ($ret == 0) { - error "received eof on socket"; - } - } - # write the payload to the file handle - print $fh $content or error "cannot write to file handle: $!"; - debug "sending okthx"; - print $parentsock (pack("n", 0) . "okthx") or error "cannot write to socket: $!"; - $parentsock->flush(); - } - close $fh; - } elsif ($msg eq "mktar") { - # handle the mktar message - debug "received message: mktar"; - my $indir; - { - my $ret = read ($parentsock, $indir, $len) // error "cannot read from socket: $!"; - if ($ret == 0) { - error "received eof on socket"; - } - } - # make sure that the requested path exists outside the chroot - if (! -e $indir) { - print $parentsock (pack("n", 0) . "error") or error "cannot write to socket: $!"; - $parentsock->flush(); - error "$indir does not exist"; - } - debug "sending okthx"; - print $parentsock (pack("n", 0) . "okthx") or error "cannot write to socket: $!"; - $parentsock->flush(); - - # Open a tar process creating a tarfile of the instructed - # path. To emulate the behaviour of cp, change to the - # dirname of the requested path first. - open my $fh, '-|', 'tar', '--directory', dirname($indir), '--create', '--file', '-', basename($indir) // error "failed to fork(): $!"; - - # read from the tar process and send as payload to the child - # process - while (1) { - # read from tar - my $ret = read ($fh, my $cont, 4096) // error "cannot read from pipe: $!"; - if ($ret == 0) { last; } - debug "sending write"; - # send to child - print $parentsock pack("n", $ret) . "write" . $cont; - $parentsock->flush(); - debug "waiting for okthx"; - checkokthx $parentsock; - if ($ret < 4096) { last; } - } - - # signal to the child process that we are done - debug "sending close"; - print $parentsock pack("n", 0) . "close"; - $parentsock->flush(); - debug "waiting for okthx"; - checkokthx $parentsock; - - close $fh; - if ($? 
!= 0) { - error "tar failed"; - } - } elsif ($msg eq "untar") { - debug "received message: untar"; - # payload is the output directory - my $outdir; - { - my $ret = read ($parentsock, $outdir, $len) // error "cannot read from socket: $!"; - if ($ret == 0) { - error "received eof on socket"; - } - } - # make sure that the directory exists - if (-e $outdir) { - if (! -d $outdir) { - print $parentsock (pack("n", 0) . "error") or error "cannot write to socket: $!"; - $parentsock->flush(); - error "$outdir already exists but is not a directory"; - } - } else { - my $num_created = make_path $outdir, {error => \my $err}; - if ($err && @$err) { - print $parentsock (pack("n", 0) . "error") or error "cannot write to socket: $!"; - $parentsock->flush(); - error (join "; ", (map {"cannot create " . (join ": ", %{$_})} @$err)); - } elsif ($num_created == 0) { - print $parentsock (pack("n", 0) . "error") or error "cannot write to socket: $!"; - $parentsock->flush(); - error "cannot create $outdir"; - } - } - debug "sending okthx"; - print $parentsock (pack("n", 0) . "okthx") or error "cannot write to socket: $!"; - $parentsock->flush(); - - # now we expect one or more "write" messages containing the - # tarball to unpack - open my $fh, '|-', 'tar', '--directory', $outdir, '--extract', '--file', '-' // error "failed to fork(): $!"; - - # handle "write" messages from the child process and feed - # their payload into the tar process until a "close" message - # is encountered - while(1) { - # receive the next message - my $ret = read ($parentsock, my $buf, 2+5) // error "cannot read from socket: $!"; - if ($ret == 0) { - error "received eof on socket"; - } - my ($len, $msg) = unpack("nA5", $buf); - debug "received message: $msg"; - if ($msg eq "close") { - # finish the loop - if ($len != 0) { - error "expected no payload but got $len bytes"; - } - debug "sending okthx"; - print $parentsock (pack("n", 0) . "okthx") or error "cannot write to socket: $!"; - $parentsock->flush(); - last; - } elsif ($msg ne "write") { - # we should not receive this message at this point - print $parentsock (pack("n", 0) . "error") or error "cannot write to socket: $!"; - $parentsock->flush(); - error "expected write but got: $msg"; - } - # read the payload - my $content; - { - my $ret = read ($parentsock, $content, $len) // error "error cannot read from socket: $!"; - if ($ret == 0) { - error "received eof on socket"; - } - } - # write the payload to the tar process - print $fh $content or error "cannot write to tar process: $!"; - debug "sending okthx"; - print $parentsock (pack("n", 0) . "okthx") or error "cannot write to socket: $!"; - $parentsock->flush(); - } - close $fh; - if ($? 
!= 0) { - error "tar failed"; - } - } else { - error "unknown message: $msg"; - } - } + while (1) { + # get the next message + my $msg = "error"; + my $len = -1; + { + debug "reading from parentsock"; + my $ret = read ($parentsock, my $buf, 2+5) // error "cannot read from socket: $!"; + debug "finished reading from parentsock"; + if ($ret == 0) { + error "received eof on socket"; + } + ($len, $msg) = unpack("nA5", $buf); + } + if ($msg eq "adios") { + # setup finished, so we break out of the loop + if ($len != 0) { + error "expected no payload but got $len bytes"; + } + last; + } elsif ($msg eq "openr") { + # handle the openr message + debug "received message: openr"; + my $infile; + { + my $ret = read ($parentsock, $infile, $len) // error "cannot read from socket: $!"; + if ($ret == 0) { + error "received eof on socket"; + } + } + # make sure that the requested path exists outside the chroot + if (! -e $infile) { + print $parentsock (pack("n", 0) . "error") or error "cannot write to socket: $!"; + $parentsock->flush(); + error "$infile does not exist"; + } + debug "sending okthx"; + print $parentsock (pack("n", 0) . "okthx") or error "cannot write to socket: $!"; + $parentsock->flush(); + + open my $fh, '<', $infile or error "failed to open $infile for reading: $!"; + + # read from the file and send as payload to the child process + while (1) { + # read from file + my $ret = read ($fh, my $cont, 4096) // error "cannot read from pipe: $!"; + if ($ret == 0) { last; } + debug "sending write"; + # send to child + print $parentsock pack("n", $ret) . "write" . $cont; + $parentsock->flush(); + debug "waiting for okthx"; + checkokthx $parentsock; + if ($ret < 4096) { last; } + } + + # signal to the child process that we are done + debug "sending close"; + print $parentsock pack("n", 0) . "close"; + $parentsock->flush(); + debug "waiting for okthx"; + checkokthx $parentsock; + + close $fh; + } elsif ($msg eq "openw") { + debug "received message: openw"; + # payload is the output directory + my $outfile; + { + my $ret = read ($parentsock, $outfile, $len) // error "cannot read from socket: $!"; + if ($ret == 0) { + error "received eof on socket"; + } + } + # make sure that the directory exists + my $outdir = dirname($outfile); + if (-e $outdir) { + if (! -d $outdir) { + print $parentsock (pack("n", 0) . "error") or error "cannot write to socket: $!"; + $parentsock->flush(); + error "$outdir already exists but is not a directory"; + } + } else { + my $num_created = make_path $outdir, {error => \my $err}; + if ($err && @$err) { + print $parentsock (pack("n", 0) . "error") or error "cannot write to socket: $!"; + $parentsock->flush(); + error (join "; ", (map {"cannot create " . (join ": ", %{$_})} @$err)); + } elsif ($num_created == 0) { + print $parentsock (pack("n", 0) . "error") or error "cannot write to socket: $!"; + $parentsock->flush(); + error "cannot create $outdir"; + } + } + debug "sending okthx"; + print $parentsock (pack("n", 0) . 
"okthx") or error "cannot write to socket: $!"; + $parentsock->flush(); + + # now we expect one or more "write" messages containing the + # tarball to write + open my $fh, '>', $outfile or error "failed to open $outfile for writing: $!"; + + # handle "write" messages from the child process and feed + # their payload into the file handle until a "close" message + # is encountered + while(1) { + # receive the next message + my $ret = read ($parentsock, my $buf, 2+5) // error "cannot read from socket: $!"; + if ($ret == 0) { + error "received eof on socket"; + } + my ($len, $msg) = unpack("nA5", $buf); + debug "received message: $msg"; + if ($msg eq "close") { + # finish the loop + if ($len != 0) { + error "expected no payload but got $len bytes"; + } + debug "sending okthx"; + print $parentsock (pack("n", 0) . "okthx") or error "cannot write to socket: $!"; + $parentsock->flush(); + last; + } elsif ($msg ne "write") { + # we should not receive this message at this point + print $parentsock (pack("n", 0) . "error") or error "cannot write to socket: $!"; + $parentsock->flush(); + error "expected write but got: $msg"; + } + # read the payload + my $content; + { + my $ret = read ($parentsock, $content, $len) // error "error cannot read from socket: $!"; + if ($ret == 0) { + error "received eof on socket"; + } + } + # write the payload to the file handle + print $fh $content or error "cannot write to file handle: $!"; + debug "sending okthx"; + print $parentsock (pack("n", 0) . "okthx") or error "cannot write to socket: $!"; + $parentsock->flush(); + } + close $fh; + } elsif ($msg eq "mktar") { + # handle the mktar message + debug "received message: mktar"; + my $indir; + { + my $ret = read ($parentsock, $indir, $len) // error "cannot read from socket: $!"; + if ($ret == 0) { + error "received eof on socket"; + } + } + # make sure that the requested path exists outside the chroot + if (! -e $indir) { + print $parentsock (pack("n", 0) . "error") or error "cannot write to socket: $!"; + $parentsock->flush(); + error "$indir does not exist"; + } + debug "sending okthx"; + print $parentsock (pack("n", 0) . "okthx") or error "cannot write to socket: $!"; + $parentsock->flush(); + + # Open a tar process creating a tarfile of the instructed + # path. To emulate the behaviour of cp, change to the + # dirname of the requested path first. + open my $fh, '-|', 'tar', '--directory', dirname($indir), '--create', '--file', '-', basename($indir) // error "failed to fork(): $!"; + + # read from the tar process and send as payload to the child + # process + while (1) { + # read from tar + my $ret = read ($fh, my $cont, 4096) // error "cannot read from pipe: $!"; + if ($ret == 0) { last; } + debug "sending write"; + # send to child + print $parentsock pack("n", $ret) . "write" . $cont; + $parentsock->flush(); + debug "waiting for okthx"; + checkokthx $parentsock; + if ($ret < 4096) { last; } + } + + # signal to the child process that we are done + debug "sending close"; + print $parentsock pack("n", 0) . "close"; + $parentsock->flush(); + debug "waiting for okthx"; + checkokthx $parentsock; + + close $fh; + if ($? != 0) { + error "tar failed"; + } + } elsif ($msg eq "untar") { + debug "received message: untar"; + # payload is the output directory + my $outdir; + { + my $ret = read ($parentsock, $outdir, $len) // error "cannot read from socket: $!"; + if ($ret == 0) { + error "received eof on socket"; + } + } + # make sure that the directory exists + if (-e $outdir) { + if (! 
-d $outdir) { + print $parentsock (pack("n", 0) . "error") or error "cannot write to socket: $!"; + $parentsock->flush(); + error "$outdir already exists but is not a directory"; + } + } else { + my $num_created = make_path $outdir, {error => \my $err}; + if ($err && @$err) { + print $parentsock (pack("n", 0) . "error") or error "cannot write to socket: $!"; + $parentsock->flush(); + error (join "; ", (map {"cannot create " . (join ": ", %{$_})} @$err)); + } elsif ($num_created == 0) { + print $parentsock (pack("n", 0) . "error") or error "cannot write to socket: $!"; + $parentsock->flush(); + error "cannot create $outdir"; + } + } + debug "sending okthx"; + print $parentsock (pack("n", 0) . "okthx") or error "cannot write to socket: $!"; + $parentsock->flush(); + + # now we expect one or more "write" messages containing the + # tarball to unpack + open my $fh, '|-', 'tar', '--directory', $outdir, '--extract', '--file', '-' // error "failed to fork(): $!"; + + # handle "write" messages from the child process and feed + # their payload into the tar process until a "close" message + # is encountered + while(1) { + # receive the next message + my $ret = read ($parentsock, my $buf, 2+5) // error "cannot read from socket: $!"; + if ($ret == 0) { + error "received eof on socket"; + } + my ($len, $msg) = unpack("nA5", $buf); + debug "received message: $msg"; + if ($msg eq "close") { + # finish the loop + if ($len != 0) { + error "expected no payload but got $len bytes"; + } + debug "sending okthx"; + print $parentsock (pack("n", 0) . "okthx") or error "cannot write to socket: $!"; + $parentsock->flush(); + last; + } elsif ($msg ne "write") { + # we should not receive this message at this point + print $parentsock (pack("n", 0) . "error") or error "cannot write to socket: $!"; + $parentsock->flush(); + error "expected write but got: $msg"; + } + # read the payload + my $content; + { + my $ret = read ($parentsock, $content, $len) // error "error cannot read from socket: $!"; + if ($ret == 0) { + error "received eof on socket"; + } + } + # write the payload to the tar process + print $fh $content or error "cannot write to tar process: $!"; + debug "sending okthx"; + print $parentsock (pack("n", 0) . "okthx") or error "cannot write to socket: $!"; + $parentsock->flush(); + } + close $fh; + if ($? 
!= 0) { + error "tar failed"; + } + } else { + error "unknown message: $msg"; + } + } }; if ($@) { - # we cannot die here because that would leave the other thread - # running without a parent - warning "listening on child socket failed: $@"; - $exitstatus = 1; + # we cannot die here because that would leave the other thread + # running without a parent + warning "listening on child socket failed: $@"; + $exitstatus = 1; } debug "finish to listen for hooks"; close $parentsock; if ($options->{maketar} or $options->{makesqfs}) { - # we use eval() so that error() doesn't take this process down and - # thus leaves the setup() process without a parent - eval { - if ($options->{target} eq '-') { - if (!copy($rfh, *STDOUT)) { - error "cannot copy to standard output: $!"; - } - } else { - if ($options->{makesqfs} or defined $tar_compressor) { - my @argv = (); - if ($options->{makesqfs}) { - push @argv, 'tar2sqfs', - '--quiet', '--no-skip', '--force', '--exportable', - '--compressor', 'xz', - '--block-size', '1048576', - $options->{target}; - } else { - push @argv, @{$tar_compressor}; - } - POSIX::sigprocmask(SIG_BLOCK, $sigset) or error "Can't block signals: $!"; - my $cpid = fork() // error "fork() failed: $!"; - if ($cpid == 0) { - # child: default signal handlers - $SIG{'INT'} = 'DEFAULT'; - $SIG{'HUP'} = 'DEFAULT'; - $SIG{'PIPE'} = 'DEFAULT'; - $SIG{'TERM'} = 'DEFAULT'; - - # unblock all delayed signals (and possibly handle them) - POSIX::sigprocmask(SIG_UNBLOCK, $sigset) or error "Can't unblock signals: $!"; - - if ($options->{makesqfs}) { - open(STDOUT, '>', '/dev/null') or error "cannot open /dev/null for writing: $!"; - } else { - open(STDOUT, '>', $options->{target}) or error "cannot open $options->{target} for writing: $!"; - } - open(STDIN, '<&', $rfh) or error "cannot open file handle for reading: $!"; - eval 'Devel::Cover::set_coverage("none")' if $is_covering; - exec { $argv[0] } @argv or error ("cannot exec " . (join " ", @argv) . ": $!"); - } - POSIX::sigprocmask(SIG_UNBLOCK, $sigset) or error "Can't unblock signals: $!"; - waitpid $cpid, 0; - if ($? != 0) { - error ("failed to start " . 
(join " ", @argv)); - } - } else { - if(!copy($rfh, $options->{target})) { - error "cannot copy to $options->{target}: $!"; - } - } - } - }; - if ($@) { - # we cannot die here because that would leave the other thread - # running without a parent - warning "creating tarball failed: $@"; - $exitstatus = 1; - } + # we use eval() so that error() doesn't take this process down and + # thus leaves the setup() process without a parent + eval { + if ($options->{target} eq '-') { + if (!copy($rfh, *STDOUT)) { + error "cannot copy to standard output: $!"; + } + } else { + if ($options->{makesqfs} or defined $tar_compressor) { + my @argv = (); + if ($options->{makesqfs}) { + push @argv, 'tar2sqfs', + '--quiet', '--no-skip', '--force', '--exportable', + '--compressor', 'xz', + '--block-size', '1048576', + $options->{target}; + } else { + push @argv, @{$tar_compressor}; + } + POSIX::sigprocmask(SIG_BLOCK, $sigset) or error "Can't block signals: $!"; + my $cpid = fork() // error "fork() failed: $!"; + if ($cpid == 0) { + # child: default signal handlers + $SIG{'INT'} = 'DEFAULT'; + $SIG{'HUP'} = 'DEFAULT'; + $SIG{'PIPE'} = 'DEFAULT'; + $SIG{'TERM'} = 'DEFAULT'; + + # unblock all delayed signals (and possibly handle them) + POSIX::sigprocmask(SIG_UNBLOCK, $sigset) or error "Can't unblock signals: $!"; + + if ($options->{makesqfs}) { + open(STDOUT, '>', '/dev/null') or error "cannot open /dev/null for writing: $!"; + } else { + open(STDOUT, '>', $options->{target}) or error "cannot open $options->{target} for writing: $!"; + } + open(STDIN, '<&', $rfh) or error "cannot open file handle for reading: $!"; + eval 'Devel::Cover::set_coverage("none")' if $is_covering; + exec { $argv[0] } @argv or error ("cannot exec " . (join " ", @argv) . ": $!"); + } + POSIX::sigprocmask(SIG_UNBLOCK, $sigset) or error "Can't unblock signals: $!"; + waitpid $cpid, 0; + if ($? != 0) { + error ("failed to start " . (join " ", @argv)); + } + } else { + if(!copy($rfh, $options->{target})) { + error "cannot copy to $options->{target}: $!"; + } + } + } + }; + if ($@) { + # we cannot die here because that would leave the other thread + # running without a parent + warning "creating tarball failed: $@"; + $exitstatus = 1; + } } close($rfh); waitpid $pid, 0; if ($? != 0) { - $exitstatus = 1; + $exitstatus = 1; } # change signal handler message $waiting_for = "cleanup"; if (($options->{maketar} or $options->{makesqfs}) and -e $options->{root}) { - info "removing tempdir $options->{root}..."; - if ($options->{mode} eq 'unshare') { - # We don't have permissions to remove the directory outside - # the unshared namespace, so we remove it here. - # Since this is still inside the unshared namespace, there is - # no risk of removing anything important. - $pid = get_unshare_cmd { - # File::Path will produce the error "cannot stat initial - # working directory" if the working directory cannot be - # accessed by the unprivileged unshared user. Thus, we first - # navigate to the parent of the root directory. - chdir "$options->{root}/.." or error "unable to chdir() to parent directory of $options->{root}: $!"; - remove_tree($options->{root}, {error => \my $err}); - if (@$err) { - for my $diag (@$err) { - my ($file, $message) = %$diag; - if ($file eq '') { - warning "general error: $message"; - } - else { - warning "problem unlinking $file: $message"; - } - } - } - } \@idmap; - waitpid $pid, 0; - $? 
== 0 or error "remove_tree failed"; - } elsif (any { $_ eq $options->{mode} } ('root', 'fakechroot', 'proot', 'chrootless')) { - # without unshare, we use the system's rm to recursively remove the - # temporary directory just to make sure that we do not accidentally - # remove more than we should by using --one-file-system. - # - # --interactive=never is needed when in proot mode, the - # write-protected file /apt/apt.conf.d/01autoremove-kernels is to - # be removed. - 0 == system('rm', '--interactive=never', '--recursive', '--preserve-root', '--one-file-system', $options->{root}) or error "rm failed: $!"; - } else { - error "unknown mode: $options->{mode}"; - } + info "removing tempdir $options->{root}..."; + if ($options->{mode} eq 'unshare') { + # We don't have permissions to remove the directory outside + # the unshared namespace, so we remove it here. + # Since this is still inside the unshared namespace, there is + # no risk of removing anything important. + $pid = get_unshare_cmd { + # File::Path will produce the error "cannot stat initial + # working directory" if the working directory cannot be + # accessed by the unprivileged unshared user. Thus, we first + # navigate to the parent of the root directory. + chdir "$options->{root}/.." or error "unable to chdir() to parent directory of $options->{root}: $!"; + remove_tree($options->{root}, {error => \my $err}); + if (@$err) { + for my $diag (@$err) { + my ($file, $message) = %$diag; + if ($file eq '') { + warning "general error: $message"; + } + else { + warning "problem unlinking $file: $message"; + } + } + } + } \@idmap; + waitpid $pid, 0; + $? == 0 or error "remove_tree failed"; + } elsif (any { $_ eq $options->{mode} } ('root', 'fakechroot', 'proot', 'chrootless')) { + # without unshare, we use the system's rm to recursively remove the + # temporary directory just to make sure that we do not accidentally + # remove more than we should by using --one-file-system. + # + # --interactive=never is needed when in proot mode, the + # write-protected file /apt/apt.conf.d/01autoremove-kernels is to + # be removed. + 0 == system('rm', '--interactive=never', '--recursive', '--preserve-root', '--one-file-system', $options->{root}) or error "rm failed: $!"; + } else { + error "unknown mode: $options->{mode}"; + } } if ($got_signal) { - $exitstatus = 1; + $exitstatus = 1; } exit $exitstatus;
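
Note on the crafted ./dev entries seen in the maketar hunks above: each device node is emitted as a hand-packed 512-byte ustar header whose checksum is patched in after packing. The following is a minimal sketch of that technique, not part of the patch; the file name, permissions, and device numbers (/dev/null, character device 1:3) are illustrative values chosen here, and the magic string is the old GNU-style "ustar  ".

    #!/usr/bin/perl
    # Sketch only: pack one 512-byte ustar header by hand and insert the
    # checksum afterwards, as the ./dev hunk above does for its entries.
    use strict;
    use warnings;

    my ($fname, $mode, $type, $devmajor, $devminor, $mtime)
        = ('./dev/null', 0666, '3', 1, 3, 0);  # typeflag '3' = char device

    my $entry = pack('a100 a8 a8 a8 a12 a12 A8 a1 a100 a8 a32 a32 a8 a8 a155 x12',
        $fname,
        sprintf('%07o', $mode),
        sprintf('%07o', 0),          # uid
        sprintf('%07o', 0),          # gid
        sprintf('%011o', 0),         # size: device nodes carry no data
        sprintf('%011o', $mtime),
        '',                          # checksum: A8 pads it with spaces for now
        $type,
        '',                          # linkname
        "ustar  ",                   # magic + version (old GNU style)
        '',                          # username
        '',                          # groupname
        sprintf('%07o', $devmajor),
        sprintf('%07o', $devminor),
        '');                         # prefix

    # The checksum is the unsigned byte sum of the header computed with the
    # checksum field read as spaces; '%16C*' sums all bytes into 16 bits.
    substr($entry, 148, 7) = sprintf("%06o\0", unpack('%16C*', $entry));

    print length($entry), " bytes\n";   # a ustar header is always 512 bytes

Writing these headers before spawning tar is what lets the device entries appear first in the archive without tar appending its own end-of-archive blocks in between.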
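
Note on the hook socket protocol driven by the big while loop above: every message is a 2-byte big-endian payload length, a fixed 5-character command (adios, openr, openw, mktar, untar, write, close, okthx, error), then the payload. A minimal sketch of one round trip over a socketpair, not part of the patch and with example payload text:

    #!/usr/bin/perl
    # Sketch only: frame, send and parse one "write" message plus its
    # "okthx" acknowledgement, using the same pack('n', ...) framing as
    # the parent/child loop above.
    use strict;
    use warnings;
    use IO::Handle;
    use Socket;

    socketpair(my $childsock, my $parentsock, AF_UNIX, SOCK_STREAM, PF_UNSPEC)
        or die "socketpair failed: $!";
    $childsock->autoflush(1);
    $parentsock->autoflush(1);

    # "child" side: send one write frame carrying a payload
    my $payload = 'hello from the chroot side';
    print $childsock pack('n', length $payload) . 'write' . $payload;

    # "parent" side: read the fixed 7-byte prefix, then the payload
    read($parentsock, my $buf, 2 + 5) // die "cannot read from socket: $!";
    my ($len, $msg) = unpack('nA5', $buf);
    read($parentsock, my $content, $len) // die "cannot read from socket: $!";
    print "received '$msg' with $len bytes of payload\n";

    # acknowledge with an empty okthx frame, as the protocol expects
    print $parentsock pack('n', 0) . 'okthx';

The per-frame okthx acknowledgement is what keeps the two sides in lockstep, so a stream of write frames followed by a single close can carry an arbitrarily large file or tarball through the socket.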
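
Note on the output path: when a compressed tarball or a squashfs image is requested, the parent forks a consumer (the compressor chosen from the target suffix, or tar2sqfs) and dups the pipe's read end onto its STDIN. The following self-contained sketch mirrors that pipe/fork/exec shape; the directory, output name, and choice of xz are example values here, not mmdebstrap's own configuration:

    #!/usr/bin/perl
    # Sketch only: stream `tar --create` through an external compressor
    # into a file, as the maketar branch above does with the command that
    # get_tar_compressor() selects.
    use strict;
    use warnings;

    my $dir        = '/etc/skel';            # directory to pack (example)
    my $target     = 'out.tar.xz';           # output file (example)
    my @compressor = ('xz', '--threads=0');  # example compressor command

    pipe(my $rfh, my $wfh) or die "pipe failed: $!";

    my $tarpid = fork() // die "fork() failed: $!";
    if ($tarpid == 0) {      # producer: tar writes the archive into the pipe
        close $rfh;
        open(STDOUT, '>&', $wfh) or die "cannot redirect STDOUT: $!";
        exec 'tar', '--create', '-C', $dir, '.' or die "cannot exec tar: $!";
    }

    my $cpid = fork() // die "fork() failed: $!";
    if ($cpid == 0) {        # consumer: the compressor reads the pipe
        close $wfh;
        open(STDOUT, '>', $target) or die "cannot open $target: $!";
        open(STDIN, '<&', $rfh)    or die "cannot redirect STDIN: $!";
        exec { $compressor[0] } @compressor
            or die "cannot exec @compressor: $!";
    }

    # the parent keeps neither pipe end, so the consumer sees EOF once tar exits
    close $wfh;
    close $rfh;
    waitpid($_, 0) for $tarpid, $cpid;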