#!/usr/bin/perl # # © 2018 - 2021 Johannes Schauer Marin Rodrigues # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to # deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # The software is provided "as is", without warranty of any kind, express or # implied, including but not limited to the warranties of merchantability, # fitness for a particular purpose and noninfringement. In no event shall the # authors or copyright holders be liable for any claim, damages or other # liability, whether in an action of contract, tort or otherwise, arising # from, out of or in connection with the software or the use or other dealings # in the software. use strict; use warnings; our $VERSION = '1.1.0'; use English; use Getopt::Long; use Pod::Usage; use File::Copy; use File::Path qw(make_path); use File::Temp qw(tempfile tempdir); use File::Basename; use File::Find; use Cwd qw(abs_path getcwd); require "syscall.ph"; ## no critic (Modules::RequireBarewordIncludes) use Fcntl qw(S_IFCHR S_IFBLK FD_CLOEXEC F_GETFD F_SETFD); use List::Util qw(any none); use POSIX qw(SIGINT SIGHUP SIGPIPE SIGTERM SIG_BLOCK SIG_UNBLOCK strftime isatty); use Carp; use Term::ANSIColor; use Socket; use Time::HiRes; use Math::BigInt; use version; ## no critic (InputOutput::RequireBriefOpen) # from sched.h # use typeglob constants because "use constant" has several drawback as # explained in the documentation for the Readonly CPAN module *CLONE_NEWNS = \0x20000; # mount namespace *CLONE_NEWUTS = \0x4000000; # utsname *CLONE_NEWIPC = \0x8000000; # ipc *CLONE_NEWUSER = \0x10000000; # user *CLONE_NEWPID = \0x20000000; # pid *CLONE_NEWNET = \0x40000000; # net *_LINUX_CAPABILITY_VERSION_3 = \0x20080522; *CAP_SYS_ADMIN = \21; *PR_CAPBSET_READ = \23; our ( $CLONE_NEWNS, $CLONE_NEWUTS, $CLONE_NEWIPC, $CLONE_NEWUSER, $CLONE_NEWPID, $CLONE_NEWNET, $_LINUX_CAPABILITY_VERSION_3, $CAP_SYS_ADMIN, $PR_CAPBSET_READ ); #<<< # type codes: # 0 -> normal file # 1 -> hardlink # 2 -> symlink # 3 -> character special # 4 -> block special # 5 -> directory my @devfiles = ( # filename mode type link target major minor ["", oct(755), 5, '', undef, undef], ["console", oct(666), 3, '', 5, 1], ["fd", oct(777), 2, '/proc/self/fd', undef, undef], ["full", oct(666), 3, '', 1, 7], ["null", oct(666), 3, '', 1, 3], ["ptmx", oct(666), 3, '', 5, 2], ["pts/", oct(755), 5, '', undef, undef], ["random", oct(666), 3, '', 1, 8], ["shm/", oct(755), 5, '', undef, undef], ["stderr", oct(777), 2, '/proc/self/fd/2', undef, undef], ["stdin", oct(777), 2, '/proc/self/fd/0', undef, undef], ["stdout", oct(777), 2, '/proc/self/fd/1', undef, undef], ["tty", oct(666), 3, '', 5, 0], ["urandom", oct(666), 3, '', 1, 9], ["zero", oct(666), 3, '', 1, 5], ); #>>> # verbosity levels: # 0 -> print nothing # 1 -> normal output and progress bars # 2 -> verbose output # 3 -> debug output my $verbosity_level = 1; my $is_covering = 0; { # make $@ local, so we don't print "Undefined subroutine called" # in other parts where we evaluate $@ local $@ = ''; $is_covering = !!(eval { Devel::Cover::get_coverage() }); } # the reason why 
Perl::Critic warns about this is, that it suspects that the # programmer wants to implement a test whether the terminal is interactive or # not, in which case, complex interactions with the magic *ARGV indeed make it # advisable to use IO::Interactive. In our case, we do not want to create an # interactivity check but just want to check whether STDERR is opened to a tty, # so our use of -t is fine and not "fragile and complicated" as is written in # the description of InputOutput::ProhibitInteractiveTest. Also see # https://github.com/Perl-Critic/Perl-Critic/issues/918 sub stderr_is_tty() { ## no critic (InputOutput::ProhibitInteractiveTest) if (-t STDERR) { return 1; } else { return 0; } } sub debug { if ($verbosity_level < 3) { return; } my $msg = shift; my ($package, $filename, $line) = caller; $msg = "D: $PID $line $msg"; if (stderr_is_tty()) { $msg = colored($msg, 'clear'); } print STDERR "$msg\n"; return; } sub info { if ($verbosity_level == 0) { return; } my $msg = shift; if ($verbosity_level >= 3) { my ($package, $filename, $line) = caller; $msg = "$PID $line $msg"; } $msg = "I: $msg"; if (stderr_is_tty()) { $msg = colored($msg, 'green'); } print STDERR "$msg\n"; return; } sub warning { if ($verbosity_level == 0) { return; } my $msg = shift; $msg = "W: $msg"; if (stderr_is_tty()) { $msg = colored($msg, 'bold yellow'); } print STDERR "$msg\n"; return; } sub error { # if error() is called with the string from a previous error() that was # caught inside an eval(), then the string will have a newline which we # are stripping here chomp(my $msg = shift); $msg = "E: $msg"; if (stderr_is_tty()) { $msg = colored($msg, 'bold red'); } if ($verbosity_level == 3) { croak $msg; # produces a backtrace } else { die "$msg\n"; } } # The encoding of dev_t is MMMM Mmmm mmmM MMmm, where M is a hex digit of # the major number and m is a hex digit of the minor number. sub major { my $rdev = shift; my $right = Math::BigInt->from_hex("0x00000000000fff00")->band($rdev)->brsft(8); my $left = Math::BigInt->from_hex("0xfffff00000000000")->band($rdev)->brsft(32); return $right->bior($left); } sub minor { my $rdev = shift; my $right = Math::BigInt->from_hex("0x00000000000000ff")->band($rdev); my $left = Math::BigInt->from_hex("0x00000ffffff00000")->band($rdev)->brsft(12); return $right->bior($left); } sub can_execute { my $tool = shift; my $pid = open my $fh, '-|' // return 0; if ($pid == 0) { open(STDERR, '>&', STDOUT) or die; exec {$tool} $tool, '--version' or die; } chomp( my $content = do { local $/; <$fh> } ); close $fh; if ($? != 0) { return 0; } if (length $content == 0) { return 0; } return 1; } # check whether a directory is mounted by comparing the device number of the # directory itself with its parent sub is_mountpoint { my $dir = shift; if (!-e $dir) { return 0; } my @a = stat "$dir/."; my @b = stat "$dir/.."; # if the device number is different, then the directory must be mounted if ($a[0] != $b[0]) { return 1; } # if the inode number is the same, then the directory must be mounted if ($a[1] == $b[1]) { return 1; } return 0; } # tar cannot figure out the decompression program when receiving data on # standard input, thus we do it ourselves. 
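# For example (illustration only, not code that runs here): with the helper
# below, a caller that wants to write "out.tar.xz" can do
#
#     my $comp = get_tar_compressor("out.tar.xz");   # yields ['xz']
#     # get_tar_compressor("out.tar") and get_tar_compressor("-")
#     # return undef, meaning: write the tar stream uncompressed
#
# and then pipe the tar stream through that program.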
# The mapping below is copied from tar's src/suffix.c
sub get_tar_compressor {
    my $filename = shift;
    if ($filename eq '-') {
        return;
    } elsif ($filename =~ /\.tar$/) {
        return;
    } elsif ($filename =~ /\.(gz|tgz|taz)$/) {
        return ['gzip'];
    } elsif ($filename =~ /\.(Z|taZ)$/) {
        return ['compress'];
    } elsif ($filename =~ /\.(bz2|tbz|tbz2|tz2)$/) {
        return ['bzip2'];
    } elsif ($filename =~ /\.lz$/) {
        return ['lzip'];
    } elsif ($filename =~ /\.(lzma|tlz)$/) {
        return ['lzma'];
    } elsif ($filename =~ /\.lzo$/) {
        return ['lzop'];
    } elsif ($filename =~ /\.lz4$/) {
        return ['lz4'];
    } elsif ($filename =~ /\.(xz|txz)$/) {
        return ['xz'];
    } elsif ($filename =~ /\.zst$/) {
        return ['zstd'];
    }
    return;
}

# avoid dependency on String::ShellQuote by implementing the mechanism
# from python's shlex.quote function
sub shellescape {
    my $string = shift;
    if (length $string == 0) {
        return "''";
    }
    # search for occurrences of characters that are not safe
    # the 'a' regex modifier makes sure that \w only matches ASCII
    if ($string !~ m/[^\w@\%+=:,.\/-]/a) {
        return $string;
    }
    # wrap the string in single quotes and handle existing single quotes by
    # putting them outside of the single-quoted string
    $string =~ s/'/'"'"'/g;
    return "'$string'";
}

sub test_unshare_userns {
    my $verbose = shift;
    if ($EFFECTIVE_USER_ID == 0) {
        my $msg = "cannot unshare user namespace when executing as root";
        if ($verbose) {
            warning $msg;
        } else {
            debug $msg;
        }
        return 0;
    }
    # arguments to syscalls have to be stored in their own variable or
    # otherwise we will get "Modification of a read-only value attempted"
    my $unshare_flags = $CLONE_NEWUSER;
    # we spawn a new process because if unshare succeeds, we would otherwise
    # have unshared the mmdebstrap process itself which we don't want
    my $pid = fork() // error "fork() failed: $!";
    if ($pid == 0) {
        my $ret = syscall(&SYS_unshare, $unshare_flags);
        if ($ret == 0) {
            exit 0;
        } else {
            my $msg = "unshare syscall failed: $!";
            if ($verbose) {
                warning $msg;
            } else {
                debug $msg;
            }
            exit 1;
        }
    }
    waitpid($pid, 0);
    if (($? >> 8) != 0) {
        return 0;
    }
    # if newuidmap and newgidmap exist, the exit status will be 1 when
    # executed without parameters
    system "newuidmap 2>/dev/null";
    if (($? >> 8) != 1) {
        if (($? >> 8) == 127) {
            my $msg = "cannot find newuidmap";
            if ($verbose) {
                warning $msg;
            } else {
                debug $msg;
            }
        } else {
            my $msg = "newuidmap returned unknown exit status: $?";
            if ($verbose) {
                warning $msg;
            } else {
                debug $msg;
            }
        }
        return 0;
    }
    system "newgidmap 2>/dev/null";
    if (($? >> 8) != 1) {
        if (($?
>> 8) == 127) { my $msg = "cannot find newgidmap"; if ($verbose) { warning $msg; } else { debug $msg; } } else { my $msg = "newgidmap returned unknown exit status: $?"; if ($verbose) { warning $msg; } else { debug $msg; } } return 0; } return 1; } sub read_subuid_subgid() { my $username = getpwuid $REAL_USER_ID; my ($subid, $num_subid, $fh, $n); my @result = (); if (!-e "/etc/subuid") { warning "/etc/subuid doesn't exist"; return; } if (!-r "/etc/subuid") { warning "/etc/subuid is not readable"; return; } open $fh, "<", "/etc/subuid" or error "cannot open /etc/subuid for reading: $!"; while (my $line = <$fh>) { ($n, $subid, $num_subid) = split(/:/, $line, 3); last if ($n eq $username); } close $fh; if (!length $subid) { warning "/etc/subuid is empty"; return; } if ($n ne $username) { warning "no entry in /etc/subuid for $username"; return; } push @result, ["u", 0, $subid, $num_subid]; if (scalar(@result) < 1) { warning "/etc/subuid does not contain an entry for $username"; return; } if (scalar(@result) > 1) { warning "/etc/subuid contains multiple entries for $username"; return; } my $groupname = getgrgid $REAL_GROUP_ID; if (!-e "/etc/subgid") { warning "/etc/subgid doesn't exist"; return; } if (!-r "/etc/subgid") { warning "/etc/subgid is not readable"; return; } open $fh, "<", "/etc/subgid" or error "cannot open /etc/subgid for reading: $!"; while (my $line = <$fh>) { ($n, $subid, $num_subid) = split(/:/, $line, 3); last if ($n eq $groupname); } close $fh; if (!length $subid) { warning "/etc/subgid is empty"; return; } if ($n ne $groupname) { warning "no entry in /etc/subgid for $groupname"; return; } push @result, ["g", 0, $subid, $num_subid]; if (scalar(@result) < 2) { warning "/etc/subgid does not contain an entry for $groupname"; return; } if (scalar(@result) > 2) { warning "/etc/subgid contains multiple entries for $groupname"; return; } return @result; } # This function spawns two child processes forming the following process tree # # A # | # fork() # | \ # B C # | | # | fork() # | | \ # | D E # | | | # |unshare() # | close() # | | | # | | read() # | | newuidmap(D) # | | newgidmap(D) # | | / # | waitpid() # | | # | fork() # | | \ # | F G # | | | # | | exec() # | | / # | waitpid() # | / # waitpid() # # To better refer to each individual part, we give each process a new # identifier after calling fork(). Process A is the main process. After # executing fork() we call the parent and child B and C, respectively. This # first fork() is done because we do not want to modify A. B then remains # waiting for its child C to finish. C calls fork() again, splitting into # the parent D and its child E. In the parent D we call unshare() and close a # pipe shared by D and E to signal to E that D is done with calling unshare(). # E notices this by using read() and follows up with executing the tools # new[ug]idmap on D. E finishes and D continues with doing another fork(). # This is because when unsharing the PID namespace, we need a PID 1 to be kept # alive or otherwise any child processes cannot fork() anymore themselves. So # we keep F as PID 1 and finally call exec() in G. 
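# Illustrative sketch (assumption, not code that mmdebstrap executes
# verbatim): if /etc/subuid and /etc/subgid each contain the single entry
# "user:100000:65536" for the invoking user and group, read_subuid_subgid()
# above returns roughly
#
#     (["u", 0, 100000, 65536], ["g", 0, 100000, 65536])
#
# and that list can be passed as the id mapping when requesting an unshared
# environment from get_unshare_cmd() below:
#
#     my @idmap = read_subuid_subgid();
#     my $pid   = get_unshare_cmd(sub { 0 == system('id') or die }, \@idmap);
#     waitpid($pid, 0);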
sub get_unshare_cmd {
    my $cmd   = shift;
    my $idmap = shift;

    # unsharing the mount namespace (NEWNS) requires CAP_SYS_ADMIN
    my $unshare_flags
      = $CLONE_NEWNS | $CLONE_NEWPID | $CLONE_NEWUTS | $CLONE_NEWIPC;

    # we only need to add CLONE_NEWUSER if we are not yet root
    if ($EFFECTIVE_USER_ID != 0) {
        $unshare_flags |= $CLONE_NEWUSER;
    }

    if (0) {
        $unshare_flags |= $CLONE_NEWNET;
    }

    # fork a new process and let the child get unshare()ed
    # we don't want to unshare the parent process
    my $gcpid = fork() // error "fork() failed: $!";
    if ($gcpid == 0) {
        # Create a pipe for the parent process to signal the child process
        # that it is done with calling unshare() so that the child can go
        # ahead setting up uid_map and gid_map.
        pipe my $rfh, my $wfh;

        # We have to do this dance with forking a process and then modifying
        # the parent from the child because:
        #  - new[ug]idmap can only be called on a process id after that
        #    process has unshared the user namespace
        #  - a process loses its capabilities if it performs an execve() with
        #    nonzero user ids; see the capabilities(7) man page for details.
        #  - a process that unshared the user namespace by default does not
        #    have the privileges to call new[ug]idmap on itself
        #
        # this also works the other way around (the child setting up a user
        # namespace and being modified from the parent) but that way, the
        # parent would have to stay around until the child exited (so a pid
        # would be wasted). Additionally, that variant would require an
        # additional pipe to let the parent signal the child that it is done
        # with calling new[ug]idmap. The way it is done here, this signaling
        # can instead be done by wait()-ing for the exit of the child.
        my $ppid = $$;
        my $cpid = fork() // error "fork() failed: $!";
        if ($cpid == 0) {
            # child

            # Close the writing descriptor at our end of the pipe so that we
            # see EOF when parent closes its descriptor.
            close $wfh;

            # Wait for the parent process to finish its unshare() call by
            # waiting for an EOF.
            0 == sysread $rfh, my $c, 1 or error "read() did not receive EOF";

            # the process is already root, so no need for newuidmap/newgidmap
            if ($EFFECTIVE_USER_ID == 0) {
                exit 0;
            }

            # The new[ug]idmap programs have to be used because they are
            # setuid root. These privileges are needed to map the ids from
            # /etc/sub[ug]id to the user namespace set up by the parent.
            # Without these privileges, only the id of the user itself can be
            # mapped into the new namespace.
            #
            # Since new[ug]idmap is setuid root we also don't need to write
            # "deny" to /proc/$$/setgroups beforehand (this is otherwise
            # required for unprivileged processes trying to write to
            # /proc/$$/gid_map since kernel version 3.19 for security reasons)
            # and therefore the parent process keeps its ability to change its
            # own group here.
            #
            # Since /proc/$ppid/[ug]id_map can only be written to once,
            # respectively, instead of making multiple calls to new[ug]idmap,
            # we assemble a command line that makes one call each.
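            # For illustration only (hypothetical values): with an $idmap of
            # (["u", 0, 100000, 65536], ["g", 0, 100000, 65536]) and a parent
            # pid of 1234, the code below ends up running
            #
            #     newuidmap 1234  0 100000 65536
            #     newgidmap 1234  0 100000 65536
            #
            # An entry of type "b" contributes its range to both command
            # lines.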
my $uidmapcmd = ""; my $gidmapcmd = ""; foreach (@{$idmap}) { my ($t, $hostid, $nsid, $range) = @{$_}; if ($t ne "u" and $t ne "g" and $t ne "b") { error "invalid idmap type: $t"; } if ($t eq "u" or $t eq "b") { $uidmapcmd .= " $hostid $nsid $range"; } if ($t eq "g" or $t eq "b") { $gidmapcmd .= " $hostid $nsid $range"; } } my $idmapcmd = ''; if ($uidmapcmd ne "") { 0 == system "newuidmap $ppid $uidmapcmd" or error "newuidmap $ppid $uidmapcmd failed: $!"; } if ($gidmapcmd ne "") { 0 == system "newgidmap $ppid $gidmapcmd" or error "newgidmap $ppid $gidmapcmd failed: $!"; } exit 0; } # parent # After fork()-ing, the parent immediately calls unshare... 0 == syscall &SYS_unshare, $unshare_flags or error "unshare() failed: $!"; # .. and then signals the child process that we are done with the # unshare() call by sending an EOF. close $wfh; # Wait for the child process to finish its setup by waiting for its # exit. $cpid == waitpid $cpid, 0 or error "waitpid() failed: $!"; my $exit = $? >> 8; if ($exit != 0) { error "child had a non-zero exit status: $exit"; } # Currently we are nobody (uid and gid are 65534). So we become root # user and group instead. # # We are using direct syscalls instead of setting $(, $), $< and $> # because then perl would do additional stuff which we don't need or # want here, like checking /proc/sys/kernel/ngroups_max (which might # not exist). It would also also call setgroups() in a way that makes # the root user be part of the group unknown. if ($EFFECTIVE_USER_ID != 0) { 0 == syscall &SYS_setgid, 0 or error "setgid failed: $!"; 0 == syscall &SYS_setuid, 0 or error "setuid failed: $!"; 0 == syscall &SYS_setgroups, 0, 0 or error "setgroups failed: $!"; } if (1) { # When the pid namespace is also unshared, then processes expect a # master pid to always be alive within the namespace. To achieve # this, we fork() here instead of exec() to always have one dummy # process running as pid 1 inside the namespace. This is also what # the unshare tool does when used with the --fork option. # # Otherwise, without a pid 1, new processes cannot be forked # anymore after pid 1 finished. my $cpid = fork() // error "fork() failed: $!"; if ($cpid != 0) { # The parent process will stay alive as pid 1 in this # namespace until the child finishes executing. This is # important because pid 1 must never die or otherwise nothing # new can be forked. $cpid == waitpid $cpid, 0 or error "waitpid() failed: $!"; exit($? >> 8); } } &{$cmd}(); exit 0; } # parent return $gcpid; } sub havemknod { my $root = shift; my $havemknod = 0; if (-e "$root/test-dev-null") { error "/test-dev-null already exists"; } TEST: { # we fork so that we can read STDERR my $pid = open my $fh, '-|' // error "failed to fork(): $!"; if ($pid == 0) { open(STDERR, '>&', STDOUT) or error "cannot open STDERR: $!"; # we use mknod(1) instead of the system call because creating the # right dev_t argument requires makedev(3) exec 'mknod', "$root/test-dev-null", 'c', '1', '3'; } chomp( my $content = do { local $/; <$fh> } ); close $fh; { last TEST unless $? 
== 0 and $content eq ''; last TEST unless -c "$root/test-dev-null"; last TEST unless open my $fh, '>', "$root/test-dev-null"; last TEST unless print $fh 'test'; } $havemknod = 1; } if (-e "$root/test-dev-null") { unlink "$root/test-dev-null" or error "cannot unlink /test-dev-null: $!"; } return $havemknod; } sub print_progress { if ($verbosity_level != 1) { return; } my $perc = shift; if (!stderr_is_tty()) { return; } if ($perc eq "done") { # \e[2K clears everything on the current line (i.e. the progress bar) print STDERR "\e[2Kdone\n"; return; } if ($perc >= 100) { $perc = 100; } my $width = 50; my $num_x = int($perc * $width / 100); my $bar = '=' x $num_x; if ($num_x != $width) { $bar .= '>'; $bar .= ' ' x ($width - $num_x - 1); } printf STDERR "%6.2f [%s]\r", $perc, $bar; return; } sub run_progress { my ($get_exec, $line_handler, $line_has_error, $chdir) = @_; pipe my $rfh, my $wfh; my $got_signal = 0; my $ignore = sub { info "run_progress() received signal $_[0]: waiting for child..."; }; debug("run_progress: exec " . (join ' ', ($get_exec->('${FD}')))); # delay signals so that we can fork and change behaviour of the signal # handler in parent and child without getting interrupted my $sigset = POSIX::SigSet->new(SIGINT, SIGHUP, SIGPIPE, SIGTERM); POSIX::sigprocmask(SIG_BLOCK, $sigset) or error "Can't block signals: $!"; my $pid1 = open(my $pipe, '-|') // error "failed to fork(): $!"; if ($pid1 == 0) { # child: default signal handlers local $SIG{'INT'} = 'DEFAULT'; local $SIG{'HUP'} = 'DEFAULT'; local $SIG{'PIPE'} = 'DEFAULT'; local $SIG{'TERM'} = 'DEFAULT'; # unblock all delayed signals (and possibly handle them) POSIX::sigprocmask(SIG_UNBLOCK, $sigset) or error "Can't unblock signals: $!"; close $rfh; # Unset the close-on-exec flag, so that the file descriptor does not # get closed when we exec my $flags = fcntl($wfh, F_GETFD, 0) or error "fcntl F_GETFD: $!"; fcntl($wfh, F_SETFD, $flags & ~FD_CLOEXEC) or error "fcntl F_SETFD: $!"; my $fd = fileno $wfh; # redirect stderr to stdout so that we can capture it open(STDERR, '>&', STDOUT) or error "cannot open STDOUT: $!"; my @execargs = $get_exec->($fd); # before apt 1.5, "apt-get update" attempted to chdir() into the # working directory. This will fail if the current working directory # is not accessible by the user (for example in unshare mode). See # Debian bug #860738 if (defined $chdir) { chdir $chdir or error "failed chdir() to $chdir: $!"; } eval { Devel::Cover::set_coverage("none") } if $is_covering; exec { $execargs[0] } @execargs or error 'cannot exec() ' . 
(join ' ', @execargs); } close $wfh; # spawn two processes: # parent will parse stdout to look for errors # child will parse $rfh for the progress meter my $pid2 = fork() // error "failed to fork(): $!"; if ($pid2 == 0) { # child: default signal handlers local $SIG{'INT'} = 'IGNORE'; local $SIG{'HUP'} = 'IGNORE'; local $SIG{'PIPE'} = 'IGNORE'; local $SIG{'TERM'} = 'IGNORE'; # unblock all delayed signals (and possibly handle them) POSIX::sigprocmask(SIG_UNBLOCK, $sigset) or error "Can't unblock signals: $!"; my $progress = 0.0; my $status = undef; print_progress($progress); while (my $line = <$rfh>) { my ($newprogress, $newstatus) = $line_handler->($line); next unless $newprogress; # start a new line if the new progress value is less than the # previous one if ($newprogress < $progress) { print_progress("done"); } if (defined $newstatus) { $status = $newstatus; } if ( defined $status and $verbosity_level == 1 and stderr_is_tty()) { # \e[2K clears everything on the current line (i.e. the # progress bar) print STDERR "\e[2K$status: "; } print_progress($newprogress); $progress = $newprogress; } print_progress("done"); exit 0; } # parent: ignore signals # by using "local", the original is automatically restored once the # function returns local $SIG{'INT'} = $ignore; local $SIG{'HUP'} = $ignore; local $SIG{'PIPE'} = $ignore; local $SIG{'TERM'} = $ignore; # unblock all delayed signals (and possibly handle them) POSIX::sigprocmask(SIG_UNBLOCK, $sigset) or error "Can't unblock signals: $!"; my $output = ''; my $has_error = 0; while (my $line = <$pipe>) { $has_error = $line_has_error->($line); if ($verbosity_level >= 2) { print STDERR $line; } else { # forward captured apt output $output .= $line; } } close($pipe); my $fail = 0; if ($? != 0 or $has_error) { $fail = 1; } waitpid $pid2, 0; $? == 0 or error "progress parsing failed"; if ($got_signal) { error "run_progress() received signal: $got_signal"; } # only print failure after progress output finished or otherwise it # might interfere with the remaining output if ($fail) { if ($verbosity_level >= 1) { print STDERR $output; } error((join ' ', $get_exec->('<$fd>')) . ' failed'); } return; } sub run_dpkg_progress { my $options = shift; my @debs = @{ $options->{PKGS} // [] }; my $get_exec = sub { return @{ $options->{ARGV} }, "--status-fd=$_[0]", @debs; }; my $line_has_error = sub { return 0; }; my $num = 0; # each package has one install and one configure step, thus the total # number is twice the number of packages my $total = (scalar @debs) * 2; my $line_handler = sub { my $status = undef; if ($_[0] =~ /^processing: (install|configure): /) { if ($1 eq 'install') { $status = 'installing'; } elsif ($1 eq 'configure') { $status = 'configuring'; } else { error "unknown status: $1"; } $num += 1; } return $num / $total * 100, $status; }; run_progress $get_exec, $line_handler, $line_has_error; return; } sub run_apt_progress { my $options = shift; my @debs = @{ $options->{PKGS} // [] }; my $get_exec = sub { my @prefix = (); my @opts = (); return ( @prefix, @{ $options->{ARGV} }, @opts, "-oAPT::Status-Fd=$_[0]", # prevent apt from messing up the terminal and allow dpkg to # receive SIGINT and quit immediately without waiting for # maintainer script to finish '-oDpkg::Use-Pty=false', @debs ); }; my $line_has_error = sub { return 0; }; if ($options->{FIND_APT_WARNINGS}) { $line_has_error = sub { # apt-get doesn't report a non-zero exit if the update failed. # Thus, we have to parse its output. 
See #778357, #776152, #696335 # and #745735 for the parsing bugs as well as #594813, #696335, # #776152, #778357 and #953726 for non-zero exit on transient # network errors. # # For example, we want to fail with the following warning: # W: Some index files failed to download. They have been ignored, # or old ones used instead. # But since this message is meant for human consumption it is not # guaranteed to be stable across different apt versions and may # change arbitrarily in the future. Thus, we error out on any W: # lines as well. The downside is, that apt also unconditionally # and by design prints a warning for unsigned repositories, even # if they were allowed with Acquire::AllowInsecureRepositories "1" # or with trusted=yes. # # A workaround was introduced by apt 2.1.16 with the --error-on=any # option to apt-get update. if ($_[0] =~ /^(W: |Err:)/) { return 1; } return 0; }; } my $line_handler = sub { if ($_[0] =~ /(pmstatus|dlstatus):[^:]+:(\d+\.\d+):.*/) { my $status = undef; if ($1 eq 'pmstatus') { $status = "installing"; } elsif ($1 eq 'dlstatus') { $status = "downloading"; } else { error "unknown status: $1"; } return $2, $status; } }; run_progress $get_exec, $line_handler, $line_has_error, $options->{CHDIR}; return; } sub run_apt_download_progress { my $options = shift; if ($options->{dryrun}) { info "simulate downloading packages with apt..."; } else { info "downloading packages with apt..."; } if ($verbosity_level >= 3) { my @apt_debug_opts = qw( -oDebug::pkgProblemResolver=true -oDebug::pkgDepCache::Marker=1 -oDebug::pkgDepCache::AutoInstall=1 ); push @{ $options->{APT_ARGV} }, @apt_debug_opts; } pipe my $rfh, my $wfh; my $pid = open my $fh, '-|' // error "fork() failed: $!"; if ($pid == 0) { close $wfh; # read until parent process closes $wfh my $content = do { local $/; <$rfh> }; close $rfh; # the parent is done -- pass what we read back to it print $content; exit 0; } close $rfh; # Unset the close-on-exec flag, so that the file descriptor does not # get closed when we exec my $flags = fcntl($wfh, F_GETFD, 0) or error "fcntl F_GETFD: $!"; fcntl($wfh, F_SETFD, $flags & ~FD_CLOEXEC) or error "fcntl F_SETFD: $!"; my $fd = fileno $wfh; # run_apt_progress() can raise an exception which would leave this function # without cleaning up the other thread we started, making mmdebstrap hang # in case run_apt_progress() fails -- so wrap this in eval() instead eval { # 2022-05-02, #debian-apt on OFTC, times in UTC+2 # 16:57 < josch> DonKult: how is -oDebug::pkgDpkgPm=1 # -oDir::Log=/dev/null a "fancy no-op"? # 11:52 < DonKult> josch: "fancy no-op" in sofar as it does nothing to # the system even through its not in a special mode # ala simulation or download-only. It does all the # things it normally does, except that it just prints # the dpkg calls instead of execv() them which in # practice amounts means it does nothing (the Dir::Log # just prevents libapt from creating the /var/log/apt # directories. As the code creates them even if no # logs will be placed there…). As said, midterm an apt # --print-install-packages or something would be nice # to avoid running everything. run_apt_progress({ ARGV => [ 'apt-get', '--yes', '-oDebug::pkgDpkgPm=1', '-oDir::Log=/dev/null', $options->{dryrun} ? 
'-oAPT::Get::Simulate=true' : ( "-oAPT::Keep-Fds::=$fd", "-oDPkg::Tools::options::'cat >&$fd'::InfoFD=$fd", "-oDpkg::Pre-Install-Pkgs::=cat >&$fd", # no need to lock the database if we are just downloading "-oDebug::NoLocking=1", # no need for pty magic if we write no log "-oDpkg::Use-Pty=0", ), @{ $options->{APT_ARGV} }, ], }); }; my $err = ''; if ($@) { $err = "apt download failed: $@"; } # signal the child process that we are done close $wfh; # and then read from it what it got my @listofdebs = <$fh>; close $fh; if ($? != 0) { $err = "status child failed"; } if ($err) { error $err; } # remove trailing newlines chomp @listofdebs; return @listofdebs; } sub run_chroot { my $cmd = shift; my $options = shift; my @cleanup_tasks = (); my $cleanup = sub { my $signal = $_[0]; while (my $task = pop @cleanup_tasks) { $task->(); } if ($signal) { warning "pid $PID cought signal: $signal"; exit 1; } }; local $SIG{INT} = $cleanup; local $SIG{HUP} = $cleanup; local $SIG{PIPE} = $cleanup; local $SIG{TERM} = $cleanup; eval { if (any { $_ eq $options->{mode} } ('root', 'unshare')) { # if more than essential should be installed, make the system look # more like a real one by creating or bind-mounting the device # nodes foreach my $file (@devfiles) { my ($fname, $mode, $type, $linkname, $devmajor, $devminor) = @{$file}; next if $fname eq ''; if ($type == 0) { # normal file error "type 0 not implemented"; } elsif ($type == 1) { # hardlink error "type 1 not implemented"; } elsif ($type == 2) { # symlink if (!$options->{havemknod}) { # If we had mknod, then the symlink was already created # in the run_setup function. if (!-d "$options->{root}/dev") { warning( "skipping creation of ./dev/$fname because the" . " /dev directory is missing in the target" ); next; } push @cleanup_tasks, sub { unlink "$options->{root}/dev/$fname" or warning("cannot unlink ./dev/$fname: $!"); }; symlink $linkname, "$options->{root}/dev/$fname" or error "cannot create symlink ./dev/$fname -> $linkname"; } } elsif ($type == 3 or $type == 4) { # character/block special if (any { $_ =~ '^chroot/mount(?:/dev)?$' } @{ $options->{skip} }) { info "skipping chroot/mount/dev as requested"; } elsif (!$options->{canmount}) { warning "skipping bind-mounting ./dev/$fname"; } elsif (!$options->{havemknod}) { if (!-d "$options->{root}/dev") { warning( "skipping creation of ./dev/$fname because the" . " /dev directory is missing in the target" ); next; } if ($fname eq "ptmx") { # We must not bind-mount ptmx from the outside or # otherwise posix_openpt() will fail. Instead # /dev/ptmx must refer to /dev/pts/ptmx either by # symlink or by bind-mounting. We choose a symlink. symlink '/dev/pts/ptmx', "$options->{root}/dev/ptmx" or error "cannot create /dev/pts/ptmx symlink"; push @cleanup_tasks, sub { unlink "$options->{root}/dev/ptmx" or error "unlink /dev/ptmx"; }; next; } if (!-e "/dev/$fname") { warning("skipping creation of ./dev/$fname because" . " /dev/$fname does not exist" . " on the outside"); next; } if (!-c "/dev/$fname") { warning("skipping creation of ./dev/$fname because" . " /dev/$fname on the outside is not a" . 
" character special file"); next; } open my $fh, '>', "$options->{root}/dev/$fname" or error "cannot open $options->{root}/dev/$fname: $!"; close $fh; my @umountopts = (); if ($options->{mode} eq 'unshare') { push @umountopts, '--no-mtab'; } push @cleanup_tasks, sub { 0 == system('umount', @umountopts, "$options->{root}/dev/$fname") or warning("umount ./dev/$fname failed: $?"); unlink "$options->{root}/dev/$fname" or warning("cannot unlink ./dev/$fname: $!"); }; 0 == system('mount', '-o', 'bind', "/dev/$fname", "$options->{root}/dev/$fname") or error "mount ./dev/$fname failed: $?"; } } elsif ($type == 5) { # directory if (any { $_ =~ '^chroot/mount(?:/dev)?$' } @{ $options->{skip} }) { info "skipping chroot/mount/dev as requested"; } elsif (!$options->{canmount}) { warning "skipping bind-mounting ./dev/$fname"; } else { if (!-d "$options->{root}/dev") { warning( "skipping creation of ./dev/$fname because the" . " /dev directory is missing in the target" ); next; } if (!-e "/dev/$fname" && $fname ne "pts/") { warning("skipping creation of ./dev/$fname because" . " /dev/$fname does not exist" . " on the outside"); next; } if (!-d "/dev/$fname" && $fname ne "pts/") { warning("skipping creation of ./dev/$fname because" . " /dev/$fname on the outside is not a" . " directory"); next; } if (!$options->{havemknod}) { # If had mknod, then the directory to bind-mount into # was already created in the run_setup function. push @cleanup_tasks, sub { rmdir "$options->{root}/dev/$fname" or warning("cannot rmdir ./dev/$fname: $!"); }; if (-e "$options->{root}/dev/$fname") { if (!-d "$options->{root}/dev/$fname") { error "./dev/$fname already exists but is not" . " a directory"; } } else { my $num_created = make_path "$options->{root}/dev/$fname", { error => \my $err }; if ($err && @$err) { error( join "; ", ( map { "cannot create " . (join ": ", %{$_}) } @$err )); } elsif ($num_created == 0) { error( "cannot create $options->{root}" . "/dev/$fname"); } } chmod $mode, "$options->{root}/dev/$fname" or error "cannot chmod ./dev/$fname: $!"; } my @umountopts = (); if ($options->{mode} eq 'unshare') { push @umountopts, '--no-mtab'; } push @cleanup_tasks, sub { 0 == system('umount', @umountopts, "$options->{root}/dev/$fname") or warning("umount ./dev/$fname failed: $?"); }; if ($fname eq "pts/") { # We cannot just bind-mount /dev/pts from the host as # doing so will make posix_openpt() fail. Instead, we # need to mount a new devpts. # We need ptmxmode=666 because /dev/ptmx is a symlink # to /dev/pts/ptmx and without it posix_openpt() will # fail if we are not the root user. 
# See also: # kernel.org/doc/Documentation/filesystems/devpts.txt # salsa.debian.org/debian/schroot/-/merge_requests/2 # https://bugs.debian.org/856877 # https://bugs.debian.org/817236 0 == system( 'mount', '-t', 'devpts', 'none', "$options->{root}/dev/pts", '-o', 'noexec,nosuid,uid=5,mode=620,ptmxmode=666' ) or error "mount /dev/pts failed"; } else { 0 == system('mount', '-o', 'bind', "/dev/$fname", "$options->{root}/dev/$fname") or error "mount ./dev/$fname failed: $?"; } } } else { error "unsupported type: $type"; } } } elsif ( any { $_ eq $options->{mode} } ('proot', 'fakechroot', 'chrootless') ) { # we cannot mount in fakechroot and proot mode # in proot mode we have /dev bind-mounted already through # --bind=/dev } else { error "unknown mode: $options->{mode}"; } # We can only mount /proc and /sys after extracting the essential # set because if we mount it before, then base-files will not be able # to extract those if ((any { $_ eq $options->{mode} } ('root', 'unshare')) && (any { $_ =~ '^chroot/mount(?:/sys)?$' } @{ $options->{skip} })) { info "skipping chroot/mount/sys as requested"; } elsif ((any { $_ eq $options->{mode} } ('root', 'unshare')) && !$options->{canmount}) { warning "skipping mount sysfs"; } elsif ((any { $_ eq $options->{mode} } ('root', 'unshare')) && !-d "$options->{root}/sys") { warning("skipping mounting of sysfs because the" . " /sys directory is missing in the target"); } elsif ((any { $_ eq $options->{mode} } ('root', 'unshare')) && !-e "/sys") { warning("skipping bind-mounting /sys because" . " /sys does not exist on the outside"); } elsif ((any { $_ eq $options->{mode} } ('root', 'unshare')) && !-d "/sys") { warning("skipping bind-mounting /sys because" . " /sys on the outside is not a directory"); } elsif ($options->{mode} eq 'root') { # we don't know whether we run in root mode inside an unshared # user namespace or as real root so we first try the real mount and # then fall back to mounting in a way that works in unshared mode if ( 0 == system( 'mount', '-t', 'sysfs', '-o', 'ro,nosuid,nodev,noexec', 'sys', "$options->{root}/sys" ) ) { push @cleanup_tasks, sub { 0 == system('umount', "$options->{root}/sys") or warning("umount /sys failed: $?"); }; } elsif ( 0 == system('mount', '-o', 'rbind', '/sys', "$options->{root}/sys")) { push @cleanup_tasks, sub { # since we cannot write to /etc/mtab we need --no-mtab # unmounting /sys only seems to be successful with --lazy 0 == system( 'umount', '--no-mtab', '--lazy', "$options->{root}/sys" ) or warning("umount /sys failed: $?"); }; } else { error "mount /sys failed: $?"; } } elsif ($options->{mode} eq 'unshare') { # naturally we have to clean up after ourselves in sudo mode where # we do a real mount. But we also need to unmount in unshare mode # because otherwise, even with the --one-file-system tar option, # the permissions of the mount source will be stored and not the # mount target (the directory) push @cleanup_tasks, sub { # since we cannot write to /etc/mtab we need --no-mtab # unmounting /sys only seems to be successful with --lazy 0 == system('umount', '--no-mtab', '--lazy', "$options->{root}/sys") or warning("umount /sys failed: $?"); }; # without the network namespace unshared, we cannot mount a new # sysfs. Since we need network, we just bind-mount. 
# # we have to rbind because just using bind results in "wrong fs # type, bad option, bad superblock" error 0 == system('mount', '-o', 'rbind', '/sys', "$options->{root}/sys") or error "mount /sys failed: $?"; } elsif ( any { $_ eq $options->{mode} } ('proot', 'fakechroot', 'chrootless') ) { # we cannot mount in fakechroot and proot mode # in proot mode we have /proc bind-mounted already through # --bind=/proc } else { error "unknown mode: $options->{mode}"; } if ((any { $_ eq $options->{mode} } ('root', 'unshare')) && (any { $_ =~ '^chroot/mount(?:/proc)?$' } @{ $options->{skip} })) { info "skipping chroot/mount/proc as requested"; } elsif ((any { $_ eq $options->{mode} } ('root', 'unshare')) && !$options->{canmount}) { warning "skipping mount proc"; } elsif ((any { $_ eq $options->{mode} } ('root', 'unshare')) && !-d "$options->{root}/proc") { warning("skipping mounting of proc because the" . " /proc directory is missing in the target"); } elsif ((any { $_ eq $options->{mode} } ('root', 'unshare')) && !-e "/proc") { warning("skipping bind-mounting /proc because" . " /proc does not exist on the outside"); } elsif ((any { $_ eq $options->{mode} } ('root', 'unshare')) && !-d "/proc") { warning("skipping bind-mounting /proc because" . " /proc on the outside is not a directory"); } elsif ($options->{mode} eq 'root') { # we don't know whether we run in root mode inside an unshared # user namespace or as real root so we first try the real mount and # then fall back to mounting in a way that works in unshared if ( 0 == system( 'mount', '-t', 'proc', '-o', 'ro', 'proc', "$options->{root}/proc" ) ) { push @cleanup_tasks, sub { # some maintainer scripts mount additional stuff into /proc # which we need to unmount beforehand if ( is_mountpoint( $options->{root} . "/proc/sys/fs/binfmt_misc" ) ) { 0 == system('umount', "$options->{root}/proc/sys/fs/binfmt_misc") or warning( "umount /proc/sys/fs/binfmt_misc failed: $?"); } 0 == system('umount', "$options->{root}/proc") or warning("umount /proc failed: $?"); }; } elsif ( 0 == system('mount', '-t', 'proc', 'proc', "$options->{root}/proc")) { push @cleanup_tasks, sub { # since we cannot write to /etc/mtab we need --no-mtab 0 == system('umount', '--no-mtab', "$options->{root}/proc") or warning("umount /proc failed: $?"); }; } else { error "mount /proc failed: $?"; } } elsif ($options->{mode} eq 'unshare') { # naturally we have to clean up after ourselves in sudo mode where # we do a real mount. 
But we also need to unmount in unshare mode # because otherwise, even with the --one-file-system tar option, # the permissions of the mount source will be stored and not the # mount target (the directory) push @cleanup_tasks, sub { # since we cannot write to /etc/mtab we need --no-mtab 0 == system('umount', '--no-mtab', "$options->{root}/proc") or warning("umount /proc failed: $?"); }; 0 == system('mount', '-t', 'proc', 'proc', "$options->{root}/proc") or error "mount /proc failed: $?"; } elsif ( any { $_ eq $options->{mode} } ('proot', 'fakechroot', 'chrootless') ) { # we cannot mount in fakechroot and proot mode # in proot mode we have /sys bind-mounted already through # --bind=/sys } else { error "unknown mode: $options->{mode}"; } # prevent daemons from starting # the directory might not exist in custom variant, for example # # ideally, we should use update-alternatives but we cannot rely on it # existing inside the chroot # # See #911290 for more problems of this interface if (any { $_ eq 'chroot/policy-rc.d' } @{ $options->{skip} }) { info "skipping chroot/policy-rc.d as requested"; } else { if (-d "$options->{root}/usr/sbin/") { open my $fh, '>', "$options->{root}/usr/sbin/policy-rc.d" or error "cannot open policy-rc.d: $!"; print $fh "#!/bin/sh\n"; print $fh "exit 101\n"; close $fh; chmod 0755, "$options->{root}/usr/sbin/policy-rc.d" or error "cannot chmod policy-rc.d: $!"; } } # the file might not exist if it was removed in a hook if (any { $_ eq 'chroot/start-stop-daemon' } @{ $options->{skip} }) { info "skipping chroot/start-stop-daemon as requested"; } else { if (-f "$options->{root}/sbin/start-stop-daemon") { if (-e "$options->{root}/sbin/start-stop-daemon.REAL") { error "$options->{root}/sbin/start-stop-daemon.REAL already" . " exists"; } move( "$options->{root}/sbin/start-stop-daemon", "$options->{root}/sbin/start-stop-daemon.REAL" ) or error "cannot move start-stop-daemon: $!"; open my $fh, '>', "$options->{root}/sbin/start-stop-daemon" or error "cannot open start-stop-daemon: $!"; print $fh "#!/bin/sh\n"; print $fh "echo \"Warning: Fake start-stop-daemon called, doing" . " nothing\">&2\n"; close $fh; chmod 0755, "$options->{root}/sbin/start-stop-daemon" or error "cannot chmod start-stop-daemon: $!"; } } &{$cmd}(); # cleanup if (any { $_ eq 'chroot/start-stop-daemon' } @{ $options->{skip} }) { info "skipping chroot/start-stop-daemon as requested"; } else { if (-e "$options->{root}/sbin/start-stop-daemon.REAL") { move( "$options->{root}/sbin/start-stop-daemon.REAL", "$options->{root}/sbin/start-stop-daemon" ) or error "cannot move start-stop-daemon: $!"; } } if (any { $_ eq 'chroot/policy-rc.d' } @{ $options->{skip} }) { info "skipping chroot/policy-rc.d as requested"; } else { if (-f "$options->{root}/usr/sbin/policy-rc.d") { unlink "$options->{root}/usr/sbin/policy-rc.d" or error "cannot unlink policy-rc.d: $!"; } } }; my $error = $@; # we use the cleanup function to do the unmounting $cleanup->(0); if ($error) { error "run_chroot failed: $error"; } return; } sub run_hooks { my $name = shift; my $options = shift; if (scalar @{ $options->{"${name}_hook"} } == 0) { return; } if ($options->{dryrun}) { info "not running ${name}-hooks because of --dry-run"; return; } my @env_opts = (); # At this point TMPDIR is set to "$options->{root}/tmp". This is to have a # writable TMPDIR even in unshare mode. But if TMPDIR is still set when # running hooks, then every hook script calling chroot, will have to wrap # that into an "env --unset=TMPDIR". To avoid this, we unset TMPDIR here. 
# If the hook script needs a writable TMPDIR, then it can always use /tmp # inside the chroot. This is also why we do not set a new MMDEBSTRAP_TMPDIR # environment variable. if (length $ENV{TMPDIR}) { push @env_opts, '--unset=TMPDIR'; } # The APT_CONFIG variable, if set, will confuse any manual calls to # apt-get. If you want to use the same config used by mmdebstrap, the # original value is stored in MMDEBSTRAP_APT_CONFIG. if (length $ENV{APT_CONFIG}) { push @env_opts, '--unset=APT_CONFIG'; } if (length $ENV{APT_CONFIG}) { push @env_opts, "MMDEBSTRAP_APT_CONFIG=$ENV{APT_CONFIG}"; } # Storing the mode is important for hook scripts to potentially change # their behavior depending on the mode. It's also important for when the # hook wants to use the mmdebstrap --hook-helper. push @env_opts, "MMDEBSTRAP_MODE=$options->{mode}"; # Storing the hook name is important for hook scripts to potentially change # their behavior depending on the hook. It's also important for when the # hook wants to use the mmdebstrap --hook-helper. push @env_opts, "MMDEBSTRAP_HOOK=$name"; # This is the file descriptor of the socket that the mmdebstrap # --hook-helper can write to and read from to communicate with the outside. push @env_opts, ("MMDEBSTRAP_HOOKSOCK=" . fileno($options->{hooksock})); # Store the verbosity of mmdebstrap so that hooks can be just as verbose # as the mmdebstrap invocation that called them. push @env_opts, ("MMDEBSTRAP_VERBOSITY=" . $verbosity_level); my $runner = sub { foreach my $script (@{ $options->{"${name}_hook"} }) { if ( $script =~ /^( copy-in|copy-out |tar-in|tar-out |upload|download |sync-in|sync-out )\ /x ) { info "running special hook: $script"; if ( any { $_ eq $options->{variant} } ('extract', 'custom') and any { $_ eq $options->{mode} } ('fakechroot', 'proot') and $name ne 'setup' ) { info "the copy-in, copy-out, tar-in and tar-out commands" . " in fakechroot mode or proot mode might fail in" . " extract and custom variants because there might be" . " no tar inside the chroot"; } my $pid = fork() // error "fork() failed: $!"; if ($pid == 0) { # whatever the script writes on stdout is sent to the # socket # whatever is written to the socket, send to stdin open(STDOUT, '>&', $options->{hooksock}) or error "cannot open STDOUT: $!"; open(STDIN, '<&', $options->{hooksock}) or error "cannot open STDIN: $!"; # we execute ourselves under sh to avoid having to # implement a clever parser of the quoting used in $script # for the filenames my $prefix = ""; if ($is_covering) { $prefix = "$EXECUTABLE_NAME -MDevel::Cover=-silent,-nogcov "; } exec 'sh', '-c', "$prefix$PROGRAM_NAME --hook-helper" . " \"\$1\" \"\$2\" \"\$3\" \"\$4\" \"\$5\" $script", 'exec', $options->{root}, $options->{mode}, $name, ( defined $options->{qemu} ? "qemu-$options->{qemu}" : 'env', $verbosity_level ); } waitpid($pid, 0); $? == 0 or error "special hook failed with exit code $?"; } elsif (-x $script || $script !~ m/[^\w@\%+=:,.\/-]/a) { info "running --$name-hook directly: $script $options->{root}"; # execute it directly if it's an executable file # or if it there are no shell metacharacters # (the /a regex modifier makes \w match only ASCII) 0 == system('env', @env_opts, $script, $options->{root}) or error "command failed: $script"; } else { info "running --$name-hook in shell: sh -c '$script' exec" . 
" $options->{root}"; # otherwise, wrap everything in sh -c 0 == system('env', @env_opts, 'sh', '-c', $script, 'exec', $options->{root}) or error "command failed: $script"; } } }; # Unset the close-on-exec flag, so that the file descriptor does not # get closed when we exec my $flags = fcntl($options->{hooksock}, F_GETFD, 0) or error "fcntl F_GETFD: $!"; fcntl($options->{hooksock}, F_SETFD, $flags & ~FD_CLOEXEC) or error "fcntl F_SETFD: $!"; if ($name eq 'setup') { # execute directly without mounting anything (the mount points do not # exist yet) &{$runner}(); } else { run_chroot(\&$runner, $options); } # Restore flags fcntl($options->{hooksock}, F_SETFD, $flags) or error "fcntl F_SETFD: $!"; return; } sub setup { my $options = shift; foreach my $key (sort keys %{$options}) { my $value = $options->{$key}; if (!defined $value) { next; } if (ref $value eq '') { debug "$key: $options->{$key}"; } elsif (ref $value eq 'ARRAY') { debug "$key: [" . (join ', ', @{$value}) . "]"; } elsif (ref $value eq 'GLOB') { debug "$key: GLOB"; } else { error "unknown type for key $key: " . (ref $value); } } if (-e $options->{apttrusted} && !-r $options->{apttrusted}) { warning "cannot read $options->{apttrusted}"; } if (-e $options->{apttrustedparts} && !-r $options->{apttrustedparts}) { warning "cannot read $options->{apttrustedparts}"; } if (any { $_ eq 'setup' } @{ $options->{skip} }) { info "skipping setup as requested"; } else { run_setup($options); } run_hooks('setup', $options); if (any { $_ eq 'update' } @{ $options->{skip} }) { info "skipping update as requested"; } else { run_update($options); } (my $essential_pkgs, my $cached_debs) = run_download($options); # in theory, we don't have to extract the packages in chrootless mode # but we do it anyways because otherwise directory creation timestamps # will differ compared to non-chrootless and we want to create bit-by-bit # identical tar output # # FIXME: dpkg could be changed to produce the same results run_extract($options, $essential_pkgs); run_hooks('extract', $options); if ($options->{variant} ne 'extract') { my $chrootcmd = []; if ($options->{mode} ne 'chrootless') { $chrootcmd = run_prepare($options); } run_essential($options, $essential_pkgs, $chrootcmd, $cached_debs); run_hooks('essential', $options); run_install($options, $chrootcmd); run_hooks('customize', $options); } if (any { $_ eq 'cleanup' } @{ $options->{skip} }) { info "skipping cleanup as requested"; } else { run_cleanup($options); } return; } sub run_setup() { my $options = shift; { my @directories = ( '/etc/apt/apt.conf.d', '/etc/apt/sources.list.d', '/etc/apt/preferences.d', '/var/cache/apt', '/var/lib/apt/lists/partial', '/tmp' ); # we need /var/lib/dpkg in case we need to write to /var/lib/dpkg/arch push @directories, '/var/lib/dpkg'; # since we do not know the dpkg version inside the chroot at this # point, we can only omit it in chrootless mode if ($options->{mode} ne 'chrootless' or scalar @{ $options->{dpkgopts} } > 0) { push @directories, '/etc/dpkg/dpkg.cfg.d/'; } # if dpkg and apt operate from the outside we need some more # directories because dpkg and apt might not even be installed inside # the chroot. Thus, the following block is not strictly necessary in # chrootless mode. We unconditionally add it anyways, so that the # output with and without chrootless mode is equal. 
{ push @directories, '/var/log/apt'; # since we do not know the dpkg version inside the chroot at this # point, we can only omit it in chrootless mode if ($options->{mode} ne 'chrootless') { push @directories, '/var/lib/dpkg/triggers', '/var/lib/dpkg/info', '/var/lib/dpkg/alternatives', '/var/lib/dpkg/updates'; } } foreach my $dir (@directories) { if (-e "$options->{root}/$dir") { if (!-d "$options->{root}/$dir") { error "$dir already exists but is not a directory"; } } else { my $num_created = make_path "$options->{root}/$dir", { error => \my $err }; if ($err && @$err) { error( join "; ", (map { "cannot create " . (join ": ", %{$_}) } @$err)); } elsif ($num_created == 0) { error "cannot create $options->{root}/$dir"; } } } # make sure /tmp is not 0755 like the rest chmod 01777, "$options->{root}/tmp" or error "cannot chmod /tmp: $!"; } # The TMPDIR set by the user or even /tmp might be inaccessible by the # unshared user. Thus, we place all temporary files in /tmp inside the new # rootfs. # # This will affect calls to tempfile() as well as runs of "apt-get update" # which will create temporary clearsigned.message.XXXXXX files to verify # signatures. # # Setting TMPDIR to inside the chroot is also necessary for when packages # are installed with apt from outside the chroot with # DPkg::Chroot-Directory { ## no critic (Variables::RequireLocalizedPunctuationVars) $ENV{"TMPDIR"} = "$options->{root}/tmp"; } my ($conf, $tmpfile) = tempfile("mmdebstrap.apt.conf.XXXXXXXXXXXX", TMPDIR => 1) or error "cannot open apt.conf: $!"; print $conf "Apt::Architecture \"$options->{nativearch}\";\n"; # the host system might have configured additional architectures # force only the native architecture if (scalar @{ $options->{foreignarchs} } > 0) { print $conf "Apt::Architectures { \"$options->{nativearch}\"; "; foreach my $arch (@{ $options->{foreignarchs} }) { print $conf "\"$arch\"; "; } print $conf "};\n"; } else { print $conf "Apt::Architectures \"$options->{nativearch}\";\n"; } print $conf "Dir \"$options->{root}\";\n"; # not needed anymore for apt 1.3 and newer print $conf "Dir::State::Status \"$options->{root}/var/lib/dpkg/status\";\n"; # for authentication, use the keyrings from the host print $conf "Dir::Etc::Trusted \"$options->{apttrusted}\";\n"; print $conf "Dir::Etc::TrustedParts \"$options->{apttrustedparts}\";\n"; if ($options->{variant} ne 'apt') { # apt considers itself essential. Thus, when generating an EDSP # document for an external solver, it will add the Essential:yes field # to the apt package stanza. This is unnecessary for any other variant # than 'apt' because in all other variants we compile the set of # packages we consider essential ourselves and for the 'essential' # variant it would even be wrong to add apt. This workaround is only # needed when apt is used with an external solver but doesn't hurt # otherwise and we don't have a good way to figure out whether apt is # using an external solver or not short of parsing the --aptopt # options. print $conf "pkgCacheGen::ForceEssential \",\";\n"; } close $conf; # We put certain configuration items in their own configuration file # because they have to be valid for apt invocation from outside as well as # from inside the chroot. # The config filename is chosen such that any settings in it will be # overridden by what the user specified with --aptopt. 
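# As a concrete illustration (assumed invocation, not output generated
# verbatim here): the 00mmdebstrap file written below holds the defaults
#
#     Apt::Install-Recommends false;
#     Acquire::Languages "none";
#
# while options passed via --aptopt end up in 99mmdebstrap, for example
#
#     mmdebstrap --aptopt='Acquire::Languages "en"' unstable out.tar
#
# and because apt reads apt.conf.d fragments in lexical order, the 99* file
# is parsed later and its settings win.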
if (!-e "$options->{root}/etc/apt/apt.conf.d/00mmdebstrap") { open my $fh, '>', "$options->{root}/etc/apt/apt.conf.d/00mmdebstrap" or error "cannot open /etc/apt/apt.conf.d/00mmdebstrap: $!"; print $fh "Apt::Install-Recommends false;\n"; print $fh "Acquire::Languages \"none\";\n"; close $fh; } # apt-get update requires this if (!-e "$options->{root}/var/lib/dpkg/status") { open my $fh, '>', "$options->{root}/var/lib/dpkg/status" or error "failed to open(): $!"; close $fh; } # we create /var/lib/dpkg/arch inside the chroot either if there is more # than the native architecture in the chroot or if chrootless mode is # used to create a chroot of a different architecture than the native # architecture outside the chroot. chomp(my $hostarch = `dpkg --print-architecture`); if ( (!-e "$options->{root}/var/lib/dpkg/arch") and ( scalar @{ $options->{foreignarchs} } > 0 or ( $options->{mode} eq 'chrootless' and $hostarch ne $options->{nativearch})) ) { open my $fh, '>', "$options->{root}/var/lib/dpkg/arch" or error "cannot open /var/lib/dpkg/arch: $!"; print $fh "$options->{nativearch}\n"; foreach my $arch (@{ $options->{foreignarchs} }) { print $fh "$arch\n"; } close $fh; } if (scalar @{ $options->{aptopts} } > 0 and (!-e "$options->{root}/etc/apt/apt.conf.d/99mmdebstrap")) { open my $fh, '>', "$options->{root}/etc/apt/apt.conf.d/99mmdebstrap" or error "cannot open /etc/apt/apt.conf.d/99mmdebstrap: $!"; foreach my $opt (@{ $options->{aptopts} }) { if (-r $opt) { # flush handle because copy() uses syswrite() which bypasses # buffered IO $fh->flush(); copy $opt, $fh or error "cannot copy $opt: $!"; } else { print $fh $opt; if ($opt !~ /;$/) { print $fh ';'; } if ($opt !~ /\n$/) { print $fh "\n"; } } } close $fh; if ($verbosity_level >= 3) { debug "content of /etc/apt/apt.conf.d/99mmdebstrap:"; copy("$options->{root}/etc/apt/apt.conf.d/99mmdebstrap", \*STDERR); } } if (scalar @{ $options->{dpkgopts} } > 0 and (!-e "$options->{root}/etc/dpkg/dpkg.cfg.d/99mmdebstrap")) { # FIXME: in chrootless mode, dpkg will only read the configuration # from the host -- see #808203 if ($options->{mode} eq 'chrootless') { warning('dpkg is unable to read an alternative configuration in' . 
'chrootless mode -- see Debian bug #808203'); } open my $fh, '>', "$options->{root}/etc/dpkg/dpkg.cfg.d/99mmdebstrap" or error "cannot open /etc/dpkg/dpkg.cfg.d/99mmdebstrap: $!"; foreach my $opt (@{ $options->{dpkgopts} }) { if (-r $opt) { # flush handle because copy() uses syswrite() which bypasses # buffered IO $fh->flush(); copy $opt, $fh or error "cannot copy $opt: $!"; } else { print $fh $opt; if ($opt !~ /\n$/) { print $fh "\n"; } } } close $fh; if ($verbosity_level >= 3) { debug "content of /etc/dpkg/dpkg.cfg.d/99mmdebstrap:"; copy("$options->{root}/etc/dpkg/dpkg.cfg.d/99mmdebstrap", \*STDERR); } } if (!-e "$options->{root}/etc/fstab") { open my $fh, '>', "$options->{root}/etc/fstab" or error "cannot open fstab: $!"; print $fh "# UNCONFIGURED FSTAB FOR BASE SYSTEM\n"; close $fh; chmod 0644, "$options->{root}/etc/fstab" or error "cannot chmod fstab: $!"; } # write /etc/apt/sources.list and files in /etc/apt/sources.list.d/ { my $firstentry = $options->{sourceslists}->[0]; # if the first sources.list entry is of one-line type and without # explicit filename, then write out an actual /etc/apt/sources.list # otherwise everything goes into /etc/apt/sources.list.d my $fname; if ($firstentry->{type} eq 'one-line' && !defined $firstentry->{fname}) { $fname = "$options->{root}/etc/apt/sources.list"; } else { $fname = "$options->{root}/etc/apt/sources.list.d/0000"; if (defined $firstentry->{fname}) { $fname .= $firstentry->{fname}; if ( $firstentry->{fname} !~ /\.list/ && $firstentry->{fname} !~ /\.sources/) { if ($firstentry->{type} eq 'one-line') { $fname .= '.list'; } elsif ($firstentry->{type} eq 'deb822') { $fname .= '.sources'; } else { error "invalid type: $firstentry->{type}"; } } } else { # if no filename is given, then this must be a deb822 file # because if it was a one-line type file, then it would've been # written to /etc/apt/sources.list $fname .= 'main.sources'; } } if (!-e $fname) { open my $fh, '>', "$fname" or error "cannot open $fname: $!"; print $fh $firstentry->{content}; close $fh; } # everything else goes into /etc/apt/sources.list.d/ for (my $i = 1 ; $i < scalar @{ $options->{sourceslists} } ; $i++) { my $entry = $options->{sourceslists}->[$i]; my $fname = "$options->{root}/etc/apt/sources.list.d/" . sprintf("%04d", $i); if (defined $entry->{fname}) { $fname .= $entry->{fname}; if ( $entry->{fname} !~ /\.list/ && $entry->{fname} !~ /\.sources/) { if ($entry->{type} eq 'one-line') { $fname .= '.list'; } elsif ($entry->{type} eq 'deb822') { $fname .= '.sources'; } else { error "invalid type: $entry->{type}"; } } } else { if ($entry->{type} eq 'one-line') { $fname .= 'main.list'; } elsif ($entry->{type} eq 'deb822') { $fname .= 'main.sources'; } else { error "invalid type: $entry->{type}"; } } if (!-e $fname) { open my $fh, '>', "$fname" or error "cannot open $fname: $!"; print $fh $entry->{content}; close $fh; } } } # allow network access from within foreach my $file ("/etc/resolv.conf", "/etc/hostname") { if (-e $file && !-e "$options->{root}/$file") { # this will create a new file with 644 permissions and copy # contents only even if $file was a symlink copy($file, "$options->{root}/$file") or error "cannot copy $file: $!"; # if the source was a regular file, preserve the permissions if (-f $file) { my $mode = (stat($file))[2]; $mode &= oct(7777); # mask off bits that aren't the mode chmod $mode, "$options->{root}/$file" or error "cannot chmod $file: $!"; } } else { warning("Host system does not have a $file to copy into the" . 
" rootfs."); } } if ($options->{havemknod}) { foreach my $file (@devfiles) { my ($fname, $mode, $type, $linkname, $devmajor, $devminor) = @{$file}; if ($type == 0) { # normal file error "type 0 not implemented"; } elsif ($type == 1) { # hardlink error "type 1 not implemented"; } elsif ($type == 2) { # symlink if ( $options->{mode} eq 'fakechroot' and $linkname =~ /^\/proc/) { # there is no /proc in fakechroot mode next; } symlink $linkname, "$options->{root}/dev/$fname" or error "cannot create symlink ./dev/$fname"; next; # chmod cannot work on symlinks } elsif ($type == 3) { # character special 0 == system('mknod', "$options->{root}/dev/$fname", 'c', $devmajor, $devminor) or error "mknod failed: $?"; } elsif ($type == 4) { # block special 0 == system('mknod', "$options->{root}/dev/$fname", 'b', $devmajor, $devminor) or error "mknod failed: $?"; } elsif ($type == 5) { # directory if (-e "$options->{root}/dev/$fname") { if (!-d "$options->{root}/dev/$fname") { error "./dev/$fname already exists but is not a directory"; } } else { my $num_created = make_path "$options->{root}/dev/$fname", { error => \my $err }; if ($err && @$err) { error( join "; ", ( map { "cannot create " . (join ": ", %{$_}) } @$err )); } elsif ($num_created == 0) { error "cannot create $options->{root}/dev/$fname"; } } } else { error "unsupported type: $type"; } chmod $mode, "$options->{root}/dev/$fname" or error "cannot chmod ./dev/$fname: $!"; } } # we tell apt about the configuration via a config file passed via the # APT_CONFIG environment variable instead of using the --option command # line arguments because configuration settings like Dir::Etc have already # been evaluated at the time that apt takes its command line arguments # into account. { ## no critic (Variables::RequireLocalizedPunctuationVars) $ENV{"APT_CONFIG"} = "$tmpfile"; } # we have to make the config file world readable so that a possible # /usr/lib/apt/solvers/apt process which is run by the _apt user is also # able to read it chmod 0666, "$tmpfile" or error "cannot chmod $tmpfile: $!"; if ($verbosity_level >= 3) { 0 == system('apt-get', '--version') or error "apt-get --version failed: $?"; 0 == system('apt-config', 'dump') or error "apt-config failed: $?"; debug "content of $tmpfile:"; copy($tmpfile, \*STDERR); } if (none { $_ eq $options->{mode} } ('fakechroot', 'proot')) { # Apt dropping privileges to another user than root is not useful in # fakechroot and proot mode because all users are faked and thus there # is no real privilege difference anyways. We could set # APT::Sandbox::User "root" in fakechroot and proot mode but we don't # because if we would, then /var/cache/apt/archives/partial/ and # /var/lib/apt/lists/partial/ would not be owned by the _apt user # if mmdebstrap was run in fakechroot or proot mode. # # when apt-get update is run by the root user, then apt will attempt to # drop privileges to the _apt user. This will fail if the _apt user # does not have permissions to read the root directory. In that case, # we have to disable apt sandboxing. This can for example happen in # root mode when the path of the chroot is not in a world-readable # location. my $partial = '/var/lib/apt/lists/partial'; if ( system('/usr/lib/apt/apt-helper', 'drop-privs', '--', 'test', '-r', "$options->{root}$partial") != 0 ) { warning "Download is performed unsandboxed as root as file" . 
" $options->{root}$partial couldn't be accessed by user _apt"; open my $fh, '>>', $tmpfile or error "cannot open $tmpfile for appending: $!"; print $fh "APT::Sandbox::User \"root\";\n"; close $fh; } } return; } sub run_update() { my $options = shift; my $aptopts = { ARGV => ['apt-get', 'update', '--error-on=any'], CHDIR => $options->{root}, }; info "running apt-get update..."; run_apt_progress($aptopts); # check if anything was downloaded at all { open my $fh, '-|', 'apt-get', 'indextargets' // error "failed to fork(): $!"; chomp( my $indextargets = do { local $/; <$fh> } ); close $fh; if ($indextargets eq '') { if ($verbosity_level >= 1) { 0 == system('apt-cache', 'policy') or error "apt-cache failed: $?"; } error "apt-get update didn't download anything"; } } return; } sub run_download() { my $options = shift; # In the future we want to replace downloading packages with "apt-get # install" and installing them with dpkg by just installing the essential # packages with apt from the outside with DPkg::Chroot-Directory. # We are not doing that because then the preinst script of base-passwd will # not be called early enough and packages will fail to install because they # are missing /etc/passwd. my @cached_debs = (); my @dl_debs = (); if ( !$options->{dryrun} && ((none { $_ eq $options->{variant} } ('extract', 'custom')) || scalar @{ $options->{include} } != 0) && -d "$options->{root}/var/cache/apt/archives/" ) { my $apt_archives = "/var/cache/apt/archives/"; opendir my $dh, "$options->{root}/$apt_archives" or error "cannot read $apt_archives"; while (my $deb = readdir $dh) { if ($deb !~ /\.deb$/) { next; } if (!-f "$options->{root}/$apt_archives/$deb") { next; } push @cached_debs, $deb; } closedir $dh; } # To figure out the right package set for the apt variant we can use: # $ apt-get dist-upgrade -o dir::state::status=/dev/null # This is because that variants only contain essential packages and # apt and libapt treats apt as essential. If we want to install less # (essential variant) then we have to compute the package set ourselves. # Same if we want to install priority based variants. if (any { $_ eq $options->{variant} } ('extract', 'custom')) { if (scalar @{ $options->{include} } == 0) { info "nothing to download -- skipping..."; return ([], \@cached_debs); } my @apt_argv = ('install'); for my $incl (@{ $options->{include} }) { for my $pkg (split /[,\s]+/, $incl) { # strip leading and trailing whitespace $pkg =~ s/^\s+|\s+$//g; # skip if the remainder is an empty string if ($pkg eq '') { next; } push @apt_argv, $pkg; } } @dl_debs = run_apt_download_progress({ APT_ARGV => [@apt_argv], dryrun => $options->{dryrun}, }, ); } elsif ($options->{variant} eq 'apt') { # if we just want to install Essential:yes packages, apt and their # dependencies then we can make use of libapt treating apt as # implicitly essential. An upgrade with the (currently) empty status # file will trigger an installation of the essential packages plus apt. # # 2018-09-02, #debian-dpkg on OFTC, times in UTC+2 # 23:39 < josch> I'll just put it in my script and if it starts # breaking some time I just say it's apt's fault. :P # 23:42 < DonKult> that is how it usually works, so yes, do that :P (<- # and please add that line next to it so you can # remind me in 5+ years that I said that after I wrote # in the bugreport: "Are you crazy?!? 
Nobody in his # right mind would even suggest depending on it!") @dl_debs = run_apt_download_progress({ APT_ARGV => ['dist-upgrade'], dryrun => $options->{dryrun}, }, ); } elsif ( any { $_ eq $options->{variant} } ('essential', 'standard', 'important', 'required', 'buildd') ) { # 2021-06-07, #debian-apt on OFTC, times in UTC+2 # 17:27 < DonKult> (?essential includes 'apt' through) # 17:30 < josch> DonKult: no, because pkgCacheGen::ForceEssential ","; # 17:32 < DonKult> touché @dl_debs = run_apt_download_progress({ APT_ARGV => [ 'install', '?narrow(' . ( length($options->{suite}) ? '?or(?archive(^' . $options->{suite} . '$),?codename(^' . $options->{suite} . '$)),' : '' ) . '?architecture(' . $options->{nativearch} . '),?essential)' ], dryrun => $options->{dryrun}, }, ); } else { error "unknown variant: $options->{variant}"; } my @essential_pkgs; # strip the chroot directory from the filenames foreach my $deb (@dl_debs) { # if filename does not start with chroot directory then the user # might've used a file:// mirror and we check whether the path is # accessible inside the chroot if (rindex $deb, $options->{root}, 0) { if (!-e "$options->{root}/$deb") { error "package file $deb not accessible from chroot directory" . " -- use copy:// instead of file:// or a bind-mount. You" . " can also try using --hook-dir=/usr/share/mmdebstrap/" . "hooks/file-mirror-automount to automatically create" . " bind-mounts or copy the files as necessary."; } push @essential_pkgs, $deb; next; } # filename starts with chroot directory, strip it off # this is the normal case if (!-e $deb) { error "cannot find package file $deb"; } push @essential_pkgs, substr($deb, length($options->{root})); } return (\@essential_pkgs, \@cached_debs); } sub run_extract() { my $options = shift; my $essential_pkgs = shift; if ($options->{dryrun}) { info "skip extracting packages because of --dry-run"; return; } if (scalar @{$essential_pkgs} == 0) { info "nothing to extract -- skipping..."; return; } info "extracting archives..."; print_progress 0.0; my $counter = 0; my $total = scalar @{$essential_pkgs}; foreach my $deb (@{$essential_pkgs}) { $counter += 1; my $tarfilter; my @tarfilterargs; # if the path-excluded option was added to the dpkg config, # insert the tarfilter between dpkg-deb and tar if (-e "$options->{root}/etc/dpkg/dpkg.cfg.d/99mmdebstrap") { open(my $fh, '<', "$options->{root}/etc/dpkg/dpkg.cfg.d/99mmdebstrap") or error "cannot open /etc/dpkg/dpkg.cfg.d/99mmdebstrap: $!"; my @matches = grep { /^path-(?:exclude|include)=/ } <$fh>; close $fh; chop @matches; # remove trailing newline @tarfilterargs = map { "--" . 
$_ } @matches; } if (scalar @tarfilterargs > 0) { if (-x "./tarfilter") { $tarfilter = "./tarfilter"; } else { $tarfilter = "mmtarfilter"; } } my $dpkg_writer; my $tar_reader; my $filter_reader; my $filter_writer; if (scalar @tarfilterargs > 0) { pipe $filter_reader, $dpkg_writer or error "pipe failed: $!"; pipe $tar_reader, $filter_writer or error "pipe failed: $!"; } else { pipe $tar_reader, $dpkg_writer or error "pipe failed: $!"; } # not using dpkg-deb --extract as that would replace the # merged-usr symlinks with plain directories # https://bugs.debian.org/989602 # not using dpkg --unpack because that would try running preinst # maintainer scripts my $pid1 = fork() // error "fork() failed: $!"; if ($pid1 == 0) { open(STDOUT, '>&', $dpkg_writer) or error "cannot open STDOUT: $!"; close($tar_reader) or error "cannot close tar_reader: $!"; if (scalar @tarfilterargs > 0) { close($filter_reader) or error "cannot close filter_reader: $!"; close($filter_writer) or error "cannot close filter_writer: $!"; } debug("running dpkg-deb --fsys-tarfile $options->{root}/$deb"); eval { Devel::Cover::set_coverage("none") } if $is_covering; exec 'dpkg-deb', '--fsys-tarfile', "$options->{root}/$deb"; } my $pid2; if (scalar @tarfilterargs > 0) { $pid2 = fork() // error "fork() failed: $!"; if ($pid2 == 0) { open(STDIN, '<&', $filter_reader) or error "cannot open STDIN: $!"; open(STDOUT, '>&', $filter_writer) or error "cannot open STDOUT: $!"; close($dpkg_writer) or error "cannot close dpkg_writer: $!"; close($tar_reader) or error "cannot close tar_reader: $!"; debug("running $tarfilter " . (join " ", @tarfilterargs)); eval { Devel::Cover::set_coverage("none") } if $is_covering; exec $tarfilter, @tarfilterargs; } } my $pid3 = fork() // error "fork() failed: $!"; if ($pid3 == 0) { open(STDIN, '<&', $tar_reader) or error "cannot open STDIN: $!"; close($dpkg_writer) or error "cannot close dpkg_writer: $!"; if (scalar @tarfilterargs > 0) { close($filter_reader) or error "cannot close filter_reader: $!"; close($filter_writer) or error "cannot close filter_writer: $!"; } debug( "running tar -C $options->{root}" . " --keep-directory-symlink --extract --file -"); eval { Devel::Cover::set_coverage("none") } if $is_covering; exec 'tar', '-C', $options->{root}, '--keep-directory-symlink', '--extract', '--file', '-'; } close($dpkg_writer) or error "cannot close dpkg_writer: $!"; close($tar_reader) or error "cannot close tar_reader: $!"; if (scalar @tarfilterargs > 0) { close($filter_reader) or error "cannot close filter_reader: $!"; close($filter_writer) or error "cannot close filter_writer: $!"; } waitpid($pid1, 0); $? == 0 or error "dpkg-deb --fsys-tarfile failed: $?"; if (scalar @tarfilterargs > 0) { waitpid($pid2, 0); $? == 0 or error "tarfilter failed: $?"; } waitpid($pid3, 0); $? == 0 or error "tar --extract failed: $?"; print_progress($counter / $total * 100); } print_progress "done"; return; } sub run_prepare { my $options = shift; if ($options->{mode} eq 'fakechroot') { # this borrows from and extends # /etc/fakechroot/debootstrap.env and # /etc/fakechroot/chroot.env { my %subst = ( chroot => "/usr/sbin/chroot.fakechroot", mkfifo => "/bin/true", ldconfig => (getcwd() . 
'/ldconfig.fakechroot'), ldd => "/usr/bin/ldd.fakechroot", ischroot => "/bin/true" ); if (!-x $subst{ldconfig}) { $subst{ldconfig} = '/usr/libexec/mmdebstrap/ldconfig.fakechroot'; } my @fakechrootsubst = (); foreach my $d (split ':', $ENV{PATH}) { foreach my $k (sort keys %subst) { if (-e "$d/$k") { push @fakechrootsubst, "$d/$k=$subst{$k}"; } } } if (defined $ENV{FAKECHROOT_CMD_SUBST} && $ENV{FAKECHROOT_CMD_SUBST} ne "") { push @fakechrootsubst, split /:/, $ENV{FAKECHROOT_CMD_SUBST}; } ## no critic (Variables::RequireLocalizedPunctuationVars) $ENV{FAKECHROOT_CMD_SUBST} = join ':', @fakechrootsubst; } if (defined $ENV{FAKECHROOT_EXCLUDE_PATH} && $ENV{FAKECHROOT_EXCLUDE_PATH} ne "") { ## no critic (Variables::RequireLocalizedPunctuationVars) $ENV{FAKECHROOT_EXCLUDE_PATH} = "$ENV{FAKECHROOT_EXCLUDE_PATH}:/dev:/proc:/sys"; } else { ## no critic (Variables::RequireLocalizedPunctuationVars) $ENV{FAKECHROOT_EXCLUDE_PATH} = '/dev:/proc:/sys'; } # workaround for long unix socket path if FAKECHROOT_BASE # exceeds the limit of 108 bytes { ## no critic (Variables::RequireLocalizedPunctuationVars) $ENV{FAKECHROOT_AF_UNIX_PATH} = "/tmp"; } { my @ldlibpath = (); if (defined $ENV{LD_LIBRARY_PATH} && $ENV{LD_LIBRARY_PATH} ne "") { push @ldlibpath, (split /:/, $ENV{LD_LIBRARY_PATH}); } # FIXME: workaround allowing installation of systemd should # live in fakechroot, see #917920 push @ldlibpath, "$options->{root}/lib/systemd"; my $parse_ld_so_conf; $parse_ld_so_conf = sub { foreach my $conf (@_) { next if !-r $conf; open my $fh, '<', "$conf" or error "can't read $conf: $!"; while (my $line = <$fh>) { chomp $line; if ($line eq "") { next; } if ($line =~ /^#/) { next; } if ($line =~ /include (.*)/) { $parse_ld_so_conf->(glob("$options->{root}/$1")); next; } if (!-d "$options->{root}/$line") { next; } push @ldlibpath, "$options->{root}/$line"; } close $fh; } }; if (-e "$options->{root}/etc/ld.so.conf") { $parse_ld_so_conf->("$options->{root}/etc/ld.so.conf"); } ## no critic (Variables::RequireLocalizedPunctuationVars) $ENV{LD_LIBRARY_PATH} = join ':', @ldlibpath; } } # make sure that APT_CONFIG and TMPDIR are not set when executing # anything inside the chroot my @chrootcmd = ('env', '--unset=APT_CONFIG', '--unset=TMPDIR'); if ($options->{mode} eq 'proot') { push @chrootcmd, ( 'proot', '--root-id', '--bind=/dev', '--bind=/proc', '--bind=/sys', "--rootfs=$options->{root}", '--cwd=/' ); } elsif ( any { $_ eq $options->{mode} } ('root', 'unshare', 'fakechroot') ) { push @chrootcmd, ('chroot', $options->{root}); } else { error "unknown mode: $options->{mode}"; } # copy qemu-user-static binary into chroot or setup proot with # --qemu if (defined $options->{qemu}) { if ($options->{mode} eq 'proot') { push @chrootcmd, "--qemu=qemu-$options->{qemu}"; } elsif ($options->{mode} eq 'fakechroot') { # Make sure that the fakeroot and fakechroot shared # libraries exist for the right architecture open my $fh, '-|', 'dpkg-architecture', '-a', $options->{nativearch}, '-qDEB_HOST_MULTIARCH' // error "failed to fork(): $!"; chomp( my $deb_host_multiarch = do { local $/; <$fh> } ); close $fh; if (($? != 0) or (!$deb_host_multiarch)) { error "dpkg-architecture failed: $?"; } my $fakechrootdir = "/usr/lib/$deb_host_multiarch/fakechroot"; if (!-e "$fakechrootdir/libfakechroot.so") { error "$fakechrootdir/libfakechroot.so doesn't exist." . " Install libfakechroot:$options->{nativearch}" . 
" outside the chroot"; } my $fakerootdir = "/usr/lib/$deb_host_multiarch/libfakeroot"; if (!-e "$fakerootdir/libfakeroot-sysv.so") { error "$fakerootdir/libfakeroot-sysv.so doesn't exist." . " Install libfakeroot:$options->{nativearch}" . " outside the chroot"; } # The rest of this block sets environment variables, so we # have to add the "no critic" statement to stop perlcritic # from complaining about setting global variables ## no critic (Variables::RequireLocalizedPunctuationVars) # fakechroot only fills LD_LIBRARY_PATH with the # directories of the host's architecture. We append the # directories of the chroot architecture. $ENV{LD_LIBRARY_PATH} = "$ENV{LD_LIBRARY_PATH}:$fakechrootdir:$fakerootdir"; # The binfmt support on the outside is used, so qemu needs # to know where it has to look for shared libraries if (defined $ENV{QEMU_LD_PREFIX} && $ENV{QEMU_LD_PREFIX} ne "") { $ENV{QEMU_LD_PREFIX} = "$ENV{QEMU_LD_PREFIX}:$options->{root}"; } else { $ENV{QEMU_LD_PREFIX} = $options->{root}; } } elsif (any { $_ eq $options->{mode} } ('root', 'unshare')) { my $require_qemu_static = 1; # make $@ local, so we don't print an eventual error # in other parts where we evaluate $@ local $@ = ''; eval { # Check for the F flag which makes the kernel open the binfmt # binary at configuration time instead of lazily at startup # time. If the flag is set, then the qemu-static binary is not # required inside the chroot. if (-e "/proc/sys/fs/binfmt_misc/qemu-$options->{qemu}") { open my $fh, '<', "/proc/sys/fs/binfmt_misc/qemu-$options->{qemu}"; while (my $line = <$fh>) { chomp($line); if ($line =~ /^flags: [A-Z]*F[A-Z]*$/) { $require_qemu_static = 0; last; } } close $fh; } }; if ($require_qemu_static) { # other modes require a static qemu-user binary my $qemubin = "/usr/bin/qemu-$options->{qemu}-static"; if (!-e $qemubin) { error "cannot find $qemubin"; } copy $qemubin, "$options->{root}/$qemubin" or error "cannot copy $qemubin: $!"; # File::Copy does not retain permissions but on some # platforms (like Travis CI) the binfmt interpreter must # have the executable bit set or otherwise execve will # fail with EACCES chmod 0755, "$options->{root}/$qemubin" or error "cannot chmod $qemubin: $!"; } } else { error "unknown mode: $options->{mode}"; } } # some versions of coreutils use the renameat2 system call in mv. # This breaks certain versions of fakechroot and proot. Here we do # a sanity check and warn the user in case things might break. if (any { $_ eq $options->{mode} } ('fakechroot', 'proot') and -e "$options->{root}/bin/mv") { mkdir "$options->{root}/000-move-me" or error "cannot create directory: $!"; my $ret = system @chrootcmd, '/bin/mv', '/000-move-me', '/001-delete-me'; if ($ret != 0) { if ($options->{mode} eq 'proot') { info "the /bin/mv binary inside the chroot doesn't" . " work under proot"; info "this is likely due to missing support for" . " renameat2 in proot"; info "see https://github.com/proot-me/PRoot/issues/147"; } else { info "the /bin/mv binary inside the chroot doesn't" . " work under fakechroot"; info "with certain versions of coreutils and glibc," . " this is due to missing support for renameat2 in" . 
" fakechroot"; info "see https://github.com/dex4er/fakechroot/issues/60"; } info "expect package post installation scripts not to work"; rmdir "$options->{root}/000-move-me" or error "cannot rmdir: $!"; } else { rmdir "$options->{root}/001-delete-me" or error "cannot rmdir: $!"; } } return \@chrootcmd; } sub run_essential() { my $options = shift; my $essential_pkgs = shift; my $chrootcmd = shift; my $cached_debs = shift; if (scalar @{$essential_pkgs} == 0) { info "no essential packages -- skipping..."; return; } if ($options->{mode} eq 'chrootless') { if ($options->{dryrun}) { info "simulate installing essential packages..."; } else { info "installing essential packages..."; } # FIXME: the dpkg config from the host is parsed before the command # line arguments are parsed and might break this mode # Example: if the host has --path-exclude set, then this will also # affect the chroot. See #808203 my @chrootless_opts = ( '-oDPkg::Options::=--force-not-root', '-oDPkg::Options::=--force-script-chrootless', '-oDPkg::Options::=--root=' . $options->{root}, '-oDPkg::Options::=--log=' . "$options->{root}/var/log/dpkg.log", $options->{dryrun} ? '-oAPT::Get::Simulate=true' : (), ); if (defined $options->{qemu}) { # The binfmt support on the outside is used, so qemu needs to know # where it has to look for shared libraries if (defined $ENV{QEMU_LD_PREFIX} && $ENV{QEMU_LD_PREFIX} ne "") { ## no critic (Variables::RequireLocalizedPunctuationVars) $ENV{QEMU_LD_PREFIX} = "$ENV{QEMU_LD_PREFIX}:$options->{root}"; } else { ## no critic (Variables::RequireLocalizedPunctuationVars) $ENV{QEMU_LD_PREFIX} = $options->{root}; } } # we don't use apt because that will not run the base-passwd preinst # early enough #run_apt_progress({ # ARGV => ['apt-get', '--yes', @chrootless_opts, 'install'], # PKGS => [map { "$options->{root}/$_" } @{$essential_pkgs}], #}); run_dpkg_progress({ ARGV => [ 'dpkg', '--force-not-root', '--force-script-chrootless', "--root=$options->{root}", "--log=$options->{root}/var/log/dpkg.log", '--install', '--force-depends' ], PKGS => [map { "$options->{root}/$_" } @{$essential_pkgs}] }); } elsif ( any { $_ eq $options->{mode} } ('root', 'unshare', 'fakechroot', 'proot') ) { # install the extracted packages properly # we need --force-depends because dpkg does not take Pre-Depends # into account and thus doesn't install them in the right order # And the --predep-package option is broken: #539133 # # We could use apt from outside the chroot using DPkg::Chroot-Directory # but then the preinst script of base-passwd will not be called early # enough and packages will fail to install because they are missing # /etc/passwd. Also, with plain dpkg the essential variant can finish # within 9 seconds. If we use apt instead, it becomes 12 seconds. We # prefer speed here. 
if ($options->{dryrun}) { info "simulate installing essential packages..."; } else { info "installing essential packages..."; run_chroot( sub { run_dpkg_progress({ ARGV => [ @{$chrootcmd}, 'dpkg', '--install', '--force-depends' ], PKGS => $essential_pkgs, }); }, $options ); } } else { error "unknown mode: $options->{mode}"; } if (any { $_ eq 'essential/unlink' } @{ $options->{skip} }) { info "skipping essential/unlink as requested"; } else { foreach my $deb (@{$essential_pkgs}) { # do not unlink those packages that were in /var/cache/apt/archive # before the download phase next if any { "/var/cache/apt/archives/$_" eq $deb } @{$cached_debs}; # do not unlink those packages that were not in # /var/cache/apt/archive (for example because they were provided by # a file:// mirror) next if $deb !~ /\/var\/cache\/apt\/archives\//; unlink "$options->{root}/$deb" or error "cannot unlink $deb: $!"; } } return; } sub run_install() { my $options = shift; my $chrootcmd = shift; my %pkgs_to_install; for my $incl (@{ $options->{include} }) { for my $pkg (split /[,\s]+/, $incl) { # strip leading and trailing whitespace $pkg =~ s/^\s+|\s+$//g; # skip if the remainder is an empty string if ($pkg eq '') { next; } $pkgs_to_install{$pkg} = (); } } if ($options->{variant} eq 'buildd') { $pkgs_to_install{'build-essential'} = (); } if ( any { $_ eq $options->{variant} } ('required', 'important', 'standard', 'buildd') ) { # Many of the priority:required packages are also essential:yes. We # make sure not to select those here to avoid useless "xxx is already # the newest version" messages. my $priority; if (any { $_ eq $options->{variant} } ('required', 'buildd')) { $priority = '?and(?priority(required),?not(?essential))'; } elsif ($options->{variant} eq 'important') { $priority = '?and(?or(?priority(required),?priority(important)),' . '?not(?essential))'; } elsif ($options->{variant} eq 'standard') { $priority = '?and(?or(~prequired,~pimportant,~pstandard),' . '?not(?essential))'; } $pkgs_to_install{ "?narrow(" . ( length($options->{suite}) ? '?or(?archive(^' . $options->{suite} . '$),?codename(^' . $options->{suite} . '$)),' : '' ) . "?architecture($options->{nativearch})," . "$priority)" } = (); } my @pkgs_to_install = keys %pkgs_to_install; if ($options->{mode} eq 'chrootless') { if (scalar @pkgs_to_install > 0) { my @chrootless_opts = ( '-oDPkg::Options::=--force-not-root', '-oDPkg::Options::=--force-script-chrootless', '-oDPkg::Options::=--root=' . $options->{root}, '-oDPkg::Options::=--log=' . "$options->{root}/var/log/dpkg.log", $options->{dryrun} ? 
'-oAPT::Get::Simulate=true' : (), ); run_apt_progress({ ARGV => ['apt-get', '--yes', @chrootless_opts, 'install'], PKGS => [@pkgs_to_install], }); } } elsif ( any { $_ eq $options->{mode} } ('root', 'unshare', 'fakechroot', 'proot') ) { if ($options->{variant} ne 'custom' and scalar @pkgs_to_install > 0) { # Advantage of running apt on the outside instead of inside the # chroot: # # - we can build chroots without apt (for example from buildinfo # files) # # - we do not need to install additional packages like # apt-transport-* or ca-certificates inside the chroot # # - we do not not need additional key material inside the chroot # # - we can make use of file:// and copy:// # # - we can use EDSP solvers without installing apt-utils or other # solvers inside the chroot # # The DPkg::Install::Recursive::force=true workaround can be # dropped after this issue is fixed: # https://salsa.debian.org/apt-team/apt/-/merge_requests/189 # # We could also move the dpkg call to the outside and run dpkg with # --root but this would only make sense in situations where there # is no dpkg inside the chroot. if (!$options->{dryrun}) { run_chroot( sub { info "installing remaining packages inside the" . " chroot..."; run_apt_progress({ ARGV => [ 'apt-get', '-o', 'Dir::Bin::dpkg=env', '-o', 'DPkg::Options::=--unset=TMPDIR', '-o', 'DPkg::Options::=dpkg', $options->{mode} eq 'fakechroot' ? ( '-o', 'DPkg::Install::Recursive::force=true' ) : (), '-o', "DPkg::Chroot-Directory=$options->{root}", '--yes', 'install' ], PKGS => [@pkgs_to_install], }); }, $options ); } else { info "simulate installing remaining packages inside the" . " chroot..."; run_apt_progress({ ARGV => [ 'apt-get', '--yes', '-oAPT::Get::Simulate=true', 'install' ], PKGS => [@pkgs_to_install], }); } } } else { error "unknown mode: $options->{mode}"; } return; } sub run_cleanup() { my $options = shift; if (any { $_ eq 'cleanup/apt' } @{ $options->{skip} }) { info "skipping cleanup/apt as requested"; } else { if ( none { $_ eq 'cleanup/apt/lists' } @{ $options->{skip} } and none { $_ eq 'cleanup/apt/cache' } @{ $options->{skip} }) { info "cleaning package lists and apt cache..."; } if (any { $_ eq 'cleanup/apt/lists' } @{ $options->{skip} }) { info "skipping cleanup/apt/lists as requested"; } else { if (any { $_ eq 'cleanup/apt/cache' } @{ $options->{skip} }) { info "cleaning package lists..."; } run_apt_progress({ ARGV => [ 'apt-get', '--option', 'Dir::Etc::SourceList=/dev/null', '--option', 'Dir::Etc::SourceParts=/dev/null', 'update' ], CHDIR => $options->{root}, }); } if (any { $_ eq 'cleanup/apt/cache' } @{ $options->{skip} }) { info "skipping cleanup/apt/cache as requested"; } else { if (any { $_ eq 'cleanup/apt/lists' } @{ $options->{skip} }) { info "cleaning apt cache..."; } run_apt_progress( { ARGV => ['apt-get', 'clean'], CHDIR => $options->{root} }); } # apt since 1.6 creates the auxfiles directory. If apt inside the # chroot is older than that, then it will not know how to clean it. 
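# In that case the directory is removed from the outside; the system()
# call below amounts to running
#
#   rm --interactive=never --recursive --preserve-root --one-file-system \
#       "$options->{root}/var/lib/apt/lists/auxfiles"
#
# where --preserve-root and --one-file-system act as a safety net so rm
# never operates on "/" itself and does not descend across mount points.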
if (-e "$options->{root}/var/lib/apt/lists/auxfiles") { 0 == system( 'rm', '--interactive=never', '--recursive', '--preserve-root', '--one-file-system', "$options->{root}/var/lib/apt/lists/auxfiles" ) or error "rm failed: $?"; } } if (any { $_ eq 'cleanup/mmdebstrap' } @{ $options->{skip} }) { info "skipping cleanup/mmdebstrap as requested"; } else { # clean up temporary configuration file unlink "$options->{root}/etc/apt/apt.conf.d/00mmdebstrap" or error "failed to unlink /etc/apt/apt.conf.d/00mmdebstrap: $!"; if (defined $ENV{APT_CONFIG} && -e $ENV{APT_CONFIG}) { unlink $ENV{APT_CONFIG} or error "failed to unlink $ENV{APT_CONFIG}: $!"; } if (any { $_ eq 'cleanup/mmdebstrap/qemu' } @{ $options->{skip} }) { info "skipping cleanup/mmdebstrap/qume as requested"; } elsif (defined $options->{qemu} and any { $_ eq $options->{mode} } ('root', 'unshare') and -e "$options->{root}/usr/bin/qemu-$options->{qemu}-static") { unlink "$options->{root}/usr/bin/qemu-$options->{qemu}-static" or error "cannot unlink /usr/bin/qemu-$options->{qemu}-static: $!"; } } if (any { $_ eq 'cleanup/reproducible' } @{ $options->{skip} }) { info "skipping cleanup/reproducible as requested"; } else { # clean up certain files to make output reproducible foreach my $fname ( '/var/log/dpkg.log', '/var/log/apt/history.log', '/var/log/apt/term.log', '/var/log/alternatives.log', '/var/cache/ldconfig/aux-cache', '/var/log/apt/eipp.log.xz', '/var/lib/dbus/machine-id' ) { my $path = "$options->{root}$fname"; if (!-e $path) { next; } unlink $path or error "cannot unlink $path: $!"; } if (-e "$options->{root}/etc/machine-id") { # from machine-id(5): # For operating system images which are created once and used on # multiple machines, for example for containers or in the cloud, # /etc/machine-id should be an empty file in the generic file # system image. An ID will be generated during boot and saved to # this file if possible. Having an empty file in place is useful # because it allows a temporary file to be bind-mounted over the # real file, in case the image is used read-only. unlink "$options->{root}/etc/machine-id" or error "cannot unlink /etc/machine-id: $!"; open my $fh, '>', "$options->{root}/etc/machine-id" or error "failed to open(): $!"; print $fh "uninitialized\n"; close $fh; } } if (any { $_ eq 'cleanup/run' } @{ $options->{skip} }) { info "skipping cleanup/run as requested"; } else { # remove any possible leftovers in /run if (-d "$options->{root}/run") { opendir(my $dh, "$options->{root}/run") or error "Can't opendir($options->{root}/run): $!"; while (my $entry = readdir $dh) { # skip the "." and ".." entries next if $entry eq "."; next if $entry eq ".."; debug "deleting files in /run: $entry"; 0 == system( 'rm', '--interactive=never', '--recursive', '--preserve-root', '--one-file-system', "$options->{root}/run/$entry" ) or error "rm failed: $?"; } closedir($dh); } } if (any { $_ eq 'cleanup/tmp' } @{ $options->{skip} }) { info "skipping cleanup/tmp as requested"; } else { # remove any possible leftovers in /tmp if (-d "$options->{root}/tmp") { opendir(my $dh, "$options->{root}/tmp") or error "Can't opendir($options->{root}/tmp): $!"; while (my $entry = readdir $dh) { # skip the "." and ".." 
entries next if $entry eq "."; next if $entry eq ".."; debug "deleting files in /tmp: $entry"; 0 == system( 'rm', '--interactive=never', '--recursive', '--preserve-root', '--one-file-system', "$options->{root}/tmp/$entry" ) or error "rm failed: $?"; } closedir($dh); } } return; } # messages from process inside unshared namespace to the outside # openw -- open file for writing # untar -- extract tar into directory # write -- write data to last opened file or tar process # close -- finish file writing or tar extraction # adios -- last message and tear-down # messages from process outside unshared namespace to the inside # okthx -- success sub checkokthx { my $fh = shift; my $ret = read($fh, my $buf, 2 + 5) // error "cannot read from socket: $!"; if ($ret == 0) { error "received eof on socket"; } my ($len, $msg) = unpack("nA5", $buf); if ($msg ne "okthx") { error "expected okthx but got: $msg"; } if ($len != 0) { error "expected no payload but got $len bytes"; } return; } # resolve a path inside a chroot sub chrooted_realpath { my $root = shift; my $src = shift; my $result = $root; my $prefix; # relative paths are relative to the root of the chroot # remove prefixed slashes $src =~ s{^/+}{}; my $loop = 0; while (length $src) { if ($loop > 25) { error "too many levels of symbolic links"; } # Get the first directory component. ($prefix, $src) = split m{/+}, $src, 2; # Resolve the first directory component. if ($prefix eq ".") { # Ignore, stay at the same directory. } elsif ($prefix eq "..") { # Go up one directory. $result =~ s{(.*)/[^/]*}{$1}; # but not further than the root if ($result !~ m/^\Q$root\E/) { $result = $root; } } elsif (-l "$result/$prefix") { my $dst = readlink "$result/$prefix"; if ($dst =~ s{^/+}{}) { # Absolute pathname, reset result back to $root. $result = $root; } $src = length $src ? "$dst/$src" : $dst; $loop++; } else { # Otherwise append the prefix. $result = "$result/$prefix"; } } return $result; } sub hookhelper { # we put everything in an eval block because that way we can easily handle # errors without goto labels or much code duplication: the error handler # has to send an "error" message to the other side eval { my $root = $ARGV[1]; my $mode = $ARGV[2]; my $hook = $ARGV[3]; my $qemu = $ARGV[4]; $verbosity_level = $ARGV[5]; my $command = $ARGV[6]; my @cmdprefix = (); my @tarcmd = ( 'tar', '--numeric-owner', '--xattrs', '--format=pax', '--pax-option=exthdr.name=%d/PaxHeaders/%f,' . 'delete=atime,delete=ctime' ); if ($hook eq 'setup') { if ($mode eq 'proot') { # since we cannot run tar inside the chroot under proot during # the setup hook because the chroot is empty, we have to run # tar from the outside, which leads to all files being owned # by the user running mmdebstrap. To let the ownership # information not be completely off, we force all files be # owned by the root user. 
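# (With GNU tar, --owner=0 and --group=0 record root:root as the owner
# of the archive members instead of the unprivileged user that actually
# runs tar here.)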
push @tarcmd, '--owner=0', '--group=0'; } } elsif (any { $_ eq $hook } ('extract', 'essential', 'customize')) { if ($mode eq 'fakechroot') { # Fakechroot requires tar to run inside the chroot or # otherwise absolute symlinks will include the path to the # root directory push @cmdprefix, 'chroot', $root; } elsif ($mode eq 'proot') { # proot requires tar to run inside proot or otherwise # permissions will be completely off push @cmdprefix, 'proot', '--root-id', "--rootfs=$root", '--cwd=/', "--qemu=$qemu"; } elsif (any { $_ eq $mode } ('root', 'chrootless', 'unshare')) { # not chrooting in this case } else { error "unknown mode: $mode"; } } else { error "unknown hook: $hook"; } if ( any { $_ eq $command } ('copy-in', 'tar-in', 'upload', 'sync-in') ) { if (scalar @ARGV < 9) { error "$command needs at least one path on the" . " outside and the output path inside the chroot"; } my $outpath = $ARGV[-1]; for (my $i = 7 ; $i < $#ARGV ; $i++) { # the right argument for tar's --directory argument depends on # whether tar is called from inside the chroot or from the # outside my $directory; if ($hook eq 'setup') { # tar runs outside, so acquire the correct path $directory = chrooted_realpath $root, $outpath; } elsif ( any { $_ eq $hook } ('extract', 'essential', 'customize') ) { if (any { $_ eq $mode } ('fakechroot', 'proot')) { # tar will run inside the chroot $directory = $outpath; } elsif ( any { $_ eq $mode } ('root', 'chrootless', 'unshare') ) { $directory = chrooted_realpath $root, $outpath; } else { error "unknown mode: $mode"; } } else { error "unknown hook: $hook"; } # if chrooted_realpath was used and if neither fakechroot or # proot were used (absolute symlinks will be broken) we can # check and potentially fail early if the target does not exist if (none { $_ eq $mode } ('fakechroot', 'proot')) { my $dirtocheck = $directory; if ($command eq 'upload') { # check the parent directory instead $dirtocheck =~ s/(.*)\/[^\/]*/$1/; } if (!-e $dirtocheck) { error "path does not exist: $dirtocheck"; } if (!-d $dirtocheck) { error "path is not a directory: $dirtocheck"; } } my $fh; if ($command eq 'upload') { # open the requested file for writing open $fh, '|-', @cmdprefix, 'sh', '-c', 'cat > "$1"', 'exec', $directory // error "failed to fork(): $!"; } elsif ( any { $_ eq $command } ('copy-in', 'tar-in', 'sync-in') ) { # open a tar process that extracts the tarfile that we # supply it with on stdin to the output directory inside # the chroot my @cmd = ( @cmdprefix, @tarcmd, '--xattrs-include=*', '--directory', $directory, '--extract', '--file', '-' ); debug("helper: running " . (join " ", @cmd)); open($fh, '|-', @cmd) // error "failed to fork(): $!"; } else { error "unknown command: $command"; } if ($command eq 'copy-in') { # instruct the parent process to create a tarball of the # requested path outside the chroot debug "helper: sending mktar"; print STDOUT ( pack("n", length $ARGV[$i]) . "mktar" . $ARGV[$i]); } elsif ($command eq 'sync-in') { # instruct the parent process to create a tarball of the # content of the requested path outside the chroot debug "helper: sending mktac"; print STDOUT ( pack("n", length $ARGV[$i]) . "mktac" . $ARGV[$i]); } elsif (any { $_ eq $command } ('upload', 'tar-in')) { # instruct parent process to open a tarball of the # requested path outside the chroot for reading debug "helper: sending openr"; print STDOUT ( pack("n", length $ARGV[$i]) . "openr" . 
$ARGV[$i]); } else { error "unknown command: $command"; } STDOUT->flush(); debug "helper: waiting for okthx"; checkokthx \*STDIN; # handle "write" messages from the parent process and feed # their payload into the tar process until a "close" message # is encountered while (1) { # receive the next message my $ret = read(STDIN, my $buf, 2 + 5) // error "cannot read from socket: $!"; if ($ret == 0) { error "received eof on socket"; } my ($len, $msg) = unpack("nA5", $buf); debug "helper: received message: $msg"; if ($msg eq "close") { # finish the loop if ($len != 0) { error "expected no payload but got $len bytes"; } debug "helper: sending okthx"; print STDOUT (pack("n", 0) . "okthx") or error "cannot write to socket: $!"; STDOUT->flush(); last; } elsif ($msg ne "write") { error "expected write but got: $msg"; } # read the payload my $content; { my $ret = read(STDIN, $content, $len) // error "error cannot read from socket: $!"; if ($ret == 0) { error "received eof on socket"; } } # write the payload to the tar process print $fh $content or error "cannot write to tar process: $!"; debug "helper: sending okthx"; print STDOUT (pack("n", 0) . "okthx") or error "cannot write to socket: $!"; STDOUT->flush(); } close $fh; if ($command ne 'upload' and $? != 0) { error "tar failed"; } } } elsif ( any { $_ eq $command } ('copy-out', 'tar-out', 'download', 'sync-out') ) { if (scalar @ARGV < 9) { error "$command needs at least one path inside the chroot and" . " the output path on the outside"; } my $outpath = $ARGV[-1]; for (my $i = 7 ; $i < $#ARGV ; $i++) { # the right argument for tar's --directory argument depends on # whether tar is called from inside the chroot or from the # outside my $directory; if ($hook eq 'setup') { # tar runs outside, so acquire the correct path $directory = chrooted_realpath $root, $ARGV[$i]; } elsif ( any { $_ eq $hook } ('extract', 'essential', 'customize') ) { if (any { $_ eq $mode } ('fakechroot', 'proot')) { # tar will run inside the chroot $directory = $ARGV[$i]; } elsif ( any { $_ eq $mode } ('root', 'chrootless', 'unshare') ) { $directory = chrooted_realpath $root, $ARGV[$i]; } else { error "unknown mode: $mode"; } } else { error "unknown hook: $hook"; } # if chrooted_realpath was used and if neither fakechroot or # proot were used (absolute symlinks will be broken) we can # check and potentially fail early if the source does not exist if (none { $_ eq $mode } ('fakechroot', 'proot')) { if (!-e $directory) { error "path does not exist: $directory"; } if ($command eq 'download') { if (!-f $directory) { error "path is not a file: $directory"; } } } my $fh; if ($command eq 'download') { # open the requested file for reading open $fh, '-|', @cmdprefix, 'sh', '-c', 'cat "$1"', 'exec', $directory // error "failed to fork(): $!"; } elsif ($command eq 'sync-out') { # Open a tar process that creates a tarfile of everything # inside the requested directory inside the chroot and # writes it to stdout. my @cmd = ( @cmdprefix, @tarcmd, '--directory', $directory, '--create', '--file', '-', '.' ); debug("helper: running " . (join " ", @cmd)); open($fh, '-|', @cmd) // error "failed to fork(): $!"; } elsif (any { $_ eq $command } ('copy-out', 'tar-out')) { # Open a tar process that creates a tarfile of the # requested directory inside the chroot and writes it to # stdout. To emulate the behaviour of cp, change to the # dirname of the requested path first. 
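# As an illustration with a made-up path: a copy-out of "/etc/hosts"
# (already resolved into $directory) runs roughly
#
#   tar --directory <dirname of $directory> --create --file - hosts
#
# so the archive holds the single member "hosts", mirroring how cp
# would name the copy; @cmdprefix and the pax options from @tarcmd are
# left out of this sketch.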
my @cmd = ( @cmdprefix, @tarcmd, '--directory', dirname($directory), '--create', '--file', '-', basename($directory)); debug("helper: running " . (join " ", @cmd)); open($fh, '-|', @cmd) // error "failed to fork(): $!"; } else { error "unknown command: $command"; } if (any { $_ eq $command } ('copy-out', 'sync-out')) { # instruct the parent process to extract a tarball to a # certain path outside the chroot debug "helper: sending untar"; print STDOUT ( pack("n", length $outpath) . "untar" . $outpath); } elsif (any { $_ eq $command } ('download', 'tar-out')) { # instruct parent process to open a tarball of the # requested path outside the chroot for writing debug "helper: sending openw"; print STDOUT ( pack("n", length $outpath) . "openw" . $outpath); } else { error "unknown command: $command"; } STDOUT->flush(); debug "helper: waiting for okthx"; checkokthx \*STDIN; # read from the tar process and send as payload to the parent # process while (1) { # read from tar my $ret = read($fh, my $cont, 4096) // error "cannot read from pipe: $!"; if ($ret == 0) { last; } debug "helper: sending write"; # send to parent print STDOUT pack("n", $ret) . "write" . $cont; STDOUT->flush(); debug "helper: waiting for okthx"; checkokthx \*STDIN; if ($ret < 4096) { last; } } # signal to the parent process that we are done debug "helper: sending close"; print STDOUT pack("n", 0) . "close"; STDOUT->flush(); debug "helper: waiting for okthx"; checkokthx \*STDIN; close $fh; if ($? != 0) { error "$command failed"; } } } else { error "unknown command: $command"; } }; if ($@) { # inform the other side that something went wrong print STDOUT (pack("n", 0) . "error"); STDOUT->flush(); error "hookhelper failed: $@"; } return; } sub hooklistener { # we put everything in an eval block because that way we can easily handle # errors without goto labels or much code duplication: the error handler # has to send an "error" message to the other side eval { $verbosity_level = $ARGV[1]; while (1) { # get the next message my $msg = "error"; my $len = -1; { debug "listener: reading next command"; my $ret = read(STDIN, my $buf, 2 + 5) // error "cannot read from socket: $!"; debug "listener: finished reading command"; if ($ret == 0) { error "received eof on socket"; } ($len, $msg) = unpack("nA5", $buf); } if ($msg eq "adios") { debug "listener: received message: adios"; # setup finished, so we break out of the loop if ($len != 0) { error "expected no payload but got $len bytes"; } last; } elsif ($msg eq "openr") { # handle the openr message debug "listener: received message: openr"; my $infile; { my $ret = read(STDIN, $infile, $len) // error "cannot read from socket: $!"; if ($ret == 0) { error "received eof on socket"; } } # make sure that the requested path exists outside the chroot if (!-e $infile) { error "$infile does not exist"; } debug "listener: sending okthx"; print STDOUT (pack("n", 0) . "okthx") or error "cannot write to socket: $!"; STDOUT->flush(); open my $fh, '<', $infile or error "failed to open $infile for reading: $!"; # read from the file and send as payload to the child process while (1) { # read from file my $ret = read($fh, my $cont, 4096) // error "cannot read from pipe: $!"; if ($ret == 0) { last; } debug "listener: sending write"; # send to child print STDOUT pack("n", $ret) . "write" . $cont; STDOUT->flush(); debug "listener: waiting for okthx"; checkokthx \*STDIN; if ($ret < 4096) { last; } } # signal to the child process that we are done debug "listener: sending close"; print STDOUT pack("n", 0) . 
"close"; STDOUT->flush(); debug "listener: waiting for okthx"; checkokthx \*STDIN; close $fh; } elsif ($msg eq "openw") { debug "listener: received message: openw"; # payload is the output directory my $outfile; { my $ret = read(STDIN, $outfile, $len) // error "cannot read from socket: $!"; if ($ret == 0) { error "received eof on socket"; } } # make sure that the directory exists my $outdir = dirname($outfile); if (-e $outdir) { if (!-d $outdir) { error "$outdir already exists but is not a directory"; } } else { my $num_created = make_path $outdir, { error => \my $err }; if ($err && @$err) { error( join "; ", ( map { "cannot create " . (join ": ", %{$_}) } @$err )); } elsif ($num_created == 0) { error "cannot create $outdir"; } } debug "listener: sending okthx"; print STDOUT (pack("n", 0) . "okthx") or error "cannot write to socket: $!"; STDOUT->flush(); # now we expect one or more "write" messages containing the # tarball to write open my $fh, '>', $outfile or error "failed to open $outfile for writing: $!"; # handle "write" messages from the child process and feed # their payload into the file handle until a "close" message # is encountered while (1) { # receive the next message my $ret = read(STDIN, my $buf, 2 + 5) // error "cannot read from socket: $!"; if ($ret == 0) { error "received eof on socket"; } my ($len, $msg) = unpack("nA5", $buf); debug "listener: received message: $msg"; if ($msg eq "close") { # finish the loop if ($len != 0) { error "expected no payload but got $len bytes"; } debug "listener: sending okthx"; print STDOUT (pack("n", 0) . "okthx") or error "cannot write to socket: $!"; STDOUT->flush(); last; } elsif ($msg ne "write") { # we should not receive this message at this point error "expected write but got: $msg"; } # read the payload my $content; { my $ret = read(STDIN, $content, $len) // error "error cannot read from socket: $!"; if ($ret == 0) { error "received eof on socket"; } } # write the payload to the file handle print $fh $content or error "cannot write to file handle: $!"; debug "listener: sending okthx"; print STDOUT (pack("n", 0) . "okthx") or error "cannot write to socket: $!"; STDOUT->flush(); } close $fh; } elsif (any { $_ eq $msg } ('mktar', 'mktac')) { # handle the mktar message debug "listener: received message: $msg"; my $indir; { my $ret = read(STDIN, $indir, $len) // error "cannot read from socket: $!"; if ($ret == 0) { error "received eof on socket"; } } # make sure that the requested path exists outside the chroot if (!-e $indir) { error "$indir does not exist"; } debug "listener: sending okthx"; print STDOUT (pack("n", 0) . "okthx") or error "cannot write to socket: $!"; STDOUT->flush(); # Open a tar process creating a tarfile of the instructed # path. To emulate the behaviour of cp, change to the # dirname of the requested path first. my @cmd = ( 'tar', '--numeric-owner', '--xattrs', '--format=pax', '--pax-option=exthdr.name=%d/PaxHeaders/%f,' . 'delete=atime,delete=ctime', '--directory', $msg eq 'mktar' ? dirname($indir) : $indir, '--create', '--file', '-', $msg eq 'mktar' ? basename($indir) : '.' ); debug("listener: running " . (join " ", @cmd)); open(my $fh, '-|', @cmd) // error "failed to fork(): $!"; # read from the tar process and send as payload to the child # process while (1) { # read from tar my $ret = read($fh, my $cont, 4096) // error "cannot read from pipe: $!"; if ($ret == 0) { last; } debug "listener: sending write ($ret bytes)"; # send to child print STDOUT pack("n", $ret) . "write" . 
$cont; STDOUT->flush(); debug "listener: waiting for okthx"; checkokthx \*STDIN; if ($ret < 4096) { last; } } # signal to the child process that we are done debug "listener: sending close"; print STDOUT pack("n", 0) . "close"; STDOUT->flush(); debug "listener: waiting for okthx"; checkokthx \*STDIN; close $fh; if ($? != 0) { error "tar failed"; } } elsif ($msg eq "untar") { debug "listener: received message: untar"; # payload is the output directory my $outdir; { my $ret = read(STDIN, $outdir, $len) // error "cannot read from socket: $!"; if ($ret == 0) { error "received eof on socket"; } } # make sure that the directory exists if (-e $outdir) { if (!-d $outdir) { error "$outdir already exists but is not a directory"; } } else { my $num_created = make_path $outdir, { error => \my $err }; if ($err && @$err) { error( join "; ", ( map { "cannot create " . (join ": ", %{$_}) } @$err )); } elsif ($num_created == 0) { error "cannot create $outdir"; } } debug "listener: sending okthx"; print STDOUT (pack("n", 0) . "okthx") or error "cannot write to socket: $!"; STDOUT->flush(); # now we expect one or more "write" messages containing the # tarball to unpack open my $fh, '|-', 'tar', '--numeric-owner', '--xattrs', '--xattrs-include=*', '--directory', $outdir, '--extract', '--file', '-' // error "failed to fork(): $!"; # handle "write" messages from the child process and feed # their payload into the tar process until a "close" message # is encountered while (1) { # receive the next message my $ret = read(STDIN, my $buf, 2 + 5) // error "cannot read from socket: $!"; if ($ret == 0) { error "received eof on socket"; } my ($len, $msg) = unpack("nA5", $buf); debug "listener: received message: $msg"; if ($msg eq "close") { # finish the loop if ($len != 0) { error "expected no payload but got $len bytes"; } debug "listener: sending okthx"; print STDOUT (pack("n", 0) . "okthx") or error "cannot write to socket: $!"; STDOUT->flush(); last; } elsif ($msg ne "write") { # we should not receive this message at this point error "expected write but got: $msg"; } # read the payload my $content; { my $ret = read(STDIN, $content, $len) // error "error cannot read from socket: $!"; if ($ret == 0) { error "received eof on socket"; } } # write the payload to the tar process print $fh $content or error "cannot write to tar process: $!"; debug "listener: sending okthx"; print STDOUT (pack("n", 0) . "okthx") or error "cannot write to socket: $!"; STDOUT->flush(); } close $fh; if ($? != 0) { error "tar failed"; } } else { error "unknown message: $msg"; } } }; if ($@) { debug("hooklistener errored out: $@"); # inform the other side that something went wrong print STDOUT (pack("n", 0) . 
"error") or error "cannot write to socket: $!"; STDOUT->flush(); } return; } # parse files of the format found in /usr/share/distro-info/ and return two # lists: the first contains codenames of end-of-life distros and the second # list contains codenames of currently active distros sub parse_distro_info { my $file = shift; my @eol = (); my @current = (); my $today = POSIX::strftime "%Y-%m-%d", localtime; open my $fh, '<', $file or error "cannot open $file: $!"; my $i = 0; while (my $line = <$fh>) { chomp($line); $i++; my @cells = split /,/, $line; if (scalar @cells < 4) { error "cannot parse line $i of $file"; } if ( $i == 1 and ( scalar @cells < 6 or $cells[0] ne 'version' or $cells[1] ne 'codename' or $cells[2] ne 'series' or $cells[3] ne 'created' or $cells[4] ne 'release' or $cells[5] ne 'eol') ) { error "cannot find correct header in $file"; } if ($i == 1) { next; } if (scalar @cells == 6) { if ($cells[5] !~ m/^\d\d\d\d-\d\d-\d\d$/) { error "invalid eof date format in $file:$i: $cells[5]"; } # since the date format is iso8601, we can use lexicographic string # comparison to compare dates if ($cells[5] lt $today) { push @eol, $cells[2]; } else { push @current, $cells[2]; } } else { push @current, $cells[2]; } } close $fh; return ([@eol], [@current]); } sub get_suite_by_vendor { my %suite_by_vendor = ( 'debian' => {}, 'ubuntu' => {}, 'tanglu' => {}, 'kali' => {}, ); # pre-fill with some known values foreach my $suite ( 'potato', 'woody', 'sarge', 'etch', 'lenny', 'squeeze', 'wheezy', 'jessie' ) { $suite_by_vendor{'debian'}->{$suite} = 1; } foreach my $suite ( 'unstable', 'stable', 'oldstable', 'stretch', 'buster', 'bullseye', 'bookworm', 'trixie' ) { $suite_by_vendor{'debian'}->{$suite} = 0; } foreach my $suite ('aequorea', 'bartholomea', 'chromodoris', 'dasyatis') { $suite_by_vendor{'tanglu'}->{$suite} = 0; } foreach my $suite ('kali-dev', 'kali-rolling', 'kali-bleeding-edge') { $suite_by_vendor{'kali'}->{$suite} = 0; } foreach my $suite ('trusty', 'xenial', 'zesty', 'artful', 'bionic', 'cosmic') { $suite_by_vendor{'ubuntu'}->{$suite} = 0; } # if the Debian package distro-info-data is installed, then we can use it, # to get better data about new distros or EOL distros if (-e '/usr/share/distro-info/debian.csv') { my ($eol, $current) = parse_distro_info('/usr/share/distro-info/debian.csv'); foreach my $suite (@{$eol}) { $suite_by_vendor{'debian'}->{$suite} = 1; } foreach my $suite (@{$current}) { $suite_by_vendor{'debian'}->{$suite} = 0; } } if (-e '/usr/share/distro-info/ubuntu.csv') { my ($eol, $current) = parse_distro_info('/usr/share/distro-info/ubuntu.csv'); foreach my $suite (@{$eol}, @{$current}) { $suite_by_vendor{'ubuntu'}->{$suite} = 0; } } # if debootstrap is installed we infer distro names from the symlink # targets of the scripts in /usr/share/debootstrap/scripts/ my $debootstrap_scripts = '/usr/share/debootstrap/scripts/'; if (-d $debootstrap_scripts) { opendir(my $dh, $debootstrap_scripts) or error "Can't opendir($debootstrap_scripts): $!"; while (my $suite = readdir $dh) { # this is only a heuristic -- don't overwrite anything but instead # just update anything that was missing if (!-l "$debootstrap_scripts/$suite") { next; } my $target = readlink "$debootstrap_scripts/$suite"; if ($target eq "sid" and not exists $suite_by_vendor{'debian'}->{$suite}) { $suite_by_vendor{'debian'}->{$suite} = 0; } elsif ($target eq "gutsy" and not exists $suite_by_vendor{'ubuntu'}->{$suite}) { $suite_by_vendor{'ubuntu'}->{$suite} = 0; } elsif ($target eq "aequorea" and not exists 
$suite_by_vendor{'tanglu'}->{$suite}) { $suite_by_vendor{'tanglu'}->{$suite} = 0; } elsif ($target eq "kali" and not exists $suite_by_vendor{'kali'}->{$suite}) { $suite_by_vendor{'kali'}->{$suite} = 0; } } closedir($dh); } return %suite_by_vendor; } # try to guess the right keyring path for the given suite sub get_keyring_by_suite { my $query = shift; my $suite_by_vendor = shift; my $debianvendor; my $ubuntuvendor; # make $@ local, so we don't print "Can't locate Dpkg/Vendor/Debian.pm" # in other parts where we evaluate $@ local $@ = ''; eval { require Dpkg::Vendor::Debian; require Dpkg::Vendor::Ubuntu; $debianvendor = Dpkg::Vendor::Debian->new(); $ubuntuvendor = Dpkg::Vendor::Ubuntu->new(); }; my $keyring_by_vendor = sub { my $vendor = shift; my $eol = shift; if ($vendor eq 'debian') { if ($eol) { if (defined $debianvendor) { return $debianvendor->run_hook( 'archive-keyrings-historic'); } else { return '/usr/share/keyrings/debian-archive-removed-keys.gpg'; } } else { if (defined $debianvendor) { return $debianvendor->run_hook('archive-keyrings'); } else { return '/usr/share/keyrings/debian-archive-keyring.gpg'; } } } elsif ($vendor eq 'ubuntu') { if (defined $ubuntuvendor) { return $ubuntuvendor->run_hook('archive-keyrings'); } else { return '/usr/share/keyrings/ubuntu-archive-keyring.gpg'; } } elsif ($vendor eq 'tanglu') { return '/usr/share/keyrings/tanglu-archive-keyring.gpg'; } elsif ($vendor eq 'kali') { return '/usr/share/keyrings/kali-archive-keyring.gpg'; } else { error "unknown vendor: $vendor"; } }; my %keyrings = (); foreach my $vendor (keys %{$suite_by_vendor}) { foreach my $suite (keys %{ $suite_by_vendor->{$vendor} }) { my $keyring = $keyring_by_vendor->( $vendor, $suite_by_vendor->{$vendor}->{$suite}); debug "suite $suite with keyring $keyring"; $keyrings{$suite} = $keyring; } } if (exists $keyrings{$query}) { return $keyrings{$query}; } else { return; } } sub get_sourceslist_by_suite { my $suite = shift; my $arch = shift; my $signedby = shift; my $compstr = shift; my $suite_by_vendor = shift; my @debstable = keys %{ $suite_by_vendor->{'debian'} }; my @ubuntustable = keys %{ $suite_by_vendor->{'ubuntu'} }; my @tanglustable = keys %{ $suite_by_vendor->{'tanglu'} }; my @kali = keys %{ $suite_by_vendor->{'kali'} }; my $mirror = 'http://deb.debian.org/debian'; my $secmirror = 'http://security.debian.org/debian-security'; if (any { $_ eq $suite } @ubuntustable) { if (any { $_ eq $arch } ('amd64', 'i386')) { $mirror = 'http://archive.ubuntu.com/ubuntu'; $secmirror = 'http://security.ubuntu.com/ubuntu'; } else { $mirror = 'http://ports.ubuntu.com/ubuntu-ports'; $secmirror = 'http://ports.ubuntu.com/ubuntu-ports'; } if (-e '/usr/share/debootstrap/scripts/gutsy') { # try running the debootstrap script but ignore errors my $script = 'set -eu; default_mirror() { echo $1; }; mirror_style() { :; }; download_style() { :; }; finddebs_style() { :; }; variants() { :; }; keyring() { :; }; doing_variant() { false; }; . /usr/share/debootstrap/scripts/gutsy;'; open my $fh, '-|', 'env', "ARCH=$arch", "SUITE=$suite", 'sh', '-c', $script // last; chomp( my $output = do { local $/; <$fh> } ); close $fh; if ($? 
== 0 && $output ne '') { $mirror = $output; } } } elsif (any { $_ eq $suite } @tanglustable) { $mirror = 'http://archive.tanglu.org/tanglu'; } elsif (any { $_ eq $suite } @kali) { $mirror = 'https://http.kali.org/kali'; } my $sourceslist = ''; $sourceslist .= "deb$signedby $mirror $suite $compstr\n"; if (any { $_ eq $suite } @ubuntustable) { $sourceslist .= "deb$signedby $mirror $suite-updates $compstr\n"; $sourceslist .= "deb$signedby $secmirror $suite-security $compstr\n"; } elsif (any { $_ eq $suite } @tanglustable) { $sourceslist .= "deb$signedby $secmirror $suite-updates $compstr\n"; } elsif (any { $_ eq $suite } @debstable and none { $_ eq $suite } ('testing', 'unstable', 'sid')) { $sourceslist .= "deb$signedby $mirror $suite-updates $compstr\n"; # the security mirror changes, starting with bullseye # https://lists.debian.org/87r26wqr2a.fsf@43-1.org my $bullseye_or_later = 0; if ( any { $_ eq $suite } ('stable', 'bullseye', 'bookworm', 'trixie') ) { $bullseye_or_later = 1; } my $distro_info = '/usr/share/distro-info/debian.csv'; # make $@ local, so we don't print "Can't locate Debian/DistroInfo.pm" # in other parts where we evaluate $@ local $@ = ''; eval { require Debian::DistroInfo; }; if (!$@) { debug "libdistro-info-perl is installed"; my $debinfo = DebianDistroInfo->new(); if ($debinfo->version($suite, 0) >= 11) { $bullseye_or_later = 1; } } elsif (-f $distro_info) { debug "distro-info-data is installed"; open my $fh, '<', $distro_info or error "cannot open $distro_info: $!"; my $i = 0; my $matching_version; my @releases; my $today = POSIX::strftime "%Y-%m-%d", localtime; while (my $line = <$fh>) { chomp($line); $i++; my @cells = split /,/, $line; if (scalar @cells < 4) { error "cannot parse line $i of $distro_info"; } if ( $i == 1 and ( scalar @cells < 6 or $cells[0] ne 'version' or $cells[1] ne 'codename' or $cells[2] ne 'series' or $cells[3] ne 'created' or $cells[4] ne 'release' or $cells[5] ne 'eol') ) { error "cannot find correct header in $distro_info"; } if ($i == 1) { next; } if ( scalar @cells > 4 and $cells[4] =~ m/^\d\d\d\d-\d\d-\d\d$/ and $cells[4] lt $today) { push @releases, $cells[0]; } if (lc $cells[1] eq $suite or lc $cells[2] eq $suite) { $matching_version = $cells[0]; last; } } close $fh; if (defined $matching_version and $matching_version >= 11) { $bullseye_or_later = 1; } if ($suite eq "stable" and $releases[-1] >= 11) { $bullseye_or_later = 1; } } else { debug "neither libdistro-info-perl nor distro-info-data installed"; } if ($bullseye_or_later) { # starting from bullseye use $sourceslist .= "deb$signedby $secmirror $suite-security" . " $compstr\n"; } else { $sourceslist .= "deb$signedby $secmirror $suite/updates" . " $compstr\n"; } } return $sourceslist; } sub guess_sources_format { my $content = shift; my $is_deb822 = 0; my $is_oneline = 0; for my $line (split "\n", $content) { if ($line =~ /^deb(-src)? /) { $is_oneline = 1; last; } if ($line =~ /^[^#:\s]+:/) { $is_deb822 = 1; last; } } if ($is_deb822) { return 'deb822'; } if ($is_oneline) { return 'one-line'; } return; } sub approx_disk_usage { my $directory = shift; info "approximating disk usage..."; # the "du" utility reports different results depending on the underlying # filesystem, see https://bugs.debian.org/650077 for a discussion # # we use code similar to the one used by dpkg-gencontrol instead # # Regular files are measured in number of 1024 byte blocks. All other # entries are assumed to take one block of space. 
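    # As a hedged illustration of that accounting (the path below is
    # hypothetical and the line is not executed; it merely mirrors the
    # logic of the find() callback further down):
    #   my $blocks = -l $path ? 1
    #              : -f $path ? int(((-s $path) + 1024) / 1024)
    #              :            1;
    # so e.g. a 1500 byte regular file is charged int((1500 + 1024) / 1024)
    # = 2 blocks while a symlink or a directory is charged a single block.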
# # We ignore /dev because depending on the mode, the directory might be # populated or not and we want consistent disk usage results independent # of the mode. my $installed_size = 0; my $scan_installed_size = sub { if ($File::Find::name eq "$directory/dev") { # add all entries of @devfiles once $installed_size += scalar @devfiles; } elsif ($File::Find::name =~ /^$directory\/dev\//) { # ignore everything below /dev } elsif (-l $File::Find::name) { # -f follows symlinks, so we first check if we have a symlink $installed_size += 1; } elsif (-f $File::Find::name) { # add file size in 1024 byte blocks, rounded up $installed_size += int(((-s $File::Find::name) + 1024) / 1024); } else { # all other entries are assumed to only take up one block $installed_size += 1; } }; # We use no_chdir because otherwise the unshared user has to have read # permissions for the current working directory when producing an ext2 # image. See https://bugs.debian.org/1005857 find({ wanted => $scan_installed_size, no_chdir => 1 }, $directory); # because the above is only a heuristic we add 10% extra for good measure return int($installed_size * 1.1); } sub main() { my $before = Time::HiRes::time; umask 022; if (scalar @ARGV >= 7 && $ARGV[0] eq "--hook-helper") { hookhelper(); exit 0; } # this is the counterpart to --hook-helper and will receive and carry # out its instructions if (scalar @ARGV == 2 && $ARGV[0] eq "--hook-listener") { hooklistener(); exit 0; } # this is like: # lxc-usernsexec -- lxc-unshare -s 'MOUNT|PID|UTSNAME|IPC' ... # but without needing lxc if (scalar @ARGV >= 1 && $ARGV[0] eq "--unshare-helper") { if ($EFFECTIVE_USER_ID != 0 && !test_unshare_userns(1)) { exit 1; } my @idmap = (); if ($EFFECTIVE_USER_ID != 0) { @idmap = read_subuid_subgid; } my $pid = get_unshare_cmd( sub { 0 == system @ARGV[1 .. $#ARGV] or error "system failed: $?"; }, \@idmap ); waitpid $pid, 0; $? 
== 0 or error "unshared command failed"; exit 0; } my $mtime = time; if (exists $ENV{SOURCE_DATE_EPOCH}) { $mtime = $ENV{SOURCE_DATE_EPOCH} + 0; } { ## no critic (Variables::RequireLocalizedPunctuationVars) $ENV{DEBIAN_FRONTEND} = 'noninteractive'; $ENV{DEBCONF_NONINTERACTIVE_SEEN} = 'true'; $ENV{LC_ALL} = 'C.UTF-8'; $ENV{LANGUAGE} = 'C.UTF-8'; $ENV{LANG} = 'C.UTF-8'; } # copy ARGV because getopt modifies it my @ARGVORIG = @ARGV; # obtain the correct defaults for the keyring locations that apt knows # about my $apttrusted = `eval \$(apt-config shell v Dir::Etc::trusted/f); printf \$v`; my $apttrustedparts = `eval \$(apt-config shell v Dir::Etc::trustedparts/d); printf \$v`; chomp(my $hostarch = `dpkg --print-architecture`); my $options = { components => ["main"], variant => "important", include => [], architectures => [$hostarch], mode => 'auto', dpkgopts => [], aptopts => [], apttrusted => $apttrusted, apttrustedparts => $apttrustedparts, noop => [], setup_hook => [], extract_hook => [], essential_hook => [], customize_hook => [], dryrun => 0, skip => [], }; my $logfile = undef; my $format = 'auto'; Getopt::Long::Configure('default', 'bundling', 'auto_abbrev', 'ignore_case_always'); GetOptions( 'h|help' => sub { pod2usage(-exitval => 0, -verbose => 1) }, 'man' => sub { pod2usage(-exitval => 0, -verbose => 2) }, 'version' => sub { print STDOUT "mmdebstrap $VERSION\n"; exit 0; }, 'components=s@' => \$options->{components}, 'variant=s' => \$options->{variant}, 'include=s' => sub { my ($opt_name, $opt_value) = @_; my $sanitize_path = sub { my $pkg = shift; $pkg = abs_path($pkg); if (!defined $pkg) { error "cannot resolve absolute path of $pkg: $!"; } if (!-f $pkg) { error "$pkg is not an existing file"; } return $pkg; }; if ($opt_value =~ /^[?~!(]/) { # Treat option as a single apt pattern and don't split by comma # or whitespace -- append it verbatim. push @{ $options->{include} }, $opt_value; } elsif ($opt_value =~ /^\.?\.?\//) { # Treat option as a single path name and don't split by comma # or whitespace -- append the normalized path. push @{ $options->{include} }, sanitize_path($opt_value); } else { for my $pkg (split /[,\s]+/, $opt_value) { # strip leading and trailing whitespace $pkg =~ s/^\s+|\s+$//g; # skip if the remainder is an empty string if ($pkg eq '') { next; } # Make paths canonical absolute paths, resolve symlinks # and check if it's an existing file. if ($pkg =~ /^\.?\.?\//) { $pkg = sanitize_path($pkg); } push @{ $options->{include} }, $pkg; } } # We are not sorting or otherwise normalizing the order of # arguments to apt because package order matters for "apt install" # since https://salsa.debian.org/apt-team/apt/-/merge_requests/256 }, 'architectures=s@' => \$options->{architectures}, 'mode=s' => \$options->{mode}, 'dpkgopt=s@' => \$options->{dpkgopts}, 'aptopt=s@' => \$options->{aptopts}, 'keyring=s' => sub { my ($opt_name, $opt_value) = @_; if ($opt_value =~ /"/) { error "--keyring: apt cannot handle paths with double quotes:" . 
" $opt_value"; } if (!-e $opt_value) { error "keyring \"$opt_value\" does not exist"; } my $abs_path = abs_path($opt_value); if (!defined $abs_path) { error "unable to get absolute path of --keyring: $opt_value"; } # since abs_path resolved all symlinks for us, we can now test # what the actual target actually is if (-d $abs_path) { $options->{apttrustedparts} = $abs_path; } else { $options->{apttrusted} = $abs_path; } }, 's|silent' => sub { $verbosity_level = 0; }, 'q|quiet' => sub { $verbosity_level = 0; }, 'v|verbose' => sub { $verbosity_level = 2; }, 'd|debug' => sub { $verbosity_level = 3; }, 'format=s' => \$format, 'logfile=s' => \$logfile, # no-op options so that mmdebstrap can be used with # sbuild-createchroot --debootstrap=mmdebstrap 'resolve-deps' => sub { push @{ $options->{noop} }, 'resolve-deps'; }, 'merged-usr' => sub { push @{ $options->{noop} }, 'merged-usr'; }, 'no-merged-usr' => sub { push @{ $options->{noop} }, 'no-merged-usr'; }, 'force-check-gpg' => sub { push @{ $options->{noop} }, 'force-check-gpg'; }, 'setup-hook=s' => sub { push @{ $options->{setup_hook} }, $_[1]; }, 'extract-hook=s' => sub { push @{ $options->{extract_hook} }, $_[1]; }, 'essential-hook=s' => sub { push @{ $options->{essential_hook} }, $_[1]; }, 'customize-hook=s' => sub { push @{ $options->{customize_hook} }, $_[1]; }, 'hook-directory=s' => sub { my ($opt_name, $opt_value) = @_; if (!-e $opt_value) { error "hook directory \"$opt_value\" does not exist"; } my $abs_path = abs_path($opt_value); if (!defined $abs_path) { error( "unable to get absolute path of " . "--hook-directory: $opt_value"); } # since abs_path resolved all symlinks for us, we can now test # what the actual target actually is if (!-d $opt_value) { error "hook directory \"$opt_value\" is not a directory"; } # gather all files starting with special prefixes into the # respective keys of a hash my %scripts; opendir(my $dh, $opt_value) or error "Can't opendir($opt_value): $!"; while (my $entry = readdir $dh) { foreach my $hook ('setup', 'extract', 'essential', 'customize') { if ($entry =~ m/^\Q$hook\E/ and -x "$opt_value/$entry") { push @{ $scripts{$hook} }, "$opt_value/$entry"; } } } closedir($dh); # add the sorted list associated with each key to the respective # list of hooks foreach my $hook (keys %scripts) { push @{ $options->{"${hook}_hook"} }, (sort @{ $scripts{$hook} }); } }, # Sometimes --simulate fails even though non-simulate succeeds because # in simulate mode, apt cannot rely on dpkg to figure out tricky # dependency situations and will give up instead when it cannot find # a solution. # # 2020-02-06, #debian-apt on OFTC, times in UTC+1 # 12:52 < DonKult> [...] It works in non-simulation because simulate is # more picky. If you wanna know why simulate complains # here prepare for long suffering in dependency hell. 'simulate' => \$options->{dryrun}, 'dry-run' => \$options->{dryrun}, 'skip=s' => sub { my ($opt_name, $opt_value) = @_; for my $skip (split /[,\s]+/, $opt_value) { # strip leading and trailing whitespace $skip =~ s/^\s+|\s+$//g; # skip if the remainder is an empty string if ($skip eq '') { next; } push @{ $options->{skip} }, $skip; } }) or pod2usage(-exitval => 2, -verbose => 1); if (defined($logfile)) { open(STDERR, '>', $logfile) or error "cannot open $logfile: $!"; } foreach my $arg (@{ $options->{noop} }) { info "the option --$arg is a no-op. It only exists for compatibility" . 
" with some debootstrap wrappers."; } if ($options->{dryrun}) { foreach my $hook ('setup', 'extract', 'essential', 'customize') { if (scalar @{ $options->{"${hook}_hook"} } > 0) { warning "In dry-run mode, --$hook-hook options have no effect"; } } } my @valid_variants = ( 'extract', 'custom', 'essential', 'apt', 'required', 'minbase', 'buildd', 'important', 'debootstrap', '-', 'standard' ); if (none { $_ eq $options->{variant} } @valid_variants) { error "invalid variant. Choose from " . (join ', ', @valid_variants); } # debootstrap and - are an alias for important if (any { $_ eq $options->{variant} } ('-', 'debootstrap')) { $options->{variant} = 'important'; } # minbase is an alias for required if ($options->{variant} eq 'minbase') { $options->{variant} = 'required'; } # fakeroot is an alias for fakechroot if ($options->{mode} eq 'fakeroot') { $options->{mode} = 'fakechroot'; } # sudo is an alias for root if ($options->{mode} eq 'sudo') { $options->{mode} = 'root'; } my @valid_modes = ('auto', 'root', 'unshare', 'fakechroot', 'proot', 'chrootless'); if (none { $_ eq $options->{mode} } @valid_modes) { error "invalid mode. Choose from " . (join ', ', @valid_modes); } # sqfs is an alias for squashfs if ($format eq 'sqfs') { $format = 'squashfs'; } # dir is an alias for directory if ($format eq 'dir') { $format = 'directory'; } my @valid_formats = ('auto', 'directory', 'tar', 'squashfs', 'ext2', 'null'); if (none { $_ eq $format } @valid_formats) { error "invalid format. Choose from " . (join ', ', @valid_formats); } # setting PATH for chroot, ldconfig, start-stop-daemon... my $defaultpath = `eval \$(apt-config shell v DPkg::Path); printf \$v`; if (length $ENV{PATH}) { ## no critic (Variables::RequireLocalizedPunctuationVars) $ENV{PATH} = "$ENV{PATH}:$defaultpath"; } else { ## no critic (Variables::RequireLocalizedPunctuationVars) $ENV{PATH} = $defaultpath; } foreach my $tool ( 'dpkg', 'dpkg-deb', 'apt-get', 'apt-cache', 'apt-config', 'tar', 'rm', 'find', 'env' ) { if (!can_execute $tool) { error "cannot find $tool"; } } { my $dpkgversion = version->new(0); my $pid = open my $fh, '-|' // error "failed to fork(): $!"; if ($pid == 0) { # redirect stderr to /dev/null to hide error messages from dpkg # versions before 1.20.0 open(STDERR, '>', '/dev/null') or error "cannot open /dev/null for writing: $!"; exec 'dpkg', '--robot', '--version'; } chomp( my $content = do { local $/; <$fh> } ); close $fh; # the --robot option was introduced in 1.20.0 but until 1.20.2 the # output contained a string after the version, separated by a # whitespace -- since then, it's only the version if ($? == 0 and $content =~ /^([0-9.]+).*$/) { # dpkg is new enough for the --robot option $dpkgversion = version->new($1); } if ($dpkgversion < "1.20.0") { error "need dpkg >= 1.20.0 but have $dpkgversion"; } } { my $aptversion = version->new(0); my $pid = open my $fh, '-|', 'apt-get', '--version' // error "failed to fork(): $!"; chomp( my $content = do { local $/; <$fh> } ); close $fh; if ( $? == 0 and $content =~ /^apt (\d+\.\d+\.\d+)\w* \(\S+\)$/am) { $aptversion = version->new($1); } if ($aptversion < "2.3.14") { error "need apt >= 2.3.14 but have $aptversion"; } } my $check_fakechroot_running = sub { # test if we are inside fakechroot already # We fork a child process because setting FAKECHROOT_DETECT seems to # be an irreversible operation for fakechroot. 
my $pid = open my $rfh, '-|' // error "failed to fork(): $!"; if ($pid == 0) { # with the FAKECHROOT_DETECT environment variable set, any program # execution will be replaced with the output "fakeroot [version]" local $ENV{FAKECHROOT_DETECT} = 0; exec 'echo', 'If fakechroot is running, this will not be printed'; } my $content = do { local $/; <$rfh> }; waitpid $pid, 0; my $result = 0; if ($? == 0 and $content =~ /^fakechroot [0-9.]+$/) { $result = 1; } return $result; }; # figure out the mode to use or test whether the chosen mode is legal if ($options->{mode} eq 'auto') { if (&{$check_fakechroot_running}()) { # if mmdebstrap is executed inside fakechroot, then we assume the # user expects fakechroot mode $options->{mode} = 'fakechroot'; } elsif ($EFFECTIVE_USER_ID == 0) { # if mmdebstrap is executed as root, we assume the user wants root # mode $options->{mode} = 'root'; } elsif (test_unshare_userns(0)) { # if we are not root, unshare mode is our best option if # test_unshare_userns() succeeds $options->{mode} = 'unshare'; } elsif (can_execute 'fakechroot') { # the next fallback is fakechroot # exec ourselves again but within fakechroot my @prefix = (); if ($is_covering) { @prefix = ($EXECUTABLE_NAME, '-MDevel::Cover=-silent,-nogcov'); } exec 'fakechroot', 'fakeroot', @prefix, $PROGRAM_NAME, @ARGVORIG; } elsif (can_execute 'proot') { # and lastly, proot $options->{mode} = 'proot'; } else { error "unable to pick chroot mode automatically"; } info "automatically chosen mode: $options->{mode}"; } elsif ($options->{mode} eq 'root') { if ($EFFECTIVE_USER_ID != 0) { error "need to be root"; } } elsif ($options->{mode} eq 'proot') { if (!can_execute 'proot') { error "need working proot binary"; } } elsif ($options->{mode} eq 'fakechroot') { if (&{$check_fakechroot_running}()) { # fakechroot is already running } elsif (!can_execute 'fakechroot') { error "need working fakechroot binary"; } else { # exec ourselves again but within fakechroot my @prefix = (); if ($is_covering) { @prefix = ($EXECUTABLE_NAME, '-MDevel::Cover=-silent,-nogcov'); } exec 'fakechroot', 'fakeroot', @prefix, $PROGRAM_NAME, @ARGVORIG; } } elsif ($options->{mode} eq 'unshare') { # For unshare mode to work we either need to already be the root user # and then we do not have to unshare the user namespace anymore but we # need to be able to unshare the mount namespace... # # We need to call unshare with "--propagation unchanged" or otherwise # we get 'cannot change root filesystem propagation' when running # mmdebstrap inside a chroot for which the root of the chroot is not # its own mount point. if ($EFFECTIVE_USER_ID == 0 && 0 != system 'unshare --mount --propagation unchanged -- true') { error "unable to unshare the mount namespace"; } # ...or we are not root and then we need to be able to unshare the user # namespace. if ($EFFECTIVE_USER_ID != 0 && !test_unshare_userns(1)) { my $procfile = '/proc/sys/kernel/unprivileged_userns_clone'; open(my $fh, '<', $procfile) or error "failed to open $procfile: $!"; chomp( my $content = do { local $/; <$fh> } ); close($fh); if ($content ne "1") { info "/proc/sys/kernel/unprivileged_userns_clone is set to" . " $content"; info "Try running:"; info " sudo sysctl -w kernel.unprivileged_userns_clone=1"; info "or permanently enable unprivileged usernamespaces by" . " putting the setting into /etc/sysctl.d/"; info "THIS SETTING HAS SECURITY IMPLICATIONS!"; info "Refer to https://bugs.debian.org/cgi-bin/" . 
"bugreport.cgi?bug=898446"; } exit 1; } } elsif ($options->{mode} eq 'chrootless') { if ($EFFECTIVE_USER_ID == 0) { warning "running chrootless mode as root might damage the host " . "system"; } } else { error "unknown mode: $options->{mode}"; } $options->{canmount} = 1; if ($options->{mode} eq 'root') { # It's possible to be root but not be able to mount anything. # This is for example the case when running under docker. # Mounting needs CAP_SYS_ADMIN which might not be available. # # We test for CAP_SYS_ADMIN using the capget syscall. # We cannot use cap_get_proc from sys/capability.h because Perl. # We don't use capsh because we don't want to depend on libcap2-bin my $hdrp = pack( "Li", # __u32 followed by int $_LINUX_CAPABILITY_VERSION_3, # available since Linux 2.6.26 0 # caps of this process ); my $datap = pack("LLLLLL", 0, 0, 0, 0, 0, 0); # six __u32 0 == syscall &SYS_capget, $hdrp, $datap or error "capget failed: $!"; my ($effective, undef) = unpack "LLLLLL", $datap; if (($effective >> $CAP_SYS_ADMIN) & 1 != 1) { warning "cannot mount because CAP_SYS_ADMIN is not in the effective set"; $options->{canmount} = 0; } if (0 == syscall &SYS_prctl, $PR_CAPBSET_READ, $CAP_SYS_ADMIN) { warning "cannot mount because CAP_SYS_ADMIN is not in the bounding set"; $options->{canmount} = 0; } # To test whether we can use mount without actually trying to mount # something we try unsharing the mount namespace. If this is allowed, # then we are also allowed to mount. # # We need to call unshare with "--propagation unchanged" or otherwise # we get 'cannot change root filesystem propagation' when running # mmdebstrap inside a chroot for which the root of the chroot is not # its own mount point. if (0 != system 'unshare --mount --propagation unchanged -- true') { # if we cannot unshare the mount namespace as root, then we also # cannot mount warning "cannot mount because unshare --mount failed"; $options->{canmount} = 0; } } if (any { $_ eq $options->{mode} } ('root', 'unshare')) { if (!can_execute 'mount') { warning "cannot execute mount"; $options->{canmount} = 0; } } # we can only possibly mount in root and unshare mode if (none { $_ eq $options->{mode} } ('root', 'unshare')) { $options->{canmount} = 0; } my @architectures = (); foreach my $archs (@{ $options->{architectures} }) { foreach my $arch (split /[,\s]+/, $archs) { # strip leading and trailing whitespace $arch =~ s/^\s+|\s+$//g; # skip if the remainder is an empty string if ($arch eq '') { next; } # do not append component if it's already in the list if (any { $_ eq $arch } @architectures) { next; } push @architectures, $arch; } } $options->{nativearch} = $hostarch; $options->{foreignarchs} = []; if (scalar @architectures == 0) { warning "empty architecture list: falling back to native architecture" . " $hostarch"; } elsif (scalar @architectures == 1) { $options->{nativearch} = $architectures[0]; } else { $options->{nativearch} = $architectures[0]; push @{ $options->{foreignarchs} }, @architectures[1 .. $#architectures]; } debug "Native architecture (outside): $hostarch"; debug "Native architecture (inside): $options->{nativearch}"; debug("Foreign architectures (inside): " . 
(join ', ', @{ $options->{foreignarchs} })); { # FIXME: autogenerate this list my $deb2qemu = { alpha => 'alpha', amd64 => 'x86_64', arm => 'arm', arm64 => 'aarch64', armel => 'arm', armhf => 'arm', hppa => 'hppa', i386 => 'i386', m68k => 'm68k', mips => 'mips', mips64 => 'mips64', mips64el => 'mips64el', mipsel => 'mipsel', powerpc => 'ppc', ppc64 => 'ppc64', ppc64el => 'ppc64le', riscv64 => 'riscv64', s390x => 's390x', sh4 => 'sh4', sparc => 'sparc', sparc64 => 'sparc64', }; if (any { $_ eq 'check/qemu' } @{ $options->{skip} }) { info "skipping check/qemu as requested"; } elsif ($options->{mode} eq "chrootless") { info "skipping emulation check in chrootless mode"; } elsif ($options->{variant} eq "extract") { info "skipping emulation check for extract variant"; } elsif ($hostarch ne $options->{nativearch}) { if (!can_execute 'arch-test') { error "install arch-test for foreign architecture support"; } my $withemu = 0; my $noemu = 0; { my $pid = open my $fh, '-|' // error "failed to fork(): $!"; if ($pid == 0) { { ## no critic (TestingAndDebugging::ProhibitNoWarnings) # don't print a warning if the following fails no warnings; exec 'arch-test', $options->{nativearch}; } # if exec didn't work (for example because the arch-test # program is missing) prepare for the worst and assume that # the architecture cannot be executed print "$options->{nativearch}: not supported on this" . " machine/kernel\n"; exit 1; } chomp( my $content = do { local $/; <$fh> } ); close $fh; if ($? == 0 and $content eq "$options->{nativearch}: ok") { $withemu = 1; } } { my $pid = open my $fh, '-|' // error "failed to fork(): $!"; if ($pid == 0) { { ## no critic (TestingAndDebugging::ProhibitNoWarnings) # don't print a warning if the following fails no warnings; exec 'arch-test', '-n', $options->{nativearch}; } # if exec didn't work (for example because the arch-test # program is missing) prepare for the worst and assume that # the architecture cannot be executed print "$options->{nativearch}: not supported on this" . " machine/kernel\n"; exit 1; } chomp( my $content = do { local $/; <$fh> } ); close $fh; if ($? == 0 and $content eq "$options->{nativearch}: ok") { $noemu = 1; } } # four different outcomes, depending on whether arch-test # succeeded with or without emulation # # withemu | noemu | # --------+-------+----------------- # 0 | 0 | test why emu doesn't work and quit # 0 | 1 | should never happen # 1 | 0 | use qemu emulation # 1 | 1 | don't use qemu emulation if ($withemu == 0 and $noemu == 0) { { open my $fh, '<', '/proc/filesystems' or error "failed to open /proc/filesystems: $!"; unless (grep { /^nodev\tbinfmt_misc$/ } (<$fh>)) { warning "binfmt_misc not found in /proc/filesystems --" . " is the module loaded?"; } close $fh; } { open my $fh, '<', '/proc/mounts' or error "failed to open /proc/mounts: $!"; unless ( grep { /^binfmt_misc\s+ \/proc\/sys\/fs\/binfmt_misc\s+ binfmt_misc\s+/x } (<$fh>) ) { warning "binfmt_misc not found in /proc/mounts -- not" . " mounted?"; } close $fh; } { if (!exists $deb2qemu->{ $options->{nativearch} }) { warning "no mapping from $options->{nativearch} to" . " qemu-user binary"; } elsif (!can_execute 'update-binfmts') { warning "cannot find update-binfmts"; } else { my $binfmt_identifier = 'qemu-' . $deb2qemu->{ $options->{nativearch} }; open my $fh, '-|', 'update-binfmts', '--display', $binfmt_identifier // error "failed to fork(): $!"; chomp( my $binfmts = do { local $/; <$fh> } ); close $fh; if ($? != 0 || $binfmts eq '') { warning "$binfmt_identifier is not a supported" . 
" binfmt name"; } } } error "$options->{nativearch} can neither be executed natively" . " nor via qemu user emulation with binfmt_misc"; } elsif ($withemu == 0 and $noemu == 1) { error "arch-test succeeded without emu but not with emu"; } elsif ($withemu == 1 and $noemu == 0) { info "$options->{nativearch} cannot be executed natively, but" . " transparently using qemu-user binfmt emulation"; if (!exists $deb2qemu->{ $options->{nativearch} }) { error "no mapping from $options->{nativearch} to qemu-user" . " binary"; } $options->{qemu} = $deb2qemu->{ $options->{nativearch} }; } elsif ($withemu == 1 and $noemu == 1) { info "$options->{nativearch} is different from $hostarch but" . " can be executed natively"; } else { error "logic error"; } } else { info "chroot architecture $options->{nativearch} is equal to the" . " host's architecture"; } } { $options->{suite} = undef; if (scalar @ARGV > 0) { $options->{suite} = shift @ARGV; if (scalar @ARGV > 0) { $options->{target} = shift @ARGV; } else { $options->{target} = '-'; } } else { info "No SUITE specified, expecting sources.list on standard input"; $options->{target} = '-'; } my $sourceslists = []; if (!defined $options->{suite}) { # If no suite was specified, then the whole sources.list has to # come from standard input info "reading sources.list from standard input..."; my $content = do { local $/; ## no critic (InputOutput::ProhibitExplicitStdin) ; }; my $type = guess_sources_format($content); if (!defined $type || ($type ne "deb822" and $type ne "one-line")) { error "cannot determine sources.list format"; } push @{$sourceslists}, { type => $type, fname => undef, content => $content, }; } else { my @components = (); foreach my $comp (@{ $options->{components} }) { my @comps = split /[,\s]+/, $comp; foreach my $c (@comps) { # strip leading and trailing whitespace $c =~ s/^\s+|\s+$//g; # skip if the remainder is an empty string if ($c eq "") { next; } # do not append component if it's already in the list if (any { $_ eq $c } @components) { next; } push @components, $c; } } my $compstr = join " ", @components; # if the currently selected apt keyrings do not contain the # necessary key material for the chosen suite, then attempt adding # a signed-by option my $signedby = ''; my %suite_by_vendor = get_suite_by_vendor(); { my $keyring = get_keyring_by_suite($options->{suite}, \%suite_by_vendor); if (!defined $keyring) { last; } # we can only check if we need the signed-by entry if we u # automatically chosen keyring exists if (!defined $keyring || !-e $keyring) { last; } # we can only check key material if gpg is installed my $gpghome = tempdir( "mmdebstrap.gpghome.XXXXXXXXXXXX", TMPDIR => 1, CLEANUP => 1 ); my @gpgcmd = ( 'gpg', '--quiet', '--ignore-time-conflict', '--no-options', '--no-default-keyring', '--homedir', $gpghome, '--no-auto-check-trustdb', ); my ($ret, $message); { my $fh; { # change warning handler to prevent message # Can't exec "gpg": No such file or directory local $SIG{__WARN__} = sub { $message = shift; }; $ret = open $fh, '-|', @gpgcmd, '--version'; } # we only want to check if the gpg command exists close $fh; } if ($? != 0 || !defined $ret || defined $message) { info "gpg --version failed: cannot determine the right" . 
" signed-by value"; last; } # initialize gpg trustdb with empty one { 0 == system(@gpgcmd, '--update-trustdb') or error "gpg failed to initialize trustdb:: $?"; } # find all the fingerprints of the keys apt currently # knows about my @keyrings = (); opendir my $dh, "$options->{apttrustedparts}" or error "cannot read $options->{apttrustedparts}"; while (my $filename = readdir $dh) { if ($filename !~ /\.(asc|gpg)$/) { next; } $filename = "$options->{apttrustedparts}/$filename"; # skip empty keyrings -s "$filename" || next; push @keyrings, "$filename"; } closedir $dh; if (-s $options->{apttrusted}) { push @keyrings, $options->{apttrusted}; } my @aptfingerprints = (); if (scalar @keyrings == 0) { $signedby = " [signed-by=\"$keyring\"]"; last; } { open(my $fh, '-|', @gpgcmd, '--with-colons', '--show-keys', @keyrings) // error "failed to fork(): $!"; while (my $line = <$fh>) { if ($line !~ /^fpr:::::::::([^:]+):/) { next; } push @aptfingerprints, $1; } close $fh; } if ($? != 0) { error "gpg failed"; } if (scalar @aptfingerprints == 0) { $signedby = " [signed-by=\"$keyring\"]"; last; } # check if all fingerprints from the keyring that we guessed # are known by apt and only add signed-by option if that's not # the case my @suitefingerprints = (); { open(my $fh, '-|', @gpgcmd, '--with-colons', '--show-keys', $keyring) // error "failed to fork(): $!"; while (my $line = <$fh>) { if ($line !~ /^fpr:::::::::([^:]+):/) { next; } # if this fingerprint is not known by apt, then we need #to add the signed-by option if (none { $_ eq $1 } @aptfingerprints) { $signedby = " [signed-by=\"$keyring\"]"; last; } } close $fh; } if ($? != 0) { error "gpg failed"; } } if (scalar @ARGV > 0) { for my $arg (@ARGV) { if ($arg eq '-') { info 'reading sources.list from standard input...'; my $content = do { local $/; ## no critic (InputOutput::ProhibitExplicitStdin) ; }; my $type = guess_sources_format($content); if (!defined $type || ($type ne 'deb822' and $type ne 'one-line')) { error "cannot determine sources.list format"; } # if last entry is of same type and without filename, # then append if ( scalar @{$sourceslists} > 0 && $sourceslists->[-1]{type} eq $type && !defined $sourceslists->[-1]{fname}) { $sourceslists->[-1]{content} .= ($type eq 'one-line' ? "\n" : "\n\n") . $content; } else { push @{$sourceslists}, { type => $type, fname => undef, content => $content, }; } } elsif ($arg =~ /^deb(-src)? /) { my $content = "$arg\n"; # if last entry is of same type and without filename, # then append if ( scalar @{$sourceslists} > 0 && $sourceslists->[-1]{type} eq 'one-line' && !defined $sourceslists->[-1]{fname}) { $sourceslists->[-1]{content} .= "\n" . $content; } else { push @{$sourceslists}, { type => 'one-line', fname => undef, content => $content, }; } } elsif ($arg =~ /:\/\//) { my $content = join ' ', ( "deb$signedby", $arg, $options->{suite}, "$compstr\n" ); # if last entry is of same type and without filename, # then append if ( scalar @{$sourceslists} > 0 && $sourceslists->[-1]{type} eq 'one-line' && !defined $sourceslists->[-1]{fname}) { $sourceslists->[-1]{content} .= "\n" . 
$content; } else { push @{$sourceslists}, { type => 'one-line', fname => undef, content => $content, }; } } elsif (-f $arg) { my $content = ''; open my $fh, '<', $arg or error "cannot open $arg: $!"; while (my $line = <$fh>) { $content .= $line; } close $fh; my $type = undef; if ($arg =~ /\.list$/) { $type = 'one-line'; } elsif ($arg =~ /\.sources$/) { $type = 'deb822'; } else { $type = guess_sources_format($content); } if (!defined $type || ($type ne 'deb822' and $type ne 'one-line')) { error "cannot determine sources.list format"; } push @{$sourceslists}, { type => $type, fname => basename($arg), content => $content, }; } else { error "invalid mirror: $arg"; } } } else { my $sourceslist = get_sourceslist_by_suite($options->{suite}, $options->{nativearch}, $signedby, $compstr, \%suite_by_vendor); push @{$sourceslists}, { type => 'one-line', fname => undef, content => $sourceslist, }; } } if (scalar @{$sourceslists} == 0) { error "empty apt sources.list"; } debug("sources list entries:"); for my $list (@{$sourceslists}) { if (defined $list->{fname}) { debug("fname: $list->{fname}"); } debug("type: $list->{type}"); debug("content:"); for my $line (split "\n", $list->{content}) { debug(" $line"); } } $options->{sourceslists} = $sourceslists; } if ($options->{target} eq '-') { if (POSIX::isatty STDOUT) { error "stdout is a an interactive tty"; } } else { my $abs_path = abs_path($options->{target}); if (!defined $abs_path) { error "unable to get absolute path of target directory" . " $options->{target}"; } $options->{target} = $abs_path; } if ($options->{target} eq '/') { error "refusing to use the filesystem root as output directory"; } my $tar_compressor = get_tar_compressor($options->{target}); # figure out the right format if ($format eq 'auto') { # (stat(...))[6] is the device identifier which contains the major and # minor numbers for character special files # major 1 and minor 3 is /dev/null on Linux if ( $options->{target} eq '/dev/null' and $OSNAME eq 'linux' and -c '/dev/null' and major((stat("/dev/null"))[6]) == 1 and minor((stat("/dev/null"))[6]) == 3) { $format = 'null'; } elsif ($options->{target} eq '-' and $OSNAME eq 'linux' and major((stat(STDOUT))[6]) == 1 and minor((stat(STDOUT))[6]) == 3) { # by checking the major and minor number of the STDOUT fd we also # can detect redirections to /dev/null and choose the null format # accordingly $format = 'null'; } elsif ($options->{target} ne '-' and -d $options->{target}) { $format = 'directory'; } elsif ( defined $tar_compressor or $options->{target} =~ /\.tar$/ or $options->{target} eq '-' or -p $options->{target} # named pipe (fifo) or -c $options->{target} # character special like /dev/null ) { $format = 'tar'; # check if the compressor is installed if (defined $tar_compressor) { my $pid = fork() // error "fork() failed: $!"; if ($pid == 0) { open(STDOUT, '>', '/dev/null') or error "cannot open /dev/null for writing: $!"; open(STDIN, '<', '/dev/null') or error "cannot open /dev/null for reading: $!"; exec { $tar_compressor->[0] } @{$tar_compressor} or error("cannot exec " . (join " ", @{$tar_compressor}) . ": $!"); } waitpid $pid, 0; if ($? != 0) { error("failed to start " . 
(join " ", @{$tar_compressor})); } } } elsif ($options->{target} =~ /\.(squashfs|sqfs)$/) { $format = 'squashfs'; # check if tar2sqfs is installed my $pid = fork() // error "fork() failed: $!"; if ($pid == 0) { open(STDOUT, '>', '/dev/null') or error "cannot open /dev/null for writing: $!"; open(STDIN, '<', '/dev/null') or error "cannot open /dev/null for reading: $!"; exec('tar2sqfs', '--version') or error("cannot exec tar2sqfs --version: $!"); } waitpid $pid, 0; if ($? != 0) { error("failed to start tar2sqfs --version"); } } elsif ($options->{target} =~ /\.ext2$/) { $format = 'ext2'; # check if the installed version of genext2fs supports tarballs on # stdin (undef, my $filename) = tempfile( "mmdebstrap.ext2.XXXXXXXXXXXX", OPEN => 0, TMPDIR => 1 ); open my $fh, '|-', 'genext2fs', '-B', '1024', '-b', '8', '-N', '11', '-a', '-', $filename // error "failed to fork(): $!"; # write 10240 null-bytes to genext2fs -- this represents an empty # tar archive print $fh ("\0" x 10240) or error "cannot write to genext2fs process"; close $fh; my $exitstatus = $?; unlink $filename // die "cannot unlink $filename"; if ($exitstatus != 0) { error "genext2fs failed with exit status: $exitstatus"; } } else { $format = 'directory'; } info "automatically chosen format: $format"; } if ($options->{target} eq '-' and $format ne 'tar' and $format ne 'null') { error "the $format format is unable to write to standard output"; } if ($format eq 'null' and none { $_ eq $options->{target} } ('-', '/dev/null')) { info "ignoring target $options->{target} with null format"; } if (any { $_ eq $format } ('tar', 'squashfs', 'ext2', 'null')) { if ($format ne 'null') { if ( any { $_ eq $options->{variant} } ('extract', 'custom') and any { $_ eq $options->{mode} } ('fakechroot', 'proot')) { info "creating a tarball or squashfs image or ext2 image in" . " fakechroot mode or proot mode might fail in extract and" . " custom variants because there might be no tar inside the" . " chroot"; } # try to fail early if target tarball or squashfs image cannot be # opened for writing if ($options->{target} ne '-') { if ($options->{dryrun}) { if (-e $options->{target}) { info "not overwriting $options->{target} because in" . " dry-run mode"; } } else { open my $fh, '>', $options->{target} or error "cannot open $options->{target} for writing: $!"; close $fh; } } } # since the output is a tarball, we create the rootfs in a temporary # directory $options->{root} = tempdir('mmdebstrap.XXXXXXXXXX', TMPDIR => 1); info "using $options->{root} as tempdir"; # in unshare and root mode, other users than the current user need to # access the rootfs, most prominently, the _apt user. Thus, make the # temporary directory world readable. if ( any { $_ eq $options->{mode} } ('unshare', 'root') or ($EFFECTIVE_USER_ID == 0 and $options->{mode} eq 'chrootless') ) { chmod 0755, $options->{root} or error "cannot chmod root: $!"; } } elsif ($format eq 'directory') { # user does not seem to have specified a tarball as output, thus work # directly in the supplied directory $options->{root} = $options->{target}; if (-e $options->{root}) { if (!-d $options->{root}) { error "$options->{root} exists and is not a directory"; } if (any { $_ eq 'check/empty' } @{ $options->{skip} }) { info "skipping check/empty as requested"; } else { # check if the directory is empty or contains nothing more than # an empty lost+found directory. The latter exists on freshly # created ext3 and ext4 partitions. 
# rationale for requiring an empty directory: # https://bugs.debian.org/833525 opendir(my $dh, $options->{root}) or error "Can't opendir($options->{root}): $!"; while (my $entry = readdir $dh) { # skip the "." and ".." entries next if $entry eq "."; next if $entry eq ".."; # if the entry is a directory named "lost+found" then skip # it, if it's empty if ($entry eq "lost+found" and -d "$options->{root}/$entry") { opendir(my $dh2, "$options->{root}/$entry"); # Attempt reading the directory thrice. If the third # time succeeds, then it has more entries than just "." # and ".." and must thus not be empty. readdir $dh2; readdir $dh2; # rationale for requiring an empty directory: # https://bugs.debian.org/833525 if (readdir $dh2) { error "$options->{root} contains a non-empty" . " lost+found directory"; } closedir($dh2); } else { error "$options->{root} is not empty"; } } closedir($dh); } } else { my $num_created = make_path "$options->{root}", { error => \my $err }; if ($err && @$err) { error(join "; ", (map { "cannot create " . (join ": ", %{$_}) } @$err)); } elsif ($num_created == 0) { error "cannot create $options->{root}"; } } } else { error "unknown format: $format"; } # check for double quotes because apt doesn't allow to escape them and # thus paths with double quotes are invalid in the apt config if ($options->{root} =~ /"/) { error "apt cannot handle paths with double quotes"; } my @idmap; # for unshare mode the rootfs directory has to have appropriate # permissions if ($EFFECTIVE_USER_ID != 0 and $options->{mode} eq 'unshare') { @idmap = read_subuid_subgid; # sanity check if ( scalar(@idmap) != 2 || $idmap[0][0] ne 'u' || $idmap[1][0] ne 'g' || !length $idmap[0][2] || !length $idmap[1][2]) { error "invalid idmap"; } my $outer_gid = $REAL_GROUP_ID + 0; my $pid = get_unshare_cmd( sub { chown 1, 1, $options->{root} }, [ ['u', '0', $REAL_USER_ID, '1'], ['g', '0', $outer_gid, '1'], ['u', '1', $idmap[0][2], '1'], ['g', '1', $idmap[1][2], '1']]); waitpid $pid, 0; $? == 0 or error "chown failed"; } # figure out whether we have mknod $options->{havemknod} = 0; if ($options->{mode} eq 'unshare') { my $pid = get_unshare_cmd( sub { $options->{havemknod} = havemknod($options->{root}); }, \@idmap ); waitpid $pid, 0; $? == 0 or error "havemknod failed"; } elsif ( any { $_ eq $options->{mode} } ('root', 'fakechroot', 'proot', 'chrootless') ) { $options->{havemknod} = havemknod($options->{root}); } else { error "unknown mode: $options->{mode}"; } my $devtar = ''; # We always craft the /dev entries ourselves if a tarball is to be created if (any { $_ eq $format } ('tar', 'squashfs', 'ext2')) { foreach my $file (@devfiles) { my ($fname, $mode, $type, $linkname, $devmajor, $devminor) = @{$file}; if (length "./dev/$fname" > 100) { error "tar entry cannot exceed 100 characters"; } my $entry = pack( 'a100 a8 a8 a8 a12 a12 A8 a1 a100 a8 a32 a32 a8 a8 a155 x12', "./dev/$fname", sprintf('%07o', $mode), sprintf('%07o', 0), # uid sprintf('%07o', 0), # gid sprintf('%011o', 0), # size sprintf('%011o', $mtime), '', # checksum $type, $linkname, "ustar ", '', # username '', # groupname defined($devmajor) ? sprintf('%07o', $devmajor) : '', defined($devminor) ? 
sprintf('%07o', $devminor) : '', '', # prefix ); # compute and insert checksum substr($entry, 148, 7) = sprintf("%06o\0", unpack("%16C*", $entry)); $devtar .= $entry; } } elsif (any { $_ eq $format } ('directory', 'null')) { # nothing to do } else { error "unknown format: $format"; } my $exitstatus = 0; my @taropts = ( '--sort=name', "--mtime=\@$mtime", '--clamp-mtime', '--numeric-owner', '--one-file-system', '--format=pax', '--pax-option=exthdr.name=%d/PaxHeaders/%f,delete=atime,delete=ctime', '-c', '--exclude=./dev' ); # tar2sqfs and genext2fs do not support extended attributes if ($format eq "squashfs") { # tar2sqfs supports user.*, trusted.* and security.* but not system.* # https://bugs.debian.org/988100 # lib/sqfs/xattr/xattr.c of https://github.com/AgentD/squashfs-tools-ng # https://github.com/AgentD/squashfs-tools-ng/issues/83 # https://github.com/AgentD/squashfs-tools-ng/issues/25 warning("tar2sqfs does not support extended attributes" . " from the 'system' namespace"); push @taropts, '--xattrs', '--xattrs-exclude=system.*'; } elsif ($format eq "ext2") { warning "genext2fs does not support extended attributes"; } else { push @taropts, '--xattrs'; } # disable signals so that we can fork and change behaviour of the signal # handler in the parent and child without getting interrupted my $sigset = POSIX::SigSet->new(SIGINT, SIGHUP, SIGPIPE, SIGTERM); POSIX::sigprocmask(SIG_BLOCK, $sigset) or error "Can't block signals: $!"; my $pid; # a pipe to transfer the final tarball from the child to the parent pipe my $rfh, my $wfh; # instead of two pipe calls, creating four file handles, we use socketpair socketpair my $childsock, my $parentsock, AF_UNIX, SOCK_STREAM, PF_UNSPEC or error "socketpair failed: $!"; $options->{hooksock} = $childsock; # for communicating the required number of blocks, we don't need # bidirectional communication, so a pipe() is enough # we don't communicate this via the hook communication because # a) this would abuse the functionality exclusively for hooks # b) it puts code writing the protocol outside of the helper/listener # c) the forked listener process cannot communicate to its parent pipe my $nblkreader, my $nblkwriter or error "pipe failed: $!"; if ($options->{mode} eq 'unshare') { $pid = get_unshare_cmd( sub { # child local $SIG{'INT'} = 'DEFAULT'; local $SIG{'HUP'} = 'DEFAULT'; local $SIG{'PIPE'} = 'DEFAULT'; local $SIG{'TERM'} = 'DEFAULT'; # unblock all delayed signals (and possibly handle them) POSIX::sigprocmask(SIG_UNBLOCK, $sigset) or error "Can't unblock signals: $!"; close $rfh; close $parentsock; open(STDOUT, '>&', STDERR) or error "cannot open STDOUT: $!"; setup($options); print $childsock (pack('n', 0) . 'adios'); $childsock->flush(); close $childsock; close $nblkreader; if (!$options->{dryrun} && $format eq 'ext2') { my $numblocks = approx_disk_usage($options->{root}); print $nblkwriter "$numblocks\n"; $nblkwriter->flush(); } close $nblkwriter; if ($options->{dryrun}) { info "simulate creating tarball..."; } elsif (any { $_ eq $format } ('tar', 'squashfs', 'ext2')) { info "creating tarball..."; # redirect tar output to the writing end of the pipe so # that the parent process can capture the output open(STDOUT, '>&', $wfh) or error "cannot open STDOUT: $!"; # Add ./dev as the first entries of the tar file. # We cannot add them after calling tar, because there is no # way to prevent tar from writing NULL entries at the end. 
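            # (The "NULL entries" are the two 512 byte zero blocks that mark
            # the end of a tar archive; anything appended after them would be
            # ignored by most tar implementations, which is why the ./dev
            # entries have to be written first.)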
if (any { $_ eq 'output/dev' } @{ $options->{skip} }) { info "skipping output/dev as requested"; } else { print $devtar; } # pack everything except ./dev 0 == system('tar', @taropts, '-C', $options->{root}, '.') or error "tar failed: $?"; info "done"; } elsif (any { $_ eq $format } ('directory', 'null')) { # nothing to do } else { error "unknown format: $format"; } exit 0; }, \@idmap ); } elsif ( any { $_ eq $options->{mode} } ('root', 'fakechroot', 'proot', 'chrootless') ) { $pid = fork() // error "fork() failed: $!"; if ($pid == 0) { local $SIG{'INT'} = 'DEFAULT'; local $SIG{'HUP'} = 'DEFAULT'; local $SIG{'PIPE'} = 'DEFAULT'; local $SIG{'TERM'} = 'DEFAULT'; # unblock all delayed signals (and possibly handle them) POSIX::sigprocmask(SIG_UNBLOCK, $sigset) or error "Can't unblock signals: $!"; close $rfh; close $parentsock; open(STDOUT, '>&', STDERR) or error "cannot open STDOUT: $!"; setup($options); print $childsock (pack('n', 0) . 'adios'); $childsock->flush(); close $childsock; close $nblkreader; if (!$options->{dryrun} && $format eq 'ext2') { my $numblocks = approx_disk_usage($options->{root}); print $nblkwriter $numblocks; $nblkwriter->flush(); } close $nblkwriter; if ($options->{dryrun}) { info "simulate creating tarball..."; } elsif (any { $_ eq $format } ('tar', 'squashfs', 'ext2')) { info "creating tarball..."; # redirect tar output to the writing end of the pipe so that # the parent process can capture the output open(STDOUT, '>&', $wfh) or error "cannot open STDOUT: $!"; # Add ./dev as the first entries of the tar file. # We cannot add them after calling tar, because there is no way # to prevent tar from writing NULL entries at the end. if (any { $_ eq 'output/dev' } @{ $options->{skip} }) { info "skipping output/dev as requested"; } else { print $devtar; } if ($options->{mode} eq 'fakechroot') { # By default, FAKECHROOT_EXCLUDE_PATH includes /proc and # /sys which means that the resulting tarball will contain # the permission and ownership information of /proc and # /sys from the outside, which we want to avoid. ## no critic (Variables::RequireLocalizedPunctuationVars) $ENV{FAKECHROOT_EXCLUDE_PATH} = "/dev"; # Fakechroot requires tar to run inside the chroot or # otherwise absolute symlinks will include the path to the # root directory 0 == system('chroot', $options->{root}, 'tar', @taropts, '-C', '/', '.') or error "tar failed: $?"; } elsif ($options->{mode} eq 'proot') { # proot requires tar to run inside proot or otherwise # permissions will be completely off my @qemuopt = (); if (defined $options->{qemu}) { push @qemuopt, "--qemu=qemu-$options->{qemu}"; push @taropts, "--exclude=./host-rootfs"; } 0 == system('proot', '--root-id', "--rootfs=$options->{root}", '--cwd=/', @qemuopt, 'tar', @taropts, '-C', '/', '.') or error "tar failed: $?"; } elsif ( any { $_ eq $options->{mode} } ('root', 'chrootless') ) { # If the chroot directory is not owned by the root user, # then we assume that no measure was taken to fake root # permissions. Since the final tarball should contain # entries with root ownership, we instruct tar to do so. 
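                # ((stat)[4] is the numeric uid of the directory owner;
                # forcing --owner=0 --group=0 --numeric-owner below makes
                # every archive member appear as root:root regardless of who
                # created the files.)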
my @owneropts = (); if ((stat $options->{root})[4] != 0) { push @owneropts, '--owner=0', '--group=0', '--numeric-owner'; } 0 == system('tar', @taropts, @owneropts, '-C', $options->{root}, '.') or error "tar failed: $?"; } else { error "unknown mode: $options->{mode}"; } info "done"; } elsif (any { $_ eq $format } ('directory', 'null')) { # nothing to do } else { error "unknown format: $format"; } exit 0; } } else { error "unknown mode: $options->{mode}"; } # parent my $got_signal = 0; my $waiting_for = "setup"; my $ignore = sub { $got_signal = shift; info "main() received signal $got_signal: waiting for $waiting_for..."; }; local $SIG{'INT'} = $ignore; local $SIG{'HUP'} = $ignore; local $SIG{'PIPE'} = $ignore; local $SIG{'TERM'} = $ignore; # unblock all delayed signals (and possibly handle them) POSIX::sigprocmask(SIG_UNBLOCK, $sigset) or error "Can't unblock signals: $!"; close $wfh; close $childsock; debug "starting to listen for hooks"; # handle special hook commands via parentsock my $lpid = fork() // error "fork() failed: $!"; if ($lpid == 0) { # whatever the script writes on stdout is sent to the # socket # whatever is written to the socket, send to stdin open(STDOUT, '>&', $parentsock) or error "cannot open STDOUT: $!"; open(STDIN, '<&', $parentsock) or error "cannot open STDIN: $!"; my @prefix = (); if ($is_covering) { @prefix = ($EXECUTABLE_NAME, "-MDevel::Cover=-silent,-nogcov"); } exec @prefix, $PROGRAM_NAME, "--hook-listener", $verbosity_level; } waitpid($lpid, 0); if ($? != 0) { # we cannot die here because that would leave the other thread # running without a parent warning "listening on child socket failed: $@"; $exitstatus = 1; } debug "finish to listen for hooks"; close $parentsock; my $numblocks = 0; close $nblkwriter; if (!$options->{dryrun} && $format eq 'ext2') { chomp($numblocks = <$nblkreader>); } close $nblkreader; if ($options->{dryrun}) { # nothing to do } elsif (any { $_ eq $format } ('directory', 'null')) { # nothing to do } elsif (any { $_ eq $format } ('tar', 'squashfs', 'ext2')) { # we use eval() so that error() doesn't take this process down and # thus leaves the setup() process without a parent eval { if ($options->{target} eq '-') { if (!copy($rfh, *STDOUT)) { error "cannot copy to standard output: $!"; } } else { if ( $format eq 'squashfs' or $format eq 'ext2' or defined $tar_compressor) { my @argv = (); if ($format eq 'squashfs') { push @argv, 'tar2sqfs', '--quiet', '--no-skip', '--force', '--exportable', '--compressor', 'xz', '--block-size', '1048576', $options->{target}; } elsif ($format eq 'ext2') { if ($numblocks <= 0) { error "invalid number of blocks: $numblocks"; } push @argv, 'genext2fs', '-B', 1024, '-b', $numblocks, '-i', '16384', '-a', '-', $options->{target}; } elsif ($format eq 'tar') { push @argv, @{$tar_compressor}; } else { error "unknown format: $format"; } POSIX::sigprocmask(SIG_BLOCK, $sigset) or error "Can't block signals: $!"; my $cpid = fork() // error "fork() failed: $!"; if ($cpid == 0) { # child: default signal handlers local $SIG{'INT'} = 'DEFAULT'; local $SIG{'HUP'} = 'DEFAULT'; local $SIG{'PIPE'} = 'DEFAULT'; local $SIG{'TERM'} = 'DEFAULT'; # unblock all delayed signals (and possibly handle # them) POSIX::sigprocmask(SIG_UNBLOCK, $sigset) or error "Can't unblock signals: $!"; # redirect stdout to file or /dev/null if ($format eq 'squashfs' or $format eq 'ext2') { open(STDOUT, '>', '/dev/null') or error "cannot open /dev/null for writing: $!"; } elsif ($format eq 'tar') { open(STDOUT, '>', $options->{target}) or error "cannot 
open $options->{target} for writing: $!"; } else { error "unknown format: $format"; } open(STDIN, '<&', $rfh) or error "cannot open file handle for reading: $!"; eval { Devel::Cover::set_coverage("none") } if $is_covering; exec { $argv[0] } @argv or error("cannot exec " . (join " ", @argv) . ": $!"); } POSIX::sigprocmask(SIG_UNBLOCK, $sigset) or error "Can't unblock signals: $!"; waitpid $cpid, 0; if ($? != 0) { error("failed to run " . (join " ", @argv)); } } else { # somehow, when running under qemu, writing to a virtio # device will not result in a ENOSPC but just stall forever if (!copy($rfh, $options->{target})) { error "cannot copy to $options->{target}: $!"; } } } }; if ($@) { # we cannot die here because that would leave the other thread # running without a parent # We send SIGHUP to all our processes (including eventually # running tar and this process itself) to reliably tear down # all running child processes. The main process is not affected # because we are ignoring SIGHUP. warning "creating tarball failed: $@"; kill HUP => -getpgrp(); $exitstatus = 1; } } else { error "unknown format: $format"; } close($rfh); waitpid $pid, 0; if ($? != 0) { $exitstatus = 1; } # change signal handler message $waiting_for = "cleanup"; if (any { $_ eq $format } ('directory')) { # nothing to do } elsif (any { $_ eq $format } ('tar', 'squashfs', 'ext2', 'null')) { if (!-e $options->{root}) { error "$options->{root} does not exist"; } info "removing tempdir $options->{root}..."; if ($options->{mode} eq 'unshare') { # We don't have permissions to remove the directory outside # the unshared namespace, so we remove it here. # Since this is still inside the unshared namespace, there is # no risk of removing anything important. $pid = get_unshare_cmd( sub { # change CWD to chroot directory because find tries to # chdir to the current directory which might not be # accessible by the unshared user: # find: Failed to restore initial working directory 0 == system('env', "--chdir=$options->{root}", 'find', $options->{root}, '-mount', '-mindepth', '1', '-delete') or error "rm failed: $?"; # ignore failure in case the unshared user doesn't have the # required permissions -- we attempt again later if # necessary rmdir "$options->{root}"; }, \@idmap ); waitpid $pid, 0; $? == 0 or error "remove_tree failed"; # in unshare mode, the toplevel directory might've been created in # a directory that the unshared user cannot change and thus cannot # delete. We attempt its removal again outside as the normal user. if (-e $options->{root}) { rmdir "$options->{root}" or error "cannot rmdir $options->{root}: $!"; } } elsif ( any { $_ eq $options->{mode} } ('root', 'fakechroot', 'proot', 'chrootless') ) { # without unshare, we use the system's rm to recursively remove the # temporary directory just to make sure that we do not accidentally # remove more than we should by using --one-file-system. # # --interactive=never is needed when in proot mode, the # write-protected file /apt/apt.conf.d/01autoremove-kernels is to # be removed. 0 == system('rm', '--interactive=never', '--recursive', '--preserve-root', '--one-file-system', $options->{root}) or error "rm failed: $?"; } else { error "unknown mode: $options->{mode}"; } } else { error "unknown format: $format"; } if ($got_signal) { $exitstatus = 1; } if ($exitstatus == 0) { my $duration = Time::HiRes::time - $before; info "success in " . (sprintf "%.04f", $duration) . 
" seconds"; exit 0; } error "mmdebstrap failed to run"; return 1; } main(); __END__ =head1 NAME mmdebstrap - multi-mirror Debian chroot creation =head1 SYNOPSIS B [B] [I [I [I...]]] =head1 DESCRIPTION B creates a Debian chroot of I into I from one or more Is. It is meant as an alternative to the debootstrap tool (see section B). In contrast to debootstrap it uses apt to resolve dependencies and is thus able to use more than one mirror and resolve more complex dependencies. If no I option is provided, L is used. If I is a stable release name and no I is specified, then mirrors for updates and security are automatically added. If a I option starts with "deb " or "deb-src " then it is used as a one-line-style format entry for apt's sources.list inside the chroot. If a I option contains a "://" then it is interpreted as a mirror URI and the apt line inside the chroot is assembled as "deb [arch=A] B C D" where A is the host's native architecture, B is the I, C is the given I and D is the components given via B<--components> (defaults to "main"). If a I option happens to be an existing file, then its contents are pasted into the chroot's sources.list. This can be used to supply a deb822 style sources.list. If I is C<-> then standard input is pasted into the chroot's sources.list. More than one mirror can be specified and are appended to the chroot's sources.list in the given order. If you specify a https or tor I and you want the chroot to be able to update itself, don't forget to also install the ca-certificates package, the apt-transport-https package for apt versions less than 1.5 and/or the apt-transport-tor package using the B<--include> option, as necessary. The optional I argument can either be the path to a directory, the path to a tarball filename, the path to a squashfs image, the path to an ext2 image, a FIFO, a character special device, or C<->. Without the B<--format> option, I will be used to choose the format. See the section B for more information. If no I was specified or if I is C<->, an uncompressed tarball will be sent to standard output. The I may be a valid release code name (eg, sid, stretch, jessie) or a symbolic name (eg, unstable, testing, stable, oldstable). Any suite name that works with apt on the given mirror will work. If no I was specified, then a single I C<-> is added and thus the information of the desired suite has to come from standard input as part of a valid apt sources.list file. The value of the I argument will be used to determine which apt index to use for finding out the set of C packages and/or the set of packages with the right priority for the selected variant. See the section B for more information. All status output is printed to standard error unless B<--logfile> is used to redirect it to a file or B<--quiet> or B<--silent> is used to suppress any output on standard error. Help and version information will be printed to standard error with the B<--help> and B<--version> options, respectively. Otherwise, an uncompressed tarball might be sent to standard output if I is C<-> or if no I was specified. =head1 OPTIONS Options are case insensitive. Short options may be bundled. Long options require a double dash and may be abbreviated to uniqueness. =over 8 =item B<-h,--help> Print synopsis and options of this man page and exit. =item B<--man> Show the full man page as generated from Perl POD in a pager. This requires the perldoc program from the perl-doc package. 
This is the same as running: pod2man /usr/bin/mmdebstrap | man -l - =item B<--version> Print the B version and exit. =item B<--variant>=I Choose which package set to install. Valid variant Is are B, B, B, B, B, B, B, B, B, B<->, and B. The default variant is B. See the section B for more information. =item B<--mode>=I Choose how to perform the chroot operation and create a filesystem with ownership information different from the current user. Valid mode Is are B, B, B, B, B, B, B and B. The default mode is B. See the section B for more information. =item B<--format>=I Choose the output format. Valid format Is are B, B, B, B, B and B. The default format is B. See the section B for more information. =item B<--aptopt>=I