forked from josch/mmdebstrap
You cannot select more than 25 topics
Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.
7661 lines
309 KiB
7661 lines
309 KiB
#!/usr/bin/perl |
|
# |
|
# © 2018 - 2023 Johannes Schauer Marin Rodrigues <josch@mister-muffin.de> |
|
# |
|
# Permission is hereby granted, free of charge, to any person obtaining a copy |
|
# of this software and associated documentation files (the "Software"), to |
|
# deal in the Software without restriction, including without limitation the |
|
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
|
# sell copies of the Software, and to permit persons to whom the Software is |
|
# furnished to do so, subject to the following conditions: |
|
# |
|
# The above copyright notice and this permission notice shall be included in |
|
# all copies or substantial portions of the Software. |
|
# |
|
# The software is provided "as is", without warranty of any kind, express or |
|
# implied, including but not limited to the warranties of merchantability, |
|
# fitness for a particular purpose and noninfringement. In no event shall the |
|
# authors or copyright holders be liable for any claim, damages or other |
|
# liability, whether in an action of contract, tort or otherwise, arising |
|
# from, out of or in connection with the software or the use or other dealings |
|
# in the software. |
|
|
|
use strict; |
|
use warnings; |
|
|
|
our $VERSION = '1.3.5'; |
|
|
|
use English; |
|
use Getopt::Long; |
|
use Pod::Usage; |
|
use File::Copy; |
|
use File::Path qw(make_path); |
|
use File::Temp qw(tempfile tempdir); |
|
use File::Basename; |
|
use File::Find; |
|
use Cwd qw(abs_path getcwd); |
|
require "syscall.ph"; ## no critic (Modules::RequireBarewordIncludes) |
|
require "sys/ioctl.ph"; ## no critic (Modules::RequireBarewordIncludes) |
|
use Fcntl qw(S_IFCHR S_IFBLK FD_CLOEXEC F_GETFD F_SETFD); |
|
use List::Util qw(any none); |
|
use POSIX |
|
qw(SIGINT SIGHUP SIGPIPE SIGTERM SIG_BLOCK SIG_UNBLOCK strftime isatty); |
|
use Carp; |
|
use Term::ANSIColor; |
|
use Socket; |
|
use Time::HiRes; |
|
use Math::BigInt; |
|
use Text::ParseWords; |
|
use version; |
|
|
|
## no critic (InputOutput::RequireBriefOpen) |
|
|
|
# Kernel constants from sched.h, linux/capability.h, linux/prctl.h and
# sys/mount.h. They are bound via typeglob aliases to references of
# literals instead of "use constant" because "use constant" has several
# drawbacks, as explained in the documentation of the Readonly CPAN
# module.
*CLONE_NEWNS   = \0x20000;       # mount namespace
*CLONE_NEWUTS  = \0x4000000;     # utsname
*CLONE_NEWIPC  = \0x8000000;     # ipc
*CLONE_NEWUSER = \0x10000000;    # user
*CLONE_NEWPID  = \0x20000000;    # pid
*CLONE_NEWNET  = \0x40000000;    # net
*_LINUX_CAPABILITY_VERSION_3 = \0x20080522;
*CAP_SYS_ADMIN   = \21;
*PR_CAPBSET_READ = \23;
# from sys/mount.h
*MS_BIND    = \0x1000;
*MS_REC     = \0x4000;
*MNT_DETACH = \2;
our (
    $CLONE_NEWNS, $CLONE_NEWUTS,
    $CLONE_NEWIPC, $CLONE_NEWUSER,
    $CLONE_NEWPID, $CLONE_NEWNET,
    $_LINUX_CAPABILITY_VERSION_3, $CAP_SYS_ADMIN,
    $PR_CAPBSET_READ, $MS_BIND,
    $MS_REC, $MNT_DETACH
);
|
|
|
#<<<
# Table of device nodes to create below /dev.
# type codes:
#   0 -> normal file        3 -> character special
#   1 -> hardlink           4 -> block special
#   2 -> symlink            5 -> directory
my @devfiles = (
    # filename  mode      type  link target       major  minor
    ["",        oct(755), 5,    '',               undef, undef],
    ["console", oct(666), 3,    '',               5,     1],
    ["fd",      oct(777), 2,    '/proc/self/fd',  undef, undef],
    ["full",    oct(666), 3,    '',               1,     7],
    ["null",    oct(666), 3,    '',               1,     3],
    ["ptmx",    oct(666), 3,    '',               5,     2],
    ["pts/",    oct(755), 5,    '',               undef, undef],
    ["random",  oct(666), 3,    '',               1,     8],
    ["shm/",    oct(755), 5,    '',               undef, undef],
    ["stderr",  oct(777), 2,    '/proc/self/fd/2', undef, undef],
    ["stdin",   oct(777), 2,    '/proc/self/fd/0', undef, undef],
    ["stdout",  oct(777), 2,    '/proc/self/fd/1', undef, undef],
    ["tty",     oct(666), 3,    '',               5,     0],
    ["urandom", oct(666), 3,    '',               1,     9],
    ["zero",    oct(666), 3,    '',               1,     5],
);
#>>>
|
|
|
# verbosity levels:
#   0 -> print nothing
#   1 -> normal output and progress bars
#   2 -> verbose output
#   3 -> debug output
my $verbosity_level = 1;

# Probe whether we run under Devel::Cover. $@ is localized so that this
# probe does not leak an "Undefined subroutine called" message into
# other places that later inspect $@.
my $is_covering = 0;
{
    local $@ = '';
    $is_covering = !!(eval { Devel::Cover::get_coverage() });
}
|
|
|
# Perl::Critic warns about -t because it suspects an interactivity test,
# for which the magic *ARGV handling makes IO::Interactive advisable.
# Here we only want to know whether STDERR is opened to a tty, so plain
# -t is fine and not "fragile and complicated" as claimed by
# InputOutput::ProhibitInteractiveTest. Also see
# https://github.com/Perl-Critic/Perl-Critic/issues/918
sub stderr_is_tty() {
    ## no critic (InputOutput::ProhibitInteractiveTest)
    return (-t STDERR) ? 1 : 0;
}
|
|
|
# Emit a debug line ("D: <pid> <line> <msg>") on STDERR, but only at
# verbosity level 3. Colored with the 'clear' attribute when STDERR is
# a terminal.
sub debug {
    return if $verbosity_level < 3;
    my $msg = shift;
    my (undef, undef, $line) = caller;
    $msg = "D: $PID $line $msg";
    $msg = colored($msg, 'clear') if stderr_is_tty();
    print STDERR "$msg\n";
    return;
}
|
|
|
# Emit an informational line ("I: <msg>") on STDERR unless verbosity is
# 0. At debug verbosity the pid and the caller's line number are
# prepended. Colored green when STDERR is a terminal.
sub info {
    return if $verbosity_level == 0;
    my $msg = shift;
    if ($verbosity_level >= 3) {
        my (undef, undef, $line) = caller;
        $msg = "$PID $line $msg";
    }
    $msg = "I: $msg";
    $msg = colored($msg, 'green') if stderr_is_tty();
    print STDERR "$msg\n";
    return;
}
|
|
|
# Emit a warning line ("W: <msg>") on STDERR unless verbosity is 0.
# Colored bold yellow when STDERR is a terminal.
sub warning {
    return if $verbosity_level == 0;
    my $msg = shift;
    $msg = "W: $msg";
    $msg = colored($msg, 'bold yellow') if stderr_is_tty();
    print STDERR "$msg\n";
    return;
}
|
|
|
# Abort with an error message ("E: <msg>"). At debug verbosity the
# message is raised via croak so that a backtrace is produced; otherwise
# plain die is used. Colored bold red when STDERR is a terminal.
sub error {
    # strip a trailing newline in case we were handed the string of a
    # previous error() that was caught inside an eval()
    chomp(my $msg = shift);
    $msg = "E: $msg";
    $msg = colored($msg, 'bold red') if stderr_is_tty();
    if ($verbosity_level == 3) {
        croak $msg;    # produces a backtrace
    }
    die "$msg\n";
}
|
|
|
# dev_t encodes major/minor as MMMM Mmmm mmmM MMmm, where M is a hex
# digit of the major number and m one of the minor number. Extract the
# major number from $rdev.
sub major {
    my $rdev = shift;
    my $low_bits
      = Math::BigInt->from_hex("0x00000000000fff00")->band($rdev)->brsft(8);
    my $high_bits
      = Math::BigInt->from_hex("0xfffff00000000000")->band($rdev)->brsft(32);
    return $low_bits->bior($high_bits);
}
|
|
|
# Extract the minor number from a dev_t value (see major() for the bit
# layout).
sub minor {
    my $rdev = shift;
    my $low_bits = Math::BigInt->from_hex("0x00000000000000ff")->band($rdev);
    my $high_bits
      = Math::BigInt->from_hex("0x00000ffffff00000")->band($rdev)->brsft(12);
    return $low_bits->bior($high_bits);
}
|
|
|
# Check whether $tool can be executed by running "$tool --version" and
# requiring a zero exit status together with non-empty output (stderr
# is folded into stdout and captured as well). Returns 1 or 0.
sub can_execute {
    my $tool = shift;
    # The parentheses around open() are essential: without them, // binds
    # to the always-defined string '-|' instead of to open()'s return
    # value, so a failed open() would leave $pid undefined and the
    # parent would wrongly fall into the child branch below.
    my $pid = open(my $fh, '-|') // return 0;
    if ($pid == 0) {
        # child: redirect stderr into the pipe as well
        open(STDERR, '>&', STDOUT) or die;
        exec {$tool} $tool, '--version' or die;
    }
    chomp(
        my $content = do { local $/; <$fh> }
    );
    close $fh;
    if ($? != 0) {
        return 0;
    }
    if (length $content == 0) {
        return 0;
    }
    return 1;
}
|
|
|
# Decide whether $dir is a mount point by stat()ing the directory and
# its parent: a different device number means a filesystem boundary,
# and an identical inode number (as happens for "/") also indicates a
# mount. Nonexistent paths are never mount points.
sub is_mountpoint {
    my $dir = shift;
    return 0 if !-e $dir;
    my @self_stat   = stat "$dir/.";
    my @parent_stat = stat "$dir/..";
    # device numbers differ -> mounted
    return 1 if $self_stat[0] != $parent_stat[0];
    # inode numbers identical -> mounted
    return 1 if $self_stat[1] == $parent_stat[1];
    return 0;
}
|
|
|
# tar cannot figure out the decompression program when receiving data
# on standard input, so map the archive filename suffix to a compressor
# ourselves (suffix list copied from tar's src/suffix.c). Returns an
# array ref holding the program name, or nothing for stdin ('-'), plain
# .tar archives and unknown suffixes.
sub get_tar_compressor {
    my $filename = shift;
    return if $filename eq '-';
    return if $filename =~ /\.tar$/;
    my @suffix_map = (
        [qr/\.(gz|tgz|taz)$/,       'gzip'],
        [qr/\.(Z|taZ)$/,            'compress'],
        [qr/\.(bz2|tbz|tbz2|tz2)$/, 'bzip2'],
        [qr/\.lz$/,                 'lzip'],
        [qr/\.(lzma|tlz)$/,         'lzma'],
        [qr/\.lzo$/,                'lzop'],
        [qr/\.lz4$/,                'lz4'],
        [qr/\.(xz|txz)$/,           'xz'],
        [qr/\.zst$/,                'zstd'],
    );
    for my $entry (@suffix_map) {
        my ($pattern, $program) = @{$entry};
        return [$program] if $filename =~ $pattern;
    }
    return;
}
|
|
|
# Quote $string for safe use in a shell command line, using the same
# mechanism as Python's shlex.quote (avoids a dependency on
# String::ShellQuote).
sub shellescape {
    my $string = shift;
    return "''" if length($string) == 0;
    # Strings consisting only of safe characters need no quoting; the
    # /a modifier restricts \w to ASCII.
    return $string unless $string =~ m/[^\w@\%+=:,.\/-]/a;
    # Wrap in single quotes; embedded single quotes are emitted outside
    # of the single-quoted string as '"'"'.
    $string =~ s/'/'"'"'/g;
    return "'$string'";
}
|
|
|
# Probe whether an unprivileged user namespace can be set up: the
# unshare() syscall must succeed, newuidmap/newgidmap must be installed
# and /etc/subuid plus /etc/subgid must yield usable id mappings.
# Returns 1 on success, 0 otherwise. With $verbose set, failures are
# fatal errors instead of mere debug messages.
sub test_unshare_userns {
    my $verbose = shift;

    local *maybe_error = sub {
        my $msg = shift;
        if ($verbose) {
            error $msg;
        } else {
            debug $msg;
        }
    };

    if ($EFFECTIVE_USER_ID == 0) {
        maybe_error("cannot unshare user namespace when executing as root");
        return 0;
    }
    # syscall arguments have to live in their own variable, otherwise
    # perl complains with "Modification of a read-only value attempted"
    my $unshare_flags = $CLONE_NEWUSER;
    # probe inside a forked child: a successful unshare() must not
    # affect the mmdebstrap process itself
    my $pid = fork() // error "fork() failed: $!";
    if ($pid == 0) {
        my $ret = syscall(&SYS_unshare, $unshare_flags);
        if ($ret == 0) {
            exit 0;
        }
        maybe_error("unshare syscall failed: $!");
        exit 1;
    }
    waitpid($pid, 0);
    return 0 if ($? >> 8) != 0;
    # when newuidmap and newgidmap exist, running them without
    # parameters exits with status 1
    system "newuidmap 2>/dev/null";
    if (($? >> 8) != 1) {
        if (($? >> 8) == 127) {
            maybe_error("cannot find newuidmap");
        } else {
            maybe_error("newuidmap returned unknown exit status: $?");
        }
        return 0;
    }
    system "newgidmap 2>/dev/null";
    if (($? >> 8) != 1) {
        if (($? >> 8) == 127) {
            maybe_error("cannot find newgidmap");
        } else {
            maybe_error("newgidmap returned unknown exit status: $?");
        }
        return 0;
    }
    my @idmap = read_subuid_subgid($verbose);
    if (scalar @idmap == 0) {
        maybe_error("failed to parse /etc/subuid and /etc/subgid");
        return 0;
    }
    # Too much can go wrong in the full user-namespace dance; instead of
    # threading maybe_error() through that already-complex code path,
    # wrap the attempt in eval().
    eval {
        $pid = get_unshare_cmd(
            sub {
                if ($EFFECTIVE_USER_ID == 0) {
                    exit 0;
                } else {
                    exit 1;
                }
            },
            \@idmap
        );
        waitpid $pid, 0;
        if ($? != 0) {
            maybe_error("failed to unshare the user namespace");
            return 0;
        }
    };
    if ($@) {
        maybe_error($@);
        return 0;
    }
    return 1;
}
|
|
|
# Read the subordinate id ranges of the current user from /etc/subuid
# and /etc/subgid. Returns a two-entry list
#   (["u", 0, <subuid-start>, <count>], ["g", 0, <subgid-start>, <count>])
# suitable for get_unshare_cmd(), or the empty list when either file is
# missing, unreadable or has no (unique) entry for the user. With
# $verbose set, problems are reported as warnings instead of debug
# messages.
sub read_subuid_subgid {
    my $verbose  = shift;
    my @result   = ();
    my $username = getpwuid $REAL_USER_ID;
    my ($subid, $num_subid, $fh, $n);

    local *maybe_warn = sub {
        my $msg = shift;
        if ($verbose) {
            warning $msg;
        } else {
            debug $msg;
        }
    };

    # both files share the same format and parsing logic; only the map
    # type and the number of entries expected so far differ
    for my $source (["u", "/etc/subuid", 1], ["g", "/etc/subgid", 2]) {
        my ($type, $path, $expected) = @{$source};
        if (!-e $path) {
            maybe_warn("$path doesn't exist");
            return;
        }
        if (!-r $path) {
            maybe_warn("$path is not readable");
            return;
        }

        open $fh, "<", $path
          or maybe_warn("cannot open $path for reading: $!");
        if (!$fh) {
            return;
        }
        while (my $line = <$fh>) {
            ($n, $subid, $num_subid) = split(/:/, $line, 3);
            last if ($n eq $username);
        }
        close $fh;
        if (!length $subid) {
            maybe_warn("$path is empty");
            return;
        }
        if ($n ne $username) {
            maybe_warn("no entry in $path for $username");
            return;
        }
        push @result, [$type, 0, $subid, $num_subid];

        if (scalar(@result) < $expected) {
            maybe_warn("$path does not contain an entry for $username");
            return;
        }
        if (scalar(@result) > $expected) {
            maybe_warn("$path contains multiple entries for $username");
            return;
        }
    }

    return @result;
}
|
|
|
# Run the code reference $cmd inside freshly unshared namespaces, with
# ids mapped according to $idmap (a list of [type, hostid, nsid, range]
# entries, type being "u", "g" or "b" for both).
#
# Process tree: the caller (A) fork()s B so that A itself is never
# unshared. B fork()s again into D (parent) and E (child), which share
# a pipe. D calls unshare() and closes its end of the pipe; E notices
# the EOF, runs newuidmap/newgidmap on D and exits. D waits for E,
# becomes root inside the new namespace and — because an unshared PID
# namespace needs a pid 1 that stays alive for further fork()s to work
# — fork()s one more time into F (the dummy pid 1) and G, which finally
# runs $cmd.
#
# Returns the pid of the intermediate child (B) to the caller.
sub get_unshare_cmd {
    my $cmd   = shift;
    my $idmap = shift;

    # unsharing the mount namespace (NEWNS) requires CAP_SYS_ADMIN
    my $unshare_flags
      = $CLONE_NEWNS | $CLONE_NEWPID | $CLONE_NEWUTS | $CLONE_NEWIPC;

    # CLONE_NEWUSER is only needed while we are not yet root
    if ($EFFECTIVE_USER_ID != 0) {
        $unshare_flags |= $CLONE_NEWUSER;
    }

    if (0) {
        $unshare_flags |= $CLONE_NEWNET;
    }

    # fork so that only the child is unshare()d, never the caller
    my $gcpid = fork() // error "fork() failed: $!";
    if ($gcpid == 0) {
        # Pipe over which this process signals its child that unshare()
        # is done, so the child may set up uid_map and gid_map.
        pipe my $rfh, my $wfh;
        # The child modifies the parent (and not the other way around)
        # because:
        # - new[ug]idmap can only be called on a process id after that
        #   process has unshared the user namespace
        # - a process loses its capabilities when it performs an
        #   execve() with nonzero user ids (see capabilities(7))
        # - a process that unshared the user namespace by default lacks
        #   the privileges to call new[ug]idmap on itself
        # The reverse arrangement (child unshares, parent maps) would
        # waste a pid, since the parent would have to stay around until
        # the child exits, and would need a second pipe for the parent
        # to signal completion of new[ug]idmap; here that signal is
        # simply the child's exit, observed via wait().

        my $ppid = $$;
        my $cpid = fork() // error "fork() failed: $!";
        if ($cpid == 0) {
            # child (E)

            # Close our write end of the pipe so that we observe EOF
            # once the parent closes its descriptor.
            close $wfh;

            # Wait for the parent to finish its unshare() call by
            # waiting for the EOF.
            0 == sysread $rfh, my $c, 1 or error "read() did not receive EOF";

            # already root: no need for newuidmap/newgidmap
            if ($EFFECTIVE_USER_ID == 0) {
                exit 0;
            }

            # The new[ug]idmap binaries have to be used because they are
            # setuid root, which is required to map the ranges from
            # /etc/sub[ug]id into the namespace the parent just set up;
            # without those privileges only the user's own id could be
            # mapped. Being setuid root, they also spare us writing
            # "deny" to /proc/$$/setgroups first (which unprivileged
            # writers of /proc/$$/gid_map need since kernel 3.19), so
            # the parent keeps its ability to change its own group.
            #
            # /proc/$ppid/[ug]id_map may each be written only once, so
            # one combined command line per tool is assembled instead of
            # making multiple new[ug]idmap calls.
            my $uidmapcmd = "";
            my $gidmapcmd = "";
            foreach (@{$idmap}) {
                my ($t, $hostid, $nsid, $range) = @{$_};
                if ($t ne "u" and $t ne "g" and $t ne "b") {
                    error "invalid idmap type: $t";
                }
                if ($t eq "u" or $t eq "b") {
                    $uidmapcmd .= " $hostid $nsid $range";
                }
                if ($t eq "g" or $t eq "b") {
                    $gidmapcmd .= " $hostid $nsid $range";
                }
            }
            my $idmapcmd = '';
            if ($uidmapcmd ne "") {
                0 == system "newuidmap $ppid $uidmapcmd"
                  or error "newuidmap $ppid $uidmapcmd failed: $!";
            }
            if ($gidmapcmd ne "") {
                0 == system "newgidmap $ppid $gidmapcmd"
                  or error "newgidmap $ppid $gidmapcmd failed: $!";
            }
            exit 0;
        }

        # parent (D)

        # immediately unshare after the fork() ...
        0 == syscall &SYS_unshare, $unshare_flags
          or error "unshare() failed: $!";

        # ... and signal the child that unshare() is done by sending an
        # EOF over the pipe.
        close $wfh;

        # wait for the child to finish setting up the id maps
        $cpid == waitpid $cpid, 0 or error "waitpid() failed: $!";
        my $exit = $? >> 8;
        if ($exit != 0) {
            error "child had a non-zero exit status: $exit";
        }

        # At this point uid and gid are 65534 (nobody); become the root
        # user and group instead.
        #
        # Direct syscalls are used rather than assigning $(, $), $< and
        # $> because perl would otherwise do additional work we neither
        # need nor want here, like consulting
        # /proc/sys/kernel/ngroups_max (which might not exist) and
        # calling setgroups() in a way that makes root a member of the
        # group "unknown".
        if ($EFFECTIVE_USER_ID != 0) {
            0 == syscall &SYS_setgid, 0 or error "setgid failed: $!";
            0 == syscall &SYS_setuid, 0 or error "setuid failed: $!";
            0 == syscall &SYS_setgroups, 0, 0 or error "setgroups failed: $!";
        }

        if (1) {
            # With the pid namespace unshared, processes expect a pid 1
            # to stay alive inside the namespace; otherwise nothing new
            # can be forked once pid 1 exits. So fork() once more (like
            # "unshare --fork" does) and keep this process around as the
            # dummy pid 1 while the child does the real work.
            my $cpid = fork() // error "fork() failed: $!";
            if ($cpid != 0) {
                # remain alive as pid 1 until the worker child finishes
                $cpid == waitpid $cpid, 0 or error "waitpid() failed: $!";
                exit($? >> 8);
            }
        }

        &{$cmd}();

        exit 0;
    }

    # caller (A)
    return $gcpid;
}
|
|
|
# Test whether device nodes can be created and used inside $root (this
# fails e.g. in unprivileged namespaces or on certain filesystems) by
# creating, writing to and removing the scratch node $root/test-dev-null
# with mknod(1). Returns 1 when the node works, 0 otherwise; errors out
# if the scratch path already exists or cannot be cleaned up.
sub havemknod {
    my $root = shift;
    my $havemknod = 0;
    if (-e "$root/test-dev-null") {
        error "/test-dev-null already exists";
    }
  TEST: {
        # fork so that we can capture STDERR of mknod as well.
        # The parentheses around open() are essential: without them, //
        # binds to the always-defined string '-|' instead of to open()'s
        # return value, so the error branch for a failed fork could
        # never trigger and $pid would be left undefined.
        my $pid = open(my $fh, '-|') // error "failed to fork(): $!";
        if ($pid == 0) {
            open(STDERR, '>&', STDOUT) or error "cannot open STDERR: $!";
            # mknod(1) is used instead of the raw syscall because
            # assembling the dev_t argument would require makedev(3)
            exec 'mknod', "$root/test-dev-null", 'c', '1', '3';
        }
        chomp(
            my $content = do { local $/; <$fh> }
        );
        close $fh;
        {
            # creating the node must succeed silently, produce a char
            # device, and the device must accept writes
            last TEST unless $? == 0 and $content eq '';
            last TEST unless -c "$root/test-dev-null";
            last TEST unless open my $fh, '>', "$root/test-dev-null";
            last TEST unless print $fh 'test';
        }
        $havemknod = 1;
    }
    if (-e "$root/test-dev-null") {
        unlink "$root/test-dev-null"
          or error "cannot unlink /test-dev-null: $!";
    }
    return $havemknod;
}
|
|
|
# Determine the column width of the terminal STDERR is attached to via
# the TIOCGWINSZ ioctl on /dev/tty (inspired by perlfaq8). Returns -1
# when STDERR is not a tty or the width cannot be determined.
sub terminal_width {
    return -1 if !stderr_is_tty();
    return -1 if !defined &TIOCGWINSZ;
    return -1 if !-e "/dev/tty";
    open(my $tty_fh, "+<", "/dev/tty") or return -1;
    my $winsize = '';
    ioctl($tty_fh, &TIOCGWINSZ, $winsize) or return -1;
    # struct winsize holds four unsigned shorts: rows, cols, xpixel,
    # ypixel — we only need the columns
    my (undef, $col, undef, undef) = unpack('S4', $winsize);
    return $col;
}
|
|
|
# Print the current status, the percentage and a progress bar on STDERR
# — only at verbosity level 1 and only when STDERR is an interactive
# tty. Layout on a line: 12 chars status, 7 chars percentage, then a
# progress bar (between 10 and 60 chars) as far as the terminal width
# allows. Passing the string "done" as percentage finishes the line.
sub print_progress {
    return if $verbosity_level != 1;
    return if !stderr_is_tty();
    my ($perc, $status) = @_;
    my $len_status   = 12;
    my $len_perc     = 7;
    my $len_prog_min = 10;
    my $len_prog_max = 60;
    my $twidth       = terminal_width();

    return if $twidth <= $len_status;
    # \e[2K clears everything on the current line (i.e. the previous
    # progress bar)
    print STDERR "\e[2K";
    if ($perc eq "done") {
        print STDERR "done\n";
        return;
    }
    if (defined $status) {
        printf STDERR "%*s", -$len_status, "$status:";
    } else {
        print STDERR (" " x $len_status);
    }
    if ($twidth <= $len_status + $len_perc) {
        print STDERR "\r";
        return;
    }
    $perc = 100 if $perc >= 100;
    printf STDERR "%*.2f", $len_perc, $perc;
    if ($twidth <= $len_status + $len_perc + $len_prog_min) {
        print STDERR "\r";
        return;
    }
    my $len_prog = $twidth - $len_perc - $len_status;
    $len_prog = $len_prog_max if $len_prog > $len_prog_max;
    # bar interior is $len_prog - 3 wide ("[", "]" and ">" take the rest)
    my $filled = int($perc * ($len_prog - 3) / 100);
    my $bar    = '=' x $filled;
    if ($filled != ($len_prog - 3)) {
        $bar .= '>';
        $bar .= ' ' x ($len_prog - $filled - 4);
    }
    print STDERR " [$bar]\r";
    return;
}
|
|
|
# Execute a command while rendering its progress on STDERR.
#
#   $get_exec       - given the fd number the command should write its
#                     machine-readable progress to, returns the argv
#   $line_handler   - maps one progress line to (percentage, status)
#   $line_has_error - whether a captured output line indicates failure
#   $chdir          - optional directory to chdir() into before exec()
#
# The command's stdout and stderr are captured and only replayed on
# failure (or streamed directly at verbosity >= 2), while a helper
# child renders the progress bar from the dedicated progress fd.
sub run_progress {
    my ($get_exec, $line_handler, $line_has_error, $chdir) = @_;
    pipe my $rfh, my $wfh;
    my $got_signal = 0;
    my $ignore = sub {
        info "run_progress() received signal $_[0]: waiting for child...";
    };

    debug("run_progress: exec " . (join ' ', ($get_exec->('${FD}'))));

    # delay signals so that fork() and the signal-handler changes in
    # parent and child are not interrupted
    my $sigset = POSIX::SigSet->new(SIGINT, SIGHUP, SIGPIPE, SIGTERM);
    POSIX::sigprocmask(SIG_BLOCK, $sigset) or error "Can't block signals: $!";

    my $pid1 = open(my $pipe, '-|') // error "failed to fork(): $!";

    if ($pid1 == 0) {
        # exec child: restore default signal handlers
        local $SIG{'INT'}  = 'DEFAULT';
        local $SIG{'HUP'}  = 'DEFAULT';
        local $SIG{'PIPE'} = 'DEFAULT';
        local $SIG{'TERM'} = 'DEFAULT';

        # unblock all delayed signals (and possibly handle them)
        POSIX::sigprocmask(SIG_UNBLOCK, $sigset)
          or error "Can't unblock signals: $!";

        close $rfh;
        # drop the close-on-exec flag so the write end of the pipe
        # survives the exec()
        my $flags = fcntl($wfh, F_GETFD, 0) or error "fcntl F_GETFD: $!";
        fcntl($wfh, F_SETFD, $flags & ~FD_CLOEXEC)
          or error "fcntl F_SETFD: $!";
        my $fd = fileno $wfh;
        # fold stderr into stdout so that both are captured
        open(STDERR, '>&', STDOUT) or error "cannot open STDOUT: $!";
        my @execargs = $get_exec->($fd);
        # apt before 1.5 attempted to chdir() into the working
        # directory, which fails when it is not accessible to the user
        # (for example in unshare mode) — see Debian bug #860738
        if (defined $chdir) {
            chdir $chdir or error "failed chdir() to $chdir: $!";
        }
        eval { Devel::Cover::set_coverage("none") } if $is_covering;
        exec { $execargs[0] } @execargs
          or error 'cannot exec() ' . (join ' ', @execargs);
    }
    close $wfh;

    # two readers from here on:
    #   this process scans the command's stdout for errors
    #   a second child renders $rfh as the progress meter
    my $pid2 = fork() // error "failed to fork(): $!";
    if ($pid2 == 0) {
        local $SIG{'INT'}  = 'IGNORE';
        local $SIG{'HUP'}  = 'IGNORE';
        local $SIG{'PIPE'} = 'IGNORE';
        local $SIG{'TERM'} = 'IGNORE';

        # unblock all delayed signals (and possibly handle them)
        POSIX::sigprocmask(SIG_UNBLOCK, $sigset)
          or error "Can't unblock signals: $!";

        if ($verbosity_level != 1 || !stderr_is_tty()) {
            # no progress to draw, but $rfh still must be drained or
            # apt would block once the pipe buffer fills up
            local $/;
            <$rfh>;
            close $rfh;
            exit 0;
        }
        my $progress = 0.0;
        my $status   = undef;
        print_progress($progress);
        while (my $line = <$rfh>) {
            my ($newprogress, $newstatus) = $line_handler->($line);
            next unless $newprogress;
            # start a fresh line whenever the progress value went
            # backwards
            if ($newprogress < $progress) {
                print_progress("done");
            }
            if (defined $newstatus) {
                $status = $newstatus;
            }
            print_progress($newprogress, $status);
            $progress = $newprogress;
        }
        print_progress("done");

        exit 0;
    }

    # parent: ignore signals; "local" restores the original handlers
    # automatically once the function returns
    local $SIG{'INT'}  = $ignore;
    local $SIG{'HUP'}  = $ignore;
    local $SIG{'PIPE'} = $ignore;
    local $SIG{'TERM'} = $ignore;

    # unblock all delayed signals (and possibly handle them)
    POSIX::sigprocmask(SIG_UNBLOCK, $sigset)
      or error "Can't unblock signals: $!";

    my $output    = '';
    my $has_error = 0;
    while (my $line = <$pipe>) {
        $has_error = $line_has_error->($line);
        if ($verbosity_level >= 2) {
            print STDERR $line;
        } else {
            # keep the captured apt output for replay on failure
            $output .= $line;
        }
    }

    close($pipe);
    my $fail = ($? != 0 || $has_error) ? 1 : 0;

    waitpid $pid2, 0;
    $? == 0 or error "progress parsing failed";

    if ($got_signal) {
        error "run_progress() received signal: $got_signal";
    }

    # report the failure only after the progress output has finished so
    # that it does not interfere with the remaining output
    if ($fail) {
        if ($verbosity_level >= 1) {
            print STDERR $output;
        }
        error((join ' ', $get_exec->('<$fd>')) . ' failed');
    }
    return;
}
|
|
|
# Run dpkg (argv in $options->{ARGV}) on the packages in
# $options->{PKGS}, rendering a progress bar from the lines dpkg writes
# to its --status-fd. Each package contributes one install and one
# configure step to the total.
sub run_dpkg_progress {
    my $options = shift;
    my @debs = @{ $options->{PKGS} // [] };
    my $get_exec
      = sub { return @{ $options->{ARGV} }, "--status-fd=$_[0]", @debs; };
    my $line_has_error = sub { return 0; };
    my $num = 0;
    # one install plus one configure step per package
    my $total = (scalar @debs) * 2;
    my $line_handler = sub {
        my $status = undef;
        if ($_[0] =~ /^processing: (install|configure): /) {
            if ($1 eq 'install') {
                $status = 'installing';
            } elsif ($1 eq 'configure') {
                $status = 'configuring';
            } else {
                error "unknown status: $1";
            }
            $num += 1;
        }
        return $total == 0 ? (0, $status) : ($num / $total * 100, $status);
    };
    run_progress $get_exec, $line_handler, $line_has_error;
    return;
}
|
|
|
# Run apt (argv in $options->{ARGV}, optional packages in
# $options->{PKGS}) and render its -oAPT::Status-Fd output as a
# progress bar. With $options->{FIND_APT_WARNINGS} set, any W:/Err:
# line in apt's output is treated as a failure; $options->{CHDIR} is
# forwarded to run_progress(). At debug verbosity, apt's dependency
# resolver debugging options are appended to the argv.
sub run_apt_progress {
    my $options = shift;
    my @debs = @{ $options->{PKGS} // [] };

    if ($verbosity_level >= 3) {
        my @apt_debug_opts = qw(
          -oDebug::pkgProblemResolver=true
          -oDebug::pkgDepCache::Marker=1
          -oDebug::pkgDepCache::AutoInstall=1
        );
        push @{ $options->{ARGV} }, @apt_debug_opts;
    }

    my $get_exec = sub {
        my @prefix = ();
        my @opts   = ();
        return (
            @prefix,
            @{ $options->{ARGV} },
            @opts,
            "-oAPT::Status-Fd=$_[0]",
            # keep apt away from the terminal and let dpkg receive
            # SIGINT so it quits immediately instead of waiting for
            # maintainer scripts to finish
            '-oDpkg::Use-Pty=false',
            @debs
        );
    };
    my $line_has_error = sub { return 0; };
    if ($options->{FIND_APT_WARNINGS}) {
        $line_has_error = sub {
            # apt-get may exit zero even though the update failed, so
            # its output has to be parsed; see #778357, #776152, #696335
            # and #745735 for the parsing bugs as well as #594813,
            # #696335, #776152, #778357 and #953726 for non-zero exit on
            # transient network errors.
            #
            # We typically want to fail on e.g.
            #   W: Some index files failed to download. They have been
            #   ignored, or old ones used instead.
            # but such messages are meant for humans and may change
            # between apt versions, so we error out on *any* W: line.
            # The downside: apt also warns, unconditionally and by
            # design, about unsigned repositories, even when they were
            # allowed via Acquire::AllowInsecureRepositories "1" or
            # trusted=yes. apt >= 2.1.16 offers --error-on=any for
            # apt-get update as a workaround.
            return 1 if $_[0] =~ /^(W: |Err:)/;
            return 0;
        };
    }
    my $line_handler = sub {
        if ($_[0] =~ /(pmstatus|dlstatus):[^:]+:(\d+\.\d+):.*/) {
            my $status = undef;
            if ($1 eq 'pmstatus') {
                $status = "installing";
            } elsif ($1 eq 'dlstatus') {
                $status = "downloading";
            } else {
                error "unknown status: $1";
            }
            return $2, $status;
        }
    };
    run_progress $get_exec, $line_handler, $line_has_error, $options->{CHDIR};
    return;
}
|
|
|
# Let apt resolve and "install" the requested packages in a fancy no-op mode
# so that the list of .deb files it would act on can be collected, without
# touching the system. Returns that list (one path/name per element).
#
# Fix: the original wrote
#     my $pid = open my $fh, '-|' // error "fork() failed: $!";
# which parses as open($fh, ('-|' // error ...)). Since '-|' is always
# defined, the error branch could never fire and a failing fork() (open
# returning undef) went undetected. The open() call must be parenthesized so
# the defined-or applies to its return value.
sub run_apt_download_progress {
    my $options = shift;
    if ($options->{dryrun}) {
        info "simulate downloading packages with apt...";
    } else {
        info "downloading packages with apt...";
    }

    # $wfh is handed to apt/dpkg as an InfoFD; the helper child reads from
    # the other end ($rfh) and echoes everything back to us on $fh.
    pipe my $rfh, my $wfh or error "pipe() failed: $!";
    my $pid = open(my $fh, '-|') // error "fork() failed: $!";
    if ($pid == 0) {
        # helper child: collect everything apt writes until the parent
        # closes $wfh, then pass it back via our stdout
        close $wfh;
        # read until parent process closes $wfh
        my $content = do { local $/; <$rfh> };
        close $rfh;
        # the parent is done -- pass what we read back to it
        print $content;
        exit 0;
    }
    close $rfh;
    # Unset the close-on-exec flag, so that the file descriptor does not
    # get closed when we exec
    my $flags = fcntl($wfh, F_GETFD, 0) or error "fcntl F_GETFD: $!";
    fcntl($wfh, F_SETFD, $flags & ~FD_CLOEXEC) or error "fcntl F_SETFD: $!";
    my $fd = fileno $wfh;
    # run_apt_progress() can raise an exception which would leave this
    # function without cleaning up the helper child we started, making
    # mmdebstrap hang in case run_apt_progress() fails -- so wrap this in
    # eval() instead
    eval {
        # 2022-05-02, #debian-apt on OFTC, times in UTC+2
        # 16:57 < josch> DonKult: how is -oDebug::pkgDpkgPm=1
        #                -oDir::Log=/dev/null a "fancy no-op"?
        # 11:52 < DonKult> josch: "fancy no-op" in sofar as it does nothing to
        #                  the system even through its not in a special mode
        #                  ala simulation or download-only. It does all the
        #                  things it normally does, except that it just prints
        #                  the dpkg calls instead of execv() them which in
        #                  practice amounts means it does nothing (the Dir::Log
        #                  just prevents libapt from creating the /var/log/apt
        #                  directories. As the code creates them even if no
        #                  logs will be placed there…). As said, midterm an apt
        #                  --print-install-packages or something would be nice
        #                  to avoid running everything.
        run_apt_progress({
            ARGV => [
                'apt-get',
                '--yes',
                '-oDebug::pkgDpkgPm=1',
                '-oDir::Log=/dev/null',
                $options->{dryrun}
                ? '-oAPT::Get::Simulate=true'
                : (
                    "-oAPT::Keep-Fds::=$fd",
                    "-oDPkg::Tools::options::'cat >&$fd'::InfoFD=$fd",
                    "-oDpkg::Pre-Install-Pkgs::=cat >&$fd",
                    # no need to lock the database if we are just downloading
                    "-oDebug::NoLocking=1",
                    # no need for pty magic if we write no log
                    "-oDpkg::Use-Pty=0",
                ),
                @{ $options->{APT_ARGV} },
            ],
        });
    };
    my $err = '';
    if ($@) {
        $err = "apt download failed: $@";
    }
    # signal the child process that we are done
    close $wfh;
    # and then read from it what it got
    my @listofdebs = <$fh>;
    close $fh;
    if ($? != 0) {
        $err = "status child failed";
    }
    if ($err) {
        error $err;
    }
    # remove trailing newlines
    chomp @listofdebs;
    return @listofdebs;
}
|
|
|
# Prepare the chroot for package installation: create or bind-mount device
# nodes from @devfiles, mount (or bind-mount) /sys and /proc, and divert
# policy-rc.d and start-stop-daemon so daemons cannot start during install.
#
# $options: hash ref; reads mode, root, skip, canmount and havemknod.
# Returns a list of cleanup closures that undo each action; callers are
# expected to run them in reverse order (pop). Any failure inside is
# re-raised as "setup_mounts failed: ...".
sub setup_mounts {
    my $options = shift;

    my @cleanup_tasks = ();

    eval {
        if (any { $_ eq $options->{mode} } ('root', 'unshare')) {
            # if more than essential should be installed, make the system look
            # more like a real one by creating or bind-mounting the device
            # nodes
            #
            # NOTE(review): @devfiles entries appear to follow tar typeflag
            # numbering (0=file, 1=hardlink, 2=symlink, 3/4=char/block
            # special, 5=directory) -- confirm against the @devfiles
            # definition elsewhere in this file.
            foreach my $file (@devfiles) {
                my ($fname, $mode, $type, $linkname, $devmajor, $devminor)
                  = @{$file};
                next if $fname eq '';
                if ($type == 0) {    # normal file
                    error "type 0 not implemented";
                } elsif ($type == 1) {    # hardlink
                    error "type 1 not implemented";
                } elsif ($type == 2) {    # symlink
                    if (!$options->{havemknod}) {
                        # If we had mknod, then the symlink was already created
                        # in the run_setup function.
                        if (!-d "$options->{root}/dev") {
                            warning(
                                "skipping creation of ./dev/$fname because the"
                                  . " /dev directory is missing in the target"
                            );
                            next;
                        }
                        push @cleanup_tasks, sub {
                            unlink "$options->{root}/dev/$fname"
                              or warning("cannot unlink ./dev/$fname: $!");
                        };
                        symlink $linkname, "$options->{root}/dev/$fname"
                          or error
                          "cannot create symlink ./dev/$fname -> $linkname";
                    }
                } elsif ($type == 3 or $type == 4) {
                    # character/block special
                    if (any { $_ =~ '^chroot/mount(?:/dev)?$' }
                        @{ $options->{skip} }) {
                        info "skipping chroot/mount/dev as requested";
                    } elsif (!$options->{canmount}) {
                        warning "skipping bind-mounting ./dev/$fname";
                    } elsif (!$options->{havemknod}) {
                        if (!-d "$options->{root}/dev") {
                            warning(
                                "skipping creation of ./dev/$fname because the"
                                  . " /dev directory is missing in the target"
                            );
                            next;
                        }
                        if ($fname eq "ptmx") {
                            # We must not bind-mount ptmx from the outside or
                            # otherwise posix_openpt() will fail. Instead
                            # /dev/ptmx must refer to /dev/pts/ptmx either by
                            # symlink or by bind-mounting. We choose a symlink.
                            symlink '/dev/pts/ptmx',
                              "$options->{root}/dev/ptmx"
                              or error "cannot create /dev/pts/ptmx symlink";
                            push @cleanup_tasks, sub {
                                unlink "$options->{root}/dev/ptmx"
                                  or warning "unlink /dev/ptmx";
                            };
                            next;
                        }
                        if (!-e "/dev/$fname") {
                            warning("skipping creation of ./dev/$fname because"
                                  . " /dev/$fname does not exist"
                                  . " on the outside");
                            next;
                        }
                        if (!-c "/dev/$fname") {
                            warning("skipping creation of ./dev/$fname because"
                                  . " /dev/$fname on the outside is not a"
                                  . " character special file");
                            next;
                        }
                        # create an empty file to serve as the bind-mount
                        # target for the host's device node
                        open my $fh, '>', "$options->{root}/dev/$fname"
                          or error
                          "cannot open $options->{root}/dev/$fname: $!";
                        close $fh;
                        my @umountopts = ();
                        if ($options->{mode} eq 'unshare') {
                            push @umountopts, '--no-mtab';
                        }
                        push @cleanup_tasks, sub {
                            0 == system('umount', @umountopts,
                                "$options->{root}/dev/$fname")
                              or warning("umount ./dev/$fname failed: $?");
                            unlink "$options->{root}/dev/$fname"
                              or warning("cannot unlink ./dev/$fname: $!");
                        };
                        0 == system('mount', '-o', 'bind', "/dev/$fname",
                            "$options->{root}/dev/$fname")
                          or error "mount ./dev/$fname failed: $?";
                    }
                } elsif ($type == 5) {
                    # directory
                    if (any { $_ =~ '^chroot/mount(?:/dev)?$' }
                        @{ $options->{skip} }) {
                        info "skipping chroot/mount/dev as requested";
                    } elsif (!$options->{canmount}) {
                        warning "skipping bind-mounting ./dev/$fname";
                    } else {
                        if (!-d "$options->{root}/dev") {
                            warning(
                                "skipping creation of ./dev/$fname because the"
                                  . " /dev directory is missing in the target"
                            );
                            next;
                        }
                        # "pts/" is exempt from the outside-existence checks
                        # because a fresh devpts is mounted there below
                        if (!-e "/dev/$fname" && $fname ne "pts/") {
                            warning("skipping creation of ./dev/$fname because"
                                  . " /dev/$fname does not exist"
                                  . " on the outside");
                            next;
                        }
                        if (!-d "/dev/$fname" && $fname ne "pts/") {
                            warning("skipping creation of ./dev/$fname because"
                                  . " /dev/$fname on the outside is not a"
                                  . " directory");
                            next;
                        }
                        if (!$options->{havemknod}) {
                            # If we had mknod, then the directory to bind-mount
                            # into was already created in the run_setup
                            # function.
                            push @cleanup_tasks, sub {
                                rmdir "$options->{root}/dev/$fname"
                                  or warning("cannot rmdir ./dev/$fname: $!");
                            };
                            if (-e "$options->{root}/dev/$fname") {
                                if (!-d "$options->{root}/dev/$fname") {
                                    error
                                      "./dev/$fname already exists but is not"
                                      . " a directory";
                                }
                            } else {
                                my $num_created
                                  = make_path "$options->{root}/dev/$fname",
                                  { error => \my $err };
                                if ($err && @$err) {
                                    error(
                                        join "; ",
                                        (
                                            map {
                                                "cannot create "
                                                  . (join ": ", %{$_})
                                            } @$err
                                        ));
                                } elsif ($num_created == 0) {
                                    error(  "cannot create $options->{root}"
                                          . "/dev/$fname");
                                }
                            }
                            chmod $mode, "$options->{root}/dev/$fname"
                              or error "cannot chmod ./dev/$fname: $!";
                        }
                        my @umountopts = ();
                        if ($options->{mode} eq 'unshare') {
                            push @umountopts, '--no-mtab';
                        }
                        push @cleanup_tasks, sub {
                            0 == system('umount', @umountopts,
                                "$options->{root}/dev/$fname")
                              or warning("umount ./dev/$fname failed: $?");
                        };
                        if ($fname eq "pts/") {
                            # We cannot just bind-mount /dev/pts from the host
                            # as doing so will make posix_openpt() fail.
                            # Instead, we need to mount a new devpts.
                            # We need ptmxmode=666 because /dev/ptmx is a
                            # symlink to /dev/pts/ptmx and without it
                            # posix_openpt() will fail if we are not the root
                            # user.
                            # See also:
                            # kernel.org/doc/Documentation/filesystems/devpts.txt
                            # salsa.debian.org/debian/schroot/-/merge_requests/2
                            # https://bugs.debian.org/856877
                            # https://bugs.debian.org/817236
                            0 == system(
                                'mount',
                                '-t',
                                'devpts',
                                'none',
                                "$options->{root}/dev/pts",
                                '-o',
                                'noexec,nosuid,uid=5,mode=620,ptmxmode=666'
                            ) or error "mount /dev/pts failed";
                        } else {
                            0 == system('mount', '-o', 'bind', "/dev/$fname",
                                "$options->{root}/dev/$fname")
                              or error "mount ./dev/$fname failed: $?";
                        }
                    }
                } else {
                    error "unsupported type: $type";
                }
            }
        } elsif (any { $_ eq $options->{mode} } ('fakechroot', 'chrootless')) {
            # we cannot mount in fakechroot mode
        } else {
            error "unknown mode: $options->{mode}";
        }

        # ---- /sys ----
        # We can only mount /proc and /sys after extracting the essential
        # set because if we mount it before, then base-files will not be able
        # to extract those
        if (   (any { $_ eq $options->{mode} } ('root', 'unshare'))
            && (any { $_ =~ '^chroot/mount(?:/sys)?$' } @{ $options->{skip} }))
        {
            info "skipping chroot/mount/sys as requested";
        } elsif ((any { $_ eq $options->{mode} } ('root', 'unshare'))
            && !$options->{canmount}) {
            warning "skipping mount sysfs";
        } elsif ((any { $_ eq $options->{mode} } ('root', 'unshare'))
            && !-d "$options->{root}/sys") {
            warning("skipping mounting of sysfs because the"
                  . " /sys directory is missing in the target");
        } elsif ((any { $_ eq $options->{mode} } ('root', 'unshare'))
            && !-e "/sys") {
            warning("skipping mounting /sys because"
                  . " /sys does not exist on the outside");
        } elsif ((any { $_ eq $options->{mode} } ('root', 'unshare'))
            && !-d "/sys") {
            warning("skipping mounting /sys because"
                  . " /sys on the outside is not a directory");
        } elsif ($options->{mode} eq 'root') {
            # we don't know whether we run in root mode inside an unshared
            # user namespace or as real root so we first try the real mount and
            # then fall back to mounting in a way that works in unshared mode
            if (
                0 == system(
                    'mount', '-t',
                    'sysfs', '-o',
                    'ro,nosuid,nodev,noexec', 'sys',
                    "$options->{root}/sys"
                )
            ) {
                push @cleanup_tasks, sub {
                    0 == system('umount', "$options->{root}/sys")
                      or warning("umount /sys failed: $?");
                };
            } elsif (
                0 == system('mount', '-o', 'rbind', '/sys',
                    "$options->{root}/sys")) {
                push @cleanup_tasks, sub {
                    # since we cannot write to /etc/mtab we need --no-mtab
                    # unmounting /sys only seems to be successful with --lazy
                    0 == system(
                        'umount', '--no-mtab',
                        '--lazy', "$options->{root}/sys"
                    ) or warning("umount /sys failed: $?");
                };
            } else {
                error "mount /sys failed: $?";
            }
        } elsif ($options->{mode} eq 'unshare') {
            # naturally we have to clean up after ourselves in sudo mode where
            # we do a real mount. But we also need to unmount in unshare mode
            # because otherwise, even with the --one-file-system tar option,
            # the permissions of the mount source will be stored and not the
            # mount target (the directory)
            push @cleanup_tasks, sub {
                # since we cannot write to /etc/mtab we need --no-mtab
                # unmounting /sys only seems to be successful with --lazy
                0 == system('umount', '--no-mtab', '--lazy',
                    "$options->{root}/sys")
                  or warning("umount /sys failed: $?");
            };
            # without the network namespace unshared, we cannot mount a new
            # sysfs. Since we need network, we just bind-mount.
            #
            # we have to rbind because just using bind results in "wrong fs
            # type, bad option, bad superblock" error
            0 == system('mount', '-o', 'rbind', '/sys', "$options->{root}/sys")
              or error "mount /sys failed: $?";
        } elsif (any { $_ eq $options->{mode} } ('fakechroot', 'chrootless')) {
            # we cannot mount in fakechroot mode
        } else {
            error "unknown mode: $options->{mode}";
        }

        # ---- /proc ----
        if (
            (any { $_ eq $options->{mode} } ('root', 'unshare'))
            && (any { $_ =~ '^chroot/mount(?:/proc)?$' } @{ $options->{skip} })
        ) {
            info "skipping chroot/mount/proc as requested";
        } elsif ((any { $_ eq $options->{mode} } ('root', 'unshare'))
            && !$options->{canmount}) {
            warning "skipping mount proc";
        } elsif ((any { $_ eq $options->{mode} } ('root', 'unshare'))
            && !-d "$options->{root}/proc") {
            warning("skipping mounting of proc because the"
                  . " /proc directory is missing in the target");
        } elsif ((any { $_ eq $options->{mode} } ('root', 'unshare'))
            && !-e "/proc") {
            warning("skipping mounting /proc because"
                  . " /proc does not exist on the outside");
        } elsif ((any { $_ eq $options->{mode} } ('root', 'unshare'))
            && !-d "/proc") {
            warning("skipping mounting /proc because"
                  . " /proc on the outside is not a directory");
        } elsif (any { $_ eq $options->{mode} } ('root', 'unshare')) {
            # we don't know whether we run in root mode inside an unshared
            # user namespace or as real root so we first try the real mount and
            # then fall back to mounting in a way that works in unshared
            if (
                $options->{mode} eq 'root'
                && 0 == system(
                    'mount', '-t', 'proc', '-o', 'ro', 'proc',
                    "$options->{root}/proc"
                )
            ) {
                push @cleanup_tasks, sub {
                    # some maintainer scripts mount additional stuff into /proc
                    # which we need to unmount beforehand
                    if (
                        is_mountpoint(
                            $options->{root} . "/proc/sys/fs/binfmt_misc"
                        )
                    ) {
                        0 == system('umount',
                            "$options->{root}/proc/sys/fs/binfmt_misc")
                          or warning(
                            "umount /proc/sys/fs/binfmt_misc failed: $?");
                    }
                    0 == system('umount', "$options->{root}/proc")
                      or warning("umount /proc failed: $?");
                };
            } elsif (
                0 == system('mount', '-t', 'proc', 'proc',
                    "$options->{root}/proc")) {
                push @cleanup_tasks, sub {
                    # since we cannot write to /etc/mtab we need --no-mtab
                    0 == system('umount', '--no-mtab', "$options->{root}/proc")
                      or warning("umount /proc failed: $?");
                };
            } elsif (
                # if mounting proc failed, try bind-mounting it read-only as a
                # last resort
                0 == system(
                    'mount', '-o',
                    'rbind', '/proc',
                    "$options->{root}/proc"
                )
            ) {
                warning("since mounting /proc normally failed, /proc is now "
                      . "bind-mounted instead");
                # to make sure that changes (like unmounting) to the
                # bind-mounted /proc do not affect the outside /proc, change
                # all the bind-mounts under /proc to be a slave mount.
                if (
                    0 != system('mount', '--make-rslave',
                        "$options->{root}/proc")) {
                    warning("mount --make-rslave /proc failed");
                }
                push @cleanup_tasks, sub {
                    # since we cannot write to /etc/mtab we need --no-mtab
                    0 == system(
                        'umount', '--no-mtab',
                        '--lazy', "$options->{root}/proc"
                    ) or warning("umount /proc failed: $?");
                };
            } else {
                error "mount /proc failed: $?";
            }
        } elsif (any { $_ eq $options->{mode} } ('fakechroot', 'chrootless')) {
            # we cannot mount in fakechroot mode
        } else {
            error "unknown mode: $options->{mode}";
        }

        # prevent daemons from starting
        # the directory might not exist in custom variant, for example
        #
        # ideally, we should use update-alternatives but we cannot rely on it
        # existing inside the chroot
        #
        # See #911290 for more problems of this interface
        if (any { $_ eq 'chroot/policy-rc.d' } @{ $options->{skip} }) {
            info "skipping chroot/policy-rc.d as requested";
        } else {
            push @cleanup_tasks, sub {
                if (-f "$options->{root}/usr/sbin/policy-rc.d") {
                    unlink "$options->{root}/usr/sbin/policy-rc.d"
                      or error "cannot unlink policy-rc.d: $!";
                }
            };
            if (-d "$options->{root}/usr/sbin/") {
                # exit 101 means "action forbidden" to invoke-rc.d
                open my $fh, '>', "$options->{root}/usr/sbin/policy-rc.d"
                  or error "cannot open policy-rc.d: $!";
                print $fh "#!/bin/sh\n";
                print $fh "exit 101\n";
                close $fh;
                chmod 0755, "$options->{root}/usr/sbin/policy-rc.d"
                  or error "cannot chmod policy-rc.d: $!";
            }
        }

        # Divert start-stop-daemon to a no-op stub for the duration of the
        # build; the cleanup task moves the real binary back.
        # the file might not exist if it was removed in a hook
        if (any { $_ eq 'chroot/start-stop-daemon' } @{ $options->{skip} }) {
            info "skipping chroot/start-stop-daemon as requested";
        } else {
            push @cleanup_tasks, sub {
                if (-e "$options->{root}/sbin/start-stop-daemon.REAL") {
                    move(
                        "$options->{root}/sbin/start-stop-daemon.REAL",
                        "$options->{root}/sbin/start-stop-daemon"
                    ) or error "cannot move start-stop-daemon: $!";
                }
            };
            if (-f "$options->{root}/sbin/start-stop-daemon") {
                if (-e "$options->{root}/sbin/start-stop-daemon.REAL") {
                    error
                      "$options->{root}/sbin/start-stop-daemon.REAL already"
                      . " exists";
                }
                move(
                    "$options->{root}/sbin/start-stop-daemon",
                    "$options->{root}/sbin/start-stop-daemon.REAL"
                ) or error "cannot move start-stop-daemon: $!";
                open my $fh, '>', "$options->{root}/sbin/start-stop-daemon"
                  or error "cannot open start-stop-daemon: $!";
                print $fh "#!/bin/sh\n";
                print $fh
                  "echo \"Warning: Fake start-stop-daemon called, doing"
                  . " nothing\">&2\n";
                close $fh;
                chmod 0755, "$options->{root}/sbin/start-stop-daemon"
                  or error "cannot chmod start-stop-daemon: $!";
            }
        }
    };

    if ($@) {
        error "setup_mounts failed: $@";
    }
    return @cleanup_tasks;
}
|
|
|
# Run all hooks registered for stage $name ("setup", "extract", "essential"
# or "customize").
#
# $name:           hook stage; selects $options->{"${name}_hook"}.
# $options:        hash ref; reads mode, root, hooksock, include, variant,
#                  qemu, dryrun and the hook lists.
# $essential_pkgs: array ref of essential package paths; only used to export
#                  MMDEBSTRAP_ESSENTIAL for the "extract" stage.
#
# Hooks come in three flavors: "pivoted" hooks run inside the chroot, special
# hooks (copy-in, tar-out, ...) are dispatched to hookhelper() over the hook
# socket, and everything else runs on the outside with the chroot path as
# argument (directly, or wrapped in sh -c).
sub run_hooks {
    my $name = shift;
    my $options = shift;
    my $essential_pkgs = shift;

    if (scalar @{ $options->{"${name}_hook"} } == 0) {
        return;
    }

    if ($options->{dryrun}) {
        info "not running ${name}-hooks because of --dry-run";
        return;
    }

    # Environment adjustments (applied via env(1)) for every hook below.
    my @env_opts = ();
    # At this point TMPDIR is set to "$options->{root}/tmp". This is to have a
    # writable TMPDIR even in unshare mode. But if TMPDIR is still set when
    # running hooks, then every hook script calling chroot, will have to wrap
    # that into an "env --unset=TMPDIR". To avoid this, we unset TMPDIR here.
    # If the hook script needs a writable TMPDIR, then it can always use /tmp
    # inside the chroot. This is also why we do not set a new MMDEBSTRAP_TMPDIR
    # environment variable.
    if (length $ENV{TMPDIR}) {
        push @env_opts, '--unset=TMPDIR';
    }
    # The APT_CONFIG variable, if set, will confuse any manual calls to
    # apt-get. If you want to use the same config used by mmdebstrap, the
    # original value is stored in MMDEBSTRAP_APT_CONFIG.
    if (length $ENV{APT_CONFIG}) {
        push @env_opts, '--unset=APT_CONFIG';
    }
    if (length $ENV{APT_CONFIG}) {
        push @env_opts, "MMDEBSTRAP_APT_CONFIG=$ENV{APT_CONFIG}";
    }
    # A hook script that wants to call mmdebstrap with --hook-helper needs to
    # know how mmdebstrap was executed
    push @env_opts, "MMDEBSTRAP_ARGV0=$PROGRAM_NAME";
    # Storing the mode is important for hook scripts to potentially change
    # their behavior depending on the mode. It's also important for when the
    # hook wants to use the mmdebstrap --hook-helper.
    push @env_opts, "MMDEBSTRAP_MODE=$options->{mode}";
    # Storing the hook name is important for hook scripts to potentially change
    # their behavior depending on the hook. It's also important for when the
    # hook wants to use the mmdebstrap --hook-helper.
    push @env_opts, "MMDEBSTRAP_HOOK=$name";
    # This is the file descriptor of the socket that the mmdebstrap
    # --hook-helper can write to and read from to communicate with the outside.
    push @env_opts, ("MMDEBSTRAP_HOOKSOCK=" . fileno($options->{hooksock}));
    # Store the verbosity of mmdebstrap so that hooks can be just as verbose
    # as the mmdebstrap invocation that called them.
    push @env_opts, ("MMDEBSTRAP_VERBOSITY=" . $verbosity_level);
    # Store the packages given via --include in an environment variable so that
    # hooks can, for example, make .deb files available inside the chroot.
    {
        my @escaped_includes = @{ $options->{include} };
        foreach my $incl (@escaped_includes) {
            # We have to encode commas so that values containing commas can
            # be stored in the list. Since we encode using percent-encoding
            # (urlencoding) we also have to encode the percent sign.
            $incl =~ s/%/%25/g;
            $incl =~ s/,/%2C/g;
        }
        push @env_opts,
          ("MMDEBSTRAP_INCLUDE=" . (join ",", @escaped_includes));
    }
    # Give the extract hook access to the essential packages that are about to
    # be installed
    if ($name eq "extract" and scalar @{$essential_pkgs} > 0) {
        push @env_opts,
          ("MMDEBSTRAP_ESSENTIAL=" . (join " ", @{$essential_pkgs}));
    }

    # Unset the close-on-exec flag, so that the file descriptor does not
    # get closed when we exec
    my $flags = fcntl($options->{hooksock}, F_GETFD, 0)
      or error "fcntl F_GETFD: $!";
    fcntl($options->{hooksock}, F_SETFD, $flags & ~FD_CLOEXEC)
      or error "fcntl F_SETFD: $!";

    {
        foreach my $script (@{ $options->{"${name}_hook"} }) {
            # each entry is a [type, command] pair
            my $type = $script->[0];
            $script = $script->[1];

            if ($type eq "pivoted") {
                info "running --chrooted-$name-hook in shell: sh -c "
                  . "'$script'";
                my $pid = fork() // error "fork() failed: $!";
                if ($pid == 0) {
                    # child
                    my @cmdprefix = ();
                    if ($options->{mode} eq 'fakechroot') {
                        # we are calling the chroot executable instead of
                        # chrooting the process so that fakechroot can handle
                        # it
                        @cmdprefix = ('chroot', $options->{root});
                    } elsif ($options->{mode} eq 'root') {
                        # unsharing the mount namespace is not enough for
                        # pivot_root to work as root (why?) unsharing the user
                        # namespace as well (but without remapping) makes
                        # pivot_root work (why??) but still makes later lazy
                        # umounts fail (why???). Since pivot_root is mainly
                        # useful for being able to run unshare mode inside
                        # unshare mode, we fall back to just calling chroot()
                        # until somebody has motivation and time to figure out
                        # what is going on.
                        chroot $options->{root}
                          or error "failed to chroot(): $!";
                        $options->{root} = "/";
                        chdir "/" or error "failed chdir() to /: $!";
                    } elsif ($options->{mode} eq 'unshare') {
                        0 == syscall &SYS_unshare, $CLONE_NEWNS
                          or error "unshare() failed: $!";
                        pivot_root($options->{root});
                    } else {
                        error "unknown mode: $options->{mode}";
                    }
                    0 == system(@cmdprefix, 'env', @env_opts, 'sh', '-c',
                        $script)
                      or error "command failed: $script";
                    exit 0;
                }
                waitpid($pid, 0);
                $? == 0 or error "chrooted hook failed with exit code $?";
                next;
            }

            # inode and device number of chroot before -- used below to
            # detect whether the hook performed a pivot_root
            my ($dev_before, $ino_before, undef) = stat($options->{root});

            if (
                $script =~ /^(
                    copy-in|copy-out
                    |tar-in|tar-out
                    |upload|download
                    |sync-in|sync-out
                )\ /x
            ) {
                info "running special hook: $script";
                if ((any { $_ eq $options->{variant} } ('extract', 'custom'))
                    and $options->{mode} eq 'fakechroot'
                    and $name ne 'setup') {
                    info "the copy-in, copy-out, tar-in and tar-out commands"
                      . " in fakechroot mode might fail in"
                      . " extract and custom variants because there might be"
                      . " no tar inside the chroot";
                }

                my $pid = fork() // error "fork() failed: $!";
                if ($pid == 0) {
                    # whatever the script writes on stdout is sent to the
                    # socket
                    # whatever is written to the socket, send to stdin
                    open(STDOUT, '>&', $options->{hooksock})
                      or error "cannot open STDOUT: $!";
                    open(STDIN, '<&', $options->{hooksock})
                      or error "cannot open STDIN: $!";

                    # Text::ParseWords::shellwords does for perl what shlex
                    # does for python
                    my @args = shellwords $script;
                    hookhelper($options->{root}, $options->{mode}, $name,
                        $options->{qemu}, $verbosity_level, @args);
                    exit 0;
                }
                waitpid($pid, 0);
                $? == 0 or error "special hook failed with exit code $?";
            } elsif (-x $script || $script !~ m/[^\w@\%+=:,.\/-]/a) {
                info "running --$name-hook directly: $script $options->{root}";
                # execute it directly if it's an executable file
                # or if there are no shell metacharacters
                # (the /a regex modifier makes \w match only ASCII)
                0 == system('env', @env_opts, $script, $options->{root})
                  or error "command failed: $script";
            } else {
                info "running --$name-hook in shell: sh -c '$script' exec"
                  . " $options->{root}";
                # otherwise, wrap everything in sh -c
                0 == system('env', @env_opts,
                    'sh', '-c', $script, 'exec', $options->{root})
                  or error "command failed: $script";
            }

            # If the chroot directory vanished, check if pivot_root was
            # performed.
            #
            # Running pivot_root is only really useful in the customize-hooks
            # because mmdebstrap uses apt from the outside to install packages
            # and that will fail after pivot_root because the process doesn't
            # have access to the system on the outside anymore.
            if (!-e $options->{root}) {
                my ($dev_root, $ino_root, undef) = stat("/");
                if ($dev_before == $dev_root and $ino_before == $ino_root) {
                    info "detected pivot_root, changing chroot directory to /";
                    # the old chroot directory is now /
                    # the hook probably executed pivot_root
                    $options->{root} = "/";
                    chdir "/" or error "failed chdir() to /: $!";
                } else {
                    error "chroot directory $options->{root} vanished";
                }
            }
        }
    };

    # Restore flags
    fcntl($options->{hooksock}, F_SETFD, $flags) or error "fcntl F_SETFD: $!";
    return;
}
|
|
|
# Top-level driver for creating the chroot: dump options, run the setup /
# update / download / extract stages, set up mounts (with signal-safe
# cleanup), run the prepare/essential/install/customize stages and finally
# clean up.
#
# $options: fully-populated options hash ref.
# Dies via error() on any stage failure after running cleanup tasks.
#
# Fix: the signal warning message read "cought signal"; corrected to
# "caught signal".
sub setup {
    my $options = shift;

    # dump all scalar/array options at debug verbosity
    foreach my $key (sort keys %{$options}) {
        my $value = $options->{$key};
        if (!defined $value) {
            next;
        }
        if (ref $value eq '') {
            debug "$key: $options->{$key}";
        } elsif (ref $value eq 'ARRAY') {
            debug "$key: [" . (join ', ', @{$value}) . "]";
        } elsif (ref $value eq 'GLOB') {
            debug "$key: GLOB";
        } else {
            error "unknown type for key $key: " . (ref $value);
        }
    }

    if (-e $options->{apttrusted} && !-r $options->{apttrusted}) {
        warning "cannot read $options->{apttrusted}";
    }
    if (-e $options->{apttrustedparts} && !-r $options->{apttrustedparts}) {
        warning "cannot read $options->{apttrustedparts}";
    }

    if (any { $_ eq 'setup' } @{ $options->{skip} }) {
        info "skipping setup as requested";
    } else {
        run_setup($options);
    }

    run_hooks('setup', $options);

    # apt runs dpkg from inside the chroot and directly passes the filename to
    # dpkg. Hence, the included files on the outside must be present under the
    # same path on the inside. If they are not, dpkg cannot find them.
    if (scalar(grep { /^\// } @{ $options->{include} }) > 0) {
        my $ret = 0;
        foreach my $f (grep { /^\// } @{ $options->{include} }) {
            next if -e "$options->{root}/$f";
            warning
              "path given via --include is not present inside the chroot: $f";
            $ret = 1;
        }
        if ($ret != 0) {
            warning("apt runs chrooted dpkg which needs access to the "
                  . "package paths given via --include inside the chroot.");
            warning "maybe try running mmdebstrap with "
              . "--hook-dir=/usr/share/mmdebstrap/hooks/file-mirror-automount";
        }
    }

    if (any { $_ eq 'update' } @{ $options->{skip} }) {
        info "skipping update as requested";
    } else {
        run_update($options);
    }

    my ($essential_pkgs, $cached_debs) = run_download($options);

    # in theory, we don't have to extract the packages in chrootless mode
    # but we do it anyways because otherwise directory creation timestamps
    # will differ compared to non-chrootless and we want to create bit-by-bit
    # identical tar output
    #
    # FIXME: dpkg could be changed to produce the same results
    run_extract($options, $essential_pkgs);

    # setup mounts -- $cleanup runs the registered tasks in reverse order and
    # doubles as the signal handler while mounts are being set up
    my @cleanup_tasks = ();
    my $cleanup = sub {
        my $signal = $_[0];
        while (my $task = pop @cleanup_tasks) {
            $task->();
        }
        if ($signal) {
            warning "pid $PID caught signal: $signal";
            exit 1;
        }
    };

    # we only need to setup the mounts if there is anything to do
    if (   $options->{variant} ne 'custom'
        or scalar @{ $options->{include} } > 0
        or scalar @{ $options->{"extract_hook"} } > 0
        or scalar @{ $options->{"essential_hook"} } > 0
        or scalar @{ $options->{"customize_hook"} } > 0) {
        local $SIG{INT}  = $cleanup;
        local $SIG{HUP}  = $cleanup;
        local $SIG{PIPE} = $cleanup;
        local $SIG{TERM} = $cleanup;

        @cleanup_tasks = setup_mounts($options);
    }

    # run the remaining stages inside eval so that cleanup tasks always run
    eval {
        my $chrootcmd = [];
        if ($options->{variant} ne 'extract') {
            if ($options->{mode} ne 'chrootless') {
                $chrootcmd = run_prepare($options);
            }
        }

        run_hooks('extract', $options, $essential_pkgs);

        if ($options->{variant} ne 'extract') {
            run_essential($options, $essential_pkgs, $chrootcmd, $cached_debs);

            run_hooks('essential', $options);

            run_install($options);

            run_hooks('customize', $options);
        }
    };

    my $msg = $@;

    # always undo mounts/diversions before reporting a failure
    $cleanup->(0);
    if ($msg) {
        error "setup failed: $msg";
    }

    if (any { $_ eq 'cleanup' } @{ $options->{skip} }) {
        info "skipping cleanup as requested";
    } else {
        run_cleanup($options);
    }

    return;
}
|
|
|
# Prepare the chroot directory and the host-side apt configuration before any
# package is downloaded: create the directory skeleton, write the apt/dpkg
# configuration files, seed /etc/apt/sources.list*, copy resolv.conf/hostname,
# create device nodes (if mknod is available) and export APT_CONFIG/TMPDIR.
#
# $options: hashref with (at least) root, mode, variant, nativearch,
#           foreignarchs, aptopts, dpkgopts, sourceslists, apttrusted,
#           apttrustedparts, havemknod.
# Returns nothing; dies via error() on any failure.
sub run_setup() {
    my $options = shift;

    {
        my @directories = (
            '/etc/apt/apt.conf.d', '/etc/apt/sources.list.d',
            '/etc/apt/preferences.d', '/var/cache/apt',
            '/var/lib/apt/lists/partial', '/tmp'
        );
        # we need /var/lib/dpkg in case we need to write to /var/lib/dpkg/arch
        push @directories, '/var/lib/dpkg';
        # since we do not know the dpkg version inside the chroot at this
        # point, we can only omit it in chrootless mode
        if ($options->{mode} ne 'chrootless'
            or scalar @{ $options->{dpkgopts} } > 0) {
            push @directories, '/etc/dpkg/dpkg.cfg.d/';
        }
        # if dpkg and apt operate from the outside we need some more
        # directories because dpkg and apt might not even be installed inside
        # the chroot. Thus, the following block is not strictly necessary in
        # chrootless mode. We unconditionally add it anyways, so that the
        # output with and without chrootless mode is equal.
        {
            push @directories, '/var/log/apt';
            # since we do not know the dpkg version inside the chroot at this
            # point, we can only omit it in chrootless mode
            if ($options->{mode} ne 'chrootless') {
                push @directories, '/var/lib/dpkg/triggers',
                  '/var/lib/dpkg/info', '/var/lib/dpkg/alternatives',
                  '/var/lib/dpkg/updates';
            }
        }
        foreach my $dir (@directories) {
            if (-e "$options->{root}/$dir") {
                if (!-d "$options->{root}/$dir") {
                    error "$dir already exists but is not a directory";
                }
            } else {
                my $num_created = make_path "$options->{root}/$dir",
                  { error => \my $err };
                if ($err && @$err) {
                    error(
                        join "; ",
                        (map { "cannot create " . (join ": ", %{$_}) } @$err));
                } elsif ($num_created == 0) {
                    error "cannot create $options->{root}/$dir";
                }
            }
        }
        # make sure /tmp is not 0755 like the rest
        chmod 01777, "$options->{root}/tmp" or error "cannot chmod /tmp: $!";
    }

    # The TMPDIR set by the user or even /tmp might be inaccessible by the
    # unshared user. Thus, we place all temporary files in /tmp inside the new
    # rootfs.
    #
    # This will affect calls to tempfile() as well as runs of "apt-get update"
    # which will create temporary clearsigned.message.XXXXXX files to verify
    # signatures.
    #
    # Setting TMPDIR to inside the chroot is also necessary for when packages
    # are installed with apt from outside the chroot with
    # DPkg::Chroot-Directory
    {
        ## no critic (Variables::RequireLocalizedPunctuationVars)
        $ENV{"TMPDIR"} = "$options->{root}/tmp";
    }

    my ($conf, $tmpfile)
      = tempfile("mmdebstrap.apt.conf.XXXXXXXXXXXX", TMPDIR => 1)
      or error "cannot open apt.conf: $!";
    print $conf "Apt::Architecture \"$options->{nativearch}\";\n";
    # the host system might have configured additional architectures
    # force only the native architecture
    if (scalar @{ $options->{foreignarchs} } > 0) {
        print $conf "Apt::Architectures { \"$options->{nativearch}\"; ";
        foreach my $arch (@{ $options->{foreignarchs} }) {
            print $conf "\"$arch\"; ";
        }
        print $conf "};\n";
    } else {
        print $conf "Apt::Architectures \"$options->{nativearch}\";\n";
    }
    print $conf "Dir \"$options->{root}\";\n";
    # not needed anymore for apt 1.3 and newer
    print $conf
      "Dir::State::Status \"$options->{root}/var/lib/dpkg/status\";\n";
    # for authentication, use the keyrings from the host
    print $conf "Dir::Etc::Trusted \"$options->{apttrusted}\";\n";
    print $conf "Dir::Etc::TrustedParts \"$options->{apttrustedparts}\";\n";
    if ($options->{variant} ne 'apt') {
        # apt considers itself essential. Thus, when generating an EDSP
        # document for an external solver, it will add the Essential:yes field
        # to the apt package stanza. This is unnecessary for any other variant
        # than 'apt' because in all other variants we compile the set of
        # packages we consider essential ourselves and for the 'essential'
        # variant it would even be wrong to add apt. This workaround is only
        # needed when apt is used with an external solver but doesn't hurt
        # otherwise and we don't have a good way to figure out whether apt is
        # using an external solver or not short of parsing the --aptopt
        # options.
        print $conf "pkgCacheGen::ForceEssential \",\";\n";
    }
    close $conf;

    # We put certain configuration items in their own configuration file
    # because they have to be valid for apt invocation from outside as well as
    # from inside the chroot.
    # The config filename is chosen such that any settings in it will be
    # overridden by what the user specified with --aptopt.
    if (!-e "$options->{root}/etc/apt/apt.conf.d/00mmdebstrap") {
        open my $fh, '>', "$options->{root}/etc/apt/apt.conf.d/00mmdebstrap"
          or error "cannot open /etc/apt/apt.conf.d/00mmdebstrap: $!";
        print $fh "Apt::Install-Recommends false;\n";
        print $fh "Acquire::Languages \"none\";\n";
        close $fh;
    }

    # apt-get update requires a status file to exist
    if (!-e "$options->{root}/var/lib/dpkg/status") {
        open my $fh, '>', "$options->{root}/var/lib/dpkg/status"
          or error "failed to open(): $!";
        close $fh;
    }

    # In theory, /var/lib/dpkg/arch is only useful if there are foreign
    # architectures configured or if the architecture of a chrootless chroot
    # is different from the native architecture outside the chroot.
    # We nevertheless always add /var/lib/dpkg/arch to make a chroot built the
    # normal way bit-by-bit identical to a foreign arch chroot built in
    # chrootless mode.
    # NOTE(review): $hostarch is currently not used below -- presumably a
    # leftover from when the arch file was written conditionally; kept because
    # removing it would drop the (harmless) dpkg invocation.
    chomp(my $hostarch = `dpkg --print-architecture`);
    if (!-e "$options->{root}/var/lib/dpkg/arch") {
        open my $fh, '>', "$options->{root}/var/lib/dpkg/arch"
          or error "cannot open /var/lib/dpkg/arch: $!";
        print $fh "$options->{nativearch}\n";
        foreach my $arch (@{ $options->{foreignarchs} }) {
            print $fh "$arch\n";
        }
        close $fh;
    }

    # user-supplied apt options: either whole files (readable paths) copied
    # verbatim or literal option strings, normalized to end in ";\n"
    if (scalar @{ $options->{aptopts} } > 0
        and (!-e "$options->{root}/etc/apt/apt.conf.d/99mmdebstrap")) {
        open my $fh, '>', "$options->{root}/etc/apt/apt.conf.d/99mmdebstrap"
          or error "cannot open /etc/apt/apt.conf.d/99mmdebstrap: $!";
        foreach my $opt (@{ $options->{aptopts} }) {
            if (-r $opt) {
                # flush handle because copy() uses syswrite() which bypasses
                # buffered IO
                $fh->flush();
                copy $opt, $fh or error "cannot copy $opt: $!";
            } else {
                print $fh $opt;
                if ($opt !~ /;$/) {
                    print $fh ';';
                }
                if ($opt !~ /\n$/) {
                    print $fh "\n";
                }
            }
        }
        close $fh;
        if ($verbosity_level >= 3) {
            debug "content of /etc/apt/apt.conf.d/99mmdebstrap:";
            copy("$options->{root}/etc/apt/apt.conf.d/99mmdebstrap", \*STDERR);
        }
    }

    # user-supplied dpkg options, analogous to the apt options above but
    # without the trailing-semicolon normalization (dpkg.cfg syntax)
    if (scalar @{ $options->{dpkgopts} } > 0
        and (!-e "$options->{root}/etc/dpkg/dpkg.cfg.d/99mmdebstrap")) {
        # FIXME: in chrootless mode, dpkg will only read the configuration
        # from the host -- see #808203
        if ($options->{mode} eq 'chrootless') {
            # FIX: the two concatenated literals previously produced the
            # garbled word "inchrootless" (missing space)
            warning('dpkg is unable to read an alternative configuration in '
                  . 'chrootless mode -- see Debian bug #808203');
        }
        open my $fh, '>', "$options->{root}/etc/dpkg/dpkg.cfg.d/99mmdebstrap"
          or error "cannot open /etc/dpkg/dpkg.cfg.d/99mmdebstrap: $!";
        foreach my $opt (@{ $options->{dpkgopts} }) {
            if (-r $opt) {
                # flush handle because copy() uses syswrite() which bypasses
                # buffered IO
                $fh->flush();
                copy $opt, $fh or error "cannot copy $opt: $!";
            } else {
                print $fh $opt;
                if ($opt !~ /\n$/) {
                    print $fh "\n";
                }
            }
        }
        close $fh;
        if ($verbosity_level >= 3) {
            debug "content of /etc/dpkg/dpkg.cfg.d/99mmdebstrap:";
            copy("$options->{root}/etc/dpkg/dpkg.cfg.d/99mmdebstrap",
                \*STDERR);
        }
    }

    if (!-e "$options->{root}/etc/fstab") {
        open my $fh, '>', "$options->{root}/etc/fstab"
          or error "cannot open fstab: $!";
        print $fh "# UNCONFIGURED FSTAB FOR BASE SYSTEM\n";
        close $fh;
        chmod 0644, "$options->{root}/etc/fstab"
          or error "cannot chmod fstab: $!";
    }

    # write /etc/apt/sources.list and files in /etc/apt/sources.list.d/
    if (scalar @{ $options->{sourceslists} } > 0) {
        my $firstentry = $options->{sourceslists}->[0];
        # if the first sources.list entry is of one-line type and without
        # explicit filename, then write out an actual /etc/apt/sources.list
        # otherwise everything goes into /etc/apt/sources.list.d
        my $fname;
        if ($firstentry->{type} eq 'one-line'
            && !defined $firstentry->{fname}) {
            $fname = "$options->{root}/etc/apt/sources.list";
        } else {
            $fname = "$options->{root}/etc/apt/sources.list.d/0000";
            if (defined $firstentry->{fname}) {
                $fname .= $firstentry->{fname};
                if (   $firstentry->{fname} !~ /\.list/
                    && $firstentry->{fname} !~ /\.sources/) {
                    if ($firstentry->{type} eq 'one-line') {
                        $fname .= '.list';
                    } elsif ($firstentry->{type} eq 'deb822') {
                        $fname .= '.sources';
                    } else {
                        error "invalid type: $firstentry->{type}";
                    }
                }
            } else {
                # if no filename is given, then this must be a deb822 file
                # because if it was a one-line type file, then it would've been
                # written to /etc/apt/sources.list
                $fname .= 'main.sources';
            }
        }
        if (!-e $fname) {
            open my $fh, '>', "$fname" or error "cannot open $fname: $!";
            print $fh $firstentry->{content};
            close $fh;
        }
        # everything else goes into /etc/apt/sources.list.d/
        for (my $i = 1 ; $i < scalar @{ $options->{sourceslists} } ; $i++) {
            my $entry = $options->{sourceslists}->[$i];
            my $fname = "$options->{root}/etc/apt/sources.list.d/"
              . sprintf("%04d", $i);
            if (defined $entry->{fname}) {
                $fname .= $entry->{fname};
                if (   $entry->{fname} !~ /\.list/
                    && $entry->{fname} !~ /\.sources/) {
                    if ($entry->{type} eq 'one-line') {
                        $fname .= '.list';
                    } elsif ($entry->{type} eq 'deb822') {
                        $fname .= '.sources';
                    } else {
                        error "invalid type: $entry->{type}";
                    }
                }
            } else {
                if ($entry->{type} eq 'one-line') {
                    $fname .= 'main.list';
                } elsif ($entry->{type} eq 'deb822') {
                    $fname .= 'main.sources';
                } else {
                    error "invalid type: $entry->{type}";
                }
            }
            if (!-e $fname) {
                open my $fh, '>', "$fname" or error "cannot open $fname: $!";
                print $fh $entry->{content};
                close $fh;
            }
        }
    }

    # allow network access from within
    foreach my $file ("/etc/resolv.conf", "/etc/hostname") {
        if (!-e $file) {
            # FIX: this warning previously also fired when the host *did*
            # have $file but the chroot already contained it (the old
            # condition was "-e $file && !-e chroot/$file" with the warning
            # in the else branch). Now it is only emitted when the host
            # really lacks the file.
            warning("Host system does not have a $file to copy into the"
                  . " rootfs.");
            next;
        }
        # do not clobber a file that already exists inside the chroot (for
        # example one created by a setup hook)
        next if -e "$options->{root}/$file";
        # this will create a new file with 644 permissions and copy
        # contents only even if $file was a symlink
        copy($file, "$options->{root}/$file")
          or error "cannot copy $file: $!";
        # if the source was a regular file, preserve the permissions
        if (-f $file) {
            my $mode = (stat($file))[2];
            $mode &= oct(7777);    # mask off bits that aren't the mode
            chmod $mode, "$options->{root}/$file"
              or error "cannot chmod $file: $!";
        }
    }

    # create device nodes below /dev according to the global @devfiles table
    # (fname, mode, type, linkname, devmajor, devminor)
    if ($options->{havemknod}) {
        foreach my $file (@devfiles) {
            my ($fname, $mode, $type, $linkname, $devmajor, $devminor)
              = @{$file};
            if ($type == 0) {    # normal file
                error "type 0 not implemented";
            } elsif ($type == 1) {    # hardlink
                error "type 1 not implemented";
            } elsif ($type == 2) {    # symlink
                if (    $options->{mode} eq 'fakechroot'
                    and $linkname =~ /^\/proc/) {
                    # there is no /proc in fakechroot mode
                    next;
                }
                symlink $linkname, "$options->{root}/dev/$fname"
                  or error "cannot create symlink ./dev/$fname";
                next;    # chmod cannot work on symlinks
            } elsif ($type == 3) {    # character special
                0 == system('mknod', "$options->{root}/dev/$fname", 'c',
                    $devmajor, $devminor)
                  or error "mknod failed: $?";
            } elsif ($type == 4) {    # block special
                0 == system('mknod', "$options->{root}/dev/$fname", 'b',
                    $devmajor, $devminor)
                  or error "mknod failed: $?";
            } elsif ($type == 5) {    # directory
                if (-e "$options->{root}/dev/$fname") {
                    if (!-d "$options->{root}/dev/$fname") {
                        error
                          "./dev/$fname already exists but is not a directory";
                    }
                } else {
                    my $num_created = make_path "$options->{root}/dev/$fname",
                      { error => \my $err };
                    if ($err && @$err) {
                        error(
                            join "; ",
                            (
                                map { "cannot create " . (join ": ", %{$_}) }
                                  @$err
                            ));
                    } elsif ($num_created == 0) {
                        error "cannot create $options->{root}/dev/$fname";
                    }
                }
            } else {
                error "unsupported type: $type";
            }
            chmod $mode, "$options->{root}/dev/$fname"
              or error "cannot chmod ./dev/$fname: $!";
        }
    }

    # we tell apt about the configuration via a config file passed via the
    # APT_CONFIG environment variable instead of using the --option command
    # line arguments because configuration settings like Dir::Etc have already
    # been evaluated at the time that apt takes its command line arguments
    # into account.
    {
        ## no critic (Variables::RequireLocalizedPunctuationVars)
        $ENV{"APT_CONFIG"} = "$tmpfile";
    }
    # we have to make the config file world readable so that a possible
    # /usr/lib/apt/solvers/apt process which is run by the _apt user is also
    # able to read it
    chmod 0644, "$tmpfile" or error "cannot chmod $tmpfile: $!";
    if ($verbosity_level >= 3) {
        0 == system('apt-get', '--version')
          or error "apt-get --version failed: $?";
        0 == system('apt-config', 'dump') or error "apt-config failed: $?";
        debug "content of $tmpfile:";
        copy($tmpfile, \*STDERR);
    }

    if ($options->{mode} ne 'fakechroot') {
        # Apt dropping privileges to another user than root is not useful in
        # fakechroot mode because all users are faked and thus there is no real
        # privilege difference anyways. We could set APT::Sandbox::User "root"
        # in fakechroot mode but we don't because if we would, then
        # /var/cache/apt/archives/partial/ and /var/lib/apt/lists/partial/
        # would not be owned by the _apt user if mmdebstrap was run in
        # fakechroot mode.
        #
        # when apt-get update is run by the root user, then apt will attempt to
        # drop privileges to the _apt user. This will fail if the _apt user
        # does not have permissions to read the root directory. In that case,
        # we have to disable apt sandboxing. This can for example happen in
        # root mode when the path of the chroot is not in a world-readable
        # location.
        my $partial = '/var/lib/apt/lists/partial';
        my @testcmd = (
            '/usr/lib/apt/apt-helper', 'drop-privs', '--', 'test',
            '-r', "$options->{root}$partial"
        );
        my $pid = fork() // error "fork() failed: $!";
        if ($pid == 0) {
            open(STDOUT, '>', '/dev/null')
              or error "cannot open /dev/null for writing: $!";
            open(STDERR, '>', '/dev/null')
              or error "cannot open /dev/null for writing: $!";
            exec { $testcmd[0] } @testcmd
              or error("cannot exec " . (join " ", @testcmd) . ": $!");
        }
        waitpid $pid, 0;
        if ($? != 0) {
            warning "Download is performed unsandboxed as root as file"
              . " $options->{root}$partial couldn't be accessed by user _apt";
            open my $fh, '>>', $tmpfile
              or error "cannot open $tmpfile for appending: $!";
            print $fh "APT::Sandbox::User \"root\";\n";
            close $fh;
        }
    }

    return;
}
|
|
|
# Run "apt-get update" for the chroot unless a setup hook appears to have
# already run it, then verify via "apt-get indextargets" that at least one
# package index was fetched; otherwise dump diagnostics (configured sources,
# apt-cache policy) and abort.
#
# $options: hashref with root, sourceslists, nativearch, suite.
# Returns nothing; dies via error() if no indices were found.
sub run_update() {
    my $options = shift;

    my $aptopts = {
        ARGV => ['apt-get', 'update', '--error-on=any'],
        CHDIR => $options->{root},
    };

    # Maybe "apt-get update" was already run in the setup hook? If yes, skip
    # running it here. We are overly strict on purpose because better to run it
    # twice on accident than not at all.
    if (   !-d "$options->{root}/var/lib/apt/lists/auxfiles"
        || !-d "$options->{root}/var/lib/apt/lists/partial"
        || !-e "$options->{root}/var/lib/apt/lists/lock"
        || !-e "$options->{root}/var/cache/apt/pkgcache.bin"
        || !-e "$options->{root}/var/cache/apt/srcpkgcache.bin") {
        info "running apt-get update...";
        run_apt_progress($aptopts);
    } else {
        info "skipping apt-get update because it was already run";
    }

    # check if anything was downloaded at all
    {
        # FIX: the defined-or must apply to the return value of open().
        # Previously it was written as
        #   open my $fh, '-|', 'apt-get', 'indextargets' // error ...
        # where "//" bound to the (always defined) string 'indextargets', so a
        # failed fork went completely undetected. A pipe open returns the
        # child pid or undef on fork failure, hence "//" (not "or").
        open(my $fh, '-|', 'apt-get', 'indextargets')
          // error "failed to fork(): $!";
        chomp(
            my $indextargets = do { local $/; <$fh> }
        );
        close $fh;
        if ($indextargets eq '') {
            warning("apt-get indextargets output is empty");
            if (scalar @{ $options->{sourceslists} } == 0) {
                warning "no known apt sources.list entry";
            }
            # show the user what sources were configured...
            for my $list (@{ $options->{sourceslists} }) {
                if (defined $list->{fname}) {
                    info("Filename: $list->{fname}");
                }
                info("Type: $list->{type}");
                info("Content:");
                for my $line (split "\n", $list->{content}) {
                    info(" $line");
                }
            }
            # ...and what apt thinks about them
            open(my $fh, '-|', 'apt-cache', 'policy')
              // error "failed to fork(): $!";
            while (my $line = <$fh>) {
                chomp $line;
                info $line;
            }
            close $fh;
            my $msg
              = "apt-get update did not find any indices "
              . "for architecture '$options->{nativearch}' in ";
            if (length $options->{suite}) {
                $msg .= "suite '$options->{suite}'";
            } else {
                $msg .= "the configured apt sources";
            }
            error $msg;
        }
    }

    return;
}
|
|
|
# Download (but do not install) the package set selected by the variant, via
# apt with a chroot-local archives directory.
#
# $options: hashref with dryrun, variant, include, root, suite, nativearch.
# Returns a two-element list of arrayrefs:
#   - paths of the downloaded .deb files, relative to the chroot root
#   - basenames of .deb files that were already present in the apt cache
#
# Historical note: downloading first and installing with dpkg later (instead
# of a single outside "apt-get install" with DPkg::Chroot-Directory) is
# deliberate -- the base-passwd preinst must run early enough to provide
# /etc/passwd for the other packages.
sub run_download() {
    my $options = shift;

    my @cached_debs = ();
    my @dl_debs = ();

    # Remember which .deb files already sit in the chroot's apt cache so the
    # caller can distinguish fresh downloads from reused ones. Only relevant
    # when we will actually download something (not a dry run, and either a
    # non-extract/custom variant or an explicit --include list).
    my $want_cache_scan
      = !$options->{dryrun}
      && ((none { $_ eq $options->{variant} } ('extract', 'custom'))
        || scalar @{ $options->{include} } != 0)
      && -d "$options->{root}/var/cache/apt/archives/";
    if ($want_cache_scan) {
        my $apt_archives = "/var/cache/apt/archives/";
        opendir my $dh, "$options->{root}/$apt_archives"
          or error "cannot read $apt_archives";
        # keep only regular files ending in .deb
        @cached_debs = grep {
            /\.deb$/ && -f "$options->{root}/$apt_archives/$_"
        } readdir $dh;
        closedir $dh;
    }

    # Select the package set per variant:
    #  - extract/custom: exactly the --include list (or nothing at all)
    #  - apt: dist-upgrade against the empty status file pulls in all
    #    Essential:yes packages plus apt (libapt treats apt as essential)
    #  - priority variants: an explicit apt pattern over essential packages,
    #    optionally narrowed to the requested suite/codename, since
    #    pkgCacheGen::ForceEssential "," disables the implicit apt essential
    my $variant = $options->{variant};
    if (any { $_ eq $variant } ('extract', 'custom')) {
        if (scalar @{ $options->{include} } == 0) {
            info "nothing to download -- skipping...";
            return ([], \@cached_debs);
        }
        @dl_debs = run_apt_download_progress({
            APT_ARGV => ['install', @{ $options->{include} }],
            dryrun => $options->{dryrun},
        });
    } elsif ($variant eq 'apt') {
        @dl_debs = run_apt_download_progress({
            APT_ARGV => ['dist-upgrade'],
            dryrun => $options->{dryrun},
        });
    } elsif (
        any { $_ eq $variant }
        ('essential', 'standard', 'important', 'required', 'buildd')
    ) {
        my $suite_term
          = length($options->{suite})
          ? '?or(?archive(^'
          . $options->{suite}
          . '$),?codename(^'
          . $options->{suite} . '$)),'
          : '';
        my $pattern
          = '?narrow('
          . $suite_term
          . '?architecture('
          . $options->{nativearch}
          . '),?essential)';
        @dl_debs = run_apt_download_progress({
            APT_ARGV => ['install', $pattern],
            dryrun => $options->{dryrun},
        });
    } else {
        error "unknown variant: $options->{variant}";
    }

    # Normalize the downloaded paths: apt reports paths as seen from outside
    # the chroot, but the caller wants chroot-relative paths.
    my $root = $options->{root};
    my @essential_pkgs;
    foreach my $pkg (@dl_debs) {
        if (index($pkg, $root) == 0) {
            # common case: the file lives below the chroot directory --
            # strip that prefix off
            if (!-e $pkg) {
                error "cannot find package file $pkg";
            }
            push @essential_pkgs, substr($pkg, length($root));
        } else {
            # the path does not start with the chroot directory, which
            # happens with file:// mirrors; the very same path must then
            # also be reachable from inside the chroot
            if (!-e "$root/$pkg") {
                error "package file $pkg not accessible from chroot directory"
                  . " -- use copy:// instead of file:// or a bind-mount. You"
                  . " can also try using --hook-dir=/usr/share/mmdebstrap/"
                  . "hooks/file-mirror-automount to automatically create"
                  . " bind-mounts or copy the files as necessary.";
            }
            push @essential_pkgs, $pkg;
        }
    }

    return (\@essential_pkgs, \@cached_debs);
}
|
|
|
# Extract the downloaded essential packages into the chroot without running
# any maintainer scripts. For each .deb a pipeline of up to three child
# processes is built:
#
#   dpkg-deb --fsys-tarfile  [-> tarfilter]  -> tar --extract
#
# The optional tarfilter stage is inserted only when the user configured
# path-exclude/path-include dpkg options.
#
# $options:        hashref with dryrun, root.
# $essential_pkgs: arrayref of .deb paths relative to the chroot root.
# Returns nothing; dies via error() on any pipe/fork/exec/child failure.
sub run_extract() {
    my $options = shift;
    my $essential_pkgs = shift;

    if ($options->{dryrun}) {
        info "skip extracting packages because of --dry-run";
        return;
    }

    if (scalar @{$essential_pkgs} == 0) {
        info "nothing to extract -- skipping...";
        return;
    }

    info "extracting archives...";
    print_progress 0.0;
    my $counter = 0;
    my $total = scalar @{$essential_pkgs};
    foreach my $deb (@{$essential_pkgs}) {
        $counter += 1;

        my $tarfilter;
        my @tarfilterargs;
        # if the path-excluded option was added to the dpkg config,
        # insert the tarfilter between dpkg-deb and tar
        if (-e "$options->{root}/etc/dpkg/dpkg.cfg.d/99mmdebstrap") {
            open(my $fh, '<',
                "$options->{root}/etc/dpkg/dpkg.cfg.d/99mmdebstrap")
              or error "cannot open /etc/dpkg/dpkg.cfg.d/99mmdebstrap: $!";
            my @matches = grep { /^path-(?:exclude|include)=/ } <$fh>;
            close $fh;
            chop @matches;    # remove trailing newline
            # turn "path-exclude=..." into "--path-exclude=..." CLI arguments
            @tarfilterargs = map { "--" . $_ } @matches;
        }
        if (scalar @tarfilterargs > 0) {
            # prefer the in-tree script (running from a source checkout),
            # fall back to the installed mmtarfilter from PATH
            if (-x "./tarfilter") {
                $tarfilter = "./tarfilter";
            } else {
                $tarfilter = "mmtarfilter";
            }
        }

        my $dpkg_writer;
        my $tar_reader;
        my $filter_reader;
        my $filter_writer;
        # with a filter: dpkg-deb -> (filter_reader/dpkg_writer pipe)
        #                filter   -> (tar_reader/filter_writer pipe) -> tar
        # without:       dpkg-deb -> (tar_reader/dpkg_writer pipe)   -> tar
        if (scalar @tarfilterargs > 0) {
            pipe $filter_reader, $dpkg_writer or error "pipe failed: $!";
            pipe $tar_reader, $filter_writer or error "pipe failed: $!";
        } else {
            pipe $tar_reader, $dpkg_writer or error "pipe failed: $!";
        }
        # not using dpkg-deb --extract as that would replace the
        # merged-usr symlinks with plain directories
        # https://bugs.debian.org/989602
        # not using dpkg --unpack because that would try running preinst
        # maintainer scripts
        my $pid1 = fork() // error "fork() failed: $!";
        if ($pid1 == 0) {
            # child 1: dpkg-deb writing the tar stream to its STDOUT;
            # close the pipe ends this child does not use so EOF propagates
            open(STDOUT, '>&', $dpkg_writer) or error "cannot open STDOUT: $!";
            close($tar_reader) or error "cannot close tar_reader: $!";
            if (scalar @tarfilterargs > 0) {
                close($filter_reader)
                  or error "cannot close filter_reader: $!";
                close($filter_writer)
                  or error "cannot close filter_writer: $!";
            }
            debug("running dpkg-deb --fsys-tarfile $options->{root}/$deb");
            # disable coverage collection in the child before exec'ing
            eval { Devel::Cover::set_coverage("none") } if $is_covering;
            exec 'dpkg-deb', '--fsys-tarfile', "$options->{root}/$deb";
        }
        my $pid2;
        if (scalar @tarfilterargs > 0) {
            $pid2 = fork() // error "fork() failed: $!";
            if ($pid2 == 0) {
                # child 2 (optional): tarfilter between dpkg-deb and tar
                open(STDIN, '<&', $filter_reader)
                  or error "cannot open STDIN: $!";
                open(STDOUT, '>&', $filter_writer)
                  or error "cannot open STDOUT: $!";
                close($dpkg_writer) or error "cannot close dpkg_writer: $!";
                close($tar_reader) or error "cannot close tar_reader: $!";
                debug("running $tarfilter " . (join " ", @tarfilterargs));
                eval { Devel::Cover::set_coverage("none") } if $is_covering;
                exec $tarfilter, @tarfilterargs;
            }
        }
        my $pid3 = fork() // error "fork() failed: $!";
        if ($pid3 == 0) {
            # child 3: tar extracting the stream into the chroot;
            # --keep-directory-symlink preserves merged-usr symlinks
            open(STDIN, '<&', $tar_reader) or error "cannot open STDIN: $!";
            close($dpkg_writer) or error "cannot close dpkg_writer: $!";
            if (scalar @tarfilterargs > 0) {
                close($filter_reader)
                  or error "cannot close filter_reader: $!";
                close($filter_writer)
                  or error "cannot close filter_writer: $!";
            }
            debug( "running tar -C $options->{root}"
                  . " --keep-directory-symlink --extract --file -");
            eval { Devel::Cover::set_coverage("none") } if $is_covering;
            exec 'tar', '-C', $options->{root},
              '--keep-directory-symlink', '--extract', '--file', '-';
        }
        # parent: close all pipe ends so the children see EOF, then reap
        # each child and check its exit status
        close($dpkg_writer) or error "cannot close dpkg_writer: $!";
        close($tar_reader) or error "cannot close tar_reader: $!";
        if (scalar @tarfilterargs > 0) {
            close($filter_reader) or error "cannot close filter_reader: $!";
            close($filter_writer) or error "cannot close filter_writer: $!";
        }
        waitpid($pid1, 0);
        $? == 0 or error "dpkg-deb --fsys-tarfile failed: $?";
        if (scalar @tarfilterargs > 0) {
            waitpid($pid2, 0);
            $? == 0 or error "tarfilter failed: $?";
        }
        waitpid($pid3, 0);
        $? == 0 or error "tar --extract failed: $?";
        print_progress($counter / $total * 100, "extracting");
    }
    print_progress "done";

    return;
}
|
|
|
sub run_prepare { |
|
my $options = shift; |
|
|
|
if ($options->{mode} eq 'fakechroot') { |
|
# this borrows from and extends |
|
# /etc/fakechroot/debootstrap.env and |
|
# /etc/fakechroot/chroot.env |
|
{ |
|
my %subst = ( |
|
chroot => "/usr/sbin/chroot.fakechroot", |
|
mkfifo => "/bin/true", |
|
ldconfig => (getcwd() . '/ldconfig.fakechroot'), |
|
ldd => "/usr/bin/ldd.fakechroot", |
|
ischroot => "/bin/true" |
|
); |
|
if (!-x $subst{ldconfig}) { |
|
$subst{ldconfig} |
|
= '/usr/libexec/mmdebstrap/ldconfig.fakechroot'; |
|
} |
|
my %mergedusrmap = ( |
|
"/bin" => "/usr/bin", |
|
"/sbin" => "/usr/sbin", |
|
"/usr/bin/" => "/bin", |
|
"/usr/sbin" => "/sbin" |
|
); |
|
my %fakechrootsubst; |
|
foreach my $d (split ':', $ENV{PATH}) { |
|
foreach my $k (sort %subst) { |
|
my $mapped_path = $mergedusrmap{$d} // $d; |
|
next if !-e "$d/$k" && !-e "$mapped_path/$k"; |
|
$fakechrootsubst{"$d/$k=$subst{$k}"} = 1; |
|
$fakechrootsubst{"$mapped_path/$k=$subst{$k}"} = 1; |
|
} |
|
} |
|
if (defined $ENV{FAKECHROOT_CMD_SUBST} |
|
&& $ENV{FAKECHROOT_CMD_SUBST} |