mmdebstrap/mmdebstrap

6715 lines
265 KiB
Text
Raw Normal View History

2018-09-18 09:20:24 +00:00
#!/usr/bin/perl
#
# Copyright: 2018 Johannes Schauer <josch@mister-muffin.de>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# The software is provided "as is", without warranty of any kind, express or
# implied, including but not limited to the warranties of merchantability,
# fitness for a particular purpose and noninfringement. In no event shall the
# authors or copyright holders be liable for any claim, damages or other
# liability, whether in an action of contract, tort or otherwise, arising
# from, out of or in connection with the software or the use or other dealings
# in the software.
2018-09-18 09:20:24 +00:00
use strict;
use warnings;
2020-09-18 11:43:42 +00:00
our $VERSION = '0.7.1';
2019-02-23 07:55:31 +00:00
2018-09-18 09:20:24 +00:00
use English;
use Getopt::Long;
use Pod::Usage;
use File::Copy;
use File::Path qw(make_path remove_tree);
use File::Temp qw(tempfile tempdir);
use File::Basename;
use File::Find;
use Cwd qw(abs_path getcwd);
2020-01-09 07:39:40 +00:00
require "syscall.ph"; ## no critic (Modules::RequireBarewordIncludes)
use Fcntl qw(S_IFCHR S_IFBLK FD_CLOEXEC F_GETFD F_SETFD);
2018-09-23 17:36:07 +00:00
use List::Util qw(any none);
use POSIX qw(SIGINT SIGHUP SIGPIPE SIGTERM SIG_BLOCK SIG_UNBLOCK strftime);
use Carp;
use Term::ANSIColor;
use Socket;
use Time::HiRes;
use version;
2018-09-18 09:20:24 +00:00
2020-01-09 07:39:40 +00:00
## no critic (InputOutput::RequireBriefOpen)

# Linux namespace flags from sched.h, passed to the raw unshare() syscall.
# use typeglob constants because "use constant" has several drawbacks as
# explained in the documentation for the Readonly CPAN module
*CLONE_NEWNS   = \0x20000;       # mount namespace
*CLONE_NEWUTS  = \0x4000000;     # hostname and domainname
*CLONE_NEWIPC  = \0x8000000;     # System V IPC and POSIX message queues
*CLONE_NEWUSER = \0x10000000;    # user and group ids
*CLONE_NEWPID  = \0x20000000;    # process ids
*CLONE_NEWNET  = \0x40000000;    # network stack
our (
    $CLONE_NEWNS, $CLONE_NEWUTS, $CLONE_NEWIPC,
    $CLONE_NEWUSER, $CLONE_NEWPID, $CLONE_NEWNET
);
2018-09-18 09:20:24 +00:00
# Table of the device nodes, symlinks and directories to create under /dev
# inside the chroot.
# type codes:
# 0 -> normal file
# 1 -> hardlink
# 2 -> symlink
# 3 -> character special
# 4 -> block special
# 5 -> directory
my @devfiles = (
    # filename        mode      type link target        major  minor
    ["./dev/",        oct(755), 5,   '',                undef, undef],
    ["./dev/console", oct(666), 3,   '',                5,     1],
    ["./dev/fd",      oct(777), 2,   '/proc/self/fd',   undef, undef],
    ["./dev/full",    oct(666), 3,   '',                1,     7],
    ["./dev/null",    oct(666), 3,   '',                1,     3],
    ["./dev/ptmx",    oct(666), 3,   '',                5,     2],
    ["./dev/pts/",    oct(755), 5,   '',                undef, undef],
    ["./dev/random",  oct(666), 3,   '',                1,     8],
    ["./dev/shm/",    oct(755), 5,   '',                undef, undef],
    ["./dev/stderr",  oct(777), 2,   '/proc/self/fd/2', undef, undef],
    ["./dev/stdin",   oct(777), 2,   '/proc/self/fd/0', undef, undef],
    ["./dev/stdout",  oct(777), 2,   '/proc/self/fd/1', undef, undef],
    ["./dev/tty",     oct(666), 3,   '',                5,     0],
    ["./dev/urandom", oct(666), 3,   '',                1,     9],
    ["./dev/zero",    oct(666), 3,   '',                1,     5],
);
# verbosity levels:
# 0 -> print nothing
# 1 -> normal output and progress bars
# 2 -> verbose output
# 3 -> debug output
my $verbosity_level = 1;

# true when running under Devel::Cover; used to switch coverage off in
# children right before exec() so that exec()ed programs are not instrumented
my $is_covering = !!(eval { Devel::Cover::get_coverage() });
# the reason why Perl::Critic warns about this is, that it suspects that the
# programmer wants to implement a test whether the terminal is interactive or
# not, in which case, complex interactions with the magic *ARGV indeed make it
# advisable to use IO::Interactive. In our case, we do not want to create an
# interactivity check but just want to check whether STDERR is opened to a tty,
# so our use of -t is fine and not "fragile and complicated" as is written in
# the description of InputOutput::ProhibitInteractiveTest. Also see
# https://github.com/Perl-Critic/Perl-Critic/issues/918
# Return 1 if STDERR is connected to a terminal and 0 otherwise.
sub stderr_is_tty() {
    ## no critic (InputOutput::ProhibitInteractiveTest)
    return (-t STDERR) ? 1 : 0;
}
# Print a debug message ("D: <pid> <caller line> <msg>") to STDERR.
# Only active at verbosity level 3 (debug).
sub debug {
    return if $verbosity_level < 3;
    my $msg = shift;
    my (undef, undef, $line) = caller;
    $msg = "D: $PID $line $msg";
    $msg = colored($msg, 'clear') if stderr_is_tty();
    print STDERR "$msg\n";
    return;
}
# Print an informational message ("I: <msg>") to STDERR unless the
# verbosity level is 0. At debug verbosity the pid and the caller's line
# number are prepended as well.
sub info {
    return if $verbosity_level == 0;
    my $msg = shift;
    if ($verbosity_level >= 3) {
        my (undef, undef, $line) = caller;
        $msg = "$PID $line $msg";
    }
    $msg = "I: $msg";
    $msg = colored($msg, 'green') if stderr_is_tty();
    print STDERR "$msg\n";
    return;
}
# Print a warning message ("W: <msg>") to STDERR unless the verbosity
# level is 0.
sub warning {
    return if $verbosity_level == 0;
    my $msg = "W: " . shift;
    $msg = colored($msg, 'bold yellow') if stderr_is_tty();
    print STDERR "$msg\n";
    return;
}
# Abort with an error message ("E: <msg>"). At debug verbosity the error
# is raised via croak so that a backtrace is printed; otherwise via die.
sub error {
    # if error() is called with the string from a previous error() that was
    # caught inside an eval(), then the string will have a newline which we
    # are stripping here
    chomp(my $msg = shift);
    $msg = "E: $msg";
    $msg = colored($msg, 'bold red') if stderr_is_tty();
    if ($verbosity_level == 3) {
        croak $msg;    # produces a backtrace
    }
    die "$msg\n";
}
# check whether a directory is mounted by comparing the device number of the
# directory itself with its parent
sub is_mountpoint {
    my $dir = shift;
    return 0 unless -e $dir;
    my @self_stat   = stat "$dir/.";
    my @parent_stat = stat "$dir/..";
    # a different device number means a filesystem boundary
    return 1 if $self_stat[0] != $parent_stat[0];
    # identical inode numbers for "." and ".." mean the directory is the
    # root of a filesystem (e.g. "/" itself or a bind mount onto itself)
    return 1 if $self_stat[1] == $parent_stat[1];
    return 0;
}
2018-09-18 09:20:24 +00:00
# tar cannot figure out the decompression program when receiving data on
# standard input, thus we do it ourselves. This is copied from tar's
# src/suffix.c
#
# Returns an arrayref with the compressor command (and arguments) for the
# given output filename, or nothing if no compression applies.
sub get_tar_compressor {
    my $filename = shift;
    # standard input and plain .tar archives are not compressed
    return if $filename eq '-' or $filename =~ /\.tar$/;
    # suffix pattern -> compressor argv, mirroring tar's own table
    my @suffix_table = (
        [qr/\.(gz|tgz|taz)$/,       ['gzip']],
        [qr/\.(Z|taZ)$/,            ['compress']],
        [qr/\.(bz2|tbz|tbz2|tz2)$/, ['bzip2']],
        [qr/\.lz$/,                 ['lzip']],
        [qr/\.(lzma|tlz)$/,         ['lzma']],
        [qr/\.lzo$/,                ['lzop']],
        [qr/\.lz4$/,                ['lz4']],
        [qr/\.(xz|txz)$/,           ['xz', '--threads=0']],
        [qr/\.zst$/,                ['zstd']],
    );
    foreach my $entry (@suffix_table) {
        my ($pattern, $argv) = @{$entry};
        return $argv if $filename =~ $pattern;
    }
    return;
}
2020-01-09 07:39:40 +00:00
# Check whether unprivileged user namespaces ("unshare mode") can be used:
#  - we must not already be root,
#  - unshare(CLONE_NEWUSER) must succeed in a throwaway child process,
#  - the setuid helpers newuidmap(1) and newgidmap(1) must be installed.
# Returns 1 if unshare mode is usable, 0 otherwise. If $verbose is true,
# problems are reported via warning(), otherwise only via debug().
sub test_unshare {
    my $verbose = shift;
    if ($EFFECTIVE_USER_ID == 0) {
        my $msg = "cannot use unshare mode when executing as root";
        if ($verbose) {
            warning $msg;
        } else {
            debug $msg;
        }
        return 0;
    }
    # arguments to syscalls have to be stored in their own variable or
    # otherwise we will get "Modification of a read-only value attempted"
    my $unshare_flags = $CLONE_NEWUSER;
    # we spawn a new process because if unshare succeeds, we would
    # otherwise have unshared the mmdebstrap process itself which we don't want
    my $pid = fork() // error "fork() failed: $!";
    if ($pid == 0) {
        # child: attempt the unshare and report the outcome via exit status
        my $ret = syscall(&SYS_unshare, $unshare_flags);
        if ($ret == 0) {
            exit 0;
        } else {
            my $msg = "unshare syscall failed: $!";
            if ($verbose) {
                warning $msg;
            } else {
                debug $msg;
            }
            exit 1;
        }
    }
    waitpid($pid, 0);
    if (($? >> 8) != 0) {
        return 0;
    }
    # if newuidmap and newgidmap exist, the exit status will be 1 when
    # executed without parameters (127 means the shell could not find them)
    system "newuidmap 2>/dev/null";
    if (($? >> 8) != 1) {
        if (($? >> 8) == 127) {
            my $msg = "cannot find newuidmap";
            if ($verbose) {
                warning $msg;
            } else {
                debug $msg;
            }
        } else {
            my $msg = "newuidmap returned unknown exit status: $?";
            if ($verbose) {
                warning $msg;
            } else {
                debug $msg;
            }
        }
        return 0;
    }
    system "newgidmap 2>/dev/null";
    if (($? >> 8) != 1) {
        if (($? >> 8) == 127) {
            my $msg = "cannot find newgidmap";
            if ($verbose) {
                warning $msg;
            } else {
                debug $msg;
            }
        } else {
            my $msg = "newgidmap returned unknown exit status: $?";
            if ($verbose) {
                warning $msg;
            } else {
                debug $msg;
            }
        }
        return 0;
    }
    return 1;
}
# Read the subordinate id ranges of the current user from /etc/subuid and
# /etc/subgid. Returns a two-element list of idmap entries
#   (["u", 0, $subuid, $count], ["g", 0, $subgid, $count])
# suitable for get_unshare_cmd(), or the empty list (after printing a
# warning) if either file is missing, unreadable or has no entry for the
# user. Only the first matching entry per file is used.
#
# This fixes the previous behavior where, if the user had no entry, the
# values parsed from the *last* line of the file (or undef) were pushed
# anyway, and where the trailing newline was left attached to the range
# field because the line was never chomped.
sub read_subuid_subgid() {
    my $username = getpwuid $<;
    my @result = ();
    foreach my $entry (["u", "/etc/subuid"], ["g", "/etc/subgid"]) {
        my ($type, $file) = @{$entry};
        if (!-e $file) {
            warning "$file doesn't exist";
            return;
        }
        if (!-r $file) {
            warning "$file is not readable";
            return;
        }
        open my $fh, "<", $file
          or error "cannot open $file for reading: $!";
        my $found = 0;
        while (my $line = <$fh>) {
            chomp $line;
            my ($n, $subid, $num_subid) = split(/:/, $line, 3);
            next if $n ne $username;
            # only the first entry for the user is used
            push @result, [$type, 0, $subid, $num_subid];
            $found = 1;
            last;
        }
        close $fh;
        if (!$found) {
            warning "$file does not contain an entry for $username";
            return;
        }
    }
    return @result;
}
# This function spawns two child processes forming the following process tree
#
# A
# |
# fork()
# | \
# B C
# | |
# | fork()
# | | \
# | D E
# | | |
# |unshare()
# | close()
# | | |
# | | read()
# | | newuidmap(D)
# | | newgidmap(D)
# | | /
# | waitpid()
# | |
# | fork()
# | | \
# | F G
# | | |
# | | exec()
# | | /
# | waitpid()
# | /
# waitpid()
#
# To better refer to each individual part, we give each process a new
# identifier after calling fork(). Process A is the main process. After
# executing fork() we call the parent and child B and C, respectively. This
# first fork() is done because we do not want to modify A. B then remains
# waiting for its child C to finish. C calls fork() again, splitting into
# the parent D and its child E. In the parent D we call unshare() and close a
# pipe shared by D and E to signal to E that D is done with calling unshare().
# E notices this by using read() and follows up with executing the tools
# new[ug]idmap on D. E finishes and D continues with doing another fork().
# This is because when unsharing the PID namespace, we need a PID 1 to be kept
# alive or otherwise any child processes cannot fork() anymore themselves. So
# we keep F as PID 1 and finally call exec() in G.
2020-01-09 07:39:40 +00:00
# Run the coderef $cmd inside freshly unshared user, mount, pid, uts and ipc
# namespaces, mapping ids according to $idmap (a list of
# [type, hostid, nsid, range] entries as produced by read_subuid_subgid()).
# Forks; returns the pid of the outer child to the caller, which must
# waitpid() on it. See the large process-tree comment above for the roles of
# processes A/B/C/D/E/F/G.
sub get_unshare_cmd {
    my $cmd = shift;
    my $idmap = shift;
    my $unshare_flags
      = $CLONE_NEWUSER | $CLONE_NEWNS | $CLONE_NEWPID | $CLONE_NEWUTS
      | $CLONE_NEWIPC;
    # network namespace support is intentionally disabled for now
    if (0) {
        $unshare_flags |= $CLONE_NEWNET;
    }
    # fork a new process and let the child get unshare()ed
    # we don't want to unshare the parent process
    my $gcpid = fork() // error "fork() failed: $!";
    if ($gcpid == 0) {
        # Create a pipe for the parent process to signal the child process that
        # it is done with calling unshare() so that the child can go ahead
        # setting up uid_map and gid_map.
        pipe my $rfh, my $wfh;
        # We have to do this dance with forking a process and then modifying
        # the parent from the child because:
        # - new[ug]idmap can only be called on a process id after that process
        #   has unshared the user namespace
        # - a process loses its capabilities if it performs an execve() with
        #   nonzero user ids see the capabilities(7) man page for details.
        # - a process that unshared the user namespace by default does not
        #   have the privileges to call new[ug]idmap on itself
        #
        # this also works the other way around (the child setting up a user
        # namespace and being modified from the parent) but that way, the
        # parent would have to stay around until the child exited (so a pid
        # would be wasted). Additionally, that variant would require an
        # additional pipe to let the parent signal the child that it is done
        # with calling new[ug]idmap. The way it is done here, this signaling
        # can instead be done by wait()-ing for the exit of the child.
        my $ppid = $$;
        my $cpid = fork() // error "fork() failed: $!";
        if ($cpid == 0) {
            # child
            # Close the writing descriptor at our end of the pipe so that we
            # see EOF when parent closes its descriptor.
            close $wfh;
            # Wait for the parent process to finish its unshare() call by
            # waiting for an EOF.
            0 == sysread $rfh, my $c, 1 or error "read() did not receive EOF";
            # The program's new[ug]idmap have to be used because they are
            # setuid root. These privileges are needed to map the ids from
            # /etc/sub[ug]id to the user namespace set up by the parent.
            # Without these privileges, only the id of the user itself can be
            # mapped into the new namespace.
            #
            # Since new[ug]idmap is setuid root we also don't need to write
            # "deny" to /proc/$$/setgroups beforehand (this is otherwise
            # required for unprivileged processes trying to write to
            # /proc/$$/gid_map since kernel version 3.19 for security reasons)
            # and therefore the parent process keeps its ability to change its
            # own group here.
            #
            # Since /proc/$ppid/[ug]id_map can only be written to once,
            # respectively, instead of making multiple calls to new[ug]idmap,
            # we assemble a command line that makes one call each.
            my $uidmapcmd = "";
            my $gidmapcmd = "";
            foreach (@{$idmap}) {
                my ($t, $hostid, $nsid, $range) = @{$_};
                if ($t ne "u" and $t ne "g" and $t ne "b") {
                    error "invalid idmap type: $t";
                }
                if ($t eq "u" or $t eq "b") {
                    $uidmapcmd .= " $hostid $nsid $range";
                }
                if ($t eq "g" or $t eq "b") {
                    $gidmapcmd .= " $hostid $nsid $range";
                }
            }
            my $idmapcmd = '';
            if ($uidmapcmd ne "") {
                0 == system "newuidmap $ppid $uidmapcmd"
                  or error "newuidmap $ppid $uidmapcmd failed: $!";
            }
            if ($gidmapcmd ne "") {
                0 == system "newgidmap $ppid $gidmapcmd"
                  or error "newgidmap $ppid $gidmapcmd failed: $!";
            }
            exit 0;
        }
        # parent
        # After fork()-ing, the parent immediately calls unshare...
        0 == syscall &SYS_unshare, $unshare_flags
          or error "unshare() failed: $!";
        # .. and then signals the child process that we are done with the
        # unshare() call by sending an EOF.
        close $wfh;
        # Wait for the child process to finish its setup by waiting for its
        # exit.
        $cpid == waitpid $cpid, 0 or error "waitpid() failed: $!";
        my $exit = $? >> 8;
        if ($exit != 0) {
            error "child had a non-zero exit status: $exit";
        }
        # Currently we are nobody (uid and gid are 65534). So we become root
        # user and group instead.
        #
        # We are using direct syscalls instead of setting $(, $), $< and $>
        # because then perl would do additional stuff which we don't need or
        # want here, like checking /proc/sys/kernel/ngroups_max (which might
        # not exist). It would also also call setgroups() in a way that makes
        # the root user be part of the group unknown.
        0 == syscall &SYS_setgid, 0 or error "setgid failed: $!";
        0 == syscall &SYS_setuid, 0 or error "setuid failed: $!";
        0 == syscall &SYS_setgroups, 0, 0 or error "setgroups failed: $!";
        if (1) {
            # When the pid namespace is also unshared, then processes expect a
            # master pid to always be alive within the namespace. To achieve
            # this, we fork() here instead of exec() to always have one dummy
            # process running as pid 1 inside the namespace. This is also what
            # the unshare tool does when used with the --fork option.
            #
            # Otherwise, without a pid 1, new processes cannot be forked
            # anymore after pid 1 finished.
            my $cpid = fork() // error "fork() failed: $!";
            if ($cpid != 0) {
                # The parent process will stay alive as pid 1 in this
                # namespace until the child finishes executing. This is
                # important because pid 1 must never die or otherwise nothing
                # new can be forked.
                $cpid == waitpid $cpid, 0 or error "waitpid() failed: $!";
                exit($? >> 8);
            }
        }
        &{$cmd}();
        exit 0;
    }
    # parent
    return $gcpid;
}
2020-01-09 07:39:40 +00:00
# Check whether we are able to create a working character device inside
# $root by actually trying to mknod and then write to a test /dev/null
# node. Returns 1 on success, 0 otherwise; the test node is removed again
# in either case.
sub havemknod {
    my $root = shift;
    my $havemknod = 0;
    if (-e "$root/test-dev-null") {
        error "/test-dev-null already exists";
    }
  TEST: {
        # we fork so that we can read STDERR
        my $pid = open my $fh, '-|' // error "failed to fork(): $!";
        if ($pid == 0) {
            open(STDERR, '>&', STDOUT) or error "cannot open STDERR: $!";
            # we use mknod(1) instead of the system call because creating the
            # right dev_t argument requires makedev(3)
            exec 'mknod', "$root/test-dev-null", 'c', '1', '3';
        }
        # slurp everything mknod printed (stdout and stderr combined)
        chomp(
            my $content = do { local $/; <$fh> }
        );
        close $fh;
        {
            # mknod must have succeeded silently and the node must be a
            # usable, writable character device
            last TEST unless $? == 0 and $content eq '';
            last TEST unless -c "$root/test-dev-null";
            last TEST unless open my $fh, '>', "$root/test-dev-null";
            last TEST unless print $fh 'test';
        }
        $havemknod = 1;
    }
    if (-e "$root/test-dev-null") {
        unlink "$root/test-dev-null"
          or error "cannot unlink /test-dev-null: $!";
    }
    return $havemknod;
}
# Draw a progress bar on STDERR. The argument is either a percentage
# (0..100) or the string "done" which finishes the bar with a newline.
# Output only happens at verbosity level 1 and only when STDERR is a tty.
sub print_progress {
    return if $verbosity_level != 1;
    my $perc = shift;
    return unless stderr_is_tty();
    if ($perc eq "done") {
        # \e[2K clears everything on the current line (i.e. the progress bar)
        print STDERR "\e[2Kdone\n";
        return;
    }
    $perc = 100 if $perc >= 100;
    my $width  = 50;
    my $filled = int($perc * $width / 100);
    my $bar    = '=' x $filled;
    $bar .= '>' . (' ' x ($width - $filled - 1)) if $filled != $width;
    printf STDERR "%6.2f [%s]\r", $perc, $bar;
    return;
}
# Run an external command while rendering a progress bar from its status
# output.
#
#   $get_exec       coderef: given a file descriptor number (or a
#                   placeholder string), returns the argv to exec
#   $line_handler   coderef: given one line read from the status fd,
#                   returns (new-percentage, status-string)
#   $line_has_error coderef: given one line of the command's combined
#                   stdout/stderr, returns true if it indicates failure
#   $chdir          optional directory to chdir() into before exec
#
# Three processes cooperate: the exec()ed command (writing status to a
# pipe), a child that turns the status lines into a progress bar, and this
# process which scans the command's output for errors.
sub run_progress {
    my ($get_exec, $line_handler, $line_has_error, $chdir) = @_;
    pipe my $rfh, my $wfh;
    my $got_signal = 0;
    my $ignore = sub {
        info "run_progress() received signal $_[0]: waiting for child...";
    };

    debug("run_progress: exec " . (join ' ', ($get_exec->('${FD}'))));

    # delay signals so that we can fork and change behaviour of the signal
    # handler in parent and child without getting interrupted
    my $sigset = POSIX::SigSet->new(SIGINT, SIGHUP, SIGPIPE, SIGTERM);
    POSIX::sigprocmask(SIG_BLOCK, $sigset) or error "Can't block signals: $!";

    my $pid1 = open(my $pipe, '-|') // error "failed to fork(): $!";
    if ($pid1 == 0) {
        # child: default signal handlers
        local $SIG{'INT'}  = 'DEFAULT';
        local $SIG{'HUP'}  = 'DEFAULT';
        local $SIG{'PIPE'} = 'DEFAULT';
        local $SIG{'TERM'} = 'DEFAULT';

        # unblock all delayed signals (and possibly handle them)
        POSIX::sigprocmask(SIG_UNBLOCK, $sigset)
          or error "Can't unblock signals: $!";

        close $rfh;
        # Unset the close-on-exec flag, so that the file descriptor does not
        # get closed when we exec
        my $flags = fcntl($wfh, F_GETFD, 0) or error "fcntl F_GETFD: $!";
        fcntl($wfh, F_SETFD, $flags & ~FD_CLOEXEC)
          or error "fcntl F_SETFD: $!";
        my $fd = fileno $wfh;
        # redirect stderr to stdout so that we can capture it
        open(STDERR, '>&', STDOUT) or error "cannot open STDOUT: $!";
        my @execargs = $get_exec->($fd);
        # before apt 1.5, "apt-get update" attempted to chdir() into the
        # working directory. This will fail if the current working directory
        # is not accessible by the user (for example in unshare mode). See
        # Debian bug #860738
        if (defined $chdir) {
            chdir $chdir or error "failed chdir() to $chdir: $!";
        }
        eval { Devel::Cover::set_coverage("none") } if $is_covering;
        exec { $execargs[0] } @execargs
          or error 'cannot exec() ' . (join ' ', @execargs);
    }
    close $wfh;

    # spawn two processes:
    # parent will parse stdout to look for errors
    # child will parse $rfh for the progress meter
    my $pid2 = fork() // error "failed to fork(): $!";
    if ($pid2 == 0) {
        # child: ignore signals while drawing the progress bar
        local $SIG{'INT'}  = 'IGNORE';
        local $SIG{'HUP'}  = 'IGNORE';
        local $SIG{'PIPE'} = 'IGNORE';
        local $SIG{'TERM'} = 'IGNORE';

        # unblock all delayed signals (and possibly handle them)
        POSIX::sigprocmask(SIG_UNBLOCK, $sigset)
          or error "Can't unblock signals: $!";

        my $progress = 0.0;
        my $status   = undef;
        print_progress($progress);
        while (my $line = <$rfh>) {
            my ($newprogress, $newstatus) = $line_handler->($line);
            next unless $newprogress;
            # start a new line if the new progress value is less than the
            # previous one
            if ($newprogress < $progress) {
                print_progress("done");
            }
            if (defined $newstatus) {
                $status = $newstatus;
            }
            if (    defined $status
                and $verbosity_level == 1
                and stderr_is_tty()) {
                # \e[2K clears everything on the current line (i.e. the
                # progress bar)
                print STDERR "\e[2K$status: ";
            }
            print_progress($newprogress);
            $progress = $newprogress;
        }
        print_progress("done");

        exit 0;
    }

    # parent: ignore signals
    # by using "local", the original is automatically restored once the
    # function returns
    local $SIG{'INT'}  = $ignore;
    local $SIG{'HUP'}  = $ignore;
    local $SIG{'PIPE'} = $ignore;
    local $SIG{'TERM'} = $ignore;

    # unblock all delayed signals (and possibly handle them)
    POSIX::sigprocmask(SIG_UNBLOCK, $sigset)
      or error "Can't unblock signals: $!";

    my $output    = '';
    my $has_error = 0;
    while (my $line = <$pipe>) {
        # NOTE(review): $has_error is overwritten on every line, so only the
        # *last* line decides — an earlier error line followed by a clean one
        # is lost. Looks unintended; confirm against the $line_has_error
        # callbacks before changing.
        $has_error = $line_has_error->($line);
        if ($verbosity_level >= 2) {
            print STDERR $line;
        } else {
            # forward captured apt output
            $output .= $line;
        }
    }
    close($pipe);
    my $fail = 0;
    if ($? != 0 or $has_error) {
        $fail = 1;
    }

    waitpid $pid2, 0;
    $? == 0 or error "progress parsing failed";

    if ($got_signal) {
        error "run_progress() received signal: $got_signal";
    }

    # only print failure after progress output finished or otherwise it
    # might interfere with the remaining output
    if ($fail) {
        if ($verbosity_level >= 1) {
            print STDERR $output;
        }
        error((join ' ', $get_exec->('<$fd>')) . ' failed');
    }
    return;
}
# Run dpkg (argv in $options->{ARGV}) on the packages in $options->{PKGS}
# with a progress bar fed by dpkg's --status-fd output.
sub run_dpkg_progress {
    my $options = shift;
    my @debs = @{ $options->{PKGS} // [] };
    my $build_argv
      = sub { return @{ $options->{ARGV} }, "--status-fd=$_[0]", @debs; };
    # dpkg's exit status is authoritative; no output line signals an error
    my $detect_error = sub { return 0; };
    # each package has one install and one configure step, thus the total
    # number of progress steps is twice the number of packages
    my $steps_done  = 0;
    my $steps_total = 2 * scalar @debs;
    my $parse_line = sub {
        my $status;
        if ($_[0] =~ /^processing: (?<what>install|configure): /) {
            $status = $+{what} eq 'install' ? 'installing' : 'configuring';
            $steps_done += 1;
        }
        return $steps_done / $steps_total * 100, $status;
    };
    run_progress $build_argv, $parse_line, $detect_error;
    return;
}
# Run apt (argv in $options->{ARGV}) with a progress bar fed by apt's
# -oAPT::Status-Fd output. Supported $options keys:
#   ARGV              apt command and arguments (required)
#   PKGS              packages to append to the command line
#   CHDIR             directory to chdir() into before exec
#   FIND_APT_WARNINGS scan output for "W: "/"Err:" lines and fail on them
#   EDSP_RES          arrayref; when present, apt is run through the
#                     proxysolver and the resulting [package, version]
#                     pairs parsed from the EDSP dump are pushed onto it
sub run_apt_progress {
    my $options = shift;
    my @debs = @{ $options->{PKGS} // [] };
    my $tmpedsp;
    if (exists $options->{EDSP_RES}) {
        # temp file that the proxysolver will dump the EDSP solution into
        (undef, $tmpedsp) = tempfile(
            "mmdebstrap.edsp.XXXXXXXXXXXX",
            OPEN   => 0,
            TMPDIR => 1
        );
    }
    my $get_exec = sub {
        my @prefix = ();
        my @opts = ();
        my $solverpath = "/usr/lib/mmdebstrap/solvers";
        if (-e "./proxysolver") {
            # for development purposes, use the current directory if it
            # contains a file called proxysolver
            $solverpath = getcwd();
        }
        if (exists $options->{EDSP_RES}) {
            push @prefix, 'env', "APT_EDSP_DUMP_FILENAME=$tmpedsp";
            push @opts, "-oDir::Bin::solvers=$solverpath",
              '--solver=proxysolver';
        }
        return (
            @prefix,
            @{ $options->{ARGV} },
            @opts,
            "-oAPT::Status-Fd=$_[0]",
            # prevent apt from messing up the terminal and allow dpkg to
            # receive SIGINT and quit immediately without waiting for
            # maintainer script to finish
            '-oDpkg::Use-Pty=false',
            @debs
        );
    };
    my $line_has_error = sub { return 0; };
    if ($options->{FIND_APT_WARNINGS}) {
        $line_has_error = sub {
            # apt-get doesn't report a non-zero exit if the update failed.
            # Thus, we have to parse its output. See #778357, #776152, #696335
            # and #745735
            if ($_[0] =~ /^(W: |Err:)/) {
                return 1;
            }
            return 0;
        };
    }
    my $line_handler = sub {
        if ($_[0] =~ /(pmstatus|dlstatus):[^:]+:(\d+\.\d+):.*/) {
            my $status = undef;
            if ($1 eq 'pmstatus') {
                $status = "installing";
            } elsif ($1 eq 'dlstatus') {
                $status = "downloading";
            } else {
                error "unknown status: $1";
            }
            return $2, $status;
        }
    };
    run_progress $get_exec, $line_handler, $line_has_error, $options->{CHDIR};
    if (exists $options->{EDSP_RES}) {
        info "parsing EDSP results...";
        open my $fh, '<', $tmpedsp
          or error "failed to open $tmpedsp for reading: $!";
        # parse the deb822-style EDSP dump: stanzas are separated by blank
        # lines; a stanza with "Install:" records a package to install
        my $inst = 0;
        my $pkg;
        my $ver;
        while (my $line = <$fh>) {
            chomp $line;
            if ($line ne "") {
                if ($line =~ /^Install: \d+/) {
                    $inst = 1;
                } elsif ($line =~ /^Package: (.*)/) {
                    $pkg = $1;
                } elsif ($line =~ /^Version: (.*)/) {
                    $ver = $1;
                }
                next;
            }
            # blank line: stanza finished, record it if it was an install
            if ($inst == 1 && defined $pkg && defined $ver) {
                push @{ $options->{EDSP_RES} }, [$pkg, $ver];
            }
            $inst = 0;
            undef $pkg;
            undef $ver;
        }
        close $fh;
        unlink $tmpedsp;
    }
    return;
}
# Run the code reference $cmd with the chroot at $options->{root} fully
# prepared: device nodes present (created earlier via mknod, or
# bind-mounted here, depending on mode), /proc and /sys mounted, and
# daemon startup disabled via policy-rc.d and a fake start-stop-daemon.
# All mounts and temporary files are cleaned up afterwards, also when a
# signal arrives or when $cmd dies.
#
# Fix: the signal warning message said "cought" instead of "caught".
sub run_chroot {
    my $cmd     = shift;
    my $options = shift;

    my @cleanup_tasks = ();

    # run the queued cleanup tasks in reverse order of registration; when
    # invoked as a signal handler, exit afterwards
    my $cleanup = sub {
        my $signal = $_[0];
        while (my $task = pop @cleanup_tasks) {
            $task->();
        }
        if ($signal) {
            warning "pid $PID caught signal: $signal";
            exit 1;
        }
    };

    local $SIG{INT}  = $cleanup;
    local $SIG{HUP}  = $cleanup;
    local $SIG{PIPE} = $cleanup;
    local $SIG{TERM} = $cleanup;

    eval {
        if (any { $_ eq $options->{mode} } ('root', 'unshare')) {
            # if more than essential should be installed, make the system look
            # more like a real one by creating or bind-mounting the device
            # nodes
            foreach my $file (@devfiles) {
                my ($fname, $mode, $type, $linkname, $devmajor, $devminor)
                  = @{$file};

                next if $fname eq './dev/';

                if ($type == 0) {    # normal file
                    error "type 0 not implemented";
                } elsif ($type == 1) {    # hardlink
                    error "type 1 not implemented";
                } elsif ($type == 2) {    # symlink
                    # only needed if mknod was unavailable when the chroot
                    # was set up; otherwise the node exists already
                    if (!$options->{havemknod}) {
                        if (    $options->{mode} eq 'fakechroot'
                            and $linkname =~ /^\/proc/) {
                            # there is no /proc in fakechroot mode
                            next;
                        }
                        if (
                            any { $_ eq $options->{mode} }
                            ('root', 'unshare')
                        ) {
                            push @cleanup_tasks, sub {
                                unlink "$options->{root}/$fname"
                                  or warn "cannot unlink $fname: $!";
                            };
                        }
                        symlink $linkname, "$options->{root}/$fname"
                          or error "cannot create symlink $fname";
                    }
                } elsif ($type == 3 or $type == 4) {
                    # character/block special: without mknod we bind-mount
                    # the host's device node onto an empty placeholder file
                    if (!$options->{havemknod}) {
                        open my $fh, '>', "$options->{root}/$fname"
                          or error "cannot open $options->{root}/$fname: $!";
                        close $fh;
                        if ($options->{mode} eq 'unshare') {
                            push @cleanup_tasks, sub {
                                0 == system('umount', '--no-mtab',
                                    "$options->{root}/$fname")
                                  or warn "umount $fname failed: $?";
                                unlink "$options->{root}/$fname"
                                  or warn "cannot unlink $fname: $!";
                            };
                        } elsif ($options->{mode} eq 'root') {
                            push @cleanup_tasks, sub {
                                0 == system('umount',
                                    "$options->{root}/$fname")
                                  or warn "umount failed: $?";
                                unlink "$options->{root}/$fname"
                                  or warn "cannot unlink $fname: $!";
                            };
                        } else {
                            error "unknown mode: $options->{mode}";
                        }
                        0 == system('mount', '-o', 'bind', "/$fname",
                            "$options->{root}/$fname")
                          or error "mount $fname failed: $?";
                    }
                } elsif ($type == 5) {    # directory
                    if (!$options->{havemknod}) {
                        if (
                            any { $_ eq $options->{mode} }
                            ('root', 'unshare')
                        ) {
                            push @cleanup_tasks, sub {
                                rmdir "$options->{root}/$fname"
                                  or warn "cannot rmdir $fname: $!";
                            };
                        }
                        if (-e "$options->{root}/$fname") {
                            if (!-d "$options->{root}/$fname") {
                                error "$fname already exists but is not a"
                                  . " directory";
                            }
                        } else {
                            my $num_created
                              = make_path "$options->{root}/$fname",
                              { error => \my $err };
                            if ($err && @$err) {
                                error(
                                    join "; ",
                                    (
                                        map {
                                            "cannot create "
                                              . (join ": ", %{$_})
                                        } @$err
                                    ));
                            } elsif ($num_created == 0) {
                                error "cannot create $options->{root}/$fname";
                            }
                        }
                        chmod $mode, "$options->{root}/$fname"
                          or error "cannot chmod $fname: $!";
                    }
                    # the directory (e.g. /dev/pts) is bind-mounted from the
                    # host in both root and unshare mode
                    if ($options->{mode} eq 'unshare') {
                        push @cleanup_tasks, sub {
                            0 == system('umount', '--no-mtab',
                                "$options->{root}/$fname")
                              or warn "umount $fname failed: $?";
                        };
                    } elsif ($options->{mode} eq 'root') {
                        push @cleanup_tasks, sub {
                            0 == system('umount', "$options->{root}/$fname")
                              or warn "umount $fname failed: $?";
                        };
                    } else {
                        error "unknown mode: $options->{mode}";
                    }
                    0 == system('mount', '-o', 'bind', "/$fname",
                        "$options->{root}/$fname")
                      or error "mount $fname failed: $?";
                } else {
                    error "unsupported type: $type";
                }
            }
        } elsif (
            any { $_ eq $options->{mode} }
            ('proot', 'fakechroot', 'chrootless')
        ) {
            # we cannot mount in fakechroot and proot mode
            # in proot mode we have /dev bind-mounted already through
            # --bind=/dev
        } else {
            error "unknown mode: $options->{mode}";
        }

        # We can only mount /proc and /sys after extracting the essential
        # set because if we mount it before, then base-files will not be able
        # to extract those
        if ($options->{mode} eq 'root') {
            push @cleanup_tasks, sub {
                0 == system('umount', "$options->{root}/sys")
                  or warn "umount /sys failed: $?";
            };
            0 == system(
                'mount', '-t', 'sysfs',
                '-o',    'ro,nosuid,nodev,noexec', 'sys',
                "$options->{root}/sys"
            ) or error "mount /sys failed: $?";
        } elsif ($options->{mode} eq 'unshare') {
            # naturally we have to clean up after ourselves in sudo mode where
            # we do a real mount. But we also need to unmount in unshare mode
            # because otherwise, even with the --one-file-system tar option,
            # the permissions of the mount source will be stored and not the
            # mount target (the directory)
            push @cleanup_tasks, sub {
                # since we cannot write to /etc/mtab we need --no-mtab
                # unmounting /sys only seems to be successful with --lazy
                0 == system('umount', '--no-mtab', '--lazy',
                    "$options->{root}/sys")
                  or warn "umount /sys failed: $?";
            };
            # without the network namespace unshared, we cannot mount a new
            # sysfs. Since we need network, we just bind-mount.
            #
            # we have to rbind because just using bind results in "wrong fs
            # type, bad option, bad superblock" error
            0 == system('mount', '-o', 'rbind', '/sys', "$options->{root}/sys")
              or error "mount /sys failed: $?";
        } elsif (
            any { $_ eq $options->{mode} }
            ('proot', 'fakechroot', 'chrootless')
        ) {
            # we cannot mount in fakechroot and proot mode
            # in proot mode we have /proc bind-mounted already through
            # --bind=/proc
        } else {
            error "unknown mode: $options->{mode}";
        }
        if ($options->{mode} eq 'root') {
            push @cleanup_tasks, sub {
                # some maintainer scripts mount additional stuff into /proc
                # which we need to unmount beforehand
                if (
                    is_mountpoint(
                        $options->{root} . "/proc/sys/fs/binfmt_misc"
                    )
                ) {
                    0 == system('umount',
                        "$options->{root}/proc/sys/fs/binfmt_misc")
                      or error "umount /proc/sys/fs/binfmt_misc failed: $?";
                }
                0 == system('umount', "$options->{root}/proc")
                  or error "umount /proc failed: $?";
            };
            0 == system('mount', '-t', 'proc', '-o', 'ro', 'proc',
                "$options->{root}/proc")
              or error "mount /proc failed: $?";
        } elsif ($options->{mode} eq 'unshare') {
            # naturally we have to clean up after ourselves in sudo mode where
            # we do a real mount. But we also need to unmount in unshare mode
            # because otherwise, even with the --one-file-system tar option,
            # the permissions of the mount source will be stored and not the
            # mount target (the directory)
            push @cleanup_tasks, sub {
                # since we cannot write to /etc/mtab we need --no-mtab
                0 == system('umount', '--no-mtab', "$options->{root}/proc")
                  or error "umount /proc failed: $?";
            };
            0 == system('mount', '-t', 'proc', 'proc', "$options->{root}/proc")
              or error "mount /proc failed: $?";
        } elsif (
            any { $_ eq $options->{mode} }
            ('proot', 'fakechroot', 'chrootless')
        ) {
            # we cannot mount in fakechroot and proot mode
            # in proot mode we have /sys bind-mounted already through
            # --bind=/sys
        } else {
            error "unknown mode: $options->{mode}";
        }

        # prevent daemons from starting
        # the directory might not exist in custom variant, for example
        #
        # ideally, we should use update-alternatives but we cannot rely on it
        # existing inside the chroot
        #
        # See #911290 for more problems of this interface
        if (-d "$options->{root}/usr/sbin/") {
            open my $fh, '>', "$options->{root}/usr/sbin/policy-rc.d"
              or error "cannot open policy-rc.d: $!";
            print $fh "#!/bin/sh\n";
            print $fh "exit 101\n";
            close $fh;
            chmod 0755, "$options->{root}/usr/sbin/policy-rc.d"
              or error "cannot chmod policy-rc.d: $!";
        }

        # replace start-stop-daemon by a no-op stub for the duration of $cmd
        # the file might not exist if it was removed in a hook
        if (-e "$options->{root}/sbin/start-stop-daemon") {
            if (-e "$options->{root}/sbin/start-stop-daemon.REAL") {
                error "$options->{root}/sbin/start-stop-daemon.REAL already"
                  . " exists";
            }
            move(
                "$options->{root}/sbin/start-stop-daemon",
                "$options->{root}/sbin/start-stop-daemon.REAL"
            ) or error "cannot move start-stop-daemon: $!";
            open my $fh, '>', "$options->{root}/sbin/start-stop-daemon"
              or error "cannot open start-stop-daemon: $!";
            print $fh "#!/bin/sh\n";
            print $fh "echo \"Warning: Fake start-stop-daemon called, doing"
              . " nothing\">&2\n";
            close $fh;
            chmod 0755, "$options->{root}/sbin/start-stop-daemon"
              or error "cannot chmod start-stop-daemon: $!";
        }

        &{$cmd}();

        # cleanup: restore the real start-stop-daemon and drop policy-rc.d
        if (-e "$options->{root}/sbin/start-stop-daemon.REAL") {
            move(
                "$options->{root}/sbin/start-stop-daemon.REAL",
                "$options->{root}/sbin/start-stop-daemon"
            ) or error "cannot move start-stop-daemon: $!";
        }
        if (-e "$options->{root}/usr/sbin/policy-rc.d") {
            unlink "$options->{root}/usr/sbin/policy-rc.d"
              or error "cannot unlink policy-rc.d: $!";
        }
    };
    my $error = $@;
    # we use the cleanup function to do the unmounting
    $cleanup->(0);
    if ($error) {
        error "run_chroot failed: $error";
    }

    return;
}
# Execute all hooks registered under $options->{"${name}_hook"} (where
# $name is e.g. "setup", "extract", "essential" or "customize"). Special
# hooks (copy-in/out, tar-in/out, upload, download, sync-in/out) are
# delegated to our own --hook-helper talking over the hook socket; other
# hooks run either directly or wrapped in "sh -c". Hooks other than the
# setup ones run inside the fully prepared chroot via run_chroot.
sub run_hooks {
    my $name    = shift;
    my $options = shift;

    # nothing registered for this hook type, nothing to do
    return if scalar @{ $options->{"${name}_hook"} } == 0;

    if ($options->{dryrun}) {
        info "not running ${name}-hooks because of --dry-run";
        return;
    }

    my $execute_all = sub {
        foreach my $script (@{ $options->{"${name}_hook"} }) {
            if (
                $script =~ /^(
                      copy-in|copy-out
                     |tar-in|tar-out
                     |upload|download
                     |sync-in|sync-out
                 )\ /x
            ) {
                info "running special hook: $script";
                if (
                    any { $_ eq $options->{variant} } ('extract', 'custom')
                    and any { $_ eq $options->{mode} }
                    ('fakechroot', 'proot') and $name ne 'setup'
                ) {
                    info "the copy-in, copy-out, tar-in and tar-out commands"
                      . " in fakechroot mode or proot mode might fail in"
                      . " extract and custom variants because there might be"
                      . " no tar inside the chroot";
                }
                my $pid = fork() // error "fork() failed: $!";
                if ($pid == 0) {
                    # child: whatever the script writes on stdout is sent to
                    # the socket; whatever is written to the socket is sent
                    # to stdin
                    open(STDOUT, '>&', $options->{hooksock})
                      or error "cannot open STDOUT: $!";
                    open(STDIN, '<&', $options->{hooksock})
                      or error "cannot open STDIN: $!";
                    # we execute ourselves under sh to avoid having to
                    # implement a clever parser of the quoting used in $script
                    # for the filenames
                    my $prefix = "";
                    if ($is_covering) {
                        $prefix
                          = "$EXECUTABLE_NAME -MDevel::Cover=-silent,-nogcov ";
                    }
                    exec 'sh', '-c',
                        "$prefix$PROGRAM_NAME --hook-helper"
                      . " \"\$1\" \"\$2\" \"\$3\" \"\$4\" \"\$5\" $script",
                      'exec', $options->{root}, $options->{mode}, $name,
                      (
                        defined $options->{qemu}
                        ? "qemu-$options->{qemu}"
                        : 'env',
                        $verbosity_level
                      );
                }
                waitpid($pid, 0);
                $? == 0 or error "special hook failed with exit code $?";
            } elsif (-x $script || $script !~ m/[^\w@\%+=:,.\/-]/a) {
                info "running --$name-hook directly: $script $options->{root}";
                # execute it directly if it's an executable file
                # or if it there are no shell metacharacters
                # (the /a regex modifier makes \w match only ASCII)
                0 == system('env', '--unset=TMPDIR', '--unset=APT_CONFIG',
                    $script, $options->{root})
                  or error "command failed: $script";
            } else {
                info "running --$name-hook in shell: sh -c '$script' exec"
                  . " $options->{root}";
                # otherwise, wrap everything in sh -c
                0 == system('env', '--unset=TMPDIR', '--unset=APT_CONFIG',
                    'sh', '-c', $script, 'exec', $options->{root})
                  or error "command failed: $script";
            }
        }
    };

    if ($name eq 'setup') {
        # execute directly without mounting anything (the mount points do not
        # exist yet)
        $execute_all->();
    } else {
        run_chroot($execute_all, $options);
    }

    return;
}
# Top-level driver: dump the resolved options, sanity-check the apt
# keyrings, then run the bootstrap pipeline (setup, update, download,
# extract, prepare, essential, install, customize, cleanup) with the hook
# phases interleaved at the documented points.
sub setup {
    my $options = shift;

    # log every option at debug verbosity, sorted for reproducible output
    foreach my $optname (sort keys %{$options}) {
        my $optval = $options->{$optname};
        next if !defined $optval;
        my $reftype = ref $optval;
        if ($reftype eq '') {
            debug "$optname: $options->{$optname}";
        } elsif ($reftype eq 'ARRAY') {
            debug "$optname: [" . (join ', ', @{$optval}) . "]";
        } elsif ($reftype eq 'GLOB') {
            debug "$optname: GLOB";
        } else {
            error "unknown type for key $optname: " . $reftype;
        }
    }

    # warn early about keyrings that exist but cannot be read
    foreach my $keyring ($options->{apttrusted}, $options->{apttrustedparts}) {
        if (-e $keyring && !-r $keyring) {
            warning "cannot read $keyring";
        }
    }

    run_setup($options);

    run_hooks('setup', $options);

    run_update($options);

    (my $pkgs_to_install, my $essential_pkgs, my $cached_debs)
      = run_download($options);

    # We have to extract the packages from @essential_pkgs either if we run
    # in chrootless mode and extract variant or in any other mode. In
    # other words, the only scenario in which the @essential_pkgs are not
    # extracted are in chrootless mode in any other than the extract
    # variant.
    unless ($options->{mode} eq 'chrootless'
        and $options->{variant} ne 'extract') {
        run_extract($options, $essential_pkgs);
    }

    run_hooks('extract', $options);

    if ($options->{variant} ne 'extract') {
        my $chrootcmd = [];
        if ($options->{mode} ne 'chrootless') {
            $chrootcmd = run_prepare($options);
        }

        run_essential($options, $essential_pkgs, $chrootcmd, $cached_debs);

        run_hooks('essential', $options);

        run_install($options, $pkgs_to_install, $chrootcmd);

        run_hooks('customize', $options);
    }

    run_cleanup($options);

    return;
}
sub run_setup() {
my $options = shift;
my $dpkgversion;
{
my $pid = open my $fh, '-|' // error "failed to fork(): $!";
if ($pid == 0) {
# redirect stderr to /dev/null to hide error messages from dpkg
# versions before 1.20.0
open(STDERR, '>', '/dev/null')
or error "cannot open /dev/null for writing: $!";
exec 'dpkg', '--robot', '--version';
}
chomp(
$dpkgversion = do { local $/; <$fh> }
);
close $fh;
if ($? == 0 and $dpkgversion =~ /^([0-9]+\.[0-9]+\.[0-9]+) \(\S+\)$/) {
# dpkg is new enough for the --robot option
$dpkgversion = version->new($1);
} else {
$dpkgversion = undef;
}
}
{
my @directories = (
'/etc/apt/apt.conf.d', '/etc/apt/sources.list.d',
'/etc/apt/preferences.d', '/var/cache/apt',
'/var/lib/apt/lists/partial', '/tmp'
);
# we need /var/lib/dpkg in case we need to write to /var/lib/dpkg/arch
push @directories, '/var/lib/dpkg';
# since we do not know the dpkg version inside the chroot at this
# point, we can only omit it in chrootless mode
if ( $options->{mode} ne 'chrootless'
or not defined $dpkgversion
or $dpkgversion < "1.20.0") {
push @directories, '/etc/dpkg/dpkg.cfg.d/';
}
# if dpkg and apt operate from the outside we need some more
# directories because dpkg and apt might not even be installed inside
# the chroot
if ($options->{mode} eq 'chrootless') {
push @directories, '/var/log/apt';
# since we do not know the dpkg version inside the chroot at this
# point, we can only omit it in chrootless mode
if ( $options->{mode} ne 'chrootless'
or not defined $dpkgversion
or $dpkgversion < "1.20.0") {
push @directories, '/var/lib/dpkg/triggers',
'/var/lib/dpkg/info', '/var/lib/dpkg/alternatives',
'/var/lib/dpkg/updates';
}
}
foreach my $dir (@directories) {
if (-e "$options->{root}/$dir") {
if (!-d "$options->{root}/$dir") {
error "$dir already exists but is not a directory";
}
} else {
my $num_created = make_path "$options->{root}/$dir",
{ error => \my $err };
if ($err && @$err) {
error(
join "; ",
(map { "cannot create " . (join ": ", %{$_}) } @$err));
} elsif ($num_created == 0) {
error "cannot create $options->{root}/$dir";
}
}
}
2020-05-03 15:18:34 +00:00
# make sure /tmp is not 0755 like the rest
chmod 01777, "$options->{root}/tmp" or error "cannot chmod /tmp: $!";
}
# The TMPDIR set by the user or even /tmp might be inaccessible by the
# unshared user. Thus, we place all temporary files in /tmp inside the new
# rootfs.
#
# This will affect calls to tempfile() as well as runs of "apt-get update"
# which will create temporary clearsigned.message.XXXXXX files to verify
# signatures.
{
## no critic (Variables::RequireLocalizedPunctuationVars)
$ENV{"TMPDIR"} = "$options->{root}/tmp";
}
my ($conf, $tmpfile)
= tempfile("mmdebstrap.apt.conf.XXXXXXXXXXXX", TMPDIR => 1)
2020-01-08 16:44:07 +00:00
or error "cannot open apt.conf: $!";
2018-09-18 09:20:24 +00:00
print $conf "Apt::Architecture \"$options->{nativearch}\";\n";
# the host system might have configured additional architectures
# force only the native architecture
2020-01-08 16:44:07 +00:00
if (scalar @{ $options->{foreignarchs} } > 0) {
2020-01-08 14:41:49 +00:00
print $conf "Apt::Architectures { \"$options->{nativearch}\"; ";
2020-01-08 16:44:07 +00:00
foreach my $arch (@{ $options->{foreignarchs} }) {
2020-01-08 14:41:49 +00:00
print $conf "\"$arch\"; ";
}
print $conf "};\n";
2018-09-18 09:20:24 +00:00
} else {
2020-01-08 14:41:49 +00:00
print $conf "Apt::Architectures \"$options->{nativearch}\";\n";
2018-09-18 09:20:24 +00:00
}
print $conf "Dir \"$options->{root}\";\n";
# not needed anymore for apt 1.3 and newer
2020-01-08 16:44:07 +00:00
print $conf
"Dir::State::Status \"$options->{root}/var/lib/dpkg/status\";\n";
2018-09-18 09:20:24 +00:00
# for authentication, use the keyrings from the host
print $conf "Dir::Etc::Trusted \"$options->{apttrusted}\";\n";
print $conf "Dir::Etc::TrustedParts \"$options->{apttrustedparts}\";\n";
if ($options->{variant} ne 'apt') {
2020-01-08 14:41:49 +00:00
# apt considers itself essential. Thus, when generating an EDSP
# document for an external solver, it will add the Essential:yes field
# to the apt package stanza. This is unnecessary for any other variant
# than 'apt' because in all other variants we compile the set of
# packages we consider essential ourselves and for the 'essential'
# variant it would even be wrong to add apt. This workaround is only
# needed when apt is used with an external solver but doesn't hurt
# otherwise and we don't have a good way to figure out whether apt is
# using an external solver or not short of parsing the --aptopt
# options.
print $conf "pkgCacheGen::ForceEssential \",\";\n";
}
if ($options->{dryrun}) {
# Without this option, apt will fail with:
# E: Could not configure 'libc6:amd64'.
# E: Could not perform immediate configuration on 'libgcc1:amd64'.
#
# See https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=953260
print $conf "APT::Immediate-Configure false;\n";
}
2018-09-18 09:20:24 +00:00
close $conf;
# We put certain configuration items in their own configuration file
# because they have to be valid for apt invocation from outside as well as
# from inside the chroot.
# The config filename is chosen such that any settings in it will be
# overridden by what the user specified with --aptopt.
{
2020-01-08 16:44:07 +00:00
open my $fh, '>', "$options->{root}/etc/apt/apt.conf.d/00mmdebstrap"
or error "cannot open /etc/apt/apt.conf.d/00mmdebstrap: $!";
2020-01-08 14:41:49 +00:00
print $fh "Apt::Install-Recommends false;\n";
print $fh "Acquire::Languages \"none\";\n";
close $fh;
}
# apt-get update requires this
2018-09-18 09:20:24 +00:00
{
2020-01-08 16:44:07 +00:00
open my $fh, '>', "$options->{root}/var/lib/dpkg/status"
or error "failed to open(): $!";
2020-01-08 14:41:49 +00:00
close $fh;
2018-09-18 09:20:24 +00:00
}
# /var/lib/dpkg/available is required to exist or otherwise package
# removals will fail
# since we do not know the dpkg version inside the chroot at this point, we
# can only omit it in chrootless mode
if ( $options->{mode} ne 'chrootless'
or not defined $dpkgversion
or $dpkgversion < "1.20.0") {
2020-01-08 16:44:07 +00:00
open my $fh, '>', "$options->{root}/var/lib/dpkg/available"
or error "failed to open(): $!";
2020-01-08 14:41:49 +00:00
close $fh;
}
# /var/lib/dpkg/cmethopt is used by dselect
# see #930788
# since we do not know the dpkg version inside the chroot at this point, we
# can only omit it in chrootless mode
if ( $options->{mode} ne 'chrootless'
or not defined $dpkgversion
or $dpkgversion < "1.20.0") {
2020-01-08 16:44:07 +00:00
open my $fh, '>', "$options->{root}/var/lib/dpkg/cmethopt"
or error "failed to open(): $!";
2020-01-08 14:41:49 +00:00
print $fh "apt apt\n";
close $fh;
}
# we create /var/lib/dpkg/arch inside the chroot either if there is more
# than the native architecture in the chroot or if chrootless mode is
# used to create a chroot of a different architecture than the native
# architecture outside the chroot.
2020-01-08 16:44:07 +00:00
chomp(my $hostarch = `dpkg --print-architecture`);
if (
scalar @{ $options->{foreignarchs} } > 0
or ( $options->{mode} eq 'chrootless'
and $hostarch ne $options->{nativearch})
) {
open my $fh, '>', "$options->{root}/var/lib/dpkg/arch"
or error "cannot open /var/lib/dpkg/arch: $!";
2020-01-08 14:41:49 +00:00
print $fh "$options->{nativearch}\n";
2020-01-08 16:44:07 +00:00
foreach my $arch (@{ $options->{foreignarchs} }) {
2020-01-08 14:41:49 +00:00
print $fh "$arch\n";
}
close $fh;
2018-09-18 09:20:24 +00:00
}
2020-01-08 16:44:07 +00:00
if (scalar @{ $options->{aptopts} } > 0) {
open my $fh, '>', "$options->{root}/etc/apt/apt.conf.d/99mmdebstrap"
or error "cannot open /etc/apt/apt.conf.d/99mmdebstrap: $!";
foreach my $opt (@{ $options->{aptopts} }) {
2020-01-08 14:41:49 +00:00
if (-r $opt) {
# flush handle because copy() uses syswrite() which bypasses
# buffered IO
$fh->flush();
copy $opt, $fh or error "cannot copy $opt: $!";
} else {
print $fh $opt;
if ($opt !~ /;$/) {
print $fh ';';
}
if ($opt !~ /\n$/) {
print $fh "\n";
}
}
}
close $fh;
if ($verbosity_level >= 3) {
debug "content of /etc/apt/apt.conf.d/99mmdebstrap:";
copy("$options->{root}/etc/apt/apt.conf.d/99mmdebstrap", \*STDERR);
}
2018-09-18 09:20:24 +00:00
}
2020-01-08 16:44:07 +00:00
if (scalar @{ $options->{dpkgopts} } > 0) {
2020-01-08 14:41:49 +00:00
# FIXME: in chrootless mode, dpkg will only read the configuration
# from the host
2020-01-08 16:44:07 +00:00
open my $fh, '>', "$options->{root}/etc/dpkg/dpkg.cfg.d/99mmdebstrap"
or error "cannot open /etc/dpkg/dpkg.cfg.d/99mmdebstrap: $!";
foreach my $opt (@{ $options->{dpkgopts} }) {
2020-01-08 14:41:49 +00:00
if (-r $opt) {
# flush handle because copy() uses syswrite() which bypasses
# buffered IO
$fh->flush();
copy $opt, $fh or error "cannot copy $opt: $!";
} else {
print $fh $opt;
if ($opt !~ /\n$/) {
print $fh "\n";
}
}
}
close $fh;
if ($verbosity_level >= 3) {
debug "content of /etc/dpkg/dpkg.cfg.d/99mmdebstrap:";
copy("$options->{root}/etc/dpkg/dpkg.cfg.d/99mmdebstrap",
\*STDERR);
}
2018-09-18 09:20:24 +00:00
}
{
2020-01-08 16:44:07 +00:00
open my $fh, '>', "$options->{root}/etc/fstab"
or error "cannot open fstab: $!";
2020-01-08 14:41:49 +00:00
print $fh "# UNCONFIGURED FSTAB FOR BASE SYSTEM\n";
close $fh;
2020-01-08 16:44:07 +00:00
chmod 0644, "$options->{root}/etc/fstab"
or error "cannot chmod fstab: $!";
2018-09-18 09:20:24 +00:00
}
# write /etc/apt/sources.list and files in /etc/apt/sources.list.d/
2018-09-18 09:20:24 +00:00
{
my $firstentry = $options->{sourceslists}->[0];
# if the first sources.list entry is of one-line type and without
# explicit filename, then write out an actual /etc/apt/sources.list
# otherwise everything goes into /etc/apt/sources.list.d
my $fname;
if ($firstentry->{type} eq 'one-line'
&& !defined $firstentry->{fname}) {
$fname = "$options->{root}/etc/apt/sources.list";
} else {
$fname = "$options->{root}/etc/apt/sources.list.d/0000";
if (defined $firstentry->{fname}) {
$fname .= $firstentry->{fname};
if ( $firstentry->{fname} !~ /\.list/
&& $firstentry->{fname} !~ /\.sources/) {
if ($firstentry->{type} eq 'one-line') {
$fname .= '.list';
} elsif ($firstentry->{type} eq 'deb822') {
$fname .= '.sources';
} else {
error "invalid type: $firstentry->{type}";
}
}
} else {
# if no filename is given, then this must be a deb822 file
# because if it was a one-line type file, then it would've been
# written to /etc/apt/sources.list
$fname .= 'main.sources';
}
}
open my $fh, '>', "$fname" or error "cannot open $fname: $!";
print $fh $firstentry->{content};
2020-01-08 14:41:49 +00:00
close $fh;
# everything else goes into /etc/apt/sources.list.d/
for (my $i = 1 ; $i < scalar @{ $options->{sourceslists} } ; $i++) {
my $entry = $options->{sourceslists}->[$i];
my $fname = "$options->{root}/etc/apt/sources.list.d/"
. sprintf("%04d", $i);
if (defined $entry->{fname}) {
$fname .= $entry->{fname};
if ( $entry->{fname} !~ /\.list/
&& $entry->{fname} !~ /\.sources/) {
if ($entry->{type} eq 'one-line') {
$fname .= '.list';
} elsif ($entry->{type} eq 'deb822') {
$fname .= '.sources';
} else {
error "invalid type: $entry->{type}";
}
}
} else {
if ($entry->{type} eq 'one-line') {
$fname .= 'main.list';
} elsif ($entry->{type} eq 'deb822') {
$fname .= 'main.sources';
} else {
error "invalid type: $entry->{type}";
}
}
open my $fh, '>', "$fname" or error "cannot open $fname: $!";
print $fh $entry->{content};
close $fh;
}
2018-09-18 09:20:24 +00:00
}
# allow network access from within
if (-e "/etc/resolv.conf") {
2020-01-08 16:44:07 +00:00
copy("/etc/resolv.conf", "$options->{root}/etc/resolv.conf")
or error "cannot copy /etc/resolv.conf: $!";
} else {
warning("Host system does not have a /etc/resolv.conf to copy into the"
2020-01-08 16:44:07 +00:00
. " rootfs.");
}
if (-e "/etc/hostname") {
2020-01-08 16:44:07 +00:00
copy("/etc/hostname", "$options->{root}/etc/hostname")
or error "cannot copy /etc/hostname: $!";
} else {
warning("Host system does not have a /etc/hostname to copy into the"
2020-01-08 16:44:07 +00:00
. " rootfs.");
}
if ($options->{havemknod}) {
2020-01-08 14:41:49 +00:00
foreach my $file (@devfiles) {
2020-01-08 16:44:07 +00:00
my ($fname, $mode, $type, $linkname, $devmajor, $devminor)
= @{$file};
if ($type == 0) { # normal file
2020-01-08 14:41:49 +00:00
error "type 0 not implemented";
2020-01-08 16:44:07 +00:00
} elsif ($type == 1) { # hardlink
2020-01-08 14:41:49 +00:00
error "type 1 not implemented";
2020-01-08 16:44:07 +00:00
} elsif ($type == 2) { # symlink
if ( $options->{mode} eq 'fakechroot'
and $linkname =~ /^\/proc/) {
2020-01-08 14:41:49 +00:00
# there is no /proc in fakechroot mode
next;
}
2020-01-08 16:44:07 +00:00
symlink $linkname, "$options->{root}/$fname"
or error "cannot create symlink $fname";
next; # chmod cannot work on symlinks
} elsif ($type == 3) { # character special
0 == system('mknod', "$options->{root}/$fname", 'c',
$devmajor, $devminor)
or error "mknod failed: $?";
} elsif ($type == 4) { # block special
0 == system('mknod', "$options->{root}/$fname", 'b',
$devmajor, $devminor)
or error "mknod failed: $?";
} elsif ($type == 5) { # directory
2020-01-08 14:41:49 +00:00
if (-e "$options->{root}/$fname") {
2020-01-08 16:44:07 +00:00
if (!-d "$options->{root}/$fname") {
2020-01-08 14:41:49 +00:00
error "$fname already exists but is not a directory";
}
} else {
2020-01-08 16:44:07 +00:00
my $num_created = make_path "$options->{root}/$fname",
{ error => \my $err };
2020-01-08 14:41:49 +00:00
if ($err && @$err) {
2020-01-08 16:44:07 +00:00
error(
join "; ",
(
map { "cannot create " . (join ": ", %{$_}) }
@$err
));
2020-01-08 14:41:49 +00:00
} elsif ($num_created == 0) {
error "cannot create $options->{root}/$fname";
}
}
} else {
error "unsupported type: $type";
}
2020-01-08 16:44:07 +00:00
chmod $mode, "$options->{root}/$fname"
or error "cannot chmod $fname: $!";
2020-01-08 14:41:49 +00:00
}
}
2018-09-18 09:20:24 +00:00
# we tell apt about the configuration via a config file passed via the
# APT_CONFIG environment variable instead of using the --option command
# line arguments because configuration settings like Dir::Etc have already
# been evaluated at the time that apt takes its command line arguments
# into account.
2020-01-09 07:39:40 +00:00
{
## no critic (Variables::RequireLocalizedPunctuationVars)
$ENV{"APT_CONFIG"} = "$tmpfile";
}
# we have to make the config file world readable so that a possible
# /usr/lib/apt/solvers/apt process which is run by the _apt user is also
# able to read it
chmod 0666, "$tmpfile" or error "cannot chmod $tmpfile: $!";
if ($verbosity_level >= 3) {
0 == system('apt-get', '--version')
or error "apt-get --version failed: $?";
0 == system('apt-config', 'dump') or error "apt-config failed: $?";
2020-03-07 01:13:26 +00:00
debug "content of $tmpfile:";
copy($tmpfile, \*STDERR);
}
if (any { $_ eq $options->{mode} } ('fakechroot', 'proot')) {
# Apt dropping privileges to another user than root is not useful in
# fakechroot and proot mode because all users are faked and thus there
# is no real privilege difference anyways. Thus, we also print no
# warning message in this case.
open my $fh, '>>', $tmpfile
or error "cannot open $tmpfile for appending: $!";
print $fh "APT::Sandbox::User \"root\";\n";
close $fh;
} else {
# when apt-get update is run by the root user, then apt will attempt to
# drop privileges to the _apt user. This will fail if the _apt user
# does not have permissions to read the root directory. In that case,
# we have to disable apt sandboxing. This can for example happen in
# root mode when the path of the chroot is not in a world-readable
# location.
2020-01-08 14:41:49 +00:00
my $partial = '/var/lib/apt/lists/partial';
2020-01-08 16:44:07 +00:00
if (
system('/usr/lib/apt/apt-helper', 'drop-privs', '--', 'test',
'-r', "$options->{root}$partial") != 0
) {
warning "Download is performed unsandboxed as root as file"
. " $options->{root}$partial couldn't be accessed by user _apt";
2020-01-08 16:44:07 +00:00
open my $fh, '>>', $tmpfile
or error "cannot open $tmpfile for appending: $!";
2020-01-08 14:41:49 +00:00
print $fh "APT::Sandbox::User \"root\";\n";
close $fh;
}
}
# setting PATH for chroot, ldconfig, start-stop-daemon...
if (defined $ENV{PATH} && $ENV{PATH} ne "") {
2020-01-09 07:39:40 +00:00
## no critic (Variables::RequireLocalizedPunctuationVars)
2020-01-08 14:41:49 +00:00
$ENV{PATH} = "$ENV{PATH}:/usr/sbin:/usr/bin:/sbin:/bin";
} else {
2020-01-09 07:39:40 +00:00
## no critic (Variables::RequireLocalizedPunctuationVars)
2020-01-08 14:41:49 +00:00
$ENV{PATH} = "/usr/sbin:/usr/bin:/sbin:/bin";
}
return;
}
# Run "apt-get update" against the configured sources and verify that at
# least one package index was actually downloaded.
#
# Arguments: a single hashref of options; only {root} is used here (as the
# chdir target for run_apt_progress).
# Returns: nothing. Dies via error() on any failure.
sub run_update() {
    my $options = shift;

    info "running apt-get update...";
    run_apt_progress({
        ARGV              => ['apt-get', 'update'],
        CHDIR             => $options->{root},
        FIND_APT_WARNINGS => 1
    });

    # check if anything was downloaded at all
    {
        # FIX: the original wrote «'indextargets' // error ...» which made
        # the defined-or operator apply to the (always defined) string
        # literal, so a failing open() was silently ignored.  Use
        # parenthesized open() with a low-precedence "or" instead.
        open(my $fh, '-|', 'apt-get', 'indextargets')
          or error "failed to fork(): $!";
        # slurp the whole output of "apt-get indextargets"
        chomp(
            my $indextargets = do { local $/; <$fh> }
        );
        close $fh;
        if ($indextargets eq '') {
            # empty output means no index was acquired -- show the policy
            # for easier debugging before bailing out
            if ($verbosity_level >= 1) {
                0 == system('apt-cache', 'policy')
                  or error "apt-cache failed: $?";
            }
            error "apt-get update didn't download anything";
        }
    }
    return;
}
# Compute the package set for the chosen variant and download (or simulate
# downloading) the corresponding .deb files with apt.
#
# Arguments: a single hashref of options; uses {include}, {variant},
# {dryrun}, {root}, {skip}, {suite}, {nativearch}.
# Returns a three-element list:
#   - arrayref of package names still to be installed later (run_install)
#   - arrayref of paths (relative to the chroot) of the essential .debs
#   - arrayref of .deb basenames that were already cached in
#     /var/cache/apt/archives/ before the download started
sub run_download() {
    my $options = shift;

    # collect the packages the user explicitly requested via --include
    my %pkgs_to_install;
    for my $incl (@{ $options->{include} }) {
        for my $pkg (split /[,\s]+/, $incl) {
            # strip leading and trailing whitespace
            $pkg =~ s/^\s+|\s+$//g;
            # skip if the remainder is an empty string
            if ($pkg eq '') {
                next;
            }
            $pkgs_to_install{$pkg} = ();
        }
    }
    if ($options->{variant} eq 'buildd') {
        $pkgs_to_install{'build-essential'} = ();
    }

    # We use /var/cache/apt/archives/ to figure out which packages apt chooses
    # to install. That's why the directory must be empty if:
    #  - /var/cache/apt/archives exists, and
    #  - no simulation run is done, and
    #  - the variant is not extract or custom or the number to be
    #    installed packages not zero
    my @cached_debs = ();
    my @dl_debs     = ();
    if (
        !$options->{dryrun}
        && ((none { $_ eq $options->{variant} } ('extract', 'custom'))
            || scalar keys %pkgs_to_install != 0)
        && -d "$options->{root}/var/cache/apt/archives/"
    ) {
        my $apt_archives = "/var/cache/apt/archives/";
        opendir my $dh, "$options->{root}/$apt_archives"
          or error "cannot read $apt_archives";
        while (my $deb = readdir $dh) {
            if ($deb !~ /\.deb$/) {
                next;
            }
            if (!-f "$options->{root}/$apt_archives/$deb") {
                next;
            }
            push @cached_debs, $deb;
        }
        closedir $dh;

        if (scalar @cached_debs > 0) {
            if (any { $_ eq 'download/empty' } @{ $options->{skip} }) {
                info "skipping download/empty as requested";
            } else {
                error("/var/cache/apt/archives/ inside the chroot contains: "
                      . (join ', ', (sort @cached_debs)));
            }
        }
    }

    # To figure out the right package set for the apt variant we can use:
    # $ apt-get dist-upgrade -o dir::state::status=/dev/null
    # This is because that variants only contain essential packages and
    # apt and libapt treats apt as essential. If we want to install less
    # (essential variant) then we have to compute the package set ourselves.
    # Same if we want to install priority based variants.
    if (any { $_ eq $options->{variant} } ('extract', 'custom')) {
        if (scalar keys %pkgs_to_install == 0) {
            info "nothing to download -- skipping...";
            # FIX: return three elements like the final return statement
            # below does -- callers expect the cached debs as third element
            return ([], [], \@cached_debs);
        }
        my %result = ();
        if ($options->{dryrun}) {
            info "simulate downloading packages with apt...";
        } else {
            # if there are already packages in /var/cache/apt/archives/, we
            # need to use our proxysolver to obtain the solution chosen by apt
            if (scalar @cached_debs > 0) {
                $result{EDSP_RES} = \@dl_debs;
            }
            info "downloading packages with apt...";
        }
        run_apt_progress({
            ARGV => [
                'apt-get',
                '--yes',
                '-oApt::Get::Download-Only=true',
                $options->{dryrun} ? '-oAPT::Get::Simulate=true' : (),
                'install'
            ],
            PKGS => [keys %pkgs_to_install],
            %result
        });
    } elsif ($options->{variant} eq 'apt') {
        # if we just want to install Essential:yes packages, apt and their
        # dependencies then we can make use of libapt treating apt as
        # implicitly essential. An upgrade with the (currently) empty status
        # file will trigger an installation of the essential packages plus apt.
        #
        # 2018-09-02, #debian-dpkg on OFTC, times in UTC+2
        # 23:39 < josch> I'll just put it in my script and if it starts
        #                breaking some time I just say it's apt's fault. :P
        # 23:42 < DonKult> that is how it usually works, so yes, do that :P (<-
        #                  and please add that line next to it so you can
        #                  remind me in 5+ years that I said that after I wrote
        #                  in the bugreport: "Are you crazy?!? Nobody in his
        #                  right mind would even suggest depending on it!")
        my %result = ();
        if ($options->{dryrun}) {
            info "simulate downloading packages with apt...";
        } else {
            # if there are already packages in /var/cache/apt/archives/, we
            # need to use our proxysolver to obtain the solution chosen by apt
            if (scalar @cached_debs > 0) {
                $result{EDSP_RES} = \@dl_debs;
            }
            info "downloading packages with apt...";
        }
        run_apt_progress({
            ARGV => [
                'apt-get',
                '--yes',
                '-oApt::Get::Download-Only=true',
                $options->{dryrun} ? '-oAPT::Get::Simulate=true' : (),
                'dist-upgrade'
            ],
            %result
        });
    } elsif (
        any { $_ eq $options->{variant} } (
            'essential', 'standard', 'important', 'required', 'buildd',
            'minbase'
        )
    ) {
        my %ess_pkgs;
        my %ess_pkgs_target;
        my %pkgs_to_install_target;
        my $num_indices = 0;
        open(
            my $pipe_apt,
            '-|',
            'apt-get',
            'indextargets',
            '--format',
            ('$(CODENAME)' . "\t" . '$(SUITE)' . "\t" . '$(FILENAME)'),
            'Created-By: Packages'
        ) or error "cannot start apt-get indextargets: $!";
        while (my $line = <$pipe_apt>) {
            chomp $line;
            $num_indices++;
            my ($codename, $suite, $fname) = split /\t/, $line, 3;
            # remember whether this index belongs to the suite requested on
            # the command line (by codename or by suite name)
            my $suite_matches = 0;
            if (
                defined $options->{suite}
                and
                ($options->{suite} eq $codename or $options->{suite} eq $suite)
            ) {
                $suite_matches = 1;
            }
            open(my $pipe_cat, '-|', '/usr/lib/apt/apt-helper', 'cat-file',
                $fname)
              or error "cannot start apt-helper cat-file: $!";
            my $pkgname;
            my $ess  = '';
            my $prio = 'optional';
            my $arch = '';
            while (my $line = <$pipe_cat>) {
                chomp $line;
                # Dpkg::Index takes 10 seconds to parse a typical Packages
                # file. Thus we instead use a simple parser that just retrieve
                # the information we need.
                if ($line ne "") {
                    if ($line =~ /^Package: (.*)/) {
                        $pkgname = $1;
                    } elsif ($line =~ /^Essential: yes$/) {
                        $ess = 'yes';
                    } elsif ($line =~ /^Priority: (.*)/) {
                        $prio = $1;
                    } elsif ($line =~ /^Architecture: (.*)/) {
                        $arch = $1;
                    }
                    next;
                }
                # we are only interested of packages of native architecture or
                # Architecture:all
                if ($arch eq $options->{nativearch} or $arch eq 'all') {
                    # the line is empty, thus a package stanza just finished
                    # processing and we can handle it now
                    if ($ess eq 'yes') {
                        $ess_pkgs{$pkgname} = ();
                        if ($suite_matches) {
                            $ess_pkgs_target{$pkgname} = ();
                        }
                    } elsif ($options->{variant} eq 'essential') {
                        # for this variant we are only interested in the
                        # essential packages
                    } elsif (
                        any { $_ eq $options->{variant} } (
                            'standard', 'important', 'required', 'buildd',
                            'minbase'
                        )
                    ) {
                        if ($prio eq 'optional' or $prio eq 'extra') {
                            # always ignore packages of priority optional and
                            # extra
                        } elsif ($prio eq 'standard') {
                            if (
                                none { $_ eq $options->{variant} }
                                ('important', 'required', 'buildd', 'minbase')
                            ) {
                                $pkgs_to_install{$pkgname} = ();
                                if ($suite_matches) {
                                    $pkgs_to_install_target{$pkgname} = ();
                                }
                            }
                        } elsif ($prio eq 'important') {
                            if (
                                none { $_ eq $options->{variant} }
                                ('required', 'buildd', 'minbase')
                            ) {
                                $pkgs_to_install{$pkgname} = ();
                                if ($suite_matches) {
                                    $pkgs_to_install_target{$pkgname} = ();
                                }
                            }
                        } elsif ($prio eq 'required') {
                            # required packages are part of all sets except
                            # essential and apt
                            $pkgs_to_install{$pkgname} = ();
                            if ($suite_matches) {
                                $pkgs_to_install_target{$pkgname} = ();
                            }
                        } else {
                            error "unknown priority: $prio";
                        }
                    } else {
                        error "unknown variant: $options->{variant}";
                    }
                }
                # reset values
                undef $pkgname;
                $ess  = '';
                $prio = 'optional';
                $arch = '';
            }
            close $pipe_cat;
            $? == 0 or error "apt-helper cat-file failed: $?";
        }
        close $pipe_apt;
        $? == 0 or error "apt-get indextargets failed: $?";
        # comparing the size of both arrays is sufficient because items are
        # either only added to one or to both
        if (defined $options->{suite} and $num_indices > 1) {
            # FIX: compare "keys" on both sides -- the original compared
            # against %ess_pkgs_target in scalar context, which on older
            # perls yields hash bucket usage instead of the key count
            if (scalar keys %ess_pkgs_target > 0
                and keys %ess_pkgs != keys %ess_pkgs_target) {
                info( "multiple sources defined, using those matching "
                    . "'$options->{suite}' to find essential packages");
                %ess_pkgs = %ess_pkgs_target;
            }
            if (scalar keys %pkgs_to_install_target > 0
                and keys %pkgs_to_install != keys %pkgs_to_install_target) {
                if ($options->{variant} eq 'essential') {
                    error "logic error";
                } elsif (
                    any { $_ eq $options->{variant} }
                    ('standard', 'important', 'required', 'buildd', 'minbase')
                ) {
                    info( "multiple sources defined -- using those matching "
                        . "'$options->{suite}' to find packages for variant "
                        . "'$options->{variant}'");
                    %pkgs_to_install = %pkgs_to_install_target;
                } else {
                    error "unknown variant: $options->{variant}";
                }
            }
        }

        debug "Identified the following Essential:yes packages:";
        foreach my $pkg (sort keys %ess_pkgs) {
            debug " $pkg";
        }
        my %result = ();
        if ($options->{dryrun}) {
            info "simulate downloading packages with apt...";
        } else {
            # if there are already packages in /var/cache/apt/archives/, we
            # need to use our proxysolver to obtain the solution chosen by apt
            if (scalar @cached_debs > 0) {
                $result{EDSP_RES} = \@dl_debs;
            }
            info "downloading packages with apt...";
        }
        run_apt_progress({
            ARGV => [
                'apt-get',
                '--yes',
                '-oApt::Get::Download-Only=true',
                $options->{dryrun} ? '-oAPT::Get::Simulate=true' : (),
                'install'
            ],
            PKGS => [keys %ess_pkgs],
            %result
        });
    } else {
        error "unknown variant: $options->{variant}";
    }

    my @essential_pkgs;
    if (scalar @cached_debs > 0 && scalar @dl_debs > 0) {
        my $archives = "/var/cache/apt/archives/";
        # for each package in @dl_debs, check if it's in
        # /var/cache/apt/archives/ and add it to @essential_pkgs
        foreach my $p (@dl_debs) {
            my ($pkg, $ver_epoch) = @{$p};
            # apt appends the architecture at the end of the package name
            ($pkg, my $arch) = split ':', $pkg, 2;
            # apt replaces the colon by its percent encoding %3a
            my $ver = $ver_epoch;
            $ver =~ s/:/%3a/;
            # the architecture returned by apt is the native architecture.
            # Since we don't know whether the package is architecture
            # independent or not, we first try with the native arch and then
            # with "all" and only error out if neither exists.
            if (-e "$options->{root}/$archives/${pkg}_${ver}_$arch.deb") {
                push @essential_pkgs, "$archives/${pkg}_${ver}_$arch.deb";
            } elsif (-e "$options->{root}/$archives/${pkg}_${ver}_all.deb") {
                push @essential_pkgs, "$archives/${pkg}_${ver}_all.deb";
            } else {
                error( "cannot find package for $pkg:$arch (= $ver_epoch) "
                    . "in /var/cache/apt/archives/");
            }
        }
    } else {
        # collect the .deb files that were downloaded by apt from the content
        # of /var/cache/apt/archives/
        if (!$options->{dryrun}) {
            my $apt_archives = "/var/cache/apt/archives/";
            opendir my $dh, "$options->{root}/$apt_archives"
              or error "cannot read $apt_archives";
            while (my $deb = readdir $dh) {
                if ($deb !~ /\.deb$/) {
                    next;
                }
                $deb = "$apt_archives/$deb";
                if (!-f "$options->{root}/$deb") {
                    next;
                }
                push @essential_pkgs, $deb;
            }
            closedir $dh;

            if (scalar @essential_pkgs == 0) {
                # check if a file:// URI was used
                open(my $pipe_apt, '-|', 'apt-get', 'indextargets',
                    '--format', '$(URI)', 'Created-By: Packages')
                  or error "cannot start apt-get indextargets: $!";
                while (my $uri = <$pipe_apt>) {
                    if ($uri =~ /^file:\/\//) {
                        error
                          "nothing got downloaded -- use copy:// instead of"
                          . " file://";
                    }
                }
                error "nothing got downloaded";
            }
        }
    }

    # Unpack order matters. Since we create this list using two different
    # methods but we want both methods to have the same result, we sort the
    # list before returning it.
    @essential_pkgs = sort @essential_pkgs;

    return ([keys %pkgs_to_install], \@essential_pkgs, \@cached_debs);
}
# Unpack the downloaded essential .deb archives into the chroot directory.
#
# For every archive a three-stage pipeline is built:
#   dpkg-deb --fsys-tarfile  [-> mmtarfilter]  -> tar --extract
# The optional filter stage is inserted only when the chroot's dpkg
# configuration contains path-exclude/path-include lines.
#
# Arguments: options hashref (uses {dryrun} and {root}) and an arrayref of
# .deb paths relative to the chroot.  Returns nothing; dies via error() on
# failure of any pipeline stage.
sub run_extract() {
    my $options = shift;
    my $essential_pkgs = shift;
    if ($options->{dryrun}) {
        # stray blame timestamp from extraction, not code:
        # 2020-01-10 10:44:15 +00:00
        info "skip extracting packages because of --dry-run";
        return;
    }
    if (scalar @{$essential_pkgs} == 0) {
        info "nothing to extract -- skipping...";
        return;
    }
    info "extracting archives...";
    print_progress 0.0;
    my $counter = 0;
    my $total = scalar @{$essential_pkgs};
    foreach my $deb (@{$essential_pkgs}) {
        $counter += 1;
        my $tarfilter;
        my @tarfilterargs;
        # if the path-excluded option was added to the dpkg config,
        # insert the tarfilter between dpkg-deb and tar
        if (-e "$options->{root}/etc/dpkg/dpkg.cfg.d/99mmdebstrap") {
            open(my $fh, '<',
                "$options->{root}/etc/dpkg/dpkg.cfg.d/99mmdebstrap")
              or error "cannot open /etc/dpkg/dpkg.cfg.d/99mmdebstrap: $!";
            my @matches = grep { /^path-(?:exclude|include)=/ } <$fh>;
            close $fh;
            chop @matches;    # remove trailing newline
            # turn dpkg config keys into the filter's command line options
            @tarfilterargs = map { "--" . $_ } @matches;
        }
        if (scalar @tarfilterargs > 0) {
            # prefer a tarfilter script next to this program (running from
            # a source checkout); otherwise use the installed mmtarfilter
            if (-x "./tarfilter") {
                $tarfilter = "./tarfilter";
            } else {
                $tarfilter = "mmtarfilter";
            }
        }
        my $dpkg_writer;
        my $tar_reader;
        my $filter_reader;
        my $filter_writer;
        # with a filter, two pipes are needed: dpkg-deb -> filter -> tar;
        # without, dpkg-deb writes directly into tar's stdin
        if (scalar @tarfilterargs > 0) {
            pipe $filter_reader, $dpkg_writer or error "pipe failed: $!";
            pipe $tar_reader, $filter_writer or error "pipe failed: $!";
        } else {
            pipe $tar_reader, $dpkg_writer or error "pipe failed: $!";
        }
        # not using dpkg-deb --extract as that would replace the
        # merged-usr symlinks with plain directories
        # not using dpkg --unpack because that would try running preinst
        # maintainer scripts
        my $pid1 = fork() // error "fork() failed: $!";
        if ($pid1 == 0) {
            # child 1: dpkg-deb streams the package contents as a tarball;
            # unused pipe ends are closed so EOF propagates correctly
            open(STDOUT, '>&', $dpkg_writer) or error "cannot open STDOUT: $!";
            close($tar_reader) or error "cannot close tar_reader: $!";
            if (scalar @tarfilterargs > 0) {
                close($filter_reader)
                  or error "cannot close filter_reader: $!";
                close($filter_writer)
                  or error "cannot close filter_writer: $!";
            }
            debug("running dpkg-deb --fsys-tarfile $options->{root}/$deb");
            eval { Devel::Cover::set_coverage("none") } if $is_covering;
            exec 'dpkg-deb', '--fsys-tarfile', "$options->{root}/$deb";
        }
        my $pid2;
        if (scalar @tarfilterargs > 0) {
            $pid2 = fork() // error "fork() failed: $!";
            if ($pid2 == 0) {
                # child 2 (optional): the tar filter between dpkg-deb and tar
                open(STDIN, '<&', $filter_reader)
                  or error "cannot open STDIN: $!";
                open(STDOUT, '>&', $filter_writer)
                  or error "cannot open STDOUT: $!";
                close($dpkg_writer) or error "cannot close dpkg_writer: $!";
                close($tar_reader) or error "cannot close tar_reader: $!";
                debug("running $tarfilter " . (join " ", @tarfilterargs));
                eval { Devel::Cover::set_coverage("none") } if $is_covering;
                exec $tarfilter, @tarfilterargs;
            }
        }
        my $pid3 = fork() // error "fork() failed: $!";
        if ($pid3 == 0) {
            # child 3: tar unpacks the stream into the chroot;
            # --keep-directory-symlink preserves merged-usr symlinks
            open(STDIN, '<&', $tar_reader) or error "cannot open STDIN: $!";
            close($dpkg_writer) or error "cannot close dpkg_writer: $!";
            if (scalar @tarfilterargs > 0) {
                close($filter_reader)
                  or error "cannot close filter_reader: $!";
                close($filter_writer)
                  or error "cannot close filter_writer: $!";
            }
            debug( "running tar -C $options->{root}"
                . " --keep-directory-symlink --extract --file -");
            eval { Devel::Cover::set_coverage("none") } if $is_covering;
            exec 'tar', '-C', $options->{root},
              '--keep-directory-symlink', '--extract', '--file', '-';
        }
        # parent: close all pipe ends, then reap the children in pipeline
        # order and check each exit status
        close($dpkg_writer) or error "cannot close dpkg_writer: $!";
        close($tar_reader) or error "cannot close tar_reader: $!";
        if (scalar @tarfilterargs > 0) {
            close($filter_reader) or error "cannot close filter_reader: $!";
            close($filter_writer) or error "cannot close filter_writer: $!";
        }
        waitpid($pid1, 0);
        $? == 0 or error "dpkg-deb --fsys-tarfile failed: $?";
        if (scalar @tarfilterargs > 0) {
            waitpid($pid2, 0);
            $? == 0 or error "tarfilter failed: $?";
        }
        waitpid($pid3, 0);
        $? == 0 or error "tar --extract failed: $?";
        print_progress($counter / $total * 100);
    }
    print_progress "done";
    return;
}
# Prepare the environment for executing commands inside the chroot and
# compute the command prefix used for that.
#
# For fakechroot mode, a set of environment variables (FAKECHROOT_CMD_SUBST,
# FAKECHROOT_EXCLUDE_PATH, FAKECHROOT_AF_UNIX_PATH, LD_LIBRARY_PATH) is set
# up.  For foreign-architecture chroots, qemu support is wired up per mode.
# Finally a sanity check for renameat2 support in /bin/mv is run for the
# fakechroot and proot modes.
#
# Arguments: a single hashref of options; uses {mode}, {root}, {qemu},
# {nativearch}.
# Returns: an arrayref with the command prefix to run programs inside the
# chroot.
sub run_prepare {
    my $options = shift;
    if ($options->{mode} eq 'fakechroot') {
        # this borrows from and extends
        # /etc/fakechroot/debootstrap.env and
        # /etc/fakechroot/chroot.env
        {
            my @fakechrootsubst = ();
            foreach my $d ('/usr/sbin', '/usr/bin', '/sbin', '/bin') {
                push @fakechrootsubst, "$d/chroot=/usr/sbin/chroot.fakechroot";
                push @fakechrootsubst, "$d/mkfifo=/bin/true";
                push @fakechrootsubst, "$d/ldconfig=/bin/true";
                push @fakechrootsubst, "$d/ldd=/usr/bin/ldd.fakechroot";
                push @fakechrootsubst, "$d/ischroot=/bin/true";
            }
            if (defined $ENV{FAKECHROOT_CMD_SUBST}
                && $ENV{FAKECHROOT_CMD_SUBST} ne "") {
                push @fakechrootsubst, split /:/, $ENV{FAKECHROOT_CMD_SUBST};
            }
            ## no critic (Variables::RequireLocalizedPunctuationVars)
            $ENV{FAKECHROOT_CMD_SUBST} = join ':', @fakechrootsubst;
        }
        if (defined $ENV{FAKECHROOT_EXCLUDE_PATH}
            && $ENV{FAKECHROOT_EXCLUDE_PATH} ne "") {
            ## no critic (Variables::RequireLocalizedPunctuationVars)
            $ENV{FAKECHROOT_EXCLUDE_PATH}
              = "$ENV{FAKECHROOT_EXCLUDE_PATH}:/dev:/proc:/sys";
        } else {
            ## no critic (Variables::RequireLocalizedPunctuationVars)
            $ENV{FAKECHROOT_EXCLUDE_PATH} = '/dev:/proc:/sys';
        }
        # workaround for long unix socket path if FAKECHROOT_BASE
        # exceeds the limit of 108 bytes
        {
            ## no critic (Variables::RequireLocalizedPunctuationVars)
            $ENV{FAKECHROOT_AF_UNIX_PATH} = "/tmp";
        }
        {
            my @ldsoconf = ('/etc/ld.so.conf');
            opendir(my $dh, '/etc/ld.so.conf.d')
              or error "Can't opendir(/etc/ld.so.conf.d): $!";
            while (my $entry = readdir $dh) {
                # skip the "." and ".." entries
                next if $entry eq ".";
                next if $entry eq "..";
                next if $entry !~ /\.conf$/;
                push @ldsoconf, "/etc/ld.so.conf.d/$entry";
            }
            closedir($dh);
            my @ldlibpath = ();
            if (defined $ENV{LD_LIBRARY_PATH}
                && $ENV{LD_LIBRARY_PATH} ne "") {
                push @ldlibpath, (split /:/, $ENV{LD_LIBRARY_PATH});
            }
            # FIXME: workaround allowing installation of systemd should
            # live in fakechroot, see #917920
            push @ldlibpath, "/lib/systemd";
            foreach my $fname (@ldsoconf) {
                open my $fh, "<", $fname
                  or error "cannot open $fname for reading: $!";
                while (my $line = <$fh>) {
                    next if $line !~ /^\//;
                    # FIX: strip the trailing newline -- the original pushed
                    # the raw line, embedding newlines into the colon-joined
                    # LD_LIBRARY_PATH below
                    chomp $line;
                    push @ldlibpath, $line;
                }
                close $fh;
            }
            ## no critic (Variables::RequireLocalizedPunctuationVars)
            $ENV{LD_LIBRARY_PATH} = join ':', @ldlibpath;
        }
    }
    # make sure that APT_CONFIG and TMPDIR are not set when executing
    # anything inside the chroot
    my @chrootcmd = ('env', '--unset=APT_CONFIG', '--unset=TMPDIR');
    if ($options->{mode} eq 'proot') {
        push @chrootcmd,
          (
            'proot', '--root-id',
            '--bind=/dev', '--bind=/proc',
            '--bind=/sys', "--rootfs=$options->{root}",
            '--cwd=/'
          );
    } elsif (
        any { $_ eq $options->{mode} }
        ('root', 'unshare', 'fakechroot')
    ) {
        push @chrootcmd, ('/usr/sbin/chroot', $options->{root});
    } else {
        error "unknown mode: $options->{mode}";
    }
    # copy qemu-user-static binary into chroot or setup proot with
    # --qemu
    if (defined $options->{qemu}) {
        if ($options->{mode} eq 'proot') {
            push @chrootcmd, "--qemu=qemu-$options->{qemu}";
        } elsif ($options->{mode} eq 'fakechroot') {
            # Make sure that the fakeroot and fakechroot shared
            # libraries exist for the right architecture
            # FIX: the original attached «// error ...» to the last list
            # element of open(), applying defined-or to an always-defined
            # string literal, so a failing open() went unnoticed
            open(my $fh, '-|', 'dpkg-architecture', '-a',
                $options->{nativearch}, '-qDEB_HOST_MULTIARCH')
              or error "failed to fork(): $!";
            chomp(
                my $deb_host_multiarch = do { local $/; <$fh> }
            );
            close $fh;
            if (($? != 0) or (!$deb_host_multiarch)) {
                error "dpkg-architecture failed: $?";
            }
            my $fakechrootdir = "/usr/lib/$deb_host_multiarch/fakechroot";
            if (!-e "$fakechrootdir/libfakechroot.so") {
                error "$fakechrootdir/libfakechroot.so doesn't exist."
                  . " Install libfakechroot:$options->{nativearch}"
                  . " outside the chroot";
            }
            my $fakerootdir = "/usr/lib/$deb_host_multiarch/libfakeroot";
            if (!-e "$fakerootdir/libfakeroot-sysv.so") {
                error "$fakerootdir/libfakeroot-sysv.so doesn't exist."
                  . " Install libfakeroot:$options->{nativearch}"
                  . " outside the chroot";
            }
            # The rest of this block sets environment variables, so we
            # have to add the "no critic" statement to stop perlcritic
            # from complaining about setting global variables
            ## no critic (Variables::RequireLocalizedPunctuationVars)
            # fakechroot only fills LD_LIBRARY_PATH with the
            # directories of the host's architecture. We append the
            # directories of the chroot architecture.
            $ENV{LD_LIBRARY_PATH}
              = "$ENV{LD_LIBRARY_PATH}:$fakechrootdir:$fakerootdir";
            # The binfmt support on the outside is used, so qemu needs
            # to know where it has to look for shared libraries
            if (defined $ENV{QEMU_LD_PREFIX}
                && $ENV{QEMU_LD_PREFIX} ne "") {
                $ENV{QEMU_LD_PREFIX} = "$ENV{QEMU_LD_PREFIX}:$options->{root}";
            } else {
                $ENV{QEMU_LD_PREFIX} = $options->{root};
            }
        } elsif (any { $_ eq $options->{mode} } ('root', 'unshare')) {
            # other modes require a static qemu-user binary
            my $qemubin = "/usr/bin/qemu-$options->{qemu}-static";
            if (!-e $qemubin) {
                error "cannot find $qemubin";
            }
            copy $qemubin, "$options->{root}/$qemubin"
              or error "cannot copy $qemubin: $!";
            # File::Copy does not retain permissions but on some
            # platforms (like Travis CI) the binfmt interpreter must
            # have the executable bit set or otherwise execve will
            # fail with EACCES
            chmod 0755, "$options->{root}/$qemubin"
              or error "cannot chmod $qemubin: $!";
        } else {
            error "unknown mode: $options->{mode}";
        }
    }
    # some versions of coreutils use the renameat2 system call in mv.
    # This breaks certain versions of fakechroot and proot. Here we do
    # a sanity check and warn the user in case things might break.
    if (any { $_ eq $options->{mode} } ('fakechroot', 'proot')
        and -e "$options->{root}/bin/mv") {
        mkdir "$options->{root}/000-move-me"
          or error "cannot create directory: $!";
        my $ret = system @chrootcmd, '/bin/mv', '/000-move-me',
          '/001-delete-me';
        if ($ret != 0) {
            if ($options->{mode} eq 'proot') {
                info "the /bin/mv binary inside the chroot doesn't"
                  . " work under proot";
                info "this is likely due to missing support for"
                  . " renameat2 in proot";
                info "see https://github.com/proot-me/PRoot/issues/147";
            } else {
                info "the /bin/mv binary inside the chroot doesn't"
                  . " work under fakechroot";
                info "with certain versions of coreutils and glibc,"
                  . " this is due to missing support for renameat2 in"
                  . " fakechroot";
                info "see https://github.com/dex4er/fakechroot/issues/60";
            }
            info "expect package post installation scripts not to work";
            rmdir "$options->{root}/000-move-me"
              or error "cannot rmdir: $!";
        } else {
            rmdir "$options->{root}/001-delete-me"
              or error "cannot rmdir: $!";
        }
    }
    return \@chrootcmd;
}
# Install the previously extracted essential packages into the chroot and
# remove the .deb files afterwards (unless they were cached beforehand or
# the essential/unlink skip option was given).
#
# Arguments:
#   $options        - options hashref ({mode}, {dryrun}, {root}, {qemu},
#                     {skip})
#   $essential_pkgs - arrayref of .deb paths relative to the chroot
#   $chrootcmd      - arrayref command prefix for running inside the chroot
#   $cached_debs    - arrayref of .deb basenames that predate the download
# Returns: nothing; dies via error() on failure.
sub run_essential() {
    my $options        = shift;
    my $essential_pkgs = shift;
    my $chrootcmd      = shift;
    my $cached_debs    = shift;

    # without extracted packages there is nothing to install
    if (scalar @{$essential_pkgs} == 0) {
        info "no essential packages -- skipping...";
        return;
    }

    if ($options->{mode} eq 'chrootless') {
        info(
            $options->{dryrun}
            ? "simulate installing essential packages..."
            : "installing essential packages...");
        # FIXME: the dpkg config from the host is parsed before the command
        # line arguments are parsed and might break this mode
        # Example: if the host has --path-exclude set, then this will also
        # affect the chroot.
        my @chrootless_opts = (
            '-oDPkg::Options::=--force-not-root',
            '-oDPkg::Options::=--force-script-chrootless',
            '-oDPkg::Options::=--root=' . $options->{root},
            '-oDPkg::Options::=--log=' . "$options->{root}/var/log/dpkg.log",
            $options->{dryrun} ? '-oAPT::Get::Simulate=true' : (),
        );
        if (defined $options->{qemu}) {
            # The binfmt support on the outside is used, so qemu needs to know
            # where it has to look for shared libraries
            ## no critic (Variables::RequireLocalizedPunctuationVars)
            if (defined $ENV{QEMU_LD_PREFIX}
                && $ENV{QEMU_LD_PREFIX} ne "") {
                $ENV{QEMU_LD_PREFIX} = "$ENV{QEMU_LD_PREFIX}:$options->{root}";
            } else {
                $ENV{QEMU_LD_PREFIX} = $options->{root};
            }
        }
        run_apt_progress({
            ARGV => ['apt-get', '--yes', @chrootless_opts, 'install'],
            PKGS => [map { "$options->{root}/$_" } @{$essential_pkgs}],
        });
    } elsif (
        any { $_ eq $options->{mode} }
        ('root', 'unshare', 'fakechroot', 'proot')
    ) {
        # install the extracted packages properly
        # we need --force-depends because dpkg does not take Pre-Depends
        # into account and thus doesn't install them in the right order
        # And the --predep-package option is broken: #539133
        if ($options->{dryrun}) {
            info "simulate installing essential packages...";
        } else {
            info "installing essential packages...";
            run_chroot(
                sub {
                    run_dpkg_progress({
                        ARGV => [
                            @{$chrootcmd}, 'env',
                            '--unset=TMPDIR', 'dpkg',
                            '--install', '--force-depends'
                        ],
                        PKGS => $essential_pkgs,
                    });
                },
                $options
            );
        }
    } else {
        error "unknown mode: $options->{mode}";
    }

    if (any { $_ eq 'essential/unlink' } @{ $options->{skip} }) {
        info "skipping essential/unlink as requested";
    } else {
        # clean up the archives again, but keep any .deb that was already
        # sitting in /var/cache/apt/archives/ before the download phase
        for my $pkgfile (@{$essential_pkgs}) {
            next
              if any { "/var/cache/apt/archives/$_" eq $pkgfile }
              @{$cached_debs};
            unlink "$options->{root}/$pkgfile"
              or error "cannot unlink $pkgfile: $!";
        }
    }
    return;
}
# Install the remaining (non-essential) requested packages into the chroot.
#
# In chrootless mode, apt drives dpkg with --force-script-chrootless from
# the outside.  In the chroot-based modes, packages that must exist before
# apt can run inside (apt itself, https/tor transports) are first
# downloaded and dpkg-installed from the outside, then the rest is
# installed by running apt inside the chroot.
#
# Arguments: options hashref ({mode}, {variant}, {dryrun}, {root}),
# arrayref of package names to install, arrayref chroot command prefix.
# Returns nothing; dies via error() on failure.
sub run_install() {
    my $options = shift;
    my $pkgs_to_install = shift;
    my $chrootcmd = shift;
    if ($options->{mode} eq 'chrootless') {
        if (scalar @{$pkgs_to_install} > 0) {
            # same dpkg pass-through options as used by run_essential in
            # chrootless mode
            my @chrootless_opts = (
                '-oDPkg::Options::=--force-not-root',
                '-oDPkg::Options::=--force-script-chrootless',
                '-oDPkg::Options::=--root=' . $options->{root},
                '-oDPkg::Options::=--log='
                  . "$options->{root}/var/log/dpkg.log",
                $options->{dryrun} ? '-oAPT::Get::Simulate=true' : (),
            );
            run_apt_progress({
                ARGV => ['apt-get', '--yes', @chrootless_opts, 'install'],
                PKGS => $pkgs_to_install,
            });
        }
    } elsif (
        any { $_ eq $options->{mode} }
        ('root', 'unshare', 'fakechroot', 'proot')
    ) {
        if ($options->{variant} ne 'custom'
            and scalar @{$pkgs_to_install} > 0) {
            # some packages have to be installed from the outside before
            # anything can be installed from the inside.
            #
            # we do not need to install any *-archive-keyring packages
            # inside the chroot prior to installing the packages, because
            # the keyring is only used when doing "apt-get update" and that
            # was already done at the beginning using key material from the
            # outside. Since the apt cache is already filled and we are not
            # calling "apt-get update" again, the keyring can be installed
            # later during installation. But: if it's not installed during
            # installation, then we might end up with a fully installed
            # system without keyrings that are valid for its sources.list.
            my @pkgs_to_install_from_outside;
            # install apt if necessary
            if ($options->{variant} ne 'apt') {
                push @pkgs_to_install_from_outside, 'apt';
                # stray blame timestamp: 2020-01-08 14:41:49 +00:00
            }
            # since apt will be run inside the chroot, make sure that
            # apt-transport-https and ca-certificates gets installed first
            # if any mirror is a https URI
            open(my $pipe_apt, '-|', 'apt-get', 'indextargets',
                '--format', '$(URI)', 'Created-By: Packages')
              or error "cannot start apt-get indextargets: $!";
            while (my $uri = <$pipe_apt>) {
                if ($uri =~ /^https:\/\//) {
                    info "https mirror found -- adding apt-transport-https "
                      . "and ca-certificates";
                    # FIXME: support for https is part of apt >= 1.5
                    push @pkgs_to_install_from_outside, 'apt-transport-https';
                    push @pkgs_to_install_from_outside, 'ca-certificates';
                    last;
                } elsif ($uri =~ /^tor(\+[a-z]+)*:\/\//) {
                    # tor URIs can be tor+http://, tor+https:// or even
                    # tor+mirror+file://
                    info "tor mirror found -- adding apt-transport-tor";
                    push @pkgs_to_install_from_outside, 'apt-transport-tor';
                    last;
                    # stray blame timestamp: 2020-01-08 14:41:49 +00:00
                }
            }
            close $pipe_apt;
            $? == 0 or error "apt-get indextargets failed";
            # stray blame timestamp: 2020-01-08 14:41:49 +00:00
            if (scalar @pkgs_to_install_from_outside > 0) {
                my @cached_debs = ();
                my @dl_debs = ();
                # /var/cache/apt/archives/ might not be empty either because
                # the user used hooks to populate it or because skip options
                # like essential/unlink or check/empty were used.
                {
                    my $apt_archives = "/var/cache/apt/archives/";
                    opendir my $dh, "$options->{root}/$apt_archives"
                      or error "cannot read $apt_archives";
                    while (my $deb = readdir $dh) {
                        if ($deb !~ /\.deb$/) {
                            next;
                        }
                        if (!-f "$options->{root}/$apt_archives/$deb") {
                            next;
                        }
                        push @cached_debs, $deb;
                    }
                    closedir $dh;
                }
                my %result = ();
                if ($options->{dryrun}) {
                    info 'simulate downloading '
                      . (join ', ', @pkgs_to_install_from_outside) . "...";
                } else {
                    # a pre-populated archives dir requires the proxysolver
                    # to learn which debs apt picked (as in run_download)
                    if (scalar @cached_debs > 0) {
                        $result{EDSP_RES} = \@dl_debs;
                    }
                    info 'downloading '
                      . (join ', ', @pkgs_to_install_from_outside) . "...";
                    # stray blame timestamp: 2020-01-08 14:41:49 +00:00
                }
                run_apt_progress({
                    ARGV => [
                        'apt-get',
                        '--yes',
                        '-oApt::Get::Download-Only=true',
                        $options->{dryrun}
                        ? '-oAPT::Get::Simulate=true'
                        : (),
                        'install'
                    ],
                    PKGS => [@pkgs_to_install_from_outside],
                    %result
                });
                if ($options->{dryrun}) {
                    info 'simulate installing '
                      . (join ', ', @pkgs_to_install_from_outside) . "...";
                } else {
                    my @debs_to_install;
                    if (scalar @cached_debs > 0 && scalar @dl_debs > 0) {
                        my $archives = "/var/cache/apt/archives/";
                        my $prefix = "$options->{root}/$archives";
                        # for each package in @dl_debs, check if it's in
                        # /var/cache/apt/archives/ and add it to
                        # @debs_to_install
                        foreach my $p (@dl_debs) {
                            my ($pkg, $ver_epoch) = @{$p};
                            # apt appends the architecture at the end of the
                            # package name
                            ($pkg, my $arch) = split ':', $pkg, 2;
                            # apt replaces the colon by its percent encoding
                            my $ver = $ver_epoch;
                            $ver =~ s/:/%3a/;
                            # the architecture returned by apt is the native
                            # architecture. Since we don't know whether the
                            # package is architecture independent or not, we
                            # first try with the native arch and then
                            # with "all" and only error out if neither exists.
                            if (-e "$prefix/${pkg}_${ver}_$arch.deb") {
                                push @debs_to_install,
                                  "$archives/${pkg}_${ver}_$arch.deb";
                            } elsif (-e "$prefix/${pkg}_${ver}_all.deb") {
                                push @debs_to_install,
                                  "$archives/${pkg}_${ver}_all.deb";
                            } else {
                                error( "cannot find package for "
                                    . "$pkg:$arch (= $ver_epoch) "
                                    . "in /var/cache/apt/archives/");
                            }
                        }
                    } else {
                        # no pre-existing cache: everything in archives/ was
                        # downloaded by the apt call above
                        my $apt_archives = "/var/cache/apt/archives/";
                        opendir my $dh, "$options->{root}/$apt_archives"
                          or error "cannot read $apt_archives";
                        while (my $deb = readdir $dh) {
                            if ($deb !~ /\.deb$/) {
                                next;
                            }
                            $deb = "$apt_archives/$deb";
                            if (!-f "$options->{root}/$deb") {
                                next;
                            }
                            push @debs_to_install, $deb;
                        }
                        closedir $dh;
                        # stray blame timestamp: 2020-01-10 10:44:15 +00:00
                    }
                    if (scalar @debs_to_install == 0) {
                        warning "nothing got downloaded -- maybe the packages"
                          . " were already installed?";
                        # stray blame timestamp: 2020-01-10 10:44:15 +00:00
                    } else {
                        # we need --force-depends because dpkg does not take
                        # Pre-Depends into account and thus doesn't install
                        # them in the right order
                        info 'installing '
                          . (join ', ', @pkgs_to_install_from_outside) . "...";
                        run_dpkg_progress({
                            ARGV => [
                                @{$chrootcmd}, 'env',
                                '--unset=TMPDIR', 'dpkg',
                                '--install', '--force-depends'
                            ],
                            PKGS => \@debs_to_install,
                        });
                        foreach my $deb (@debs_to_install) {
                            # do not unlink those packages that were in
                            # /var/cache/apt/archive before the install phase
                            next
                              if any { "/var/cache/apt/archives/$_" eq $deb }
                              @cached_debs;
                            unlink "$options->{root}/$deb"
                              or error "cannot unlink $deb: $!";
                            # stray blame timestamp: 2020-01-08 14:41:49 +00:00
                        }
                    }
                }
            }
            # stray blame timestamp: 2020-01-08 14:41:49 +00:00
            if (!$options->{dryrun}) {
                run_chroot(
                    sub {
                        info "installing remaining packages inside the"
                          . " chroot...";
                        run_apt_progress({
                            ARGV => [
                                @{$chrootcmd}, 'env',
                                '--unset=APT_CONFIG', '--unset=TMPDIR',
                                'apt-get', '--yes',
                                'install'
                            ],
                            PKGS => $pkgs_to_install,
                        });
                    },
                    $options
                );
            } else {
                info "simulate installing remaining packages inside the"
                  . " chroot...";
                run_apt_progress({
                    ARGV => [
                        'apt-get', '--yes',
                        '-oAPT::Get::Simulate=true', 'install'
                    ],
                    PKGS => $pkgs_to_install,
                });
                # stray blame timestamp: 2020-01-08 14:41:49 +00:00
            }
        }
    } else {
        # stray blame timestamp: 2020-01-08 14:41:49 +00:00
        error "unknown mode: $options->{mode}";
        # stray blame timestamp: 2018-09-18 09:20:24 +00:00
    }
    return;
}
sub run_cleanup {
    # Clean package lists, caches, logs and temporary files from the chroot
    # so that the result is small and (where requested) reproducible.
    # Individual steps can be disabled via --skip ($options->{skip}).
    #
    # Note: the former empty prototype "()" contradicted the $options
    # argument taken via shift and has been dropped.
    my $options = shift;

    if (any { $_ eq 'cleanup/apt' } @{ $options->{skip} }) {
        info "skipping cleanup/apt as requested";
    } else {
        info "cleaning package lists and apt cache...";
        # pointing both source list options at /dev/null makes "apt-get
        # update" empty the package lists
        run_apt_progress({
            ARGV => [
                'apt-get', '--option',
                'Dir::Etc::SourceList=/dev/null', '--option',
                'Dir::Etc::SourceParts=/dev/null', 'update'
            ],
            CHDIR => $options->{root},
        });
        run_apt_progress(
            { ARGV => ['apt-get', 'clean'], CHDIR => $options->{root} });

        # apt since 1.6 creates the auxfiles directory. If apt inside the
        # chroot is older than that, then it will not know how to clean it.
        if (-e "$options->{root}/var/lib/apt/lists/auxfiles") {
            rmdir "$options->{root}/var/lib/apt/lists/auxfiles"
              or error "cannot rmdir /var/lib/apt/lists/auxfiles: $!";
        }
    }

    if (any { $_ eq 'cleanup/mmdebstrap' } @{ $options->{skip} }) {
        info "skipping cleanup/mmdebstrap as requested";
    } else {
        # clean up temporary configuration file
        unlink "$options->{root}/etc/apt/apt.conf.d/00mmdebstrap"
          or error "failed to unlink /etc/apt/apt.conf.d/00mmdebstrap: $!";
        if (defined $ENV{APT_CONFIG} && -e $ENV{APT_CONFIG}) {
            unlink $ENV{APT_CONFIG}
              or error "failed to unlink $ENV{APT_CONFIG}: $!";
        }
        # the qemu-user-static binary was only copied in for these two modes
        if (defined $options->{qemu}
            and any { $_ eq $options->{mode} } ('root', 'unshare')) {
            unlink "$options->{root}/usr/bin/qemu-$options->{qemu}-static"
              or error
              "cannot unlink /usr/bin/qemu-$options->{qemu}-static: $!";
        }
    }

    if (any { $_ eq 'cleanup/reproducible' } @{ $options->{skip} }) {
        info "skipping cleanup/reproducible as requested";
    } else {
        # clean up certain files to make output reproducible
        foreach my $fname (
            '/var/log/dpkg.log', '/var/log/apt/history.log',
            '/var/log/apt/term.log', '/var/log/alternatives.log',
            '/var/cache/ldconfig/aux-cache', '/var/log/apt/eipp.log.xz'
        ) {
            my $path = "$options->{root}$fname";
            if (!-e $path) {
                next;
            }
            unlink $path or error "cannot unlink $path: $!";
        }
        if (-e "$options->{root}/etc/machine-id") {
            # from machine-id(5):
            # For operating system images which are created once and used on
            # multiple machines, for example for containers or in the cloud,
            # /etc/machine-id should be an empty file in the generic file
            # system image. An ID will be generated during boot and saved to
            # this file if possible. Having an empty file in place is useful
            # because it allows a temporary file to be bind-mounted over the
            # real file, in case the image is used read-only.
            unlink "$options->{root}/etc/machine-id"
              or error "cannot unlink /etc/machine-id: $!";
            open my $fh, '>', "$options->{root}/etc/machine-id"
              or error "failed to open(): $!";
            close $fh;
        }
    }

    if (any { $_ eq 'cleanup/tmp' } @{ $options->{skip} }) {
        info "skipping cleanup/tmp as requested";
    } else {
        # remove any possible leftovers in /tmp but warn about it
        if (-d "$options->{root}/tmp") {
            opendir(my $dh, "$options->{root}/tmp")
              or error "Can't opendir($options->{root}/tmp): $!";
            while (my $entry = readdir $dh) {
                # skip the "." and ".." entries
                next if $entry eq ".";
                next if $entry eq "..";
                warning "deleting files in /tmp: $entry";
                remove_tree("$options->{root}/tmp/$entry",
                    { error => \my $err });
                if (@$err) {
                    for my $diag (@$err) {
                        my ($file, $message) = %$diag;
                        if ($file eq '') { warning "general error: $message"; }
                        else { warning "problem unlinking $file: $message"; }
                    }
                }
            }
            closedir($dh);
        }
    }

    return;
}
# messages from process inside unshared namespace to the outside
# openw -- open file for writing
# untar -- extract tar into directory
# write -- write data to last opened file or tar process
# close -- finish file writing or tar extraction
# adios -- last message and tear-down
# messages from process outside unshared namespace to the inside
# okthx -- success
# Read a single protocol message from the filehandle $fh and verify that it
# is an "okthx" acknowledgement carrying no payload.  A message consists of
# a 16 bit big-endian payload length followed by a 5 byte command name.
# Aborts via error() on short reads, eof, a wrong command or a payload.
sub checkokthx {
    my $fh = shift;
    my $header;
    my $nread = read($fh, $header, 2 + 5);
    if (!defined $nread) {
        error("cannot read from socket: $!");
    }
    if ($nread == 0) {
        error("received eof on socket");
    }
    my ($paylen, $cmdname) = unpack("nA5", $header);
    if ($cmdname ne "okthx") {
        error("expected okthx but got: $cmdname");
    }
    if ($paylen != 0) {
        error("expected no payload but got $paylen bytes");
    }
    return;
}
# resolve a path inside a chroot
# Resolve the path $src inside a chroot rooted at $root, following symbolic
# links the way the kernel would if $root were "/".  The returned path is
# always prefixed by $root and can never escape above it.  Aborts via
# error() after 25 symlink indirections to guard against link cycles.
sub chrooted_realpath {
    my $root = shift;
    my $src  = shift;

    my $resolved = $root;
    # relative paths are relative to the root of the chroot, so leading
    # slashes carry no information and are stripped
    $src =~ s{^/+}{};
    my $depth = 0;
    while (length $src) {
        if ($depth > 25) {
            error("too many levels of symbolic links");
        }
        # split off the first path component
        my $head;
        ($head, $src) = split m{/+}, $src, 2;
        if ($head eq ".") {
            # "." is a no-op: stay at the current directory
        } elsif ($head eq "..") {
            # drop the last component but never climb above the root
            $resolved =~ s{(.*)/[^/]*}{$1};
            if ($resolved !~ m/^\Q$root\E/) {
                $resolved = $root;
            }
        } elsif (-l "$resolved/$head") {
            my $target = readlink "$resolved/$head";
            if ($target =~ s{^/+}{}) {
                # absolute link target: restart resolution at the chroot root
                $resolved = $root;
            }
            # re-queue the link target in front of the remaining components
            $src = length $src ? "$target/$src" : $target;
            $depth++;
        } else {
            # plain component: descend into it
            $resolved = "$resolved/$head";
        }
    }
    return $resolved;
}
sub hookhelper {
    # Helper process through which hook shell commands (copy-in, copy-out,
    # tar-in, tar-out, upload, download, sync-in, sync-out) move data in and
    # out of the chroot.  It speaks a length-prefixed message protocol on
    # stdin/stdout with the process outside the unshared namespace (see
    # hooklistener) and runs tar or cat on the chroot side as appropriate.
    # Expected arguments: $ARGV[1]=root, [2]=mode, [3]=hook, [4]=qemu arch,
    # [5]=verbosity, [6]=command, [7..]=paths (the last one is the target).
    #
    # we put everything in an eval block because that way we can easily handle
    # errors without goto labels or much code duplication: the error handler
    # has to send an "error" message to the other side
    eval {
        my $root = $ARGV[1];
        my $mode = $ARGV[2];
        my $hook = $ARGV[3];
        my $qemu = $ARGV[4];
        $verbosity_level = $ARGV[5];
        my $command = $ARGV[6];
        my @cmdprefix = ();
        my @tarcmd = (
            'tar', '--numeric-owner', '--xattrs', '--format=pax',
            '--pax-option=exthdr.name=%d/PaxHeaders/%f,'
              . 'delete=atime,delete=ctime'
        );
        if ($hook eq 'setup') {
            if ($mode eq 'proot') {
                # since we cannot run tar inside the chroot under proot during
                # the setup hook because the chroot is empty, we have to run
                # tar from the outside, which leads to all files being owned
                # by the user running mmdebstrap. To let the ownership
                # information not be completely off, we force all files be
                # owned by the root user.
                push @tarcmd, '--owner=0', '--group=0';
            }
        } elsif (any { $_ eq $hook } ('extract', 'essential', 'customize')) {
            if ($mode eq 'fakechroot') {
                # Fakechroot requires tar to run inside the chroot or
                # otherwise absolute symlinks will include the path to the
                # root directory
                push @cmdprefix, '/usr/sbin/chroot', $root;
            } elsif ($mode eq 'proot') {
                # proot requires tar to run inside proot or otherwise
                # permissions will be completely off
                push @cmdprefix, 'proot', '--root-id', "--rootfs=$root",
                  '--cwd=/', "--qemu=$qemu";
            } elsif (any { $_ eq $mode } ('root', 'chrootless', 'unshare')) {
                # not chrooting in this case
            } else {
                error "unknown mode: $mode";
            }
        } else {
            error "unknown hook: $hook";
        }
        if (
            any { $_ eq $command }
            ('copy-in', 'tar-in', 'upload', 'sync-in')
        ) {
            if (scalar @ARGV < 9) {
                error "$command needs at least one path on the"
                  . " outside and the output path inside the chroot";
            }
            my $outpath = $ARGV[-1];
            for (my $i = 7 ; $i < $#ARGV ; $i++) {
                # the right argument for tar's --directory argument depends on
                # whether tar is called from inside the chroot or from the
                # outside
                my $directory;
                if ($hook eq 'setup') {
                    # tar runs outside, so acquire the correct path
                    $directory = chrooted_realpath $root, $outpath;
                } elsif (
                    any { $_ eq $hook }
                    ('extract', 'essential', 'customize')
                ) {
                    if (any { $_ eq $mode } ('fakechroot', 'proot')) {
                        # tar will run inside the chroot
                        $directory = $outpath;
                    } elsif (
                        any { $_ eq $mode }
                        ('root', 'chrootless', 'unshare')
                    ) {
                        $directory = chrooted_realpath $root, $outpath;
                    } else {
                        error "unknown mode: $mode";
                    }
                } else {
                    error "unknown hook: $hook";
                }
                # if chrooted_realpath was used and if neither fakechroot or
                # proot were used (absolute symlinks will be broken) we can
                # check and potentially fail early if the target does not exist
                if (none { $_ eq $mode } ('fakechroot', 'proot')) {
                    my $dirtocheck = $directory;
                    if ($command eq 'upload') {
                        # check the parent directory instead
                        $dirtocheck =~ s/(.*)\/[^\/]*/$1/;
                    }
                    if (!-e $dirtocheck) {
                        error "path does not exist: $dirtocheck";
                    }
                    if (!-d $dirtocheck) {
                        error "path is not a directory: $dirtocheck";
                    }
                }
                my $fh;
                if ($command eq 'upload') {
                    # open the requested file for writing.  The open() call
                    # is parenthesized so that the "or error" checks open's
                    # return value -- previously the check was accidentally
                    # attached to $directory via the // operator and open
                    # failures went unnoticed.
                    open($fh, '|-', @cmdprefix, 'sh', '-c', 'cat > "$1"',
                        'exec', $directory)
                      or error "failed to fork(): $!";
                } elsif (
                    any { $_ eq $command }
                    ('copy-in', 'tar-in', 'sync-in')
                ) {
                    # open a tar process that extracts the tarfile that we
                    # supply it with on stdin to the output directory inside
                    # the chroot
                    my @cmd = (
                        @cmdprefix, @tarcmd, '--xattrs-include=*',
                        '--directory', $directory, '--extract', '--file', '-'
                    );
                    debug("helper: running " . (join " ", @cmd));
                    open($fh, '|-', @cmd) // error "failed to fork(): $!";
                } else {
                    error "unknown command: $command";
                }
                if ($command eq 'copy-in') {
                    # instruct the parent process to create a tarball of the
                    # requested path outside the chroot
                    debug "helper: sending mktar";
                    print STDOUT (
                        pack("n", length $ARGV[$i]) . "mktar" . $ARGV[$i]);
                } elsif ($command eq 'sync-in') {
                    # instruct the parent process to create a tarball of the
                    # content of the requested path outside the chroot
                    debug "helper: sending mktac";
                    print STDOUT (
                        pack("n", length $ARGV[$i]) . "mktac" . $ARGV[$i]);
                } elsif (any { $_ eq $command } ('upload', 'tar-in')) {
                    # instruct parent process to open a tarball of the
                    # requested path outside the chroot for reading
                    debug "helper: sending openr";
                    print STDOUT (
                        pack("n", length $ARGV[$i]) . "openr" . $ARGV[$i]);
                } else {
                    error "unknown command: $command";
                }
                STDOUT->flush();
                debug "helper: waiting for okthx";
                checkokthx \*STDIN;
                # handle "write" messages from the parent process and feed
                # their payload into the tar process until a "close" message
                # is encountered
                while (1) {
                    # receive the next message
                    my $ret = read(STDIN, my $buf, 2 + 5)
                      // error "cannot read from socket: $!";
                    if ($ret == 0) {
                        error "received eof on socket";
                    }
                    my ($len, $msg) = unpack("nA5", $buf);
                    debug "helper: received message: $msg";
                    if ($msg eq "close") {
                        # finish the loop
                        if ($len != 0) {
                            error "expected no payload but got $len bytes";
                        }
                        debug "helper: sending okthx";
                        print STDOUT (pack("n", 0) . "okthx")
                          or error "cannot write to socket: $!";
                        STDOUT->flush();
                        last;
                    } elsif ($msg ne "write") {
                        error "expected write but got: $msg";
                    }
                    # read the payload
                    my $content;
                    {
                        my $ret = read(STDIN, $content, $len)
                          // error "error cannot read from socket: $!";
                        if ($ret == 0) {
                            error "received eof on socket";
                        }
                    }
                    # write the payload to the tar process
                    print $fh $content
                      or error "cannot write to tar process: $!";
                    debug "helper: sending okthx";
                    print STDOUT (pack("n", 0) . "okthx")
                      or error "cannot write to socket: $!";
                    STDOUT->flush();
                }
                close $fh;
                # for upload, $fh is a cat (not tar) pipeline; its exit
                # status is checked by close above setting $?
                if ($command ne 'upload' and $? != 0) {
                    error "tar failed";
                }
            }
        } elsif (
            any { $_ eq $command }
            ('copy-out', 'tar-out', 'download', 'sync-out')
        ) {
            if (scalar @ARGV < 9) {
                error "$command needs at least one path inside the chroot and"
                  . " the output path on the outside";
            }
            my $outpath = $ARGV[-1];
            for (my $i = 7 ; $i < $#ARGV ; $i++) {
                # the right argument for tar's --directory argument depends on
                # whether tar is called from inside the chroot or from the
                # outside
                my $directory;
                if ($hook eq 'setup') {
                    # tar runs outside, so acquire the correct path
                    $directory = chrooted_realpath $root, $ARGV[$i];
                } elsif (
                    any { $_ eq $hook }
                    ('extract', 'essential', 'customize')
                ) {
                    if (any { $_ eq $mode } ('fakechroot', 'proot')) {
                        # tar will run inside the chroot
                        $directory = $ARGV[$i];
                    } elsif (
                        any { $_ eq $mode }
                        ('root', 'chrootless', 'unshare')
                    ) {
                        $directory = chrooted_realpath $root, $ARGV[$i];
                    } else {
                        error "unknown mode: $mode";
                    }
                } else {
                    error "unknown hook: $hook";
                }
                # if chrooted_realpath was used and if neither fakechroot or
                # proot were used (absolute symlinks will be broken) we can
                # check and potentially fail early if the source does not exist
                if (none { $_ eq $mode } ('fakechroot', 'proot')) {
                    if (!-e $directory) {
                        error "path does not exist: $directory";
                    }
                    if ($command eq 'download') {
                        if (!-f $directory) {
                            error "path is not a file: $directory";
                        }
                    }
                }
                my $fh;
                if ($command eq 'download') {
                    # open the requested file for reading.  As above, the
                    # open() call is parenthesized so that "or error" checks
                    # open's return value instead of defaulting $directory.
                    open($fh, '-|', @cmdprefix, 'sh', '-c', 'cat "$1"',
                        'exec', $directory)
                      or error "failed to fork(): $!";
                } elsif ($command eq 'sync-out') {
                    # Open a tar process that creates a tarfile of everything
                    # inside the requested directory inside the chroot and
                    # writes it to stdout.
                    my @cmd = (
                        @cmdprefix, @tarcmd, '--directory',
                        $directory, '--create', '--file', '-', '.'
                    );
                    debug("helper: running " . (join " ", @cmd));
                    open($fh, '-|', @cmd) // error "failed to fork(): $!";
                } elsif (any { $_ eq $command } ('copy-out', 'tar-out')) {
                    # Open a tar process that creates a tarfile of the
                    # requested directory inside the chroot and writes it to
                    # stdout. To emulate the behaviour of cp, change to the
                    # dirname of the requested path first.
                    my @cmd = (
                        @cmdprefix, @tarcmd, '--directory',
                        dirname($directory), '--create', '--file', '-',
                        basename($directory));
                    debug("helper: running " . (join " ", @cmd));
                    open($fh, '-|', @cmd) // error "failed to fork(): $!";
                } else {
                    error "unknown command: $command";
                }
                if (any { $_ eq $command } ('copy-out', 'sync-out')) {
                    # instruct the parent process to extract a tarball to a
                    # certain path outside the chroot
                    debug "helper: sending untar";
                    print STDOUT (
                        pack("n", length $outpath) . "untar" . $outpath);
                } elsif (any { $_ eq $command } ('download', 'tar-out')) {
                    # instruct parent process to open a tarball of the
                    # requested path outside the chroot for writing
                    debug "helper: sending openw";
                    print STDOUT (
                        pack("n", length $outpath) . "openw" . $outpath);
                } else {
                    error "unknown command: $command";
                }
                STDOUT->flush();
                debug "helper: waiting for okthx";
                checkokthx \*STDIN;
                # read from the tar process and send as payload to the parent
                # process
                while (1) {
                    # read from tar
                    my $ret = read($fh, my $cont, 4096)
                      // error "cannot read from pipe: $!";
                    if ($ret == 0) { last; }
                    debug "helper: sending write";
                    # send to parent
                    print STDOUT pack("n", $ret) . "write" . $cont;
                    STDOUT->flush();
                    debug "helper: waiting for okthx";
                    checkokthx \*STDIN;
                    # a short read means the pipe is drained
                    if ($ret < 4096) { last; }
                }
                # signal to the parent process that we are done
                debug "helper: sending close";
                print STDOUT pack("n", 0) . "close";
                STDOUT->flush();
                debug "helper: waiting for okthx";
                checkokthx \*STDIN;
                close $fh;
                if ($? != 0) {
                    error "$command failed";
                }
            }
        } else {
            error "unknown command: $command";
        }
    };
    if ($@) {
        # inform the other side that something went wrong
        print STDOUT (pack("n", 0) . "error");
        STDOUT->flush();
        error "hookhelper failed: $@";
    }
    return;
}
2020-01-08 14:41:49 +00:00
sub hooklistener {
    # Counterpart of hookhelper running on the outside of the unshared
    # namespace.  It services file and tarball requests (openr, openw,
    # mktar, mktac, untar) arriving as length-prefixed messages on stdin
    # until an "adios" message ends the session.  $ARGV[1] carries the
    # verbosity level.
    #
    # we put everything in an eval block because that way we can easily handle
    # errors without goto labels or much code duplication: the error handler
    # has to send an "error" message to the other side
    eval {
        $verbosity_level = $ARGV[1];
        while (1) {
            # get the next message
            my $msg = "error";
            my $len = -1;
            {
                debug "listener: reading next command";
                my $ret = read(STDIN, my $buf, 2 + 5)
                  // error "cannot read from socket: $!";
                debug "listener: finished reading command";
                if ($ret == 0) {
                    error "received eof on socket";
                }
                ($len, $msg) = unpack("nA5", $buf);
            }
            if ($msg eq "adios") {
                debug "listener: received message: adios";
                # setup finished, so we break out of the loop
                if ($len != 0) {
                    error "expected no payload but got $len bytes";
                }
                last;
            } elsif ($msg eq "openr") {
                # handle the openr message
                debug "listener: received message: openr";
                my $infile;
                {
                    my $ret = read(STDIN, $infile, $len)
                      // error "cannot read from socket: $!";
                    if ($ret == 0) {
                        error "received eof on socket";
                    }
                }
                # make sure that the requested path exists outside the chroot
                if (!-e $infile) {
                    error "$infile does not exist";
                }
                debug "listener: sending okthx";
                print STDOUT (pack("n", 0) . "okthx")
                  or error "cannot write to socket: $!";
                STDOUT->flush();
                open my $fh, '<', $infile
                  or error "failed to open $infile for reading: $!";
                # read from the file and send as payload to the child process
                while (1) {
                    # read from file
                    my $ret = read($fh, my $cont, 4096)
                      // error "cannot read from pipe: $!";
                    if ($ret == 0) { last; }
                    debug "listener: sending write";
                    # send to child
                    print STDOUT pack("n", $ret) . "write" . $cont;
                    STDOUT->flush();
                    debug "listener: waiting for okthx";
                    checkokthx \*STDIN;
                    if ($ret < 4096) { last; }
                }
                # signal to the child process that we are done
                debug "listener: sending close";
                print STDOUT pack("n", 0) . "close";
                STDOUT->flush();
                debug "listener: waiting for okthx";
                checkokthx \*STDIN;
                close $fh;
            } elsif ($msg eq "openw") {
                debug "listener: received message: openw";
                # payload is the output directory
                my $outfile;
                {
                    my $ret = read(STDIN, $outfile, $len)
                      // error "cannot read from socket: $!";
                    if ($ret == 0) {
                        error "received eof on socket";
                    }
                }
                # make sure that the directory exists
                my $outdir = dirname($outfile);
                if (-e $outdir) {
                    if (!-d $outdir) {
                        error "$outdir already exists but is not a directory";
                    }
                } else {
                    my $num_created = make_path $outdir, { error => \my $err };
                    if ($err && @$err) {
                        error(
                            join "; ",
                            (
                                map { "cannot create " . (join ": ", %{$_}) }
                                  @$err
                            ));
                    } elsif ($num_created == 0) {
                        error "cannot create $outdir";
                    }
                }
                debug "listener: sending okthx";
                print STDOUT (pack("n", 0) . "okthx")
                  or error "cannot write to socket: $!";
                STDOUT->flush();
                # now we expect one or more "write" messages containing the
                # tarball to write
                open my $fh, '>', $outfile
                  or error "failed to open $outfile for writing: $!";
                # handle "write" messages from the child process and feed
                # their payload into the file handle until a "close" message
                # is encountered
                while (1) {
                    # receive the next message
                    my $ret = read(STDIN, my $buf, 2 + 5)
                      // error "cannot read from socket: $!";
                    if ($ret == 0) {
                        error "received eof on socket";
                    }
                    my ($len, $msg) = unpack("nA5", $buf);
                    debug "listener: received message: $msg";
                    if ($msg eq "close") {
                        # finish the loop
                        if ($len != 0) {
                            error "expected no payload but got $len bytes";
                        }
                        debug "listener: sending okthx";
                        print STDOUT (pack("n", 0) . "okthx")
                          or error "cannot write to socket: $!";
                        STDOUT->flush();
                        last;
                    } elsif ($msg ne "write") {
                        # we should not receive this message at this point
                        error "expected write but got: $msg";
                    }
                    # read the payload
                    my $content;
                    {
                        my $ret = read(STDIN, $content, $len)
                          // error "error cannot read from socket: $!";
                        if ($ret == 0) {
                            error "received eof on socket";
                        }
                    }
                    # write the payload to the file handle
                    print $fh $content
                      or error "cannot write to file handle: $!";
                    debug "listener: sending okthx";
                    print STDOUT (pack("n", 0) . "okthx")
                      or error "cannot write to socket: $!";
                    STDOUT->flush();
                }
                close $fh;
            } elsif (any { $_ eq $msg } ('mktar', 'mktac')) {
                # handle the mktar message
                debug "listener: received message: $msg";
                my $indir;
                {
                    my $ret = read(STDIN, $indir, $len)
                      // error "cannot read from socket: $!";
                    if ($ret == 0) {
                        error "received eof on socket";
                    }
                }
                # make sure that the requested path exists outside the chroot
                if (!-e $indir) {
                    error "$indir does not exist";
                }
                debug "listener: sending okthx";
                print STDOUT (pack("n", 0) . "okthx")
                  or error "cannot write to socket: $!";
                STDOUT->flush();
                # Open a tar process creating a tarfile of the instructed
                # path. To emulate the behaviour of cp, change to the
                # dirname of the requested path first (mktar); mktac instead
                # archives the directory's content.
                my @cmd = (
                    'tar',
                    '--numeric-owner',
                    '--xattrs',
                    '--format=pax',
                    '--pax-option=exthdr.name=%d/PaxHeaders/%f,'
                      . 'delete=atime,delete=ctime',
                    '--directory',
                    $msg eq 'mktar' ? dirname($indir) : $indir,
                    '--create',
                    '--file',
                    '-',
                    $msg eq 'mktar' ? basename($indir) : '.'
                );
                debug("listener: running " . (join " ", @cmd));
                open(my $fh, '-|', @cmd) // error "failed to fork(): $!";
                # read from the tar process and send as payload to the child
                # process
                while (1) {
                    # read from tar
                    my $ret = read($fh, my $cont, 4096)
                      // error "cannot read from pipe: $!";
                    if ($ret == 0) { last; }
                    debug "listener: sending write ($ret bytes)";
                    # send to child
                    print STDOUT pack("n", $ret) . "write" . $cont;
                    STDOUT->flush();
                    debug "listener: waiting for okthx";
                    checkokthx \*STDIN;
                    if ($ret < 4096) { last; }
                }
                # signal to the child process that we are done
                debug "listener: sending close";
                print STDOUT pack("n", 0) . "close";
                STDOUT->flush();
                debug "listener: waiting for okthx";
                checkokthx \*STDIN;
                close $fh;
                if ($? != 0) {
                    error "tar failed";
                }
            } elsif ($msg eq "untar") {
                debug "listener: received message: untar";
                # payload is the output directory
                my $outdir;
                {
                    my $ret = read(STDIN, $outdir, $len)
                      // error "cannot read from socket: $!";
                    if ($ret == 0) {
                        error "received eof on socket";
                    }
                }
                # make sure that the directory exists
                if (-e $outdir) {
                    if (!-d $outdir) {
                        error "$outdir already exists but is not a directory";
                    }
                } else {
                    my $num_created = make_path $outdir, { error => \my $err };
                    if ($err && @$err) {
                        error(
                            join "; ",
                            (
                                map { "cannot create " . (join ": ", %{$_}) }
                                  @$err
                            ));
                    } elsif ($num_created == 0) {
                        error "cannot create $outdir";
                    }
                }
                debug "listener: sending okthx";
                print STDOUT (pack("n", 0) . "okthx")
                  or error "cannot write to socket: $!";
                STDOUT->flush();
                # now we expect one or more "write" messages containing the
                # tarball to unpack.  The open() call is parenthesized so
                # that "or error" checks open's return value -- previously
                # the // operator was attached to the always-defined string
                # '-' and fork failures went unnoticed.
                open(my $fh, '|-', 'tar', '--numeric-owner', '--xattrs',
                    '--xattrs-include=*', '--directory', $outdir,
                    '--extract', '--file', '-')
                  or error "failed to fork(): $!";
                # handle "write" messages from the child process and feed
                # their payload into the tar process until a "close" message
                # is encountered
                while (1) {
                    # receive the next message
                    my $ret = read(STDIN, my $buf, 2 + 5)
                      // error "cannot read from socket: $!";
                    if ($ret == 0) {
                        error "received eof on socket";
                    }
                    my ($len, $msg) = unpack("nA5", $buf);
                    debug "listener: received message: $msg";
                    if ($msg eq "close") {
                        # finish the loop
                        if ($len != 0) {
                            error "expected no payload but got $len bytes";
                        }
                        debug "listener: sending okthx";
                        print STDOUT (pack("n", 0) . "okthx")
                          or error "cannot write to socket: $!";
                        STDOUT->flush();
                        last;
                    } elsif ($msg ne "write") {
                        # we should not receive this message at this point
                        error "expected write but got: $msg";
                    }
                    # read the payload
                    my $content;
                    {
                        my $ret = read(STDIN, $content, $len)
                          // error "error cannot read from socket: $!";
                        if ($ret == 0) {
                            error "received eof on socket";
                        }
                    }
                    # write the payload to the tar process
                    print $fh $content
                      or error "cannot write to tar process: $!";
                    debug "listener: sending okthx";
                    print STDOUT (pack("n", 0) . "okthx")
                      or error "cannot write to socket: $!";
                    STDOUT->flush();
                }
                close $fh;
                if ($? != 0) {
                    error "tar failed";
                }
            } else {
                error "unknown message: $msg";
            }
        }
    };
    if ($@) {
        debug("hooklistener errored out: $@");
        # inform the other side that something went wrong
        print STDOUT (pack("n", 0) . "error")
          or error "cannot write to socket: $!";
        STDOUT->flush();
    }
    return;
}
# parse files of the format found in /usr/share/distro-info/ and return two
# lists: the first contains codenames of end-of-life distros and the second
# list contains codenames of currently active distros
sub parse_distro_info {
    # Parse a csv file of the format found in /usr/share/distro-info/ and
    # return two array refs: the codenames ("series" column) of end-of-life
    # releases and of currently active releases, judged against today's
    # date.  Aborts via error() on malformed input.
    my $file    = shift;
    my @eol     = ();
    my @current = ();
    # iso8601 date for "today" -- this format sorts lexicographically
    my $today = POSIX::strftime "%Y-%m-%d", localtime;
    open my $fh, '<', $file or error("cannot open $file: $!");
    my $i = 0;
    while (my $line = <$fh>) {
        chomp($line);
        $i++;
        my @cells = split /,/, $line;
        if (scalar @cells < 4) {
            error("cannot parse line $i of $file");
        }
        # the first line must be the expected column header
        if (
            $i == 1
            and (  scalar @cells < 6
                or $cells[0] ne 'version'
                or $cells[1] ne 'codename'
                or $cells[2] ne 'series'
                or $cells[3] ne 'created'
                or $cells[4] ne 'release'
                or $cells[5] ne 'eol')
        ) {
            error("cannot find correct header in $file");
        }
        if ($i == 1) {
            next;
        }
        if (scalar @cells == 6) {
            if ($cells[5] !~ m/^\d\d\d\d-\d\d-\d\d$/) {
                # message fixed: this validates the "eol" column, not "eof"
                error("invalid eol date format in $file:$i: $cells[5]");
            }
            # since the date format is iso8601, we can use lexicographic
            # string comparison to compare dates
            if ($cells[5] lt $today) {
                push @eol, $cells[2];
            } else {
                push @current, $cells[2];
            }
        } else {
            # a missing eol column means the release is still supported
            push @current, $cells[2];
        }
    }
    close $fh;
    return ([@eol], [@current]);
}
sub get_suite_by_vendor {
    # Build a mapping from vendor name (debian, ubuntu, tanglu, kali) to a
    # hash of known suite names.  The per-suite value is 1 for end-of-life
    # Debian releases and 0 otherwise.
    my %suite_by_vendor = (
        'debian' => {},
        'ubuntu' => {},
        'tanglu' => {},
        'kali'   => {},
    );

    # pre-fill with some known values
    $suite_by_vendor{'debian'}->{$_} = 1
      for qw(potato woody sarge etch lenny squeeze wheezy jessie);
    $suite_by_vendor{'debian'}->{$_} = 0
      for qw(unstable stable oldstable stretch buster bullseye bookworm);
    $suite_by_vendor{'tanglu'}->{$_} = 0
      for qw(aequorea bartholomea chromodoris dasyatis);
    $suite_by_vendor{'kali'}->{$_} = 0
      for qw(kali-dev kali-rolling kali-bleeding-edge);
    $suite_by_vendor{'ubuntu'}->{$_} = 0
      for qw(trusty xenial zesty artful bionic cosmic);

    # if the Debian package distro-info-data is installed, then we can use it,
    # to get better data about new distros or EOL distros
    if (-e '/usr/share/distro-info/debian.csv') {
        my ($eol, $current)
          = parse_distro_info('/usr/share/distro-info/debian.csv');
        $suite_by_vendor{'debian'}->{$_} = 1 for @{$eol};
        $suite_by_vendor{'debian'}->{$_} = 0 for @{$current};
    }
    if (-e '/usr/share/distro-info/ubuntu.csv') {
        my ($eol, $current)
          = parse_distro_info('/usr/share/distro-info/ubuntu.csv');
        $suite_by_vendor{'ubuntu'}->{$_} = 0 for (@{$eol}, @{$current});
    }

    # if debootstrap is installed we infer distro names from the symlink
    # targets of the scripts in /usr/share/debootstrap/scripts/
    my $debootstrap_scripts = '/usr/share/debootstrap/scripts/';
    if (-d $debootstrap_scripts) {
        opendir(my $dh, $debootstrap_scripts)
          or error "Can't opendir($debootstrap_scripts): $!";
        # each symlink target identifies the vendor of the linking suite
        my %vendor_of_target = (
            'sid'      => 'debian',
            'gutsy'    => 'ubuntu',
            'aequorea' => 'tanglu',
            'kali'     => 'kali',
        );
        while (my $suite = readdir $dh) {
            # this is only a heuristic -- don't overwrite anything but instead
            # just update anything that was missing
            next if !-l "$debootstrap_scripts/$suite";
            my $target = readlink "$debootstrap_scripts/$suite";
            my $vendor = $vendor_of_target{$target};
            next if !defined $vendor;
            if (!exists $suite_by_vendor{$vendor}->{$suite}) {
                $suite_by_vendor{$vendor}->{$suite} = 0;
            }
        }
        closedir($dh);
    }

    return %suite_by_vendor;
}
# try to guess the right keyring path for the given suite
sub get_keyring_by_suite {
    # Guess the archive keyring path for the suite named $query, using the
    # vendor/suite mapping produced by get_suite_by_vendor.  Prefers the
    # paths reported by Dpkg::Vendor if those modules are available and
    # falls back to well-known keyring locations otherwise.  Returns
    # nothing for an unknown suite.
    my $query           = shift;
    my $suite_by_vendor = shift;

    my $debianvendor;
    my $ubuntuvendor;
    # the Dpkg::Vendor modules may not be installed, so failures are ignored
    eval {
        require Dpkg::Vendor::Debian;
        require Dpkg::Vendor::Ubuntu;
        $debianvendor = Dpkg::Vendor::Debian->new();
        $ubuntuvendor = Dpkg::Vendor::Ubuntu->new();
    };

    my $keyring_by_vendor = sub {
        my ($vendor, $eol) = @_;
        if ($vendor eq 'debian') {
            if ($eol) {
                return $debianvendor->run_hook('archive-keyrings-historic')
                  if defined $debianvendor;
                return '/usr/share/keyrings/debian-archive-removed-keys.gpg';
            }
            return $debianvendor->run_hook('archive-keyrings')
              if defined $debianvendor;
            return '/usr/share/keyrings/debian-archive-keyring.gpg';
        }
        if ($vendor eq 'ubuntu') {
            return $ubuntuvendor->run_hook('archive-keyrings')
              if defined $ubuntuvendor;
            return '/usr/share/keyrings/ubuntu-archive-keyring.gpg';
        }
        return '/usr/share/keyrings/tanglu-archive-keyring.gpg'
          if $vendor eq 'tanglu';
        return '/usr/share/keyrings/kali-archive-keyring.gpg'
          if $vendor eq 'kali';
        error "unknown vendor: $vendor";
    };

    # resolve a keyring for every known suite, then answer the query
    my %keyrings = ();
    foreach my $vendor (keys %{$suite_by_vendor}) {
        foreach my $suite (keys %{ $suite_by_vendor->{$vendor} }) {
            my $keyring = $keyring_by_vendor->(
                $vendor, $suite_by_vendor->{$vendor}->{$suite});
            debug "suite $suite with keyring $keyring";
            $keyrings{$suite} = $keyring;
        }
    }
    if (exists $keyrings{$query}) {
        return $keyrings{$query};
    }
    return;
}
# Build an apt sources.list (one-line format) for the given suite.
#
# $suite           - distribution codename or alias (e.g. "bullseye", "focal")
# $arch            - native architecture (selects archive vs ports for Ubuntu)
# $signedby        - empty string or a " [signed-by=...]" option snippet that
#                    is appended directly after the "deb" keyword
# $compstr         - space separated component list (e.g. "main contrib")
# $suite_by_vendor - hashref: vendor name -> hashref of that vendor's suites
#
# Returns the sources.list content as a single string.
sub get_sourceslist_by_suite {
    my $suite           = shift;
    my $arch            = shift;
    my $signedby        = shift;
    my $compstr         = shift;
    my $suite_by_vendor = shift;

    my @debstable    = keys %{ $suite_by_vendor->{'debian'} };
    my @ubuntustable = keys %{ $suite_by_vendor->{'ubuntu'} };
    my @tanglustable = keys %{ $suite_by_vendor->{'tanglu'} };
    my @kali         = keys %{ $suite_by_vendor->{'kali'} };

    my $mirror    = 'http://deb.debian.org/debian';
    my $secmirror = 'http://security.debian.org/debian-security';

    if (any { $_ eq $suite } @ubuntustable) {
        # only amd64 and i386 are hosted on archive.ubuntu.com, all other
        # architectures live on ports.ubuntu.com
        if (any { $_ eq $arch } ('amd64', 'i386')) {
            $mirror    = 'http://archive.ubuntu.com/ubuntu';
            $secmirror = 'http://security.ubuntu.com/ubuntu';
        } else {
            $mirror    = 'http://ports.ubuntu.com/ubuntu-ports';
            $secmirror = 'http://ports.ubuntu.com/ubuntu-ports';
        }
        if (-e '/usr/share/debootstrap/scripts/gutsy') {
            # try running the debootstrap script but ignore errors
            my $script = 'set -eu;
        default_mirror() { echo $1; };
        mirror_style() { :; };
        download_style() { :; };
        finddebs_style() { :; };
        variants() { :; };
        keyring() { :; };
        doing_variant() { false; };
        . /usr/share/debootstrap/scripts/gutsy;';
            # The former "open ... $script // last" never checked the open
            # return value ("//" bound to $script, which is always defined)
            # and "last" was not inside any loop.  Check open explicitly and
            # silently keep the default mirror if it fails.
            if (
                open my $fh, '-|', 'env', "ARCH=$arch", "SUITE=$suite",
                'sh', '-c', $script
            ) {
                chomp(
                    my $output = do { local $/; <$fh> }
                );
                close $fh;
                if ($? == 0 && $output ne '') {
                    $mirror = $output;
                }
            }
        }
    } elsif (any { $_ eq $suite } @tanglustable) {
        $mirror = 'http://archive.tanglu.org/tanglu';
    } elsif (any { $_ eq $suite } @kali) {
        $mirror = 'https://http.kali.org/kali';
    }

    my $sourceslist = '';
    $sourceslist .= "deb$signedby $mirror $suite $compstr\n";
    if (any { $_ eq $suite } @ubuntustable) {
        $sourceslist .= "deb$signedby $mirror $suite-updates $compstr\n";
        $sourceslist .= "deb$signedby $secmirror $suite-security $compstr\n";
    } elsif (any { $_ eq $suite } @tanglustable) {
        $sourceslist .= "deb$signedby $secmirror $suite-updates $compstr\n";
    } elsif (any { $_ eq $suite } @debstable
        and none { $_ eq $suite } ('testing', 'unstable', 'sid')) {
        $sourceslist .= "deb$signedby $mirror $suite-updates $compstr\n";

        # the security mirror changes, starting with bullseye
        # https://lists.debian.org/87r26wqr2a.fsf@43-1.org
        my $bullseye_or_later = 0;
        my $distro_info       = '/usr/share/distro-info/debian.csv';
        eval { require Debian::DistroInfo; };
        if (!$@) {
            # libdistro-info-perl is installed
            my $debinfo = DebianDistroInfo->new();
            # version() returns undef for suites it does not know about, so
            # guard against comparing undef numerically
            my $version = $debinfo->version($suite, 0);
            if (defined $version and $version >= 11) {
                $bullseye_or_later = 1;
            }
        } elsif (-f $distro_info) {
            # distro-info-data is installed
            open my $fh, '<', $distro_info
              or error "cannot open $distro_info: $!";
            my $i = 0;
            my $matching_version;
            while (my $line = <$fh>) {
                chomp($line);
                $i++;
                my @cells = split /,/, $line;
                if (scalar @cells < 4) {
                    error "cannot parse line $i of $distro_info";
                }
                # the first row must be the expected csv header
                if (
                    $i == 1
                    and (  scalar @cells < 6
                        or $cells[0] ne 'version'
                        or $cells[1] ne 'codename'
                        or $cells[2] ne 'series'
                        or $cells[3] ne 'created'
                        or $cells[4] ne 'release'
                        or $cells[5] ne 'eol')
                ) {
                    error "cannot find correct header in $distro_info";
                }
                if ($i == 1) {
                    next;
                }
                # match either the capitalized codename or the series column
                if (lc $cells[1] eq $suite or lc $cells[2] eq $suite) {
                    $matching_version = $cells[0];
                    last;
                }
            }
            close $fh;
            # $matching_version stays undef when the suite was not found and
            # can be the empty string for rows without a version number, so
            # only compare numerically when it really looks like a number
            if (    defined $matching_version
                and $matching_version =~ /^\d+(\.\d+)?$/
                and $matching_version >= 11) {
                $bullseye_or_later = 1;
            }
        } else {
            # neither libdistro-info-perl nor distro-info-data is installed
            if (any { $_ eq $suite } ('bullseye', 'bookworm')) {
                $bullseye_or_later = 1;
            }
        }

        if ($bullseye_or_later) {
            # starting from bullseye the suffix is -security
            $sourceslist
              .= "deb$signedby $secmirror $suite-security" . " $compstr\n";
        } else {
            $sourceslist
              .= "deb$signedby $secmirror $suite/updates" . " $compstr\n";
        }
    }
    return $sourceslist;
}
# Guess whether the given sources.list content is in the classic one-line
# format or in the deb822 stanza format.
#
# The first non-comment line decides: a line starting with "deb " or
# "deb-src " means one-line format, a line starting with a "Field:" name
# means deb822.  Returns 'one-line', 'deb822' or nothing if neither marker
# was found.
sub guess_sources_format {
    my $content = shift;
    foreach my $line (split /\n/, $content) {
        # one-line format entries begin with "deb" or "deb-src"
        return 'one-line' if $line =~ /^deb(-src)? /;
        # deb822 stanzas begin with a field name followed by a colon;
        # comments and blank lines match neither pattern and are skipped
        return 'deb822' if $line =~ /^[^#:\s]+:/;
    }
    return;
}
# Approximate the disk usage of the given directory in 1024 byte blocks.
#
# The "du" utility reports different results depending on the underlying
# filesystem, see https://bugs.debian.org/650077 for a discussion.
#
# We use code similar to the one used by dpkg-gencontrol instead:
# regular files are measured in number of 1024 byte blocks; all other
# entries are assumed to take one block of space.
#
# We ignore /dev because depending on the mode, the directory might be
# populated or not and we want consistent disk usage results independent
# of the mode; instead, one block per @devfiles entry is counted.
sub approx_disk_usage {
    my $directory = shift;
    info "approximating disk usage...";
    my $installed_size      = 0;
    my $scan_installed_size = sub {
        if ($File::Find::name eq "$directory/dev") {
            # add all entries of @devfiles once
            $installed_size += scalar @devfiles;
        } elsif ($File::Find::name =~ m{^\Q$directory\E/dev/}) {
            # ignore everything below /dev
            #
            # \Q...\E quotes regex metacharacters in the path: the former
            # bare interpolation of $directory mis-matched for paths
            # containing characters like "+", "." or "("
        } elsif (-f $File::Find::name) {
            # add file size in 1024 byte blocks
            $installed_size += int(((-s $File::Find::name) + 1024) / 1024);
        } else {
            # all other entries are assumed to only take up one block
            $installed_size += 1;
        }
    };
    find($scan_installed_size, $directory);
    # because the above is only a heuristic we add 10% extra for good measure
    return int($installed_size * 1.1);
}
sub main() {
my $before = Time::HiRes::time;
umask 022;
if (scalar @ARGV >= 7 && $ARGV[0] eq "--hook-helper") {
hookhelper();
2020-01-08 14:41:49 +00:00
exit 0;
}
# this is the counterpart to --hook-helper and will receive and carry
# out its instructions
2020-11-13 21:37:53 +00:00
if (scalar @ARGV == 2 && $ARGV[0] eq "--hook-listener") {
hooklistener();
exit 0;
}
2020-01-18 22:13:10 +00:00
# this is like:
# lxc-usernsexec -- lxc-unshare -s 'MOUNT|PID|UTSNAME|IPC' ...
# but without needing lxc
if ($ARGV[0] eq "--unshare-helper") {
if (!test_unshare(1)) {
exit 1;
}
my @idmap = read_subuid_subgid;
my $pid = get_unshare_cmd(
sub {
0 == system @ARGV[1 .. $#ARGV] or error "system failed: $?";
},
\@idmap
);
waitpid $pid, 0;
$? == 0 or error "unshared command failed";
exit 0;
}
2018-09-18 09:20:24 +00:00
my $mtime = time;
if (exists $ENV{SOURCE_DATE_EPOCH}) {
2020-01-08 16:44:07 +00:00
$mtime = $ENV{SOURCE_DATE_EPOCH} + 0;
2018-09-18 09:20:24 +00:00
}
2020-01-09 07:39:40 +00:00
{
## no critic (Variables::RequireLocalizedPunctuationVars)
$ENV{DEBIAN_FRONTEND} = 'noninteractive';
$ENV{DEBCONF_NONINTERACTIVE_SEEN} = 'true';
$ENV{LC_ALL} = 'C.UTF-8';
$ENV{LANGUAGE} = 'C.UTF-8';
$ENV{LANG} = 'C.UTF-8';
}
2018-09-18 09:20:24 +00:00
# copy ARGV because getopt modifies it
my @ARGVORIG = @ARGV;
# obtain the correct defaults for the keyring locations that apt knows
# about
my $apttrusted
= `eval \$(apt-config shell v Dir::Etc::trusted/f); printf \$v`;
my $apttrustedparts
= `eval \$(apt-config shell v Dir::Etc::trustedparts/d); printf \$v`;
2020-01-08 16:44:07 +00:00
chomp(my $hostarch = `dpkg --print-architecture`);
2018-09-18 09:20:24 +00:00
my $options = {
2020-01-08 16:44:07 +00:00
components => ["main"],
variant => "important",
include => [],
architectures => [$hostarch],
mode => 'auto',
dpkgopts => [],
aptopts => [],
apttrusted => $apttrusted,
apttrustedparts => $apttrustedparts,
2020-01-08 16:44:07 +00:00
noop => [],
setup_hook => [],
2020-03-22 13:08:21 +00:00
extract_hook => [],
2020-01-08 16:44:07 +00:00
essential_hook => [],
customize_hook => [],
2020-01-10 10:44:15 +00:00
dryrun => 0,
2020-04-09 22:00:36 +00:00
skip => [],
2018-09-18 09:20:24 +00:00
};
2019-02-23 07:43:15 +00:00
my $logfile = undef;
my $format = 'auto';
2020-01-08 16:44:07 +00:00
Getopt::Long::Configure('default', 'bundling', 'auto_abbrev',
'ignore_case_always');
2018-09-18 09:20:24 +00:00
GetOptions(
2020-01-08 16:44:07 +00:00
'h|help' => sub { pod2usage(-exitval => 0, -verbose => 1) },
'man' => sub { pod2usage(-exitval => 0, -verbose => 2) },
'version' => sub { print STDOUT "mmdebstrap $VERSION\n"; exit 0; },
'components=s@' => \$options->{components},
'variant=s' => \$options->{variant},
'include=s@' => \$options->{include},
2020-01-08 14:41:49 +00:00
'architectures=s@' => \$options->{architectures},
2020-01-08 16:44:07 +00:00
'mode=s' => \$options->{mode},
'dpkgopt=s@' => \$options->{dpkgopts},
'aptopt=s@' => \$options->{aptopts},
'keyring=s' => sub {
2020-01-08 14:41:49 +00:00
my ($opt_name, $opt_value) = @_;
if ($opt_value =~ /"/) {
error "--keyring: apt cannot handle paths with double quotes:"
. " $opt_value";
2020-01-08 14:41:49 +00:00
}
2020-01-08 16:44:07 +00:00
if (!-e $opt_value) {
2020-01-08 14:41:49 +00:00
error "keyring \"$opt_value\" does not exist";
}
my $abs_path = abs_path($opt_value);
if (!defined $abs_path) {
error "unable to get absolute path of --keyring: $opt_value";
}
# since abs_path resolved all symlinks for us, we can now test
# what the actual target actually is
if (-d $abs_path) {
$options->{apttrustedparts} = $abs_path;
2020-01-08 14:41:49 +00:00
} else {
$options->{apttrusted} = $abs_path;
2020-01-08 14:41:49 +00:00
}
},
2020-01-08 16:44:07 +00:00
's|silent' => sub { $verbosity_level = 0; },
'q|quiet' => sub { $verbosity_level = 0; },
2020-01-08 14:41:49 +00:00
'v|verbose' => sub { $verbosity_level = 2; },
2020-01-08 16:44:07 +00:00
'd|debug' => sub { $verbosity_level = 3; },
'format=s' => \$format,
2020-01-08 14:41:49 +00:00
'logfile=s' => \$logfile,
# no-op options so that mmdebstrap can be used with
# sbuild-createchroot --debootstrap=mmdebstrap
2020-01-08 16:44:07 +00:00
'resolve-deps' => sub { push @{ $options->{noop} }, 'resolve-deps'; },
'merged-usr' => sub { push @{ $options->{noop} }, 'merged-usr'; },
'no-merged-usr' =>
sub { push @{ $options->{noop} }, 'no-merged-usr'; },
'force-check-gpg' =>
sub { push @{ $options->{noop} }, 'force-check-gpg'; },
'setup-hook=s@' => \$options->{setup_hook},
2020-03-22 13:08:21 +00:00
'extract-hook=s@' => \$options->{extract_hook},
2020-01-08 14:41:49 +00:00
'essential-hook=s@' => \$options->{essential_hook},
'customize-hook=s@' => \$options->{customize_hook},
'hook-directory=s' => sub {
my ($opt_name, $opt_value) = @_;
if (!-e $opt_value) {
error "hook directory \"$opt_value\" does not exist";
}
my $abs_path = abs_path($opt_value);
if (!defined $abs_path) {
error( "unable to get absolute path of "
. "--hook-directory: $opt_value");
}
# since abs_path resolved all symlinks for us, we can now test
# what the actual target actually is
if (!-d $opt_value) {
error "hook directory \"$opt_value\" is not a directory";
}
# gather all files starting with special prefixes into the
# respective keys of a hash
my %scripts;
opendir(my $dh, $opt_value)
or error "Can't opendir($opt_value): $!";
while (my $entry = readdir $dh) {
foreach
my $hook ('setup', 'extract', 'essential', 'customize') {
2020-08-17 16:57:36 +00:00
if ($entry =~ m/^\Q$hook\E/ and -x "$opt_value/$entry") {
push @{ $scripts{$hook} }, "$opt_value/$entry";
}
}
}
closedir($dh);
# add the sorted list associated with each key to the respective
# list of hooks
foreach my $hook (keys %scripts) {
2020-08-17 16:57:36 +00:00
push @{ $options->{"${hook}_hook"} },
(sort @{ $scripts{$hook} });
}
},
# Sometimes --simulate fails even though non-simulate succeeds because
# in simulate mode, apt cannot rely on dpkg to figure out tricky
# dependency situations and will give up instead when it cannot find
# a solution.
#
# 2020-02-06, #debian-apt on OFTC, times in UTC+1
# 12:52 < DonKult> [...] It works in non-simulation because simulate is
# more picky. If you wanna know why simulate complains
# here prepare for long suffering in dependency hell.
'simulate' => \$options->{dryrun},
'dry-run' => \$options->{dryrun},
2020-04-09 22:00:36 +00:00
'skip=s@' => \$options->{skip},
2018-09-18 09:20:24 +00:00
) or pod2usage(-exitval => 2, -verbose => 1);
2019-02-23 07:43:15 +00:00
if (defined($logfile)) {
2020-01-08 14:41:49 +00:00
open(STDERR, '>', $logfile) or error "cannot open $logfile: $!";
2019-02-23 07:43:15 +00:00
}
2020-01-08 16:44:07 +00:00
foreach my $arg (@{ $options->{noop} }) {
info "the option --$arg is a no-op. It only exists for compatibility"
. " with some debootstrap wrappers.";
}
2020-01-10 10:44:15 +00:00
if ($options->{dryrun}) {
2020-03-22 13:08:21 +00:00
foreach my $hook ('setup', 'extract', 'essential', 'customize') {
2020-01-10 10:44:15 +00:00
if (scalar @{ $options->{"${hook}_hook"} } > 0) {
warning "In dry-run mode, --$hook-hook options have no effect";
}
}
}
2020-01-08 16:44:07 +00:00
my @valid_variants = (
'extract', 'custom', 'essential', 'apt',
'required', 'minbase', 'buildd', 'important',
'debootstrap', '-', 'standard'
);
if (none { $_ eq $options->{variant} } @valid_variants) {
2020-01-08 14:41:49 +00:00
error "invalid variant. Choose from " . (join ', ', @valid_variants);
2018-09-18 09:20:24 +00:00
}
# debootstrap and - are an alias for important
2018-09-23 17:36:07 +00:00
if (any { $_ eq $options->{variant} } ('-', 'debootstrap')) {
2020-01-08 14:41:49 +00:00
$options->{variant} = 'important';
2018-09-18 09:20:24 +00:00
}
2020-01-08 16:44:07 +00:00
if ($options->{variant} eq 'essential'
and scalar @{ $options->{include} } > 0) {
warning "cannot install extra packages with variant essential because"
. " apt is missing";
}
2018-09-18 09:20:24 +00:00
# fakeroot is an alias for fakechroot
if ($options->{mode} eq 'fakeroot') {
2020-01-08 14:41:49 +00:00
$options->{mode} = 'fakechroot';
2018-09-18 09:20:24 +00:00
}
# sudo is an alias for root
if ($options->{mode} eq 'sudo') {
2020-01-08 14:41:49 +00:00
$options->{mode} = 'root';
2018-09-18 09:20:24 +00:00
}
2020-01-08 16:44:07 +00:00
my @valid_modes
= ('auto', 'root', 'unshare', 'fakechroot', 'proot', 'chrootless');
2018-09-23 17:36:07 +00:00
if (none { $_ eq $options->{mode} } @valid_modes) {
2020-01-08 14:41:49 +00:00
error "invalid mode. Choose from " . (join ', ', @valid_modes);
2018-09-18 09:20:24 +00:00
}
# sqfs is an alias for squashfs
if ($format eq 'sqfs') {
2020-05-02 21:53:41 +00:00
$format = 'squashfs';
}
# dir is an alias for directory
if ($format eq 'dir') {
$format = 'directory';
}
my @valid_formats = ('auto', 'directory', 'tar', 'squashfs', 'ext2');
if (none { $_ eq $format } @valid_formats) {
error "invalid format. Choose from " . (join ', ', @valid_formats);
}
foreach my $tool ('dpkg', 'dpkg-deb', 'apt-get', 'apt-cache', 'apt-config',
'tar') {
my $found = 0;
foreach my $path (split /:/, $ENV{PATH}) {
if (-f "$path/$tool" && -x _ ) {
$found = 1;
last;
}
}
if (!$found) {
error "cannot find $tool";
}
}
my $check_fakechroot_running = sub {
2020-01-08 14:41:49 +00:00
# test if we are inside fakechroot already
# We fork a child process because setting FAKECHROOT_DETECT seems to
# be an irreversible operation for fakechroot.
my $pid = open my $rfh, '-|' // error "failed to fork(): $!";
if ($pid == 0) {
# with the FAKECHROOT_DETECT environment variable set, any program
# execution will be replaced with the output "fakeroot [version]"
2020-01-09 07:39:40 +00:00
local $ENV{FAKECHROOT_DETECT} = 0;
2020-01-08 14:41:49 +00:00
exec 'echo', 'If fakechroot is running, this will not be printed';
}
my $content = do { local $/; <$rfh> };
waitpid $pid, 0;
my $result = 0;
if ($? == 0 and $content =~ /^fakechroot \d\.\d+$/) {
$result = 1;
}
return $result;
};
# figure out the mode to use or test whether the chosen mode is legal
if ($options->{mode} eq 'auto') {
2020-01-08 14:41:49 +00:00
if (&{$check_fakechroot_running}()) {
# if mmdebstrap is executed inside fakechroot, then we assume the
# user expects fakechroot mode
$options->{mode} = 'fakechroot';
} elsif ($EFFECTIVE_USER_ID == 0) {
# if mmdebstrap is executed as root, we assume the user wants root
# mode
$options->{mode} = 'root';
} elsif (test_unshare(0)) {
# otherwise, unshare mode is our best option if test_unshare()
# succeeds
$options->{mode} = 'unshare';
} elsif (system('fakechroot --version>/dev/null') == 0) {
# the next fallback is fakechroot
# exec ourselves again but within fakechroot
my @prefix = ();
2020-01-08 16:44:07 +00:00
if ($is_covering) {
2020-01-08 14:41:49 +00:00
@prefix = ($EXECUTABLE_NAME, '-MDevel::Cover=-silent,-nogcov');
}
exec 'fakechroot', 'fakeroot', @prefix, $PROGRAM_NAME, @ARGVORIG;
} elsif (system('proot --version>/dev/null') == 0) {
# and lastly, proot
$options->{mode} = 'proot';
} else {
error "unable to pick chroot mode automatically";
}
info "automatically chosen mode: $options->{mode}";
} elsif ($options->{mode} eq 'root') {
2020-01-08 14:41:49 +00:00
if ($EFFECTIVE_USER_ID != 0) {
error "need to be root";
}
} elsif ($options->{mode} eq 'proot') {
2020-01-08 14:41:49 +00:00
if (system('proot --version>/dev/null') != 0) {
error "need working proot binary";
}
} elsif ($options->{mode} eq 'fakechroot') {
2020-01-08 14:41:49 +00:00
if (&{$check_fakechroot_running}()) {
# fakechroot is already running
} elsif (system('fakechroot --version>/dev/null') != 0) {
error "need working fakechroot binary";
} else {
# exec ourselves again but within fakechroot
my @prefix = ();
2020-01-08 16:44:07 +00:00
if ($is_covering) {
2020-01-08 14:41:49 +00:00
@prefix = ($EXECUTABLE_NAME, '-MDevel::Cover=-silent,-nogcov');
}
exec 'fakechroot', 'fakeroot', @prefix, $PROGRAM_NAME, @ARGVORIG;
}
} elsif ($options->{mode} eq 'unshare') {
2020-01-08 14:41:49 +00:00
if (!test_unshare(1)) {
my $procfile = '/proc/sys/kernel/unprivileged_userns_clone';
2020-01-08 16:44:07 +00:00
open(my $fh, '<', $procfile)
or error "failed to open $procfile: $!";
chomp(
my $content = do { local $/; <$fh> }
);
2020-01-08 14:41:49 +00:00
close($fh);
if ($content ne "1") {
info "/proc/sys/kernel/unprivileged_userns_clone is set to"
. " $content";
info "Try running:";
info " sudo sysctl -w kernel.unprivileged_userns_clone=1";
info "or permanently enable unprivileged usernamespaces by"
. " putting the setting into /etc/sysctl.d/";
info "THIS SETTING HAS SECURITY IMPLICATIONS!";
info "Refer to https://bugs.debian.org/cgi-bin/"
. "bugreport.cgi?bug=898446";
2020-01-08 14:41:49 +00:00
}
exit 1;
}
} elsif ($options->{mode} eq 'chrootless') {
if ($EFFECTIVE_USER_ID == 0) {
warning "running chrootless mode as root might damage the host "
. "system";
}
} else {
2020-01-08 14:41:49 +00:00
error "unknown mode: $options->{mode}";
2018-09-18 09:20:24 +00:00
}
my @architectures = ();
2020-01-08 16:44:07 +00:00
foreach my $archs (@{ $options->{architectures} }) {
2020-01-08 14:41:49 +00:00
foreach my $arch (split /[,\s]+/, $archs) {
# strip leading and trailing whitespace
$arch =~ s/^\s+|\s+$//g;
# skip if the remainder is an empty string
if ($arch eq '') {
next;
}
# do not append component if it's already in the list
2020-01-08 16:44:07 +00:00
if (any { $_ eq $arch } @architectures) {
2020-01-08 14:41:49 +00:00
next;
}
push @architectures, $arch;
}
}
2020-01-08 16:44:07 +00:00
$options->{nativearch} = $hostarch;
$options->{foreignarchs} = [];
if (scalar @architectures == 0) {
warning "empty architecture list: falling back to native architecture"
. " $hostarch";
} elsif (scalar @architectures == 1) {
2020-01-08 14:41:49 +00:00
$options->{nativearch} = $architectures[0];
} else {
2020-01-08 14:41:49 +00:00
$options->{nativearch} = $architectures[0];
2020-01-08 16:44:07 +00:00
push @{ $options->{foreignarchs} },
@architectures[1 .. $#architectures];
}
debug "Native architecture (outside): $hostarch";
debug "Native architecture (inside): $options->{nativearch}";
2020-01-08 16:44:07 +00:00
debug("Foreign architectures (inside): "
. (join ', ', @{ $options->{foreignarchs} }));
2018-09-18 09:20:24 +00:00
{
2020-01-08 14:41:49 +00:00
# FIXME: autogenerate this list
my $deb2qemu = {
2020-01-08 16:44:07 +00:00
alpha => 'alpha',
amd64 => 'x86_64',
arm => 'arm',
arm64 => 'aarch64',
armel => 'arm',
armhf => 'arm',
hppa => 'hppa',
i386 => 'i386',
m68k => 'm68k',
mips => 'mips',
mips64 => 'mips64',
2020-01-08 14:41:49 +00:00
mips64el => 'mips64el',
2020-01-08 16:44:07 +00:00
mipsel => 'mipsel',
powerpc => 'ppc',
ppc64 => 'ppc64',
ppc64el => 'ppc64le',
riscv64 => 'riscv64',
s390x => 's390x',
sh4 => 'sh4',
sparc => 'sparc',
sparc64 => 'sparc64',
2020-01-08 14:41:49 +00:00
};
2020-05-01 05:39:26 +00:00
if (any { $_ eq 'check/qemu' } @{ $options->{skip} }) {
info "skipping check/qemu as requested";
} elsif ($options->{mode} eq "chrootless") {
info "skipping emulation check in chrootless mode";
} elsif ($hostarch ne $options->{nativearch}) {
2020-04-10 10:25:24 +00:00
if (system('arch-test --version>/dev/null') != 0) {
error "install arch-test for foreign architecture support";
}
2020-01-08 14:41:49 +00:00
my $withemu = 0;
2020-01-08 16:44:07 +00:00
my $noemu = 0;
2020-01-08 14:41:49 +00:00
{
my $pid = open my $fh, '-|' // error "failed to fork(): $!";
if ($pid == 0) {
{
2020-01-09 07:39:40 +00:00
## no critic (TestingAndDebugging::ProhibitNoWarnings)
# don't print a warning if the following fails
no warnings;
2020-01-08 14:41:49 +00:00
exec 'arch-test', $options->{nativearch};
}
# if exec didn't work (for example because the arch-test
# program is missing) prepare for the worst and assume that
# the architecture cannot be executed
print "$options->{nativearch}: not supported on this"
. " machine/kernel\n";
2020-01-08 14:41:49 +00:00
exit 1;
}
2020-01-08 16:44:07 +00:00
chomp(
my $content = do { local $/; <$fh> }
);
2020-01-08 14:41:49 +00:00
close $fh;
if ($? == 0 and $content eq "$options->{nativearch}: ok") {
$withemu = 1;
}
}
{
my $pid = open my $fh, '-|' // error "failed to fork(): $!";
if ($pid == 0) {
{
2020-01-09 07:39:40 +00:00
## no critic (TestingAndDebugging::ProhibitNoWarnings)
# don't print a warning if the following fails
no warnings;
2020-01-08 14:41:49 +00:00
exec 'arch-test', '-n', $options->{nativearch};
}
# if exec didn't work (for example because the arch-test
# program is missing) prepare for the worst and assume that
# the architecture cannot be executed
print "$options->{nativearch}: not supported on this"
. " machine/kernel\n";
2020-01-08 14:41:49 +00:00
exit 1;
}
2020-01-08 16:44:07 +00:00
chomp(
my $content = do { local $/; <$fh> }
);
2020-01-08 14:41:49 +00:00
close $fh;
if ($? == 0 and $content eq "$options->{nativearch}: ok") {
$noemu = 1;
}
}
# four different outcomes, depending on whether arch-test
# succeeded with or without emulation
#
# withemu | noemu |
# --------+-------+-----------------
# 0 | 0 | test why emu doesn't work and quit
# 0 | 1 | should never happen
# 1 | 0 | use qemu emulation
# 1 | 1 | don't use qemu emulation
if ($withemu == 0 and $noemu == 0) {
{
2020-01-08 16:44:07 +00:00
open my $fh, '<', '/proc/filesystems'
or error "failed to open /proc/filesystems: $!";
2020-01-09 07:39:40 +00:00
unless (grep { /^nodev\tbinfmt_misc$/ } (<$fh>)) {
warning "binfmt_misc not found in /proc/filesystems --"
. " is the module loaded?";
2020-01-08 14:41:49 +00:00
}
close $fh;
}
{
2020-01-08 16:44:07 +00:00
open my $fh, '<', '/proc/mounts'
or error "failed to open /proc/mounts: $!";
unless (
2020-01-09 07:39:40 +00:00
grep {
/^binfmt_misc\s+
\/proc\/sys\/fs\/binfmt_misc\s+
binfmt_misc\s+/x
2020-01-09 07:39:40 +00:00
} (<$fh>)
2020-01-08 16:44:07 +00:00
) {
warning "binfmt_misc not found in /proc/mounts -- not"
. " mounted?";
2020-01-08 14:41:49 +00:00
}
close $fh;