#!/usr/bin/perl
#
# Copyright: 2018 Johannes Schauer <josch@mister-muffin.de>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# The software is provided "as is", without warranty of any kind, express or
# implied, including but not limited to the warranties of merchantability,
# fitness for a particular purpose and noninfringement. In no event shall the
# authors or copyright holders be liable for any claim, damages or other
# liability, whether in an action of contract, tort or otherwise, arising
# from, out of or in connection with the software or the use or other dealings
# in the software.

use strict;
use warnings;

our $VERSION = '0.6.1';

use English;
use Getopt::Long;
use Pod::Usage;
use File::Copy;
use File::Path qw(make_path remove_tree);
use File::Temp qw(tempfile tempdir);
use File::Basename;
use Cwd qw(abs_path);
require "syscall.ph";    ## no critic (Modules::RequireBarewordIncludes)
use Fcntl qw(S_IFCHR S_IFBLK FD_CLOEXEC F_GETFD F_SETFD);
use List::Util qw(any none);
use POSIX qw(SIGINT SIGHUP SIGPIPE SIGTERM SIG_BLOCK SIG_UNBLOCK);
use Carp;
use Term::ANSIColor;
use Socket;

## no critic (InputOutput::RequireBriefOpen)

# from sched.h
# use typeglob constants because "use constant" has several drawback as
# explained in the documentation for the Readonly CPAN module
*CLONE_NEWNS   = \0x20000;
*CLONE_NEWUTS  = \0x4000000;
*CLONE_NEWIPC  = \0x8000000;
*CLONE_NEWUSER = \0x10000000;
*CLONE_NEWPID  = \0x20000000;
*CLONE_NEWNET  = \0x40000000;
our (
    $CLONE_NEWNS,   $CLONE_NEWUTS, $CLONE_NEWIPC,
    $CLONE_NEWUSER, $CLONE_NEWPID, $CLONE_NEWNET
);

# Device nodes that are created or bind-mounted into the chroot.
# type codes:
# 0 -> normal file
# 1 -> hardlink
# 2 -> symlink
# 3 -> character special
# 4 -> block special
# 5 -> directory
my @devfiles = (
    # filename        mode      type link target       major minor
    ["./dev/",        oct(755), 5, '',                 undef, undef],
    ["./dev/console", oct(666), 3, '',                 5,     1],
    ["./dev/fd",      oct(777), 2, '/proc/self/fd',    undef, undef],
    ["./dev/full",    oct(666), 3, '',                 1,     7],
    ["./dev/null",    oct(666), 3, '',                 1,     3],
    ["./dev/ptmx",    oct(666), 3, '',                 5,     2],
    ["./dev/pts/",    oct(755), 5, '',                 undef, undef],
    ["./dev/random",  oct(666), 3, '',                 1,     8],
    ["./dev/shm/",    oct(755), 5, '',                 undef, undef],
    ["./dev/stderr",  oct(777), 2, '/proc/self/fd/2',  undef, undef],
    ["./dev/stdin",   oct(777), 2, '/proc/self/fd/0',  undef, undef],
    ["./dev/stdout",  oct(777), 2, '/proc/self/fd/1',  undef, undef],
    ["./dev/tty",     oct(666), 3, '',                 5,     0],
    ["./dev/urandom", oct(666), 3, '',                 1,     9],
    ["./dev/zero",    oct(666), 3, '',                 1,     5],
);

# verbosity levels:
# 0 -> print nothing
# 1 -> normal output and progress bars
# 2 -> verbose output
# 3 -> debug output
my $verbosity_level = 1;

# true when running under Devel::Cover, so that exec()ed children can
# disable coverage collection
my $is_covering = !!(eval { Devel::Cover::get_coverage() });
# Print a debug message to STDERR, prefixed with "D: ", the current pid and
# the caller's line number. Only active at verbosity level 3 or higher.
sub debug {
    return if $verbosity_level < 3;
    my $msg = shift;
    my (undef, undef, $line) = caller;
    $msg = "D: $PID $line $msg";
    # only colorize when writing to a terminal
    if (-t STDERR) {    ## no critic (InputOutput::ProhibitInteractiveTest)
        $msg = colored($msg, 'clear');
    }
    print STDERR "$msg\n";
    return;
}
# Print an informational message ("I: ...") to STDERR unless the verbosity
# level is 0. At debug verbosity (>= 3) the pid and caller line are added.
sub info {
    return if $verbosity_level == 0;
    my $msg = shift;
    if ($verbosity_level >= 3) {
        my (undef, undef, $line) = caller;
        $msg = "$PID $line $msg";
    }
    $msg = "I: $msg";
    # only colorize when writing to a terminal
    if (-t STDERR) {    ## no critic (InputOutput::ProhibitInteractiveTest)
        $msg = colored($msg, 'green');
    }
    print STDERR "$msg\n";
    return;
}
# Print a warning ("W: ...") to STDERR unless the verbosity level is 0.
sub warning {
    return if $verbosity_level == 0;
    my $msg = "W: " . shift;
    # only colorize when writing to a terminal
    if (-t STDERR) {    ## no critic (InputOutput::ProhibitInteractiveTest)
        $msg = colored($msg, 'bold yellow');
    }
    print STDERR "$msg\n";
    return;
}
# Report a fatal error ("E: ...") and abort.
#
# Unlike info() and warning(), this function must never return, regardless
# of the verbosity level: callers throughout this script rely on
# "do_something or error ..." terminating execution. The former early
# return at verbosity 0 silently swallowed errors and let callers continue
# with invalid state, so it has been removed.
#
# At verbosity level 3 a backtrace is produced via croak(); otherwise we
# die() with the plain message.
sub error {
    # if error() is called with the string from a previous error() that was
    # caught inside an eval(), then the string will have a newline which we
    # are stripping here
    chomp(my $msg = shift);
    $msg = "E: $msg";
    if (-t STDERR) {    ## no critic (InputOutput::ProhibitInteractiveTest)
        $msg = colored($msg, 'bold red');
    }
    if ($verbosity_level == 3) {
        croak $msg;    # produces a backtrace
    } else {
        die "$msg\n";
    }
}
# check whether a directory is mounted by comparing the device number of the
# directory itself with its parent
#
# Returns 1 when $dir looks like a mount point and 0 otherwise (including
# when $dir does not exist).
sub is_mountpoint {
    my $dir = shift;
    return 0 if !-e $dir;
    my @self_stat   = stat "$dir/.";
    my @parent_stat = stat "$dir/..";
    # a different device number means the directory must be mounted
    return 1 if $self_stat[0] != $parent_stat[0];
    # identical inode numbers for . and .. also indicate a mount point
    # (this is the case for the root of a filesystem)
    return 1 if $self_stat[1] == $parent_stat[1];
    return 0;
}
2018-09-18 09:20:24 +00:00
# tar cannot figure out the decompression program when receiving data on
# standard input, thus we do it ourselves. This is copied from tar's
# src/suffix.c
#
# Returns an arrayref with the compressor command line for the given
# filename's suffix, or nothing for "-", plain ".tar" and unknown suffixes.
sub get_tar_compressor {
    my $filename = shift;
    return if $filename eq '-' or $filename =~ /\.tar$/;
    # suffix pattern => compressor command, checked in order
    my @suffix_map = (
        [qr/\.(gz|tgz|taz)$/,       ['gzip']],
        [qr/\.(Z|taZ)$/,            ['compress']],
        [qr/\.(bz2|tbz|tbz2|tz2)$/, ['bzip2']],
        [qr/\.lz$/,                 ['lzip']],
        [qr/\.(lzma|tlz)$/,         ['lzma']],
        [qr/\.lzo$/,                ['lzop']],
        [qr/\.lz4$/,                ['lz4']],
        [qr/\.(xz|txz)$/,           ['xz', '--threads=0']],
        [qr/\.zst$/,                ['zstd']],
    );
    foreach my $entry (@suffix_map) {
        my ($pattern, $cmd) = @{$entry};
        return $cmd if $filename =~ $pattern;
    }
    return;
}
2020-01-09 07:39:40 +00:00
# Probe whether unshare mode can work for the current user: we must not be
# root, the unshare(CLONE_NEWUSER) syscall must succeed in a throwaway child
# process, and the setuid helpers newuidmap/newgidmap must be present.
#
# Returns 1 when unshare mode is usable, 0 otherwise. With a true $verbose
# argument, failures are reported via warning(), otherwise via debug().
sub test_unshare {
    my $verbose = shift;
    # report a problem loudly or quietly depending on $verbose
    my $report = sub {
        my $msg = shift;
        if ($verbose) {
            warning $msg;
        } else {
            debug $msg;
        }
    };
    if ($EFFECTIVE_USER_ID == 0) {
        $report->("cannot use unshare mode when executing as root");
        return 0;
    }
    # arguments to syscalls have to be stored in their own variable or
    # otherwise we will get "Modification of a read-only value attempted"
    my $unshare_flags = $CLONE_NEWUSER;
    # we spawn a new process because if unshare succeeds, we would
    # otherwise have unshared the mmdebstrap process itself which we don't
    # want
    my $pid = fork() // error "fork() failed: $!";
    if ($pid == 0) {
        my $ret = syscall(&SYS_unshare, $unshare_flags);
        if ($ret == 0) {
            exit 0;
        }
        $report->("unshare syscall failed: $!");
        exit 1;
    }
    waitpid($pid, 0);
    return 0 if ($? >> 8) != 0;
    # if newuidmap and newgidmap exist, the exit status will be 1 when
    # executed without parameters
    foreach my $tool ('newuidmap', 'newgidmap') {
        system "$tool 2>/dev/null";
        my $status = $? >> 8;
        next if $status == 1;
        if ($status == 127) {
            $report->("cannot find $tool");
        } else {
            $report->("$tool returned unknown exit status: $?");
        }
        return 0;
    }
    return 1;
}
# Read the subordinate uid and gid range of the current user from
# /etc/subuid and /etc/subgid.
#
# Returns a two-element list (["u", 0, $subid, $range], ["g", 0, $subid,
# $range]) suitable for get_unshare_cmd(), or the empty list (after a
# warning) when the information cannot be gathered.
#
# Fixes over the previous version:
#  - if the username had no entry, the values parsed from the *last* line
#    of the file were silently used; we now track whether a match was found
#  - the "< 1" / "> 1" result-count checks were dead code because the push
#    was unconditional; multiple entries are now detected while scanning
#  - /etc/subgid is now checked for existence/readability just like
#    /etc/subuid was
sub read_subuid_subgid() {
    my $username = getpwuid $<;
    my @result   = ();
    foreach my $type ('u', 'g') {
        my $file = $type eq 'u' ? '/etc/subuid' : '/etc/subgid';
        if (!-e $file) {
            warning "$file doesn't exist";
            return;
        }
        if (!-r $file) {
            warning "$file is not readable";
            return;
        }
        open my $fh, '<', $file
          or error "cannot open $file for reading: $!";
        my $num_matches = 0;
        my ($subid, $num_subid);
        while (my $line = <$fh>) {
            my ($name, $id, $range) = split(/:/, $line, 3);
            next if $name ne $username;
            ($subid, $num_subid) = ($id, $range);
            $num_matches += 1;
        }
        close $fh;
        if ($num_matches == 0) {
            warning "$file does not contain an entry for $username";
            return;
        }
        if ($num_matches > 1) {
            warning "$file contains multiple entries for $username";
            return;
        }
        push @result, [$type, 0, $subid, $num_subid];
    }
    return @result;
}
# This function spawns two child processes forming the following process tree
#
#        A
#        |
#      fork()
#      |    \
#      B     C
#      |     |
#      |   fork()
#      |   |    \
#      |   D     E
#      |   |     |
#      | unshare()
#      |  close()
#      |   |     |
#      |   |   read()
#      |   |   newuidmap(D)
#      |   |   newgidmap(D)
#      |   |    /
#      | waitpid()
#      |   |
#      |  fork()
#      |   |    \
#      |   F     G
#      |   |     |
#      |   |   exec()
#      |   |    /
#      | waitpid()
#      |   /
#    waitpid()
#
# To better refer to each individual part, we give each process a new
# identifier after calling fork(). Process A is the main process. After
# executing fork() we call the parent and child B and C, respectively. This
# first fork() is done because we do not want to modify A. B then remains
# waiting for its child C to finish. C calls fork() again, splitting into
# the parent D and its child E. In the parent D we call unshare() and close a
# pipe shared by D and E to signal to E that D is done with calling unshare().
# E notices this by using read() and follows up with executing the tools
# new[ug]idmap on D. E finishes and D continues with doing another fork().
# This is because when unsharing the PID namespace, we need a PID 1 to be kept
# alive or otherwise any child processes cannot fork() anymore themselves. So
# we keep F as PID 1 and finally call exec() in G.
sub get_unshare_cmd {
    my $cmd   = shift;
    my $idmap = shift;

    my $unshare_flags
      = $CLONE_NEWUSER | $CLONE_NEWNS | $CLONE_NEWPID | $CLONE_NEWUTS
      | $CLONE_NEWIPC;

    if (0) {
        $unshare_flags |= $CLONE_NEWNET;
    }

    # fork a new process and let the child get unshare()ed
    # we don't want to unshare the parent process
    my $gcpid = fork() // error "fork() failed: $!";
    if ($gcpid == 0) {
        # Create a pipe for the parent process to signal the child process
        # that it is done with calling unshare() so that the child can go
        # ahead setting up uid_map and gid_map.
        pipe my $pipe_r, my $pipe_w;

        # We have to do this dance with forking a process and then modifying
        # the parent from the child because:
        # - new[ug]idmap can only be called on a process id after that process
        #   has unshared the user namespace
        # - a process looses its capabilities if it performs an execve() with
        #   nonzero user ids see the capabilities(7) man page for details.
        # - a process that unshared the user namespace by default does not
        #   have the privileges to call new[ug]idmap on itself
        #
        # this also works the other way around (the child setting up a user
        # namespace and being modified from the parent) but that way, the
        # parent would have to stay around until the child exited (so a pid
        # would be wasted). Additionally, that variant would require an
        # additional pipe to let the parent signal the child that it is done
        # with calling new[ug]idmap. The way it is done here, this signaling
        # can instead be done by wait()-ing for the exit of the child.
        my $parent_pid = $$;
        my $helper_pid = fork() // error "fork() failed: $!";
        if ($helper_pid == 0) {
            # child
            # Close the writing descriptor at our end of the pipe so that we
            # see EOF when parent closes its descriptor.
            close $pipe_w;
            # Wait for the parent process to finish its unshare() call by
            # waiting for an EOF.
            0 == sysread $pipe_r, my $c, 1
              or error "read() did not receive EOF";
            # The program's new[ug]idmap have to be used because they are
            # setuid root. These privileges are needed to map the ids from
            # /etc/sub[ug]id to the user namespace set up by the parent.
            # Without these privileges, only the id of the user itself can be
            # mapped into the new namespace.
            #
            # Since new[ug]idmap is setuid root we also don't need to write
            # "deny" to /proc/$$/setgroups beforehand (this is otherwise
            # required for unprivileged processes trying to write to
            # /proc/$$/gid_map since kernel version 3.19 for security reasons)
            # and therefore the parent process keeps its ability to change its
            # own group here.
            #
            # Since /proc/$parent_pid/[ug]id_map can only be written to once,
            # respectively, instead of making multiple calls to new[ug]idmap,
            # we assemble a command line that makes one call each.
            my $uid_args = "";
            my $gid_args = "";
            foreach my $entry (@{$idmap}) {
                my ($t, $hostid, $nsid, $range) = @{$entry};
                if ($t ne "u" and $t ne "g" and $t ne "b") {
                    error "invalid idmap type: $t";
                }
                if ($t eq "u" or $t eq "b") {
                    $uid_args .= " $hostid $nsid $range";
                }
                if ($t eq "g" or $t eq "b") {
                    $gid_args .= " $hostid $nsid $range";
                }
            }
            if ($uid_args ne "") {
                0 == system "newuidmap $parent_pid $uid_args"
                  or error "newuidmap $parent_pid $uid_args failed: $!";
            }
            if ($gid_args ne "") {
                0 == system "newgidmap $parent_pid $gid_args"
                  or error "newgidmap $parent_pid $gid_args failed: $!";
            }
            exit 0;
        }
        # parent
        # After fork()-ing, the parent immediately calls unshare...
        0 == syscall &SYS_unshare, $unshare_flags
          or error "unshare() failed: $!";
        # .. and then signals the child process that we are done with the
        # unshare() call by sending an EOF.
        close $pipe_w;
        # Wait for the child process to finish its setup by waiting for its
        # exit.
        $helper_pid == waitpid $helper_pid, 0
          or error "waitpid() failed: $!";
        my $exit = $? >> 8;
        if ($exit != 0) {
            error "child had a non-zero exit status: $exit";
        }
        # Currently we are nobody (uid and gid are 65534). So we become root
        # user and group instead.
        #
        # We are using direct syscalls instead of setting $(, $), $< and $>
        # because then perl would do additional stuff which we don't need or
        # want here, like checking /proc/sys/kernel/ngroups_max (which might
        # not exist). It would also also call setgroups() in a way that makes
        # the root user be part of the group unknown.
        0 == syscall &SYS_setgid, 0 or error "setgid failed: $!";
        0 == syscall &SYS_setuid, 0 or error "setuid failed: $!";
        0 == syscall &SYS_setgroups, 0, 0 or error "setgroups failed: $!";
        if (1) {
            # When the pid namespace is also unshared, then processes expect a
            # master pid to always be alive within the namespace. To achieve
            # this, we fork() here instead of exec() to always have one dummy
            # process running as pid 1 inside the namespace. This is also what
            # the unshare tool does when used with the --fork option.
            #
            # Otherwise, without a pid 1, new processes cannot be forked
            # anymore after pid 1 finished.
            my $init_pid = fork() // error "fork() failed: $!";
            if ($init_pid != 0) {
                # The parent process will stay alive as pid 1 in this
                # namespace until the child finishes executing. This is
                # important because pid 1 must never die or otherwise nothing
                # new can be forked.
                $init_pid == waitpid $init_pid, 0
                  or error "waitpid() failed: $!";
                exit($? >> 8);
            }
        }
        &{$cmd}();
        exit 0;
    }
    # parent
    return $gcpid;
}
2020-01-09 07:39:40 +00:00
# Check whether device nodes can be created inside $root by actually
# attempting to mknod a test character device (1,3 — the numbers of
# /dev/null) and writing to it.
#
# Returns 1 when mknod works in $root, 0 otherwise. The test node is
# removed again before returning.
sub havemknod {
    my $root      = shift;
    my $havemknod = 0;
    if (-e "$root/test-dev-null") {
        error "/test-dev-null already exists";
    }
  TEST: {
        # we fork so that we can read STDERR
        #
        # Fix: the open() call must be parenthesized. In the previous
        # "open my $fh, '-|' // error ..." form, // bound to the string
        # '-|' (which is always defined), so a failed open/fork was never
        # reported and $pid could be undef.
        my $pid = open(my $fh, '-|') // error "failed to fork(): $!";
        if ($pid == 0) {
            open(STDERR, '>&', STDOUT) or error "cannot open STDERR: $!";
            # we use mknod(1) instead of the system call because creating the
            # right dev_t argument requires makedev(3)
            exec 'mknod', "$root/test-dev-null", 'c', '1', '3';
        }
        chomp(
            my $content = do { local $/; <$fh> }
        );
        close $fh;
        {
            # mknod must have succeeded silently, produced a character
            # device, and the device must be writable
            last TEST unless $? == 0 and $content eq '';
            last TEST unless -c "$root/test-dev-null";
            last TEST unless open my $fh, '>', "$root/test-dev-null";
            last TEST unless print $fh 'test';
        }
        $havemknod = 1;
    }
    if (-e "$root/test-dev-null") {
        unlink "$root/test-dev-null"
          or error "cannot unlink /test-dev-null: $!";
    }
    return $havemknod;
}
# Draw a progress bar on STDERR, or clear it when called with "done".
# Only active at verbosity level 1 and only when STDERR is a terminal.
sub print_progress {
    return if $verbosity_level != 1;
    my $perc = shift;
    ## no critic (InputOutput::ProhibitInteractiveTest)
    return if !-t STDERR;
    if ($perc eq "done") {
        # \e[2K clears everything on the current line (i.e. the progress bar)
        print STDERR "\e[2Kdone\n";
        return;
    }
    $perc = 100 if $perc >= 100;
    my $width  = 50;
    my $filled = int($perc * $width / 100);
    my $bar    = '=' x $filled;
    if ($filled != $width) {
        $bar .= '>';
        $bar .= ' ' x ($width - $filled - 1);
    }
    printf STDERR "%6.2f [%s]\r", $perc, $bar;
    return;
}
# Run an external command, feed its status file descriptor through
# $line_handler to render a progress bar, and scan its combined
# stdout/stderr with $line_has_error to detect failures that are not
# reflected in the exit status.
#
# $get_exec       - coderef; given the status fd number, returns the argv
# $line_handler   - coderef; given a status line, returns a percentage or
#                   a false value to skip the line
# $line_has_error - coderef; given an output line, returns true on error
# $chdir          - optional directory to chdir() into before exec
sub run_progress {
    my ($get_exec, $line_handler, $line_has_error, $chdir) = @_;
    pipe my $status_r, my $status_w;
    my $got_signal = 0;
    # NOTE(review): this handler never assigns $got_signal, so the check
    # near the end of this function is currently unreachable — confirm
    # whether the handler was meant to record the signal name
    my $ignore = sub {
        info "run_progress() received signal $_[0]: waiting for child...";
    };
    # delay signals so that we can fork and change behaviour of the signal
    # handler in parent and child without getting interrupted
    my $sigset = POSIX::SigSet->new(SIGINT, SIGHUP, SIGPIPE, SIGTERM);
    POSIX::sigprocmask(SIG_BLOCK, $sigset) or error "Can't block signals: $!";
    my $pid1 = open(my $pipe, '-|') // error "failed to fork(): $!";
    if ($pid1 == 0) {
        # exec child: restore default signal handlers
        local $SIG{'INT'}  = 'DEFAULT';
        local $SIG{'HUP'}  = 'DEFAULT';
        local $SIG{'PIPE'} = 'DEFAULT';
        local $SIG{'TERM'} = 'DEFAULT';
        # unblock all delayed signals (and possibly handle them)
        POSIX::sigprocmask(SIG_UNBLOCK, $sigset)
          or error "Can't unblock signals: $!";
        close $status_r;
        # Unset the close-on-exec flag, so that the file descriptor does not
        # get closed when we exec
        my $flags = fcntl($status_w, F_GETFD, 0)
          or error "fcntl F_GETFD: $!";
        fcntl($status_w, F_SETFD, $flags & ~FD_CLOEXEC)
          or error "fcntl F_SETFD: $!";
        my $fd = fileno $status_w;
        # redirect stderr to stdout so that we can capture it
        open(STDERR, '>&', STDOUT) or error "cannot open STDOUT: $!";
        my @execargs = $get_exec->($fd);
        # before apt 1.5, "apt-get update" attempted to chdir() into the
        # working directory. This will fail if the current working directory
        # is not accessible by the user (for example in unshare mode). See
        # Debian bug #860738
        if (defined $chdir) {
            chdir $chdir or error "failed chdir() to $chdir: $!";
        }
        eval { Devel::Cover::set_coverage("none") } if $is_covering;
        exec { $execargs[0] } @execargs
          or error 'cannot exec() ' . (join ' ', @execargs);
    }
    close $status_w;
    # spawn two processes:
    # parent will parse stdout to look for errors
    # child will parse the status pipe for the progress meter
    my $pid2 = fork() // error "failed to fork(): $!";
    if ($pid2 == 0) {
        # progress child: ignore signals so the bar can be finished cleanly
        local $SIG{'INT'}  = 'IGNORE';
        local $SIG{'HUP'}  = 'IGNORE';
        local $SIG{'PIPE'} = 'IGNORE';
        local $SIG{'TERM'} = 'IGNORE';
        # unblock all delayed signals (and possibly handle them)
        POSIX::sigprocmask(SIG_UNBLOCK, $sigset)
          or error "Can't unblock signals: $!";
        print_progress(0.0);
        while (my $line = <$status_r>) {
            my $perc = $line_handler->($line);
            next unless $perc;
            print_progress($perc);
        }
        print_progress("done");
        exit 0;
    }
    # parent: ignore signals
    # by using "local", the original is automatically restored once the
    # function returns
    local $SIG{'INT'}  = $ignore;
    local $SIG{'HUP'}  = $ignore;
    local $SIG{'PIPE'} = $ignore;
    local $SIG{'TERM'} = $ignore;
    # unblock all delayed signals (and possibly handle them)
    POSIX::sigprocmask(SIG_UNBLOCK, $sigset)
      or error "Can't unblock signals: $!";
    my $output    = '';
    my $has_error = 0;
    while (my $line = <$pipe>) {
        # NOTE(review): each line overwrites $has_error, so only the last
        # line's verdict survives — confirm whether accumulation (||=) was
        # intended
        $has_error = $line_has_error->($line);
        if ($verbosity_level >= 2) {
            print STDERR $line;
        } else {
            # capture the output so it can be shown on failure
            $output .= $line;
        }
    }
    close($pipe);
    my $fail = 0;
    if ($? != 0 or $has_error) {
        $fail = 1;
    }
    waitpid $pid2, 0;
    $? == 0 or error "progress parsing failed";
    if ($got_signal) {
        error "run_progress() received signal: $got_signal";
    }
    # only print failure after progress output finished or otherwise it
    # might interfere with the remaining output
    if ($fail) {
        if ($verbosity_level >= 1) {
            print STDERR $output;
        }
        error((join ' ', $get_exec->('<$fd>')) . ' failed');
    }
    return;
}
# Run dpkg via run_progress(), translating its --status-fd output into a
# percentage for the progress bar.
#
# $options->{ARGV} - the dpkg invocation (arrayref)
# $options->{PKGS} - optional arrayref of package arguments
sub run_dpkg_progress {
    my $options = shift;
    my @debs    = @{ $options->{PKGS} // [] };
    my $get_exec
      = sub { return @{ $options->{ARGV} }, "--status-fd=$_[0]", @debs; };
    # dpkg reports errors through its exit status, not its output
    my $line_has_error = sub { return 0; };
    my $num = 0;
    # each package has one install and one configure step, thus the total
    # number is twice the number of packages
    my $total = (scalar @debs) * 2;
    my $line_handler = sub {
        if ($_[0] =~ /^processing: (install|configure): /) {
            $num += 1;
        }
        # guard against division by zero when called without packages
        return 0 if $total == 0;
        return $num / $total * 100;
    };
    run_progress $get_exec, $line_handler, $line_has_error;
    return;
}
# Run apt-get via run_progress(), translating its APT::Status-Fd output
# into a percentage for the progress bar.
#
# $options->{ARGV}              - the apt-get invocation (arrayref)
# $options->{PKGS}              - optional arrayref of package arguments
# $options->{FIND_APT_WARNINGS} - when true, treat "W: "/"Err:" output
#                                 lines as failures
# $options->{CHDIR}             - optional working directory for apt
sub run_apt_progress {
    my $options = shift;
    my @debs    = @{ $options->{PKGS} // [] };
    my $get_exec = sub {
        return (
            @{ $options->{ARGV} },
            "-oAPT::Status-Fd=$_[0]",
            # prevent apt from messing up the terminal and allow dpkg to
            # receive SIGINT and quit immediately without waiting for
            # maintainer script to finish
            '-oDpkg::Use-Pty=false',
            @debs
        );
    };
    my $line_has_error = sub { return 0; };
    if ($options->{FIND_APT_WARNINGS}) {
        $line_has_error = sub {
            # apt-get doesn't report a non-zero exit if the update failed.
            # Thus, we have to parse its output. See #778357, #776152,
            # #696335 and #745735
            return 1 if $_[0] =~ /^(W: |Err:)/;
            return 0;
        };
    }
    my $line_handler = sub {
        if ($_[0] =~ /(pmstatus|dlstatus):[^:]+:(\d+\.\d+):.*/) {
            return $2;
        }
        return;
    };
    run_progress $get_exec, $line_handler, $line_has_error,
      $options->{CHDIR};
    return;
}
2020-01-09 07:39:40 +00:00
sub run_chroot {
2020-01-08 16:44:07 +00:00
my $cmd = shift;
my $options = shift;
my @cleanup_tasks = ();
my $cleanup = sub {
2020-01-08 14:41:49 +00:00
my $signal = $_[0];
while (my $task = pop @cleanup_tasks) {
$task->();
}
if ($signal) {
warning "pid $PID cought signal: $signal";
exit 1;
}
};
2020-01-08 16:44:07 +00:00
local $SIG{INT} = $cleanup;
local $SIG{HUP} = $cleanup;
local $SIG{PIPE} = $cleanup;
local $SIG{TERM} = $cleanup;
eval {
2020-01-08 14:41:49 +00:00
if (any { $_ eq $options->{mode} } ('root', 'unshare')) {
# if more than essential should be installed, make the system look
# more like a real one by creating or bind-mounting the device
# nodes
2020-01-08 14:41:49 +00:00
foreach my $file (@devfiles) {
2020-01-08 16:44:07 +00:00
my ($fname, $mode, $type, $linkname, $devmajor, $devminor)
= @{$file};
2020-01-08 14:41:49 +00:00
next if $fname eq './dev/';
2020-01-08 16:44:07 +00:00
if ($type == 0) { # normal file
2020-01-08 14:41:49 +00:00
error "type 0 not implemented";
2020-01-08 16:44:07 +00:00
} elsif ($type == 1) { # hardlink
2020-01-08 14:41:49 +00:00
error "type 1 not implemented";
2020-01-08 16:44:07 +00:00
} elsif ($type == 2) { # symlink
2020-01-08 14:41:49 +00:00
if (!$options->{havemknod}) {
2020-01-08 16:44:07 +00:00
if ( $options->{mode} eq 'fakechroot'
and $linkname =~ /^\/proc/) {
2020-01-08 14:41:49 +00:00
# there is no /proc in fakechroot mode
next;
}
if (
any { $_ eq $options->{mode} }
('root', 'unshare')
) {
2020-01-08 14:41:49 +00:00
push @cleanup_tasks, sub {
2020-01-08 16:44:07 +00:00
unlink "$options->{root}/$fname"
or warn "cannot unlink $fname: $!";
2020-01-08 14:41:49 +00:00
}
}
2020-01-08 16:44:07 +00:00
symlink $linkname, "$options->{root}/$fname"
or error "cannot create symlink $fname";
2020-01-08 14:41:49 +00:00
}
} elsif ($type == 3 or $type == 4) {
# character/block special
2020-01-08 14:41:49 +00:00
if (!$options->{havemknod}) {
2020-01-08 16:44:07 +00:00
open my $fh, '>', "$options->{root}/$fname"
or error "cannot open $options->{root}/$fname: $!";
2020-01-08 14:41:49 +00:00
close $fh;
if ($options->{mode} eq 'unshare') {
push @cleanup_tasks, sub {
2020-01-08 16:44:07 +00:00
0 == system('umount', '--no-mtab',
"$options->{root}/$fname")
or warn "umount $fname failed: $?";
unlink "$options->{root}/$fname"
or warn "cannot unlink $fname: $!";
2020-01-08 14:41:49 +00:00
};
} elsif ($options->{mode} eq 'root') {
push @cleanup_tasks, sub {
2020-01-08 16:44:07 +00:00
0 == system('umount',
"$options->{root}/$fname")
or warn "umount failed: $?";
unlink "$options->{root}/$fname"
or warn "cannot unlink $fname: $!";
2020-01-08 14:41:49 +00:00
};
} else {
error "unknown mode: $options->{mode}";
}
2020-01-08 16:44:07 +00:00
0 == system('mount', '-o', 'bind', "/$fname",
"$options->{root}/$fname")
or error "mount $fname failed: $?";
2020-01-08 14:41:49 +00:00
}
2020-01-08 16:44:07 +00:00
} elsif ($type == 5) { # directory
2020-01-08 14:41:49 +00:00
if (!$options->{havemknod}) {
if (
any { $_ eq $options->{mode} }
('root', 'unshare')
) {
2020-01-08 14:41:49 +00:00
push @cleanup_tasks, sub {
2020-01-08 16:44:07 +00:00
rmdir "$options->{root}/$fname"
or warn "cannot rmdir $fname: $!";
2020-01-08 14:41:49 +00:00
}
}
if (-e "$options->{root}/$fname") {
2020-01-08 16:44:07 +00:00
if (!-d "$options->{root}/$fname") {
error "$fname already exists but is not a"
. " directory";
2020-01-08 14:41:49 +00:00
}
} else {
2020-01-08 16:44:07 +00:00
my $num_created
= make_path "$options->{root}/$fname",
{ error => \my $err };
2020-01-08 14:41:49 +00:00
if ($err && @$err) {
2020-01-08 16:44:07 +00:00
error(
join "; ",
(
map {
"cannot create "
. (join ": ", %{$_})
} @$err
));
2020-01-08 14:41:49 +00:00
} elsif ($num_created == 0) {
error "cannot create $options->{root}/$fname";
}
}
2020-01-08 16:44:07 +00:00
chmod $mode, "$options->{root}/$fname"
or error "cannot chmod $fname: $!";
2020-01-08 14:41:49 +00:00
}
if ($options->{mode} eq 'unshare') {
push @cleanup_tasks, sub {
2020-01-08 16:44:07 +00:00
0 == system('umount', '--no-mtab',
"$options->{root}/$fname")
or warn "umount $fname failed: $?";
2020-01-08 14:41:49 +00:00
};
} elsif ($options->{mode} eq 'root') {
push @cleanup_tasks, sub {
2020-01-08 16:44:07 +00:00
0 == system('umount', "$options->{root}/$fname")
or warn "umount $fname failed: $?";
2020-01-08 14:41:49 +00:00
};
} else {
error "unknown mode: $options->{mode}";
}
2020-01-08 16:44:07 +00:00
0 == system('mount', '-o', 'bind', "/$fname",
"$options->{root}/$fname")
or error "mount $fname failed: $?";
2020-01-08 14:41:49 +00:00
} else {
error "unsupported type: $type";
}
}
2020-01-08 16:44:07 +00:00
} elsif (
any { $_ eq $options->{mode} }
('proot', 'fakechroot', 'chrootless')
) {
2020-01-08 14:41:49 +00:00
# we cannot mount in fakechroot and proot mode
# in proot mode we have /dev bind-mounted already through
# --bind=/dev
2020-01-08 14:41:49 +00:00
} else {
error "unknown mode: $options->{mode}";
}
# We can only mount /proc and /sys after extracting the essential
# set because if we mount it before, then base-files will not be able
# to extract those
if ($options->{mode} eq 'root') {
push @cleanup_tasks, sub {
2020-01-08 16:44:07 +00:00
0 == system('umount', "$options->{root}/sys")
or warn "umount /sys failed: $?";
2020-01-08 14:41:49 +00:00
};
2020-01-08 16:44:07 +00:00
0 == system(
'mount', '-t', 'sysfs',
'-o', 'ro,nosuid,nodev,noexec', 'sys',
"$options->{root}/sys"
2020-01-08 16:44:07 +00:00
) or error "mount /sys failed: $?";
2020-01-08 14:41:49 +00:00
} elsif ($options->{mode} eq 'unshare') {
# naturally we have to clean up after ourselves in sudo mode where
# we do a real mount. But we also need to unmount in unshare mode
# because otherwise, even with the --one-file-system tar option,
# the permissions of the mount source will be stored and not the
# mount target (the directory)
2020-01-08 14:41:49 +00:00
push @cleanup_tasks, sub {
# since we cannot write to /etc/mtab we need --no-mtab
# unmounting /sys only seems to be successful with --lazy
2020-01-08 16:44:07 +00:00
0 == system('umount', '--no-mtab', '--lazy',
"$options->{root}/sys")
or warn "umount /sys failed: $?";
2020-01-08 14:41:49 +00:00
};
# without the network namespace unshared, we cannot mount a new
# sysfs. Since we need network, we just bind-mount.
#
# we have to rbind because just using bind results in "wrong fs
# type, bad option, bad superblock" error
2020-01-08 16:44:07 +00:00
0 == system('mount', '-o', 'rbind', '/sys', "$options->{root}/sys")
or error "mount /sys failed: $?";
} elsif (
any { $_ eq $options->{mode} }
('proot', 'fakechroot', 'chrootless')
) {
2020-01-08 14:41:49 +00:00
# we cannot mount in fakechroot and proot mode
# in proot mode we have /proc bind-mounted already through
# --bind=/proc
2020-01-08 14:41:49 +00:00
} else {
error "unknown mode: $options->{mode}";
}
if ($options->{mode} eq 'root') {
push @cleanup_tasks, sub {
# some maintainer scripts mount additional stuff into /proc
# which we need to unmount beforehand
if (
is_mountpoint(
$options->{root} . "/proc/sys/fs/binfmt_misc"
)
) {
2020-01-08 16:44:07 +00:00
0 == system('umount',
"$options->{root}/proc/sys/fs/binfmt_misc")
or error "umount /proc/sys/fs/binfmt_misc failed: $?";
2020-01-08 14:41:49 +00:00
}
2020-01-08 16:44:07 +00:00
0 == system('umount', "$options->{root}/proc")
or error "umount /proc failed: $?";
2020-01-08 14:41:49 +00:00
};
0 == system('mount', '-t', 'proc', '-o', 'ro', 'proc',
"$options->{root}/proc")
2020-01-08 16:44:07 +00:00
or error "mount /proc failed: $?";
2020-01-08 14:41:49 +00:00
} elsif ($options->{mode} eq 'unshare') {
# naturally we have to clean up after ourselves in sudo mode where
# we do a real mount. But we also need to unmount in unshare mode
# because otherwise, even with the --one-file-system tar option,
# the permissions of the mount source will be stored and not the
# mount target (the directory)
2020-01-08 14:41:49 +00:00
push @cleanup_tasks, sub {
# since we cannot write to /etc/mtab we need --no-mtab
2020-01-08 16:44:07 +00:00
0 == system('umount', '--no-mtab', "$options->{root}/proc")
or error "umount /proc failed: $?";
2020-01-08 14:41:49 +00:00
};
2020-01-08 16:44:07 +00:00
0 == system('mount', '-t', 'proc', 'proc', "$options->{root}/proc")
or error "mount /proc failed: $?";
} elsif (
any { $_ eq $options->{mode} }
('proot', 'fakechroot', 'chrootless')
) {
2020-01-08 14:41:49 +00:00
# we cannot mount in fakechroot and proot mode
# in proot mode we have /sys bind-mounted already through
# --bind=/sys
2020-01-08 14:41:49 +00:00
} else {
error "unknown mode: $options->{mode}";
}
# prevent daemons from starting
# the directory might not exist in custom variant, for example
#
# ideally, we should use update-alternatives but we cannot rely on it
# existing inside the chroot
#
# See #911290 for more problems of this interface
2020-01-08 14:41:49 +00:00
if (-d "$options->{root}/usr/sbin/") {
2020-01-08 16:44:07 +00:00
open my $fh, '>', "$options->{root}/usr/sbin/policy-rc.d"
or error "cannot open policy-rc.d: $!";
2020-01-08 14:41:49 +00:00
print $fh "#!/bin/sh\n";
print $fh "exit 101\n";
close $fh;
2020-01-08 16:44:07 +00:00
chmod 0755, "$options->{root}/usr/sbin/policy-rc.d"
or error "cannot chmod policy-rc.d: $!";
2020-01-08 14:41:49 +00:00
}
# the file might not exist if it was removed in a hook
if (-e "$options->{root}/sbin/start-stop-daemon") {
if (-e "$options->{root}/sbin/start-stop-daemon.REAL") {
error "$options->{root}/sbin/start-stop-daemon.REAL already"
. " exists";
2020-01-08 14:41:49 +00:00
}
2020-01-08 16:44:07 +00:00
move(
"$options->{root}/sbin/start-stop-daemon",
"$options->{root}/sbin/start-stop-daemon.REAL"
) or error "cannot move start-stop-daemon: $!";
open my $fh, '>', "$options->{root}/sbin/start-stop-daemon"
or error "cannot open start-stop-daemon: $!";
2020-01-08 14:41:49 +00:00
print $fh "#!/bin/sh\n";
print $fh "echo \"Warning: Fake start-stop-daemon called, doing"
. " nothing\">&2\n";
2020-01-08 14:41:49 +00:00
close $fh;
2020-01-08 16:44:07 +00:00
chmod 0755, "$options->{root}/sbin/start-stop-daemon"
or error "cannot chmod start-stop-daemon: $!";
2020-01-08 14:41:49 +00:00
}
&{$cmd}();
# cleanup
if (-e "$options->{root}/sbin/start-stop-daemon.REAL") {
2020-01-08 16:44:07 +00:00
move(
"$options->{root}/sbin/start-stop-daemon.REAL",
"$options->{root}/sbin/start-stop-daemon"
) or error "cannot move start-stop-daemon: $!";
2020-01-08 14:41:49 +00:00
}
if (-e "$options->{root}/usr/sbin/policy-rc.d") {
2020-01-08 16:44:07 +00:00
unlink "$options->{root}/usr/sbin/policy-rc.d"
or error "cannot unlink policy-rc.d: $!";
2020-01-08 14:41:49 +00:00
}
};
my $error = $@;
# we use the cleanup function to do the unmounting
$cleanup->(0);
if ($error) {
2020-01-08 14:41:49 +00:00
error "run_chroot failed: $error";
}
2020-01-09 07:39:40 +00:00
return;
}
2020-01-09 07:39:40 +00:00
# Run all hook scripts registered for a given stage ("setup", "extract",
# "essential" or "customize").
#
# Arguments:
#   $name    - the stage name; hooks are read from $options->{"${name}_hook"}
#   $options - the global options hash
#
# Special hooks (copy-in, copy-out, tar-in, tar-out, upload, download,
# sync-in, sync-out) are dispatched by re-executing this program with
# --hook-helper in a child whose stdin/stdout are connected to
# $options->{hooksock}. All other hooks are run either directly (if the
# script is executable or contains no shell metacharacters) or via "sh -c".
# For every stage except "setup" the hooks run with /dev, /proc and /sys
# set up via run_chroot(). Dies via error() on any hook failure.
sub run_hooks {
    my $name = shift;
    my $options = shift;
    if (scalar @{ $options->{"${name}_hook"} } == 0) {
        # nothing registered for this stage
        return;
    }
    if ($options->{dryrun}) {
        info "not running ${name}-hooks because of --dry-run";
        return;
    }
    my $runner = sub {
        foreach my $script (@{ $options->{"${name}_hook"} }) {
            if (
                $script =~ /^(
                copy-in|copy-out
                |tar-in|tar-out
                |upload|download
                |sync-in|sync-out
                )\ /x
            ) {
                info "running special hook: $script";
                if (
                    any { $_ eq $options->{variant} } ('extract', 'custom')
                    and any { $_ eq $options->{mode} }
                    ('fakechroot', 'proot') and $name ne 'setup'
                ) {
                    info "the copy-in, copy-out, tar-in and tar-out commands"
                      . " in fakechroot mode or proot mode might fail in"
                      . " extract and custom variants because there might be"
                      . " no tar inside the chroot";
                }
                my $pid = fork() // error "fork() failed: $!";
                if ($pid == 0) {
                    # whatever the script writes on stdout is sent to the
                    # socket
                    # whatever is written to the socket, send to stdin
                    open(STDOUT, '>&', $options->{hooksock})
                      or error "cannot open STDOUT: $!";
                    open(STDIN, '<&', $options->{hooksock})
                      or error "cannot open STDIN: $!";
                    # we execute ourselves under sh to avoid having to
                    # implement a clever parser of the quoting used in $script
                    # for the filenames
                    my $prefix = "";
                    if ($is_covering) {
                        # keep collecting coverage data in the re-exec'd self
                        $prefix
                          = "$EXECUTABLE_NAME -MDevel::Cover=-silent,-nogcov ";
                    }
                    # sh splits $script into words; "$1".."$5" carry the
                    # fixed --hook-helper arguments below
                    exec 'sh', '-c',
                      "$prefix$PROGRAM_NAME --hook-helper"
                      . " \"\$1\" \"\$2\" \"\$3\" \"\$4\" \"\$5\" $script",
                      'exec', $options->{root}, $options->{mode}, $name,
                      (
                        defined $options->{qemu}
                        ? "qemu-$options->{qemu}"
                        : 'env',
                        $verbosity_level
                      );
                }
                waitpid($pid, 0);
                $? == 0 or error "special hook failed with exit code $?";
            } elsif (-x $script || $script !~ m/[^\w@\%+=:,.\/-]/a) {
                info "running --$name-hook directly: $script $options->{root}";
                # execute it directly if it's an executable file
                # or if it there are no shell metacharacters
                # (the /a regex modifier makes \w match only ASCII)
                # TMPDIR/APT_CONFIG are unset so the hook sees a clean
                # environment rather than mmdebstrap's private apt setup
                0 == system('env', '--unset=TMPDIR', '--unset=APT_CONFIG',
                    $script, $options->{root})
                  or error "command failed: $script";
            } else {
                info "running --$name-hook in shell: sh -c '$script' exec"
                  . " $options->{root}";
                # otherwise, wrap everything in sh -c
                0 == system('env', '--unset=TMPDIR', '--unset=APT_CONFIG',
                    'sh', '-c', $script, 'exec', $options->{root})
                  or error "command failed: $script";
            }
        }
    };
    if ($name eq 'setup') {
        # execute directly without mounting anything (the mount points do not
        # exist yet)
        &{$runner}();
    } else {
        run_chroot(\&$runner, $options);
    }
    return;
}
2018-09-18 09:20:24 +00:00
# Top-level driver: dump the parsed options, then walk through all stages
# of chroot creation (setup, update, download, extract, essential, install,
# customize, cleanup) with their respective hooks.
sub setup {
    my $options = shift;

    # dump every defined option for debugging; unknown reference types are
    # a programming error
    for my $key (sort keys %{$options}) {
        my $val = $options->{$key};
        next if !defined $val;
        my $reftype = ref $val;
        if ($reftype eq '') {
            debug "$key: $options->{$key}";
        } elsif ($reftype eq 'ARRAY') {
            debug "$key: [" . (join ', ', @{$val}) . "]";
        } elsif ($reftype eq 'GLOB') {
            debug "$key: GLOB";
        } else {
            error "unknown type for key $key: " . $reftype;
        }
    }

    # warn early about unreadable keyrings
    for my $keyring ($options->{apttrusted}, $options->{apttrustedparts}) {
        warning "cannot read $keyring" if -e $keyring && !-r $keyring;
    }

    run_setup($options);
    run_hooks('setup', $options);
    run_update($options);

    my ($pkgs_to_install, $essential_pkgs) = run_download($options);

    # The only scenario in which the @essential_pkgs are NOT extracted is
    # chrootless mode in any variant other than 'extract'.
    unless ($options->{mode} eq 'chrootless'
        and $options->{variant} ne 'extract') {
        run_extract($options, $essential_pkgs);
    }

    run_hooks('extract', $options);

    if ($options->{variant} ne 'extract') {
        # in chrootless mode nothing needs to be prepared for chrooting
        my $chrootcmd
          = $options->{mode} eq 'chrootless' ? [] : run_prepare($options);
        run_essential($options, $essential_pkgs, $chrootcmd);
        run_hooks('essential', $options);
        run_install($options, $pkgs_to_install, $chrootcmd);
        run_hooks('customize', $options);
    }

    run_cleanup($options);
    return;
}
# Prepare the target directory and the apt/dpkg configuration before any
# package is downloaded: create the directory skeleton, write a temporary
# apt.conf (later exported via APT_CONFIG), seed the dpkg database files,
# write sources.list entries, create device nodes, and copy resolv.conf
# and hostname from the host.
#
# Argument: the global options hash. Dies via error() on any failure.
# Side effects: mutates $ENV{TMPDIR}, $ENV{APT_CONFIG} and $ENV{PATH}.
sub run_setup() {
    my $options = shift;
    {
        my @directories = (
            '/etc/apt/apt.conf.d', '/etc/apt/sources.list.d',
            '/etc/apt/preferences.d', '/var/cache/apt',
            '/var/lib/apt/lists/partial', '/var/lib/dpkg',
            '/etc/dpkg/dpkg.cfg.d/', '/tmp'
        );
        # if dpkg and apt operate from the outside we need some more
        # directories because dpkg and apt might not even be installed inside
        # the chroot
        if ($options->{mode} eq 'chrootless') {
            push @directories,
              (
                '/var/log/apt', '/var/lib/dpkg/triggers',
                '/var/lib/dpkg/info', '/var/lib/dpkg/alternatives',
                '/var/lib/dpkg/updates'
              );
        }
        foreach my $dir (@directories) {
            if (-e "$options->{root}/$dir") {
                if (!-d "$options->{root}/$dir") {
                    error "$dir already exists but is not a directory";
                }
            } else {
                # make_path reports its errors through the hashrefs in @$err
                my $num_created = make_path "$options->{root}/$dir",
                  { error => \my $err };
                if ($err && @$err) {
                    error(
                        join "; ",
                        (map { "cannot create " . (join ": ", %{$_}) } @$err));
                } elsif ($num_created == 0) {
                    error "cannot create $options->{root}/$dir";
                }
            }
        }
    }
    # The TMPDIR set by the user or even /tmp might be inaccessible by the
    # unshared user. Thus, we place all temporary files in /tmp inside the new
    # rootfs.
    #
    # This will affect calls to tempfile() as well as runs of "apt-get update"
    # which will create temporary clearsigned.message.XXXXXX files to verify
    # signatures.
    {
        ## no critic (Variables::RequireLocalizedPunctuationVars)
        $ENV{"TMPDIR"} = "$options->{root}/tmp";
    }
    # $tmpfile is the apt.conf written here and exported as APT_CONFIG below
    my ($conf, $tmpfile)
      = tempfile("mmdebstrap.apt.conf.XXXXXXXXXXXX", TMPDIR => 1)
      or error "cannot open apt.conf: $!";
    print $conf "Apt::Architecture \"$options->{nativearch}\";\n";
    # the host system might have configured additional architectures
    # force only the native architecture
    if (scalar @{ $options->{foreignarchs} } > 0) {
        print $conf "Apt::Architectures { \"$options->{nativearch}\"; ";
        foreach my $arch (@{ $options->{foreignarchs} }) {
            print $conf "\"$arch\"; ";
        }
        print $conf "};\n";
    } else {
        print $conf "Apt::Architectures \"$options->{nativearch}\";\n";
    }
    print $conf "Dir \"$options->{root}\";\n";
    # not needed anymore for apt 1.3 and newer
    print $conf
      "Dir::State::Status \"$options->{root}/var/lib/dpkg/status\";\n";
    # for authentication, use the keyrings from the host
    print $conf "Dir::Etc::Trusted \"$options->{apttrusted}\";\n";
    print $conf "Dir::Etc::TrustedParts \"$options->{apttrustedparts}\";\n";
    if ($options->{variant} ne 'apt') {
        # apt considers itself essential. Thus, when generating an EDSP
        # document for an external solver, it will add the Essential:yes field
        # to the apt package stanza. This is unnecessary for any other variant
        # than 'apt' because in all other variants we compile the set of
        # packages we consider essential ourselves and for the 'essential'
        # variant it would even be wrong to add apt. This workaround is only
        # needed when apt is used with an external solver but doesn't hurt
        # otherwise and we don't have a good way to figure out whether apt is
        # using an external solver or not short of parsing the --aptopt
        # options.
        print $conf "pkgCacheGen::ForceEssential \",\";\n";
    }
    if ($options->{dryrun}) {
        # Without this option, apt will fail with:
        # E: Could not configure 'libc6:amd64'.
        # E: Could not perform immediate configuration on 'libgcc1:amd64'.
        #
        # See https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=953260
        print $conf "APT::Immediate-Configure false;\n";
    }
    close $conf;
    # We put certain configuration items in their own configuration file
    # because they have to be valid for apt invocation from outside as well as
    # from inside the chroot.
    # The config filename is chosen such that any settings in it will be
    # overridden by what the user specified with --aptopt.
    {
        open my $fh, '>', "$options->{root}/etc/apt/apt.conf.d/00mmdebstrap"
          or error "cannot open /etc/apt/apt.conf.d/00mmdebstrap: $!";
        print $fh "Apt::Install-Recommends false;\n";
        print $fh "Acquire::Languages \"none\";\n";
        close $fh;
    }
    # an empty dpkg status file: no package is installed yet
    {
        open my $fh, '>', "$options->{root}/var/lib/dpkg/status"
          or error "failed to open(): $!";
        close $fh;
    }
    # /var/lib/dpkg/available is required to exist or otherwise package
    # removals will fail
    {
        open my $fh, '>', "$options->{root}/var/lib/dpkg/available"
          or error "failed to open(): $!";
        close $fh;
    }
    # /var/lib/dpkg/cmethopt is used by dselect
    # see #930788
    {
        open my $fh, '>', "$options->{root}/var/lib/dpkg/cmethopt"
          or error "failed to open(): $!";
        print $fh "apt apt\n";
        close $fh;
    }
    # we create /var/lib/dpkg/arch inside the chroot either if there is more
    # than the native architecture in the chroot or if chrootless mode is
    # used to create a chroot of a different architecture than the native
    # architecture outside the chroot.
    chomp(my $hostarch = `dpkg --print-architecture`);
    if (
        scalar @{ $options->{foreignarchs} } > 0
        or (    $options->{mode} eq 'chrootless'
            and $hostarch ne $options->{nativearch})
    ) {
        open my $fh, '>', "$options->{root}/var/lib/dpkg/arch"
          or error "cannot open /var/lib/dpkg/arch: $!";
        print $fh "$options->{nativearch}\n";
        foreach my $arch (@{ $options->{foreignarchs} }) {
            print $fh "$arch\n";
        }
        close $fh;
    }
    # append user-supplied --aptopt snippets; each entry is either a readable
    # file to be copied verbatim or a literal option string
    if (scalar @{ $options->{aptopts} } > 0) {
        open my $fh, '>', "$options->{root}/etc/apt/apt.conf.d/99mmdebstrap"
          or error "cannot open /etc/apt/apt.conf.d/99mmdebstrap: $!";
        foreach my $opt (@{ $options->{aptopts} }) {
            if (-r $opt) {
                # flush handle because copy() uses syswrite() which bypasses
                # buffered IO
                $fh->flush();
                copy $opt, $fh or error "cannot copy $opt: $!";
            } else {
                print $fh $opt;
                # apt options must be terminated by a semicolon and newline
                if ($opt !~ /;$/) {
                    print $fh ';';
                }
                if ($opt !~ /\n$/) {
                    print $fh "\n";
                }
            }
        }
        close $fh;
    }
    # same for user-supplied --dpkgopt snippets
    if (scalar @{ $options->{dpkgopts} } > 0) {
        # FIXME: in chrootless mode, dpkg will only read the configuration
        # from the host
        open my $fh, '>', "$options->{root}/etc/dpkg/dpkg.cfg.d/99mmdebstrap"
          or error "cannot open /etc/dpkg/dpkg.cfg.d/99mmdebstrap: $!";
        foreach my $opt (@{ $options->{dpkgopts} }) {
            if (-r $opt) {
                # flush handle because copy() uses syswrite() which bypasses
                # buffered IO
                $fh->flush();
                copy $opt, $fh or error "cannot copy $opt: $!";
            } else {
                print $fh $opt;
                if ($opt !~ /\n$/) {
                    print $fh "\n";
                }
            }
        }
        close $fh;
    }
    ## setup merged usr
    #my @amd64_dirs = ('lib32', 'lib64', 'libx32'); # only amd64 for now
    #foreach my $dir ("bin", "sbin", "lib", @amd64_dirs) {
    #    symlink "usr/$dir", "$options->{root}/$dir"
    #      or die "cannot create symlink: $!";
    #    make_path("$options->{root}/usr/$dir")
    #      or die "cannot create /usr/$dir: $!";
    #}
    {
        open my $fh, '>', "$options->{root}/etc/fstab"
          or error "cannot open fstab: $!";
        print $fh "# UNCONFIGURED FSTAB FOR BASE SYSTEM\n";
        close $fh;
        chmod 0644, "$options->{root}/etc/fstab"
          or error "cannot chmod fstab: $!";
    }
    # write /etc/apt/sources.list and files in /etc/apt/sources.list.d/
    {
        my $firstentry = $options->{sourceslists}->[0];
        # if the first sources.list entry is of one-line type and without
        # explicit filename, then write out an actual /etc/apt/sources.list
        # otherwise everything goes into /etc/apt/sources.list.d
        my $fname;
        if ($firstentry->{type} eq 'one-line'
            && !defined $firstentry->{fname}) {
            $fname = "$options->{root}/etc/apt/sources.list";
        } else {
            # numeric prefix keeps the files in the given order
            $fname = "$options->{root}/etc/apt/sources.list.d/0000";
            if (defined $firstentry->{fname}) {
                $fname .= $firstentry->{fname};
                if ( $firstentry->{fname} !~ /\.list/
                    && $firstentry->{fname} !~ /\.sources/) {
                    if ($firstentry->{type} eq 'one-line') {
                        $fname .= '.list';
                    } elsif ($firstentry->{type} eq 'deb822') {
                        $fname .= '.sources';
                    } else {
                        error "invalid type: $firstentry->{type}";
                    }
                }
            } else {
                # if no filename is given, then this must be a deb822 file
                # because if it was a one-line type file, then it would've been
                # written to /etc/apt/sources.list
                $fname .= 'main.sources';
            }
        }
        open my $fh, '>', "$fname" or error "cannot open $fname: $!";
        print $fh $firstentry->{content};
        close $fh;
        # everything else goes into /etc/apt/sources.list.d/
        for (my $i = 1 ; $i < scalar @{ $options->{sourceslists} } ; $i++) {
            my $entry = $options->{sourceslists}->[$i];
            my $fname = "$options->{root}/etc/apt/sources.list.d/"
              . sprintf("%04d", $i);
            if (defined $entry->{fname}) {
                $fname .= $entry->{fname};
                if ( $entry->{fname} !~ /\.list/
                    && $entry->{fname} !~ /\.sources/) {
                    if ($entry->{type} eq 'one-line') {
                        $fname .= '.list';
                    } elsif ($entry->{type} eq 'deb822') {
                        $fname .= '.sources';
                    } else {
                        error "invalid type: $entry->{type}";
                    }
                }
            } else {
                if ($entry->{type} eq 'one-line') {
                    $fname .= 'main.list';
                } elsif ($entry->{type} eq 'deb822') {
                    $fname .= 'main.sources';
                } else {
                    error "invalid type: $entry->{type}";
                }
            }
            open my $fh, '>', "$fname" or error "cannot open $fname: $!";
            print $fh $entry->{content};
            close $fh;
        }
    }
    # allow network access from within
    if (-e "/etc/resolv.conf") {
        copy("/etc/resolv.conf", "$options->{root}/etc/resolv.conf")
          or error "cannot copy /etc/resolv.conf: $!";
    } else {
        warning("Host system does not have a /etc/resolv.conf to copy into the"
              . " rootfs.");
    }
    if (-e "/etc/hostname") {
        copy("/etc/hostname", "$options->{root}/etc/hostname")
          or error "cannot copy /etc/hostname: $!";
    } else {
        warning("Host system does not have a /etc/hostname to copy into the"
              . " rootfs.");
    }
    # create the entries of @devfiles (a file-level list, defined outside
    # this chunk) with mknod when that is permitted in the current mode
    if ($options->{havemknod}) {
        foreach my $file (@devfiles) {
            # the tuple layout mirrors tar entry types (see type comments)
            my ($fname, $mode, $type, $linkname, $devmajor, $devminor)
              = @{$file};
            if ($type == 0) { # normal file
                error "type 0 not implemented";
            } elsif ($type == 1) { # hardlink
                error "type 1 not implemented";
            } elsif ($type == 2) { # symlink
                if ( $options->{mode} eq 'fakechroot'
                    and $linkname =~ /^\/proc/) {
                    # there is no /proc in fakechroot mode
                    next;
                }
                symlink $linkname, "$options->{root}/$fname"
                  or error "cannot create symlink $fname";
                next; # chmod cannot work on symlinks
            } elsif ($type == 3) { # character special
                0 == system('mknod', "$options->{root}/$fname", 'c',
                    $devmajor, $devminor)
                  or error "mknod failed: $?";
            } elsif ($type == 4) { # block special
                0 == system('mknod', "$options->{root}/$fname", 'b',
                    $devmajor, $devminor)
                  or error "mknod failed: $?";
            } elsif ($type == 5) { # directory
                if (-e "$options->{root}/$fname") {
                    if (!-d "$options->{root}/$fname") {
                        error "$fname already exists but is not a directory";
                    }
                } else {
                    my $num_created = make_path "$options->{root}/$fname",
                      { error => \my $err };
                    if ($err && @$err) {
                        error(
                            join "; ",
                            (
                                map { "cannot create " . (join ": ", %{$_}) }
                                  @$err
                            ));
                    } elsif ($num_created == 0) {
                        error "cannot create $options->{root}/$fname";
                    }
                }
            } else {
                error "unsupported type: $type";
            }
            chmod $mode, "$options->{root}/$fname"
              or error "cannot chmod $fname: $!";
        }
    }
    # we tell apt about the configuration via a config file passed via the
    # APT_CONFIG environment variable instead of using the --option command
    # line arguments because configuration settings like Dir::Etc have already
    # been evaluated at the time that apt takes its command line arguments
    # into account.
    {
        ## no critic (Variables::RequireLocalizedPunctuationVars)
        $ENV{"APT_CONFIG"} = "$tmpfile";
    }
    if ($verbosity_level >= 3) {
        0 == system('apt-get', '--version')
          or error "apt-get --version failed: $?";
        0 == system('apt-config', 'dump') or error "apt-config failed: $?";
        debug "content of $tmpfile:";
        copy($tmpfile, \*STDERR);
    }
    # when apt-get update is run by the root user, then apt will attempt to
    # drop privileges to the _apt user. This will fail if the _apt user does
    # not have permissions to read the root directory. In that case, we have
    # to disable apt sandboxing.
    if ($options->{mode} eq 'root') {
        my $partial = '/var/lib/apt/lists/partial';
        if (
            system('/usr/lib/apt/apt-helper', 'drop-privs', '--', 'test',
                '-r', "$options->{root}$partial") != 0
        ) {
            warning "Download is performed unsandboxed as root as file"
              . " $options->{root}$partial couldn't be accessed by user _apt";
            open my $fh, '>>', $tmpfile
              or error "cannot open $tmpfile for appending: $!";
            print $fh "APT::Sandbox::User \"root\";\n";
            close $fh;
        }
    }
    # setting PATH for chroot, ldconfig, start-stop-daemon...
    if (defined $ENV{PATH} && $ENV{PATH} ne "") {
        ## no critic (Variables::RequireLocalizedPunctuationVars)
        $ENV{PATH} = "$ENV{PATH}:/usr/sbin:/usr/bin:/sbin:/bin";
    } else {
        ## no critic (Variables::RequireLocalizedPunctuationVars)
        $ENV{PATH} = "/usr/sbin:/usr/bin:/sbin:/bin";
    }
    return;
}
# Run "apt-get update" against the freshly written apt configuration and
# verify that it actually downloaded any package indices.
#
# Argument: the global options hash. Dies via error() if apt-get update
# fails or if no index targets exist afterwards.
sub run_update() {
    my $options = shift;
    info "running apt-get update...";
    run_apt_progress({
        ARGV => ['apt-get', 'update'],
        CHDIR => $options->{root},
        FIND_APT_WARNINGS => 1
    });
    # check if anything was downloaded at all
    {
        # Bugfix: this used to be written as
        #   open my $fh, '-|', 'apt-get', 'indextargets' // error ...
        # where '//' bound to the always-defined string 'indextargets'
        # instead of to open()'s return value, so a failing open was never
        # reported. Check open() itself with low-precedence "or".
        open(my $fh, '-|', 'apt-get', 'indextargets')
          or error "failed to fork(): $!";
        # slurp the whole output and strip the trailing newline
        chomp(
            my $indextargets = do { local $/; <$fh> }
        );
        close $fh;
        if ($indextargets eq '') {
            # help debugging by showing the apt policy before bailing out
            if ($verbosity_level >= 1) {
                0 == system('apt-cache', 'policy')
                  or error "apt-cache failed: $?";
            }
            error "apt-get update didn't download anything";
        }
    }
    return;
}
# Compute the package set for the selected variant and let apt download
# (or, with --dry-run, simulate downloading) the essential packages.
#
# Argument: the global options hash.
# Returns a two-element list of arrayrefs: the package names to install in
# the install phase, and the paths (relative to the chroot root) of the
# .deb files apt placed in /var/cache/apt/archives. Dies via error() on
# failure.
sub run_download() {
    my $options = shift;
    # gather the packages requested via --include: split on commas and
    # whitespace, drop empty fields and duplicates
    my @pkgs_to_install;
    for my $incl (@{ $options->{include} }) {
        for my $pkg (split /[,\s]+/, $incl) {
            # strip leading and trailing whitespace
            $pkg =~ s/^\s+|\s+$//g;
            # skip if the remainder is an empty string
            if ($pkg eq '') {
                next;
            }
            # do not append component if it's already in the list
            if (any { $_ eq $pkg } @pkgs_to_install) {
                next;
            }
            push @pkgs_to_install, $pkg;
        }
    }
    if ($options->{variant} eq 'buildd') {
        push @pkgs_to_install, 'build-essential';
    }
    # To figure out the right package set for the apt variant we can use:
    # $ apt-get dist-upgrade -o dir::state::status=/dev/null
    # This is because that variants only contain essential packages and
    # apt and libapt treats apt as essential. If we want to install less
    # (essential variant) then we have to compute the package set ourselves.
    # Same if we want to install priority based variants.
    if (any { $_ eq $options->{variant} } ('extract', 'custom')) {
        if ($options->{dryrun}) {
            info "simulate downloading packages with apt...";
        } else {
            info "downloading packages with apt...";
        }
        run_apt_progress({
            ARGV => [
                'apt-get',
                '--yes',
                '-oApt::Get::Download-Only=true',
                $options->{dryrun} ? '-oAPT::Get::Simulate=true' : (),
                'install'
            ],
            PKGS => [@pkgs_to_install],
        });
    } elsif ($options->{variant} eq 'apt') {
        # if we just want to install Essential:yes packages, apt and their
        # dependencies then we can make use of libapt treating apt as
        # implicitly essential. An upgrade with the (currently) empty status
        # file will trigger an installation of the essential packages plus apt.
        #
        # 2018-09-02, #debian-dpkg on OFTC, times in UTC+2
        # 23:39 < josch> I'll just put it in my script and if it starts
        #                breaking some time I just say it's apt's fault. :P
        # 23:42 < DonKult> that is how it usually works, so yes, do that :P (<-
        #                  and please add that line next to it so you can
        #                  remind me in 5+ years that I said that after I wrote
        #                  in the bugreport: "Are you crazy?!? Nobody in his
        #                  right mind would even suggest depending on it!")
        if ($options->{dryrun}) {
            info "simulate downloading packages with apt...";
        } else {
            info "downloading packages with apt...";
        }
        run_apt_progress({
            ARGV => [
                'apt-get',
                '--yes',
                '-oApt::Get::Download-Only=true',
                $options->{dryrun} ? '-oAPT::Get::Simulate=true' : (),
                'dist-upgrade'
            ],
        });
    } elsif (
        any { $_ eq $options->{variant} } (
            'essential', 'standard', 'important', 'required', 'buildd',
            'minbase'
        )
    ) {
        # parse the Packages files ourselves to compute the package set
        # from the Essential:yes and Priority fields
        my %ess_pkgs;
        open(my $pipe_apt, '-|', 'apt-get', 'indextargets', '--format',
            '$(FILENAME)', 'Created-By: Packages')
          or error "cannot start apt-get indextargets: $!";
        while (my $fname = <$pipe_apt>) {
            chomp $fname;
            # apt-helper cat-file transparently decompresses the index
            open(my $pipe_cat, '-|', '/usr/lib/apt/apt-helper', 'cat-file',
                $fname)
              or error "cannot start apt-helper cat-file: $!";
            my $pkgname;
            my $ess = '';
            my $prio = 'optional';
            my $arch = '';
            while (my $line = <$pipe_cat>) {
                chomp $line;
                # Dpkg::Index takes 10 seconds to parse a typical Packages
                # file. Thus we instead use a simple parser that just retrieve
                # the information we need.
                if ($line ne "") {
                    if ($line =~ /^Package: (.*)/) {
                        $pkgname = $1;
                    } elsif ($line =~ /^Essential: yes$/) {
                        $ess = 'yes';
                    } elsif ($line =~ /^Priority: (.*)/) {
                        $prio = $1;
                    } elsif ($line =~ /^Architecture: (.*)/) {
                        $arch = $1;
                    }
                    next;
                }
                # we are only interested of packages of native architecture or
                # Architecture:all
                if ($arch eq $options->{nativearch} or $arch eq 'all') {
                    # the line is empty, thus a package stanza just finished
                    # processing and we can handle it now
                    if ($ess eq 'yes') {
                        $ess_pkgs{$pkgname} = ();
                    } elsif ($options->{variant} eq 'essential') {
                        # for this variant we are only interested in the
                        # essential packages
                    } elsif (
                        any { $_ eq $options->{variant} } (
                            'standard', 'important', 'required', 'buildd',
                            'minbase'
                        )
                    ) {
                        if ($prio eq 'optional' or $prio eq 'extra') {
                            # always ignore packages of priority optional and
                            # extra
                        } elsif ($prio eq 'standard') {
                            if (
                                none { $_ eq $options->{variant} }
                                ('important', 'required', 'buildd', 'minbase')
                            ) {
                                push @pkgs_to_install, $pkgname;
                            }
                        } elsif ($prio eq 'important') {
                            if (
                                none { $_ eq $options->{variant} }
                                ('required', 'buildd', 'minbase')
                            ) {
                                push @pkgs_to_install, $pkgname;
                            }
                        } elsif ($prio eq 'required') {
                            # required packages are part of all sets except
                            # essential and apt
                            push @pkgs_to_install, $pkgname;
                        } else {
                            error "unknown priority: $prio";
                        }
                    } else {
                        error "unknown variant: $options->{variant}";
                    }
                }
                # reset values for the next stanza
                undef $pkgname;
                $ess = '';
                $prio = 'optional';
                $arch = '';
            }
            close $pipe_cat;
            $? == 0 or error "apt-helper cat-file failed: $?";
        }
        close $pipe_apt;
        $? == 0 or error "apt-get indextargets failed: $?";
        debug "Identified the following Essential:yes packages:";
        foreach my $pkg (sort keys %ess_pkgs) {
            debug " $pkg";
        }
        if ($options->{dryrun}) {
            info "simulate downloading packages with apt...";
        } else {
            info "downloading packages with apt...";
        }
        run_apt_progress({
            ARGV => [
                'apt-get',
                '--yes',
                '-oApt::Get::Download-Only=true',
                $options->{dryrun} ? '-oAPT::Get::Simulate=true' : (),
                'install'
            ],
            PKGS => [keys %ess_pkgs],
        });
    } else {
        error "unknown variant: $options->{variant}";
    }
    # collect the .deb files that were downloaded by apt
    my @essential_pkgs;
    if (!$options->{dryrun}) {
        my $apt_archives = "/var/cache/apt/archives/";
        opendir my $dh, "$options->{root}/$apt_archives"
          or error "cannot read $apt_archives";
        while (my $deb = readdir $dh) {
            if ($deb !~ /\.deb$/) {
                next;
            }
            $deb = "$apt_archives/$deb";
            if (!-f "$options->{root}/$deb") {
                next;
            }
            push @essential_pkgs, $deb;
        }
        # bugfix: $dh was opened with opendir(), so it must be released
        # with closedir(); the previous close($dh) does not act on
        # dirhandles and leaked the handle
        closedir $dh;
        if (scalar @essential_pkgs == 0) {
            # check if a file:// URI was used
            open(my $pipe_apt, '-|', 'apt-get', 'indextargets', '--format',
                '$(URI)', 'Created-By: Packages')
              or error "cannot start apt-get indextargets: $!";
            while (my $uri = <$pipe_apt>) {
                if ($uri =~ /^file:\/\//) {
                    error "nothing got downloaded -- use copy:// instead of"
                      . " file://";
                }
            }
            error "nothing got downloaded";
        }
    }
    return (\@pkgs_to_install, \@essential_pkgs);
}
sub run_extract() {
    # Extract the downloaded essential .deb packages into the chroot by
    # piping "dpkg-deb --fsys-tarfile" into "tar --extract". Does nothing
    # under --dry-run. Progress is reported per package.
    my $options = shift;
    my $essential_pkgs = shift;
    if ($options->{dryrun}) {
        info "skip extracting packages because of --dry-run";
        return;
    }
    info "extracting archives...";
    print_progress 0.0;
    my $counter = 0;
    my $total = scalar @{$essential_pkgs};
    foreach my $deb (@{$essential_pkgs}) {
        $counter += 1;
        # not using dpkg-deb --extract as that would replace the
        # merged-usr symlinks with plain directories
        pipe my $rfh, my $wfh or error "pipe() failed: $!";
        my $pid1 = fork() // error "fork() failed: $!";
        if ($pid1 == 0) {
            open(STDOUT, '>&', $wfh) or error "cannot open STDOUT: $!";
            debug("running dpkg-deb --fsys-tarfile $options->{root}/$deb");
            eval { Devel::Cover::set_coverage("none") } if $is_covering;
            exec 'dpkg-deb', '--fsys-tarfile', "$options->{root}/$deb";
        }
        my $pid2 = fork() // error "fork() failed: $!";
        if ($pid2 == 0) {
            open(STDIN, '<&', $rfh) or error "cannot open STDIN: $!";
            debug( "running tar -C $options->{root}"
                . " --keep-directory-symlink --extract --file -");
            eval { Devel::Cover::set_coverage("none") } if $is_covering;
            exec 'tar', '-C', $options->{root},
              '--keep-directory-symlink', '--extract', '--file', '-';
        }
        # close the parent's copies of both pipe ends so that tar is
        # guaranteed to see EOF once dpkg-deb exits -- otherwise the open
        # write end in this process would keep the pipe alive
        close $wfh;
        close $rfh;
        waitpid($pid1, 0);
        $? == 0 or error "dpkg-deb --fsys-tarfile failed: $?";
        waitpid($pid2, 0);
        $? == 0 or error "tar --extract failed: $?";
        print_progress($counter / $total * 100);
    }
    print_progress "done";
    return;
}
sub run_prepare {
    # Prepare the process environment and build the command prefix that is
    # used to execute programs inside the chroot, depending on the mode
    # (root, unshare, fakechroot, proot). Returns an array ref with that
    # command prefix.
    my $options = shift;
    if ($options->{mode} eq 'fakechroot') {
        # this borrows from and extends
        # /etc/fakechroot/debootstrap.env and
        # /etc/fakechroot/chroot.env
        {
            my @fakechrootsubst = ();
            foreach my $d ('/usr/sbin', '/usr/bin', '/sbin', '/bin') {
                push @fakechrootsubst, "$d/chroot=/usr/sbin/chroot.fakechroot";
                push @fakechrootsubst, "$d/mkfifo=/bin/true";
                push @fakechrootsubst, "$d/ldconfig=/bin/true";
                push @fakechrootsubst, "$d/ldd=/usr/bin/ldd.fakechroot";
                push @fakechrootsubst, "$d/ischroot=/bin/true";
            }
            if (defined $ENV{FAKECHROOT_CMD_SUBST}
                && $ENV{FAKECHROOT_CMD_SUBST} ne "") {
                push @fakechrootsubst, split /:/, $ENV{FAKECHROOT_CMD_SUBST};
            }
            ## no critic (Variables::RequireLocalizedPunctuationVars)
            $ENV{FAKECHROOT_CMD_SUBST} = join ':', @fakechrootsubst;
        }
        if (defined $ENV{FAKECHROOT_EXCLUDE_PATH}
            && $ENV{FAKECHROOT_EXCLUDE_PATH} ne "") {
            ## no critic (Variables::RequireLocalizedPunctuationVars)
            $ENV{FAKECHROOT_EXCLUDE_PATH}
              = "$ENV{FAKECHROOT_EXCLUDE_PATH}:/dev:/proc:/sys";
        } else {
            ## no critic (Variables::RequireLocalizedPunctuationVars)
            $ENV{FAKECHROOT_EXCLUDE_PATH} = '/dev:/proc:/sys';
        }
        # workaround for long unix socket path if FAKECHROOT_BASE
        # exceeds the limit of 108 bytes
        {
            ## no critic (Variables::RequireLocalizedPunctuationVars)
            $ENV{FAKECHROOT_AF_UNIX_PATH} = "/tmp";
        }
        {
            my @ldsoconf = ('/etc/ld.so.conf');
            opendir(my $dh, '/etc/ld.so.conf.d')
              or error "Can't opendir(/etc/ld.so.conf.d): $!";
            while (my $entry = readdir $dh) {
                # skip the "." and ".." entries
                next if $entry eq ".";
                next if $entry eq "..";
                next if $entry !~ /\.conf$/;
                push @ldsoconf, "/etc/ld.so.conf.d/$entry";
            }
            closedir($dh);
            my @ldlibpath = ();
            if (defined $ENV{LD_LIBRARY_PATH}
                && $ENV{LD_LIBRARY_PATH} ne "") {
                push @ldlibpath, (split /:/, $ENV{LD_LIBRARY_PATH});
            }
            # FIXME: workaround allowing installation of systemd should
            # live in fakechroot, see #917920
            push @ldlibpath, "/lib/systemd";
            foreach my $fname (@ldsoconf) {
                open my $fh, "<", $fname
                  or error "cannot open $fname for reading: $!";
                while (my $line = <$fh>) {
                    # strip the trailing newline -- otherwise it would
                    # end up embedded inside LD_LIBRARY_PATH below
                    chomp $line;
                    next if $line !~ /^\//;
                    push @ldlibpath, $line;
                }
                close $fh;
            }
            ## no critic (Variables::RequireLocalizedPunctuationVars)
            $ENV{LD_LIBRARY_PATH} = join ':', @ldlibpath;
        }
    }
    # make sure that APT_CONFIG and TMPDIR are not set when executing
    # anything inside the chroot
    my @chrootcmd = ('env', '--unset=APT_CONFIG', '--unset=TMPDIR');
    if ($options->{mode} eq 'proot') {
        push @chrootcmd,
          (
            'proot', '--root-id',
            '--bind=/dev', '--bind=/proc',
            '--bind=/sys', "--rootfs=$options->{root}",
            '--cwd=/'
          );
    } elsif (
        any { $_ eq $options->{mode} }
        ('root', 'unshare', 'fakechroot')
    ) {
        push @chrootcmd, ('/usr/sbin/chroot', $options->{root});
    } else {
        error "unknown mode: $options->{mode}";
    }
    # copy qemu-user-static binary into chroot or setup proot with
    # --qemu
    if (defined $options->{qemu}) {
        if ($options->{mode} eq 'proot') {
            push @chrootcmd, "--qemu=qemu-$options->{qemu}";
        } elsif ($options->{mode} eq 'fakechroot') {
            # Make sure that the fakeroot and fakechroot shared
            # libraries exist for the right architecture.
            # Use "or" after the whole open() call; attaching "// error"
            # to the last argument would never check open() itself.
            open(my $fh, '-|', 'dpkg-architecture', '-a',
                $options->{nativearch}, '-qDEB_HOST_MULTIARCH')
              or error "failed to fork(): $!";
            chomp(
                my $deb_host_multiarch = do { local $/; <$fh> }
            );
            close $fh;
            if (($? != 0) or (!$deb_host_multiarch)) {
                error "dpkg-architecture failed: $?";
            }
            my $fakechrootdir = "/usr/lib/$deb_host_multiarch/fakechroot";
            if (!-e "$fakechrootdir/libfakechroot.so") {
                error "$fakechrootdir/libfakechroot.so doesn't exist."
                  . " Install libfakechroot:$options->{nativearch}"
                  . " outside the chroot";
            }
            my $fakerootdir = "/usr/lib/$deb_host_multiarch/libfakeroot";
            if (!-e "$fakerootdir/libfakeroot-sysv.so") {
                error "$fakerootdir/libfakeroot-sysv.so doesn't exist."
                  . " Install libfakeroot:$options->{nativearch}"
                  . " outside the chroot";
            }
            # The rest of this block sets environment variables, so we
            # have to add the "no critic" statement to stop perlcritic
            # from complaining about setting global variables
            ## no critic (Variables::RequireLocalizedPunctuationVars)
            # fakechroot only fills LD_LIBRARY_PATH with the
            # directories of the host's architecture. We append the
            # directories of the chroot architecture.
            $ENV{LD_LIBRARY_PATH}
              = "$ENV{LD_LIBRARY_PATH}:$fakechrootdir:$fakerootdir";
            # The binfmt support on the outside is used, so qemu needs
            # to know where it has to look for shared libraries
            if (defined $ENV{QEMU_LD_PREFIX}
                && $ENV{QEMU_LD_PREFIX} ne "") {
                $ENV{QEMU_LD_PREFIX} = "$ENV{QEMU_LD_PREFIX}:$options->{root}";
            } else {
                $ENV{QEMU_LD_PREFIX} = $options->{root};
            }
        } elsif (any { $_ eq $options->{mode} } ('root', 'unshare')) {
            # other modes require a static qemu-user binary
            my $qemubin = "/usr/bin/qemu-$options->{qemu}-static";
            if (!-e $qemubin) {
                error "cannot find $qemubin";
            }
            copy $qemubin, "$options->{root}/$qemubin"
              or error "cannot copy $qemubin: $!";
            # File::Copy does not retain permissions but on some
            # platforms (like Travis CI) the binfmt interpreter must
            # have the executable bit set or otherwise execve will
            # fail with EACCES
            chmod 0755, "$options->{root}/$qemubin"
              or error "cannot chmod $qemubin: $!";
        } else {
            error "unknown mode: $options->{mode}";
        }
    }
    # some versions of coreutils use the renameat2 system call in mv.
    # This breaks certain versions of fakechroot and proot. Here we do
    # a sanity check and warn the user in case things might break.
    if (any { $_ eq $options->{mode} } ('fakechroot', 'proot')
        and -e "$options->{root}/bin/mv") {
        mkdir "$options->{root}/000-move-me"
          or error "cannot create directory: $!";
        my $ret = system @chrootcmd, '/bin/mv', '/000-move-me',
          '/001-delete-me';
        if ($ret != 0) {
            if ($options->{mode} eq 'proot') {
                info "the /bin/mv binary inside the chroot doesn't"
                  . " work under proot";
                info "this is likely due to missing support for"
                  . " renameat2 in proot";
                info "see https://github.com/proot-me/PRoot/issues/147";
            } else {
                info "the /bin/mv binary inside the chroot doesn't"
                  . " work under fakechroot";
                info "with certain versions of coreutils and glibc,"
                  . " this is due to missing support for renameat2 in"
                  . " fakechroot";
                info "see https://github.com/dex4er/fakechroot/issues/60";
            }
            info "expect package post installation scripts not to work";
            rmdir "$options->{root}/000-move-me"
              or error "cannot rmdir: $!";
        } else {
            rmdir "$options->{root}/001-delete-me"
              or error "cannot rmdir: $!";
        }
    }
    return \@chrootcmd;
}
sub run_essential() {
    # Install the previously downloaded essential packages. In chrootless
    # mode this is delegated to apt-get with dpkg's
    # --force-script-chrootless; in all other modes the already extracted
    # packages are installed properly by running dpkg inside the chroot.
    # The .deb files are removed from the chroot when done.
    my $options = shift;
    my $essential_pkgs = shift;    # array ref: .deb paths relative to the chroot
    my $chrootcmd = shift;         # array ref: command prefix to enter the chroot
    if ($options->{mode} eq 'chrootless') {
        if ($options->{dryrun}) {
            info "simulate installing packages...";
        } else {
            info "installing packages...";
        }
        # FIXME: the dpkg config from the host is parsed before the command
        # line arguments are parsed and might break this mode
        # Example: if the host has --path-exclude set, then this will also
        # affect the chroot.
        my @chrootless_opts = (
            '-oDPkg::Options::=--force-not-root',
            '-oDPkg::Options::=--force-script-chrootless',
            '-oDPkg::Options::=--root=' . $options->{root},
            '-oDPkg::Options::=--log=' . "$options->{root}/var/log/dpkg.log",
            $options->{dryrun} ? '-oAPT::Get::Simulate=true' : (),
        );
        if (defined $options->{qemu}) {
            # The binfmt support on the outside is used, so qemu needs to know
            # where it has to look for shared libraries
            if (defined $ENV{QEMU_LD_PREFIX}
                && $ENV{QEMU_LD_PREFIX} ne "") {
                ## no critic (Variables::RequireLocalizedPunctuationVars)
                $ENV{QEMU_LD_PREFIX} = "$ENV{QEMU_LD_PREFIX}:$options->{root}";
            } else {
                ## no critic (Variables::RequireLocalizedPunctuationVars)
                $ENV{QEMU_LD_PREFIX} = $options->{root};
            }
        }
        run_apt_progress({
            ARGV => ['apt-get', '--yes', @chrootless_opts, 'install'],
            PKGS => [map { "$options->{root}/$_" } @{$essential_pkgs}],
        });
    } elsif (
        any { $_ eq $options->{mode} }
        ('root', 'unshare', 'fakechroot', 'proot')
    ) {
        # install the extracted packages properly
        # we need --force-depends because dpkg does not take Pre-Depends
        # into account and thus doesn't install them in the right order
        # And the --predep-package option is broken: #539133
        if ($options->{dryrun}) {
            info "simulate installing packages...";
        } else {
            info "installing packages...";
            run_chroot(
                sub {
                    run_dpkg_progress({
                        ARGV => [
                            @{$chrootcmd}, 'env',
                            '--unset=TMPDIR', 'dpkg',
                            '--install', '--force-depends'
                        ],
                        PKGS => $essential_pkgs,
                    });
                },
                $options
            );
        }
        # if the path-excluded option was added to the dpkg config,
        # reinstall all packages
        if ((!$options->{dryrun})
            and -e "$options->{root}/etc/dpkg/dpkg.cfg.d/99mmdebstrap") {
            open(my $fh, '<',
                "$options->{root}/etc/dpkg/dpkg.cfg.d/99mmdebstrap")
              or error "cannot open /etc/dpkg/dpkg.cfg.d/99mmdebstrap: $!";
            # count path-exclude lines; a second dpkg pass is only needed
            # if at least one is present
            my $num_matches = grep { /^path-exclude=/ } <$fh>;
            close $fh;
            if ($num_matches > 0) {
                # without --skip-same-version, dpkg will install the given
                # packages even though they are already installed
                info "re-installing packages because of path-exclude...";
                run_chroot(
                    sub {
                        run_dpkg_progress({
                            ARGV => [
                                @{$chrootcmd}, 'env',
                                '--unset=TMPDIR', 'dpkg',
                                '--install', '--force-depends'
                            ],
                            PKGS => $essential_pkgs,
                        });
                    },
                    $options
                );
            }
        }
    } else {
        error "unknown mode: $options->{mode}";
    }
    # the downloaded .deb files are no longer needed inside the chroot
    foreach my $deb (@{$essential_pkgs}) {
        unlink "$options->{root}/$deb"
          or error "cannot unlink $deb: $!";
    }
    return;
}
sub run_install() {
    # Install the remaining (non-essential) packages requested by the user:
    # in chrootless mode via apt-get from the outside, otherwise by first
    # installing bootstrap prerequisites (apt, transports) from the outside
    # with dpkg and then running apt-get inside the chroot.
    my $options = shift;
    my $pkgs_to_install = shift;
    my $chrootcmd = shift;
    if ($options->{mode} eq 'chrootless') {
        if (scalar @{$pkgs_to_install} > 0) {
            my @chrootless_opts = (
                '-oDPkg::Options::=--force-not-root',
                '-oDPkg::Options::=--force-script-chrootless',
                '-oDPkg::Options::=--root=' . $options->{root},
                '-oDPkg::Options::=--log='
                  . "$options->{root}/var/log/dpkg.log",
                $options->{dryrun} ? '-oAPT::Get::Simulate=true' : (),
            );
            run_apt_progress({
                ARGV => ['apt-get', '--yes', @chrootless_opts, 'install'],
                PKGS => $pkgs_to_install,
            });
        }
    } elsif (
        any { $_ eq $options->{mode} }
        ('root', 'unshare', 'fakechroot', 'proot')
    ) {
        # run essential hooks
        if ($options->{variant} ne 'custom') {
        }
        if ($options->{variant} ne 'custom'
            and scalar @{$pkgs_to_install} > 0) {
            # some packages have to be installed from the outside before
            # anything can be installed from the inside.
            #
            # we do not need to install any *-archive-keyring packages
            # inside the chroot prior to installing the packages, because
            # the keyring is only used when doing "apt-get update" and that
            # was already done at the beginning using key material from the
            # outside. Since the apt cache is already filled and we are not
            # calling "apt-get update" again, the keyring can be installed
            # later during installation. But: if it's not installed during
            # installation, then we might end up with a fully installed
            # system without keyrings that are valid for its sources.list.
            my @pkgs_to_install_from_outside;
            # install apt if necessary
            if ($options->{variant} ne 'apt') {
                push @pkgs_to_install_from_outside, 'apt';
            }
            # since apt will be run inside the chroot, make sure that
            # apt-transport-https and ca-certificates gets installed first
            # if any mirror is a https URI
            open(my $pipe_apt, '-|', 'apt-get', 'indextargets',
                '--format', '$(URI)', 'Created-By: Packages')
              or error "cannot start apt-get indextargets: $!";
            while (my $uri = <$pipe_apt>) {
                if ($uri =~ /^https:\/\//) {
                    # FIXME: support for https is part of apt >= 1.5
                    push @pkgs_to_install_from_outside, 'apt-transport-https';
                    push @pkgs_to_install_from_outside, 'ca-certificates';
                    last;
                } elsif ($uri =~ /^tor(\+[a-z]+)*:\/\//) {
                    # tor URIs can be tor+http://, tor+https:// or even
                    # tor+mirror+file://
                    push @pkgs_to_install_from_outside, 'apt-transport-tor';
                    last;
                }
            }
            close $pipe_apt;
            $? == 0 or error "apt-get indextargets failed";
            if (scalar @pkgs_to_install_from_outside > 0) {
                if ($options->{dryrun}) {
                    info 'simulate downloading '
                      . (join ', ', @pkgs_to_install_from_outside) . "...";
                } else {
                    info 'downloading '
                      . (join ', ', @pkgs_to_install_from_outside) . "...";
                }
                run_apt_progress({
                    ARGV => [
                        'apt-get',
                        '--yes',
                        '-oApt::Get::Download-Only=true',
                        $options->{dryrun}
                        ? '-oAPT::Get::Simulate=true'
                        : (),
                        'install'
                    ],
                    PKGS => [@pkgs_to_install_from_outside],
                });
                if ($options->{dryrun}) {
                    info 'simulate installing '
                      . (join ', ', @pkgs_to_install_from_outside) . "...";
                } else {
                    # collect the .deb files apt just placed into the
                    # package cache inside the chroot
                    my @debs_to_install;
                    my $apt_archives = "/var/cache/apt/archives/";
                    opendir my $dh, "$options->{root}/$apt_archives"
                      or error "cannot read $apt_archives";
                    while (my $deb = readdir $dh) {
                        if ($deb !~ /\.deb$/) {
                            next;
                        }
                        $deb = "$apt_archives/$deb";
                        if (!-f "$options->{root}/$deb") {
                            next;
                        }
                        push @debs_to_install, $deb;
                    }
                    # a handle opened with opendir must be closed with
                    # closedir, not close
                    closedir $dh;
                    if (scalar @debs_to_install == 0) {
                        warning "nothing got downloaded -- maybe the packages"
                          . " were already installed?";
                    } else {
                        # we need --force-depends because dpkg does not take
                        # Pre-Depends into account and thus doesn't install
                        # them in the right order
                        info 'installing '
                          . (join ', ', @pkgs_to_install_from_outside) . "...";
                        run_dpkg_progress({
                            ARGV => [
                                @{$chrootcmd}, 'env',
                                '--unset=TMPDIR', 'dpkg',
                                '--install', '--force-depends'
                            ],
                            PKGS => \@debs_to_install,
                        });
                        foreach my $deb (@debs_to_install) {
                            unlink "$options->{root}/$deb"
                              or error "cannot unlink $deb: $!";
                        }
                    }
                }
            }
            if (!$options->{dryrun}) {
                run_chroot(
                    sub {
                        info "installing remaining packages inside the"
                          . " chroot...";
                        run_apt_progress({
                            ARGV => [
                                @{$chrootcmd}, 'env',
                                '--unset=APT_CONFIG', '--unset=TMPDIR',
                                'apt-get', '--yes',
                                'install'
                            ],
                            PKGS => $pkgs_to_install,
                        });
                    },
                    $options
                );
            } else {
                info "simulate installing remaining packages inside the"
                  . " chroot...";
                run_apt_progress({
                    ARGV => [
                        'apt-get', '--yes',
                        '-oAPT::Get::Simulate=true', 'install'
                    ],
                    PKGS => $pkgs_to_install,
                });
            }
        }
    } else {
        error "unknown mode: $options->{mode}";
    }
    return;
}
sub run_cleanup() {
    # Perform the final cleanup steps inside the new chroot. Each step can
    # individually be disabled via the --skip option (cleanup/apt,
    # cleanup/mmdebstrap, cleanup/reproducible, cleanup/tmp).
    my $options = shift;
    if (any { $_ eq 'cleanup/apt' } @{ $options->{skip} }) {
        info "skipping cleanup/apt as requested";
    } else {
        info "cleaning package lists and apt cache...";
        run_apt_progress({
            ARGV => [
                'apt-get', '--option',
                'Dir::Etc::SourceList=/dev/null', '--option',
                'Dir::Etc::SourceParts=/dev/null', 'update'
            ],
            CHDIR => $options->{root},
        });
        run_apt_progress(
            { ARGV => ['apt-get', 'clean'], CHDIR => $options->{root} });

        # apt since 1.6 creates the auxfiles directory. If apt inside the
        # chroot is older than that, then it will not know how to clean it.
        if (-e "$options->{root}/var/lib/apt/lists/auxfiles") {
            # use error() instead of die for consistency with every other
            # failure path in this program
            rmdir "$options->{root}/var/lib/apt/lists/auxfiles"
              or error "cannot rmdir /var/lib/apt/lists/auxfiles: $!";
        }
    }
    if (any { $_ eq 'cleanup/mmdebstrap' } @{ $options->{skip} }) {
        info "skipping cleanup/mmdebstrap as requested";
    } else {
        # clean up temporary configuration file
        unlink "$options->{root}/etc/apt/apt.conf.d/00mmdebstrap"
          or error "failed to unlink /etc/apt/apt.conf.d/00mmdebstrap: $!";
        if (defined $ENV{APT_CONFIG} && -e $ENV{APT_CONFIG}) {
            unlink $ENV{APT_CONFIG}
              or error "failed to unlink $ENV{APT_CONFIG}: $!";
        }
        if (defined $options->{qemu}
            and any { $_ eq $options->{mode} } ('root', 'unshare')) {
            unlink "$options->{root}/usr/bin/qemu-$options->{qemu}-static"
              or error
              "cannot unlink /usr/bin/qemu-$options->{qemu}-static: $!";
        }
    }
    if (any { $_ eq 'cleanup/reproducible' } @{ $options->{skip} }) {
        info "skipping cleanup/reproducible as requested";
    } else {
        # clean up certain files to make output reproducible
        foreach my $fname (
            '/var/log/dpkg.log', '/var/log/apt/history.log',
            '/var/log/apt/term.log', '/var/log/alternatives.log',
            '/var/cache/ldconfig/aux-cache', '/var/log/apt/eipp.log.xz'
        ) {
            my $path = "$options->{root}$fname";
            if (!-e $path) {
                next;
            }
            unlink $path or error "cannot unlink $path: $!";
        }
        if (-e "$options->{root}/etc/machine-id") {
            # from machine-id(5):
            # For operating system images which are created once and used on
            # multiple machines, for example for containers or in the cloud,
            # /etc/machine-id should be an empty file in the generic file
            # system image. An ID will be generated during boot and saved to
            # this file if possible. Having an empty file in place is useful
            # because it allows a temporary file to be bind-mounted over the
            # real file, in case the image is used read-only.
            unlink "$options->{root}/etc/machine-id"
              or error "cannot unlink /etc/machine-id: $!";
            open my $fh, '>', "$options->{root}/etc/machine-id"
              or error "failed to open(): $!";
            close $fh;
        }
    }
    if (any { $_ eq 'cleanup/tmp' } @{ $options->{skip} }) {
        info "skipping cleanup/tmp as requested";
    } else {
        # remove any possible leftovers in /tmp but warn about it
        if (-d "$options->{root}/tmp") {
            opendir(my $dh, "$options->{root}/tmp")
              or error "Can't opendir($options->{root}/tmp): $!";
            while (my $entry = readdir $dh) {
                # skip the "." and ".." entries
                next if $entry eq ".";
                next if $entry eq "..";
                warning "deleting files in /tmp: $entry";
                remove_tree("$options->{root}/tmp/$entry",
                    { error => \my $err });
                if (@$err) {
                    for my $diag (@$err) {
                        my ($file, $message) = %$diag;
                        if ($file eq '') { warning "general error: $message"; }
                        else { warning "problem unlinking $file: $message"; }
                    }
                }
            }
            closedir($dh);
        }
    }
    return;
}
# messages from process inside unshared namespace to the outside
# openw -- open file for writing
# untar -- extract tar into directory
# write -- write data to last opened file or tar process
# close -- finish file writing or tar extraction
# adios -- last message and tear-down
# messages from process outside unshared namespace to the inside
# okthx -- success
sub checkokthx {
    # Read one protocol message from the given filehandle and require it to
    # be a bare "okthx" acknowledgement: a 2-byte big-endian payload length
    # followed by a 5-character message name, with zero payload bytes.
    my $fh = shift;
    my $nread = read($fh, my $packet, 2 + 5);
    defined $nread or error "cannot read from socket: $!";
    $nread != 0 or error "received eof on socket";
    my ($payloadlen, $name) = unpack("nA5", $packet);
    $name eq "okthx" or error "expected okthx but got: $name";
    $payloadlen == 0 or error "expected no payload but got $payloadlen bytes";
    return;
}
sub hookhelper {
    # Helper process run inside hooks: implements copy-in/tar-in/upload/
    # sync-in and copy-out/tar-out/download/sync-out by talking the
    # openw/untar/write/close protocol with the parent process on
    # STDIN/STDOUT while running cat or tar (possibly inside the chroot).
    #
    # we put everything in an eval block because that way we can easily handle
    # errors without goto labels or much code duplication: the error handler
    # has to send an "error" message to the other side
    eval {
        my $root = $ARGV[1];
        my $mode = $ARGV[2];
        my $hook = $ARGV[3];
        my $qemu = $ARGV[4];
        $verbosity_level = $ARGV[5];
        my $command = $ARGV[6];
        # unless we are in the setup hook (where there is no tar inside the
        # chroot) we need to run tar on the inside because otherwise, possible
        # absolute symlinks in the path given via --directory are not
        # correctly resolved
        #
        # FIXME: the issue above can be fixed by a function that is able to
        # resolve absolute symlinks even inside the chroot directory to a full
        # path that is valid on the outside -- fakechroot and proot have their
        # own reasons, see below
        my @cmdprefix = ();
        my @tarcmd = (
            'tar', '--numeric-owner', '--xattrs', '--format=pax',
            '--pax-option=exthdr.name=%d/PaxHeaders/%f,'
              . 'delete=atime,delete=ctime'
        );
        if ($hook eq 'setup') {
            if ($mode eq 'proot') {
                # since we cannot run tar inside the chroot under proot during
                # the setup hook because the chroot is empty, we have to run
                # tar from the outside, which leads to all files being owned
                # by the user running mmdebstrap. To let the ownership
                # information not be completely off, we force all files be
                # owned by the root user.
                push @tarcmd, '--owner=0', '--group=0';
            }
        } elsif (any { $_ eq $hook } ('extract', 'essential', 'customize')) {
            if ($mode eq 'fakechroot') {
                # Fakechroot requires tar to run inside the chroot or
                # otherwise absolute symlinks will include the path to the
                # root directory
                push @cmdprefix, '/usr/sbin/chroot', $root;
            } elsif ($mode eq 'proot') {
                # proot requires tar to run inside proot or otherwise
                # permissions will be completely off
                push @cmdprefix, 'proot', '--root-id', "--rootfs=$root",
                  '--cwd=/', "--qemu=$qemu";
            } elsif (any { $_ eq $mode } ('root', 'chrootless', 'unshare')) {
                push @cmdprefix, '/usr/sbin/chroot', $root;
            } else {
                error "unknown mode: $mode";
            }
        } else {
            error "unknown hook: $hook";
        }
        if (
            any { $_ eq $command }
            ('copy-in', 'tar-in', 'upload', 'sync-in')
        ) {
            if (scalar @ARGV < 9) {
                error "copy-in and tar-in need at least one path on the"
                  . " outside and the output path inside the chroot";
            }
            my $outpath = $ARGV[-1];
            for (my $i = 7 ; $i < $#ARGV ; $i++) {
                # the right argument for tar's --directory argument depends on
                # whether tar is called from inside the chroot or from the
                # outside
                my $directory;
                if ($hook eq 'setup') {
                    $directory = "$root/$outpath";
                } elsif (
                    any { $_ eq $hook }
                    ('extract', 'essential', 'customize')
                ) {
                    $directory = $outpath;
                } else {
                    error "unknown hook: $hook";
                }
                # FIXME: here we would like to check if the path inside the
                # chroot given by $directory actually exists but we cannot
                # because we are missing a function that can resolve even
                # paths including absolute symlinks to paths that are valid
                # outside the chroot
                my $fh;
                if ($command eq 'upload') {
                    # open the requested file for writing
                    # NB: the "or" must apply to the whole open() call --
                    # a "// error" attached to the last list element would
                    # leave the open itself unchecked
                    open($fh, '|-', @cmdprefix, 'sh', '-c', 'cat > "$1"',
                        'exec', $directory)
                      or error "failed to fork(): $!";
                } elsif (
                    any { $_ eq $command }
                    ('copy-in', 'tar-in', 'sync-in')
                ) {
                    # open a tar process that extracts the tarfile that we
                    # supply it with on stdin to the output directory inside
                    # the chroot
                    open($fh, '|-', @cmdprefix, @tarcmd,
                        '--xattrs-include=*', '--directory', $directory,
                        '--extract', '--file', '-')
                      or error "failed to fork(): $!";
                } else {
                    error "unknown command: $command";
                }
                if ($command eq 'copy-in') {
                    # instruct the parent process to create a tarball of the
                    # requested path outside the chroot
                    debug "sending mktar";
                    print STDOUT (
                        pack("n", length $ARGV[$i]) . "mktar" . $ARGV[$i]);
                } elsif ($command eq 'sync-in') {
                    # instruct the parent process to create a tarball of the
                    # content of the requested path outside the chroot
                    debug "sending mktac";
                    print STDOUT (
                        pack("n", length $ARGV[$i]) . "mktac" . $ARGV[$i]);
                } elsif (any { $_ eq $command } ('upload', 'tar-in')) {
                    # instruct parent process to open a tarball of the
                    # requested path outside the chroot for reading
                    debug "sending openr";
                    print STDOUT (
                        pack("n", length $ARGV[$i]) . "openr" . $ARGV[$i]);
                } else {
                    error "unknown command: $command";
                }
                STDOUT->flush();
                debug "waiting for okthx";
                checkokthx \*STDIN;
                # handle "write" messages from the parent process and feed
                # their payload into the tar process until a "close" message
                # is encountered
                while (1) {
                    # receive the next message
                    my $ret = read(STDIN, my $buf, 2 + 5)
                      // error "cannot read from socket: $!";
                    if ($ret == 0) {
                        error "received eof on socket";
                    }
                    my ($len, $msg) = unpack("nA5", $buf);
                    debug "received message: $msg";
                    if ($msg eq "close") {
                        # finish the loop
                        if ($len != 0) {
                            error "expected no payload but got $len bytes";
                        }
                        debug "sending okthx";
                        print STDOUT (pack("n", 0) . "okthx")
                          or error "cannot write to socket: $!";
                        STDOUT->flush();
                        last;
                    } elsif ($msg ne "write") {
                        error "expected write but got: $msg";
                    }
                    # read the payload
                    # NOTE(review): a single read() from a pipe may return
                    # fewer than $len bytes; the sender writes in <=4096
                    # byte messages but a short read here would desync the
                    # protocol -- TODO consider a read loop
                    my $content;
                    {
                        my $ret = read(STDIN, $content, $len)
                          // error "error cannot read from socket: $!";
                        if ($ret == 0) {
                            error "received eof on socket";
                        }
                    }
                    # write the payload to the tar process
                    print $fh $content
                      or error "cannot write to tar process: $!";
                    debug "sending okthx";
                    print STDOUT (pack("n", 0) . "okthx")
                      or error "cannot write to socket: $!";
                    STDOUT->flush();
                }
                close $fh;
                if ($command ne 'upload' and $? != 0) {
                    error "tar failed";
                }
            }
        } elsif (
            any { $_ eq $command }
            ('copy-out', 'tar-out', 'download', 'sync-out')
        ) {
            if (scalar @ARGV < 9) {
                error "copy-out needs at least one path inside the chroot and"
                  . " the output path on the outside";
            }
            my $outpath = $ARGV[-1];
            for (my $i = 7 ; $i < $#ARGV ; $i++) {
                # the right argument for tar's --directory argument depends on
                # whether tar is called from inside the chroot or from the
                # outside
                my $directory;
                if ($hook eq 'setup') {
                    $directory = "$root/$ARGV[$i]";
                } elsif (
                    any { $_ eq $hook }
                    ('extract', 'essential', 'customize')
                ) {
                    $directory = $ARGV[$i];
                } else {
                    error "unknown hook: $hook";
                }
                # FIXME: here we would like to check if the path inside the
                # chroot given by $directory actually exists but we cannot
                # because we are missing a function that can resolve even
                # paths including absolute symlinks to paths that are valid
                # outside the chroot
                my $fh;
                if ($command eq 'download') {
                    # open the requested file for reading
                    open($fh, '-|', @cmdprefix, 'sh', '-c', 'cat "$1"',
                        'exec', $directory)
                      or error "failed to fork(): $!";
                } elsif ($command eq 'sync-out') {
                    # Open a tar process that creates a tarfile of everything
                    # inside the requested directory inside the chroot and
                    # writes it to stdout.
                    open($fh, '-|', @cmdprefix, @tarcmd, '--directory',
                        $directory, '--create', '--file', '-', '.')
                      or error "failed to fork(): $!";
                } elsif (any { $_ eq $command } ('copy-out', 'tar-out')) {
                    # Open a tar process that creates a tarfile of the
                    # requested directory inside the chroot and writes it to
                    # stdout. To emulate the behaviour of cp, change to the
                    # dirname of the requested path first.
                    open($fh, '-|', @cmdprefix, @tarcmd, '--directory',
                        dirname($directory), '--create', '--file', '-',
                        basename($directory))
                      or error "failed to fork(): $!";
                } else {
                    error "unknown command: $command";
                }
                if (any { $_ eq $command } ('copy-out', 'sync-out')) {
                    # instruct the parent process to extract a tarball to a
                    # certain path outside the chroot
                    debug "sending untar";
                    print STDOUT (
                        pack("n", length $outpath) . "untar" . $outpath);
                } elsif (any { $_ eq $command } ('download', 'tar-out')) {
                    # instruct parent process to open a tarball of the
                    # requested path outside the chroot for writing
                    debug "sending openw";
                    print STDOUT (
                        pack("n", length $outpath) . "openw" . $outpath);
                } else {
                    error "unknown command: $command";
                }
                STDOUT->flush();
                debug "waiting for okthx";
                checkokthx \*STDIN;
                # read from the tar process and send as payload to the parent
                # process
                while (1) {
                    # read from tar
                    my $ret = read($fh, my $cont, 4096)
                      // error "cannot read from pipe: $!";
                    if ($ret == 0) { last; }
                    debug "sending write";
                    # send to parent
                    print STDOUT pack("n", $ret) . "write" . $cont;
                    STDOUT->flush();
                    debug "waiting for okthx";
                    checkokthx \*STDIN;
                    if ($ret < 4096) { last; }
                }
                # signal to the parent process that we are done
                debug "sending close";
                print STDOUT pack("n", 0) . "close";
                STDOUT->flush();
                debug "waiting for okthx";
                checkokthx \*STDIN;
                close $fh;
                if ($? != 0) {
                    error "$command failed";
                }
            }
        } else {
            error "unknown command: $command";
        }
    };
    if ($@) {
        # inform the other side that something went wrong
        print STDOUT (pack("n", 0) . "error");
        STDOUT->flush();
        error "hookhelper failed: $@";
    }
    return;
}
2020-01-08 14:41:49 +00:00
# Guess whether an apt sources.list snippet is in one-line or deb822 format.
#
# Takes the file content as a single string and inspects it line by line:
#  - a line starting with "deb" or "deb-src" followed by whitespace marks
#    the one-line format;
#  - a line starting with a deb822 field name ("Word:" with no leading
#    whitespace, '#' or ':') marks the deb822 format.
# The first matching line wins. Comment lines ("#...") and blank lines match
# neither pattern and are effectively skipped.
#
# Returns 'one-line', 'deb822', or undef if the format cannot be determined.
sub guess_sources_format {
    my $content = shift;
    my $is_deb822 = 0;
    my $is_oneline = 0;
    for my $line (split "\n", $content) {
        # apt's one-line format allows any whitespace (spaces or tabs)
        # between the "deb"/"deb-src" keyword and the URI, so match \s
        # instead of a literal space
        if ($line =~ /^deb(-src)?\s/) {
            $is_oneline = 1;
            last;
        }
        if ($line =~ /^[^#:\s]+:/) {
            $is_deb822 = 1;
            last;
        }
    }
    if ($is_deb822) {
        return 'deb822';
    }
    if ($is_oneline) {
        return 'one-line';
    }
    return;
}
# Approximate the disk usage of $directory in 1024-byte blocks by running
# du(1) and adding a 10% safety margin on top of its result.
#
# Returns the padded size as an integer number of KiB blocks; dies via
# error() if du cannot be started, fails, or produces unexpected output.
sub approx_disk_usage {
    my $directory = shift;
    info "approximating disk usage...";
    # Use "open(...) or error" instead of appending "// error" to the last
    # open() argument: in the original form the // operator bound to
    # $directory (which is always defined), so a failing open() was never
    # detected.
    open(my $fh, '-|', 'du', '--block-size', '1024', '--summarize',
        '--one-file-system', $directory)
      or error "failed to fork(): $!";
    chomp(
        my $du = do { local $/; <$fh> }
    );
    close $fh;
    # $? holds du's exit status after closing the pipe
    if (($? != 0) or (!$du)) {
        error "du failed: $?";
    }
    # du output format: "<blocks>\t<path>"
    if ($du =~ /^(\d+)\t/) {
        # pad the result by 10% to leave headroom
        return int($1 * 1.1);
    } else {
        error "unexpected du output: $du";
    }
    return;
}
sub main() {
umask 022;
if (scalar @ARGV >= 7 && $ARGV[0] eq "--hook-helper") {
hookhelper();
2020-01-08 14:41:49 +00:00
exit 0;
}
2020-01-18 22:13:10 +00:00
# this is like:
# lxc-usernsexec -- lxc-unshare -s 'MOUNT|PID|UTSNAME|IPC' ...
# but without needing lxc
if ($ARGV[0] eq "--unshare-helper") {
if (!test_unshare(1)) {
exit 1;
}
my @idmap = read_subuid_subgid;
my $pid = get_unshare_cmd(
sub {
0 == system @ARGV[1 .. $#ARGV] or error "system failed: $?";
},
\@idmap
);
waitpid $pid, 0;
$? == 0 or error "unshared command failed";
exit 0;
}
2018-09-18 09:20:24 +00:00
my $mtime = time;
if (exists $ENV{SOURCE_DATE_EPOCH}) {
2020-01-08 16:44:07 +00:00
$mtime = $ENV{SOURCE_DATE_EPOCH} + 0;
2018-09-18 09:20:24 +00:00
}
2020-01-09 07:39:40 +00:00
{
## no critic (Variables::RequireLocalizedPunctuationVars)
$ENV{DEBIAN_FRONTEND} = 'noninteractive';
$ENV{DEBCONF_NONINTERACTIVE_SEEN} = 'true';
$ENV{LC_ALL} = 'C.UTF-8';
$ENV{LANGUAGE} = 'C.UTF-8';
$ENV{LANG} = 'C.UTF-8';
}
2018-09-18 09:20:24 +00:00
# copy ARGV because getopt modifies it
my @ARGVORIG = @ARGV;
# obtain the correct defaults for the keyring locations that apt knows
# about
my $apttrusted
= `eval \$(apt-config shell v Dir::Etc::trusted/f); printf \$v`;
my $apttrustedparts
= `eval \$(apt-config shell v Dir::Etc::trustedparts/d); printf \$v`;
2020-01-08 16:44:07 +00:00
chomp(my $hostarch = `dpkg --print-architecture`);
2018-09-18 09:20:24 +00:00
my $options = {
2020-01-08 16:44:07 +00:00
components => ["main"],
variant => "important",
include => [],
architectures => [$hostarch],
mode => 'auto',
dpkgopts => [],
aptopts => [],
apttrusted => $apttrusted,
apttrustedparts => $apttrustedparts,
2020-01-08 16:44:07 +00:00
noop => [],
setup_hook => [],
2020-03-22 13:08:21 +00:00
extract_hook => [],
2020-01-08 16:44:07 +00:00
essential_hook => [],
customize_hook => [],
2020-01-10 10:44:15 +00:00
dryrun => 0,
2020-04-09 22:00:36 +00:00
skip => [],
2018-09-18 09:20:24 +00:00
};
2019-02-23 07:43:15 +00:00
my $logfile = undef;
my $format = 'auto';
2020-01-08 16:44:07 +00:00
Getopt::Long::Configure('default', 'bundling', 'auto_abbrev',
'ignore_case_always');
2018-09-18 09:20:24 +00:00
GetOptions(
2020-01-08 16:44:07 +00:00
'h|help' => sub { pod2usage(-exitval => 0, -verbose => 1) },
'man' => sub { pod2usage(-exitval => 0, -verbose => 2) },
'version' => sub { print STDOUT "mmdebstrap $VERSION\n"; exit 0; },
'components=s@' => \$options->{components},
'variant=s' => \$options->{variant},
'include=s@' => \$options->{include},
2020-01-08 14:41:49 +00:00
'architectures=s@' => \$options->{architectures},
2020-01-08 16:44:07 +00:00
'mode=s' => \$options->{mode},
'dpkgopt=s@' => \$options->{dpkgopts},
'aptopt=s@' => \$options->{aptopts},
'keyring=s' => sub {
2020-01-08 14:41:49 +00:00
my ($opt_name, $opt_value) = @_;
if ($opt_value =~ /"/) {
error "--keyring: apt cannot handle paths with double quotes:"
. " $opt_value";
2020-01-08 14:41:49 +00:00
}
2020-01-08 16:44:07 +00:00
if (!-e $opt_value) {
2020-01-08 14:41:49 +00:00
error "keyring \"$opt_value\" does not exist";
}
my $abs_path = abs_path($opt_value);
if (!defined $abs_path) {
error "unable to get absolute path of --keyring: $opt_value";
}
# since abs_path resolved all symlinks for us, we can now test
# what the actual target actually is
if (-d $abs_path) {
$options->{apttrustedparts} = $abs_path;
2020-01-08 14:41:49 +00:00
} else {
$options->{apttrusted} = $abs_path;
2020-01-08 14:41:49 +00:00
}
},
2020-01-08 16:44:07 +00:00
's|silent' => sub { $verbosity_level = 0; },
'q|quiet' => sub { $verbosity_level = 0; },
2020-01-08 14:41:49 +00:00
'v|verbose' => sub { $verbosity_level = 2; },
2020-01-08 16:44:07 +00:00
'd|debug' => sub { $verbosity_level = 3; },
'format=s' => \$format,
2020-01-08 14:41:49 +00:00
'logfile=s' => \$logfile,
# no-op options so that mmdebstrap can be used with
# sbuild-createchroot --debootstrap=mmdebstrap
2020-01-08 16:44:07 +00:00
'resolve-deps' => sub { push @{ $options->{noop} }, 'resolve-deps'; },
'merged-usr' => sub { push @{ $options->{noop} }, 'merged-usr'; },
'no-merged-usr' =>
sub { push @{ $options->{noop} }, 'no-merged-usr'; },
'force-check-gpg' =>
sub { push @{ $options->{noop} }, 'force-check-gpg'; },
2020-01-08 14:41:49 +00:00
# hook options are hidden until I'm happy with them
2020-01-08 16:44:07 +00:00
'setup-hook=s@' => \$options->{setup_hook},
2020-03-22 13:08:21 +00:00
'extract-hook=s@' => \$options->{extract_hook},
2020-01-08 14:41:49 +00:00
'essential-hook=s@' => \$options->{essential_hook},
'customize-hook=s@' => \$options->{customize_hook},
# Sometimes --simulate fails even though non-simulate succeeds because
# in simulate mode, apt cannot rely on dpkg to figure out tricky
# dependency situations and will give up instead when it cannot find
# a solution.
#
# 2020-02-06, #debian-apt on OFTC, times in UTC+1
# 12:52 < DonKult> [...] It works in non-simulation because simulate is
# more picky. If you wanna know why simulate complains
# here prepare for long suffering in dependency hell.
'simulate' => \$options->{dryrun},
'dry-run' => \$options->{dryrun},
2020-04-09 22:00:36 +00:00
'skip=s@' => \$options->{skip},
2018-09-18 09:20:24 +00:00
) or pod2usage(-exitval => 2, -verbose => 1);
2019-02-23 07:43:15 +00:00
if (defined($logfile)) {
2020-01-08 14:41:49 +00:00
open(STDERR, '>', $logfile) or error "cannot open $logfile: $!";
2019-02-23 07:43:15 +00:00
}
2020-01-08 16:44:07 +00:00
foreach my $arg (@{ $options->{noop} }) {
info "The option --$arg is a no-op. It only exists for compatibility"
. " with some debootstrap wrappers.";
}
2020-01-10 10:44:15 +00:00
if ($options->{dryrun}) {
2020-03-22 13:08:21 +00:00
foreach my $hook ('setup', 'extract', 'essential', 'customize') {
2020-01-10 10:44:15 +00:00
if (scalar @{ $options->{"${hook}_hook"} } > 0) {
warning "In dry-run mode, --$hook-hook options have no effect";
}
}
}
2020-01-08 16:44:07 +00:00
my @valid_variants = (
'extract', 'custom', 'essential', 'apt',
'required', 'minbase', 'buildd', 'important',
'debootstrap', '-', 'standard'
);
if (none { $_ eq $options->{variant} } @valid_variants) {
2020-01-08 14:41:49 +00:00
error "invalid variant. Choose from " . (join ', ', @valid_variants);
2018-09-18 09:20:24 +00:00
}
# debootstrap and - are an alias for important
2018-09-23 17:36:07 +00:00
if (any { $_ eq $options->{variant} } ('-', 'debootstrap')) {
2020-01-08 14:41:49 +00:00
$options->{variant} = 'important';
2018-09-18 09:20:24 +00:00
}
2020-01-08 16:44:07 +00:00
if ($options->{variant} eq 'essential'
and scalar @{ $options->{include} } > 0) {
warning "cannot install extra packages with variant essential because"
. " apt is missing";
}
2018-09-18 09:20:24 +00:00
# fakeroot is an alias for fakechroot
if ($options->{mode} eq 'fakeroot') {
2020-01-08 14:41:49 +00:00
$options->{mode} = 'fakechroot';
2018-09-18 09:20:24 +00:00
}
# sudo is an alias for root
if ($options->{mode} eq 'sudo') {
2020-01-08 14:41:49 +00:00
$options->{mode} = 'root';
2018-09-18 09:20:24 +00:00
}
2020-01-08 16:44:07 +00:00
my @valid_modes
= ('auto', 'root', 'unshare', 'fakechroot', 'proot', 'chrootless');
2018-09-23 17:36:07 +00:00
if (none { $_ eq $options->{mode} } @valid_modes) {
2020-01-08 14:41:49 +00:00
error "invalid mode. Choose from " . (join ', ', @valid_modes);
2018-09-18 09:20:24 +00:00
}
# sqfs is an alias for squashfs
if ($format eq 'sqfs') {
$format = 'squasfs';
}
# dir is an alias for directory
if ($format eq 'dir') {
$format = 'directory';
}
my @valid_formats = ('auto', 'directory', 'tar', 'squashfs', 'ext2');
if (none { $_ eq $format } @valid_formats) {
error "invalid format. Choose from " . (join ', ', @valid_formats);
}
my $check_fakechroot_running = sub {
2020-01-08 14:41:49 +00:00
# test if we are inside fakechroot already
# We fork a child process because setting FAKECHROOT_DETECT seems to
# be an irreversible operation for fakechroot.
my $pid = open my $rfh, '-|' // error "failed to fork(): $!";
if ($pid == 0) {
# with the FAKECHROOT_DETECT environment variable set, any program
# execution will be replaced with the output "fakeroot [version]"
2020-01-09 07:39:40 +00:00
local $ENV{FAKECHROOT_DETECT} = 0;
2020-01-08 14:41:49 +00:00
exec 'echo', 'If fakechroot is running, this will not be printed';
}
my $content = do { local $/; <$rfh> };
waitpid $pid, 0;
my $result = 0;
if ($? == 0 and $content =~ /^fakechroot \d\.\d+$/) {
$result = 1;
}
return $result;
};
# figure out the mode to use or test whether the chosen mode is legal
if ($options->{mode} eq 'auto') {
2020-01-08 14:41:49 +00:00
if (&{$check_fakechroot_running}()) {
# if mmdebstrap is executed inside fakechroot, then we assume the
# user expects fakechroot mode
$options->{mode} = 'fakechroot';
} elsif ($EFFECTIVE_USER_ID == 0) {
# if mmdebstrap is executed as root, we assume the user wants root
# mode
$options->{mode} = 'root';
} elsif (test_unshare(0)) {
# otherwise, unshare mode is our best option if test_unshare()
# succeeds
$options->{mode} = 'unshare';
} elsif (system('fakechroot --version>/dev/null') == 0) {
# the next fallback is fakechroot
# exec ourselves again but within fakechroot
my @prefix = ();
2020-01-08 16:44:07 +00:00
if ($is_covering) {
2020-01-08 14:41:49 +00:00
@prefix = ($EXECUTABLE_NAME, '-MDevel::Cover=-silent,-nogcov');
}
exec 'fakechroot', 'fakeroot', @prefix, $PROGRAM_NAME, @ARGVORIG;
} elsif (system('proot --version>/dev/null') == 0) {
# and lastly, proot
$options->{mode} = 'proot';
} else {
error "unable to pick chroot mode automatically";
}
info "automatically chosen mode: $options->{mode}";
} elsif ($options->{mode} eq 'root') {
2020-01-08 14:41:49 +00:00
if ($EFFECTIVE_USER_ID != 0) {
error "need to be root";
}
} elsif ($options->{mode} eq 'proot') {
2020-01-08 14:41:49 +00:00
if (system('proot --version>/dev/null') != 0) {
error "need working proot binary";
}
} elsif ($options->{mode} eq 'fakechroot') {
2020-01-08 14:41:49 +00:00
if (&{$check_fakechroot_running}()) {
# fakechroot is already running
} elsif (system('fakechroot --version>/dev/null') != 0) {
error "need working fakechroot binary";
} else {
# exec ourselves again but within fakechroot
my @prefix = ();
2020-01-08 16:44:07 +00:00
if ($is_covering) {
2020-01-08 14:41:49 +00:00
@prefix = ($EXECUTABLE_NAME, '-MDevel::Cover=-silent,-nogcov');
}
exec 'fakechroot', 'fakeroot', @prefix, $PROGRAM_NAME, @ARGVORIG;
}
} elsif ($options->{mode} eq 'unshare') {
2020-01-08 14:41:49 +00:00
if (!test_unshare(1)) {
my $procfile = '/proc/sys/kernel/unprivileged_userns_clone';
2020-01-08 16:44:07 +00:00
open(my $fh, '<', $procfile)
or error "failed to open $procfile: $!";
chomp(
my $content = do { local $/; <$fh> }
);
2020-01-08 14:41:49 +00:00
close($fh);
if ($content ne "1") {
info "/proc/sys/kernel/unprivileged_userns_clone is set to"
. " $content";
info "Try running:";
info " sudo sysctl -w kernel.unprivileged_userns_clone=1";
info "or permanently enable unprivileged usernamespaces by"
. " putting the setting into /etc/sysctl.d/";
info "THIS SETTING HAS SECURITY IMPLICATIONS!";
info "Refer to https://bugs.debian.org/cgi-bin/"
. "bugreport.cgi?bug=898446";
2020-01-08 14:41:49 +00:00
}
exit 1;
}
} elsif ($options->{mode} eq 'chrootless') {
2020-01-08 14:41:49 +00:00
# nothing to do
} else {
2020-01-08 14:41:49 +00:00
error "unknown mode: $options->{mode}";
2018-09-18 09:20:24 +00:00
}
my @architectures = ();
2020-01-08 16:44:07 +00:00
foreach my $archs (@{ $options->{architectures} }) {
2020-01-08 14:41:49 +00:00
foreach my $arch (split /[,\s]+/, $archs) {
# strip leading and trailing whitespace
$arch =~ s/^\s+|\s+$//g;
# skip if the remainder is an empty string
if ($arch eq '') {
next;
}
# do not append component if it's already in the list
2020-01-08 16:44:07 +00:00
if (any { $_ eq $arch } @architectures) {
2020-01-08 14:41:49 +00:00
next;
}
push @architectures, $arch;
}
}
2020-01-08 16:44:07 +00:00
$options->{nativearch} = $hostarch;
$options->{foreignarchs} = [];
if (scalar @architectures == 0) {
warning "empty architecture list: falling back to native architecture"
. " $hostarch";
} elsif (scalar @architectures == 1) {
2020-01-08 14:41:49 +00:00
$options->{nativearch} = $architectures[0];
} else {
2020-01-08 14:41:49 +00:00
$options->{nativearch} = $architectures[0];
2020-01-08 16:44:07 +00:00
push @{ $options->{foreignarchs} },
@architectures[1 .. $#architectures];
}
debug "Native architecture (outside): $hostarch";
debug "Native architecture (inside): $options->{nativearch}";
2020-01-08 16:44:07 +00:00
debug("Foreign architectures (inside): "
. (join ', ', @{ $options->{foreignarchs} }));
2018-09-18 09:20:24 +00:00
{
2020-01-08 14:41:49 +00:00
# FIXME: autogenerate this list
my $deb2qemu = {
2020-01-08 16:44:07 +00:00
alpha => 'alpha',
amd64 => 'x86_64',
arm => 'arm',
arm64 => 'aarch64',
armel => 'arm',
armhf => 'arm',
hppa => 'hppa',
i386 => 'i386',
m68k => 'm68k',
mips => 'mips',
mips64 => 'mips64',
2020-01-08 14:41:49 +00:00
mips64el => 'mips64el',
2020-01-08 16:44:07 +00:00
mipsel => 'mipsel',
powerpc => 'ppc',
ppc64 => 'ppc64',
ppc64el => 'ppc64le',
riscv64 => 'riscv64',
s390x => 's390x',
sh4 => 'sh4',
sparc => 'sparc',
sparc64 => 'sparc64',
2020-01-08 14:41:49 +00:00
};
if ($hostarch ne $options->{nativearch}) {
2020-04-10 10:25:24 +00:00
if (system('arch-test --version>/dev/null') != 0) {
error "install arch-test for foreign architecture support";
}
2020-01-08 14:41:49 +00:00
my $withemu = 0;
2020-01-08 16:44:07 +00:00
my $noemu = 0;
2020-01-08 14:41:49 +00:00
{
my $pid = open my $fh, '-|' // error "failed to fork(): $!";
if ($pid == 0) {
{
2020-01-09 07:39:40 +00:00
## no critic (TestingAndDebugging::ProhibitNoWarnings)
# don't print a warning if the following fails
no warnings;
2020-01-08 14:41:49 +00:00
exec 'arch-test', $options->{nativearch};
}
# if exec didn't work (for example because the arch-test
# program is missing) prepare for the worst and assume that
# the architecture cannot be executed
print "$options->{nativearch}: not supported on this"
. " machine/kernel\n";
2020-01-08 14:41:49 +00:00
exit 1;
}
2020-01-08 16:44:07 +00:00
chomp(
my $content = do { local $/; <$fh> }
);
2020-01-08 14:41:49 +00:00
close $fh;
if ($? == 0 and $content eq "$options->{nativearch}: ok") {
$withemu = 1;
}
}
{
my $pid = open my $fh, '-|' // error "failed to fork(): $!";
if ($pid == 0) {
{
2020-01-09 07:39:40 +00:00
## no critic (TestingAndDebugging::ProhibitNoWarnings)
# don't print a warning if the following fails
no warnings;
2020-01-08 14:41:49 +00:00
exec 'arch-test', '-n', $options->{nativearch};
}
# if exec didn't work (for example because the arch-test
# program is missing) prepare for the worst and assume that
# the architecture cannot be executed
print "$options->{nativearch}: not supported on this"
. " machine/kernel\n";
2020-01-08 14:41:49 +00:00
exit 1;
}
2020-01-08 16:44:07 +00:00
chomp(
my $content = do { local $/; <$fh> }
);
2020-01-08 14:41:49 +00:00
close $fh;
if ($? == 0 and $content eq "$options->{nativearch}: ok") {
$noemu = 1;
}
}
# four different outcomes, depending on whether arch-test
# succeeded with or without emulation
#
# withemu | noemu |
# --------+-------+-----------------
# 0 | 0 | test why emu doesn't work and quit
# 0 | 1 | should never happen
# 1 | 0 | use qemu emulation
# 1 | 1 | don't use qemu emulation
if ($withemu == 0 and $noemu == 0) {
{
2020-01-08 16:44:07 +00:00
open my $fh, '<', '/proc/filesystems'
or error "failed to open /proc/filesystems: $!";
2020-01-09 07:39:40 +00:00
unless (grep { /^nodev\tbinfmt_misc$/ } (<$fh>)) {
warning "binfmt_misc not found in /proc/filesystems --"
. " is the module loaded?";
2020-01-08 14:41:49 +00:00
}
close $fh;
}
{
2020-01-08 16:44:07 +00:00
open my $fh, '<', '/proc/mounts'
or error "failed to open /proc/mounts: $!";
unless (
2020-01-09 07:39:40 +00:00
grep {
/^binfmt_misc\s+
\/proc\/sys\/fs\/binfmt_misc\s+
binfmt_misc\s+/x
2020-01-09 07:39:40 +00:00
} (<$fh>)
2020-01-08 16:44:07 +00:00
) {
warning "binfmt_misc not found in /proc/mounts -- not"
. " mounted?";
2020-01-08 14:41:49 +00:00
}
close $fh;
}
{
2020-01-08 16:44:07 +00:00
if (!exists $deb2qemu->{ $options->{nativearch} }) {
warning "no mapping from $options->{nativearch} to"
. " qemu-user binary";
} elsif (
system('/usr/sbin/update-binfmts --version>/dev/null')
!= 0) {
warning "cannot find /usr/sbin/update-binfmts";
2020-01-08 14:41:49 +00:00
} else {
2020-01-08 16:44:07 +00:00
my $binfmt_identifier
= 'qemu-' . $deb2qemu->{ $options->{nativearch} };
open my $fh, '-|', '/usr/sbin/update-binfmts',
'--display',
$binfmt_identifier // error "failed to fork(): $!";
chomp(
my $binfmts = do { local $/; <$fh> }
);
2020-01-08 14:41:49 +00:00
close $fh;
if ($? != 0 || $binfmts eq '') {
warning "$binfmt_identifier is not a supported"
. " binfmt name";
2020-01-08 14:41:49 +00:00
}
}
}
error "$options->{nativearch} can neither be executed natively"
. " nor via qemu user emulation with binfmt_misc";
2020-01-08 14:41:49 +00:00
} elsif ($withemu == 0 and $noemu == 1) {
error "arch-test succeeded without emu but not with emu";
} elsif ($withemu == 1 and $noemu == 0) {
info "$options->{nativearch} cannot be executed, falling back"
. " to qemu-user";
2020-01-08 16:44:07 +00:00
if (!exists $deb2qemu->{ $options->{nativearch} }) {
error "no mapping from $options->{nativearch} to qemu-user"
. " binary";
2020-01-08 14:41:49 +00:00
}
2020-01-08 16:44:07 +00:00
$options->{qemu} = $deb2qemu->{ $options->{nativearch} };
2020-01-08 14:41:49 +00:00
} elsif ($withemu == 1 and $noemu == 1) {
info "$options->{nativearch} is different from $hostarch but"
. " can be executed natively";
2020-01-08 14:41:49 +00:00
} else {
error "logic error";
}
} else {
info "chroot architecture $options->{nativearch} is equal to the"
. " host's architecture";
2020-01-08 14:41:49 +00:00
}
2018-09-18 09:20:24 +00:00
}
{
2020-01-08 14:41:49 +00:00
my $suite;
if (scalar @ARGV > 0) {
$suite = shift @ARGV;
if (scalar @ARGV > 0) {
$options->{target} = shift @ARGV;
} else {
$options->{target} = '-';
}
} else {
2020-01-08 16:44:07 +00:00
info
"No SUITE specified, expecting sources.list on standard input";
2020-01-08 14:41:49 +00:00
$options->{target} = '-';
}
my $sourceslists = [];
2020-01-08 16:44:07 +00:00
if (!defined $suite) {
2020-01-08 14:41:49 +00:00
# If no suite was specified, then the whole sources.list has to
# come from standard input
info "Reading sources.list from standard input...";
my $content = do {
local $/;
## no critic (InputOutput::ProhibitExplicitStdin)
<STDIN>;
};
my $type = guess_sources_format($content);
if (!defined $type
|| ($type ne "deb822" and $type ne "one-line")) {
error "cannot determine sources.list format";
}
push @{$sourceslists},
{
type => $type,
fname => undef,
content => $content,
};
2020-01-08 14:41:49 +00:00
} else {
my @components = ();
2020-01-08 16:44:07 +00:00
foreach my $comp (@{ $options->{components} }) {
2020-01-08 14:41:49 +00:00
my @comps = split /[,\s]+/, $comp;
foreach my $c (@comps) {
# strip leading and trailing whitespace
$c =~ s/^\s+|\s+$//g;
# skip if the remainder is an empty string
if ($c eq "") {
next;
}
# do not append component if it's already in the list
2020-01-08 16:44:07 +00:00
if (any { $_ eq $c } @components) {
2020-01-08 14:41:49 +00:00
next;
}
push @components, $c;
}
}
my $compstr = join " ", @components;
# if the currently selected apt keyrings do not contain the
# necessary key material for the chosen suite, then attempt adding
# a signed-by option
my $signedby = '';
{
# try to guess the right keyring path for the given suite
my $debianvendor;
my $ubuntuvendor;
eval {
require Dpkg::Vendor::Debian;
require Dpkg::Vendor::Ubuntu;
$debianvendor = Dpkg::Vendor::Debian->new();
$ubuntuvendor = Dpkg::Vendor::Ubuntu->new();
};
2020-01-08 14:41:49 +00:00
my $keyring;
2020-01-08 16:44:07 +00:00
if (
any { $_ eq $suite } (
'potato', 'woody', 'sarge', 'etch',
'lenny', 'squeeze', 'wheezy'
)
) {
if (defined $debianvendor) {
$keyring = $debianvendor->run_hook(
'archive-keyrings-historic');
} else {
$keyring
= '/usr/share/keyrings/'
. 'debian-archive-removed-keys.gpg';
}
2020-01-08 16:44:07 +00:00
} elsif (
any { $_ eq $suite }
('aequorea', 'bartholomea', 'chromodoris', 'dasyatis')
) {
$keyring
= '/usr/share/keyrings/tanglu-archive-keyring.gpg';
} elsif (
any { $_ eq $suite }
('kali-dev', 'kali-rolling', 'kali-bleeding-edge')
) {
2020-01-08 14:41:49 +00:00
$keyring = '/usr/share/keyrings/kali-archive-keyring.gpg';
2020-01-08 16:44:07 +00:00
} elsif (
any { $_ eq $suite } (
'trusty', 'xenial', 'zesty', 'artful', 'bionic',
'cosmic'
)
) {
if (defined $ubuntuvendor) {
$keyring = $ubuntuvendor->run_hook('archive-keyrings');
} else {
$keyring
= '/usr/share/keyrings/'
. 'ubuntu-archive-keyring.gpg';
}
2020-01-08 16:44:07 +00:00
} elsif (
any { $_ eq $suite } (
'unstable', 'stable', 'oldstable', 'jessie',
'stretch', 'buster', 'bullseye', 'bookworm'
)
) {
if (defined $debianvendor) {
$keyring = $debianvendor->run_hook('archive-keyrings');
} else {
$keyring
= '/usr/share/keyrings/'
. 'debian-archive-keyring.gpg';
}
} else {
last;
2020-01-08 14:41:49 +00:00
}
2020-01-08 14:41:49 +00:00
            # we can only check if we need the signed-by entry if the
            # automatically chosen keyring exists
if (!defined $keyring || !-e $keyring) {
last;
}
# we can only check key material if gpg is installed
my $gpghome = tempdir(
"mmdebstrap.gpghome.XXXXXXXXXXXX",
TMPDIR => 1,
CLEANUP => 1
);
my @gpgcmd = (
'gpg', '--quiet',
'--ignore-time-conflict', '--no-options',
'--no-default-keyring', '--homedir',
$gpghome, '--no-auto-check-trustdb',
'--trust-model', 'always'
);
2020-01-22 22:30:56 +00:00
my ($ret, $message);
{
2020-01-22 22:30:56 +00:00
my $fh;
{
# change warning handler to prevent message
# Can't exec "gpg": No such file or directory
local $SIG{__WARN__} = sub { $message = shift; };
$ret = open $fh, '-|', @gpgcmd, '--version';
}
# we only want to check if the gpg command exists
close $fh;
}
if ($? != 0 || !defined $ret || defined $message) {
info "gpg --version failed: cannot determine the right"
. " signed-by value";
last;
}
# find all the fingerprints of the keys apt currently
# knows about
my @keyringopts = ();
opendir my $dh, "$options->{apttrustedparts}"
or error "cannot read $options->{apttrustedparts}";
while (my $filename = readdir $dh) {
if ($filename !~ /\.(asc|gpg)$/) {
next;
2020-01-08 14:41:49 +00:00
}
push @keyringopts, '--keyring',
"$options->{apttrustedparts}/$filename";
}
if (-e $options->{apttrusted}) {
push @keyringopts, '--keyring', $options->{apttrusted};
}
my @aptfingerprints = ();
if (scalar @keyringopts == 0) {
$signedby = " [signed-by=\"$keyring\"]";
last;
}
2020-01-22 22:30:56 +00:00
{
open my $fh, '-|', @gpgcmd, @keyringopts, '--with-colons',
'--list-keys' // error "failed to fork(): $!";
while (my $line = <$fh>) {
if ($line !~ /^fpr:::::::::([^:]+):/) {
next;
}
push @aptfingerprints, $1;
2020-01-08 14:41:49 +00:00
}
2020-01-22 22:30:56 +00:00
close $fh;
}
if ($? != 0) {
error "gpg failed";
}
if (scalar @aptfingerprints == 0) {
$signedby = " [signed-by=\"$keyring\"]";
last;
}
# check if all fingerprints from the keyring that we guessed
# are known by apt and only add signed-by option if that's not
# the case
my @suitefingerprints = ();
2020-01-22 22:30:56 +00:00
{
open my $fh, '-|', @gpgcmd, '--keyring', $keyring,
'--with-colons',
'--list-keys' // error "failed to fork(): $!";
while (my $line = <$fh>) {
if ($line !~ /^fpr:::::::::([^:]+):/) {
next;
}
# if this fingerprint is not known by apt, then we need
#to add the signed-by option
if (none { $_ eq $1 } @aptfingerprints) {
$signedby = " [signed-by=\"$keyring\"]";
last;
}
2020-01-08 14:41:49 +00:00
}
2020-01-22 22:30:56 +00:00
close $fh;
2020-01-08 14:41:49 +00:00
}
if ($? != 0) {
error "gpg failed";
}
2020-01-08 14:41:49 +00:00
}
if (scalar @ARGV > 0) {
for my $arg (@ARGV) {
if ($arg eq '-') {
info 'Reading sources.list from standard input...';
my $content = do {
local $/;
## no critic (InputOutput::ProhibitExplicitStdin)
<STDIN>;
};
my $type = guess_sources_format($content);
if (!defined $type
|| ($type ne 'deb822' and $type ne 'one-line')) {
error "cannot determine sources.list format";
}
# if last entry is of same type and without filename,
# then append
if ( scalar @{$sourceslists} > 0
&& $sourceslists->[-1]{type} eq $type
&& !defined $sourceslists->[-1]{fname}) {
$sourceslists->[-1]{content}
.= ($type eq 'one-line' ? "\n" : "\n\n")
. $content;
} else {
push @{$sourceslists},
{
type => $type,
fname => undef,
content => $content,
};
}
2020-01-08 14:41:49 +00:00
} elsif ($arg =~ /^deb(-src)? /) {
my $content = "$arg\n";
# if last entry is of same type and without filename,
# then append
if ( scalar @{$sourceslists} > 0
&& $sourceslists->[-1]{type} eq 'one-line'
&& !defined $sourceslists->[-1]{fname}) {
$sourceslists->[-1]{content} .= "\n" . $content;
} else {
push @{$sourceslists},
{
type => 'one-line',
fname => undef,
content => $content,
};
}
2020-01-08 14:41:49 +00:00
} elsif ($arg =~ /:\/\//) {
my $content = "deb$signedby $arg $suite $compstr\n";
# if last entry is of same type and without filename,
# then append
if ( scalar @{$sourceslists} > 0
&& $sourceslists->[-1]{type} eq 'one-line'
&& !defined $sourceslists->[-1]{fname}) {
$sourceslists->[-1]{content} .= "\n" . $content;
} else {
push @{$sourceslists},
{
type => 'one-line',
fname => undef,
content => $content,
};
}
2020-01-08 14:41:49 +00:00
} elsif (-f $arg) {
my $content = '';
2020-01-08 14:41:49 +00:00
open my $fh, '<', $arg or error "cannot open $arg: $!";
while (my $line = <$fh>) {
$content .= $line;
2020-01-08 14:41:49 +00:00
}
close $fh;
my $type = undef;
if ($arg =~ /\.list$/) {
$type = 'one-line';
} elsif ($arg =~ /\.sources$/) {
$type = 'deb822';
} else {
$type = guess_sources_format($content);
}
if (!defined $type
|| ($type ne 'deb822' and $type ne 'one-line')) {
error "cannot determine sources.list format";
}
push @{$sourceslists},
{
type => $type,
fname => basename($arg),
content => $content,
};
2020-01-08 14:41:49 +00:00
} else {
error "invalid mirror: $arg";
}
}
} else {
2020-01-08 16:44:07 +00:00
my @debstable = (
'oldoldstable', 'oldstable', 'stable', 'jessie',
'stretch', 'buster', 'bullseye', 'bookworm'
);
my @ubuntustable
= ('trusty', 'xenial', 'zesty', 'artful', 'bionic',
'cosmic');
my @tanglustable
= ('aequorea', 'bartholomea', 'chromodoris', 'dasyatis');
2020-01-08 14:41:49 +00:00
my @kali = ('kali-dev', 'kali-rolling', 'kali-bleeding-edge');
2020-01-08 16:44:07 +00:00
my $mirror = 'http://deb.debian.org/debian';
2020-01-08 14:41:49 +00:00
my $secmirror = 'http://security.debian.org/debian-security';
2020-01-08 16:44:07 +00:00
if (any { $_ eq $suite } @ubuntustable) {
if (
any { $_ eq $options->{nativearch} }
('amd64', 'i386')
) {
2020-01-08 16:44:07 +00:00
$mirror = 'http://archive.ubuntu.com/ubuntu';
2020-01-08 14:41:49 +00:00
$secmirror = 'http://security.ubuntu.com/ubuntu';
} else {
2020-01-08 16:44:07 +00:00
$mirror = 'http://ports.ubuntu.com/ubuntu-ports';
2020-01-08 14:41:49 +00:00
$secmirror = 'http://ports.ubuntu.com/ubuntu-ports';
}
2020-01-08 16:44:07 +00:00
} elsif (any { $_ eq $suite } @tanglustable) {
$mirror = 'http://archive.tanglu.org/tanglu';
} elsif (any { $_ eq $suite } @kali) {
$mirror = 'https://http.kali.org/kali';
2020-01-08 14:41:49 +00:00
}
my $sourceslist = '';
2020-01-08 14:41:49 +00:00
$sourceslist .= "deb$signedby $mirror $suite $compstr\n";
2020-01-08 16:44:07 +00:00
if (any { $_ eq $suite } @ubuntustable) {
$sourceslist
.= "deb$signedby $mirror $suite-updates $compstr\n";
$sourceslist
.= "deb$signedby $secmirror $suite-security $compstr\n";
} elsif (any { $_ eq $suite } @tanglustable) {
$sourceslist
.= "deb$signedby $secmirror $suite-updates $compstr\n";
} elsif (any { $_ eq $suite } @debstable) {
$sourceslist
.= "deb$signedby $mirror $suite-updates $compstr\n";
if (
any { $_ eq $suite } (
'oldoldstable', 'oldstable',
'stable', 'jessie',
'stretch', 'buster'
)
) {
$sourceslist
.= "deb$signedby $secmirror $suite/updates"
. " $compstr\n";
2020-01-08 14:41:49 +00:00
} else {
# starting from bullseye use
# https://lists.debian.org/87r26wqr2a.fsf@43-1.org
$sourceslist
.= "deb$signedby $secmirror $suite-security"
. " $compstr\n";
2020-01-08 14:41:49 +00:00
}
}
push @{$sourceslists},
{
type => 'one-line',
fname => undef,
content => $sourceslist,
};
2020-01-08 14:41:49 +00:00
}
}
if (scalar @{$sourceslists} == 0) {
2020-01-08 14:41:49 +00:00
error "empty apt sources.list";
}
debug("sources list entries:");
for my $list (@{$sourceslists}) {
if (defined $list->{fname}) {
debug("fname: $list->{fname}");
}
debug("type: $list->{type}");
debug("content:");
for my $line (split "\n", $list->{content}) {
debug(" $line");
}
}
$options->{sourceslists} = $sourceslists;
2018-09-18 09:20:24 +00:00
}
if ($options->{target} ne '-') {
2020-01-08 14:41:49 +00:00
my $abs_path = abs_path($options->{target});
if (!defined $abs_path) {
error "unable to get absolute path of target directory"
. " $options->{target}";
2020-01-08 14:41:49 +00:00
}
$options->{target} = $abs_path;
2018-09-18 09:20:24 +00:00
}
if ($options->{target} eq '/') {
2020-01-08 14:41:49 +00:00
error "refusing to use the filesystem root as output directory";
2018-09-18 09:20:24 +00:00
}
my $tar_compressor = get_tar_compressor($options->{target});
2018-09-18 09:20:24 +00:00
# figure out the right format
if ($format eq 'auto') {
if ($options->{target} ne '-' and -d $options->{target}) {
$format = 'directory';
} elsif (
defined $tar_compressor
or $options->{target} =~ /\.tar$/
or $options->{target} eq '-'
or -p $options->{target} # named pipe (fifo)
or -c $options->{target} # character special like /dev/null
) {
$format = 'tar';
# check if the compressor is installed
if (defined $tar_compressor) {
my $pid = fork() // error "fork() failed: $!";
if ($pid == 0) {
open(STDOUT, '>', '/dev/null')
or error "cannot open /dev/null for writing: $!";
open(STDIN, '<', '/dev/null')
or error "cannot open /dev/null for reading: $!";
exec { $tar_compressor->[0] } @{$tar_compressor}
or error("cannot exec "
. (join " ", @{$tar_compressor})
. ": $!");
}
waitpid $pid, 0;
if ($? != 0) {
error("failed to start " . (join " ", @{$tar_compressor}));
}
}
} elsif ($options->{target} =~ /\.(squashfs|sqfs)$/) {
$format = 'squashfs';
# check if tar2sqfs is installed
2020-01-08 14:41:49 +00:00
my $pid = fork() // error "fork() failed: $!";
if ($pid == 0) {
2020-01-08 16:44:07 +00:00
open(STDOUT, '>', '/dev/null')
or error "cannot open /dev/null for writing: $!";
open(STDIN, '<', '/dev/null')
or error "cannot open /dev/null for reading: $!";
exec('tar2sqfs', '--version')
or error("cannot exec tar2sqfs --version: $!");
2020-01-08 14:41:49 +00:00
}
waitpid $pid, 0;
if ($? != 0) {
error("failed to start tar2sqfs --version");
2020-01-08 14:41:49 +00:00
}
} elsif ($options->{target} =~ /\.ext2$/) {
    $format = 'ext2';
    # check if the installed version of genext2fs supports tarballs on
    # stdin
    (undef, my $filename)
        = tempfile("mmdebstrap.ext2.XXXXXXXXXXXX", OPEN => 0);
    # check open() with low-precedence "or": appending
    # "// error ..." to the last list element would bind // to
    # $filename instead of to open() and leave a failed open
    # unchecked
    open my $fh, '|-', 'genext2fs', '-B', '1024', '-b', '8', '-N',
        '11', $filename
        or error "failed to fork(): $!";
    # write 10240 null-bytes to genext2fs -- this represents an empty
    # tar archive
    print $fh ("\0" x 10240)
        or error "cannot write to genext2fs process";
    # close flushes the pipe and reaps genext2fs; its exit status
    # lands in $? and must be saved before unlink can clobber it
    close $fh;
    my $exitstatus = $?;
    # "unlink $filename // die" would parse as unlink($filename // die),
    # never checking unlink itself -- use "or error" (error, not die,
    # for consistency with the rest of the file)
    unlink $filename or error "cannot unlink $filename: $!";
    if ($exitstatus != 0) {
        error "genext2fs failed with exit status: $exitstatus";
    }
} else {
$format = 'directory';
2020-01-08 14:41:49 +00:00
}
info "automatically chosen format: $format";
2018-09-18 09:20:24 +00:00
}
# only a tarball can be streamed; the squashfs, ext2 and directory
# formats need a real (seekable) target, so writing to "-" (stdout)
# is rejected for them
if ($options->{target} eq '-' and $format ne 'tar') {
error "the $format format is unable to write to standard output";
}
if (any { $_ eq $format } ('tar', 'squashfs', 'ext2')) {
2020-01-08 16:44:07 +00:00
if ( any { $_ eq $options->{variant} } ('extract', 'custom')
and any { $_ eq $options->{mode} } ('fakechroot', 'proot')) {
info "creating a tarball or squashfs image or ext2 image in"
. " fakechroot mode or proot mode might fail in extract and"
. " custom variants because there might be no tar inside the"
. " chroot";
2020-01-08 14:41:49 +00:00
}
# try to fail early if target tarball or squashfs image cannot be
# opened for writing
if ($options->{target} ne '-') {
2020-01-10 10:44:15 +00:00
if ($options->{dryrun}) {
if (-e $options->{target}) {
info "not overwriting $options->{target} because in"
. " dry-run mode";
}
} else {
open my $fh, '>', $options->{target}
or error "cannot open $options->{target} for writing: $!";
close $fh;
}
2020-01-08 14:41:49 +00:00
}
# since the output is a tarball, we create the rootfs in a temporary
# directory
$options->{root} = tempdir('mmdebstrap.XXXXXXXXXX', TMPDIR => 1);
2020-01-08 14:41:49 +00:00
info "using $options->{root} as tempdir";
# in unshare and root mode, other users than the current user need to
# access the rootfs, most prominently, the _apt user. Thus, make the
# temporary directory world readable.
if (any { $_ eq $options->{mode} } ('unshare', 'root')) {
chmod 0755, $options->{root} or error "cannot chmod root: $!";
}
} elsif ($format eq 'directory') {
2020-01-08 14:41:49 +00:00
# user does not seem to have specified a tarball as output, thus work
# directly in the supplied directory
$options->{root} = $options->{target};
if (-e $options->{root}) {
2020-01-08 16:44:07 +00:00
if (!-d $options->{root}) {
2020-01-08 14:41:49 +00:00
error "$options->{root} exists and is not a directory";
}
# check if the directory is empty or contains nothing more than an
# empty lost+found directory. The latter exists on freshly created
# ext3 and ext4 partitions.
# rationale for requiring an empty directory:
# https://bugs.debian.org/833525
2020-01-08 16:44:07 +00:00
opendir(my $dh, $options->{root})
or error "Can't opendir($options->{root}): $!";
2020-01-08 14:41:49 +00:00
while (my $entry = readdir $dh) {
# skip the "." and ".." entries
next if $entry eq ".";
next if $entry eq "..";
# if the entry is a directory named "lost+found" then skip it
# if it's empty
if ($entry eq "lost+found" and -d "$options->{root}/$entry") {
opendir(my $dh2, "$options->{root}/$entry");
# Attempt reading the directory thrice. If the third time
# succeeds, then it has more entries than just "." and ".."
# and must thus not be empty.
readdir $dh2;
readdir $dh2;
# rationale for requiring an empty directory:
# https://bugs.debian.org/833525
if (readdir $dh2) {
error "$options->{root} contains a non-empty"
. " lost+found directory";
2020-01-08 14:41:49 +00:00
}
closedir($dh2);
} else {
error "$options->{root} is not empty";
}
}
closedir($dh);
} else {
2020-01-08 16:44:07 +00:00
my $num_created = make_path "$options->{root}",
{ error => \my $err };
2020-01-08 14:41:49 +00:00
if ($err && @$err) {
2020-01-08 16:44:07 +00:00
error(join "; ",
(map { "cannot create " . (join ": ", %{$_}) } @$err));
2020-01-08 14:41:49 +00:00
} elsif ($num_created == 0) {
error "cannot create $options->{root}";
}
}
} else {
error "unknown format: $format";
2018-09-18 09:20:24 +00:00
}
# check for double quotes because apt doesn't allow to escape them and
# thus paths with double quotes are invalid in the apt config
if ($options->{root} =~ /"/) {
2020-01-08 14:41:49 +00:00
error "apt cannot handle paths with double quotes";
}
2018-09-18 09:20:24 +00:00
my @idmap;
# for unshare mode the rootfs directory has to have appropriate
# permissions
if ($options->{mode} eq 'unshare') {
2020-01-08 14:41:49 +00:00
@idmap = read_subuid_subgid;
# sanity check
if ( scalar(@idmap) != 2
|| $idmap[0][0] ne 'u'
|| $idmap[1][0] ne 'g') {
2020-01-08 14:41:49 +00:00
error "invalid idmap";
}
2018-09-18 09:20:24 +00:00
2020-01-08 16:44:07 +00:00
my $outer_gid = $REAL_GROUP_ID + 0;
2018-09-18 09:20:24 +00:00
2020-01-09 07:39:40 +00:00
my $pid = get_unshare_cmd(
sub { chown 1, 1, $options->{root} },
[
['u', '0', $REAL_USER_ID, '1'],
['g', '0', $outer_gid, '1'],
['u', '1', $idmap[0][2], '1'],
['g', '1', $idmap[1][2], '1']]);
2020-01-08 14:41:49 +00:00
waitpid $pid, 0;
$? == 0 or error "chown failed";
2018-09-18 09:20:24 +00:00
}
# figure out whether we have mknod
$options->{havemknod} = 0;
if ($options->{mode} eq 'unshare') {
2020-01-09 07:39:40 +00:00
my $pid = get_unshare_cmd(
sub {
$options->{havemknod} = havemknod($options->{root});
},
\@idmap
);
2020-01-08 14:41:49 +00:00
waitpid $pid, 0;
$? == 0 or error "havemknod failed";
2020-01-08 16:44:07 +00:00
} elsif (
any { $_ eq $options->{mode} }
('root', 'fakechroot', 'proot', 'chrootless')
) {
2020-01-08 14:41:49 +00:00
$options->{havemknod} = havemknod($options->{root});
} else {
2020-01-08 14:41:49 +00:00
error "unknown mode: $options->{mode}";
2018-09-18 09:20:24 +00:00
}
my $devtar = '';
# We always craft the /dev entries ourselves if a tarball is to be created
if (any { $_ eq $format } ('tar', 'squashfs', 'ext2')) {
2020-01-08 14:41:49 +00:00
foreach my $file (@devfiles) {
2020-01-08 16:44:07 +00:00
my ($fname, $mode, $type, $linkname, $devmajor, $devminor)
= @{$file};
my $entry = pack(
'a100 a8 a8 a8 a12 a12 A8 a1 a100 a8 a32 a32 a8 a8 a155 x12',
2020-01-08 14:41:49 +00:00
$fname,
2020-01-08 16:44:07 +00:00
sprintf('%07o', $mode),
sprintf('%07o', 0), # uid
sprintf('%07o', 0), # gid
sprintf('%011o', 0), # size
2020-01-08 14:41:49 +00:00
sprintf('%011o', $mtime),
2020-01-08 16:44:07 +00:00
'', # checksum
2020-01-08 14:41:49 +00:00
$type,
$linkname,
"ustar ",
2020-01-08 16:44:07 +00:00
'', # username
'', # groupname
2020-01-08 14:41:49 +00:00
defined($devmajor) ? sprintf('%07o', $devmajor) : '',
defined($devminor) ? sprintf('%07o', $devminor) : '',
2020-01-08 16:44:07 +00:00
'', # prefix
2020-01-08 14:41:49 +00:00
);
# compute and insert checksum
2020-01-08 16:44:07 +00:00
substr($entry, 148, 7)
= sprintf("%06o\0", unpack("%16C*", $entry));
2020-01-08 14:41:49 +00:00
$devtar .= $entry;
}
} elsif ($format eq 'directory') {
# nothing to do
} else {
error "unknown format: $format";
2018-09-18 09:20:24 +00:00
}
my $numblocks = 0;
my $exitstatus = 0;
2020-01-08 16:44:07 +00:00
my @taropts = (
'--sort=name',
"--mtime=\@$mtime",
'--clamp-mtime',
'--numeric-owner',
'--one-file-system',
'--xattrs',
'--format=pax',
'--pax-option=exthdr.name=%d/PaxHeaders/%f,delete=atime,delete=ctime',
'-c',
'--exclude=./dev'
2020-01-08 16:44:07 +00:00
);
# disable signals so that we can fork and change behaviour of the signal
# handler in the parent and child without getting interrupted
my $sigset = POSIX::SigSet->new(SIGINT, SIGHUP, SIGPIPE, SIGTERM);
POSIX::sigprocmask(SIG_BLOCK, $sigset) or error "Can't block signals: $!";
2018-09-18 09:20:24 +00:00
my $pid;
# a pipe to transfer the final tarball from the child to the parent
2018-09-18 09:20:24 +00:00
pipe my $rfh, my $wfh;
# instead of two pipe calls, creating four file handles, we use socketpair
2020-01-08 16:44:07 +00:00
socketpair my $childsock, my $parentsock, AF_UNIX, SOCK_STREAM, PF_UNSPEC
or error "socketpair failed: $!";
$options->{hooksock} = $childsock;
2018-09-18 09:20:24 +00:00
if ($options->{mode} eq 'unshare') {
2020-01-09 07:39:40 +00:00
$pid = get_unshare_cmd(
sub {
# child
local $SIG{'INT'} = 'DEFAULT';
local $SIG{'HUP'} = 'DEFAULT';
local $SIG{'PIPE'} = 'DEFAULT';
local $SIG{'TERM'} = 'DEFAULT';
2020-01-09 07:39:40 +00:00
# unblock all delayed signals (and possibly handle them)
POSIX::sigprocmask(SIG_UNBLOCK, $sigset)
or error "Can't unblock signals: $!";
2020-01-09 07:39:40 +00:00
close $rfh;
close $parentsock;
open(STDOUT, '>&', STDERR) or error "cannot open STDOUT: $!";
2018-09-18 09:20:24 +00:00
2020-01-09 07:39:40 +00:00
setup($options);
2018-09-18 09:20:24 +00:00
if (!$options->{dryrun} && $format eq 'ext2') {
my $numblocks = approx_disk_usage($options->{root});
debug "sending nblks";
print $childsock (
pack("n", length "$numblocks") . "nblks$numblocks");
$childsock->flush();
debug "waiting for okthx";
checkokthx $childsock;
}
2020-01-09 07:39:40 +00:00
print $childsock (pack('n', 0) . 'adios');
$childsock->flush();
2020-01-09 07:39:40 +00:00
close $childsock;
if ($options->{dryrun}) {
info "simulate creating tarball...";
} elsif (any { $_ eq $format } ('tar', 'squashfs', 'ext2')) {
2020-01-09 07:39:40 +00:00
info "creating tarball...";
2018-09-23 20:27:49 +00:00
2020-01-09 07:39:40 +00:00
# redirect tar output to the writing end of the pipe so
# that the parent process can capture the output
open(STDOUT, '>&', $wfh) or error "cannot open STDOUT: $!";
2018-09-18 09:20:24 +00:00
2020-01-09 07:39:40 +00:00
# Add ./dev as the first entries of the tar file.
# We cannot add them after calling tar, because there is no
# way to prevent tar from writing NULL entries at the end.
print $devtar;
2018-09-18 09:20:24 +00:00
2020-01-09 07:39:40 +00:00
# pack everything except ./dev
0 == system('tar', @taropts, '-C', $options->{root}, '.')
or error "tar failed: $?";
2018-09-18 09:20:24 +00:00
2020-01-09 07:39:40 +00:00
info "done";
} elsif ($format eq 'directory') {
# nothing to do
} else {
error "unknown format: $format";
2020-01-09 07:39:40 +00:00
}
2018-09-18 09:20:24 +00:00
2020-01-09 07:39:40 +00:00
exit 0;
},
\@idmap
);
2020-01-08 16:44:07 +00:00
} elsif (
any { $_ eq $options->{mode} }
('root', 'fakechroot', 'proot', 'chrootless')
) {
2020-01-08 14:41:49 +00:00
$pid = fork() // error "fork() failed: $!";
if ($pid == 0) {
2020-01-09 07:39:40 +00:00
local $SIG{'INT'} = 'DEFAULT';
local $SIG{'HUP'} = 'DEFAULT';
local $SIG{'PIPE'} = 'DEFAULT';
local $SIG{'TERM'} = 'DEFAULT';
2020-01-08 14:41:49 +00:00
# unblock all delayed signals (and possibly handle them)
2020-01-08 16:44:07 +00:00
POSIX::sigprocmask(SIG_UNBLOCK, $sigset)
or error "Can't unblock signals: $!";
2020-01-08 14:41:49 +00:00
close $rfh;
close $parentsock;
open(STDOUT, '>&', STDERR) or error "cannot open STDOUT: $!";
setup($options);
if (!$options->{dryrun} && $format eq 'ext2') {
my $numblocks = approx_disk_usage($options->{root});
print $childsock (
pack("n", length "$numblocks") . "nblks$numblocks");
$childsock->flush();
debug "waiting for okthx";
checkokthx $childsock;
}
2020-01-08 14:41:49 +00:00
print $childsock (pack('n', 0) . 'adios');
$childsock->flush();
close $childsock;
2020-01-10 10:44:15 +00:00
if ($options->{dryrun}) {
info "simulate creating tarball...";
} elsif (any { $_ eq $format } ('tar', 'squashfs', 'ext2')) {
2020-01-08 14:41:49 +00:00
info "creating tarball...";
# redirect tar output to the writing end of the pipe so that
# the parent process can capture the output
2020-01-08 14:41:49 +00:00
open(STDOUT, '>&', $wfh) or error "cannot open STDOUT: $!";
# Add ./dev as the first entries of the tar file.
# We cannot add them after calling tar, because there is no way
# to prevent tar from writing NULL entries at the end.
2020-01-08 14:41:49 +00:00
print $devtar;
if ($options->{mode} eq 'fakechroot') {
# Fakechroot requires tar to run inside the chroot or
# otherwise absolute symlinks will include the path to the
# root directory
2020-01-08 16:44:07 +00:00
0 == system('/usr/sbin/chroot', $options->{root}, 'tar',
@taropts, '-C', '/', '.')
or error "tar failed: $?";
2020-01-08 14:41:49 +00:00
} elsif ($options->{mode} eq 'proot') {
# proot requires tar to run inside proot or otherwise
# permissions will be completely off
my @qemuopt = ();
if (defined $options->{qemu}) {
push @qemuopt, "--qemu=qemu-$options->{qemu}";
2020-01-08 16:44:07 +00:00
push @taropts, "--exclude=./host-rootfs";
2020-01-08 14:41:49 +00:00
}
2020-01-08 16:44:07 +00:00
0 == system('proot', '--root-id',
"--rootfs=$options->{root}", '--cwd=/', @qemuopt,
'tar', @taropts, '-C', '/', '.')
or error "tar failed: $?";
} elsif (
any { $_ eq $options->{mode} }
('root', 'chrootless')
) {
2020-01-08 14:41:49 +00:00
# If the chroot directory is not owned by the root user,
# then we assume that no measure was taken to fake root
# permissions. Since the final tarball should contain
# entries with root ownership, we instruct tar to do so.
my @owneropts = ();
if ((stat $options->{root})[4] != 0) {
2020-01-08 16:44:07 +00:00
push @owneropts, '--owner=0', '--group=0',
'--numeric-owner';
2020-01-08 14:41:49 +00:00
}
2020-01-08 16:44:07 +00:00
0 == system('tar', @taropts, @owneropts, '-C',
$options->{root}, '.')
or error "tar failed: $?";
2020-01-08 14:41:49 +00:00
} else {
error "unknown mode: $options->{mode}";
}
info "done";
} elsif ($format eq 'directory') {
# nothing to do
} else {
error "unknown format: $format";
2020-01-08 14:41:49 +00:00
}
exit 0;
}
} else {
2020-01-08 14:41:49 +00:00
error "unknown mode: $options->{mode}";
2018-09-18 09:20:24 +00:00
}
# parent
2020-01-08 16:44:07 +00:00
my $got_signal = 0;
my $waiting_for = "setup";
2020-01-08 16:44:07 +00:00
my $ignore = sub {
2020-01-08 14:41:49 +00:00
$got_signal = shift;
info "main() received signal $got_signal: waiting for $waiting_for...";
};
2020-01-09 07:39:40 +00:00
local $SIG{'INT'} = $ignore;
local $SIG{'HUP'} = $ignore;
local $SIG{'PIPE'} = $ignore;
local $SIG{'TERM'} = $ignore;
# unblock all delayed signals (and possibly handle them)
2020-01-08 16:44:07 +00:00
POSIX::sigprocmask(SIG_UNBLOCK, $sigset)
or error "Can't unblock signals: $!";
2018-09-18 09:20:24 +00:00
close $wfh;
close $childsock;
debug "starting to listen for hooks";
# handle special hook commands via parentsock
# we use eval() so that error() doesn't take this process down and
# thus leaves the setup() process without a parent
eval {
2020-01-08 14:41:49 +00:00
while (1) {
# get the next message
my $msg = "error";
my $len = -1;
{
debug "reading from parentsock";
2020-01-08 16:44:07 +00:00
my $ret = read($parentsock, my $buf, 2 + 5)
// error "cannot read from socket: $!";
2020-01-08 14:41:49 +00:00
debug "finished reading from parentsock";
if ($ret == 0) {
error "received eof on socket";
}
($len, $msg) = unpack("nA5", $buf);
}
if ($msg eq "adios") {
# setup finished, so we break out of the loop
if ($len != 0) {
error "expected no payload but got $len bytes";
}
last;
} elsif ($msg eq "openr") {
# handle the openr message
debug "received message: openr";
my $infile;
{
2020-01-08 16:44:07 +00:00
my $ret = read($parentsock, $infile, $len)
// error "cannot read from socket: $!";
2020-01-08 14:41:49 +00:00
if ($ret == 0) {
error "received eof on socket";
}
}
# make sure that the requested path exists outside the chroot
2020-01-08 16:44:07 +00:00
if (!-e $infile) {
2020-01-08 14:41:49 +00:00
error "$infile does not exist";
}
debug "sending okthx";
2020-01-08 16:44:07 +00:00
print $parentsock (pack("n", 0) . "okthx")
or error "cannot write to socket: $!";
2020-01-08 14:41:49 +00:00
$parentsock->flush();
2020-01-08 16:44:07 +00:00
open my $fh, '<', $infile
or error "failed to open $infile for reading: $!";
2020-01-08 14:41:49 +00:00
# read from the file and send as payload to the child process
while (1) {
# read from file
2020-01-08 16:44:07 +00:00
my $ret = read($fh, my $cont, 4096)
// error "cannot read from pipe: $!";
2020-01-08 14:41:49 +00:00
if ($ret == 0) { last; }
debug "sending write";
# send to child
print $parentsock pack("n", $ret) . "write" . $cont;
$parentsock->flush();
debug "waiting for okthx";
checkokthx $parentsock;
if ($ret < 4096) { last; }
}
# signal to the child process that we are done
debug "sending close";
print $parentsock pack("n", 0) . "close";
$parentsock->flush();
debug "waiting for okthx";
checkokthx $parentsock;
close $fh;
} elsif ($msg eq "openw") {
debug "received message: openw";
# payload is the output directory
my $outfile;
{
2020-01-08 16:44:07 +00:00
my $ret = read($parentsock, $outfile, $len)
// error "cannot read from socket: $!";
2020-01-08 14:41:49 +00:00
if ($ret == 0) {
error "received eof on socket";
}
}
# make sure that the directory exists
my $outdir = dirname($outfile);
if (-e $outdir) {
2020-01-08 16:44:07 +00:00
if (!-d $outdir) {
2020-01-08 14:41:49 +00:00
error "$outdir already exists but is not a directory";
}
} else {
2020-01-08 16:44:07 +00:00
my $num_created = make_path $outdir, { error => \my $err };
2020-01-08 14:41:49 +00:00
if ($err && @$err) {
2020-01-08 16:44:07 +00:00
error(
join "; ",
(
map { "cannot create " . (join ": ", %{$_}) }
@$err
));
2020-01-08 14:41:49 +00:00
} elsif ($num_created == 0) {
error "cannot create $outdir";
}
}
debug "sending okthx";
2020-01-08 16:44:07 +00:00
print $parentsock (pack("n", 0) . "okthx")
or error "cannot write to socket: $!";
2020-01-08 14:41:49 +00:00
$parentsock->flush();
# now we expect one or more "write" messages containing the
# tarball to write
2020-01-08 16:44:07 +00:00
open my $fh, '>', $outfile
or error "failed to open $outfile for writing: $!";
2020-01-08 14:41:49 +00:00
# handle "write" messages from the child process and feed
# their payload into the file handle until a "close" message
# is encountered
2020-01-08 16:44:07 +00:00
while (1) {
2020-01-08 14:41:49 +00:00
# receive the next message
2020-01-08 16:44:07 +00:00
my $ret = read($parentsock, my $buf, 2 + 5)
// error "cannot read from socket: $!";
2020-01-08 14:41:49 +00:00
if ($ret == 0) {
error "received eof on socket";
}
my ($len, $msg) = unpack("nA5", $buf);
debug "received message: $msg";
if ($msg eq "close") {
# finish the loop
if ($len != 0) {
error "expected no payload but got $len bytes";
}
debug "sending okthx";
2020-01-08 16:44:07 +00:00
print $parentsock (pack("n", 0) . "okthx")
or error "cannot write to socket: $!";
2020-01-08 14:41:49 +00:00
$parentsock->flush();
last;
} elsif ($msg ne "write") {
# we should not receive this message at this point
error "expected write but got: $msg";
}
# read the payload
my $content;
{
2020-01-08 16:44:07 +00:00
my $ret = read($parentsock, $content, $len)
// error "error cannot read from socket: $!";
2020-01-08 14:41:49 +00:00
if ($ret == 0) {
error "received eof on socket";
}
}
# write the payload to the file handle
2020-01-08 16:44:07 +00:00
print $fh $content
or error "cannot write to file handle: $!";
2020-01-08 14:41:49 +00:00
debug "sending okthx";
2020-01-08 16:44:07 +00:00
print $parentsock (pack("n", 0) . "okthx")
or error "cannot write to socket: $!";
2020-01-08 14:41:49 +00:00
$parentsock->flush();
}
close $fh;
2020-01-16 09:38:14 +00:00
} elsif (any { $_ eq $msg } ('mktar', 'mktac')) {
2020-01-08 14:41:49 +00:00
# handle the mktar message
2020-01-16 09:38:14 +00:00
debug "received message: $msg";
2020-01-08 14:41:49 +00:00
my $indir;
{
2020-01-08 16:44:07 +00:00
my $ret = read($parentsock, $indir, $len)
// error "cannot read from socket: $!";
2020-01-08 14:41:49 +00:00
if ($ret == 0) {
error "received eof on socket";
}
}
# make sure that the requested path exists outside the chroot
2020-01-08 16:44:07 +00:00
if (!-e $indir) {
2020-01-08 14:41:49 +00:00
error "$indir does not exist";
}
debug "sending okthx";
2020-01-08 16:44:07 +00:00
print $parentsock (pack("n", 0) . "okthx")
or error "cannot write to socket: $!";
2020-01-08 14:41:49 +00:00
$parentsock->flush();
# Open a tar process creating a tarfile of the instructed
# path. To emulate the behaviour of cp, change to the
# dirname of the requested path first (mktar); for mktac,
# pack the directory contents themselves ('.').
#
# Check open() with low-precedence "or": a trailing
# "// error ..." would parse as ('.' // error ...) inside the
# argument list and never check open() itself.
open my $fh, '-|', 'tar', '--numeric-owner', '--xattrs',
    '--format=pax',
    '--pax-option=exthdr.name=%d/PaxHeaders/%f,'
      . 'delete=atime,delete=ctime',
    '--directory',
    $msg eq 'mktar' ? dirname($indir) : $indir,
    '--create', '--file', '-',
    $msg eq 'mktar' ? basename($indir) : '.'
    or error "failed to fork(): $!";
2020-01-08 14:41:49 +00:00
# read from the tar process and send as payload to the child
# process
while (1) {
# read from tar
2020-01-08 16:44:07 +00:00
my $ret = read($fh, my $cont, 4096)
// error "cannot read from pipe: $!";
2020-01-08 14:41:49 +00:00
if ($ret == 0) { last; }
debug "sending write";
# send to child
print $parentsock pack("n", $ret) . "write" . $cont;
$parentsock->flush();
debug "waiting for okthx";
checkokthx $parentsock;
if ($ret < 4096) { last; }
}
# signal to the child process that we are done
debug "sending close";
print $parentsock pack("n", 0) . "close";
$parentsock->flush();
debug "waiting for okthx";
checkokthx $parentsock;
close $fh;
if ($? != 0) {
error "tar failed";
}
} elsif ($msg eq "untar") {
debug "received message: untar";
# payload is the output directory
my $outdir;
{
2020-01-08 16:44:07 +00:00
my $ret = read($parentsock, $outdir, $len)
// error "cannot read from socket: $!";
2020-01-08 14:41:49 +00:00
if ($ret == 0) {
error "received eof on socket";
}
}
# make sure that the directory exists
if (-e $outdir) {
2020-01-08 16:44:07 +00:00
if (!-d $outdir) {
2020-01-08 14:41:49 +00:00
error "$outdir already exists but is not a directory";
}
} else {
2020-01-08 16:44:07 +00:00
my $num_created = make_path $outdir, { error => \my $err };
2020-01-08 14:41:49 +00:00
if ($err && @$err) {
2020-01-08 16:44:07 +00:00
error(
join "; ",
(
map { "cannot create " . (join ": ", %{$_}) }
@$err
));
2020-01-08 14:41:49 +00:00
} elsif ($num_created == 0) {
error "cannot create $outdir";
}
}
debug "sending okthx";
2020-01-08 16:44:07 +00:00
print $parentsock (pack("n", 0) . "okthx")
or error "cannot write to socket: $!";
2020-01-08 14:41:49 +00:00
$parentsock->flush();
# now we expect one or more "write" messages containing the
# tarball to unpack
#
# check open() with low-precedence "or": appending
# "// error ..." would bind // to the literal '-' argument and
# leave a failed open unchecked
open my $fh, '|-', 'tar', '--numeric-owner', '--xattrs',
    '--xattrs-include=*', '--directory', $outdir,
    '--extract', '--file', '-'
    or error "failed to fork(): $!";
2020-01-08 14:41:49 +00:00
# handle "write" messages from the child process and feed
# their payload into the tar process until a "close" message
# is encountered
2020-01-08 16:44:07 +00:00
while (1) {
2020-01-08 14:41:49 +00:00
# receive the next message
2020-01-08 16:44:07 +00:00
my $ret = read($parentsock, my $buf, 2 + 5)
// error "cannot read from socket: $!";
2020-01-08 14:41:49 +00:00
if ($ret == 0) {
error "received eof on socket";
}
my ($len, $msg) = unpack("nA5", $buf);
debug "received message: $msg";
if ($msg eq "close") {
# finish the loop
if ($len != 0) {
error "expected no payload but got $len bytes";
}
debug "sending okthx";
2020-01-08 16:44:07 +00:00
print $parentsock (pack("n", 0) . "okthx")
or error "cannot write to socket: $!";
2020-01-08 14:41:49 +00:00
$parentsock->flush();
last;
} elsif ($msg ne "write") {
# we should not receive this message at this point
error "expected write but got: $msg";
}
# read the payload
my $content;
{
2020-01-08 16:44:07 +00:00
my $ret = read($parentsock, $content, $len)
// error "error cannot read from socket: $!";
2020-01-08 14:41:49 +00:00
if ($ret == 0) {
error "received eof on socket";
}
}
# write the payload to the tar process
2020-01-08 16:44:07 +00:00
print $fh $content
or error "cannot write to tar process: $!";
2020-01-08 14:41:49 +00:00
debug "sending okthx";
2020-01-08 16:44:07 +00:00
print $parentsock (pack("n", 0) . "okthx")
or error "cannot write to socket: $!";
2020-01-08 14:41:49 +00:00
$parentsock->flush();
}
close $fh;
if ($? != 0) {
error "tar failed";
}
} elsif ($msg eq "nblks") {
# handle the nblks message
# the setup() child sends the approximate number of 1024-byte
# blocks its rootfs needs; we store it in $numblocks (declared
# in the enclosing scope) so that genext2fs can size the ext2
# image after the hook loop finishes
debug "received message: nblks";
{
# NOTE: here "//" is correct -- read(...) is fully
# parenthesized, so the defined-or applies to read's return
# value (undef on error), unlike the buggy open()/"//"
# combinations elsewhere in this file
my $ret = read($parentsock, $numblocks, $len)
// error "cannot read from socket: $!";
if ($ret == 0) {
error "received eof on socket";
}
}
# the payload must be a plain non-negative integer
if ($numblocks !~ /^\d+$/) {
error "invalid number of blocks: $numblocks";
}
# acknowledge receipt so the child can proceed
debug "sending okthx";
print $parentsock (pack("n", 0) . "okthx")
or error "cannot write to socket: $!";
$parentsock->flush();
2020-01-08 14:41:49 +00:00
} else {
error "unknown message: $msg";
}
}
};
if ($@) {
# inform the other side that something went wrong
print $parentsock (pack("n", 0) . "error")
or error "cannot write to socket: $!";
$parentsock->flush();
2020-01-08 14:41:49 +00:00
# we cannot die here because that would leave the other thread
# running without a parent
warning "listening on child socket failed: $@";
$exitstatus = 1;
}
debug "finish to listen for hooks";
close $parentsock;
2020-01-10 10:44:15 +00:00
if ($options->{dryrun}) {
# nothing to do
} elsif ($format eq 'directory') {
# nothing to do
} elsif (any { $_ eq $format } ('tar', 'squashfs', 'ext2')) {
2020-01-08 14:41:49 +00:00
# we use eval() so that error() doesn't take this process down and
# thus leaves the setup() process without a parent
eval {
if ($options->{target} eq '-') {
if (!copy($rfh, *STDOUT)) {
error "cannot copy to standard output: $!";
}
} else {
if ( $format eq 'squashfs'
or $format eq 'ext2'
or defined $tar_compressor) {
2020-01-08 14:41:49 +00:00
my @argv = ();
if ($format eq 'squashfs') {
2020-01-08 14:41:49 +00:00
push @argv, 'tar2sqfs',
2020-01-10 10:44:15 +00:00
'--quiet', '--no-skip', '--force',
'--exportable',
2020-01-08 16:44:07 +00:00
'--compressor', 'xz',
'--block-size', '1048576',
$options->{target};
} elsif ($format eq 'ext2') {
if ($numblocks <= 0) {
error "invalid number of blocks: $numblocks";
}
push @argv, 'genext2fs', '-B', 1024, '-b', $numblocks,
'-N', '0', $options->{target};
} elsif ($format eq 'tar') {
2020-01-08 14:41:49 +00:00
push @argv, @{$tar_compressor};
} else {
error "unknown format: $format";
2020-01-08 14:41:49 +00:00
}
2020-01-08 16:44:07 +00:00
POSIX::sigprocmask(SIG_BLOCK, $sigset)
or error "Can't block signals: $!";
2020-01-08 14:41:49 +00:00
my $cpid = fork() // error "fork() failed: $!";
if ($cpid == 0) {
# child: default signal handlers
2020-01-09 07:39:40 +00:00
local $SIG{'INT'} = 'DEFAULT';
local $SIG{'HUP'} = 'DEFAULT';
local $SIG{'PIPE'} = 'DEFAULT';
local $SIG{'TERM'} = 'DEFAULT';
2020-01-08 14:41:49 +00:00
# unblock all delayed signals (and possibly handle
# them)
2020-01-08 16:44:07 +00:00
POSIX::sigprocmask(SIG_UNBLOCK, $sigset)
or error "Can't unblock signals: $!";
2020-01-08 14:41:49 +00:00
# redirect stdout to file or /dev/null
if ($format eq 'squashfs' or $format eq 'ext2') {
2020-01-08 16:44:07 +00:00
open(STDOUT, '>', '/dev/null')
or error "cannot open /dev/null for writing: $!";
} elsif ($format eq 'tar') {
2020-01-08 16:44:07 +00:00
open(STDOUT, '>', $options->{target})
or error
"cannot open $options->{target} for writing: $!";
} else {
error "unknown format: $format";
2020-01-08 14:41:49 +00:00
}
2020-01-08 16:44:07 +00:00
open(STDIN, '<&', $rfh)
or error "cannot open file handle for reading: $!";
2020-01-09 07:39:40 +00:00
eval { Devel::Cover::set_coverage("none") }
2020-01-08 16:44:07 +00:00
if $is_covering;
exec { $argv[0] } @argv
or
error("cannot exec " . (join " ", @argv) . ": $!");
2020-01-08 14:41:49 +00:00
}
2020-01-08 16:44:07 +00:00
POSIX::sigprocmask(SIG_UNBLOCK, $sigset)
or error "Can't unblock signals: $!";
2020-01-08 14:41:49 +00:00
waitpid $cpid, 0;
if ($? != 0) {
2020-01-08 16:44:07 +00:00
error("failed to start " . (join " ", @argv));
2020-01-08 14:41:49 +00:00
}
} else {
2020-01-08 16:44:07 +00:00
if (!copy($rfh, $options->{target})) {
2020-01-08 14:41:49 +00:00
error "cannot copy to $options->{target}: $!";
}
}
}
};
if ($@) {
# we cannot die here because that would leave the other thread
# running without a parent
warning "creating tarball failed: $@";
$exitstatus = 1;
}
} else {
error "unknown format: $format";
2018-09-18 09:20:24 +00:00
}
close($rfh);
waitpid $pid, 0;
if ($? != 0) {
2020-01-08 14:41:49 +00:00
$exitstatus = 1;
}
2018-09-18 09:20:24 +00:00
# change signal handler message
$waiting_for = "cleanup";
if ($format eq 'directory') {
# nothing to do
} elsif (any { $_ eq $format } ('tar', 'squashfs', 'ext2')) {
if (!-e $options->{root}) {
error "$options->{root} does not exist";
}
2020-01-08 14:41:49 +00:00
info "removing tempdir $options->{root}...";
if ($options->{mode} eq 'unshare') {
# We don't have permissions to remove the directory outside
# the unshared namespace, so we remove it here.
# Since this is still inside the unshared namespace, there is
# no risk of removing anything important.
2020-01-09 07:39:40 +00:00
$pid = get_unshare_cmd(
sub {
# File::Path will produce the error "cannot stat initial
# working directory" if the working directory cannot be
# accessed by the unprivileged unshared user. Thus, we
# first navigate to the parent of the root directory.
chdir "$options->{root}/.."
or error "unable to chdir() to parent directory of"
. " $options->{root}: $!";
remove_tree($options->{root}, { error => \my $err });
if (@$err) {
for my $diag (@$err) {
my ($file, $message) = %$diag;
if ($file eq '') {
warning "general error: $message";
} else {
# the unshared process might not have
# sufficient permissions to remove the root
# directory. We attempt its removal later, so
# don't warn if its removal fails now.
if ($file ne $options->{root}) {
warning
"problem unlinking $file: $message";
}
2020-01-09 07:39:40 +00:00
}
2020-01-08 14:41:49 +00:00
}
}
2020-01-09 07:39:40 +00:00
},
\@idmap
);
2020-01-08 14:41:49 +00:00
waitpid $pid, 0;
$? == 0 or error "remove_tree failed";
# in unshare mode, the toplevel directory might've been created in
# a directory that the unshared user cannot change and thus cannot
# delete. If the root directory still exists after remove_tree
# above, we attempt its removal again outside as the normal user.
if (-e $options->{root}) {
rmdir "$options->{root}"
or error "cannot rmdir $options->{root}: $!";
}
2020-01-08 16:44:07 +00:00
} elsif (
any { $_ eq $options->{mode} }
('root', 'fakechroot', 'proot', 'chrootless')
) {
2020-01-08 14:41:49 +00:00
# without unshare, we use the system's rm to recursively remove the
# temporary directory just to make sure that we do not accidentally
# remove more than we should by using --one-file-system.
#
# --interactive=never is needed when in proot mode, the
# write-protected file /apt/apt.conf.d/01autoremove-kernels is to
# be removed.
2020-01-08 16:44:07 +00:00
0 == system('rm', '--interactive=never', '--recursive',
'--preserve-root', '--one-file-system', $options->{root})
or error "rm failed: $!";
2020-01-08 14:41:49 +00:00
} else {
error "unknown mode: $options->{mode}";
}
} else {
error "unknown format: $format";
2018-09-18 09:20:24 +00:00
}
if ($got_signal) {
2020-01-08 14:41:49 +00:00
$exitstatus = 1;
}
exit $exitstatus;
2018-09-18 09:20:24 +00:00
}
# Program entry point: all work happens inside main(), which exits the
# process itself (see the `exit $exitstatus` at the end of main).
main();
__END__
=head1 NAME
mmdebstrap - multi-mirror Debian chroot creation
=head1 SYNOPSIS
B<mmdebstrap> [B<OPTION...>] [I<SUITE> [I<TARGET> [I<MIRROR>...]]]
=head1 DESCRIPTION
B<mmdebstrap> creates a Debian chroot of I<SUITE> into I<TARGET> from one or
more I<MIRROR>s. It is meant as an alternative to the debootstrap tool (see
section B<DEBOOTSTRAP>). In contrast to debootstrap it uses apt to resolve
dependencies and is thus able to use more than one mirror and resolve more
complex dependencies.
If no I<MIRROR> option is provided, L<http://deb.debian.org/debian> is used.
If I<SUITE> is a stable release name and no I<MIRROR> is specified, then
mirrors for updates and security are automatically added. If a I<MIRROR>
option starts with "deb " or "deb-src " then it is used as a one-line-style
format entry for apt's sources.list inside the chroot. If a I<MIRROR> option
contains a "://" then it is interpreted as a mirror URI and the apt line
inside the chroot is assembled as "deb [arch=A] B C D" where A is the host's
native architecture, B is the I<MIRROR>, C is the given I<SUITE> and D is the
components given via B<--components> (defaults to "main"). If a I<MIRROR>
option happens to be an existing file, then its contents are pasted into the
chroot's sources.list. This can be used to supply a deb822 style
sources.list. If I<MIRROR> is C<-> then standard input is pasted into the
chroot's sources.list. More than one mirror can be specified and are appended
to the chroot's sources.list in the given order. If any mirror contains a
https URI, then the packages apt-transport-https and ca-certificates will be
installed inside the chroot. If any mirror contains a tor+xxx URI, then the
apt-transport-tor package will be installed inside the chroot.
2018-09-18 09:20:24 +00:00
The optional I<TARGET> argument can either be the path to a directory, the path
to a tarball filename, the path to a squashfs image, the path to an ext2 image,
a FIFO, a character special device, or C<->. Without the B<--format> option,
I<TARGET> will be used to choose the format. See the section B<FORMATS> for
more information. If no I<TARGET> was specified or if I<TARGET> is C<->, an
uncompressed tarball will be sent to standard output.
2018-09-18 09:20:24 +00:00
The I<SUITE> may be a valid release code name (eg, sid, stretch, jessie) or a
symbolic name (eg, unstable, testing, stable, oldstable). Any suite name that
works with apt on the given mirror will work. If no I<SUITE> was specified,
then a single I<MIRROR> C<-> is added and thus the information of the desired
suite has to come from standard input as part of a valid apt sources.list
file.
2019-02-23 07:43:15 +00:00
All status output is printed to standard error unless B<--logfile> is used to
redirect it to a file or B<--quiet> or B<--silent> is used to suppress any
output on standard error. Help and version information will be printed to
standard error with the B<--help> and B<--version> options, respectively.
Otherwise, an uncompressed tarball might be sent to standard output if
I<TARGET> is C<-> or if no I<TARGET> was specified.
2018-09-18 09:20:24 +00:00
=head1 OPTIONS
2019-02-20 17:00:52 +00:00
Options are case insensitive. Short options may be bundled. Long options
require a double dash and may be abbreviated to uniqueness.
2018-09-18 09:20:24 +00:00
=over 8
=item B<-h,--help>
Print synopsis and options of this man page and exit.
=item B<--man>
Show the full man page as generated from Perl POD in a pager. This requires
the perldoc program from the perl-doc package. This is the same as running:
pod2man /usr/bin/mmdebstrap | man -l -
2018-09-18 09:20:24 +00:00
2019-02-23 07:55:31 +00:00
=item B<--version>
Print the B<mmdebstrap> version and exit.
2019-01-08 10:27:56 +00:00
=item B<--variant>=I<name>
2018-09-18 09:20:24 +00:00
2019-01-08 10:27:56 +00:00
Choose which package set to install. Valid variant I<name>s are B<extract>,
B<custom>, B<essential>, B<apt>, B<required>, B<minbase>, B<buildd>,
B<important>, B<debootstrap>, B<->, and B<standard>. The default variant is
B<debootstrap>. See the section B<VARIANTS> for more information.
2018-09-18 09:20:24 +00:00
2019-01-08 10:27:56 +00:00
=item B<--mode>=I<name>
2018-09-18 09:20:24 +00:00
Choose how to perform the chroot operation and create a filesystem with
ownership information different from the current user. Valid mode I<name>s are
B<auto>, B<sudo>, B<root>, B<unshare>, B<fakeroot>, B<fakechroot>, B<proot> and
B<chrootless>. The default mode is B<auto>. See the section B<MODES> for more
information.
2018-09-18 09:20:24 +00:00
=item B<--format>=I<name>
Choose the output format. Valid format I<name>s are B<auto>, B<directory>,
B<tar>, B<squashfs>, and B<ext2>. The default format is B<auto>. See the
section B<FORMATS> for more information.
2019-01-08 10:27:56 +00:00
=item B<--aptopt>=I<option>|I<file>
2018-09-18 09:20:24 +00:00
2019-01-08 10:27:56 +00:00
Pass arbitrary I<option>s to apt. Will be added to
F</etc/apt/apt.conf.d/99mmdebstrap> inside the chroot. Can be specified
multiple times. Each I<option> will be appended to 99mmdebstrap. A semicolon
will be added at the end of the option if necessary. If the command line
argument is an existing I<file>, the content of the file will be appended to
99mmdebstrap verbatim.
2018-09-18 09:20:24 +00:00
Example: This is necessary for allowing old timestamps from snapshot.debian.org
2018-09-18 09:20:24 +00:00
--aptopt='Acquire::Check-Valid-Until "false"'
Example: Settings controlling download of package description translations
--aptopt='Acquire::Languages { "environment"; "en"; }'
--aptopt='Acquire::Languages "none"'
Example: Enable installing Recommends (by default mmdebstrap doesn't)
--aptopt='Apt::Install-Recommends "true"'
Example: Configure apt-cacher or apt-cacher-ng as an apt proxy
--aptopt='Acquire::http { Proxy "http://127.0.0.1:3142"; }'
Example: For situations in which the apt sandbox user cannot access the chroot
--aptopt='APT::Sandbox::User "root"'
2018-09-18 09:20:24 +00:00
Example: Minimizing the number of packages installed from experimental
--aptopt='APT::Solver "aspcud"'
2020-01-08 16:19:30 +00:00
--aptopt='APT::Solver::aspcud::Preferences
"-count(solution,APT-Release:=/a=experimental/),-removed,-changed,-new"'
=item B<--keyring>=I<file>|I<directory>
Change the default keyring to use by apt. By default, F</etc/apt/trusted.gpg>
and F</etc/apt/trusted.gpg.d> are used. Depending on whether a file or
directory is passed to this option, the former and latter default can be
changed, respectively. Since apt only supports a single keyring file and
directory, respectively, you can B<not> use this option to pass multiple files
and/or directories. Using the C<--keyring> argument in the following way is
equal to keeping the default:
--keyring=/etc/apt/trusted.gpg --keyring=/etc/apt/trusted.gpg.d
If you need to pass multiple keyrings, use the C<signed-by> option when
specifying the mirror like this:
mmdebstrap mysuite out.tar "deb [signed-by=/path/to/key.gpg] http://..."
The C<signed-by> option will automatically be added to the final
C<sources.list> if the keyring required for the selected I<SUITE> is not yet
trusted by apt. Automatically adding the C<signed-by> option in these cases
requires C<gpg> to be installed. If C<gpg> and C<ubuntu-archive-keyring> are
installed, then you can create a Ubuntu Bionic chroot on Debian like this:
mmdebstrap bionic ubuntu-bionic.tar
The resulting chroot will have a C<sources.list> with a C<signed-by> option
pointing to F</usr/share/keyrings/ubuntu-archive-keyring.gpg>.
2019-01-08 10:27:56 +00:00
=item B<--dpkgopt>=I<option>|I<file>
2018-09-18 09:20:24 +00:00
2019-01-08 10:27:56 +00:00
Pass arbitrary I<option>s to dpkg. Will be added to
F</etc/dpkg/dpkg.cfg.d/99mmdebstrap> inside the chroot. Can be specified
multiple times. Each I<option> will be appended to 99mmdebstrap. If the command
line argument is an existing I<file>, the content of the file will be appended
to 99mmdebstrap verbatim.
2018-09-18 09:20:24 +00:00
Example: Exclude paths to reduce chroot size
--dpkgopt='path-exclude=/usr/share/man/*'
--dpkgopt='path-include=/usr/share/man/man[1-9]/*'
--dpkgopt='path-exclude=/usr/share/locale/*'
--dpkgopt='path-include=/usr/share/locale/locale.alias'
--dpkgopt='path-exclude=/usr/share/doc/*'
--dpkgopt='path-include=/usr/share/doc/*/copyright'
--dpkgopt='path-include=/usr/share/doc/*/changelog.Debian.*'
2018-09-18 09:20:24 +00:00
2020-03-07 01:25:55 +00:00
Example: Do not perform safe I/O operations when unpacking for more speed
--dpkgopt=force-unsafe-io
2019-01-08 10:27:56 +00:00
=item B<--include>=I<pkg1>[,I<pkg2>,...]
2018-09-18 09:20:24 +00:00
Comma or whitespace separated list of packages which will be installed in
addition to the packages installed by the specified variant. The direct and
indirect hard dependencies will also be installed. The behaviour of this
option depends on the selected variant. The B<extract> and B<custom> variants
install no packages by default, so for these variants, the packages specified
by this option will be the only ones that get either extracted or installed by
dpkg, respectively. For all other variants, apt is used to install the
additional packages. The B<essential> variant does not include apt and thus,
the include option will only work when the B<chrootless> mode is selected and
thus apt from the outside can be used. Package names are directly passed to
apt and thus, you can use apt features like C<pkg/suite>, C<pkg=version>,
C<pkg-> or use a glob or regex for C<pkg>. See apt(8) for the supported
syntax. The option can be specified multiple times and the packages are
concatenated in the order in which they are given on the command line. If
later list items are repeated, then they get dropped so that the resulting
package list is free of duplicates. So the following are equivalent:
--include="pkg1/stable pkg2=1.0 pkg3-"
--include=pkg1/stable,pkg2=1.0,pkg3-
--incl=pkg1/stable --incl="pkg2=1.0 pkg3-" --incl=pkg2=1.0,pkg3-
2018-09-18 09:20:24 +00:00
2019-01-08 10:27:56 +00:00
=item B<--components>=I<comp1>[,I<comp2>,...]
2018-09-18 09:20:24 +00:00
Comma or whitespace separated list of components like main, contrib and
non-free which will be used for all URI-only I<MIRROR> arguments. The option
can be specified multiple times and the components are concatenated in the
order in which they are given on the command line. If later list items are
repeated, then they get dropped so that the resulting component list is free
of duplicates. So the following are equivalent:
--components="main contrib non-free"
--components=main,contrib,non-free
--comp=main --comp="contrib non-free" --comp="main,non-free"
2018-09-18 09:20:24 +00:00
2019-01-08 10:27:56 +00:00
=item B<--architectures>=I<native>[,I<foreign1>,...]
2018-09-18 09:20:24 +00:00
Comma or whitespace separated list of architectures. The first architecture is
the I<native> architecture inside the chroot. The remaining architectures will
be added to the foreign dpkg architectures. Without this option, the I<native>
architecture of the chroot defaults to the native architecture of the system
running B<mmdebstrap>. The option can be specified multiple times and values
are concatenated. If later list items are repeated, then they get dropped so
that the resulting list is free of duplicates. So the following are
equivalent:
--architectures="amd64 armhf mipsel"
--architectures=amd64,armhf,mipsel
--arch=amd64 --arch="armhf mipsel" --arch=armhf,mipsel
2019-01-08 10:27:56 +00:00
2020-01-10 10:44:15 +00:00
=item B<--simulate>, B<--dry-run>
Run apt-get with B<--simulate>. Only the package cache is initialized but no
binary packages are downloaded or installed. Use this option to quickly check
whether a package selection within a certain suite and variant can in principle
be installed as far as their dependencies go. If the output is a tarball, then
no output is produced. If the output is a directory, then the directory will be
left populated with the skeleton files and directories necessary for apt to run
in it.
2019-01-08 10:28:27 +00:00
=begin comment
2020-01-10 10:44:15 +00:00
No hooks are executed with B<--simulate> or B<--dry-run>.
=item B<--setup-hook>=I<command>
Execute arbitrary I<command>s right after initial setup (directory creation,
configuration of apt and dpkg, ...) but before any packages are downloaded or
installed. At that point, the chroot directory does not contain any
executables and thus cannot be chroot-ed into. See section B<HOOKS> for more
information.
Example: Setup merged-/usr via symlinks
2020-01-08 16:19:30 +00:00
--setup-hook='for d in bin sbin lib; do ln -s usr/$d "$1/$d";
mkdir -p "$1/usr/$d"; done'
Example: Setup chroot for installing a sub-essential busybox-based chroot with
2020-01-08 16:19:30 +00:00
--variant=custom
--include=dpkg,busybox,libc-bin,base-files,base-passwd,debianutils
--setup-hook='mkdir -p "$1/bin"'
2020-01-08 16:19:30 +00:00
--setup-hook='for p in awk cat chmod chown cp diff echo env grep less ln
mkdir mount rm rmdir sed sh sleep sort touch uname; do
ln -s busybox "$1/bin/$p"; done'
--setup-hook='echo root:x:0:0:root:/root:/bin/sh > "$1/etc/passwd"'
--setup-hook='printf "root:x:0:\nmail:x:8:\nutmp:x:43:\n" > "$1/etc/group"'
=item B<--essential-hook>=I<command>
Execute arbitrary I<command>s after the Essential:yes packages have been
installed but before installing the remaining packages. The hook is not
executed for the B<extract> and B<custom> variants. See section B<HOOKS> for
more information.
Example: Enable unattended upgrades
2020-01-08 16:19:30 +00:00
--essential-hook='echo unattended-upgrades
unattended-upgrades/enable_auto_updates boolean true
| chroot "$1" debconf-set-selections'
Example: Select Europe/Berlin as the timezone
2020-01-08 16:19:30 +00:00
--essential-hook='echo tzdata tzdata/Areas select Europe
| chroot "$1" debconf-set-selections'
--essential-hook='echo tzdata tzdata/Zones/Europe select Berlin
| chroot "$1" debconf-set-selections'
=item B<--customize-hook>=I<command>
2019-01-08 10:28:27 +00:00
Execute arbitrary I<command>s after the chroot is set up and all packages got
installed but before final cleanup actions are carried out. See section
B<HOOKS> for more information.
2019-01-08 10:28:27 +00:00
Example: Preparing a chroot for use with autopkgtest
2019-01-08 10:28:27 +00:00
--customize-hook='chroot "$1" passwd --delete root'
2020-01-08 16:19:30 +00:00
--customize-hook='chroot "$1" useradd --home-dir /home/user
--create-home user'
--customize-hook='chroot "$1" passwd --delete user'
--customize-hook='echo host > "$1/etc/hostname"'
--customize-hook='echo "127.0.0.1 localhost host" > "$1/etc/hosts"'
--customize-hook=/usr/share/autopkgtest/setup-commands/setup-testbed
2019-01-08 10:28:27 +00:00
=end comment
2018-09-18 09:20:24 +00:00
=item B<-q,--quiet>, B<-s,--silent>
Do not write anything to standard error. If used together with B<--verbose> or
B<--debug>, only the last option will take effect.
=item B<-v,--verbose>
Instead of progress bars, write the dpkg and apt output directly to standard
error. If used together with B<--quiet> or B<--debug>, only the last option
will take effect.
=item B<-d,--debug>
In addition to the output produced by B<--verbose>, write detailed debugging
information to standard error. Errors will print a backtrace. If used together
with B<--quiet> or B<--verbose>, only the last option will take effect.
2019-02-23 07:43:15 +00:00
=item B<--logfile>=I<filename>
Instead of writing status information to standard error, write it into the
file given by I<filename>.
2018-09-18 09:20:24 +00:00
=back
=head1 MODES
Creating a Debian chroot requires not only permissions for running chroot but
also the ability to create files owned by the superuser. The selected mode
decides which way this is achieved.
=over 8
=item B<auto>
This mode automatically selects a fitting mode. If the effective user id is the
one of the superuser, then the B<sudo> mode is chosen. Otherwise, the
B<unshare> mode is picked if the system has the sysctl
C<kernel.unprivileged_userns_clone> set to C<1>. Should that not be the case
and if the fakechroot binary exists, the B<fakechroot> mode is chosen. Lastly,
the B<proot> mode is used if the proot binary exists.
2018-09-18 09:20:24 +00:00
=item B<sudo>, B<root>
This mode directly executes chroot and is the same mode of operation as is
used by debootstrap. It is the only mode that can directly create a directory
chroot with the right permissions. If the chroot directory is not accessible
by the _apt user, then apt sandboxing will be automatically disabled.
2018-09-18 09:20:24 +00:00
=item B<unshare>
2019-09-04 13:47:15 +00:00
This mode uses Linux user namespaces to allow unprivileged use of chroot and
2018-09-18 09:20:24 +00:00
creation of files that appear to be owned by the superuser inside the unshared
namespace. A directory chroot created with this mode will end up with wrong
ownership information. Choose to create a tarball instead. This mode requires
the sysctl C<kernel.unprivileged_userns_clone> being set to C<1>. B<SETTING
THIS OPTION HAS SECURITY IMPLICATIONS>. Refer to
L<https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=898446>
2018-09-18 09:20:24 +00:00
=item B<fakeroot>, B<fakechroot>
This mode will exec B<mmdebstrap> again under C<fakechroot fakeroot>. A
directory chroot created with this mode will end up with wrong permissions. If
you need a directory then run B<mmdebstrap> under C<fakechroot fakeroot -s
fakeroot.env> and use C<fakeroot.env> later when entering the chroot with
C<fakechroot fakeroot -i fakeroot.env chroot ...>. This mode will not work if
maintainer scripts are unable to handle C<LD_PRELOAD> correctly like the
package B<initramfs-tools> until version 0.132. This mode will also not work
with a different libc inside the chroot than on the outside. Since ldconfig
cannot run under fakechroot, the final system will not contain
F</etc/ld.so.cache>. See the section B<LIMITATIONS> in B<fakechroot(1)>.
2018-09-18 09:20:24 +00:00
=item B<proot>
This mode will carry out all calls to chroot with proot instead. Since
ownership information is only retained while proot is still running, this will
lead to wrong ownership information in the final directory (everything will be
owned by the user that executed B<mmdebstrap>) and tarball (everything will be
owned by the root user). Extended attributes are not retained. This mode is
useful if you plan to use the chroot with proot.
2018-09-18 09:20:24 +00:00
=item B<chrootless>
Uses the dpkg option C<--force-script-chrootless> to install packages into
B<TARGET> without dpkg and apt inside B<TARGET> but using apt and dpkg from
the machine running B<mmdebstrap>. Maintainer scripts are run without chrooting
into B<TARGET> and rely on their dependencies being installed on the machine
running B<mmdebstrap>. Unless B<mmdebstrap> was run inside fakeroot, the
B<TARGET> directory will be owned by the user running B<mmdebstrap>.
2018-12-06 16:15:56 +00:00
=for TODO
=item B<qemu>
2018-09-18 09:20:24 +00:00
=back
=head1 VARIANTS
All package sets also include the direct and indirect hard dependencies (but
not recommends) of the selected package sets. The variants B<minbase>,
B<buildd> and B<->, resemble the package sets that debootstrap would install
with the same I<--variant> argument.
2018-09-18 09:20:24 +00:00
=over 8
=item B<extract>
Installs nothing by default (not even C<Essential:yes> packages). Packages
given by the C<--include> option are extracted but will not be installed.
=item B<custom>
Installs nothing by default (not even C<Essential:yes> packages). Packages
given by the C<--include> option will be installed. If another mode than
B<chrootless> was selected and dpkg was not part of the included package set,
then this variant will fail because it cannot configure the packages.
2018-09-18 09:20:24 +00:00
=item B<essential>
2018-10-22 15:03:39 +00:00
C<Essential:yes> packages.
2018-09-18 09:20:24 +00:00
=item B<apt>
The B<essential> set plus apt.
=item B<required>, B<minbase>
The B<essential> set plus all packages with Priority:required and apt.
=item B<buildd>
The B<minbase> set plus build-essential.
=item B<important>, B<debootstrap>, B<->
The B<required> set plus all packages with Priority:important. This is the
default of debootstrap.
2018-09-18 09:20:24 +00:00
=item B<standard>
The B<important> set plus all packages with Priority:standard.
=back
=head1 FORMATS
The output format of mmdebstrap is specified using the B<--format> option.
Without that option the default format is I<auto>. The following formats exist:
=over 8
=item B<auto>
When selecting this format (the default), the actual format will be inferred
from the I<TARGET> positional argument. If I<TARGET> is an existing directory,
and does not equal to C<->, then the B<directory> format will be chosen. If
I<TARGET> ends with C<.tar> or with one of the filename extensions listed in
the section B<COMPRESSION>, or if I<TARGET> equals C<->, or if I<TARGET> is a
named pipe (fifo) or if I<TARGET> is a character special file like
F</dev/null>, then the B<tar> format will be chosen. If I<TARGET> ends with
C<.squashfs> or C<.sqfs>, then the B<squashfs> format will be chosen. If
I<TARGET> ends with C<.ext2> then the B<ext2> format will be chosen. If none of
these conditions apply, the B<directory> format will be chosen.
=item B<directory>, B<dir>
A chroot directory will be created in I<TARGET>. If the directory already
exists, it must either be empty or only contain an empty C<lost+found>
directory. The special I<TARGET> C<-> does not work with this format because a
directory cannot be written to standard output. If you need your directory to
be named C<->, then just explicitly pass the relative path to it like F<./->.
If a directory is chosen as output in any other mode than B<sudo>, then its
contents will have wrong ownership information and special device files will be
missing.
=item B<tar>
A temporary chroot directory will be created in C<$TMPDIR> or F</tmp> if
C<$TMPDIR> is not set. A tarball of that directory will be stored in I<TARGET>
or sent to standard output if I<TARGET> was omitted or if I<TARGET> equals
C<->. If I<TARGET> ends with one of the filename extensions listed in the
section B<COMPRESSION>, then a compressed tarball will be created. The tarball
will be in POSIX 1003.1-2001 (pax) format and will contain extended attributes.
To preserve the extended attributes, you have to pass B<--xattrs
--xattrs-include='*'> to tar when extracting the tarball.
=item B<squashfs>, B<sqfs>
A temporary chroot directory will be created in C<$TMPDIR> or F</tmp> if
C<$TMPDIR> is not set. A tarball of that directory will be piped to the
C<tar2sqfs> utility, which will create an xz compressed squashfs image with a
blocksize of 1048576 bytes in I<TARGET>. The special I<TARGET> C<-> does not
work with this format because C<tar2sqfs> can only write to a regular file. If
you need your squashfs image to be named C<->, then just explicitly pass the
relative path to it like F<./->.
=item B<ext2>
A temporary chroot directory will be created in C<$TMPDIR> or F</tmp> if
C<$TMPDIR> is not set. A tarball of that directory will be piped to the
C<genext2fs> utility, which will create an ext2 image that will be
approximately 90% full in I<TARGET>. The special I<TARGET> C<-> does not work
with this format because C<genext2fs> can only write to a regular file. If
you need your ext2 image to be named C<->, then just explicitly pass the
relative path to it like F<./->.
=back
=begin comment
=head1 HOOKS
This section describes properties of the hook options B<--setup-hook>,
B<--essential-hook> and B<--customize-hook> which are common to all three of
them. Any information specific to each hook is documented under the specific
hook options in the section B<OPTIONS>.
The options can be specified multiple times and the commands are executed in
the order in which they are given on the command line. There are three
different types of hook option arguments. If the argument passed to the hook
option starts with C<copy-in>, C<copy-out>, C<tar-in>, C<tar-out>, C<upload> or
C<download> followed by a space, then the hook is interpreted as a special
hook. Otherwise, if I<command> is an existing executable file from C<$PATH> or
if I<command> does not contain any shell metacharacters, then I<command> is
directly exec-ed with the path to the chroot directory passed as the first
argument. Otherwise, I<command> is executed under I<sh> and the chroot
directory can be accessed via I<$1>. All environment variables set by
B<mmdebstrap> (like C<APT_CONFIG>, C<DEBIAN_FRONTEND>, C<LC_ALL> and C<PATH>)
are preserved. All environment variables set by the user are preserved, except
for C<TMPDIR> which is cleared.
The paths inside the chroot are relative to the root directory of the chroot.
The path on the outside is relative to current directory of the original
B<mmdebstrap> invocation. The path inside the chroot must already exist. Paths
outside the chroot are created as necessary.
To be able to resolve even absolute symlinks, C<tar> or C<sh> and C<cat> are
executed inside the chroot for the B<essential> and B<customize> hooks. This
means that the special hooks might fail for the B<extract> and B<custom>
variants if no C<tar> or C<sh> and C<cat> is available inside the chroot.
Since nothing is yet installed at the time of the B<setup> hook, no absolute
symlinks in paths inside the chroot are supported during that hook.
=over 8
=item B<copy-out> I<pathinside> [I<pathinside> ...] I<pathoutside>
Recursively copies one or more files and directories out of the chroot,
placing them into I<pathoutside> outside of the chroot.
=item B<copy-in> I<pathoutside> [I<pathoutside> ...] I<pathinside>
Recursively copies one or more files and directories into the chroot,
placing them into I<pathinside> inside of the chroot.
2020-01-16 09:38:14 +00:00
=item B<sync-out> I<pathinside> I<pathoutside>
Recursively copy everything inside I<pathinside> inside the chroot into
I<pathoutside>. In contrast to B<copy-out>, this command synchronizes the
content of I<pathinside> with the content of I<pathoutside> without deleting
anything from I<pathoutside> but overwriting content as necessary. Use this
command over B<copy-out> if you don't want to create a new directory outside
the chroot but only update the content of an existing directory.
=item B<sync-in> I<pathoutside> I<pathinside>
Recursively copy everything inside I<pathoutside> into I<pathinside> inside the
chroot. In contrast to B<copy-in>, this command synchronizes the content of
I<pathoutside> with the content of I<pathinside> without deleting anything from
I<pathinside> but overwriting content as necessary. Use this command over
B<copy-in> if you don't want to create a new directory inside the chroot but
only update the content of an existing directory.
=item B<tar-in> I<outside.tar> I<pathinside>
Unpacks a tarball I<outside.tar> from outside the chroot into a certain
location I<pathinside> inside the chroot.
=item B<tar-out> I<pathinside> I<outside.tar>
Packs the path I<pathinside> from inside the chroot into a tarball, placing it
into a certain location I<outside.tar> outside the chroot.
=item B<download> I<fileinside> I<fileoutside>
Copy the file given by I<fileinside> from inside the chroot to outside the
chroot as I<fileoutside>. In contrast to B<copy-out>, this command only
handles files and not directories. To copy a directory recursively out of the
chroot, use B<copy-out> or B<tar-out>. Its advantage is that, by being able to
specify the full path on the outside, including the filename, the file on the
outside can have a different name from the file on the inside.
=item B<upload> I<fileoutside> I<fileinside>
Copy the file given by I<fileoutside> from outside the chroot to inside the
chroot as I<fileinside>. In contrast to B<copy-in>, this command only
handles files and not directories. To copy a directory recursively into the
chroot, use B<copy-in> or B<tar-in>. Its advantage is that, by being able to
specify the full path on the inside, including the filename, the file on the
inside can have a different name from the file on the outside.
=back
=end comment
2018-09-18 09:20:24 +00:00
=head1 EXAMPLES
Use like debootstrap:
2019-01-08 10:27:56 +00:00
$ sudo mmdebstrap unstable ./unstable-chroot
2018-09-18 09:20:24 +00:00
Without superuser privileges:
2019-01-08 10:27:56 +00:00
$ mmdebstrap unstable unstable-chroot.tar
2018-09-18 09:20:24 +00:00
With no command line arguments at all. The chroot content is entirely defined
by a sources.list file on standard input.
2018-09-18 09:20:24 +00:00
2019-01-08 10:27:56 +00:00
$ mmdebstrap < /etc/apt/sources.list > unstable-chroot.tar
2018-09-18 09:20:24 +00:00
2019-10-05 05:51:05 +00:00
Since the tarball is output on stdout, members of it can be excluded using tar
on-the-fly. For example the /dev directory can be removed from the final
tarball in cases where it is to be extracted by a non-root user who cannot
create device nodes:
$ mmdebstrap unstable | tar --delete ./dev > unstable-chroot.tar
Instead of a tarball, a squashfs image can be created:
$ mmdebstrap unstable unstable-chroot.squashfs
2020-01-16 17:03:13 +00:00
By default, B<mmdebstrap> runs B<tar2sqfs> with C<--no-skip --exportable
--compressor xz --block-size 1048576>. To choose a different set of options,
pipe the output of B<mmdebstrap> into B<tar2sqfs> manually.
2019-10-05 05:51:05 +00:00
By default, debootstrapping a stable distribution will add mirrors for security
and updates to the sources.list.
$ mmdebstrap stable stable-chroot.tar
If you don't want this behaviour, you can override it by manually specifying a
mirror in various different ways:
$ mmdebstrap stable stable-chroot.tar http://deb.debian.org/debian
2020-01-08 16:19:30 +00:00
$ mmdebstrap stable stable-chroot.tar \
"deb http://deb.debian.org/debian stable main"
2019-10-05 05:51:05 +00:00
$ mmdebstrap stable stable-chroot.tar /path/to/sources.list
$ mmdebstrap stable stable-chroot.tar - < /path/to/sources.list
2018-09-18 09:20:24 +00:00
Drop locales (but not the symlink to the locale name alias database),
translated manual packages (but not the untranslated ones), and documentation
(but not copyright and Debian changelog).
2019-01-08 10:27:56 +00:00
$ mmdebstrap --variant=essential \
2018-09-18 09:20:24 +00:00
--dpkgopt='path-exclude=/usr/share/man/*' \
--dpkgopt='path-include=/usr/share/man/man[1-9]/*' \
--dpkgopt='path-exclude=/usr/share/locale/*' \
--dpkgopt='path-include=/usr/share/locale/locale.alias' \
--dpkgopt='path-exclude=/usr/share/doc/*' \
--dpkgopt='path-include=/usr/share/doc/*/copyright' \
--dpkgopt='path-include=/usr/share/doc/*/changelog.Debian.*' \
unstable debian-unstable.tar
2019-01-08 10:27:56 +00:00
Use as debootstrap replacement in sbuild-createchroot:
$ sbuild-createchroot --debootstrap=mmdebstrap \
--make-sbuild-tarball ~/.cache/sbuild/unstable-amd64.tar.gz \
unstable $(mktemp -d)
2019-01-08 10:28:27 +00:00
=begin comment
Create a bootable USB Stick that boots into a full Debian desktop:
$ mmdebstrap --aptopt='Apt::Install-Recommends "true"' --customize-hook \
'chroot "$1" adduser --gecos user --disabled-password user' \
--customize-hook='echo 'user:live' | chroot "$1" chpasswd' \
--customize-hook='echo host > "$1/etc/hostname"' \
--customize-hook='echo "127.0.0.1 localhost host" > "$1/etc/hosts"' \
--include=linux-image-amd64,task-desktop unstable debian-unstable.tar
$ cat << END > extlinux.conf
> default linux
> timeout 0
>
> label linux
> kernel /vmlinuz
> append initrd=/initrd.img root=LABEL=rootfs
END
# You can use $(sudo blockdev --getsize64 /dev/sdXXX) to get the right
# image size for the target medium in bytes
$ guestfish -N debian-unstable.img=disk:8G -- part-disk /dev/sda mbr : \
part-set-bootable /dev/sda 1 true : mkfs ext2 /dev/sda1 : \
set-label /dev/sda1 rootfs : mount /dev/sda1 / : \
tar-in debian-unstable.tar / xattrs:true : \
upload /usr/lib/SYSLINUX/mbr.bin /mbr.bin : \
copy-file-to-device /mbr.bin /dev/sda size:440 : rm /mbr.bin : \
extlinux / : copy-in extlinux.conf / : sync : umount / : shutdown
$ qemu-system-x86_64 -m 1G -enable-kvm debian-unstable.img
$ sudo dd if=debian-unstable.img of=/dev/sdXXX status=progress
2020-03-07 01:25:55 +00:00
Build libdvdcss2.deb without installing anything or changing apt
sources on the current system:
$ mmdebstrap --variant=apt --components=main,contrib --include=libdvd-pkg \
--customize-hook='chroot $1 /usr/lib/libdvd-pkg/b-i_libdvdcss.sh' \
| tar --extract --verbose --strip-components=4 \
--wildcards './usr/src/libdvd-pkg/libdvdcss2_*_*.deb'
$ ls libdvdcss2_*_*.deb
2019-01-08 10:28:27 +00:00
Use as replacement for autopkgtest-build-qemu and vmdb2:
$ mmdebstrap --variant=important --include=linux-image-amd64 \
--customize-hook='chroot "$1" passwd --delete root' \
2020-01-08 16:19:30 +00:00
--customize-hook='chroot "$1" useradd --home-dir /home/user
--create-home user' \
--customize-hook='chroot "$1" passwd --delete user' \
--customize-hook='echo host > "$1/etc/hostname"' \
--customize-hook='echo "127.0.0.1 localhost host" > "$1/etc/hosts"' \
--customize-hook=/usr/share/autopkgtest/setup-commands/setup-testbed \
2019-01-08 10:28:27 +00:00
unstable debian-unstable.tar
$ cat << END > extlinux.conf
> default linux
> timeout 0
>
> label linux
> kernel /vmlinuz
> append initrd=/initrd.img root=/dev/vda1 console=ttyS0
2019-01-08 10:28:27 +00:00
END
$ guestfish -N debian-unstable.img=disk:8G -- \
2019-01-08 10:28:27 +00:00
part-disk /dev/sda mbr : \
part-set-bootable /dev/sda 1 true : \
mkfs ext2 /dev/sda1 : mount /dev/sda1 / : \
tar-in debian-unstable.tar / xattrs:true : \
extlinux / : copy-in extlinux.conf / : \
sync : umount / : shutdown
2019-01-08 10:28:27 +00:00
$ qemu-img convert -O qcow2 debian-unstable.img debian-unstable.qcow2
=end comment
2019-10-05 05:51:05 +00:00
Build a non-Debian chroot like Ubuntu bionic:
2020-01-08 16:19:30 +00:00
$ mmdebstrap --aptopt='Dir::Etc::Trusted
"/usr/share/keyrings/ubuntu-keyring-2012-archive.gpg"' bionic bionic.tar
2019-10-05 05:51:05 +00:00
=head1 ENVIRONMENT VARIABLES
=over 8
=item C<SOURCE_DATE_EPOCH>
By setting C<SOURCE_DATE_EPOCH> the result will be reproducible over multiple
runs with the same options and mirror content.
=item C<TMPDIR>
When creating a tarball, a temporary directory is populated with the rootfs
before the tarball is packed. The location of that temporary directory will be
in F</tmp> or the location pointed to by C<TMPDIR> if that environment variable
is set.
=back
2018-09-18 09:20:24 +00:00
=head1 DEBOOTSTRAP
This section lists some differences to debootstrap.
=over 8
=item * More than one mirror possible
=item * Default mirrors for stable releases include updates and security mirror
=item * Multiple ways to operate as non-root: fakechroot, proot, unshare
=item * 3-6 times faster
2018-09-18 09:20:24 +00:00
2020-01-08 16:19:30 +00:00
=item * Can create a chroot with only C<Essential:yes> packages and their deps
2018-09-18 09:20:24 +00:00
=item * Reproducible output by default if $SOURCE_DATE_EPOCH is set
=item * Can create output on filesystems with nodev set
=item * apt cache and lists are cleaned at the end
=item * foreign architecture chroots using qemu-user
=back
Limitations in comparison to debootstrap:
=over 8
=item * Only runs on systems with apt installed (Debian and derivatives)
2018-09-18 09:20:24 +00:00
=item * No I<SCRIPT> argument
2020-01-08 16:19:30 +00:00
=item * Some debootstrap options don't exist, namely:
I<--second-stage>, I<--exclude>, I<--resolve-deps>, I<--force-check-gpg>,
I<--merged-usr> and I<--no-merged-usr>
2018-09-18 09:20:24 +00:00
=back
=head1 COMPRESSION
B<mmdebstrap> will choose a suitable compressor for the output tarball
depending on the filename extension. The following mapping from filename
extension to compressor applies:
extension compressor
--------------------
.tar none
.gz gzip
.tgz gzip
.taz gzip
.Z compress
.taZ compress
.bz2 bzip2
.tbz bzip2
.tbz2 bzip2
.tz2 bzip2
.lz lzip
.lzma lzma
.tlz lzma
.lzo lzop
.lz4 lz4
2020-01-16 17:03:13 +00:00
.xz xz --threads=0
.txz xz --threads=0
.zst zstd
2020-01-16 17:03:13 +00:00
To change compression specific options, either use the respective environment
variables like B<XZ_OPT> or send mmdebstrap output to your compressor of choice
with a pipe.
2019-03-01 11:53:16 +00:00
=head1 BUGS
2019-10-05 05:51:18 +00:00
https://gitlab.mister-muffin.de/josch/mmdebstrap/issues
https://bugs.debian.org/src:mmdebstrap
2019-03-01 11:53:16 +00:00
As of version 1.19.5, dpkg does not provide facilities preventing it from
reading the dpkg configuration of the machine running B<mmdebstrap>.
2020-04-09 10:49:07 +00:00
Therefore, until this dpkg limitation is fixed, a default dpkg configuration is
recommended on machines running B<mmdebstrap>. If you are using B<mmdebstrap>
as the non-root user, then as a workaround you could run C<chmod 600
/etc/dpkg/dpkg.cfg.d/*> so that the config files are only accessible by the
root user.
2019-03-01 11:53:16 +00:00
2019-10-05 05:51:18 +00:00
Setting [trusted=yes] to allow signed archives without a known public key will
fail because of a gpg warning in the apt output. Since apt does not
communicate its status via any other means than human readable strings,
B<mmdebstrap> treats any warning from "apt-get update" as an error. Fixing
this will require apt to provide a machine readable status interface. See
Debian bugs #778357, #776152, #696335, and #745735.
=begin comment
Special hooks either require C<tar> or C<sh> and C<cat> be present inside the
chroot or otherwise there is no support for absolute symlinks. This limitation
can be dropped for the B<root> and B<unshare> modes as well as for the
B<setup> hook in all modes if code is added that supports resolving paths with
absolute symlinks even inside the chroot directory reliably. Due to how
symlinks are handled by B<fakechroot> and B<proot>, the requirement of having
C<tar> or C<sh> and C<cat> inside the chroot can never be dropped for these
modes.
=end comment
2018-09-18 09:20:24 +00:00
=head1 SEE ALSO
debootstrap(8)
=cut
2020-01-08 15:22:51 +00:00
# vim: expandtab tabstop=4 softtabstop=4 shiftwidth=4 ft=perl tw=79