#!/usr/bin/perl
#
# Copyright: 2018 Johannes Schauer <josch@mister-muffin.de>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
use strict;
use warnings;
our $VERSION = '0.4.1';
use English;
use Getopt::Long;
use Pod::Usage;
use File::Copy;
use File::Path qw(make_path remove_tree);
use File::Temp qw(tempfile tempdir);
use Cwd qw(abs_path);
require "syscall.ph";
use Fcntl qw(S_IFCHR S_IFBLK FD_CLOEXEC F_GETFD F_SETFD);
use List::Util qw(any none);
use POSIX qw(SIGINT SIGHUP SIGPIPE SIGTERM SIG_BLOCK SIG_UNBLOCK);
use Carp;
use Term::ANSIColor;
# from sched.h
use constant {
CLONE_NEWNS => 0x20000,
CLONE_NEWUTS => 0x4000000,
CLONE_NEWIPC => 0x8000000,
CLONE_NEWUSER => 0x10000000,
CLONE_NEWPID => 0x20000000,
CLONE_NEWNET => 0x40000000,
};
# type codes:
# 0 -> normal file
# 1 -> hardlink
# 2 -> symlink
# 3 -> character special
# 4 -> block special
# 5 -> directory
my @devfiles = (
# filename mode type link target major minor
[ "./dev/", 0755, 5, '', undef, undef ],
[ "./dev/console", 0666, 3, '', 5, 1 ],
[ "./dev/fd", 0777, 2, '/proc/self/fd', undef, undef ],
[ "./dev/full", 0666, 3, '', 1, 7 ],
[ "./dev/null", 0666, 3, '', 1, 3 ],
[ "./dev/ptmx", 0666, 3, '', 5, 2 ],
[ "./dev/pts/", 0755, 5, '', undef, undef ],
[ "./dev/random", 0666, 3, '', 1, 8 ],
[ "./dev/shm/", 0755, 5, '', undef, undef ],
[ "./dev/stderr", 0777, 2, '/proc/self/fd/2', undef, undef ],
[ "./dev/stdin", 0777, 2, '/proc/self/fd/0', undef, undef ],
[ "./dev/stdout", 0777, 2, '/proc/self/fd/1', undef, undef ],
[ "./dev/tty", 0666, 3, '', 5, 0 ],
[ "./dev/urandom", 0666, 3, '', 1, 9 ],
[ "./dev/zero", 0666, 3, '', 1, 5 ],
);
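# Illustrative sketch (not executed here): when device nodes are created
# via mknod later in setup(), the "./dev/null" entry above corresponds
# roughly to
#   system('mknod', "$options->{root}/dev/null", 'c', 1, 3);
#   chmod 0666, "$options->{root}/dev/null";
# while type 2 entries such as "./dev/fd" become symlinks to their link
# target.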
# verbosity levels:
# 0 -> print nothing
# 1 -> normal output and progress bars
# 2 -> verbose output
# 3 -> debug output
my $verbosity_level = 1;
sub debug {
if ($verbosity_level < 3) {
return;
}
my $msg = shift;
$msg = "D: $msg";
if ( -t STDERR ) {
$msg = colored($msg, 'clear')
}
print STDERR "$msg\n";
}
sub info {
if ($verbosity_level == 0) {
return;
}
my $msg = shift;
$msg = "I: $msg";
if ( -t STDERR ) {
$msg = colored($msg, 'green')
}
print STDERR "$msg\n";
}
sub warning {
if ($verbosity_level == 0) {
return;
}
my $msg = shift;
$msg = "W: $msg";
if ( -t STDERR ) {
$msg = colored($msg, 'bold yellow')
}
print STDERR "$msg\n";
}
sub error {
# unlike the logging functions above, error() must not return early at
# verbosity level 0 because it has to abort execution
# if error() is called with the string from a previous error() that was
# caught inside an eval(), then the string will have a newline which we
# are stripping here
chomp (my $msg = shift);
$msg = "E: $msg";
if ( -t STDERR ) {
$msg = colored($msg, 'bold red')
}
if ($verbosity_level == 3) {
croak $msg; # produces a backtrace
} else {
die "$msg\n";
}
}
# tar cannot figure out the decompression program when receiving data on
# standard input, thus we do it ourselves. This is copied from tar's
# src/suffix.c
sub get_tar_compressor($) {
my $filename = shift;
if ($filename eq '-') {
return undef
} elsif ($filename =~ /\.tar$/) {
return undef
} elsif ($filename =~ /\.(gz|tgz|taz)$/) {
return 'gzip';
} elsif ($filename =~ /\.(Z|taZ)$/) {
return 'compress';
} elsif ($filename =~ /\.(bz2|tbz|tbz2|tz2)$/) {
return 'bzip2';
} elsif ($filename =~ /\.lz$/) {
return 'lzip';
} elsif ($filename =~ /\.(lzma|tlz)$/) {
return 'lzma';
} elsif ($filename =~ /\.lzo$/) {
return 'lzop';
} elsif ($filename =~ /\.lz4$/) {
return 'lz4';
} elsif ($filename =~ /\.(xz|txz)$/) {
return 'xz';
} elsif ($filename =~ /\.zst$/) {
return 'zstd';
}
return undef
}
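# A hypothetical usage sketch (the filename is only an example):
#   my $comp = get_tar_compressor('rootfs.tar.xz');   # yields 'xz'
#   if (defined $comp) {
#       # pipe the tarball through $comp to decompress it before tar
#   }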
sub test_unshare($) {
my $verbose = shift;
if ($EFFECTIVE_USER_ID == 0) {
my $msg = "cannot use unshare mode when executing as root";
if ($verbose) {
warning $msg;
} else {
debug $msg;
}
return 0;
}
# arguments to syscalls have to be stored in their own variable or
# otherwise we will get "Modification of a read-only value attempted"
my $unshare_flags = CLONE_NEWUSER;
# we spawn a new process because if unshare succeeds, we would
# otherwise have unshared the mmdebstrap process itself which we don't want
my $pid = fork() // error "fork() failed: $!";
if ($pid == 0) {
my $ret = syscall &SYS_unshare, $unshare_flags;
if ($ret == 0) {
exit 0;
} else {
my $msg = "unshare syscall failed: $!";
if ($verbose) {
warning $msg;
} else {
debug $msg;
}
exit 1;
}
}
waitpid($pid, 0);
if (($? >> 8) != 0) {
return 0;
}
# if newuidmap and newgidmap exist, the exit status will be 1 when
# executed without parameters
system "newuidmap 2>/dev/null";
if (($? >> 8) != 1) {
if (($? >> 8) == 127) {
my $msg = "cannot find newuidmap";
if ($verbose) {
warning $msg;
} else {
debug $msg;
}
} else {
my $msg = "newuidmap returned unknown exit status: $?";
if ($verbose) {
warning $msg;
} else {
debug $msg;
}
}
return 0;
}
system "newgidmap 2>/dev/null";
if (($? >> 8) != 1) {
if (($? >> 8) == 127) {
my $msg = "cannot find newgidmap";
if ($verbose) {
warning $msg;
} else {
debug $msg;
}
} else {
my $msg = "newgidmap returned unknown exit status: $?";
if ($verbose) {
warning $msg;
} else {
debug $msg;
}
}
return 0;
}
return 1;
}
sub read_subuid_subgid() {
my $username = getpwuid $<;
my ($subid, $num_subid, $fh, $n);
my @result = ();
if (! -e "/etc/subuid") {
warning "/etc/subuid doesn't exist";
return;
}
if (! -r "/etc/subuid") {
warning "/etc/subuid is not readable";
return;
}
open $fh, "<", "/etc/subuid" or error "cannot open /etc/subuid for reading: $!";
while (my $line = <$fh>) {
($n, $subid, $num_subid) = split(/:/, $line, 3);
last if ($n eq $username);
}
close $fh;
push @result, ["u", 0, $subid, $num_subid];
if (scalar(@result) < 1) {
warning "/etc/subuid does not contain an entry for $username";
return;
}
if (scalar(@result) > 1) {
warning "/etc/subuid contains multiple entries for $username";
return;
}
open $fh, "<", "/etc/subgid" or error "cannot open /etc/subgid for reading: $!";
while (my $line = <$fh>) {
($n, $subid, $num_subid) = split(/:/, $line, 3);
last if ($n eq $username);
}
close $fh;
push @result, ["g", 0, $subid, $num_subid];
if (scalar(@result) < 2) {
warning "/etc/subgid does not contain an entry for $username";
return;
}
if (scalar(@result) > 2) {
warning "/etc/subgid contains multiple entries for $username";
return;
}
return @result;
}
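# Illustrative example: assuming a hypothetical /etc/subuid line
# "josch:100000:65536" and an identical /etc/subgid line, the function
# returns
#   (["u", 0, 100000, 65536], ["g", 0, 100000, 65536])
# i.e. id 0 inside the new user namespace is backed by host ids starting
# at 100000 with a range of 65536 subordinate ids.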
# This function spawns two child processes forming the following process tree
#
# A
# |
# fork()
# | \
# B C
# | |
# | fork()
# | | \
# | D E
# | | |
# |unshare()
# | close()
# | | |
# | | read()
# | | newuidmap(D)
# | | newgidmap(D)
# | | /
# | waitpid()
# | |
# | fork()
# | | \
# | F G
# | | |
# | | exec()
# | | /
# | waitpid()
# | /
# waitpid()
#
# To better refer to each individual part, we give each process a new
# identifier after calling fork(). Process A is the main process. After
# executing fork() we call the parent and child B and C, respectively. This
# first fork() is done because we do not want to modify A. B then remains
# waiting for its child C to finish. C calls fork() again, splitting into
# the parent D and its child E. In the parent D we call unshare() and close a
# pipe shared by D and E to signal to E that D is done with calling unshare().
# E notices this by using read() and follows up with executing the tools
# new[ug]idmap on D. E finishes and D continues with doing another fork().
# This is because when unsharing the PID namespace, we need a PID 1 to be kept
# alive or otherwise any child processes cannot fork() anymore themselves. So
# we keep F as PID 1 and finally call exec() in G.
sub get_unshare_cmd(&$) {
my $cmd = shift;
my $idmap = shift;
my $unshare_flags = CLONE_NEWUSER | CLONE_NEWNS | CLONE_NEWPID | CLONE_NEWUTS | CLONE_NEWIPC;
if (0) {
$unshare_flags |= CLONE_NEWNET;
}
# fork a new process and let the child get unshare()ed
# we don't want to unshare the parent process
my $gcpid = fork() // error "fork() failed: $!";
if ($gcpid == 0) {
# Create a pipe for the parent process to signal the child process that it is
# done with calling unshare() so that the child can go ahead setting up
# uid_map and gid_map.
pipe my $rfh, my $wfh;
# We have to do this dance with forking a process and then modifying the
# parent from the child because:
# - new[ug]idmap can only be called on a process id after that process has
# unshared the user namespace
# - a process loses its capabilities if it performs an execve() with nonzero
# user ids; see the capabilities(7) man page for details.
# - a process that unshared the user namespace by default does not have the
# privileges to call new[ug]idmap on itself
#
# this also works the other way around (the child setting up a user namespace
# and being modified from the parent) but that way, the parent would have to
# stay around until the child exited (so a pid would be wasted). Additionally,
# that variant would require an additional pipe to let the parent signal the
# child that it is done with calling new[ug]idmap. The way it is done here,
# this signaling can instead be done by wait()-ing for the exit of the child.
my $ppid = $$;
my $cpid = fork() // error "fork() failed: $!";
if ($cpid == 0) {
# child
# Close the writing descriptor at our end of the pipe so that we
# see EOF when parent closes its descriptor.
close $wfh;
# Wait for the parent process to finish its unshare() call by
# waiting for an EOF.
0 == sysread $rfh, my $c, 1 or error "read() did not receive EOF";
# The programs new[ug]idmap have to be used because they are
# setuid root. These privileges are needed to map the ids from
# /etc/sub[ug]id to the user namespace set up by the parent.
# Without these privileges, only the id of the user itself can be
# mapped into the new namespace.
#
# Since new[ug]idmap is setuid root we also don't need to write
# "deny" to /proc/$$/setgroups beforehand (this is otherwise
# required for unprivileged processes trying to write to
# /proc/$$/gid_map since kernel version 3.19 for security reasons)
# and therefore the parent process keeps its ability to change its
# own group here.
#
# Since /proc/$ppid/[ug]id_map can only be written to once,
# respectively, instead of making multiple calls to new[ug]idmap,
# we assemble a command line that makes one call each.
my $uidmapcmd = "";
my $gidmapcmd = "";
foreach (@{$idmap}) {
my ($t, $hostid, $nsid, $range) = @{$_};
if ($t ne "u" and $t ne "g" and $t ne "b") {
error "invalid idmap type: $t";
}
if ($t eq "u" or $t eq "b") {
$uidmapcmd .= " $hostid $nsid $range";
}
if ($t eq "g" or $t eq "b") {
$gidmapcmd .= " $hostid $nsid $range";
}
}
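# Illustrative example: for an idmap entry ["u", 0, 100000, 65536] the
# call assembled below becomes roughly
#   newuidmap $ppid 0 100000 65536
# and analogously for the gid mapping.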
my $idmapcmd = '';
if ($uidmapcmd ne "") {
0 == system "newuidmap $ppid $uidmapcmd" or error "newuidmap $ppid $uidmapcmd failed: $!";
}
if ($gidmapcmd ne "") {
0 == system "newgidmap $ppid $gidmapcmd" or error "newgidmap $ppid $gidmapcmd failed: $!";
}
exit 0;
}
# parent
# After fork()-ing, the parent immediately calls unshare...
0 == syscall &SYS_unshare, $unshare_flags or error "unshare() failed: $!";
# .. and then signals the child process that we are done with the
# unshare() call by sending an EOF.
close $wfh;
# Wait for the child process to finish its setup by waiting for its
# exit.
$cpid == waitpid $cpid, 0 or error "waitpid() failed: $!";
my $exit = $? >> 8;
if ($exit != 0) {
error "child had a non-zero exit status: $exit";
}
# Currently we are nobody (uid and gid are 65534). So we become root
# user and group instead.
#
# We are using direct syscalls instead of setting $(, $), $< and $>
# because then perl would do additional stuff which we don't need or
# want here, like checking /proc/sys/kernel/ngroups_max (which might
# not exist). It would also call setgroups() in a way that makes
# the root user be part of the group unknown.
0 == syscall &SYS_setgid, 0 or error "setgid failed: $!";
0 == syscall &SYS_setuid, 0 or error "setuid failed: $!";
0 == syscall &SYS_setgroups, 0, 0 or error "setgroups failed: $!";
if (1) {
# When the pid namespace is also unshared, then processes expect a
# master pid to always be alive within the namespace. To achieve
# this, we fork() here instead of exec() to always have one dummy
# process running as pid 1 inside the namespace. This is also what
# the unshare tool does when used with the --fork option.
#
# Otherwise, without a pid 1, new processes cannot be forked
# anymore after pid 1 finished.
my $cpid = fork() // error "fork() failed: $!";
if ($cpid != 0) {
# The parent process will stay alive as pid 1 in this
# namespace until the child finishes executing. This is
# important because pid 1 must never die or otherwise nothing
# new can be forked.
$cpid == waitpid $cpid, 0 or error "waitpid() failed: $!";
exit ($? >> 8);
}
}
&{$cmd}();
exit 0;
}
# parent
return $gcpid;
}
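# A minimal usage sketch (hypothetical, assuming an idmap as returned by
# read_subuid_subgid()):
#   my @idmap = read_subuid_subgid();
#   my $pid = get_unshare_cmd { exec 'id' } \@idmap;
#   waitpid $pid, 0;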
sub havemknod($) {
my $root = shift;
my $havemknod = 0;
if (-e "$root/test-dev-null") {
error "/test-dev-null already exists";
}
TEST: {
# we fork so that we can read STDERR
my $pid = open(my $fh, '-|') // error "failed to fork(): $!";
if ($pid == 0) {
open(STDERR, '>&', STDOUT);
# we use mknod(1) instead of the system call because creating the
# right dev_t argument requires makedev(3)
exec 'mknod', "$root/test-dev-null", 'c', '1', '3';
}
chomp (my $content = do { local $/; <$fh> });
close $fh;
{
last TEST unless $? == 0 and $content eq '';
last TEST unless -c "$root/test-dev-null";
last TEST unless open my $fh, '>', "$root/test-dev-null";
last TEST unless print $fh 'test';
}
$havemknod = 1;
}
if (-e "$root/test-dev-null") {
unlink "$root/test-dev-null" or error "cannot unlink /test-dev-null: $!";
}
return $havemknod;
}
sub print_progress {
if ($verbosity_level != 1) {
return;
}
my $perc = shift;
if (!-t STDERR) {
return;
}
if ($perc eq "done") {
# \e[2K clears everything on the current line (i.e. the progress bar)
print STDERR "\e[2Kdone\n";
return;
}
if ($perc >= 100) {
$perc = 100;
}
my $width = 50;
my $num_x = int($perc*$width/100);
my $bar = '=' x $num_x;
if ($num_x != $width) {
$bar .= '>';
$bar .= ' ' x ($width - $num_x - 1);
}
printf STDERR "%6.2f [%s]\r", $perc, $bar;
}
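# For example (illustrative, only shown when STDERR is a terminal and the
# verbosity level is 1), print_progress(50) renders a line roughly like
#    50.00 [=========================>                         ]
# and print_progress("done") clears the line and prints "done".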
sub run_progress {
my ($get_exec, $line_handler, $line_has_error, $chdir) = @_;
pipe my $rfh, my $wfh;
my $got_signal = 0;
my $ignore = sub {
$got_signal = $_[0];
info "run_progress() received signal $_[0]: waiting for child...";
};
# delay signals so that we can fork and change behaviour of the signal
# handler in parent and child without getting interrupted
my $sigset = POSIX::SigSet->new(SIGINT, SIGHUP, SIGPIPE, SIGTERM);
POSIX::sigprocmask(SIG_BLOCK, $sigset) or error "Can't block signals: $!";
my $pid1 = open(my $pipe, '-|') // error "failed to fork(): $!";
if ($pid1 == 0) {
# child: default signal handlers
$SIG{'INT'} = 'DEFAULT';
$SIG{'HUP'} = 'DEFAULT';
$SIG{'PIPE'} = 'DEFAULT';
$SIG{'TERM'} = 'DEFAULT';
# unblock all delayed signals (and possibly handle them)
POSIX::sigprocmask(SIG_UNBLOCK, $sigset) or error "Can't unblock signals: $!";
close $rfh;
# Unset the close-on-exec flag, so that the file descriptor does not
# get closed when we exec
my $flags = fcntl( $wfh, F_GETFD, 0 ) or error "fcntl F_GETFD: $!";
fcntl($wfh, F_SETFD, $flags & ~FD_CLOEXEC ) or error "fcntl F_SETFD: $!";
my $fd = fileno $wfh;
# redirect stderr to stdout so that we can capture it
open(STDERR, '>&', STDOUT);
my @execargs = $get_exec->($fd);
# before apt 1.5, "apt-get update" attempted to chdir() into the
# working directory. This will fail if the current working directory
# is not accessible by the user (for example in unshare mode). See
# Debian bug #860738
if (defined $chdir) {
chdir $chdir or error "failed chdir() to $chdir: $!";
}
exec { $execargs[0] } @execargs or error 'cannot exec() ' . (join ' ', @execargs);
}
close $wfh;
# spawn two processes:
# parent will parse stdout to look for errors
# child will parse $rfh for the progress meter
my $pid2 = fork() // error "failed to fork(): $!";
if ($pid2 == 0) {
# child: ignore signals
$SIG{'INT'} = 'IGNORE';
$SIG{'HUP'} = 'IGNORE';
$SIG{'PIPE'} = 'IGNORE';
$SIG{'TERM'} = 'IGNORE';
# unblock all delayed signals (and possibly handle them)
POSIX::sigprocmask(SIG_UNBLOCK, $sigset) or error "Can't unblock signals: $!";
print_progress 0.0;
2018-09-23 17:47:14 +00:00
while (my $line = <$rfh>) {
my $output = $line_handler->($line);
next unless $output;
print_progress $output;
2018-09-23 17:47:14 +00:00
}
print_progress "done";
exit 0;
}
# parent: ignore signals
# by using "local", the original is automatically restored once the
# function returns
local $SIG{'INT'} = $ignore;
local $SIG{'HUP'} = $ignore;
local $SIG{'PIPE'} = $ignore;
local $SIG{'TERM'} = $ignore;
# unblock all delayed signals (and possibly handle them)
POSIX::sigprocmask(SIG_UNBLOCK, $sigset) or error "Can't unblock signals: $!";
my $output = '';
my $has_error = 0;
while (my $line = <$pipe>) {
$has_error = $line_has_error->($line);
if ($verbosity_level >= 2) {
print STDERR $line;
} else {
# forward captured apt output
$output .= $line;
}
}
close($pipe);
my $fail = 0;
if ($? != 0 or $has_error) {
2018-09-23 17:47:14 +00:00
$fail = 1;
}
waitpid $pid2, 0;
$? == 0 or error "progress parsing failed";
if ($got_signal) {
error "run_progress() received signal: $got_signal";
}
# only print failure after progress output finished or otherwise it
# might interfere with the remaining output
if ($fail) {
if ($verbosity_level >= 1) {
print STDERR $output;
}
error ((join ' ', $get_exec->('<$fd>')) . ' failed');
}
}
sub run_dpkg_progress {
my $options = shift;
my @debs = @{$options->{PKGS} // []};
my $get_exec = sub { return @{$options->{ARGV}}, "--status-fd=$_[0]", @debs; };
my $line_has_error = sub { return 0; };
my $num = 0;
# each package has one install and one configure step, thus the total
# number is twice the number of packages
my $total = (scalar @debs) * 2;
my $line_handler = sub {
if ($_[0] =~ /^processing: (install|configure): /) {
$num += 1;
}
return $num/$total*100;
};
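# For reference, the dpkg --status-fd lines matched above look roughly
# like (illustrative package name):
#   processing: install: foo
#   processing: configure: foo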
run_progress $get_exec, $line_handler, $line_has_error;
}
sub run_apt_progress {
my $options = shift;
my @debs = @{$options->{PKGS} // []};
my $get_exec = sub {
return (
@{$options->{ARGV}},
"-oAPT::Status-Fd=$_[0]",
# prevent apt from messing up the terminal and allow dpkg to
# receive SIGINT and quit immediately without waiting for
# maintainer script to finish
'-oDpkg::Use-Pty=false',
@debs
)};
my $line_has_error = sub { return 0; };
if ($options->{FIND_APT_WARNINGS}) {
$line_has_error = sub {
# apt-get doesn't report a non-zero exit if the update failed.
# Thus, we have to parse its output. See #778357, #776152, #696335
# and #745735
if ($_[0] =~ /^(W: |Err:)/) {
return 1;
}
return 0;
};
}
my $line_handler = sub {
if ($_[0] =~ /(pmstatus|dlstatus):[^:]+:(\d+\.\d+):.*/) {
return $2;
}
};
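# For reference, an APT::Status-Fd progress line looks roughly like
# (illustrative values):
#   pmstatus:foo:42.5:Unpacking foo
# from which the percentage (here 42.5) is captured by the regex above.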
run_progress $get_exec, $line_handler, $line_has_error, $options->{CHDIR};
}
sub run_chroot(&$) {
my $cmd = shift;
my $options = shift;
my @cleanup_tasks = ();
my $cleanup = sub {
my $signal = $_[0];
while (my $task = pop @cleanup_tasks) {
$task->();
}
if ($signal) {
warning "pid $PID caught signal: $signal";
exit 1;
}
};
local $SIG{INT} = $cleanup;
local $SIG{HUP} = $cleanup;
local $SIG{PIPE} = $cleanup;
local $SIG{TERM} = $cleanup;
eval {
if (any { $_ eq $options->{mode} } ('root', 'unshare')) {
# if more than essential should be installed, make the system look
# more like a real one by creating or bind-mounting the device nodes
foreach my $file (@devfiles) {
my ($fname, $mode, $type, $linkname, $devmajor, $devminor) = @{$file};
next if $fname eq './dev/';
if ($type == 0) { # normal file
error "type 0 not implemented";
} elsif ($type == 1) { # hardlink
error "type 1 not implemented";
} elsif ($type == 2) { # symlink
if (!$options->{havemknod}) {
if ($options->{mode} eq 'fakechroot' and $linkname =~ /^\/proc/) {
# there is no /proc in fakechroot mode
next;
}
if (any { $_ eq $options->{mode} } ('root', 'unshare')) {
push @cleanup_tasks, sub {
unlink "$options->{root}/$fname" or warn "cannot unlink $fname: $!";
}
}
symlink $linkname, "$options->{root}/$fname" or error "cannot create symlink $fname";
}
} elsif ($type == 3 or $type == 4) { # character/block special
if (!$options->{havemknod}) {
open my $fh, '>', "$options->{root}/$fname" or error "cannot open $options->{root}/$fname: $!";
close $fh;
if ($options->{mode} eq 'unshare') {
push @cleanup_tasks, sub {
0 == system('umount', '--no-mtab', "$options->{root}/$fname") or warn "umount $fname failed: $?";
unlink "$options->{root}/$fname" or warn "cannot unlink $fname: $!";
};
} elsif ($options->{mode} eq 'root') {
push @cleanup_tasks, sub {
0 == system('umount', "$options->{root}/$fname") or warn "umount failed: $?";
unlink "$options->{root}/$fname" or warn "cannot unlink $fname: $!";
};
} else {
error "unknown mode: $options->{mode}";
}
0 == system('mount', '-o', 'bind', "/$fname", "$options->{root}/$fname") or error "mount $fname failed: $?";
}
} elsif ($type == 5) { # directory
if (!$options->{havemknod}) {
if (any { $_ eq $options->{mode} } ('root', 'unshare')) {
push @cleanup_tasks, sub {
rmdir "$options->{root}/$fname" or warn "cannot rmdir $fname: $!";
}
}
make_path "$options->{root}/$fname" or error "cannot make_path $fname";
chmod $mode, "$options->{root}/$fname" or error "cannot chmod $fname: $!";
}
if ($options->{mode} eq 'unshare') {
push @cleanup_tasks, sub {
0 == system('umount', '--no-mtab', "$options->{root}/$fname") or warn "umount $fname failed: $?";
};
} elsif ($options->{mode} eq 'root') {
push @cleanup_tasks, sub {
0 == system('umount', "$options->{root}/$fname") or warn "umount $fname failed: $?";
};
} else {
error "unknown mode: $options->{mode}";
}
0 == system('mount', '-o', 'bind', "/$fname", "$options->{root}/$fname") or error "mount $fname failed: $?";
} else {
error "unsupported type: $type";
}
}
} elsif (any { $_ eq $options->{mode} } ('proot', 'fakechroot')) {
# we cannot mount in fakechroot and proot mode
# in proot mode we have /dev bind-mounted already through --bind=/dev
} else {
error "unknown mode: $options->{mode}";
}
# We can only mount /proc and /sys after extracting the essential
# set because if we mount it before, then base-files will not be able
# to extract those
if ($options->{mode} eq 'root') {
push @cleanup_tasks, sub {
0 == system('umount', "$options->{root}/sys") or warn "umount /sys failed: $?";
};
0 == system('mount', '-t', 'sysfs', '-o', 'nosuid,nodev,noexec', 'sys', "$options->{root}/sys") or error "mount /sys failed: $?";
} elsif ($options->{mode} eq 'unshare') {
# naturally we have to clean up after ourselves in sudo mode where we
# do a real mount. But we also need to unmount in unshare mode because
# otherwise, even with the --one-file-system tar option, the
# permissions of the mount source will be stored and not the mount
# target (the directory)
push @cleanup_tasks, sub {
# since we cannot write to /etc/mtab we need --no-mtab
# unmounting /sys only seems to be successful with --lazy
0 == system('umount', '--no-mtab', '--lazy', "$options->{root}/sys") or warn "umount /sys failed: $?";
};
# without the network namespace unshared, we cannot mount a new
# sysfs. Since we need network, we just bind-mount.
#
# we have to rbind because just using bind results in "wrong fs
# type, bad option, bad superblock" error
0 == system('mount', '-o', 'rbind', '/sys', "$options->{root}/sys") or error "mount /sys failed: $?";
} elsif (any { $_ eq $options->{mode} } ('proot', 'fakechroot')) {
# we cannot mount in fakechroot and proot mode
# in proot mode we have /sys bind-mounted already through --bind=/sys
} else {
error "unknown mode: $options->{mode}";
}
if ($options->{mode} eq 'root') {
push @cleanup_tasks, sub {
0 == system('umount', "$options->{root}/proc") or error "umount /proc failed: $?";
};
0 == system('mount', '-t', 'proc', 'proc', "$options->{root}/proc") or error "mount /proc failed: $?";
} elsif ($options->{mode} eq 'unshare') {
# naturally we have to clean up after ourselves in sudo mode where we
# do a real mount. But we also need to unmount in unshare mode because
# otherwise, even with the --one-file-system tar option, the
# permissions of the mount source will be stored and not the mount
# target (the directory)
push @cleanup_tasks, sub {
# since we cannot write to /etc/mtab we need --no-mtab
0 == system('umount', '--no-mtab', "$options->{root}/proc") or error "umount /proc failed: $?";
};
0 == system('mount', '-t', 'proc', 'proc', "$options->{root}/proc") or error "mount /proc failed: $?";
} elsif (any { $_ eq $options->{mode} } ('proot', 'fakechroot')) {
# we cannot mount in fakechroot and proot mode
# in proot mode we have /proc bind-mounted already through --bind=/proc
} else {
error "unknown mode: $options->{mode}";
}
# prevent daemons from starting
{
open my $fh, '>', "$options->{root}/usr/sbin/policy-rc.d" or error "cannot open policy-rc.d: $!";
print $fh "#!/bin/sh\n";
print $fh "exit 101\n";
close $fh;
chmod 0755, "$options->{root}/usr/sbin/policy-rc.d" or error "cannot chmod policy-rc.d: $!";
}
if (-e "$options->{root}/sbin/start-stop-daemon") {
move("$options->{root}/sbin/start-stop-daemon", "$options->{root}/sbin/start-stop-daemon.REAL") or error "cannot move start-stop-daemon: $!";
open my $fh, '>', "$options->{root}/sbin/start-stop-daemon" or error "cannot open start-stop-daemon: $!";
print $fh "#!/bin/sh\n";
print $fh "echo \"Warning: Fake start-stop-daemon called, doing nothing\">&2\n";
close $fh;
chmod 0755, "$options->{root}/sbin/start-stop-daemon" or error "cannot chmod start-stop-daemon: $!";
}
&{$cmd}();
# cleanup
if (-e "$options->{root}/sbin/start-stop-daemon.REAL") {
move("$options->{root}/sbin/start-stop-daemon.REAL", "$options->{root}/sbin/start-stop-daemon") or error "cannot move start-stop-daemon: $!";
}
unlink "$options->{root}/usr/sbin/policy-rc.d" or error "cannot unlink policy-rc.d: $!";
};
my $error = $@;
# we use the cleanup function to do the unmounting
$cleanup->(0);
if ($error) {
error "run_chroot failed: $error";
}
}
sub run_hooks($$) {
my $name = shift;
my $options = shift;
if (scalar @{$options->{"${name}_hook"}} == 0) {
return;
}
my $runner = sub {
foreach my $script (@{$options->{"${name}_hook"}}) {
if ( -x $script || $script !~ m/[^\w@\%+=:,.\/-]/a) {
info "running --$name-hook directly: $script $options->{root}";
# execute it directly if it's an executable file
# or if there are no shell metacharacters
# (the /a regex modifier makes \w match only ASCII)
0 == system($script, $options->{root}) or error "command failed: $script";
} else {
info "running --$name-hook in shell: sh -c '$script' exec $options->{root}";
# otherwise, wrap everything in sh -c
0 == system('sh', '-c', $script, 'exec', $options->{root}) or error "command failed: $script";
}
}
};
if ($name eq 'setup') {
# execute directly without mounting anything (the mount points do not
# exist yet)
&{$runner}();
} else {
run_chroot \&$runner, $options;
}
}
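# Illustrative dispatch examples with hypothetical hook values:
#   --setup-hook=/usr/local/bin/prepare.sh contains no shell
#   metacharacters and thus runs directly as
#     system('/usr/local/bin/prepare.sh', $options->{root})
#   --setup-hook='echo hello > "$1"/tag' contains metacharacters and thus
#   runs as
#     system('sh', '-c', 'echo hello > "$1"/tag', 'exec', $options->{root})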
sub setup {
my $options = shift;
foreach my $key (sort keys %{$options}) {
my $value = $options->{$key};
if (!defined $value) {
next;
}
if (ref $value eq '') {
debug "$key: $options->{$key}";
} elsif (ref $value eq 'ARRAY') {
debug "$key: [" . (join ', ', @{$value}) . "]";
} else {
error "unknown type";
}
}
my ($conf, $tmpfile) = tempfile(UNLINK => 1) or error "cannot open apt.conf: $!";
print $conf "Apt::Architecture \"$options->{nativearch}\";\n";
# the host system might have configured additional architectures;
# force only the native architecture
if (scalar @{$options->{foreignarchs}} > 0) {
print $conf "Apt::Architectures { \"$options->{nativearch}\"; ";
foreach my $arch (@{$options->{foreignarchs}}) {
print $conf "\"$arch\"; ";
}
print $conf "};\n";
} else {
print $conf "Apt::Architectures \"$options->{nativearch}\";\n";
}
print $conf "Dir \"$options->{root}\";\n";
# not needed anymore for apt 1.3 and newer
print $conf "Dir::State::Status \"$options->{root}/var/lib/dpkg/status\";\n";
# for authentication, use the keyrings from the host
print $conf "Dir::Etc::Trusted \"/etc/apt/trusted.gpg\";\n";
print $conf "Dir::Etc::TrustedParts \"/etc/apt/trusted.gpg.d\";\n";
if ($options->{variant} ne 'apt') {
# apt considers itself essential. Thus, when generating an EDSP
# document for an external solver, it will add the Essential:yes field
# to the apt package stanza. This is unnecessary for any other variant
# than 'apt' because in all other variants we compile the set of
# packages we consider essential ourselves and for the 'essential'
# variant it would even be wrong to add apt. This workaround is only
# needed when apt is used with an external solver but doesn't hurt
# otherwise and we don't have a good way to figure out whether apt is
# using an external solver or not short of parsing the --aptopt
# options.
print $conf "pkgCacheGen::ForceEssential \",\";\n";
}
close $conf;
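# For a single-architecture chroot the configuration written above looks
# roughly like this (illustrative placeholder values; the last line is
# only added for variants other than 'apt'):
#   Apt::Architecture "amd64";
#   Apt::Architectures "amd64";
#   Dir "/path/to/chroot";
#   Dir::State::Status "/path/to/chroot/var/lib/dpkg/status";
#   Dir::Etc::Trusted "/etc/apt/trusted.gpg";
#   Dir::Etc::TrustedParts "/etc/apt/trusted.gpg.d";
#   pkgCacheGen::ForceEssential ",";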
{
my @directories = ('/etc/apt/apt.conf.d', '/etc/apt/sources.list.d',
'/etc/apt/preferences.d', '/var/cache/apt',
'/var/lib/apt/lists/partial', '/var/lib/dpkg',
'/etc/dpkg/dpkg.cfg.d/');
# if dpkg and apt operate from the outside we need some more
# directories because dpkg and apt might not even be installed inside
# the chroot
if ($options->{mode} eq 'chrootless') {
push @directories, ('/var/log/apt', '/var/lib/dpkg/triggers',
'/var/lib/dpkg/info', '/var/lib/dpkg/alternatives',
'/var/lib/dpkg/updates');
}
foreach my $dir (@directories) {
2019-01-20 09:39:01 +00:00
make_path("$options->{root}/$dir") or error "failed to create $dir: $!";
2018-10-22 15:05:56 +00:00
}
}
# We put certain configuration items in their own configuration file
# because they have to be valid for apt invocation from outside as well as
# from inside the chroot.
# The config filename is chosen such that any settings in it will be
# overridden by what the user specified with --aptopt.
{
open my $fh, '>', "$options->{root}/etc/apt/apt.conf.d/00mmdebstrap" or error "cannot open /etc/apt/apt.conf.d/00mmdebstrap: $!";
print $fh "Apt::Install-Recommends false;\n";
print $fh "Acquire::Languages \"none\";\n";
close $fh;
}
{
open my $fh, '>', "$options->{root}/var/lib/dpkg/status" or error "failed to open(): $!";
close $fh;
}
# /var/lib/dpkg/available is required to exist or otherwise package
# removals will fail
{
open my $fh, '>', "$options->{root}/var/lib/dpkg/available" or error "failed to open(): $!";
close $fh;
}
# /var/lib/dpkg/cmethopt is used by dselect
# see #930788
{
open my $fh, '>', "$options->{root}/var/lib/dpkg/cmethopt" or error "failed to open(): $!";
print $fh "apt apt\n";
close $fh;
}
# we create /var/lib/dpkg/arch inside the chroot if there are architectures
# other than the native architecture in the chroot or if chrootless mode is
# used to create a chroot of an architecture different from the native
# architecture outside the chroot.
chomp (my $hostarch = `dpkg --print-architecture`);
if (scalar @{$options->{foreignarchs}} > 0 or (
$options->{mode} eq 'chrootless' and $hostarch ne $options->{nativearch})) {
open my $fh, '>', "$options->{root}/var/lib/dpkg/arch" or error "cannot open /var/lib/dpkg/arch: $!";
print $fh "$options->{nativearch}\n";
foreach my $arch (@{$options->{foreignarchs}}) {
print $fh "$arch\n";
}
close $fh;
}
if (scalar @{$options->{aptopts}} > 0) {
open my $fh, '>', "$options->{root}/etc/apt/apt.conf.d/99mmdebstrap" or error "cannot open /etc/apt/apt.conf.d/99mmdebstrap: $!";
foreach my $opt (@{$options->{aptopts}}) {
if (-r $opt) {
# flush handle because copy() uses syswrite() which bypasses
# buffered IO
$fh->flush();
copy $opt, $fh or error "cannot copy $opt: $!";
} else {
print $fh $opt;
if ($opt !~ /;$/) {
print $fh ';';
}
if ($opt !~ /\n$/) {
print $fh "\n";
}
}
}
close $fh;
}
if (scalar @{$options->{dpkgopts}} > 0) {
# FIXME: in chrootless mode, dpkg will only read the configuration
# from the host
open my $fh, '>', "$options->{root}/etc/dpkg/dpkg.cfg.d/99mmdebstrap" or error "cannot open /etc/dpkg/dpkg.cfg.d/99mmdebstrap: $!";
foreach my $opt (@{$options->{dpkgopts}}) {
if (-r $opt) {
# flush handle because copy() uses syswrite() which bypasses
# buffered IO
$fh->flush();
copy $opt, $fh or error "cannot copy $opt: $!";
} else {
print $fh $opt;
if ($opt !~ /\n$/) {
print $fh "\n";
}
}
}
close $fh;
}
## setup merged usr
#my @amd64_dirs = ('lib32', 'lib64', 'libx32'); # only amd64 for now
#foreach my $dir ("bin", "sbin", "lib", @amd64_dirs) {
# symlink "usr/$dir", "$options->{root}/$dir" or die "cannot create symlink: $!";
# make_path("$options->{root}/usr/$dir") or die "cannot create /usr/$dir: $!";
#}
{
open my $fh, '>', "$options->{root}/etc/fstab" or error "cannot open fstab: $!";
print $fh "# UNCONFIGURED FSTAB FOR BASE SYSTEM\n";
close $fh;
chmod 0644, "$options->{root}/etc/fstab" or error "cannot chmod fstab: $!";
}
# write /etc/apt/sources.list
{
open my $fh, '>', "$options->{root}/etc/apt/sources.list" or error "cannot open /etc/apt/sources.list: $!";
print $fh $options->{sourceslist};
close $fh;
}
# allow network access from within
if (-e "/etc/resolv.conf") {
copy("/etc/resolv.conf", "$options->{root}/etc/resolv.conf") or error "cannot copy /etc/resolv.conf: $!";
} else {
warning("Host system does not have a /etc/resolv.conf to copy into the rootfs.")
}
if (-e "/etc/hostname") {
copy("/etc/hostname", "$options->{root}/etc/hostname") or error "cannot copy /etc/hostname: $!";
} else {
warning("Host system does not have a /etc/hostname to copy into the rootfs.")
}
if ($options->{havemknod}) {
foreach my $file (@devfiles) {
my ($fname, $mode, $type, $linkname, $devmajor, $devminor) = @{$file};
if ($type == 0) { # normal file
error "type 0 not implemented";
} elsif ($type == 1) { # hardlink
error "type 1 not implemented";
} elsif ($type == 2) { # symlink
if ($options->{mode} eq 'fakechroot' and $linkname =~ /^\/proc/) {
# there is no /proc in fakechroot mode
next;
}
symlink $linkname, "$options->{root}/$fname" or error "cannot create symlink $fname";
next; # chmod cannot work on symlinks
} elsif ($type == 3) { # character special
0 == system('mknod', "$options->{root}/$fname", 'c', $devmajor, $devminor) or error "mknod failed: $?";
} elsif ($type == 4) { # block special
0 == system('mknod', "$options->{root}/$fname", 'b', $devmajor, $devminor) or error "mknod failed: $?";
} elsif ($type == 5) { # directory
make_path "$options->{root}/$fname", { error => \my $err };
if (@$err) {
error "cannot create $fname";
}
} else {
error "unsupported type: $type";
}
chmod $mode, "$options->{root}/$fname" or error "cannot chmod $fname: $!";
}
}
# we tell apt about the configuration via a config file passed via the
# APT_CONFIG environment variable instead of using the --option command
# line arguments because configuration settings like Dir::Etc have already
# been evaluated at the time that apt takes its command line arguments
# into account.
$ENV{"APT_CONFIG"} = "$tmpfile";
# when apt-get update is run by the root user, then apt will attempt to
# drop privileges to the _apt user. This will fail if the _apt user does
# not have permissions to read the root directory. In that case, we have
# to disable apt sandboxing.
if ($options->{mode} eq 'root') {
my $partial = '/var/lib/apt/lists/partial';
if (system('/usr/lib/apt/apt-helper', 'drop-privs', '--', 'test', '-r', "$options->{root}$partial") != 0) {
warning "Download is performed unsandboxed as root as file $options->{root}$partial couldn't be accessed by user _apt";
open my $fh, '>>', $tmpfile or error "cannot open $tmpfile for appending: $!";
print $fh "APT::Sandbox::User \"root\";\n";
close $fh;
}
}
# setting PATH for chroot, ldconfig, start-stop-daemon...
if (defined $ENV{PATH} && $ENV{PATH} ne "") {
$ENV{PATH} = "$ENV{PATH}:/usr/sbin:/usr/bin:/sbin:/bin";
} else {
$ENV{PATH} = "/usr/sbin:/usr/bin:/sbin:/bin";
}
# run setup hooks
run_hooks('setup', $options);
info "running apt-get update...";
run_apt_progress({ ARGV => ['apt-get', 'update'],
CHDIR => $options->{root},
FIND_APT_WARNINGS => 1 });
# check if anything was downloaded at all
{
open(my $fh, '-|', 'apt-get', 'indextargets') or error "failed to fork(): $!";
chomp (my $indextargets = do { local $/; <$fh> });
close $fh;
if ($indextargets eq '') {
info "content of /etc/apt/sources.list:";
if ($verbosity_level >= 1) {
copy("$options->{root}/etc/apt/sources.list", *STDERR);
}
error "apt-get update didn't download anything";
}
}
my %pkgs_to_install;
if (defined $options->{include}) {
for my $pkg (split /,/, $options->{include}) {
$pkgs_to_install{$pkg} = ();
}
}
if ($options->{variant} eq 'buildd') {
$pkgs_to_install{'build-essential'} = ();
}
# To figure out the right package set for the apt variant we can use:
# $ apt-get dist-upgrade -o dir::state::status=/dev/null
# This is because that variant only contains essential packages and
# apt, and libapt treats apt as essential. If we want to install less
# (essential variant) then we have to compute the package set ourselves.
# Same if we want to install priority based variants.
if (any { $_ eq $options->{variant} } ('extract', 'custom')) {
info "downloading packages with apt...";
run_apt_progress({
ARGV => ['apt-get', '--yes',
'-oApt::Get::Download-Only=true',
'install'],
PKGS => [keys %pkgs_to_install],
});
} elsif ($options->{variant} eq 'apt') {
# if we just want to install Essential:yes packages, apt and their
# dependencies then we can make use of libapt treating apt as
# implicitly essential. An upgrade with the (currently) empty status
# file will trigger an installation of the essential packages plus apt.
#
# 2018-09-02, #debian-dpkg on OFTC, times in UTC+2
# 23:39 < josch> I'll just put it in my script and if it starts
# breaking some time I just say it's apt's fault. :P
# 23:42 < DonKult> that is how it usually works, so yes, do that :P (<-
# and please add that line next to it so you can
# remind me in 5+ years that I said that after I wrote
# in the bugreport: "Are you crazy?!? Nobody in his
# right mind would even suggest depending on it!")
info "downloading packages with apt...";
run_apt_progress({
ARGV => ['apt-get', '--yes',
'-oApt::Get::Download-Only=true',
'dist-upgrade'],
});
} elsif (any { $_ eq $options->{variant} } ('essential', 'standard', 'important', 'required', 'buildd', 'minbase')) {
my %ess_pkgs;
open(my $pipe_apt, '-|', 'apt-get', 'indextargets', '--format', '$(FILENAME)', 'Created-By: Packages') or error "cannot start apt-get indextargets: $!";
while (my $fname = <$pipe_apt>) {
chomp $fname;
open (my $pipe_cat, '-|', '/usr/lib/apt/apt-helper', 'cat-file', $fname) or error "cannot start apt-helper cat-file: $!";
my $pkgname;
my $ess = '';
my $prio = 'optional';
my $arch = '';
while (my $line = <$pipe_cat>) {
chomp $line;
# Dpkg::Index takes 10 seconds to parse a typical Packages
# file. Thus we instead use a simple parser that just retrieves
# the information we need.
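# A shortened, illustrative Packages stanza that this parser handles:
#   Package: base-files
#   Essential: yes
#   Priority: required
#   Architecture: amd64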
if ($line ne "") {
if ($line =~ /^Package: (.*)/) {
$pkgname = $1;
} elsif ($line =~ /^Essential: yes$/) {
$ess = 'yes'
} elsif ($line =~ /^Priority: (.*)/) {
$prio = $1;
} elsif ($line =~ /^Architecture: (.*)/) {
$arch = $1;
}
next;
}
# we are only interested in packages of the native architecture or
# Architecture:all
if ($arch eq $options->{nativearch} or $arch eq 'all') {
# the line is empty, thus a package stanza just finished
# processing and we can handle it now
if ($ess eq 'yes') {
$ess_pkgs{$pkgname} = ();
} elsif ($options->{variant} eq 'essential') {
# for this variant we are only interested in the
# essential packages
} elsif (any { $_ eq $options->{variant} } ('standard', 'important', 'required', 'buildd', 'minbase')) {
if ($prio eq 'optional' or $prio eq 'extra') {
# always ignore packages of priority optional and extra
} elsif ($prio eq 'standard') {
if (none { $_ eq $options->{variant} } ('important', 'required', 'buildd', 'minbase')) {
$pkgs_to_install{$pkgname} = ();
}
} elsif ($prio eq 'important') {
if (none { $_ eq $options->{variant} } ('required', 'buildd', 'minbase')) {
$pkgs_to_install{$pkgname} = ();
}
} elsif ($prio eq 'required') {
# required packages are part of all sets except
# essential and apt
$pkgs_to_install{$pkgname} = ();
} else {
error "unknown priority: $prio";
}
} else {
error "unknown variant: $options->{variant}";
}
}
# reset values
undef $pkgname;
$ess = '';
$prio = 'optional';
$arch = '';
}
close $pipe_cat;
$? == 0 or error "apt-helper cat-file failed: $?";
}
close $pipe_apt;
$? == 0 or error "apt-get indextargets failed: $?";
debug "Identified the following Essential:yes packages:";
foreach my $pkg (sort keys %ess_pkgs) {
debug " $pkg";
}
info "downloading packages with apt...";
run_apt_progress({
ARGV => ['apt-get', '--yes',
'-oApt::Get::Download-Only=true',
'install'],
PKGS => [keys %ess_pkgs],
});
} else {
error "unknown variant: $options->{variant}";
}
# extract the downloaded packages
my @essential_pkgs;
{
my $apt_archives = "/var/cache/apt/archives/";
opendir my $dh, "$options->{root}/$apt_archives" or error "cannot read $apt_archives";
while (my $deb = readdir $dh) {
if ($deb !~ /\.deb$/) {
next;
}
$deb = "$apt_archives/$deb";
if (!-f "$options->{root}/$deb") {
next;
}
push @essential_pkgs, $deb;
}
closedir $dh;
}
if (scalar @essential_pkgs == 0) {
# check if a file:// URI was used
open(my $pipe_apt, '-|', 'apt-get', 'indextargets', '--format', '$(URI)', 'Created-By: Packages') or error "cannot start apt-get indextargets: $!";
while (my $uri = <$pipe_apt>) {
if ($uri =~ /^file:\/\//) {
error "nothing got downloaded -- use copy:// instead of file://";
}
}
error "nothing got downloaded";
}
# We have to extract the packages from @essential_pkgs unless we run in
# chrootless mode with a variant other than extract.
# In other words, the only scenario in which the @essential_pkgs are not
# extracted is chrootless mode with any variant other than the extract
# variant.
if ($options->{mode} eq 'chrootless' and $options->{variant} ne 'extract') {
# nothing to do
} else {
info "extracting archives...";
print_progress 0.0;
my $counter = 0;
my $total = scalar @essential_pkgs;
foreach my $deb (@essential_pkgs) {
$counter += 1;
# not using dpkg-deb --extract as that would replace the
# merged-usr symlinks with plain directories
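# the fork/pipe pair below is roughly equivalent to the shell pipeline
#   dpkg-deb --fsys-tarfile pkg.deb \
#     | tar -C "$root" --keep-directory-symlink --extract --file -
# (pkg.deb and $root stand in for the real paths)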
pipe my $rfh, my $wfh;
my $pid1 = fork() // error "fork() failed: $!";
if ($pid1 == 0) {
open(STDOUT, '>&', $wfh);
debug("running dpkg-deb --fsys-tarfile $options->{root}/$deb");
2018-10-22 15:05:56 +00:00
exec 'dpkg-deb', '--fsys-tarfile', "$options->{root}/$deb";
}
my $pid2 = fork() // error "fork() failed: $!";
if ($pid2 == 0) {
open(STDIN, '<&', $rfh);
debug("running tar -C $options->{root} --keep-directory-symlink --extract --file -");
exec 'tar', '-C', $options->{root}, '--keep-directory-symlink', '--extract', '--file', '-';
}
waitpid($pid1, 0);
$? == 0 or error "dpkg-deb --fsys-tarfile failed: $?";
waitpid($pid2, 0);
$? == 0 or error "tar --extract failed: $?";
print_progress ($counter/$total*100);
}
print_progress "done";
}
if ($options->{mode} eq 'chrootless') {
info "installing packages...";
# FIXME: the dpkg config from the host is parsed before the command
# line arguments are parsed and might break this mode
# Example: if the host has --path-exclude set, then this will also
# affect the chroot.
my @chrootless_opts = (
'-oDPkg::Options::=--force-not-root',
'-oDPkg::Options::=--force-script-chrootless',
'-oDPkg::Options::=--root=' . $options->{root},
'-oDPkg::Options::=--log=' . "$options->{root}/var/log/dpkg.log");
if ($options->{variant} eq 'extract') {
# nothing to do
} else {
run_apt_progress({
ARGV => ['apt-get', '--yes',
@chrootless_opts,
'install'],
PKGS => [map { "$options->{root}/$_" } @essential_pkgs],
});
}
if (any { $_ eq $options->{variant} } ('extract', 'custom')) {
# nothing to do
} elsif (any { $_ eq $options->{variant} } ('essential', 'apt', 'standard', 'important', 'required', 'buildd', 'minbase')) {
# run essential hooks
run_hooks('essential', $options);
if (%pkgs_to_install) {
run_apt_progress({
ARGV => ['apt-get', '--yes',
@chrootless_opts,
'install'],
PKGS => [keys %pkgs_to_install],
});
}
} else {
error "unknown variant: $options->{variant}";
}
} elsif (any { $_ eq $options->{mode} } ('root', 'unshare', 'fakechroot', 'proot')) {
if (any { $_ eq $options->{variant} } ('extract')) {
# nothing to do
} elsif (any { $_ eq $options->{variant} } ('custom', 'essential', 'apt', 'standard', 'important', 'required', 'buildd', 'minbase')) {
if ($options->{mode} eq 'fakechroot') {
# this borrows from and extends
# /etc/fakechroot/debootstrap.env and /etc/fakechroot/chroot.env
{
my @fakechrootsubst = ();
foreach my $dir ('/usr/sbin', '/usr/bin', '/sbin', '/bin') {
push @fakechrootsubst, "$dir/chroot=/usr/sbin/chroot.fakechroot";
push @fakechrootsubst, "$dir/mkfifo=/bin/true";
push @fakechrootsubst, "$dir/ldconfig=/bin/true";
push @fakechrootsubst, "$dir/ldd=/usr/bin/ldd.fakechroot";
push @fakechrootsubst, "$dir/ischroot=/bin/true";
}
if (defined $ENV{FAKECHROOT_CMD_SUBST}
&& $ENV{FAKECHROOT_CMD_SUBST} ne "") {
push @fakechrootsubst, split /:/, $ENV{FAKECHROOT_CMD_SUBST};
}
$ENV{FAKECHROOT_CMD_SUBST} = join ':', @fakechrootsubst;
}
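# The resulting FAKECHROOT_CMD_SUBST value then looks roughly like this
# (illustrative, abbreviated):
#   /usr/sbin/chroot=/usr/sbin/chroot.fakechroot:/usr/sbin/mkfifo=/bin/true:...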
if (defined $ENV{FAKECHROOT_EXCLUDE_PATH}
&& $ENV{FAKECHROOT_EXCLUDE_PATH} ne "") {
$ENV{FAKECHROOT_EXCLUDE_PATH} = "$ENV{FAKECHROOT_EXCLUDE_PATH}:/dev:/proc:/sys";
} else {
$ENV{FAKECHROOT_EXCLUDE_PATH} = '/dev:/proc:/sys';
}
# workaround for long unix socket path if FAKECHROOT_BASE
# exceeds the limit of 108 bytes
$ENV{FAKECHROOT_AF_UNIX_PATH} = "/tmp";
{
my @ldsoconf = ('/etc/ld.so.conf');
opendir(my $dh, '/etc/ld.so.conf.d') or error "Can't opendir(/etc/ld.so.conf.d): $!";
while (my $entry = readdir $dh) {
# skip the "." and ".." entries
next if $entry eq ".";
next if $entry eq "..";
next if $entry !~ /\.conf$/;
push @ldsoconf, "/etc/ld.so.conf.d/$entry";
}
closedir($dh);
my @ldlibpath = ();
if (defined $ENV{LD_LIBRARY_PATH}
&& $ENV{LD_LIBRARY_PATH} ne "") {
push @ldlibpath, (split /:/, $ENV{LD_LIBRARY_PATH});
}
# FIXME: workaround allowing installation of systemd should
# live in fakechroot, see #917920
push @ldlibpath, "/lib/systemd";
foreach my $fname (@ldsoconf) {
2019-01-20 09:39:01 +00:00
open my $fh, "<", $fname or error "cannot open $fname for reading: $!";
2019-01-01 13:28:56 +00:00
                    while (my $line = <$fh>) {
                        chomp $line;
                        next if $line !~ /^\//;
                        push @ldlibpath, $line;
}
close $fh;
}
$ENV{LD_LIBRARY_PATH} = join ':', @ldlibpath;
}
2018-10-22 15:05:56 +00:00
}
2018-09-20 20:42:44 +00:00
2018-10-22 15:05:56 +00:00
# make sure that APT_CONFIG is not set when executing anything inside the
# chroot
2019-02-15 10:40:06 +00:00
my @chrootcmd = ();
2018-10-22 15:05:56 +00:00
if ($options->{mode} eq 'proot') {
2019-01-01 13:28:56 +00:00
push @chrootcmd, (
'proot',
'--root-id',
'--bind=/dev',
'--bind=/proc',
'--bind=/sys',
"--rootfs=$options->{root}",
'--cwd=/');
2018-10-22 15:05:56 +00:00
} elsif (any { $_ eq $options->{mode} } ('root', 'unshare', 'fakechroot')) {
push @chrootcmd, ('/usr/sbin/chroot', $options->{root});
} else {
2019-01-20 09:39:01 +00:00
error "unknown mode: $options->{mode}";
2018-09-20 20:42:44 +00:00
}
2018-10-22 15:05:56 +00:00
# copy qemu-user-static binary into chroot or setup proot with --qemu
if (defined $options->{qemu}) {
if ($options->{mode} eq 'proot') {
push @chrootcmd, "--qemu=qemu-$options->{qemu}";
} elsif ($options->{mode} eq 'fakechroot') {
# The binfmt support on the outside is used, so qemu needs to know
# where it has to look for shared libraries
$ENV{QEMU_LD_PREFIX} = $options->{root};
# Make sure that the fakeroot and fakechroot shared libraries
# exist for the right architecture
2019-01-20 09:39:01 +00:00
            open(my $fh, '-|', 'dpkg-architecture', '-a', $options->{nativearch}, '-qDEB_HOST_MULTIARCH') // error "failed to fork(): $!";
2018-10-22 15:05:56 +00:00
chomp (my $deb_host_multiarch = do { local $/; <$fh> });
close $fh;
if ($? != 0 or !$deb_host_multiarch) {
2019-01-20 09:39:01 +00:00
error "dpkg-architecture failed: $?";
2018-10-22 15:05:56 +00:00
}
my $fakechrootdir = "/usr/lib/$deb_host_multiarch/fakechroot";
if (!-e "$fakechrootdir/libfakechroot.so") {
2019-01-20 09:39:01 +00:00
error "$fakechrootdir/libfakechroot.so doesn't exist. Install libfakechroot:$options->{nativearch} outside the chroot";
2018-10-22 15:05:56 +00:00
}
my $fakerootdir = "/usr/lib/$deb_host_multiarch/libfakeroot";
if (!-e "$fakerootdir/libfakeroot-sysv.so") {
2019-01-20 09:39:01 +00:00
error "$fakerootdir/libfakeroot-sysv.so doesn't exist. Install libfakeroot:$options->{nativearch} outside the chroot";
2018-10-22 15:05:56 +00:00
}
# fakechroot only fills LD_LIBRARY_PATH with the directories of
# the host's architecture. We append the directories of the chroot
# architecture.
$ENV{LD_LIBRARY_PATH} .= ":$fakechrootdir:$fakerootdir";
} elsif (any { $_ eq $options->{mode} } ('root', 'unshare')) {
# other modes require a static qemu-user binary
my $qemubin = "/usr/bin/qemu-$options->{qemu}-static";
if (!-e $qemubin) {
2019-01-20 09:39:01 +00:00
error "cannot find $qemubin";
2018-10-22 15:05:56 +00:00
}
2019-01-20 09:39:01 +00:00
copy $qemubin, "$options->{root}/$qemubin" or error "cannot copy $qemubin: $!";
2019-04-25 06:54:31 +00:00
# File::Copy does not retain permissions but on some
# platforms (like Travis CI) the binfmt interpreter must
# have the executable bit set or otherwise execve will
# fail with EACCES
chmod 0755, "$options->{root}/$qemubin" or error "cannot chmod $qemubin: $!";
2018-10-22 15:05:56 +00:00
} else {
2019-01-20 09:39:01 +00:00
error "unknown mode: $options->{mode}";
2018-09-21 20:10:14 +00:00
}
}
2018-10-22 15:05:56 +00:00
2018-10-24 00:41:43 +00:00
# some versions of coreutils use the renameat2 system call in mv.
# This breaks certain versions of fakechroot and proot. Here we do
# a sanity check and warn the user in case things might break.
if (any { $_ eq $options->{mode} } ('fakechroot', 'proot') and -e "$options->{root}/bin/mv") {
2019-01-20 09:39:01 +00:00
mkdir "$options->{root}/000-move-me" or error "cannot create directory: $!";
2018-10-24 00:41:43 +00:00
my $ret = system @chrootcmd, '/bin/mv', '/000-move-me', '/001-delete-me';
if ($ret != 0) {
if ($options->{mode} eq 'proot') {
2019-01-20 09:39:01 +00:00
info "the /bin/mv binary inside the chroot doesn't work under proot";
info "this is likely due to missing support for renameat2 in proot";
info "see https://github.com/proot-me/PRoot/issues/147";
2018-10-24 00:41:43 +00:00
} else {
2019-01-20 09:39:01 +00:00
info "the /bin/mv binary inside the chroot doesn't work under fakechroot";
info "with certain versions of coreutils and glibc, this is due to missing support for renameat2 in fakechroot";
info "see https://github.com/dex4er/fakechroot/issues/60";
2018-10-24 00:41:43 +00:00
}
2019-01-20 09:39:01 +00:00
info "expect package post installation scripts not to work";
rmdir "$options->{root}/000-move-me" or error "cannot rmdir: $!";
2018-10-24 00:41:43 +00:00
} else {
2019-01-20 09:39:01 +00:00
rmdir "$options->{root}/001-delete-me" or error "cannot rmdir: $!";
2018-10-24 00:41:43 +00:00
}
}
2018-10-22 15:05:56 +00:00
# install the extracted packages properly
# we need --force-depends because dpkg does not take Pre-Depends into
# account and thus doesn't install them in the right order
2018-11-04 19:40:04 +00:00
# And the --predep-package option is broken: #539133
2019-01-20 09:39:01 +00:00
info "installing packages...";
2018-12-27 20:08:53 +00:00
run_dpkg_progress({
2019-02-15 10:40:06 +00:00
ARGV => [@chrootcmd, 'env', '--unset=TMPDIR',
'dpkg', '--install', '--force-depends'],
2018-12-27 20:08:53 +00:00
PKGS => \@essential_pkgs,
});
2018-10-22 15:05:56 +00:00
# if the path-excluded option was added to the dpkg config, reinstall all
# packages
if (-e "$options->{root}/etc/dpkg/dpkg.cfg.d/99mmdebstrap") {
2019-01-20 09:39:01 +00:00
open(my $fh, '<', "$options->{root}/etc/dpkg/dpkg.cfg.d/99mmdebstrap") or error "cannot open /etc/dpkg/dpkg.cfg.d/99mmdebstrap: $!";
2018-10-22 15:05:56 +00:00
my $num_matches = grep /^path-exclude=/, <$fh>;
close $fh;
if ($num_matches > 0) {
# without --skip-same-version, dpkg will install the given
# packages even though they are already installed
2019-01-20 09:39:01 +00:00
info "re-installing packages because of path-exclude...";
2018-12-27 20:08:53 +00:00
run_dpkg_progress({
2019-02-15 10:40:06 +00:00
ARGV => [@chrootcmd, 'env', '--unset=TMPDIR',
'dpkg', '--install', '--force-depends'],
2018-12-27 20:08:53 +00:00
PKGS => \@essential_pkgs,
});
2018-10-22 15:05:56 +00:00
}
2018-09-21 20:10:14 +00:00
}
2018-10-22 15:05:56 +00:00
foreach my $deb (@essential_pkgs) {
2019-02-28 11:20:42 +00:00
unlink "$options->{root}/$deb" or error "cannot unlink $deb: $!";
2018-09-21 20:10:14 +00:00
}
2018-09-20 20:42:44 +00:00
2019-02-20 12:32:49 +00:00
# run essential hooks
2019-03-25 13:27:34 +00:00
if ($options->{variant} ne 'custom') {
run_hooks('essential', $options);
}
2019-02-20 12:32:49 +00:00
2019-03-25 13:27:34 +00:00
if ($options->{variant} ne 'custom' and %pkgs_to_install) {
2018-10-22 15:05:56 +00:00
# some packages have to be installed from the outside before anything
# can be installed from the inside.
#
# we do not need to install any *-archive-keyring packages inside the
# chroot prior to installing the packages, because the keyring is only
# used when doing "apt-get update" and that was already done at the
# beginning using key material from the outside. Since the apt cache
# is already filled and we are not calling "apt-get update" again, the
# keyring can be installed later during installation. But: if it's not
# installed during installation, then we might end up with a fully
# installed system without keyrings that are valid for its
# sources.list.
my %pkgs_to_install_from_outside;
# install apt if necessary
if ($options->{variant} ne 'apt') {
$pkgs_to_install_from_outside{apt} = ();
2018-10-22 09:36:02 +00:00
}
2018-10-22 15:05:56 +00:00
# since apt will be run inside the chroot, make sure that
# apt-transport-https and ca-certificates gets installed first if any
# mirror is a https URI
2019-01-20 09:39:01 +00:00
open(my $pipe_apt, '-|', 'apt-get', 'indextargets', '--format', '$(URI)', 'Created-By: Packages') or error "cannot start apt-get indextargets: $!";
2018-10-22 15:05:56 +00:00
while (my $uri = <$pipe_apt>) {
if ($uri =~ /^https:\/\//) {
# FIXME: support for https is part of apt >= 1.5
$pkgs_to_install_from_outside{'apt-transport-https'} = ();
$pkgs_to_install_from_outside{'ca-certificates'} = ();
last;
} elsif ($uri =~ /^tor(\+[a-z]+)*:\/\//) {
# tor URIs can be tor+http://, tor+https:// or even
# tor+mirror+file://
$pkgs_to_install_from_outside{'apt-transport-tor'} = ();
last;
}
2018-09-18 09:20:24 +00:00
}
2018-10-22 15:05:56 +00:00
close $pipe_apt;
2019-01-20 09:39:01 +00:00
$? == 0 or error "apt-get indextargets failed";
2018-10-22 15:05:56 +00:00
if (%pkgs_to_install_from_outside) {
2019-01-20 09:39:01 +00:00
info 'downloading ' . (join ', ', keys %pkgs_to_install_from_outside) . "...";
2018-12-27 20:08:53 +00:00
run_apt_progress({
ARGV => ['apt-get', '--yes',
'-oApt::Get::Download-Only=true',
'install'],
PKGS => [keys %pkgs_to_install_from_outside],
});
2018-10-22 15:05:56 +00:00
my @debs_to_install;
my $apt_archives = "/var/cache/apt/archives/";
2019-01-20 09:39:01 +00:00
            opendir my $dh, "$options->{root}/$apt_archives" or error "cannot read $apt_archives: $!";
2018-10-22 15:05:56 +00:00
while (my $deb = readdir $dh) {
if ($deb !~ /\.deb$/) {
next;
}
$deb = "$apt_archives/$deb";
if (!-f "$options->{root}/$deb") {
next;
}
push @debs_to_install, $deb;
}
            closedir $dh;
if (scalar @debs_to_install == 0) {
2019-01-20 09:39:01 +00:00
error "nothing got downloaded";
2018-10-22 15:05:56 +00:00
}
# we need --force-depends because dpkg does not take Pre-Depends
# into account and thus doesn't install them in the right order
2019-01-20 09:39:01 +00:00
info 'installing ' . (join ', ', keys %pkgs_to_install_from_outside) . "...";
2018-12-27 20:08:53 +00:00
run_dpkg_progress({
2019-02-15 10:40:06 +00:00
ARGV => [@chrootcmd, 'env', '--unset=TMPDIR',
'dpkg', '--install', '--force-depends'],
2018-12-27 20:08:53 +00:00
PKGS => \@debs_to_install,
});
2018-10-22 15:05:56 +00:00
foreach my $deb (@debs_to_install) {
2019-02-28 11:20:42 +00:00
unlink "$options->{root}/$deb" or error "cannot unlink $deb: $!";
2018-10-22 15:05:56 +00:00
}
2018-09-18 09:20:24 +00:00
}
2019-01-13 09:17:46 +00:00
run_chroot {
2019-01-20 09:39:01 +00:00
info "installing remaining packages inside the chroot...";
2019-01-13 09:17:46 +00:00
run_apt_progress({
2019-02-15 10:40:06 +00:00
ARGV => [@chrootcmd, 'env',
'--unset=APT_CONFIG',
'--unset=TMPDIR',
'apt-get', '--yes', 'install'],
2019-01-13 09:17:46 +00:00
PKGS => [keys %pkgs_to_install],
});
} $options;
2018-10-22 15:05:56 +00:00
2018-09-18 09:20:24 +00:00
}
2018-10-22 12:44:45 +00:00
} else {
2019-01-20 09:39:01 +00:00
error "unknown variant: $options->{variant}";
2018-09-18 09:20:24 +00:00
}
2018-10-22 15:05:56 +00:00
} else {
2019-01-20 09:39:01 +00:00
error "unknown mode: $options->{mode}";
2018-09-18 09:20:24 +00:00
}
2019-02-20 12:32:49 +00:00
run_hooks('customize', $options);
2019-01-08 10:28:27 +00:00
2018-09-23 17:43:14 +00:00
# clean up temporary configuration file
2019-01-20 09:39:01 +00:00
unlink "$options->{root}/etc/apt/apt.conf.d/00mmdebstrap" or error "failed to unlink /etc/apt/apt.conf.d/00mmdebstrap: $!";
info "cleaning package lists and apt cache...";
2018-12-27 20:08:53 +00:00
run_apt_progress({
ARGV => ['apt-get', '--option', 'Dir::Etc::SourceList=/dev/null', 'update'],
2019-04-25 06:49:28 +00:00
CHDIR => $options->{root},
2018-12-27 20:08:53 +00:00
});
2019-04-25 06:49:28 +00:00
run_apt_progress({ ARGV => ['apt-get', 'clean'], CHDIR => $options->{root} });
2018-09-18 09:20:24 +00:00
2019-04-23 11:28:55 +00:00
# apt since 1.6 creates the auxfiles directory. If apt inside the chroot
# is older than that, then it will not know how to clean it.
if (-e "$options->{root}/var/lib/apt/lists/auxfiles") {
rmdir "$options->{root}/var/lib/apt/lists/auxfiles" or die "cannot rmdir /var/lib/apt/lists/auxfiles: $!";
}
2019-08-26 14:34:35 +00:00
if (defined $options->{qemu} and any { $_ eq $options->{mode} } ('root', 'unshare')) {
2019-02-28 11:20:42 +00:00
unlink "$options->{root}/usr/bin/qemu-$options->{qemu}-static" or error "cannot unlink /usr/bin/qemu-$options->{qemu}-static: $!";
2018-09-18 15:11:02 +00:00
}
2018-09-18 09:20:24 +00:00
# clean up certain files to make output reproducible
unlink "$options->{root}/var/log/dpkg.log";
unlink "$options->{root}/var/log/apt/history.log";
unlink "$options->{root}/var/log/apt/term.log";
unlink "$options->{root}/var/log/alternatives.log";
unlink "$options->{root}/var/cache/ldconfig/aux-cache";
}
sub main() {
umask 022;
my $mtime = time;
if (exists $ENV{SOURCE_DATE_EPOCH}) {
$mtime = $ENV{SOURCE_DATE_EPOCH}+0;
}
$ENV{DEBIAN_FRONTEND} = 'noninteractive';
$ENV{DEBCONF_NONINTERACTIVE_SEEN} = 'true';
$ENV{LC_ALL} = 'C.UTF-8';
$ENV{LANGUAGE} = 'C.UTF-8';
$ENV{LANG} = 'C.UTF-8';
# copy ARGV because getopt modifies it
my @ARGVORIG = @ARGV;
my $options = {
components => "main",
variant => "important",
include => undef,
mode => 'auto',
dpkgopts => [],
aptopts => [],
2019-02-15 11:42:46 +00:00
noop => [],
2019-02-20 12:32:49 +00:00
setup_hook => [],
essential_hook => [],
customize_hook => [],
2018-09-18 09:20:24 +00:00
};
chomp ($options->{architectures} = `dpkg --print-architecture`);
2019-02-23 07:43:15 +00:00
my $logfile = undef;
2019-02-20 17:00:52 +00:00
Getopt::Long::Configure ('default', 'bundling', 'auto_abbrev', 'ignore_case_always');
2018-09-18 09:20:24 +00:00
GetOptions(
'h|help' => sub { pod2usage(-exitval => 0, -verbose => 2) },
2019-02-23 07:55:31 +00:00
'version' => sub { print STDOUT "mmdebstrap $VERSION\n"; exit 0; },
2018-09-18 09:20:24 +00:00
'components=s' => \$options->{components},
'variant=s' => \$options->{variant},
2018-09-18 14:48:18 +00:00
'include=s' => \$options->{include},
2018-09-18 09:20:24 +00:00
'architectures=s' => \$options->{architectures},
'mode=s' => \$options->{mode},
'dpkgopt=s@' => \$options->{dpkgopts},
'aptopt=s@' => \$options->{aptopts},
2019-02-20 17:02:32 +00:00
's|silent' => sub { $verbosity_level = 0; },
'q|quiet' => sub { $verbosity_level = 0; },
'v|verbose' => sub { $verbosity_level = 2; },
'd|debug' => sub { $verbosity_level = 3; },
2019-02-23 07:43:15 +00:00
'logfile=s' => \$logfile,
2019-01-07 12:18:59 +00:00
# no-op options so that mmdebstrap can be used with
2018-12-28 04:09:39 +00:00
# sbuild-createchroot --debootstrap=mmdebstrap
2019-02-15 11:42:46 +00:00
'resolve-deps' => sub { push @{$options->{noop}}, 'resolve-deps'; },
'merged-usr' => sub { push @{$options->{noop}}, 'merged-usr'; },
'no-merged-usr' => sub { push @{$options->{noop}}, 'no-merged-usr'; },
2019-09-14 16:25:40 +00:00
'force-check-gpg' => sub { push @{$options->{noop}}, 'force-check-gpg'; },
2019-02-20 12:32:49 +00:00
# hook options are hidden until I'm happy with them
'setup-hook=s@' => \$options->{setup_hook},
'essential-hook=s@' => \$options->{essential_hook},
'customize-hook=s@' => \$options->{customize_hook},
2018-09-18 09:20:24 +00:00
) or pod2usage(-exitval => 2, -verbose => 1);
2019-02-23 07:43:15 +00:00
if (defined($logfile)) {
open(STDERR, '>', $logfile) or error "cannot open $logfile: $!";
}
2019-02-15 11:42:46 +00:00
foreach my $arg (@{$options->{noop}}) {
info "The option --$arg is a no-op. It only exists for compatibility with some debootstrap wrappers.";
}
2018-10-22 15:05:56 +00:00
my @valid_variants = ('extract', 'custom', 'essential', 'apt', 'required',
'minbase', 'buildd', 'important', 'debootstrap', '-', 'standard');
2018-09-23 17:36:07 +00:00
if (none { $_ eq $options->{variant}} @valid_variants) {
2019-01-20 09:39:01 +00:00
error "invalid variant. Choose from " . (join ', ', @valid_variants);
2018-09-18 09:20:24 +00:00
}
# debootstrap and - are an alias for important
2018-09-23 17:36:07 +00:00
if (any { $_ eq $options->{variant} } ('-', 'debootstrap')) {
2018-09-18 09:20:24 +00:00
$options->{variant} = 'important';
}
2018-10-23 16:04:05 +00:00
if ($options->{variant} eq 'essential' and defined $options->{include}) {
2019-01-20 09:39:01 +00:00
error "cannot install extra packages with variant essential because apt is missing";
2018-10-23 16:04:05 +00:00
}
2018-09-18 09:20:24 +00:00
# fakeroot is an alias for fakechroot
if ($options->{mode} eq 'fakeroot') {
$options->{mode} = 'fakechroot';
}
# sudo is an alias for root
if ($options->{mode} eq 'sudo') {
$options->{mode} = 'root';
}
2018-10-22 15:05:56 +00:00
my @valid_modes = ('auto', 'root', 'unshare', 'fakechroot', 'proot',
'chrootless');
2018-09-23 17:36:07 +00:00
if (none { $_ eq $options->{mode} } @valid_modes) {
2019-01-20 09:39:01 +00:00
error "invalid mode. Choose from " . (join ', ', @valid_modes);
2018-09-18 09:20:24 +00:00
}
2019-03-25 13:35:38 +00:00
my $check_fakechroot_running = sub {
# test if we are inside fakechroot already
# We fork a child process because setting FAKECHROOT_DETECT seems to
# be an irreversible operation for fakechroot.
        my $pid = open(my $rfh, '-|') // error "failed to fork(): $!";
if ($pid == 0) {
# with the FAKECHROOT_DETECT environment variable set, any program
            # execution will be replaced with the output "fakechroot [version]"
$ENV{FAKECHROOT_DETECT} = 0;
exec 'echo', 'If fakechroot is running, this will not be printed';
}
my $content = do { local $/; <$rfh> };
waitpid $pid, 0;
my $result = 0;
if ($? == 0 and $content =~ /^fakechroot \d\.\d+$/) {
$result = 1;
}
return $result;
};
2018-10-23 16:04:05 +00:00
# figure out the mode to use or test whether the chosen mode is legal
if ($options->{mode} eq 'auto') {
2019-03-25 13:35:38 +00:00
if (&{$check_fakechroot_running}()) {
# if mmdebstrap is executed inside fakechroot, then we assume the
# user expects fakechroot mode
$options->{mode} = 'fakechroot';
} elsif ($EFFECTIVE_USER_ID == 0) {
# if mmdebstrap is executed as root, we assume the user wants root
# mode
2018-10-23 16:04:05 +00:00
$options->{mode} = 'root';
2018-12-05 07:06:26 +00:00
} elsif (test_unshare(0)) {
2019-03-25 13:35:38 +00:00
# otherwise, unshare mode is our best option if test_unshare()
# succeeds
2018-10-23 16:04:05 +00:00
$options->{mode} = 'unshare';
} elsif (system('fakechroot --version>/dev/null') == 0) {
2019-03-25 13:35:38 +00:00
# the next fallback is fakechroot
# exec ourselves again but within fakechroot
exec 'fakechroot', 'fakeroot', $PROGRAM_NAME, @ARGVORIG;
2019-01-24 11:37:44 +00:00
} elsif (system('proot --version>/dev/null') == 0) {
2019-03-25 13:35:38 +00:00
# and lastly, proot
2019-01-24 11:37:44 +00:00
$options->{mode} = 'proot';
2018-10-23 16:04:05 +00:00
} else {
2019-01-20 09:39:01 +00:00
error "unable to pick chroot mode automatically";
2018-10-23 16:04:05 +00:00
}
2019-01-20 09:39:01 +00:00
info "automatically chosen mode: $options->{mode}";
2018-10-23 16:04:05 +00:00
} elsif ($options->{mode} eq 'root') {
if ($EFFECTIVE_USER_ID != 0) {
2019-01-20 09:39:01 +00:00
error "need to be root";
2018-10-23 16:04:05 +00:00
}
} elsif ($options->{mode} eq 'proot') {
if (system('proot --version>/dev/null') != 0) {
2019-01-20 09:39:01 +00:00
error "need working proot binary";
2018-10-23 16:04:05 +00:00
}
} elsif ($options->{mode} eq 'fakechroot') {
2019-03-25 13:35:38 +00:00
if (&{$check_fakechroot_running}()) {
2018-10-23 16:04:05 +00:00
# fakechroot is already running
} elsif (system('fakechroot --version>/dev/null') != 0) {
2019-01-20 09:39:01 +00:00
error "need working fakechroot binary";
2018-10-23 16:04:05 +00:00
} else {
# exec ourselves again but within fakechroot
exec 'fakechroot', 'fakeroot', $PROGRAM_NAME, @ARGVORIG;
}
} elsif ($options->{mode} eq 'unshare') {
2018-12-05 07:06:26 +00:00
if (!test_unshare(1)) {
2018-10-23 16:04:05 +00:00
my $procfile = '/proc/sys/kernel/unprivileged_userns_clone';
2019-01-20 09:39:01 +00:00
open(my $fh, '<', $procfile) or error "failed to open $procfile: $!";
2018-10-23 16:04:05 +00:00
chomp(my $content = do { local $/; <$fh> });
close($fh);
if ($content ne "1") {
2019-01-20 09:39:01 +00:00
info "/proc/sys/kernel/unprivileged_userns_clone is set to $content";
info "try running: sudo sysctl -w kernel.unprivileged_userns_clone=1";
info "or permanently enable unprivileged usernamespaces by putting the setting into /etc/sysctl.d/";
info "see https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=898446";
2018-10-23 16:04:05 +00:00
}
exit 1;
}
} elsif ($options->{mode} eq 'chrootless') {
# nothing to do
} else {
2019-01-20 09:39:01 +00:00
error "unknown mode: $options->{mode}";
2018-09-18 09:20:24 +00:00
}
my ($nativearch, @foreignarchs) = split /,/, $options->{architectures};
$options->{nativearch} = $nativearch;
$options->{foreignarchs} = \@foreignarchs;
2018-09-21 06:00:06 +00:00
{
2018-10-02 02:11:22 +00:00
# FIXME: autogenerate this list
2018-09-21 06:00:06 +00:00
my $deb2qemu = {
alpha => 'alpha',
amd64 => 'x86_64',
arm => 'arm',
arm64 => 'aarch64',
armel => 'arm',
armhf => 'arm',
hppa => 'hppa',
i386 => 'i386',
m68k => 'm68k',
mips => 'mips',
mips64 => 'mips64',
mips64el => 'mips64el',
mipsel => 'mipsel',
powerpc => 'ppc',
ppc64 => 'ppc64',
ppc64el => 'ppc64le',
riscv64 => 'riscv64',
s390x => 's390x',
sh4 => 'sh4',
sparc => 'sparc',
sparc64 => 'sparc64',
};
chomp (my $hostarch = `dpkg --print-architecture`);
if ($hostarch ne $nativearch) {
2019-09-26 08:14:45 +00:00
my $withemu = 0;
my $noemu = 0;
{
                my $pid = open(my $fh, '-|') // error "failed to fork(): $!";
if ($pid == 0) {
{
no warnings; # don't print a warning if the following fails
exec 'arch-test', $nativearch;
}
# if exec didn't work (for example because the arch-test program is
# missing) prepare for the worst and assume that the architecture
# cannot be executed
print "$nativearch: not supported on this machine/kernel\n";
exit 1;
}
chomp (my $content = do { local $/; <$fh> });
close $fh;
if ($? == 0 and $content eq "$nativearch: ok") {
$withemu = 1;
2018-10-02 02:11:41 +00:00
}
2018-09-21 06:00:06 +00:00
}
2019-09-26 08:14:45 +00:00
{
                my $pid = open(my $fh, '-|') // error "failed to fork(): $!";
if ($pid == 0) {
{
no warnings; # don't print a warning if the following fails
exec 'arch-test', '-n', $nativearch;
}
# if exec didn't work (for example because the arch-test program is
# missing) prepare for the worst and assume that the architecture
# cannot be executed
print "$nativearch: not supported on this machine/kernel\n";
exit 1;
2018-09-21 06:00:06 +00:00
}
2019-09-26 08:14:45 +00:00
chomp (my $content = do { local $/; <$fh> });
close $fh;
if ($? == 0 and $content eq "$nativearch: ok") {
$noemu = 1;
}
}
# four different outcomes, depending on whether arch-test
# succeeded with or without emulation
#
# withemu | noemu |
# --------+-------+-----------------
# 0 | 0 | test why emu doesn't work and quit
# 0 | 1 | should never happen
# 1 | 0 | use qemu emulation
# 1 | 1 | don't use qemu emulation
if ($withemu == 0 and $noemu == 0) {
2018-12-28 04:39:14 +00:00
{
2019-01-20 09:39:01 +00:00
open my $fh, '<', '/proc/filesystems' or error "failed to open /proc/filesystems: $!";
2018-12-28 04:39:14 +00:00
unless (grep /^nodev\tbinfmt_misc$/, (<$fh>)) {
2019-01-20 09:39:01 +00:00
error "binfmt_misc not found in /proc/filesystems -- is the module loaded?";
2018-12-28 04:39:14 +00:00
}
close $fh;
}
{
2019-01-20 09:39:01 +00:00
open my $fh, '<', '/proc/mounts' or error "failed to open /proc/mounts: $!";
2018-12-28 04:39:14 +00:00
unless (grep /^binfmt_misc \/proc\/sys\/fs\/binfmt_misc binfmt_misc/, (<$fh>)) {
2019-01-20 09:39:01 +00:00
error "binfmt_misc not found in /proc/mounts -- not mounted?";
2018-12-28 04:39:14 +00:00
}
close $fh;
}
{
2019-01-20 09:39:01 +00:00
                    open(my $fh, '-|', '/usr/sbin/update-binfmts', '--display', "qemu-$options->{qemu}") // error "failed to fork(): $!";
2018-12-28 04:39:14 +00:00
chomp (my $binfmts = do { local $/; <$fh> });
close $fh;
if ($binfmts eq '') {
2019-01-20 09:39:01 +00:00
error "qemu-$options->{qemu} is not a supported binfmt name";
2018-12-28 04:39:14 +00:00
}
}
2019-09-26 08:14:45 +00:00
error "qemu emulation of $nativearch failed for an unknown reason";
} elsif ($withemu == 0 and $noemu == 1) {
error "arch-test succeeded without emu but not with emu";
} elsif ($withemu == 1 and $noemu == 0) {
info "$nativearch cannot be executed, falling back to qemu-user";
if (!exists $deb2qemu->{$nativearch}) {
error "no mapping from $nativearch to qemu-user binary";
}
$options->{qemu} = $deb2qemu->{$nativearch};
} elsif ($withemu == 1 and $noemu == 1) {
info "$nativearch is different from $hostarch but can be executed natively";
2018-09-21 06:00:06 +00:00
} else {
2019-09-26 08:14:45 +00:00
error "logic error";
2018-09-21 06:00:06 +00:00
}
} else {
2019-01-20 09:39:01 +00:00
info "chroot architecture $nativearch is equal to the host's architecture";
2018-09-18 09:20:24 +00:00
}
}
2018-10-02 08:09:22 +00:00
{
my $suite;
2018-09-18 09:20:24 +00:00
if (scalar @ARGV > 0) {
2018-10-02 08:09:22 +00:00
$suite = shift @ARGV;
if (scalar @ARGV > 0) {
$options->{target} = shift @ARGV;
} else {
$options->{target} = '-';
}
2018-09-18 09:20:24 +00:00
} else {
2019-01-20 09:39:01 +00:00
info "No SUITE specified, expecting sources.list on standard input";
2018-09-18 09:20:24 +00:00
$options->{target} = '-';
}
2018-10-02 08:09:22 +00:00
my $sourceslist = '';
if (! defined $suite) {
# If no suite was specified, then the whole sources.list has to
# come from standard input
2019-08-27 22:53:04 +00:00
info "Reading sources.list from standard input...";
$sourceslist = do { local $/; <STDIN> };
2018-10-02 08:09:22 +00:00
} else {
if (scalar @ARGV > 0) {
for my $arg (@ARGV) {
if ($arg eq '-') {
2019-08-27 22:53:04 +00:00
info "Reading sources.list from standard input...";
$sourceslist .= do { local $/; <STDIN> };
2018-10-02 08:09:22 +00:00
} elsif ($arg =~ /^deb(-src)? /) {
$sourceslist .= "$arg\n";
} elsif ($arg =~ /:\/\//) {
2018-11-04 19:38:55 +00:00
$sourceslist .= "deb $arg $suite $options->{components}\n";
2018-10-02 08:09:22 +00:00
} elsif (-f $arg) {
2019-01-20 09:39:01 +00:00
open my $fh, '<', $arg or error "cannot open $arg: $!";
2018-10-02 08:09:22 +00:00
while (my $line = <$fh>) {
$sourceslist .= $line;
}
close $fh;
} else {
2019-01-20 09:39:01 +00:00
error "invalid mirror: $arg";
2018-10-02 08:09:22 +00:00
}
}
} else {
2019-09-10 10:46:49 +00:00
my @debstable = ('stable', 'oldstable', 'stretch', 'buster');
my @ubuntustable = ('trusty', 'xenial', 'zesty', 'artful', 'bionic', 'cosmic');
my @tanglustable = ('aequorea', 'bartholomea', 'chromodoris', 'dasyatis');
my @kali = ('kali-dev', 'kali-rolling', 'kali-bleeding-edge');
my $mirror = 'http://deb.debian.org/debian';
my $secmirror = 'http://security.debian.org/debian-security';
if (any {$_ eq $suite} @ubuntustable) {
if (any {$_ eq $options->{nativearch}} ('amd64', 'i386')) {
$mirror = 'http://archive.ubuntu.com/ubuntu';
$secmirror = 'http://archive.ubuntu.com/ubuntu';
} else {
$mirror = 'http://ports.ubuntu.com/ubuntu-ports';
$secmirror = 'http://ports.ubuntu.com/ubuntu-ports';
}
} elsif (any {$_ eq $suite} @tanglustable) {
$mirror = 'http://archive.tanglu.org/tanglu'
} elsif (any {$_ eq $suite} @kali) {
$mirror = 'https://http.kali.org/kali'
}
$sourceslist .= "deb $mirror $suite $options->{components}\n";
if (any {$_ eq $suite} @ubuntustable) {
$sourceslist .= "deb $mirror $suite-updates $options->{components}\n";
$sourceslist .= "deb $secmirror $suite-updates $options->{components}\n";
} elsif (any {$_ eq $suite} @tanglustable) {
$sourceslist .= "deb $secmirror $suite-updates $options->{components}\n";
} elsif (any {$_ eq $suite} @debstable) {
$sourceslist .= "deb $mirror $suite-updates $options->{components}\n";
if (any {$_ eq $suite} ('stable', 'oldstable', 'stretch', 'buster')) {
$sourceslist .= "deb $secmirror $suite/updates $options->{components}\n";
} else {
# starting from bullseye use
# https://lists.debian.org/87r26wqr2a.fsf@43-1.org
$sourceslist .= "deb $secmirror $suite-security $options->{components}\n";
}
2018-10-02 08:09:22 +00:00
}
}
}
if ($sourceslist eq '') {
2019-01-20 09:39:01 +00:00
error "empty apt sources.list";
2018-10-02 08:09:22 +00:00
}
$options->{sourceslist} = $sourceslist;
2018-09-18 09:20:24 +00:00
}
if ($options->{target} ne '-') {
2018-09-18 14:55:25 +00:00
my $abs_path = abs_path($options->{target});
if (!defined $abs_path) {
2019-01-20 09:39:01 +00:00
error "unable to get absolute path of target directory $options->{target}";
2018-09-18 14:55:25 +00:00
}
$options->{target} = $abs_path;
2018-09-18 09:20:24 +00:00
}
if ($options->{target} eq '/') {
2019-01-20 09:39:01 +00:00
error "refusing to use the filesystem root as output directory";
2018-09-18 09:20:24 +00:00
}
2019-01-20 09:41:29 +00:00
my $tar_compressor = get_tar_compressor($options->{target});
2018-09-18 09:20:24 +00:00
# figure out whether a tarball has to be created in the end
$options->{maketar} = 0;
2019-01-20 09:41:29 +00:00
if (defined $tar_compressor or $options->{target} =~ /\.tar$/ or $options->{target} eq '-') {
2018-09-18 09:20:24 +00:00
$options->{maketar} = 1;
2018-10-22 15:05:56 +00:00
if (any { $_ eq $options->{variant} } ('extract', 'custom') and $options->{mode} eq 'fakechroot') {
2019-01-20 09:39:01 +00:00
info "creating a tarball in fakechroot mode might fail in extract and custom variants because there might be no tar inside the chroot";
2018-10-22 15:05:56 +00:00
}
2018-12-28 04:09:08 +00:00
# try to fail early if target tarball cannot be opened for writing
if ($options->{target} ne '-') {
2019-01-20 09:39:01 +00:00
open my $fh, '>', $options->{target} or error "cannot open $options->{target} for writing: $!";
2018-12-28 04:09:08 +00:00
close $fh;
}
2019-01-20 09:41:29 +00:00
# check if the compressor is installed
if (defined $tar_compressor) {
my $pid = fork();
if ($pid == 0) {
open(STDOUT, '>', '/dev/null') or error "cannot open /dev/null for writing: $!";
open(STDIN, '<', '/dev/null') or error "cannot open /dev/null for reading: $!";
exec $tar_compressor or error "cannot exec $tar_compressor: $!";
}
waitpid $pid, 0;
if ($? != 0) {
error "failed to start $tar_compressor";
}
}
2018-09-18 09:20:24 +00:00
}
if ($options->{maketar}) {
# since the output is a tarball, we create the rootfs in a temporary
# directory
2019-01-14 21:24:33 +00:00
$options->{root} = tempdir(
'mmdebstrap.XXXXXXXXXX',
DIR => File::Spec->tmpdir
);
2019-02-23 07:50:02 +00:00
info "using $options->{root} as tempdir";
2018-09-21 16:57:57 +00:00
# in unshare and root mode, other users than the current user need to
# access the rootfs, most prominently, the _apt user. Thus, make the
# temporary directory world readable.
2018-09-23 17:36:07 +00:00
if (any { $_ eq $options->{mode} } ('unshare', 'root')) {
2019-01-20 09:39:01 +00:00
chmod 0755, $options->{root} or error "cannot chmod root: $!";
2018-09-21 16:57:57 +00:00
}
2018-09-18 09:20:24 +00:00
} else {
# user does not seem to have specified a tarball as output, thus work
# directly in the supplied directory
$options->{root} = $options->{target};
if (-e $options->{root}) {
if (!-d $options->{root}) {
2019-01-20 09:39:01 +00:00
error "$options->{root} exists and is not a directory";
2018-09-18 09:20:24 +00:00
}
2018-11-20 07:34:13 +00:00
# check if the directory is empty or contains nothing more than an
# empty lost+found directory. The latter exists on freshly created
# ext3 and ext4 partitions.
2018-09-18 09:20:24 +00:00
# rationale for requiring an empty directory: https://bugs.debian.org/833525
2019-01-20 09:39:01 +00:00
opendir(my $dh, $options->{root}) or error "Can't opendir($options->{root}): $!";
2018-11-20 07:34:13 +00:00
while (my $entry = readdir $dh) {
# skip the "." and ".." entries
next if $entry eq ".";
next if $entry eq "..";
# if the entry is a directory named "lost+found" then skip it
# if it's empty
if ($entry eq "lost+found" and -d "$options->{root}/$entry") {
                opendir(my $dh2, "$options->{root}/$entry") or error "Can't opendir($options->{root}/$entry): $!";
# Attempt reading the directory thrice. If the third time
# succeeds, then it has more entries than just "." and ".."
# and must thus not be empty.
readdir $dh2;
readdir $dh2;
if (readdir $dh2) {
2019-01-20 09:39:01 +00:00
error "$options->{root} contains a non-empty lost+found directory";
2018-11-20 07:34:13 +00:00
}
closedir($dh2);
} else {
2019-01-20 09:39:01 +00:00
error "$options->{root} is not empty";
2018-11-20 07:34:13 +00:00
}
}
closedir($dh);
2018-09-18 09:20:24 +00:00
} else {
2019-01-20 09:39:01 +00:00
make_path($options->{root}) or error "cannot create root: $!";
2018-09-18 09:20:24 +00:00
}
}
2018-10-22 13:05:19 +00:00
# check for double quotes because apt doesn't allow to escape them and
# thus paths with double quotes are invalid in the apt config
if ($options->{root} =~ /"/) {
2019-01-20 09:39:01 +00:00
error "apt cannot handle paths with double quotes";
2018-10-22 13:05:19 +00:00
}
2018-09-18 09:20:24 +00:00
my @idmap;
# for unshare mode the rootfs directory has to have appropriate
# permissions
if ($options->{mode} eq 'unshare') {
@idmap = read_subuid_subgid;
# sanity check
if (scalar(@idmap) != 2 || $idmap[0][0] ne 'u' || $idmap[1][0] ne 'g') {
2019-01-20 09:39:01 +00:00
error "invalid idmap";
2018-09-18 09:20:24 +00:00
}
my $outer_gid = $REAL_GROUP_ID+0;
my $pid = get_unshare_cmd { chown 1, 1, $options->{root} }
[
['u', '0', $REAL_USER_ID, '1'],
['g', '0', $outer_gid, '1'],
['u', '1', $idmap[0][2], '1'],
['g', '1', $idmap[1][2], '1']];
waitpid $pid, 0;
2019-01-20 09:39:01 +00:00
$? == 0 or error "chown failed";
2018-09-18 09:20:24 +00:00
}
# figure out whether we have mknod
$options->{havemknod} = 0;
if ($options->{mode} eq 'unshare') {
my $pid = get_unshare_cmd {
$options->{havemknod} = havemknod($options->{root});
} \@idmap;
waitpid $pid, 0;
2019-01-20 09:39:01 +00:00
$? == 0 or error "havemknod failed";
2018-10-22 15:05:56 +00:00
} elsif (any { $_ eq $options->{mode} } ('root', 'fakechroot', 'proot', 'chrootless')) {
2018-09-18 09:20:24 +00:00
$options->{havemknod} = havemknod($options->{root});
2018-10-22 12:44:45 +00:00
} else {
2019-01-20 09:39:01 +00:00
error "unknown mode: $options->{mode}";
2018-09-18 09:20:24 +00:00
}
my $devtar = '';
2018-10-23 13:19:01 +00:00
# We always craft the /dev entries ourselves if a tarball is to be created
if ($options->{maketar}) {
2018-09-18 09:20:24 +00:00
foreach my $file (@devfiles) {
my ($fname, $mode, $type, $linkname, $devmajor, $devminor) = @{$file};
my $entry = pack('a100 a8 a8 a8 a12 a12 A8 a1 a100 a8 a32 a32 a8 a8 a155 x12',
$fname,
sprintf('%07o', $mode),
sprintf('%07o', 0), # uid
sprintf('%07o', 0), # gid
sprintf('%011o', 0), # size
sprintf('%011o', $mtime),
'', # checksum
$type,
$linkname,
"ustar ",
'', # username
'', # groupname
defined($devmajor) ? sprintf('%07o', $devmajor) : '',
defined($devminor) ? sprintf('%07o', $devminor) : '',
'', # prefix
);
# compute and insert checksum
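            # (per the ustar format, the checksum is computed while the
            # checksum field itself contains spaces, which is what the A8
            # template above filled it with)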
substr($entry,148,7) = sprintf("%06o\0", unpack("%16C*",$entry));
$devtar .= $entry;
}
}
2018-09-21 17:06:47 +00:00
my $exitstatus = 0;
2018-10-23 13:19:01 +00:00
my @taropts = ('--sort=name', "--mtime=\@$mtime", '--clamp-mtime', '--numeric-owner', '--one-file-system', '-c', '--exclude=./dev');
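    # ./dev is excluded because the device nodes were crafted by hand as
    # $devtar above; --mtime together with --clamp-mtime clamps timestamps
    # newer than $mtime (SOURCE_DATE_EPOCH if set) for reproducible output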
2019-01-13 09:17:46 +00:00
# disable signals so that we can fork and change behaviour of the signal
# handler in the parent and child without getting interrupted
my $sigset = POSIX::SigSet->new(SIGINT, SIGHUP, SIGPIPE, SIGTERM);
2019-01-20 09:39:01 +00:00
POSIX::sigprocmask(SIG_BLOCK, $sigset) or error "Can't block signals: $!";
2019-01-13 09:17:46 +00:00
2018-09-18 09:20:24 +00:00
my $pid;
pipe my $rfh, my $wfh;
if ($options->{mode} eq 'unshare') {
$pid = get_unshare_cmd {
2019-01-13 09:17:46 +00:00
# child
$SIG{'INT'} = 'DEFAULT';
$SIG{'HUP'} = 'DEFAULT';
$SIG{'PIPE'} = 'DEFAULT';
$SIG{'TERM'} = 'DEFAULT';
# unblock all delayed signals (and possibly handle them)
2019-01-20 09:39:01 +00:00
POSIX::sigprocmask(SIG_UNBLOCK, $sigset) or error "Can't unblock signals: $!";
2019-01-13 09:17:46 +00:00
2018-09-18 09:20:24 +00:00
close $rfh;
open(STDOUT, '>&', STDERR);
setup($options);
if ($options->{maketar}) {
2019-01-20 09:39:01 +00:00
info "creating tarball...";
2018-09-23 20:27:49 +00:00
2018-09-18 09:20:24 +00:00
# redirect tar output to the writing end of the pipe so that the
# parent process can capture the output
open(STDOUT, '>&', $wfh);
# Add ./dev as the first entries of the tar file.
# We cannot add them after calling tar, because there is no way to
# prevent tar from writing NULL entries at the end.
print $devtar;
# pack everything except ./dev
2019-01-20 09:39:01 +00:00
0 == system('tar', @taropts, '-C', $options->{root}, '.') or error "tar failed: $?";
2018-09-18 09:20:24 +00:00
2019-01-20 09:39:01 +00:00
info "done";
2018-09-18 09:20:24 +00:00
}
exit 0;
} \@idmap;
2018-10-22 15:05:56 +00:00
} elsif (any { $_ eq $options->{mode} } ('root', 'fakechroot', 'proot', 'chrootless')) {
2019-01-20 09:39:01 +00:00
$pid = fork() // error "fork() failed: $!";
2018-09-18 09:20:24 +00:00
if ($pid == 0) {
2019-01-13 09:17:46 +00:00
$SIG{'INT'} = 'DEFAULT';
$SIG{'HUP'} = 'DEFAULT';
$SIG{'PIPE'} = 'DEFAULT';
$SIG{'TERM'} = 'DEFAULT';
# unblock all delayed signals (and possibly handle them)
2019-01-20 09:39:01 +00:00
POSIX::sigprocmask(SIG_UNBLOCK, $sigset) or error "Can't unblock signals: $!";
2019-01-13 09:17:46 +00:00
2018-09-18 09:20:24 +00:00
close $rfh;
open(STDOUT, '>&', STDERR);
setup($options);
if ($options->{maketar}) {
2019-01-20 09:39:01 +00:00
info "creating tarball...";
2018-09-23 20:27:49 +00:00
2018-09-18 09:20:24 +00:00
# redirect tar output to the writing end of the pipe so that the
# parent process can capture the output
open(STDOUT, '>&', $wfh);
# Add ./dev as the first entries of the tar file.
# We cannot add them after calling tar, because there is no way to
# prevent tar from writing NULL entries at the end.
print $devtar;
if ($options->{mode} eq 'fakechroot') {
# Fakechroot requires tar to run inside the chroot or
# otherwise absolute symlinks will include the path to the
# root directory
2019-01-20 09:39:01 +00:00
0 == system('/usr/sbin/chroot', $options->{root}, 'tar', @taropts, '-C', '/', '.') or error "tar failed: $?";
2018-09-18 09:20:24 +00:00
} elsif ($options->{mode} eq 'proot') {
# proot requires tar to run inside proot or otherwise
# permissions will be completely off
2018-10-23 13:36:58 +00:00
my @qemuopt = ();
if (defined $options->{qemu}) {
push @qemuopt, "--qemu=qemu-$options->{qemu}";
2018-11-04 19:39:11 +00:00
push @taropts, "--exclude=./host-rootfs"
2018-10-23 13:36:58 +00:00
}
2019-01-20 09:39:01 +00:00
0 == system('proot', '--root-id', "--rootfs=$options->{root}", '--cwd=/', @qemuopt, 'tar', @taropts, '-C', '/', '.') or error "tar failed: $?";
2018-10-22 15:05:56 +00:00
} elsif (any { $_ eq $options->{mode} } ('root', 'chrootless')) {
2019-09-04 11:50:25 +00:00
# If the chroot directory is not owned by the root user,
# then we assume that no measure was taken to fake root
# permissions. Since the final tarball should contain
# entries with root ownership, we instruct tar to do so.
my @owneropts = ();
if ((stat $options->{root})[4] != 0) {
push @owneropts, '--owner=0', '--group=0', '--numeric-owner';
}
0 == system('tar', @taropts, @owneropts, '-C', $options->{root}, '.') or error "tar failed: $?";
2018-10-22 12:44:45 +00:00
} else {
2019-01-20 09:39:01 +00:00
error "unknown mode: $options->{mode}";
2018-09-18 09:20:24 +00:00
}
2018-09-23 20:27:49 +00:00
2019-01-20 09:39:01 +00:00
info "done";
2018-09-18 09:20:24 +00:00
}
exit 0;
}
2018-10-22 12:44:45 +00:00
} else {
2019-01-20 09:39:01 +00:00
error "unknown mode: $options->{mode}";
2018-09-18 09:20:24 +00:00
}
2019-01-13 09:17:46 +00:00
# parent
my $got_signal = 0;
my $waiting_for = "setup";
my $ignore = sub {
$got_signal = shift;
2019-01-20 09:39:01 +00:00
info "main() received signal $got_signal: waiting for $waiting_for...";
2019-01-13 09:17:46 +00:00
};
$SIG{'INT'} = $ignore;
$SIG{'HUP'} = $ignore;
$SIG{'PIPE'} = $ignore;
$SIG{'TERM'} = $ignore;
# unblock all delayed signals (and possibly handle them)
2019-01-20 09:39:01 +00:00
POSIX::sigprocmask(SIG_UNBLOCK, $sigset) or error "Can't unblock signals: $!";
2019-01-13 09:17:46 +00:00
2018-09-18 09:20:24 +00:00
close $wfh;
2018-12-28 04:09:08 +00:00
2018-09-18 09:20:24 +00:00
if ($options->{maketar}) {
2019-01-20 09:41:29 +00:00
# we use eval() so that error() doesn't take this process down and
# thus leaves the setup() process without a parent
eval {
if ($options->{target} eq '-') {
if (!copy($rfh, *STDOUT)) {
error "cannot copy to standard output: $!";
}
} else {
if (defined $tar_compressor) {
POSIX::sigprocmask(SIG_BLOCK, $sigset) or error "Can't block signals: $!";
my $cpid = fork();
if ($cpid == 0) {
# child: default signal handlers
$SIG{'INT'} = 'DEFAULT';
$SIG{'HUP'} = 'DEFAULT';
$SIG{'PIPE'} = 'DEFAULT';
$SIG{'TERM'} = 'DEFAULT';
# unblock all delayed signals (and possibly handle them)
POSIX::sigprocmask(SIG_UNBLOCK, $sigset) or error "Can't unblock signals: $!";
open(STDOUT, '>', $options->{target}) or error "cannot open $options->{target} for writing: $!";
open(STDIN, '<&', $rfh) or error "cannot open file handle for reading: $!";
exec $tar_compressor or error "cannot exec $tar_compressor: $!";
}
POSIX::sigprocmask(SIG_UNBLOCK, $sigset) or error "Can't unblock signals: $!";
waitpid $cpid, 0;
if ($? != 0) {
error "failed to start $tar_compressor";
}
} else {
if(!copy($rfh, $options->{target})) {
error "cannot copy to $options->{target}: $!";
}
}
2018-12-28 04:09:08 +00:00
}
2019-01-20 09:41:29 +00:00
};
if ($@) {
            # we cannot die here because that would leave the setup() process
            # running without a parent
            warning "creating tarball failed: $@";
$exitstatus = 1;
2018-09-18 09:20:24 +00:00
}
}
close($rfh);
waitpid $pid, 0;
2018-09-21 17:06:47 +00:00
if ($? != 0) {
$exitstatus = 1;
}
2018-09-18 09:20:24 +00:00
2019-01-13 09:17:46 +00:00
# change signal handler message
$waiting_for = "cleanup";
2018-09-18 09:20:24 +00:00
if ($options->{maketar} and -e $options->{root}) {
2019-02-23 07:50:02 +00:00
info "removing tempdir $options->{root}...";
2018-09-18 09:20:24 +00:00
if ($options->{mode} eq 'unshare') {
# We don't have permissions to remove the directory outside
# the unshared namespace, so we remove it here.
# Since this is still inside the unshared namespace, there is
# no risk of removing anything important.
$pid = get_unshare_cmd {
2019-04-25 06:56:42 +00:00
# File::Path will produce the error "cannot stat initial
# working directory" if the working directory cannot be
# accessed by the unprivileged unshared user. Thus, we first
# navigate to the parent of the root directory.
chdir "$options->{root}/.." or error "unable to chdir() to parent directory of $options->{root}: $!";
2018-09-18 09:20:24 +00:00
remove_tree($options->{root}, {error => \my $err});
if (@$err) {
for my $diag (@$err) {
my ($file, $message) = %$diag;
if ($file eq '') {
2019-01-20 09:39:01 +00:00
warning "general error: $message";
2018-09-18 09:20:24 +00:00
}
else {
2019-01-20 09:39:01 +00:00
warning "problem unlinking $file: $message";
2018-09-18 09:20:24 +00:00
}
}
}
} \@idmap;
waitpid $pid, 0;
2019-01-20 09:39:01 +00:00
$? == 0 or error "remove_tree failed";
2018-10-22 15:05:56 +00:00
} elsif (any { $_ eq $options->{mode} } ('root', 'fakechroot', 'proot', 'chrootless')) {
2018-09-18 09:20:24 +00:00
# without unshare, we use the system's rm to recursively remove the
# temporary directory just to make sure that we do not accidentally
# remove more than we should by using --one-file-system.
2018-12-30 16:18:27 +00:00
#
# --interactive=never is needed when in proot mode, the
# write-protected file /apt/apt.conf.d/01autoremove-kernels is to
# be removed.
2019-01-20 09:39:01 +00:00
            0 == system('rm', '--interactive=never', '--recursive', '--preserve-root', '--one-file-system', $options->{root}) or error "rm failed: $?";
2018-10-22 12:44:45 +00:00
} else {
2019-01-20 09:39:01 +00:00
error "unknown mode: $options->{mode}";
2018-09-18 09:20:24 +00:00
}
}
2018-09-21 17:06:47 +00:00
2019-01-13 09:17:46 +00:00
if ($got_signal) {
$exitstatus = 1;
}
2018-09-21 17:06:47 +00:00
exit $exitstatus;
2018-09-18 09:20:24 +00:00
}
main();
__END__
=head1 NAME
mmdebstrap - multi-mirror Debian chroot creation
=head1 SYNOPSIS
B<mmdebstrap> [B<OPTION...>] [I<SUITE> [I<TARGET> [I<MIRROR>...]]]
=head1 DESCRIPTION
B<mmdebstrap> creates a Debian chroot of I<SUITE> into I<TARGET> from one or
more I<MIRROR>s. It is meant as an alternative to the debootstrap tool (see
section B<DEBOOTSTRAP>). In contrast to debootstrap it uses apt to resolve
dependencies and is thus able to use more than one mirror and resolve more
complex dependencies.
2019-08-27 22:53:04 +00:00
If no I<MIRROR> option is provided, L<http://deb.debian.org/debian> is used.
If I<SUITE> is a stable release name and no I<MIRROR> is specified, then
mirrors for updates and security are automatically added. If a I<MIRROR>
option starts with "deb " or "deb-src " then it is used as a one-line-style
format entry for apt's sources.list inside the chroot. If a I<MIRROR> option
contains a "://" then it is interpreted as a mirror URI and the apt line
inside the chroot is assembled as "deb B C D" where B is the I<MIRROR>, C is
the given I<SUITE> and D is the components given via B<--components> (defaults
to "main"). If a I<MIRROR>
option happens to be an existing file, then its contents are pasted into the
chroot's sources.list. This can be used to supply a deb822 style
sources.list. If I<MIRROR> is C<-> then standard input is pasted into the
chroot's sources.list. More than one mirror can be specified and are appended
to the chroot's sources.list in the given order. If any mirror contains a
https URI, then the packages apt-transport-https and ca-certificates will be
installed inside the chroot. If any mirror contains a tor+xxx URI, then the
apt-transport-tor package will be installed inside the chroot.
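
Example: Create an unstable chroot from a single explicitly given mirror (the
target filename is only illustrative)

    mmdebstrap unstable unstable-chroot.tar http://deb.debian.org/debian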
2018-09-18 09:20:24 +00:00
2019-01-20 09:41:29 +00:00
The optional I<TARGET> argument can either be the path to a directory, the
path to a tarball filename or C<->. If I<TARGET> ends with C<.tar>, or with
any of the filename extensions listed in the section B<COMPRESSION>, then
I<TARGET> will be interpreted as a path to a tarball filename. If I<TARGET> is
the path to a tarball filename or if I<TARGET> is C<-> or if no I<TARGET> was
specified, B<mmdebstrap> will create a temporary chroot directory in
C<$TMPDIR> or F</tmp>. If I<TARGET> is the path to a tarball filename,
B<mmdebstrap> will create a tarball of that directory and store it as
I<TARGET>, optionally applying a compression algorithm as indicated by its
filename extension. If I<TARGET> is C<-> or if no I<TARGET> was specified,
then an uncompressed tarball of that directory will be sent to standard
output. If I<TARGET> does not end in C<.tar> or with any of the filename
extensions listed in the section B<COMPRESSION>, then I<TARGET> will be
interpreted as the path to a directory. If the directory already exists, it
must either be empty or only contain an empty C<lost+found> directory. If a
directory is chosen as output in any other mode than B<sudo>, then its
contents will have wrong ownership information and special device files will
be missing.
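
Example: The I<TARGET> argument selects between a directory, a possibly
compressed tarball and standard output (all filenames are only illustrative;
see B<COMPRESSION> for the supported extensions)

    mmdebstrap unstable ./unstable-chroot
    mmdebstrap unstable unstable-chroot.tar.gz
    mmdebstrap unstable - > unstable-chroot.tar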
2018-09-18 09:20:24 +00:00
The I<SUITE> may be a valid release code name (eg, sid, stretch, jessie) or a
symbolic name (eg, unstable, testing, stable, oldstable). Any suite name that
works with apt on the given mirror will work. If no I<SUITE> was specified,
then a single I<MIRROR> C<-> is added and thus the information of the desired
suite has to come from standard input as part of a valid apt sources.list
file.
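
Example: Without a I<SUITE> argument, a complete sources.list is read from
standard input and an uncompressed tarball is written to standard output (the
filenames are only illustrative)

    mmdebstrap < my.sources.list > unstable-chroot.tar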
2019-02-23 07:43:15 +00:00
All status output is printed to standard error unless B<--logfile> is used to
redirect it to a file or B<--quiet> or B<--silent> is used to suppress any
output on standard error. Help and version information will be printed to
standard error with the B<--help> and B<--version> options, respectively.
Otherwise, an uncompressed tarball might be sent to standard output if
I<TARGET> is C<-> or if no I<TARGET> was specified.
2018-09-18 09:20:24 +00:00
=head1 OPTIONS
2019-02-20 17:00:52 +00:00
Options are case insensitive. Short options may be bundled. Long options
require a double dash and may be abbreviated to uniqueness.
2018-09-18 09:20:24 +00:00
=over 8
=item B<-h,--help>
Print this help text and exit.
2019-02-23 07:55:31 +00:00
=item B<--version>
Print the B<mmdebstrap> version and exit.
2019-01-08 10:27:56 +00:00
=item B<--variant>=I<name>
2018-09-18 09:20:24 +00:00
2019-01-08 10:27:56 +00:00
Choose which package set to install. Valid variant I<name>s are B<extract>,
2018-10-22 15:05:56 +00:00
B<custom>, B<essential>, B<apt>, B<required>, B<minbase>, B<buildd>,
B<important>, B<debootstrap>, B<->, and B<standard>. The default variant is
B<important>. See the section B<VARIANTS> for more information.
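
Example: Create a chroot suitable for building Debian packages

    --variant=buildd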
2018-09-18 09:20:24 +00:00
2019-01-08 10:27:56 +00:00
=item B<--mode>=I<name>
2018-09-18 09:20:24 +00:00
Choose how to perform the chroot operation and create a filesystem with
2019-01-08 10:27:56 +00:00
ownership information different from the current user. Valid mode I<name>s are B<auto>,
2018-09-18 09:20:24 +00:00
B<sudo>, B<root>, B<unshare>, B<fakeroot>, B<fakechroot>, B<proot> and
B<chrootless>. The default mode is B<auto>. See the section B<MODES> for more
information.
2019-01-08 10:27:56 +00:00
=item B<--aptopt>=I<option>|I<file>
2018-09-18 09:20:24 +00:00
2019-01-08 10:27:56 +00:00
Pass arbitrary I<option>s to apt. Will be added to
2018-10-22 15:03:39 +00:00
F</etc/apt/apt.conf.d/99mmdebstrap> inside the chroot. Can be specified
2019-01-08 10:27:56 +00:00
multiple times. Each I<option> will be appended to 99mmdebstrap. A semicolon will
2018-10-22 15:03:39 +00:00
be added at the end of the option if necessary. If the command line argument
2019-01-08 10:27:56 +00:00
is an existing I<file>, the content of the file will be appended to 99mmdebstrap
2018-09-18 09:20:24 +00:00
verbatim.
2019-03-25 13:50:41 +00:00
Example: This is necessary for allowing old timestamps from snapshot.debian.org
2018-09-18 09:20:24 +00:00
2018-12-27 13:42:29 +00:00
--aptopt='Acquire::Check-Valid-Until "false"'
2019-03-25 13:50:41 +00:00
Example: Settings controlling download of package description translations
2018-12-27 13:42:29 +00:00
--aptopt='Acquire::Languages { "environment"; "en"; }'
--aptopt='Acquire::Languages "none"'
2019-03-25 13:50:41 +00:00
Example: Enable installing Recommends (by default mmdebstrap doesn't)
2018-12-27 13:42:29 +00:00
--aptopt='Apt::Install-Recommends "true"'
2019-03-25 13:50:41 +00:00
Example: Configure apt-cacher or apt-cacher-ng as an apt proxy
2019-01-07 12:19:38 +00:00
--aptopt='Acquire::http { Proxy "http://127.0.0.1:3142"; }'
2019-03-25 13:50:41 +00:00
Example: For situations in which the apt sandbox user cannot access the chroot
2019-02-28 10:54:03 +00:00
--aptopt='APT::Sandbox::User "root"'
2018-09-18 09:20:24 +00:00
2019-03-25 13:50:41 +00:00
Example: Minimizing the number of packages installed from experimental
--aptopt='APT::Solver "aspcud"'
--aptopt='APT::Solver::aspcud::Preferences "-count(solution,APT-Release:=/a=experimental/),-removed,-changed,-new"'
2019-01-08 10:27:56 +00:00
=item B<--dpkgopt>=I<option>|I<file>
2018-09-18 09:20:24 +00:00
2019-01-08 10:27:56 +00:00
Pass arbitrary I<option>s to dpkg. Will be added to
2018-10-22 15:03:39 +00:00
F</etc/dpkg/dpkg.cfg.d/99mmdebstrap> inside the chroot. Can be specified
2019-01-08 10:27:56 +00:00
multiple times. Each I<option> will be appended to 99mmdebstrap. If the command
line argument is an existing I<file>, the content of the file will be appended to
2018-09-23 17:43:14 +00:00
99mmdebstrap verbatim.
2018-09-18 09:20:24 +00:00
2019-03-25 13:50:41 +00:00
Example: Exclude paths to reduce chroot size
--dpkgopt='path-exclude=/usr/share/man/*'
--dpkgopt='path-include=/usr/share/man/man[1-9]/*'
--dpkgopt='path-exclude=/usr/share/locale/*'
--dpkgopt='path-include=/usr/share/locale/locale.alias'
--dpkgopt='path-exclude=/usr/share/doc/*'
--dpkgopt='path-include=/usr/share/doc/*/copyright'
--dpkgopt='path-include=/usr/share/doc/*/changelog.Debian.*'
2018-09-18 09:20:24 +00:00
2019-01-08 10:27:56 +00:00
=item B<--include>=I<pkg1>[,I<pkg2>,...]
2018-09-18 09:20:24 +00:00
Comma separated list of packages which will be installed in addition to the
2018-10-22 15:05:56 +00:00
packages installed by the specified variant. The direct and indirect hard
dependencies will also be installed. The behaviour of this option depends on
the selected variant. The B<extract> and B<custom> variants install no packages
by default, so for these variants, the packages specified by this option will
be the only ones that get either extracted or installed by dpkg, respectively.
For all other variants, apt is used to install the additional packages. The
B<essential> variant does not include apt and thus, the include option will
only work when the B<chrootless> mode is selected and thus apt from the outside
2019-09-04 13:45:18 +00:00
can be used. Package names are directly passed to apt and thus, you can use apt
features like C<pkg/suite>, C<pkg=version>, C<pkg-> or use a glob or regex for
C<pkg>. See apt(8) for the supported syntax.
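
Example: Install additional packages, selecting one of them from a different
suite (the package names are only illustrative)

    --include=ca-certificates,busybox/unstable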
2018-09-18 09:20:24 +00:00
2019-01-08 10:27:56 +00:00
=item B<--components>=I<comp1>[,I<comp2>,...]
2018-09-18 09:20:24 +00:00
2019-01-08 10:27:56 +00:00
Comma separated list of components like main, contrib and non-free which will
be used for all URI-only I<MIRROR> arguments.
2018-09-18 09:20:24 +00:00
2019-01-08 10:27:56 +00:00
=item B<--architectures>=I<native>[,I<foreign1>,...]
2018-09-18 09:20:24 +00:00
2019-01-08 10:27:56 +00:00
Comma separated list of architectures. The first architecture is the I<native>
2018-09-18 09:20:24 +00:00
architecture inside the chroot. The remaining architectures will be added to
2019-01-08 10:27:56 +00:00
the foreign dpkg architectures. Without this option, the I<native>
architecture of the chroot defaults to the native architecture of the system
2019-02-23 07:49:19 +00:00
running B<mmdebstrap>.
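
Example: Create an amd64 chroot with i386 as an additional foreign
architecture

    --architectures=amd64,i386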
2019-01-08 10:27:56 +00:00
2019-01-08 10:28:27 +00:00
=begin comment

=item B<--setup-hook>=I<command>

Execute arbitrary I<command>s right after initial setup (directory creation,
configuration of apt and dpkg, ...) but before any packages are downloaded or
installed. At that point, the chroot directory does not contain any
executables and thus cannot be chroot-ed into. The option can be specified
multiple times and the commands are executed in the order in which they are
given on the command line. If I<command> is an existing executable file or if
I<command> does not contain any shell metacharacters, then I<command> is
directly exec-ed with the path to the chroot directory passed as the first
argument. Otherwise, I<command> is executed under I<sh> and the chroot
directory can be accessed via I<$1>. All environment variables used by
B<mmdebstrap> (like C<APT_CONFIG>, C<DEBIAN_FRONTEND>, C<LC_ALL> and C<PATH>)
are preserved.

Example: Set up merged-/usr via symlinks

    --setup-hook='for d in bin sbin lib; do ln -s usr/$d "$1/$d"; mkdir -p "$1/usr/$d"; done'

Example: Set up the chroot for installing a sub-essential busybox-based system
with

    --variant=custom --include=dpkg,busybox,libc-bin,base-files,base-passwd,debianutils
    --setup-hook='mkdir -p "$1/bin"'
    --setup-hook='for p in awk cat chmod chown cp diff echo env grep less ln mkdir mount rm rmdir sed sh sleep sort touch uname; do ln -s busybox "$1/bin/$p"; done'
    --setup-hook='echo root:x:0:0:root:/root:/bin/sh > "$1/etc/passwd"'
    --setup-hook='printf "root:x:0:\nmail:x:8:\nutmp:x:43:\n" > "$1/etc/group"'

=item B<--essential-hook>=I<command>

Execute arbitrary I<command>s after the C<Essential:yes> packages have been
installed but before installing the remaining packages. The hook is not
executed for the B<extract> and B<custom> variants. The option can be
specified multiple times and the commands are executed in the order in which
they are given on the command line. If I<command> is an existing executable
file or if I<command> does not contain any shell metacharacters, then
I<command> is directly exec-ed with the path to the chroot directory passed as
the first argument. Otherwise, I<command> is executed under I<sh> and the
chroot directory can be accessed via I<$1>. All environment variables used by
B<mmdebstrap> (like C<APT_CONFIG>, C<DEBIAN_FRONTEND>, C<LC_ALL> and C<PATH>)
are preserved.

Example: Enable unattended upgrades

    --essential-hook='echo unattended-upgrades unattended-upgrades/enable_auto_updates boolean true | chroot "$1" debconf-set-selections'

Example: Select Europe/Berlin as the timezone

    --essential-hook='echo tzdata tzdata/Areas select Europe | chroot "$1" debconf-set-selections'
    --essential-hook='echo tzdata tzdata/Zones/Europe select Berlin | chroot "$1" debconf-set-selections'

=item B<--customize-hook>=I<command>

Execute arbitrary I<command>s after the chroot is set up and all packages have
been installed but before final cleanup actions are carried out. The option
can be specified multiple times and the commands are executed in the order in
which they are given on the command line. If I<command> is an existing
executable file or if I<command> does not contain any shell metacharacters,
then I<command> is directly exec-ed with the path to the chroot directory
passed as the first argument. Otherwise, I<command> is executed under I<sh>
and the chroot directory can be accessed via I<$1>. All environment variables
used by B<mmdebstrap> (like C<APT_CONFIG>, C<DEBIAN_FRONTEND>, C<LC_ALL> and
C<PATH>) are preserved.

Example: Prepare a chroot for use with autopkgtest

    --customize-hook='chroot "$1" passwd --delete root'
    --customize-hook='chroot "$1" useradd --home-dir /home/user --create-home user'
    --customize-hook='chroot "$1" passwd --delete user'
    --customize-hook='echo host > "$1/etc/hostname"'
    --customize-hook='echo "127.0.0.1 localhost host" > "$1/etc/hosts"'
    --customize-hook=/usr/share/autopkgtest/setup-commands/setup-testbed

=end comment

=item B<-q,--quiet>, B<-s,--silent>

Do not write anything to standard error. If used together with B<--verbose> or
B<--debug>, only the last option will take effect.

=item B<-v,--verbose>

Instead of progress bars, write the dpkg and apt output directly to standard
error. If used together with B<--quiet> or B<--debug>, only the last option
will take effect.

=item B<-d,--debug>

In addition to the output produced by B<--verbose>, write detailed debugging
information to standard error. Errors will print a backtrace. If used together
with B<--quiet> or B<--verbose>, only the last option will take effect.

=item B<--logfile>=I<filename>

Instead of writing status information to standard error, write it into the
file given by I<filename>.
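
For example, to collect the status output in a file in the current directory:

    --logfile=mmdebstrap.log
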
=back

=head1 MODES

Creating a Debian chroot requires not only permissions for running chroot but
also the ability to create files owned by the superuser. The selected mode
decides how this is achieved.

=over 8

=item B<auto>

This mode automatically selects a fitting mode. If the effective user id is
that of the superuser, then the B<sudo> mode is chosen. Otherwise, the
B<unshare> mode is picked if the system has the sysctl
C<kernel.unprivileged_userns_clone> set to C<1>. Should that not be the case
and if the fakechroot binary exists, the B<fakechroot> mode is chosen. Lastly,
the B<proot> mode is used if the proot binary exists.

=item B<sudo>, B<root>

This mode directly executes chroot and is the same mode of operation as is
used by debootstrap. It is the only mode that can directly create a directory
chroot with the right permissions. If the chroot directory is not accessible
by the _apt user, then apt sandboxing will be automatically disabled.

=item B<unshare>

This mode uses Linux user namespaces to allow unprivileged use of chroot and
creation of files that appear to be owned by the superuser inside the unshared
namespace. A directory chroot created with this mode will end up with wrong
permissions, so create a tarball instead.
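
For example, assuming the mode is selected with the B<--mode> option and
unprivileged user namespaces are enabled, a tarball can be created without
superuser privileges:

    $ mmdebstrap --mode=unshare unstable unstable-chroot.tar
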
=item B<fakeroot>, B<fakechroot>

This mode will exec B<mmdebstrap> again under C<fakechroot fakeroot>. A
directory chroot created with this mode will end up with wrong permissions,
so create a tarball instead.
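
A sketch of the corresponding invocation (again assuming mode selection via
the B<--mode> option):

    $ mmdebstrap --mode=fakechroot unstable unstable-chroot.tar
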
=item B<proot>

This mode will carry out all calls to chroot with proot instead. Since
permissions are only retained while proot is still running, this will lead to
wrong permissions in the final directory and tarball. This mode is useful if
you plan to use the chroot with proot.

=item B<chrootless>

Uses the dpkg option C<--force-script-chrootless> to install packages into
B<TARGET> without dpkg and apt inside B<TARGET> but using apt and dpkg from
the machine running B<mmdebstrap>. Maintainer scripts are run without
chrooting into B<TARGET> and rely on their dependencies being installed on the
machine running B<mmdebstrap>. Unless B<mmdebstrap> was run inside fakeroot,
the directory created will be owned by the user running B<mmdebstrap>.

=for TODO
=item B<qemu>

=back

=head1 VARIANTS

All package sets also include the direct and indirect hard dependencies (but
not recommends) of the selected package sets. The variants B<minbase>,
B<buildd> and B<-> resemble the package sets that debootstrap would install
with the same I<--variant> argument.

=over 8

=item B<extract>

Installs nothing by default (not even C<Essential:yes> packages). Packages
given by the C<--include> option are extracted but will not be installed.
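
For example, a sketch that merely extracts C<busybox> (and whatever apt
selects alongside it) into a tarball without installing or configuring
anything:

    $ mmdebstrap --variant=extract --include=busybox unstable busybox.tar
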
=item B<custom>

Installs nothing by default (not even C<Essential:yes> packages). Packages
given by the C<--include> option will be installed. If a mode other than
B<chrootless> was selected and dpkg was not part of the included package set,
then this variant will fail because it cannot configure the packages.

=item B<essential>

C<Essential:yes> packages.

=item B<apt>

The B<essential> set plus apt.

=item B<required>, B<minbase>

The B<essential> set plus all packages with Priority:required and apt.

=item B<buildd>

The B<minbase> set plus build-essential.
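
For example, a chroot tarball suitable as a starting point for building Debian
packages could be created with:

    $ mmdebstrap --variant=buildd unstable buildd-chroot.tar
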
=item B<important>, B<debootstrap>, B<->

The B<required> set plus all packages with Priority:important. This is the
default of debootstrap.

=item B<standard>

The B<important> set plus all packages with Priority:standard.

=back

=head1 EXAMPLES

Use like debootstrap:

    $ sudo mmdebstrap unstable ./unstable-chroot

Without superuser privileges:

    $ mmdebstrap unstable unstable-chroot.tar

With no command line arguments at all, the chroot content is entirely defined
by a sources.list file on standard input:

    $ mmdebstrap < /etc/apt/sources.list > unstable-chroot.tar

Drop locales (but not the symlink to the locale name alias database),
translated man pages (but not the untranslated ones), and documentation (but
not copyright files and Debian changelogs):

    $ mmdebstrap --variant=essential \
        --dpkgopt='path-exclude=/usr/share/man/*' \
        --dpkgopt='path-include=/usr/share/man/man[1-9]/*' \
        --dpkgopt='path-exclude=/usr/share/locale/*' \
        --dpkgopt='path-include=/usr/share/locale/locale.alias' \
        --dpkgopt='path-exclude=/usr/share/doc/*' \
        --dpkgopt='path-include=/usr/share/doc/*/copyright' \
        --dpkgopt='path-include=/usr/share/doc/*/changelog.Debian.*' \
        unstable debian-unstable.tar

Use as debootstrap replacement in sbuild-createchroot:

    $ sbuild-createchroot --debootstrap=mmdebstrap \
        --make-sbuild-tarball ~/.cache/sbuild/unstable-amd64.tar.gz \
        unstable $(mktemp -d)

=begin comment

Use as replacement for autopkgtest-build-qemu and vmdb2:

    $ mmdebstrap --variant=important --include=linux-image-amd64 \
        --customize-hook='chroot "$1" passwd --delete root' \
        --customize-hook='chroot "$1" useradd --home-dir /home/user --create-home user' \
        --customize-hook='chroot "$1" passwd --delete user' \
        --customize-hook='echo host > "$1/etc/hostname"' \
        --customize-hook='echo "127.0.0.1 localhost host" > "$1/etc/hosts"' \
        --customize-hook=/usr/share/autopkgtest/setup-commands/setup-testbed \
unstable debian-unstable.tar
$ cat << END > extlinux.conf
> default linux
> timeout 0
>
> label linux
> kernel /vmlinuz
> append initrd=/initrd.img root=/dev/vda1 rw console=ttyS0
END
$ guestfish -N debian-unstable.img=disk:2G -- \
part-disk /dev/sda mbr : \
part-set-bootable /dev/sda 1 true : \
mkfs ext2 /dev/sda1 : mount /dev/sda1 / : \
tar-in debian-unstable.tar / : \
extlinux / : \
copy-in extlinux.conf /
    $ qemu-img convert -O qcow2 debian-unstable.img debian-unstable.qcow2

=end comment

=head1 ENVIRONMENT VARIABLES

By setting C<SOURCE_DATE_EPOCH> the result will be reproducible over multiple
runs with the same options and mirror content.
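
For example, the following sketch should produce two identical tarballs,
provided that the mirror content does not change between the two runs:

    $ export SOURCE_DATE_EPOCH=$(date +%s)
    $ mmdebstrap unstable chroot1.tar
    $ mmdebstrap unstable chroot2.tar
    $ cmp chroot1.tar chroot2.tar
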
=head1 DEBOOTSTRAP

This section lists some differences from debootstrap.

=over 8

=item * More than one mirror possible

=item * Default mirrors for stable releases include the updates and security mirrors

=item * Multiple ways to operate as non-root: fakechroot, proot, unshare

=item * 3-6 times faster

=item * Can create a chroot with only C<Essential:yes> packages and their dependencies

=item * Reproducible output by default if C<SOURCE_DATE_EPOCH> is set

=item * Can create output on filesystems with nodev set

=item * apt cache and lists are cleaned at the end

=item * Foreign architecture chroots using qemu-user

=back

Limitations in comparison to debootstrap:

=over 8

=item * Only runs on systems with apt installed

=item * No I<SCRIPT> argument

=item * No I<--second-stage> and I<--exclude> options

=item * No-op options I<--resolve-deps>, I<--force-check-gpg>, I<--merged-usr> and I<--no-merged-usr>

=back

=head1 COMPRESSION

B<mmdebstrap> will choose a suitable compressor for the output tarball
depending on the filename extension. The following mapping from filename
extension to compressor applies:

    extension  compressor
    ---------------------
    .tar       none
    .gz        gzip
    .tgz       gzip
    .taz       gzip
    .Z         compress
    .taZ       compress
    .bz2       bzip2
    .tbz       bzip2
    .tbz2      bzip2
    .tz2       bzip2
    .lz        lzip
    .lzma      lzma
    .tlz       lzma
    .lzo       lzop
    .lz4       lz4
    .xz        xz
    .txz       xz
    .zst       zstd
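
For example, naming the output with an C<.xz> extension is enough to obtain an
xz-compressed tarball:

    $ mmdebstrap unstable unstable-chroot.tar.xz
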
=head1 BUGS

As of version 1.19.5, dpkg provides no facility to prevent it from reading the
dpkg configuration of the machine running B<mmdebstrap>. Therefore, until this
dpkg limitation is fixed, a default dpkg configuration is recommended on
machines running B<mmdebstrap>.

=head1 SEE ALSO

debootstrap(8)

=cut