#!/usr/bin/perl
#
# © 2018 - 2023 Johannes Schauer Marin Rodrigues <josch@mister-muffin.de>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# The software is provided "as is", without warranty of any kind, express or
# implied, including but not limited to the warranties of merchantability,
# fitness for a particular purpose and noninfringement. In no event shall the
# authors or copyright holders be liable for any claim, damages or other
# liability, whether in an action of contract, tort or otherwise, arising
# from, out of or in connection with the software or the use or other dealings
# in the software.
use strict;
use warnings;
our $VERSION = '1.5.0';
use English;
use Getopt::Long;
use Pod::Usage;
use File::Copy;
use File::Path qw(make_path);
use File::Temp qw(tempfile tempdir);
use File::Basename;
use File::Find;
use Cwd qw(abs_path getcwd);
require "syscall.ph"; ## no critic (Modules::RequireBarewordIncludes)
require "sys/ioctl.ph"; ## no critic (Modules::RequireBarewordIncludes)
use Fcntl
qw(S_IFCHR S_IFBLK FD_CLOEXEC F_GETFD F_SETFD LOCK_EX O_RDONLY O_DIRECTORY);
use List::Util qw(any none);
use POSIX
qw(SIGINT SIGHUP SIGPIPE SIGTERM SIG_BLOCK SIG_UNBLOCK strftime isatty);
use Carp;
use Term::ANSIColor;
use Socket;
use Time::HiRes;
use Math::BigInt;
use Text::ParseWords;
use Digest::SHA;
use version;
## no critic (InputOutput::RequireBriefOpen)
# from sched.h
# use typeglob constants because "use constant" has several drawbacks as
# explained in the documentation for the Readonly CPAN module
*CLONE_NEWNS = \0x20000; # mount namespace
*CLONE_NEWUTS = \0x4000000; # utsname
*CLONE_NEWIPC = \0x8000000; # ipc
*CLONE_NEWUSER = \0x10000000; # user
*CLONE_NEWPID = \0x20000000; # pid
*CLONE_NEWNET = \0x40000000; # net
*_LINUX_CAPABILITY_VERSION_3 = \0x20080522;
*CAP_SYS_ADMIN = \21;
*PR_CAPBSET_READ = \23;
# from sys/mount.h
*MS_BIND = \0x1000;
*MS_REC = \0x4000;
*MNT_DETACH = \2;
# uuid_t NameSpace_DNS in RFC 4122
*UUID_NS_DNS = \'6ba7b810-9dad-11d1-80b4-00c04fd430c8';
our (
$CLONE_NEWNS, $CLONE_NEWUTS,
$CLONE_NEWIPC, $CLONE_NEWUSER,
$CLONE_NEWPID, $CLONE_NEWNET,
$_LINUX_CAPABILITY_VERSION_3, $CAP_SYS_ADMIN,
$PR_CAPBSET_READ, $MS_BIND,
$MS_REC, $MNT_DETACH,
$UUID_NS_DNS
);
#<<<
# type codes:
# 0 -> normal file
# 1 -> hardlink
# 2 -> symlink
# 3 -> character special
# 4 -> block special
# 5 -> directory
my @devfiles = (
# filename mode type link target major minor
["", oct(755), 5, '', undef, undef],
["console", oct(666), 3, '', 5, 1],
["fd", oct(777), 2, '/proc/self/fd', undef, undef],
["full", oct(666), 3, '', 1, 7],
["null", oct(666), 3, '', 1, 3],
["ptmx", oct(666), 3, '', 5, 2],
["pts/", oct(755), 5, '', undef, undef],
["random", oct(666), 3, '', 1, 8],
["shm/", oct(755), 5, '', undef, undef],
["stderr", oct(777), 2, '/proc/self/fd/2', undef, undef],
["stdin", oct(777), 2, '/proc/self/fd/0', undef, undef],
["stdout", oct(777), 2, '/proc/self/fd/1', undef, undef],
["tty", oct(666), 3, '', 5, 0],
["urandom", oct(666), 3, '', 1, 9],
["zero", oct(666), 3, '', 1, 5],
);
#>>>
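# For example, the "null" row above describes /dev/null: a character special
# file (type 3) with mode 0666 and device numbers 1:3, while the "pts/" and
# "shm/" rows are directories (type 5) that are later used as mount points.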
# verbosity levels:
# 0 -> print nothing
# 1 -> normal output and progress bars
# 2 -> verbose output
# 3 -> debug output
my $verbosity_level = 1;
my $is_covering = 0;
{
# make $@ local, so we don't print "Undefined subroutine called"
# in other parts where we evaluate $@
local $@ = '';
$is_covering = !!(eval { Devel::Cover::get_coverage() });
}
# The reason why Perl::Critic warns about this is that it suspects that the
# programmer wants to implement a test of whether the terminal is interactive
# or not, in which case complex interactions with the magic *ARGV indeed make
# it advisable to use IO::Interactive. In our case, we do not want to create an
# interactivity check but just want to check whether STDERR is opened to a tty,
# so our use of -t is fine and not "fragile and complicated" as is written in
# the description of InputOutput::ProhibitInteractiveTest. Also see
# https://github.com/Perl-Critic/Perl-Critic/issues/918
sub stderr_is_tty() {
## no critic (InputOutput::ProhibitInteractiveTest)
if (-t STDERR) {
return 1;
} else {
return 0;
}
}
sub debug {
if ($verbosity_level < 3) {
return;
}
my $msg = shift;
my ($package, $filename, $line) = caller;
$msg = "D: $PID $line $msg";
if (stderr_is_tty()) {
$msg = colored($msg, 'clear');
}
print STDERR "$msg\n";
return;
}
sub info {
if ($verbosity_level == 0) {
return;
}
my $msg = shift;
if ($verbosity_level >= 3) {
my ($package, $filename, $line) = caller;
$msg = "$PID $line $msg";
}
$msg = "I: $msg";
if (stderr_is_tty()) {
$msg = colored($msg, 'green');
}
print STDERR "$msg\n";
return;
}
sub warning {
if ($verbosity_level == 0) {
return;
}
my $msg = shift;
$msg = "W: $msg";
if (stderr_is_tty()) {
$msg = colored($msg, 'bold yellow');
}
print STDERR "$msg\n";
return;
}
sub error {
# if error() is called with the string from a previous error() that was
# caught inside an eval(), then the string will have a newline which we
# are stripping here
chomp(my $msg = shift);
$msg = "E: $msg";
if (stderr_is_tty()) {
$msg = colored($msg, 'bold red');
}
if ($verbosity_level == 3) {
croak $msg; # produces a backtrace
} else {
die "$msg\n";
}
}
# The encoding of dev_t is MMMM Mmmm mmmM MMmm, where M is a hex digit of
# the major number and m is a hex digit of the minor number.
sub major {
my $rdev = shift;
my $right
= Math::BigInt->from_hex("0x00000000000fff00")->band($rdev)->brsft(8);
my $left
= Math::BigInt->from_hex("0xfffff00000000000")->band($rdev)->brsft(32);
return $right->bior($left);
}
sub minor {
my $rdev = shift;
my $right = Math::BigInt->from_hex("0x00000000000000ff")->band($rdev);
my $left
= Math::BigInt->from_hex("0x00000ffffff00000")->band($rdev)->brsft(12);
return $right->bior($left);
}
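# A worked example (illustrative only): /dev/null is character device 1:3,
# so for my $rdev = Math::BigInt->new((stat "/dev/null")[6]) one would expect
# major($rdev) == 1 and minor($rdev) == 3 on a typical Linux system.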
sub can_execute {
my $tool = shift;
my $verbose = shift // '--version';
my $pid = open(my $fh, '-|') // return 0;
if ($pid == 0) {
open(STDERR, '>&', STDOUT) or die;
exec {$tool} $tool, $verbose or die;
}
chomp(
my $content = do { local $/; <$fh> }
);
close $fh;
if ($? != 0) {
return 0;
}
if (length $content == 0) {
return 0;
}
return 1;
}
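# A usage sketch (assuming "tar" is installed on the host): can_execute('tar')
# runs "tar --version" and returns 1 only if the command exits with status 0
# and prints at least one character; a second argument can replace the default
# '--version' probe.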
# check whether a directory is mounted by comparing the device number of the
# directory itself with its parent
sub is_mountpoint {
my $dir = shift;
if (!-e $dir) {
return 0;
}
my @a = stat "$dir/.";
my @b = stat "$dir/..";
# if the device number is different, then the directory must be mounted
if ($a[0] != $b[0]) {
return 1;
}
# if the inode number is the same, then the directory must be mounted
if ($a[1] == $b[1]) {
return 1;
}
return 0;
}
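# For example, is_mountpoint("/proc") should return 1 on a typical system
# because /proc sits on a different device than /, while a plain subdirectory
# such as /root normally returns 0.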
# tar cannot figure out the decompression program when receiving data on
# standard input, thus we do it ourselves. This is copied from tar's
# src/suffix.c
sub get_tar_compressor {
my $filename = shift;
if ($filename eq '-') {
return;
} elsif ($filename =~ /\.tar$/) {
return;
} elsif ($filename =~ /\.(gz|tgz|taz)$/) {
return ['gzip'];
} elsif ($filename =~ /\.(Z|taZ)$/) {
return ['compress'];
} elsif ($filename =~ /\.(bz2|tbz|tbz2|tz2)$/) {
return ['bzip2'];
} elsif ($filename =~ /\.lz$/) {
return ['lzip'];
} elsif ($filename =~ /\.(lzma|tlz)$/) {
return ['lzma'];
} elsif ($filename =~ /\.lzo$/) {
return ['lzop'];
} elsif ($filename =~ /\.lz4$/) {
return ['lz4'];
} elsif ($filename =~ /\.(xz|txz)$/) {
return ['xz'];
} elsif ($filename =~ /\.zst$/) {
return ['zstd'];
}
return;
}
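# For example, get_tar_compressor("chroot.tar.xz") returns ['xz'] and
# get_tar_compressor("chroot.tar.gz") returns ['gzip'], while for "-" (stdin)
# or a plain .tar archive nothing is returned because no filter is needed.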
# avoid dependency on String::ShellQuote by implementing the mechanism
# from python's shlex.quote function
sub shellescape {
my $string = shift;
if (length $string == 0) {
return "''";
}
# search for occurrences of characters that are not safe
# the 'a' regex modifier makes sure that \w only matches ASCII
if ($string !~ m/[^\w@\%+=:,.\/-]/a) {
return $string;
}
# wrap the string in single quotes and handle existing single quotes by
# putting them outside of the single-quoted string
$string =~ s/'/'"'"'/g;
return "'$string'";
}
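# For example, shellescape("foo bar") yields 'foo bar' (including the quotes)
# and shellescape("it's") yields 'it'"'"'s', while a string without unsafe
# characters such as "abc-1.2" is returned unchanged.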
sub create_v5_uuid {
use bytes;
my $ns_uuid = shift;
my $name = shift;
my $version = 0x50;
# convert the namespace uuid to binary
$ns_uuid =~ tr/-//d;
$ns_uuid = pack 'H*', $ns_uuid;
# concatenate namespace and name and take sha1
my $digest = Digest::SHA->new(1);
$digest->add($ns_uuid);
$digest->add($name);
# only the first 16 bytes matter
my $uuid = substr($digest->digest(), 0, 16);
# set the version
substr $uuid, 6, 1, chr(ord(substr($uuid, 6, 1)) & 0x0f | $version);
substr $uuid, 8, 1, chr(ord(substr $uuid, 8, 1) & 0x3f | 0x80);
# convert binary back to uuid formatting
return join '-', map { unpack 'H*', $_ }
map { substr $uuid, 0, $_, '' } (4, 2, 2, 2, 6);
}
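# A usage sketch: create_v5_uuid($UUID_NS_DNS, "some.name") derives the same
# RFC 4122 version 5 UUID for the same namespace and name on every invocation,
# i.e. the result is deterministic rather than random.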
sub test_unshare_userns {
my $verbose = shift;
local *maybe_error = sub {
my $msg = shift;
if ($verbose) {
error $msg;
} else {
debug $msg;
}
};
if ($EFFECTIVE_USER_ID == 0) {
maybe_error("cannot unshare user namespace when executing as root");
return 0;
}
# arguments to syscalls have to be stored in their own variable or
# otherwise we will get "Modification of a read-only value attempted"
my $unshare_flags = $CLONE_NEWUSER;
# we spawn a new process because if unshare succeeds, we would
# otherwise have unshared the mmdebstrap process itself which we don't want
my $pid = fork() // error "fork() failed: $!";
if ($pid == 0) {
my $ret = syscall(&SYS_unshare, $unshare_flags);
if ($ret == 0) {
exit 0;
} else {
maybe_error("unshare syscall failed: $!");
exit 1;
}
}
waitpid($pid, 0);
if (($? >> 8) != 0) {
return 0;
}
# if newuidmap and newgidmap exist, the exit status will be 1 when
# executed without parameters
system "newuidmap 2>/dev/null";
if (($? >> 8) != 1) {
if (($? >> 8) == 127) {
maybe_error("cannot find newuidmap");
} else {
maybe_error("newuidmap returned unknown exit status: $?");
}
return 0;
}
system "newgidmap 2>/dev/null";
if (($? >> 8) != 1) {
if (($? >> 8) == 127) {
maybe_error("cannot find newgidmap");
} else {
maybe_error("newgidmap returned unknown exit status: $?");
}
return 0;
}
my @idmap = read_subuid_subgid($verbose);
if (scalar @idmap == 0) {
maybe_error("failed to parse /etc/subuid and /etc/subgid");
return 0;
}
# too much can go wrong when doing the dance required to unshare the user
# namespace, so instead of adding more complexity to support maybe_error()
# in a function that is already too complex, we use eval()
eval {
$pid = get_unshare_cmd(
sub {
if ($EFFECTIVE_USER_ID == 0) {
exit 0;
} else {
exit 1;
}
},
\@idmap
);
waitpid $pid, 0;
if ($? != 0) {
maybe_error("failed to unshare the user namespace");
return 0;
}
};
if ($@) {
maybe_error($@);
return 0;
}
return 1;
}
sub read_subuid_subgid {
my $verbose = shift;
my @result = ();
my $username = getpwuid $REAL_USER_ID;
my ($subid, $num_subid, $fh, $n);
local *maybe_warn = sub {
my $msg = shift;
if ($verbose) {
warning $msg;
} else {
debug $msg;
}
};
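# /etc/subuid and /etc/subgid contain one entry per line in the form
# "name:first-subid:count", for example (illustrative) "user:100000:65536";
# the loops below pick the first entry matching the current username.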
if (!-e "/etc/subuid") {
maybe_warn("/etc/subuid doesn't exist");
return;
}
if (!-r "/etc/subuid") {
maybe_warn("/etc/subuid is not readable");
return;
}
open $fh, "<", "/etc/subuid"
or maybe_warn("cannot open /etc/subuid for reading: $!");
if (!$fh) {
return;
}
while (my $line = <$fh>) {
($n, $subid, $num_subid) = split(/:/, $line, 3);
last if ($n eq $username);
}
close $fh;
if (!length $subid) {
maybe_warn("/etc/subuid is empty");
return;
}
if ($n ne $username) {
maybe_warn("no entry in /etc/subuid for $username");
return;
}
push @result, ["u", 0, $subid, $num_subid];
if (scalar(@result) < 1) {
maybe_warn("/etc/subuid does not contain an entry for $username");
return;
}
if (scalar(@result) > 1) {
maybe_warn("/etc/subuid contains multiple entries for $username");
return;
}
if (!-e "/etc/subgid") {
maybe_warn("/etc/subgid doesn't exist");
return;
}
if (!-r "/etc/subgid") {
maybe_warn("/etc/subgid is not readable");
return;
}
open $fh, "<", "/etc/subgid"
or maybe_warn("cannot open /etc/subgid for reading: $!");
if (!$fh) {
return;
}
while (my $line = <$fh>) {
($n, $subid, $num_subid) = split(/:/, $line, 3);
last if ($n eq $username);
}
close $fh;
if (!length $subid) {
maybe_warn("/etc/subgid is empty");
return;
}
if ($n ne $username) {
maybe_warn("no entry in /etc/subgid for $username");
return;
}
push @result, ["g", 0, $subid, $num_subid];
if (scalar(@result) < 2) {
maybe_warn("/etc/subgid does not contain an entry for $username");
return;
}
if (scalar(@result) > 2) {
maybe_warn("/etc/subgid contains multiple entries for $username");
return;
}
return @result;
}
# This function spawns two child processes forming the following process tree
#
# A
# |
# fork()
# | \
# B C
# | |
# | fork()
# | | \
# | D E
# | | |
# |unshare()
# | close()
# | | |
# | | read()
# | | newuidmap(D)
# | | newgidmap(D)
# | | /
# | waitpid()
# | |
# | fork()
# | | \
# | F G
# | | |
# | | exec()
# | | /
# | waitpid()
# | /
# waitpid()
#
# To better refer to each individual part, we give each process a new
# identifier after calling fork(). Process A is the main process. After
# executing fork() we call the parent and child B and C, respectively. This
# first fork() is done because we do not want to modify A. B then remains
# waiting for its child C to finish. C calls fork() again, splitting into
# the parent D and its child E. In the parent D we call unshare() and close a
# pipe shared by D and E to signal to E that D is done with calling unshare().
# E notices this by using read() and follows up with executing the tools
# new[ug]idmap on D. E finishes and D continues with doing another fork().
# This is because when unsharing the PID namespace, we need a PID 1 to be kept
# alive or otherwise any child processes cannot fork() anymore themselves. So
# we keep F as PID 1 and finally call exec() in G.
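# A typical invocation mirrors test_unshare_userns() above (sketch):
#
#   my @idmap = read_subuid_subgid(1);
#   my $pid = get_unshare_cmd(sub { ... code running inside the namespaces ... }, \@idmap);
#   waitpid $pid, 0;
#
# get_unshare_cmd() returns the pid of the forked child, so the caller only
# has to waitpid() for it after the unshared code has finished.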
sub get_unshare_cmd {
my $cmd = shift;
my $idmap = shift;
# unsharing the mount namespace (NEWNS) requires CAP_SYS_ADMIN
my $unshare_flags
= $CLONE_NEWNS | $CLONE_NEWPID | $CLONE_NEWUTS | $CLONE_NEWIPC;
# we only need to add CLONE_NEWUSER if we are not yet root
if ($EFFECTIVE_USER_ID != 0) {
$unshare_flags |= $CLONE_NEWUSER;
}
if (0) {
$unshare_flags |= $CLONE_NEWNET;
}
# fork a new process and let the child get unshare()ed
# we don't want to unshare the parent process
my $gcpid = fork() // error "fork() failed: $!";
if ($gcpid == 0) {
# Create a pipe for the parent process to signal the child process that
# it is done with calling unshare() so that the child can go ahead
# setting up uid_map and gid_map.
pipe my $rfh, my $wfh;
# We have to do this dance with forking a process and then modifying
# the parent from the child because:
# - new[ug]idmap can only be called on a process id after that process
# has unshared the user namespace
# - a process loses its capabilities if it performs an execve() with
# nonzero user ids; see the capabilities(7) man page for details.
# - a process that unshared the user namespace by default does not
# have the privileges to call new[ug]idmap on itself
#
# this also works the other way around (the child setting up a user
# namespace and being modified from the parent) but that way, the
# parent would have to stay around until the child exited (so a pid
# would be wasted). Additionally, that variant would require an
# additional pipe to let the parent signal the child that it is done
# with calling new[ug]idmap. The way it is done here, this signaling
# can instead be done by wait()-ing for the exit of the child.
my $ppid = $$;
my $cpid = fork() // error "fork() failed: $!";
if ($cpid == 0) {
# child
# Close the writing descriptor at our end of the pipe so that we
# see EOF when parent closes its descriptor.
close $wfh;
# Wait for the parent process to finish its unshare() call by
# waiting for an EOF.
0 == sysread $rfh, my $c, 1 or error "read() did not receive EOF";
# the process is already root, so no need for newuidmap/newgidmap
if ($EFFECTIVE_USER_ID == 0) {
exit 0;
}
# The program's new[ug]idmap have to be used because they are
# setuid root. These privileges are needed to map the ids from
# /etc/sub[ug]id to the user namespace set up by the parent.
# Without these privileges, only the id of the user itself can be
# mapped into the new namespace.
#
# Since new[ug]idmap is setuid root we also don't need to write
# "deny" to /proc/$$/setgroups beforehand (this is otherwise
# required for unprivileged processes trying to write to
# /proc/$$/gid_map since kernel version 3.19 for security reasons)
# and therefore the parent process keeps its ability to change its
# own group here.
#
# Since /proc/$ppid/[ug]id_map can only be written to once,
# respectively, instead of making multiple calls to new[ug]idmap,
# we assemble a command line that makes one call each.
my $uidmapcmd = "";
my $gidmapcmd = "";
foreach (@{$idmap}) {
my ($t, $hostid, $nsid, $range) = @{$_};
if ($t ne "u" and $t ne "g" and $t ne "b") {
error "invalid idmap type: $t";
}
if ($t eq "u" or $t eq "b") {
$uidmapcmd .= " $hostid $nsid $range";
}
if ($t eq "g" or $t eq "b") {
$gidmapcmd .= " $hostid $nsid $range";
}
}
my $idmapcmd = '';
if ($uidmapcmd ne "") {
0 == system "newuidmap $ppid $uidmapcmd"
or error "newuidmap $ppid $uidmapcmd failed: $!";
}
if ($gidmapcmd ne "") {
0 == system "newgidmap $ppid $gidmapcmd"
or error "newgidmap $ppid $gidmapcmd failed: $!";
}
exit 0;
}
# parent
# After fork()-ing, the parent immediately calls unshare...
0 == syscall &SYS_unshare, $unshare_flags
or error "unshare() failed: $!";
# .. and then signals the child process that we are done with the
# unshare() call by sending an EOF.
close $wfh;
# Wait for the child process to finish its setup by waiting for its
# exit.
$cpid == waitpid $cpid, 0 or error "waitpid() failed: $!";
my $exit = $? >> 8;
if ($exit != 0) {
error "child had a non-zero exit status: $exit";
}
# Currently we are nobody (uid and gid are 65534). So we become root
# user and group instead.
#
# We are using direct syscalls instead of setting $(, $), $< and $>
# because then perl would do additional stuff which we don't need or
# want here, like checking /proc/sys/kernel/ngroups_max (which might
# not exist). It would also call setgroups() in a way that makes
# the root user be part of the group unknown.
if ($EFFECTIVE_USER_ID != 0) {
0 == syscall &SYS_setgid, 0 or error "setgid failed: $!";
0 == syscall &SYS_setuid, 0 or error "setuid failed: $!";
0 == syscall &SYS_setgroups, 0, 0 or error "setgroups failed: $!";
}
if (1) {
# When the pid namespace is also unshared, then processes expect a
# master pid to always be alive within the namespace. To achieve
# this, we fork() here instead of exec() to always have one dummy
# process running as pid 1 inside the namespace. This is also what
# the unshare tool does when used with the --fork option.
#
# Otherwise, without a pid 1, new processes cannot be forked
# anymore after pid 1 finished.
my $cpid = fork() // error "fork() failed: $!";
if ($cpid != 0) {
# The parent process will stay alive as pid 1 in this
# namespace until the child finishes executing. This is
# important because pid 1 must never die or otherwise nothing
# new can be forked.
$cpid == waitpid $cpid, 0 or error "waitpid() failed: $!";
exit($? >> 8);
}
}
&{$cmd}();
exit 0;
}
# parent
return $gcpid;
}
sub havemknod {
my $root = shift;
my $havemknod = 0;
if (-e "$root/test-dev-null") {
error "/test-dev-null already exists";
}
TEST: {
# we fork so that we can read STDERR
my $pid = open(my $fh, '-|') // error "failed to fork(): $!";
if ($pid == 0) {
open(STDERR, '>&', STDOUT) or error "cannot open STDERR: $!";
# we use mknod(1) instead of the system call because creating the
# right dev_t argument requires makedev(3)
exec 'mknod', "$root/test-dev-null", 'c', '1', '3';
}
chomp(
my $content = do { local $/; <$fh> }
);
close $fh;
{
last TEST unless $? == 0 and $content eq '';
last TEST unless -c "$root/test-dev-null";
last TEST unless open my $fh, '>', "$root/test-dev-null";
last TEST unless print $fh 'test';
}
$havemknod = 1;
}
if (-e "$root/test-dev-null") {
unlink "$root/test-dev-null"
or error "cannot unlink /test-dev-null: $!";
}
return $havemknod;
}
# inspired by /usr/share/perl/5.34/pod/perlfaq8.pod
sub terminal_width {
if (!stderr_is_tty()) {
return -1;
}
if (!defined &TIOCGWINSZ) {
return -1;
}
if (!-e "/dev/tty") {
return -1;
}
my $tty_fh;
if (!open($tty_fh, "+<", "/dev/tty")) {
return -1;
}
my $winsize = '';
if (!ioctl($tty_fh, &TIOCGWINSZ, $winsize)) {
return -1;
}
my (undef, $col, undef, undef) = unpack('S4', $winsize);
return $col;
}
# Prints the current status, the percentage and a progress bar on STDERR if
# it is an interactive tty and if verbosity is set to 1.
#
# * first 12 chars: status
# * following 7 chars: percentage
# * progress bar until 79 chars are filled
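# A progress line at 42% during installation might look roughly like this
# (illustrative only):
#
#   installing:   42.00 [========================>                         ]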
sub print_progress {
if ($verbosity_level != 1) {
return;
}
if (!stderr_is_tty()) {
return;
}
my $perc = shift;
my $status = shift;
my $len_status = 12;
my $len_perc = 7;
my $len_prog_min = 10;
my $len_prog_max = 60;
my $twidth = terminal_width();
if ($twidth <= $len_status) {
return;
}
# \e[2K clears everything on the current line (i.e. the progress bar)
print STDERR "\e[2K";
if ($perc eq "done") {
print STDERR "done\n";
return;
}
if (defined $status) {
printf STDERR "%*s", -$len_status, "$status:";
} else {
print STDERR (" " x $len_status);
}
if ($twidth <= $len_status + $len_perc) {
print STDERR "\r";
return;
}
if ($perc >= 100) {
$perc = 100;
}
printf STDERR "%*.2f", $len_perc, $perc;
if ($twidth <= $len_status + $len_perc + $len_prog_min) {
print STDERR "\r";
return;
}
my $len_prog = $twidth - $len_perc - $len_status;
if ($len_prog > $len_prog_max) {
$len_prog = $len_prog_max;
}
my $num_x = int($perc * ($len_prog - 3) / 100);
my $bar = '=' x $num_x;
if ($num_x != ($len_prog - 3)) {
$bar .= '>';
$bar .= ' ' x ($len_prog - $num_x - 4);
}
print STDERR " [$bar]\r";
return;
}
sub run_progress {
my ($get_exec, $line_handler, $line_has_error, $chdir) = @_;
pipe my $rfh, my $wfh;
my $got_signal = 0;
my $ignore = sub {
$got_signal = shift;
info "run_progress() received signal $got_signal: waiting for child...";
};
debug("run_progress: exec " . (join ' ', ($get_exec->('${FD}'))));
# delay signals so that we can fork and change behaviour of the signal
# handler in parent and child without getting interrupted
my $sigset = POSIX::SigSet->new(SIGINT, SIGHUP, SIGPIPE, SIGTERM);
POSIX::sigprocmask(SIG_BLOCK, $sigset) or error "Can't block signals: $!";
my $pid1 = open(my $pipe, '-|') // error "failed to fork(): $!";
if ($pid1 == 0) {
# child: default signal handlers
local $SIG{'INT'} = 'DEFAULT';
local $SIG{'HUP'} = 'DEFAULT';
local $SIG{'PIPE'} = 'DEFAULT';
local $SIG{'TERM'} = 'DEFAULT';
# unblock all delayed signals (and possibly handle them)
POSIX::sigprocmask(SIG_UNBLOCK, $sigset)
or error "Can't unblock signals: $!";
close $rfh;
# Unset the close-on-exec flag, so that the file descriptor does not
# get closed when we exec
my $flags = fcntl($wfh, F_GETFD, 0) or error "fcntl F_GETFD: $!";
fcntl($wfh, F_SETFD, $flags & ~FD_CLOEXEC)
or error "fcntl F_SETFD: $!";
my $fd = fileno $wfh;
# redirect stderr to stdout so that we can capture it
open(STDERR, '>&', STDOUT) or error "cannot open STDOUT: $!";
my @execargs = $get_exec->($fd);
# before apt 1.5, "apt-get update" attempted to chdir() into the
# working directory. This will fail if the current working directory
# is not accessible by the user (for example in unshare mode). See
# Debian bug #860738
if (defined $chdir) {
chdir $chdir or error "failed chdir() to $chdir: $!";
}
eval { Devel::Cover::set_coverage("none") } if $is_covering;
exec { $execargs[0] } @execargs
or error 'cannot exec() ' . (join ' ', @execargs);
}
close $wfh;
# spawn two processes:
# parent will parse stdout to look for errors
# child will parse $rfh for the progress meter
my $pid2 = fork() // error "failed to fork(): $!";
if ($pid2 == 0) {
# child: ignore signals
local $SIG{'INT'} = 'IGNORE';
local $SIG{'HUP'} = 'IGNORE';
local $SIG{'PIPE'} = 'IGNORE';
local $SIG{'TERM'} = 'IGNORE';
# unblock all delayed signals (and possibly handle them)
POSIX::sigprocmask(SIG_UNBLOCK, $sigset)
or error "Can't unblock signals: $!";
if ($verbosity_level != 1 || !stderr_is_tty()) {
# no need to print any progress
# we still need to consume everything from $rfh or otherwise apt
# will block forever if there is too much output
local $/;
<$rfh>;
close $rfh;
exit 0;
}
my $progress = 0.0;
my $status = undef;
print_progress($progress);
while (my $line = <$rfh>) {
my ($newprogress, $newstatus) = $line_handler->($line);
next unless $newprogress;
# start a new line if the new progress value is less than the
# previous one
if ($newprogress < $progress) {
print_progress("done");
}
if (defined $newstatus) {
$status = $newstatus;
}
print_progress($newprogress, $status);
$progress = $newprogress;
}
print_progress("done");
exit 0;
}
# parent: ignore signals
# by using "local", the original is automatically restored once the
# function returns
local $SIG{'INT'} = $ignore;
local $SIG{'HUP'} = $ignore;
local $SIG{'PIPE'} = $ignore;
local $SIG{'TERM'} = $ignore;
# unblock all delayed signals (and possibly handle them)
POSIX::sigprocmask(SIG_UNBLOCK, $sigset)
or error "Can't unblock signals: $!";
my $output = '';
my $has_error = 0;
while (my $line = <$pipe>) {
$has_error = $line_has_error->($line);
if ($verbosity_level >= 2) {
print STDERR $line;
} else {
# forward captured apt output
$output .= $line;
}
}
close($pipe);
my $fail = 0;
if ($? != 0 or $has_error) {
$fail = 1;
}
waitpid $pid2, 0;
$? == 0 or error "progress parsing failed";
if ($got_signal) {
error "run_progress() received signal: $got_signal";
}
# only print failure after progress output finished or otherwise it
# might interfere with the remaining output
if ($fail) {
if ($verbosity_level >= 1) {
print STDERR $output;
}
error((join ' ', $get_exec->('<$fd>')) . ' failed');
}
return;
}
sub run_dpkg_progress {
my $options = shift;
my @debs = @{ $options->{PKGS} // [] };
my $get_exec
= sub { return @{ $options->{ARGV} }, "--status-fd=$_[0]", @debs; };
my $line_has_error = sub { return 0; };
my $num = 0;
# each package has one install and one configure step, thus the total
# number is twice the number of packages
my $total = (scalar @debs) * 2;
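# The dpkg --status-fd lines matched below look roughly like (illustrative)
# "processing: install: bash" followed later by "processing: configure: bash".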
my $line_handler = sub {
my $status = undef;
if ($_[0] =~ /^processing: (install|configure): /) {
if ($1 eq 'install') {
$status = 'installing';
} elsif ($1 eq 'configure') {
$status = 'configuring';
} else {
error "unknown status: $1";
}
$num += 1;
}
if ($total == 0) {
return 0, $status;
} else {
return $num / $total * 100, $status;
}
};
run_progress $get_exec, $line_handler, $line_has_error;
return;
}
sub run_apt_progress {
my $options = shift;
my @debs = @{ $options->{PKGS} // [] };
if ($verbosity_level >= 3) {
my @apt_debug_opts = qw(
-oDebug::pkgProblemResolver=true
-oDebug::pkgDepCache::Marker=1
-oDebug::pkgDepCache::AutoInstall=1
);
push @{ $options->{ARGV} }, @apt_debug_opts;
}
my $get_exec = sub {
my @prefix = ();
my @opts = ();
return (
@prefix,
@{ $options->{ARGV} },
@opts,
"-oAPT::Status-Fd=$_[0]",
# prevent apt from messing up the terminal and allow dpkg to
# receive SIGINT and quit immediately without waiting for
# maintainer script to finish
'-oDpkg::Use-Pty=false',
@debs
);
};
my $line_has_error = sub { return 0; };
if ($options->{FIND_APT_WARNINGS}) {
$line_has_error = sub {
# apt-get doesn't report a non-zero exit if the update failed.
# Thus, we have to parse its output. See #778357, #776152, #696335
# and #745735 for the parsing bugs as well as #594813, #696335,
# #776152, #778357 and #953726 for non-zero exit on transient
# network errors.
#
# For example, we want to fail with the following warning:
# W: Some index files failed to download. They have been ignored,
# or old ones used instead.
# But since this message is meant for human consumption it is not
# guaranteed to be stable across different apt versions and may
# change arbitrarily in the future. Thus, we error out on any W:
# lines as well. The downside is, that apt also unconditionally
# and by design prints a warning for unsigned repositories, even
# if they were allowed with Acquire::AllowInsecureRepositories "1"
# or with trusted=yes.
#
# A workaround was introduced by apt 2.1.16 with the --error-on=any
# option to apt-get update.
if ($_[0] =~ /^(W: |Err:)/) {
return 1;
}
return 0;
};
}
my $line_handler = sub {
if ($_[0] =~ /(pmstatus|dlstatus):[^:]+:(\d+\.\d+):.*/) {
my $status = undef;
if ($1 eq 'pmstatus') {
$status = "installing";
} elsif ($1 eq 'dlstatus') {
$status = "downloading";
} else {
error "unknown status: $1";
}
return $2, $status;
}
};
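# The APT::Status-Fd lines parsed above look roughly like (illustrative)
# "pmstatus:bash:42.5:Unpacking bash" or
# "dlstatus:1:5.0:Retrieving file 1 of 20".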
run_progress $get_exec, $line_handler, $line_has_error, $options->{CHDIR};
return;
}
sub run_apt_download_progress {
my $options = shift;
if ($options->{dryrun}) {
info "simulate downloading packages with apt...";
} else {
info "downloading packages with apt...";
}
pipe my $rfh, my $wfh;
my $pid = open(my $fh, '-|') // error "fork() failed: $!";
if ($pid == 0) {
close $wfh;
# read until parent process closes $wfh
my $content = do { local $/; <$rfh> };
close $rfh;
# the parent is done -- pass what we read back to it
print $content;
exit 0;
}
close $rfh;
# Unset the close-on-exec flag, so that the file descriptor does not
# get closed when we exec
my $flags = fcntl($wfh, F_GETFD, 0) or error "fcntl F_GETFD: $!";
fcntl($wfh, F_SETFD, $flags & ~FD_CLOEXEC) or error "fcntl F_SETFD: $!";
my $fd = fileno $wfh;
# run_apt_progress() can raise an exception which would leave this function
# without cleaning up the child process we started, making mmdebstrap hang
# in case run_apt_progress() fails -- so wrap this in eval() instead
eval {
# 2022-05-02, #debian-apt on OFTC, times in UTC+2
# 16:57 < josch> DonKult: how is -oDebug::pkgDpkgPm=1
# -oDir::Log=/dev/null a "fancy no-op"?
# 11:52 < DonKult> josch: "fancy no-op" in sofar as it does nothing to
# the system even through its not in a special mode
# ala simulation or download-only. It does all the
# things it normally does, except that it just prints
# the dpkg calls instead of execv() them which in
# practice amounts means it does nothing (the Dir::Log
# just prevents libapt from creating the /var/log/apt
# directories. As the code creates them even if no
# logs will be placed there…). As said, midterm an apt
# --print-install-packages or something would be nice
# to avoid running everything.
run_apt_progress({
ARGV => [
'apt-get',
'--yes',
'-oDebug::pkgDpkgPm=1',
'-oDir::Log=/dev/null',
$options->{dryrun}
? '-oAPT::Get::Simulate=true'
: (
"-oAPT::Keep-Fds::=$fd",
"-oDPkg::Tools::options::'cat >&$fd'::InfoFD=$fd",
"-oDpkg::Pre-Install-Pkgs::=cat >&$fd",
# no need to lock the database if we are just downloading
"-oDebug::NoLocking=1",
# no need for pty magic if we write no log
"-oDpkg::Use-Pty=0",
# unset this or otherwise "cat >&$fd" will fail
"-oDPkg::Chroot-Directory=",
),
@{ $options->{APT_ARGV} },
],
});
};
my $err = '';
if ($@) {
$err = "apt download failed: $@";
}
# signal the child process that we are done
close $wfh;
# and then read from it what it got
my @listofdebs = <$fh>;
close $fh;
if ($? != 0) {
$err = "status child failed";
}
if ($err) {
error $err;
}
# remove trailing newlines
chomp @listofdebs;
return @listofdebs;
}
sub setup_mounts {
my $options = shift;
my @cleanup_tasks = ();
eval {
if (any { $_ eq $options->{mode} } ('root', 'unshare')) {
0 == system('mount', "--make-rprivate", "/")
or warning("mount --make-rprivate / failed: $?");
# if more than essential should be installed, make the system look
# more like a real one by creating or bind-mounting the device
# nodes
foreach my $file (@devfiles) {
my ($fname, $mode, $type, $linkname, $devmajor, $devminor)
= @{$file};
next if $fname eq '';
if ($type == 0) { # normal file
error "type 0 not implemented";
} elsif ($type == 1) { # hardlink
error "type 1 not implemented";
} elsif ($type == 2) { # symlink
if (!$options->{havemknod}) {
# If we had mknod, then the symlink was already created
# in the run_setup function.
if (!-d "$options->{root}/dev") {
warning(
"skipping creation of ./dev/$fname because the"
. " /dev directory is missing in the target"
);
next;
}
if (-e "$options->{root}/dev/$fname") {
warning(
"skipping creation of ./dev/$fname because it"
. " already exists in the target");
next;
}
push @cleanup_tasks, sub {
unlink "$options->{root}/dev/$fname"
or warning("cannot unlink ./dev/$fname: $!");
};
symlink $linkname, "$options->{root}/dev/$fname"
or warning
"cannot create symlink ./dev/$fname -> $linkname";
}
} elsif ($type == 3 or $type == 4) {
# character/block special
if (any { $_ =~ '^chroot/mount(?:/dev)?$' }
@{ $options->{skip} }) {
info "skipping chroot/mount/dev as requested";
} elsif (!$options->{canmount}) {
warning "skipping bind-mounting ./dev/$fname";
} elsif (!$options->{havemknod}) {
if (!-d "$options->{root}/dev") {
warning(
"skipping creation of ./dev/$fname because the"
. " /dev directory is missing in the target"
);
next;
}
if ($fname eq "ptmx") {
# We must not bind-mount ptmx from the outside or
# otherwise posix_openpt() will fail. Instead
# /dev/ptmx must refer to /dev/pts/ptmx either by
# symlink or by bind-mounting. We choose a symlink.
symlink '/dev/pts/ptmx',
"$options->{root}/dev/ptmx"
or error "cannot create /dev/pts/ptmx symlink";
push @cleanup_tasks, sub {
unlink "$options->{root}/dev/ptmx"
or warning "unlink /dev/ptmx";
};
next;
}
if (!-e "/dev/$fname") {
warning("skipping creation of ./dev/$fname because"
. " /dev/$fname does not exist"
. " on the outside");
next;
}
if (!-c "/dev/$fname") {
warning("skipping creation of ./dev/$fname because"
. " /dev/$fname on the outside is not a"
. " character special file");
next;
}
open my $fh, '>', "$options->{root}/dev/$fname"
or error
"cannot open $options->{root}/dev/$fname: $!";
close $fh;
my @umountopts = ();
if ($options->{mode} eq 'unshare') {
push @umountopts, '--no-mtab';
}
push @cleanup_tasks, sub {
0 == system('umount', @umountopts,
"$options->{root}/dev/$fname")
or warning("umount ./dev/$fname failed: $?");
unlink "$options->{root}/dev/$fname"
or warning("cannot unlink ./dev/$fname: $!");
};
0 == system('mount', '-o', 'bind', "/dev/$fname",
"$options->{root}/dev/$fname")
or error "mount ./dev/$fname failed: $?";
}
} elsif ($type == 5) {
# directory
if (any { $_ =~ '^chroot/mount(?:/dev)?$' }
@{ $options->{skip} }) {
info "skipping chroot/mount/dev as requested";
} elsif (!$options->{canmount}) {
warning "skipping bind-mounting ./dev/$fname";
} else {
if (!-d "$options->{root}/dev") {
warning(
"skipping creation of ./dev/$fname because the"
. " /dev directory is missing in the target"
);
next;
}
if (!-e "/dev/$fname" && $fname ne "pts/") {
warning("skipping creation of ./dev/$fname because"
. " /dev/$fname does not exist"
. " on the outside");
next;
}
if (!-d "/dev/$fname" && $fname ne "pts/") {
warning("skipping creation of ./dev/$fname because"
. " /dev/$fname on the outside is not a"
. " directory");
next;
}
if (!$options->{havemknod}) {
# If we had mknod, then the directory to bind-mount
# into was already created in the run_setup
# function.
push @cleanup_tasks, sub {
rmdir "$options->{root}/dev/$fname"
or warning("cannot rmdir ./dev/$fname: $!");
};
if (-e "$options->{root}/dev/$fname") {
if (!-d "$options->{root}/dev/$fname") {
error
"./dev/$fname already exists but is not"
. " a directory";
}
} else {
my $num_created
= make_path "$options->{root}/dev/$fname",
{ error => \my $err };
if ($err && @$err) {
error(
join "; ",
(
map {
"cannot create "
. (join ": ", %{$_})
} @$err
));
} elsif ($num_created == 0) {
error( "cannot create $options->{root}"
. "/dev/$fname");
}
}
chmod $mode, "$options->{root}/dev/$fname"
or error "cannot chmod ./dev/$fname: $!";
}
my @umountopts = ();
if ($options->{mode} eq 'unshare') {
push @umountopts, '--no-mtab';
}
push @cleanup_tasks, sub {
0 == system('umount', @umountopts,
"$options->{root}/dev/$fname")
or warning("umount ./dev/$fname failed: $?");
};
if ($fname eq "pts/") {
# We cannot just bind-mount /dev/pts from the host
# as doing so will make posix_openpt() fail.
# Instead, we need to mount a new devpts.
# We need ptmxmode=666 because /dev/ptmx is a
# symlink to /dev/pts/ptmx and without it
# posix_openpt() will fail if we are not the root
# user. See also:
# kernel.o/doc/Documentation/filesystems/devpts.txt
# salsa.d.o/debian/schroot/-/merge_requests/2
# https://bugs.debian.org/856877
# https://bugs.debian.org/817236
0 == system(
'mount',
'-t',
'devpts',
'none',
"$options->{root}/dev/pts",
'-o',
'noexec,nosuid,uid=5,mode=620,ptmxmode=666'
) or error "mount /dev/pts failed";
} else {
0 == system('mount', '-o', 'bind', "/dev/$fname",
"$options->{root}/dev/$fname")
or error "mount ./dev/$fname failed: $?";
}
}
} else {
error "unsupported type: $type";
}
}
} elsif (any { $_ eq $options->{mode} } ('fakechroot', 'chrootless')) {
# we cannot mount in fakechroot mode
} else {
error "unknown mode: $options->{mode}";
}
# We can only mount /proc and /sys after extracting the essential
# set because if we mount them before, then base-files will not be able
# to extract those
if ( (any { $_ eq $options->{mode} } ('root', 'unshare'))
&& (any { $_ =~ '^chroot/mount(?:/sys)?$' } @{ $options->{skip} }))
{
info "skipping chroot/mount/sys as requested";
} elsif ((any { $_ eq $options->{mode} } ('root', 'unshare'))
&& !$options->{canmount}) {
warning "skipping mount sysfs";
} elsif ((any { $_ eq $options->{mode} } ('root', 'unshare'))
&& !-d "$options->{root}/sys") {
warning("skipping mounting of sysfs because the"
. " /sys directory is missing in the target");
} elsif ((any { $_ eq $options->{mode} } ('root', 'unshare'))
&& !-e "/sys") {
warning("skipping mounting /sys because"
. " /sys does not exist on the outside");
} elsif ((any { $_ eq $options->{mode} } ('root', 'unshare'))
&& !-d "/sys") {
warning("skipping mounting /sys because"
. " /sys on the outside is not a directory");
} elsif ($options->{mode} eq 'root') {
# we don't know whether we run in root mode inside an unshared
# user namespace or as real root so we first try the real mount and
# then fall back to mounting in a way that works in unshared mode
if (
0 == system(
'mount', '-t',
'sysfs', '-o',
'ro,nosuid,nodev,noexec', 'sys',
"$options->{root}/sys"
)
) {
push @cleanup_tasks, sub {
0 == system('umount', "$options->{root}/sys")
or warning("umount /sys failed: $?");
};
} elsif (
0 == system('mount', '-o', 'rbind', '/sys',
"$options->{root}/sys")) {
push @cleanup_tasks, sub {
# since we cannot write to /etc/mtab we need --no-mtab
# unmounting /sys only seems to be successful with --lazy
0 == system(
'umount', '--no-mtab',
'--lazy', "$options->{root}/sys"
) or warning("umount /sys failed: $?");
};
} else {
error "mount /sys failed: $?";
}
} elsif ($options->{mode} eq 'unshare') {
# naturally we have to clean up after ourselves in sudo mode where
# we do a real mount. But we also need to unmount in unshare mode
# because otherwise, even with the --one-file-system tar option,
# the permissions of the mount source will be stored and not those of
# the mount target (the directory)
push @cleanup_tasks, sub {
# since we cannot write to /etc/mtab we need --no-mtab
# unmounting /sys only seems to be successful with --lazy
0 == system('umount', '--no-mtab', '--lazy',
"$options->{root}/sys")
or warning("umount /sys failed: $?");
};
# without the network namespace unshared, we cannot mount a new
# sysfs. Since we need network, we just bind-mount.
#
# we have to rbind because just using bind results in "wrong fs
# type, bad option, bad superblock" error
0 == system('mount', '-o', 'rbind', '/sys', "$options->{root}/sys")
or error "mount /sys failed: $?";
} elsif (any { $_ eq $options->{mode} } ('fakechroot', 'chrootless')) {
# we cannot mount in fakechroot mode
} else {
error "unknown mode: $options->{mode}";
}
if (
(any { $_ eq $options->{mode} } ('root', 'unshare'))
&& (any { $_ =~ '^chroot/mount(?:/proc)?$' } @{ $options->{skip} })
) {
info "skipping chroot/mount/proc as requested";
} elsif ((any { $_ eq $options->{mode} } ('root', 'unshare'))
&& !$options->{canmount}) {
warning "skipping mount proc";
} elsif ((any { $_ eq $options->{mode} } ('root', 'unshare'))
&& !-d "$options->{root}/proc") {
warning("skipping mounting of proc because the"
. " /proc directory is missing in the target");
} elsif ((any { $_ eq $options->{mode} } ('root', 'unshare'))
&& !-e "/proc") {
warning("skipping mounting /proc because"
. " /proc does not exist on the outside");
} elsif ((any { $_ eq $options->{mode} } ('root', 'unshare'))
&& !-d "/proc") {
warning("skipping mounting /proc because"
. " /proc on the outside is not a directory");
} elsif (any { $_ eq $options->{mode} } ('root', 'unshare')) {
# we don't know whether we run in root mode inside an unshared
# user namespace or as real root so we first try the real mount and
# then fall back to mounting in a way that works in unshared mode
if (
$options->{mode} eq 'root'
&& 0 == system(
'mount', '-t', 'proc', '-o', 'ro', 'proc',
"$options->{root}/proc"
)
) {
push @cleanup_tasks, sub {
# some maintainer scripts mount additional stuff into /proc
# which we need to unmount beforehand
if (
is_mountpoint(
$options->{root} . "/proc/sys/fs/binfmt_misc"
)
) {
0 == system('umount',
"$options->{root}/proc/sys/fs/binfmt_misc")
or warning(
"umount /proc/sys/fs/binfmt_misc failed: $?");
}
0 == system('umount', "$options->{root}/proc")
or warning("umount /proc failed: $?");
};
} elsif (
0 == system('mount', '-t', 'proc', 'proc',
"$options->{root}/proc")) {
push @cleanup_tasks, sub {
# since we cannot write to /etc/mtab we need --no-mtab
0 == system('umount', '--no-mtab', "$options->{root}/proc")
or warning("umount /proc failed: $?");
};
} elsif (
# if mounting proc failed, try bind-mounting it as a last resort
0 == system(
'mount', '-o',
'rbind', '/proc',
"$options->{root}/proc"
)
) {
warning("since mounting /proc normally failed, /proc is now "
. "bind-mounted instead");
# to make sure that changes (like unmounting) to the
# bind-mounted /proc do not affect the outside /proc, change
# all the bind-mounts under /proc to be a slave mount.
if (
0 != system('mount', '--make-rslave',
"$options->{root}/proc")) {
warning("mount --make-rslave /proc failed");
}
2023-02-09 09:19:51 +00:00
push @cleanup_tasks, sub {
# since we cannot write to /etc/mtab we need --no-mtab
2023-02-10 12:26:24 +00:00
0 == system(
'umount', '--no-mtab',
'--lazy', "$options->{root}/proc"
) or warning("umount /proc failed: $?");
2023-02-09 09:19:51 +00:00
};
2022-03-25 13:25:54 +00:00
} else {
error "mount /proc failed: $?";
}
2022-09-05 03:50:50 +00:00
} elsif (any { $_ eq $options->{mode} } ('fakechroot', 'chrootless')) {
# we cannot mount in fakechroot mode
2020-01-08 14:41:49 +00:00
} else {
error "unknown mode: $options->{mode}";
}
# prevent daemons from starting
# the directory might not exist in custom variant, for example
2020-03-07 22:39:53 +00:00
#
# ideally, we should use update-alternatives but we cannot rely on it
# existing inside the chroot
#
# See #911290 for more problems of this interface
2022-09-02 21:25:48 +00:00
if (any { $_ eq 'chroot/policy-rc.d' } @{ $options->{skip} }) {
info "skipping chroot/policy-rc.d as requested";
} else {
2022-11-10 13:53:36 +00:00
push @cleanup_tasks, sub {
if (-f "$options->{root}/usr/sbin/policy-rc.d") {
unlink "$options->{root}/usr/sbin/policy-rc.d"
or error "cannot unlink policy-rc.d: $!";
}
};
2022-09-02 21:25:48 +00:00
if (-d "$options->{root}/usr/sbin/") {
open my $fh, '>', "$options->{root}/usr/sbin/policy-rc.d"
or error "cannot open policy-rc.d: $!";
print $fh "#!/bin/sh\n";
print $fh "exit 101\n";
close $fh;
chmod 0755, "$options->{root}/usr/sbin/policy-rc.d"
or error "cannot chmod policy-rc.d: $!";
}
2020-01-08 14:41:49 +00:00
}
# the file might not exist if it was removed in a hook
2022-09-02 21:25:48 +00:00
if (any { $_ eq 'chroot/start-stop-daemon' } @{ $options->{skip} }) {
info "skipping chroot/start-stop-daemon as requested";
} else {
2024-01-30 06:41:08 +00:00
# $options->{root} must not be part of $ssdloc but must instead be
        # evaluated at the time the cleanup is run; otherwise, when
# performing a pivot-root, the ssd location will still be prefixed
# with the chroot path even though we changed root
2024-01-25 08:30:26 +00:00
my $ssdloc;
if (-f "$options->{root}/sbin/start-stop-daemon") {
2024-01-30 06:41:08 +00:00
$ssdloc = "/sbin/start-stop-daemon";
2024-01-25 08:30:26 +00:00
} elsif (-f "$options->{root}/usr/sbin/start-stop-daemon") {
2024-01-30 06:41:08 +00:00
$ssdloc = "/usr/sbin/start-stop-daemon";
2024-01-25 08:30:26 +00:00
}
2022-11-10 13:53:36 +00:00
push @cleanup_tasks, sub {
2024-01-25 08:30:26 +00:00
return unless length $ssdloc;
2024-01-30 06:41:08 +00:00
if (-e "$options->{root}/$ssdloc.REAL") {
move(
"$options->{root}/$ssdloc.REAL",
"$options->{root}/$ssdloc"
) or error "cannot move start-stop-daemon: $!";
2022-11-10 13:53:36 +00:00
}
};
2024-01-25 08:30:26 +00:00
if (length $ssdloc) {
2024-01-30 06:41:08 +00:00
if (-e "$options->{root}/$ssdloc.REAL") {
error "$options->{root}/$ssdloc.REAL already exists";
2022-09-02 21:25:48 +00:00
}
2024-01-30 06:41:08 +00:00
move(
"$options->{root}/$ssdloc",
"$options->{root}/$ssdloc.REAL"
) or error "cannot move start-stop-daemon: $!";
open my $fh, '>', "$options->{root}/$ssdloc"
2022-09-02 21:25:48 +00:00
or error "cannot open start-stop-daemon: $!";
print $fh "#!/bin/sh\n";
print $fh
"echo \"Warning: Fake start-stop-daemon called, doing"
. " nothing\">&2\n";
close $fh;
2024-01-30 06:41:08 +00:00
chmod 0755, "$options->{root}/$ssdloc"
2022-09-02 21:25:48 +00:00
or error "cannot chmod start-stop-daemon: $!";
2020-01-08 14:41:49 +00:00
}
}
2019-01-13 09:17:46 +00:00
};
2022-11-10 13:53:36 +00:00
if ($@) {
error "setup_mounts failed: $@";
2019-01-13 09:17:46 +00:00
}
2022-11-10 13:53:36 +00:00
return @cleanup_tasks;
2019-01-13 09:17:46 +00:00
}
2020-01-09 07:39:40 +00:00
sub run_hooks {
2022-12-23 09:02:04 +00:00
my $name = shift;
my $options = shift;
my $essential_pkgs = shift;
2019-02-20 12:32:49 +00:00
2020-01-08 16:44:07 +00:00
if (scalar @{ $options->{"${name}_hook"} } == 0) {
2020-01-08 14:41:49 +00:00
return;
2019-02-20 12:32:49 +00:00
}
2020-01-10 10:44:15 +00:00
if ($options->{dryrun}) {
info "not running ${name}-hooks because of --dry-run";
return;
}
2021-01-09 18:41:59 +00:00
my @env_opts = ();
2021-02-04 16:46:39 +00:00
# At this point TMPDIR is set to "$options->{root}/tmp". This is to have a
# writable TMPDIR even in unshare mode. But if TMPDIR is still set when
    # running hooks, then every hook script calling chroot will have to wrap
# that into an "env --unset=TMPDIR". To avoid this, we unset TMPDIR here.
# If the hook script needs a writable TMPDIR, then it can always use /tmp
# inside the chroot. This is also why we do not set a new MMDEBSTRAP_TMPDIR
# environment variable.
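    # Illustration only: hooks receive the chroot directory as their argument
    # ($1 when written in shell), so a hook needing a writable scratch
    # directory that is visible from both sides can simply use "$1/tmp",
    # which is /tmp inside the chroot.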
2021-03-08 06:54:04 +00:00
if (length $ENV{TMPDIR}) {
2021-02-04 16:46:39 +00:00
push @env_opts, '--unset=TMPDIR';
}
2021-01-09 18:41:59 +00:00
# The APT_CONFIG variable, if set, will confuse any manual calls to
# apt-get. If you want to use the same config used by mmdebstrap, the
# original value is stored in MMDEBSTRAP_APT_CONFIG.
2021-03-08 06:54:04 +00:00
if (length $ENV{APT_CONFIG}) {
2021-01-09 18:41:59 +00:00
push @env_opts, '--unset=APT_CONFIG';
}
2021-03-08 06:54:04 +00:00
if (length $ENV{APT_CONFIG}) {
2021-01-09 18:41:59 +00:00
push @env_opts, "MMDEBSTRAP_APT_CONFIG=$ENV{APT_CONFIG}";
}
2023-06-19 11:10:49 +00:00
# A hook script that wants to call mmdebstrap with --hook-helper needs to
2023-03-19 07:58:53 +00:00
# know how mmdebstrap was executed
push @env_opts, "MMDEBSTRAP_ARGV0=$PROGRAM_NAME";
2021-01-09 18:41:59 +00:00
# Storing the mode is important for hook scripts to potentially change
# their behavior depending on the mode. It's also important for when the
# hook wants to use the mmdebstrap --hook-helper.
push @env_opts, "MMDEBSTRAP_MODE=$options->{mode}";
2023-06-19 11:10:49 +00:00
if (defined $options->{suite}) {
push @env_opts, "MMDEBSTRAP_SUITE=$options->{suite}";
}
2023-10-23 08:35:07 +00:00
push @env_opts, "MMDEBSTRAP_FORMAT=$options->{format}";
2021-02-06 08:18:05 +00:00
# Storing the hook name is important for hook scripts to potentially change
# their behavior depending on the hook. It's also important for when the
# hook wants to use the mmdebstrap --hook-helper.
push @env_opts, "MMDEBSTRAP_HOOK=$name";
2021-01-09 18:41:59 +00:00
# This is the file descriptor of the socket that the mmdebstrap
# --hook-helper can write to and read from to communicate with the outside.
push @env_opts, ("MMDEBSTRAP_HOOKSOCK=" . fileno($options->{hooksock}));
2022-05-24 02:14:08 +00:00
# Store the verbosity of mmdebstrap so that hooks can be just as verbose
# as the mmdebstrap invocation that called them.
push @env_opts, ("MMDEBSTRAP_VERBOSITY=" . $verbosity_level);
2022-09-02 22:03:40 +00:00
# Store the packages given via --include in an environment variable so that
# hooks can, for example, make .deb files available inside the chroot.
{
my @escaped_includes = @{ $options->{include} };
foreach my $incl (@escaped_includes) {
# We have to encode commas so that values containing commas can
# be stored in the list. Since we encode using percent-encoding
# (urlencoding) we also have to encode the percent sign.
$incl =~ s/%/%25/g;
$incl =~ s/,/%2C/g;
}
push @env_opts,
("MMDEBSTRAP_INCLUDE=" . (join ",", @escaped_includes));
}
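    # Illustration only: an --include entry like "./pkgs/foo_1.0,1_amd64.deb"
    # is stored as "./pkgs/foo_1.0%2C1_amd64.deb" so that splitting
    # MMDEBSTRAP_INCLUDE on "," stays unambiguous; a literal "%" becomes "%25".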
2022-12-23 09:02:04 +00:00
# Give the extract hook access to the essential packages that are about to
# be installed
if ($name eq "extract" and scalar @{$essential_pkgs} > 0) {
push @env_opts,
("MMDEBSTRAP_ESSENTIAL=" . (join " ", @{$essential_pkgs}));
}
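    # Illustration only: MMDEBSTRAP_ESSENTIAL then contains a space separated
    # list of package paths relative to the chroot, for example something like
    # "/var/cache/apt/archives/base-files_VERSION_amd64.deb ...".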
2023-09-27 05:56:49 +00:00
if ($options->{mode} eq 'unshare') {
push @env_opts, "container=mmdebstrap-unshare";
}
2021-01-09 18:41:59 +00:00
2022-11-10 13:53:36 +00:00
# Unset the close-on-exec flag, so that the file descriptor does not
# get closed when we exec
my $flags = fcntl($options->{hooksock}, F_GETFD, 0)
or error "fcntl F_GETFD: $!";
fcntl($options->{hooksock}, F_SETFD, $flags & ~FD_CLOEXEC)
or error "fcntl F_SETFD: $!";
{
2020-01-08 16:44:07 +00:00
foreach my $script (@{ $options->{"${name}_hook"} }) {
2022-11-14 08:59:59 +00:00
my $type = $script->[0];
$script = $script->[1];
if ($type eq "pivoted") {
info "running --chrooted-$name-hook in shell: sh -c "
. "'$script'";
my $pid = fork() // error "fork() failed: $!";
if ($pid == 0) {
# child
my @cmdprefix = ();
if ($options->{mode} eq 'fakechroot') {
# we are calling the chroot executable instead of
# chrooting the process so that fakechroot can handle
# it
@cmdprefix = ('chroot', $options->{root});
} elsif ($options->{mode} eq 'root') {
# unsharing the mount namespace is not enough for
# pivot_root to work as root (why?) unsharing the user
# namespace as well (but without remapping) makes
# pivot_root work (why??) but still makes later lazy
# umounts fail (why???). Since pivot_root is mainly
# useful for being able to run unshare mode inside
# unshare mode, we fall back to just calling chroot()
# until somebody has motivation and time to figure out
# what is going on.
chroot $options->{root}
or error "failed to chroot(): $!";
$options->{root} = "/";
chdir "/" or error "failed chdir() to /: $!";
} elsif ($options->{mode} eq 'unshare') {
0 == syscall &SYS_unshare, $CLONE_NEWNS
or error "unshare() failed: $!";
pivot_root($options->{root});
} else {
error "unknown mode: $options->{mode}";
}
0 == system(@cmdprefix, 'env', @env_opts, 'sh', '-c',
$script)
or error "command failed: $script";
exit 0;
}
waitpid($pid, 0);
$? == 0 or error "chrooted hook failed with exit code $?";
next;
}
# inode and device number of chroot before
my ($dev_before, $ino_before, undef) = stat($options->{root});
2020-01-16 09:38:14 +00:00
if (
$script =~ /^(
copy-in|copy-out
|tar-in|tar-out
|upload|download
|sync-in|sync-out
)\ /x
) {
2020-01-08 14:41:49 +00:00
info "running special hook: $script";
2022-09-05 03:50:50 +00:00
if ((any { $_ eq $options->{variant} } ('extract', 'custom'))
and $options->{mode} eq 'fakechroot'
and $name ne 'setup') {
2020-01-08 16:41:46 +00:00
info "the copy-in, copy-out, tar-in and tar-out commands"
2022-09-05 03:50:50 +00:00
. " in fakechroot mode might fail in"
2020-01-08 16:41:46 +00:00
. " extract and custom variants because there might be"
. " no tar inside the chroot";
2020-01-08 14:41:49 +00:00
}
my $pid = fork() // error "fork() failed: $!";
if ($pid == 0) {
# whatever the script writes on stdout is sent to the
# socket
                # whatever is written to the socket is sent to stdin
2020-01-08 16:44:07 +00:00
open(STDOUT, '>&', $options->{hooksock})
or error "cannot open STDOUT: $!";
open(STDIN, '<&', $options->{hooksock})
or error "cannot open STDIN: $!";
2020-01-08 14:41:49 +00:00
2022-11-08 12:02:47 +00:00
# Text::ParseWords::shellwords does for perl what shlex
# does for python
my @args = shellwords $script;
hookhelper($options->{root}, $options->{mode}, $name,
2023-10-23 08:41:46 +00:00
(join ',', @{ $options->{skip} }),
$verbosity_level, @args);
2022-11-08 12:02:47 +00:00
exit 0;
2020-01-08 14:41:49 +00:00
}
waitpid($pid, 0);
$? == 0 or error "special hook failed with exit code $?";
2020-01-08 16:44:07 +00:00
} elsif (-x $script || $script !~ m/[^\w@\%+=:,.\/-]/a) {
2020-01-08 14:41:49 +00:00
info "running --$name-hook directly: $script $options->{root}";
# execute it directly if it's an executable file
                # or if there are no shell metacharacters
# (the /a regex modifier makes \w match only ASCII)
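                # Illustration only: a hook given as "/usr/local/bin/myhook"
                # (an executable) or as a single word without shell
                # metacharacters is executed directly with the chroot
                # directory appended as the last argument, while something
                # like "echo done > $1/stamp" is wrapped in sh -c below.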
2021-01-09 18:41:59 +00:00
0 == system('env', @env_opts, $script, $options->{root})
2020-01-08 16:44:07 +00:00
or error "command failed: $script";
2020-01-08 14:41:49 +00:00
} else {
2020-01-08 16:41:46 +00:00
info "running --$name-hook in shell: sh -c '$script' exec"
. " $options->{root}";
2020-01-08 14:41:49 +00:00
# otherwise, wrap everything in sh -c
2021-01-09 18:41:59 +00:00
0 == system('env', @env_opts,
2020-03-07 01:06:11 +00:00
'sh', '-c', $script, 'exec', $options->{root})
2020-01-08 16:44:07 +00:00
or error "command failed: $script";
2020-01-08 14:41:49 +00:00
}
2022-11-14 08:59:59 +00:00
# If the chroot directory vanished, check if pivot_root was
# performed.
#
# Running pivot_root is only really useful in the customize-hooks
# because mmdebstrap uses apt from the outside to install packages
# and that will fail after pivot_root because the process doesn't
# have access to the system on the outside anymore.
if (!-e $options->{root}) {
my ($dev_root, $ino_root, undef) = stat("/");
if ($dev_before == $dev_root and $ino_before == $ino_root) {
info "detected pivot_root, changing chroot directory to /";
# the old chroot directory is now /
# the hook probably executed pivot_root
$options->{root} = "/";
chdir "/" or error "failed chdir() to /: $!";
} else {
error "chroot directory $options->{root} vanished";
}
}
2020-01-08 14:41:49 +00:00
}
2019-02-20 12:32:49 +00:00
};
2021-01-09 18:41:59 +00:00
# Restore flags
fcntl($options->{hooksock}, F_SETFD, $flags) or error "fcntl F_SETFD: $!";
2020-01-09 07:39:40 +00:00
return;
2019-02-20 12:32:49 +00:00
}
2018-09-18 09:20:24 +00:00
sub setup {
my $options = shift;
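    # Rough order of phases performed below (only a summary; the exact
    # behaviour depends on variant, mode and --skip options):
    #   run_setup        -- create the basic chroot directory layout
    #   setup hooks      -- run the --setup-hook scripts
    #   run_update       -- apt-get update
    #   run_download     -- download the essential packages
    #   run_extract      -- extract the essential packages with tar
    #   extract hooks    -- run the --extract-hook scripts
    #   run_essential    -- install the essential packages with dpkg
    #   essential hooks  -- run the --essential-hook scripts
    #   run_install      -- install the --include packages with apt
    #   customize hooks  -- run the --customize-hook scripts
    #   run_cleanup      -- remove temporary files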
2019-01-20 09:39:01 +00:00
foreach my $key (sort keys %{$options}) {
2020-01-08 14:41:49 +00:00
my $value = $options->{$key};
if (!defined $value) {
next;
}
if (ref $value eq '') {
debug "$key: $options->{$key}";
} elsif (ref $value eq 'ARRAY') {
debug "$key: [" . (join ', ', @{$value}) . "]";
} elsif (ref $value eq 'GLOB') {
debug "$key: GLOB";
} else {
error "unknown type for key $key: " . (ref $value);
}
2018-09-18 09:20:24 +00:00
}
2020-01-21 23:27:57 +00:00
if (-e $options->{apttrusted} && !-r $options->{apttrusted}) {
warning "cannot read $options->{apttrusted}";
}
if (-e $options->{apttrustedparts} && !-r $options->{apttrustedparts}) {
warning "cannot read $options->{apttrustedparts}";
}
2021-08-17 09:11:00 +00:00
if (any { $_ eq 'setup' } @{ $options->{skip} }) {
info "skipping setup as requested";
} else {
run_setup($options);
}
2020-04-09 21:07:02 +00:00
run_hooks('setup', $options);
2023-03-19 08:04:06 +00:00
# apt runs dpkg from inside the chroot and directly passes the filename to
# dpkg. Hence, the included files on the outside must be present under the
# same path on the inside. If they are not, dpkg cannot find them.
if (scalar(grep { /^\// } @{ $options->{include} }) > 0) {
my $ret = 0;
foreach my $f (grep { /^\// } @{ $options->{include} }) {
next if -e "$options->{root}/$f";
warning
"path given via --include is not present inside the chroot: $f";
$ret = 1;
}
if ($ret != 0) {
warning("apt runs chrooted dpkg which needs access to the "
. "package paths given via --include inside the chroot.");
warning "maybe try running mmdebstrap with "
. "--hook-dir=/usr/share/mmdebstrap/hooks/file-mirror-automount";
}
}
2021-08-17 09:11:00 +00:00
if (any { $_ eq 'update' } @{ $options->{skip} }) {
info "skipping update as requested";
} else {
run_update($options);
}
2020-04-09 21:07:02 +00:00
2021-11-09 06:31:56 +00:00
(my $essential_pkgs, my $cached_debs) = run_download($options);
2020-04-09 21:07:02 +00:00
2021-08-17 21:39:20 +00:00
# in theory, we don't have to extract the packages in chrootless mode
    # but we do it anyway because otherwise directory creation timestamps
# will differ compared to non-chrootless and we want to create bit-by-bit
# identical tar output
#
# FIXME: dpkg could be changed to produce the same results
run_extract($options, $essential_pkgs);
2020-04-09 21:07:02 +00:00
2022-11-10 13:53:36 +00:00
# setup mounts
my @cleanup_tasks = ();
my $cleanup = sub {
my $signal = $_[0];
while (my $task = pop @cleanup_tasks) {
$task->();
2020-04-09 21:07:02 +00:00
}
2022-11-10 13:53:36 +00:00
if ($signal) {
warning "pid $PID cought signal: $signal";
exit 1;
}
};
# we only need to setup the mounts if there is anything to do
if ( $options->{variant} ne 'custom'
or scalar @{ $options->{include} } > 0
or scalar @{ $options->{"extract_hook"} } > 0
or scalar @{ $options->{"essential_hook"} } > 0
or scalar @{ $options->{"customize_hook"} } > 0) {
local $SIG{INT} = $cleanup;
local $SIG{HUP} = $cleanup;
local $SIG{PIPE} = $cleanup;
local $SIG{TERM} = $cleanup;
@cleanup_tasks = setup_mounts($options);
}
eval {
2022-12-22 10:51:01 +00:00
my $chrootcmd = [];
2022-11-10 13:53:36 +00:00
if ($options->{variant} ne 'extract') {
if ($options->{mode} ne 'chrootless') {
$chrootcmd = run_prepare($options);
}
2022-12-22 10:51:01 +00:00
}
run_hooks('extract', $options, $essential_pkgs);
2020-04-09 21:07:02 +00:00
2022-12-22 10:51:01 +00:00
if ($options->{variant} ne 'extract') {
2022-11-10 13:53:36 +00:00
run_essential($options, $essential_pkgs, $chrootcmd, $cached_debs);
2020-04-09 21:07:02 +00:00
2022-11-10 13:53:36 +00:00
run_hooks('essential', $options);
2020-04-09 21:07:02 +00:00
2022-11-10 13:53:36 +00:00
run_install($options);
2020-04-09 21:07:02 +00:00
2022-11-10 13:53:36 +00:00
run_hooks('customize', $options);
}
};
my $msg = $@;
$cleanup->(0);
if ($msg) {
error "setup failed: $msg";
2020-04-09 21:07:02 +00:00
}
2021-08-17 09:11:00 +00:00
if (any { $_ eq 'cleanup' } @{ $options->{skip} }) {
info "skipping cleanup as requested";
} else {
run_cleanup($options);
}
2020-04-09 21:07:02 +00:00
return;
}
sub run_setup() {
my $options = shift;
2020-03-07 22:40:55 +00:00
{
my @directories = (
'/etc/apt/apt.conf.d', '/etc/apt/sources.list.d',
'/etc/apt/preferences.d', '/var/cache/apt',
2020-08-15 16:09:06 +00:00
'/var/lib/apt/lists/partial', '/tmp'
2020-03-07 22:40:55 +00:00
);
2020-08-15 16:09:06 +00:00
# we need /var/lib/dpkg in case we need to write to /var/lib/dpkg/arch
push @directories, '/var/lib/dpkg';
2020-08-24 14:28:32 +00:00
# since we do not know the dpkg version inside the chroot at this
# point, we can only omit it in chrootless mode
2021-10-07 06:12:12 +00:00
if ($options->{mode} ne 'chrootless'
2024-01-09 10:21:53 +00:00
or length $options->{dpkgopts} > 0) {
2020-08-15 16:09:06 +00:00
push @directories, '/etc/dpkg/dpkg.cfg.d/';
}
2020-03-07 22:40:55 +00:00
# if dpkg and apt operate from the outside we need some more
# directories because dpkg and apt might not even be installed inside
2021-08-17 21:39:20 +00:00
# the chroot. Thus, the following block is not strictly necessary in
        # chrootless mode. We unconditionally add it anyway, so that the
# output with and without chrootless mode is equal.
{
2020-08-15 16:09:06 +00:00
push @directories, '/var/log/apt';
2020-08-24 14:28:32 +00:00
# since we do not know the dpkg version inside the chroot at this
# point, we can only omit it in chrootless mode
2021-10-07 06:12:12 +00:00
if ($options->{mode} ne 'chrootless') {
2020-08-15 16:09:06 +00:00
push @directories, '/var/lib/dpkg/triggers',
'/var/lib/dpkg/info', '/var/lib/dpkg/alternatives',
'/var/lib/dpkg/updates';
}
2020-03-07 22:40:55 +00:00
}
foreach my $dir (@directories) {
if (-e "$options->{root}/$dir") {
if (!-d "$options->{root}/$dir") {
error "$dir already exists but is not a directory";
}
} else {
my $num_created = make_path "$options->{root}/$dir",
{ error => \my $err };
if ($err && @$err) {
error(
join "; ",
(map { "cannot create " . (join ": ", %{$_}) } @$err));
} elsif ($num_created == 0) {
error "cannot create $options->{root}/$dir";
}
}
}
2020-05-03 15:18:34 +00:00
# make sure /tmp is not 0755 like the rest
chmod 01777, "$options->{root}/tmp" or error "cannot chmod /tmp: $!";
2020-03-07 22:40:55 +00:00
}
# The TMPDIR set by the user or even /tmp might be inaccessible by the
# unshared user. Thus, we place all temporary files in /tmp inside the new
# rootfs.
#
# This will affect calls to tempfile() as well as runs of "apt-get update"
# which will create temporary clearsigned.message.XXXXXX files to verify
# signatures.
2021-08-25 03:20:46 +00:00
#
# Setting TMPDIR to inside the chroot is also necessary for when packages
# are installed with apt from outside the chroot with
# DPkg::Chroot-Directory
2020-03-07 22:40:55 +00:00
{
## no critic (Variables::RequireLocalizedPunctuationVars)
$ENV{"TMPDIR"} = "$options->{root}/tmp";
}
2020-03-07 01:11:35 +00:00
my ($conf, $tmpfile)
2020-03-07 22:40:55 +00:00
= tempfile("mmdebstrap.apt.conf.XXXXXXXXXXXX", TMPDIR => 1)
2020-01-08 16:44:07 +00:00
or error "cannot open apt.conf: $!";
2018-09-18 09:20:24 +00:00
print $conf "Apt::Architecture \"$options->{nativearch}\";\n";
# the host system might have configured additional architectures
# force only the native architecture
2020-01-08 16:44:07 +00:00
if (scalar @{ $options->{foreignarchs} } > 0) {
2020-01-08 14:41:49 +00:00
print $conf "Apt::Architectures { \"$options->{nativearch}\"; ";
2020-01-08 16:44:07 +00:00
foreach my $arch (@{ $options->{foreignarchs} }) {
2020-01-08 14:41:49 +00:00
print $conf "\"$arch\"; ";
}
print $conf "};\n";
2018-09-18 09:20:24 +00:00
} else {
2020-01-08 14:41:49 +00:00
print $conf "Apt::Architectures \"$options->{nativearch}\";\n";
2018-09-18 09:20:24 +00:00
}
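    # Illustration only: with a native architecture of amd64 and one foreign
    # architecture armhf, the above writes:
    #   Apt::Architecture "amd64";
    #   Apt::Architectures { "amd64"; "armhf"; };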
2019-02-28 11:22:42 +00:00
print $conf "Dir \"$options->{root}\";\n";
2023-03-22 08:27:11 +00:00
print $conf "DPkg::Chroot-Directory \"$options->{root}\";\n";
2019-04-25 06:51:42 +00:00
# not needed anymore for apt 1.3 and newer
2020-01-08 16:44:07 +00:00
print $conf
"Dir::State::Status \"$options->{root}/var/lib/dpkg/status\";\n";
2018-09-18 09:20:24 +00:00
# for authentication, use the keyrings from the host
2019-12-03 09:16:39 +00:00
print $conf "Dir::Etc::Trusted \"$options->{apttrusted}\";\n";
print $conf "Dir::Etc::TrustedParts \"$options->{apttrustedparts}\";\n";
2019-03-25 13:31:45 +00:00
if ($options->{variant} ne 'apt') {
2020-01-08 14:41:49 +00:00
# apt considers itself essential. Thus, when generating an EDSP
# document for an external solver, it will add the Essential:yes field
# to the apt package stanza. This is unnecessary for any other variant
# than 'apt' because in all other variants we compile the set of
# packages we consider essential ourselves and for the 'essential'
# variant it would even be wrong to add apt. This workaround is only
# needed when apt is used with an external solver but doesn't hurt
# otherwise and we don't have a good way to figure out whether apt is
# using an external solver or not short of parsing the --aptopt
# options.
print $conf "pkgCacheGen::ForceEssential \",\";\n";
2019-03-25 13:31:45 +00:00
}
2018-09-18 09:20:24 +00:00
close $conf;
2018-09-23 17:43:14 +00:00
# We put certain configuration items in their own configuration file
# because they have to be valid for apt invocation from outside as well as
# from inside the chroot.
# The config filename is chosen such that any settings in it will be
# overridden by what the user specified with --aptopt.
2022-01-07 13:44:01 +00:00
if (!-e "$options->{root}/etc/apt/apt.conf.d/00mmdebstrap") {
2020-01-08 16:44:07 +00:00
open my $fh, '>', "$options->{root}/etc/apt/apt.conf.d/00mmdebstrap"
or error "cannot open /etc/apt/apt.conf.d/00mmdebstrap: $!";
2020-01-08 14:41:49 +00:00
print $fh "Apt::Install-Recommends false;\n";
print $fh "Acquire::Languages \"none\";\n";
close $fh;
2018-09-23 17:43:14 +00:00
}
2020-08-15 16:09:06 +00:00
# apt-get update requires this
2022-01-07 13:44:01 +00:00
if (!-e "$options->{root}/var/lib/dpkg/status") {
2020-01-08 16:44:07 +00:00
open my $fh, '>', "$options->{root}/var/lib/dpkg/status"
or error "failed to open(): $!";
2020-01-08 14:41:49 +00:00
close $fh;
2018-09-18 09:20:24 +00:00
}
2022-09-22 12:22:37 +00:00
# In theory, /var/lib/dpkg/arch is only useful if there are foreign
# architectures configured or if the architecture of a chrootless chroot
# is different from the native architecture outside the chroot.
# We nevertheless always add /var/lib/dpkg/arch to make a chroot built the
# normal way bit-by-bit identical to a foreign arch chroot built in
# chrootless mode.
2020-01-08 16:44:07 +00:00
chomp(my $hostarch = `dpkg --print-architecture`);
2022-09-22 12:22:37 +00:00
if ((!-e "$options->{root}/var/lib/dpkg/arch")) {
2020-01-08 16:44:07 +00:00
open my $fh, '>', "$options->{root}/var/lib/dpkg/arch"
or error "cannot open /var/lib/dpkg/arch: $!";
2020-01-08 14:41:49 +00:00
print $fh "$options->{nativearch}\n";
2020-01-08 16:44:07 +00:00
foreach my $arch (@{ $options->{foreignarchs} }) {
2020-01-08 14:41:49 +00:00
print $fh "$arch\n";
}
close $fh;
2018-09-18 09:20:24 +00:00
}
2024-01-09 10:21:53 +00:00
if (length $options->{aptopts} > 0
2022-01-07 13:44:01 +00:00
and (!-e "$options->{root}/etc/apt/apt.conf.d/99mmdebstrap")) {
2020-01-08 16:44:07 +00:00
open my $fh, '>', "$options->{root}/etc/apt/apt.conf.d/99mmdebstrap"
or error "cannot open /etc/apt/apt.conf.d/99mmdebstrap: $!";
2024-01-09 10:21:53 +00:00
print $fh $options->{aptopts};
2020-01-08 14:41:49 +00:00
close $fh;
2020-04-10 10:55:31 +00:00
if ($verbosity_level >= 3) {
debug "content of /etc/apt/apt.conf.d/99mmdebstrap:";
copy("$options->{root}/etc/apt/apt.conf.d/99mmdebstrap", \*STDERR);
}
2018-09-18 09:20:24 +00:00
}
2024-01-09 10:21:53 +00:00
if (length $options->{dpkgopts} > 0
2022-01-07 13:44:01 +00:00
and (!-e "$options->{root}/etc/dpkg/dpkg.cfg.d/99mmdebstrap")) {
2020-01-08 14:41:49 +00:00
# FIXME: in chrootless mode, dpkg will only read the configuration
2021-02-23 11:49:46 +00:00
# from the host -- see #808203
if ($options->{mode} eq 'chrootless') {
warning('dpkg is unable to read an alternative configuration in'
                . ' chrootless mode -- see Debian bug #808203');
}
2020-01-08 16:44:07 +00:00
open my $fh, '>', "$options->{root}/etc/dpkg/dpkg.cfg.d/99mmdebstrap"
or error "cannot open /etc/dpkg/dpkg.cfg.d/99mmdebstrap: $!";
2024-01-09 10:21:53 +00:00
print $fh $options->{dpkgopts};
2020-01-08 14:41:49 +00:00
close $fh;
2020-04-10 10:55:31 +00:00
if ($verbosity_level >= 3) {
debug "content of /etc/dpkg/dpkg.cfg.d/99mmdebstrap:";
copy("$options->{root}/etc/dpkg/dpkg.cfg.d/99mmdebstrap",
\*STDERR);
}
2018-09-18 09:20:24 +00:00
}
2022-01-07 13:44:01 +00:00
if (!-e "$options->{root}/etc/fstab") {
2020-01-08 16:44:07 +00:00
open my $fh, '>', "$options->{root}/etc/fstab"
or error "cannot open fstab: $!";
2020-01-08 14:41:49 +00:00
print $fh "# UNCONFIGURED FSTAB FOR BASE SYSTEM\n";
close $fh;
2020-01-08 16:44:07 +00:00
chmod 0644, "$options->{root}/etc/fstab"
or error "cannot chmod fstab: $!";
2018-09-18 09:20:24 +00:00
}
2020-01-22 22:30:28 +00:00
# write /etc/apt/sources.list and files in /etc/apt/sources.list.d/
2023-01-16 14:13:39 +00:00
if (scalar @{ $options->{sourceslists} } > 0) {
2020-01-22 22:30:28 +00:00
my $firstentry = $options->{sourceslists}->[0];
# if the first sources.list entry is of one-line type and without
# explicit filename, then write out an actual /etc/apt/sources.list
# otherwise everything goes into /etc/apt/sources.list.d
my $fname;
if ($firstentry->{type} eq 'one-line'
&& !defined $firstentry->{fname}) {
$fname = "$options->{root}/etc/apt/sources.list";
} else {
$fname = "$options->{root}/etc/apt/sources.list.d/0000";
if (defined $firstentry->{fname}) {
$fname .= $firstentry->{fname};
if ( $firstentry->{fname} !~ /\.list/
&& $firstentry->{fname} !~ /\.sources/) {
if ($firstentry->{type} eq 'one-line') {
$fname .= '.list';
} elsif ($firstentry->{type} eq 'deb822') {
$fname .= '.sources';
} else {
error "invalid type: $firstentry->{type}";
}
}
} else {
# if no filename is given, then this must be a deb822 file
# because if it was a one-line type file, then it would've been
# written to /etc/apt/sources.list
$fname .= 'main.sources';
}
}
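        # Illustration only: a first one-line entry without explicit filename
        # ends up in /etc/apt/sources.list; a first deb822 entry without
        # filename becomes /etc/apt/sources.list.d/0000main.sources; a second
        # entry without filename becomes 0001main.list or 0001main.sources in
        # the loop below, depending on its type.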
2022-01-07 13:44:01 +00:00
if (!-e $fname) {
open my $fh, '>', "$fname" or error "cannot open $fname: $!";
print $fh $firstentry->{content};
close $fh;
}
2020-01-22 22:30:28 +00:00
# everything else goes into /etc/apt/sources.list.d/
for (my $i = 1 ; $i < scalar @{ $options->{sourceslists} } ; $i++) {
my $entry = $options->{sourceslists}->[$i];
my $fname = "$options->{root}/etc/apt/sources.list.d/"
. sprintf("%04d", $i);
if (defined $entry->{fname}) {
$fname .= $entry->{fname};
if ( $entry->{fname} !~ /\.list/
&& $entry->{fname} !~ /\.sources/) {
if ($entry->{type} eq 'one-line') {
$fname .= '.list';
} elsif ($entry->{type} eq 'deb822') {
$fname .= '.sources';
} else {
error "invalid type: $entry->{type}";
}
}
} else {
if ($entry->{type} eq 'one-line') {
$fname .= 'main.list';
} elsif ($entry->{type} eq 'deb822') {
$fname .= 'main.sources';
} else {
error "invalid type: $entry->{type}";
}
}
2022-01-07 13:44:01 +00:00
if (!-e $fname) {
open my $fh, '>', "$fname" or error "cannot open $fname: $!";
print $fh $entry->{content};
close $fh;
}
2020-01-22 22:30:28 +00:00
}
2018-09-18 09:20:24 +00:00
}
2018-10-22 15:05:56 +00:00
# allow network access from within
2020-11-29 19:54:31 +00:00
foreach my $file ("/etc/resolv.conf", "/etc/hostname") {
2022-01-07 13:44:01 +00:00
if (-e $file && !-e "$options->{root}/$file") {
2020-11-29 19:54:31 +00:00
# this will create a new file with 644 permissions and copy
# contents only even if $file was a symlink
copy($file, "$options->{root}/$file")
or error "cannot copy $file: $!";
# if the source was a regular file, preserve the permissions
if (-f $file) {
my $mode = (stat($file))[2];
$mode &= oct(7777); # mask off bits that aren't the mode
chmod $mode, "$options->{root}/$file"
or error "cannot chmod $file: $!";
}
2024-01-09 08:47:35 +00:00
} elsif (-e $file && -e "$options->{root}/$file") {
info "rootfs alreday contains $file";
2020-11-29 19:54:31 +00:00
} else {
warning("Host system does not have a $file to copy into the"
. " rootfs.");
}
2019-09-15 12:12:49 +00:00
}
2018-10-22 15:05:56 +00:00
if ($options->{havemknod}) {
2020-01-08 14:41:49 +00:00
foreach my $file (@devfiles) {
2020-01-08 16:44:07 +00:00
my ($fname, $mode, $type, $linkname, $devmajor, $devminor)
= @{$file};
if ($type == 0) { # normal file
2020-01-08 14:41:49 +00:00
error "type 0 not implemented";
2020-01-08 16:44:07 +00:00
} elsif ($type == 1) { # hardlink
2020-01-08 14:41:49 +00:00
error "type 1 not implemented";
2020-01-08 16:44:07 +00:00
} elsif ($type == 2) { # symlink
if ( $options->{mode} eq 'fakechroot'
and $linkname =~ /^\/proc/) {
2020-01-08 14:41:49 +00:00
# there is no /proc in fakechroot mode
next;
}
2021-03-08 07:04:35 +00:00
symlink $linkname, "$options->{root}/dev/$fname"
or error "cannot create symlink ./dev/$fname";
2021-12-14 15:10:25 +00:00
next; # chmod cannot work on symlinks
2020-01-08 16:44:07 +00:00
} elsif ($type == 3) { # character special
2021-03-08 07:04:35 +00:00
0 == system('mknod', "$options->{root}/dev/$fname", 'c',
2020-01-08 16:44:07 +00:00
$devmajor, $devminor)
or error "mknod failed: $?";
} elsif ($type == 4) { # block special
2021-03-08 07:04:35 +00:00
0 == system('mknod', "$options->{root}/dev/$fname", 'b',
2020-01-08 16:44:07 +00:00
$devmajor, $devminor)
or error "mknod failed: $?";
} elsif ($type == 5) { # directory
2021-03-08 07:04:35 +00:00
if (-e "$options->{root}/dev/$fname") {
if (!-d "$options->{root}/dev/$fname") {
error
"./dev/$fname already exists but is not a directory";
2020-01-08 14:41:49 +00:00
}
} else {
2021-03-08 07:04:35 +00:00
my $num_created = make_path "$options->{root}/dev/$fname",
2020-01-08 16:44:07 +00:00
{ error => \my $err };
2020-01-08 14:41:49 +00:00
if ($err && @$err) {
2020-01-08 16:44:07 +00:00
error(
join "; ",
(
map { "cannot create " . (join ": ", %{$_}) }
@$err
));
2020-01-08 14:41:49 +00:00
} elsif ($num_created == 0) {
2021-03-08 07:04:35 +00:00
error "cannot create $options->{root}/dev/$fname";
2020-01-08 14:41:49 +00:00
}
}
} else {
error "unsupported type: $type";
}
2021-03-08 07:04:35 +00:00
chmod $mode, "$options->{root}/dev/$fname"
or error "cannot chmod ./dev/$fname: $!";
2020-01-08 14:41:49 +00:00
}
2018-10-22 15:05:56 +00:00
}
2018-09-18 09:20:24 +00:00
# we tell apt about the configuration via a config file passed via the
# APT_CONFIG environment variable instead of using the --option command
# line arguments because configuration settings like Dir::Etc have already
# been evaluated at the time that apt takes its command line arguments
# into account.
2020-01-09 07:39:40 +00:00
{
## no critic (Variables::RequireLocalizedPunctuationVars)
$ENV{"APT_CONFIG"} = "$tmpfile";
}
2020-11-13 18:02:41 +00:00
# we have to make the config file world readable so that a possible
# /usr/lib/apt/solvers/apt process which is run by the _apt user is also
# able to read it
2023-03-16 20:23:41 +00:00
chmod 0644, "$tmpfile" or error "cannot chmod $tmpfile: $!";
2020-01-21 12:12:44 +00:00
if ($verbosity_level >= 3) {
2020-04-09 10:51:24 +00:00
0 == system('apt-get', '--version')
or error "apt-get --version failed: $?";
2020-01-21 12:12:44 +00:00
0 == system('apt-config', 'dump') or error "apt-config failed: $?";
2020-03-07 01:13:26 +00:00
debug "content of $tmpfile:";
copy($tmpfile, \*STDERR);
2020-01-21 12:12:44 +00:00
}
2018-09-21 06:04:40 +00:00
2022-09-05 03:50:50 +00:00
if ($options->{mode} ne 'fakechroot') {
2020-06-08 13:45:22 +00:00
# Apt dropping privileges to another user than root is not useful in
2022-09-05 03:50:50 +00:00
# fakechroot mode because all users are faked and thus there is no real
        # privilege difference anyway. We could set APT::Sandbox::User "root"
        # in fakechroot mode but we don't because if we did, then
# /var/cache/apt/archives/partial/ and /var/lib/apt/lists/partial/
# would not be owned by the _apt user if mmdebstrap was run in
# fakechroot mode.
2021-08-27 17:50:11 +00:00
#
2020-06-08 13:45:22 +00:00
# when apt-get update is run by the root user, then apt will attempt to
# drop privileges to the _apt user. This will fail if the _apt user
# does not have permissions to read the root directory. In that case,
# we have to disable apt sandboxing. This can for example happen in
# root mode when the path of the chroot is not in a world-readable
# location.
2020-01-08 14:41:49 +00:00
my $partial = '/var/lib/apt/lists/partial';
2023-01-26 08:28:35 +00:00
my @testcmd = (
'/usr/lib/apt/apt-helper', 'drop-privs', '--', 'test',
'-r', "$options->{root}$partial"
);
my $pid = fork() // error "fork() failed: $!";
if ($pid == 0) {
open(STDOUT, '>', '/dev/null')
or error "cannot open /dev/null for writing: $!";
open(STDERR, '>', '/dev/null')
or error "cannot open /dev/null for writing: $!";
exec { $testcmd[0] } @testcmd
or error("cannot exec " . (join " ", @testcmd) . ": $!");
}
waitpid $pid, 0;
if ($? != 0) {
2020-01-08 16:41:46 +00:00
warning "Download is performed unsandboxed as root as file"
. " $options->{root}$partial couldn't be accessed by user _apt";
2020-01-08 16:44:07 +00:00
open my $fh, '>>', $tmpfile
or error "cannot open $tmpfile for appending: $!";
2020-01-08 14:41:49 +00:00
print $fh "APT::Sandbox::User \"root\";\n";
close $fh;
}
2019-02-28 10:54:03 +00:00
}
2020-04-09 21:07:02 +00:00
return;
}
sub run_update() {
my $options = shift;
2019-02-20 12:32:49 +00:00
2021-05-31 09:17:39 +00:00
my $aptopts = {
2021-10-06 19:19:00 +00:00
ARGV => ['apt-get', 'update', '--error-on=any'],
2021-05-31 09:17:39 +00:00
CHDIR => $options->{root},
};
2023-01-16 06:39:15 +00:00
# Maybe "apt-get update" was already run in the setup hook? If yes, skip
    # running it here. We are overly strict on purpose because it is better
    # to run it twice by accident than not at all.
if ( !-d "$options->{root}/var/lib/apt/lists/auxfiles"
|| !-d "$options->{root}/var/lib/apt/lists/partial"
|| !-e "$options->{root}/var/lib/apt/lists/lock"
|| !-e "$options->{root}/var/cache/apt/pkgcache.bin"
|| !-e "$options->{root}/var/cache/apt/srcpkgcache.bin") {
info "running apt-get update...";
run_apt_progress($aptopts);
} else {
info "skipping apt-get update because it was already run";
}
2018-09-21 06:04:40 +00:00
# check if anything was downloaded at all
{
2020-01-08 16:44:07 +00:00
        open(my $fh, '-|', 'apt-get', 'indextargets')
            or error "failed to fork(): $!";
chomp(
my $indextargets = do { local $/; <$fh> }
);
2020-01-08 14:41:49 +00:00
close $fh;
if ($indextargets eq '') {
2023-01-16 06:54:27 +00:00
warning("apt-get indextargets output is empty");
2023-01-16 14:13:39 +00:00
if (scalar @{ $options->{sourceslists} } == 0) {
warning "no known apt sources.list entry";
}
2023-01-16 06:54:27 +00:00
for my $list (@{ $options->{sourceslists} }) {
if (defined $list->{fname}) {
info("Filename: $list->{fname}");
}
info("Type: $list->{type}");
info("Content:");
for my $line (split "\n", $list->{content}) {
info(" $line");
}
}
open(my $fh, '-|', 'apt-cache', 'policy')
// error "failed to fork(): $!";
while (my $line = <$fh>) {
chomp $line;
info $line;
}
close $fh;
my $msg
= "apt-get update did not find any indices "
. "for architecture '$options->{nativearch}' in ";
if (length $options->{suite}) {
$msg .= "suite '$options->{suite}'";
} else {
$msg .= "the configured apt sources";
2020-01-08 14:41:49 +00:00
}
2023-01-16 06:54:27 +00:00
error $msg;
2020-01-08 14:41:49 +00:00
}
2018-09-21 06:04:40 +00:00
}
2018-09-18 09:20:24 +00:00
2020-04-09 21:07:02 +00:00
return;
}
sub run_download() {
my $options = shift;
2021-08-25 03:20:46 +00:00
# In the future we want to replace downloading packages with "apt-get
2022-05-24 09:47:16 +00:00
# install" and installing them with dpkg by just installing the essential
# packages with apt from the outside with DPkg::Chroot-Directory.
# We are not doing that because then the preinst script of base-passwd will
# not be called early enough and packages will fail to install because they
# are missing /etc/passwd.
2020-11-13 18:02:41 +00:00
my @cached_debs = ();
my @dl_debs = ();
if (
!$options->{dryrun}
&& ((none { $_ eq $options->{variant} } ('extract', 'custom'))
2021-11-09 06:31:56 +00:00
|| scalar @{ $options->{include} } != 0)
2020-11-13 18:02:41 +00:00
&& -d "$options->{root}/var/cache/apt/archives/"
) {
my $apt_archives = "/var/cache/apt/archives/";
opendir my $dh, "$options->{root}/$apt_archives"
or error "cannot read $apt_archives";
while (my $deb = readdir $dh) {
if ($deb !~ /\.deb$/) {
next;
}
if (!-f "$options->{root}/$apt_archives/$deb") {
next;
}
push @cached_debs, $deb;
}
closedir $dh;
}
2018-09-18 09:20:24 +00:00
# To figure out the right package set for the apt variant we can use:
# $ apt-get dist-upgrade -o dir::state::status=/dev/null
    # This is because that variant only contains the essential packages plus
    # apt, and libapt treats apt as essential. If we want to install less
# (essential variant) then we have to compute the package set ourselves.
# Same if we want to install priority based variants.
2018-10-22 15:05:56 +00:00
if (any { $_ eq $options->{variant} } ('extract', 'custom')) {
2021-11-09 06:31:56 +00:00
if (scalar @{ $options->{include} } == 0) {
2020-05-03 13:06:24 +00:00
info "nothing to download -- skipping...";
2022-05-23 21:30:29 +00:00
return ([], \@cached_debs);
2020-05-03 13:06:24 +00:00
}
2023-01-16 11:13:21 +00:00
my @apt_argv = ('install', @{ $options->{include} });
2020-05-03 13:06:24 +00:00
2022-05-24 09:47:16 +00:00
@dl_debs = run_apt_download_progress({
APT_ARGV => [@apt_argv],
dryrun => $options->{dryrun},
},
);
2018-10-22 15:05:56 +00:00
} elsif ($options->{variant} eq 'apt') {
2020-01-08 14:41:49 +00:00
# if we just want to install Essential:yes packages, apt and their
# dependencies then we can make use of libapt treating apt as
# implicitly essential. An upgrade with the (currently) empty status
# file will trigger an installation of the essential packages plus apt.
#
# 2018-09-02, #debian-dpkg on OFTC, times in UTC+2
# 23:39 < josch> I'll just put it in my script and if it starts
# breaking some time I just say it's apt's fault. :P
# 23:42 < DonKult> that is how it usually works, so yes, do that :P (<-
# and please add that line next to it so you can
# remind me in 5+ years that I said that after I wrote
# in the bugreport: "Are you crazy?!? Nobody in his
# right mind would even suggest depending on it!")
2022-05-24 09:47:16 +00:00
@dl_debs = run_apt_download_progress({
APT_ARGV => ['dist-upgrade'],
dryrun => $options->{dryrun},
},
);
2022-11-14 13:34:15 +00:00
} elsif (any { $_ eq $options->{variant} }
('essential', 'standard', 'important', 'required', 'buildd')) {
2021-08-17 08:29:56 +00:00
# 2021-06-07, #debian-apt on OFTC, times in UTC+2
# 17:27 < DonKult> (?essential includes 'apt' through)
# 17:30 < josch> DonKult: no, because pkgCacheGen::ForceEssential ",";
# 17:32 < DonKult> touché
2022-05-24 09:47:16 +00:00
@dl_debs = run_apt_download_progress({
APT_ARGV => [
2021-08-17 08:29:56 +00:00
'install',
'?narrow('
. (
2021-11-09 06:31:56 +00:00
length($options->{suite})
2022-01-07 11:46:35 +00:00
? '?or(?archive(^'
. $options->{suite}
. '$),?codename(^'
. $options->{suite} . '$)),'
2021-08-17 08:29:56 +00:00
: ''
)
. '?architecture('
. $options->{nativearch}
. '),?essential)'
],
2022-05-24 09:47:16 +00:00
dryrun => $options->{dryrun},
},
);
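        # Illustration only: with --suite=unstable and a native architecture
        # of amd64, the pattern built above expands to roughly
        #   ?narrow(?or(?archive(^unstable$),?codename(^unstable$)),
        #           ?architecture(amd64),?essential)
        # i.e. all essential packages of that architecture from that suite.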
2018-10-22 12:44:45 +00:00
} else {
2020-01-08 14:41:49 +00:00
error "unknown variant: $options->{variant}";
2018-09-18 09:20:24 +00:00
}
2018-09-21 18:32:07 +00:00
my @essential_pkgs;
2022-05-24 09:47:16 +00:00
# strip the chroot directory from the filenames
foreach my $deb (@dl_debs) {
# if filename does not start with chroot directory then the user
# might've used a file:// mirror and we check whether the path is
# accessible inside the chroot
if (rindex $deb, $options->{root}, 0) {
if (!-e "$options->{root}/$deb") {
error "package file $deb not accessible from chroot directory"
2022-05-24 11:16:24 +00:00
. " -- use copy:// instead of file:// or a bind-mount. You"
. " can also try using --hook-dir=/usr/share/mmdebstrap/"
. "hooks/file-mirror-automount to automatically create"
2022-07-26 16:43:18 +00:00
. " bind-mounts or copy the files as necessary.";
2022-05-24 09:47:16 +00:00
}
push @essential_pkgs, $deb;
next;
2020-01-08 14:41:49 +00:00
}
2022-05-24 09:47:16 +00:00
# filename starts with chroot directory, strip it off
# this is the normal case
if (!-e $deb) {
error "cannot find package file $deb";
2020-01-08 14:41:49 +00:00
}
2022-05-24 09:47:16 +00:00
push @essential_pkgs, substr($deb, length($options->{root}));
2018-09-21 18:32:07 +00:00
}
2021-11-09 06:31:56 +00:00
return (\@essential_pkgs, \@cached_debs);
2020-04-09 21:07:02 +00:00
}
sub run_extract() {
my $options = shift;
my $essential_pkgs = shift;
if ($options->{dryrun}) {
2020-01-10 10:44:15 +00:00
info "skip extracting packages because of --dry-run";
2020-04-09 21:07:02 +00:00
return;
}
2020-05-03 13:06:24 +00:00
if (scalar @{$essential_pkgs} == 0) {
info "nothing to extract -- skipping...";
return;
}
2020-04-09 21:07:02 +00:00
info "extracting archives...";
print_progress 0.0;
my $counter = 0;
my $total = scalar @{$essential_pkgs};
foreach my $deb (@{$essential_pkgs}) {
$counter += 1;
2020-08-25 11:02:33 +00:00
my $tarfilter;
my @tarfilterargs;
        # if a path-exclude or path-include option was added to the dpkg config,
# insert the tarfilter between dpkg-deb and tar
if (-e "$options->{root}/etc/dpkg/dpkg.cfg.d/99mmdebstrap") {
open(my $fh, '<',
"$options->{root}/etc/dpkg/dpkg.cfg.d/99mmdebstrap")
or error "cannot open /etc/dpkg/dpkg.cfg.d/99mmdebstrap: $!";
my @matches = grep { /^path-(?:exclude|include)=/ } <$fh>;
close $fh;
chop @matches; # remove trailing newline
@tarfilterargs = map { "--" . $_ } @matches;
}
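        # Illustration only: a dpkg.cfg.d line like
        #   path-exclude=/usr/share/doc/*
        # is turned into the tarfilter argument
        #   --path-exclude=/usr/share/doc/*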
if (scalar @tarfilterargs > 0) {
if (-x "./tarfilter") {
$tarfilter = "./tarfilter";
} else {
$tarfilter = "mmtarfilter";
}
}
my $dpkg_writer;
my $tar_reader;
my $filter_reader;
my $filter_writer;
if (scalar @tarfilterargs > 0) {
pipe $filter_reader, $dpkg_writer or error "pipe failed: $!";
pipe $tar_reader, $filter_writer or error "pipe failed: $!";
} else {
pipe $tar_reader, $dpkg_writer or error "pipe failed: $!";
}
2020-04-09 21:07:02 +00:00
# not using dpkg-deb --extract as that would replace the
# merged-usr symlinks with plain directories
2023-10-23 09:48:32 +00:00
# even after switching from pre-merging to post-merging, dpkg-deb
# will ignore filter rules from dpkg.cfg.d
2021-08-25 03:20:46 +00:00
# https://bugs.debian.org/989602
2020-08-25 11:02:33 +00:00
# not using dpkg --unpack because that would try running preinst
# maintainer scripts
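        # The extraction pipeline set up below is roughly equivalent to this
        # shell sketch (illustration only):
        #   dpkg-deb --fsys-tarfile pkg.deb \
        #     | mmtarfilter --path-exclude=... \
        #     | tar -C "$root" --keep-directory-symlink --extract --file -
        # where the middle step is only present if filter rules were found.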
2020-04-09 21:07:02 +00:00
my $pid1 = fork() // error "fork() failed: $!";
if ($pid1 == 0) {
2020-08-25 11:02:33 +00:00
open(STDOUT, '>&', $dpkg_writer) or error "cannot open STDOUT: $!";
close($tar_reader) or error "cannot close tar_reader: $!";
if (scalar @tarfilterargs > 0) {
close($filter_reader)
or error "cannot close filter_reader: $!";
close($filter_writer)
or error "cannot close filter_writer: $!";
}
2020-04-09 21:07:02 +00:00
debug("running dpkg-deb --fsys-tarfile $options->{root}/$deb");
eval { Devel::Cover::set_coverage("none") } if $is_covering;
exec 'dpkg-deb', '--fsys-tarfile', "$options->{root}/$deb";
}
2020-08-25 11:02:33 +00:00
my $pid2;
if (scalar @tarfilterargs > 0) {
$pid2 = fork() // error "fork() failed: $!";
if ($pid2 == 0) {
open(STDIN, '<&', $filter_reader)
or error "cannot open STDIN: $!";
open(STDOUT, '>&', $filter_writer)
or error "cannot open STDOUT: $!";
close($dpkg_writer) or error "cannot close dpkg_writer: $!";
close($tar_reader) or error "cannot close tar_reader: $!";
debug("running $tarfilter " . (join " ", @tarfilterargs));
eval { Devel::Cover::set_coverage("none") } if $is_covering;
exec $tarfilter, @tarfilterargs;
}
}
my $pid3 = fork() // error "fork() failed: $!";
if ($pid3 == 0) {
open(STDIN, '<&', $tar_reader) or error "cannot open STDIN: $!";
close($dpkg_writer) or error "cannot close dpkg_writer: $!";
if (scalar @tarfilterargs > 0) {
close($filter_reader)
or error "cannot close filter_reader: $!";
close($filter_writer)
or error "cannot close filter_writer: $!";
}
2020-04-09 21:07:02 +00:00
debug( "running tar -C $options->{root}"
. " --keep-directory-symlink --extract --file -");
eval { Devel::Cover::set_coverage("none") } if $is_covering;
exec 'tar', '-C', $options->{root},
'--keep-directory-symlink', '--extract', '--file', '-';
}
2020-08-25 11:02:33 +00:00
close($dpkg_writer) or error "cannot close dpkg_writer: $!";
close($tar_reader) or error "cannot close tar_reader: $!";
if (scalar @tarfilterargs > 0) {
close($filter_reader) or error "cannot close filter_reader: $!";
close($filter_writer) or error "cannot close filter_writer: $!";
}
2020-04-09 21:07:02 +00:00
waitpid($pid1, 0);
$? == 0 or error "dpkg-deb --fsys-tarfile failed: $?";
2020-08-25 11:02:33 +00:00
if (scalar @tarfilterargs > 0) {
waitpid($pid2, 0);
$? == 0 or error "tarfilter failed: $?";
}
waitpid($pid3, 0);
2020-04-09 21:07:02 +00:00
$? == 0 or error "tar --extract failed: $?";
2022-10-16 20:03:06 +00:00
print_progress($counter / $total * 100, "extracting");
2020-04-09 21:07:02 +00:00
}
print_progress "done";
return;
}
sub run_prepare {
my $options = shift;
if ($options->{mode} eq 'fakechroot') {
# this borrows from and extends
# /etc/fakechroot/debootstrap.env and
# /etc/fakechroot/chroot.env
{
2022-05-29 05:50:10 +00:00
my %subst = (
chroot => "/usr/sbin/chroot.fakechroot",
mkfifo => "/bin/true",
ldconfig => (getcwd() . '/ldconfig.fakechroot'),
ldd => "/usr/bin/ldd.fakechroot",
ischroot => "/bin/true"
);
if (!-x $subst{ldconfig}) {
$subst{ldconfig}
= '/usr/libexec/mmdebstrap/ldconfig.fakechroot';
2021-09-16 14:23:46 +00:00
}
2023-01-23 15:35:35 +00:00
my %mergedusrmap = (
"/bin" => "/usr/bin",
"/sbin" => "/usr/sbin",
"/usr/bin/" => "/bin",
"/usr/sbin" => "/sbin"
);
2023-01-23 14:01:21 +00:00
my %fakechrootsubst;
2022-05-29 05:50:10 +00:00
foreach my $d (split ':', $ENV{PATH}) {
2023-01-23 14:01:21 +00:00
foreach my $k (sort %subst) {
2023-01-23 15:35:35 +00:00
my $mapped_path = $mergedusrmap{$d} // $d;
next if !-e "$d/$k" && !-e "$mapped_path/$k";
$fakechrootsubst{"$d/$k=$subst{$k}"} = 1;
$fakechrootsubst{"$mapped_path/$k=$subst{$k}"} = 1;
2022-05-29 05:50:10 +00:00
}
2020-04-09 21:07:02 +00:00
}
if (defined $ENV{FAKECHROOT_CMD_SUBST}
&& $ENV{FAKECHROOT_CMD_SUBST} ne "") {
2023-01-23 14:01:21 +00:00
foreach my $e (split /:/, $ENV{FAKECHROOT_CMD_SUBST}) {
$fakechrootsubst{$e} = 1;
}
2020-04-09 21:07:02 +00:00
}
## no critic (Variables::RequireLocalizedPunctuationVars)
2023-01-23 14:01:21 +00:00
$ENV{FAKECHROOT_CMD_SUBST} = join ':',
(sort keys %fakechrootsubst);
2020-04-09 21:07:02 +00:00
}
if (defined $ENV{FAKECHROOT_EXCLUDE_PATH}
&& $ENV{FAKECHROOT_EXCLUDE_PATH} ne "") {
## no critic (Variables::RequireLocalizedPunctuationVars)
$ENV{FAKECHROOT_EXCLUDE_PATH}
= "$ENV{FAKECHROOT_EXCLUDE_PATH}:/dev:/proc:/sys";
} else {
## no critic (Variables::RequireLocalizedPunctuationVars)
$ENV{FAKECHROOT_EXCLUDE_PATH} = '/dev:/proc:/sys';
}
# workaround for long unix socket path if FAKECHROOT_BASE
# exceeds the limit of 108 bytes
{
## no critic (Variables::RequireLocalizedPunctuationVars)
$ENV{FAKECHROOT_AF_UNIX_PATH} = "/tmp";
}
{
my @ldlibpath = ();
if (defined $ENV{LD_LIBRARY_PATH}
&& $ENV{LD_LIBRARY_PATH} ne "") {
push @ldlibpath, (split /:/, $ENV{LD_LIBRARY_PATH});
}
# FIXME: workaround allowing installation of systemd should
# live in fakechroot, see #917920
2021-09-21 12:17:27 +00:00
push @ldlibpath, "$options->{root}/lib/systemd";
my $parse_ld_so_conf;
$parse_ld_so_conf = sub {
foreach my $conf (@_) {
next if !-r $conf;
open my $fh, '<', "$conf" or error "can't read $conf: $!";
while (my $line = <$fh>) {
chomp $line;
if ($line eq "") {
next;
}
if ($line =~ /^#/) {
next;
}
if ($line =~ /include (.*)/) {
$parse_ld_so_conf->(glob("$options->{root}/$1"));
next;
}
if (!-d "$options->{root}/$line") {
next;
}
push @ldlibpath, "$options->{root}/$line";
}
close $fh;
2020-04-09 21:07:02 +00:00
}
2021-09-21 12:17:27 +00:00
};
if (-e "$options->{root}/etc/ld.so.conf") {
$parse_ld_so_conf->("$options->{root}/etc/ld.so.conf");
2020-04-09 21:07:02 +00:00
}
## no critic (Variables::RequireLocalizedPunctuationVars)
$ENV{LD_LIBRARY_PATH} = join ':', @ldlibpath;
}
}
# make sure that APT_CONFIG and TMPDIR are not set when executing
# anything inside the chroot
my @chrootcmd = ('env', '--unset=APT_CONFIG', '--unset=TMPDIR');
2022-09-05 03:50:50 +00:00
if (any { $_ eq $options->{mode} } ('root', 'unshare', 'fakechroot')) {
2022-05-24 18:36:01 +00:00
push @chrootcmd, ('chroot', $options->{root});
2018-11-20 23:13:10 +00:00
} else {
2020-04-09 21:07:02 +00:00
error "unknown mode: $options->{mode}";
}
2023-09-27 12:02:00 +00:00
# foreign architecture setup for fakechroot mode
if (defined $options->{qemu} && $options->{mode} eq 'fakechroot') {
# Make sure that the fakeroot and fakechroot shared libraries exist for
# the right architecture
        open(my $fh, '-|', 'dpkg-architecture', '-a',
            $options->{nativearch}, '-qDEB_HOST_MULTIARCH')
            or error "failed to fork(): $!";
chomp(
my $deb_host_multiarch = do { local $/; <$fh> }
);
close $fh;
if (($? != 0) or (!$deb_host_multiarch)) {
error "dpkg-architecture failed: $?";
}
my $fakechrootdir = "/usr/lib/$deb_host_multiarch/fakechroot";
if (!-e "$fakechrootdir/libfakechroot.so") {
error "$fakechrootdir/libfakechroot.so doesn't exist."
. " Install libfakechroot:$options->{nativearch}"
. " outside the chroot";
}
my $fakerootdir = "/usr/lib/$deb_host_multiarch/libfakeroot";
if (!-e "$fakerootdir/libfakeroot-sysv.so") {
error "$fakerootdir/libfakeroot-sysv.so doesn't exist."
. " Install libfakeroot:$options->{nativearch}"
. " outside the chroot";
}
# The rest of this block sets environment variables, so we have to add
# the "no critic" statement to stop perlcritic from complaining about
# setting global variables
## no critic (Variables::RequireLocalizedPunctuationVars)
# fakechroot only fills LD_LIBRARY_PATH with the directories of the
# host's architecture. We append the directories of the chroot
# architecture.
$ENV{LD_LIBRARY_PATH}
= "$ENV{LD_LIBRARY_PATH}:$fakechrootdir:$fakerootdir";
# The binfmt support on the outside is used, so qemu needs to know
# where it has to look for shared libraries
if (defined $ENV{QEMU_LD_PREFIX}
&& $ENV{QEMU_LD_PREFIX} ne "") {
$ENV{QEMU_LD_PREFIX} = "$ENV{QEMU_LD_PREFIX}:$options->{root}";
2020-04-09 21:07:02 +00:00
} else {
2023-09-27 12:02:00 +00:00
$ENV{QEMU_LD_PREFIX} = $options->{root};
2020-01-08 14:41:49 +00:00
}
2018-11-20 23:13:10 +00:00
}
2020-04-09 21:07:02 +00:00
# some versions of coreutils use the renameat2 system call in mv.
2022-09-05 03:50:50 +00:00
# This breaks certain versions of fakechroot. Here we do
2020-04-09 21:07:02 +00:00
# a sanity check and warn the user in case things might break.
2022-09-05 03:50:50 +00:00
if ($options->{mode} eq 'fakechroot'
and -e "$options->{root}/bin/mv") {
2020-04-09 21:07:02 +00:00
mkdir "$options->{root}/000-move-me"
or error "cannot create directory: $!";
my $ret = system @chrootcmd, '/bin/mv', '/000-move-me',
'/001-delete-me';
if ($ret != 0) {
2022-09-05 03:50:50 +00:00
info "the /bin/mv binary inside the chroot doesn't"
. " work under fakechroot";
info "with certain versions of coreutils and glibc,"
. " this is due to missing support for renameat2 in"
. " fakechroot";
info "see https://github.com/dex4er/fakechroot/issues/60";
2020-04-09 21:07:02 +00:00
info "expect package post installation scripts not to work";
rmdir "$options->{root}/000-move-me"
or error "cannot rmdir: $!";
} else {
rmdir "$options->{root}/001-delete-me"
or error "cannot rmdir: $!";
}
}
return \@chrootcmd;
}
sub run_essential() {
my $options = shift;
my $essential_pkgs = shift;
my $chrootcmd = shift;
2020-11-13 18:02:41 +00:00
my $cached_debs = shift;
2020-03-22 13:08:21 +00:00
2020-05-03 13:06:24 +00:00
if (scalar @{$essential_pkgs} == 0) {
info "no essential packages -- skipping...";
return;
}
2018-11-20 23:13:10 +00:00
if ($options->{mode} eq 'chrootless') {
2020-01-10 10:44:15 +00:00
if ($options->{dryrun}) {
2020-11-27 23:45:32 +00:00
info "simulate installing essential packages...";
2020-01-10 10:44:15 +00:00
} else {
2020-11-27 23:45:32 +00:00
info "installing essential packages...";
2020-01-10 10:44:15 +00:00
}
2020-01-08 14:41:49 +00:00
# FIXME: the dpkg config from the host is parsed before the command
# line arguments are parsed and might break this mode
# Example: if the host has --path-exclude set, then this will also
2021-08-25 03:15:44 +00:00
# affect the chroot. See #808203
2020-01-08 14:41:49 +00:00
my @chrootless_opts = (
2023-03-22 08:27:11 +00:00
'-oDPkg::Chroot-Directory=',
2020-01-08 14:41:49 +00:00
'-oDPkg::Options::=--force-not-root',
'-oDPkg::Options::=--force-script-chrootless',
'-oDPkg::Options::=--root=' . $options->{root},
2020-01-10 10:44:15 +00:00
'-oDPkg::Options::=--log=' . "$options->{root}/var/log/dpkg.log",
$options->{dryrun} ? '-oAPT::Get::Simulate=true' : (),
2020-01-08 16:44:07 +00:00
);
2020-01-08 14:41:49 +00:00
if (defined $options->{qemu}) {
# The binfmt support on the outside is used, so qemu needs to know
# where it has to look for shared libraries
if (defined $ENV{QEMU_LD_PREFIX}
&& $ENV{QEMU_LD_PREFIX} ne "") {
2020-01-09 07:39:40 +00:00
## no critic (Variables::RequireLocalizedPunctuationVars)
2020-01-08 14:41:49 +00:00
$ENV{QEMU_LD_PREFIX} = "$ENV{QEMU_LD_PREFIX}:$options->{root}";
} else {
2020-01-09 07:39:40 +00:00
## no critic (Variables::RequireLocalizedPunctuationVars)
2020-01-08 14:41:49 +00:00
$ENV{QEMU_LD_PREFIX} = $options->{root};
}
}
2021-08-17 21:39:20 +00:00
# we don't use apt because that will not run the base-passwd preinst
# early enough
#run_apt_progress({
# ARGV => ['apt-get', '--yes', @chrootless_opts, 'install'],
# PKGS => [map { "$options->{root}/$_" } @{$essential_pkgs}],
#});
run_dpkg_progress({
ARGV => [
'dpkg',
'--force-not-root',
'--force-script-chrootless',
"--root=$options->{root}",
"--log=$options->{root}/var/log/dpkg.log",
'--install',
'--force-depends'
],
PKGS => [map { "$options->{root}/$_" } @{$essential_pkgs}] });
2022-09-05 03:50:50 +00:00
} elsif (any { $_ eq $options->{mode} } ('root', 'unshare', 'fakechroot'))
{
2020-04-09 21:07:02 +00:00
# install the extracted packages properly
# we need --force-depends because dpkg does not take Pre-Depends
# into account and thus doesn't install them in the right order
# And the --predep-package option is broken: #539133
2021-08-16 11:32:20 +00:00
#
# We could use apt from outside the chroot using DPkg::Chroot-Directory
# but then the preinst script of base-passwd will not be called early
# enough and packages will fail to install because they are missing
# /etc/passwd. Also, with plain dpkg the essential variant can finish
# within 9 seconds. If we use apt instead, it becomes 12 seconds. We
# prefer speed here.
2020-04-09 21:07:02 +00:00
if ($options->{dryrun}) {
2020-11-27 23:45:32 +00:00
info "simulate installing essential packages...";
2020-04-09 21:07:02 +00:00
} else {
2020-11-27 23:45:32 +00:00
info "installing essential packages...";
2022-11-10 13:53:36 +00:00
run_dpkg_progress({
ARGV =>
[@{$chrootcmd}, 'dpkg', '--install', '--force-depends'],
PKGS => $essential_pkgs,
});
2020-04-09 21:07:02 +00:00
}
} else {
error "unknown mode: $options->{mode}";
}
2020-01-08 14:41:49 +00:00
2020-11-13 18:02:41 +00:00
if (any { $_ eq 'essential/unlink' } @{ $options->{skip} }) {
info "skipping essential/unlink as requested";
} else {
foreach my $deb (@{$essential_pkgs}) {
            # do not unlink those packages that were in /var/cache/apt/archives
# before the download phase
next
if any { "/var/cache/apt/archives/$_" eq $deb } @{$cached_debs};
2022-05-24 09:47:16 +00:00
# do not unlink those packages that were not in
# /var/cache/apt/archive (for example because they were provided by
# a file:// mirror)
next if $deb !~ /\/var\/cache\/apt\/archives\//;
2020-11-13 18:02:41 +00:00
unlink "$options->{root}/$deb"
or error "cannot unlink $deb: $!";
}
2020-04-09 21:07:02 +00:00
}
return;
}
sub run_install() {
2022-11-10 13:53:36 +00:00
my $options = shift;
2021-11-09 06:31:56 +00:00
2023-01-16 11:13:21 +00:00
my @pkgs_to_install = (@{ $options->{include} });
2021-11-09 06:31:56 +00:00
if ($options->{variant} eq 'buildd') {
2023-10-23 08:38:10 +00:00
push @pkgs_to_install, 'build-essential', 'apt';
2021-11-09 06:31:56 +00:00
}
2022-11-14 13:34:15 +00:00
if (any { $_ eq $options->{variant} }
2023-10-23 08:38:10 +00:00
('required', 'important', 'standard')) {
2022-02-11 22:01:08 +00:00
# Many of the priority:required packages are also essential:yes. We
# make sure not to select those here to avoid useless "xxx is already
# the newest version" messages.
2021-11-09 06:31:56 +00:00
my $priority;
2023-10-23 08:38:10 +00:00
if (any { $_ eq $options->{variant} } ('required')) {
2022-02-11 22:01:08 +00:00
$priority = '?and(?priority(required),?not(?essential))';
2021-11-09 06:31:56 +00:00
} elsif ($options->{variant} eq 'important') {
2022-02-11 22:01:08 +00:00
$priority = '?and(?or(?priority(required),?priority(important)),'
. '?not(?essential))';
2021-11-09 06:31:56 +00:00
} elsif ($options->{variant} eq 'standard') {
2022-02-11 22:01:08 +00:00
$priority = '?and(?or(~prequired,~pimportant,~pstandard),'
. '?not(?essential))';
2021-11-09 06:31:56 +00:00
}
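        # Note: ~prequired, ~pimportant and ~pstandard above are apt's short
        # forms of the corresponding ?priority(...) patterns; see
        # apt-patterns(7).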
2023-01-16 11:13:21 +00:00
push @pkgs_to_install,
(
2021-11-09 06:31:56 +00:00
"?narrow("
. (
length($options->{suite})
2022-01-07 11:46:35 +00:00
? '?or(?archive(^'
. $options->{suite}
. '$),?codename(^'
. $options->{suite} . '$)),'
2021-11-09 06:31:56 +00:00
: ''
)
. "?architecture($options->{nativearch}),"
. "$priority)"
2023-01-16 11:13:21 +00:00
);
2021-11-09 06:31:56 +00:00
}
2020-04-09 21:07:02 +00:00
if ($options->{mode} eq 'chrootless') {
2021-11-09 06:31:56 +00:00
if (scalar @pkgs_to_install > 0) {
2020-04-09 21:07:02 +00:00
my @chrootless_opts = (
2023-03-22 08:27:11 +00:00
'-oDPkg::Chroot-Directory=',
2020-04-09 21:07:02 +00:00
'-oDPkg::Options::=--force-not-root',
'-oDPkg::Options::=--force-script-chrootless',
'-oDPkg::Options::=--root=' . $options->{root},
'-oDPkg::Options::=--log='
. "$options->{root}/var/log/dpkg.log",
$options->{dryrun} ? '-oAPT::Get::Simulate=true' : (),
);
run_apt_progress({
ARGV => ['apt-get', '--yes', @chrootless_opts, 'install'],
2021-11-09 06:31:56 +00:00
PKGS => [@pkgs_to_install],
2020-04-09 21:07:02 +00:00
});
}
2022-09-05 03:50:50 +00:00
} elsif (any { $_ eq $options->{mode} } ('root', 'unshare', 'fakechroot'))
{
2020-04-09 21:07:02 +00:00
if ($options->{variant} ne 'custom'
2021-11-09 06:31:56 +00:00
and scalar @pkgs_to_install > 0) {
2021-08-16 11:32:20 +00:00
# Advantage of running apt on the outside instead of inside the
# chroot:
2020-04-09 21:07:02 +00:00
#
2021-08-16 11:32:20 +00:00
# - we can build chroots without apt (for example from buildinfo
# files)
#
# - we do not need to install additional packages like
# apt-transport-* or ca-certificates inside the chroot
#
            # - we do not need additional key material inside the chroot
#
# - we can make use of file:// and copy://
#
2022-02-11 22:01:36 +00:00
# - we can use EDSP solvers without installing apt-utils or other
# solvers inside the chroot
#
2021-08-16 11:32:20 +00:00
# The DPkg::Install::Recursive::force=true workaround can be
# dropped after this issue is fixed:
2021-09-19 17:38:47 +00:00
# https://salsa.debian.org/apt-team/apt/-/merge_requests/189
2021-08-16 11:32:20 +00:00
#
# We could also move the dpkg call to the outside and run dpkg with
# --root but this would only make sense in situations where there
# is no dpkg inside the chroot.
2020-04-09 21:07:02 +00:00
if (!$options->{dryrun}) {
2022-11-10 13:53:36 +00:00
info "installing remaining packages inside the chroot...";
run_apt_progress({
ARGV => [
'apt-get',
'-o',
'Dir::Bin::dpkg=env',
'-o',
'DPkg::Options::=--unset=TMPDIR',
'-o',
'DPkg::Options::=dpkg',
$options->{mode} eq 'fakechroot'
? ('-o', 'DPkg::Install::Recursive::force=true')
: (),
'--yes',
'install'
],
PKGS => [@pkgs_to_install],
});
2020-04-09 21:07:02 +00:00
} else {
info "simulate installing remaining packages inside the"
. " chroot...";
run_apt_progress({
ARGV => [
'apt-get', '--yes',
'-oAPT::Get::Simulate=true', 'install'
],
2021-11-09 06:31:56 +00:00
PKGS => [@pkgs_to_install],
2020-04-09 21:07:02 +00:00
});
2020-01-08 14:41:49 +00:00
}
}
2018-10-22 15:05:56 +00:00
} else {
2020-01-08 14:41:49 +00:00
error "unknown mode: $options->{mode}";
2018-09-18 09:20:24 +00:00
}
2020-04-09 21:07:02 +00:00
return;
}
sub run_cleanup() {
my $options = shift;
2019-01-08 10:28:27 +00:00
2020-04-09 22:00:36 +00:00
if (any { $_ eq 'cleanup/apt' } @{ $options->{skip} }) {
info "skipping cleanup/apt as requested";
} else {
2021-05-09 18:44:02 +00:00
if ( none { $_ eq 'cleanup/apt/lists' } @{ $options->{skip} }
and none { $_ eq 'cleanup/apt/cache' } @{ $options->{skip} }) {
info "cleaning package lists and apt cache...";
}
if (any { $_ eq 'cleanup/apt/lists' } @{ $options->{skip} }) {
info "skipping cleanup/apt/lists as requested";
} else {
if (any { $_ eq 'cleanup/apt/cache' } @{ $options->{skip} }) {
info "cleaning package lists...";
}
run_apt_progress({
ARGV => [
'apt-get', '--option',
'Dir::Etc::SourceList=/dev/null', '--option',
'Dir::Etc::SourceParts=/dev/null', 'update'
],
CHDIR => $options->{root},
});
}
if (any { $_ eq 'cleanup/apt/cache' } @{ $options->{skip} }) {
info "skipping cleanup/apt/cache as requested";
} else {
if (any { $_ eq 'cleanup/apt/lists' } @{ $options->{skip} }) {
info "cleaning apt cache...";
}
run_apt_progress(
{ ARGV => ['apt-get', 'clean'], CHDIR => $options->{root} });
}
2018-09-18 09:20:24 +00:00
2020-04-09 22:00:36 +00:00
# apt since 1.6 creates the auxfiles directory. If apt inside the
# chroot is older than that, then it will not know how to clean it.
if (-e "$options->{root}/var/lib/apt/lists/auxfiles") {
2021-10-06 19:20:41 +00:00
0 == system(
'rm',
'--interactive=never',
'--recursive',
'--preserve-root',
'--one-file-system',
"$options->{root}/var/lib/apt/lists/auxfiles"
) or error "rm failed: $?";
2020-04-09 22:00:36 +00:00
}
2019-04-23 11:28:55 +00:00
}
2020-04-09 22:00:36 +00:00
if (any { $_ eq 'cleanup/mmdebstrap' } @{ $options->{skip} }) {
info "skipping cleanup/mmdebstrap as requested";
} else {
# clean up temporary configuration file
unlink "$options->{root}/etc/apt/apt.conf.d/00mmdebstrap"
2024-01-23 06:48:12 +00:00
or warning "failed to unlink /etc/apt/apt.conf.d/00mmdebstrap: $!";
2020-04-09 22:00:36 +00:00
if (defined $ENV{APT_CONFIG} && -e $ENV{APT_CONFIG}) {
unlink $ENV{APT_CONFIG}
or error "failed to unlink $ENV{APT_CONFIG}: $!";
}
2018-09-18 15:11:02 +00:00
}
2020-04-09 22:00:36 +00:00
if (any { $_ eq 'cleanup/reproducible' } @{ $options->{skip} }) {
info "skipping cleanup/reproducible as requested";
} else {
# clean up certain files to make output reproducible
foreach my $fname (
'/var/log/dpkg.log', '/var/log/apt/history.log',
'/var/log/apt/term.log', '/var/log/alternatives.log',
2022-02-11 22:01:56 +00:00
'/var/cache/ldconfig/aux-cache', '/var/log/apt/eipp.log.xz',
'/var/lib/dbus/machine-id'
2020-04-09 22:00:36 +00:00
) {
my $path = "$options->{root}$fname";
if (!-e $path) {
next;
}
unlink $path or error "cannot unlink $path: $!";
}
if (-e "$options->{root}/etc/machine-id") {
# from machine-id(5):
# For operating system images which are created once and used on
# multiple machines, for example for containers or in the cloud,
# /etc/machine-id should be an empty file in the generic file
# system image. An ID will be generated during boot and saved to
# this file if possible. Having an empty file in place is useful
# because it allows a temporary file to be bind-mounted over the
# real file, in case the image is used read-only.
unlink "$options->{root}/etc/machine-id"
or error "cannot unlink /etc/machine-id: $!";
open my $fh, '>', "$options->{root}/etc/machine-id"
or error "failed to open(): $!";
close $fh;
2020-01-08 14:41:49 +00:00
}
2020-01-21 12:13:58 +00:00
}
2022-09-02 21:29:52 +00:00
if (any { $_ eq 'cleanup/run' } @{ $options->{skip} }) {
info "skipping cleanup/run as requested";
} else {
# remove any possible leftovers in /run
if (-d "$options->{root}/run") {
opendir(my $dh, "$options->{root}/run")
or error "Can't opendir($options->{root}/run): $!";
while (my $entry = readdir $dh) {
# skip the "." and ".." entries
next if $entry eq ".";
next if $entry eq "..";
2022-09-06 14:58:20 +00:00
# skip deleting /run/lock as /var/lock is a symlink to it
# according to Debian policy §9.1.4
next if $entry eq "lock";
2022-09-02 21:29:52 +00:00
debug "deleting files in /run: $entry";
0 == system(
'rm', '--interactive=never',
'--recursive', '--preserve-root',
'--one-file-system', "$options->{root}/run/$entry"
) or error "rm failed: $?";
}
closedir($dh);
}
}
2020-04-09 22:00:36 +00:00
if (any { $_ eq 'cleanup/tmp' } @{ $options->{skip} }) {
info "skipping cleanup/tmp as requested";
} else {
2022-09-02 21:29:52 +00:00
# remove any possible leftovers in /tmp
2020-04-09 22:00:36 +00:00
if (-d "$options->{root}/tmp") {
opendir(my $dh, "$options->{root}/tmp")
or error "Can't opendir($options->{root}/tmp): $!";
while (my $entry = readdir $dh) {
# skip the "." and ".." entries
next if $entry eq ".";
next if $entry eq "..";
2022-09-02 21:29:52 +00:00
debug "deleting files in /tmp: $entry";
2021-10-06 19:20:41 +00:00
0 == system(
'rm', '--interactive=never',
'--recursive', '--preserve-root',
'--one-file-system', "$options->{root}/tmp/$entry"
) or error "rm failed: $?";
2020-01-08 14:41:49 +00:00
}
2020-04-09 22:00:36 +00:00
closedir($dh);
2020-01-08 14:41:49 +00:00
}
2020-01-06 11:44:44 +00:00
}
2023-09-27 11:52:20 +00:00
if (any { $_ eq 'cleanup/dev' } @{ $options->{skip} }) {
info "skipping cleanup/dev as requested";
} else {
# By default, tar is run with --exclude=./dev because we create the
# ./dev entries ourselves using @devfiles. But if --skip=output/dev is
# used, --exclude=./dev is not passed so that the chroot includes ./dev
# as created by base-files. But if mknod was available (for example
# when running as root) then ./dev will also include the @devfiles
# entries created by run_setup() and thus the resulting tarball will
# include things inside ./dev despite the user having supplied
# --skip=output/dev. So if --skip=output/dev was passed and if a
# tarball is to be created, we need to make sure to clean up the
# ./dev entries that were created in run_setup(). This is not done
# when creating a directory because in that case we want to do the
# same as debootstrap and create a directory including device nodes.
if ($options->{format} ne 'directory' && any { $_ eq 'output/dev' }
@{ $options->{skip} }) {
foreach my $file (@devfiles) {
my ($fname, $mode, $type, $linkname, $devmajor, $devminor)
= @{$file};
if (!-e "$options->{root}/dev/$fname") {
next;
}
# do not remove ./dev itself
if ($fname eq "") {
next;
}
if ($type == 0) { # normal file
error "type 0 not implemented";
} elsif ($type == 1) { # hardlink
error "type 1 not implemented";
} elsif (any { $_ eq $type } (2, 3, 4))
{ # symlink, char, block
unlink "$options->{root}/dev/$fname"
or error "failed to unlink ./dev/$fname: $!";
} elsif ($type == 5) { # directory
                    rmdir "$options->{root}/dev/$fname"
                      or error "failed to rmdir ./dev/$fname: $!";
} else {
error "unsupported type: $type";
}
}
}
}
2020-01-09 07:39:40 +00:00
return;
2018-09-18 09:20:24 +00:00
}
2019-12-09 09:40:51 +00:00
# messages from process inside unshared namespace to the outside
# openr -- open file for reading
# openw -- open file for writing
# mktar -- create a tarball of a path
# mktac -- create a tarball of the contents of a path
# untar -- extract tar into directory
# write -- write data to last opened file or tar process
# close -- finish file writing or tar extraction
# adios -- last message and tear-down
# error -- something went wrong, tear-down
# messages from process outside unshared namespace to the inside
# okthx -- success
# write/close -- stream back the contents of a file requested with openr
# error -- something went wrong, tear-down
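#
# Each message is framed the same way (a sketch derived from the pack() and
# unpack() calls below): a 2-byte big-endian payload length, followed by a
# 5-character command name, followed by the payload itself. For example
#     pack("n", length "/tmp/out") . "openw" . "/tmp/out"
# asks the other side to open /tmp/out for writing and is acknowledged with
# the 7-byte reply pack("n", 0) . "okthx". The path /tmp/out is illustrative.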
sub checkokthx {
2020-01-08 16:44:07 +00:00
my $fh = shift;
my $ret = read($fh, my $buf, 2 + 5) // error "cannot read from socket: $!";
2019-12-09 09:40:51 +00:00
if ($ret == 0) { error "received eof on socket"; }
my ($len, $msg) = unpack("nA5", $buf);
if ($msg ne "okthx") { error "expected okthx but got: $msg"; }
2020-01-08 16:44:07 +00:00
if ($len != 0) { error "expected no payload but got $len bytes"; }
2020-01-09 07:39:40 +00:00
return;
2019-12-09 09:40:51 +00:00
}
2020-08-15 16:29:17 +00:00
# resolve a path inside a chroot
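#
# For example (illustrative paths, assuming no symlinks are involved),
# chrooted_realpath("/chroot", "/usr/../var//log") resolves to
# "/chroot/var/log"; ".." components never escape above the chroot root and
# symlinks are resolved relative to the chroot.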
sub chrooted_realpath {
my $root = shift;
my $src = shift;
my $result = $root;
my $prefix;
# relative paths are relative to the root of the chroot
# remove prefixed slashes
$src =~ s{^/+}{};
my $loop = 0;
while (length $src) {
if ($loop > 25) {
error "too many levels of symbolic links";
}
# Get the first directory component.
($prefix, $src) = split m{/+}, $src, 2;
# Resolve the first directory component.
if ($prefix eq ".") {
# Ignore, stay at the same directory.
} elsif ($prefix eq "..") {
# Go up one directory.
$result =~ s{(.*)/[^/]*}{$1};
# but not further than the root
if ($result !~ m/^\Q$root\E/) {
$result = $root;
}
} elsif (-l "$result/$prefix") {
my $dst = readlink "$result/$prefix";
if ($dst =~ s{^/+}{}) {
# Absolute pathname, reset result back to $root.
$result = $root;
}
$src = length $src ? "$dst/$src" : $dst;
$loop++;
} else {
# Otherwise append the prefix.
$result = "$result/$prefix";
}
}
return $result;
}
2022-11-14 08:59:59 +00:00
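# Make $root the root filesystem of the current mount namespace: bind-mount
# $root onto /mnt, pivot_root(2) into it (parking the old root on ./tmp inside
# the new root), chroot into the new root and finally lazily detach the old
# root and ./sys.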
sub pivot_root {
my $root = shift;
my $target = "/mnt";
my $put_old = "tmp";
0 == syscall &SYS_mount, $root, $target, 0, $MS_REC | $MS_BIND, 0
or error "mount failed: $!";
chdir "/mnt" or error "failed chdir() to /mnt: $!";
0 == syscall &SYS_pivot_root, my $new_root = ".", $put_old
or error "pivot_root failed: $!";
chroot "." or error "failed to chroot() to .: $!";
0 == syscall &SYS_umount2, $put_old, $MNT_DETACH
or error "umount2 failed: $!";
0 == syscall &SYS_umount2, my $sys = "sys", $MNT_DETACH
or error "umount2 failed: $!";
return;
}
2020-01-16 11:02:11 +00:00
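# Carry out the copy-in, copy-out, tar-in, tar-out, upload, download, sync-in
# and sync-out commands on behalf of a hook script: the requested paths are
# translated into the message protocol above and the data is streamed from or
# to the process on the other side of the socket.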
sub hookhelper {
2023-10-23 08:41:46 +00:00
my ($root, $mode, $hook, $skipopt, $verbosity, $command, @args) = @_;
2022-11-08 12:02:47 +00:00
$verbosity_level = $verbosity;
2023-10-23 08:41:46 +00:00
my @skipopts = ();
if (length $skipopt) {
for my $skip (split /[,\s]+/, $skipopt) {
# strip leading and trailing whitespace
$skip =~ s/^\s+|\s+$//g;
# skip if the remainder is an empty string
if ($skip eq '') {
next;
}
push @skipopts, $skip;
}
}
2020-01-16 11:02:11 +00:00
# we put everything in an eval block because that way we can easily handle
# errors without goto labels or much code duplication: the error handler
# has to send an "error" message to the other side
eval {
2020-01-08 14:41:49 +00:00
my @cmdprefix = ();
2020-01-21 12:24:49 +00:00
my @tarcmd = (
'tar', '--numeric-owner', '--xattrs', '--format=pax',
'--pax-option=exthdr.name=%d/PaxHeaders/%f,'
. 'delete=atime,delete=ctime'
);
2020-01-08 14:41:49 +00:00
if ($hook eq 'setup') {
2020-03-22 13:08:21 +00:00
} elsif (any { $_ eq $hook } ('extract', 'essential', 'customize')) {
2020-01-08 14:41:49 +00:00
if ($mode eq 'fakechroot') {
                # Fakechroot requires tar to run inside the chroot as
                # otherwise absolute symlinks will include the path to the
                # root directory
2022-05-24 18:36:01 +00:00
push @cmdprefix, 'chroot', $root;
2020-01-08 14:41:49 +00:00
} elsif (any { $_ eq $mode } ('root', 'chrootless', 'unshare')) {
2020-08-15 16:29:17 +00:00
# not chrooting in this case
2020-01-08 14:41:49 +00:00
} else {
error "unknown mode: $mode";
}
} else {
error "unknown hook: $hook";
}
2022-11-14 13:34:15 +00:00
if (any { $_ eq $command } ('copy-in', 'tar-in', 'upload', 'sync-in'))
{
2022-11-08 12:02:47 +00:00
if (scalar @args < 2) {
2020-08-18 07:36:27 +00:00
error "$command needs at least one path on the"
2020-01-08 16:41:46 +00:00
. " outside and the output path inside the chroot";
2020-01-08 14:41:49 +00:00
}
2022-11-08 12:02:47 +00:00
my $outpath = pop @args;
foreach my $file (@args) {
2020-01-08 14:41:49 +00:00
# the right argument for tar's --directory argument depends on
# whether tar is called from inside the chroot or from the
# outside
my $directory;
if ($hook eq 'setup') {
2020-08-15 16:29:17 +00:00
# tar runs outside, so acquire the correct path
$directory = chrooted_realpath $root, $outpath;
2022-11-14 13:34:15 +00:00
} elsif (any { $_ eq $hook }
('extract', 'essential', 'customize')) {
2022-09-05 03:50:50 +00:00
if ($mode eq 'fakechroot') {
2020-08-15 16:29:17 +00:00
# tar will run inside the chroot
$directory = $outpath;
2022-11-14 13:34:15 +00:00
} elsif (any { $_ eq $mode }
('root', 'chrootless', 'unshare')) {
2020-08-15 16:29:17 +00:00
$directory = chrooted_realpath $root, $outpath;
} else {
error "unknown mode: $mode";
}
2020-01-08 14:41:49 +00:00
} else {
error "unknown hook: $hook";
}
2022-09-05 03:50:50 +00:00
                # if chrooted_realpath was used (i.e. fakechroot was not used,
                # as absolute symlinks would be broken there) we can
2020-08-15 16:29:17 +00:00
# check and potentially fail early if the target does not exist
2022-09-05 03:50:50 +00:00
if ($mode ne 'fakechroot') {
2020-08-15 16:29:17 +00:00
my $dirtocheck = $directory;
if ($command eq 'upload') {
# check the parent directory instead
$dirtocheck =~ s/(.*)\/[^\/]*/$1/;
}
if (!-e $dirtocheck) {
error "path does not exist: $dirtocheck";
}
if (!-d $dirtocheck) {
error "path is not a directory: $dirtocheck";
}
}
2020-01-08 14:41:49 +00:00
my $fh;
if ($command eq 'upload') {
# open the requested file for writing
2020-01-08 16:44:07 +00:00
open $fh, '|-', @cmdprefix, 'sh', '-c', 'cat > "$1"',
'exec', $directory // error "failed to fork(): $!";
2022-11-14 13:34:15 +00:00
} elsif (any { $_ eq $command }
('copy-in', 'tar-in', 'sync-in')) {
2020-01-08 15:23:34 +00:00
# open a tar process that extracts the tarfile that we
# supply it with on stdin to the output directory inside
# the chroot
2020-11-13 21:36:58 +00:00
my @cmd = (
2021-12-14 15:10:25 +00:00
@cmdprefix, @tarcmd, '--xattrs-include=*',
2020-11-13 21:36:58 +00:00
'--directory', $directory, '--extract', '--file', '-'
);
2023-10-23 09:47:27 +00:00
# go via mmtarfilter if copy-in/mknod, tar-in/mknod or
# sync-in/mknod were part of the skip options
if (any { $_ eq "$command/mknod" } @skipopts) {
info "skipping $command/mknod as requested";
my $tarfilter = "mmtarfilter";
if (-x "./tarfilter") {
$tarfilter = "./tarfilter";
}
pipe my $filter_reader, $fh or error "pipe failed: $!";
pipe my $tar_reader, my $filter_writer
or error "pipe failed: $!";
my $pid1 = fork() // error "fork() failed: $!";
if ($pid1 == 0) {
open(STDIN, '<&', $filter_reader)
or error "cannot open STDIN: $!";
open(STDOUT, '>&', $filter_writer)
or error "cannot open STDOUT: $!";
close($tar_reader)
or error "cannot close tar_reader: $!";
debug(
"helper: running $tarfilter --type-exclude=3 "
. "--type-exclude=4");
eval { Devel::Cover::set_coverage("none") }
if $is_covering;
exec $tarfilter, '--type-exclude=3',
'--type-exclude=4';
}
my $pid2 = fork() // error "fork() failed: $!";
if ($pid2 == 0) {
open(STDIN, '<&', $tar_reader)
or error "cannot open STDIN: $!";
close($filter_writer)
or error "cannot close filter_writer: $!";
debug("helper: running " . (join " ", @cmd));
eval { Devel::Cover::set_coverage("none") }
if $is_covering;
exec { $cmd[0] } @cmd;
}
} else {
debug("helper: running " . (join " ", @cmd));
open($fh, '|-', @cmd) // error "failed to fork(): $!";
}
2020-01-16 09:38:14 +00:00
} else {
error "unknown command: $command";
2020-01-08 14:41:49 +00:00
}
if ($command eq 'copy-in') {
# instruct the parent process to create a tarball of the
# requested path outside the chroot
2020-11-13 21:36:58 +00:00
debug "helper: sending mktar";
2022-11-08 12:02:47 +00:00
print STDOUT (pack("n", length $file) . "mktar" . $file);
2020-01-16 09:38:14 +00:00
} elsif ($command eq 'sync-in') {
# instruct the parent process to create a tarball of the
# content of the requested path outside the chroot
2020-11-13 21:36:58 +00:00
debug "helper: sending mktac";
2022-11-08 12:02:47 +00:00
print STDOUT (pack("n", length $file) . "mktac" . $file);
2020-01-16 09:38:14 +00:00
} elsif (any { $_ eq $command } ('upload', 'tar-in')) {
2020-01-08 14:41:49 +00:00
# instruct parent process to open a tarball of the
# requested path outside the chroot for reading
2020-11-13 21:36:58 +00:00
debug "helper: sending openr";
2022-11-08 12:02:47 +00:00
print STDOUT (pack("n", length $file) . "openr" . $file);
2020-01-16 09:38:14 +00:00
} else {
error "unknown command: $command";
2020-01-08 14:41:49 +00:00
}
STDOUT->flush();
2020-11-13 21:36:58 +00:00
debug "helper: waiting for okthx";
2020-01-08 14:41:49 +00:00
checkokthx \*STDIN;
# handle "write" messages from the parent process and feed
# their payload into the tar process until a "close" message
# is encountered
2020-01-08 16:44:07 +00:00
while (1) {
2020-01-08 14:41:49 +00:00
# receive the next message
2020-01-08 16:44:07 +00:00
my $ret = read(STDIN, my $buf, 2 + 5)
// error "cannot read from socket: $!";
2020-01-08 14:41:49 +00:00
if ($ret == 0) {
error "received eof on socket";
}
my ($len, $msg) = unpack("nA5", $buf);
2020-11-13 21:36:58 +00:00
debug "helper: received message: $msg";
2020-01-08 14:41:49 +00:00
if ($msg eq "close") {
# finish the loop
if ($len != 0) {
error "expected no payload but got $len bytes";
}
2020-11-13 21:36:58 +00:00
debug "helper: sending okthx";
2020-01-08 16:44:07 +00:00
print STDOUT (pack("n", 0) . "okthx")
or error "cannot write to socket: $!";
2020-01-08 14:41:49 +00:00
STDOUT->flush();
last;
} elsif ($msg ne "write") {
error "expected write but got: $msg";
}
# read the payload
my $content;
{
2020-01-08 16:44:07 +00:00
my $ret = read(STDIN, $content, $len)
// error "error cannot read from socket: $!";
2020-01-08 14:41:49 +00:00
if ($ret == 0) {
error "received eof on socket";
}
}
# write the payload to the tar process
2020-01-08 16:44:07 +00:00
print $fh $content
or error "cannot write to tar process: $!";
2020-11-13 21:36:58 +00:00
debug "helper: sending okthx";
2020-01-08 16:44:07 +00:00
print STDOUT (pack("n", 0) . "okthx")
or error "cannot write to socket: $!";
2020-01-08 14:41:49 +00:00
STDOUT->flush();
}
close $fh;
if ($command ne 'upload' and $? != 0) {
error "tar failed";
}
}
2022-11-14 13:34:15 +00:00
} elsif (any { $_ eq $command }
('copy-out', 'tar-out', 'download', 'sync-out')) {
2022-11-08 12:02:47 +00:00
if (scalar @args < 2) {
2020-08-18 07:36:27 +00:00
error "$command needs at least one path inside the chroot and"
2020-01-08 16:41:46 +00:00
. " the output path on the outside";
2020-01-08 14:41:49 +00:00
}
2022-11-08 12:02:47 +00:00
my $outpath = pop @args;
foreach my $file (@args) {
2020-01-08 14:41:49 +00:00
# the right argument for tar's --directory argument depends on
# whether tar is called from inside the chroot or from the
# outside
my $directory;
if ($hook eq 'setup') {
2020-08-15 16:29:17 +00:00
# tar runs outside, so acquire the correct path
2022-11-08 12:02:47 +00:00
$directory = chrooted_realpath $root, $file;
2022-11-14 13:34:15 +00:00
} elsif (any { $_ eq $hook }
('extract', 'essential', 'customize')) {
2022-09-05 03:50:50 +00:00
if ($mode eq 'fakechroot') {
2020-08-15 16:29:17 +00:00
# tar will run inside the chroot
2022-11-08 12:02:47 +00:00
$directory = $file;
2022-11-14 13:34:15 +00:00
} elsif (any { $_ eq $mode }
('root', 'chrootless', 'unshare')) {
2022-11-08 12:02:47 +00:00
$directory = chrooted_realpath $root, $file;
2020-08-15 16:29:17 +00:00
} else {
error "unknown mode: $mode";
}
2020-01-08 14:41:49 +00:00
} else {
error "unknown hook: $hook";
}
2022-09-05 03:50:50 +00:00
                # if chrooted_realpath was used (i.e. fakechroot was not used,
                # as absolute symlinks would be broken there) we can
2020-08-15 16:29:17 +00:00
# check and potentially fail early if the source does not exist
2022-09-05 03:50:50 +00:00
if ($mode ne 'fakechroot') {
2020-08-15 16:29:17 +00:00
if (!-e $directory) {
error "path does not exist: $directory";
}
if ($command eq 'download') {
if (!-f $directory) {
error "path is not a file: $directory";
}
}
}
2020-01-08 14:41:49 +00:00
my $fh;
if ($command eq 'download') {
# open the requested file for reading
2020-01-08 16:44:07 +00:00
open $fh, '-|', @cmdprefix, 'sh', '-c', 'cat "$1"',
'exec', $directory // error "failed to fork(): $!";
2020-01-16 09:38:14 +00:00
} elsif ($command eq 'sync-out') {
2020-01-08 15:23:34 +00:00
# Open a tar process that creates a tarfile of everything
2020-01-16 09:38:14 +00:00
# inside the requested directory inside the chroot and
# writes it to stdout.
2020-11-13 21:36:58 +00:00
my @cmd = (
2021-12-14 15:10:25 +00:00
@cmdprefix, @tarcmd, '--directory',
2020-11-13 21:36:58 +00:00
$directory, '--create', '--file', '-', '.'
);
debug("helper: running " . (join " ", @cmd));
open($fh, '-|', @cmd) // error "failed to fork(): $!";
2020-01-16 09:38:14 +00:00
} elsif (any { $_ eq $command } ('copy-out', 'tar-out')) {
# Open a tar process that creates a tarfile of the
# requested directory inside the chroot and writes it to
# stdout. To emulate the behaviour of cp, change to the
# dirname of the requested path first.
2020-11-13 21:36:58 +00:00
my @cmd = (
@cmdprefix, @tarcmd, '--directory',
dirname($directory), '--create', '--file', '-',
basename($directory));
debug("helper: running " . (join " ", @cmd));
open($fh, '-|', @cmd) // error "failed to fork(): $!";
2020-01-16 09:38:14 +00:00
} else {
error "unknown command: $command";
2020-01-08 14:41:49 +00:00
}
2020-01-16 09:38:14 +00:00
if (any { $_ eq $command } ('copy-out', 'sync-out')) {
2020-01-08 14:41:49 +00:00
# instruct the parent process to extract a tarball to a
# certain path outside the chroot
2020-11-13 21:36:58 +00:00
debug "helper: sending untar";
2020-01-08 16:44:07 +00:00
print STDOUT (
pack("n", length $outpath) . "untar" . $outpath);
2020-01-16 09:38:14 +00:00
} elsif (any { $_ eq $command } ('download', 'tar-out')) {
2020-01-08 14:41:49 +00:00
# instruct parent process to open a tarball of the
# requested path outside the chroot for writing
2020-11-13 21:36:58 +00:00
debug "helper: sending openw";
2020-01-08 16:44:07 +00:00
print STDOUT (
pack("n", length $outpath) . "openw" . $outpath);
2020-01-16 09:38:14 +00:00
} else {
error "unknown command: $command";
2020-01-08 14:41:49 +00:00
}
STDOUT->flush();
2020-11-13 21:36:58 +00:00
debug "helper: waiting for okthx";
2020-01-08 14:41:49 +00:00
checkokthx \*STDIN;
# read from the tar process and send as payload to the parent
# process
while (1) {
# read from tar
2020-01-08 16:44:07 +00:00
my $ret = read($fh, my $cont, 4096)
// error "cannot read from pipe: $!";
2020-01-08 14:41:49 +00:00
if ($ret == 0) { last; }
2020-11-13 21:36:58 +00:00
debug "helper: sending write";
2020-01-08 14:41:49 +00:00
# send to parent
print STDOUT pack("n", $ret) . "write" . $cont;
STDOUT->flush();
2020-11-13 21:36:58 +00:00
debug "helper: waiting for okthx";
2020-01-08 14:41:49 +00:00
checkokthx \*STDIN;
if ($ret < 4096) { last; }
}
# signal to the parent process that we are done
2020-11-13 21:36:58 +00:00
debug "helper: sending close";
2020-01-08 14:41:49 +00:00
print STDOUT pack("n", 0) . "close";
STDOUT->flush();
2020-11-13 21:36:58 +00:00
debug "helper: waiting for okthx";
2020-01-08 14:41:49 +00:00
checkokthx \*STDIN;
close $fh;
2020-01-16 09:38:14 +00:00
if ($? != 0) {
error "$command failed";
2020-01-08 14:41:49 +00:00
}
}
} else {
error "unknown command: $command";
}
2020-01-16 11:02:11 +00:00
};
if ($@) {
# inform the other side that something went wrong
print STDOUT (pack("n", 0) . "error");
STDOUT->flush();
error "hookhelper failed: $@";
}
return;
}
2020-01-08 14:41:49 +00:00
2020-08-15 20:36:13 +00:00
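# The counterpart to hookhelper: it receives the openr, openw, mktar, mktac
# and untar requests that the helper writes to its stdout, carries them out on
# the outside and acknowledges every message with okthx.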
sub hooklistener {
2022-11-08 12:02:47 +00:00
$verbosity_level = shift;
2020-08-15 20:36:13 +00:00
# we put everything in an eval block because that way we can easily handle
# errors without goto labels or much code duplication: the error handler
# has to send an "error" message to the other side
eval {
while (1) {
# get the next message
my $msg = "error";
my $len = -1;
{
2020-11-13 21:36:58 +00:00
debug "listener: reading next command";
2020-08-15 20:36:13 +00:00
my $ret = read(STDIN, my $buf, 2 + 5)
// error "cannot read from socket: $!";
2020-11-13 21:36:58 +00:00
debug "listener: finished reading command";
2020-08-15 20:36:13 +00:00
if ($ret == 0) {
error "received eof on socket";
}
($len, $msg) = unpack("nA5", $buf);
}
if ($msg eq "adios") {
2020-11-13 21:36:58 +00:00
debug "listener: received message: adios";
2020-08-15 20:36:13 +00:00
# setup finished, so we break out of the loop
if ($len != 0) {
error "expected no payload but got $len bytes";
}
last;
} elsif ($msg eq "openr") {
# handle the openr message
2020-11-13 21:36:58 +00:00
debug "listener: received message: openr";
2020-08-15 20:36:13 +00:00
my $infile;
{
my $ret = read(STDIN, $infile, $len)
// error "cannot read from socket: $!";
if ($ret == 0) {
error "received eof on socket";
}
}
# make sure that the requested path exists outside the chroot
if (!-e $infile) {
error "$infile does not exist";
}
2020-11-13 21:36:58 +00:00
debug "listener: sending okthx";
2020-08-15 20:36:13 +00:00
print STDOUT (pack("n", 0) . "okthx")
or error "cannot write to socket: $!";
STDOUT->flush();
open my $fh, '<', $infile
or error "failed to open $infile for reading: $!";
# read from the file and send as payload to the child process
while (1) {
# read from file
my $ret = read($fh, my $cont, 4096)
// error "cannot read from pipe: $!";
if ($ret == 0) { last; }
2020-11-13 21:36:58 +00:00
debug "listener: sending write";
2020-08-15 20:36:13 +00:00
# send to child
print STDOUT pack("n", $ret) . "write" . $cont;
STDOUT->flush();
2020-11-13 21:36:58 +00:00
debug "listener: waiting for okthx";
2020-08-15 20:36:13 +00:00
checkokthx \*STDIN;
if ($ret < 4096) { last; }
}
# signal to the child process that we are done
2020-11-13 21:36:58 +00:00
debug "listener: sending close";
2020-08-15 20:36:13 +00:00
print STDOUT pack("n", 0) . "close";
STDOUT->flush();
2020-11-13 21:36:58 +00:00
debug "listener: waiting for okthx";
2020-08-15 20:36:13 +00:00
checkokthx \*STDIN;
close $fh;
} elsif ($msg eq "openw") {
2020-11-13 21:36:58 +00:00
debug "listener: received message: openw";
2020-08-15 20:36:13 +00:00
# payload is the output directory
my $outfile;
{
my $ret = read(STDIN, $outfile, $len)
// error "cannot read from socket: $!";
if ($ret == 0) {
error "received eof on socket";
}
}
# make sure that the directory exists
my $outdir = dirname($outfile);
if (-e $outdir) {
if (!-d $outdir) {
error "$outdir already exists but is not a directory";
}
} else {
my $num_created = make_path $outdir, { error => \my $err };
if ($err && @$err) {
error(
join "; ",
(
map { "cannot create " . (join ": ", %{$_}) }
@$err
));
} elsif ($num_created == 0) {
error "cannot create $outdir";
}
}
2020-11-13 21:36:58 +00:00
debug "listener: sending okthx";
2020-08-15 20:36:13 +00:00
print STDOUT (pack("n", 0) . "okthx")
or error "cannot write to socket: $!";
STDOUT->flush();
# now we expect one or more "write" messages containing the
# tarball to write
open my $fh, '>', $outfile
or error "failed to open $outfile for writing: $!";
# handle "write" messages from the child process and feed
# their payload into the file handle until a "close" message
# is encountered
while (1) {
# receive the next message
my $ret = read(STDIN, my $buf, 2 + 5)
// error "cannot read from socket: $!";
if ($ret == 0) {
error "received eof on socket";
}
my ($len, $msg) = unpack("nA5", $buf);
2020-11-13 21:36:58 +00:00
debug "listener: received message: $msg";
2020-08-15 20:36:13 +00:00
if ($msg eq "close") {
# finish the loop
if ($len != 0) {
error "expected no payload but got $len bytes";
}
2020-11-13 21:36:58 +00:00
debug "listener: sending okthx";
2020-08-15 20:36:13 +00:00
print STDOUT (pack("n", 0) . "okthx")
or error "cannot write to socket: $!";
STDOUT->flush();
last;
} elsif ($msg ne "write") {
# we should not receive this message at this point
error "expected write but got: $msg";
}
# read the payload
my $content;
{
my $ret = read(STDIN, $content, $len)
// error "error cannot read from socket: $!";
if ($ret == 0) {
error "received eof on socket";
}
}
# write the payload to the file handle
print $fh $content
or error "cannot write to file handle: $!";
2020-11-13 21:36:58 +00:00
debug "listener: sending okthx";
2020-08-15 20:36:13 +00:00
print STDOUT (pack("n", 0) . "okthx")
or error "cannot write to socket: $!";
STDOUT->flush();
}
close $fh;
} elsif (any { $_ eq $msg } ('mktar', 'mktac')) {
# handle the mktar message
2020-11-13 21:36:58 +00:00
debug "listener: received message: $msg";
2020-08-15 20:36:13 +00:00
my $indir;
{
my $ret = read(STDIN, $indir, $len)
// error "cannot read from socket: $!";
if ($ret == 0) {
error "received eof on socket";
}
}
# make sure that the requested path exists outside the chroot
if (!-e $indir) {
error "$indir does not exist";
}
2020-11-13 21:36:58 +00:00
debug "listener: sending okthx";
2020-08-15 20:36:13 +00:00
print STDOUT (pack("n", 0) . "okthx")
or error "cannot write to socket: $!";
STDOUT->flush();
# Open a tar process creating a tarfile of the instructed
# path. To emulate the behaviour of cp, change to the
# dirname of the requested path first.
2020-11-13 21:36:58 +00:00
my @cmd = (
'tar',
'--numeric-owner',
'--xattrs',
'--format=pax',
'--pax-option=exthdr.name=%d/PaxHeaders/%f,'
. 'delete=atime,delete=ctime',
'--directory',
$msg eq 'mktar' ? dirname($indir) : $indir,
'--create',
'--file',
'-',
$msg eq 'mktar' ? basename($indir) : '.'
);
debug("listener: running " . (join " ", @cmd));
open(my $fh, '-|', @cmd) // error "failed to fork(): $!";
2020-08-15 20:36:13 +00:00
# read from the tar process and send as payload to the child
# process
while (1) {
# read from tar
my $ret = read($fh, my $cont, 4096)
// error "cannot read from pipe: $!";
if ($ret == 0) { last; }
2020-11-13 21:36:58 +00:00
debug "listener: sending write ($ret bytes)";
2020-08-15 20:36:13 +00:00
# send to child
print STDOUT pack("n", $ret) . "write" . $cont;
STDOUT->flush();
2020-11-13 21:36:58 +00:00
debug "listener: waiting for okthx";
2020-08-15 20:36:13 +00:00
checkokthx \*STDIN;
if ($ret < 4096) { last; }
}
# signal to the child process that we are done
2020-11-13 21:36:58 +00:00
debug "listener: sending close";
2020-08-15 20:36:13 +00:00
print STDOUT pack("n", 0) . "close";
STDOUT->flush();
2020-11-13 21:36:58 +00:00
debug "listener: waiting for okthx";
2020-08-15 20:36:13 +00:00
checkokthx \*STDIN;
close $fh;
if ($? != 0) {
error "tar failed";
}
} elsif ($msg eq "untar") {
2020-11-13 21:36:58 +00:00
debug "listener: received message: untar";
2020-08-15 20:36:13 +00:00
# payload is the output directory
my $outdir;
{
my $ret = read(STDIN, $outdir, $len)
// error "cannot read from socket: $!";
if ($ret == 0) {
error "received eof on socket";
}
}
# make sure that the directory exists
if (-e $outdir) {
if (!-d $outdir) {
error "$outdir already exists but is not a directory";
}
} else {
my $num_created = make_path $outdir, { error => \my $err };
if ($err && @$err) {
error(
join "; ",
(
map { "cannot create " . (join ": ", %{$_}) }
@$err
));
} elsif ($num_created == 0) {
error "cannot create $outdir";
}
}
2020-11-13 21:36:58 +00:00
debug "listener: sending okthx";
2020-08-15 20:36:13 +00:00
print STDOUT (pack("n", 0) . "okthx")
or error "cannot write to socket: $!";
STDOUT->flush();
# now we expect one or more "write" messages containing the
# tarball to unpack
open my $fh, '|-', 'tar', '--numeric-owner', '--xattrs',
'--xattrs-include=*', '--directory', $outdir,
'--extract', '--file',
'-' // error "failed to fork(): $!";
# handle "write" messages from the child process and feed
# their payload into the tar process until a "close" message
# is encountered
while (1) {
# receive the next message
my $ret = read(STDIN, my $buf, 2 + 5)
// error "cannot read from socket: $!";
if ($ret == 0) {
error "received eof on socket";
}
my ($len, $msg) = unpack("nA5", $buf);
2020-11-13 21:36:58 +00:00
debug "listener: received message: $msg";
2020-08-15 20:36:13 +00:00
if ($msg eq "close") {
# finish the loop
if ($len != 0) {
error "expected no payload but got $len bytes";
}
2020-11-13 21:36:58 +00:00
debug "listener: sending okthx";
2020-08-15 20:36:13 +00:00
print STDOUT (pack("n", 0) . "okthx")
or error "cannot write to socket: $!";
STDOUT->flush();
last;
} elsif ($msg ne "write") {
# we should not receive this message at this point
error "expected write but got: $msg";
}
# read the payload
my $content;
{
my $ret = read(STDIN, $content, $len)
// error "error cannot read from socket: $!";
if ($ret == 0) {
error "received eof on socket";
}
}
# write the payload to the tar process
print $fh $content
or error "cannot write to tar process: $!";
2020-11-13 21:36:58 +00:00
debug "listener: sending okthx";
2020-08-15 20:36:13 +00:00
print STDOUT (pack("n", 0) . "okthx")
or error "cannot write to socket: $!";
STDOUT->flush();
}
close $fh;
if ($? != 0) {
error "tar failed";
}
2023-10-23 09:49:14 +00:00
} elsif ($msg eq "error") {
error "received error on socket";
2020-08-15 20:36:13 +00:00
} else {
error "unknown message: $msg";
}
}
};
if ($@) {
2023-10-23 09:50:09 +00:00
warning("hooklistener errored out: $@");
2020-08-15 20:36:13 +00:00
# inform the other side that something went wrong
print STDOUT (pack("n", 0) . "error")
or error "cannot write to socket: $!";
STDOUT->flush();
}
return;
}
2020-08-24 14:20:04 +00:00
# parse files of the format found in /usr/share/distro-info/ and return two
# lists: the first contains codenames of end-of-life distros and the second
# list contains codenames of currently active distros
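#
# A data line consists of the columns version,codename,series,created,release,
# eol, for example (illustrative values):
#   12,Bookworm,bookworm,2021-08-14,2023-06-10,2026-06-10
# Lines that do not have exactly six columns are treated as currently active.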
sub parse_distro_info {
my $file = shift;
my @eol = ();
my @current = ();
my $today = POSIX::strftime "%Y-%m-%d", localtime;
open my $fh, '<', $file or error "cannot open $file: $!";
my $i = 0;
while (my $line = <$fh>) {
chomp($line);
$i++;
my @cells = split /,/, $line;
if (scalar @cells < 4) {
error "cannot parse line $i of $file";
}
if (
$i == 1
and ( scalar @cells < 6
or $cells[0] ne 'version'
or $cells[1] ne 'codename'
or $cells[2] ne 'series'
or $cells[3] ne 'created'
or $cells[4] ne 'release'
or $cells[5] ne 'eol')
) {
error "cannot find correct header in $file";
}
if ($i == 1) {
next;
}
if (scalar @cells == 6) {
if ($cells[5] !~ m/^\d\d\d\d-\d\d-\d\d$/) {
error "invalid eof date format in $file:$i: $cells[5]";
}
# since the date format is iso8601, we can use lexicographic string
# comparison to compare dates
if ($cells[5] lt $today) {
push @eol, $cells[2];
} else {
push @current, $cells[2];
}
} else {
push @current, $cells[2];
}
}
close $fh;
return ([@eol], [@current]);
}
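# Return a hash mapping each known vendor (debian, ubuntu, tanglu, kali) to a
# hash of suite names where a value of 1 marks an end-of-life suite and 0 a
# currently active one.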
sub get_suite_by_vendor {
my %suite_by_vendor = (
'debian' => {},
'ubuntu' => {},
'tanglu' => {},
'kali' => {},
);
# pre-fill with some known values
foreach my $suite (
'potato', 'woody', 'sarge', 'etch',
'lenny', 'squeeze', 'wheezy', 'jessie'
) {
$suite_by_vendor{'debian'}->{$suite} = 1;
}
foreach my $suite (
'unstable', 'stable', 'oldstable', 'stretch',
2021-02-04 16:43:33 +00:00
'buster', 'bullseye', 'bookworm', 'trixie'
2020-08-24 14:20:04 +00:00
) {
$suite_by_vendor{'debian'}->{$suite} = 0;
}
foreach my $suite ('aequorea', 'bartholomea', 'chromodoris', 'dasyatis') {
$suite_by_vendor{'tanglu'}->{$suite} = 0;
}
foreach my $suite ('kali-dev', 'kali-rolling', 'kali-bleeding-edge') {
$suite_by_vendor{'kali'}->{$suite} = 0;
}
foreach
my $suite ('trusty', 'xenial', 'zesty', 'artful', 'bionic', 'cosmic') {
$suite_by_vendor{'ubuntu'}->{$suite} = 0;
}
# if the Debian package distro-info-data is installed, then we can use it,
# to get better data about new distros or EOL distros
if (-e '/usr/share/distro-info/debian.csv') {
my ($eol, $current)
= parse_distro_info('/usr/share/distro-info/debian.csv');
foreach my $suite (@{$eol}) {
$suite_by_vendor{'debian'}->{$suite} = 1;
}
foreach my $suite (@{$current}) {
$suite_by_vendor{'debian'}->{$suite} = 0;
}
}
if (-e '/usr/share/distro-info/ubuntu.csv') {
my ($eol, $current)
= parse_distro_info('/usr/share/distro-info/ubuntu.csv');
foreach my $suite (@{$eol}, @{$current}) {
$suite_by_vendor{'ubuntu'}->{$suite} = 0;
}
}
# if debootstrap is installed we infer distro names from the symlink
# targets of the scripts in /usr/share/debootstrap/scripts/
my $debootstrap_scripts = '/usr/share/debootstrap/scripts/';
if (-d $debootstrap_scripts) {
opendir(my $dh, $debootstrap_scripts)
or error "Can't opendir($debootstrap_scripts): $!";
while (my $suite = readdir $dh) {
# this is only a heuristic -- don't overwrite anything but instead
# just update anything that was missing
if (!-l "$debootstrap_scripts/$suite") {
next;
}
my $target = readlink "$debootstrap_scripts/$suite";
if ($target eq "sid"
and not exists $suite_by_vendor{'debian'}->{$suite}) {
$suite_by_vendor{'debian'}->{$suite} = 0;
} elsif ($target eq "gutsy"
and not exists $suite_by_vendor{'ubuntu'}->{$suite}) {
$suite_by_vendor{'ubuntu'}->{$suite} = 0;
} elsif ($target eq "aequorea"
and not exists $suite_by_vendor{'tanglu'}->{$suite}) {
$suite_by_vendor{'tanglu'}->{$suite} = 0;
} elsif ($target eq "kali"
and not exists $suite_by_vendor{'kali'}->{$suite}) {
$suite_by_vendor{'kali'}->{$suite} = 0;
}
}
closedir($dh);
}
return %suite_by_vendor;
}
# try to guess the right keyring path for the given suite
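# For example, on a typical Debian system with the keyring packages installed,
# a current Debian suite such as unstable maps to
# /usr/share/keyrings/debian-archive-keyring.gpg while an end-of-life suite
# like potato maps to /usr/share/keyrings/debian-archive-removed-keys.gpg
# (the exact paths come from Dpkg::Vendor if it is available).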
sub get_keyring_by_suite {
my $query = shift;
my $suite_by_vendor = shift;
my $debianvendor;
my $ubuntuvendor;
2021-11-29 20:15:59 +00:00
# make $@ local, so we don't print "Can't locate Dpkg/Vendor/Debian.pm"
# in other parts where we evaluate $@
local $@ = '';
2020-08-24 14:20:04 +00:00
eval {
require Dpkg::Vendor::Debian;
require Dpkg::Vendor::Ubuntu;
$debianvendor = Dpkg::Vendor::Debian->new();
$ubuntuvendor = Dpkg::Vendor::Ubuntu->new();
};
my $keyring_by_vendor = sub {
my $vendor = shift;
my $eol = shift;
if ($vendor eq 'debian') {
if ($eol) {
if (defined $debianvendor) {
return $debianvendor->run_hook(
'archive-keyrings-historic');
} else {
return
'/usr/share/keyrings/debian-archive-removed-keys.gpg';
}
} else {
if (defined $debianvendor) {
return $debianvendor->run_hook('archive-keyrings');
} else {
return '/usr/share/keyrings/debian-archive-keyring.gpg';
}
}
} elsif ($vendor eq 'ubuntu') {
if (defined $ubuntuvendor) {
return $ubuntuvendor->run_hook('archive-keyrings');
} else {
return '/usr/share/keyrings/ubuntu-archive-keyring.gpg';
}
} elsif ($vendor eq 'tanglu') {
return '/usr/share/keyrings/tanglu-archive-keyring.gpg';
} elsif ($vendor eq 'kali') {
return '/usr/share/keyrings/kali-archive-keyring.gpg';
} else {
error "unknown vendor: $vendor";
}
};
my %keyrings = ();
foreach my $vendor (keys %{$suite_by_vendor}) {
foreach my $suite (keys %{ $suite_by_vendor->{$vendor} }) {
my $keyring = $keyring_by_vendor->(
$vendor, $suite_by_vendor->{$vendor}->{$suite});
debug "suite $suite with keyring $keyring";
$keyrings{$suite} = $keyring;
}
}
if (exists $keyrings{$query}) {
return $keyrings{$query};
} else {
return;
}
}
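# Build the one-line-style sources.list content for the given suite. For a
# stable Debian suite such as bookworm with the default mirrors, an empty
# $signedby and $compstr set to "main", the result is (a sketch):
#   deb http://deb.debian.org/debian bookworm main
#   deb http://deb.debian.org/debian bookworm-updates main
#   deb http://security.debian.org/debian-security bookworm-security main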
sub get_sourceslist_by_suite {
my $suite = shift;
my $arch = shift;
my $signedby = shift;
my $compstr = shift;
my $suite_by_vendor = shift;
my @debstable = keys %{ $suite_by_vendor->{'debian'} };
my @ubuntustable = keys %{ $suite_by_vendor->{'ubuntu'} };
my @tanglustable = keys %{ $suite_by_vendor->{'tanglu'} };
my @kali = keys %{ $suite_by_vendor->{'kali'} };
my $mirror = 'http://deb.debian.org/debian';
my $secmirror = 'http://security.debian.org/debian-security';
if (any { $_ eq $suite } @ubuntustable) {
if (any { $_ eq $arch } ('amd64', 'i386')) {
$mirror = 'http://archive.ubuntu.com/ubuntu';
$secmirror = 'http://security.ubuntu.com/ubuntu';
} else {
$mirror = 'http://ports.ubuntu.com/ubuntu-ports';
$secmirror = 'http://ports.ubuntu.com/ubuntu-ports';
}
if (-e '/usr/share/debootstrap/scripts/gutsy') {
# try running the debootstrap script but ignore errors
my $script = 'set -eu;
default_mirror() { echo $1; };
mirror_style() { :; };
download_style() { :; };
finddebs_style() { :; };
variants() { :; };
keyring() { :; };
doing_variant() { false; };
2022-09-12 09:56:26 +00:00
info() { fmt="$2"; shift; shift; printf "I: $fmt\n" "$@" >&2; };
2020-08-24 14:20:04 +00:00
. /usr/share/debootstrap/scripts/gutsy;';
open my $fh, '-|', 'env', "ARCH=$arch", "SUITE=$suite",
'sh', '-c', $script // last;
chomp(
my $output = do { local $/; <$fh> }
);
close $fh;
if ($? == 0 && $output ne '') {
$mirror = $output;
}
}
} elsif (any { $_ eq $suite } @tanglustable) {
$mirror = 'http://archive.tanglu.org/tanglu';
} elsif (any { $_ eq $suite } @kali) {
$mirror = 'https://http.kali.org/kali';
}
my $sourceslist = '';
$sourceslist .= "deb$signedby $mirror $suite $compstr\n";
if (any { $_ eq $suite } @ubuntustable) {
$sourceslist .= "deb$signedby $mirror $suite-updates $compstr\n";
$sourceslist .= "deb$signedby $secmirror $suite-security $compstr\n";
} elsif (any { $_ eq $suite } @tanglustable) {
$sourceslist .= "deb$signedby $secmirror $suite-updates $compstr\n";
} elsif (any { $_ eq $suite } @debstable
and none { $_ eq $suite } ('testing', 'unstable', 'sid')) {
$sourceslist .= "deb$signedby $mirror $suite-updates $compstr\n";
2020-11-15 09:14:03 +00:00
# the security mirror changes, starting with bullseye
# https://lists.debian.org/87r26wqr2a.fsf@43-1.org
2020-11-27 23:46:48 +00:00
my $bullseye_or_later = 0;
2022-11-14 13:34:15 +00:00
if (any { $_ eq $suite } ('stable', 'bullseye', 'bookworm', 'trixie'))
{
2022-01-08 07:31:26 +00:00
$bullseye_or_later = 1;
}
my $distro_info = '/usr/share/distro-info/debian.csv';
2021-11-29 20:15:59 +00:00
# make $@ local, so we don't print "Can't locate Debian/DistroInfo.pm"
# in other parts where we evaluate $@
local $@ = '';
2020-11-27 23:46:48 +00:00
eval { require Debian::DistroInfo; };
if (!$@) {
2022-01-08 07:31:26 +00:00
debug "libdistro-info-perl is installed";
2020-11-27 23:46:48 +00:00
my $debinfo = DebianDistroInfo->new();
if ($debinfo->version($suite, 0) >= 11) {
$bullseye_or_later = 1;
}
} elsif (-f $distro_info) {
2022-01-08 07:31:26 +00:00
debug "distro-info-data is installed";
2020-11-15 09:14:03 +00:00
open my $fh, '<', $distro_info
or error "cannot open $distro_info: $!";
my $i = 0;
my $matching_version;
2021-08-16 11:11:42 +00:00
my @releases;
my $today = POSIX::strftime "%Y-%m-%d", localtime;
2020-11-15 09:14:03 +00:00
while (my $line = <$fh>) {
chomp($line);
$i++;
my @cells = split /,/, $line;
if (scalar @cells < 4) {
error "cannot parse line $i of $distro_info";
}
if (
$i == 1
and ( scalar @cells < 6
or $cells[0] ne 'version'
or $cells[1] ne 'codename'
or $cells[2] ne 'series'
or $cells[3] ne 'created'
or $cells[4] ne 'release'
or $cells[5] ne 'eol')
) {
error "cannot find correct header in $distro_info";
}
if ($i == 1) {
next;
}
2021-08-16 11:11:42 +00:00
if ( scalar @cells > 4
and $cells[4] =~ m/^\d\d\d\d-\d\d-\d\d$/
and $cells[4] lt $today) {
push @releases, $cells[0];
}
2020-11-15 09:14:03 +00:00
if (lc $cells[1] eq $suite or lc $cells[2] eq $suite) {
$matching_version = $cells[0];
last;
}
}
close $fh;
2020-11-29 19:54:50 +00:00
if (defined $matching_version and $matching_version >= 11) {
2020-11-27 23:46:48 +00:00
$bullseye_or_later = 1;
2020-11-15 09:14:03 +00:00
}
2021-08-16 11:11:42 +00:00
if ($suite eq "stable" and $releases[-1] >= 11) {
$bullseye_or_later = 1;
}
2020-08-24 14:20:04 +00:00
} else {
2022-01-08 07:31:26 +00:00
debug "neither libdistro-info-perl nor distro-info-data installed";
2020-08-24 14:20:04 +00:00
}
2020-11-27 23:46:48 +00:00
if ($bullseye_or_later) {
            # starting with bullseye the security suite is $suite-security
            # instead of $suite/updates
$sourceslist
.= "deb$signedby $secmirror $suite-security" . " $compstr\n";
} else {
$sourceslist
.= "deb$signedby $secmirror $suite/updates" . " $compstr\n";
}
2020-08-24 14:20:04 +00:00
}
return $sourceslist;
}
2020-01-22 22:30:28 +00:00
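# Guess whether the given apt sources content is in one-line or deb822 format.
# For example (illustrative snippets), a line like
#   deb http://deb.debian.org/debian unstable main
# yields 'one-line' while a line like
#   Types: deb
# yields 'deb822'; undef is returned if neither format can be detected.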
sub guess_sources_format {
my $content = shift;
my $is_deb822 = 0;
my $is_oneline = 0;
for my $line (split "\n", $content) {
if ($line =~ /^deb(-src)? /) {
$is_oneline = 1;
last;
}
if ($line =~ /^[^#:\s]+:/) {
$is_deb822 = 1;
last;
}
}
if ($is_deb822) {
return 'deb822';
}
if ($is_oneline) {
return 'one-line';
}
return;
}
2020-04-09 16:33:05 +00:00
sub approx_disk_usage {
2024-05-12 15:16:51 +00:00
my $directory = shift;
my $block_size = shift;
2020-04-09 16:33:05 +00:00
info "approximating disk usage...";
2020-06-23 20:45:17 +00:00
# the "du" utility reports different results depending on the underlying
# filesystem, see https://bugs.debian.org/650077 for a discussion
#
# we use code similar to the one used by dpkg-gencontrol instead
#
2024-05-12 15:16:51 +00:00
# Regular files are measured in number of $block_size byte blocks. All
# other entries are assumed to take one block of space.
2020-06-23 20:45:17 +00:00
#
# We ignore /dev because depending on the mode, the directory might be
# populated or not and we want consistent disk usage results independent
# of the mode.
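    #
    # For example, with a block size of 1024 bytes, a regular file of 1500
    # bytes is counted as int((1500 + 1024) / 1024) = 2 blocks.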
2024-02-01 04:53:51 +00:00
my $installed_size = 0;
my %hardlink;
2020-06-23 20:45:17 +00:00
my $scan_installed_size = sub {
if ($File::Find::name eq "$directory/dev") {
# add all entries of @devfiles once
$installed_size += scalar @devfiles;
2024-02-01 04:53:51 +00:00
return;
2020-06-23 20:45:17 +00:00
} elsif ($File::Find::name =~ /^$directory\/dev\//) {
# ignore everything below /dev
2024-02-01 04:53:51 +00:00
return;
}
lstat or error "cannot stat $File::Find::name";
if (-f _ or -l _) {
my ($dev, $ino, $nlink) = (lstat _)[0, 1, 3];
return if exists $hardlink{"$dev:$ino"};
# Track hardlinks to avoid repeated additions.
$hardlink{"$dev:$ino"} = 1 if $nlink > 1;
2024-05-12 15:16:51 +00:00
# add file size in $block_size byte blocks, rounded up
$installed_size += int(((-s _) + $block_size) / $block_size);
2020-06-23 20:45:17 +00:00
} else {
# all other entries are assumed to only take up one block
$installed_size += 1;
}
};
2022-02-16 09:52:58 +00:00
# We use no_chdir because otherwise the unshared user has to have read
# permissions for the current working directory when producing an ext2
# image. See https://bugs.debian.org/1005857
find({ wanted => $scan_installed_size, no_chdir => 1 }, $directory);
2020-06-23 20:45:17 +00:00
# because the above is only a heuristic we add 10% extra for good measure
return int($installed_size * 1.1);
2020-04-09 16:33:05 +00:00
}
2020-01-16 11:02:11 +00:00
sub main() {
2020-08-25 14:06:05 +00:00
my $before = Time::HiRes::time;
2020-01-16 11:02:11 +00:00
umask 022;
if (scalar @ARGV >= 7 && $ARGV[0] eq "--hook-helper") {
2022-11-08 12:02:47 +00:00
shift @ARGV; # shift off "--hook-helper"
hookhelper(@ARGV);
2020-01-08 14:41:49 +00:00
exit 0;
2019-12-09 09:40:51 +00:00
}
2020-08-15 20:36:13 +00:00
# this is the counterpart to --hook-helper and will receive and carry
# out its instructions
2020-11-13 21:37:53 +00:00
if (scalar @ARGV == 2 && $ARGV[0] eq "--hook-listener") {
2022-11-08 12:02:47 +00:00
hooklistener($ARGV[1]);
2020-08-15 20:36:13 +00:00
exit 0;
}
2020-01-18 22:13:10 +00:00
# this is like:
# lxc-usernsexec -- lxc-unshare -s 'MOUNT|PID|UTSNAME|IPC' ...
# but without needing lxc
2022-07-28 15:21:27 +00:00
if (scalar @ARGV >= 1 && $ARGV[0] eq "--unshare-helper") {
2023-03-16 07:14:39 +00:00
if ($EFFECTIVE_USER_ID != 0) {
test_unshare_userns(1);
2020-01-18 22:13:10 +00:00
}
2021-08-18 20:15:05 +00:00
my @idmap = ();
if ($EFFECTIVE_USER_ID != 0) {
2023-03-15 16:08:12 +00:00
@idmap = read_subuid_subgid 1;
2021-08-18 20:15:05 +00:00
}
my $pid = get_unshare_cmd(
2020-01-18 22:13:10 +00:00
sub {
0 == system @ARGV[1 .. $#ARGV] or error "system failed: $?";
},
\@idmap
);
waitpid $pid, 0;
$? == 0 or error "unshared command failed";
exit 0;
}
2019-12-09 09:40:51 +00:00
2018-09-18 09:20:24 +00:00
my $mtime = time;
if (exists $ENV{SOURCE_DATE_EPOCH}) {
2020-01-08 16:44:07 +00:00
$mtime = $ENV{SOURCE_DATE_EPOCH} + 0;
2018-09-18 09:20:24 +00:00
}
2020-01-09 07:39:40 +00:00
{
## no critic (Variables::RequireLocalizedPunctuationVars)
$ENV{DEBIAN_FRONTEND} = 'noninteractive';
$ENV{DEBCONF_NONINTERACTIVE_SEEN} = 'true';
$ENV{LC_ALL} = 'C.UTF-8';
$ENV{LANGUAGE} = 'C.UTF-8';
$ENV{LANG} = 'C.UTF-8';
}
2018-09-18 09:20:24 +00:00
# copy ARGV because getopt modifies it
my @ARGVORIG = @ARGV;
2020-01-21 23:28:48 +00:00
# obtain the correct defaults for the keyring locations that apt knows
# about
my $apttrusted
= `eval \$(apt-config shell v Dir::Etc::trusted/f); printf \$v`;
my $apttrustedparts
= `eval \$(apt-config shell v Dir::Etc::trustedparts/d); printf \$v`;
2020-01-08 16:44:07 +00:00
chomp(my $hostarch = `dpkg --print-architecture`);
2018-09-18 09:20:24 +00:00
my $options = {
2020-01-08 16:44:07 +00:00
components => ["main"],
variant => "important",
include => [],
architectures => [$hostarch],
mode => 'auto',
2023-09-27 11:52:20 +00:00
format => 'auto',
2024-01-09 10:21:53 +00:00
dpkgopts => '',
aptopts => '',
2020-01-21 23:28:48 +00:00
apttrusted => $apttrusted,
apttrustedparts => $apttrustedparts,
2020-01-08 16:44:07 +00:00
noop => [],
setup_hook => [],
2020-03-22 13:08:21 +00:00
extract_hook => [],
2020-01-08 16:44:07 +00:00
essential_hook => [],
customize_hook => [],
2020-01-10 10:44:15 +00:00
dryrun => 0,
2020-04-09 22:00:36 +00:00
skip => [],
2018-09-18 09:20:24 +00:00
};
2019-02-23 07:43:15 +00:00
my $logfile = undef;
2020-01-08 16:44:07 +00:00
Getopt::Long::Configure('default', 'bundling', 'auto_abbrev',
'ignore_case_always');
2018-09-18 09:20:24 +00:00
GetOptions(
2021-12-14 15:10:25 +00:00
'h|help' => sub { pod2usage(-exitval => 0, -verbose => 1) },
'man' => sub { pod2usage(-exitval => 0, -verbose => 2) },
2020-01-08 16:44:07 +00:00
'version' => sub { print STDOUT "mmdebstrap $VERSION\n"; exit 0; },
2021-11-09 06:31:56 +00:00
'components=s@' => \$options->{components},
'variant=s' => \$options->{variant},
'include=s' => sub {
my ($opt_name, $opt_value) = @_;
2022-09-02 21:35:56 +00:00
my $sanitize_path = sub {
my $pkg = shift;
2023-03-19 08:02:43 +00:00
$pkg = abs_path($pkg)
// error "cannot resolve absolute path of $pkg: $!";
2022-12-22 09:15:27 +00:00
if ($pkg !~ /^\//) {
error "absolute path of $pkg doesn't start with a slash";
}
2022-09-02 21:35:56 +00:00
if (!-f $pkg) {
error "$pkg is not an existing file";
}
2022-12-22 09:15:27 +00:00
if (!-r $pkg) {
error "$pkg is not readable";
}
2022-09-02 21:35:56 +00:00
return $pkg;
};
if ($opt_value =~ /^[?~!(]/) {
# Treat option as a single apt pattern and don't split by comma
# or whitespace -- append it verbatim.
push @{ $options->{include} }, $opt_value;
} elsif ($opt_value =~ /^\.?\.?\//) {
# Treat option as a single path name and don't split by comma
# or whitespace -- append the normalized path.
2022-09-06 11:06:40 +00:00
push @{ $options->{include} }, &{$sanitize_path}($opt_value);
2022-09-02 21:35:56 +00:00
} else {
for my $pkg (split /[,\s]+/, $opt_value) {
# strip leading and trailing whitespace
$pkg =~ s/^\s+|\s+$//g;
# skip if the remainder is an empty string
if ($pkg eq '') {
next;
}
# Make paths canonical absolute paths, resolve symlinks
# and check if it's an existing file.
if ($pkg =~ /^\.?\.?\//) {
2022-09-06 11:06:40 +00:00
$pkg = &{$sanitize_path}($pkg);
2022-09-02 21:35:56 +00:00
}
push @{ $options->{include} }, $pkg;
2021-11-09 06:31:56 +00:00
}
}
2022-09-02 21:35:56 +00:00
# We are not sorting or otherwise normalizing the order of
# arguments to apt because package order matters for "apt install"
# since https://salsa.debian.org/apt-team/apt/-/merge_requests/256
2021-11-09 06:31:56 +00:00
},
2020-01-08 14:41:49 +00:00
'architectures=s@' => \$options->{architectures},
2020-01-08 16:44:07 +00:00
'mode=s' => \$options->{mode},
2024-01-09 10:21:53 +00:00
'dpkgopt=s' => sub {
my ($opt_name, $opt_value) = @_;
if (-r $opt_value) {
open my $fh, '<', $opt_value
or error "failed to open $opt_value: $!";
$options->{dpkgopts} .= do { local $/; <$fh> };
                if ($options->{dpkgopts} !~ /\n$/) {
                    $options->{dpkgopts} .= "\n";
                }
close $fh;
} else {
$options->{dpkgopts} .= $opt_value;
if ($opt_value !~ /\n$/) {
$options->{dpkgopts} .= "\n";
}
}
},
'aptopt=s' => sub {
my ($opt_name, $opt_value) = @_;
if (-r $opt_value) {
open my $fh, '<', $opt_value
or error "failed to open $opt_value: $!";
$options->{aptopts} .= do { local $/; <$fh> };
                if ($options->{aptopts} !~ /\n$/) {
                    $options->{aptopts} .= "\n";
                }
close $fh;
} else {
$options->{aptopts} .= $opt_value;
if ($opt_value !~ /;$/) {
$options->{aptopts} .= ';';
}
if ($opt_value !~ /\n$/) {
$options->{aptopts} .= "\n";
}
}
},
'keyring=s' => sub {
2020-01-08 14:41:49 +00:00
my ($opt_name, $opt_value) = @_;
if ($opt_value =~ /"/) {
2020-01-08 16:41:46 +00:00
error "--keyring: apt cannot handle paths with double quotes:"
. " $opt_value";
2020-01-08 14:41:49 +00:00
}
2020-01-08 16:44:07 +00:00
if (!-e $opt_value) {
2020-01-08 14:41:49 +00:00
error "keyring \"$opt_value\" does not exist";
}
my $abs_path = abs_path($opt_value);
if (!defined $abs_path) {
error "unable to get absolute path of --keyring: $opt_value";
}
            # since abs_path resolved all symlinks for us, we can now test
            # what the actual target is
2020-01-21 12:38:53 +00:00
if (-d $abs_path) {
$options->{apttrustedparts} = $abs_path;
2020-01-08 14:41:49 +00:00
} else {
2020-01-21 12:38:53 +00:00
$options->{apttrusted} = $abs_path;
2020-01-08 14:41:49 +00:00
}
},
2020-01-08 16:44:07 +00:00
's|silent' => sub { $verbosity_level = 0; },
'q|quiet' => sub { $verbosity_level = 0; },
2020-01-08 14:41:49 +00:00
'v|verbose' => sub { $verbosity_level = 2; },
2020-01-08 16:44:07 +00:00
'd|debug' => sub { $verbosity_level = 3; },
2023-09-27 11:52:20 +00:00
'format=s' => \$options->{format},
2020-01-08 14:41:49 +00:00
'logfile=s' => \$logfile,
# no-op options so that mmdebstrap can be used with
# sbuild-createchroot --debootstrap=mmdebstrap
2021-12-14 15:10:25 +00:00
'resolve-deps' => sub { push @{ $options->{noop} }, 'resolve-deps'; },
'merged-usr' => sub { push @{ $options->{noop} }, 'merged-usr'; },
2020-01-08 16:44:07 +00:00
'no-merged-usr' =>
sub { push @{ $options->{noop} }, 'no-merged-usr'; },
'force-check-gpg' =>
sub { push @{ $options->{noop} }, 'force-check-gpg'; },
2022-05-24 02:09:41 +00:00
'setup-hook=s' => sub {
2022-11-14 08:59:59 +00:00
push @{ $options->{setup_hook} }, ["normal", $_[1]];
2022-05-24 02:09:41 +00:00
},
'extract-hook=s' => sub {
2022-11-14 08:59:59 +00:00
push @{ $options->{extract_hook} }, ["normal", $_[1]];
},
'chrooted-extract-hook=s' => sub {
push @{ $options->{extract_hook} }, ["pivoted", $_[1]];
2022-05-24 02:09:41 +00:00
},
'essential-hook=s' => sub {
2022-11-14 08:59:59 +00:00
push @{ $options->{essential_hook} }, ["normal", $_[1]];
},
'chrooted-essential-hook=s' => sub {
push @{ $options->{essential_hook} }, ["pivoted", $_[1]];
2022-05-24 02:09:41 +00:00
},
'customize-hook=s' => sub {
2022-11-14 08:59:59 +00:00
push @{ $options->{customize_hook} }, ["normal", $_[1]];
},
'chrooted-customize-hook=s' => sub {
push @{ $options->{customize_hook} }, ["pivoted", $_[1]];
2022-05-24 02:09:41 +00:00
},
'hook-directory=s' => sub {
2020-08-15 22:50:46 +00:00
my ($opt_name, $opt_value) = @_;
if (!-e $opt_value) {
error "hook directory \"$opt_value\" does not exist";
}
my $abs_path = abs_path($opt_value);
if (!defined $abs_path) {
error( "unable to get absolute path of "
. "--hook-directory: $opt_value");
}
# since abs_path resolved all symlinks for us, we can now test
            # what the actual target is
            if (!-d $abs_path) {
error "hook directory \"$opt_value\" is not a directory";
}
# gather all files starting with special prefixes into the
# respective keys of a hash
my %scripts;
2023-01-04 06:23:56 +00:00
my $count = 0;
2020-08-15 22:50:46 +00:00
opendir(my $dh, $opt_value)
or error "Can't opendir($opt_value): $!";
while (my $entry = readdir $dh) {
2023-01-16 06:55:27 +00:00
# skip the "." and ".." entries
next if $entry eq ".";
next if $entry eq "..";
my $found = 0;
2020-08-15 22:50:46 +00:00
foreach
my $hook ('setup', 'extract', 'essential', 'customize') {
2023-01-16 06:55:27 +00:00
if ($entry =~ m/^\Q$hook\E/) {
if (-x "$opt_value/$entry") {
push @{ $scripts{$hook} }, "$opt_value/$entry";
$count += 1;
$found = 1;
} else {
warning("$opt_value/$entry is named like a "
. "hook but not executable");
}
2020-08-17 16:57:36 +00:00
}
2020-08-15 22:50:46 +00:00
}
2023-01-16 06:55:27 +00:00
if (!$found && -x "$opt_value/$entry") {
warning("$opt_value/$entry: is executable "
. "but not prefixed with a hook name");
}
2020-08-15 22:50:46 +00:00
}
closedir($dh);
2023-01-04 06:23:56 +00:00
if ($count == 0) {
warning "No executable hook scripts found in $opt_value";
return;
}
2020-08-15 22:50:46 +00:00
# add the sorted list associated with each key to the respective
# list of hooks
foreach my $hook (keys %scripts) {
2020-08-17 16:57:36 +00:00
push @{ $options->{"${hook}_hook"} },
2022-11-14 08:59:59 +00:00
(map { ["normal", $_] } (sort @{ $scripts{$hook} }));
2020-08-15 22:50:46 +00:00
}
},
2020-03-07 01:12:21 +00:00
# Sometimes --simulate fails even though non-simulate succeeds because
# in simulate mode, apt cannot rely on dpkg to figure out tricky
# dependency situations and will give up instead when it cannot find
# a solution.
#
# 2020-02-06, #debian-apt on OFTC, times in UTC+1
# 12:52 < DonKult> [...] It works in non-simulation because simulate is
# more picky. If you wanna know why simulate complains
# here prepare for long suffering in dependency hell.
'simulate' => \$options->{dryrun},
'dry-run' => \$options->{dryrun},
2022-09-02 21:23:53 +00:00
'skip=s' => sub {
my ($opt_name, $opt_value) = @_;
for my $skip (split /[,\s]+/, $opt_value) {
# strip leading and trailing whitespace
$skip =~ s/^\s+|\s+$//g;
# skip if the remainder is an empty string
if ($skip eq '') {
next;
}
push @{ $options->{skip} }, $skip;
}
2024-01-08 22:01:53 +00:00
}) or pod2usage(-exitval => 2, -verbose => 0);
2018-09-18 09:20:24 +00:00
2019-02-23 07:43:15 +00:00
if (defined($logfile)) {
2020-01-08 14:41:49 +00:00
open(STDERR, '>', $logfile) or error "cannot open $logfile: $!";
2019-02-23 07:43:15 +00:00
}
2020-01-08 16:44:07 +00:00
foreach my $arg (@{ $options->{noop} }) {
2020-11-27 23:48:18 +00:00
info "the option --$arg is a no-op. It only exists for compatibility"
2020-01-08 16:41:46 +00:00
. " with some debootstrap wrappers.";
2019-02-15 11:42:46 +00:00
}
2020-01-10 10:44:15 +00:00
    if ($options->{dryrun}) {
        foreach my $hook ('setup', 'extract', 'essential', 'customize') {
            if (scalar @{ $options->{"${hook}_hook"} } > 0) {
                warning "In dry-run mode, --$hook-hook options have no effect";
            }
        }
    }
    if ($options->{mode} eq 'chrootless') {
        foreach my $hook ('setup', 'extract', 'essential', 'customize') {
            foreach my $script (@{ $options->{"${hook}_hook"} }) {
                if ($script->[0] eq "pivoted") {
                    error "--chrooted-$hook-hook are illegal in "
                      . "chrootless mode";
                }
            }
        }
    }
2020-01-08 16:44:07 +00:00
my @valid_variants = (
'extract', 'custom', 'essential', 'apt',
'required', 'minbase', 'buildd', 'important',
'debootstrap', '-', 'standard'
);
if (none { $_ eq $options->{variant} } @valid_variants) {
2020-01-08 14:41:49 +00:00
error "invalid variant. Choose from " . (join ', ', @valid_variants);
2018-09-18 09:20:24 +00:00
}
# debootstrap and - are an alias for important
2018-09-23 17:36:07 +00:00
if (any { $_ eq $options->{variant} } ('-', 'debootstrap')) {
2020-01-08 14:41:49 +00:00
$options->{variant} = 'important';
2018-09-18 09:20:24 +00:00
}
2021-11-09 06:31:56 +00:00
# minbase is an alias for required
if ($options->{variant} eq 'minbase') {
$options->{variant} = 'required';
}
2018-09-18 09:20:24 +00:00
# fakeroot is an alias for fakechroot
if ($options->{mode} eq 'fakeroot') {
2020-01-08 14:41:49 +00:00
$options->{mode} = 'fakechroot';
2018-09-18 09:20:24 +00:00
}
# sudo is an alias for root
if ($options->{mode} eq 'sudo') {
2020-01-08 14:41:49 +00:00
$options->{mode} = 'root';
2018-09-18 09:20:24 +00:00
}
2022-09-05 03:50:50 +00:00
my @valid_modes = ('auto', 'root', 'unshare', 'fakechroot', 'chrootless');
2018-09-23 17:36:07 +00:00
if (none { $_ eq $options->{mode} } @valid_modes) {
2020-01-08 14:41:49 +00:00
error "invalid mode. Choose from " . (join ', ', @valid_modes);
2018-09-18 09:20:24 +00:00
}
2020-04-09 16:33:05 +00:00
# sqfs is an alias for squashfs
2023-09-27 11:52:20 +00:00
if ($options->{format} eq 'sqfs') {
$options->{format} = 'squashfs';
2020-04-09 16:33:05 +00:00
}
# dir is an alias for directory
2023-09-27 11:52:20 +00:00
if ($options->{format} eq 'dir') {
$options->{format} = 'directory';
2020-04-09 16:33:05 +00:00
}
2021-03-25 06:04:14 +00:00
my @valid_formats
2024-05-12 15:16:51 +00:00
= ('auto', 'directory', 'tar', 'squashfs', 'ext2', 'ext4', 'null');
2023-09-27 11:52:20 +00:00
if (none { $_ eq $options->{format} } @valid_formats) {
2020-04-09 16:33:05 +00:00
error "invalid format. Choose from " . (join ', ', @valid_formats);
}
2022-01-08 06:44:05 +00:00
# setting PATH for chroot, ldconfig, start-stop-daemon...
2022-05-24 18:35:25 +00:00
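    # DPkg::Path holds the PATH that apt itself uses when it runs dpkg;
    # "apt-config shell" prints a shell assignment which the backticks run
    # through sh so that it can be eval'ed and echoed back to us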
my $defaultpath = `eval \$(apt-config shell v DPkg::Path); printf \$v`;
2022-01-08 06:44:05 +00:00
if (length $ENV{PATH}) {
## no critic (Variables::RequireLocalizedPunctuationVars)
2022-05-24 18:35:25 +00:00
$ENV{PATH} = "$ENV{PATH}:$defaultpath";
2022-01-08 06:44:05 +00:00
} else {
2021-01-06 10:49:29 +00:00
## no critic (Variables::RequireLocalizedPunctuationVars)
2022-05-24 18:35:25 +00:00
$ENV{PATH} = $defaultpath;
2021-01-06 10:49:29 +00:00
}
2021-10-06 19:20:41 +00:00
foreach my $tool (
'dpkg', 'dpkg-deb', 'apt-get', 'apt-cache',
'apt-config', 'tar', 'rm', 'find',
'env'
) {
2022-05-24 13:40:38 +00:00
if (!can_execute $tool) {
2020-11-12 21:36:10 +00:00
error "cannot find $tool";
}
}
2021-10-07 06:12:12 +00:00
{
my $dpkgversion = version->new(0);
my $pid = open my $fh, '-|' // error "failed to fork(): $!";
if ($pid == 0) {
# redirect stderr to /dev/null to hide error messages from dpkg
# versions before 1.20.0
open(STDERR, '>', '/dev/null')
or error "cannot open /dev/null for writing: $!";
exec 'dpkg', '--robot', '--version';
}
chomp(
my $content = do { local $/; <$fh> }
);
close $fh;
# the --robot option was introduced in 1.20.0 but until 1.20.2 the
# output contained a string after the version, separated by a
# whitespace -- since then, it's only the version
2021-11-06 21:01:22 +00:00
if ($? == 0 and $content =~ /^([0-9.]+).*$/) {
2021-10-07 06:12:12 +00:00
# dpkg is new enough for the --robot option
$dpkgversion = version->new($1);
}
if ($dpkgversion < "1.20.0") {
error "need dpkg >= 1.20.0 but have $dpkgversion";
}
}
2021-10-06 19:19:00 +00:00
{
my $aptversion = version->new(0);
my $pid = open my $fh, '-|', 'apt-get',
'--version' // error "failed to fork(): $!";
chomp(
my $content = do { local $/; <$fh> }
);
close $fh;
if ( $? == 0
2023-02-09 09:20:25 +00:00
and $content =~ /^apt (\d+\.\d+\.\d+)\S* \(\S+\)$/am) {
2021-10-06 19:19:00 +00:00
$aptversion = version->new($1);
}
2022-01-07 11:46:35 +00:00
if ($aptversion < "2.3.14") {
error "need apt >= 2.3.14 but have $aptversion";
2021-10-06 19:19:00 +00:00
}
}
2019-03-25 13:35:38 +00:00
my $check_fakechroot_running = sub {
2020-01-08 14:41:49 +00:00
# test if we are inside fakechroot already
# We fork a child process because setting FAKECHROOT_DETECT seems to
# be an irreversible operation for fakechroot.
my $pid = open my $rfh, '-|' // error "failed to fork(): $!";
if ($pid == 0) {
# with the FAKECHROOT_DETECT environment variable set, any program
            # execution will be replaced with the output "fakechroot [version]"
2020-01-09 07:39:40 +00:00
            local $ENV{FAKECHROOT_DETECT} = 1;
2020-01-08 14:41:49 +00:00
exec 'echo', 'If fakechroot is running, this will not be printed';
}
my $content = do { local $/; <$rfh> };
waitpid $pid, 0;
my $result = 0;
2021-09-16 14:12:20 +00:00
if ($? == 0 and $content =~ /^fakechroot [0-9.]+$/) {
2020-01-08 14:41:49 +00:00
$result = 1;
}
return $result;
2019-03-25 13:35:38 +00:00
};
2018-10-23 16:04:05 +00:00
# figure out the mode to use or test whether the chosen mode is legal
if ($options->{mode} eq 'auto') {
2020-01-08 14:41:49 +00:00
if (&{$check_fakechroot_running}()) {
# if mmdebstrap is executed inside fakechroot, then we assume the
# user expects fakechroot mode
$options->{mode} = 'fakechroot';
} elsif ($EFFECTIVE_USER_ID == 0) {
# if mmdebstrap is executed as root, we assume the user wants root
# mode
$options->{mode} = 'root';
2021-01-13 15:05:57 +00:00
} elsif (test_unshare_userns(0)) {
# if we are not root, unshare mode is our best option if
# test_unshare_userns() succeeds
2020-01-08 14:41:49 +00:00
$options->{mode} = 'unshare';
2022-05-24 13:40:38 +00:00
} elsif (can_execute 'fakechroot') {
2020-01-08 14:41:49 +00:00
# the next fallback is fakechroot
# exec ourselves again but within fakechroot
my @prefix = ();
2020-01-08 16:44:07 +00:00
if ($is_covering) {
2020-01-08 14:41:49 +00:00
@prefix = ($EXECUTABLE_NAME, '-MDevel::Cover=-silent,-nogcov');
}
exec 'fakechroot', 'fakeroot', @prefix, $PROGRAM_NAME, @ARGVORIG;
} else {
2023-03-16 21:18:49 +00:00
error( "unable to pick chroot mode automatically (use --mode for "
. "manual selection)");
2020-01-08 14:41:49 +00:00
}
info "automatically chosen mode: $options->{mode}";
2018-10-23 16:04:05 +00:00
} elsif ($options->{mode} eq 'root') {
2020-01-08 14:41:49 +00:00
if ($EFFECTIVE_USER_ID != 0) {
error "need to be root";
}
2018-10-23 16:04:05 +00:00
} elsif ($options->{mode} eq 'fakechroot') {
2020-01-08 14:41:49 +00:00
if (&{$check_fakechroot_running}()) {
# fakechroot is already running
2022-05-24 13:40:38 +00:00
} elsif (!can_execute 'fakechroot') {
2020-01-08 14:41:49 +00:00
error "need working fakechroot binary";
} else {
# exec ourselves again but within fakechroot
my @prefix = ();
2020-01-08 16:44:07 +00:00
if ($is_covering) {
2020-01-08 14:41:49 +00:00
@prefix = ($EXECUTABLE_NAME, '-MDevel::Cover=-silent,-nogcov');
}
exec 'fakechroot', 'fakeroot', @prefix, $PROGRAM_NAME, @ARGVORIG;
}
2018-10-23 16:04:05 +00:00
} elsif ($options->{mode} eq 'unshare') {
2021-01-13 15:05:57 +00:00
        # For unshare mode to work we either need to already be the root user
        # (in which case we do not have to unshare the user namespace anymore
        # but still need to be able to unshare the mount namespace)...
2021-02-06 09:11:53 +00:00
#
# We need to call unshare with "--propagation unchanged" or otherwise
# we get 'cannot change root filesystem propagation' when running
# mmdebstrap inside a chroot for which the root of the chroot is not
# its own mount point.
2021-01-13 15:05:57 +00:00
if ($EFFECTIVE_USER_ID == 0
2021-02-06 09:11:53 +00:00
&& 0 != system 'unshare --mount --propagation unchanged -- true') {
2021-01-13 15:05:57 +00:00
error "unable to unshare the mount namespace";
}
# ...or we are not root and then we need to be able to unshare the user
# namespace.
2023-03-16 07:14:39 +00:00
if ($EFFECTIVE_USER_ID != 0) {
test_unshare_userns(1);
2020-01-08 14:41:49 +00:00
}
2018-10-23 16:04:05 +00:00
} elsif ($options->{mode} eq 'chrootless') {
2024-01-08 21:37:13 +00:00
if (any { $_ eq 'check/chrootless' } @{ $options->{skip} }) {
info "skipping check/chrootless as requested";
} else {
my $ischroot = 0 == system 'ischroot';
if ( $EFFECTIVE_USER_ID == 0
&& !exists $ENV{FAKEROOTKEY}
&& !$ischroot) {
error
"running chrootless mode as root without fakeroot might "
. "damage the host system if not run inside a chroot";
}
2020-05-02 21:54:04 +00:00
}
2018-10-23 16:04:05 +00:00
} else {
2020-01-08 14:41:49 +00:00
error "unknown mode: $options->{mode}";
2018-09-18 09:20:24 +00:00
}
2021-08-26 13:40:27 +00:00
$options->{canmount} = 1;
if ($options->{mode} eq 'root') {
2021-01-13 17:40:24 +00:00
# It's possible to be root but not be able to mount anything.
# This is for example the case when running under docker.
# Mounting needs CAP_SYS_ADMIN which might not be available.
#
# We test for CAP_SYS_ADMIN using the capget syscall.
# We cannot use cap_get_proc from sys/capability.h because Perl.
# We don't use capsh because we don't want to depend on libcap2-bin
my $hdrp = pack(
"Li", # __u32 followed by int
$_LINUX_CAPABILITY_VERSION_3, # available since Linux 2.6.26
0 # caps of this process
);
my $datap = pack("LLLLLL", 0, 0, 0, 0, 0, 0); # six __u32
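        # capget fills $datap with two {effective, permitted, inheritable}
        # triplets; the first __u32 holds the lower 32 bits of the effective
        # set, which is all we need for the CAP_SYS_ADMIN test below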
0 == syscall &SYS_capget, $hdrp, $datap
or error "capget failed: $!";
my ($effective, undef) = unpack "LLLLLL", $datap;
2022-09-02 21:38:53 +00:00
if ((($effective >> $CAP_SYS_ADMIN) & 1) != 1) {
2021-08-27 09:53:11 +00:00
warning
"cannot mount because CAP_SYS_ADMIN is not in the effective set";
$options->{canmount} = 0;
}
if (0 == syscall &SYS_prctl, $PR_CAPBSET_READ, $CAP_SYS_ADMIN) {
warning
"cannot mount because CAP_SYS_ADMIN is not in the bounding set";
2021-08-26 13:40:27 +00:00
$options->{canmount} = 0;
2021-01-13 17:40:24 +00:00
}
# To test whether we can use mount without actually trying to mount
# something we try unsharing the mount namespace. If this is allowed,
# then we are also allowed to mount.
2021-02-19 11:53:14 +00:00
#
# We need to call unshare with "--propagation unchanged" or otherwise
# we get 'cannot change root filesystem propagation' when running
# mmdebstrap inside a chroot for which the root of the chroot is not
# its own mount point.
2021-08-26 13:40:27 +00:00
if (0 != system 'unshare --mount --propagation unchanged -- true') {
2021-01-13 17:40:24 +00:00
# if we cannot unshare the mount namespace as root, then we also
# cannot mount
2021-08-26 13:40:27 +00:00
warning "cannot mount because unshare --mount failed";
$options->{canmount} = 0;
}
}
if (any { $_ eq $options->{mode} } ('root', 'unshare')) {
2022-05-24 13:40:38 +00:00
if (!can_execute 'mount') {
2021-08-26 13:40:27 +00:00
warning "cannot execute mount";
$options->{canmount} = 0;
2021-01-13 17:40:24 +00:00
}
}
2021-08-26 13:40:27 +00:00
# we can only possibly mount in root and unshare mode
if (none { $_ eq $options->{mode} } ('root', 'unshare')) {
$options->{canmount} = 0;
}
2019-10-27 21:04:21 +00:00
my @architectures = ();
2020-01-08 16:44:07 +00:00
foreach my $archs (@{ $options->{architectures} }) {
2020-01-08 14:41:49 +00:00
foreach my $arch (split /[,\s]+/, $archs) {
# strip leading and trailing whitespace
$arch =~ s/^\s+|\s+$//g;
# skip if the remainder is an empty string
if ($arch eq '') {
next;
}
            # do not append architecture if it's already in the list
2020-01-08 16:44:07 +00:00
if (any { $_ eq $arch } @architectures) {
2020-01-08 14:41:49 +00:00
next;
}
push @architectures, $arch;
}
2019-10-27 21:04:21 +00:00
}
2020-01-08 16:44:07 +00:00
$options->{nativearch} = $hostarch;
2019-10-27 21:04:21 +00:00
$options->{foreignarchs} = [];
if (scalar @architectures == 0) {
2020-01-08 16:41:46 +00:00
warning "empty architecture list: falling back to native architecture"
. " $hostarch";
2019-10-27 21:04:21 +00:00
} elsif (scalar @architectures == 1) {
2020-01-08 14:41:49 +00:00
$options->{nativearch} = $architectures[0];
2019-10-27 21:04:21 +00:00
} else {
2020-01-08 14:41:49 +00:00
$options->{nativearch} = $architectures[0];
2020-01-08 16:44:07 +00:00
push @{ $options->{foreignarchs} },
@architectures[1 .. $#architectures];
2019-10-27 21:04:21 +00:00
}
debug "Native architecture (outside): $hostarch";
debug "Native architecture (inside): $options->{nativearch}";
2020-01-08 16:44:07 +00:00
debug("Foreign architectures (inside): "
. (join ', ', @{ $options->{foreignarchs} }));
2018-09-18 09:20:24 +00:00
2018-09-21 06:00:06 +00:00
{
2020-01-08 14:41:49 +00:00
# FIXME: autogenerate this list
my $deb2qemu = {
2020-01-08 16:44:07 +00:00
alpha => 'alpha',
amd64 => 'x86_64',
arm => 'arm',
arm64 => 'aarch64',
armel => 'arm',
armhf => 'arm',
hppa => 'hppa',
i386 => 'i386',
m68k => 'm68k',
mips => 'mips',
mips64 => 'mips64',
2020-01-08 14:41:49 +00:00
mips64el => 'mips64el',
2020-01-08 16:44:07 +00:00
mipsel => 'mipsel',
powerpc => 'ppc',
ppc64 => 'ppc64',
ppc64el => 'ppc64le',
riscv64 => 'riscv64',
s390x => 's390x',
sh4 => 'sh4',
sparc => 'sparc',
sparc64 => 'sparc64',
2020-01-08 14:41:49 +00:00
};
2020-05-01 05:39:26 +00:00
if (any { $_ eq 'check/qemu' } @{ $options->{skip} }) {
info "skipping check/qemu as requested";
} elsif ($options->{mode} eq "chrootless") {
2020-04-14 16:25:02 +00:00
info "skipping emulation check in chrootless mode";
2021-02-04 16:47:10 +00:00
} elsif ($options->{variant} eq "extract") {
info "skipping emulation check for extract variant";
2020-04-14 16:25:02 +00:00
} elsif ($hostarch ne $options->{nativearch}) {
2022-05-24 13:40:38 +00:00
if (!can_execute 'arch-test') {
2020-04-10 10:25:24 +00:00
error "install arch-test for foreign architecture support";
}
2020-01-08 14:41:49 +00:00
my $withemu = 0;
2020-01-08 16:44:07 +00:00
my $noemu = 0;
2020-01-08 14:41:49 +00:00
{
my $pid = open my $fh, '-|' // error "failed to fork(): $!";
if ($pid == 0) {
{
2020-01-09 07:39:40 +00:00
## no critic (TestingAndDebugging::ProhibitNoWarnings)
# don't print a warning if the following fails
no warnings;
2020-01-08 14:41:49 +00:00
exec 'arch-test', $options->{nativearch};
}
2020-01-08 15:23:34 +00:00
# if exec didn't work (for example because the arch-test
# program is missing) prepare for the worst and assume that
# the architecture cannot be executed
2020-01-08 16:41:46 +00:00
print "$options->{nativearch}: not supported on this"
. " machine/kernel\n";
2020-01-08 14:41:49 +00:00
exit 1;
}
2020-01-08 16:44:07 +00:00
chomp(
my $content = do { local $/; <$fh> }
);
2020-01-08 14:41:49 +00:00
close $fh;
if ($? == 0 and $content eq "$options->{nativearch}: ok") {
$withemu = 1;
}
}
{
my $pid = open my $fh, '-|' // error "failed to fork(): $!";
if ($pid == 0) {
{
2020-01-09 07:39:40 +00:00
## no critic (TestingAndDebugging::ProhibitNoWarnings)
# don't print a warning if the following fails
no warnings;
2020-01-08 14:41:49 +00:00
exec 'arch-test', '-n', $options->{nativearch};
}
2020-01-08 15:23:34 +00:00
# if exec didn't work (for example because the arch-test
# program is missing) prepare for the worst and assume that
# the architecture cannot be executed
2020-01-08 16:41:46 +00:00
print "$options->{nativearch}: not supported on this"
. " machine/kernel\n";
2020-01-08 14:41:49 +00:00
exit 1;
}
2020-01-08 16:44:07 +00:00
chomp(
my $content = do { local $/; <$fh> }
);
2020-01-08 14:41:49 +00:00
close $fh;
if ($? == 0 and $content eq "$options->{nativearch}: ok") {
$noemu = 1;
}
}
# four different outcomes, depending on whether arch-test
# succeeded with or without emulation
#
# withemu | noemu |
# --------+-------+-----------------
# 0 | 0 | test why emu doesn't work and quit
# 0 | 1 | should never happen
# 1 | 0 | use qemu emulation
# 1 | 1 | don't use qemu emulation
if ($withemu == 0 and $noemu == 0) {
{
2020-01-08 16:44:07 +00:00
open my $fh, '<', '/proc/filesystems'
or error "failed to open /proc/filesystems: $!";
2020-01-09 07:39:40 +00:00
unless (grep { /^nodev\tbinfmt_misc$/ } (<$fh>)) {
2020-01-08 16:41:46 +00:00
warning "binfmt_misc not found in /proc/filesystems --"
. " is the module loaded?";
2020-01-08 14:41:49 +00:00
}
close $fh;
}
{
2020-01-08 16:44:07 +00:00
open my $fh, '<', '/proc/mounts'
or error "failed to open /proc/mounts: $!";
unless (
2020-01-09 07:39:40 +00:00
grep {
2020-04-10 10:25:45 +00:00
/^binfmt_misc\s+
\/proc\/sys\/fs\/binfmt_misc\s+
binfmt_misc\s+/x
2020-01-09 07:39:40 +00:00
} (<$fh>)
2020-01-08 16:44:07 +00:00
) {
2020-01-08 16:41:46 +00:00
warning "binfmt_misc not found in /proc/mounts -- not"
. " mounted?";
2020-01-08 14:41:49 +00:00
}
close $fh;
}
{
2020-01-08 16:44:07 +00:00
if (!exists $deb2qemu->{ $options->{nativearch} }) {
2020-01-08 16:41:46 +00:00
warning "no mapping from $options->{nativearch} to"
. " qemu-user binary";
2022-05-24 18:36:01 +00:00
} elsif (!can_execute 'update-binfmts') {
warning "cannot find update-binfmts";
2020-01-08 14:41:49 +00:00
} else {
2020-01-08 16:44:07 +00:00
my $binfmt_identifier
= 'qemu-' . $deb2qemu->{ $options->{nativearch} };
2022-05-24 18:36:01 +00:00
open my $fh, '-|', 'update-binfmts', '--display',
2020-01-08 16:44:07 +00:00
$binfmt_identifier // error "failed to fork(): $!";
chomp(
my $binfmts = do { local $/; <$fh> }
);
2020-01-08 14:41:49 +00:00
close $fh;
2020-04-10 10:26:14 +00:00
if ($? != 0 || $binfmts eq '') {
2020-01-08 16:41:46 +00:00
warning "$binfmt_identifier is not a supported"
. " binfmt name";
2020-01-08 14:41:49 +00:00
}
}
}
2020-01-08 16:41:46 +00:00
error "$options->{nativearch} can neither be executed natively"
. " nor via qemu user emulation with binfmt_misc";
2020-01-08 14:41:49 +00:00
} elsif ($withemu == 0 and $noemu == 1) {
error "arch-test succeeded without emu but not with emu";
} elsif ($withemu == 1 and $noemu == 0) {
2022-05-24 12:18:51 +00:00
info "$options->{nativearch} cannot be executed natively, but"
. " transparently using qemu-user binfmt emulation";
2020-01-08 16:44:07 +00:00
if (!exists $deb2qemu->{ $options->{nativearch} }) {
2020-01-08 16:41:46 +00:00
error "no mapping from $options->{nativearch} to qemu-user"
. " binary";
2020-01-08 14:41:49 +00:00
}
2020-01-08 16:44:07 +00:00
$options->{qemu} = $deb2qemu->{ $options->{nativearch} };
2020-01-08 14:41:49 +00:00
} elsif ($withemu == 1 and $noemu == 1) {
2020-01-08 16:41:46 +00:00
info "$options->{nativearch} is different from $hostarch but"
. " can be executed natively";
2020-01-08 14:41:49 +00:00
} else {
error "logic error";
}
} else {
2020-01-08 16:41:46 +00:00
info "chroot architecture $options->{nativearch} is equal to the"
. " host's architecture";
2020-01-08 14:41:49 +00:00
}
2018-09-18 09:20:24 +00:00
}
2024-02-01 04:58:32 +00:00
if (defined $options->{qemu} && $options->{mode} eq 'fakechroot') {
if (!can_execute 'dpkg-architecture') {
error "cannot find dpkg-architecture";
}
}
2018-10-02 08:09:22 +00:00
{
2020-11-14 22:25:07 +00:00
$options->{suite} = undef;
2020-01-08 14:41:49 +00:00
if (scalar @ARGV > 0) {
2020-11-14 22:25:07 +00:00
$options->{suite} = shift @ARGV;
2020-01-08 14:41:49 +00:00
if (scalar @ARGV > 0) {
$options->{target} = shift @ARGV;
} else {
$options->{target} = '-';
}
} else {
2020-01-08 16:44:07 +00:00
info
"No SUITE specified, expecting sources.list on standard input";
2020-01-08 14:41:49 +00:00
$options->{target} = '-';
}
2020-01-22 22:30:28 +00:00
my $sourceslists = [];
2020-11-14 22:25:07 +00:00
if (!defined $options->{suite}) {
2020-01-08 14:41:49 +00:00
# If no suite was specified, then the whole sources.list has to
# come from standard input
2020-11-27 23:48:18 +00:00
info "reading sources.list from standard input...";
2020-01-22 22:30:28 +00:00
my $content = do {
local $/;
## no critic (InputOutput::ProhibitExplicitStdin)
<STDIN>;
};
2023-01-16 14:13:39 +00:00
if ($content eq "") {
warning "sources.list from standard input is empty";
} else {
my $type = guess_sources_format($content);
if (!defined $type
|| ($type ne "deb822" and $type ne "one-line")) {
error "cannot determine sources.list format";
}
push @{$sourceslists},
{
type => $type,
fname => undef,
content => $content,
};
2020-01-22 22:30:28 +00:00
}
2020-01-08 14:41:49 +00:00
} else {
my @components = ();
2020-01-08 16:44:07 +00:00
foreach my $comp (@{ $options->{components} }) {
2020-01-08 14:41:49 +00:00
my @comps = split /[,\s]+/, $comp;
foreach my $c (@comps) {
# strip leading and trailing whitespace
$c =~ s/^\s+|\s+$//g;
# skip if the remainder is an empty string
if ($c eq "") {
next;
}
# do not append component if it's already in the list
2020-01-08 16:44:07 +00:00
if (any { $_ eq $c } @components) {
2020-01-08 14:41:49 +00:00
next;
}
push @components, $c;
}
}
my $compstr = join " ", @components;
2023-01-16 14:13:39 +00:00
# From the suite name we can maybe infer which key we need. If we
# can infer this information, then we need to check whether the
# currently running apt actually trusts this key or not. If it
# doesn't, then we need to add a signed-by line to the sources.list
# entry.
2020-08-24 14:20:04 +00:00
my $signedby = '';
my %suite_by_vendor = get_suite_by_vendor();
2023-01-16 13:58:23 +00:00
my $gpgproc = sub {
2020-11-14 22:25:07 +00:00
my $keyring
= get_keyring_by_suite($options->{suite}, \%suite_by_vendor);
2020-08-24 14:20:04 +00:00
if (!defined $keyring) {
2023-03-16 21:18:49 +00:00
debug "get_keyring_by_suite() cannot find keyring";
2023-01-16 13:58:23 +00:00
return '';
2020-01-08 14:41:49 +00:00
}
2020-01-21 23:29:38 +00:00
2020-01-08 14:41:49 +00:00
            # we can only check if we need the signed-by entry if the
            # automatically chosen keyring exists
2020-01-21 23:29:38 +00:00
if (!defined $keyring || !-e $keyring) {
2023-03-16 21:18:49 +00:00
debug "found keyring does not exist";
2023-01-16 13:58:23 +00:00
return '';
2020-01-21 23:29:38 +00:00
}
# we can only check key material if gpg is installed
my $gpghome = tempdir(
"mmdebstrap.gpghome.XXXXXXXXXXXX",
TMPDIR => 1,
CLEANUP => 1
);
my @gpgcmd = (
'gpg', '--quiet',
'--ignore-time-conflict', '--no-options',
'--no-default-keyring', '--homedir',
$gpghome, '--no-auto-check-trustdb',
);
2020-01-22 22:30:56 +00:00
my ($ret, $message);
2020-01-21 23:29:38 +00:00
{
2020-01-22 22:30:56 +00:00
my $fh;
{
# change warning handler to prevent message
# Can't exec "gpg": No such file or directory
local $SIG{__WARN__} = sub { $message = shift; };
$ret = open $fh, '-|', @gpgcmd, '--version';
}
# we only want to check if the gpg command exists
close $fh;
2020-01-21 23:29:38 +00:00
}
if ($? != 0 || !defined $ret || defined $message) {
2023-01-16 13:58:23 +00:00
warning
"gpg --version failed: cannot infer signed-by value";
return '';
2020-01-21 23:29:38 +00:00
}
2021-04-15 00:00:39 +00:00
# initialize gpg trustdb with empty one
{
2022-06-04 06:30:53 +00:00
0 == system(@gpgcmd, '--update-trustdb')
or error "gpg failed to initialize trustdb:: $?";
2021-04-15 00:00:39 +00:00
}
2022-09-11 19:12:14 +00:00
if (!-d $options->{apttrustedparts}) {
warning "$options->{apttrustedparts} doesn't exist";
2023-01-16 13:58:23 +00:00
return '';
2022-09-11 19:12:14 +00:00
}
2020-01-21 23:29:38 +00:00
# find all the fingerprints of the keys apt currently
# knows about
2021-04-15 00:00:39 +00:00
my @keyrings = ();
2023-03-16 21:18:49 +00:00
opendir my $dh, $options->{apttrustedparts}
2020-01-21 23:29:38 +00:00
or error "cannot read $options->{apttrustedparts}";
while (my $filename = readdir $dh) {
if ($filename !~ /\.(asc|gpg)$/) {
next;
2020-01-08 14:41:49 +00:00
}
2021-04-15 00:00:39 +00:00
$filename = "$options->{apttrustedparts}/$filename";
# skip empty keyrings
-s "$filename" || next;
2023-03-16 21:18:49 +00:00
push @keyrings, $filename;
2020-01-21 23:29:38 +00:00
}
2020-08-18 07:35:56 +00:00
closedir $dh;
2021-04-15 00:00:39 +00:00
if (-s $options->{apttrusted}) {
push @keyrings, $options->{apttrusted};
2020-01-21 23:29:38 +00:00
}
my @aptfingerprints = ();
2021-04-15 00:00:39 +00:00
if (scalar @keyrings == 0) {
2023-03-16 21:18:49 +00:00
debug "no keyring is trusted by apt";
2023-01-16 13:58:23 +00:00
return " [signed-by=\"$keyring\"]";
2020-01-21 23:29:38 +00:00
}
2023-01-16 14:01:30 +00:00
info "finding correct signed-by value...";
2023-01-16 07:09:54 +00:00
my $progress = 0.0;
print_progress($progress);
for (my $i = 0 ; $i < scalar @keyrings ; $i++) {
my $k = $keyrings[$i];
open(my $fh, '-|', @gpgcmd, '--with-colons',
'--show-keys', $k) // error "failed to fork(): $!";
2020-01-22 22:30:56 +00:00
while (my $line = <$fh>) {
if ($line !~ /^fpr:::::::::([^:]+):/) {
next;
}
push @aptfingerprints, $1;
2020-01-08 14:41:49 +00:00
}
2020-01-22 22:30:56 +00:00
close $fh;
2023-01-16 07:09:54 +00:00
if ($? != 0) {
warning("gpg failed to read $k");
}
print_progress($i / (scalar @keyrings) * 100.0, undef);
2020-01-21 23:29:38 +00:00
}
2023-01-16 07:09:54 +00:00
print_progress("done");
2020-01-21 23:29:38 +00:00
if (scalar @aptfingerprints == 0) {
2023-03-16 21:18:49 +00:00
debug "no fingerprints found";
2023-01-16 13:58:23 +00:00
return " [signed-by=\"$keyring\"]";
2020-01-21 23:29:38 +00:00
}
# check if all fingerprints from the keyring that we guessed
# are known by apt and only add signed-by option if that's not
# the case
my @suitefingerprints = ();
2020-01-22 22:30:56 +00:00
{
2021-04-15 00:00:39 +00:00
open(my $fh, '-|', @gpgcmd, '--with-colons', '--show-keys',
$keyring) // error "failed to fork(): $!";
2020-01-22 22:30:56 +00:00
while (my $line = <$fh>) {
if ($line !~ /^fpr:::::::::([^:]+):/) {
next;
}
# if this fingerprint is not known by apt, then we need
                    # to add the signed-by option
if (none { $_ eq $1 } @aptfingerprints) {
2023-03-16 21:18:49 +00:00
debug "fingerprint $1 is not trusted by apt";
2023-01-16 13:58:23 +00:00
return " [signed-by=\"$keyring\"]";
2020-01-22 22:30:56 +00:00
}
2020-01-08 14:41:49 +00:00
}
2020-01-22 22:30:56 +00:00
close $fh;
2023-01-16 13:58:23 +00:00
if ($? != 0) {
warning "gpg failed -- cannot infer signed-by value";
}
2020-01-08 14:41:49 +00:00
}
2023-01-16 13:58:23 +00:00
return '';
};
if (any { $_ eq 'check/signed-by' } @{ $options->{skip} }) {
info "skipping check/signed-by as requested";
} else {
$signedby = $gpgproc->();
2020-01-08 14:41:49 +00:00
}
if (scalar @ARGV > 0) {
for my $arg (@ARGV) {
if ($arg eq '-') {
2020-11-27 23:48:18 +00:00
info 'reading sources.list from standard input...';
2020-01-22 22:30:28 +00:00
my $content = do {
local $/;
## no critic (InputOutput::ProhibitExplicitStdin)
<STDIN>;
};
2023-01-16 14:13:39 +00:00
if ($content eq "") {
warning
"sources.list from standard input is empty";
2020-01-22 22:30:28 +00:00
} else {
2023-01-16 14:13:39 +00:00
my $type = guess_sources_format($content);
if (!defined $type
|| ($type ne 'deb822' and $type ne 'one-line'))
{
error "cannot determine sources.list format";
}
# if last entry is of same type and without filename,
# then append
if ( scalar @{$sourceslists} > 0
&& $sourceslists->[-1]{type} eq $type
&& !defined $sourceslists->[-1]{fname}) {
$sourceslists->[-1]{content}
.= ($type eq 'one-line' ? "\n" : "\n\n")
. $content;
} else {
push @{$sourceslists},
{
type => $type,
fname => undef,
content => $content,
};
}
2020-01-22 22:30:28 +00:00
}
2020-01-08 14:41:49 +00:00
} elsif ($arg =~ /^deb(-src)? /) {
2020-01-22 22:30:28 +00:00
my $content = "$arg\n";
# if last entry is of same type and without filename,
# then append
if ( scalar @{$sourceslists} > 0
&& $sourceslists->[-1]{type} eq 'one-line'
&& !defined $sourceslists->[-1]{fname}) {
$sourceslists->[-1]{content} .= "\n" . $content;
} else {
push @{$sourceslists},
{
type => 'one-line',
fname => undef,
content => $content,
};
}
2020-01-08 14:41:49 +00:00
} elsif ($arg =~ /:\/\//) {
2020-11-14 22:25:07 +00:00
my $content = join ' ',
(
"deb$signedby",
$arg, $options->{suite}, "$compstr\n"
);
2020-01-22 22:30:28 +00:00
# if last entry is of same type and without filename,
# then append
if ( scalar @{$sourceslists} > 0
&& $sourceslists->[-1]{type} eq 'one-line'
&& !defined $sourceslists->[-1]{fname}) {
$sourceslists->[-1]{content} .= "\n" . $content;
} else {
push @{$sourceslists},
{
type => 'one-line',
fname => undef,
content => $content,
};
}
2020-01-08 14:41:49 +00:00
} elsif (-f $arg) {
2020-01-22 22:30:28 +00:00
my $content = '';
2020-01-08 14:41:49 +00:00
open my $fh, '<', $arg or error "cannot open $arg: $!";
while (my $line = <$fh>) {
2020-01-22 22:30:28 +00:00
$content .= $line;
2020-01-08 14:41:49 +00:00
}
close $fh;
2023-01-16 14:13:39 +00:00
if ($content eq "") {
warning "$arg is empty";
2020-01-22 22:30:28 +00:00
} else {
2023-01-16 14:13:39 +00:00
my $type = undef;
if ($arg =~ /\.list$/) {
$type = 'one-line';
} elsif ($arg =~ /\.sources$/) {
$type = 'deb822';
} else {
$type = guess_sources_format($content);
}
if (!defined $type
|| ($type ne 'deb822' and $type ne 'one-line'))
{
error "cannot determine sources.list format";
}
push @{$sourceslists},
{
type => $type,
fname => basename($arg),
content => $content,
};
2020-01-22 22:30:28 +00:00
}
2023-01-16 14:13:39 +00:00
} elsif ($arg eq '') {
# empty
2020-01-08 14:41:49 +00:00
} else {
error "invalid mirror: $arg";
}
}
} else {
2020-08-24 14:20:04 +00:00
my $sourceslist
2020-11-14 22:25:07 +00:00
= get_sourceslist_by_suite($options->{suite},
$options->{nativearch},
2020-08-24 14:20:04 +00:00
$signedby, $compstr, \%suite_by_vendor);
2020-01-22 22:30:28 +00:00
push @{$sourceslists},
{
type => 'one-line',
fname => undef,
content => $sourceslist,
};
2020-01-08 14:41:49 +00:00
}
}
2020-01-22 22:30:28 +00:00
if (scalar @{$sourceslists} == 0) {
2023-01-16 14:13:39 +00:00
warning "empty apt sources.list";
2020-01-08 14:41:49 +00:00
}
2020-01-22 22:30:28 +00:00
debug("sources list entries:");
for my $list (@{$sourceslists}) {
if (defined $list->{fname}) {
debug("fname: $list->{fname}");
}
debug("type: $list->{type}");
debug("content:");
for my $line (split "\n", $list->{content}) {
debug(" $line");
}
}
$options->{sourceslists} = $sourceslists;
2018-09-18 09:20:24 +00:00
}
2022-07-28 14:58:47 +00:00
if ($options->{target} eq '-') {
if (POSIX::isatty STDOUT) {
error "stdout is a an interactive tty";
}
} else {
2020-01-08 14:41:49 +00:00
my $abs_path = abs_path($options->{target});
if (!defined $abs_path) {
2020-01-08 16:41:46 +00:00
error "unable to get absolute path of target directory"
. " $options->{target}";
2020-01-08 14:41:49 +00:00
}
$options->{target} = $abs_path;
2018-09-18 09:20:24 +00:00
}
if ($options->{target} eq '/') {
2020-01-08 14:41:49 +00:00
error "refusing to use the filesystem root as output directory";
2018-09-18 09:20:24 +00:00
}
2019-01-20 09:41:29 +00:00
my $tar_compressor = get_tar_compressor($options->{target});
2018-09-18 09:20:24 +00:00
2020-04-09 16:33:05 +00:00
# figure out the right format
2023-09-27 11:52:20 +00:00
if ($options->{format} eq 'auto') {
2021-05-04 13:01:25 +00:00
# (stat(...))[6] is the device identifier which contains the major and
# minor numbers for character special files
# major 1 and minor 3 is /dev/null on Linux
if ( $options->{target} eq '/dev/null'
and $OSNAME eq 'linux'
and -c '/dev/null'
and major((stat("/dev/null"))[6]) == 1
and minor((stat("/dev/null"))[6]) == 3) {
2023-09-27 11:52:20 +00:00
$options->{format} = 'null';
2021-05-04 13:01:25 +00:00
} elsif ($options->{target} eq '-'
and $OSNAME eq 'linux'
and major((stat(STDOUT))[6]) == 1
and minor((stat(STDOUT))[6]) == 3) {
# by checking the major and minor number of the STDOUT fd we also
# can detect redirections to /dev/null and choose the null format
# accordingly
2023-09-27 11:52:20 +00:00
$options->{format} = 'null';
2021-03-25 06:04:14 +00:00
} elsif ($options->{target} ne '-' and -d $options->{target}) {
2023-09-27 11:52:20 +00:00
$options->{format} = 'directory';
2020-04-09 16:33:05 +00:00
} elsif (
defined $tar_compressor
or $options->{target} =~ /\.tar$/
or $options->{target} eq '-'
or -p $options->{target} # named pipe (fifo)
or -c $options->{target} # character special like /dev/null
) {
2023-09-27 11:52:20 +00:00
$options->{format} = 'tar';
2020-04-09 16:33:05 +00:00
# check if the compressor is installed
if (defined $tar_compressor) {
my $pid = fork() // error "fork() failed: $!";
if ($pid == 0) {
open(STDOUT, '>', '/dev/null')
or error "cannot open /dev/null for writing: $!";
open(STDIN, '<', '/dev/null')
or error "cannot open /dev/null for reading: $!";
exec { $tar_compressor->[0] } @{$tar_compressor}
or error("cannot exec "
. (join " ", @{$tar_compressor})
. ": $!");
}
waitpid $pid, 0;
if ($? != 0) {
error("failed to start " . (join " ", @{$tar_compressor}));
}
}
} elsif ($options->{target} =~ /\.(squashfs|sqfs)$/) {
2023-09-27 11:52:20 +00:00
$options->{format} = 'squashfs';
2020-04-09 16:33:05 +00:00
# check if tar2sqfs is installed
2020-01-08 14:41:49 +00:00
my $pid = fork() // error "fork() failed: $!";
if ($pid == 0) {
2020-01-08 16:44:07 +00:00
open(STDOUT, '>', '/dev/null')
or error "cannot open /dev/null for writing: $!";
open(STDIN, '<', '/dev/null')
or error "cannot open /dev/null for reading: $!";
2020-04-09 16:33:05 +00:00
exec('tar2sqfs', '--version')
or error("cannot exec tar2sqfs --version: $!");
2020-01-08 14:41:49 +00:00
}
waitpid $pid, 0;
if ($? != 0) {
2020-04-09 16:33:05 +00:00
error("failed to start tar2sqfs --version");
2020-01-08 14:41:49 +00:00
}
2020-04-09 16:33:05 +00:00
} elsif ($options->{target} =~ /\.ext2$/) {
2023-09-27 11:52:20 +00:00
$options->{format} = 'ext2';
2020-04-09 16:33:05 +00:00
# check if the installed version of genext2fs supports tarballs on
# stdin
2020-11-12 14:49:10 +00:00
(undef, my $filename) = tempfile(
"mmdebstrap.ext2.XXXXXXXXXXXX",
OPEN => 0,
TMPDIR => 1
);
2020-04-09 16:33:05 +00:00
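            # genext2fs flags: -B block size in bytes, -b number of blocks,
            # -N number of inodes, -a populate the image from a tar archive
            # ('-' meaning standard input)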
open my $fh, '|-', 'genext2fs', '-B', '1024', '-b', '8', '-N',
2020-07-09 05:34:03 +00:00
'11', '-a', '-', $filename // error "failed to fork(): $!";
2020-04-09 16:33:05 +00:00
# write 10240 null-bytes to genext2fs -- this represents an empty
# tar archive
print $fh ("\0" x 10240)
or error "cannot write to genext2fs process";
close $fh;
my $exitstatus = $?;
unlink $filename // die "cannot unlink $filename";
if ($exitstatus != 0) {
error "genext2fs failed with exit status: $exitstatus";
}
2024-05-12 15:16:51 +00:00
} elsif ($options->{target} =~ /\.ext4$/) {
$options->{format} = 'ext4';
# check if the installed version of e2fsprogs supports tarballs on
# stdin
(undef, my $filename) = tempfile(
"mmdebstrap.ext4.XXXXXXXXXXXX",
OPEN => 0,
TMPDIR => 1
);
# creating file to suppress message "Creating regular file ..."
{ open my $fh, '>', $filename; }
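            # -d - fills the filesystem from a tar archive on standard input
            # (only recent e2fsprogs support this, hence this check); the
            # trailing 16384 is the filesystem size in 4096 byte blocks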
open my $fh, '|-', 'mke2fs', '-q', '-F', '-o', 'Linux', '-T',
'ext4', '-b', '4096', '-d', '-', $filename,
'16384' // error "failed to fork(): $!";
# write 10240 null-bytes to mke2fs -- this represents an empty
# tar archive
print $fh ("\0" x 10240)
or error "cannot write to mke2fs process";
close $fh;
my $exitstatus = $?;
unlink $filename // die "cannot unlink $filename";
if ($exitstatus != 0) {
error "mke2fs failed with exit status: $exitstatus";
}
2020-04-09 16:33:05 +00:00
} else {
2023-09-27 11:52:20 +00:00
$options->{format} = 'directory';
2020-01-08 14:41:49 +00:00
}
2023-09-27 11:52:20 +00:00
info "automatically chosen format: $options->{format}";
2018-09-18 09:20:24 +00:00
}
2023-09-27 11:52:20 +00:00
if ( $options->{target} eq '-'
and $options->{format} ne 'tar'
and $options->{format} ne 'null') {
error "the $options->{format} format is unable to write to stdout";
2020-04-09 16:33:05 +00:00
}
2023-09-27 11:52:20 +00:00
if ($options->{format} eq 'null'
2021-05-04 13:01:25 +00:00
and none { $_ eq $options->{target} } ('-', '/dev/null')) {
info "ignoring target $options->{target} with null format";
}
2024-05-12 15:16:51 +00:00
my $blocksize = -1;
2023-09-27 11:52:20 +00:00
if ($options->{format} eq 'ext2') {
2022-12-21 19:02:56 +00:00
if (!can_execute 'genext2fs') {
error "need genext2fs for ext2 format";
}
2024-05-12 15:16:51 +00:00
$blocksize = 1024;
} elsif ($options->{format} eq 'ext4') {
if (!can_execute 'mke2fs', '-V') {
error "need mke2fs for ext4 format";
}
$blocksize = 4096;
2023-09-27 11:52:20 +00:00
} elsif ($options->{format} eq 'squashfs') {
2022-12-21 19:02:56 +00:00
if (!can_execute 'tar2sqfs') {
error "need tar2sqfs binary from the squashfs-tools-ng package";
}
2024-05-12 15:16:51 +00:00
$blocksize = 1048576;
2022-12-21 19:02:56 +00:00
}
2024-06-02 06:46:59 +00:00
my $rootdir_handle;
2024-05-12 15:16:51 +00:00
if (any { $_ eq $options->{format} }
('tar', 'squashfs', 'ext2', 'ext4', 'null')) {
2023-09-27 11:52:20 +00:00
if ($options->{format} ne 'null') {
2022-09-05 03:50:50 +00:00
if (any { $_ eq $options->{variant} } ('extract', 'custom')
and $options->{mode} eq 'fakechroot') {
2024-05-12 15:16:51 +00:00
info "creating a tarball, squashfs, ext2 or ext4 image in"
2022-09-05 03:50:50 +00:00
. " fakechroot mode might fail in extract and"
2021-03-25 06:04:14 +00:00
. " custom variants because there might be no tar inside the"
. " chroot";
}
# try to fail early if target tarball or squashfs image cannot be
# opened for writing
if ($options->{target} ne '-') {
if ($options->{dryrun}) {
if (-e $options->{target}) {
info "not overwriting $options->{target} because in"
. " dry-run mode";
}
} else {
open my $fh, '>', $options->{target}
or error
"cannot open $options->{target} for writing: $!";
close $fh;
2020-01-10 10:44:15 +00:00
}
}
2020-01-08 14:41:49 +00:00
}
        # since the output is not a plain directory, we create the rootfs in
        # a temporary directory
2020-01-21 23:30:12 +00:00
$options->{root} = tempdir('mmdebstrap.XXXXXXXXXX', TMPDIR => 1);
2020-01-08 14:41:49 +00:00
info "using $options->{root} as tempdir";
2024-06-02 06:46:59 +00:00
# add an flock on the temporary directory to prevent cleanup by systemd
# see section Age in tmpfiles.d(5)
sysopen($rootdir_handle, $options->{root}, O_RDONLY | O_DIRECTORY)
or error "Failed to sysopen $options->{root}: $!\n";
flock($rootdir_handle, LOCK_EX)
or error "Unable to flock $options->{root}: $!\n";
2020-01-08 14:41:49 +00:00
# in unshare and root mode, other users than the current user need to
# access the rootfs, most prominently, the _apt user. Thus, make the
# temporary directory world readable.
2021-12-14 15:10:25 +00:00
if (
any { $_ eq $options->{mode} } ('unshare', 'root')
or ($EFFECTIVE_USER_ID == 0 and $options->{mode} eq 'chrootless')
) {
2020-01-08 14:41:49 +00:00
chmod 0755, $options->{root} or error "cannot chmod root: $!";
}
2023-09-27 11:52:20 +00:00
} elsif ($options->{format} eq 'directory') {
2020-01-08 14:41:49 +00:00
# user does not seem to have specified a tarball as output, thus work
# directly in the supplied directory
$options->{root} = $options->{target};
if (-e $options->{root}) {
2020-01-08 16:44:07 +00:00
if (!-d $options->{root}) {
2020-01-08 14:41:49 +00:00
error "$options->{root} exists and is not a directory";
}
2020-05-01 05:39:52 +00:00
if (any { $_ eq 'check/empty' } @{ $options->{skip} }) {
info "skipping check/empty as requested";
} else {
# check if the directory is empty or contains nothing more than
# an empty lost+found directory. The latter exists on freshly
# created ext3 and ext4 partitions.
# rationale for requiring an empty directory:
# https://bugs.debian.org/833525
opendir(my $dh, $options->{root})
or error "Can't opendir($options->{root}): $!";
while (my $entry = readdir $dh) {
# skip the "." and ".." entries
next if $entry eq ".";
next if $entry eq "..";
# if the entry is a directory named "lost+found" then skip
# it, if it's empty
if ($entry eq "lost+found"
and -d "$options->{root}/$entry") {
opendir(my $dh2, "$options->{root}/$entry");
# Attempt reading the directory thrice. If the third
# time succeeds, then it has more entries than just "."
# and ".." and must thus not be empty.
readdir $dh2;
readdir $dh2;
# rationale for requiring an empty directory:
# https://bugs.debian.org/833525
if (readdir $dh2) {
error "$options->{root} contains a non-empty"
. " lost+found directory";
}
closedir($dh2);
} else {
error "$options->{root} is not empty";
2020-01-08 14:41:49 +00:00
}
}
2020-05-01 05:39:52 +00:00
closedir($dh);
2020-01-08 14:41:49 +00:00
}
} else {
2020-01-08 16:44:07 +00:00
my $num_created = make_path "$options->{root}",
{ error => \my $err };
2020-01-08 14:41:49 +00:00
if ($err && @$err) {
2020-01-08 16:44:07 +00:00
error(join "; ",
(map { "cannot create " . (join ": ", %{$_}) } @$err));
2020-01-08 14:41:49 +00:00
} elsif ($num_created == 0) {
error "cannot create $options->{root}";
}
}
2020-04-09 16:33:05 +00:00
} else {
2023-09-27 11:52:20 +00:00
error "unknown format: $options->{format}";
2018-09-18 09:20:24 +00:00
}
2018-10-22 13:05:19 +00:00
    # check for double quotes because apt doesn't allow escaping them and
# thus paths with double quotes are invalid in the apt config
if ($options->{root} =~ /"/) {
2020-01-08 14:41:49 +00:00
error "apt cannot handle paths with double quotes";
2018-10-22 13:05:19 +00:00
}
2018-09-18 09:20:24 +00:00
my @idmap;
# for unshare mode the rootfs directory has to have appropriate
# permissions
2021-08-18 20:15:05 +00:00
if ($EFFECTIVE_USER_ID != 0 and $options->{mode} eq 'unshare') {
2023-03-15 16:08:12 +00:00
@idmap = read_subuid_subgid 1;
2020-01-08 14:41:49 +00:00
# sanity check
2020-01-10 08:29:34 +00:00
if ( scalar(@idmap) != 2
|| $idmap[0][0] ne 'u'
2021-08-18 20:15:05 +00:00
|| $idmap[1][0] ne 'g'
|| !length $idmap[0][2]
|| !length $idmap[1][2]) {
2020-01-08 14:41:49 +00:00
error "invalid idmap";
}
2018-09-18 09:20:24 +00:00
2020-01-08 16:44:07 +00:00
my $outer_gid = $REAL_GROUP_ID + 0;
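        # Spawn a one-shot user namespace that maps uid/gid 0 to the calling
        # user and uid/gid 1 to the first subordinate uid/gid, then chown the
        # rootfs to 1:1 inside it; seen from the outside, the directory then
        # belongs to the first subuid/subgid of the calling user.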
2018-09-18 09:20:24 +00:00
2020-01-09 07:39:40 +00:00
my $pid = get_unshare_cmd(
sub { chown 1, 1, $options->{root} },
[
['u', '0', $REAL_USER_ID, '1'],
['g', '0', $outer_gid, '1'],
['u', '1', $idmap[0][2], '1'],
['g', '1', $idmap[1][2], '1']]);
2020-01-08 14:41:49 +00:00
waitpid $pid, 0;
$? == 0 or error "chown failed";
2018-09-18 09:20:24 +00:00
}
2022-12-22 09:16:27 +00:00
# check if .deb files given by --include are readable by the unshared user
if ($options->{mode} eq 'unshare'
and scalar(grep { /^\// } @{ $options->{include} }) > 0) {
my $pid = get_unshare_cmd(
sub {
my $ret = 0;
foreach my $f (grep { /^\// } @{ $options->{include} }) {
# open the file for real because -r will report the file as
# readable even though open will fail (in contrast to the
# coreutils test utility, perl doesn't use faccessat)
my $res = open(my $fh, '<', $f);
if (!$res) {
warning "unshared user cannot access $f for reading";
$ret = 1;
} else {
close $fh;
}
}
exit $ret;
},
\@idmap
);
waitpid $pid, 0;
if ($? != 0) {
2023-03-19 08:04:06 +00:00
warning("apt on the outside is run as the unshared user and "
. "needs read access to packages outside the chroot given "
. "via --include");
2022-12-22 09:16:27 +00:00
}
}
2018-09-18 09:20:24 +00:00
# figure out whether we have mknod
$options->{havemknod} = 0;
if ($options->{mode} eq 'unshare') {
2020-01-09 07:39:40 +00:00
my $pid = get_unshare_cmd(
sub {
$options->{havemknod} = havemknod($options->{root});
},
\@idmap
);
2020-01-08 14:41:49 +00:00
waitpid $pid, 0;
$? == 0 or error "havemknod failed";
2022-11-14 13:34:15 +00:00
} elsif (any { $_ eq $options->{mode} }
('root', 'fakechroot', 'chrootless')) {
2020-01-08 14:41:49 +00:00
$options->{havemknod} = havemknod($options->{root});
2018-10-22 12:44:45 +00:00
} else {
2020-01-08 14:41:49 +00:00
error "unknown mode: $options->{mode}";
2018-09-18 09:20:24 +00:00
}
2023-09-27 11:52:20 +00:00
# If a tarball is to be created, we always (except if --skip=output/dev is
# passed) craft the /dev entries ourselves.
# Why do we put /dev entries in the final tarball?
# - because debootstrap does it
# - because schroot (#856877) and pbuilder rely on it and we care about
# Debian buildds (using schroot) and reproducible builds infra (using
# pbuilder)
    # If both of the above assertions change, we can stop creating /dev entries as
# well.
2018-09-18 09:20:24 +00:00
my $devtar = '';
2024-05-12 15:16:51 +00:00
if (any { $_ eq $options->{format} } ('tar', 'squashfs', 'ext2', 'ext4')) {
2020-01-08 14:41:49 +00:00
foreach my $file (@devfiles) {
2020-01-08 16:44:07 +00:00
my ($fname, $mode, $type, $linkname, $devmajor, $devminor)
= @{$file};
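            # $type is the tar typeflag of the entry: '0' regular file,
            # '1' hardlink, '2' symlink, '3' character device, '4' block
            # device, '5' directory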
2021-03-08 07:04:35 +00:00
if (length "./dev/$fname" > 100) {
error "tar entry cannot exceed 100 characters";
}
2023-09-27 11:52:20 +00:00
if ($type == 3
and any { $_ eq 'output/mknod' } @{ $options->{skip} }) {
info "skipping output/mknod as requested for ./dev/$fname";
next;
}
2020-01-08 16:44:07 +00:00
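            # each entry is a bare 512 byte ustar header (the field widths
            # below add up to 512) with a size of zero, so no data blocks
            # follow it in the archive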
my $entry = pack(
'a100 a8 a8 a8 a12 a12 A8 a1 a100 a8 a32 a32 a8 a8 a155 x12',
2021-03-08 07:04:35 +00:00
"./dev/$fname",
2020-01-08 16:44:07 +00:00
sprintf('%07o', $mode),
2020-08-15 20:36:13 +00:00
sprintf('%07o', 0), # uid
sprintf('%07o', 0), # gid
sprintf('%011o', 0), # size
sprintf('%011o', $mtime),
'', # checksum
$type,
$linkname,
"ustar ",
'', # username
'', # groupname
defined($devmajor) ? sprintf('%07o', $devmajor) : '',
defined($devminor) ? sprintf('%07o', $devminor) : '',
'', # prefix
);
# compute and insert checksum
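            # (the checksum is the byte sum of the whole header with the
            # checksum field itself counted as spaces, which is what the
            # space-padded A8 placeholder above provides)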
substr($entry, 148, 7)
= sprintf("%06o\0", unpack("%16C*", $entry));
$devtar .= $entry;
2020-01-08 14:41:49 +00:00
}
2023-09-27 11:52:20 +00:00
} elsif (any { $_ eq $options->{format} } ('directory', 'null')) {
2020-08-15 20:36:13 +00:00
# nothing to do
2018-10-22 12:44:45 +00:00
} else {
2023-09-27 11:52:20 +00:00
error "unknown format: $options->{format}";
2018-09-18 09:20:24 +00:00
}
2020-08-15 20:36:13 +00:00
my $exitstatus = 0;
my @taropts = (
'--sort=name',
"--mtime=\@$mtime",
'--clamp-mtime',
'--numeric-owner',
'--one-file-system',
'--format=pax',
'--pax-option=exthdr.name=%d/PaxHeaders/%f,delete=atime,delete=ctime',
'-c',
2023-08-20 06:01:37 +00:00
'--exclude=./lost+found'
2020-08-15 20:36:13 +00:00
);
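    # These tar options make the output reproducible: entries are sorted by
    # name, all timestamps are clamped to $mtime, ownership is stored
    # numerically and the pax headers carry no atime/ctime.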
2023-09-27 11:52:20 +00:00
# only exclude ./dev if device nodes are written out (the default)
if (none { $_ eq 'output/dev' } @{ $options->{skip} }) {
push @taropts, '--exclude=./dev';
}
2020-08-15 20:36:13 +00:00
# tar2sqfs and genext2fs do not support extended attributes
2023-09-27 11:52:20 +00:00
if ($options->{format} eq "squashfs") {
2021-08-25 03:20:46 +00:00
# tar2sqfs supports user.*, trusted.* and security.* but not system.*
# https://bugs.debian.org/988100
# lib/sqfs/xattr/xattr.c of https://github.com/AgentD/squashfs-tools-ng
# https://github.com/AgentD/squashfs-tools-ng/issues/83
# https://github.com/AgentD/squashfs-tools-ng/issues/25
2021-05-17 19:40:08 +00:00
warning("tar2sqfs does not support extended attributes"
. " from the 'system' namespace");
push @taropts, '--xattrs', '--xattrs-exclude=system.*';
2023-09-27 11:52:20 +00:00
} elsif ($options->{format} eq "ext2") {
2020-08-15 20:36:13 +00:00
warning "genext2fs does not support extended attributes";
2024-05-12 15:16:51 +00:00
warning "ext2 does not support sub-second precision timestamps";
warning "ext2 does not support timestamps beyond 2038 January 18";
warning "ext2 inode size of 128 prevents removing these limitations";
2020-08-15 20:36:13 +00:00
} else {
push @taropts, '--xattrs';
}
2020-01-08 14:41:49 +00:00
2020-08-15 20:36:13 +00:00
# disable signals so that we can fork and change behaviour of the signal
# handler in the parent and child without getting interrupted
my $sigset = POSIX::SigSet->new(SIGINT, SIGHUP, SIGPIPE, SIGTERM);
POSIX::sigprocmask(SIG_BLOCK, $sigset) or error "Can't block signals: $!";
2020-01-08 14:41:49 +00:00
2020-08-15 20:36:13 +00:00
# a pipe to transfer the final tarball from the child to the parent
pipe my $rfh, my $wfh;
2020-01-08 14:41:49 +00:00
2020-08-15 20:36:13 +00:00
# instead of two pipe calls, creating four file handles, we use socketpair
socketpair my $childsock, my $parentsock, AF_UNIX, SOCK_STREAM, PF_UNSPEC
or error "socketpair failed: $!";
$options->{hooksock} = $childsock;
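    # Messages on this socket start with a 16 bit big-endian length field
    # (see the pack('n', ...) call below and its hooklistener() counterpart);
    # the final "adios" message tells the listener that setup is finished.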
2020-08-18 10:08:55 +00:00
# for communicating the required number of blocks, we don't need
# bidirectional communication, so a pipe() is enough
# we don't communicate this via the hook communication because
# a) this would abuse the functionality exclusively for hooks
# b) it puts code writing the protocol outside of the helper/listener
# c) the forked listener process cannot communicate to its parent
pipe my $nblkreader, my $nblkwriter or error "pipe failed: $!";
2020-01-08 14:41:49 +00:00
2024-02-01 23:11:32 +00:00
my $worker = sub {
# child
local $SIG{'INT'} = 'DEFAULT';
local $SIG{'HUP'} = 'DEFAULT';
local $SIG{'PIPE'} = 'DEFAULT';
local $SIG{'TERM'} = 'DEFAULT';
2020-08-15 20:36:13 +00:00
2024-02-01 23:11:32 +00:00
# unblock all delayed signals (and possibly handle them)
POSIX::sigprocmask(SIG_UNBLOCK, $sigset)
or error "Can't unblock signals: $!";
2020-08-15 20:36:13 +00:00
2024-02-01 23:11:32 +00:00
close $rfh;
close $parentsock;
open(STDOUT, '>&', STDERR) or error "cannot open STDOUT: $!";
2020-01-08 14:41:49 +00:00
2024-02-01 23:11:32 +00:00
setup($options);
2020-01-08 14:41:49 +00:00
2024-02-01 23:11:32 +00:00
print $childsock (pack('n', 0) . 'adios');
$childsock->flush();
2020-08-15 20:36:13 +00:00
2024-02-01 23:11:32 +00:00
close $childsock;
2020-08-15 20:36:13 +00:00
2024-02-01 23:11:32 +00:00
close $nblkreader;
2024-05-12 15:16:51 +00:00
if (!$options->{dryrun} && any { $_ eq $options->{format} }
('ext2', 'ext4')) {
my $numblocks = approx_disk_usage($options->{root}, $blocksize);
2024-02-01 23:11:32 +00:00
print $nblkwriter "$numblocks\n";
$nblkwriter->flush();
}
close $nblkwriter;
2020-08-15 20:36:13 +00:00
2024-02-01 23:11:32 +00:00
if ($options->{dryrun}) {
info "simulate creating tarball...";
2024-05-12 15:16:51 +00:00
} elsif (any { $_ eq $options->{format} }
('tar', 'squashfs', 'ext2', 'ext4')) {
2024-02-01 23:11:32 +00:00
info "creating tarball...";
2020-08-15 20:36:13 +00:00
2024-02-01 23:11:32 +00:00
# redirect tar output to the writing end of the pipe so
# that the parent process can capture the output
open(STDOUT, '>&', $wfh) or error "cannot open STDOUT: $!";
2020-08-15 20:36:13 +00:00
2024-02-01 23:11:32 +00:00
# Add ./dev as the first entries of the tar file.
# We cannot add them after calling tar, because there is no
# way to prevent tar from writing NULL entries at the end.
if (any { $_ eq 'output/dev' } @{ $options->{skip} }) {
info "skipping output/dev as requested";
} else {
print $devtar;
2020-08-18 10:08:55 +00:00
}
2020-08-15 20:36:13 +00:00
2024-02-01 23:11:32 +00:00
if ($options->{mode} eq 'unshare') {
# pack everything except ./dev
0 == system('tar', @taropts, '-C', $options->{root}, '.')
or error "tar failed: $?";
} elsif ($options->{mode} eq 'fakechroot') {
# By default, FAKECHROOT_EXCLUDE_PATH includes /proc and /sys
# which means that the resulting tarball will contain the
# permission and ownership information of /proc and /sys from
# the outside, which we want to avoid.
## no critic (Variables::RequireLocalizedPunctuationVars)
$ENV{FAKECHROOT_EXCLUDE_PATH} = "/dev";
# Fakechroot requires tar to run inside the chroot or otherwise
# absolute symlinks will include the path to the root directory
0 == system('chroot', $options->{root}, 'tar',
@taropts, '-C', '/', '.')
or error "tar failed: $?";
} elsif (any { $_ eq $options->{mode} } ('root', 'chrootless')) {
# If the chroot directory is not owned by the root user, then
# we assume that no measure was taken to fake root permissions.
# Since the final tarball should contain entries with root
# ownership, we instruct tar to do so.
my @owneropts = ();
if ((stat $options->{root})[4] != 0) {
push @owneropts, '--owner=0', '--group=0',
'--numeric-owner';
2020-04-09 16:33:05 +00:00
}
2024-02-01 23:11:32 +00:00
0 == system('tar', @taropts, @owneropts, '-C',
$options->{root}, '.')
or error "tar failed: $?";
2020-01-08 14:41:49 +00:00
} else {
2024-02-01 23:11:32 +00:00
error "unknown mode: $options->{mode}";
2020-01-08 14:41:49 +00:00
}
2020-08-15 20:36:13 +00:00
2024-02-01 23:11:32 +00:00
info "done";
} elsif (any { $_ eq $options->{format} } ('directory', 'null')) {
# nothing to do
} else {
error "unknown format: $options->{format}";
}
exit 0;
};
my $pid;
if ($options->{mode} eq 'unshare') {
$pid = get_unshare_cmd($worker, \@idmap);
} elsif (any { $_ eq $options->{mode} }
('root', 'fakechroot', 'chrootless')) {
$pid = fork() // error "fork() failed: $!";
if ($pid == 0) {
$worker->();
2020-01-08 14:41:49 +00:00
}
2020-08-15 20:36:13 +00:00
} else {
error "unknown mode: $options->{mode}";
}
# parent
my $got_signal = 0;
my $waiting_for = "setup";
my $ignore = sub {
$got_signal = shift;
info "main() received signal $got_signal: waiting for $waiting_for...";
2019-12-09 09:40:51 +00:00
};
2020-08-15 20:36:13 +00:00
local $SIG{'INT'} = $ignore;
local $SIG{'HUP'} = $ignore;
local $SIG{'PIPE'} = $ignore;
local $SIG{'TERM'} = $ignore;
# unblock all delayed signals (and possibly handle them)
POSIX::sigprocmask(SIG_UNBLOCK, $sigset)
or error "Can't unblock signals: $!";
close $wfh;
close $childsock;
debug "starting to listen for hooks";
# handle special hook commands via parentsock
my $lpid = fork() // error "fork() failed: $!";
if ($lpid == 0) {
# whatever the script writes on stdout is sent to the
# socket
# whatever is written to the socket, send to stdin
open(STDOUT, '>&', $parentsock)
or error "cannot open STDOUT: $!";
open(STDIN, '<&', $parentsock)
or error "cannot open STDIN: $!";
2022-11-08 12:02:47 +00:00
hooklistener($verbosity_level);
exit 0;
2020-08-15 20:36:13 +00:00
}
waitpid($lpid, 0);
if ($? != 0) {
2020-01-08 14:41:49 +00:00
# we cannot die here because that would leave the other thread
# running without a parent
warning "listening on child socket failed: $@";
$exitstatus = 1;
2019-12-09 09:40:51 +00:00
}
debug "finish to listen for hooks";
close $parentsock;
2018-12-28 04:09:08 +00:00
2020-08-18 10:08:55 +00:00
my $numblocks = 0;
close $nblkwriter;
2024-05-12 15:16:51 +00:00
if (!$options->{dryrun} && any { $_ eq $options->{format} }
('ext2', 'ext4')) {
2022-12-22 09:17:40 +00:00
$numblocks = <$nblkreader>;
2023-02-23 20:50:55 +00:00
if (defined $numblocks) {
chomp $numblocks;
} else {
2022-12-22 09:17:40 +00:00
# This can happen if the setup process died early and thus closed
# the pipe from the other end. The EOF is turned into undef.
2023-02-23 20:50:55 +00:00
# we cannot die here because that would skip the cleanup task
warning "failed to read required number of blocks";
$exitstatus = 1;
$numblocks = -1;
2022-12-22 09:17:40 +00:00
}
2020-08-18 10:08:55 +00:00
}
close $nblkreader;
2020-01-10 10:44:15 +00:00
if ($options->{dryrun}) {
# nothing to do
2023-09-27 11:52:20 +00:00
} elsif (any { $_ eq $options->{format} } ('directory', 'null')) {
2020-04-09 16:33:05 +00:00
# nothing to do
2024-05-12 15:16:51 +00:00
} elsif ((any { $_ eq $options->{format} } ('ext2', 'ext4'))
&& $numblocks <= 0) {
2023-10-23 09:51:14 +00:00
# nothing to do because of invalid $numblocks
2024-05-12 15:16:51 +00:00
} elsif (any { $_ eq $options->{format} }
('tar', 'squashfs', 'ext2', 'ext4')) {
2020-01-08 14:41:49 +00:00
# we use eval() so that error() doesn't take this process down and
# thus leaves the setup() process without a parent
eval {
if ($options->{target} eq '-') {
if (!copy($rfh, *STDOUT)) {
error "cannot copy to standard output: $!";
}
} else {
2024-05-12 15:16:51 +00:00
if (any { $_ eq $options->{format} }
('squashfs', 'ext2', 'ext4')
or defined $tar_compressor) {
2020-01-08 14:41:49 +00:00
my @argv = ();
2023-09-27 11:52:20 +00:00
if ($options->{format} eq 'squashfs') {
2020-01-08 14:41:49 +00:00
push @argv, 'tar2sqfs',
2020-01-10 10:44:15 +00:00
'--quiet', '--no-skip', '--force',
'--exportable',
2020-01-08 16:44:07 +00:00
'--compressor', 'xz',
2024-05-12 15:16:51 +00:00
'--block-size', $blocksize,
2020-01-08 16:44:07 +00:00
$options->{target};
2023-09-27 11:52:20 +00:00
} elsif ($options->{format} eq 'ext2') {
2020-04-09 16:33:05 +00:00
if ($numblocks <= 0) {
error "invalid number of blocks: $numblocks";
}
push @argv, 'genext2fs', '-B', 1024, '-b', $numblocks,
2020-07-09 05:34:03 +00:00
'-i', '16384', '-a', '-', $options->{target};
2024-05-12 15:16:51 +00:00
} elsif ($options->{format} eq 'ext4') {
if ($numblocks <= 0) {
error "invalid number of blocks: $numblocks";
}
push @argv, 'mke2fs', '-q', '-F', '-o', 'Linux', '-T',
'ext4';
if (exists $ENV{SOURCE_DATE_EPOCH}) {
# if SOURCE_DATE_EPOCH was set, make the image
# reproducible by setting a fixed uuid and
# hash_seed
my $uuid = create_v5_uuid(
create_v5_uuid(
$UUID_NS_DNS, "mister-muffin.de"
),
$mtime
);
push @argv, '-U', $uuid, '-E', "hash_seed=$uuid";
}
push @argv, '-b', $blocksize, '-d', '-',
$options->{target}, $numblocks;
2023-09-27 11:52:20 +00:00
} elsif ($options->{format} eq 'tar') {
2020-01-08 14:41:49 +00:00
push @argv, @{$tar_compressor};
2020-04-09 16:33:05 +00:00
} else {
2023-09-27 11:52:20 +00:00
error "unknown format: $options->{format}";
2020-01-08 14:41:49 +00:00
}
2020-01-08 16:44:07 +00:00
POSIX::sigprocmask(SIG_BLOCK, $sigset)
or error "Can't block signals: $!";
2020-01-08 14:41:49 +00:00
my $cpid = fork() // error "fork() failed: $!";
if ($cpid == 0) {
# child: default signal handlers
2020-01-09 07:39:40 +00:00
local $SIG{'INT'} = 'DEFAULT';
local $SIG{'HUP'} = 'DEFAULT';
local $SIG{'PIPE'} = 'DEFAULT';
local $SIG{'TERM'} = 'DEFAULT';
2020-01-08 14:41:49 +00:00
2020-01-08 15:23:34 +00:00
# unblock all delayed signals (and possibly handle
# them)
2020-01-08 16:44:07 +00:00
POSIX::sigprocmask(SIG_UNBLOCK, $sigset)
or error "Can't unblock signals: $!";
2020-01-08 14:41:49 +00:00
2020-04-09 16:33:05 +00:00
# redirect stdout to file or /dev/null
2024-05-12 15:16:51 +00:00
if (any { $_ eq $options->{format} }
('squashfs', 'ext2', 'ext4')) {
2020-01-08 16:44:07 +00:00
open(STDOUT, '>', '/dev/null')
or error "cannot open /dev/null for writing: $!";
2023-09-27 11:52:20 +00:00
} elsif ($options->{format} eq 'tar') {
2020-01-08 16:44:07 +00:00
open(STDOUT, '>', $options->{target})
or error
"cannot open $options->{target} for writing: $!";
2020-04-09 16:33:05 +00:00
} else {
2023-09-27 11:52:20 +00:00
error "unknown format: $options->{format}";
2020-01-08 14:41:49 +00:00
}
2020-01-08 16:44:07 +00:00
open(STDIN, '<&', $rfh)
or error "cannot open file handle for reading: $!";
2020-01-09 07:39:40 +00:00
eval { Devel::Cover::set_coverage("none") }
2020-01-08 16:44:07 +00:00
if $is_covering;
exec { $argv[0] } @argv
or
error("cannot exec " . (join " ", @argv) . ": $!");
2020-01-08 14:41:49 +00:00
}
2020-01-08 16:44:07 +00:00
POSIX::sigprocmask(SIG_UNBLOCK, $sigset)
or error "Can't unblock signals: $!";
2020-01-08 14:41:49 +00:00
waitpid $cpid, 0;
if ($? != 0) {
2020-05-02 21:55:34 +00:00
error("failed to run " . (join " ", @argv));
2020-01-08 14:41:49 +00:00
}
} else {
2022-07-26 16:43:44 +00:00
# somehow, when running under qemu, writing to a virtio
# device will not result in an ENOSPC but just stall forever
2020-01-08 16:44:07 +00:00
if (!copy($rfh, $options->{target})) {
2020-01-08 14:41:49 +00:00
error "cannot copy to $options->{target}: $!";
}
}
}
};
if ($@) {
# we cannot die here because that would leave the other thread
# running without a parent
2022-01-08 07:31:50 +00:00
# We send SIGHUP to all our processes (including a possibly still
# running tar and this process itself) to reliably tear down
# all running child processes. The main process is not affected
# because we are ignoring SIGHUP.
2023-10-23 09:51:41 +00:00
#
# FIXME: this codepath becomes dangerous in case mmdebstrap is not
# run in its own process group. When run from the terminal, the
# shell creates a new process group as part of its job control, so
# sending SIGHUP to all processes in our own process group should
# not be dangerous. But for example, on debci, lxc will run in the
# same process group as mmdebstrap and sending SIGHUP to the whole
# process group will also kill lxc. Creating a new process group
# for $pid will break things because only the foreground job is
# allowed to read from the terminal. If a background job does it,
# it will be suspended with SIGTTIN. Even though apt could be told
# to not read from the terminal by opening STDIN from /dev/null,
# this would make --chrooted-customize-hook=bash impossible.
# Making the $pid process group the foreground job will destroy all
# the signal handling we have set up for when the user presses
# ctrl+c in a terminal. Even if we fix the signal handling we now
# find ourselves in the opposite situation: the $pid process must
# now clean up the former main process tree reliably. And we cannot
# create a new process group for everything all-in-one because that
# would also destroy CTRL+C handling from the terminal.
2020-01-08 14:41:49 +00:00
warning "creating tarball failed: $@";
2023-10-23 09:51:41 +00:00
my $pgroup = getpgrp();
warning "sending SIGHUP to all processes in process group $pgroup";
kill HUP => -$pgroup;
2020-01-08 14:41:49 +00:00
$exitstatus = 1;
}
2020-04-09 16:33:05 +00:00
} else {
2023-09-27 11:52:20 +00:00
error "unknown format: $options->{format}";
2018-09-18 09:20:24 +00:00
}
close($rfh);
waitpid $pid, 0;
2018-09-21 17:06:47 +00:00
if ($? != 0) {
2020-01-08 14:41:49 +00:00
$exitstatus = 1;
2018-09-21 17:06:47 +00:00
}
2018-09-18 09:20:24 +00:00
2019-01-13 09:17:46 +00:00
# change signal handler message
$waiting_for = "cleanup";
2023-09-27 11:52:20 +00:00
if (any { $_ eq $options->{format} } ('directory')) {
2020-04-09 16:33:05 +00:00
# nothing to do
2023-09-27 11:52:20 +00:00
} elsif (any { $_ eq $options->{format} }
2024-05-12 15:16:51 +00:00
('tar', 'squashfs', 'ext2', 'ext4', 'null')) {
2020-04-09 16:33:05 +00:00
if (!-e $options->{root}) {
error "$options->{root} does not exist";
}
2020-01-08 14:41:49 +00:00
info "removing tempdir $options->{root}...";
if ($options->{mode} eq 'unshare') {
# We don't have permissions to remove the directory outside
# the unshared namespace, so we remove it here.
# Since this is still inside the unshared namespace, there is
# no risk of removing anything important.
2020-01-09 07:39:40 +00:00
$pid = get_unshare_cmd(
sub {
2021-10-06 19:20:41 +00:00
# change CWD to chroot directory because find tries to
# chdir to the current directory which might not be
# accessible by the unshared user:
# find: Failed to restore initial working directory
0 == system('env', "--chdir=$options->{root}", 'find',
$options->{root}, '-mount',
'-mindepth', '1', '-delete')
or error "rm failed: $?";
# ignore failure in case the unshared user doesn't have the
# required permissions -- we attempt again later if
# necessary
rmdir "$options->{root}";
2020-01-09 07:39:40 +00:00
},
\@idmap
);
2020-01-08 14:41:49 +00:00
waitpid $pid, 0;
$? == 0 or error "remove_tree failed";
2020-03-07 22:42:19 +00:00
# in unshare mode, the toplevel directory might've been created in
# a directory that the unshared user cannot change and thus cannot
2021-10-06 19:20:41 +00:00
# delete. We attempt its removal again outside as the normal user.
2020-03-07 22:42:19 +00:00
if (-e $options->{root}) {
rmdir "$options->{root}"
or error "cannot rmdir $options->{root}: $!";
}
2022-11-14 13:34:15 +00:00
} elsif (any { $_ eq $options->{mode} }
('root', 'fakechroot', 'chrootless')) {
2020-01-08 14:41:49 +00:00
# without unshare, we use the system's rm to recursively remove the
# temporary directory just to make sure that we do not accidentally
# remove more than we should by using --one-file-system.
2020-01-08 16:44:07 +00:00
0 == system('rm', '--interactive=never', '--recursive',
'--preserve-root', '--one-file-system', $options->{root})
2021-10-06 19:20:41 +00:00
or error "rm failed: $?";
2020-01-08 14:41:49 +00:00
} else {
error "unknown mode: $options->{mode}";
}
2020-04-09 16:33:05 +00:00
} else {
2023-09-27 11:52:20 +00:00
error "unknown format: $options->{format}";
2018-09-18 09:20:24 +00:00
}
2018-09-21 17:06:47 +00:00
2019-01-13 09:17:46 +00:00
if ($got_signal) {
2020-01-08 14:41:49 +00:00
$exitstatus = 1;
2019-01-13 09:17:46 +00:00
}
2020-08-25 14:06:05 +00:00
if ($exitstatus == 0) {
my $duration = Time::HiRes::time - $before;
info "success in " . (sprintf "%.04f", $duration) . " seconds";
2022-01-08 07:32:09 +00:00
exit 0;
2020-08-25 14:06:05 +00:00
}
2022-01-08 07:32:09 +00:00
error "mmdebstrap failed to run";
return 1;
2018-09-18 09:20:24 +00:00
}
main();
__END__
=head1 NAME
mmdebstrap - multi-mirror Debian chroot creation
=head1 SYNOPSIS
B<mmdebstrap> [B<OPTION...>] [I<SUITE> [I<TARGET> [I<MIRROR>...]]]
=head1 DESCRIPTION
B<mmdebstrap> creates a Debian chroot of I<SUITE> into I<TARGET> from one or
more I<MIRROR>s. It is meant as an alternative to the debootstrap tool (see
section B<DEBOOTSTRAP>). In contrast to debootstrap it uses apt to resolve
dependencies and is thus able to use more than one mirror and resolve more
2024-01-08 22:02:53 +00:00
complex dependency relationships. See section B<OPERATION> for an overview of
how B<mmdebstrap> works internally.
The I<SUITE> option may either be a valid release code name (eg, sid, bookworm,
trixie) or a symbolic name (eg, unstable, testing, stable, oldstable). Any
suite name that works with apt on the given mirror will work. The I<SUITE>
option is optional if no I<TARGET> and no I<MIRROR> option is provided. If
I<SUITE> is missing, then the information about the desired suite has to come from
standard input as part of a valid apt sources.list file or be set up via hooks.
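Example: with neither I<SUITE> nor I<TARGET> nor I<MIRROR> given, the apt
sources are read from standard input and an uncompressed tarball is written to
standard output:
    mmdebstrap < /etc/apt/sources.list > chroot.tar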
2021-08-17 08:29:56 +00:00
The value of the I<SUITE> argument will be used to determine which apt index to
2020-11-28 13:30:50 +00:00
use for finding out the set of C<Essential:yes> packages and/or the set of
2024-01-08 22:02:53 +00:00
packages with the right priority for the selected variant. This functionality
can be disabled by choosing the empty string for I<SUITE>. See the section
2020-11-28 13:30:50 +00:00
B<VARIANTS> for more information.
2018-09-18 09:20:24 +00:00
2024-01-08 22:02:53 +00:00
The I<TARGET> option may either be the path to a directory, the path to a
2024-05-12 15:16:51 +00:00
tarball filename, the path to a squashfs image, the path to an ext2 or ext4
image, a FIFO, a character special device, or C<->. The I<TARGET> option is
optional if no I<MIRROR> option is provided. If I<TARGET> is missing or if
I<TARGET> is C<->, an uncompressed tarball will be sent to standard output.
Without the B<--format> option, I<TARGET> will be used to choose the format.
See the section B<FORMATS> for more information.
2024-01-08 22:02:53 +00:00
The I<MIRROR> option may either be provided as a URI, in apt one-line format,
as a path to a file in apt's one-line or deb822-format, or C<->. If no
I<MIRROR> option is provided, then L<http://deb.debian.org/debian> is used as
the default. If I<SUITE> does not refer to "unstable" or "testing", then
I<SUITE>-updates and I<SUITE>-security mirrors are automatically added. If a
I<MIRROR> option starts with "deb " or "deb-src " then it is used as a one-line
format entry for apt's sources.list inside the chroot. If a I<MIRROR> option
contains a "://" then it is interpreted as a mirror URI and the apt line inside
the chroot is assembled as "deb [arch=A] B C D" where A is the host's native
architecture, B is the I<MIRROR>, C is the given I<SUITE> and D is the
components given via B<--components> (defaults to "main"). If a I<MIRROR>
option happens to be an existing file, then its contents are written into the
chroot's sources.list (if the first I<MIRROR> is a file in one-line format) or
into the chroot's sources.list.d directory, named with the extension .list or
.sources, depending on whether the file is in one-line or deb822 format,
respectively. If I<MIRROR> is C<-> then standard input is pasted into the
chroot's sources.list. More than one mirror can be specified and are appended
to the chroot's sources.list in the given order. If you specify a https or tor
I<MIRROR> and you want the chroot to be able to update itself, don't forget to
also install the ca-certificates package, the apt-transport-https package for
apt versions less than 1.5 and/or the apt-transport-tor package using the
B<--include> option, as necessary.
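Example: bootstrap unstable with experimental as an additional mirror, both
given in apt's one-line format:
    mmdebstrap unstable debian-unstable.tar \
        "deb http://deb.debian.org/debian unstable main" \
        "deb http://deb.debian.org/debian experimental main"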
2019-02-23 07:43:15 +00:00
All status output is printed to standard error unless B<--logfile> is used to
redirect it to a file or B<--quiet> or B<--silent> is used to suppress any
output on standard error. Help and version information will be printed to
standard error with the B<--help> and B<--version> options, respectively.
Otherwise, an uncompressed tarball might be sent to standard output if
I<TARGET> is C<-> or if no I<TARGET> was specified.
2018-09-18 09:20:24 +00:00
=head1 OPTIONS
2019-02-20 17:00:52 +00:00
Options are case insensitive. Short options may be bundled. Long options
2023-01-16 07:12:00 +00:00
require a double dash and may be abbreviated to uniqueness. Options can be
placed anywhere on the command line, even before or mixed with the I<SUITE>,
I<TARGET>, and I<MIRROR> arguments. A double dash C<--> can be used to stop
interpreting command line arguments as options to allow I<SUITE>, I<TARGET> and
I<MIRROR> arguments that start with a single or double dash. Option order only
matters for options that can be passed multiple times as documented below.
2019-02-20 17:00:52 +00:00
2018-09-18 09:20:24 +00:00
=over 8
=item B<-h,--help>
2019-11-29 07:45:08 +00:00
Print synopsis and options of this man page and exit.
=item B<--man>
Show the full man page as generated from Perl POD in a pager. This requires
the perldoc program from the perl-doc package. This is the same as running:
pod2man /usr/bin/mmdebstrap | man -l -
2018-09-18 09:20:24 +00:00
2019-02-23 07:55:31 +00:00
=item B<--version>
Print the B<mmdebstrap> version and exit.
2019-01-08 10:27:56 +00:00
=item B<--variant>=I<name>
2018-09-18 09:20:24 +00:00
2019-01-08 10:27:56 +00:00
Choose which package set to install. Valid variant I<name>s are B<extract>,
2018-10-22 15:05:56 +00:00
B<custom>, B<essential>, B<apt>, B<required>, B<minbase>, B<buildd>,
B<important>, B<debootstrap>, B<->, and B<standard>. The default variant is
2020-01-21 12:17:31 +00:00
B<debootstrap>. See the section B<VARIANTS> for more information.
2018-09-18 09:20:24 +00:00
2019-01-08 10:27:56 +00:00
=item B<--mode>=I<name>
2018-09-18 09:20:24 +00:00
Choose how to perform the chroot operation and create a filesystem with
2020-01-08 16:19:30 +00:00
ownership information different from the current user. Valid mode I<name>s are
2022-09-05 03:50:50 +00:00
B<auto>, B<sudo>, B<root>, B<unshare>, B<fakeroot>, B<fakechroot> and
2020-01-10 11:05:01 +00:00
B<chrootless>. The default mode is B<auto>. See the section B<MODES> for more
information.
2018-09-18 09:20:24 +00:00
2020-04-09 16:33:05 +00:00
=item B<--format>=I<name>
Choose the output format. Valid format I<name>s are B<auto>, B<directory>,
2024-05-12 15:16:51 +00:00
B<tar>, B<squashfs>, B<ext2>, B<ext4> and B<null>. The default format is
B<auto>. See the section B<FORMATS> for more information.
2020-04-09 16:33:05 +00:00
2019-01-08 10:27:56 +00:00
=item B<--aptopt>=I<option>|I<file>
2018-09-18 09:20:24 +00:00
2020-08-18 07:37:08 +00:00
Pass arbitrary I<option>s to apt. Will be permanently added to
F</etc/apt/apt.conf.d/99mmdebstrap> inside the chroot. Use hooks for temporary
configuration options. Can be specified multiple times. Each I<option> will be
appended to 99mmdebstrap. A semicolon will be added at the end of the option if
necessary. If the command line argument is an existing I<file>, the content of
the file will be appended to 99mmdebstrap verbatim.
2018-09-18 09:20:24 +00:00
2019-03-25 13:50:41 +00:00
Example: This is necessary for allowing old timestamps from snapshot.debian.org
2018-09-18 09:20:24 +00:00
2018-12-27 13:42:29 +00:00
--aptopt='Acquire::Check-Valid-Until "false"'
2021-09-09 20:59:54 +00:00
--aptopt='Apt::Key::gpgvcommand "/usr/libexec/mmdebstrap/gpgvnoexpkeysig"'
2019-03-25 13:50:41 +00:00
Example: Settings controlling download of package description translations
2018-12-27 13:42:29 +00:00
--aptopt='Acquire::Languages { "environment"; "en"; }'
--aptopt='Acquire::Languages "none"'
2019-03-25 13:50:41 +00:00
2020-05-01 21:37:38 +00:00
Example: Enable installing Recommends (by default B<mmdebstrap> doesn't)
2019-03-25 13:50:41 +00:00
2018-12-27 13:42:29 +00:00
--aptopt='Apt::Install-Recommends "true"'
2019-03-25 13:50:41 +00:00
Example: Configure apt-cacher or apt-cacher-ng as an apt proxy
2019-01-07 12:19:38 +00:00
--aptopt='Acquire::http { Proxy "http://127.0.0.1:3142"; }'
2019-03-25 13:50:41 +00:00
Example: For situations in which the apt sandbox user cannot access the chroot
2019-02-28 10:54:03 +00:00
--aptopt='APT::Sandbox::User "root"'
2018-09-18 09:20:24 +00:00
2019-03-25 13:50:41 +00:00
Example: Minimizing the number of packages installed from experimental
--aptopt='APT::Solver "aspcud"'
2020-01-08 16:19:30 +00:00
--aptopt='APT::Solver::aspcud::Preferences
"-count(solution,APT-Release:=/a=experimental/),-removed,-changed,-new"'
2019-03-25 13:50:41 +00:00
2019-10-28 15:29:38 +00:00
=item B<--keyring>=I<file>|I<directory>
2023-03-02 10:53:43 +00:00
Change the default keyring to use by apt during the initial setup. This is
similar to setting B<Dir::Etc::Trusted> and B<Dir::Etc::TrustedParts> using
B<--aptopt> except that the latter setting will be permanently stored in the
2023-10-28 13:24:51 +00:00
chroot while the keyrings passed via B<--keyring> will only be visible to apt
as run by B<mmdebstrap>. Do not use B<--keyring> if apt inside the chroot needs
to know about your keys after the initial chroot creation by B<mmdebstrap>.
This option is mainly intended for users who use B<mmdebstrap> as a
B<debootstrap> drop-in replacement. As such, it is probably not what you want to
use if you use B<mmdebstrap> with more than a single mirror unless you pass it
a directory containing all the keyrings you need.
2023-03-02 10:53:43 +00:00
By default, the local setting of B<Dir::Etc::Trusted> and
B<Dir::Etc::TrustedParts> are used to choose the keyring used by apt as run by
B<mmdebstrap>. These two locations are set to F</etc/apt/trusted.gpg> and
F</etc/apt/trusted.gpg.d> by default. Depending on whether a file or directory
is passed to this option, the former and latter default can be changed,
respectively. Since apt only supports a single keyring file and directory,
respectively, you can B<not> use this option to pass multiple files and/or
directories. Using the C<--keyring> argument in the following way is equivalent to
keeping the default:
2019-12-03 09:16:39 +00:00
--keyring=/etc/apt/trusted.gpg --keyring=/etc/apt/trusted.gpg.d
If you need to pass multiple keyrings, use the C<signed-by> option when
specifying the mirror like this:
mmdebstrap mysuite out.tar "deb [signed-by=/path/to/key.gpg] http://..."
2023-03-02 10:53:43 +00:00
Another reason to use C<signed-by> instead of B<--keyring> is if apt inside the
chroot needs to know by what key the repository is signed even after the
initial chroot creation.
2019-12-03 09:16:39 +00:00
The C<signed-by> option will automatically be added to the final
C<sources.list> if the keyring required for the selected I<SUITE> is not yet
trusted by apt. Automatically adding the C<signed-by> option in these cases
requires C<gpg> to be installed. If C<gpg> and C<ubuntu-archive-keyring> are
installed, then you can create a Ubuntu Bionic chroot on Debian like this:
mmdebstrap bionic ubuntu-bionic.tar
The resulting chroot will have a C<sources.list> with a C<signed-by> option
pointing to F</usr/share/keyrings/ubuntu-archive-keyring.gpg>.
2019-10-28 15:29:38 +00:00
2023-03-02 10:53:43 +00:00
You do not need to use B<--keyring> or C<signed-by> if you placed the keys that
apt needs to know about into F</etc/apt/trusted.gpg.d> in the B<--setup-hook>
2023-10-28 13:24:51 +00:00
(which is before C<apt update> runs), for example by using the B<copy-in>
2023-03-02 10:53:43 +00:00
special hook. You also need to copy your keys into the chroot explicitly if the
key you passed via C<signed-by> points to a location that is not otherwise
populated during chroot creation (for example by installing a keyring package).
2019-01-08 10:27:56 +00:00
=item B<--dpkgopt>=I<option>|I<file>
2018-09-18 09:20:24 +00:00
2020-08-18 07:37:08 +00:00
Pass arbitrary I<option>s to dpkg. Will be permanently added to
F</etc/dpkg/dpkg.cfg.d/99mmdebstrap> inside the chroot. Use hooks for temporary
configuration options. Can be specified multiple times. Each I<option> will be
appended to 99mmdebstrap. If the command line argument is an existing I<file>,
the content of the file will be appended to 99mmdebstrap verbatim.
2018-09-18 09:20:24 +00:00
2019-03-25 13:50:41 +00:00
Example: Exclude paths to reduce chroot size
--dpkgopt='path-exclude=/usr/share/man/*'
--dpkgopt='path-include=/usr/share/man/man[1-9]/*'
--dpkgopt='path-exclude=/usr/share/locale/*'
--dpkgopt='path-include=/usr/share/locale/locale.alias'
--dpkgopt='path-exclude=/usr/share/doc/*'
--dpkgopt='path-include=/usr/share/doc/*/copyright'
--dpkgopt='path-include=/usr/share/doc/*/changelog.Debian.*'
2018-09-18 09:20:24 +00:00
2019-01-08 10:27:56 +00:00
=item B<--include>=I<pkg1>[,I<pkg2>,...]
2018-09-18 09:20:24 +00:00
2019-10-28 14:35:29 +00:00
Comma or whitespace separated list of packages which will be installed in
addition to the packages installed by the specified variant. The direct and
indirect hard dependencies will also be installed. The behaviour of this
option depends on the selected variant. The B<extract> and B<custom> variants
install no packages by default, so for these variants, the packages specified
by this option will be the only ones that get either extracted or installed by
dpkg, respectively. For all other variants, apt is used to install the
2021-11-09 06:31:56 +00:00
additional packages. Package names are directly passed to apt and thus, you
can use apt features like C<pkg/suite>, C<pkg=version>, C<pkg->, use a glob or
2023-03-19 08:04:06 +00:00
regex for C<pkg>, use apt patterns or pass a path to a .deb package file (see
below for notes concerning passing the path to a .deb package file in
2023-10-28 13:24:51 +00:00
B<unshare> mode). See L<apt(8)> for the supported syntax.
2022-09-02 21:35:56 +00:00
The option can be specified multiple times and the packages are concatenated in
the order in which they are given on the command line. If later list items are
repeated, then they get dropped so that the resulting package list is free of
duplicates. So the following are equivalent:
2019-10-28 14:35:29 +00:00
--include="pkg1/stable pkg2=1.0 pkg3-"
2022-09-02 21:35:56 +00:00
--include=pkg1/stable,pkg2=1.0,pkg3-,,,
2019-10-28 14:35:29 +00:00
--incl=pkg1/stable --incl="pkg2=1.0 pkg3-" --incl=pkg2=1.0,pkg3-
2018-09-18 09:20:24 +00:00
2022-09-02 21:35:56 +00:00
Since the list of packages is separated by comma or whitespace, it is not
possible to mix apt patterns or .deb package file paths containing either
commas or whitespace with normal package names. If you do, your patterns and
paths will be split by comma and whitespace as well and become useless. To pass
such a pattern or package file path, put them into their own B<--include>
option. If the argument to B<--include> starts with an apt pattern or with a
file path, then it will not be split:
--include="?or(?priority(required), ?priority(important))"
--include="./path/to/deb with spaces/and,commas/foo.deb"
Specifically, all arguments to B<--include> that start with a C<?>, C<!>, C<~>,
C<(>, C</>, C<./> or C<../> are not split and treated as single arguments to
apt. To add more packages, use multiple B<--include> options. To disable this
detection of patterns and paths, start the argument to B<--include> with a
comma or whitespace.
2023-03-19 08:04:06 +00:00
If you pass the path to a .deb package file using B<--include>, B<mmdebstrap>
will ensure that the path exists. If the path is a relative path, it will
internally be converted to an absolute path. Since apt (outside the chroot)
passes paths to dpkg (on the inside) verbatim, you have to make the .deb
package available under the same path inside the chroot as well or otherwise
dpkg inside the chroot will be unable to access it. This can be achieved using
a setup-hook. A hook that automatically makes the contents of C<file://>
mirrors as well as .deb packages given with B<--include> available inside the
chroot is provided by B<mmdebstrap> as
B<--hook-dir=/usr/share/mmdebstrap/hooks/file-mirror-automount>. This hook
takes care of copying all relevant files to their correct locations and cleans
up those files at the end. In B<unshare> mode, the .deb package paths have to
be accessible by the unshared user as well. This means that the package itself
likely must be made world-readable and all directory components on the path to
it world-executable.
2019-01-08 10:27:56 +00:00
=item B<--components>=I<comp1>[,I<comp2>,...]
2018-09-18 09:20:24 +00:00
2023-03-05 09:02:23 +00:00
Comma or whitespace separated list of components like main, contrib, non-free
and non-free-firmware which will be used for all URI-only I<MIRROR> arguments.
The option can be specified multiple times and the components are concatenated
in the order in which they are given on the command line. If later list items
are repeated, then they get dropped so that the resulting component list is
free of duplicates. So the following are equivalent:
--components="main contrib non-free non-free-firmware"
--components=main,contrib,non-free,non-free-firmware
--comp=main --comp="contrib non-free" --comp="main,non-free-firmware"
2018-09-18 09:20:24 +00:00
2019-01-08 10:27:56 +00:00
=item B<--architectures>=I<native>[,I<foreign1>,...]
2018-09-18 09:20:24 +00:00
2019-10-27 21:04:21 +00:00
Comma or whitespace separated list of architectures. The first architecture is
the I<native> architecture inside the chroot. The remaining architectures will
be added to the foreign dpkg architectures. Without this option, the I<native>
2019-01-08 10:27:56 +00:00
architecture of the chroot defaults to the native architecture of the system
2019-10-27 21:04:21 +00:00
running B<mmdebstrap>. The option can be specified multiple times and values
are concatenated. If later list items are repeated, then they get dropped so
that the resulting list is free of duplicates. So the following are
equivalent:
--architectures="amd64 armhf mipsel"
--architectures=amd64,armhf,mipsel
--arch=amd64 --arch="armhf mipsel" --arch=armhf,mipsel
2019-01-08 10:27:56 +00:00
2020-01-10 10:44:15 +00:00
=item B<--simulate>, B<--dry-run>
Run apt-get with B<--simulate>. Only the package cache is initialized but no
binary packages are downloaded or installed. Use this option to quickly check
whether a package selection within a certain suite and variant can in principle
be installed as far as its dependencies go. If the output is a tarball, then
no output is produced. If the output is a directory, then the directory will be
left populated with the skeleton files and directories necessary for apt to run
2020-06-23 21:14:37 +00:00
in it. No hooks are executed with B<--simulate> or B<--dry-run>.
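Example: check whether a package selection can in principle be installed in a
given suite and variant without producing any output (the package name is
arbitrary):
    mmdebstrap --simulate --variant=apt --include=gcc unstable /dev/null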
2020-01-10 10:44:15 +00:00
2019-02-20 12:32:49 +00:00
=item B<--setup-hook>=I<command>
Execute arbitrary I<command>s right after initial setup (directory creation,
configuration of apt and dpkg, ...) but before any packages are downloaded or
installed. At that point, the chroot directory does not contain any
2019-12-09 09:40:51 +00:00
executables and thus cannot be chroot-ed into. See section B<HOOKS> for more
information.
2019-02-20 12:32:49 +00:00
2023-10-23 09:52:28 +00:00
Example: add additional apt sources entries on top of the default ones:
--setup-hook='echo "deb http..." > "$1"/etc/apt/sources.list.d/custom.list'
2022-07-26 16:30:52 +00:00
Example: Setup chroot for installing a sub-essential busybox-based chroot
2021-09-03 10:04:40 +00:00
with --variant=custom
2020-01-08 16:19:30 +00:00
--include=dpkg,busybox,libc-bin,base-files,base-passwd,debianutils
2019-03-27 10:44:45 +00:00
--setup-hook='mkdir -p "$1/bin"'
2020-01-08 16:19:30 +00:00
--setup-hook='for p in awk cat chmod chown cp diff echo env grep less ln
2021-05-31 14:33:34 +00:00
mkdir mount rm rmdir sed sh sleep sort touch uname mktemp; do
2020-01-08 16:19:30 +00:00
ln -s busybox "$1/bin/$p"; done'
2019-03-27 10:44:45 +00:00
--setup-hook='echo root:x:0:0:root:/root:/bin/sh > "$1/etc/passwd"'
--setup-hook='printf "root:x:0:\nmail:x:8:\nutmp:x:43:\n" > "$1/etc/group"'
2022-07-26 16:30:52 +00:00
For a more elegant way for setting up a sub-essential busybox-based chroot, see
the B<--hook-dir> option below.
2021-05-31 14:33:34 +00:00
2020-06-23 21:14:37 +00:00
=item B<--extract-hook>=I<command>
Execute arbitrary I<command>s after the Essential:yes packages have been
extracted but before installing them. See section B<HOOKS> for more
information.
Example: Install busybox symlinks
2022-03-07 10:27:10 +00:00
--extract-hook='chroot "$1" /bin/busybox --install -s'
2020-06-23 21:14:37 +00:00
2019-02-20 12:32:49 +00:00
=item B<--essential-hook>=I<command>
Execute arbitrary I<command>s after the Essential:yes packages have been
installed but before installing the remaining packages. The hook is not
2019-12-09 09:40:51 +00:00
executed for the B<extract> and B<custom> variants. See section B<HOOKS> for
more information.
2019-02-20 12:32:49 +00:00
2019-03-25 13:50:41 +00:00
Example: Enable unattended upgrades
2019-02-20 12:32:49 +00:00
2020-01-08 16:19:30 +00:00
--essential-hook='echo unattended-upgrades
unattended-upgrades/enable_auto_updates boolean true
| chroot "$1" debconf-set-selections'
2019-03-25 13:50:41 +00:00
Example: Select Europe/Berlin as the timezone
2020-01-08 16:19:30 +00:00
--essential-hook='echo tzdata tzdata/Areas select Europe
| chroot "$1" debconf-set-selections'
--essential-hook='echo tzdata tzdata/Zones/Europe select Berlin
| chroot "$1" debconf-set-selections'
2019-02-20 12:32:49 +00:00
=item B<--customize-hook>=I<command>
2019-01-08 10:28:27 +00:00
2019-02-20 12:32:49 +00:00
Execute arbitrary I<command>s after the chroot is set up and all packages got
2019-12-09 09:40:51 +00:00
installed but before final cleanup actions are carried out. See section
B<HOOKS> for more information.
2019-01-08 10:28:27 +00:00
2023-10-23 09:56:15 +00:00
Example: Add a user without a password
2019-01-08 10:28:27 +00:00
2020-01-08 16:19:30 +00:00
--customize-hook='chroot "$1" useradd --home-dir /home/user
--create-home user'
2019-02-20 12:32:49 +00:00
--customize-hook='chroot "$1" passwd --delete user'
2023-10-23 09:56:15 +00:00
Example: set up F</etc/hostname> and F</etc/hosts>
2019-02-20 12:32:49 +00:00
--customize-hook='echo host > "$1/etc/hostname"'
2019-09-04 13:45:43 +00:00
--customize-hook='echo "127.0.0.1 localhost host" > "$1/etc/hosts"'
2019-01-08 10:28:27 +00:00
2023-10-23 09:55:35 +00:00
Example: to mimic B<debootstrap> behaviour, B<mmdebstrap> copies from the host.
Remove them in a B<--customize-hook> to make the chroot reproducible across
multiple hosts:
--customize-hook='rm "$1"/etc/resolv.conf'
--customize-hook='rm "$1"/etc/hostname'
2020-08-15 22:50:46 +00:00
=item B<--hook-directory>=I<directory>
Execute scripts in I<directory> with filenames starting with C<setup>,
C<extract>, C<essential> or C<customize>, at the respective stages during an
mmdebstrap run. The files must be marked executable. Their extension is
ignored. Subdirectories are not traversed. This option is a short-hand for
specifying the remaining four hook options individually for each file in the
directory. If there is more than one script for a stage, then they are added
alphabetically. This is useful in cases where a user wants to run the same
hooks frequently. For example, given a directory C<./hooks> with two scripts
2020-11-13 05:27:12 +00:00
C<setup01-foo.sh> and C<setup02-bar.sh>, this call:
2020-08-15 22:50:46 +00:00
mmdebstrap --customize=./scriptA --hook-dir=./hooks --setup=./scriptB
is equivalent to this call:
mmdebstrap --customize=./scriptA --setup=./hooks/setup01-foo.sh \
--setup=./hooks/setup02-bar.sh --setup=./scriptB
The option can be specified multiple times and scripts are added to the
respective hooks in the order the options are given on the command line. Thus,
if the scripts in two directories depend upon each other, the scripts must be
placed into a common directory and be named such that they get added in the
correct order.
2021-05-31 14:33:34 +00:00
Example 1: Run mmdebstrap with eatmydata
2020-08-15 22:50:46 +00:00
--hook-dir=/usr/share/mmdebstrap/hooks/eatmydata
2021-05-31 14:33:34 +00:00
Example 2: Setup chroot for installing a sub-essential busybox-based chroot
--hook-dir=/usr/share/mmdebstrap/hooks/busybox
2022-07-26 16:30:52 +00:00
Example 3: Automatically mount all directories referenced by C<file://> mirrors
2022-05-24 11:16:24 +00:00
into the chroot
--hook-dir=/usr/share/mmdebstrap/hooks/file-mirror-automount
2020-06-23 21:14:37 +00:00
=item B<--skip>=I<stage>[,I<stage>,...]
B<mmdebstrap> tries hard to implement sensible defaults and will try to stop
you before shooting yourself in the foot. This option is for when you are sure
you know what you are doing and allows one to skip certain actions and safety
2020-11-28 13:30:50 +00:00
checks. See section B<OPERATION> for a list of possible arguments and their
2022-09-02 21:23:53 +00:00
context. The option can be specified multiple times or you can separate
multiple values by comma or whitespace.
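Example: skip the creation of the F<./dev> entries in the output tarball:
    mmdebstrap --skip=output/dev unstable debian-unstable.tar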
2018-09-18 09:20:24 +00:00
2019-02-20 17:02:32 +00:00
=item B<-q,--quiet>, B<-s,--silent>
2019-01-20 09:39:01 +00:00
2019-02-20 17:17:00 +00:00
Do not write anything to standard error. If used together with B<--verbose> or
B<--debug>, only the last option will take effect.
2019-01-20 09:39:01 +00:00
2019-02-20 17:02:32 +00:00
=item B<-v,--verbose>
2019-01-20 09:39:01 +00:00
Instead of progress bars, write the dpkg and apt output directly to standard
2019-02-20 17:17:00 +00:00
error. If used together with B<--quiet> or B<--debug>, only the last option
will take effect.
2019-01-20 09:39:01 +00:00
2019-02-20 17:02:32 +00:00
=item B<-d,--debug>
2019-01-20 09:39:01 +00:00
In addition to the output produced by B<--verbose>, write detailed debugging
2019-02-20 17:17:00 +00:00
information to standard error. Errors will print a backtrace. If used together
with B<--quiet> or B<--verbose>, only the last option will take effect.
2019-02-23 07:43:15 +00:00
=item B<--logfile>=I<filename>
Instead of writing status information to standard error, write it into the
file given by I<filename>.
2019-01-20 09:39:01 +00:00
2018-09-18 09:20:24 +00:00
=back
=head1 MODES
Creating a Debian chroot requires not only permissions for running chroot but
also the ability to create files owned by the superuser. The selected mode
decides which way this is achieved.
=over 8
=item B<auto>
This mode automatically selects a fitting mode. If the effective user id is the
one of the superuser, then the B<sudo> mode is chosen. Otherwise, the
2023-03-16 07:14:39 +00:00
B<unshare> mode is picked if F</etc/subuid> and F</etc/subgid> are set up
correctly. Should that not be the case and if the fakechroot binary exists, the
B<fakechroot> mode is chosen.
2018-09-18 09:20:24 +00:00
=item B<sudo>, B<root>
2019-02-28 10:54:03 +00:00
This mode directly executes chroot and is the same mode of operation as is
used by debootstrap. It is the only mode that can directly create a directory
chroot with the right permissions. If the chroot directory is not accessible
2021-01-13 17:40:24 +00:00
by the _apt user, then apt sandboxing will be automatically disabled. This mode
2023-09-27 06:00:15 +00:00
needs to be able to mount and thus requires C<CAP_SYS_ADMIN>.
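Example: create a directory chroot with correct ownership information as the
superuser:
    $ sudo mmdebstrap --variant=apt unstable ./debian-rootfs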
2018-09-18 09:20:24 +00:00
=item B<unshare>
2023-02-14 21:00:19 +00:00
When used as a normal (not root) user, this mode uses Linux user namespaces to
allow unprivileged use of chroot and creation of files that appear to be owned
by the superuser inside the unshared namespace. A tarball created in this mode
will be bit-by-bit identical to a tarball created with the B<root> mode. With
this mode, the only binaries that will run as the root user will be
2023-10-28 13:24:51 +00:00
L<newuidmap(1)> and L<newgidmap(1)> via their setuid bit. Running those
2023-02-14 21:00:19 +00:00
successfully requires F</etc/subuid> and F</etc/subgid> to have an entry for
2023-10-28 13:24:51 +00:00
your username. This entry was usually created by L<adduser(8)> already.
2023-02-14 21:00:19 +00:00
The unshared user will not automatically have access to the same files as you
do. This is intentional and an additional security against unintended changes
to your files that could theoretically result from running B<mmdebstrap> and
package maintainer scripts. To copy files in and out of the chroot, either use
globally readable or writable directories or use special hooks like B<copy-in>
and B<copy-out>.
Besides the user namespace, the mount, pid (process ids), uts (hostname) and
2023-10-28 13:24:51 +00:00
ipc namespaces will be unshared as well. See the man pages of L<namespaces(7)>
and L<unshare(2)> as well as the manual pages they are linking to.
2018-09-18 09:20:24 +00:00
2020-04-12 07:10:30 +00:00
A directory chroot created with this mode will end up with wrong ownership
2023-02-14 21:00:19 +00:00
information (seen from outside the unshared user namespace). For correct
ownership information, the directory must be accessed from a user namespace
with the right subuid/subgid offset, like so:
2020-04-12 07:10:30 +00:00
$ lxc-usernsexec -- lxc-unshare -s 'MOUNT|PID|UTSNAME|IPC' -- \
> /usr/sbin/chroot ./debian-rootfs /bin/bash
Or without LXC:
$ mmdebstrap --unshare-helper /usr/sbin/chroot ./debian-rootfs /bin/bash
2024-03-23 21:50:34 +00:00
Or without mmdebstrap:
$ unshare --map-auto --map-user=65536 --map-group=65536 --keep-caps -- \
> /usr/sbin/chroot ./debian-rootfs /bin/bash
The above uses C<--map-auto> to map the block of user/group ids for the
effective user/group to a block starting at user/group ID 0. We also want to
map the current effective user/group ID into the subuid/subgid range using
C<--map-user> and C<--map-group>, respectively. But if that uid/gid overlaps
with the respective range, a "hole" will be removed from the mapping and the
remaining uid/gid values will get shifted. Thus, we map the current effective
user/group ID to the highest possible uid/gid, putting them at the end. Since
that means that the user/group will be "nobody" and not "root" inside the
namespace, C<--keep-caps> propagates permitted capabilities into the ambient set
and thus gives the user C<CAP_DAC_OVERRIDE> and other capabilities that it
would've had.
Lastly, if you don't mind using superuser privileges and have systemd-nspawn
2020-04-12 07:10:30 +00:00
available and you know your subuid/subgid offset (100000 in this example):
$ sudo systemd-nspawn --private-users=100000 \
> --directory=./debian-rootfs /bin/bash
2024-01-09 09:29:53 +00:00
A directory created in B<unshare> mode cannot be removed the normal way.
Instead, use something like this:
$ unshare --map-root-user --map-auto rm -rf ./debian-rootfs
2024-03-23 21:50:34 +00:00
The above L<unshare(1)> command will map user and group ids into different
ranges compared to the mapping used by B<mmdebstrap> (effectively shifting them
one up) but it will provide the required capabilities for the removal
operation.
2021-01-13 15:05:57 +00:00
If this mode is used as the root user, the user namespace is not unshared (but
the mount namespace and others still are) and created directories will have
correct ownership information. This is also useful in cases where the root user
wants the benefits of an unshared mount namespace to prevent accidentally
messing up the system.
2018-09-18 09:20:24 +00:00
=item B<fakeroot>, B<fakechroot>
2019-02-23 07:49:19 +00:00
This mode will exec B<mmdebstrap> again under C<fakechroot fakeroot>. A
2020-01-16 17:03:13 +00:00
directory chroot created with this mode will end up with wrong permissions. If
you need a directory then run B<mmdebstrap> under C<fakechroot fakeroot -s
fakeroot.env> and use C<fakeroot.env> later when entering the chroot with
C<fakechroot fakeroot -i fakeroot.env chroot ...>. This mode will not work if
maintainer scripts are unable to handle C<LD_PRELOAD> correctly like the
package B<initramfs-tools> until version 0.132. This mode will also not work
2021-09-21 12:17:27 +00:00
with a different libc inside the chroot than on the outside. See the section
2023-10-28 13:24:51 +00:00
B<LIMITATIONS> in L<fakechroot(1)>.
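Example: a sketch, following the description above, of creating a directory
chroot as a normal user and entering it later with the recorded ownership
information (F<fakeroot.env> is an arbitrary file name):
    $ fakechroot fakeroot -s fakeroot.env mmdebstrap --variant=apt \
    >     unstable ./debian-rootfs
    $ fakechroot fakeroot -i fakeroot.env chroot ./debian-rootfs dpkg -l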
2018-09-18 09:20:24 +00:00
2018-10-22 15:05:56 +00:00
=item B<chrootless>
Uses the dpkg option C<--force-script-chrootless> to install packages into
2020-05-01 21:37:38 +00:00
I<TARGET> without dpkg and apt inside I<TARGET> but using apt and dpkg from the
machine running B<mmdebstrap>. Maintainer scripts are run without chrooting
2020-04-11 21:10:13 +00:00
into I<TARGET> and rely on their dependencies being installed on the machine
2021-02-23 11:50:18 +00:00
running B<mmdebstrap>. Only very few packages support this mode. Namely, as of
2022-09-05 04:25:24 +00:00
2022, not all essential packages support it. See
https://wiki.debian.org/Teams/Dpkg/Spec/InstallBootstrap or the
2021-02-23 11:50:18 +00:00
dpkg-root-support usertag of debian-dpkg@lists.debian.org in the Debian bug
tracking system. B<WARNING>: if this option is used carelessly with packages
that do not support C<DPKG_ROOT>, this mode can result in undesired changes to
the system running B<mmdebstrap> because maintainer-scripts will be run without
2024-01-09 09:30:50 +00:00
L<chroot(1)>. Make sure to run this mode without superuser privileges and/or
2024-01-25 08:49:51 +00:00
inside a throw-away chroot environment like so:
2018-10-22 15:05:56 +00:00
2024-01-09 09:30:50 +00:00
mmdebstrap --variant=apt --include=mmdebstrap \
--customize-hook='chroot "$1" mmdebstrap --mode=chrootless
--variant=apt unstable chrootless.tar' \
--customize-hook='copy-out chrootless.tar .' unstable /dev/null
2018-12-06 16:15:56 +00:00
2018-09-18 09:20:24 +00:00
=back
=head1 VARIANTS
2018-10-22 15:05:56 +00:00
All package sets also include the direct and indirect hard dependencies (but
not recommends) of the selected package sets. The variants B<minbase>,
B<buildd> and B<->, resemble the package sets that debootstrap would install
2021-08-17 08:29:56 +00:00
with the same I<--variant> argument. The release with a name matching the
2021-11-09 06:31:56 +00:00
I<SUITE> argument as well as the native architecture will be used to determine
the C<Essential:yes> and priority values. To select packages with matching
priority from any suite, specify the empty string for I<SUITE>. The default
variant is B<debootstrap>.
2018-09-18 09:20:24 +00:00
=over 8
2018-10-22 15:05:56 +00:00
=item B<extract>
Installs nothing by default (not even C<Essential:yes> packages). Packages
given by the C<--include> option are extracted but will not be installed.
=item B<custom>
Installs nothing by default (not even C<Essential:yes> packages). Packages
given by the C<--include> option will be installed. If another mode than
B<chrootless> was selected and dpkg was not part of the included package set,
then this variant will fail because it cannot configure the packages.
2018-09-18 09:20:24 +00:00
=item B<essential>
2024-01-09 09:32:40 +00:00
C<Essential:yes> packages. If I<SUITE> is a non-empty string, then only
packages from the archive with suite or codename matching I<SUITE> will be
considered for selection of C<Essential:yes> packages.
2018-09-18 09:20:24 +00:00
=item B<apt>
2023-01-20 06:08:55 +00:00
The B<essential> set plus apt. This variant uses the fact that B<apt> treats
itself as essential and thus running C<apt-get dist-upgrade> without any
packages installed will install the B<essential> set plus B<apt>. If you just
want B<essential> and B<apt>, then this variant is faster than using the
B<essential> variant and adding B<apt> via C<--include> because all packages
get installed at once. The downside of this variant is that if an B<essential>
package happens not to be installable, it will just get ignored without
throwing an error.
2018-09-18 09:20:24 +00:00
2024-01-25 08:50:13 +00:00
=item B<buildd>
2018-09-18 09:20:24 +00:00
2024-01-25 08:50:13 +00:00
The B<essential> set plus apt and build-essential.
2021-11-09 06:31:56 +00:00
It is roughly equivalent to running mmdebstrap with
2024-01-25 08:50:13 +00:00
--variant=essential --include="apt,build-essential"
2018-09-18 09:20:24 +00:00
2024-01-25 08:50:13 +00:00
=item B<required>, B<minbase>
2018-09-18 09:20:24 +00:00
2024-01-25 08:50:13 +00:00
The B<essential> set plus all packages with Priority:required.
2021-11-09 06:31:56 +00:00
It is roughly equivalent to running mmdebstrap with
2024-01-25 08:50:13 +00:00
--variant=essential --include="?priority(required)"
2018-09-18 09:20:24 +00:00
=item B<important>, B<debootstrap>, B<->
2020-01-08 16:19:30 +00:00
The B<required> set plus all packages with Priority:important. This is the
2021-11-09 06:31:56 +00:00
default of debootstrap. It is roughly equivalent to running mmdebstrap with
--variant=essential --include="~prequired|~pimportant"
2018-09-18 09:20:24 +00:00
=item B<standard>
The B<important> set plus all packages with Priority:standard.
2021-11-09 06:31:56 +00:00
It is roughly equivalent to running mmdebstrap with
--variant=essential --include="~prequired|~pimportant|~pstandard"
2018-09-18 09:20:24 +00:00
=back
2020-04-09 16:33:05 +00:00
=head1 FORMATS
2020-05-01 21:37:38 +00:00
The output format of B<mmdebstrap> is specified using the B<--format> option.
2020-04-09 16:33:05 +00:00
Without that option the default format is I<auto>. The following formats exist:
=over 8
=item B<auto>
When selecting this format (the default), the actual format will be inferred
2020-04-12 07:11:21 +00:00
from the I<TARGET> positional argument. If I<TARGET> was not specified, then
2021-05-04 13:01:25 +00:00
the B<tar> format will be chosen. If I<TARGET> happens to be F</dev/null> or if
standard output is F</dev/null>, then the B<null> format will be chosen. If
I<TARGET> is an existing directory and is not equal to C<->, then the
B<directory> format will be chosen. If I<TARGET> ends with C<.tar> or with one
of the filename extensions listed in the section B<COMPRESSION>, or if
I<TARGET> equals C<->, or if I<TARGET> is a named pipe (fifo) or if I<TARGET>
is a character special file, then the B<tar> format will be chosen. If
I<TARGET> ends with C<.squashfs> or C<.sqfs>, then the B<squashfs> format will
2023-10-28 13:24:51 +00:00
be chosen. If I<TARGET> ends with C<.ext2> then the B<ext2> format will be
2024-05-12 15:16:51 +00:00
chosen. If I<TARGET> ends with C<.ext4> then the B<ext4> format will be
2021-05-04 13:01:25 +00:00
chosen. If none of these conditions apply, the B<directory> format will be
chosen.
2020-04-09 16:33:05 +00:00
=item B<directory>, B<dir>
A chroot directory will be created in I<TARGET>. If the directory already
exists, it must either be empty or only contain an empty C<lost+found>
directory. The special I<TARGET> C<-> does not work with this format because a
directory cannot be written to standard output. If you need your directory to be
named C<->, then just explicitly pass the relative path to it like F<./->. If
a directory is chosen as output in any other mode than B<sudo>, then its
contents will have wrong ownership information and special device files will be
2020-04-12 07:11:35 +00:00
missing. Refer to the section B<MODES> for more information.
2020-04-09 16:33:05 +00:00
=item B<tar>
A temporary chroot directory will be created in C<$TMPDIR> or F</tmp> if
C<$TMPDIR> is not set. A tarball of that directory will be stored in I<TARGET>
or sent to standard output if I<TARGET> was omitted or if I<TARGET> equals
C<->. If I<TARGET> ends with one of the filename extensions listed in the
section B<COMPRESSION>, then a compressed tarball will be created. The tarball
will be in POSIX 1003.1-2001 (pax) format and will contain extended attributes.
To preserve the extended attributes, you have to pass B<--xattrs
--xattrs-include='*'> to tar when extracting the tarball.
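Example: extract a tarball created by B<mmdebstrap> while preserving its
extended attributes (extraction will usually require superuser privileges to
restore ownership):
    mkdir ./debian-rootfs
    sudo tar --xattrs --xattrs-include='*' -C ./debian-rootfs \
        -xf debian-unstable.tar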
=item B<squashfs>, B<sqfs>
A temporary chroot directory will be created in C<$TMPDIR> or F</tmp> if
C<$TMPDIR> is not set. A tarball of that directory will be piped to the
C<tar2sqfs> utility, which will create an xz compressed squashfs image with a
blocksize of 1048576 bytes in I<TARGET>. The special I<TARGET> C<-> does not
work with this format because C<tar2sqfs> can only write to a regular file. If
you need your squashfs image to be named C<->, then just explicitly pass the
2021-05-07 07:39:40 +00:00
relative path to it like F<./->. The C<tar2sqfs> tool only supports a limited
set of extended attribute prefixes. Therefore, extended attributes are disabled
in the resulting image. If you need them, create a tarball first and remove the
extended attributes from its pax headers. Refer to the B<EXAMPLES> section for
how to achieve this.
2020-04-09 16:33:05 +00:00
=item B<ext2>
A temporary chroot directory will be created in C<$TMPDIR> or F</tmp> if
C<$TMPDIR> is not set. A tarball of that directory will be piped to the
C<genext2fs> utility, which will create an ext2 image that will be
approximately 90% full in I<TARGET>. The special I<TARGET> C<-> does not work
with this format because C<genext2fs> can only write to a regular file. If you
need your ext2 image to be named C<->, then just explicitly pass the relative path
2020-05-01 21:44:12 +00:00
to it like F<./->. To convert the result to an ext3 image, use C<tune2fs -O
has_journal TARGET> and to convert it to ext4, use C<tune2fs -O
2024-05-12 15:16:51 +00:00
extents,uninit_bg,dir_index,has_journal TARGET>.
B<CAUTION>: the ext2 format does not support timestamps beyond 2038 January 19,
does not support sub-second precision timestamps and does not support extended
attributes. Its inode size of 128 prevents adding these features with tune2fs
later on.
=item B<ext4>
A temporary chroot directory will be created in C<$TMPDIR> or F</tmp> if
C<$TMPDIR> is not set. A tarball of that directory will be piped to the
C<mke2fs> utility, which will create an ext4 image that will be approximately
90% full in I<TARGET>. The special I<TARGET> C<-> does not work with this
format because C<mke2fs> can only write to a regular file. If you need your
ext4 image to be named C<->, then just explicitly pass the relative path to it
like F<./->. If C<SOURCE_DATE_EPOCH> is set, the filesystem UUID and hash_seed
will be set to a UUID derived from SOURCE_DATE_EPOCH to create reproducible
images.
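Example: create a reproducible ext4 image by pinning C<SOURCE_DATE_EPOCH> (the
timestamp value is arbitrary):
    SOURCE_DATE_EPOCH=1700000000 mmdebstrap unstable debian-unstable.ext4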
2020-04-09 16:33:05 +00:00
2021-03-25 06:04:14 +00:00
=item B<null>
A temporary chroot directory will be created in C<$TMPDIR> or F</tmp> if
C<$TMPDIR> is not set. After the bootstrap is complete, the temporary chroot
will be deleted without being part of the output. This is most useful when the
desired artifact is generated inside the chroot and it is transferred using
2021-05-04 13:01:25 +00:00
special hooks such as B<sync-out>. It is also useful in situations where only
the exit code or stdout or stderr of a process run in a hook is of interest.
2021-03-25 06:04:14 +00:00
2020-04-09 16:33:05 +00:00
=back
2019-12-09 09:40:51 +00:00
=head1 HOOKS
This section describes properties of the hook options B<--setup-hook>,
2020-06-23 21:14:37 +00:00
B<--extract-hook>, B<--essential-hook> and B<--customize-hook> which are common
2020-08-18 07:38:22 +00:00
to all four of them. Any information specific to each hook is documented under
2020-06-23 21:14:37 +00:00
the specific hook options in the section B<OPTIONS>.
2019-12-09 09:40:51 +00:00
The options can be specified multiple times and the commands are executed in
2023-10-23 09:55:53 +00:00
the order in which they are given on the command line. There are four different
types of hook option arguments. If the argument passed to the hook option
starts with C<copy-in>, C<copy-out>, C<tar-in>, C<tar-out>, C<upload> or
2020-03-07 01:06:11 +00:00
C<download> followed by a space, then the hook is interpreted as a special
2019-12-09 09:40:51 +00:00
hook. Otherwise, if I<command> is an existing executable file from C<$PATH> or
if I<command> does not contain any shell metacharacters, then I<command> is
directly exec-ed with the path to the chroot directory passed as the first
argument. Otherwise, I<command> is executed under I<sh> and the chroot
2022-05-24 02:14:08 +00:00
directory can be accessed via I<$1>. Most environment variables set by
B<mmdebstrap> (like C<DEBIAN_FRONTEND>, C<LC_ALL> and C<PATH>) are preserved.
Most notably, C<APT_CONFIG> is being unset. If you need the path to
C<APT_CONFIG> as written by mmdebstrap it can be found in the
C<MMDEBSTRAP_APT_CONFIG> environment variable. All environment variables set by
the user are preserved, except for C<TMPDIR> which is cleared. See section
B<TMPDIR>. Furthermore, C<MMDEBSTRAP_MODE> will store the mode set by
2023-10-23 08:35:07 +00:00
B<--mode>, C<MMDEBSTRAP_FORMAT> stores the format chosen by B<--format>,
C<MMDEBSTRAP_HOOK> stores which hook is currently run (setup, extract,
essential, customize), C<MMDEBSTRAP_ARGV0> stores the name of the binary with
which B<mmdebstrap> was executed and C<MMDEBSTRAP_VERBOSITY> stores the
numerical verbosity level (0 for no output, 1 for normal, 2 for verbose and 3
for debug output). The C<MMDEBSTRAP_INCLUDE> variable stores the list of
packages, apt patterns or file paths given by the B<--include> option,
separated by a comma and with commas and percent signs in the option values
urlencoded. If a I<SUITE> name was supplied, it is stored in C<MMDEBSTRAP_SUITE>.
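For example, a purely illustrative customize hook can inspect some of these
variables and print them to standard error:
$ mmdebstrap --customize-hook='echo "$MMDEBSTRAP_HOOK hook in $MMDEBSTRAP_MODE mode" >&2' \
unstable chroot.tar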
In special hooks, the paths inside the chroot are relative to the root
directory of the chroot. The path on the outside is relative to the current
directory of the original B<mmdebstrap> invocation. The path inside the chroot
must already exist. Paths outside the chroot are created as necessary.
In B<fakechroot> mode, C<tar>, or C<sh> and C<cat>, have to be run inside the
chroot; otherwise, symlinks will be resolved wrongly and/or permissions will be
off. This means that the special hooks might fail in B<fakechroot> mode for the
B<setup> hook or for the B<extract> and B<custom> variants if no C<tar>, or
C<sh> and C<cat>, is available inside the chroot.
=over 8
=item B<copy-out> I<pathinside> [I<pathinside> ...] I<pathoutside>
Recursively copies one or more files and directories from I<pathinside> inside
the chroot to I<pathoutside> outside of the chroot.
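For example, to copy the dpkg log out of the chroot into a directory F<./log>
outside (both paths are only illustrative):
$ mmdebstrap --customize-hook='copy-out /var/log/dpkg.log ./log' \
unstable chroot.tar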
=item B<copy-in> I<pathoutside> [I<pathoutside> ...] I<pathinside>
Recursively copies one or more files and directories from outside the chroot,
placing them into I<pathinside> inside of the chroot.
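For example, to place a local configuration file into F</etc> inside the chroot
(F<./my.conf> is just a placeholder name):
$ mmdebstrap --customize-hook='copy-in ./my.conf /etc' unstable chroot.tar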
=item B<sync-out> I<pathinside> I<pathoutside>
Recursively copy everything inside I<pathinside> inside the chroot into
I<pathoutside>. In contrast to B<copy-out>, this command synchronizes the
content of I<pathinside> with the content of I<pathoutside> without deleting
anything from I<pathoutside> but overwriting content as necessary. Use this
command over B<copy-out> if you don't want to create a new directory outside
the chroot but only update the content of an existing directory.
=item B<sync-in> I<pathoutside> I<pathinside>
Recursively copy everything inside I<pathoutside> into I<pathinside> inside the
chroot. In contrast to B<copy-in>, this command synchronizes the content of
I<pathoutside> with the content of I<pathinside> without deleting anything from
I<pathinside> but overwriting content as necessary. Use this command over
B<copy-in> if you don't want to create a new directory inside the chroot but
only update the content of an existing directory.
=item B<tar-in> I<outside.tar> I<pathinside>
Unpacks a tarball I<outside.tar> from outside the chroot into a certain
location I<pathinside> inside the chroot. In B<unshare> mode, device nodes
cannot be created. To ignore device nodes in tarballs, use
B<--skip=tar-in/mknod>.
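For example, an overlay tarball prepared outside the chroot (F<overlay.tar> is
a purely hypothetical file) could be unpacked over the chroot like this:
$ mmdebstrap --customize-hook='tar-in overlay.tar /' unstable chroot.tar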
=item B<tar-out> I<pathinside> I<outside.tar>
Packs the path I<pathinside> from inside the chroot into a tarball, placing it
into a certain location I<outside.tar> outside the chroot. To emulate behaviour
of C<cp> and to provide control over the path which gets put into the tarball,
a C<chdir()> is performed to the C<dirname()> of I<pathinside> and then the
C<basename()> of I<pathinside> is packaged as a tarball. For example, if
I<pathinside> is C</boot/.> then first a C<chdir()> into C</boot> will be
performed before packing up the contents of C<.> inside C</boot>.
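For example, the following sketch packs the contents of F</boot> (kernel and
initrd when a kernel package is included) into F<boot.tar> outside the chroot
without keeping the chroot itself:
$ mmdebstrap --variant=apt --include=linux-image-amd64 \
--customize-hook='tar-out /boot/. boot.tar' unstable /dev/null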
=item B<download> I<fileinside> I<fileoutside>
Copy the file given by I<fileinside> from inside the chroot to outside the
chroot as I<fileoutside>. In contrast to B<copy-out>, this command only
handles files and not directories. To copy a directory recursively out of the
chroot, use B<copy-out> or B<tar-out>. Its advantage is that, by being able to
specify the full path on the outside, including the filename, the file on the
outside can have a different name from the file on the inside. In contrast to
B<copy-out> and B<tar-out>, this command follows symlinks.
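For example, to retrieve the sources.list that was written into the chroot
under a different name outside (the outside name is arbitrary):
$ mmdebstrap --customize-hook='download /etc/apt/sources.list ./sources.list.chroot' \
unstable chroot.tar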
=item B<upload> I<fileoutside> I<fileinside>
Copy the file given by I<fileoutside> from outside the chroot to inside the
chroot as I<fileinside>. In contrast to B<copy-in>, this command only
handles files and not directories. To copy a directory recursively into the
chroot, use B<copy-in> or B<tar-in>. Its advantage is that, by being able to
specify the full path on the inside, including the filename, the file on the
inside can have a different name from the file on the outside. In contrast to
B<copy-in> and B<tar-in>, permission and ownership information will not be
retained.
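For example, to install a local script under a new name inside the chroot and
then mark it executable (the file names are only illustrative):
$ mmdebstrap --customize-hook='upload ./myscript.sh /usr/local/bin/myscript' \
--customize-hook='chroot "$1" chmod 755 /usr/local/bin/myscript' \
unstable chroot.tar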
=back
=head1 OPERATION
This section gives an overview of the different steps to create a chroot. At
its core, what B<mmdebstrap> does can be put into a 14-line shell script:
mkdir -p "$2/etc/apt" "$2/var/cache" "$2/var/lib"
cat << END > "$2/apt.conf"
Apt::Architecture "$(dpkg --print-architecture)";
Apt::Architectures "$(dpkg --print-architecture)";
Dir "$(cd "$2" && pwd)";
Dir::Etc::Trusted "$(eval "$(apt-config shell v Dir::Etc::Trusted/f)"; printf "$v")";
Dir::Etc::TrustedParts "$(eval "$(apt-config shell v Dir::Etc::TrustedParts/d)"; printf "$v")";
END
echo "deb http://deb.debian.org/debian/ $1 main" > "$2/etc/apt/sources.list"
APT_CONFIG="$2/apt.conf" apt-get update
APT_CONFIG="$2/apt.conf" apt-get --yes --download-only install '?essential'
for f in "$2"/var/cache/apt/archives/*.deb; do dpkg-deb --extract "$f" "$2"; done
chroot "$2" sh -c "dpkg --install --force-depends /var/cache/apt/archives/*.deb"
The additional complexity of B<mmdebstrap> is to support operation without
superuser privileges, bit-by-bit reproducible output, hooks and foreign
architecture support.
The remainder of this section explains what B<mmdebstrap> does step-by-step.
=over 8
=item B<check>
Upon startup, several checks are carried out, like:
=over 4
=item * whether required utilities (apt, dpkg, tar) are installed
=item * which mode to use and whether prerequisites are met
=item * whether chrootless mode is run as root (without fakeroot) outside of a chroot, which is not allowed. This check can be disabled using B<--skip=check/chrootless>
=item * whether the requested architecture can be executed using qemu binfmt_misc support. This requires arch-test and can be disabled using B<--skip=check/qemu>
=item * how the apt sources can be assembled from I<SUITE>, I<MIRROR> and B<--components> and/or from standard input as deb822 or one-line format and whether the required GPG keys exist.
=item * which output format to pick depending on the B<--format> argument or name of I<TARGET> or its type.
=item * whether the output directory is empty. This check can be disabled using B<--skip=check/empty>
=item * whether adding a C<signed-by> to C<apt/sources.list> is necessary. This requires gpg and can be disabled using B<--skip=check/signed-by>
=back
=item B<setup>
The following tasks are carried out unless B<--skip=setup> is used:
=over 4
=item * create required directories
=item * write out the temporary apt config file
=item * populate F</etc/apt/apt.conf.d/99mmdebstrap> and F</etc/dpkg/dpkg.cfg.d/99mmdebstrap> with config options from B<--aptopt> and B<--dpkgopt>, respectively
=item * write out F</etc/apt/sources.list>
=item * copy over F</etc/resolv.conf> and F</etc/hostname>
=item * populate F</dev> if mknod is possible
=back
=item B<setup-hook>
Run B<--setup-hook> options and all F<setup*> scripts in B<--hook-dir>.
=item B<update>
Runs C<apt-get update> using the temporary apt configuration file created in
the B<setup> step. This can be disabled using B<--skip=update>.
=item B<download>
In the B<extract> and B<custom> variants, C<apt-get install> is used to
download all the packages requested via the B<--include> option. The B<apt>
variant uses the fact that libapt treats the C<apt> package as implicitly
essential to download only all C<Essential:yes> packages plus apt using
C<apt-get dist-upgrade>. In the remaining variants, all Packages files
downloaded by the B<update> step are inspected to find the C<Essential:yes>
package set as well as all packages of the required priority. If I<SUITE> is a
non-empty string, then only packages from the archive with suite or codename
matching I<SUITE> will be considered for selection of C<Essential:yes>
packages.
=item B<mount>
Mount relevant device nodes, F</proc> and F</sys> into the chroot and unmount
them afterwards. This can be disabled using B<--skip=chroot/mount> or
specifically by B<--skip=chroot/mount/dev>, B<--skip=chroot/mount/proc> and
B<--skip=chroot/mount/sys>, respectively. B<mmdebstrap> will disable running
services by temporarily moving F</usr/sbin/policy-rc.d> and
F</usr/sbin/start-stop-daemon> if they exist. This can be disabled with
B<--skip=chroot/policy-rc.d> and B<--skip=chroot/start-stop-daemon>,
respectively.
=item B<extract>
Extract the downloaded packages into the rootfs.
=item B<prepare>
In B<fakechroot> mode, the environment variable C<LD_LIBRARY_PATH> will be set
up correctly. For foreign B<fakechroot> environments, C<LD_LIBRARY_PATH> and
C<QEMU_LD_PREFIX> are set up accordingly. This step is not carried out in
B<extract> mode and neither for the B<chrootless> variant.
=item B<extract-hook>
Run B<--extract-hook> options and all F<extract*> scripts in B<--hook-dir>.
=item B<essential>
Uses C<dpkg --install> to properly install all packages that have been
extracted before. Removes all packages downloaded in the B<download> step,
except those which were present in F</var/cache/apt/archives/> before (if any).
This can be disabled using B<--skip=essential/unlink>. This step is not carried
out in B<extract> mode.
=item B<essential-hook>
Run B<--essential-hook> options and all F<essential*> scripts in B<--hook-dir>.
This step is not carried out in B<extract> mode.
=item B<install>
Install the apt package into the chroot, if necessary, and then run apt from
inside the chroot to install all remaining packages. This step is not carried
out in B<extract> mode.
=item B<customize-hook>
Run B<--customize-hook> options and all F<customize*> scripts in B<--hook-dir>.
This step is not carried out in B<extract> mode.
=item B<unmount>
Unmount everything that was mounted during the B<mount> stage and restore
F</usr/sbin/policy-rc.d> and F</usr/sbin/start-stop-daemon> if necessary.
=item B<cleanup>
Performs cleanup tasks, unless B<--skip=cleanup> is used:
=over 4
=item * Removes the package lists (unless B<--skip=cleanup/apt/lists>) and apt cache (unless B<--skip=cleanup/apt/cache>). Both removals can be disabled by using B<--skip=cleanup/apt>.
=item * Remove all files that were put into the chroot for setup purposes, like F</etc/apt/apt.conf.d/00mmdebstrap> and the temporary apt config. This can be disabled using B<--skip=cleanup/mmdebstrap>.
=item * Remove files that make the result unreproducible and write the empty string to /etc/machine-id if it exists. This can be disabled using B<--skip=cleanup/reproducible>. Note that this will not remove files that make the result unreproducible on machines with differing F</etc/resolv.conf> or F</etc/hostname>. Use a B<--customize-hook> to make those two files reproducible across multiple hosts. See section C<SOURCE_DATE_EPOCH> for more information. The following files will be removed:
=over 4
=item * F</var/log/dpkg.log>
=item * F</var/log/apt/history.log>
=item * F</var/log/apt/term.log>
=item * F</var/log/alternatives.log>
=item * F</var/cache/ldconfig/aux-cache>
=item * F</var/log/apt/eipp.log.xz>
=item * F</var/lib/dbus/machine-id>
=back
=item * Remove everything in F</run> inside the chroot. This can be disabled using B<--skip=cleanup/run>.
=item * Remove everything in F</tmp> inside the chroot. This can be disabled using B<--skip=cleanup/tmp>.
=back
=item B<output>
For formats other than B<directory>, pack up the temporary chroot directory
into a tarball, ext2 image, ext4 image or squashfs image and delete the
temporary chroot directory.
If B<--skip=output/dev> is added, the resulting chroot will not contain the
device nodes, directories and symlinks that B<debootstrap> creates but just
an empty F</dev> as created by B<base-files>.
If B<--skip=output/mknod> is added, the resulting chroot will not contain
device nodes (neither block nor character special devices). This is useful
if the chroot tarball is to be extracted in environments where mknod does
not function, such as in unshared user namespaces.
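For example, both skip options could be combined to produce a tarball that
carries no device nodes at all (an illustrative invocation):
$ mmdebstrap --skip=output/dev,output/mknod unstable chroot.tar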
=back
=head1 EXAMPLES
Use like debootstrap:
$ sudo mmdebstrap unstable ./unstable-chroot
Without superuser privileges:
$ mmdebstrap unstable unstable-chroot.tar
With no command line arguments at all. The chroot content is entirely defined
by a sources.list file on standard input.
$ mmdebstrap < /etc/apt/sources.list > unstable-chroot.tar
Since the tarball is output on stdout, members of it can be excluded using tar
on-the-fly. For example, the F</dev> directory can be removed from the final
tarball in cases where it is to be extracted by a non-root user who cannot
create device nodes:
$ mmdebstrap unstable | tar --delete ./dev > unstable-chroot.tar
Create a tarball for use with C<sbuild --chroot-mode=unshare>:
$ mmdebstrap --variant=buildd unstable ~/.cache/sbuild/unstable-amd64.tar
Instead of a tarball, a squashfs image can be created:
$ mmdebstrap unstable unstable-chroot.squashfs
By default, B<mmdebstrap> runs B<tar2sqfs> with C<--no-skip --exportable
--compressor xz --block-size 1048576>. To choose a different set of options,
and to filter out all extended attributes not supported by B<tar2sqfs>, pipe
the output of B<mmdebstrap> into B<tar2sqfs> manually like so:
$ mmdebstrap unstable \
| mmtarfilter --pax-exclude='*' \
--pax-include='SCHILY.xattr.user.*' \
--pax-include='SCHILY.xattr.trusted.*' \
--pax-include='SCHILY.xattr.security.*' \
| tar2sqfs --quiet --no-skip --force --exportable --compressor xz \
--block-size 1048576 unstable-chroot.squashfs
By default, debootstrapping a stable distribution will add mirrors for security
and updates to the sources.list.
$ mmdebstrap stable stable-chroot.tar
If you don't want this behaviour, you can override it by manually specifying a
mirror in various different ways:
$ mmdebstrap stable stable-chroot.tar http://deb.debian.org/debian
$ mmdebstrap stable stable-chroot.tar "deb http://deb.debian.org/debian stable main"
$ mmdebstrap stable stable-chroot.tar /path/to/sources.list
$ mmdebstrap stable stable-chroot.tar - < /path/to/sources.list
Drop locales (but not the symlink to the locale name alias database),
translated man pages (but not the untranslated ones), and documentation
(but not copyright and Debian changelog).
$ mmdebstrap --variant=essential \
--dpkgopt='path-exclude=/usr/share/man/*' \
--dpkgopt='path-include=/usr/share/man/man[1-9]/*' \
--dpkgopt='path-exclude=/usr/share/locale/*' \
--dpkgopt='path-include=/usr/share/locale/locale.alias' \
--dpkgopt='path-exclude=/usr/share/doc/*' \
--dpkgopt='path-include=/usr/share/doc/*/copyright' \
--dpkgopt='path-include=/usr/share/doc/*/changelog.Debian.*' \
unstable debian-unstable.tar
Create a bootable USB stick that boots into a full Debian desktop:
$ mmdebstrap --aptopt='Apt::Install-Recommends "true"' --customize-hook \
'chroot "$1" adduser --comment user --disabled-password user' \
--customize-hook='echo 'user:live' | chroot "$1" chpasswd' \
--customize-hook='echo host > "$1/etc/hostname"' \
--customize-hook='echo "127.0.0.1 localhost host" > "$1/etc/hosts"' \
--include=linux-image-amd64,task-desktop unstable debian-unstable.tar
$ cat << END > extlinux.conf
> default linux
> timeout 0
>
> label linux
> kernel /vmlinuz
> append initrd=/initrd.img root=LABEL=rootfs
END
# You can use $(sudo blockdev --getsize64 /dev/sdXXX) to get the right
# image size for the target medium in bytes
$ guestfish -N debian-unstable.img=disk:8G -- \
part-disk /dev/sda mbr : \
part-set-bootable /dev/sda 1 true : \
mkfs ext4 /dev/sda1 : \
set-label /dev/sda1 rootfs : \
mount /dev/sda1 / : \
tar-in debian-unstable.tar / xattrs:true : \
upload /usr/lib/EXTLINUX/mbr.bin /boot/mbr.bin : \
copy-file-to-device /boot/mbr.bin /dev/sda size:440 : \
extlinux / : copy-in extlinux.conf / : sync : umount / : shutdown
$ qemu-system-x86_64 -m 1G -enable-kvm debian-unstable.img
$ sudo dd if=debian-unstable.img of=/dev/sdXXX status=progress
On architectures without extlinux you can also boot using grub2:
$ mmdebstrap --include=linux-image-amd64,grub2,systemd-sysv unstable fs.tar
$ guestfish -N debian-unstable.img=disk:2G -- \
part-disk /dev/sda mbr : \
part-set-bootable /dev/sda 1 true : \
mkfs ext4 /dev/sda1 : \
set-label /dev/sda1 rootfs : \
mount /dev/sda1 / : \
tar-in fs.tar / xattrs:true : \
command "grub-install /dev/sda" : \
command update-grub : \
sync : umount / : shutdown
$ qemu-system-x86_64 -m 1G -enable-kvm debian-unstable.img
Build libdvdcss2.deb without installing anything or changing apt sources on
the current system:
$ mmdebstrap --variant=apt --components=main,contrib --include=libdvd-pkg \
--customize-hook='chroot $1 /usr/lib/libdvd-pkg/b-i_libdvdcss.sh' \
| tar --extract --verbose --strip-components=4 \
--wildcards './usr/src/libdvd-pkg/libdvdcss2_*_*.deb'
$ ls libdvdcss2_*_*.deb
As a replacement for autopkgtest-build-qemu and vmdb2 for all architectures
supporting EFI booting (amd64, arm64, armhf, i386, riscv64), use the
convenience wrapper B<mmdebstrap-autopkgtest-build-qemu> around B<mmdebstrap>:
$ mmdebstrap-autopkgtest-build-qemu unstable ./autopkgtest.img
Use as a replacement for autopkgtest-build-qemu and vmdb2 on architectures
supporting extlinux (amd64 and i386):
$ mmdebstrap --variant=important --include=linux-image-amd64 \
--customize-hook='chroot "$1" passwd --delete root' \
--customize-hook='chroot "$1" useradd --home-dir /home/user --create-home user' \
--customize-hook='chroot "$1" passwd --delete user' \
--customize-hook='echo host > "$1/etc/hostname"' \
--customize-hook='echo "127.0.0.1 localhost host" > "$1/etc/hosts"' \
--customize-hook=/usr/share/autopkgtest/setup-commands/setup-testbed \
unstable debian-unstable.tar
$ cat << END > extlinux.conf
> default linux
> timeout 0
>
> label linux
> kernel /vmlinuz
> append initrd=/initrd.img root=/dev/vda1 rw console=ttyS0
END
$ guestfish -N debian-unstable.img=disk:8G -- \
part-disk /dev/sda mbr : \
part-set-bootable /dev/sda 1 true : \
mkfs ext4 /dev/sda1 : mount /dev/sda1 / : \
tar-in debian-unstable.tar / xattrs:true : \
upload /usr/lib/EXTLINUX/mbr.bin /boot/mbr.bin : \
copy-file-to-device /boot/mbr.bin /dev/sda size:440 : \
extlinux / : copy-in extlinux.conf / : sync : umount / : shutdown
$ qemu-img convert -O qcow2 debian-unstable.img debian-unstable.qcow2
As a debootstrap wrapper to run it without superuser privileges but using Linux
user namespaces instead. This fixes Debian bug #829134.
$ mmdebstrap --variant=custom --mode=unshare \
--setup-hook='debootstrap unstable "$1"' \
- debian-debootstrap.tar
Build a non-Debian chroot like Ubuntu bionic:
$ mmdebstrap --aptopt='Dir::Etc::Trusted
"/usr/share/keyrings/ubuntu-keyring-2012-archive.gpg"' bionic bionic.tar
If, for some reason, you cannot use a caching proxy like apt-cacher or
apt-cacher-ng, you can use the B<sync-in> and B<sync-out> special hooks to
synchronize a directory outside the chroot with F</var/cache/apt/archives>
inside the chroot.
$ mmdebstrap --variant=apt --skip=essential/unlink \
--setup-hook='mkdir -p ./cache "$1"/var/cache/apt/archives/' \
--setup-hook='sync-in ./cache /var/cache/apt/archives/' \
--customize-hook='sync-out /var/cache/apt/archives ./cache' \
unstable /dev/null
Instead of copying potentially large amounts of data with B<sync-in> you can
also use a bind-mount in combination with a C<file://> mirror to make packages
from the outside available inside the chroot:
$ mmdebstrap --variant=apt --skip=essential/unlink \
--setup-hook='mkdir "$1/tmp/mirror"' \
--setup-hook='mount -o ro,bind /tmp/mirror "$1/tmp/mirror"' \
--customize-hook='sync-out /var/cache/apt/archives ./cache' \
--customize-hook='umount "$1/tmp/mirror"; rmdir "$1/tmp/mirror";' \
unstable /dev/null file:///tmp/mirror http://deb.debian.org/debian
To automatically mount all directories referenced by C<file://> mirrors
into the chroot you can use a hook:
$ mmdebstrap --variant=apt \
--hook-dir=/usr/share/mmdebstrap/hooks/file-mirror-automount \
unstable /dev/null file:///tmp/mirror1 file:///tmp/mirror2
Create a system that can be used with docker:
$ mmdebstrap unstable | sudo docker import - debian
[...]
$ sudo docker run -it --rm debian whoami
root
$ sudo docker rmi debian
Create and boot a qemu virtual machine for an arbitrary architecture using
the B<debvm-create> wrapper script around B<mmdebstrap>:
$ debvm-create -r stable -- --architecture=riscv64
$ debvm-run
Create a system that can be used with podman:
$ mmdebstrap unstable | podman import - debian
[...]
$ podman run --network=none -it --rm debian whoami
root
$ podman rmi debian
As a docker/podman replacement:
$ mmdebstrap unstable chroot.tar
[...]
$ mmdebstrap --variant=custom --skip=update,tar-in/mknod \
--setup-hook='tar-in chroot.tar /' \
--customize-hook='chroot "$1" whoami' unstable /dev/null
[...]
root
$ rm chroot.tar
You can re-use a chroot tarball created with B<mmdebstrap> for further
refinement. Say you want to create a minimal chroot and a chroot with more
packages installed; then, instead of downloading and installing the essential
packages twice, you can build on top of the already present minimal chroot:
$ mmdebstrap --variant=apt unstable chroot.tar
$ mmdebstrap --variant=custom --skip=update,setup,cleanup,tar-in/mknod \
--setup-hook='tar-in chroot.tar /' \
--customize-hook='chroot "$1" apt-get install --yes pkg1 pkg2' \
'' chroot-full.tar
=head1 ENVIRONMENT VARIABLES
=over 8
=item C<SOURCE_DATE_EPOCH>
By setting C<SOURCE_DATE_EPOCH> the result will be reproducible across multiple
runs with the same options and mirror content. Note that for debootstrap
compatibility, B<mmdebstrap> will copy the host's F</etc/resolv.conf> and
F</etc/hostname> into the chroot. This means that the B<mmdebstrap> output will
differ if it is run on machines with differing F</etc/resolv.conf> and
F</etc/hostname> contents. To make the result reproducible across different
hosts, you need to either manually delete both files from the output:
$ mmdebstrap --customize-hook='rm "$1"/etc/resolv.conf' \
--customize-hook='rm "$1"/etc/hostname' ...
or fill them with reproducible content:
$ mmdebstrap --customize-hook='echo nameserver X > "$1"/etc/resolv.conf' \
--customize-hook='echo host > "$1"/etc/hostname' ...
=item C<TMPDIR>
When creating a tarball, a temporary directory is populated with the rootfs
before the tarball is packed. The location of that temporary directory will be
in F</tmp> or the location pointed to by C<TMPDIR> if that environment variable
is set. Setting C<TMPDIR> to a different directory than F</tmp> is useful if
you have F</tmp> on a tmpfs that is too small for your rootfs.
If you set C<TMPDIR> in B<unshare> mode, then the unshared user must be able to
access the directory. This means that the directory itself must be
world-writable and all its ancestors must be at least world-executable.
Since C<TMPDIR> is only valid outside the chroot, the variable is unset
when running hook scripts. If you need a valid temporary directory in a hook,
consider using F</tmp> inside your target directory.
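For example, to build in B<unshare> mode on a larger scratch directory instead
of F</tmp> (the path is only a placeholder and must be world-writable with
world-executable ancestors as described above):
$ TMPDIR=/srv/scratch mmdebstrap --mode=unshare unstable chroot.tar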
=back
=head1 DEBOOTSTRAP
This section lists some differences to debootstrap.
=over 8
=item * More than one mirror possible
=item * Default mirrors for stable releases include the updates and security mirrors
=item * Multiple ways to operate as non-root: fakechroot and unshare
=item * twice as fast
=item * Can create a chroot with only C<Essential:yes> packages and their deps
=item * Reproducible output by default if $SOURCE_DATE_EPOCH is set
=item * Can create output on filesystems with nodev set
=item * apt cache and lists are cleaned at the end
=item * foreign architecture chroots using qemu-user
=back
Limitations in comparison to debootstrap:
=over 8
=item * Only runs on systems with apt installed (Debian and derivatives)
=item * No I<SCRIPT> argument (use hooks instead)
=item * Some debootstrap options don't exist, namely:
I<--second-stage>, I<--exclude>, I<--resolve-deps>, I<--force-check-gpg>,
I<--merged-usr>, I<--no-merged-usr> and I<--cache-dir>.
=back
=head1 MERGED-/USR
B<mmdebstrap> will create a merged-/usr chroot or not depending on whether
packages setting up merged-/usr (i.e. the B<usrmerge> package) are installed or
not. In Debian, the essential package B<init-system-helpers> depends on the
B<usrmerge> package, starting with Debian 12 (Bookworm).
Before Debian 12 (Bookworm), to force B<mmdebstrap> to create a chroot with
merged-/usr using symlinks, either explicitly install the B<usrmerge> package:
--include=usrmerge
or set up merged-/usr using the debootstrap method, which takes care of the
architecture-specific symlinks and installs the B<usr-is-merged> package.
--hook-dir=/usr/share/mmdebstrap/hooks/merged-usr
To force B<mmdebstrap> to create a chroot without merged-/usr even after the
Debian 12 (Bookworm) release, you can use the following hook:
--hook-dir=/usr/share/mmdebstrap/hooks/no-merged-usr
This will write "this system will not be supported in the future" into
F</etc/unsupported-skip-usrmerge-conversion> inside the chroot and install the
B<usr-is-merged> package to avoid the installation of the B<usrmerge> package
and its dependencies.
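For example, a complete invocation using that hook might look like this:
$ mmdebstrap --hook-dir=/usr/share/mmdebstrap/hooks/no-merged-usr \
unstable chroot.tar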
If you are using B<mmdebstrap> in a setup where you do not know upfront whether
the chroot you are creating should be merged-/usr or not and you want to avoid
installation of the B<usrmerge> package and its dependencies, you can use:
--hook-dir=/usr/share/mmdebstrap/hooks/maybe-merged-usr
That hook will use the availability of the B<usr-is-merged> package to decide
whether to call the B<merged-usr> hook or not.
=head1 COMPRESSION
B<mmdebstrap> will choose a suitable compressor for the output tarball
depending on the filename extension. The following mapping from filename
extension to compressor applies:
extension compressor
--------------------
.tar none
.gz gzip
.tgz gzip
.taz gzip
.Z compress
.taZ compress
.bz2 bzip2
.tbz bzip2
.tbz2 bzip2
.tz2 bzip2
.lz lzip
.lzma lzma
.tlz lzma
.lzo lzop
.lz4 lz4
.xz xz
.txz xz
.zst zstd
To change compression-specific options, either use the respective environment
variables like B<XZ_OPT> or send B<mmdebstrap> output to your compressor of
choice with a pipe.
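For example, options could be passed to xz through its environment variable,
or the tarball could be piped through the compressor directly (the chosen
options are only illustrative):
$ XZ_OPT="-9 -T0" mmdebstrap unstable chroot.tar.xz
$ mmdebstrap unstable | zstd -19 > chroot.tar.zst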
=head1 WRAPPERS
=head2 debvm
B<debvm> helps create and run virtual machines for various Debian releases and
architectures. The tool B<debvm-create> can be used to create a virtual
machine image and the tool B<debvm-run> can be used to run such a machine
image. Their primary purpose is testing software using qemu as a containment
technology. These are relatively thin wrappers around B<mmdebstrap> and
B<qemu>.
=head2 bdebstrap
B<bdebstrap> is a YAML config based multi-mirror Debian chroot creation tool.
B<bdebstrap> is an alternative to B<debootstrap> and a wrapper around
B<mmdebstrap> to support YAML based configuration files. It inherits all
benefits from B<mmdebstrap>. The support for configuration allows storing all
customization in a YAML file instead of having to use a very long one-liner
call to B<mmdebstrap>. It also allows layering multiple customizations on top
of each other, e.g. to support flavors of an image.
=head1 BUGS
https://gitlab.mister-muffin.de/josch/mmdebstrap/issues
https://bugs.debian.org/src:mmdebstrap
As of version 1.20.9, dpkg does not provide facilities preventing it from
reading the dpkg configuration of the machine running B<mmdebstrap>.
Therefore, until this dpkg limitation is fixed, a default dpkg configuration is
recommended on machines running B<mmdebstrap>. If you are using B<mmdebstrap>
as the non-root user, then as a workaround you could run C<chmod 600
/etc/dpkg/dpkg.cfg.d/*> so that the config files are only accessible by the
root user. See Debian bug #808203.
With apt versions before 2.1.16, setting C<[trusted=yes]> or
C<Acquire::AllowInsecureRepositories "1"> to allow signed archives without a
known public key or unsigned archives will fail because of a gpg warning in the
apt output. Since apt does not communicate its status via any other means than
human readable strings, and because B<mmdebstrap> wants to treat transient
network errors as errors, B<mmdebstrap> treats any warning from "apt-get
update" as an error.
=head1 SEE ALSO
L<debootstrap(8)>, L<debvm(1)>, L<bdebstrap(1)>
=cut
# vim: expandtab tabstop=4 softtabstop=4 shiftwidth=4 ft=perl tw=79