2018-09-18 09:20:24 +00:00
|
|
|
#!/usr/bin/perl
|
|
|
|
#
|
|
|
|
# Copyright: 2018 Johannes Schauer <josch@mister-muffin.de>
|
|
|
|
#
|
|
|
|
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
|
|
# of this software and associated documentation files (the "Software"), to
|
|
|
|
# deal in the Software without restriction, including without limitation the
|
|
|
|
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
|
|
|
|
# sell copies of the Software, and to permit persons to whom the Software is
|
|
|
|
# furnished to do so, subject to the following conditions:
|
|
|
|
#
|
|
|
|
# The above copyright notice and this permission notice shall be included in
|
|
|
|
# all copies or substantial portions of the Software.
|
2019-11-13 10:53:30 +00:00
|
|
|
#
|
|
|
|
# The software is provided "as is", without warranty of any kind, express or
|
|
|
|
# implied, including but not limited to the warranties of merchantability,
|
|
|
|
# fitness for a particular purpose and noninfringement. In no event shall the
|
|
|
|
# authors or copyright holders be liable for any claim, damages or other
|
|
|
|
# liability, whether in an action of contract, tort or otherwise, arising
|
|
|
|
# from, out of or in connection with the software or the use or other dealings
|
|
|
|
# in the software.
|
2018-09-18 09:20:24 +00:00
|
|
|
|
|
|
|
use strict;
|
|
|
|
use warnings;
|
|
|
|
|
2020-09-18 11:43:42 +00:00
|
|
|
our $VERSION = '0.7.1';
|
2019-02-23 07:55:31 +00:00
|
|
|
|
2018-09-18 09:20:24 +00:00
|
|
|
use English;
|
|
|
|
use Getopt::Long;
|
|
|
|
use Pod::Usage;
|
|
|
|
use File::Copy;
|
|
|
|
use File::Path qw(make_path remove_tree);
|
|
|
|
use File::Temp qw(tempfile tempdir);
|
2019-12-09 09:40:51 +00:00
|
|
|
use File::Basename;
|
2020-06-23 20:45:17 +00:00
|
|
|
use File::Find;
|
2020-11-13 18:02:41 +00:00
|
|
|
use Cwd qw(abs_path getcwd);
|
2020-01-09 07:39:40 +00:00
|
|
|
require "syscall.ph"; ## no critic (Modules::RequireBarewordIncludes)
|
2018-09-23 17:47:14 +00:00
|
|
|
use Fcntl qw(S_IFCHR S_IFBLK FD_CLOEXEC F_GETFD F_SETFD);
|
2018-09-23 17:36:07 +00:00
|
|
|
use List::Util qw(any none);
|
2020-08-24 14:20:04 +00:00
|
|
|
use POSIX qw(SIGINT SIGHUP SIGPIPE SIGTERM SIG_BLOCK SIG_UNBLOCK strftime);
|
2019-01-20 09:39:01 +00:00
|
|
|
use Carp;
|
|
|
|
use Term::ANSIColor;
|
2019-12-09 09:40:51 +00:00
|
|
|
use Socket;
|
2020-08-25 14:06:05 +00:00
|
|
|
use Time::HiRes;
|
2020-08-15 16:09:06 +00:00
|
|
|
use version;
|
2018-09-18 09:20:24 +00:00
|
|
|
|
2020-01-09 07:39:40 +00:00
|
|
|
## no critic (InputOutput::RequireBriefOpen)
|
|
|
|
|
2018-09-18 09:20:24 +00:00
|
|
|
# from sched.h
# use typeglob constants because "use constant" has several drawback as
# explained in the documentation for the Readonly CPAN module
#
# flag values for the unshare(2) syscall; each selects one kind of
# namespace to unshare (see sched.h / unshare(2))
*CLONE_NEWNS = \0x20000;    # mount namespace
*CLONE_NEWUTS = \0x4000000;    # UTS (hostname) namespace
*CLONE_NEWIPC = \0x8000000;    # IPC namespace
*CLONE_NEWUSER = \0x10000000;    # user namespace
*CLONE_NEWPID = \0x20000000;    # PID namespace
*CLONE_NEWNET = \0x40000000;    # network namespace
our (
    $CLONE_NEWNS, $CLONE_NEWUTS, $CLONE_NEWIPC,
    $CLONE_NEWUSER, $CLONE_NEWPID, $CLONE_NEWNET
);
|
2018-09-18 09:20:24 +00:00
|
|
|
|
|
|
|
# type codes:
# 0 -> normal file
# 1 -> hardlink
# 2 -> symlink
# 3 -> character special
# 4 -> block special
# 5 -> directory
#
# list of device nodes, symlinks and directories that make up a minimal
# /dev inside the chroot; major/minor are only meaningful for the
# character-special entries (type 3)
my @devfiles = (
    # filename mode type link target major minor
    ["./dev/", oct(755), 5, '', undef, undef],
    ["./dev/console", oct(666), 3, '', 5, 1],
    ["./dev/fd", oct(777), 2, '/proc/self/fd', undef, undef],
    ["./dev/full", oct(666), 3, '', 1, 7],
    ["./dev/null", oct(666), 3, '', 1, 3],
    ["./dev/ptmx", oct(666), 3, '', 5, 2],
    ["./dev/pts/", oct(755), 5, '', undef, undef],
    ["./dev/random", oct(666), 3, '', 1, 8],
    ["./dev/shm/", oct(755), 5, '', undef, undef],
    ["./dev/stderr", oct(777), 2, '/proc/self/fd/2', undef, undef],
    ["./dev/stdin", oct(777), 2, '/proc/self/fd/0', undef, undef],
    ["./dev/stdout", oct(777), 2, '/proc/self/fd/1', undef, undef],
    ["./dev/tty", oct(666), 3, '', 5, 0],
    ["./dev/urandom", oct(666), 3, '', 1, 9],
    ["./dev/zero", oct(666), 3, '', 1, 5],
);
|
|
|
|
|
2019-01-20 09:39:01 +00:00
|
|
|
# verbosity levels:
# 0 -> print nothing
# 1 -> normal output and progress bars
# 2 -> verbose output
# 3 -> debug output
my $verbosity_level = 1;    # default: normal output with progress bars
|
|
|
|
|
2020-01-09 07:39:40 +00:00
|
|
|
# true when running under Devel::Cover; used later to switch coverage off
# in child processes right before exec()
my $is_covering = !!(eval { Devel::Cover::get_coverage() });
|
2020-01-08 14:33:49 +00:00
|
|
|
|
2020-08-19 06:16:19 +00:00
|
|
|
# the reason why Perl::Critic warns about this is, that it suspects that the
|
|
|
|
# programmer wants to implement a test whether the terminal is interactive or
|
|
|
|
# not, in which case, complex interactions with the magic *ARGV indeed make it
|
|
|
|
# advisable to use IO::Interactive. In our case, we do not want to create an
|
|
|
|
# interactivity check but just want to check whether STDERR is opened to a tty,
|
|
|
|
# so our use of -t is fine and not "fragile and complicated" as is written in
|
|
|
|
# the description of InputOutput::ProhibitInteractiveTest. Also see
|
|
|
|
# https://github.com/Perl-Critic/Perl-Critic/issues/918
|
|
|
|
# Return 1 when STDERR is connected to a terminal, 0 otherwise.
sub stderr_is_tty() {
    ## no critic (InputOutput::ProhibitInteractiveTest)
    return (-t STDERR) ? 1 : 0;
}
|
|
|
|
|
2019-01-20 09:39:01 +00:00
|
|
|
# Print a debug message ("D: ...") to STDERR, prefixed with the current
# pid and the caller's line number. Only active at verbosity level 3.
sub debug {
    return if $verbosity_level < 3;
    my $text   = shift;
    my $lineno = (caller)[2];
    my $out    = "D: $PID $lineno $text";
    $out = colored($out, 'clear') if stderr_is_tty();
    print STDERR "$out\n";
    return;
}
|
|
|
|
|
|
|
|
# Print an informational message ("I: ...") to STDERR unless the
# verbosity level is 0. At debug verbosity the message additionally
# carries the pid and caller line number, like debug() output.
sub info {
    return if $verbosity_level == 0;
    my $text = shift;
    if ($verbosity_level >= 3) {
        my $lineno = (caller)[2];
        $text = "$PID $lineno $text";
    }
    my $out = "I: $text";
    $out = colored($out, 'green') if stderr_is_tty();
    print STDERR "$out\n";
    return;
}
|
|
|
|
|
|
|
|
# Print a warning message ("W: ...") to STDERR unless the verbosity
# level is 0.
sub warning {
    return if $verbosity_level == 0;
    my $out = "W: " . shift;
    $out = colored($out, 'bold yellow') if stderr_is_tty();
    print STDERR "$out\n";
    return;
}
|
|
|
|
|
|
|
|
# Raise a fatal error ("E: ..."). At verbosity level 3 a backtrace is
# produced via croak; otherwise a plain die without file/line suffix.
sub error {
    # if error() is called with the string from a previous error() that was
    # caught inside an eval(), then the string will have a newline which we
    # are stripping here
    chomp(my $text = shift);
    $text = "E: $text";
    $text = colored($text, 'bold red') if stderr_is_tty();
    croak $text if $verbosity_level == 3;    # produces a backtrace
    die "$text\n";
}
|
|
|
|
|
2019-12-02 22:54:48 +00:00
|
|
|
# Heuristically decide whether a directory is a mount point by comparing
# the stat() results of the directory itself ($dir/.) and its parent
# ($dir/..): a different device number, or an identical inode number
# (as for the filesystem root), indicates a mount. Returns 1 or 0; a
# nonexistent path yields 0.
sub is_mountpoint {
    my $path = shift;
    return 0 unless -e $path;
    my ($dev_self, $ino_self) = (stat "$path/.")[0, 1];
    my ($dev_parent, $ino_parent) = (stat "$path/..")[0, 1];
    # different device -> definitely a mount boundary
    return 1 if $dev_self != $dev_parent;
    # same inode for . and .. -> filesystem root, i.e. mounted
    return 1 if $ino_self == $ino_parent;
    return 0;
}
|
|
|
|
|
2018-09-18 09:20:24 +00:00
|
|
|
# tar cannot figure out the decompression program when receiving data on
# standard input, thus we do it ourselves. This is copied from tar's
# src/suffix.c
#
# Given an archive filename, return the decompressor command as a single
# array ref (command plus arguments), or nothing for stdin ("-"), a bare
# .tar, or an unrecognized suffix.
sub get_tar_compressor {
    my $filename = shift;
    # stdin or an uncompressed tarball needs no decompressor
    return if $filename eq '-';
    return if $filename =~ /\.tar$/;
    my @suffix_table = (
        [qr/\.(gz|tgz|taz)$/,       ['gzip']],
        [qr/\.(Z|taZ)$/,            ['compress']],
        [qr/\.(bz2|tbz|tbz2|tz2)$/, ['bzip2']],
        [qr/\.lz$/,                 ['lzip']],
        [qr/\.(lzma|tlz)$/,         ['lzma']],
        [qr/\.lzo$/,                ['lzop']],
        [qr/\.lz4$/,                ['lz4']],
        [qr/\.(xz|txz)$/,           ['xz', '--threads=0']],
        [qr/\.zst$/,                ['zstd']],
    );
    for my $entry (@suffix_table) {
        my ($pattern, $command) = @{$entry};
        return $command if $filename =~ $pattern;
    }
    return;
}
|
|
|
|
|
2020-01-09 07:39:40 +00:00
|
|
|
# Check whether unprivileged user namespaces are usable: we must not be
# root, an unshare(CLONE_NEWUSER) must succeed in a throw-away child
# process, and the setuid helpers newuidmap/newgidmap must be installed.
#
# $verbose: if true, report problems via warning(); otherwise via debug()
#
# Returns 1 if unshare mode can be used, 0 otherwise.
sub test_unshare {
    my $verbose = shift;
    # all failure paths report the same way; choose the channel once
    # instead of repeating the verbose/quiet branching at every site
    my $complain = $verbose ? \&warning : \&debug;
    if ($EFFECTIVE_USER_ID == 0) {
        $complain->("cannot use unshare mode when executing as root");
        return 0;
    }
    # arguments to syscalls have to be stored in their own variable or
    # otherwise we will get "Modification of a read-only value attempted"
    my $unshare_flags = $CLONE_NEWUSER;
    # we spawn a new process because if unshare succeeds, we would
    # otherwise have unshared the mmdebstrap process itself which we don't
    # want
    my $pid = fork() // error "fork() failed: $!";
    if ($pid == 0) {
        my $ret = syscall(&SYS_unshare, $unshare_flags);
        if ($ret == 0) {
            exit 0;
        }
        $complain->("unshare syscall failed: $!");
        exit 1;
    }
    waitpid($pid, 0);
    if (($? >> 8) != 0) {
        return 0;
    }
    # if newuidmap and newgidmap exist, the exit status will be 1 when
    # executed without parameters
    for my $tool (qw(newuidmap newgidmap)) {
        system "$tool 2>/dev/null";
        if (($? >> 8) != 1) {
            if (($? >> 8) == 127) {
                $complain->("cannot find $tool");
            } else {
                $complain->("$tool returned unknown exit status: $?");
            }
            return 0;
        }
    }
    return 1;
}
|
|
|
|
|
|
|
|
# Read the subordinate user and group id ranges allotted to the current
# user from /etc/subuid and /etc/subgid.
#
# Returns a list of two array refs [$type, 0, $startid, $count] with
# $type "u" and "g" respectively, or the empty list (after printing a
# warning) if the information cannot be determined.
#
# Fixes over the previous version:
#  - only entries whose first field matches the username are kept; the
#    old loop left the *last* line's values in the loop variables when no
#    entry matched, silently handing back another user's id range and
#    making the "does not contain an entry" warning unreachable
#  - the "multiple entries" warning is now actually reachable because we
#    scan the whole file instead of stopping at the first match
#  - /etc/subgid gets the same existence/readability checks as
#    /etc/subuid instead of dying inside open()
sub read_subuid_subgid() {
    my $username = getpwuid $<;
    my @result   = ();

    foreach my $pair (["u", "/etc/subuid"], ["g", "/etc/subgid"]) {
        my ($type, $file) = @{$pair};
        if (!-e $file) {
            warning "$file doesn't exist";
            return;
        }
        if (!-r $file) {
            warning "$file is not readable";
            return;
        }
        open my $fh, "<", $file
          or error "cannot open $file for reading: $!";
        my @matches;
        while (my $line = <$fh>) {
            chomp $line;
            # format: username:startid:count
            my ($n, $subid, $num_subid) = split(/:/, $line, 3);
            if (defined $n and $n eq $username) {
                push @matches, [$type, 0, $subid, $num_subid];
            }
        }
        close $fh;
        if (scalar(@matches) < 1) {
            warning "$file does not contain an entry for $username";
            return;
        }
        if (scalar(@matches) > 1) {
            warning "$file contains multiple entries for $username";
            return;
        }
        push @result, $matches[0];
    }

    return @result;
}
|
|
|
|
|
|
|
|
# This function spawns two child processes forming the following process tree
|
|
|
|
#
|
|
|
|
# A
|
|
|
|
# |
|
|
|
|
# fork()
|
|
|
|
# | \
|
|
|
|
# B C
|
|
|
|
# | |
|
|
|
|
# | fork()
|
|
|
|
# | | \
|
|
|
|
# | D E
|
|
|
|
# | | |
|
|
|
|
# |unshare()
|
|
|
|
# | close()
|
|
|
|
# | | |
|
|
|
|
# | | read()
|
|
|
|
# | | newuidmap(D)
|
|
|
|
# | | newgidmap(D)
|
|
|
|
# | | /
|
|
|
|
# | waitpid()
|
|
|
|
# | |
|
|
|
|
# | fork()
|
|
|
|
# | | \
|
|
|
|
# | F G
|
|
|
|
# | | |
|
|
|
|
# | | exec()
|
|
|
|
# | | /
|
|
|
|
# | waitpid()
|
|
|
|
# | /
|
|
|
|
# waitpid()
|
|
|
|
#
|
|
|
|
# To better refer to each individual part, we give each process a new
|
|
|
|
# identifier after calling fork(). Process A is the main process. After
|
|
|
|
# executing fork() we call the parent and child B and C, respectively. This
|
|
|
|
# first fork() is done because we do not want to modify A. B then remains
|
|
|
|
# waiting for its child C to finish. C calls fork() again, splitting into
|
|
|
|
# the parent D and its child E. In the parent D we call unshare() and close a
|
|
|
|
# pipe shared by D and E to signal to E that D is done with calling unshare().
|
|
|
|
# E notices this by using read() and follows up with executing the tools
|
|
|
|
# new[ug]idmap on D. E finishes and D continues with doing another fork().
|
|
|
|
# This is because when unsharing the PID namespace, we need a PID 1 to be kept
|
|
|
|
# alive or otherwise any child processes cannot fork() anymore themselves. So
|
|
|
|
# we keep F as PID 1 and finally call exec() in G.
|
2020-01-09 07:39:40 +00:00
|
|
|
# $cmd:   code reference to execute inside the new namespaces
# $idmap: array ref of [$type, $hostid, $nsid, $range] tuples with $type
#         one of "u", "g" or "b" (both), as produced e.g. by
#         read_subuid_subgid()
# Returns the pid of the forked child (process C in the diagram above)
# to the caller; the caller is expected to waitpid() on it.
sub get_unshare_cmd {
    my $cmd = shift;
    my $idmap = shift;

    # unshare mount, pid, uts and ipc namespaces in addition to the user
    # namespace
    my $unshare_flags
      = $CLONE_NEWUSER | $CLONE_NEWNS | $CLONE_NEWPID | $CLONE_NEWUTS
      | $CLONE_NEWIPC;

    # network namespace unsharing is currently disabled
    if (0) {
        $unshare_flags |= $CLONE_NEWNET;
    }

    # fork a new process and let the child get unshare()ed
    # we don't want to unshare the parent process
    my $gcpid = fork() // error "fork() failed: $!";
    if ($gcpid == 0) {
        # Create a pipe for the parent process to signal the child process that
        # it is done with calling unshare() so that the child can go ahead
        # setting up uid_map and gid_map.
        pipe my $rfh, my $wfh;
        # We have to do this dance with forking a process and then modifying
        # the parent from the child because:
        # - new[ug]idmap can only be called on a process id after that process
        #   has unshared the user namespace
        # - a process looses its capabilities if it performs an execve() with
        #   nonzero user ids see the capabilities(7) man page for details.
        # - a process that unshared the user namespace by default does not
        #   have the privileges to call new[ug]idmap on itself
        #
        # this also works the other way around (the child setting up a user
        # namespace and being modified from the parent) but that way, the
        # parent would have to stay around until the child exited (so a pid
        # would be wasted). Additionally, that variant would require an
        # additional pipe to let the parent signal the child that it is done
        # with calling new[ug]idmap. The way it is done here, this signaling
        # can instead be done by wait()-ing for the exit of the child.

        my $ppid = $$;
        my $cpid = fork() // error "fork() failed: $!";
        if ($cpid == 0) {
            # child

            # Close the writing descriptor at our end of the pipe so that we
            # see EOF when parent closes its descriptor.
            close $wfh;

            # Wait for the parent process to finish its unshare() call by
            # waiting for an EOF.
            0 == sysread $rfh, my $c, 1 or error "read() did not receive EOF";

            # The program's new[ug]idmap have to be used because they are
            # setuid root. These privileges are needed to map the ids from
            # /etc/sub[ug]id to the user namespace set up by the parent.
            # Without these privileges, only the id of the user itself can be
            # mapped into the new namespace.
            #
            # Since new[ug]idmap is setuid root we also don't need to write
            # "deny" to /proc/$$/setgroups beforehand (this is otherwise
            # required for unprivileged processes trying to write to
            # /proc/$$/gid_map since kernel version 3.19 for security reasons)
            # and therefore the parent process keeps its ability to change its
            # own group here.
            #
            # Since /proc/$ppid/[ug]id_map can only be written to once,
            # respectively, instead of making multiple calls to new[ug]idmap,
            # we assemble a command line that makes one call each.
            my $uidmapcmd = "";
            my $gidmapcmd = "";
            foreach (@{$idmap}) {
                my ($t, $hostid, $nsid, $range) = @{$_};
                if ($t ne "u" and $t ne "g" and $t ne "b") {
                    error "invalid idmap type: $t";
                }
                if ($t eq "u" or $t eq "b") {
                    $uidmapcmd .= " $hostid $nsid $range";
                }
                if ($t eq "g" or $t eq "b") {
                    $gidmapcmd .= " $hostid $nsid $range";
                }
            }
            my $idmapcmd = '';
            if ($uidmapcmd ne "") {
                0 == system "newuidmap $ppid $uidmapcmd"
                  or error "newuidmap $ppid $uidmapcmd failed: $!";
            }
            if ($gidmapcmd ne "") {
                0 == system "newgidmap $ppid $gidmapcmd"
                  or error "newgidmap $ppid $gidmapcmd failed: $!";
            }
            exit 0;
        }

        # parent

        # After fork()-ing, the parent immediately calls unshare...
        0 == syscall &SYS_unshare, $unshare_flags
          or error "unshare() failed: $!";

        # .. and then signals the child process that we are done with the
        # unshare() call by sending an EOF.
        close $wfh;

        # Wait for the child process to finish its setup by waiting for its
        # exit.
        $cpid == waitpid $cpid, 0 or error "waitpid() failed: $!";
        my $exit = $? >> 8;
        if ($exit != 0) {
            error "child had a non-zero exit status: $exit";
        }

        # Currently we are nobody (uid and gid are 65534). So we become root
        # user and group instead.
        #
        # We are using direct syscalls instead of setting $(, $), $< and $>
        # because then perl would do additional stuff which we don't need or
        # want here, like checking /proc/sys/kernel/ngroups_max (which might
        # not exist). It would also also call setgroups() in a way that makes
        # the root user be part of the group unknown.
        0 == syscall &SYS_setgid, 0 or error "setgid failed: $!";
        0 == syscall &SYS_setuid, 0 or error "setuid failed: $!";
        0 == syscall &SYS_setgroups, 0, 0 or error "setgroups failed: $!";

        if (1) {
            # When the pid namespace is also unshared, then processes expect a
            # master pid to always be alive within the namespace. To achieve
            # this, we fork() here instead of exec() to always have one dummy
            # process running as pid 1 inside the namespace. This is also what
            # the unshare tool does when used with the --fork option.
            #
            # Otherwise, without a pid 1, new processes cannot be forked
            # anymore after pid 1 finished.
            my $cpid = fork() // error "fork() failed: $!";
            if ($cpid != 0) {
                # The parent process will stay alive as pid 1 in this
                # namespace until the child finishes executing. This is
                # important because pid 1 must never die or otherwise nothing
                # new can be forked.
                $cpid == waitpid $cpid, 0 or error "waitpid() failed: $!";
                exit($? >> 8);
            }
        }

        # finally run the caller-supplied code inside the new namespaces
        &{$cmd}();

        exit 0;
    }

    # parent
    return $gcpid;
}
|
|
|
|
|
2020-01-09 07:39:40 +00:00
|
|
|
# Probe whether device nodes can be created below $root by attempting to
# mknod(1) a throw-away character device /test-dev-null (major 1, minor
# 3, i.e. /dev/null) and then writing to it. Any leftover probe file is
# removed again. Returns 1 when mknod works, 0 otherwise.
sub havemknod {
    my $root = shift;
    my $havemknod = 0;
    if (-e "$root/test-dev-null") {
        error "/test-dev-null already exists";
    }
  TEST: {
        # we fork so that we can read STDERR
        #
        # open() must be parenthesized: without the parens, // binds to
        # the always-defined string '-|' and a failed fork would go
        # unreported (leaving $pid undef)
        my $pid = open(my $fh, '-|') // error "failed to fork(): $!";
        if ($pid == 0) {
            open(STDERR, '>&', STDOUT) or error "cannot open STDERR: $!";
            # we use mknod(1) instead of the system call because creating the
            # right dev_t argument requires makedev(3)
            exec 'mknod', "$root/test-dev-null", 'c', '1', '3';
            # only reached when exec() itself failed (e.g. mknod(1) is not
            # installed); without this the child would fall through and
            # continue running the parent's code
            error "cannot exec mknod: $!";
        }
        chomp(
            my $content = do { local $/; <$fh> }
        );
        close $fh;
        {
            # mknod must have succeeded silently and produced a writable
            # character device
            last TEST unless $? == 0 and $content eq '';
            last TEST unless -c "$root/test-dev-null";
            last TEST unless open my $fh, '>', "$root/test-dev-null";
            last TEST unless print $fh 'test';
        }
        $havemknod = 1;
    }
    if (-e "$root/test-dev-null") {
        unlink "$root/test-dev-null"
          or error "cannot unlink /test-dev-null: $!";
    }
    return $havemknod;
}
|
|
|
|
|
2018-09-23 17:47:14 +00:00
|
|
|
# Draw a 50-column progress bar on STDERR for the given percentage, or
# finish the bar when passed the string "done". Only active at verbosity
# level 1 and only when STDERR is a terminal.
sub print_progress {
    return if $verbosity_level != 1;
    my $perc = shift;
    return unless stderr_is_tty();
    if ($perc eq "done") {
        # \e[2K clears everything on the current line (i.e. the progress bar)
        print STDERR "\e[2Kdone\n";
        return;
    }
    $perc = 100 if $perc >= 100;
    my $width  = 50;
    my $filled = int($perc * $width / 100);
    my $bar    = '=' x $filled;
    # an unfinished bar gets an arrow head plus padding up to full width
    $bar .= '>' . (' ' x ($width - $filled - 1)) if $filled != $width;
    printf STDERR "%6.2f [%s]\r", $perc, $bar;
    return;
}
|
|
|
|
|
2019-01-13 21:04:25 +00:00
|
|
|
# Run an external command while rendering a progress bar from its output.
#
# $get_exec:       code ref; given a file descriptor number, returns the
#                  argv list to exec (the command writes progress
#                  information to that fd)
# $line_handler:   code ref; given one line read from the progress fd,
#                  returns ($newprogress, $newstatus)
# $line_has_error: code ref; given one line of the command's combined
#                  stdout/stderr, returns true if it indicates an error
# $chdir:          optional directory to chdir() into before exec()
#
# Dies via error() if the command or the progress parser fails.
sub run_progress {
    my ($get_exec, $line_handler, $line_has_error, $chdir) = @_;
    # $rfh/$wfh carry the machine-readable progress stream, separate from
    # the command's stdout/stderr which is captured via $pipe below
    pipe my $rfh, my $wfh;
    my $got_signal = 0;
    my $ignore = sub {
        info "run_progress() received signal $_[0]: waiting for child...";
    };

    debug("run_progress: exec " . (join ' ', ($get_exec->('${FD}'))));

    # delay signals so that we can fork and change behaviour of the signal
    # handler in parent and child without getting interrupted
    my $sigset = POSIX::SigSet->new(SIGINT, SIGHUP, SIGPIPE, SIGTERM);
    POSIX::sigprocmask(SIG_BLOCK, $sigset) or error "Can't block signals: $!";

    # fork the command itself; $pipe captures its stdout+stderr
    my $pid1 = open(my $pipe, '-|') // error "failed to fork(): $!";

    if ($pid1 == 0) {
        # child: default signal handlers
        local $SIG{'INT'} = 'DEFAULT';
        local $SIG{'HUP'} = 'DEFAULT';
        local $SIG{'PIPE'} = 'DEFAULT';
        local $SIG{'TERM'} = 'DEFAULT';

        # unblock all delayed signals (and possibly handle them)
        POSIX::sigprocmask(SIG_UNBLOCK, $sigset)
          or error "Can't unblock signals: $!";

        close $rfh;
        # Unset the close-on-exec flag, so that the file descriptor does not
        # get closed when we exec
        my $flags = fcntl($wfh, F_GETFD, 0) or error "fcntl F_GETFD: $!";
        fcntl($wfh, F_SETFD, $flags & ~FD_CLOEXEC)
          or error "fcntl F_SETFD: $!";
        my $fd = fileno $wfh;
        # redirect stderr to stdout so that we can capture it
        open(STDERR, '>&', STDOUT) or error "cannot open STDOUT: $!";
        my @execargs = $get_exec->($fd);
        # before apt 1.5, "apt-get update" attempted to chdir() into the
        # working directory. This will fail if the current working directory
        # is not accessible by the user (for example in unshare mode). See
        # Debian bug #860738
        if (defined $chdir) {
            chdir $chdir or error "failed chdir() to $chdir: $!";
        }
        # coverage collection must not leak into the exec'd program
        eval { Devel::Cover::set_coverage("none") } if $is_covering;
        exec { $execargs[0] } @execargs
          or error 'cannot exec() ' . (join ' ', @execargs);
    }
    close $wfh;

    # spawn two processes:
    # parent will parse stdout to look for errors
    # child will parse $rfh for the progress meter
    my $pid2 = fork() // error "failed to fork(): $!";
    if ($pid2 == 0) {
        # child: default signal handlers
        local $SIG{'INT'} = 'IGNORE';
        local $SIG{'HUP'} = 'IGNORE';
        local $SIG{'PIPE'} = 'IGNORE';
        local $SIG{'TERM'} = 'IGNORE';

        # unblock all delayed signals (and possibly handle them)
        POSIX::sigprocmask(SIG_UNBLOCK, $sigset)
          or error "Can't unblock signals: $!";

        my $progress = 0.0;
        my $status = undef;
        print_progress($progress);
        while (my $line = <$rfh>) {
            my ($newprogress, $newstatus) = $line_handler->($line);
            next unless $newprogress;
            # start a new line if the new progress value is less than the
            # previous one
            if ($newprogress < $progress) {
                print_progress("done");
            }
            if (defined $newstatus) {
                $status = $newstatus;
            }
            if ( defined $status
                and $verbosity_level == 1
                and stderr_is_tty()) {
                # \e[2K clears everything on the current line (i.e. the
                # progress bar)
                print STDERR "\e[2K$status: ";
            }
            print_progress($newprogress);
            $progress = $newprogress;
        }
        print_progress("done");

        exit 0;
    }

    # parent: ignore signals
    # by using "local", the original is automatically restored once the
    # function returns
    local $SIG{'INT'} = $ignore;
    local $SIG{'HUP'} = $ignore;
    local $SIG{'PIPE'} = $ignore;
    local $SIG{'TERM'} = $ignore;

    # unblock all delayed signals (and possibly handle them)
    POSIX::sigprocmask(SIG_UNBLOCK, $sigset)
      or error "Can't unblock signals: $!";

    my $output = '';
    my $has_error = 0;
    while (my $line = <$pipe>) {
        # NOTE: only the last line's verdict survives; an earlier error
        # line is forgotten if a later line is clean
        $has_error = $line_has_error->($line);
        if ($verbosity_level >= 2) {
            print STDERR $line;
        } else {
            # forward captured apt output
            $output .= $line;
        }
    }

    close($pipe);
    my $fail = 0;
    if ($? != 0 or $has_error) {
        $fail = 1;
    }

    waitpid $pid2, 0;
    $? == 0 or error "progress parsing failed";

    # NOTE(review): $got_signal is never assigned by $ignore, so this
    # branch appears unreachable — confirm intended behaviour
    if ($got_signal) {
        error "run_progress() received signal: $got_signal";
    }

    # only print failure after progress output finished or otherwise it
    # might interfere with the remaining output
    if ($fail) {
        if ($verbosity_level >= 1) {
            print STDERR $output;
        }
        error((join ' ', $get_exec->('<$fd>')) . ' failed');
    }
    return;
}
|
|
|
|
|
2019-01-13 21:04:25 +00:00
|
|
|
# Run dpkg with a progress meter.
#
# Expects a hashref with:
#   ARGV - arrayref: the dpkg command line to run
#   PKGS - arrayref (optional): the .deb files to pass to dpkg
#
# A --status-fd option is appended to the command line so that dpkg reports
# machine-readable status lines on a dedicated file descriptor. Progress is
# computed from the "processing: install:" and "processing: configure:"
# status lines; every package contributes one install and one configure
# step. The actual process handling and meter drawing happens in
# run_progress().
sub run_dpkg_progress {
    my $options = shift;
    my @debs = @{ $options->{PKGS} // [] };
    # $_[0] is the file descriptor number chosen by run_progress()
    my $get_exec
      = sub { return @{ $options->{ARGV} }, "--status-fd=$_[0]", @debs; };
    # dpkg reports errors via its exit status, so no line-based error check
    my $line_has_error = sub { return 0; };
    my $num = 0;
    # each package has one install and one configure step, thus the total
    # number is twice the number of packages
    my $total = (scalar @debs) * 2;
    my $line_handler = sub {
        my $status = undef;
        if ($_[0] =~ /^processing: (install|configure): /) {
            if ($1 eq 'install') {
                $status = 'installing';
            } elsif ($1 eq 'configure') {
                $status = 'configuring';
            } else {
                # unreachable: the pattern above only matches the two
                # alternatives handled here -- kept as a safety net
                error "unknown status: $1";
            }
            $num += 1;
        }
        # guard against division by zero when PKGS is empty or absent:
        # without it, the first status line would kill the parser with
        # "Illegal division by zero"
        if ($total > 0) {
            return $num / $total * 100, $status;
        }
        return 0, $status;
    };
    run_progress $get_exec, $line_handler, $line_has_error;
    return;
}
|
2019-01-13 09:17:46 +00:00
|
|
|
|
2019-01-13 21:04:25 +00:00
|
|
|
# Run apt with a progress meter.
#
# Expects a hashref with:
#   ARGV              - arrayref: the apt command line to run
#   PKGS              - arrayref (optional): extra arguments (packages/.debs)
#   CHDIR             - optional: directory to chdir into (passed through to
#                       run_progress)
#   FIND_APT_WARNINGS - optional: treat "W: "/"Err:" output lines as errors
#   EDSP_RES          - arrayref (optional): if present, apt is run through
#                       the proxysolver and the resulting EDSP dump is parsed;
#                       [$pkg, $ver] pairs of the solver's Install stanzas are
#                       appended to this array
sub run_apt_progress {
    my $options = shift;
    my @debs = @{ $options->{PKGS} // [] };
    my $tmpedsp;
    if (exists $options->{EDSP_RES}) {
        # only reserve a name; apt itself writes the file (OPEN => 0)
        (undef, $tmpedsp) = tempfile(
            "mmdebstrap.edsp.XXXXXXXXXXXX",
            OPEN   => 0,
            TMPDIR => 1
        );
    }
    my $get_exec = sub {
        my @prefix     = ();
        my @opts       = ();
        my $solverpath = "/usr/lib/mmdebstrap/solvers";
        if (-e "./proxysolver") {
            # for development purposes, use the current directory if it
            # contains a file called proxysolver
            $solverpath = getcwd();
        }
        if (exists $options->{EDSP_RES}) {
            # the proxysolver records the EDSP exchange into $tmpedsp
            push @prefix, 'env', "APT_EDSP_DUMP_FILENAME=$tmpedsp";
            push @opts, "-oDir::Bin::solvers=$solverpath",
              '--solver=proxysolver';
        }
        return (
            @prefix,
            @{ $options->{ARGV} },
            @opts,
            "-oAPT::Status-Fd=$_[0]",
            # prevent apt from messing up the terminal and allow dpkg to
            # receive SIGINT and quit immediately without waiting for
            # maintainer script to finish
            '-oDpkg::Use-Pty=false',
            @debs
        );
    };
    my $line_has_error = sub { return 0; };
    if ($options->{FIND_APT_WARNINGS}) {
        $line_has_error = sub {
            # apt-get doesn't report a non-zero exit if the update failed.
            # Thus, we have to parse its output. See #778357, #776152, #696335
            # and #745735
            if ($_[0] =~ /^(W: |Err:)/) {
                return 1;
            }
            return 0;
        };
    }
    my $line_handler = sub {
        # apt status-fd lines look like: pmstatus:pkg:42.0:description
        if ($_[0] =~ /(pmstatus|dlstatus):[^:]+:(\d+\.\d+):.*/) {
            my $status = undef;
            if ($1 eq 'pmstatus') {
                $status = "installing";
            } elsif ($1 eq 'dlstatus') {
                $status = "downloading";
            } else {
                # unreachable: only the two alternatives above can match
                error "unknown status: $1";
            }
            return $2, $status;
        }
    };
    run_progress $get_exec, $line_handler, $line_has_error, $options->{CHDIR};
    if (exists $options->{EDSP_RES}) {
        info "parsing EDSP results...";
        open my $fh, '<', $tmpedsp
          or error "failed to open $tmpedsp for reading: $!";
        # deb822-style stanza parser: collect Package/Version fields and
        # flush a pair whenever a blank separator line ends an Install stanza
        my $inst = 0;
        my $pkg;
        my $ver;
        while (my $line = <$fh>) {
            chomp $line;
            if ($line ne "") {
                if ($line =~ /^Install: \d+/) {
                    $inst = 1;
                } elsif ($line =~ /^Package: (.*)/) {
                    $pkg = $1;
                } elsif ($line =~ /^Version: (.*)/) {
                    $ver = $1;
                }
                next;
            }
            if ($inst == 1 && defined $pkg && defined $ver) {
                push @{ $options->{EDSP_RES} }, [$pkg, $ver];
            }
            $inst = 0;
            undef $pkg;
            undef $ver;
        }
        # flush the final stanza: without this, a file that does not end in
        # a blank line would silently lose its last Install result
        if ($inst == 1 && defined $pkg && defined $ver) {
            push @{ $options->{EDSP_RES} }, [$pkg, $ver];
        }
        close $fh;
        unlink $tmpedsp;
    }
    return;
}
|
|
|
|
|
2020-01-09 07:39:40 +00:00
|
|
|
# Execute the code reference $cmd inside a prepared chroot environment.
#
# Depending on $options->{mode} ('root', 'unshare', 'proot', 'fakechroot',
# 'chrootless') this creates or bind-mounts device nodes from @devfiles,
# mounts /sys and /proc, and installs a fake policy-rc.d and
# start-stop-daemon to keep daemons from starting inside the chroot. All
# preparation steps push a corresponding undo action onto @cleanup_tasks;
# those are run (in reverse order) both on normal completion and when a
# signal arrives, via the $cleanup handler installed with "local" on
# $SIG{INT,HUP,PIPE,TERM} so the previous handlers are restored on return.
#
# Errors inside the eval are re-raised after cleanup as
# "run_chroot failed: ...".
sub run_chroot {
    my $cmd     = shift;
    my $options = shift;

    my @cleanup_tasks = ();

    my $cleanup = sub {
        my $signal = $_[0];
        # undo in reverse order of setup
        while (my $task = pop @cleanup_tasks) {
            $task->();
        }
        if ($signal) {
            warning "pid $PID caught signal: $signal";
            exit 1;
        }
    };

    local $SIG{INT}  = $cleanup;
    local $SIG{HUP}  = $cleanup;
    local $SIG{PIPE} = $cleanup;
    local $SIG{TERM} = $cleanup;

    eval {
        if (any { $_ eq $options->{mode} } ('root', 'unshare')) {
            # if more than essential should be installed, make the system look
            # more like a real one by creating or bind-mounting the device
            # nodes
            foreach my $file (@devfiles) {
                my ($fname, $mode, $type, $linkname, $devmajor, $devminor)
                  = @{$file};
                next if $fname eq './dev/';
                if ($type == 0) { # normal file
                    error "type 0 not implemented";
                } elsif ($type == 1) { # hardlink
                    error "type 1 not implemented";
                } elsif ($type == 2) { # symlink
                    if (!$options->{havemknod}) {
                        if (    $options->{mode} eq 'fakechroot'
                            and $linkname =~ /^\/proc/) {
                            # there is no /proc in fakechroot mode
                            next;
                        }
                        if (
                            any { $_ eq $options->{mode} }
                            ('root', 'unshare')
                        ) {
                            push @cleanup_tasks, sub {
                                unlink "$options->{root}/$fname"
                                  or warn "cannot unlink $fname: $!";
                            }
                        }
                        symlink $linkname, "$options->{root}/$fname"
                          or error "cannot create symlink $fname";
                    }
                } elsif ($type == 3 or $type == 4) {
                    # character/block special
                    if (!$options->{havemknod}) {
                        # create an empty file as the bind-mount target
                        open my $fh, '>', "$options->{root}/$fname"
                          or error "cannot open $options->{root}/$fname: $!";
                        close $fh;
                        if ($options->{mode} eq 'unshare') {
                            push @cleanup_tasks, sub {
                                0 == system('umount', '--no-mtab',
                                    "$options->{root}/$fname")
                                  or warn "umount $fname failed: $?";
                                unlink "$options->{root}/$fname"
                                  or warn "cannot unlink $fname: $!";
                            };
                        } elsif ($options->{mode} eq 'root') {
                            push @cleanup_tasks, sub {
                                0 == system('umount',
                                    "$options->{root}/$fname")
                                  or warn "umount failed: $?";
                                unlink "$options->{root}/$fname"
                                  or warn "cannot unlink $fname: $!";
                            };
                        } else {
                            error "unknown mode: $options->{mode}";
                        }
                        0 == system('mount', '-o', 'bind', "/$fname",
                            "$options->{root}/$fname")
                          or error "mount $fname failed: $?";
                    }
                } elsif ($type == 5) { # directory
                    if (!$options->{havemknod}) {
                        if (
                            any { $_ eq $options->{mode} }
                            ('root', 'unshare')
                        ) {
                            push @cleanup_tasks, sub {
                                rmdir "$options->{root}/$fname"
                                  or warn "cannot rmdir $fname: $!";
                            }
                        }
                        if (-e "$options->{root}/$fname") {
                            if (!-d "$options->{root}/$fname") {
                                error "$fname already exists but is not a"
                                  . " directory";
                            }
                        } else {
                            my $num_created
                              = make_path "$options->{root}/$fname",
                              { error => \my $err };
                            if ($err && @$err) {
                                error(
                                    join "; ",
                                    (
                                        map {
                                            "cannot create "
                                              . (join ": ", %{$_})
                                        } @$err
                                    ));
                            } elsif ($num_created == 0) {
                                error "cannot create $options->{root}/$fname";
                            }
                        }
                        chmod $mode, "$options->{root}/$fname"
                          or error "cannot chmod $fname: $!";
                    }
                    if ($options->{mode} eq 'unshare') {
                        push @cleanup_tasks, sub {
                            0 == system('umount', '--no-mtab',
                                "$options->{root}/$fname")
                              or warn "umount $fname failed: $?";
                        };
                    } elsif ($options->{mode} eq 'root') {
                        push @cleanup_tasks, sub {
                            0 == system('umount', "$options->{root}/$fname")
                              or warn "umount $fname failed: $?";
                        };
                    } else {
                        error "unknown mode: $options->{mode}";
                    }
                    0 == system('mount', '-o', 'bind', "/$fname",
                        "$options->{root}/$fname")
                      or error "mount $fname failed: $?";
                } else {
                    error "unsupported type: $type";
                }
            }
        } elsif (
            any { $_ eq $options->{mode} }
            ('proot', 'fakechroot', 'chrootless')
        ) {
            # we cannot mount in fakechroot and proot mode
            # in proot mode we have /dev bind-mounted already through
            # --bind=/dev
        } else {
            error "unknown mode: $options->{mode}";
        }
        # We can only mount /proc and /sys after extracting the essential
        # set because if we mount it before, then base-files will not be able
        # to extract those
        if ($options->{mode} eq 'root') {
            push @cleanup_tasks, sub {
                0 == system('umount', "$options->{root}/sys")
                  or warn "umount /sys failed: $?";
            };
            0 == system(
                'mount', '-t', 'sysfs',
                '-o',    'ro,nosuid,nodev,noexec', 'sys',
                "$options->{root}/sys"
            ) or error "mount /sys failed: $?";
        } elsif ($options->{mode} eq 'unshare') {
            # naturally we have to clean up after ourselves in sudo mode where
            # we do a real mount. But we also need to unmount in unshare mode
            # because otherwise, even with the --one-file-system tar option,
            # the permissions of the mount source will be stored and not the
            # mount target (the directory)
            push @cleanup_tasks, sub {
                # since we cannot write to /etc/mtab we need --no-mtab
                # unmounting /sys only seems to be successful with --lazy
                0 == system('umount', '--no-mtab', '--lazy',
                    "$options->{root}/sys")
                  or warn "umount /sys failed: $?";
            };
            # without the network namespace unshared, we cannot mount a new
            # sysfs. Since we need network, we just bind-mount.
            #
            # we have to rbind because just using bind results in "wrong fs
            # type, bad option, bad superblock" error
            0 == system('mount', '-o', 'rbind', '/sys', "$options->{root}/sys")
              or error "mount /sys failed: $?";
        } elsif (
            any { $_ eq $options->{mode} }
            ('proot', 'fakechroot', 'chrootless')
        ) {
            # we cannot mount in fakechroot and proot mode
            # in proot mode we have /proc bind-mounted already through
            # --bind=/proc
        } else {
            error "unknown mode: $options->{mode}";
        }
        if ($options->{mode} eq 'root') {
            push @cleanup_tasks, sub {
                # some maintainer scripts mount additional stuff into /proc
                # which we need to unmount beforehand
                if (
                    is_mountpoint(
                        $options->{root} . "/proc/sys/fs/binfmt_misc"
                    )
                ) {
                    0 == system('umount',
                        "$options->{root}/proc/sys/fs/binfmt_misc")
                      or error "umount /proc/sys/fs/binfmt_misc failed: $?";
                }
                0 == system('umount', "$options->{root}/proc")
                  or error "umount /proc failed: $?";
            };
            0 == system('mount', '-t', 'proc', '-o', 'ro', 'proc',
                "$options->{root}/proc")
              or error "mount /proc failed: $?";
        } elsif ($options->{mode} eq 'unshare') {
            # naturally we have to clean up after ourselves in sudo mode where
            # we do a real mount. But we also need to unmount in unshare mode
            # because otherwise, even with the --one-file-system tar option,
            # the permissions of the mount source will be stored and not the
            # mount target (the directory)
            push @cleanup_tasks, sub {
                # since we cannot write to /etc/mtab we need --no-mtab
                0 == system('umount', '--no-mtab', "$options->{root}/proc")
                  or error "umount /proc failed: $?";
            };
            0 == system('mount', '-t', 'proc', 'proc', "$options->{root}/proc")
              or error "mount /proc failed: $?";
        } elsif (
            any { $_ eq $options->{mode} }
            ('proot', 'fakechroot', 'chrootless')
        ) {
            # we cannot mount in fakechroot and proot mode
            # in proot mode we have /sys bind-mounted already through
            # --bind=/sys
        } else {
            error "unknown mode: $options->{mode}";
        }

        # prevent daemons from starting
        # the directory might not exist in custom variant, for example
        #
        # ideally, we should use update-alternatives but we cannot rely on it
        # existing inside the chroot
        #
        # See #911290 for more problems of this interface
        if (-d "$options->{root}/usr/sbin/") {
            open my $fh, '>', "$options->{root}/usr/sbin/policy-rc.d"
              or error "cannot open policy-rc.d: $!";
            print $fh "#!/bin/sh\n";
            print $fh "exit 101\n";
            close $fh;
            chmod 0755, "$options->{root}/usr/sbin/policy-rc.d"
              or error "cannot chmod policy-rc.d: $!";
        }

        # the file might not exist if it was removed in a hook
        if (-e "$options->{root}/sbin/start-stop-daemon") {
            if (-e "$options->{root}/sbin/start-stop-daemon.REAL") {
                error "$options->{root}/sbin/start-stop-daemon.REAL already"
                  . " exists";
            }
            # replace the real binary with a no-op shell script; restored
            # after $cmd finishes
            move(
                "$options->{root}/sbin/start-stop-daemon",
                "$options->{root}/sbin/start-stop-daemon.REAL"
            ) or error "cannot move start-stop-daemon: $!";
            open my $fh, '>', "$options->{root}/sbin/start-stop-daemon"
              or error "cannot open start-stop-daemon: $!";
            print $fh "#!/bin/sh\n";
            print $fh "echo \"Warning: Fake start-stop-daemon called, doing"
              . " nothing\">&2\n";
            close $fh;
            chmod 0755, "$options->{root}/sbin/start-stop-daemon"
              or error "cannot chmod start-stop-daemon: $!";
        }

        # run the caller-supplied code inside the prepared environment
        &{$cmd}();

        # cleanup
        if (-e "$options->{root}/sbin/start-stop-daemon.REAL") {
            move(
                "$options->{root}/sbin/start-stop-daemon.REAL",
                "$options->{root}/sbin/start-stop-daemon"
            ) or error "cannot move start-stop-daemon: $!";
        }
        if (-e "$options->{root}/usr/sbin/policy-rc.d") {
            unlink "$options->{root}/usr/sbin/policy-rc.d"
              or error "cannot unlink policy-rc.d: $!";
        }
    };

    # preserve $@ before any other eval can clobber it
    my $error = $@;

    # we use the cleanup function to do the unmounting
    $cleanup->(0);

    if ($error) {
        error "run_chroot failed: $error";
    }
    return;
}
|
|
|
|
|
2020-01-09 07:39:40 +00:00
|
|
|
# Run all hooks registered under $options->{"${name}_hook"} (e.g. "setup",
# "extract", "essential", "customize").
#
# Three kinds of hook invocations are supported:
#   * special hooks (copy-in, copy-out, tar-in, tar-out, upload, download,
#     sync-in, sync-out ...): re-exec ourselves with --hook-helper in a
#     forked child whose stdin/stdout are connected to the hook socket
#   * plain commands (executable file, or no shell metacharacters): run
#     directly via env
#   * anything else: wrapped in sh -c
# Setup hooks run outside the chroot (mount points do not exist yet); all
# other hooks run through run_chroot(). Returns nothing; dies on failure.
sub run_hooks {
    my $name = shift;
    my $options = shift;

    # nothing registered for this hook type
    if (scalar @{ $options->{"${name}_hook"} } == 0) {
        return;
    }

    if ($options->{dryrun}) {
        info "not running ${name}-hooks because of --dry-run";
        return;
    }

    my $runner = sub {
        foreach my $script (@{ $options->{"${name}_hook"} }) {
            # special hook commands are recognized by their leading word
            # followed by a space
            if (
                $script =~ /^(
                copy-in|copy-out
                |tar-in|tar-out
                |upload|download
                |sync-in|sync-out
                )\ /x
            ) {
                info "running special hook: $script";
                if (
                    any { $_ eq $options->{variant} } ('extract', 'custom')
                    and any { $_ eq $options->{mode} }
                    ('fakechroot', 'proot') and $name ne 'setup'
                ) {
                    info "the copy-in, copy-out, tar-in and tar-out commands"
                      . " in fakechroot mode or proot mode might fail in"
                      . " extract and custom variants because there might be"
                      . " no tar inside the chroot";
                }

                my $pid = fork() // error "fork() failed: $!";
                if ($pid == 0) {
                    # whatever the script writes on stdout is sent to the
                    # socket
                    # whatever is written to the socket, send to stdin
                    open(STDOUT, '>&', $options->{hooksock})
                      or error "cannot open STDOUT: $!";
                    open(STDIN, '<&', $options->{hooksock})
                      or error "cannot open STDIN: $!";

                    # we execute ourselves under sh to avoid having to
                    # implement a clever parser of the quoting used in $script
                    # for the filenames
                    my $prefix = "";
                    if ($is_covering) {
                        # keep collecting coverage data in the re-exec'd self
                        $prefix
                          = "$EXECUTABLE_NAME -MDevel::Cover=-silent,-nogcov ";
                    }
                    exec 'sh', '-c',
                      "$prefix$PROGRAM_NAME --hook-helper"
                      . " \"\$1\" \"\$2\" \"\$3\" \"\$4\" \"\$5\" $script",
                      'exec', $options->{root}, $options->{mode}, $name,
                      (
                        defined $options->{qemu}
                        ? "qemu-$options->{qemu}"
                        : 'env',
                        $verbosity_level
                      );
                }
                waitpid($pid, 0);
                $? == 0 or error "special hook failed with exit code $?";
            } elsif (-x $script || $script !~ m/[^\w@\%+=:,.\/-]/a) {
                info "running --$name-hook directly: $script $options->{root}";
                # execute it directly if it's an executable file
                # or if it there are no shell metacharacters
                # (the /a regex modifier makes \w match only ASCII)
                0 == system('env', '--unset=TMPDIR', '--unset=APT_CONFIG',
                    $script, $options->{root})
                  or error "command failed: $script";
            } else {
                info "running --$name-hook in shell: sh -c '$script' exec"
                  . " $options->{root}";
                # otherwise, wrap everything in sh -c
                0 == system('env', '--unset=TMPDIR', '--unset=APT_CONFIG',
                    'sh', '-c', $script, 'exec', $options->{root})
                  or error "command failed: $script";
            }
        }
    };

    if ($name eq 'setup') {
        # execute directly without mounting anything (the mount points do not
        # exist yet)
        &{$runner}();
    } else {
        run_chroot(\&$runner, $options);
    }
    return;
}
|
|
|
|
|
2018-09-18 09:20:24 +00:00
|
|
|
# Top-level driver of the bootstrap: logs the parsed options, sanity-checks
# keyring readability, then runs the fixed pipeline
#   run_setup -> setup hooks -> run_update -> run_download ->
#   run_extract -> extract hooks -> run_prepare -> run_essential ->
#   essential hooks -> run_install -> customize hooks -> run_cleanup
# with the extract/chrootless variants skipping the stages that do not
# apply to them. The order of these calls is the contract of this function.
sub setup {
    my $options = shift;

    # dump all options at debug level, formatted according to their type
    foreach my $key (sort keys %{$options}) {
        my $value = $options->{$key};
        if (!defined $value) {
            next;
        }
        if (ref $value eq '') {
            debug "$key: $options->{$key}";
        } elsif (ref $value eq 'ARRAY') {
            debug "$key: [" . (join ', ', @{$value}) . "]";
        } elsif (ref $value eq 'GLOB') {
            debug "$key: GLOB";
        } else {
            error "unknown type for key $key: " . (ref $value);
        }
    }

    # warn early if the apt keyrings exist but are unreadable
    if (-e $options->{apttrusted} && !-r $options->{apttrusted}) {
        warning "cannot read $options->{apttrusted}";
    }
    if (-e $options->{apttrustedparts} && !-r $options->{apttrustedparts}) {
        warning "cannot read $options->{apttrustedparts}";
    }

    run_setup($options);

    run_hooks('setup', $options);

    run_update($options);

    (my $pkgs_to_install, my $essential_pkgs, my $cached_debs)
      = run_download($options);

    if (    $options->{mode} ne 'chrootless'
        or $options->{variant} eq 'extract') {
        # We have to extract the packages from @essential_pkgs either if we run
        # in chrootless mode and extract variant or in any other mode. In
        # other words, the only scenario in which the @essential_pkgs are not
        # extracted are in chrootless mode in any other than the extract
        # variant.
        run_extract($options, $essential_pkgs);
    }

    run_hooks('extract', $options);

    if ($options->{variant} ne 'extract') {
        my $chrootcmd = [];
        if ($options->{mode} ne 'chrootless') {
            # the command prefix needed to enter the chroot (mode-dependent)
            $chrootcmd = run_prepare($options);
        }

        run_essential($options, $essential_pkgs, $chrootcmd, $cached_debs);

        run_hooks('essential', $options);

        run_install($options, $pkgs_to_install, $chrootcmd);

        run_hooks('customize', $options);
    }

    run_cleanup($options);

    return;
}
|
|
|
|
|
|
|
|
sub run_setup() {
|
|
|
|
my $options = shift;
|
|
|
|
|
2020-08-15 16:09:06 +00:00
|
|
|
my $dpkgversion;
|
|
|
|
{
|
2020-09-02 20:58:20 +00:00
|
|
|
my $pid = open my $fh, '-|' // error "failed to fork(): $!";
|
|
|
|
if ($pid == 0) {
|
|
|
|
# redirect stderr to /dev/null to hide error messages from dpkg
|
|
|
|
# versions before 1.20.0
|
|
|
|
open(STDERR, '>', '/dev/null')
|
|
|
|
or error "cannot open /dev/null for writing: $!";
|
|
|
|
exec 'dpkg', '--robot', '--version';
|
|
|
|
}
|
2020-08-15 16:09:06 +00:00
|
|
|
chomp(
|
|
|
|
$dpkgversion = do { local $/; <$fh> }
|
|
|
|
);
|
|
|
|
close $fh;
|
|
|
|
if ($? == 0 and $dpkgversion =~ /^([0-9]+\.[0-9]+\.[0-9]+) \(\S+\)$/) {
|
|
|
|
# dpkg is new enough for the --robot option
|
|
|
|
$dpkgversion = version->new($1);
|
|
|
|
} else {
|
|
|
|
$dpkgversion = undef;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-03-07 22:40:55 +00:00
|
|
|
{
|
|
|
|
my @directories = (
|
|
|
|
'/etc/apt/apt.conf.d', '/etc/apt/sources.list.d',
|
|
|
|
'/etc/apt/preferences.d', '/var/cache/apt',
|
2020-08-15 16:09:06 +00:00
|
|
|
'/var/lib/apt/lists/partial', '/tmp'
|
2020-03-07 22:40:55 +00:00
|
|
|
);
|
2020-08-15 16:09:06 +00:00
|
|
|
# we need /var/lib/dpkg in case we need to write to /var/lib/dpkg/arch
|
|
|
|
push @directories, '/var/lib/dpkg';
|
2020-08-24 14:28:32 +00:00
|
|
|
# since we do not know the dpkg version inside the chroot at this
|
|
|
|
# point, we can only omit it in chrootless mode
|
|
|
|
if ( $options->{mode} ne 'chrootless'
|
|
|
|
or not defined $dpkgversion
|
|
|
|
or $dpkgversion < "1.20.0") {
|
2020-08-15 16:09:06 +00:00
|
|
|
push @directories, '/etc/dpkg/dpkg.cfg.d/';
|
|
|
|
}
|
2020-03-07 22:40:55 +00:00
|
|
|
# if dpkg and apt operate from the outside we need some more
|
|
|
|
# directories because dpkg and apt might not even be installed inside
|
|
|
|
# the chroot
|
|
|
|
if ($options->{mode} eq 'chrootless') {
|
2020-08-15 16:09:06 +00:00
|
|
|
push @directories, '/var/log/apt';
|
2020-08-24 14:28:32 +00:00
|
|
|
# since we do not know the dpkg version inside the chroot at this
|
|
|
|
# point, we can only omit it in chrootless mode
|
|
|
|
if ( $options->{mode} ne 'chrootless'
|
|
|
|
or not defined $dpkgversion
|
|
|
|
or $dpkgversion < "1.20.0") {
|
2020-08-15 16:09:06 +00:00
|
|
|
push @directories, '/var/lib/dpkg/triggers',
|
|
|
|
'/var/lib/dpkg/info', '/var/lib/dpkg/alternatives',
|
|
|
|
'/var/lib/dpkg/updates';
|
|
|
|
}
|
2020-03-07 22:40:55 +00:00
|
|
|
}
|
|
|
|
foreach my $dir (@directories) {
|
|
|
|
if (-e "$options->{root}/$dir") {
|
|
|
|
if (!-d "$options->{root}/$dir") {
|
|
|
|
error "$dir already exists but is not a directory";
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
my $num_created = make_path "$options->{root}/$dir",
|
|
|
|
{ error => \my $err };
|
|
|
|
if ($err && @$err) {
|
|
|
|
error(
|
|
|
|
join "; ",
|
|
|
|
(map { "cannot create " . (join ": ", %{$_}) } @$err));
|
|
|
|
} elsif ($num_created == 0) {
|
|
|
|
error "cannot create $options->{root}/$dir";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2020-05-03 15:18:34 +00:00
|
|
|
# make sure /tmp is not 0755 like the rest
|
|
|
|
chmod 01777, "$options->{root}/tmp" or error "cannot chmod /tmp: $!";
|
2020-03-07 22:40:55 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
# The TMPDIR set by the user or even /tmp might be inaccessible by the
|
|
|
|
# unshared user. Thus, we place all temporary files in /tmp inside the new
|
|
|
|
# rootfs.
|
|
|
|
#
|
|
|
|
# This will affect calls to tempfile() as well as runs of "apt-get update"
|
|
|
|
# which will create temporary clearsigned.message.XXXXXX files to verify
|
|
|
|
# signatures.
|
|
|
|
{
|
|
|
|
## no critic (Variables::RequireLocalizedPunctuationVars)
|
|
|
|
$ENV{"TMPDIR"} = "$options->{root}/tmp";
|
|
|
|
}
|
|
|
|
|
2020-03-07 01:11:35 +00:00
|
|
|
my ($conf, $tmpfile)
|
2020-03-07 22:40:55 +00:00
|
|
|
= tempfile("mmdebstrap.apt.conf.XXXXXXXXXXXX", TMPDIR => 1)
|
2020-01-08 16:44:07 +00:00
|
|
|
or error "cannot open apt.conf: $!";
|
2018-09-18 09:20:24 +00:00
|
|
|
print $conf "Apt::Architecture \"$options->{nativearch}\";\n";
|
|
|
|
# the host system might have configured additional architectures
|
|
|
|
# force only the native architecture
|
2020-01-08 16:44:07 +00:00
|
|
|
if (scalar @{ $options->{foreignarchs} } > 0) {
|
2020-01-08 14:41:49 +00:00
|
|
|
print $conf "Apt::Architectures { \"$options->{nativearch}\"; ";
|
2020-01-08 16:44:07 +00:00
|
|
|
foreach my $arch (@{ $options->{foreignarchs} }) {
|
2020-01-08 14:41:49 +00:00
|
|
|
print $conf "\"$arch\"; ";
|
|
|
|
}
|
|
|
|
print $conf "};\n";
|
2018-09-18 09:20:24 +00:00
|
|
|
} else {
|
2020-01-08 14:41:49 +00:00
|
|
|
print $conf "Apt::Architectures \"$options->{nativearch}\";\n";
|
2018-09-18 09:20:24 +00:00
|
|
|
}
|
2019-02-28 11:22:42 +00:00
|
|
|
print $conf "Dir \"$options->{root}\";\n";
|
2019-04-25 06:51:42 +00:00
|
|
|
# not needed anymore for apt 1.3 and newer
|
2020-01-08 16:44:07 +00:00
|
|
|
print $conf
|
|
|
|
"Dir::State::Status \"$options->{root}/var/lib/dpkg/status\";\n";
|
2018-09-18 09:20:24 +00:00
|
|
|
# for authentication, use the keyrings from the host
|
2019-12-03 09:16:39 +00:00
|
|
|
print $conf "Dir::Etc::Trusted \"$options->{apttrusted}\";\n";
|
|
|
|
print $conf "Dir::Etc::TrustedParts \"$options->{apttrustedparts}\";\n";
|
2019-03-25 13:31:45 +00:00
|
|
|
if ($options->{variant} ne 'apt') {
|
2020-01-08 14:41:49 +00:00
|
|
|
# apt considers itself essential. Thus, when generating an EDSP
|
|
|
|
# document for an external solver, it will add the Essential:yes field
|
|
|
|
# to the apt package stanza. This is unnecessary for any other variant
|
|
|
|
# than 'apt' because in all other variants we compile the set of
|
|
|
|
# packages we consider essential ourselves and for the 'essential'
|
|
|
|
# variant it would even be wrong to add apt. This workaround is only
|
|
|
|
# needed when apt is used with an external solver but doesn't hurt
|
|
|
|
# otherwise and we don't have a good way to figure out whether apt is
|
|
|
|
# using an external solver or not short of parsing the --aptopt
|
|
|
|
# options.
|
|
|
|
print $conf "pkgCacheGen::ForceEssential \",\";\n";
|
2019-03-25 13:31:45 +00:00
|
|
|
}
|
2020-03-07 01:12:21 +00:00
|
|
|
if ($options->{dryrun}) {
|
|
|
|
# Without this option, apt will fail with:
|
|
|
|
# E: Could not configure 'libc6:amd64'.
|
|
|
|
# E: Could not perform immediate configuration on 'libgcc1:amd64'.
|
|
|
|
#
|
|
|
|
# See https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=953260
|
|
|
|
print $conf "APT::Immediate-Configure false;\n";
|
|
|
|
}
|
2018-09-18 09:20:24 +00:00
|
|
|
close $conf;
|
|
|
|
|
2018-09-23 17:43:14 +00:00
|
|
|
# We put certain configuration items in their own configuration file
|
|
|
|
# because they have to be valid for apt invocation from outside as well as
|
|
|
|
# from inside the chroot.
|
|
|
|
# The config filename is chosen such that any settings in it will be
|
|
|
|
# overridden by what the user specified with --aptopt.
|
|
|
|
{
|
2020-01-08 16:44:07 +00:00
|
|
|
open my $fh, '>', "$options->{root}/etc/apt/apt.conf.d/00mmdebstrap"
|
|
|
|
or error "cannot open /etc/apt/apt.conf.d/00mmdebstrap: $!";
|
2020-01-08 14:41:49 +00:00
|
|
|
print $fh "Apt::Install-Recommends false;\n";
|
|
|
|
print $fh "Acquire::Languages \"none\";\n";
|
|
|
|
close $fh;
|
2018-09-23 17:43:14 +00:00
|
|
|
}
|
|
|
|
|
2020-08-15 16:09:06 +00:00
|
|
|
# apt-get update requires this
|
2018-09-18 09:20:24 +00:00
|
|
|
{
|
2020-01-08 16:44:07 +00:00
|
|
|
open my $fh, '>', "$options->{root}/var/lib/dpkg/status"
|
|
|
|
or error "failed to open(): $!";
|
2020-01-08 14:41:49 +00:00
|
|
|
close $fh;
|
2018-09-18 09:20:24 +00:00
|
|
|
}
|
|
|
|
|
2019-01-07 12:16:51 +00:00
|
|
|
# /var/lib/dpkg/available is required to exist or otherwise package
|
|
|
|
# removals will fail
|
2020-08-24 14:28:32 +00:00
|
|
|
# since we do not know the dpkg version inside the chroot at this point, we
|
|
|
|
# can only omit it in chrootless mode
|
|
|
|
if ( $options->{mode} ne 'chrootless'
|
|
|
|
or not defined $dpkgversion
|
|
|
|
or $dpkgversion < "1.20.0") {
|
2020-01-08 16:44:07 +00:00
|
|
|
open my $fh, '>', "$options->{root}/var/lib/dpkg/available"
|
|
|
|
or error "failed to open(): $!";
|
2020-01-08 14:41:49 +00:00
|
|
|
close $fh;
|
2019-01-07 12:16:51 +00:00
|
|
|
}
|
|
|
|
|
2019-08-21 12:57:54 +00:00
|
|
|
# /var/lib/dpkg/cmethopt is used by dselect
|
|
|
|
# see #930788
|
2020-08-24 14:28:32 +00:00
|
|
|
# since we do not know the dpkg version inside the chroot at this point, we
|
|
|
|
# can only omit it in chrootless mode
|
|
|
|
if ( $options->{mode} ne 'chrootless'
|
|
|
|
or not defined $dpkgversion
|
|
|
|
or $dpkgversion < "1.20.0") {
|
2020-01-08 16:44:07 +00:00
|
|
|
open my $fh, '>', "$options->{root}/var/lib/dpkg/cmethopt"
|
|
|
|
or error "failed to open(): $!";
|
2020-01-08 14:41:49 +00:00
|
|
|
print $fh "apt apt\n";
|
|
|
|
close $fh;
|
2019-08-21 12:57:54 +00:00
|
|
|
}
|
|
|
|
|
2019-08-26 16:25:21 +00:00
|
|
|
# we create /var/lib/dpkg/arch inside the chroot either if there is more
|
|
|
|
# than the native architecture in the chroot or if chrootless mode is
|
|
|
|
# used to create a chroot of a different architecture than the native
|
|
|
|
# architecture outside the chroot.
|
2020-01-08 16:44:07 +00:00
|
|
|
chomp(my $hostarch = `dpkg --print-architecture`);
|
|
|
|
if (
|
|
|
|
scalar @{ $options->{foreignarchs} } > 0
|
|
|
|
or ( $options->{mode} eq 'chrootless'
|
|
|
|
and $hostarch ne $options->{nativearch})
|
|
|
|
) {
|
|
|
|
open my $fh, '>', "$options->{root}/var/lib/dpkg/arch"
|
|
|
|
or error "cannot open /var/lib/dpkg/arch: $!";
|
2020-01-08 14:41:49 +00:00
|
|
|
print $fh "$options->{nativearch}\n";
|
2020-01-08 16:44:07 +00:00
|
|
|
foreach my $arch (@{ $options->{foreignarchs} }) {
|
2020-01-08 14:41:49 +00:00
|
|
|
print $fh "$arch\n";
|
|
|
|
}
|
|
|
|
close $fh;
|
2018-09-18 09:20:24 +00:00
|
|
|
}
|
|
|
|
|
2020-01-08 16:44:07 +00:00
|
|
|
if (scalar @{ $options->{aptopts} } > 0) {
|
|
|
|
open my $fh, '>', "$options->{root}/etc/apt/apt.conf.d/99mmdebstrap"
|
|
|
|
or error "cannot open /etc/apt/apt.conf.d/99mmdebstrap: $!";
|
|
|
|
foreach my $opt (@{ $options->{aptopts} }) {
|
2020-01-08 14:41:49 +00:00
|
|
|
if (-r $opt) {
|
|
|
|
# flush handle because copy() uses syswrite() which bypasses
|
|
|
|
# buffered IO
|
|
|
|
$fh->flush();
|
|
|
|
copy $opt, $fh or error "cannot copy $opt: $!";
|
|
|
|
} else {
|
|
|
|
print $fh $opt;
|
|
|
|
if ($opt !~ /;$/) {
|
|
|
|
print $fh ';';
|
|
|
|
}
|
|
|
|
if ($opt !~ /\n$/) {
|
|
|
|
print $fh "\n";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
close $fh;
|
2020-04-10 10:55:31 +00:00
|
|
|
if ($verbosity_level >= 3) {
|
|
|
|
debug "content of /etc/apt/apt.conf.d/99mmdebstrap:";
|
|
|
|
copy("$options->{root}/etc/apt/apt.conf.d/99mmdebstrap", \*STDERR);
|
|
|
|
}
|
2018-09-18 09:20:24 +00:00
|
|
|
}
|
|
|
|
|
2020-01-08 16:44:07 +00:00
|
|
|
if (scalar @{ $options->{dpkgopts} } > 0) {
|
2020-01-08 14:41:49 +00:00
|
|
|
# FIXME: in chrootless mode, dpkg will only read the configuration
|
|
|
|
# from the host
|
2020-01-08 16:44:07 +00:00
|
|
|
open my $fh, '>', "$options->{root}/etc/dpkg/dpkg.cfg.d/99mmdebstrap"
|
|
|
|
or error "cannot open /etc/dpkg/dpkg.cfg.d/99mmdebstrap: $!";
|
|
|
|
foreach my $opt (@{ $options->{dpkgopts} }) {
|
2020-01-08 14:41:49 +00:00
|
|
|
if (-r $opt) {
|
|
|
|
# flush handle because copy() uses syswrite() which bypasses
|
|
|
|
# buffered IO
|
|
|
|
$fh->flush();
|
|
|
|
copy $opt, $fh or error "cannot copy $opt: $!";
|
|
|
|
} else {
|
|
|
|
print $fh $opt;
|
|
|
|
if ($opt !~ /\n$/) {
|
|
|
|
print $fh "\n";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
close $fh;
|
2020-04-10 10:55:31 +00:00
|
|
|
if ($verbosity_level >= 3) {
|
|
|
|
debug "content of /etc/dpkg/dpkg.cfg.d/99mmdebstrap:";
|
|
|
|
copy("$options->{root}/etc/dpkg/dpkg.cfg.d/99mmdebstrap",
|
|
|
|
\*STDERR);
|
|
|
|
}
|
2018-09-18 09:20:24 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
{
|
2020-01-08 16:44:07 +00:00
|
|
|
open my $fh, '>', "$options->{root}/etc/fstab"
|
|
|
|
or error "cannot open fstab: $!";
|
2020-01-08 14:41:49 +00:00
|
|
|
print $fh "# UNCONFIGURED FSTAB FOR BASE SYSTEM\n";
|
|
|
|
close $fh;
|
2020-01-08 16:44:07 +00:00
|
|
|
chmod 0644, "$options->{root}/etc/fstab"
|
|
|
|
or error "cannot chmod fstab: $!";
|
2018-09-18 09:20:24 +00:00
|
|
|
}
|
|
|
|
|
2020-01-22 22:30:28 +00:00
|
|
|
# write /etc/apt/sources.list and files in /etc/apt/sources.list.d/
|
2018-09-18 09:20:24 +00:00
|
|
|
{
|
2020-01-22 22:30:28 +00:00
|
|
|
my $firstentry = $options->{sourceslists}->[0];
|
|
|
|
# if the first sources.list entry is of one-line type and without
|
|
|
|
# explicit filename, then write out an actual /etc/apt/sources.list
|
|
|
|
# otherwise everything goes into /etc/apt/sources.list.d
|
|
|
|
my $fname;
|
|
|
|
if ($firstentry->{type} eq 'one-line'
|
|
|
|
&& !defined $firstentry->{fname}) {
|
|
|
|
$fname = "$options->{root}/etc/apt/sources.list";
|
|
|
|
} else {
|
|
|
|
$fname = "$options->{root}/etc/apt/sources.list.d/0000";
|
|
|
|
if (defined $firstentry->{fname}) {
|
|
|
|
$fname .= $firstentry->{fname};
|
|
|
|
if ( $firstentry->{fname} !~ /\.list/
|
|
|
|
&& $firstentry->{fname} !~ /\.sources/) {
|
|
|
|
if ($firstentry->{type} eq 'one-line') {
|
|
|
|
$fname .= '.list';
|
|
|
|
} elsif ($firstentry->{type} eq 'deb822') {
|
|
|
|
$fname .= '.sources';
|
|
|
|
} else {
|
|
|
|
error "invalid type: $firstentry->{type}";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
# if no filename is given, then this must be a deb822 file
|
|
|
|
# because if it was a one-line type file, then it would've been
|
|
|
|
# written to /etc/apt/sources.list
|
|
|
|
$fname .= 'main.sources';
|
|
|
|
}
|
|
|
|
}
|
|
|
|
open my $fh, '>', "$fname" or error "cannot open $fname: $!";
|
|
|
|
print $fh $firstentry->{content};
|
2020-01-08 14:41:49 +00:00
|
|
|
close $fh;
|
2020-01-22 22:30:28 +00:00
|
|
|
# everything else goes into /etc/apt/sources.list.d/
|
|
|
|
for (my $i = 1 ; $i < scalar @{ $options->{sourceslists} } ; $i++) {
|
|
|
|
my $entry = $options->{sourceslists}->[$i];
|
|
|
|
my $fname = "$options->{root}/etc/apt/sources.list.d/"
|
|
|
|
. sprintf("%04d", $i);
|
|
|
|
if (defined $entry->{fname}) {
|
|
|
|
$fname .= $entry->{fname};
|
|
|
|
if ( $entry->{fname} !~ /\.list/
|
|
|
|
&& $entry->{fname} !~ /\.sources/) {
|
|
|
|
if ($entry->{type} eq 'one-line') {
|
|
|
|
$fname .= '.list';
|
|
|
|
} elsif ($entry->{type} eq 'deb822') {
|
|
|
|
$fname .= '.sources';
|
|
|
|
} else {
|
|
|
|
error "invalid type: $entry->{type}";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if ($entry->{type} eq 'one-line') {
|
|
|
|
$fname .= 'main.list';
|
|
|
|
} elsif ($entry->{type} eq 'deb822') {
|
|
|
|
$fname .= 'main.sources';
|
|
|
|
} else {
|
|
|
|
error "invalid type: $entry->{type}";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
open my $fh, '>', "$fname" or error "cannot open $fname: $!";
|
|
|
|
print $fh $entry->{content};
|
|
|
|
close $fh;
|
|
|
|
}
|
2018-09-18 09:20:24 +00:00
|
|
|
}
|
|
|
|
|
2018-10-22 15:05:56 +00:00
|
|
|
# allow network access from within
|
2019-09-15 12:12:49 +00:00
|
|
|
if (-e "/etc/resolv.conf") {
|
2020-01-08 16:44:07 +00:00
|
|
|
copy("/etc/resolv.conf", "$options->{root}/etc/resolv.conf")
|
|
|
|
or error "cannot copy /etc/resolv.conf: $!";
|
2019-09-15 12:12:49 +00:00
|
|
|
} else {
|
2020-01-08 16:41:46 +00:00
|
|
|
warning("Host system does not have a /etc/resolv.conf to copy into the"
|
2020-01-08 16:44:07 +00:00
|
|
|
. " rootfs.");
|
2019-09-15 12:12:49 +00:00
|
|
|
}
|
|
|
|
if (-e "/etc/hostname") {
|
2020-01-08 16:44:07 +00:00
|
|
|
copy("/etc/hostname", "$options->{root}/etc/hostname")
|
|
|
|
or error "cannot copy /etc/hostname: $!";
|
2019-09-15 12:12:49 +00:00
|
|
|
} else {
|
2020-01-08 16:41:46 +00:00
|
|
|
warning("Host system does not have a /etc/hostname to copy into the"
|
2020-01-08 16:44:07 +00:00
|
|
|
. " rootfs.");
|
2019-09-15 12:12:49 +00:00
|
|
|
}
|
2018-10-22 15:05:56 +00:00
|
|
|
|
|
|
|
if ($options->{havemknod}) {
|
2020-01-08 14:41:49 +00:00
|
|
|
foreach my $file (@devfiles) {
|
2020-01-08 16:44:07 +00:00
|
|
|
my ($fname, $mode, $type, $linkname, $devmajor, $devminor)
|
|
|
|
= @{$file};
|
|
|
|
if ($type == 0) { # normal file
|
2020-01-08 14:41:49 +00:00
|
|
|
error "type 0 not implemented";
|
2020-01-08 16:44:07 +00:00
|
|
|
} elsif ($type == 1) { # hardlink
|
2020-01-08 14:41:49 +00:00
|
|
|
error "type 1 not implemented";
|
2020-01-08 16:44:07 +00:00
|
|
|
} elsif ($type == 2) { # symlink
|
|
|
|
if ( $options->{mode} eq 'fakechroot'
|
|
|
|
and $linkname =~ /^\/proc/) {
|
2020-01-08 14:41:49 +00:00
|
|
|
# there is no /proc in fakechroot mode
|
|
|
|
next;
|
|
|
|
}
|
2020-01-08 16:44:07 +00:00
|
|
|
symlink $linkname, "$options->{root}/$fname"
|
|
|
|
or error "cannot create symlink $fname";
|
|
|
|
next; # chmod cannot work on symlinks
|
|
|
|
} elsif ($type == 3) { # character special
|
|
|
|
0 == system('mknod', "$options->{root}/$fname", 'c',
|
|
|
|
$devmajor, $devminor)
|
|
|
|
or error "mknod failed: $?";
|
|
|
|
} elsif ($type == 4) { # block special
|
|
|
|
0 == system('mknod', "$options->{root}/$fname", 'b',
|
|
|
|
$devmajor, $devminor)
|
|
|
|
or error "mknod failed: $?";
|
|
|
|
} elsif ($type == 5) { # directory
|
2020-01-08 14:41:49 +00:00
|
|
|
if (-e "$options->{root}/$fname") {
|
2020-01-08 16:44:07 +00:00
|
|
|
if (!-d "$options->{root}/$fname") {
|
2020-01-08 14:41:49 +00:00
|
|
|
error "$fname already exists but is not a directory";
|
|
|
|
}
|
|
|
|
} else {
|
2020-01-08 16:44:07 +00:00
|
|
|
my $num_created = make_path "$options->{root}/$fname",
|
|
|
|
{ error => \my $err };
|
2020-01-08 14:41:49 +00:00
|
|
|
if ($err && @$err) {
|
2020-01-08 16:44:07 +00:00
|
|
|
error(
|
|
|
|
join "; ",
|
|
|
|
(
|
|
|
|
map { "cannot create " . (join ": ", %{$_}) }
|
|
|
|
@$err
|
|
|
|
));
|
2020-01-08 14:41:49 +00:00
|
|
|
} elsif ($num_created == 0) {
|
|
|
|
error "cannot create $options->{root}/$fname";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
error "unsupported type: $type";
|
|
|
|
}
|
2020-01-08 16:44:07 +00:00
|
|
|
chmod $mode, "$options->{root}/$fname"
|
|
|
|
or error "cannot chmod $fname: $!";
|
2020-01-08 14:41:49 +00:00
|
|
|
}
|
2018-10-22 15:05:56 +00:00
|
|
|
}
|
|
|
|
|
2018-09-18 09:20:24 +00:00
|
|
|
# we tell apt about the configuration via a config file passed via the
|
|
|
|
# APT_CONFIG environment variable instead of using the --option command
|
|
|
|
# line arguments because configuration settings like Dir::Etc have already
|
|
|
|
# been evaluated at the time that apt takes its command line arguments
|
|
|
|
# into account.
|
2020-01-09 07:39:40 +00:00
|
|
|
{
|
|
|
|
## no critic (Variables::RequireLocalizedPunctuationVars)
|
|
|
|
$ENV{"APT_CONFIG"} = "$tmpfile";
|
|
|
|
}
|
2020-11-13 18:02:41 +00:00
|
|
|
# we have to make the config file world readable so that a possible
|
|
|
|
# /usr/lib/apt/solvers/apt process which is run by the _apt user is also
|
|
|
|
# able to read it
|
|
|
|
chmod 0666, "$tmpfile" or error "cannot chmod $tmpfile: $!";
|
2020-01-21 12:12:44 +00:00
|
|
|
if ($verbosity_level >= 3) {
|
2020-04-09 10:51:24 +00:00
|
|
|
0 == system('apt-get', '--version')
|
|
|
|
or error "apt-get --version failed: $?";
|
2020-01-21 12:12:44 +00:00
|
|
|
0 == system('apt-config', 'dump') or error "apt-config failed: $?";
|
2020-03-07 01:13:26 +00:00
|
|
|
debug "content of $tmpfile:";
|
|
|
|
copy($tmpfile, \*STDERR);
|
2020-01-21 12:12:44 +00:00
|
|
|
}
|
2018-09-21 06:04:40 +00:00
|
|
|
|
2020-06-08 13:45:22 +00:00
|
|
|
if (any { $_ eq $options->{mode} } ('fakechroot', 'proot')) {
|
|
|
|
# Apt dropping privileges to another user than root is not useful in
|
|
|
|
# fakechroot and proot mode because all users are faked and thus there
|
|
|
|
# is no real privilege difference anyways. Thus, we also print no
|
|
|
|
# warning message in this case.
|
|
|
|
open my $fh, '>>', $tmpfile
|
|
|
|
or error "cannot open $tmpfile for appending: $!";
|
|
|
|
print $fh "APT::Sandbox::User \"root\";\n";
|
|
|
|
close $fh;
|
|
|
|
} else {
|
|
|
|
# when apt-get update is run by the root user, then apt will attempt to
|
|
|
|
# drop privileges to the _apt user. This will fail if the _apt user
|
|
|
|
# does not have permissions to read the root directory. In that case,
|
|
|
|
# we have to disable apt sandboxing. This can for example happen in
|
|
|
|
# root mode when the path of the chroot is not in a world-readable
|
|
|
|
# location.
|
2020-01-08 14:41:49 +00:00
|
|
|
my $partial = '/var/lib/apt/lists/partial';
|
2020-01-08 16:44:07 +00:00
|
|
|
if (
|
|
|
|
system('/usr/lib/apt/apt-helper', 'drop-privs', '--', 'test',
|
|
|
|
'-r', "$options->{root}$partial") != 0
|
|
|
|
) {
|
2020-01-08 16:41:46 +00:00
|
|
|
warning "Download is performed unsandboxed as root as file"
|
|
|
|
. " $options->{root}$partial couldn't be accessed by user _apt";
|
2020-01-08 16:44:07 +00:00
|
|
|
open my $fh, '>>', $tmpfile
|
|
|
|
or error "cannot open $tmpfile for appending: $!";
|
2020-01-08 14:41:49 +00:00
|
|
|
print $fh "APT::Sandbox::User \"root\";\n";
|
|
|
|
close $fh;
|
|
|
|
}
|
2019-02-28 10:54:03 +00:00
|
|
|
}
|
|
|
|
|
2019-02-20 12:32:49 +00:00
|
|
|
# setting PATH for chroot, ldconfig, start-stop-daemon...
|
|
|
|
if (defined $ENV{PATH} && $ENV{PATH} ne "") {
|
2020-01-09 07:39:40 +00:00
|
|
|
## no critic (Variables::RequireLocalizedPunctuationVars)
|
2020-01-08 14:41:49 +00:00
|
|
|
$ENV{PATH} = "$ENV{PATH}:/usr/sbin:/usr/bin:/sbin:/bin";
|
2019-02-20 12:32:49 +00:00
|
|
|
} else {
|
2020-01-09 07:39:40 +00:00
|
|
|
## no critic (Variables::RequireLocalizedPunctuationVars)
|
2020-01-08 14:41:49 +00:00
|
|
|
$ENV{PATH} = "/usr/sbin:/usr/bin:/sbin:/bin";
|
2019-02-20 12:32:49 +00:00
|
|
|
}
|
|
|
|
|
2020-04-09 21:07:02 +00:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
# Run "apt-get update" for the chroot being built and verify that the
# update actually downloaded at least one package index.
#
# Arguments: a single options hashref; only $options->{root} is used here
# (the chroot directory, passed as CHDIR for the apt invocation).
# Returns: nothing.  On failure it calls error(), which by the conventions
# of this file aborts the program.
sub run_update() {
    my $options = shift;

    info "running apt-get update...";
    run_apt_progress({
        ARGV => ['apt-get', 'update'],
        CHDIR => $options->{root},
        FIND_APT_WARNINGS => 1
    });

    # check if anything was downloaded at all
    {
        # FIX: the original wrote «'indextargets' // error "..."» — the
        # defined-or operator bound to the (always defined) string literal,
        # so a failed open() was silently ignored.  Use low-precedence "or"
        # attached to open() so fork/exec failures are actually reported.
        open my $fh, '-|', 'apt-get', 'indextargets'
          or error "failed to fork(): $!";
        # Slurp all of the command's output.  Default to the empty string:
        # slurping an empty stream yields undef, and chomp-ing undef would
        # raise an uninitialized-value warning.
        my $indextargets = do { local $/; <$fh> } // '';
        chomp $indextargets;
        # NOTE: close() is deliberately unchecked here (matching the
        # original behavior); emptiness of the output is what we test.
        close $fh;
        if ($indextargets eq '') {
            # With any verbosity, dump the apt policy to help the user
            # diagnose why nothing was fetched before aborting.
            if ($verbosity_level >= 1) {
                0 == system('apt-cache', 'policy')
                  or error "apt-cache failed: $?";
            }
            error "apt-get update didn't download anything";
        }
    }

    return;
}
|
|
|
|
|
|
|
|
sub run_download() {
    # Compute the package set for the requested variant and download (or
    # simulate downloading) the corresponding .deb files with apt.
    #
    # Arguments: a single options hashref.  Fields read here: include,
    # variant, dryrun, root, skip, nativearch.
    # Returns a three-element list of array refs:
    #   (\@pkgs_to_install, \@essential_pkgs, \@cached_debs)
    # where @essential_pkgs are paths (relative to the chroot root) of the
    # downloaded .deb files and @cached_debs are .deb files that were
    # already present in /var/cache/apt/archives/ beforehand.
    # NOTE(review): the early return in the extract/custom branch returns
    # only a two-element list ([], []) — callers apparently tolerate the
    # missing third element; confirm against call sites.
    my $options = shift;

    # Split the --include arguments (comma- or whitespace-separated) into a
    # deduplicated flat list of package names.
    my @pkgs_to_install;
    for my $incl (@{ $options->{include} }) {
        for my $pkg (split /[,\s]+/, $incl) {
            # strip leading and trailing whitespace
            $pkg =~ s/^\s+|\s+$//g;
            # skip if the remainder is an empty string
            if ($pkg eq '') {
                next;
            }
            # do not append component if it's already in the list
            if (any { $_ eq $pkg } @pkgs_to_install) {
                next;
            }
            push @pkgs_to_install, $pkg;
        }
    }
    # The buildd variant additionally pulls in build-essential.
    if ($options->{variant} eq 'buildd') {
        push @pkgs_to_install, 'build-essential';
    }

    # We use /var/cache/apt/archives/ to figure out which packages apt chooses
    # to install. That's why the directory must be empty if:
    # - /var/cache/apt/archives exists, and
    # - no simulation run is done, and
    # - the variant is not extract or custom or the number to be
    #   installed packages not zero
    my @cached_debs = ();
    my @dl_debs = ();
    if (
        !$options->{dryrun}
        && ((none { $_ eq $options->{variant} } ('extract', 'custom'))
            || scalar @pkgs_to_install != 0)
        && -d "$options->{root}/var/cache/apt/archives/"
    ) {
        my $apt_archives = "/var/cache/apt/archives/";
        opendir my $dh, "$options->{root}/$apt_archives"
          or error "cannot read $apt_archives";
        # Collect pre-existing regular *.deb files in the archives dir.
        while (my $deb = readdir $dh) {
            if ($deb !~ /\.deb$/) {
                next;
            }
            if (!-f "$options->{root}/$apt_archives/$deb") {
                next;
            }
            push @cached_debs, $deb;
        }
        closedir $dh;
        # A non-empty cache is an error unless the user explicitly opted
        # out of the check via --skip=download/empty.
        if (scalar @cached_debs > 0) {
            if (any { $_ eq 'download/empty' } @{ $options->{skip} }) {
                info "skipping download/empty as requested";
            } else {
                error("/var/cache/apt/archives/ inside the chroot contains: "
                      . (join ', ', (sort @cached_debs)));
            }
        }
    }

    # To figure out the right package set for the apt variant we can use:
    # $ apt-get dist-upgrade -o dir::state::status=/dev/null
    # This is because that variants only contain essential packages and
    # apt and libapt treats apt as essential. If we want to install less
    # (essential variant) then we have to compute the package set ourselves.
    # Same if we want to install priority based variants.
    if (any { $_ eq $options->{variant} } ('extract', 'custom')) {
        # extract/custom: install exactly what the user asked for, nothing
        # implicit.
        if (scalar @pkgs_to_install == 0) {
            info "nothing to download -- skipping...";
            return ([], []);
        }

        my %result = ();
        if ($options->{dryrun}) {
            info "simulate downloading packages with apt...";
        } else {
            # if there are already packages in /var/cache/apt/archives/, we
            # need to use our proxysolver to obtain the solution chosen by apt
            if (scalar @cached_debs > 0) {
                # run_apt_progress is expected to fill @dl_debs via this
                # EDSP_RES key -- see the @dl_debs handling further below.
                $result{EDSP_RES} = \@dl_debs;
            }
            info "downloading packages with apt...";
        }
        run_apt_progress({
            ARGV => [
                'apt-get',
                '--yes',
                '-oApt::Get::Download-Only=true',
                # an empty list () here contributes no argument at all
                $options->{dryrun} ? '-oAPT::Get::Simulate=true' : (),
                'install'
            ],
            PKGS => [@pkgs_to_install],
            %result
        });
    } elsif ($options->{variant} eq 'apt') {
        # if we just want to install Essential:yes packages, apt and their
        # dependencies then we can make use of libapt treating apt as
        # implicitly essential. An upgrade with the (currently) empty status
        # file will trigger an installation of the essential packages plus apt.
        #
        # 2018-09-02, #debian-dpkg on OFTC, times in UTC+2
        # 23:39 < josch> I'll just put it in my script and if it starts
        #                breaking some time I just say it's apt's fault. :P
        # 23:42 < DonKult> that is how it usually works, so yes, do that :P (<-
        #                  and please add that line next to it so you can
        #                  remind me in 5+ years that I said that after I wrote
        #                  in the bugreport: "Are you crazy?!? Nobody in his
        #                  right mind would even suggest depending on it!")
        my %result = ();
        if ($options->{dryrun}) {
            info "simulate downloading packages with apt...";
        } else {
            # if there are already packages in /var/cache/apt/archives/, we
            # need to use our proxysolver to obtain the solution chosen by apt
            if (scalar @cached_debs > 0) {
                $result{EDSP_RES} = \@dl_debs;
            }
            info "downloading packages with apt...";
        }
        run_apt_progress({
            ARGV => [
                'apt-get',
                '--yes',
                '-oApt::Get::Download-Only=true',
                $options->{dryrun} ? '-oAPT::Get::Simulate=true' : (),
                'dist-upgrade'
            ],
            %result
        });
    } elsif (
        any { $_ eq $options->{variant} } (
            'essential', 'standard', 'important', 'required', 'buildd',
            'minbase'
        )
    ) {
        # Priority-based variants: walk every Packages index known to apt
        # and classify each stanza by Essential/Priority ourselves.
        my %ess_pkgs;
        open(my $pipe_apt, '-|', 'apt-get', 'indextargets', '--format',
            '$(FILENAME)', 'Created-By: Packages')
          or error "cannot start apt-get indextargets: $!";
        while (my $fname = <$pipe_apt>) {
            chomp $fname;
            # apt-helper cat-file transparently decompresses the index.
            open(my $pipe_cat, '-|', '/usr/lib/apt/apt-helper', 'cat-file',
                $fname)
              or error "cannot start apt-helper cat-file: $!";

            # Per-stanza accumulator state; reset on every blank line.
            my $pkgname;
            my $ess = '';
            my $prio = 'optional';
            my $arch = '';
            while (my $line = <$pipe_cat>) {
                chomp $line;
                # Dpkg::Index takes 10 seconds to parse a typical Packages
                # file. Thus we instead use a simple parser that just retrieve
                # the information we need.
                if ($line ne "") {
                    if ($line =~ /^Package: (.*)/) {
                        $pkgname = $1;
                    } elsif ($line =~ /^Essential: yes$/) {
                        $ess = 'yes';
                    } elsif ($line =~ /^Priority: (.*)/) {
                        $prio = $1;
                    } elsif ($line =~ /^Architecture: (.*)/) {
                        $arch = $1;
                    }
                    next;
                }
                # we are only interested of packages of native architecture or
                # Architecture:all
                if ($arch eq $options->{nativearch} or $arch eq 'all') {
                    # the line is empty, thus a package stanza just finished
                    # processing and we can handle it now
                    if ($ess eq 'yes') {
                        # only the keys of %ess_pkgs are ever used; note
                        # that assigning () stores undef as the value
                        $ess_pkgs{$pkgname} = ();
                    } elsif ($options->{variant} eq 'essential') {
                        # for this variant we are only interested in the
                        # essential packages
                    } elsif (
                        any { $_ eq $options->{variant} } (
                            'standard', 'important', 'required', 'buildd',
                            'minbase'
                        )
                    ) {
                        if ($prio eq 'optional' or $prio eq 'extra') {
                            # always ignore packages of priority optional and
                            # extra
                        } elsif ($prio eq 'standard') {
                            # standard-priority packages only belong to the
                            # 'standard' and 'buildd'-excluded... i.e. only
                            # to variants NOT in the list below
                            if (
                                none { $_ eq $options->{variant} }
                                ('important', 'required', 'buildd', 'minbase')
                            ) {
                                push @pkgs_to_install, $pkgname;
                            }
                        } elsif ($prio eq 'important') {
                            if (
                                none { $_ eq $options->{variant} }
                                ('required', 'buildd', 'minbase')
                            ) {
                                push @pkgs_to_install, $pkgname;
                            }
                        } elsif ($prio eq 'required') {
                            # required packages are part of all sets except
                            # essential and apt
                            push @pkgs_to_install, $pkgname;
                        } else {
                            error "unknown priority: $prio";
                        }
                    } else {
                        error "unknown variant: $options->{variant}";
                    }
                }
                # reset values
                undef $pkgname;
                $ess = '';
                $prio = 'optional';
                $arch = '';
            }

            close $pipe_cat;
            $? == 0 or error "apt-helper cat-file failed: $?";
        }
        close $pipe_apt;
        $? == 0 or error "apt-get indextargets failed: $?";

        debug "Identified the following Essential:yes packages:";
        foreach my $pkg (sort keys %ess_pkgs) {
            debug " $pkg";
        }

        my %result = ();
        if ($options->{dryrun}) {
            info "simulate downloading packages with apt...";
        } else {
            # if there are already packages in /var/cache/apt/archives/, we
            # need to use our proxysolver to obtain the solution chosen by apt
            if (scalar @cached_debs > 0) {
                $result{EDSP_RES} = \@dl_debs;
            }
            info "downloading packages with apt...";
        }
        run_apt_progress({
            ARGV => [
                'apt-get',
                '--yes',
                '-oApt::Get::Download-Only=true',
                $options->{dryrun} ? '-oAPT::Get::Simulate=true' : (),
                'install'
            ],
            PKGS => [keys %ess_pkgs],
            %result
        });
    } else {
        error "unknown variant: $options->{variant}";
    }

    my @essential_pkgs;
    if (scalar @cached_debs > 0 && scalar @dl_debs > 0) {
        # The cache was pre-populated, so readdir cannot tell us which debs
        # belong to this run; reconstruct the file names from the EDSP
        # solver result in @dl_debs instead.
        my $archives = "/var/cache/apt/archives/";
        # for each package in @dl_debs, check if it's in
        # /var/cache/apt/archives/ and add it to @essential_pkgs
        foreach my $p (@dl_debs) {
            my ($pkg, $ver_epoch) = @{$p};
            # apt appends the architecture at the end of the package name
            ($pkg, my $arch) = split ':', $pkg, 2;
            # apt replaces the colon by its percent encoding %3a
            # (only the first colon -- a version has at most one, the epoch
            # separator)
            my $ver = $ver_epoch;
            $ver =~ s/:/%3a/;
            # the architecture returned by apt is the native architecture.
            # Since we don't know whether the package is architecture
            # independent or not, we first try with the native arch and then
            # with "all" and only error out if neither exists.
            if (-e "$options->{root}/$archives/${pkg}_${ver}_$arch.deb") {
                push @essential_pkgs, "$archives/${pkg}_${ver}_$arch.deb";
            } elsif (-e "$options->{root}/$archives/${pkg}_${ver}_all.deb") {
                push @essential_pkgs, "$archives/${pkg}_${ver}_all.deb";
            } else {
                error( "cannot find package for $pkg:$arch (= $ver_epoch) "
                      . "in /var/cache/apt/archives/");
            }
        }
    } else {
        # collect the .deb files that were downloaded by apt from the content
        # of /var/cache/apt/archives/
        if (!$options->{dryrun}) {
            my $apt_archives = "/var/cache/apt/archives/";
            opendir my $dh, "$options->{root}/$apt_archives"
              or error "cannot read $apt_archives";
            while (my $deb = readdir $dh) {
                if ($deb !~ /\.deb$/) {
                    next;
                }
                $deb = "$apt_archives/$deb";
                if (!-f "$options->{root}/$deb") {
                    next;
                }
                push @essential_pkgs, $deb;
            }
            closedir $dh;

            if (scalar @essential_pkgs == 0) {
                # check if a file:// URI was used
                open(my $pipe_apt, '-|', 'apt-get', 'indextargets', '--format',
                    '$(URI)', 'Created-By: Packages')
                  or error "cannot start apt-get indextargets: $!";
                while (my $uri = <$pipe_apt>) {
                    if ($uri =~ /^file:\/\//) {
                        # file:// makes apt use the index in place without
                        # copying anything into the archives dir
                        error
                          "nothing got downloaded -- use copy:// instead of"
                          . " file://";
                    }
                }
                error "nothing got downloaded";
            }
        }
    }
    # Unpack order matters. Since we create this list using two different
    # methods but we want both methods to have the same result, we sort the
    # list before returning it.
    @essential_pkgs = sort @essential_pkgs;

    return (\@pkgs_to_install, \@essential_pkgs, \@cached_debs);
}
|
|
|
|
|
|
|
|
# run_extract($options, $essential_pkgs)
#
# Extract the downloaded essential .deb archives into the chroot directory
# without configuring them and without running any maintainer scripts.  For
# each package a pipeline of forked children is assembled:
#
#     dpkg-deb --fsys-tarfile pkg.deb  [ | tarfilter ]  | tar -C root -x
#
# The optional tarfilter stage is inserted only when the dpkg configuration
# file 99mmdebstrap inside the chroot contains path-exclude/path-include
# options, so that excluded paths never reach the filesystem.
#
# $options        - hashref; uses {root} (chroot path) and {dryrun}
# $essential_pkgs - arrayref of .deb paths relative to {root}
#
# Returns nothing.  Calls error() (which dies) on any failure.
sub run_extract() {
    my $options        = shift;
    my $essential_pkgs = shift;

    if ($options->{dryrun}) {
        info "skip extracting packages because of --dry-run";
        return;
    }

    if (scalar @{$essential_pkgs} == 0) {
        info "nothing to extract -- skipping...";
        return;
    }

    info "extracting archives...";
    print_progress 0.0;
    my $counter = 0;
    my $total   = scalar @{$essential_pkgs};
    foreach my $deb (@{$essential_pkgs}) {
        $counter += 1;

        my $tarfilter;
        my @tarfilterargs;
        # if the path-excluded option was added to the dpkg config,
        # insert the tarfilter between dpkg-deb and tar
        if (-e "$options->{root}/etc/dpkg/dpkg.cfg.d/99mmdebstrap") {
            open(my $fh, '<',
                "$options->{root}/etc/dpkg/dpkg.cfg.d/99mmdebstrap")
              or error "cannot open /etc/dpkg/dpkg.cfg.d/99mmdebstrap: $!";
            my @matches = grep { /^path-(?:exclude|include)=/ } <$fh>;
            close $fh;
            # chomp (not chop) so that a final line without a trailing
            # newline does not lose its last real character
            chomp @matches;
            @tarfilterargs = map { "--" . $_ } @matches;
        }
        if (scalar @tarfilterargs > 0) {
            # prefer the tarfilter script from the source tree (when run
            # from a git checkout) over the installed mmtarfilter
            if (-x "./tarfilter") {
                $tarfilter = "./tarfilter";
            } else {
                $tarfilter = "mmtarfilter";
            }
        }

        # Plumbing: with a filter, dpkg-deb writes into filter_reader and
        # tar reads from tar_reader, with the filter in between.  Without
        # a filter there is a single pipe from dpkg-deb to tar.
        my $dpkg_writer;
        my $tar_reader;
        my $filter_reader;
        my $filter_writer;
        if (scalar @tarfilterargs > 0) {
            pipe $filter_reader, $dpkg_writer or error "pipe failed: $!";
            pipe $tar_reader, $filter_writer or error "pipe failed: $!";
        } else {
            pipe $tar_reader, $dpkg_writer or error "pipe failed: $!";
        }
        # not using dpkg-deb --extract as that would replace the
        # merged-usr symlinks with plain directories
        # not using dpkg --unpack because that would try running preinst
        # maintainer scripts
        my $pid1 = fork() // error "fork() failed: $!";
        if ($pid1 == 0) {
            # child 1: dpkg-deb streams the data.tar contents to stdout
            open(STDOUT, '>&', $dpkg_writer) or error "cannot open STDOUT: $!";
            close($tar_reader) or error "cannot close tar_reader: $!";
            if (scalar @tarfilterargs > 0) {
                close($filter_reader)
                  or error "cannot close filter_reader: $!";
                close($filter_writer)
                  or error "cannot close filter_writer: $!";
            }
            debug("running dpkg-deb --fsys-tarfile $options->{root}/$deb");
            eval { Devel::Cover::set_coverage("none") } if $is_covering;
            exec 'dpkg-deb', '--fsys-tarfile', "$options->{root}/$deb";
        }
        my $pid2;
        if (scalar @tarfilterargs > 0) {
            # child 2 (optional): filter the tar stream
            $pid2 = fork() // error "fork() failed: $!";
            if ($pid2 == 0) {
                open(STDIN, '<&', $filter_reader)
                  or error "cannot open STDIN: $!";
                open(STDOUT, '>&', $filter_writer)
                  or error "cannot open STDOUT: $!";
                close($dpkg_writer) or error "cannot close dpkg_writer: $!";
                close($tar_reader) or error "cannot close tar_reader: $!";
                debug("running $tarfilter " . (join " ", @tarfilterargs));
                eval { Devel::Cover::set_coverage("none") } if $is_covering;
                exec $tarfilter, @tarfilterargs;
            }
        }
        # child 3: tar unpacks the stream into the chroot directory
        my $pid3 = fork() // error "fork() failed: $!";
        if ($pid3 == 0) {
            open(STDIN, '<&', $tar_reader) or error "cannot open STDIN: $!";
            close($dpkg_writer) or error "cannot close dpkg_writer: $!";
            if (scalar @tarfilterargs > 0) {
                close($filter_reader)
                  or error "cannot close filter_reader: $!";
                close($filter_writer)
                  or error "cannot close filter_writer: $!";
            }
            debug( "running tar -C $options->{root}"
                . " --keep-directory-symlink --extract --file -");
            eval { Devel::Cover::set_coverage("none") } if $is_covering;
            exec 'tar', '-C', $options->{root},
              '--keep-directory-symlink', '--extract', '--file', '-';
        }
        # parent: close all pipe ends so the children see EOF, then reap
        close($dpkg_writer) or error "cannot close dpkg_writer: $!";
        close($tar_reader) or error "cannot close tar_reader: $!";
        if (scalar @tarfilterargs > 0) {
            close($filter_reader) or error "cannot close filter_reader: $!";
            close($filter_writer) or error "cannot close filter_writer: $!";
        }
        waitpid($pid1, 0);
        $? == 0 or error "dpkg-deb --fsys-tarfile failed: $?";
        if (scalar @tarfilterargs > 0) {
            waitpid($pid2, 0);
            $? == 0 or error "tarfilter failed: $?";
        }
        waitpid($pid3, 0);
        $? == 0 or error "tar --extract failed: $?";
        print_progress($counter / $total * 100);
    }
    print_progress "done";

    return;
}
|
|
|
|
|
|
|
|
# run_prepare($options)
#
# Prepare the environment for running commands inside the chroot and build
# the command prefix used to enter it.
#
#  * In fakechroot mode, set up FAKECHROOT_CMD_SUBST, FAKECHROOT_EXCLUDE_PATH,
#    FAKECHROOT_AF_UNIX_PATH and LD_LIBRARY_PATH (from /etc/ld.so.conf*).
#  * Build @chrootcmd: env --unset=... followed by either proot or
#    /usr/sbin/chroot, depending on {mode}.
#  * When {qemu} is set, either configure proot's --qemu, verify the
#    fakechroot/fakeroot libraries for the foreign arch, or copy the static
#    qemu-user binary into the chroot.
#  * Sanity-check that /bin/mv works inside fakechroot/proot (renameat2).
#
# $options - hashref; uses {mode}, {root}, {qemu}, {nativearch}
#
# Returns an arrayref with the chroot command prefix.
sub run_prepare {
    my $options = shift;

    if ($options->{mode} eq 'fakechroot') {
        # this borrows from and extends
        # /etc/fakechroot/debootstrap.env and
        # /etc/fakechroot/chroot.env
        {
            my @fakechrootsubst = ();
            foreach my $d ('/usr/sbin', '/usr/bin', '/sbin', '/bin') {
                push @fakechrootsubst, "$d/chroot=/usr/sbin/chroot.fakechroot";
                push @fakechrootsubst, "$d/mkfifo=/bin/true";
                push @fakechrootsubst, "$d/ldconfig=/bin/true";
                push @fakechrootsubst, "$d/ldd=/usr/bin/ldd.fakechroot";
                push @fakechrootsubst, "$d/ischroot=/bin/true";
            }
            # keep any substitutions the caller already configured
            if (defined $ENV{FAKECHROOT_CMD_SUBST}
                && $ENV{FAKECHROOT_CMD_SUBST} ne "") {
                push @fakechrootsubst, split /:/, $ENV{FAKECHROOT_CMD_SUBST};
            }
            ## no critic (Variables::RequireLocalizedPunctuationVars)
            $ENV{FAKECHROOT_CMD_SUBST} = join ':', @fakechrootsubst;
        }
        if (defined $ENV{FAKECHROOT_EXCLUDE_PATH}
            && $ENV{FAKECHROOT_EXCLUDE_PATH} ne "") {
            ## no critic (Variables::RequireLocalizedPunctuationVars)
            $ENV{FAKECHROOT_EXCLUDE_PATH}
              = "$ENV{FAKECHROOT_EXCLUDE_PATH}:/dev:/proc:/sys";
        } else {
            ## no critic (Variables::RequireLocalizedPunctuationVars)
            $ENV{FAKECHROOT_EXCLUDE_PATH} = '/dev:/proc:/sys';
        }
        # workaround for long unix socket path if FAKECHROOT_BASE
        # exceeds the limit of 108 bytes
        {
            ## no critic (Variables::RequireLocalizedPunctuationVars)
            $ENV{FAKECHROOT_AF_UNIX_PATH} = "/tmp";
        }
        {
            # collect the library search directories the host's dynamic
            # linker would use, so fakechroot can find libraries
            my @ldsoconf = ('/etc/ld.so.conf');
            opendir(my $dh, '/etc/ld.so.conf.d')
              or error "Can't opendir(/etc/ld.so.conf.d): $!";
            while (my $entry = readdir $dh) {
                # skip the "." and ".." entries
                next if $entry eq ".";
                next if $entry eq "..";
                next if $entry !~ /\.conf$/;
                push @ldsoconf, "/etc/ld.so.conf.d/$entry";
            }
            closedir($dh);
            my @ldlibpath = ();
            if (defined $ENV{LD_LIBRARY_PATH}
                && $ENV{LD_LIBRARY_PATH} ne "") {
                push @ldlibpath, (split /:/, $ENV{LD_LIBRARY_PATH});
            }
            # FIXME: workaround allowing installation of systemd should
            # live in fakechroot, see #917920
            push @ldlibpath, "/lib/systemd";
            foreach my $fname (@ldsoconf) {
                open my $fh, "<", $fname
                  or error "cannot open $fname for reading: $!";
                while (my $line = <$fh>) {
                    # strip the newline before storing the entry --
                    # otherwise LD_LIBRARY_PATH components would contain
                    # embedded "\n" and never match a real directory
                    chomp $line;
                    # only absolute paths; skips comments and "include"
                    next if $line !~ /^\//;
                    push @ldlibpath, $line;
                }
                close $fh;
            }
            ## no critic (Variables::RequireLocalizedPunctuationVars)
            $ENV{LD_LIBRARY_PATH} = join ':', @ldlibpath;
        }
    }

    # make sure that APT_CONFIG and TMPDIR are not set when executing
    # anything inside the chroot
    my @chrootcmd = ('env', '--unset=APT_CONFIG', '--unset=TMPDIR');
    if ($options->{mode} eq 'proot') {
        push @chrootcmd,
          (
            'proot', '--root-id',
            '--bind=/dev', '--bind=/proc',
            '--bind=/sys', "--rootfs=$options->{root}",
            '--cwd=/'
          );
    } elsif (
        any { $_ eq $options->{mode} }
        ('root', 'unshare', 'fakechroot')
    ) {
        push @chrootcmd, ('/usr/sbin/chroot', $options->{root});
    } else {
        error "unknown mode: $options->{mode}";
    }

    # copy qemu-user-static binary into chroot or setup proot with
    # --qemu
    if (defined $options->{qemu}) {
        if ($options->{mode} eq 'proot') {
            push @chrootcmd, "--qemu=qemu-$options->{qemu}";
        } elsif ($options->{mode} eq 'fakechroot') {
            # Make sure that the fakeroot and fakechroot shared
            # libraries exist for the right architecture
            #
            # NB: the failure check must be attached to open() itself --
            # appending "// error ..." to the last argument string would
            # never fire because a non-empty literal is always defined.
            open(my $fh, '-|', 'dpkg-architecture', '-a',
                $options->{nativearch}, '-qDEB_HOST_MULTIARCH')
              or error "failed to fork(): $!";
            chomp(
                my $deb_host_multiarch = do { local $/; <$fh> }
            );
            close $fh;
            if (($? != 0) or (!$deb_host_multiarch)) {
                error "dpkg-architecture failed: $?";
            }
            my $fakechrootdir = "/usr/lib/$deb_host_multiarch/fakechroot";
            if (!-e "$fakechrootdir/libfakechroot.so") {
                error "$fakechrootdir/libfakechroot.so doesn't exist."
                  . " Install libfakechroot:$options->{nativearch}"
                  . " outside the chroot";
            }
            my $fakerootdir = "/usr/lib/$deb_host_multiarch/libfakeroot";
            if (!-e "$fakerootdir/libfakeroot-sysv.so") {
                error "$fakerootdir/libfakeroot-sysv.so doesn't exist."
                  . " Install libfakeroot:$options->{nativearch}"
                  . " outside the chroot";
            }
            # The rest of this block sets environment variables, so we
            # have to add the "no critic" statement to stop perlcritic
            # from complaining about setting global variables
            ## no critic (Variables::RequireLocalizedPunctuationVars)
            # fakechroot only fills LD_LIBRARY_PATH with the
            # directories of the host's architecture. We append the
            # directories of the chroot architecture.
            $ENV{LD_LIBRARY_PATH}
              = "$ENV{LD_LIBRARY_PATH}:$fakechrootdir:$fakerootdir";
            # The binfmt support on the outside is used, so qemu needs
            # to know where it has to look for shared libraries
            if (defined $ENV{QEMU_LD_PREFIX}
                && $ENV{QEMU_LD_PREFIX} ne "") {
                $ENV{QEMU_LD_PREFIX} = "$ENV{QEMU_LD_PREFIX}:$options->{root}";
            } else {
                $ENV{QEMU_LD_PREFIX} = $options->{root};
            }
        } elsif (any { $_ eq $options->{mode} } ('root', 'unshare')) {
            # other modes require a static qemu-user binary
            my $qemubin = "/usr/bin/qemu-$options->{qemu}-static";
            if (!-e $qemubin) {
                error "cannot find $qemubin";
            }
            copy $qemubin, "$options->{root}/$qemubin"
              or error "cannot copy $qemubin: $!";
            # File::Copy does not retain permissions but on some
            # platforms (like Travis CI) the binfmt interpreter must
            # have the executable bit set or otherwise execve will
            # fail with EACCES
            chmod 0755, "$options->{root}/$qemubin"
              or error "cannot chmod $qemubin: $!";
        } else {
            error "unknown mode: $options->{mode}";
        }
    }

    # some versions of coreutils use the renameat2 system call in mv.
    # This breaks certain versions of fakechroot and proot. Here we do
    # a sanity check and warn the user in case things might break.
    if (any { $_ eq $options->{mode} } ('fakechroot', 'proot')
        and -e "$options->{root}/bin/mv") {
        mkdir "$options->{root}/000-move-me"
          or error "cannot create directory: $!";
        my $ret = system @chrootcmd, '/bin/mv', '/000-move-me',
          '/001-delete-me';
        if ($ret != 0) {
            if ($options->{mode} eq 'proot') {
                info "the /bin/mv binary inside the chroot doesn't"
                  . " work under proot";
                info "this is likely due to missing support for"
                  . " renameat2 in proot";
                info "see https://github.com/proot-me/PRoot/issues/147";
            } else {
                info "the /bin/mv binary inside the chroot doesn't"
                  . " work under fakechroot";
                info "with certain versions of coreutils and glibc,"
                  . " this is due to missing support for renameat2 in"
                  . " fakechroot";
                info "see https://github.com/dex4er/fakechroot/issues/60";
            }
            info "expect package post installation scripts not to work";
            # the mv failed, so the original test directory is still there
            rmdir "$options->{root}/000-move-me"
              or error "cannot rmdir: $!";
        } else {
            rmdir "$options->{root}/001-delete-me"
              or error "cannot rmdir: $!";
        }
    }

    return \@chrootcmd;
}
|
|
|
|
|
|
|
|
# run_essential($options, $essential_pkgs, $chrootcmd, $cached_debs)
#
# Install the previously extracted essential packages.  In chrootless mode
# this runs apt-get on the outside with dpkg forced into chrootless
# operation; in all other modes it runs "dpkg --install --force-depends"
# inside the chroot via $chrootcmd.  Afterwards the .deb files are removed
# from the chroot, except those that were already cached before download
# (listed in $cached_debs) and except when the user passed
# --skip=essential/unlink.
#
# $options        - hashref; uses {mode}, {root}, {dryrun}, {qemu}, {skip}
# $essential_pkgs - arrayref of .deb paths relative to {root}
# $chrootcmd      - arrayref command prefix from run_prepare()
# $cached_debs    - arrayref of bare .deb filenames cached before download
#
# Returns nothing; error() (which dies) is called on failure.
sub run_essential() {
    my $options = shift;
    my $essential_pkgs = shift;
    my $chrootcmd = shift;
    my $cached_debs = shift;

    if (scalar @{$essential_pkgs} == 0) {
        info "no essential packages -- skipping...";
        return;
    }

    if ($options->{mode} eq 'chrootless') {
        if ($options->{dryrun}) {
            info "simulate installing packages...";
        } else {
            info "installing packages...";
        }
        # FIXME: the dpkg config from the host is parsed before the command
        # line arguments are parsed and might break this mode
        # Example: if the host has --path-exclude set, then this will also
        # affect the chroot.
        my @chrootless_opts = (
            '-oDPkg::Options::=--force-not-root',
            '-oDPkg::Options::=--force-script-chrootless',
            '-oDPkg::Options::=--root=' . $options->{root},
            '-oDPkg::Options::=--log=' . "$options->{root}/var/log/dpkg.log",
            $options->{dryrun} ? '-oAPT::Get::Simulate=true' : (),
        );
        if (defined $options->{qemu}) {
            # The binfmt support on the outside is used, so qemu needs to know
            # where it has to look for shared libraries
            if (defined $ENV{QEMU_LD_PREFIX}
                && $ENV{QEMU_LD_PREFIX} ne "") {
                ## no critic (Variables::RequireLocalizedPunctuationVars)
                $ENV{QEMU_LD_PREFIX} = "$ENV{QEMU_LD_PREFIX}:$options->{root}";
            } else {
                ## no critic (Variables::RequireLocalizedPunctuationVars)
                $ENV{QEMU_LD_PREFIX} = $options->{root};
            }
        }
        # pass the .deb files themselves (with the chroot path prefixed)
        # so apt installs exactly these archives
        run_apt_progress({
            ARGV => ['apt-get', '--yes', @chrootless_opts, 'install'],
            PKGS => [map { "$options->{root}/$_" } @{$essential_pkgs}],
        });
    } elsif (
        any { $_ eq $options->{mode} }
        ('root', 'unshare', 'fakechroot', 'proot')
    ) {
        # install the extracted packages properly
        # we need --force-depends because dpkg does not take Pre-Depends
        # into account and thus doesn't install them in the right order
        # And the --predep-package option is broken: #539133
        if ($options->{dryrun}) {
            info "simulate installing packages...";
        } else {
            info "installing packages...";
            run_chroot(
                sub {
                    run_dpkg_progress({
                        ARGV => [
                            @{$chrootcmd}, 'env',
                            '--unset=TMPDIR', 'dpkg',
                            '--install', '--force-depends'
                        ],
                        PKGS => $essential_pkgs,
                    });
                },
                $options
            );
        }
    } else {
        error "unknown mode: $options->{mode}";
    }

    if (any { $_ eq 'essential/unlink' } @{ $options->{skip} }) {
        info "skipping essential/unlink as requested";
    } else {
        foreach my $deb (@{$essential_pkgs}) {
            # do not unlink those packages that were in /var/cache/apt/archive
            # before the download phase
            next
              if any { "/var/cache/apt/archives/$_" eq $deb } @{$cached_debs};
            unlink "$options->{root}/$deb"
              or error "cannot unlink $deb: $!";
        }
    }

    return;
}
|
|
|
|
|
|
|
|
# run_install($options, $pkgs_to_install, $chrootcmd)
#
# Install the remaining (non-essential) packages requested by the user.
#
# Chrootless mode: a single apt-get invocation on the outside with dpkg in
# chrootless operation.
#
# Other modes: first, packages that must exist before apt can run inside
# the chroot (apt itself for non-apt variants, plus apt-transport-https /
# ca-certificates / apt-transport-tor when the sources require them) are
# downloaded from the outside and installed with dpkg through $chrootcmd.
# Then the user's package list is installed with apt-get inside the chroot
# (or only simulated under --dry-run).
#
# $options         - hashref; uses {mode}, {root}, {dryrun}, {variant}
# $pkgs_to_install - arrayref of package names
# $chrootcmd       - arrayref command prefix from run_prepare()
#
# Returns nothing; error() (which dies) is called on failure.
sub run_install() {
    my $options = shift;
    my $pkgs_to_install = shift;
    my $chrootcmd = shift;

    if ($options->{mode} eq 'chrootless') {
        if (scalar @{$pkgs_to_install} > 0) {
            my @chrootless_opts = (
                '-oDPkg::Options::=--force-not-root',
                '-oDPkg::Options::=--force-script-chrootless',
                '-oDPkg::Options::=--root=' . $options->{root},
                '-oDPkg::Options::=--log='
                  . "$options->{root}/var/log/dpkg.log",
                $options->{dryrun} ? '-oAPT::Get::Simulate=true' : (),
            );
            run_apt_progress({
                ARGV => ['apt-get', '--yes', @chrootless_opts, 'install'],
                PKGS => $pkgs_to_install,
            });
        }
    } elsif (
        any { $_ eq $options->{mode} }
        ('root', 'unshare', 'fakechroot', 'proot')
    ) {
        # run essential hooks
        # NOTE(review): this branch is empty -- presumably the call that
        # runs the essential hooks belongs here; confirm against upstream
        if ($options->{variant} ne 'custom') {
        }

        if ($options->{variant} ne 'custom'
            and scalar @{$pkgs_to_install} > 0) {
            # some packages have to be installed from the outside before
            # anything can be installed from the inside.
            #
            # we do not need to install any *-archive-keyring packages
            # inside the chroot prior to installing the packages, because
            # the keyring is only used when doing "apt-get update" and that
            # was already done at the beginning using key material from the
            # outside. Since the apt cache is already filled and we are not
            # calling "apt-get update" again, the keyring can be installed
            # later during installation. But: if it's not installed during
            # installation, then we might end up with a fully installed
            # system without keyrings that are valid for its sources.list.
            my @pkgs_to_install_from_outside;

            # install apt if necessary
            if ($options->{variant} ne 'apt') {
                push @pkgs_to_install_from_outside, 'apt';
            }

            # since apt will be run inside the chroot, make sure that
            # apt-transport-https and ca-certificates gets installed first
            # if any mirror is a https URI
            open(my $pipe_apt, '-|', 'apt-get', 'indextargets',
                '--format', '$(URI)', 'Created-By: Packages')
              or error "cannot start apt-get indextargets: $!";
            while (my $uri = <$pipe_apt>) {
                if ($uri =~ /^https:\/\//) {
                    info "https mirror found -- adding apt-transport-https "
                      . "and ca-certificates";
                    # FIXME: support for https is part of apt >= 1.5
                    push @pkgs_to_install_from_outside, 'apt-transport-https';
                    push @pkgs_to_install_from_outside, 'ca-certificates';
                    last;
                } elsif ($uri =~ /^tor(\+[a-z]+)*:\/\//) {
                    # tor URIs can be tor+http://, tor+https:// or even
                    # tor+mirror+file://
                    info "tor mirror found -- adding apt-transport-tor";
                    push @pkgs_to_install_from_outside, 'apt-transport-tor';
                    last;
                }
            }
            close $pipe_apt;
            $? == 0 or error "apt-get indextargets failed";

            if (scalar @pkgs_to_install_from_outside > 0) {
                my @cached_debs = ();
                my @dl_debs = ();
                # /var/cache/apt/archives/ might not be empty either because
                # the user used hooks to populate it or because skip options
                # like essential/unlink or check/empty were used.
                {
                    my $apt_archives = "/var/cache/apt/archives/";
                    opendir my $dh, "$options->{root}/$apt_archives"
                      or error "cannot read $apt_archives";
                    while (my $deb = readdir $dh) {
                        if ($deb !~ /\.deb$/) {
                            next;
                        }
                        # only regular files count as cached archives
                        if (!-f "$options->{root}/$apt_archives/$deb") {
                            next;
                        }
                        push @cached_debs, $deb;
                    }
                    closedir $dh;
                }
                # %result optionally asks run_apt_progress to report (via
                # EDSP) which packages apt decided to download, so they can
                # be told apart from the pre-existing cached archives
                my %result = ();
                if ($options->{dryrun}) {
                    info 'simulate downloading '
                      . (join ', ', @pkgs_to_install_from_outside) . "...";
                } else {
                    if (scalar @cached_debs > 0) {
                        $result{EDSP_RES} = \@dl_debs;
                    }
                    info 'downloading '
                      . (join ', ', @pkgs_to_install_from_outside) . "...";
                }
                run_apt_progress({
                    ARGV => [
                        'apt-get',
                        '--yes',
                        '-oApt::Get::Download-Only=true',
                        $options->{dryrun}
                        ? '-oAPT::Get::Simulate=true'
                        : (),
                        'install'
                    ],
                    PKGS => [@pkgs_to_install_from_outside],
                    %result
                });
                if ($options->{dryrun}) {
                    info 'simulate installing '
                      . (join ', ', @pkgs_to_install_from_outside) . "...";
                } else {
                    my @debs_to_install;
                    if (scalar @cached_debs > 0 && scalar @dl_debs > 0) {
                        my $archives = "/var/cache/apt/archives/";
                        my $prefix = "$options->{root}/$archives";
                        # for each package in @dl_debs, check if it's in
                        # /var/cache/apt/archives/ and add it to
                        # @debs_to_install
                        foreach my $p (@dl_debs) {
                            my ($pkg, $ver_epoch) = @{$p};
                            # apt appends the architecture at the end of the
                            # package name
                            ($pkg, my $arch) = split ':', $pkg, 2;
                            # apt replaces the colon by its percent encoding
                            my $ver = $ver_epoch;
                            $ver =~ s/:/%3a/;
                            # the architecture returned by apt is the native
                            # architecture. Since we don't know whether the
                            # package is architecture independent or not, we
                            # first try with the native arch and then
                            # with "all" and only error out if neither exists.
                            if (-e "$prefix/${pkg}_${ver}_$arch.deb") {
                                push @debs_to_install,
                                  "$archives/${pkg}_${ver}_$arch.deb";
                            } elsif (-e "$prefix/${pkg}_${ver}_all.deb") {
                                push @debs_to_install,
                                  "$archives/${pkg}_${ver}_all.deb";
                            } else {
                                error( "cannot find package for "
                                    . "$pkg:$arch (= $ver_epoch) "
                                    . "in /var/cache/apt/archives/");
                            }
                        }
                    } else {
                        # no pre-existing cache to disambiguate against:
                        # everything in the archives directory was just
                        # downloaded and gets installed
                        my $apt_archives = "/var/cache/apt/archives/";
                        opendir my $dh, "$options->{root}/$apt_archives"
                          or error "cannot read $apt_archives";
                        while (my $deb = readdir $dh) {
                            if ($deb !~ /\.deb$/) {
                                next;
                            }
                            $deb = "$apt_archives/$deb";
                            if (!-f "$options->{root}/$deb") {
                                next;
                            }
                            push @debs_to_install, $deb;
                        }
                        closedir $dh;
                    }
                    if (scalar @debs_to_install == 0) {
                        warning "nothing got downloaded -- maybe the packages"
                          . " were already installed?";
                    } else {
                        # we need --force-depends because dpkg does not take
                        # Pre-Depends into account and thus doesn't install
                        # them in the right order
                        info 'installing '
                          . (join ', ', @pkgs_to_install_from_outside) . "...";
                        run_dpkg_progress({
                            ARGV => [
                                @{$chrootcmd}, 'env',
                                '--unset=TMPDIR', 'dpkg',
                                '--install', '--force-depends'
                            ],
                            PKGS => \@debs_to_install,
                        });
                        foreach my $deb (@debs_to_install) {
                            # do not unlink those packages that were in
                            # /var/cache/apt/archive before the install phase
                            next
                              if any { "/var/cache/apt/archives/$_" eq $deb }
                              @cached_debs;
                            unlink "$options->{root}/$deb"
                              or error "cannot unlink $deb: $!";
                        }
                    }
                }
            }

            if (!$options->{dryrun}) {
                run_chroot(
                    sub {
                        info "installing remaining packages inside the"
                          . " chroot...";
                        run_apt_progress({
                            ARGV => [
                                @{$chrootcmd}, 'env',
                                '--unset=APT_CONFIG', '--unset=TMPDIR',
                                'apt-get', '--yes',
                                'install'
                            ],
                            PKGS => $pkgs_to_install,
                        });
                    },
                    $options
                );
            } else {
                info "simulate installing remaining packages inside the"
                  . " chroot...";
                run_apt_progress({
                    ARGV => [
                        'apt-get', '--yes',
                        '-oAPT::Get::Simulate=true', 'install'
                    ],
                    PKGS => $pkgs_to_install,
                });
            }
        }
    } else {
        error "unknown mode: $options->{mode}";
    }

    return;
}
|
|
|
|
|
|
|
|
# Clean up the freshly created chroot: apt package lists and cache,
# temporary mmdebstrap configuration, files that hurt reproducibility, and
# leftovers in /tmp. Each stage can be disabled via the "skip" option
# (cleanup/apt, cleanup/mmdebstrap, cleanup/reproducible, cleanup/tmp).
#
# Takes the options hashref as its single argument and returns nothing.
# The empty () prototype was dropped: the sub consumes one argument via
# shift, and a () prototype would reject arguments in prototype-checked
# calls.
sub run_cleanup {
    my $options = shift;

    if (any { $_ eq 'cleanup/apt' } @{ $options->{skip} }) {
        info "skipping cleanup/apt as requested";
    } else {
        info "cleaning package lists and apt cache...";
        # run "update" with the sources pointed at /dev/null so that the
        # downloaded package lists are emptied out
        run_apt_progress({
            ARGV => [
                'apt-get', '--option',
                'Dir::Etc::SourceList=/dev/null', '--option',
                'Dir::Etc::SourceParts=/dev/null', 'update'
            ],
            CHDIR => $options->{root},
        });
        run_apt_progress(
            { ARGV => ['apt-get', 'clean'], CHDIR => $options->{root} });

        # apt since 1.6 creates the auxfiles directory. If apt inside the
        # chroot is older than that, then it will not know how to clean it.
        if (-e "$options->{root}/var/lib/apt/lists/auxfiles") {
            # use error (not die) so that failures go through the same
            # fatal-error channel as everywhere else in this script
            rmdir "$options->{root}/var/lib/apt/lists/auxfiles"
              or error "cannot rmdir /var/lib/apt/lists/auxfiles: $!";
        }
    }

    if (any { $_ eq 'cleanup/mmdebstrap' } @{ $options->{skip} }) {
        info "skipping cleanup/mmdebstrap as requested";
    } else {
        # clean up temporary configuration file
        unlink "$options->{root}/etc/apt/apt.conf.d/00mmdebstrap"
          or error "failed to unlink /etc/apt/apt.conf.d/00mmdebstrap: $!";

        if (defined $ENV{APT_CONFIG} && -e $ENV{APT_CONFIG}) {
            unlink $ENV{APT_CONFIG}
              or error "failed to unlink $ENV{APT_CONFIG}: $!";
        }

        # the qemu-user-static binary was only copied in for the root and
        # unshare modes, so only remove it there
        if (defined $options->{qemu}
            and any { $_ eq $options->{mode} } ('root', 'unshare')) {
            unlink "$options->{root}/usr/bin/qemu-$options->{qemu}-static"
              or error
              "cannot unlink /usr/bin/qemu-$options->{qemu}-static: $!";
        }
    }

    if (any { $_ eq 'cleanup/reproducible' } @{ $options->{skip} }) {
        info "skipping cleanup/reproducible as requested";
    } else {
        # clean up certain files to make output reproducible
        foreach my $fname (
            '/var/log/dpkg.log', '/var/log/apt/history.log',
            '/var/log/apt/term.log', '/var/log/alternatives.log',
            '/var/cache/ldconfig/aux-cache', '/var/log/apt/eipp.log.xz'
        ) {
            my $path = "$options->{root}$fname";
            if (!-e $path) {
                next;
            }
            unlink $path or error "cannot unlink $path: $!";
        }

        if (-e "$options->{root}/etc/machine-id") {
            # from machine-id(5):
            # For operating system images which are created once and used on
            # multiple machines, for example for containers or in the cloud,
            # /etc/machine-id should be an empty file in the generic file
            # system image. An ID will be generated during boot and saved to
            # this file if possible. Having an empty file in place is useful
            # because it allows a temporary file to be bind-mounted over the
            # real file, in case the image is used read-only.
            unlink "$options->{root}/etc/machine-id"
              or error "cannot unlink /etc/machine-id: $!";
            # truncate by re-creating the file empty
            open my $fh, '>', "$options->{root}/etc/machine-id"
              or error "failed to open(): $!";
            close $fh or error "failed to close(): $!";
        }
    }

    if (any { $_ eq 'cleanup/tmp' } @{ $options->{skip} }) {
        info "skipping cleanup/tmp as requested";
    } else {
        # remove any possible leftovers in /tmp but warn about it
        if (-d "$options->{root}/tmp") {
            opendir(my $dh, "$options->{root}/tmp")
              or error "Can't opendir($options->{root}/tmp): $!";
            while (my $entry = readdir $dh) {
                # skip the "." and ".." entries
                next if $entry eq ".";
                next if $entry eq "..";
                warning "deleting files in /tmp: $entry";
                # removal problems are reported but are not fatal
                remove_tree("$options->{root}/tmp/$entry",
                    { error => \my $err });
                if (@$err) {
                    for my $diag (@$err) {
                        my ($file, $message) = %$diag;
                        if ($file eq '') { warning "general error: $message"; }
                        else { warning "problem unlinking $file: $message"; }
                    }
                }
            }
            closedir($dh);
        }
    }
    return;
}
|
|
|
|
|
2019-12-09 09:40:51 +00:00
|
|
|
# messages from process inside unshared namespace to the outside
|
|
|
|
# openw -- open file for writing
|
|
|
|
# untar -- extract tar into directory
|
|
|
|
# write -- write data to last opened file or tar process
|
|
|
|
# close -- finish file writing or tar extraction
|
|
|
|
# adios -- last message and tear-down
|
|
|
|
# messages from process outside unshared namespace to the inside
|
|
|
|
# okthx -- success
|
|
|
|
# Read one protocol message from the given handle and require it to be a
# bare "okthx" acknowledgment. A message is a 2-byte big-endian payload
# length followed by a 5-character command name; anything other than
# "okthx" with zero payload is fatal.
sub checkokthx {
    my $fh = shift;
    my $bytes_read = read($fh, my $header, 2 + 5)
      // error "cannot read from socket: $!";
    error "received eof on socket" if $bytes_read == 0;
    my ($payload_len, $msg) = unpack("nA5", $header);
    error "expected okthx but got: $msg" if $msg ne "okthx";
    error "expected no payload but got $payload_len bytes"
      if $payload_len != 0;
    return;
}
|
|
|
|
|
2020-08-15 16:29:17 +00:00
|
|
|
# resolve a path inside a chroot
|
|
|
|
# Resolve $src to a real path underneath $root, following "." and ".."
# components as well as symlinks, without ever escaping above $root.
# Returns the resolved path (which starts with $root).
sub chrooted_realpath {
    my $root = shift;
    my $src  = shift;

    # start at the chroot itself; relative paths are taken relative to the
    # root of the chroot, so leading slashes carry no information
    my $resolved = $root;
    $src =~ s{^/+}{};

    my $symlink_depth = 0;
    while (length $src) {
        error "too many levels of symbolic links" if $symlink_depth > 25;

        # peel off the first path component
        my $component;
        ($component, $src) = split m{/+}, $src, 2;

        if ($component eq ".") {
            # no-op: stay in the current directory
        } elsif ($component eq "..") {
            # drop the last component...
            $resolved =~ s{(.*)/[^/]*}{$1};
            # ...but never climb above the chroot root
            $resolved = $root unless $resolved =~ m/^\Q$root\E/;
        } elsif (-l "$resolved/$component") {
            my $target = readlink "$resolved/$component";
            # an absolute symlink target restarts resolution at $root
            $resolved = $root if $target =~ s{^/+}{};
            # prepend the target to whatever is left to resolve
            $src = length $src ? "$target/$src" : $target;
            $symlink_depth++;
        } else {
            # plain component: descend into it
            $resolved = "$resolved/$component";
        }
    }
    return $resolved;
}
|
|
|
|
|
2020-01-16 11:02:11 +00:00
|
|
|
# hookhelper implements the inner half of the hook commands (copy-in/out,
# tar-in/out, upload, download, sync-in/out). It exchanges length-prefixed
# messages (2-byte big-endian payload length + 5-character command) with
# the listener process on stdin/stdout and pipes file data through tar or
# cat as appropriate. Arguments arrive positionally via @ARGV:
#   $ARGV[1] chroot path, $ARGV[2] mode, $ARGV[3] hook name,
#   $ARGV[4] qemu arch suffix, $ARGV[5] verbosity, $ARGV[6] command,
#   $ARGV[7..$#ARGV-1] source paths, $ARGV[-1] destination path.
sub hookhelper {
    # we put everything in an eval block because that way we can easily handle
    # errors without goto labels or much code duplication: the error handler
    # has to send an "error" message to the other side
    eval {
        my $root = $ARGV[1];
        my $mode = $ARGV[2];
        my $hook = $ARGV[3];
        my $qemu = $ARGV[4];
        $verbosity_level = $ARGV[5];
        my $command = $ARGV[6];

        my @cmdprefix = ();
        my @tarcmd = (
            'tar', '--numeric-owner', '--xattrs', '--format=pax',
            '--pax-option=exthdr.name=%d/PaxHeaders/%f,'
              . 'delete=atime,delete=ctime'
        );
        if ($hook eq 'setup') {
            if ($mode eq 'proot') {
                # since we cannot run tar inside the chroot under proot during
                # the setup hook because the chroot is empty, we have to run
                # tar from the outside, which leads to all files being owned
                # by the user running mmdebstrap. To let the ownership
                # information not be completely off, we force all files to be
                # owned by the root user.
                push @tarcmd, '--owner=0', '--group=0';
            }
        } elsif (any { $_ eq $hook } ('extract', 'essential', 'customize')) {
            if ($mode eq 'fakechroot') {
                # Fakechroot requires tar to run inside the chroot or
                # otherwise absolute symlinks will include the path to the
                # root directory
                push @cmdprefix, '/usr/sbin/chroot', $root;
            } elsif ($mode eq 'proot') {
                # proot requires tar to run inside proot or otherwise
                # permissions will be completely off
                push @cmdprefix, 'proot', '--root-id', "--rootfs=$root",
                  '--cwd=/', "--qemu=$qemu";
            } elsif (any { $_ eq $mode } ('root', 'chrootless', 'unshare')) {
                # not chrooting in this case
            } else {
                error "unknown mode: $mode";
            }
        } else {
            error "unknown hook: $hook";
        }

        if (
            any { $_ eq $command }
            ('copy-in', 'tar-in', 'upload', 'sync-in')
        ) {
            if (scalar @ARGV < 9) {
                error "$command needs at least one path on the"
                  . " outside and the output path inside the chroot";
            }
            # the last argument is the destination inside the chroot; the
            # arguments before it (starting at index 7) are the sources
            my $outpath = $ARGV[-1];
            for my $i (7 .. $#ARGV - 1) {
                # the right argument for tar's --directory argument depends on
                # whether tar is called from inside the chroot or from the
                # outside
                my $directory;
                if ($hook eq 'setup') {
                    # tar runs outside, so acquire the correct path
                    $directory = chrooted_realpath $root, $outpath;
                } elsif (
                    any { $_ eq $hook }
                    ('extract', 'essential', 'customize')
                ) {
                    if (any { $_ eq $mode } ('fakechroot', 'proot')) {
                        # tar will run inside the chroot
                        $directory = $outpath;
                    } elsif (
                        any { $_ eq $mode }
                        ('root', 'chrootless', 'unshare')
                    ) {
                        $directory = chrooted_realpath $root, $outpath;
                    } else {
                        error "unknown mode: $mode";
                    }
                } else {
                    error "unknown hook: $hook";
                }

                # if chrooted_realpath was used and if neither fakechroot or
                # proot were used (absolute symlinks will be broken) we can
                # check and potentially fail early if the target does not exist
                if (none { $_ eq $mode } ('fakechroot', 'proot')) {
                    my $dirtocheck = $directory;
                    if ($command eq 'upload') {
                        # check the parent directory instead
                        $dirtocheck =~ s/(.*)\/[^\/]*/$1/;
                    }
                    if (!-e $dirtocheck) {
                        error "path does not exist: $dirtocheck";
                    }
                    if (!-d $dirtocheck) {
                        error "path is not a directory: $dirtocheck";
                    }
                }

                my $fh;
                if ($command eq 'upload') {
                    # open the requested file for writing
                    # FIX: check the open itself with low-precedence "or";
                    # previously "// error" bound to $directory only, so a
                    # failing open() went unnoticed
                    open $fh, '|-', @cmdprefix, 'sh', '-c', 'cat > "$1"',
                      'exec', $directory
                      or error "failed to fork(): $!";
                } elsif (
                    any { $_ eq $command }
                    ('copy-in', 'tar-in', 'sync-in')
                ) {
                    # open a tar process that extracts the tarfile that we
                    # supply it with on stdin to the output directory inside
                    # the chroot
                    my @cmd = (
                        @cmdprefix, @tarcmd, '--xattrs-include=*',
                        '--directory', $directory, '--extract', '--file', '-'
                    );
                    debug("helper: running " . (join " ", @cmd));
                    open($fh, '|-', @cmd) // error "failed to fork(): $!";
                } else {
                    error "unknown command: $command";
                }

                if ($command eq 'copy-in') {
                    # instruct the parent process to create a tarball of the
                    # requested path outside the chroot
                    debug "helper: sending mktar";
                    print STDOUT (
                        pack("n", length $ARGV[$i]) . "mktar" . $ARGV[$i]);
                } elsif ($command eq 'sync-in') {
                    # instruct the parent process to create a tarball of the
                    # content of the requested path outside the chroot
                    debug "helper: sending mktac";
                    print STDOUT (
                        pack("n", length $ARGV[$i]) . "mktac" . $ARGV[$i]);
                } elsif (any { $_ eq $command } ('upload', 'tar-in')) {
                    # instruct parent process to open a tarball of the
                    # requested path outside the chroot for reading
                    debug "helper: sending openr";
                    print STDOUT (
                        pack("n", length $ARGV[$i]) . "openr" . $ARGV[$i]);
                } else {
                    error "unknown command: $command";
                }
                STDOUT->flush();
                debug "helper: waiting for okthx";
                checkokthx \*STDIN;

                # handle "write" messages from the parent process and feed
                # their payload into the tar process until a "close" message
                # is encountered
                while (1) {
                    # receive the next message
                    my $ret = read(STDIN, my $buf, 2 + 5)
                      // error "cannot read from socket: $!";
                    if ($ret == 0) {
                        error "received eof on socket";
                    }
                    my ($len, $msg) = unpack("nA5", $buf);
                    debug "helper: received message: $msg";
                    if ($msg eq "close") {
                        # finish the loop
                        if ($len != 0) {
                            error "expected no payload but got $len bytes";
                        }
                        debug "helper: sending okthx";
                        print STDOUT (pack("n", 0) . "okthx")
                          or error "cannot write to socket: $!";
                        STDOUT->flush();
                        last;
                    } elsif ($msg ne "write") {
                        error "expected write but got: $msg";
                    }
                    # read the payload
                    my $content;
                    {
                        my $ret = read(STDIN, $content, $len)
                          // error "error cannot read from socket: $!";
                        if ($ret == 0) {
                            error "received eof on socket";
                        }
                    }
                    # write the payload to the tar process
                    print $fh $content
                      or error "cannot write to tar process: $!";
                    debug "helper: sending okthx";
                    print STDOUT (pack("n", 0) . "okthx")
                      or error "cannot write to socket: $!";
                    STDOUT->flush();
                }
                close $fh;
                # for "upload" the pipe runs sh/cat, not tar, so its exit
                # status is not treated as a tar failure
                if ($command ne 'upload' and $? != 0) {
                    error "tar failed";
                }
            }
        } elsif (
            any { $_ eq $command }
            ('copy-out', 'tar-out', 'download', 'sync-out')
        ) {
            if (scalar @ARGV < 9) {
                error "$command needs at least one path inside the chroot and"
                  . " the output path on the outside";
            }
            my $outpath = $ARGV[-1];
            for my $i (7 .. $#ARGV - 1) {
                # the right argument for tar's --directory argument depends on
                # whether tar is called from inside the chroot or from the
                # outside
                my $directory;
                if ($hook eq 'setup') {
                    # tar runs outside, so acquire the correct path
                    $directory = chrooted_realpath $root, $ARGV[$i];
                } elsif (
                    any { $_ eq $hook }
                    ('extract', 'essential', 'customize')
                ) {
                    if (any { $_ eq $mode } ('fakechroot', 'proot')) {
                        # tar will run inside the chroot
                        $directory = $ARGV[$i];
                    } elsif (
                        any { $_ eq $mode }
                        ('root', 'chrootless', 'unshare')
                    ) {
                        $directory = chrooted_realpath $root, $ARGV[$i];
                    } else {
                        error "unknown mode: $mode";
                    }
                } else {
                    error "unknown hook: $hook";
                }

                # if chrooted_realpath was used and if neither fakechroot or
                # proot were used (absolute symlinks will be broken) we can
                # check and potentially fail early if the source does not exist
                if (none { $_ eq $mode } ('fakechroot', 'proot')) {
                    if (!-e $directory) {
                        error "path does not exist: $directory";
                    }
                    if ($command eq 'download') {
                        if (!-f $directory) {
                            error "path is not a file: $directory";
                        }
                    }
                }

                my $fh;
                if ($command eq 'download') {
                    # open the requested file for reading
                    # FIX: same precedence bug as in "upload" — check the
                    # open itself, not $directory
                    open $fh, '-|', @cmdprefix, 'sh', '-c', 'cat "$1"',
                      'exec', $directory
                      or error "failed to fork(): $!";
                } elsif ($command eq 'sync-out') {
                    # Open a tar process that creates a tarfile of everything
                    # inside the requested directory inside the chroot and
                    # writes it to stdout.
                    my @cmd = (
                        @cmdprefix, @tarcmd, '--directory',
                        $directory, '--create', '--file', '-', '.'
                    );
                    debug("helper: running " . (join " ", @cmd));
                    open($fh, '-|', @cmd) // error "failed to fork(): $!";
                } elsif (any { $_ eq $command } ('copy-out', 'tar-out')) {
                    # Open a tar process that creates a tarfile of the
                    # requested directory inside the chroot and writes it to
                    # stdout. To emulate the behaviour of cp, change to the
                    # dirname of the requested path first.
                    my @cmd = (
                        @cmdprefix, @tarcmd, '--directory',
                        dirname($directory), '--create', '--file', '-',
                        basename($directory));
                    debug("helper: running " . (join " ", @cmd));
                    open($fh, '-|', @cmd) // error "failed to fork(): $!";
                } else {
                    error "unknown command: $command";
                }

                if (any { $_ eq $command } ('copy-out', 'sync-out')) {
                    # instruct the parent process to extract a tarball to a
                    # certain path outside the chroot
                    debug "helper: sending untar";
                    print STDOUT (
                        pack("n", length $outpath) . "untar" . $outpath);
                } elsif (any { $_ eq $command } ('download', 'tar-out')) {
                    # instruct parent process to open a tarball of the
                    # requested path outside the chroot for writing
                    debug "helper: sending openw";
                    print STDOUT (
                        pack("n", length $outpath) . "openw" . $outpath);
                } else {
                    error "unknown command: $command";
                }
                STDOUT->flush();
                debug "helper: waiting for okthx";
                checkokthx \*STDIN;

                # read from the tar process and send as payload to the parent
                # process
                while (1) {
                    # read from tar
                    my $ret = read($fh, my $cont, 4096)
                      // error "cannot read from pipe: $!";
                    if ($ret == 0) { last; }
                    debug "helper: sending write";
                    # send to parent
                    print STDOUT pack("n", $ret) . "write" . $cont;
                    STDOUT->flush();
                    debug "helper: waiting for okthx";
                    checkokthx \*STDIN;
                    # a short buffered read() only happens at end-of-file
                    if ($ret < 4096) { last; }
                }

                # signal to the parent process that we are done
                debug "helper: sending close";
                print STDOUT pack("n", 0) . "close";
                STDOUT->flush();
                debug "helper: waiting for okthx";
                checkokthx \*STDIN;

                close $fh;
                if ($? != 0) {
                    error "$command failed";
                }
            }
        } else {
            error "unknown command: $command";
        }
    };
    if ($@) {
        # inform the other side that something went wrong
        print STDOUT (pack("n", 0) . "error");
        STDOUT->flush();
        error "hookhelper failed: $@";
    }
    return;
}
|
2020-01-08 14:41:49 +00:00
|
|
|
|
2020-08-15 20:36:13 +00:00
|
|
|
sub hooklistener {
|
|
|
|
# we put everything in an eval block because that way we can easily handle
|
|
|
|
# errors without goto labels or much code duplication: the error handler
|
|
|
|
# has to send an "error" message to the other side
|
|
|
|
eval {
|
|
|
|
while (1) {
|
|
|
|
# get the next message
|
|
|
|
my $msg = "error";
|
|
|
|
my $len = -1;
|
|
|
|
{
|
2020-11-13 21:36:58 +00:00
|
|
|
debug "listener: reading next command";
|
2020-08-15 20:36:13 +00:00
|
|
|
my $ret = read(STDIN, my $buf, 2 + 5)
|
|
|
|
// error "cannot read from socket: $!";
|
2020-11-13 21:36:58 +00:00
|
|
|
debug "listener: finished reading command";
|
2020-08-15 20:36:13 +00:00
|
|
|
if ($ret == 0) {
|
|
|
|
error "received eof on socket";
|
|
|
|
}
|
|
|
|
($len, $msg) = unpack("nA5", $buf);
|
|
|
|
}
|
|
|
|
if ($msg eq "adios") {
|
2020-11-13 21:36:58 +00:00
|
|
|
debug "listener: received message: adios";
|
2020-08-15 20:36:13 +00:00
|
|
|
# setup finished, so we break out of the loop
|
|
|
|
if ($len != 0) {
|
|
|
|
error "expected no payload but got $len bytes";
|
|
|
|
}
|
|
|
|
last;
|
|
|
|
} elsif ($msg eq "openr") {
|
|
|
|
# handle the openr message
|
2020-11-13 21:36:58 +00:00
|
|
|
debug "listener: received message: openr";
|
2020-08-15 20:36:13 +00:00
|
|
|
my $infile;
|
|
|
|
{
|
|
|
|
my $ret = read(STDIN, $infile, $len)
|
|
|
|
// error "cannot read from socket: $!";
|
|
|
|
if ($ret == 0) {
|
|
|
|
error "received eof on socket";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
# make sure that the requested path exists outside the chroot
|
|
|
|
if (!-e $infile) {
|
|
|
|
error "$infile does not exist";
|
|
|
|
}
|
2020-11-13 21:36:58 +00:00
|
|
|
debug "listener: sending okthx";
|
2020-08-15 20:36:13 +00:00
|
|
|
print STDOUT (pack("n", 0) . "okthx")
|
|
|
|
or error "cannot write to socket: $!";
|
|
|
|
STDOUT->flush();
|
|
|
|
|
|
|
|
open my $fh, '<', $infile
|
|
|
|
or error "failed to open $infile for reading: $!";
|
|
|
|
|
|
|
|
# read from the file and send as payload to the child process
|
|
|
|
while (1) {
|
|
|
|
# read from file
|
|
|
|
my $ret = read($fh, my $cont, 4096)
|
|
|
|
// error "cannot read from pipe: $!";
|
|
|
|
if ($ret == 0) { last; }
|
2020-11-13 21:36:58 +00:00
|
|
|
debug "listener: sending write";
|
2020-08-15 20:36:13 +00:00
|
|
|
# send to child
|
|
|
|
print STDOUT pack("n", $ret) . "write" . $cont;
|
|
|
|
STDOUT->flush();
|
2020-11-13 21:36:58 +00:00
|
|
|
debug "listener: waiting for okthx";
|
2020-08-15 20:36:13 +00:00
|
|
|
checkokthx \*STDIN;
|
|
|
|
if ($ret < 4096) { last; }
|
|
|
|
}
|
|
|
|
|
|
|
|
# signal to the child process that we are done
|
2020-11-13 21:36:58 +00:00
|
|
|
debug "listener: sending close";
|
2020-08-15 20:36:13 +00:00
|
|
|
print STDOUT pack("n", 0) . "close";
|
|
|
|
STDOUT->flush();
|
2020-11-13 21:36:58 +00:00
|
|
|
debug "listener: waiting for okthx";
|
2020-08-15 20:36:13 +00:00
|
|
|
checkokthx \*STDIN;
|
|
|
|
|
|
|
|
close $fh;
|
|
|
|
} elsif ($msg eq "openw") {
|
2020-11-13 21:36:58 +00:00
|
|
|
debug "listener: received message: openw";
|
2020-08-15 20:36:13 +00:00
|
|
|
# payload is the output directory
|
|
|
|
my $outfile;
|
|
|
|
{
|
|
|
|
my $ret = read(STDIN, $outfile, $len)
|
|
|
|
// error "cannot read from socket: $!";
|
|
|
|
if ($ret == 0) {
|
|
|
|
error "received eof on socket";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
# make sure that the directory exists
|
|
|
|
my $outdir = dirname($outfile);
|
|
|
|
if (-e $outdir) {
|
|
|
|
if (!-d $outdir) {
|
|
|
|
error "$outdir already exists but is not a directory";
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
my $num_created = make_path $outdir, { error => \my $err };
|
|
|
|
if ($err && @$err) {
|
|
|
|
error(
|
|
|
|
join "; ",
|
|
|
|
(
|
|
|
|
map { "cannot create " . (join ": ", %{$_}) }
|
|
|
|
@$err
|
|
|
|
));
|
|
|
|
} elsif ($num_created == 0) {
|
|
|
|
error "cannot create $outdir";
|
|
|
|
}
|
|
|
|
}
|
2020-11-13 21:36:58 +00:00
|
|
|
debug "listener: sending okthx";
|
2020-08-15 20:36:13 +00:00
|
|
|
print STDOUT (pack("n", 0) . "okthx")
|
|
|
|
or error "cannot write to socket: $!";
|
|
|
|
STDOUT->flush();
|
|
|
|
|
|
|
|
# now we expect one or more "write" messages containing the
|
|
|
|
# tarball to write
|
|
|
|
open my $fh, '>', $outfile
|
|
|
|
or error "failed to open $outfile for writing: $!";
|
|
|
|
|
|
|
|
# handle "write" messages from the child process and feed
|
|
|
|
# their payload into the file handle until a "close" message
|
|
|
|
# is encountered
|
|
|
|
while (1) {
|
|
|
|
# receive the next message
|
|
|
|
my $ret = read(STDIN, my $buf, 2 + 5)
|
|
|
|
// error "cannot read from socket: $!";
|
|
|
|
if ($ret == 0) {
|
|
|
|
error "received eof on socket";
|
|
|
|
}
|
|
|
|
my ($len, $msg) = unpack("nA5", $buf);
|
2020-11-13 21:36:58 +00:00
|
|
|
debug "listener: received message: $msg";
|
2020-08-15 20:36:13 +00:00
|
|
|
if ($msg eq "close") {
|
|
|
|
# finish the loop
|
|
|
|
if ($len != 0) {
|
|
|
|
error "expected no payload but got $len bytes";
|
|
|
|
}
|
2020-11-13 21:36:58 +00:00
|
|
|
debug "listener: sending okthx";
|
2020-08-15 20:36:13 +00:00
|
|
|
print STDOUT (pack("n", 0) . "okthx")
|
|
|
|
or error "cannot write to socket: $!";
|
|
|
|
STDOUT->flush();
|
|
|
|
last;
|
|
|
|
} elsif ($msg ne "write") {
|
|
|
|
# we should not receive this message at this point
|
|
|
|
error "expected write but got: $msg";
|
|
|
|
}
|
|
|
|
# read the payload
|
|
|
|
my $content;
|
|
|
|
{
|
|
|
|
my $ret = read(STDIN, $content, $len)
|
|
|
|
// error "error cannot read from socket: $!";
|
|
|
|
if ($ret == 0) {
|
|
|
|
error "received eof on socket";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
# write the payload to the file handle
|
|
|
|
print $fh $content
|
|
|
|
or error "cannot write to file handle: $!";
|
2020-11-13 21:36:58 +00:00
|
|
|
debug "listener: sending okthx";
|
2020-08-15 20:36:13 +00:00
|
|
|
print STDOUT (pack("n", 0) . "okthx")
|
|
|
|
or error "cannot write to socket: $!";
|
|
|
|
STDOUT->flush();
|
|
|
|
}
|
|
|
|
close $fh;
|
|
|
|
} elsif (any { $_ eq $msg } ('mktar', 'mktac')) {
|
|
|
|
# handle the mktar message
|
2020-11-13 21:36:58 +00:00
|
|
|
debug "listener: received message: $msg";
|
2020-08-15 20:36:13 +00:00
|
|
|
my $indir;
|
|
|
|
{
|
|
|
|
my $ret = read(STDIN, $indir, $len)
|
|
|
|
// error "cannot read from socket: $!";
|
|
|
|
if ($ret == 0) {
|
|
|
|
error "received eof on socket";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
# make sure that the requested path exists outside the chroot
|
|
|
|
if (!-e $indir) {
|
|
|
|
error "$indir does not exist";
|
|
|
|
}
|
2020-11-13 21:36:58 +00:00
|
|
|
debug "listener: sending okthx";
|
2020-08-15 20:36:13 +00:00
|
|
|
print STDOUT (pack("n", 0) . "okthx")
|
|
|
|
or error "cannot write to socket: $!";
|
|
|
|
STDOUT->flush();
|
|
|
|
|
|
|
|
# Open a tar process creating a tarfile of the instructed
|
|
|
|
# path. To emulate the behaviour of cp, change to the
|
|
|
|
# dirname of the requested path first.
|
2020-11-13 21:36:58 +00:00
|
|
|
my @cmd = (
|
|
|
|
'tar',
|
|
|
|
'--numeric-owner',
|
|
|
|
'--xattrs',
|
|
|
|
'--format=pax',
|
|
|
|
'--pax-option=exthdr.name=%d/PaxHeaders/%f,'
|
|
|
|
. 'delete=atime,delete=ctime',
|
|
|
|
'--directory',
|
|
|
|
$msg eq 'mktar' ? dirname($indir) : $indir,
|
|
|
|
'--create',
|
|
|
|
'--file',
|
|
|
|
'-',
|
|
|
|
$msg eq 'mktar' ? basename($indir) : '.'
|
|
|
|
);
|
|
|
|
debug("listener: running " . (join " ", @cmd));
|
|
|
|
open(my $fh, '-|', @cmd) // error "failed to fork(): $!";
|
2020-08-15 20:36:13 +00:00
|
|
|
|
|
|
|
# read from the tar process and send as payload to the child
|
|
|
|
# process
|
|
|
|
while (1) {
|
|
|
|
# read from tar
|
|
|
|
my $ret = read($fh, my $cont, 4096)
|
|
|
|
// error "cannot read from pipe: $!";
|
|
|
|
if ($ret == 0) { last; }
|
2020-11-13 21:36:58 +00:00
|
|
|
debug "listener: sending write ($ret bytes)";
|
2020-08-15 20:36:13 +00:00
|
|
|
# send to child
|
|
|
|
print STDOUT pack("n", $ret) . "write" . $cont;
|
|
|
|
STDOUT->flush();
|
2020-11-13 21:36:58 +00:00
|
|
|
debug "listener: waiting for okthx";
|
2020-08-15 20:36:13 +00:00
|
|
|
checkokthx \*STDIN;
|
|
|
|
if ($ret < 4096) { last; }
|
|
|
|
}
|
|
|
|
|
|
|
|
# signal to the child process that we are done
|
2020-11-13 21:36:58 +00:00
|
|
|
debug "listener: sending close";
|
2020-08-15 20:36:13 +00:00
|
|
|
print STDOUT pack("n", 0) . "close";
|
|
|
|
STDOUT->flush();
|
2020-11-13 21:36:58 +00:00
|
|
|
debug "listener: waiting for okthx";
|
2020-08-15 20:36:13 +00:00
|
|
|
checkokthx \*STDIN;
|
|
|
|
|
|
|
|
close $fh;
|
|
|
|
if ($? != 0) {
|
|
|
|
error "tar failed";
|
|
|
|
}
|
|
|
|
} elsif ($msg eq "untar") {
|
2020-11-13 21:36:58 +00:00
|
|
|
debug "listener: received message: untar";
|
2020-08-15 20:36:13 +00:00
|
|
|
# payload is the output directory
|
|
|
|
my $outdir;
|
|
|
|
{
|
|
|
|
my $ret = read(STDIN, $outdir, $len)
|
|
|
|
// error "cannot read from socket: $!";
|
|
|
|
if ($ret == 0) {
|
|
|
|
error "received eof on socket";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
# make sure that the directory exists
|
|
|
|
if (-e $outdir) {
|
|
|
|
if (!-d $outdir) {
|
|
|
|
error "$outdir already exists but is not a directory";
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
my $num_created = make_path $outdir, { error => \my $err };
|
|
|
|
if ($err && @$err) {
|
|
|
|
error(
|
|
|
|
join "; ",
|
|
|
|
(
|
|
|
|
map { "cannot create " . (join ": ", %{$_}) }
|
|
|
|
@$err
|
|
|
|
));
|
|
|
|
} elsif ($num_created == 0) {
|
|
|
|
error "cannot create $outdir";
|
|
|
|
}
|
|
|
|
}
|
2020-11-13 21:36:58 +00:00
|
|
|
debug "listener: sending okthx";
|
2020-08-15 20:36:13 +00:00
|
|
|
print STDOUT (pack("n", 0) . "okthx")
|
|
|
|
or error "cannot write to socket: $!";
|
|
|
|
STDOUT->flush();
|
|
|
|
|
|
|
|
# now we expect one or more "write" messages containing the
|
|
|
|
# tarball to unpack
|
|
|
|
open my $fh, '|-', 'tar', '--numeric-owner', '--xattrs',
|
|
|
|
'--xattrs-include=*', '--directory', $outdir,
|
|
|
|
'--extract', '--file',
|
|
|
|
'-' // error "failed to fork(): $!";
|
|
|
|
|
|
|
|
# handle "write" messages from the child process and feed
|
|
|
|
# their payload into the tar process until a "close" message
|
|
|
|
# is encountered
|
|
|
|
while (1) {
|
|
|
|
# receive the next message
|
|
|
|
my $ret = read(STDIN, my $buf, 2 + 5)
|
|
|
|
// error "cannot read from socket: $!";
|
|
|
|
if ($ret == 0) {
|
|
|
|
error "received eof on socket";
|
|
|
|
}
|
|
|
|
my ($len, $msg) = unpack("nA5", $buf);
|
2020-11-13 21:36:58 +00:00
|
|
|
debug "listener: received message: $msg";
|
2020-08-15 20:36:13 +00:00
|
|
|
if ($msg eq "close") {
|
|
|
|
# finish the loop
|
|
|
|
if ($len != 0) {
|
|
|
|
error "expected no payload but got $len bytes";
|
|
|
|
}
|
2020-11-13 21:36:58 +00:00
|
|
|
debug "listener: sending okthx";
|
2020-08-15 20:36:13 +00:00
|
|
|
print STDOUT (pack("n", 0) . "okthx")
|
|
|
|
or error "cannot write to socket: $!";
|
|
|
|
STDOUT->flush();
|
|
|
|
last;
|
|
|
|
} elsif ($msg ne "write") {
|
|
|
|
# we should not receive this message at this point
|
|
|
|
error "expected write but got: $msg";
|
|
|
|
}
|
|
|
|
# read the payload
|
|
|
|
my $content;
|
|
|
|
{
|
|
|
|
my $ret = read(STDIN, $content, $len)
|
|
|
|
// error "error cannot read from socket: $!";
|
|
|
|
if ($ret == 0) {
|
|
|
|
error "received eof on socket";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
# write the payload to the tar process
|
|
|
|
print $fh $content
|
|
|
|
or error "cannot write to tar process: $!";
|
2020-11-13 21:36:58 +00:00
|
|
|
debug "listener: sending okthx";
|
2020-08-15 20:36:13 +00:00
|
|
|
print STDOUT (pack("n", 0) . "okthx")
|
|
|
|
or error "cannot write to socket: $!";
|
|
|
|
STDOUT->flush();
|
|
|
|
}
|
|
|
|
close $fh;
|
|
|
|
if ($? != 0) {
|
|
|
|
error "tar failed";
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
error "unknown message: $msg";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
};
|
|
|
|
if ($@) {
|
2020-08-28 12:36:14 +00:00
|
|
|
debug("hooklistener errored out: $@");
|
2020-08-15 20:36:13 +00:00
|
|
|
# inform the other side that something went wrong
|
|
|
|
print STDOUT (pack("n", 0) . "error")
|
|
|
|
or error "cannot write to socket: $!";
|
|
|
|
STDOUT->flush();
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2020-08-24 14:20:04 +00:00
|
|
|
# parse files of the format found in /usr/share/distro-info/ and return two
# list references: the first contains codenames of end-of-life distros and
# the second contains codenames of currently active distros
#
# The file must be a CSV whose header starts with the columns
# version,codename,series,created,release,eol.  Rows with all six cells
# carry an eol date; rows with fewer cells are treated as still active.
sub parse_distro_info {
    my $file    = shift;
    my @eol     = ();
    my @current = ();
    # today's date in iso8601, used for lexicographic comparison below
    my $today = POSIX::strftime "%Y-%m-%d", localtime;
    open my $fh, '<', $file or error("cannot open $file: $!");
    my $i = 0;
    while (my $line = <$fh>) {
        chomp($line);
        $i++;
        my @cells = split /,/, $line;
        if (scalar @cells < 4) {
            error("cannot parse line $i of $file");
        }
        # the first line must carry the expected csv header
        if (
            $i == 1
            and ( scalar @cells < 6
                or $cells[0] ne 'version'
                or $cells[1] ne 'codename'
                or $cells[2] ne 'series'
                or $cells[3] ne 'created'
                or $cells[4] ne 'release'
                or $cells[5] ne 'eol')
        ) {
            error("cannot find correct header in $file");
        }
        if ($i == 1) {
            next;
        }
        if (scalar @cells == 6) {
            if ($cells[5] !~ m/^\d\d\d\d-\d\d-\d\d$/) {
                # fixed typo: this validates the "eol" column, not "eof"
                error("invalid eol date format in $file:$i: $cells[5]");
            }
            # since the date format is iso8601, we can use lexicographic
            # string comparison to compare dates
            if ($cells[5] lt $today) {
                push @eol, $cells[2];
            } else {
                push @current, $cells[2];
            }
        } else {
            # no eol date given, assume the distro is still active
            push @current, $cells[2];
        }
    }
    close $fh;
    return ([@eol], [@current]);
}
|
|
|
|
|
|
|
|
# Build a mapping: vendor name -> { suite codename -> eol flag } where an
# eol flag of 1 marks an end-of-life suite and 0 marks an active one.
sub get_suite_by_vendor {
    my %suite_by_vendor = map { $_ => {} } qw(debian ubuntu tanglu kali);

    # pre-fill with some known values
    $suite_by_vendor{'debian'}->{$_} = 1
      for qw(potato woody sarge etch lenny squeeze wheezy jessie);
    $suite_by_vendor{'debian'}->{$_} = 0
      for qw(unstable stable oldstable stretch buster bullseye bookworm);
    $suite_by_vendor{'tanglu'}->{$_} = 0
      for qw(aequorea bartholomea chromodoris dasyatis);
    $suite_by_vendor{'kali'}->{$_} = 0
      for qw(kali-dev kali-rolling kali-bleeding-edge);
    $suite_by_vendor{'ubuntu'}->{$_} = 0
      for qw(trusty xenial zesty artful bionic cosmic);

    # if the Debian package distro-info-data is installed, then we can use
    # it, to get better data about new distros or EOL distros
    if (-e '/usr/share/distro-info/debian.csv') {
        my ($eol, $current)
          = parse_distro_info('/usr/share/distro-info/debian.csv');
        $suite_by_vendor{'debian'}->{$_} = 1 for @{$eol};
        $suite_by_vendor{'debian'}->{$_} = 0 for @{$current};
    }
    if (-e '/usr/share/distro-info/ubuntu.csv') {
        my ($eol, $current)
          = parse_distro_info('/usr/share/distro-info/ubuntu.csv');
        $suite_by_vendor{'ubuntu'}->{$_} = 0 for (@{$eol}, @{$current});
    }

    # if debootstrap is installed we infer distro names from the symlink
    # targets of the scripts in /usr/share/debootstrap/scripts/
    my $scriptdir = '/usr/share/debootstrap/scripts/';
    if (-d $scriptdir) {
        opendir(my $dh, $scriptdir)
          or error("Can't opendir($scriptdir): $!");
        # well-known symlink target -> the vendor that target implies
        my %vendor_by_target = (
            'sid'      => 'debian',
            'gutsy'    => 'ubuntu',
            'aequorea' => 'tanglu',
            'kali'     => 'kali',
        );
        while (my $suite = readdir $dh) {
            # this is only a heuristic -- don't overwrite anything but
            # instead just update anything that was missing
            next if !-l "$scriptdir/$suite";
            my $target = readlink "$scriptdir/$suite";
            my $vendor = $vendor_by_target{$target};
            next if !defined $vendor;
            next if exists $suite_by_vendor{$vendor}->{$suite};
            $suite_by_vendor{$vendor}->{$suite} = 0;
        }
        closedir($dh);
    }

    return %suite_by_vendor;
}
|
|
|
|
|
|
|
|
# try to guess the right keyring path for the given suite
#
# Returns the keyring path for $query, or nothing if the suite is unknown.
sub get_keyring_by_suite {
    my $query           = shift;
    my $suite_by_vendor = shift;

    # If libdpkg-perl is available, ask it for the canonical keyring
    # locations; otherwise fall back to hardcoded well-known paths.
    my $debianvendor;
    my $ubuntuvendor;
    eval {
        require Dpkg::Vendor::Debian;
        require Dpkg::Vendor::Ubuntu;
        $debianvendor = Dpkg::Vendor::Debian->new();
        $ubuntuvendor = Dpkg::Vendor::Ubuntu->new();
    };

    my $keyring_by_vendor = sub {
        my ($vendor, $eol) = @_;
        if ($vendor eq 'debian') {
            if ($eol) {
                return defined $debianvendor
                  ? $debianvendor->run_hook('archive-keyrings-historic')
                  : '/usr/share/keyrings/debian-archive-removed-keys.gpg';
            }
            return defined $debianvendor
              ? $debianvendor->run_hook('archive-keyrings')
              : '/usr/share/keyrings/debian-archive-keyring.gpg';
        }
        if ($vendor eq 'ubuntu') {
            return defined $ubuntuvendor
              ? $ubuntuvendor->run_hook('archive-keyrings')
              : '/usr/share/keyrings/ubuntu-archive-keyring.gpg';
        }
        return '/usr/share/keyrings/tanglu-archive-keyring.gpg'
          if $vendor eq 'tanglu';
        return '/usr/share/keyrings/kali-archive-keyring.gpg'
          if $vendor eq 'kali';
        error("unknown vendor: $vendor");
    };

    # compute the keyring of every known suite, then look up the query
    my %keyrings = ();
    foreach my $vendor (keys %{$suite_by_vendor}) {
        foreach my $suite (keys %{ $suite_by_vendor->{$vendor} }) {
            my $keyring = $keyring_by_vendor->(
                $vendor, $suite_by_vendor->{$vendor}->{$suite});
            debug("suite $suite with keyring $keyring");
            $keyrings{$suite} = $keyring;
        }
    }

    if (exists $keyrings{$query}) {
        return $keyrings{$query};
    }
    return;
}
|
|
|
|
|
|
|
|
# Build the one-line-style sources.list content for the given suite.
#
# Arguments:
#   $suite           - distribution codename or alias
#   $arch            - native architecture inside the chroot
#   $signedby        - '' or a " [signed-by=...]" option string appended to "deb"
#   $compstr         - space separated component list, e.g. "main"
#   $suite_by_vendor - hashref as returned by get_suite_by_vendor()
#
# Returns the sources.list content as a single string.
sub get_sourceslist_by_suite {
    my $suite           = shift;
    my $arch            = shift;
    my $signedby        = shift;
    my $compstr         = shift;
    my $suite_by_vendor = shift;

    my @debstable    = keys %{ $suite_by_vendor->{'debian'} };
    my @ubuntustable = keys %{ $suite_by_vendor->{'ubuntu'} };
    my @tanglustable = keys %{ $suite_by_vendor->{'tanglu'} };
    my @kali         = keys %{ $suite_by_vendor->{'kali'} };

    my $mirror    = 'http://deb.debian.org/debian';
    my $secmirror = 'http://security.debian.org/debian-security';
    if (any { $_ eq $suite } @ubuntustable) {
        if (any { $_ eq $arch } ('amd64', 'i386')) {
            $mirror    = 'http://archive.ubuntu.com/ubuntu';
            $secmirror = 'http://security.ubuntu.com/ubuntu';
        } else {
            $mirror    = 'http://ports.ubuntu.com/ubuntu-ports';
            $secmirror = 'http://ports.ubuntu.com/ubuntu-ports';
        }
        if (-e '/usr/share/debootstrap/scripts/gutsy') {
            # try running the debootstrap script but ignore errors
            my $script = 'set -eu;
                default_mirror() { echo $1; };
                mirror_style() { :; };
                download_style() { :; };
                finddebs_style() { :; };
                variants() { :; };
                keyring() { :; };
                doing_variant() { false; };
                . /usr/share/debootstrap/scripts/gutsy;';
            # bugfix: the original wrote "$script // last" where // bound to
            # $script (which is always defined), so the open was never
            # checked -- and "last" outside a loop would have died anyway
            open(my $fh, '-|', 'env', "ARCH=$arch", "SUITE=$suite",
                'sh', '-c', $script)
              // error("failed to fork(): $!");
            chomp(
                my $output = do { local $/; <$fh> }
            );
            close $fh;
            # only trust the script's output if it exited successfully
            if ($? == 0 && $output ne '') {
                $mirror = $output;
            }
        }
    } elsif (any { $_ eq $suite } @tanglustable) {
        $mirror = 'http://archive.tanglu.org/tanglu';
    } elsif (any { $_ eq $suite } @kali) {
        $mirror = 'https://http.kali.org/kali';
    }
    my $sourceslist = '';
    $sourceslist .= "deb$signedby $mirror $suite $compstr\n";
    if (any { $_ eq $suite } @ubuntustable) {
        $sourceslist .= "deb$signedby $mirror $suite-updates $compstr\n";
        $sourceslist .= "deb$signedby $secmirror $suite-security $compstr\n";
    } elsif (any { $_ eq $suite } @tanglustable) {
        $sourceslist .= "deb$signedby $secmirror $suite-updates $compstr\n";
    } elsif (any { $_ eq $suite } @debstable
        and none { $_ eq $suite } ('testing', 'unstable', 'sid')) {
        $sourceslist .= "deb$signedby $mirror $suite-updates $compstr\n";
        if (any { $_ eq $suite } ('bullseye')) {
            # starting from bullseye use
            # https://lists.debian.org/87r26wqr2a.fsf@43-1.org
            $sourceslist
              .= "deb$signedby $secmirror $suite-security" . " $compstr\n";
        } else {
            $sourceslist
              .= "deb$signedby $secmirror $suite/updates" . " $compstr\n";
        }
    }
    return $sourceslist;
}
|
|
|
|
|
2020-01-22 22:30:28 +00:00
|
|
|
# Heuristically detect whether the given apt sources content is written in
# one-line style or deb822 style.
#
# Returns 'one-line' or 'deb822', or nothing if the style could not be
# determined from the content.
sub guess_sources_format {
    my ($content) = @_;
    foreach my $line (split "\n", $content) {
        # a line starting with "deb " or "deb-src " means one-line style
        return 'one-line' if $line =~ /^deb(-src)? /;
        # a "Key:" line that is not a comment means deb822 style
        return 'deb822' if $line =~ /^[^#:\s]+:/;
    }
    return;
}
|
|
|
|
|
2020-04-09 16:33:05 +00:00
|
|
|
# Approximate the disk usage of the given directory tree in 1024-byte
# blocks (plus 10% slack).
sub approx_disk_usage {
    my $directory = shift;
    info("approximating disk usage...");
    # the "du" utility reports different results depending on the underlying
    # filesystem, see https://bugs.debian.org/650077 for a discussion
    #
    # we use code similar to the one used by dpkg-gencontrol instead
    #
    # Regular files are measured in number of 1024 byte blocks. All other
    # entries are assumed to take one block of space.
    #
    # We ignore /dev because depending on the mode, the directory might be
    # populated or not and we want consistent disk usage results independent
    # of the mode.
    my $installed_size = 0;
    my $scan_installed_size = sub {
        if ($File::Find::name eq "$directory/dev") {
            # add all entries of @devfiles once
            $installed_size += scalar @devfiles;
        } elsif ($File::Find::name =~ m{^\Q$directory\E/dev/}) {
            # ignore everything below /dev
            #
            # bugfix: $directory is now quoted with \Q...\E so that paths
            # containing regex metacharacters (dots, brackets, ...) cannot
            # break or subvert the match
        } elsif (-f $File::Find::name) {
            # add file size in 1024 byte blocks, rounded up
            $installed_size += int(((-s $File::Find::name) + 1024) / 1024);
        } else {
            # all other entries are assumed to only take up one block
            $installed_size += 1;
        }
    };
    find($scan_installed_size, $directory);

    # because the above is only a heuristic we add 10% extra for good measure
    return int($installed_size * 1.1);
}
|
|
|
|
|
2020-01-16 11:02:11 +00:00
|
|
|
sub main() {
|
2020-08-25 14:06:05 +00:00
|
|
|
my $before = Time::HiRes::time;
|
|
|
|
|
2020-01-16 11:02:11 +00:00
|
|
|
umask 022;
|
|
|
|
|
|
|
|
if (scalar @ARGV >= 7 && $ARGV[0] eq "--hook-helper") {
|
|
|
|
hookhelper();
|
2020-01-08 14:41:49 +00:00
|
|
|
exit 0;
|
2019-12-09 09:40:51 +00:00
|
|
|
}
|
2020-08-15 20:36:13 +00:00
|
|
|
|
|
|
|
# this is the counterpart to --hook-helper and will receive and carry
|
|
|
|
# out its instructions
|
|
|
|
if (scalar @ARGV == 1 && $ARGV[0] eq "--hook-listener") {
|
|
|
|
hooklistener();
|
|
|
|
exit 0;
|
|
|
|
}
|
|
|
|
|
2020-01-18 22:13:10 +00:00
|
|
|
# this is like:
|
|
|
|
# lxc-usernsexec -- lxc-unshare -s 'MOUNT|PID|UTSNAME|IPC' ...
|
|
|
|
# but without needing lxc
|
|
|
|
if ($ARGV[0] eq "--unshare-helper") {
|
|
|
|
if (!test_unshare(1)) {
|
|
|
|
exit 1;
|
|
|
|
}
|
|
|
|
my @idmap = read_subuid_subgid;
|
|
|
|
my $pid = get_unshare_cmd(
|
|
|
|
sub {
|
|
|
|
0 == system @ARGV[1 .. $#ARGV] or error "system failed: $?";
|
|
|
|
},
|
|
|
|
\@idmap
|
|
|
|
);
|
|
|
|
waitpid $pid, 0;
|
|
|
|
$? == 0 or error "unshared command failed";
|
|
|
|
exit 0;
|
|
|
|
}
|
2019-12-09 09:40:51 +00:00
|
|
|
|
2018-09-18 09:20:24 +00:00
|
|
|
my $mtime = time;
|
|
|
|
if (exists $ENV{SOURCE_DATE_EPOCH}) {
|
2020-01-08 16:44:07 +00:00
|
|
|
$mtime = $ENV{SOURCE_DATE_EPOCH} + 0;
|
2018-09-18 09:20:24 +00:00
|
|
|
}
|
|
|
|
|
2020-01-09 07:39:40 +00:00
|
|
|
{
|
|
|
|
## no critic (Variables::RequireLocalizedPunctuationVars)
|
|
|
|
$ENV{DEBIAN_FRONTEND} = 'noninteractive';
|
|
|
|
$ENV{DEBCONF_NONINTERACTIVE_SEEN} = 'true';
|
|
|
|
$ENV{LC_ALL} = 'C.UTF-8';
|
|
|
|
$ENV{LANGUAGE} = 'C.UTF-8';
|
|
|
|
$ENV{LANG} = 'C.UTF-8';
|
|
|
|
}
|
2018-09-18 09:20:24 +00:00
|
|
|
|
|
|
|
# copy ARGV because getopt modifies it
|
|
|
|
my @ARGVORIG = @ARGV;
|
|
|
|
|
2020-01-21 23:28:48 +00:00
|
|
|
# obtain the correct defaults for the keyring locations that apt knows
|
|
|
|
# about
|
|
|
|
my $apttrusted
|
|
|
|
= `eval \$(apt-config shell v Dir::Etc::trusted/f); printf \$v`;
|
|
|
|
my $apttrustedparts
|
|
|
|
= `eval \$(apt-config shell v Dir::Etc::trustedparts/d); printf \$v`;
|
|
|
|
|
2020-01-08 16:44:07 +00:00
|
|
|
chomp(my $hostarch = `dpkg --print-architecture`);
|
2018-09-18 09:20:24 +00:00
|
|
|
my $options = {
|
2020-01-08 16:44:07 +00:00
|
|
|
components => ["main"],
|
|
|
|
variant => "important",
|
|
|
|
include => [],
|
|
|
|
architectures => [$hostarch],
|
|
|
|
mode => 'auto',
|
|
|
|
dpkgopts => [],
|
|
|
|
aptopts => [],
|
2020-01-21 23:28:48 +00:00
|
|
|
apttrusted => $apttrusted,
|
|
|
|
apttrustedparts => $apttrustedparts,
|
2020-01-08 16:44:07 +00:00
|
|
|
noop => [],
|
|
|
|
setup_hook => [],
|
2020-03-22 13:08:21 +00:00
|
|
|
extract_hook => [],
|
2020-01-08 16:44:07 +00:00
|
|
|
essential_hook => [],
|
|
|
|
customize_hook => [],
|
2020-01-10 10:44:15 +00:00
|
|
|
dryrun => 0,
|
2020-04-09 22:00:36 +00:00
|
|
|
skip => [],
|
2018-09-18 09:20:24 +00:00
|
|
|
};
|
2019-02-23 07:43:15 +00:00
|
|
|
my $logfile = undef;
|
2020-04-09 16:33:05 +00:00
|
|
|
my $format = 'auto';
|
2020-01-08 16:44:07 +00:00
|
|
|
Getopt::Long::Configure('default', 'bundling', 'auto_abbrev',
|
|
|
|
'ignore_case_always');
|
2018-09-18 09:20:24 +00:00
|
|
|
GetOptions(
|
2020-01-08 16:44:07 +00:00
|
|
|
'h|help' => sub { pod2usage(-exitval => 0, -verbose => 1) },
|
|
|
|
'man' => sub { pod2usage(-exitval => 0, -verbose => 2) },
|
|
|
|
'version' => sub { print STDOUT "mmdebstrap $VERSION\n"; exit 0; },
|
|
|
|
'components=s@' => \$options->{components},
|
|
|
|
'variant=s' => \$options->{variant},
|
|
|
|
'include=s@' => \$options->{include},
|
2020-01-08 14:41:49 +00:00
|
|
|
'architectures=s@' => \$options->{architectures},
|
2020-01-08 16:44:07 +00:00
|
|
|
'mode=s' => \$options->{mode},
|
|
|
|
'dpkgopt=s@' => \$options->{dpkgopts},
|
|
|
|
'aptopt=s@' => \$options->{aptopts},
|
|
|
|
'keyring=s' => sub {
|
2020-01-08 14:41:49 +00:00
|
|
|
my ($opt_name, $opt_value) = @_;
|
|
|
|
if ($opt_value =~ /"/) {
|
2020-01-08 16:41:46 +00:00
|
|
|
error "--keyring: apt cannot handle paths with double quotes:"
|
|
|
|
. " $opt_value";
|
2020-01-08 14:41:49 +00:00
|
|
|
}
|
2020-01-08 16:44:07 +00:00
|
|
|
if (!-e $opt_value) {
|
2020-01-08 14:41:49 +00:00
|
|
|
error "keyring \"$opt_value\" does not exist";
|
|
|
|
}
|
|
|
|
my $abs_path = abs_path($opt_value);
|
|
|
|
if (!defined $abs_path) {
|
|
|
|
error "unable to get absolute path of --keyring: $opt_value";
|
|
|
|
}
|
|
|
|
# since abs_path resolved all symlinks for us, we can now test
|
|
|
|
# what the actual target actually is
|
2020-01-21 12:38:53 +00:00
|
|
|
if (-d $abs_path) {
|
|
|
|
$options->{apttrustedparts} = $abs_path;
|
2020-01-08 14:41:49 +00:00
|
|
|
} else {
|
2020-01-21 12:38:53 +00:00
|
|
|
$options->{apttrusted} = $abs_path;
|
2020-01-08 14:41:49 +00:00
|
|
|
}
|
|
|
|
},
|
2020-01-08 16:44:07 +00:00
|
|
|
's|silent' => sub { $verbosity_level = 0; },
|
|
|
|
'q|quiet' => sub { $verbosity_level = 0; },
|
2020-01-08 14:41:49 +00:00
|
|
|
'v|verbose' => sub { $verbosity_level = 2; },
|
2020-01-08 16:44:07 +00:00
|
|
|
'd|debug' => sub { $verbosity_level = 3; },
|
2020-04-09 16:33:05 +00:00
|
|
|
'format=s' => \$format,
|
2020-01-08 14:41:49 +00:00
|
|
|
'logfile=s' => \$logfile,
|
|
|
|
# no-op options so that mmdebstrap can be used with
|
|
|
|
# sbuild-createchroot --debootstrap=mmdebstrap
|
2020-01-08 16:44:07 +00:00
|
|
|
'resolve-deps' => sub { push @{ $options->{noop} }, 'resolve-deps'; },
|
|
|
|
'merged-usr' => sub { push @{ $options->{noop} }, 'merged-usr'; },
|
|
|
|
'no-merged-usr' =>
|
|
|
|
sub { push @{ $options->{noop} }, 'no-merged-usr'; },
|
|
|
|
'force-check-gpg' =>
|
|
|
|
sub { push @{ $options->{noop} }, 'force-check-gpg'; },
|
|
|
|
'setup-hook=s@' => \$options->{setup_hook},
|
2020-03-22 13:08:21 +00:00
|
|
|
'extract-hook=s@' => \$options->{extract_hook},
|
2020-01-08 14:41:49 +00:00
|
|
|
'essential-hook=s@' => \$options->{essential_hook},
|
|
|
|
'customize-hook=s@' => \$options->{customize_hook},
|
2020-08-15 22:50:46 +00:00
|
|
|
'hook-directory=s' => sub {
|
|
|
|
my ($opt_name, $opt_value) = @_;
|
|
|
|
if (!-e $opt_value) {
|
|
|
|
error "hook directory \"$opt_value\" does not exist";
|
|
|
|
}
|
|
|
|
my $abs_path = abs_path($opt_value);
|
|
|
|
if (!defined $abs_path) {
|
|
|
|
error( "unable to get absolute path of "
|
|
|
|
. "--hook-directory: $opt_value");
|
|
|
|
}
|
|
|
|
# since abs_path resolved all symlinks for us, we can now test
|
|
|
|
# what the actual target actually is
|
|
|
|
if (!-d $opt_value) {
|
|
|
|
error "hook directory \"$opt_value\" is not a directory";
|
|
|
|
}
|
|
|
|
# gather all files starting with special prefixes into the
|
|
|
|
# respective keys of a hash
|
|
|
|
my %scripts;
|
|
|
|
opendir(my $dh, $opt_value)
|
|
|
|
or error "Can't opendir($opt_value): $!";
|
|
|
|
while (my $entry = readdir $dh) {
|
|
|
|
foreach
|
|
|
|
my $hook ('setup', 'extract', 'essential', 'customize') {
|
2020-08-17 16:57:36 +00:00
|
|
|
if ($entry =~ m/^\Q$hook\E/ and -x "$opt_value/$entry") {
|
|
|
|
push @{ $scripts{$hook} }, "$opt_value/$entry";
|
|
|
|
}
|
2020-08-15 22:50:46 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
closedir($dh);
|
|
|
|
# add the sorted list associated with each key to the respective
|
|
|
|
# list of hooks
|
|
|
|
foreach my $hook (keys %scripts) {
|
2020-08-17 16:57:36 +00:00
|
|
|
push @{ $options->{"${hook}_hook"} },
|
|
|
|
(sort @{ $scripts{$hook} });
|
2020-08-15 22:50:46 +00:00
|
|
|
}
|
|
|
|
},
|
2020-03-07 01:12:21 +00:00
|
|
|
# Sometimes --simulate fails even though non-simulate succeeds because
|
|
|
|
# in simulate mode, apt cannot rely on dpkg to figure out tricky
|
|
|
|
# dependency situations and will give up instead when it cannot find
|
|
|
|
# a solution.
|
|
|
|
#
|
|
|
|
# 2020-02-06, #debian-apt on OFTC, times in UTC+1
|
|
|
|
# 12:52 < DonKult> [...] It works in non-simulation because simulate is
|
|
|
|
# more picky. If you wanna know why simulate complains
|
|
|
|
# here prepare for long suffering in dependency hell.
|
|
|
|
'simulate' => \$options->{dryrun},
|
|
|
|
'dry-run' => \$options->{dryrun},
|
2020-04-09 22:00:36 +00:00
|
|
|
'skip=s@' => \$options->{skip},
|
2018-09-18 09:20:24 +00:00
|
|
|
) or pod2usage(-exitval => 2, -verbose => 1);
|
|
|
|
|
2019-02-23 07:43:15 +00:00
|
|
|
if (defined($logfile)) {
|
2020-01-08 14:41:49 +00:00
|
|
|
open(STDERR, '>', $logfile) or error "cannot open $logfile: $!";
|
2019-02-23 07:43:15 +00:00
|
|
|
}
|
|
|
|
|
2020-01-08 16:44:07 +00:00
|
|
|
foreach my $arg (@{ $options->{noop} }) {
|
2020-01-08 16:41:46 +00:00
|
|
|
info "The option --$arg is a no-op. It only exists for compatibility"
|
|
|
|
. " with some debootstrap wrappers.";
|
2019-02-15 11:42:46 +00:00
|
|
|
}
|
|
|
|
|
2020-01-10 10:44:15 +00:00
|
|
|
if ($options->{dryrun}) {
|
2020-03-22 13:08:21 +00:00
|
|
|
foreach my $hook ('setup', 'extract', 'essential', 'customize') {
|
2020-01-10 10:44:15 +00:00
|
|
|
if (scalar @{ $options->{"${hook}_hook"} } > 0) {
|
|
|
|
warning "In dry-run mode, --$hook-hook options have no effect";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-01-08 16:44:07 +00:00
|
|
|
my @valid_variants = (
|
|
|
|
'extract', 'custom', 'essential', 'apt',
|
|
|
|
'required', 'minbase', 'buildd', 'important',
|
|
|
|
'debootstrap', '-', 'standard'
|
|
|
|
);
|
|
|
|
if (none { $_ eq $options->{variant} } @valid_variants) {
|
2020-01-08 14:41:49 +00:00
|
|
|
error "invalid variant. Choose from " . (join ', ', @valid_variants);
|
2018-09-18 09:20:24 +00:00
|
|
|
}
|
|
|
|
# debootstrap and - are an alias for important
|
2018-09-23 17:36:07 +00:00
|
|
|
if (any { $_ eq $options->{variant} } ('-', 'debootstrap')) {
|
2020-01-08 14:41:49 +00:00
|
|
|
$options->{variant} = 'important';
|
2018-09-18 09:20:24 +00:00
|
|
|
}
|
|
|
|
|
2020-01-08 16:44:07 +00:00
|
|
|
if ($options->{variant} eq 'essential'
|
|
|
|
and scalar @{ $options->{include} } > 0) {
|
2020-01-08 16:41:46 +00:00
|
|
|
warning "cannot install extra packages with variant essential because"
|
|
|
|
. " apt is missing";
|
2018-10-23 16:04:05 +00:00
|
|
|
}
|
|
|
|
|
2018-09-18 09:20:24 +00:00
|
|
|
# fakeroot is an alias for fakechroot
|
|
|
|
if ($options->{mode} eq 'fakeroot') {
|
2020-01-08 14:41:49 +00:00
|
|
|
$options->{mode} = 'fakechroot';
|
2018-09-18 09:20:24 +00:00
|
|
|
}
|
|
|
|
# sudo is an alias for root
|
|
|
|
if ($options->{mode} eq 'sudo') {
|
2020-01-08 14:41:49 +00:00
|
|
|
$options->{mode} = 'root';
|
2018-09-18 09:20:24 +00:00
|
|
|
}
|
2020-01-08 16:44:07 +00:00
|
|
|
my @valid_modes
|
|
|
|
= ('auto', 'root', 'unshare', 'fakechroot', 'proot', 'chrootless');
|
2018-09-23 17:36:07 +00:00
|
|
|
if (none { $_ eq $options->{mode} } @valid_modes) {
|
2020-01-08 14:41:49 +00:00
|
|
|
error "invalid mode. Choose from " . (join ', ', @valid_modes);
|
2018-09-18 09:20:24 +00:00
|
|
|
}
|
|
|
|
|
2020-04-09 16:33:05 +00:00
|
|
|
# sqfs is an alias for squashfs
|
|
|
|
if ($format eq 'sqfs') {
|
2020-05-02 21:53:41 +00:00
|
|
|
$format = 'squashfs';
|
2020-04-09 16:33:05 +00:00
|
|
|
}
|
|
|
|
# dir is an alias for directory
|
|
|
|
if ($format eq 'dir') {
|
|
|
|
$format = 'directory';
|
|
|
|
}
|
|
|
|
my @valid_formats = ('auto', 'directory', 'tar', 'squashfs', 'ext2');
|
|
|
|
if (none { $_ eq $format } @valid_formats) {
|
|
|
|
error "invalid format. Choose from " . (join ', ', @valid_formats);
|
|
|
|
}
|
|
|
|
|
2020-11-12 21:36:10 +00:00
|
|
|
foreach my $tool ('dpkg', 'dpkg-deb', 'apt-get', 'apt-cache', 'apt-config',
|
|
|
|
'tar') {
|
|
|
|
my $found = 0;
|
|
|
|
foreach my $path (split /:/, $ENV{PATH}) {
|
|
|
|
if (-f "$path/$tool" && -x _ ) {
|
|
|
|
$found = 1;
|
|
|
|
last;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (!$found) {
|
|
|
|
error "cannot find $tool";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-03-25 13:35:38 +00:00
|
|
|
my $check_fakechroot_running = sub {
|
2020-01-08 14:41:49 +00:00
|
|
|
# test if we are inside fakechroot already
|
|
|
|
# We fork a child process because setting FAKECHROOT_DETECT seems to
|
|
|
|
# be an irreversible operation for fakechroot.
|
|
|
|
my $pid = open my $rfh, '-|' // error "failed to fork(): $!";
|
|
|
|
if ($pid == 0) {
|
|
|
|
# with the FAKECHROOT_DETECT environment variable set, any program
|
|
|
|
# execution will be replaced with the output "fakeroot [version]"
|
2020-01-09 07:39:40 +00:00
|
|
|
local $ENV{FAKECHROOT_DETECT} = 0;
|
2020-01-08 14:41:49 +00:00
|
|
|
exec 'echo', 'If fakechroot is running, this will not be printed';
|
|
|
|
}
|
|
|
|
my $content = do { local $/; <$rfh> };
|
|
|
|
waitpid $pid, 0;
|
|
|
|
my $result = 0;
|
|
|
|
if ($? == 0 and $content =~ /^fakechroot \d\.\d+$/) {
|
|
|
|
$result = 1;
|
|
|
|
}
|
|
|
|
return $result;
|
2019-03-25 13:35:38 +00:00
|
|
|
};
|
|
|
|
|
2018-10-23 16:04:05 +00:00
|
|
|
# figure out the mode to use or test whether the chosen mode is legal
|
|
|
|
if ($options->{mode} eq 'auto') {
|
2020-01-08 14:41:49 +00:00
|
|
|
if (&{$check_fakechroot_running}()) {
|
|
|
|
# if mmdebstrap is executed inside fakechroot, then we assume the
|
|
|
|
# user expects fakechroot mode
|
|
|
|
$options->{mode} = 'fakechroot';
|
|
|
|
} elsif ($EFFECTIVE_USER_ID == 0) {
|
|
|
|
# if mmdebstrap is executed as root, we assume the user wants root
|
|
|
|
# mode
|
|
|
|
$options->{mode} = 'root';
|
|
|
|
} elsif (test_unshare(0)) {
|
|
|
|
# otherwise, unshare mode is our best option if test_unshare()
|
|
|
|
# succeeds
|
|
|
|
$options->{mode} = 'unshare';
|
|
|
|
} elsif (system('fakechroot --version>/dev/null') == 0) {
|
|
|
|
# the next fallback is fakechroot
|
|
|
|
# exec ourselves again but within fakechroot
|
|
|
|
my @prefix = ();
|
2020-01-08 16:44:07 +00:00
|
|
|
if ($is_covering) {
|
2020-01-08 14:41:49 +00:00
|
|
|
@prefix = ($EXECUTABLE_NAME, '-MDevel::Cover=-silent,-nogcov');
|
|
|
|
}
|
|
|
|
exec 'fakechroot', 'fakeroot', @prefix, $PROGRAM_NAME, @ARGVORIG;
|
|
|
|
} elsif (system('proot --version>/dev/null') == 0) {
|
|
|
|
# and lastly, proot
|
|
|
|
$options->{mode} = 'proot';
|
|
|
|
} else {
|
|
|
|
error "unable to pick chroot mode automatically";
|
|
|
|
}
|
|
|
|
info "automatically chosen mode: $options->{mode}";
|
2018-10-23 16:04:05 +00:00
|
|
|
} elsif ($options->{mode} eq 'root') {
|
2020-01-08 14:41:49 +00:00
|
|
|
if ($EFFECTIVE_USER_ID != 0) {
|
|
|
|
error "need to be root";
|
|
|
|
}
|
2018-10-23 16:04:05 +00:00
|
|
|
} elsif ($options->{mode} eq 'proot') {
|
2020-01-08 14:41:49 +00:00
|
|
|
if (system('proot --version>/dev/null') != 0) {
|
|
|
|
error "need working proot binary";
|
|
|
|
}
|
2018-10-23 16:04:05 +00:00
|
|
|
} elsif ($options->{mode} eq 'fakechroot') {
|
2020-01-08 14:41:49 +00:00
|
|
|
if (&{$check_fakechroot_running}()) {
|
|
|
|
# fakechroot is already running
|
|
|
|
} elsif (system('fakechroot --version>/dev/null') != 0) {
|
|
|
|
error "need working fakechroot binary";
|
|
|
|
} else {
|
|
|
|
# exec ourselves again but within fakechroot
|
|
|
|
my @prefix = ();
|
2020-01-08 16:44:07 +00:00
|
|
|
if ($is_covering) {
|
2020-01-08 14:41:49 +00:00
|
|
|
@prefix = ($EXECUTABLE_NAME, '-MDevel::Cover=-silent,-nogcov');
|
|
|
|
}
|
|
|
|
exec 'fakechroot', 'fakeroot', @prefix, $PROGRAM_NAME, @ARGVORIG;
|
|
|
|
}
|
2018-10-23 16:04:05 +00:00
|
|
|
} elsif ($options->{mode} eq 'unshare') {
|
2020-01-08 14:41:49 +00:00
|
|
|
if (!test_unshare(1)) {
|
|
|
|
my $procfile = '/proc/sys/kernel/unprivileged_userns_clone';
|
2020-01-08 16:44:07 +00:00
|
|
|
open(my $fh, '<', $procfile)
|
|
|
|
or error "failed to open $procfile: $!";
|
|
|
|
chomp(
|
|
|
|
my $content = do { local $/; <$fh> }
|
|
|
|
);
|
2020-01-08 14:41:49 +00:00
|
|
|
close($fh);
|
|
|
|
if ($content ne "1") {
|
2020-01-08 16:41:46 +00:00
|
|
|
info "/proc/sys/kernel/unprivileged_userns_clone is set to"
|
|
|
|
. " $content";
|
|
|
|
info "Try running:";
|
|
|
|
info " sudo sysctl -w kernel.unprivileged_userns_clone=1";
|
|
|
|
info "or permanently enable unprivileged usernamespaces by"
|
|
|
|
. " putting the setting into /etc/sysctl.d/";
|
2020-03-07 01:13:53 +00:00
|
|
|
info "THIS SETTING HAS SECURITY IMPLICATIONS!";
|
|
|
|
info "Refer to https://bugs.debian.org/cgi-bin/"
|
2020-01-08 16:41:46 +00:00
|
|
|
. "bugreport.cgi?bug=898446";
|
2020-01-08 14:41:49 +00:00
|
|
|
}
|
|
|
|
exit 1;
|
|
|
|
}
|
2018-10-23 16:04:05 +00:00
|
|
|
} elsif ($options->{mode} eq 'chrootless') {
|
2020-05-02 21:54:04 +00:00
|
|
|
if ($EFFECTIVE_USER_ID == 0) {
|
|
|
|
warning "running chrootless mode as root might damage the host "
|
|
|
|
. "system";
|
|
|
|
}
|
2018-10-23 16:04:05 +00:00
|
|
|
} else {
|
2020-01-08 14:41:49 +00:00
|
|
|
error "unknown mode: $options->{mode}";
|
2018-09-18 09:20:24 +00:00
|
|
|
}
|
|
|
|
|
2019-10-27 21:04:21 +00:00
|
|
|
# Normalize the --architectures option: every entry may itself contain
# several architecture names separated by commas and/or whitespace.  The
# result is a flat list, de-duplicated in first-seen order.
my @architectures = ();
for my $archlist (@{ $options->{architectures} }) {
    for my $candidate (split /[,\s]+/, $archlist) {
        # strip leading and trailing whitespace
        $candidate =~ s/^\s+|\s+$//g;
        # drop empty tokens left over from the split
        next if $candidate eq '';
        # keep the list free of duplicates
        next if any { $_ eq $candidate } @architectures;
        push @architectures, $candidate;
    }
}

# The first listed architecture becomes the chroot's native architecture;
# all remaining ones become foreign architectures.  With an empty list we
# fall back to the host's native architecture.
$options->{nativearch}   = $hostarch;
$options->{foreignarchs} = [];
if (scalar @architectures == 0) {
    warning "empty architecture list: falling back to native architecture"
      . " $hostarch";
} elsif (scalar @architectures == 1) {
    $options->{nativearch} = $architectures[0];
} else {
    $options->{nativearch} = $architectures[0];
    push @{ $options->{foreignarchs} },
      @architectures[1 .. $#architectures];
}

debug "Native architecture (outside): $hostarch";
debug "Native architecture (inside): $options->{nativearch}";
debug("Foreign architectures (inside): "
      . (join ', ', @{ $options->{foreignarchs} }));
|
2018-09-18 09:20:24 +00:00
|
|
|
|
2018-09-21 06:00:06 +00:00
|
|
|
{
    # Map Debian architecture names to the suffix of the matching qemu-user
    # binary.  FIXME: autogenerate this list
    my $deb2qemu = {
        alpha    => 'alpha',
        amd64    => 'x86_64',
        arm      => 'arm',
        arm64    => 'aarch64',
        armel    => 'arm',
        armhf    => 'arm',
        hppa     => 'hppa',
        i386     => 'i386',
        m68k     => 'm68k',
        mips     => 'mips',
        mips64   => 'mips64',
        mips64el => 'mips64el',
        mipsel   => 'mipsel',
        powerpc  => 'ppc',
        ppc64    => 'ppc64',
        ppc64el  => 'ppc64le',
        riscv64  => 'riscv64',
        s390x    => 's390x',
        sh4      => 'sh4',
        sparc    => 'sparc',
        sparc64  => 'sparc64',
    };

    # Run "arch-test @extra_args $options->{nativearch}" in a child process
    # and return 1 if and only if it reports the architecture as executable.
    # This factors out the previously duplicated with-emulation and
    # without-emulation (-n) invocations.
    my $run_arch_test = sub {
        my @extra_args = @_;
        # bugfix: the original wrote
        #     my $pid = open my $fh, '-|' // error "failed to fork(): $!";
        # where // bound to the mode string '-|' (always defined) instead of
        # open()'s return value, so a fork failure was never reported and
        # $pid could be undef
        my $pid = open(my $fh, '-|') // error "failed to fork(): $!";
        if ($pid == 0) {
            {
                ## no critic (TestingAndDebugging::ProhibitNoWarnings)
                # don't print a warning if the following fails
                no warnings;
                exec 'arch-test', @extra_args, $options->{nativearch};
            }
            # if exec didn't work (for example because the arch-test
            # program is missing) prepare for the worst and assume that
            # the architecture cannot be executed
            print "$options->{nativearch}: not supported on this"
              . " machine/kernel\n";
            exit 1;
        }
        chomp(
            my $content = do { local $/; <$fh> }
        );
        close $fh;
        return ($? == 0 and $content eq "$options->{nativearch}: ok") ? 1 : 0;
    };

    if (any { $_ eq 'check/qemu' } @{ $options->{skip} }) {
        info "skipping check/qemu as requested";
    } elsif ($options->{mode} eq "chrootless") {
        info "skipping emulation check in chrootless mode";
    } elsif ($hostarch ne $options->{nativearch}) {
        if (system('arch-test --version>/dev/null') != 0) {
            error "install arch-test for foreign architecture support";
        }
        # can the native architecture be executed with qemu emulation?
        my $withemu = $run_arch_test->();
        # can it be executed natively (arch-test -n)?
        my $noemu = $run_arch_test->('-n');

        # four different outcomes, depending on whether arch-test
        # succeeded with or without emulation
        #
        # withemu | noemu |
        # --------+-------+-----------------
        #   0     |   0   | test why emu doesn't work and quit
        #   0     |   1   | should never happen
        #   1     |   0   | use qemu emulation
        #   1     |   1   | don't use qemu emulation
        if ($withemu == 0 and $noemu == 0) {
            {
                open my $fh, '<', '/proc/filesystems'
                  or error "failed to open /proc/filesystems: $!";
                unless (grep { /^nodev\tbinfmt_misc$/ } (<$fh>)) {
                    warning "binfmt_misc not found in /proc/filesystems --"
                      . " is the module loaded?";
                }
                close $fh;
            }
            {
                open my $fh, '<', '/proc/mounts'
                  or error "failed to open /proc/mounts: $!";
                unless (
                    grep {
                        /^binfmt_misc\s+
                         \/proc\/sys\/fs\/binfmt_misc\s+
                         binfmt_misc\s+/x
                    } (<$fh>)
                ) {
                    warning "binfmt_misc not found in /proc/mounts -- not"
                      . " mounted?";
                }
                close $fh;
            }
            {
                if (!exists $deb2qemu->{ $options->{nativearch} }) {
                    warning "no mapping from $options->{nativearch} to"
                      . " qemu-user binary";
                } elsif (
                    system('/usr/sbin/update-binfmts --version>/dev/null')
                    != 0) {
                    warning "cannot find /usr/sbin/update-binfmts";
                } else {
                    my $binfmt_identifier
                      = 'qemu-' . $deb2qemu->{ $options->{nativearch} };
                    # bugfix: the original appended "// error ..." to the
                    # last argument of open(), so an open() failure was
                    # never detected
                    open(my $fh, '-|', '/usr/sbin/update-binfmts',
                        '--display', $binfmt_identifier)
                      or error "failed to fork(): $!";
                    chomp(
                        my $binfmts = do { local $/; <$fh> }
                    );
                    close $fh;
                    if ($? != 0 || $binfmts eq '') {
                        warning "$binfmt_identifier is not a supported"
                          . " binfmt name";
                    }
                }
            }
            error "$options->{nativearch} can neither be executed natively"
              . " nor via qemu user emulation with binfmt_misc";
        } elsif ($withemu == 0 and $noemu == 1) {
            error "arch-test succeeded without emu but not with emu";
        } elsif ($withemu == 1 and $noemu == 0) {
            info "$options->{nativearch} cannot be executed, falling back"
              . " to qemu-user";
            if (!exists $deb2qemu->{ $options->{nativearch} }) {
                error "no mapping from $options->{nativearch} to qemu-user"
                  . " binary";
            }
            $options->{qemu} = $deb2qemu->{ $options->{nativearch} };
            if (any { $_ eq $options->{mode} } ('root', 'unshare')) {
                my $qemubin = "/usr/bin/qemu-$options->{qemu}-static";
                if (!-e $qemubin) {
                    error "cannot find $qemubin";
                }
            }
        } elsif ($withemu == 1 and $noemu == 1) {
            info "$options->{nativearch} is different from $hostarch but"
              . " can be executed natively";
        } else {
            error "logic error";
        }
    } else {
        info "chroot architecture $options->{nativearch} is equal to the"
          . " host's architecture";
    }
}
|
|
|
|
|
2018-10-02 08:09:22 +00:00
|
|
|
{
    # The first positional argument (if present) is the SUITE, the second
    # the TARGET.  Without a target the output goes to standard output.
    my $suite;
    if (scalar @ARGV > 0) {
        $suite = shift @ARGV;
        if (scalar @ARGV > 0) {
            $options->{target} = shift @ARGV;
        } else {
            $options->{target} = '-';
        }
    } else {
        info
          "No SUITE specified, expecting sources.list on standard input";
        $options->{target} = '-';
    }

    my $sourceslists = [];

    # Record a sources.list entry of the given type without a filename.  If
    # the most recent entry has the same type and also no filename, the new
    # content is merged into it (one-line entries are joined by a single
    # newline, deb822 stanzas by a blank line).  This factors out the
    # previously triplicated append-or-push logic.
    my $add_entry = sub {
        my ($type, $content) = @_;
        if (    scalar @{$sourceslists} > 0
            && $sourceslists->[-1]{type} eq $type
            && !defined $sourceslists->[-1]{fname}) {
            $sourceslists->[-1]{content}
              .= ($type eq 'one-line' ? "\n" : "\n\n") . $content;
        } else {
            push @{$sourceslists},
              {
                type    => $type,
                fname   => undef,
                content => $content,
              };
        }
    };

    if (!defined $suite) {
        # If no suite was specified, then the whole sources.list has to
        # come from standard input
        info "Reading sources.list from standard input...";
        my $content = do {
            local $/;
            ## no critic (InputOutput::ProhibitExplicitStdin)
            <STDIN>;
        };
        my $type = guess_sources_format($content);
        if (!defined $type
            || ($type ne "deb822" and $type ne "one-line")) {
            error "cannot determine sources.list format";
        }
        # the list is still empty at this point, so this starts a new entry
        $add_entry->($type, $content);
    } else {
        # Normalize the --components option: split on commas/whitespace,
        # trim, drop empty tokens and duplicates.
        my @components = ();
        foreach my $comp (@{ $options->{components} }) {
            my @comps = split /[,\s]+/, $comp;
            foreach my $c (@comps) {
                # strip leading and trailing whitespace
                $c =~ s/^\s+|\s+$//g;
                # skip if the remainder is an empty string
                if ($c eq "") {
                    next;
                }
                # do not append component if it's already in the list
                if (any { $_ eq $c } @components) {
                    next;
                }
                push @components, $c;
            }
        }
        my $compstr = join " ", @components;

        # if the currently selected apt keyrings do not contain the
        # necessary key material for the chosen suite, then attempt adding
        # a signed-by option
        my $signedby        = '';
        my %suite_by_vendor = get_suite_by_vendor();
        {
            # this bare block acts as a one-shot loop so that "last" can
            # abort the signed-by detection at any point
            my $keyring = get_keyring_by_suite($suite, \%suite_by_vendor);
            if (!defined $keyring) {
                last;
            }

            # we can only check whether the signed-by entry is needed if
            # the automatically chosen keyring exists
            if (!defined $keyring || !-e $keyring) {
                last;
            }

            # we can only check key material if gpg is installed
            my $gpghome = tempdir(
                "mmdebstrap.gpghome.XXXXXXXXXXXX",
                TMPDIR  => 1,
                CLEANUP => 1
            );
            my @gpgcmd = (
                'gpg', '--quiet',
                '--ignore-time-conflict', '--no-options',
                '--no-default-keyring', '--homedir',
                $gpghome, '--no-auto-check-trustdb',
                '--trust-model', 'always'
            );
            my ($ret, $message);
            {
                my $fh;
                {
                    # change warning handler to prevent message
                    # Can't exec "gpg": No such file or directory
                    local $SIG{__WARN__} = sub { $message = shift; };
                    $ret = open $fh, '-|', @gpgcmd, '--version';
                }
                # we only want to check if the gpg command exists
                close $fh;
            }
            if ($? != 0 || !defined $ret || defined $message) {
                info "gpg --version failed: cannot determine the right"
                  . " signed-by value";
                last;
            }
            # find all the fingerprints of the keys apt currently
            # knows about
            my @keyringopts = ();
            opendir my $dh, "$options->{apttrustedparts}"
              or error "cannot read $options->{apttrustedparts}";
            while (my $filename = readdir $dh) {
                if ($filename !~ /\.(asc|gpg)$/) {
                    next;
                }
                push @keyringopts, '--keyring',
                  "$options->{apttrustedparts}/$filename";
            }
            closedir $dh;
            if (-e $options->{apttrusted}) {
                push @keyringopts, '--keyring', $options->{apttrusted};
            }
            my @aptfingerprints = ();
            if (scalar @keyringopts == 0) {
                # apt knows about no keys at all, so the signed-by option
                # is definitely needed
                $signedby = " [signed-by=\"$keyring\"]";
                last;
            }
            {
                # bugfix: the original appended "// error ..." to the last
                # list element ('--list-keys'), which is always defined, so
                # open() failures were silently ignored
                open(my $fh, '-|', @gpgcmd, @keyringopts, '--with-colons',
                    '--list-keys')
                  or error "failed to fork(): $!";
                while (my $line = <$fh>) {
                    if ($line !~ /^fpr:::::::::([^:]+):/) {
                        next;
                    }
                    push @aptfingerprints, $1;
                }
                close $fh;
            }
            if ($? != 0) {
                error "gpg failed";
            }
            if (scalar @aptfingerprints == 0) {
                $signedby = " [signed-by=\"$keyring\"]";
                last;
            }
            # check if all fingerprints from the keyring that we guessed
            # are known by apt and only add signed-by option if that's not
            # the case
            my @suitefingerprints = ();
            {
                # bugfix: same open()/"//" precedence fix as above
                open(my $fh, '-|', @gpgcmd, '--keyring', $keyring,
                    '--with-colons', '--list-keys')
                  or error "failed to fork(): $!";
                while (my $line = <$fh>) {
                    if ($line !~ /^fpr:::::::::([^:]+):/) {
                        next;
                    }
                    # if this fingerprint is not known by apt, then we need
                    # to add the signed-by option
                    if (none { $_ eq $1 } @aptfingerprints) {
                        $signedby = " [signed-by=\"$keyring\"]";
                        last;
                    }
                }
                close $fh;
            }
            if ($? != 0) {
                error "gpg failed";
            }
        }
        if (scalar @ARGV > 0) {
            for my $arg (@ARGV) {
                if ($arg eq '-') {
                    info 'Reading sources.list from standard input...';
                    my $content = do {
                        local $/;
                        ## no critic (InputOutput::ProhibitExplicitStdin)
                        <STDIN>;
                    };
                    my $type = guess_sources_format($content);
                    if (!defined $type
                        || ($type ne 'deb822' and $type ne 'one-line')) {
                        error "cannot determine sources.list format";
                    }
                    $add_entry->($type, $content);
                } elsif ($arg =~ /^deb(-src)? /) {
                    # a literal one-line-style sources.list entry
                    $add_entry->('one-line', "$arg\n");
                } elsif ($arg =~ /:\/\//) {
                    # a mirror URI: build a one-line entry from the suite
                    # and components
                    $add_entry->('one-line',
                        "deb$signedby $arg $suite $compstr\n");
                } elsif (-f $arg) {
                    # an existing file: slurp it and derive its format from
                    # the file extension or, failing that, its content
                    my $content = '';
                    open my $fh, '<', $arg or error "cannot open $arg: $!";
                    while (my $line = <$fh>) {
                        $content .= $line;
                    }
                    close $fh;
                    my $type = undef;
                    if ($arg =~ /\.list$/) {
                        $type = 'one-line';
                    } elsif ($arg =~ /\.sources$/) {
                        $type = 'deb822';
                    } else {
                        $type = guess_sources_format($content);
                    }
                    if (!defined $type
                        || ($type ne 'deb822' and $type ne 'one-line')) {
                        error "cannot determine sources.list format";
                    }
                    # file-backed entries keep their basename and are never
                    # merged with neighbouring entries
                    push @{$sourceslists},
                      {
                        type    => $type,
                        fname   => basename($arg),
                        content => $content,
                      };
                } else {
                    error "invalid mirror: $arg";
                }
            }
        } else {
            # no explicit mirrors given: derive the sources.list from the
            # suite name alone
            my $sourceslist
              = get_sourceslist_by_suite($suite, $options->{nativearch},
                $signedby, $compstr, \%suite_by_vendor);
            $add_entry->('one-line', $sourceslist);
        }
    }
    if (scalar @{$sourceslists} == 0) {
        error "empty apt sources.list";
    }
    debug("sources list entries:");
    for my $list (@{$sourceslists}) {
        if (defined $list->{fname}) {
            debug("fname: $list->{fname}");
        }
        debug("type: $list->{type}");
        debug("content:");
        for my $line (split "\n", $list->{content}) {
            debug(" $line");
        }
    }
    $options->{sourceslists} = $sourceslists;
}
|
|
|
|
|
|
|
|
# Canonicalize the target path unless output goes to standard output ('-').
if ($options->{target} ne '-') {
    my $resolved = abs_path($options->{target});
    if (!defined $resolved) {
        error "unable to get absolute path of target directory"
          . " $options->{target}";
    }
    $options->{target} = $resolved;
}

# Guard against catastrophic mistakes such as using "/" as the output
# directory.
if ($options->{target} eq '/') {
    error "refusing to use the filesystem root as output directory";
}
|
|
|
|
|
2019-01-20 09:41:29 +00:00
|
|
|
# command (if any) used to compress the output tarball, derived from the
# target filename extension
my $tar_compressor = get_tar_compressor($options->{target});

# figure out the right format
if ($format eq 'auto') {
    # Fork a child with stdin/stdout redirected to /dev/null, exec @cmd and
    # error out unless it exits successfully.  Used to verify that the
    # external tools required by a given output format are installed; this
    # factors out the previously duplicated compressor and tar2sqfs probes.
    my $assert_can_execute = sub {
        my @cmd = @_;
        my $pid = fork() // error "fork() failed: $!";
        if ($pid == 0) {
            open(STDOUT, '>', '/dev/null')
              or error "cannot open /dev/null for writing: $!";
            open(STDIN, '<', '/dev/null')
              or error "cannot open /dev/null for reading: $!";
            # block form of exec avoids shell interpretation even for a
            # single-element command
            exec { $cmd[0] } @cmd
              or error("cannot exec " . (join " ", @cmd) . ": $!");
        }
        waitpid $pid, 0;
        if ($? != 0) {
            error("failed to start " . (join " ", @cmd));
        }
    };
    if ($options->{target} ne '-' and -d $options->{target}) {
        $format = 'directory';
    } elsif (
        defined $tar_compressor
        or $options->{target} =~ /\.tar$/
        or $options->{target} eq '-'
        or -p $options->{target}    # named pipe (fifo)
        or -c $options->{target}    # character special like /dev/null
    ) {
        $format = 'tar';
        # check if the compressor is installed
        if (defined $tar_compressor) {
            $assert_can_execute->(@{$tar_compressor});
        }
    } elsif ($options->{target} =~ /\.(squashfs|sqfs)$/) {
        $format = 'squashfs';
        # check if tar2sqfs is installed
        $assert_can_execute->('tar2sqfs', '--version');
    } elsif ($options->{target} =~ /\.ext2$/) {
        $format = 'ext2';
        # check if the installed version of genext2fs supports tarballs on
        # stdin
        (undef, my $filename) = tempfile(
            "mmdebstrap.ext2.XXXXXXXXXXXX",
            OPEN   => 0,
            TMPDIR => 1
        );
        # bugfix: the original appended "// error ..." to $filename, so the
        # // operator applied to the (always defined) filename and an
        # open() failure was never reported
        open(my $fh, '|-', 'genext2fs', '-B', '1024', '-b', '8', '-N',
            '11', '-a', '-', $filename)
          or error "failed to fork(): $!";
        # write 10240 null-bytes to genext2fs -- this represents an empty
        # tar archive
        print $fh ("\0" x 10240)
          or error "cannot write to genext2fs process";
        close $fh;
        my $exitstatus = $?;
        # bugfix: "unlink $filename // die ..." applied // to $filename and
        # thus never reported unlink failures; also use error for
        # consistency with the rest of the script
        unlink $filename or error "cannot unlink $filename: $!";
        if ($exitstatus != 0) {
            error "genext2fs failed with exit status: $exitstatus";
        }
    } else {
        $format = 'directory';
    }
    info "automatically chosen format: $format";
}
|
|
|
|
|
2020-04-09 16:33:05 +00:00
|
|
|
# Only the tar format can stream to standard output.
if ($options->{target} eq '-' and $format ne 'tar') {
    error "the $format format is unable to write to standard output";
}

if (any { $_ eq $format } ('tar', 'squashfs', 'ext2')) {
    if (    any { $_ eq $options->{variant} } ('extract', 'custom')
        and any { $_ eq $options->{mode} } ('fakechroot', 'proot')) {
        info "creating a tarball or squashfs image or ext2 image in"
          . " fakechroot mode or proot mode might fail in extract and"
          . " custom variants because there might be no tar inside the"
          . " chroot";
    }
    # try to fail early if target tarball or squashfs image cannot be
    # opened for writing
    if ($options->{target} ne '-') {
        if ($options->{dryrun}) {
            if (-e $options->{target}) {
                info "not overwriting $options->{target} because in"
                  . " dry-run mode";
            }
        } else {
            open my $fh, '>', $options->{target}
              or error "cannot open $options->{target} for writing: $!";
            close $fh;
        }
    }
    # since the output is a tarball, we create the rootfs in a temporary
    # directory
    $options->{root} = tempdir('mmdebstrap.XXXXXXXXXX', TMPDIR => 1);
    info "using $options->{root} as tempdir";
    # in unshare and root mode, other users than the current user need to
    # access the rootfs, most prominently, the _apt user. Thus, make the
    # temporary directory world readable.
    if (any { $_ eq $options->{mode} } ('unshare', 'root')) {
        chmod 0755, $options->{root} or error "cannot chmod root: $!";
    }
} elsif ($format eq 'directory') {
    # user does not seem to have specified a tarball as output, thus work
    # directly in the supplied directory
    $options->{root} = $options->{target};
    if (-e $options->{root}) {
        if (!-d $options->{root}) {
            error "$options->{root} exists and is not a directory";
        }
        if (any { $_ eq 'check/empty' } @{ $options->{skip} }) {
            info "skipping check/empty as requested";
        } else {
            # check if the directory is empty or contains nothing more than
            # an empty lost+found directory. The latter exists on freshly
            # created ext3 and ext4 partitions.
            # rationale for requiring an empty directory:
            # https://bugs.debian.org/833525
            opendir(my $dh, $options->{root})
              or error "Can't opendir($options->{root}): $!";
            # bugfix: use defined() -- a plain truthiness test would stop
            # the loop at a file literally named "0" and wrongly treat the
            # directory as empty
            while (defined(my $entry = readdir $dh)) {
                # skip the "." and ".." entries
                next if $entry eq ".";
                next if $entry eq "..";
                # if the entry is a directory named "lost+found" then skip
                # it, if it's empty
                if ($entry eq "lost+found"
                    and -d "$options->{root}/$entry") {
                    # bugfix: the original left this opendir unchecked
                    opendir(my $dh2, "$options->{root}/$entry")
                      or error "Can't opendir($options->{root}/$entry): $!";
                    # Attempt reading the directory thrice. If the third
                    # time succeeds, then it has more entries than just "."
                    # and ".." and must thus not be empty.
                    readdir $dh2;
                    readdir $dh2;
                    # bugfix: defined() for the same "0" reason as above
                    if (defined(readdir $dh2)) {
                        error "$options->{root} contains a non-empty"
                          . " lost+found directory";
                    }
                    closedir($dh2);
                } else {
                    error "$options->{root} is not empty";
                }
            }
            closedir($dh);
        }
    } else {
        my $num_created = make_path "$options->{root}",
          { error => \my $err };
        if ($err && @$err) {
            error(join "; ",
                (map { "cannot create " . (join ": ", %{$_}) } @$err));
        } elsif ($num_created == 0) {
            error "cannot create $options->{root}";
        }
    }
} else {
    error "unknown format: $format";
}
|
|
|
|
|
2018-10-22 13:05:19 +00:00
|
|
|
# apt provides no way to escape double quotes in paths, so a root path
# containing one cannot be represented in the apt configuration at all
if (index($options->{root}, '"') != -1) {
    error "apt cannot handle paths with double quotes";
}
|
|
|
|
|
2018-09-18 09:20:24 +00:00
|
|
|
# subuid/subgid ranges for the invoking user, filled in only for unshare
# mode and reused further below (e.g. by the havemknod check)
my @idmap;
# for unshare mode the rootfs directory has to have appropriate
# permissions
if ($options->{mode} eq 'unshare') {
    @idmap = read_subuid_subgid;
    # sanity check: read_subuid_subgid must return exactly one 'u' (uid)
    # and one 'g' (gid) mapping entry
    if (   scalar(@idmap) != 2
        || $idmap[0][0] ne 'u'
        || $idmap[1][0] ne 'g') {
        error "invalid idmap";
    }

    # $REAL_GROUP_ID ($() stringifies to the whole group list; "+ 0"
    # forces numeric context so only the primary gid is kept
    my $outer_gid = $REAL_GROUP_ID + 0;

    # chown the rootfs to uid/gid 1 inside a new user namespace whose
    # mapping sends id 0 to the invoking user and id 1 to the first
    # subordinate id; get_unshare_cmd returns the child's pid
    # NOTE(review): chown's return value inside the sub is not checked
    # directly -- failure detection relies on the child's exit status;
    # confirm get_unshare_cmd propagates it
    my $pid = get_unshare_cmd(
        sub { chown 1, 1, $options->{root} },
        [
            ['u', '0', $REAL_USER_ID, '1'],
            ['g', '0', $outer_gid, '1'],
            ['u', '1', $idmap[0][2], '1'],
            ['g', '1', $idmap[1][2], '1']]);
    waitpid $pid, 0;
    $? == 0 or error "chown failed";
}
|
|
|
|
|
|
|
|
# figure out whether we have mknod
|
|
|
|
$options->{havemknod} = 0;
|
|
|
|
if ($options->{mode} eq 'unshare') {
|
2020-01-09 07:39:40 +00:00
|
|
|
my $pid = get_unshare_cmd(
|
|
|
|
sub {
|
|
|
|
$options->{havemknod} = havemknod($options->{root});
|
|
|
|
},
|
|
|
|
\@idmap
|
|
|
|
);
|
2020-01-08 14:41:49 +00:00
|
|
|
waitpid $pid, 0;
|
|
|
|
$? == 0 or error "havemknod failed";
|
2020-01-08 16:44:07 +00:00
|
|
|
} elsif (
|
|
|
|
any { $_ eq $options->{mode} }
|
|
|
|
('root', 'fakechroot', 'proot', 'chrootless')
|
|
|
|
) {
|
2020-01-08 14:41:49 +00:00
|
|
|
$options->{havemknod} = havemknod($options->{root});
|
2018-10-22 12:44:45 +00:00
|
|
|
} else {
|
2020-01-08 14:41:49 +00:00
|
|
|
error "unknown mode: $options->{mode}";
|
2018-09-18 09:20:24 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
my $devtar = '';
|
2018-10-23 13:19:01 +00:00
|
|
|
# We always craft the /dev entries ourselves if a tarball is to be created
|
2020-04-09 16:33:05 +00:00
|
|
|
if (any { $_ eq $format } ('tar', 'squashfs', 'ext2')) {
|
2020-01-08 14:41:49 +00:00
|
|
|
foreach my $file (@devfiles) {
|
2020-01-08 16:44:07 +00:00
|
|
|
my ($fname, $mode, $type, $linkname, $devmajor, $devminor)
|
|
|
|
= @{$file};
|
|
|
|
my $entry = pack(
|
|
|
|
'a100 a8 a8 a8 a12 a12 A8 a1 a100 a8 a32 a32 a8 a8 a155 x12',
|
2020-01-08 14:41:49 +00:00
|
|
|
$fname,
|
2020-01-08 16:44:07 +00:00
|
|
|
sprintf('%07o', $mode),
|
2020-08-15 20:36:13 +00:00
|
|
|
sprintf('%07o', 0), # uid
|
|
|
|
sprintf('%07o', 0), # gid
|
|
|
|
sprintf('%011o', 0), # size
|
|
|
|
sprintf('%011o', $mtime),
|
|
|
|
'', # checksum
|
|
|
|
$type,
|
|
|
|
$linkname,
|
|
|
|
"ustar ",
|
|
|
|
'', # username
|
|
|
|
'', # groupname
|
|
|
|
defined($devmajor) ? sprintf('%07o', $devmajor) : '',
|
|
|
|
defined($devminor) ? sprintf('%07o', $devminor) : '',
|
|
|
|
'', # prefix
|
|
|
|
);
|
|
|
|
# compute and insert checksum
|
|
|
|
substr($entry, 148, 7)
|
|
|
|
= sprintf("%06o\0", unpack("%16C*", $entry));
|
|
|
|
$devtar .= $entry;
|
2020-01-08 14:41:49 +00:00
|
|
|
}
|
2020-08-15 20:36:13 +00:00
|
|
|
} elsif ($format eq 'directory') {
|
|
|
|
# nothing to do
|
2018-10-22 12:44:45 +00:00
|
|
|
} else {
|
2020-08-15 20:36:13 +00:00
|
|
|
error "unknown format: $format";
|
2018-09-18 09:20:24 +00:00
|
|
|
}
|
|
|
|
|
2020-08-15 20:36:13 +00:00
|
|
|
my $exitstatus = 0;
|
|
|
|
my @taropts = (
|
|
|
|
'--sort=name',
|
|
|
|
"--mtime=\@$mtime",
|
|
|
|
'--clamp-mtime',
|
|
|
|
'--numeric-owner',
|
|
|
|
'--one-file-system',
|
|
|
|
'--format=pax',
|
|
|
|
'--pax-option=exthdr.name=%d/PaxHeaders/%f,delete=atime,delete=ctime',
|
|
|
|
'-c',
|
|
|
|
'--exclude=./dev'
|
|
|
|
);
|
|
|
|
# tar2sqfs and genext2fs do not support extended attributes
|
|
|
|
if ($format eq "squashfs") {
|
|
|
|
warning "tar2sqfs does not support extended attributes";
|
|
|
|
} elsif ($format eq "ext2") {
|
|
|
|
warning "genext2fs does not support extended attributes";
|
|
|
|
} else {
|
|
|
|
push @taropts, '--xattrs';
|
|
|
|
}
|
2020-01-08 14:41:49 +00:00
|
|
|
|
2020-08-15 20:36:13 +00:00
|
|
|
# disable signals so that we can fork and change behaviour of the signal
|
|
|
|
# handler in the parent and child without getting interrupted
|
|
|
|
my $sigset = POSIX::SigSet->new(SIGINT, SIGHUP, SIGPIPE, SIGTERM);
|
|
|
|
POSIX::sigprocmask(SIG_BLOCK, $sigset) or error "Can't block signals: $!";
|
2020-01-08 14:41:49 +00:00
|
|
|
|
2020-08-15 20:36:13 +00:00
|
|
|
my $pid;
|
2020-01-08 14:41:49 +00:00
|
|
|
|
2020-08-15 20:36:13 +00:00
|
|
|
# a pipe to transfer the final tarball from the child to the parent
|
|
|
|
pipe my $rfh, my $wfh;
|
2020-01-08 14:41:49 +00:00
|
|
|
|
2020-08-15 20:36:13 +00:00
|
|
|
# instead of two pipe calls, creating four file handles, we use socketpair
|
|
|
|
socketpair my $childsock, my $parentsock, AF_UNIX, SOCK_STREAM, PF_UNSPEC
|
|
|
|
or error "socketpair failed: $!";
|
|
|
|
$options->{hooksock} = $childsock;
|
2020-08-18 10:08:55 +00:00
|
|
|
# for communicating the required number of blocks, we don't need
|
|
|
|
# bidirectional communication, so a pipe() is enough
|
|
|
|
# we don't communicate this via the hook communication because
|
|
|
|
# a) this would abuse the functionality exclusively for hooks
|
|
|
|
# b) it puts code writing the protocol outside of the helper/listener
|
|
|
|
# c) the forked listener process cannot communicate to its parent
|
|
|
|
pipe my $nblkreader, my $nblkwriter or error "pipe failed: $!";
|
2020-08-15 20:36:13 +00:00
|
|
|
if ($options->{mode} eq 'unshare') {
|
|
|
|
$pid = get_unshare_cmd(
|
|
|
|
sub {
|
|
|
|
# child
|
|
|
|
local $SIG{'INT'} = 'DEFAULT';
|
|
|
|
local $SIG{'HUP'} = 'DEFAULT';
|
|
|
|
local $SIG{'PIPE'} = 'DEFAULT';
|
|
|
|
local $SIG{'TERM'} = 'DEFAULT';
|
2020-01-08 14:41:49 +00:00
|
|
|
|
2020-08-15 20:36:13 +00:00
|
|
|
# unblock all delayed signals (and possibly handle them)
|
|
|
|
POSIX::sigprocmask(SIG_UNBLOCK, $sigset)
|
|
|
|
or error "Can't unblock signals: $!";
|
2020-01-08 14:41:49 +00:00
|
|
|
|
2020-08-15 20:36:13 +00:00
|
|
|
close $rfh;
|
|
|
|
close $parentsock;
|
|
|
|
open(STDOUT, '>&', STDERR) or error "cannot open STDOUT: $!";
|
2020-01-08 14:41:49 +00:00
|
|
|
|
2020-08-15 20:36:13 +00:00
|
|
|
setup($options);
|
2020-01-08 14:41:49 +00:00
|
|
|
|
2020-08-15 20:36:13 +00:00
|
|
|
print $childsock (pack('n', 0) . 'adios');
|
|
|
|
$childsock->flush();
|
2020-01-08 14:41:49 +00:00
|
|
|
|
2020-08-15 20:36:13 +00:00
|
|
|
close $childsock;
|
|
|
|
|
2020-08-18 10:08:55 +00:00
|
|
|
close $nblkreader;
|
|
|
|
if (!$options->{dryrun} && $format eq 'ext2') {
|
|
|
|
my $numblocks = approx_disk_usage($options->{root});
|
|
|
|
print $nblkwriter "$numblocks\n";
|
|
|
|
$nblkwriter->flush();
|
|
|
|
}
|
|
|
|
close $nblkwriter;
|
|
|
|
|
2020-08-15 20:36:13 +00:00
|
|
|
if ($options->{dryrun}) {
|
|
|
|
info "simulate creating tarball...";
|
|
|
|
} elsif (any { $_ eq $format } ('tar', 'squashfs', 'ext2')) {
|
|
|
|
info "creating tarball...";
|
|
|
|
|
|
|
|
# redirect tar output to the writing end of the pipe so
|
|
|
|
# that the parent process can capture the output
|
|
|
|
open(STDOUT, '>&', $wfh) or error "cannot open STDOUT: $!";
|
|
|
|
|
|
|
|
# Add ./dev as the first entries of the tar file.
|
|
|
|
# We cannot add them after calling tar, because there is no
|
|
|
|
# way to prevent tar from writing NULL entries at the end.
|
|
|
|
if (any { $_ eq 'output/dev' } @{ $options->{skip} }) {
|
|
|
|
info "skipping output/dev as requested";
|
|
|
|
} else {
|
|
|
|
print $devtar;
|
2020-01-08 14:41:49 +00:00
|
|
|
}
|
2020-08-15 20:36:13 +00:00
|
|
|
|
|
|
|
# pack everything except ./dev
|
|
|
|
0 == system('tar', @taropts, '-C', $options->{root}, '.')
|
|
|
|
or error "tar failed: $?";
|
|
|
|
|
|
|
|
info "done";
|
|
|
|
} elsif ($format eq 'directory') {
|
|
|
|
# nothing to do
|
2020-01-08 14:41:49 +00:00
|
|
|
} else {
|
2020-08-15 20:36:13 +00:00
|
|
|
error "unknown format: $format";
|
2020-01-08 14:41:49 +00:00
|
|
|
}
|
|
|
|
|
2020-08-15 20:36:13 +00:00
|
|
|
exit 0;
|
|
|
|
},
|
|
|
|
\@idmap
|
|
|
|
);
|
|
|
|
} elsif (
|
|
|
|
any { $_ eq $options->{mode} }
|
|
|
|
('root', 'fakechroot', 'proot', 'chrootless')
|
|
|
|
) {
|
|
|
|
$pid = fork() // error "fork() failed: $!";
|
|
|
|
if ($pid == 0) {
|
|
|
|
local $SIG{'INT'} = 'DEFAULT';
|
|
|
|
local $SIG{'HUP'} = 'DEFAULT';
|
|
|
|
local $SIG{'PIPE'} = 'DEFAULT';
|
|
|
|
local $SIG{'TERM'} = 'DEFAULT';
|
2020-01-08 14:41:49 +00:00
|
|
|
|
2020-08-15 20:36:13 +00:00
|
|
|
# unblock all delayed signals (and possibly handle them)
|
|
|
|
POSIX::sigprocmask(SIG_UNBLOCK, $sigset)
|
|
|
|
or error "Can't unblock signals: $!";
|
|
|
|
|
|
|
|
close $rfh;
|
|
|
|
close $parentsock;
|
|
|
|
open(STDOUT, '>&', STDERR) or error "cannot open STDOUT: $!";
|
|
|
|
|
|
|
|
setup($options);
|
|
|
|
|
|
|
|
print $childsock (pack('n', 0) . 'adios');
|
|
|
|
$childsock->flush();
|
|
|
|
|
|
|
|
close $childsock;
|
|
|
|
|
2020-08-18 10:08:55 +00:00
|
|
|
close $nblkreader;
|
|
|
|
if (!$options->{dryrun} && $format eq 'ext2') {
|
|
|
|
my $numblocks = approx_disk_usage($options->{root});
|
|
|
|
print $nblkwriter $numblocks;
|
|
|
|
$nblkwriter->flush();
|
|
|
|
}
|
|
|
|
close $nblkwriter;
|
|
|
|
|
2020-08-15 20:36:13 +00:00
|
|
|
if ($options->{dryrun}) {
|
|
|
|
info "simulate creating tarball...";
|
|
|
|
} elsif (any { $_ eq $format } ('tar', 'squashfs', 'ext2')) {
|
|
|
|
info "creating tarball...";
|
|
|
|
|
|
|
|
# redirect tar output to the writing end of the pipe so that
|
|
|
|
# the parent process can capture the output
|
|
|
|
open(STDOUT, '>&', $wfh) or error "cannot open STDOUT: $!";
|
|
|
|
|
|
|
|
# Add ./dev as the first entries of the tar file.
|
|
|
|
# We cannot add them after calling tar, because there is no way
|
|
|
|
# to prevent tar from writing NULL entries at the end.
|
|
|
|
if (any { $_ eq 'output/dev' } @{ $options->{skip} }) {
|
|
|
|
info "skipping output/dev as requested";
|
|
|
|
} else {
|
|
|
|
print $devtar;
|
2020-01-08 14:41:49 +00:00
|
|
|
}
|
2020-08-15 20:36:13 +00:00
|
|
|
|
|
|
|
if ($options->{mode} eq 'fakechroot') {
|
|
|
|
# Fakechroot requires tar to run inside the chroot or
|
|
|
|
# otherwise absolute symlinks will include the path to the
|
|
|
|
# root directory
|
|
|
|
0 == system('/usr/sbin/chroot', $options->{root}, 'tar',
|
|
|
|
@taropts, '-C', '/', '.')
|
|
|
|
or error "tar failed: $?";
|
|
|
|
} elsif ($options->{mode} eq 'proot') {
|
|
|
|
# proot requires tar to run inside proot or otherwise
|
|
|
|
# permissions will be completely off
|
|
|
|
my @qemuopt = ();
|
|
|
|
if (defined $options->{qemu}) {
|
|
|
|
push @qemuopt, "--qemu=qemu-$options->{qemu}";
|
|
|
|
push @taropts, "--exclude=./host-rootfs";
|
2020-04-09 16:33:05 +00:00
|
|
|
}
|
2020-08-15 20:36:13 +00:00
|
|
|
0 == system('proot', '--root-id',
|
|
|
|
"--rootfs=$options->{root}", '--cwd=/', @qemuopt,
|
|
|
|
'tar', @taropts, '-C', '/', '.')
|
|
|
|
or error "tar failed: $?";
|
|
|
|
} elsif (
|
|
|
|
any { $_ eq $options->{mode} }
|
|
|
|
('root', 'chrootless')
|
|
|
|
) {
|
|
|
|
# If the chroot directory is not owned by the root user,
|
|
|
|
# then we assume that no measure was taken to fake root
|
|
|
|
# permissions. Since the final tarball should contain
|
|
|
|
# entries with root ownership, we instruct tar to do so.
|
|
|
|
my @owneropts = ();
|
|
|
|
if ((stat $options->{root})[4] != 0) {
|
|
|
|
push @owneropts, '--owner=0', '--group=0',
|
|
|
|
'--numeric-owner';
|
|
|
|
}
|
|
|
|
0 == system('tar', @taropts, @owneropts, '-C',
|
|
|
|
$options->{root}, '.')
|
|
|
|
or error "tar failed: $?";
|
|
|
|
} else {
|
|
|
|
error "unknown mode: $options->{mode}";
|
2020-04-09 16:33:05 +00:00
|
|
|
}
|
2020-08-15 20:36:13 +00:00
|
|
|
|
|
|
|
info "done";
|
|
|
|
} elsif ($format eq 'directory') {
|
|
|
|
# nothing to do
|
2020-01-08 14:41:49 +00:00
|
|
|
} else {
|
2020-08-15 20:36:13 +00:00
|
|
|
error "unknown format: $format";
|
2020-01-08 14:41:49 +00:00
|
|
|
}
|
2020-08-15 20:36:13 +00:00
|
|
|
|
|
|
|
exit 0;
|
2020-01-08 14:41:49 +00:00
|
|
|
}
|
2020-08-15 20:36:13 +00:00
|
|
|
} else {
|
|
|
|
error "unknown mode: $options->{mode}";
|
|
|
|
}
|
|
|
|
|
|
|
|
# parent
|
|
|
|
|
|
|
|
my $got_signal = 0;
|
|
|
|
my $waiting_for = "setup";
|
|
|
|
my $ignore = sub {
|
|
|
|
$got_signal = shift;
|
|
|
|
info "main() received signal $got_signal: waiting for $waiting_for...";
|
2019-12-09 09:40:51 +00:00
|
|
|
};
|
2020-08-15 20:36:13 +00:00
|
|
|
|
|
|
|
local $SIG{'INT'} = $ignore;
|
|
|
|
local $SIG{'HUP'} = $ignore;
|
|
|
|
local $SIG{'PIPE'} = $ignore;
|
|
|
|
local $SIG{'TERM'} = $ignore;
|
|
|
|
|
|
|
|
# unblock all delayed signals (and possibly handle them)
|
|
|
|
POSIX::sigprocmask(SIG_UNBLOCK, $sigset)
|
|
|
|
or error "Can't unblock signals: $!";
|
|
|
|
|
|
|
|
close $wfh;
|
|
|
|
close $childsock;
|
|
|
|
|
|
|
|
debug "starting to listen for hooks";
|
|
|
|
# handle special hook commands via parentsock
|
|
|
|
my $lpid = fork() // error "fork() failed: $!";
|
|
|
|
if ($lpid == 0) {
|
|
|
|
# whatever the script writes on stdout is sent to the
|
|
|
|
# socket
|
|
|
|
# whatever is written to the socket, send to stdin
|
|
|
|
open(STDOUT, '>&', $parentsock)
|
|
|
|
or error "cannot open STDOUT: $!";
|
|
|
|
open(STDIN, '<&', $parentsock)
|
|
|
|
or error "cannot open STDIN: $!";
|
|
|
|
|
|
|
|
# we execute ourselves under sh to avoid having to
|
|
|
|
# implement a clever parser of the quoting used in $script
|
|
|
|
# for the filenames
|
|
|
|
my @prefix = ();
|
|
|
|
if ($is_covering) {
|
|
|
|
@prefix = ($EXECUTABLE_NAME, "-MDevel::Cover=-silent,-nogcov");
|
|
|
|
}
|
|
|
|
exec @prefix, $PROGRAM_NAME, "--hook-listener";
|
|
|
|
}
|
|
|
|
waitpid($lpid, 0);
|
|
|
|
if ($? != 0) {
|
2020-01-08 14:41:49 +00:00
|
|
|
# we cannot die here because that would leave the other thread
|
|
|
|
# running without a parent
|
|
|
|
warning "listening on child socket failed: $@";
|
|
|
|
$exitstatus = 1;
|
2019-12-09 09:40:51 +00:00
|
|
|
}
|
|
|
|
debug "finish to listen for hooks";
|
|
|
|
|
|
|
|
close $parentsock;
|
2018-12-28 04:09:08 +00:00
|
|
|
|
2020-08-18 10:08:55 +00:00
|
|
|
my $numblocks = 0;
|
|
|
|
close $nblkwriter;
|
|
|
|
if (!$options->{dryrun} && $format eq 'ext2') {
|
|
|
|
chomp($numblocks = <$nblkreader>);
|
|
|
|
}
|
|
|
|
close $nblkreader;
|
|
|
|
|
2020-01-10 10:44:15 +00:00
|
|
|
if ($options->{dryrun}) {
|
|
|
|
# nothing to do
|
2020-04-09 16:33:05 +00:00
|
|
|
} elsif ($format eq 'directory') {
|
|
|
|
# nothing to do
|
|
|
|
} elsif (any { $_ eq $format } ('tar', 'squashfs', 'ext2')) {
|
2020-01-08 14:41:49 +00:00
|
|
|
# we use eval() so that error() doesn't take this process down and
|
|
|
|
# thus leaves the setup() process without a parent
|
|
|
|
eval {
|
|
|
|
if ($options->{target} eq '-') {
|
|
|
|
if (!copy($rfh, *STDOUT)) {
|
|
|
|
error "cannot copy to standard output: $!";
|
|
|
|
}
|
|
|
|
} else {
|
2020-04-09 16:33:05 +00:00
|
|
|
if ( $format eq 'squashfs'
|
|
|
|
or $format eq 'ext2'
|
|
|
|
or defined $tar_compressor) {
|
2020-01-08 14:41:49 +00:00
|
|
|
my @argv = ();
|
2020-04-09 16:33:05 +00:00
|
|
|
if ($format eq 'squashfs') {
|
2020-01-08 14:41:49 +00:00
|
|
|
push @argv, 'tar2sqfs',
|
2020-01-10 10:44:15 +00:00
|
|
|
'--quiet', '--no-skip', '--force',
|
|
|
|
'--exportable',
|
2020-01-08 16:44:07 +00:00
|
|
|
'--compressor', 'xz',
|
|
|
|
'--block-size', '1048576',
|
|
|
|
$options->{target};
|
2020-04-09 16:33:05 +00:00
|
|
|
} elsif ($format eq 'ext2') {
|
|
|
|
if ($numblocks <= 0) {
|
|
|
|
error "invalid number of blocks: $numblocks";
|
|
|
|
}
|
|
|
|
push @argv, 'genext2fs', '-B', 1024, '-b', $numblocks,
|
2020-07-09 05:34:03 +00:00
|
|
|
'-i', '16384', '-a', '-', $options->{target};
|
2020-04-09 16:33:05 +00:00
|
|
|
} elsif ($format eq 'tar') {
|
2020-01-08 14:41:49 +00:00
|
|
|
push @argv, @{$tar_compressor};
|
2020-04-09 16:33:05 +00:00
|
|
|
} else {
|
|
|
|
error "unknown format: $format";
|
2020-01-08 14:41:49 +00:00
|
|
|
}
|
2020-01-08 16:44:07 +00:00
|
|
|
POSIX::sigprocmask(SIG_BLOCK, $sigset)
|
|
|
|
or error "Can't block signals: $!";
|
2020-01-08 14:41:49 +00:00
|
|
|
my $cpid = fork() // error "fork() failed: $!";
|
|
|
|
if ($cpid == 0) {
|
|
|
|
# child: default signal handlers
|
2020-01-09 07:39:40 +00:00
|
|
|
local $SIG{'INT'} = 'DEFAULT';
|
|
|
|
local $SIG{'HUP'} = 'DEFAULT';
|
|
|
|
local $SIG{'PIPE'} = 'DEFAULT';
|
|
|
|
local $SIG{'TERM'} = 'DEFAULT';
|
2020-01-08 14:41:49 +00:00
|
|
|
|
2020-01-08 15:23:34 +00:00
|
|
|
# unblock all delayed signals (and possibly handle
|
|
|
|
# them)
|
2020-01-08 16:44:07 +00:00
|
|
|
POSIX::sigprocmask(SIG_UNBLOCK, $sigset)
|
|
|
|
or error "Can't unblock signals: $!";
|
2020-01-08 14:41:49 +00:00
|
|
|
|
2020-04-09 16:33:05 +00:00
|
|
|
# redirect stdout to file or /dev/null
|
|
|
|
if ($format eq 'squashfs' or $format eq 'ext2') {
|
2020-01-08 16:44:07 +00:00
|
|
|
open(STDOUT, '>', '/dev/null')
|
|
|
|
or error "cannot open /dev/null for writing: $!";
|
2020-04-09 16:33:05 +00:00
|
|
|
} elsif ($format eq 'tar') {
|
2020-01-08 16:44:07 +00:00
|
|
|
open(STDOUT, '>', $options->{target})
|
|
|
|
or error
|
|
|
|
"cannot open $options->{target} for writing: $!";
|
2020-04-09 16:33:05 +00:00
|
|
|
} else {
|
|
|
|
error "unknown format: $format";
|
2020-01-08 14:41:49 +00:00
|
|
|
}
|
2020-01-08 16:44:07 +00:00
|
|
|
open(STDIN, '<&', $rfh)
|
|
|
|
or error "cannot open file handle for reading: $!";
|
2020-01-09 07:39:40 +00:00
|
|
|
eval { Devel::Cover::set_coverage("none") }
|
2020-01-08 16:44:07 +00:00
|
|
|
if $is_covering;
|
|
|
|
exec { $argv[0] } @argv
|
|
|
|
or
|
|
|
|
error("cannot exec " . (join " ", @argv) . ": $!");
|
2020-01-08 14:41:49 +00:00
|
|
|
}
|
2020-01-08 16:44:07 +00:00
|
|
|
POSIX::sigprocmask(SIG_UNBLOCK, $sigset)
|
|
|
|
or error "Can't unblock signals: $!";
|
2020-01-08 14:41:49 +00:00
|
|
|
waitpid $cpid, 0;
|
|
|
|
if ($? != 0) {
|
2020-05-02 21:55:34 +00:00
|
|
|
error("failed to run " . (join " ", @argv));
|
2020-01-08 14:41:49 +00:00
|
|
|
}
|
|
|
|
} else {
|
2020-01-08 16:44:07 +00:00
|
|
|
if (!copy($rfh, $options->{target})) {
|
2020-01-08 14:41:49 +00:00
|
|
|
error "cannot copy to $options->{target}: $!";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
};
|
|
|
|
if ($@) {
|
|
|
|
# we cannot die here because that would leave the other thread
|
|
|
|
# running without a parent
|
|
|
|
warning "creating tarball failed: $@";
|
|
|
|
$exitstatus = 1;
|
|
|
|
}
|
2020-04-09 16:33:05 +00:00
|
|
|
} else {
|
|
|
|
error "unknown format: $format";
|
2018-09-18 09:20:24 +00:00
|
|
|
}
|
|
|
|
close($rfh);
|
|
|
|
waitpid $pid, 0;
|
2018-09-21 17:06:47 +00:00
|
|
|
if ($? != 0) {
|
2020-01-08 14:41:49 +00:00
|
|
|
$exitstatus = 1;
|
2018-09-21 17:06:47 +00:00
|
|
|
}
|
2018-09-18 09:20:24 +00:00
|
|
|
|
2019-01-13 09:17:46 +00:00
|
|
|
# change signal handler message
|
|
|
|
$waiting_for = "cleanup";
|
|
|
|
|
2020-04-09 16:33:05 +00:00
|
|
|
if ($format eq 'directory') {
|
|
|
|
# nothing to do
|
|
|
|
} elsif (any { $_ eq $format } ('tar', 'squashfs', 'ext2')) {
|
|
|
|
if (!-e $options->{root}) {
|
|
|
|
error "$options->{root} does not exist";
|
|
|
|
}
|
2020-01-08 14:41:49 +00:00
|
|
|
info "removing tempdir $options->{root}...";
|
|
|
|
if ($options->{mode} eq 'unshare') {
|
|
|
|
# We don't have permissions to remove the directory outside
|
|
|
|
# the unshared namespace, so we remove it here.
|
|
|
|
# Since this is still inside the unshared namespace, there is
|
|
|
|
# no risk of removing anything important.
|
2020-01-09 07:39:40 +00:00
|
|
|
$pid = get_unshare_cmd(
|
|
|
|
sub {
|
|
|
|
# File::Path will produce the error "cannot stat initial
|
|
|
|
# working directory" if the working directory cannot be
|
|
|
|
# accessed by the unprivileged unshared user. Thus, we
|
|
|
|
# first navigate to the parent of the root directory.
|
|
|
|
chdir "$options->{root}/.."
|
|
|
|
or error "unable to chdir() to parent directory of"
|
|
|
|
. " $options->{root}: $!";
|
|
|
|
remove_tree($options->{root}, { error => \my $err });
|
|
|
|
if (@$err) {
|
|
|
|
for my $diag (@$err) {
|
|
|
|
my ($file, $message) = %$diag;
|
|
|
|
if ($file eq '') {
|
|
|
|
warning "general error: $message";
|
|
|
|
} else {
|
2020-03-07 22:42:19 +00:00
|
|
|
# the unshared process might not have
|
|
|
|
# sufficient permissions to remove the root
|
|
|
|
# directory. We attempt its removal later, so
|
|
|
|
# don't warn if its removal fails now.
|
|
|
|
if ($file ne $options->{root}) {
|
|
|
|
warning
|
|
|
|
"problem unlinking $file: $message";
|
|
|
|
}
|
2020-01-09 07:39:40 +00:00
|
|
|
}
|
2020-01-08 14:41:49 +00:00
|
|
|
}
|
|
|
|
}
|
2020-01-09 07:39:40 +00:00
|
|
|
},
|
|
|
|
\@idmap
|
|
|
|
);
|
2020-01-08 14:41:49 +00:00
|
|
|
waitpid $pid, 0;
|
|
|
|
$? == 0 or error "remove_tree failed";
|
2020-03-07 22:42:19 +00:00
|
|
|
# in unshare mode, the toplevel directory might've been created in
|
|
|
|
# a directory that the unshared user cannot change and thus cannot
|
|
|
|
# delete. If the root directory still exists after remove_tree
|
|
|
|
# above, we attempt its removal again outside as the normal user.
|
|
|
|
if (-e $options->{root}) {
|
|
|
|
rmdir "$options->{root}"
|
|
|
|
or error "cannot rmdir $options->{root}: $!";
|
|
|
|
}
|
2020-01-08 16:44:07 +00:00
|
|
|
} elsif (
|
|
|
|
any { $_ eq $options->{mode} }
|
|
|
|
('root', 'fakechroot', 'proot', 'chrootless')
|
|
|
|
) {
|
2020-01-08 14:41:49 +00:00
|
|
|
# without unshare, we use the system's rm to recursively remove the
|
|
|
|
# temporary directory just to make sure that we do not accidentally
|
|
|
|
# remove more than we should by using --one-file-system.
|
|
|
|
#
|
|
|
|
# --interactive=never is needed when in proot mode, the
|
|
|
|
# write-protected file /apt/apt.conf.d/01autoremove-kernels is to
|
|
|
|
# be removed.
|
2020-01-08 16:44:07 +00:00
|
|
|
0 == system('rm', '--interactive=never', '--recursive',
|
|
|
|
'--preserve-root', '--one-file-system', $options->{root})
|
|
|
|
or error "rm failed: $!";
|
2020-01-08 14:41:49 +00:00
|
|
|
} else {
|
|
|
|
error "unknown mode: $options->{mode}";
|
|
|
|
}
|
2020-04-09 16:33:05 +00:00
|
|
|
} else {
|
|
|
|
error "unknown format: $format";
|
2018-09-18 09:20:24 +00:00
|
|
|
}
|
2018-09-21 17:06:47 +00:00
|
|
|
|
2019-01-13 09:17:46 +00:00
|
|
|
if ($got_signal) {
|
2020-01-08 14:41:49 +00:00
|
|
|
$exitstatus = 1;
|
2019-01-13 09:17:46 +00:00
|
|
|
}
|
|
|
|
|
2020-08-25 14:06:05 +00:00
|
|
|
if ($exitstatus == 0) {
|
|
|
|
my $duration = Time::HiRes::time - $before;
|
|
|
|
info "success in " . (sprintf "%.04f", $duration) . " seconds";
|
|
|
|
}
|
|
|
|
|
2018-09-21 17:06:47 +00:00
|
|
|
exit $exitstatus;
|
2018-09-18 09:20:24 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
main();
|
|
|
|
|
|
|
|
__END__
|
|
|
|
|
|
|
|
=head1 NAME
|
|
|
|
|
|
|
|
mmdebstrap - multi-mirror Debian chroot creation
|
|
|
|
|
|
|
|
=head1 SYNOPSIS
|
|
|
|
|
|
|
|
B<mmdebstrap> [B<OPTION...>] [I<SUITE> [I<TARGET> [I<MIRROR>...]]]
|
|
|
|
|
|
|
|
=head1 DESCRIPTION
|
|
|
|
|
|
|
|
B<mmdebstrap> creates a Debian chroot of I<SUITE> into I<TARGET> from one or
|
|
|
|
more I<MIRROR>s. It is meant as an alternative to the debootstrap tool (see
|
|
|
|
section B<DEBOOTSTRAP>). In contrast to debootstrap it uses apt to resolve
|
|
|
|
dependencies and is thus able to use more than one mirror and resolve more
|
|
|
|
complex dependencies.
|
|
|
|
|
2019-08-27 22:53:04 +00:00
|
|
|
If no I<MIRROR> option is provided, L<http://deb.debian.org/debian> is used.
|
|
|
|
If I<SUITE> is a stable release name and no I<MIRROR> is specified, then
|
|
|
|
mirrors for updates and security are automatically added. If a I<MIRROR>
|
|
|
|
option starts with "deb " or "deb-src " then it is used as a one-line-style
|
|
|
|
format entry for apt's sources.list inside the chroot. If a I<MIRROR> option
|
|
|
|
contains a "://" then it is interpreted as a mirror URI and the apt line
|
|
|
|
inside the chroot is assembled as "deb [arch=A] B C D" where A is the host's
|
|
|
|
native architecture, B is the I<MIRROR>, C is the given I<SUITE> and D is the
|
|
|
|
components given via B<--components> (defaults to "main"). If a I<MIRROR>
|
|
|
|
option happens to be an existing file, then its contents are pasted into the
|
|
|
|
chroot's sources.list. This can be used to supply a deb822 style
|
|
|
|
sources.list. If I<MIRROR> is C<-> then standard input is pasted into the
|
|
|
|
chroot's sources.list. More than one mirror can be specified and are appended
|
|
|
|
to the chroot's sources.list in the given order. If any mirror contains a
|
|
|
|
https URI, then the packages apt-transport-https and ca-certificates will be
|
|
|
|
installed inside the chroot. If any mirror contains a tor+xxx URI, then the
|
|
|
|
apt-transport-tor package will be installed inside the chroot.
|
2018-09-18 09:20:24 +00:00
|
|
|
|
2020-03-22 13:10:06 +00:00
|
|
|
The optional I<TARGET> argument can either be the path to a directory, the path
|
2020-04-09 16:33:05 +00:00
|
|
|
to a tarball filename, the path to a squashfs image, the path to an ext2 image,
|
|
|
|
a FIFO, a character special device, or C<->. Without the B<--format> option,
|
|
|
|
I<TARGET> will be used to choose the format. See the section B<FORMATS> for
|
|
|
|
more information. If no I<TARGET> was specified or if I<TARGET> is C<->, an
|
|
|
|
uncompressed tarball will be sent to standard output.
|
2018-09-18 09:20:24 +00:00
|
|
|
|
|
|
|
The I<SUITE> may be a valid release code name (eg, sid, stretch, jessie) or a
|
|
|
|
symbolic name (eg, unstable, testing, stable, oldstable). Any suite name that
|
|
|
|
works with apt on the given mirror will work. If no I<SUITE> was specified,
|
|
|
|
then a single I<MIRROR> C<-> is added and thus the information of the desired
|
|
|
|
suite has to come from standard input as part of a valid apt sources.list
|
|
|
|
file.
|
|
|
|
|
2019-02-23 07:43:15 +00:00
|
|
|
All status output is printed to standard error unless B<--logfile> is used to
|
|
|
|
redirect it to a file or B<--quiet> or B<--silent> is used to suppress any
|
|
|
|
output on standard error. Help and version information will be printed to
|
|
|
|
standard error with the B<--help> and B<--version> options, respectively.
|
|
|
|
Otherwise, an uncompressed tarball might be sent to standard output if
|
|
|
|
I<TARGET> is C<-> or if no I<TARGET> was specified.
|
|
|
|
|
2018-09-18 09:20:24 +00:00
|
|
|
=head1 OPTIONS
|
|
|
|
|
2019-02-20 17:00:52 +00:00
|
|
|
Options are case insensitive. Short options may be bundled. Long options
|
|
|
|
require a double dash and may be abbreviated to uniqueness.
|
|
|
|
|
2018-09-18 09:20:24 +00:00
|
|
|
=over 8
|
|
|
|
|
|
|
|
=item B<-h,--help>
|
|
|
|
|
2019-11-29 07:45:08 +00:00
|
|
|
Print synopsis and options of this man page and exit.
|
|
|
|
|
|
|
|
=item B<--man>
|
|
|
|
|
|
|
|
Show the full man page as generated from Perl POD in a pager. This requires
|
|
|
|
the perldoc program from the perl-doc package. This is the same as running:
|
|
|
|
|
|
|
|
pod2man /usr/bin/mmdebstrap | man -l -
|
2018-09-18 09:20:24 +00:00
|
|
|
|
2019-02-23 07:55:31 +00:00
|
|
|
=item B<--version>
|
|
|
|
|
|
|
|
Print the B<mmdebstrap> version and exit.
|
|
|
|
|
2019-01-08 10:27:56 +00:00
|
|
|
=item B<--variant>=I<name>
|
2018-09-18 09:20:24 +00:00
|
|
|
|
2019-01-08 10:27:56 +00:00
|
|
|
Choose which package set to install. Valid variant I<name>s are B<extract>,
|
2018-10-22 15:05:56 +00:00
|
|
|
B<custom>, B<essential>, B<apt>, B<required>, B<minbase>, B<buildd>,
|
|
|
|
B<important>, B<debootstrap>, B<->, and B<standard>. The default variant is
|
2020-01-21 12:17:31 +00:00
|
|
|
B<debootstrap>. See the section B<VARIANTS> for more information.
|
2018-09-18 09:20:24 +00:00
|
|
|
|
2019-01-08 10:27:56 +00:00
|
|
|
=item B<--mode>=I<name>
|
2018-09-18 09:20:24 +00:00
|
|
|
|
|
|
|
Choose how to perform the chroot operation and create a filesystem with
|
2020-01-08 16:19:30 +00:00
|
|
|
ownership information different from the current user. Valid mode I<name>s are
|
2020-01-10 11:05:01 +00:00
|
|
|
B<auto>, B<sudo>, B<root>, B<unshare>, B<fakeroot>, B<fakechroot>, B<proot> and
|
|
|
|
B<chrootless>. The default mode is B<auto>. See the section B<MODES> for more
|
|
|
|
information.
|
2018-09-18 09:20:24 +00:00
|
|
|
|
2020-04-09 16:33:05 +00:00
|
|
|
=item B<--format>=I<name>
|
|
|
|
|
|
|
|
Choose the output format. Valid format I<name>s are B<auto>, B<directory>,
|
|
|
|
B<tar>, B<squashfs>, and B<ext2>. The default format is B<auto>. See the
|
|
|
|
section B<FORMATS> for more information.
|
|
|
|
|
2019-01-08 10:27:56 +00:00
|
|
|
=item B<--aptopt>=I<option>|I<file>
|
2018-09-18 09:20:24 +00:00
|
|
|
|
2020-08-18 07:37:08 +00:00
|
|
|
Pass arbitrary I<option>s to apt. Will be permanently added to
|
|
|
|
F</etc/apt/apt.conf.d/99mmdebstrap> inside the chroot. Use hooks for temporary
|
|
|
|
configuration options. Can be specified multiple times. Each I<option> will be
|
|
|
|
appended to 99mmdebstrap. A semicolon will be added at the end of the option if
|
|
|
|
necessary. If the command line argument is an existing I<file>, the content of
|
|
|
|
the file will be appended to 99mmdebstrap verbatim.
|
2018-09-18 09:20:24 +00:00
|
|
|
|
2019-03-25 13:50:41 +00:00
|
|
|
Example: This is necessary for allowing old timestamps from snapshot.debian.org
|
2018-09-18 09:20:24 +00:00
|
|
|
|
2018-12-27 13:42:29 +00:00
|
|
|
--aptopt='Acquire::Check-Valid-Until "false"'
|
2019-03-25 13:50:41 +00:00
|
|
|
|
|
|
|
Example: Settings controlling download of package description translations
|
|
|
|
|
2018-12-27 13:42:29 +00:00
|
|
|
--aptopt='Acquire::Languages { "environment"; "en"; }'
|
|
|
|
--aptopt='Acquire::Languages "none"'
|
2019-03-25 13:50:41 +00:00
|
|
|
|
2020-05-01 21:37:38 +00:00
|
|
|
Example: Enable installing Recommends (by default B<mmdebstrap> doesn't)
|
2019-03-25 13:50:41 +00:00
|
|
|
|
2018-12-27 13:42:29 +00:00
|
|
|
--aptopt='Apt::Install-Recommends "true"'
|
2019-03-25 13:50:41 +00:00
|
|
|
|
|
|
|
Example: Configure apt-cacher or apt-cacher-ng as an apt proxy
|
|
|
|
|
2019-01-07 12:19:38 +00:00
|
|
|
--aptopt='Acquire::http { Proxy "http://127.0.0.1:3142"; }'
|
2019-03-25 13:50:41 +00:00
|
|
|
|
|
|
|
Example: For situations in which the apt sandbox user cannot access the chroot
|
|
|
|
|
2019-02-28 10:54:03 +00:00
|
|
|
--aptopt='APT::Sandbox::User "root"'
|
2018-09-18 09:20:24 +00:00
|
|
|
|
2019-03-25 13:50:41 +00:00
|
|
|
Example: Minimizing the number of packages installed from experimental
|
|
|
|
|
|
|
|
--aptopt='APT::Solver "aspcud"'
|
2020-01-08 16:19:30 +00:00
|
|
|
--aptopt='APT::Solver::aspcud::Preferences
|
|
|
|
"-count(solution,APT-Release:=/a=experimental/),-removed,-changed,-new"'
|
2019-03-25 13:50:41 +00:00
|
|
|
|
2019-10-28 15:29:38 +00:00
|
|
|
=item B<--keyring>=I<file>|I<directory>
|
|
|
|
|
2019-12-03 09:16:39 +00:00
|
|
|
Change the default keyring to use by apt. By default, F</etc/apt/trusted.gpg>
|
|
|
|
and F</etc/apt/trusted.gpg.d> are used. Depending on whether a file or
|
|
|
|
directory is passed to this option, the former and latter default can be
|
|
|
|
changed, respectively. Since apt only supports a single keyring file and
|
|
|
|
directory, respectively, you can B<not> use this option to pass multiple files
|
|
|
|
and/or directories. Using the C<--keyring> argument in the following way is
|
|
|
|
equal to keeping the default:
|
|
|
|
|
|
|
|
--keyring=/etc/apt/trusted.gpg --keyring=/etc/apt/trusted.gpg.d
|
|
|
|
|
|
|
|
If you need to pass multiple keyrings, use the C<signed-by> option when
|
|
|
|
specifying the mirror like this:
|
|
|
|
|
|
|
|
mmdebstrap mysuite out.tar "deb [signed-by=/path/to/key.gpg] http://..."
|
|
|
|
|
|
|
|
The C<signed-by> option will automatically be added to the final
|
|
|
|
C<sources.list> if the keyring required for the selected I<SUITE> is not yet
|
|
|
|
trusted by apt. Automatically adding the C<signed-by> option in these cases
|
|
|
|
requires C<gpg> to be installed. If C<gpg> and C<ubuntu-archive-keyring> are
|
|
|
|
installed, then you can create a Ubuntu Bionic chroot on Debian like this:
|
|
|
|
|
|
|
|
mmdebstrap bionic ubuntu-bionic.tar
|
|
|
|
|
|
|
|
The resulting chroot will have a C<sources.list> with a C<signed-by> option
|
|
|
|
pointing to F</usr/share/keyrings/ubuntu-archive-keyring.gpg>.
|
2019-10-28 15:29:38 +00:00
|
|
|
|
2019-01-08 10:27:56 +00:00
|
|
|
=item B<--dpkgopt>=I<option>|I<file>
|
2018-09-18 09:20:24 +00:00
|
|
|
|
2020-08-18 07:37:08 +00:00
|
|
|
Pass arbitrary I<option>s to dpkg. Will be permanently added to
|
|
|
|
F</etc/dpkg/dpkg.cfg.d/99mmdebstrap> inside the chroot. Use hooks for temporary
|
|
|
|
configuration options. Can be specified multiple times. Each I<option> will be
|
|
|
|
appended to 99mmdebstrap. If the command line argument is an existing I<file>,
|
|
|
|
the content of the file will be appended to 99mmdebstrap verbatim.
|
2018-09-18 09:20:24 +00:00
|
|
|
|
2019-03-25 13:50:41 +00:00
|
|
|
Example: Exclude paths to reduce chroot size
|
|
|
|
|
|
|
|
--dpkgopt='path-exclude=/usr/share/man/*'
|
|
|
|
--dpkgopt='path-include=/usr/share/man/man[1-9]/*'
|
|
|
|
--dpkgopt='path-exclude=/usr/share/locale/*'
|
|
|
|
--dpkgopt='path-include=/usr/share/locale/locale.alias'
|
|
|
|
--dpkgopt='path-exclude=/usr/share/doc/*'
|
|
|
|
--dpkgopt='path-include=/usr/share/doc/*/copyright'
|
|
|
|
--dpkgopt='path-include=/usr/share/doc/*/changelog.Debian.*'
|
2020-06-23 21:12:16 +00:00
|
|
|
--dpkgopt='path-exclude=/usr/share/{doc,info,man,omf,help,gnome/help}/*'
|
2018-09-18 09:20:24 +00:00
|
|
|
|
2019-01-08 10:27:56 +00:00
|
|
|
=item B<--include>=I<pkg1>[,I<pkg2>,...]
|
2018-09-18 09:20:24 +00:00
|
|
|
|
2019-10-28 14:35:29 +00:00
|
|
|
Comma or whitespace separated list of packages which will be installed in
|
|
|
|
addition to the packages installed by the specified variant. The direct and
|
|
|
|
indirect hard dependencies will also be installed. The behaviour of this
|
|
|
|
option depends on the selected variant. The B<extract> and B<custom> variants
|
|
|
|
install no packages by default, so for these variants, the packages specified
|
|
|
|
by this option will be the only ones that get either extracted or installed by
|
|
|
|
dpkg, respectively. For all other variants, apt is used to install the
|
|
|
|
additional packages. The B<essential> variant does not include apt and thus,
|
|
|
|
the include option will only work when the B<chrootless> mode is selected and
|
|
|
|
thus apt from the outside can be used. Package names are directly passed to
|
|
|
|
apt and thus, you can use apt features like C<pkg/suite>, C<pkg=version>,
|
|
|
|
C<pkg-> or use a glob or regex for C<pkg>. See apt(8) for the supported
|
|
|
|
syntax. The option can be specified multiple times and the packages are
|
|
|
|
concatenated in the order in which they are given on the command line. If
|
|
|
|
later list items are repeated, then they get dropped so that the resulting
|
|
|
|
package list is free of duplicates. So the following are equivalent:
|
|
|
|
|
|
|
|
--include="pkg1/stable pkg2=1.0 pkg3-"
|
|
|
|
--include=pkg1/stable,pkg2=1.0,pkg3-
|
|
|
|
--incl=pkg1/stable --incl="pkg2=1.0 pkg3-" --incl=pkg2=1.0,pkg3-
|
2018-09-18 09:20:24 +00:00
|
|
|
|
2019-01-08 10:27:56 +00:00
|
|
|
=item B<--components>=I<comp1>[,I<comp2>,...]
|
2018-09-18 09:20:24 +00:00
|
|
|
|
2019-10-19 06:10:36 +00:00
|
|
|
Comma or whitespace separated list of components like main, contrib and
|
|
|
|
non-free which will be used for all URI-only I<MIRROR> arguments. The option
|
|
|
|
can be specified multiple times and the components are concatenated in the
|
|
|
|
order in which they are given on the command line. If later list items are
|
|
|
|
repeated, then they get dropped so that the resulting component list is free
|
|
|
|
of duplicates. So the following are equivalent:
|
|
|
|
|
|
|
|
--components="main contrib non-free"
|
|
|
|
--components=main,contrib,non-free
|
|
|
|
--comp=main --comp="contrib non-free" --comp="main,non-free"
|
2018-09-18 09:20:24 +00:00
|
|
|
|
2019-01-08 10:27:56 +00:00
|
|
|
=item B<--architectures>=I<native>[,I<foreign1>,...]
|
2018-09-18 09:20:24 +00:00
|
|
|
|
2019-10-27 21:04:21 +00:00
|
|
|
Comma or whitespace separated list of architectures. The first architecture is
|
|
|
|
the I<native> architecture inside the chroot. The remaining architectures will
|
|
|
|
be added to the foreign dpkg architectures. Without this option, the I<native>
|
2019-01-08 10:27:56 +00:00
|
|
|
architecture of the chroot defaults to the native architecture of the system
|
2019-10-27 21:04:21 +00:00
|
|
|
running B<mmdebstrap>. The option can be specified multiple times and values
|
|
|
|
are concatenated. If later list items are repeated, then they get dropped so
|
|
|
|
that the resulting list is free of duplicates. So the following are
|
|
|
|
equivalent:
|
|
|
|
|
|
|
|
--architectures="amd64 armhf mipsel"
|
|
|
|
--architectures=amd64,armhf,mipsel
|
|
|
|
--arch=amd64 --arch="armhf mipsel" --arch=armhf,mipsel
|
2019-01-08 10:27:56 +00:00
|
|
|
|
2020-01-10 10:44:15 +00:00
|
|
|
=item B<--simulate>, B<--dry-run>
|
|
|
|
|
|
|
|
Run apt-get with B<--simulate>. Only the package cache is initialized but no
|
|
|
|
binary packages are downloaded or installed. Use this option to quickly check
|
|
|
|
whether a package selection within a certain suite and variant can in principle
|
|
|
|
be installed as far as their dependencies go. If the output is a tarball, then
|
|
|
|
no output is produced. If the output is a directory, then the directory will be
|
|
|
|
left populated with the skeleton files and directories necessary for apt to run
|
2020-06-23 21:14:37 +00:00
|
|
|
in it. No hooks are executed with B<--simulate> or B<--dry-run>.
|
2020-01-10 10:44:15 +00:00
|
|
|
|
2019-02-20 12:32:49 +00:00
|
|
|
=item B<--setup-hook>=I<command>
|
|
|
|
|
|
|
|
Execute arbitrary I<command>s right after initial setup (directory creation,
|
|
|
|
configuration of apt and dpkg, ...) but before any packages are downloaded or
|
|
|
|
installed. At that point, the chroot directory does not contain any
|
2019-12-09 09:40:51 +00:00
|
|
|
executables and thus cannot be chroot-ed into. See section B<HOOKS> for more
|
|
|
|
information.
|
2019-02-20 12:32:49 +00:00
|
|
|
|
2019-03-25 13:50:41 +00:00
|
|
|
Example: Setup merged-/usr via symlinks
|
2019-02-20 12:32:49 +00:00
|
|
|
|
2020-01-08 16:19:30 +00:00
|
|
|
--setup-hook='for d in bin sbin lib; do ln -s usr/$d "$1/$d";
|
|
|
|
mkdir -p "$1/usr/$d"; done'
|
2019-02-20 12:32:49 +00:00
|
|
|
|
2019-03-27 10:44:45 +00:00
|
|
|
Example: Setup chroot for installing a sub-essential busybox-based chroot with
|
2020-01-08 16:19:30 +00:00
|
|
|
--variant=custom
|
|
|
|
--include=dpkg,busybox,libc-bin,base-files,base-passwd,debianutils
|
2019-03-27 10:44:45 +00:00
|
|
|
|
|
|
|
--setup-hook='mkdir -p "$1/bin"'
|
2020-01-08 16:19:30 +00:00
|
|
|
--setup-hook='for p in awk cat chmod chown cp diff echo env grep less ln
|
|
|
|
mkdir mount rm rmdir sed sh sleep sort touch uname; do
|
|
|
|
ln -s busybox "$1/bin/$p"; done'
|
2019-03-27 10:44:45 +00:00
|
|
|
--setup-hook='echo root:x:0:0:root:/root:/bin/sh > "$1/etc/passwd"'
|
|
|
|
--setup-hook='printf "root:x:0:\nmail:x:8:\nutmp:x:43:\n" > "$1/etc/group"'
|
|
|
|
|
2020-06-23 21:14:37 +00:00
|
|
|
=item B<--extract-hook>=I<command>
|
|
|
|
|
|
|
|
Execute arbitrary I<command>s after the Essential:yes packages have been
|
|
|
|
extracted but before installing them. See section B<HOOKS> for more
|
|
|
|
information.
|
|
|
|
|
|
|
|
Example: Install busybox symlinks
|
|
|
|
|
|
|
|
--extract-hook='chroot "$1" busybox --install -s'
|
|
|
|
|
2019-02-20 12:32:49 +00:00
|
|
|
=item B<--essential-hook>=I<command>
|
|
|
|
|
|
|
|
Execute arbitrary I<command>s after the Essential:yes packages have been
|
|
|
|
installed but before installing the remaining packages. The hook is not
|
2019-12-09 09:40:51 +00:00
|
|
|
executed for the B<extract> and B<custom> variants. See section B<HOOKS> for
|
|
|
|
more information.
|
2019-02-20 12:32:49 +00:00
|
|
|
|
2019-03-25 13:50:41 +00:00
|
|
|
Example: Enable unattended upgrades
|
2019-02-20 12:32:49 +00:00
|
|
|
|
2020-01-08 16:19:30 +00:00
|
|
|
--essential-hook='echo unattended-upgrades
|
|
|
|
unattended-upgrades/enable_auto_updates boolean true
|
|
|
|
| chroot "$1" debconf-set-selections'
|
2019-03-25 13:50:41 +00:00
|
|
|
|
|
|
|
Example: Select Europe/Berlin as the timezone
|
|
|
|
|
2020-01-08 16:19:30 +00:00
|
|
|
--essential-hook='echo tzdata tzdata/Areas select Europe
|
|
|
|
| chroot "$1" debconf-set-selections'
|
|
|
|
--essential-hook='echo tzdata tzdata/Zones/Europe select Berlin
|
|
|
|
| chroot "$1" debconf-set-selections'
|
2019-02-20 12:32:49 +00:00
|
|
|
|
|
|
|
=item B<--customize-hook>=I<command>
|
2019-01-08 10:28:27 +00:00
|
|
|
|
2019-02-20 12:32:49 +00:00
|
|
|
Execute arbitrary I<command>s after the chroot is set up and all packages got
|
2019-12-09 09:40:51 +00:00
|
|
|
installed but before final cleanup actions are carried out. See section
|
|
|
|
B<HOOKS> for more information.
|
2019-01-08 10:28:27 +00:00
|
|
|
|
2019-03-25 13:50:41 +00:00
|
|
|
Example: Preparing a chroot for use with autopkgtest
|
2019-01-08 10:28:27 +00:00
|
|
|
|
2019-02-20 12:32:49 +00:00
|
|
|
--customize-hook='chroot "$1" passwd --delete root'
|
2020-01-08 16:19:30 +00:00
|
|
|
--customize-hook='chroot "$1" useradd --home-dir /home/user
|
|
|
|
--create-home user'
|
2019-02-20 12:32:49 +00:00
|
|
|
--customize-hook='chroot "$1" passwd --delete user'
|
|
|
|
--customize-hook='echo host > "$1/etc/hostname"'
|
2019-09-04 13:45:43 +00:00
|
|
|
--customize-hook='echo "127.0.0.1 localhost host" > "$1/etc/hosts"'
|
2019-02-20 12:32:49 +00:00
|
|
|
--customize-hook=/usr/share/autopkgtest/setup-commands/setup-testbed
|
2019-01-08 10:28:27 +00:00
|
|
|
|
2020-08-15 22:50:46 +00:00
|
|
|
=item B<--hook-directory>=I<directory>
|
|
|
|
|
|
|
|
Execute scripts in I<directory> with filenames starting with C<setup>,
|
|
|
|
C<extract>, C<essential> or C<customize>, at the respective stages during an
|
|
|
|
mmdebstrap run. The files must be marked executable. Their extension is
|
|
|
|
ignored. Subdirectories are not traversed. This option is a short-hand for
|
|
|
|
specifying the remaining four hook options individually for each file in the
|
|
|
|
directory. If there is more than one script for a stage, then they are added
|
|
|
|
alphabetically. This is useful in cases, where a user wants to run the same
|
|
|
|
hooks frequently. For example, given a directory C<./hooks> with two scripts
|
|
|
|
C<setup01-foo.sh> and C<setup02-bar.sh>, this call:
|
|
|
|
|
|
|
|
mmdebstrap --customize=./scriptA --hook-dir=./hooks --setup=./scriptB
|
|
|
|
|
|
|
|
is equivalent to this call:
|
|
|
|
|
|
|
|
mmdebstrap --customize=./scriptA --setup=./hooks/setup01-foo.sh \
|
|
|
|
--setup=./hooks/setup02-bar.sh --setup=./scriptB
|
|
|
|
|
|
|
|
The option can be specified multiple times and scripts are added to the
|
|
|
|
respective hooks in the order the options are given on the command line. Thus,
|
|
|
|
if the scripts in two directories depend upon each other, the scripts must be
|
|
|
|
placed into a common directory and be named such that they get added in the
|
|
|
|
correct order.
|
|
|
|
|
|
|
|
Example: Run mmdebstrap with eatmydata
|
|
|
|
|
|
|
|
--hook-dir=/usr/share/mmdebstrap/hooks/eatmydata
|
|
|
|
|
2020-06-23 21:14:37 +00:00
|
|
|
=item B<--skip>=I<stage>[,I<stage>,...]
|
|
|
|
|
|
|
|
B<mmdebstrap> tries hard to implement sensible defaults and will try to stop
|
|
|
|
you before shooting yourself in the foot. This option is for when you are sure
|
|
|
|
you know what you are doing and allows one to skip certain actions and safety
|
|
|
|
checks.
|
2018-09-18 09:20:24 +00:00
|
|
|
|
2019-02-20 17:02:32 +00:00
|
|
|
=item B<-q,--quiet>, B<-s,--silent>
|
2019-01-20 09:39:01 +00:00
|
|
|
|
2019-02-20 17:17:00 +00:00
|
|
|
Do not write anything to standard error. If used together with B<--verbose> or
|
|
|
|
B<--debug>, only the last option will take effect.
|
2019-01-20 09:39:01 +00:00
|
|
|
|
2019-02-20 17:02:32 +00:00
|
|
|
=item B<-v,--verbose>
|
2019-01-20 09:39:01 +00:00
|
|
|
|
|
|
|
Instead of progress bars, write the dpkg and apt output directly to standard
|
2019-02-20 17:17:00 +00:00
|
|
|
error. If used together with B<--quiet> or B<--debug>, only the last option
|
|
|
|
will take effect.
|
2019-01-20 09:39:01 +00:00
|
|
|
|
2019-02-20 17:02:32 +00:00
|
|
|
=item B<-d,--debug>
|
2019-01-20 09:39:01 +00:00
|
|
|
|
|
|
|
In addition to the output produced by B<--verbose>, write detailed debugging
|
2019-02-20 17:17:00 +00:00
|
|
|
information to standard error. Errors will print a backtrace. If used together
|
|
|
|
with B<--quiet> or B<--verbose>, only the last option will take effect.
|
|
|
|
|
2019-02-23 07:43:15 +00:00
|
|
|
=item B<--logfile>=I<filename>
|
|
|
|
|
|
|
|
Instead of writing status information to standard error, write it into the
|
|
|
|
file given by I<filename>.
|
2019-01-20 09:39:01 +00:00
|
|
|
|
2018-09-18 09:20:24 +00:00
|
|
|
=back
|
|
|
|
|
|
|
|
=head1 MODES
|
|
|
|
|
|
|
|
Creating a Debian chroot requires not only permissions for running chroot but
|
|
|
|
also the ability to create files owned by the superuser. The selected mode
|
|
|
|
decides which way this is achieved.
|
|
|
|
|
|
|
|
=over 8
|
|
|
|
|
|
|
|
=item B<auto>
|
|
|
|
|
|
|
|
This mode automatically selects a fitting mode. If the effective user id is the
|
|
|
|
one of the superuser, then the B<sudo> mode is chosen. Otherwise, the
|
|
|
|
B<unshare> mode is picked if the system has the sysctl
|
|
|
|
C<kernel.unprivileged_userns_clone> set to C<1>. Should that not be the case
|
2019-07-24 14:47:47 +00:00
|
|
|
and if the fakechroot binary exists, the B<fakechroot> mode is chosen. Lastly,
|
|
|
|
the B<proot> mode is used if the proot binary exists.
|
2018-09-18 09:20:24 +00:00
|
|
|
|
|
|
|
=item B<sudo>, B<root>
|
|
|
|
|
2019-02-28 10:54:03 +00:00
|
|
|
This mode directly executes chroot and is the same mode of operation as is
|
|
|
|
used by debootstrap. It is the only mode that can directly create a directory
|
|
|
|
chroot with the right permissions. If the chroot directory is not accessible
|
|
|
|
by the _apt user, then apt sandboxing will be automatically disabled.
|
2018-09-18 09:20:24 +00:00
|
|
|
|
|
|
|
=item B<unshare>
|
|
|
|
|
2019-09-04 13:47:15 +00:00
|
|
|
This mode uses Linux user namespaces to allow unprivileged use of chroot and
|
2018-09-18 09:20:24 +00:00
|
|
|
creation of files that appear to be owned by the superuser inside the unshared
|
2020-04-12 07:10:30 +00:00
|
|
|
namespace. A tarball created in this mode should be bit-by-bit identical to a
|
|
|
|
tarball created with the B<root> mode. This mode requires the sysctl
|
|
|
|
C<kernel.unprivileged_userns_clone> being set to C<1>. B<SETTING THIS OPTION
|
|
|
|
HAS SECURITY IMPLICATIONS>. Refer to
|
2020-03-07 01:13:53 +00:00
|
|
|
L<https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=898446>
|
2018-09-18 09:20:24 +00:00
|
|
|
|
2020-04-12 07:10:30 +00:00
|
|
|
A directory chroot created with this mode will end up with wrong ownership
|
|
|
|
information. For correct ownership information, the directory must be accessed
|
|
|
|
from a user namespace with the right subuid/subgid offset, like so:
|
|
|
|
|
|
|
|
$ lxc-usernsexec -- lxc-unshare -s 'MOUNT|PID|UTSNAME|IPC' -- \
|
|
|
|
> /usr/sbin/chroot ./debian-rootfs /bin/bash
|
|
|
|
|
|
|
|
Or without LXC:
|
|
|
|
|
|
|
|
$ mmdebstrap --unshare-helper /usr/sbin/chroot ./debian-rootfs /bin/bash
|
|
|
|
|
|
|
|
Or, if you don't mind using superuser privileges and have systemd-nspawn
|
|
|
|
available and you know your subuid/subgid offset (100000 in this example):
|
|
|
|
|
|
|
|
$ sudo systemd-nspawn --private-users=100000 \
|
|
|
|
> --directory=./debian-rootfs /bin/bash
|
|
|
|
|
2018-09-18 09:20:24 +00:00
|
|
|
=item B<fakeroot>, B<fakechroot>
|
|
|
|
|
2019-02-23 07:49:19 +00:00
|
|
|
This mode will exec B<mmdebstrap> again under C<fakechroot fakeroot>. A
|
2020-01-16 17:03:13 +00:00
|
|
|
directory chroot created with this mode will end up with wrong permissions. If
|
|
|
|
you need a directory then run B<mmdebstrap> under C<fakechroot fakeroot -s
|
|
|
|
fakeroot.env> and use C<fakeroot.env> later when entering the chroot with
|
|
|
|
C<fakechroot fakeroot -i fakeroot.env chroot ...>. This mode will not work if
|
|
|
|
maintainer scripts are unable to handle C<LD_PRELOAD> correctly like the
|
|
|
|
package B<initramfs-tools> until version 0.132. This mode will also not work
|
2020-03-07 22:42:41 +00:00
|
|
|
with a different libc inside the chroot than on the outside. Since ldconfig
|
|
|
|
cannot run under fakechroot, the final system will not contain
|
|
|
|
F</etc/ld.so.cache>. See the section B<LIMITATIONS> in B<fakechroot(1)>.
|
2018-09-18 09:20:24 +00:00
|
|
|
|
|
|
|
=item B<proot>
|
|
|
|
|
|
|
|
This mode will carry out all calls to chroot with proot instead. Since
|
2020-01-04 00:10:46 +00:00
|
|
|
ownership information is only retained while proot is still running, this will
|
|
|
|
lead to wrong ownership information in the final directory (everything will be
|
|
|
|
owned by the user that executed B<mmdebstrap>) and tarball (everything will be
|
|
|
|
owned by the root user). Extended attributes are not retained. This mode is
|
|
|
|
useful if you plan to use the chroot with proot.
|
2018-09-18 09:20:24 +00:00
|
|
|
|
2018-10-22 15:05:56 +00:00
|
|
|
=item B<chrootless>
|
|
|
|
|
|
|
|
Uses the dpkg option C<--force-script-chrootless> to install packages into
|
2020-05-01 21:37:38 +00:00
|
|
|
I<TARGET> without dpkg and apt inside I<TARGET> but using apt and dpkg from the
|
|
|
|
machine running B<mmdebstrap>. Maintainer scripts are run without chrooting
|
2020-04-11 21:10:13 +00:00
|
|
|
into I<TARGET> and rely on their dependencies being installed on the machine
|
2019-09-04 11:50:25 +00:00
|
|
|
running B<mmdebstrap>. Unless B<mmdebstrap> was run inside fakeroot, the
|
2020-05-01 21:37:38 +00:00
|
|
|
I<TARGET> directory will be owned by the user running B<mmdebstrap>.
|
2020-05-01 21:42:17 +00:00
|
|
|
B<WARNING>: if this option is used carelessly with packages that do not support
|
|
|
|
C<DPKG_ROOT>, this mode can result in undesired changes to the system running
|
|
|
|
B<mmdebstrap> because maintainer-scripts will be run without B<chroot(1)>.
|
2018-10-22 15:05:56 +00:00
|
|
|
|
2018-12-06 16:15:56 +00:00
|
|
|
=for TODO
|
|
|
|
=item B<qemu>
|
|
|
|
|
2018-09-18 09:20:24 +00:00
|
|
|
=back
|
|
|
|
|
|
|
|
=head1 VARIANTS
|
|
|
|
|
2018-10-22 15:05:56 +00:00
|
|
|
All package sets also include the direct and indirect hard dependencies (but
|
|
|
|
not recommends) of the selected package sets. The variants B<minbase>,
|
|
|
|
B<buildd> and B<-> resemble the package sets that debootstrap would install
|
|
|
|
with the same I<--variant> argument.
|
2018-09-18 09:20:24 +00:00
|
|
|
|
|
|
|
=over 8
|
|
|
|
|
2018-10-22 15:05:56 +00:00
|
|
|
=item B<extract>
|
|
|
|
|
|
|
|
Installs nothing by default (not even C<Essential:yes> packages). Packages
|
|
|
|
given by the C<--include> option are extracted but will not be installed.
|
|
|
|
|
|
|
|
=item B<custom>
|
|
|
|
|
|
|
|
Installs nothing by default (not even C<Essential:yes> packages). Packages
|
|
|
|
given by the C<--include> option will be installed. If another mode than
|
|
|
|
B<chrootless> was selected and dpkg was not part of the included package set,
|
|
|
|
then this variant will fail because it cannot configure the packages.
|
|
|
|
|
2018-09-18 09:20:24 +00:00
|
|
|
=item B<essential>
|
|
|
|
|
2018-10-22 15:03:39 +00:00
|
|
|
C<Essential:yes> packages.
|
2018-09-18 09:20:24 +00:00
|
|
|
|
|
|
|
=item B<apt>
|
|
|
|
|
|
|
|
The B<essential> set plus apt.
|
|
|
|
|
|
|
|
=item B<required>, B<minbase>
|
|
|
|
|
|
|
|
The B<essential> set plus all packages with Priority:required and apt.
|
|
|
|
|
|
|
|
=item B<buildd>
|
|
|
|
|
|
|
|
The B<minbase> set plus build-essential.
|
|
|
|
|
|
|
|
=item B<important>, B<debootstrap>, B<->
|
|
|
|
|
2020-01-08 16:19:30 +00:00
|
|
|
The B<required> set plus all packages with Priority:important. This is the
|
|
|
|
default of debootstrap.
|
2018-09-18 09:20:24 +00:00
|
|
|
|
|
|
|
=item B<standard>
|
|
|
|
|
|
|
|
The B<important> set plus all packages with Priority:standard.
|
|
|
|
|
|
|
|
=back
|
|
|
|
|
2020-04-09 16:33:05 +00:00
|
|
|
=head1 FORMATS
|
|
|
|
|
2020-05-01 21:37:38 +00:00
|
|
|
The output format of B<mmdebstrap> is specified using the B<--format> option.
|
2020-04-09 16:33:05 +00:00
|
|
|
Without that option the default format is I<auto>. The following formats exist:
|
|
|
|
|
|
|
|
=over 8
|
|
|
|
|
|
|
|
=item B<auto>
|
|
|
|
|
|
|
|
When selecting this format (the default), the actual format will be inferred
|
2020-04-12 07:11:21 +00:00
|
|
|
from the I<TARGET> positional argument. If I<TARGET> was not specified, then
|
|
|
|
the B<tar> format will be chosen. If I<TARGET> is an existing directory, and
|
|
|
|
is not equal to C<->, then the B<directory> format will be chosen. If
|
2020-04-09 16:33:05 +00:00
|
|
|
I<TARGET> ends with C<.tar> or with one of the filename extensions listed in
|
2020-04-11 21:10:13 +00:00
|
|
|
the section B<COMPRESSION>, or if I<TARGET> equals C<->, or if I<TARGET> is a
|
|
|
|
named pipe (fifo) or if I<TARGET> is a character special file like
|
2020-04-09 16:33:05 +00:00
|
|
|
F</dev/null>, then the B<tar> format will be chosen. If I<TARGET> ends with
|
|
|
|
C<.squashfs> or C<.sqfs>, then the B<squashfs> format will be chosen. If
|
|
|
|
I<TARGET> ends with C<.ext2>, then the B<ext2> format will be chosen. If none of
|
|
|
|
these conditions apply, the B<directory> format will be chosen.
|
|
|
|
|
|
|
|
=item B<directory>, B<dir>
|
|
|
|
|
|
|
|
A chroot directory will be created in I<TARGET>. If the directory already
|
|
|
|
exists, it must either be empty or only contain an empty C<lost+found>
|
|
|
|
directory. The special I<TARGET> C<-> does not work with this format because a
|
|
|
|
directory cannot be written to standard output. If you need your directory be
|
|
|
|
named C<->, then just explicitly pass the relative path to it like F<./->. If
|
|
|
|
a directory is chosen as output in any other mode than B<sudo>, then its
|
|
|
|
contents will have wrong ownership information and special device files will be
|
2020-04-12 07:11:35 +00:00
|
|
|
missing. Refer to the section B<MODES> for more information.
|
2020-04-09 16:33:05 +00:00
|
|
|
|
|
|
|
=item B<tar>
|
|
|
|
|
|
|
|
A temporary chroot directory will be created in C<$TMPDIR> or F</tmp> if
|
|
|
|
C<$TMPDIR> is not set. A tarball of that directory will be stored in I<TARGET>
|
|
|
|
or sent to standard output if I<TARGET> was omitted or if I<TARGET> equals
|
|
|
|
C<->. If I<TARGET> ends with one of the filename extensions listed in the
|
|
|
|
section B<COMPRESSION>, then a compressed tarball will be created. The tarball
|
|
|
|
will be in POSIX 1003.1-2001 (pax) format and will contain extended attributes.
|
|
|
|
To preserve the extended attributes, you have to pass B<--xattrs
|
|
|
|
--xattrs-include='*'> to tar when extracting the tarball.
|
|
|
|
|
|
|
|
=item B<squashfs>, B<sqfs>
|
|
|
|
|
|
|
|
A temporary chroot directory will be created in C<$TMPDIR> or F</tmp> if
|
|
|
|
C<$TMPDIR> is not set. A tarball of that directory will be piped to the
|
|
|
|
C<tar2sqfs> utility, which will create an xz compressed squashfs image with a
|
|
|
|
blocksize of 1048576 bytes in I<TARGET>. The special I<TARGET> C<-> does not
|
|
|
|
work with this format because C<tar2sqfs> can only write to a regular file. If
|
|
|
|
you need your squashfs image be named C<->, then just explicitly pass the
|
2020-05-02 21:55:05 +00:00
|
|
|
relative path to it like F<./->. Since C<tar2sqfs> does not support extended
|
|
|
|
attributes, the resulting image will not contain them.
|
2020-04-09 16:33:05 +00:00
|
|
|
|
|
|
|
=item B<ext2>
|
|
|
|
|
|
|
|
A temporary chroot directory will be created in C<$TMPDIR> or F</tmp> if
|
|
|
|
C<$TMPDIR> is not set. A tarball of that directory will be piped to the
|
|
|
|
C<genext2fs> utility, which will create an ext2 image that will be
|
|
|
|
approximately 90% full in I<TARGET>. The special I<TARGET> C<-> does not work
|
|
|
|
with this format because C<genext2fs> can only write to a regular file. If you
|
|
|
|
need your ext2 image be named C<->, then just explicitly pass the relative path
|
2020-05-01 21:44:12 +00:00
|
|
|
to it like F<./->. To convert the result to an ext3 image, use C<tune2fs -O
|
|
|
|
has_journal TARGET> and to convert it to ext4, use C<tune2fs -O
|
2020-05-02 21:55:05 +00:00
|
|
|
extents,uninit_bg,dir_index,has_journal TARGET>. Since C<genext2fs> does not
|
|
|
|
support extended attributes, the resulting image will not contain them.
|
2020-04-09 16:33:05 +00:00
|
|
|
|
|
|
|
=back
|
|
|
|
|
2019-12-09 09:40:51 +00:00
|
|
|
=head1 HOOKS
|
|
|
|
|
|
|
|
This section describes properties of the hook options B<--setup-hook>,
|
2020-06-23 21:14:37 +00:00
|
|
|
B<--extract-hook>, B<--essential-hook> and B<--customize-hook> which are common
|
2020-08-18 07:38:22 +00:00
|
|
|
to all four of them. Any information specific to each hook is documented under
|
2020-06-23 21:14:37 +00:00
|
|
|
the specific hook options in the section B<OPTIONS>.
|
2019-12-09 09:40:51 +00:00
|
|
|
|
|
|
|
The options can be specified multiple times and the commands are executed in
|
2020-08-18 07:38:22 +00:00
|
|
|
the order in which they are given on the command line. There are four
|
2019-12-09 09:40:51 +00:00
|
|
|
different types of hook option arguments. If the argument passed to the hook
|
2020-03-07 01:06:11 +00:00
|
|
|
option starts with C<copy-in>, C<copy-out>, C<tar-in>, C<tar-out>, C<upload> or
|
|
|
|
C<download> followed by a space, then the hook is interpreted as a special
|
2019-12-09 09:40:51 +00:00
|
|
|
hook. Otherwise, if I<command> is an existing executable file from C<$PATH> or
|
|
|
|
if I<command> does not contain any shell metacharacters, then I<command> is
|
|
|
|
directly exec-ed with the path to the chroot directory passed as the first
|
|
|
|
argument. Otherwise, I<command> is executed under I<sh> and the chroot
|
2020-03-07 01:06:11 +00:00
|
|
|
directory can be accessed via I<$1>. All environment variables set by
|
2019-12-09 09:40:51 +00:00
|
|
|
B<mmdebstrap> (like C<APT_CONFIG>, C<DEBIAN_FRONTEND>, C<LC_ALL> and C<PATH>)
|
2020-03-07 01:06:11 +00:00
|
|
|
are preserved. All environment variables set by the user are preserved, except
|
|
|
|
for C<TMPDIR> which is cleared.
|
2019-12-09 09:40:51 +00:00
|
|
|
|
|
|
|
The paths inside the chroot are relative to the root directory of the chroot.
|
|
|
|
The path on the outside is relative to current directory of the original
|
|
|
|
B<mmdebstrap> invocation. The path inside the chroot must already exist. Paths
|
|
|
|
outside the chroot are created as necessary.
|
|
|
|
|
2020-08-15 16:29:17 +00:00
|
|
|
In B<fakechroot> and B<proot> mode, C<tar>, or C<sh> and C<cat> have to be run
|
|
|
|
inside the chroot or otherwise, symlinks will be wrongly resolved and/or
|
|
|
|
permissions will be off. This means that the special hooks might fail in
|
|
|
|
B<fakechroot> and B<proot> mode for the B<setup> hook or for the B<extract> and
|
|
|
|
B<custom> variants if no C<tar> or C<sh> and C<cat> is available inside the
|
|
|
|
chroot.
|
2019-12-09 09:40:51 +00:00
|
|
|
|
|
|
|
=over 8
|
|
|
|
|
|
|
|
=item B<copy-out> I<pathinside> [I<pathinside> ...] I<pathoutside>
|
|
|
|
|
|
|
|
Recursively copies one or more files and directories out of the chroot,
|
|
|
|
placing them into I<pathoutside> outside of the chroot.
|
|
|
|
|
|
|
|
=item B<copy-in> I<pathoutside> [I<pathoutside> ...] I<pathinside>
|
|
|
|
|
|
|
|
Recursively copies one or more files and directories into the chroot,
|
|
|
|
placing them into I<pathinside> inside of the chroot.
|
|
|
|
|
2020-01-16 09:38:14 +00:00
|
|
|
=item B<sync-out> I<pathinside> I<pathoutside>
|
|
|
|
|
|
|
|
Recursively copy everything inside I<pathinside> inside the chroot into
|
|
|
|
I<pathoutside>. In contrast to B<copy-out>, this command synchronizes the
|
|
|
|
content of I<pathinside> with the content of I<pathoutside> without deleting
|
|
|
|
anything from I<pathoutside> but overwriting content as necessary. Use this
|
|
|
|
command over B<copy-out> if you don't want to create a new directory outside
|
|
|
|
the chroot but only update the content of an existing directory.
|
|
|
|
|
|
|
|
=item B<sync-in> I<pathoutside> I<pathinside>
|
|
|
|
|
|
|
|
Recursively copy everything inside I<pathoutside> into I<pathinside> inside the
|
|
|
|
chroot. In contrast to B<copy-in>, this command synchronizes the content of
|
|
|
|
I<pathoutside> with the content of I<pathinside> without deleting anything from
|
|
|
|
I<pathinside> but overwriting content as necessary. Use this command over
|
|
|
|
B<copy-in> if you don't want to create a new directory inside the chroot but
|
|
|
|
only update the content of an existing directory.
|
|
|
|
|
2019-12-09 09:40:51 +00:00
|
|
|
=item B<tar-in> I<outside.tar> I<pathinside>
|
|
|
|
|
|
|
|
Unpacks a tarball I<outside.tar> from outside the chroot into a certain
|
|
|
|
location I<pathinside> inside the chroot.
|
|
|
|
|
|
|
|
=item B<tar-out> I<pathinside> I<outside.tar>
|
|
|
|
|
|
|
|
Packs the path I<pathinside> from inside the chroot into a tarball, placing it
|
|
|
|
into a certain location I<outside.tar> outside the chroot.
|
|
|
|
|
|
|
|
=item B<download> I<fileinside> I<fileoutside>
|
|
|
|
|
|
|
|
Copy the file given by I<fileinside> from inside the chroot to outside the
|
|
|
|
chroot as I<fileoutside>. In contrast to B<copy-out>, this command only
|
|
|
|
handles files and not directories. To copy a directory recursively out of the
|
|
|
|
chroot, use B<copy-out> or B<tar-out>. Its advantage is, that by being able to
|
|
|
|
specify the full path on the outside, including the filename, the file on the
|
|
|
|
outside can have a different name from the file on the inside.
|
|
|
|
|
|
|
|
=item B<upload> I<fileoutside> I<fileinside>
|
|
|
|
|
|
|
|
Copy the file given by I<fileoutside> from outside the chroot to inside the
|
|
|
|
chroot as I<fileinside>. In contrast to B<copy-in>, this command only
|
|
|
|
handles files and not directories. To copy a directory recursively into the
|
|
|
|
chroot, use B<copy-in> or B<tar-in>. Its advantage is, that by being able to
|
|
|
|
specify the full path on the inside, including the filename, the file on the
|
|
|
|
inside can have a different name from the file on the outside.
|
|
|
|
|
|
|
|
=back
|
|
|
|
|
2018-09-18 09:20:24 +00:00
|
|
|
=head1 EXAMPLES
|
|
|
|
|
|
|
|
Use like debootstrap:
|
|
|
|
|
2019-01-08 10:27:56 +00:00
|
|
|
$ sudo mmdebstrap unstable ./unstable-chroot
|
2018-09-18 09:20:24 +00:00
|
|
|
|
|
|
|
Without superuser privileges:
|
|
|
|
|
2019-01-08 10:27:56 +00:00
|
|
|
$ mmdebstrap unstable unstable-chroot.tar
|
2018-09-18 09:20:24 +00:00
|
|
|
|
2019-02-20 17:18:31 +00:00
|
|
|
With no command line arguments at all. The chroot content is entirely defined
|
|
|
|
by a sources.list file on standard input.
|
2018-09-18 09:20:24 +00:00
|
|
|
|
2019-01-08 10:27:56 +00:00
|
|
|
$ mmdebstrap < /etc/apt/sources.list > unstable-chroot.tar
|
2018-09-18 09:20:24 +00:00
|
|
|
|
2019-10-05 05:51:05 +00:00
|
|
|
Since the tarball is output on stdout, members of it can be excluded using tar
|
|
|
|
on-the-fly. For example the /dev directory can be removed from the final
|
|
|
|
tarball in cases where it is to be extracted by a non-root user who cannot
|
|
|
|
create device nodes:
|
|
|
|
|
|
|
|
$ mmdebstrap unstable | tar --delete ./dev > unstable-chroot.tar
|
|
|
|
|
2020-01-07 16:40:13 +00:00
|
|
|
Instead of a tarball, a squashfs image can be created:
|
|
|
|
|
|
|
|
$ mmdebstrap unstable unstable-chroot.squashfs
|
|
|
|
|
2020-01-16 17:03:13 +00:00
|
|
|
By default, B<mmdebstrap> runs B<tar2sqfs> with C<--no-skip --exportable
|
|
|
|
--compressor xz --block-size 1048576>. To choose a different set of options,
|
|
|
|
pipe the output of B<mmdebstrap> into B<tar2sqfs> manually.
|
|
|
|
|
2019-10-05 05:51:05 +00:00
|
|
|
By default, debootstrapping a stable distribution will add mirrors for security
|
|
|
|
and updates to the sources.list.
|
|
|
|
|
|
|
|
$ mmdebstrap stable stable-chroot.tar
|
|
|
|
|
|
|
|
If you don't want this behaviour, you can override it by manually specifying a
|
|
|
|
mirror in various different ways:
|
|
|
|
|
|
|
|
$ mmdebstrap stable stable-chroot.tar http://deb.debian.org/debian
|
2020-01-08 16:19:30 +00:00
|
|
|
$ mmdebstrap stable stable-chroot.tar \
|
|
|
|
"deb http://deb.debian.org/debian stable main"
|
2019-10-05 05:51:05 +00:00
|
|
|
$ mmdebstrap stable stable-chroot.tar /path/to/sources.list
|
|
|
|
$ mmdebstrap stable stable-chroot.tar - < /path/to/sources.list
|
|
|
|
|
2018-09-18 09:20:24 +00:00
|
|
|
Drop locales (but not the symlink to the locale name alias database),
|
|
|
|
translated manual packages (but not the untranslated ones), and documentation
|
|
|
|
(but not copyright and Debian changelog).
|
|
|
|
|
2019-01-08 10:27:56 +00:00
|
|
|
$ mmdebstrap --variant=essential \
|
2018-09-18 09:20:24 +00:00
|
|
|
--dpkgopt='path-exclude=/usr/share/man/*' \
|
|
|
|
--dpkgopt='path-include=/usr/share/man/man[1-9]/*' \
|
|
|
|
--dpkgopt='path-exclude=/usr/share/locale/*' \
|
|
|
|
--dpkgopt='path-include=/usr/share/locale/locale.alias' \
|
|
|
|
--dpkgopt='path-exclude=/usr/share/doc/*' \
|
|
|
|
--dpkgopt='path-include=/usr/share/doc/*/copyright' \
|
|
|
|
--dpkgopt='path-include=/usr/share/doc/*/changelog.Debian.*' \
|
|
|
|
unstable debian-unstable.tar
|
|
|
|
|
2019-01-08 10:27:56 +00:00
|
|
|
Use as debootstrap replacement in sbuild-createchroot:
|
|
|
|
|
|
|
|
$ sbuild-createchroot --debootstrap=mmdebstrap \
|
|
|
|
--make-sbuild-tarball ~/.cache/sbuild/unstable-amd64.tar.gz \
|
|
|
|
unstable $(mktemp -d)
|
|
|
|
|
2020-03-07 01:07:10 +00:00
|
|
|
Create a bootable USB Stick that boots into a full Debian desktop:
|
|
|
|
|
|
|
|
$ mmdebstrap --aptopt='Apt::Install-Recommends "true"' --customize-hook \
|
|
|
|
'chroot "$1" adduser --gecos user --disabled-password user' \
|
|
|
|
--customize-hook='echo 'user:live' | chroot "$1" chpasswd' \
|
|
|
|
--customize-hook='echo host > "$1/etc/hostname"' \
|
|
|
|
--customize-hook='echo "127.0.0.1 localhost host" > "$1/etc/hosts"' \
|
|
|
|
--include=linux-image-amd64,task-desktop unstable debian-unstable.tar
|
|
|
|
$ cat << END > extlinux.conf
|
|
|
|
> default linux
|
|
|
|
> timeout 0
|
|
|
|
>
|
|
|
|
> label linux
|
|
|
|
> kernel /vmlinuz
|
|
|
|
> append initrd=/initrd.img root=LABEL=rootfs
|
|
|
|
END
|
|
|
|
# You can use $(sudo blockdev --getsize64 /dev/sdXXX) to get the right
|
|
|
|
# image size for the target medium in bytes
|
|
|
|
$ guestfish -N debian-unstable.img=disk:8G -- part-disk /dev/sda mbr : \
|
|
|
|
part-set-bootable /dev/sda 1 true : mkfs ext2 /dev/sda1 : \
|
|
|
|
set-label /dev/sda1 rootfs : mount /dev/sda1 / : \
|
|
|
|
tar-in debian-unstable.tar / xattrs:true : \
|
|
|
|
upload /usr/lib/SYSLINUX/mbr.bin /mbr.bin : \
|
|
|
|
copy-file-to-device /mbr.bin /dev/sda size:440 : rm /mbr.bin : \
|
|
|
|
extlinux / : copy-in extlinux.conf / : sync : umount / : shutdown
|
|
|
|
$ qemu-system-x86_64 -m 1G -enable-kvm debian-unstable.img
|
|
|
|
$ sudo dd if=debian-unstable.img of=/dev/sdXXX status=progress
|
|
|
|
|
2020-03-07 01:25:55 +00:00
|
|
|
Build libdvdcss2.deb without installing anything or changing apt
|
|
|
|
sources on the current system:
|
|
|
|
|
|
|
|
$ mmdebstrap --variant=apt --components=main,contrib --include=libdvd-pkg \
|
|
|
|
--customize-hook='chroot $1 /usr/lib/libdvd-pkg/b-i_libdvdcss.sh' \
|
|
|
|
| tar --extract --verbose --strip-components=4 \
|
|
|
|
--wildcards './usr/src/libdvd-pkg/libdvdcss2_*_*.deb'
|
|
|
|
$ ls libdvdcss2_*_*.deb
|
|
|
|
|
2019-01-08 10:28:27 +00:00
|
|
|
Use as replacement for autopkgtest-build-qemu and vmdb2:
|
|
|
|
|
|
|
|
$ mmdebstrap --variant=important --include=linux-image-amd64 \
|
2019-02-20 12:32:49 +00:00
|
|
|
--customize-hook='chroot "$1" passwd --delete root' \
|
2020-01-08 16:19:30 +00:00
|
|
|
--customize-hook='chroot "$1" useradd --home-dir /home/user
|
|
|
|
--create-home user' \
|
2019-02-20 12:32:49 +00:00
|
|
|
--customize-hook='chroot "$1" passwd --delete user' \
|
|
|
|
--customize-hook='echo host > "$1/etc/hostname"' \
|
|
|
|
--customize-hook='echo "127.0.0.1 localhost host" > "$1/etc/hosts"' \
|
|
|
|
--customize-hook=/usr/share/autopkgtest/setup-commands/setup-testbed \
|
2019-01-08 10:28:27 +00:00
|
|
|
unstable debian-unstable.tar
|
|
|
|
$ cat << END > extlinux.conf
|
|
|
|
> default linux
|
|
|
|
> timeout 0
|
|
|
|
>
|
|
|
|
> label linux
|
|
|
|
> kernel /vmlinuz
|
2020-03-07 01:07:10 +00:00
|
|
|
> append initrd=/initrd.img root=/dev/vda1 console=ttyS0
|
2019-01-08 10:28:27 +00:00
|
|
|
END
|
2020-03-07 01:07:10 +00:00
|
|
|
$ guestfish -N debian-unstable.img=disk:8G -- \
|
2019-01-08 10:28:27 +00:00
|
|
|
part-disk /dev/sda mbr : \
|
|
|
|
part-set-bootable /dev/sda 1 true : \
|
|
|
|
mkfs ext2 /dev/sda1 : mount /dev/sda1 / : \
|
2020-01-03 15:05:28 +00:00
|
|
|
tar-in debian-unstable.tar / xattrs:true : \
|
2020-02-06 07:14:19 +00:00
|
|
|
extlinux / : copy-in extlinux.conf / : \
|
|
|
|
sync : umount / : shutdown
|
2019-01-08 10:28:27 +00:00
|
|
|
$ qemu-img convert -O qcow2 debian-unstable.img debian-unstable.qcow2
|
|
|
|
|
2020-05-03 15:19:03 +00:00
|
|
|
As a debootstrap wrapper to run it without superuser privileges but using Linux
|
|
|
|
user namespaces instead. This fixes Debian bug #829134.
|
|
|
|
|
|
|
|
$ mmdebstrap --variant=custom --mode=unshare \
|
|
|
|
--setup-hook='env container=lxc debootstrap unstable "$1"' \
|
|
|
|
- debian-debootstrap.tar
|
|
|
|
|
2019-10-05 05:51:05 +00:00
|
|
|
Build a non-Debian chroot like Ubuntu bionic:
|
|
|
|
|
2020-01-08 16:19:30 +00:00
|
|
|
$ mmdebstrap --aptopt='Dir::Etc::Trusted
|
|
|
|
"/usr/share/keyrings/ubuntu-keyring-2012-archive.gpg"' bionic bionic.tar
|
2019-10-05 05:51:05 +00:00
|
|
|
|
2018-10-22 15:04:35 +00:00
|
|
|
=head1 ENVIRONMENT VARIABLES
|
|
|
|
|
2020-03-07 01:06:11 +00:00
|
|
|
=over 8
|
|
|
|
|
|
|
|
=item C<SOURCE_DATE_EPOCH>
|
|
|
|
|
2018-10-22 15:04:35 +00:00
|
|
|
By setting C<SOURCE_DATE_EPOCH> the result will be reproducible over multiple
|
|
|
|
runs with the same options and mirror content.
|
|
|
|
|
2020-03-07 01:06:11 +00:00
|
|
|
=item C<TMPDIR>
|
|
|
|
|
|
|
|
When creating a tarball, a temporary directory is populated with the rootfs
|
|
|
|
before the tarball is packed. The location of that temporary directory will be
|
|
|
|
in F</tmp> or the location pointed to by C<TMPDIR> if that environment variable
|
|
|
|
is set.
|
|
|
|
|
|
|
|
=back
|
|
|
|
|
2018-09-18 09:20:24 +00:00
|
|
|
=head1 DEBOOTSTRAP
|
|
|
|
|
|
|
|
This section lists some differences to debootstrap.
|
|
|
|
|
|
|
|
=over 8
|
|
|
|
|
|
|
|
=item * More than one mirror possible
|
|
|
|
|
|
|
|
=item * Default mirrors for stable releases include updates and security mirror
|
|
|
|
|
|
|
|
=item * Multiple ways to operate as non-root: fakechroot, proot, unshare
|
|
|
|
|
2018-09-23 19:15:12 +00:00
|
|
|
=item * 3-6 times faster
|
2018-09-18 09:20:24 +00:00
|
|
|
|
2020-01-08 16:19:30 +00:00
|
|
|
=item * Can create a chroot with only C<Essential:yes> packages and their deps
|
2018-09-18 09:20:24 +00:00
|
|
|
|
|
|
|
=item * Reproducible output by default if C<SOURCE_DATE_EPOCH> is set
|
|
|
|
|
|
|
|
=item * Can create output on filesystems with nodev set
|
|
|
|
|
|
|
|
=item * apt cache and lists are cleaned at the end
|
|
|
|
|
|
|
|
=item * foreign architecture chroots using qemu-user
|
|
|
|
|
|
|
|
=back
|
|
|
|
|
|
|
|
Limitations in comparison to debootstrap:
|
|
|
|
|
|
|
|
=over 8
|
|
|
|
|
2020-01-19 21:22:50 +00:00
|
|
|
=item * Only runs on systems with apt installed (Debian and derivatives)
|
2018-09-18 09:20:24 +00:00
|
|
|
|
|
|
|
=item * No I<SCRIPT> argument
|
|
|
|
|
2020-01-08 16:19:30 +00:00
|
|
|
=item * Some debootstrap options don't exist, namely:
|
|
|
|
|
2020-01-19 21:22:50 +00:00
|
|
|
I<--second-stage>, I<--exclude>, I<--resolve-deps>, I<--force-check-gpg>,
|
|
|
|
I<--merged-usr> and I<--no-merged-usr>
|
2018-09-18 09:20:24 +00:00
|
|
|
|
|
|
|
=back
|
|
|
|
|
2019-01-20 09:41:29 +00:00
|
|
|
=head1 COMPRESSION
|
|
|
|
|
|
|
|
B<mmdebstrap> will choose a suitable compressor for the output tarball
|
|
|
|
depending on the filename extension. The following mapping from filename
|
|
|
|
extension to compressor applies:
|
|
|
|
|
|
|
|
extension compressor
|
|
|
|
--------------------
|
|
|
|
.tar none
|
|
|
|
.gz gzip
|
|
|
|
.tgz gzip
|
|
|
|
.taz gzip
|
|
|
|
.Z compress
|
|
|
|
.taZ compress
|
|
|
|
.bz2 bzip2
|
|
|
|
.tbz bzip2
|
|
|
|
.tbz2 bzip2
|
|
|
|
.tz2 bzip2
|
|
|
|
.lz lzip
|
|
|
|
.lzma lzma
|
|
|
|
.tlz lzma
|
|
|
|
.lzo lzop
|
|
|
|
.lz4 lz4
|
2020-01-16 17:03:13 +00:00
|
|
|
.xz xz --threads=0
|
|
|
|
.txz xz --threads=0
|
2019-01-20 09:41:29 +00:00
|
|
|
.zst zstd
|
|
|
|
|
2020-01-16 17:03:13 +00:00
|
|
|
To change compression-specific options, either use the respective environment
|
2020-05-01 21:37:38 +00:00
|
|
|
variables like B<XZ_OPT> or send B<mmdebstrap> output to your compressor of
|
|
|
|
choice with a pipe.
|
2020-01-16 17:03:13 +00:00
|
|
|
|
2019-03-01 11:53:16 +00:00
|
|
|
=head1 BUGS
|
|
|
|
|
2019-10-05 05:51:18 +00:00
|
|
|
https://gitlab.mister-muffin.de/josch/mmdebstrap/issues
|
|
|
|
|
|
|
|
https://bugs.debian.org/src:mmdebstrap
|
|
|
|
|
2019-03-01 11:53:16 +00:00
|
|
|
As of version 1.19.5, dpkg does not provide facilities preventing it from
|
|
|
|
reading the dpkg configuration of the machine running B<mmdebstrap>.
|
2020-04-09 10:49:07 +00:00
|
|
|
Therefore, until this dpkg limitation is fixed, a default dpkg configuration is
|
|
|
|
recommended on machines running B<mmdebstrap>. If you are using B<mmdebstrap>
|
|
|
|
as the non-root user, then as a workaround you could run C<chmod 600
|
|
|
|
/etc/dpkg/dpkg.cfg.d/*> so that the config files are only accessible by the
|
|
|
|
root user.
|
2019-03-01 11:53:16 +00:00
|
|
|
|
2019-10-05 05:51:18 +00:00
|
|
|
Setting [trusted=yes] to allow signed archives without a known public key will
|
|
|
|
fail because of a gpg warning in the apt output. Since apt does not
|
|
|
|
communicate its status via any other means than human readable strings,
|
|
|
|
B<mmdebstrap> treats any warning from "apt-get update" as an error. Fixing
|
|
|
|
this will require apt to provide a machine readable status interface. See
|
|
|
|
Debian bugs #778357, #776152, #696335, and #745735.
|
|
|
|
|
2020-09-02 20:58:50 +00:00
|
|
|
Using --variant=standard is broken since August 03 2020 until python becomes
|
2020-08-25 14:07:13 +00:00
|
|
|
installable again. See Debian bug #968217.
|
2019-12-09 09:40:51 +00:00
|
|
|
|
2018-09-18 09:20:24 +00:00
|
|
|
=head1 SEE ALSO
|
|
|
|
|
|
|
|
debootstrap(8)
|
|
|
|
|
|
|
|
=cut
|
2020-01-08 15:22:51 +00:00
|
|
|
|
|
|
|
# vim: expandtab tabstop=4 softtabstop=4 shiftwidth=4 ft=perl tw=79
|