forked from josch/mmdebstrap
9059 lines
390 KiB
Perl
Executable file
9059 lines
390 KiB
Perl
Executable file
#!/usr/bin/perl
|
|
#
|
|
# © 2018 - 2023 Johannes Schauer Marin Rodrigues <josch@mister-muffin.de>
|
|
#
|
|
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
# of this software and associated documentation files (the "Software"), to
|
|
# deal in the Software without restriction, including without limitation the
|
|
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
|
|
# sell copies of the Software, and to permit persons to whom the Software is
|
|
# furnished to do so, subject to the following conditions:
|
|
#
|
|
# The above copyright notice and this permission notice shall be included in
|
|
# all copies or substantial portions of the Software.
|
|
#
|
|
# The software is provided "as is", without warranty of any kind, express or
|
|
# implied, including but not limited to the warranties of merchantability,
|
|
# fitness for a particular purpose and noninfringement. In no event shall the
|
|
# authors or copyright holders be liable for any claim, damages or other
|
|
# liability, whether in an action of contract, tort or otherwise, arising
|
|
# from, out of or in connection with the software or the use or other dealings
|
|
# in the software.
|
|
|
|
use strict;
|
|
use warnings;
|
|
|
|
our $VERSION = '1.5.2';
|
|
|
|
use English;
|
|
use Getopt::Long;
|
|
use Pod::Usage;
|
|
use File::Copy;
|
|
use File::Path qw(make_path);
|
|
use File::Temp qw(tempfile tempdir);
|
|
use File::Basename;
|
|
use File::Find;
|
|
use Cwd qw(abs_path getcwd);
|
|
require "syscall.ph"; ## no critic (Modules::RequireBarewordIncludes)
|
|
require "sys/ioctl.ph"; ## no critic (Modules::RequireBarewordIncludes)
|
|
use Fcntl
|
|
qw(S_IFCHR S_IFBLK FD_CLOEXEC F_GETFD F_SETFD LOCK_EX O_RDONLY O_DIRECTORY);
|
|
use List::Util qw(any none);
|
|
use POSIX
|
|
qw(SIGINT SIGHUP SIGPIPE SIGTERM SIG_BLOCK SIG_UNBLOCK strftime isatty);
|
|
use Carp;
|
|
use Term::ANSIColor;
|
|
use Socket;
|
|
use Time::HiRes;
|
|
use Math::BigInt;
|
|
use Text::ParseWords;
|
|
use Digest::SHA;
|
|
use version;
|
|
|
|
## no critic (InputOutput::RequireBriefOpen)
|
|
|
|
# from sched.h
|
|
# use typeglob constants because "use constant" has several drawback as
|
|
# explained in the documentation for the Readonly CPAN module
|
|
*CLONE_NEWNS = \0x20000; # mount namespace
|
|
*CLONE_NEWUTS = \0x4000000; # utsname
|
|
*CLONE_NEWIPC = \0x8000000; # ipc
|
|
*CLONE_NEWUSER = \0x10000000; # user
|
|
*CLONE_NEWPID = \0x20000000; # pid
|
|
*CLONE_NEWNET = \0x40000000; # net
|
|
*_LINUX_CAPABILITY_VERSION_3 = \0x20080522;
|
|
*CAP_SYS_ADMIN = \21;
|
|
*PR_CAPBSET_READ = \23;
|
|
# from sys/mount.h
|
|
*MS_BIND = \0x1000;
|
|
*MS_REC = \0x4000;
|
|
*MNT_DETACH = \2;
|
|
# uuid_t NameSpace_DNS in rfc4122
|
|
*UUID_NS_DNS = \'6ba7b810-9dad-11d1-80b4-00c04fd430c8';
|
|
our (
|
|
$CLONE_NEWNS, $CLONE_NEWUTS,
|
|
$CLONE_NEWIPC, $CLONE_NEWUSER,
|
|
$CLONE_NEWPID, $CLONE_NEWNET,
|
|
$_LINUX_CAPABILITY_VERSION_3, $CAP_SYS_ADMIN,
|
|
$PR_CAPBSET_READ, $MS_BIND,
|
|
$MS_REC, $MNT_DETACH,
|
|
$UUID_NS_DNS
|
|
);
|
|
|
|
#<<<
|
|
# type codes:
|
|
# 0 -> normal file
|
|
# 1 -> hardlink
|
|
# 2 -> symlink
|
|
# 3 -> character special
|
|
# 4 -> block special
|
|
# 5 -> directory
|
|
my @linuxdevfiles = (
|
|
# file name mode type link target major minor transl.
|
|
["./dev/", oct(755), '5', undef, undef, undef, undef],
|
|
["./dev/console", oct(666), '3', undef, 5, 1, undef],
|
|
["./dev/fd", oct(777), '2', '/proc/self/fd', undef, undef, undef],
|
|
["./dev/full", oct(666), '3', undef, 1, 7, undef],
|
|
["./dev/null", oct(666), '3', undef, 1, 3, undef],
|
|
["./dev/ptmx", oct(666), '3', undef, 5, 2, undef],
|
|
["./dev/pts/", oct(755), '5', undef, undef, undef, undef],
|
|
["./dev/random", oct(666), '3', undef, 1, 8, undef],
|
|
["./dev/shm/", oct(755), '5', undef, undef, undef, undef],
|
|
["./dev/stderr", oct(777), '2', '/proc/self/fd/2', undef, undef, undef],
|
|
["./dev/stdin", oct(777), '2', '/proc/self/fd/0', undef, undef, undef],
|
|
["./dev/stdout", oct(777), '2', '/proc/self/fd/1', undef, undef, undef],
|
|
["./dev/tty", oct(666), '3', undef, 5, 0, undef],
|
|
["./dev/urandom", oct(666), '3', undef, 1, 9, undef],
|
|
["./dev/zero", oct(666), '3', undef, 1, 5, undef],
|
|
);
|
|
|
|
my @hurdfiles = (
|
|
# file name mode type link target major minor transl.
|
|
['./dev/', oct(755), '5', undef, undef, undef, undef],
|
|
['./dev/MAKEDEV', oct(755), '2', '/sbin/MAKEDEV', undef, undef, undef],
|
|
['./dev/cd0', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0@/dev/disk:cd0\0"],
|
|
['./dev/cd1', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0@/dev/disk:cd1\0"],
|
|
['./dev/com0', oct(600), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/com0\0device\0com0\0"],
|
|
['./dev/com1', oct(600), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/com1\0device\0com1\0"],
|
|
['./dev/com2', oct(600), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/com2\0device\0com2\0"],
|
|
['./dev/com3', oct(600), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/com3\0device\0com3\0"],
|
|
['./dev/cons', oct(600), '0', undef, undef, undef, undef],
|
|
['./dev/console', oct(600), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/console\0device\0console\0"],
|
|
['./dev/disk', oct(755), '2', 'rumpdisk', undef, undef, undef],
|
|
['./dev/eth0', oct(660), '0', undef, undef, undef,
|
|
"/hurd/devnode\0-M\0/dev/net\0eth0\0"],
|
|
['./dev/eth1', oct(660), '0', undef, undef, undef,
|
|
"/hurd/devnode\0-M\0/dev/net\0eth1\0"],
|
|
['./dev/eth2', oct(660), '0', undef, undef, undef,
|
|
"/hurd/devnode\0-M\0/dev/net\0eth2\0"],
|
|
['./dev/eth3', oct(660), '0', undef, undef, undef,
|
|
"/hurd/devnode\0-M\0/dev/net\0eth3\0"],
|
|
['./dev/fd', oct(666), '0', undef, undef, undef,
|
|
"/hurd/magic\0--directory\0fd\0"],
|
|
['./dev/fd0', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0fd0\0"],
|
|
['./dev/fd1', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0fd1\0"],
|
|
['./dev/full', oct(666), '0', undef, undef, undef,
|
|
"/hurd/null\0--full\0"],
|
|
['./dev/hd0', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd0\0"],
|
|
['./dev/hd0s1', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd0s1\0"],
|
|
['./dev/hd0s10', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd0s10\0"],
|
|
['./dev/hd0s11', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd0s11\0"],
|
|
['./dev/hd0s12', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd0s12\0"],
|
|
['./dev/hd0s13', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd0s13\0"],
|
|
['./dev/hd0s14', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd0s14\0"],
|
|
['./dev/hd0s15', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd0s15\0"],
|
|
['./dev/hd0s16', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd0s16\0"],
|
|
['./dev/hd0s2', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd0s2\0"],
|
|
['./dev/hd0s3', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd0s3\0"],
|
|
['./dev/hd0s4', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd0s4\0"],
|
|
['./dev/hd0s5', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd0s5\0"],
|
|
['./dev/hd0s6', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd0s6\0"],
|
|
['./dev/hd0s7', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd0s7\0"],
|
|
['./dev/hd0s8', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd0s8\0"],
|
|
['./dev/hd0s9', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd0s9\0"],
|
|
['./dev/hd1', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd1\0"],
|
|
['./dev/hd1s1', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd1s1\0"],
|
|
['./dev/hd1s10', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd1s10\0"],
|
|
['./dev/hd1s11', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd1s11\0"],
|
|
['./dev/hd1s12', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd1s12\0"],
|
|
['./dev/hd1s13', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd1s13\0"],
|
|
['./dev/hd1s14', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd1s14\0"],
|
|
['./dev/hd1s15', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd1s15\0"],
|
|
['./dev/hd1s16', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd1s16\0"],
|
|
['./dev/hd1s2', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd1s2\0"],
|
|
['./dev/hd1s3', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd1s3\0"],
|
|
['./dev/hd1s4', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd1s4\0"],
|
|
['./dev/hd1s5', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd1s5\0"],
|
|
['./dev/hd1s6', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd1s6\0"],
|
|
['./dev/hd1s7', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd1s7\0"],
|
|
['./dev/hd1s8', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd1s8\0"],
|
|
['./dev/hd1s9', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd1s9\0"],
|
|
['./dev/hd2', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd2\0"],
|
|
['./dev/hd2s1', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd2s1\0"],
|
|
['./dev/hd2s10', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd2s10\0"],
|
|
['./dev/hd2s11', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd2s11\0"],
|
|
['./dev/hd2s12', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd2s12\0"],
|
|
['./dev/hd2s13', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd2s13\0"],
|
|
['./dev/hd2s14', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd2s14\0"],
|
|
['./dev/hd2s15', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd2s15\0"],
|
|
['./dev/hd2s16', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd2s16\0"],
|
|
['./dev/hd2s2', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd2s2\0"],
|
|
['./dev/hd2s3', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd2s3\0"],
|
|
['./dev/hd2s4', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd2s4\0"],
|
|
['./dev/hd2s5', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd2s5\0"],
|
|
['./dev/hd2s6', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd2s6\0"],
|
|
['./dev/hd2s7', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd2s7\0"],
|
|
['./dev/hd2s8', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd2s8\0"],
|
|
['./dev/hd2s9', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd2s9\0"],
|
|
['./dev/hd3', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd3\0"],
|
|
['./dev/hd3s1', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd3s1\0"],
|
|
['./dev/hd3s10', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd3s10\0"],
|
|
['./dev/hd3s11', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd3s11\0"],
|
|
['./dev/hd3s12', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd3s12\0"],
|
|
['./dev/hd3s13', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd3s13\0"],
|
|
['./dev/hd3s14', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd3s14\0"],
|
|
['./dev/hd3s15', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd3s15\0"],
|
|
['./dev/hd3s16', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd3s16\0"],
|
|
['./dev/hd3s2', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd3s2\0"],
|
|
['./dev/hd3s3', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd3s3\0"],
|
|
['./dev/hd3s4', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd3s4\0"],
|
|
['./dev/hd3s5', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd3s5\0"],
|
|
['./dev/hd3s6', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd3s6\0"],
|
|
['./dev/hd3s7', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd3s7\0"],
|
|
['./dev/hd3s8', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd3s8\0"],
|
|
['./dev/hd3s9', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd3s9\0"],
|
|
['./dev/hd4', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd4\0"],
|
|
['./dev/hd4s1', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd4s1\0"],
|
|
['./dev/hd4s10', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd4s10\0"],
|
|
['./dev/hd4s11', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd4s11\0"],
|
|
['./dev/hd4s12', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd4s12\0"],
|
|
['./dev/hd4s13', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd4s13\0"],
|
|
['./dev/hd4s14', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd4s14\0"],
|
|
['./dev/hd4s15', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd4s15\0"],
|
|
['./dev/hd4s16', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd4s16\0"],
|
|
['./dev/hd4s2', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd4s2\0"],
|
|
['./dev/hd4s3', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd4s3\0"],
|
|
['./dev/hd4s4', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd4s4\0"],
|
|
['./dev/hd4s5', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd4s5\0"],
|
|
['./dev/hd4s6', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd4s6\0"],
|
|
['./dev/hd4s7', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd4s7\0"],
|
|
['./dev/hd4s8', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd4s8\0"],
|
|
['./dev/hd4s9', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd4s9\0"],
|
|
['./dev/hd5', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd5\0"],
|
|
['./dev/hd5s1', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd5s1\0"],
|
|
['./dev/hd5s10', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd5s10\0"],
|
|
['./dev/hd5s11', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd5s11\0"],
|
|
['./dev/hd5s12', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd5s12\0"],
|
|
['./dev/hd5s13', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd5s13\0"],
|
|
['./dev/hd5s14', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd5s14\0"],
|
|
['./dev/hd5s15', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd5s15\0"],
|
|
['./dev/hd5s16', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd5s16\0"],
|
|
['./dev/hd5s2', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd5s2\0"],
|
|
['./dev/hd5s3', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd5s3\0"],
|
|
['./dev/hd5s4', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd5s4\0"],
|
|
['./dev/hd5s5', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd5s5\0"],
|
|
['./dev/hd5s6', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd5s6\0"],
|
|
['./dev/hd5s7', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd5s7\0"],
|
|
['./dev/hd5s8', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd5s8\0"],
|
|
['./dev/hd5s9', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0hd5s9\0"],
|
|
['./dev/kbd', oct(644), '2', 'cons/kbd', undef, undef, undef],
|
|
['./dev/klog', oct(660), '0', undef, undef, undef,
|
|
"/hurd/streamio\0kmsg\0"],
|
|
['./dev/loop0', oct(640), '0', undef, undef, undef,
|
|
"/hurd/null\0"],
|
|
['./dev/loop1', oct(640), '0', undef, undef, undef,
|
|
"/hurd/null\0"],
|
|
['./dev/loop2', oct(640), '0', undef, undef, undef,
|
|
"/hurd/null\0"],
|
|
['./dev/loop3', oct(640), '0', undef, undef, undef,
|
|
"/hurd/null\0"],
|
|
['./dev/loop4', oct(640), '0', undef, undef, undef,
|
|
"/hurd/null\0"],
|
|
['./dev/loop5', oct(640), '0', undef, undef, undef,
|
|
"/hurd/null\0"],
|
|
['./dev/loop6', oct(640), '0', undef, undef, undef,
|
|
"/hurd/null\0"],
|
|
['./dev/loop7', oct(640), '0', undef, undef, undef,
|
|
"/hurd/null\0"],
|
|
['./dev/lpr0', oct(660), '0', undef, undef, undef,
|
|
"/hurd/streamio\0lpr0\0"],
|
|
['./dev/lpr1', oct(660), '0', undef, undef, undef,
|
|
"/hurd/streamio\0lpr1\0"],
|
|
['./dev/lpr2', oct(660), '0', undef, undef, undef,
|
|
"/hurd/streamio\0lpr2\0"],
|
|
['./dev/mem', oct(660), '0', undef, undef, undef,
|
|
"/hurd/storeio\0--no-cache\0mem\0"],
|
|
['./dev/mouse', oct(644), '2', 'cons/mouse', undef, undef, undef],
|
|
['./dev/net', oct(755), '2', 'netdde', undef, undef, undef],
|
|
['./dev/netdde', oct(660), '0', undef, undef, undef,
|
|
"/hurd/netdde\0"],
|
|
['./dev/null', oct(666), '0', undef, undef, undef,
|
|
"/hurd/null\0"],
|
|
['./dev/pseudo-root', oct(640), '4', undef, 0, 0,
|
|
"/hurd/storeio\0pseudo-root\0"],
|
|
['./dev/ptyp0', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptyp0\0pty-master\0/dev/ttyp0\0"],
|
|
['./dev/ptyp1', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptyp1\0pty-master\0/dev/ttyp1\0"],
|
|
['./dev/ptyp2', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptyp2\0pty-master\0/dev/ttyp2\0"],
|
|
['./dev/ptyp3', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptyp3\0pty-master\0/dev/ttyp3\0"],
|
|
['./dev/ptyp4', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptyp4\0pty-master\0/dev/ttyp4\0"],
|
|
['./dev/ptyp5', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptyp5\0pty-master\0/dev/ttyp5\0"],
|
|
['./dev/ptyp6', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptyp6\0pty-master\0/dev/ttyp6\0"],
|
|
['./dev/ptyp7', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptyp7\0pty-master\0/dev/ttyp7\0"],
|
|
['./dev/ptyp8', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptyp8\0pty-master\0/dev/ttyp8\0"],
|
|
['./dev/ptyp9', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptyp9\0pty-master\0/dev/ttyp9\0"],
|
|
['./dev/ptypa', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptypa\0pty-master\0/dev/ttypa\0"],
|
|
['./dev/ptypb', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptypb\0pty-master\0/dev/ttypb\0"],
|
|
['./dev/ptypc', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptypc\0pty-master\0/dev/ttypc\0"],
|
|
['./dev/ptypd', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptypd\0pty-master\0/dev/ttypd\0"],
|
|
['./dev/ptype', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptype\0pty-master\0/dev/ttype\0"],
|
|
['./dev/ptypf', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptypf\0pty-master\0/dev/ttypf\0"],
|
|
['./dev/ptypg', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptypg\0pty-master\0/dev/ttypg\0"],
|
|
['./dev/ptyph', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptyph\0pty-master\0/dev/ttyph\0"],
|
|
['./dev/ptypi', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptypi\0pty-master\0/dev/ttypi\0"],
|
|
['./dev/ptypj', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptypj\0pty-master\0/dev/ttypj\0"],
|
|
['./dev/ptypk', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptypk\0pty-master\0/dev/ttypk\0"],
|
|
['./dev/ptypl', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptypl\0pty-master\0/dev/ttypl\0"],
|
|
['./dev/ptypm', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptypm\0pty-master\0/dev/ttypm\0"],
|
|
['./dev/ptypn', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptypn\0pty-master\0/dev/ttypn\0"],
|
|
['./dev/ptypo', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptypo\0pty-master\0/dev/ttypo\0"],
|
|
['./dev/ptypp', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptypp\0pty-master\0/dev/ttypp\0"],
|
|
['./dev/ptypq', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptypq\0pty-master\0/dev/ttypq\0"],
|
|
['./dev/ptypr', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptypr\0pty-master\0/dev/ttypr\0"],
|
|
['./dev/ptyps', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptyps\0pty-master\0/dev/ttyps\0"],
|
|
['./dev/ptypt', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptypt\0pty-master\0/dev/ttypt\0"],
|
|
['./dev/ptypu', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptypu\0pty-master\0/dev/ttypu\0"],
|
|
['./dev/ptypv', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptypv\0pty-master\0/dev/ttypv\0"],
|
|
['./dev/ptyq0', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptyq0\0pty-master\0/dev/ttyq0\0"],
|
|
['./dev/ptyq1', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptyq1\0pty-master\0/dev/ttyq1\0"],
|
|
['./dev/ptyq2', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptyq2\0pty-master\0/dev/ttyq2\0"],
|
|
['./dev/ptyq3', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptyq3\0pty-master\0/dev/ttyq3\0"],
|
|
['./dev/ptyq4', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptyq4\0pty-master\0/dev/ttyq4\0"],
|
|
['./dev/ptyq5', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptyq5\0pty-master\0/dev/ttyq5\0"],
|
|
['./dev/ptyq6', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptyq6\0pty-master\0/dev/ttyq6\0"],
|
|
['./dev/ptyq7', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptyq7\0pty-master\0/dev/ttyq7\0"],
|
|
['./dev/ptyq8', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptyq8\0pty-master\0/dev/ttyq8\0"],
|
|
['./dev/ptyq9', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptyq9\0pty-master\0/dev/ttyq9\0"],
|
|
['./dev/ptyqa', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptyqa\0pty-master\0/dev/ttyqa\0"],
|
|
['./dev/ptyqb', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptyqb\0pty-master\0/dev/ttyqb\0"],
|
|
['./dev/ptyqc', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptyqc\0pty-master\0/dev/ttyqc\0"],
|
|
['./dev/ptyqd', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptyqd\0pty-master\0/dev/ttyqd\0"],
|
|
['./dev/ptyqe', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptyqe\0pty-master\0/dev/ttyqe\0"],
|
|
['./dev/ptyqf', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptyqf\0pty-master\0/dev/ttyqf\0"],
|
|
['./dev/ptyqg', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptyqg\0pty-master\0/dev/ttyqg\0"],
|
|
['./dev/ptyqh', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptyqh\0pty-master\0/dev/ttyqh\0"],
|
|
['./dev/ptyqi', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptyqi\0pty-master\0/dev/ttyqi\0"],
|
|
['./dev/ptyqj', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptyqj\0pty-master\0/dev/ttyqj\0"],
|
|
['./dev/ptyqk', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptyqk\0pty-master\0/dev/ttyqk\0"],
|
|
['./dev/ptyql', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptyql\0pty-master\0/dev/ttyql\0"],
|
|
['./dev/ptyqm', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptyqm\0pty-master\0/dev/ttyqm\0"],
|
|
['./dev/ptyqn', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptyqn\0pty-master\0/dev/ttyqn\0"],
|
|
['./dev/ptyqo', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptyqo\0pty-master\0/dev/ttyqo\0"],
|
|
['./dev/ptyqp', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptyqp\0pty-master\0/dev/ttyqp\0"],
|
|
['./dev/ptyqq', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptyqq\0pty-master\0/dev/ttyqq\0"],
|
|
['./dev/ptyqr', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptyqr\0pty-master\0/dev/ttyqr\0"],
|
|
['./dev/ptyqs', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptyqs\0pty-master\0/dev/ttyqs\0"],
|
|
['./dev/ptyqt', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptyqt\0pty-master\0/dev/ttyqt\0"],
|
|
['./dev/ptyqu', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptyqu\0pty-master\0/dev/ttyqu\0"],
|
|
['./dev/ptyqv', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ptyqv\0pty-master\0/dev/ttyqv\0"],
|
|
['./dev/random', oct(644), '0', undef, undef, undef,
|
|
"/hurd/random\0--seed-file\0/var/lib/random-seed\0"],
|
|
['./dev/rumpdisk', oct(660), '0', undef, undef, undef,
|
|
"/hurd/rumpdisk\0"],
|
|
['./dev/sd0', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd0\0"],
|
|
['./dev/sd0s1', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd0s1\0"],
|
|
['./dev/sd0s10', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd0s10\0"],
|
|
['./dev/sd0s11', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd0s11\0"],
|
|
['./dev/sd0s12', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd0s12\0"],
|
|
['./dev/sd0s13', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd0s13\0"],
|
|
['./dev/sd0s14', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd0s14\0"],
|
|
['./dev/sd0s15', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd0s15\0"],
|
|
['./dev/sd0s16', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd0s16\0"],
|
|
['./dev/sd0s2', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd0s2\0"],
|
|
['./dev/sd0s3', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd0s3\0"],
|
|
['./dev/sd0s4', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd0s4\0"],
|
|
['./dev/sd0s5', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd0s5\0"],
|
|
['./dev/sd0s6', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd0s6\0"],
|
|
['./dev/sd0s7', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd0s7\0"],
|
|
['./dev/sd0s8', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd0s8\0"],
|
|
['./dev/sd0s9', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd0s9\0"],
|
|
['./dev/sd1', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd1\0"],
|
|
['./dev/sd1s1', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd1s1\0"],
|
|
['./dev/sd1s10', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd1s10\0"],
|
|
['./dev/sd1s11', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd1s11\0"],
|
|
['./dev/sd1s12', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd1s12\0"],
|
|
['./dev/sd1s13', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd1s13\0"],
|
|
['./dev/sd1s14', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd1s14\0"],
|
|
['./dev/sd1s15', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd1s15\0"],
|
|
['./dev/sd1s16', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd1s16\0"],
|
|
['./dev/sd1s2', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd1s2\0"],
|
|
['./dev/sd1s3', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd1s3\0"],
|
|
['./dev/sd1s4', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd1s4\0"],
|
|
['./dev/sd1s5', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd1s5\0"],
|
|
['./dev/sd1s6', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd1s6\0"],
|
|
['./dev/sd1s7', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd1s7\0"],
|
|
['./dev/sd1s8', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd1s8\0"],
|
|
['./dev/sd1s9', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd1s9\0"],
|
|
['./dev/sd2', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd2\0"],
|
|
['./dev/sd2s1', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd2s1\0"],
|
|
['./dev/sd2s10', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd2s10\0"],
|
|
['./dev/sd2s11', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd2s11\0"],
|
|
['./dev/sd2s12', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd2s12\0"],
|
|
['./dev/sd2s13', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd2s13\0"],
|
|
['./dev/sd2s14', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd2s14\0"],
|
|
['./dev/sd2s15', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd2s15\0"],
|
|
['./dev/sd2s16', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd2s16\0"],
|
|
['./dev/sd2s2', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd2s2\0"],
|
|
['./dev/sd2s3', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd2s3\0"],
|
|
['./dev/sd2s4', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd2s4\0"],
|
|
['./dev/sd2s5', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd2s5\0"],
|
|
['./dev/sd2s6', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd2s6\0"],
|
|
['./dev/sd2s7', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd2s7\0"],
|
|
['./dev/sd2s8', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd2s8\0"],
|
|
['./dev/sd2s9', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd2s9\0"],
|
|
['./dev/sd3', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd3\0"],
|
|
['./dev/sd3s1', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd3s1\0"],
|
|
['./dev/sd3s10', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd3s10\0"],
|
|
['./dev/sd3s11', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd3s11\0"],
|
|
['./dev/sd3s12', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd3s12\0"],
|
|
['./dev/sd3s13', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd3s13\0"],
|
|
['./dev/sd3s14', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd3s14\0"],
|
|
['./dev/sd3s15', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd3s15\0"],
|
|
['./dev/sd3s16', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd3s16\0"],
|
|
['./dev/sd3s2', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd3s2\0"],
|
|
['./dev/sd3s3', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd3s3\0"],
|
|
['./dev/sd3s4', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd3s4\0"],
|
|
['./dev/sd3s5', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd3s5\0"],
|
|
['./dev/sd3s6', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd3s6\0"],
|
|
['./dev/sd3s7', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd3s7\0"],
|
|
['./dev/sd3s8', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd3s8\0"],
|
|
['./dev/sd3s9', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd3s9\0"],
|
|
['./dev/sd4', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd4\0"],
|
|
['./dev/sd4s1', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd4s1\0"],
|
|
['./dev/sd4s10', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd4s10\0"],
|
|
['./dev/sd4s11', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd4s11\0"],
|
|
['./dev/sd4s12', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd4s12\0"],
|
|
['./dev/sd4s13', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd4s13\0"],
|
|
['./dev/sd4s14', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd4s14\0"],
|
|
['./dev/sd4s15', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd4s15\0"],
|
|
['./dev/sd4s16', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd4s16\0"],
|
|
['./dev/sd4s2', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd4s2\0"],
|
|
['./dev/sd4s3', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd4s3\0"],
|
|
['./dev/sd4s4', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd4s4\0"],
|
|
['./dev/sd4s5', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd4s5\0"],
|
|
['./dev/sd4s6', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd4s6\0"],
|
|
['./dev/sd4s7', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd4s7\0"],
|
|
['./dev/sd4s8', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd4s8\0"],
|
|
['./dev/sd4s9', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd4s9\0"],
|
|
['./dev/sd5', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd5\0"],
|
|
['./dev/sd5s1', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd5s1\0"],
|
|
['./dev/sd5s10', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd5s10\0"],
|
|
['./dev/sd5s11', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd5s11\0"],
|
|
['./dev/sd5s12', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd5s12\0"],
|
|
['./dev/sd5s13', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd5s13\0"],
|
|
['./dev/sd5s14', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd5s14\0"],
|
|
['./dev/sd5s15', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd5s15\0"],
|
|
['./dev/sd5s16', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd5s16\0"],
|
|
['./dev/sd5s2', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd5s2\0"],
|
|
['./dev/sd5s3', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd5s3\0"],
|
|
['./dev/sd5s4', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd5s4\0"],
|
|
['./dev/sd5s5', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd5s5\0"],
|
|
['./dev/sd5s6', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd5s6\0"],
|
|
['./dev/sd5s7', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd5s7\0"],
|
|
['./dev/sd5s8', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd5s8\0"],
|
|
['./dev/sd5s9', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0sd5s9\0"],
|
|
['./dev/shm', oct(644), '2', '/tmp', undef, undef, undef],
|
|
['./dev/stderr', oct(755), '2', 'fd/2', undef, undef, undef],
|
|
['./dev/stdin', oct(755), '2', 'fd/0', undef, undef, undef],
|
|
['./dev/stdout', oct(755), '2', 'fd/1', undef, undef, undef],
|
|
['./dev/time', oct(644), '0', undef, undef, undef,
|
|
"/hurd/storeio\0--no-cache\0time\0"],
|
|
['./dev/tty', oct(666), '0', undef, undef, undef,
|
|
"/hurd/magic\0tty\0"],
|
|
['./dev/tty1', oct(600), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/tty1\0hurdio\0/dev/vcs/1/console\0"],
|
|
['./dev/tty2', oct(600), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/tty2\0hurdio\0/dev/vcs/2/console\0"],
|
|
['./dev/tty3', oct(600), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/tty3\0hurdio\0/dev/vcs/3/console\0"],
|
|
['./dev/tty4', oct(600), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/tty4\0hurdio\0/dev/vcs/4/console\0"],
|
|
['./dev/tty5', oct(600), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/tty5\0hurdio\0/dev/vcs/5/console\0"],
|
|
['./dev/tty6', oct(600), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/tty6\0hurdio\0/dev/vcs/6/console\0"],
|
|
['./dev/ttyp0', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttyp0\0pty-slave\0/dev/ptyp0\0"],
|
|
['./dev/ttyp1', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttyp1\0pty-slave\0/dev/ptyp1\0"],
|
|
['./dev/ttyp2', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttyp2\0pty-slave\0/dev/ptyp2\0"],
|
|
['./dev/ttyp3', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttyp3\0pty-slave\0/dev/ptyp3\0"],
|
|
['./dev/ttyp4', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttyp4\0pty-slave\0/dev/ptyp4\0"],
|
|
['./dev/ttyp5', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttyp5\0pty-slave\0/dev/ptyp5\0"],
|
|
['./dev/ttyp6', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttyp6\0pty-slave\0/dev/ptyp6\0"],
|
|
['./dev/ttyp7', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttyp7\0pty-slave\0/dev/ptyp7\0"],
|
|
['./dev/ttyp8', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttyp8\0pty-slave\0/dev/ptyp8\0"],
|
|
['./dev/ttyp9', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttyp9\0pty-slave\0/dev/ptyp9\0"],
|
|
['./dev/ttypa', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttypa\0pty-slave\0/dev/ptypa\0"],
|
|
['./dev/ttypb', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttypb\0pty-slave\0/dev/ptypb\0"],
|
|
['./dev/ttypc', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttypc\0pty-slave\0/dev/ptypc\0"],
|
|
['./dev/ttypd', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttypd\0pty-slave\0/dev/ptypd\0"],
|
|
['./dev/ttype', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttype\0pty-slave\0/dev/ptype\0"],
|
|
['./dev/ttypf', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttypf\0pty-slave\0/dev/ptypf\0"],
|
|
['./dev/ttypg', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttypg\0pty-slave\0/dev/ptypg\0"],
|
|
['./dev/ttyph', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttyph\0pty-slave\0/dev/ptyph\0"],
|
|
['./dev/ttypi', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttypi\0pty-slave\0/dev/ptypi\0"],
|
|
['./dev/ttypj', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttypj\0pty-slave\0/dev/ptypj\0"],
|
|
['./dev/ttypk', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttypk\0pty-slave\0/dev/ptypk\0"],
|
|
['./dev/ttypl', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttypl\0pty-slave\0/dev/ptypl\0"],
|
|
['./dev/ttypm', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttypm\0pty-slave\0/dev/ptypm\0"],
|
|
['./dev/ttypn', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttypn\0pty-slave\0/dev/ptypn\0"],
|
|
['./dev/ttypo', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttypo\0pty-slave\0/dev/ptypo\0"],
|
|
['./dev/ttypp', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttypp\0pty-slave\0/dev/ptypp\0"],
|
|
['./dev/ttypq', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttypq\0pty-slave\0/dev/ptypq\0"],
|
|
['./dev/ttypr', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttypr\0pty-slave\0/dev/ptypr\0"],
|
|
['./dev/ttyps', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttyps\0pty-slave\0/dev/ptyps\0"],
|
|
['./dev/ttypt', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttypt\0pty-slave\0/dev/ptypt\0"],
|
|
['./dev/ttypu', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttypu\0pty-slave\0/dev/ptypu\0"],
|
|
['./dev/ttypv', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttypv\0pty-slave\0/dev/ptypv\0"],
|
|
['./dev/ttyq0', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttyq0\0pty-slave\0/dev/ptyq0\0"],
|
|
['./dev/ttyq1', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttyq1\0pty-slave\0/dev/ptyq1\0"],
|
|
['./dev/ttyq2', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttyq2\0pty-slave\0/dev/ptyq2\0"],
|
|
['./dev/ttyq3', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttyq3\0pty-slave\0/dev/ptyq3\0"],
|
|
['./dev/ttyq4', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttyq4\0pty-slave\0/dev/ptyq4\0"],
|
|
['./dev/ttyq5', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttyq5\0pty-slave\0/dev/ptyq5\0"],
|
|
['./dev/ttyq6', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttyq6\0pty-slave\0/dev/ptyq6\0"],
|
|
['./dev/ttyq7', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttyq7\0pty-slave\0/dev/ptyq7\0"],
|
|
['./dev/ttyq8', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttyq8\0pty-slave\0/dev/ptyq8\0"],
|
|
['./dev/ttyq9', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttyq9\0pty-slave\0/dev/ptyq9\0"],
|
|
['./dev/ttyqa', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttyqa\0pty-slave\0/dev/ptyqa\0"],
|
|
['./dev/ttyqb', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttyqb\0pty-slave\0/dev/ptyqb\0"],
|
|
['./dev/ttyqc', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttyqc\0pty-slave\0/dev/ptyqc\0"],
|
|
['./dev/ttyqd', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttyqd\0pty-slave\0/dev/ptyqd\0"],
|
|
['./dev/ttyqe', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttyqe\0pty-slave\0/dev/ptyqe\0"],
|
|
['./dev/ttyqf', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttyqf\0pty-slave\0/dev/ptyqf\0"],
|
|
['./dev/ttyqg', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttyqg\0pty-slave\0/dev/ptyqg\0"],
|
|
['./dev/ttyqh', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttyqh\0pty-slave\0/dev/ptyqh\0"],
|
|
['./dev/ttyqi', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttyqi\0pty-slave\0/dev/ptyqi\0"],
|
|
['./dev/ttyqj', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttyqj\0pty-slave\0/dev/ptyqj\0"],
|
|
['./dev/ttyqk', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttyqk\0pty-slave\0/dev/ptyqk\0"],
|
|
['./dev/ttyql', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttyql\0pty-slave\0/dev/ptyql\0"],
|
|
['./dev/ttyqm', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttyqm\0pty-slave\0/dev/ptyqm\0"],
|
|
['./dev/ttyqn', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttyqn\0pty-slave\0/dev/ptyqn\0"],
|
|
['./dev/ttyqo', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttyqo\0pty-slave\0/dev/ptyqo\0"],
|
|
['./dev/ttyqp', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttyqp\0pty-slave\0/dev/ptyqp\0"],
|
|
['./dev/ttyqq', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttyqq\0pty-slave\0/dev/ptyqq\0"],
|
|
['./dev/ttyqr', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttyqr\0pty-slave\0/dev/ptyqr\0"],
|
|
['./dev/ttyqs', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttyqs\0pty-slave\0/dev/ptyqs\0"],
|
|
['./dev/ttyqt', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttyqt\0pty-slave\0/dev/ptyqt\0"],
|
|
['./dev/ttyqu', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttyqu\0pty-slave\0/dev/ptyqu\0"],
|
|
['./dev/ttyqv', oct(666), '0', undef, undef, undef,
|
|
"/hurd/term\0/dev/ttyqv\0pty-slave\0/dev/ptyqv\0"],
|
|
['./dev/urandom', oct(755), '2', 'random', undef, undef,
|
|
"/hurd/random\0--seed-file\0/var/lib/random-seed\0--fast\0"],
|
|
['./dev/vcs', oct(600), '0', undef, undef, undef,
|
|
"/hurd/console\0"],
|
|
['./dev/wd0', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0@/dev/disk:wd0\0"],
|
|
['./dev/wd0s1', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:1:device:@/dev/disk:wd0\0"],
|
|
['./dev/wd0s10', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:10:device:@/dev/disk:wd0\0"],
|
|
['./dev/wd0s11', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:11:device:@/dev/disk:wd0\0"],
|
|
['./dev/wd0s12', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:12:device:@/dev/disk:wd0\0"],
|
|
['./dev/wd0s13', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:13:device:@/dev/disk:wd0\0"],
|
|
['./dev/wd0s14', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:14:device:@/dev/disk:wd0\0"],
|
|
['./dev/wd0s15', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:15:device:@/dev/disk:wd0\0"],
|
|
['./dev/wd0s16', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:16:device:@/dev/disk:wd0\0"],
|
|
['./dev/wd0s2', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:2:device:@/dev/disk:wd0\0"],
|
|
['./dev/wd0s3', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:3:device:@/dev/disk:wd0\0"],
|
|
['./dev/wd0s4', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:4:device:@/dev/disk:wd0\0"],
|
|
['./dev/wd0s5', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:5:device:@/dev/disk:wd0\0"],
|
|
['./dev/wd0s6', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:6:device:@/dev/disk:wd0\0"],
|
|
['./dev/wd0s7', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:7:device:@/dev/disk:wd0\0"],
|
|
['./dev/wd0s8', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:8:device:@/dev/disk:wd0\0"],
|
|
['./dev/wd0s9', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:9:device:@/dev/disk:wd0\0"],
|
|
['./dev/wd1', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0@/dev/disk:wd1\0"],
|
|
['./dev/wd1s1', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:1:device:@/dev/disk:wd1\0"],
|
|
['./dev/wd1s10', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:10:device:@/dev/disk:wd1\0"],
|
|
['./dev/wd1s11', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:11:device:@/dev/disk:wd1\0"],
|
|
['./dev/wd1s12', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:12:device:@/dev/disk:wd1\0"],
|
|
['./dev/wd1s13', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:13:device:@/dev/disk:wd1\0"],
|
|
['./dev/wd1s14', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:14:device:@/dev/disk:wd1\0"],
|
|
['./dev/wd1s15', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:15:device:@/dev/disk:wd1\0"],
|
|
['./dev/wd1s16', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:16:device:@/dev/disk:wd1\0"],
|
|
['./dev/wd1s2', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:2:device:@/dev/disk:wd1\0"],
|
|
['./dev/wd1s3', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:3:device:@/dev/disk:wd1\0"],
|
|
['./dev/wd1s4', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:4:device:@/dev/disk:wd1\0"],
|
|
['./dev/wd1s5', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:5:device:@/dev/disk:wd1\0"],
|
|
['./dev/wd1s6', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:6:device:@/dev/disk:wd1\0"],
|
|
['./dev/wd1s7', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:7:device:@/dev/disk:wd1\0"],
|
|
['./dev/wd1s8', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:8:device:@/dev/disk:wd1\0"],
|
|
['./dev/wd1s9', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:9:device:@/dev/disk:wd1\0"],
|
|
['./dev/wd2', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0@/dev/disk:wd2\0"],
|
|
['./dev/wd2s1', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:1:device:@/dev/disk:wd2\0"],
|
|
['./dev/wd2s10', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:10:device:@/dev/disk:wd2\0"],
|
|
['./dev/wd2s11', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:11:device:@/dev/disk:wd2\0"],
|
|
['./dev/wd2s12', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:12:device:@/dev/disk:wd2\0"],
|
|
['./dev/wd2s13', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:13:device:@/dev/disk:wd2\0"],
|
|
['./dev/wd2s14', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:14:device:@/dev/disk:wd2\0"],
|
|
['./dev/wd2s15', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:15:device:@/dev/disk:wd2\0"],
|
|
['./dev/wd2s16', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:16:device:@/dev/disk:wd2\0"],
|
|
['./dev/wd2s2', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:2:device:@/dev/disk:wd2\0"],
|
|
['./dev/wd2s3', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:3:device:@/dev/disk:wd2\0"],
|
|
['./dev/wd2s4', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:4:device:@/dev/disk:wd2\0"],
|
|
['./dev/wd2s5', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:5:device:@/dev/disk:wd2\0"],
|
|
['./dev/wd2s6', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:6:device:@/dev/disk:wd2\0"],
|
|
['./dev/wd2s7', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:7:device:@/dev/disk:wd2\0"],
|
|
['./dev/wd2s8', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:8:device:@/dev/disk:wd2\0"],
|
|
['./dev/wd2s9', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:9:device:@/dev/disk:wd2\0"],
|
|
['./dev/wd3', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0@/dev/disk:wd3\0"],
|
|
['./dev/wd3s1', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:1:device:@/dev/disk:wd3\0"],
|
|
['./dev/wd3s10', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:10:device:@/dev/disk:wd3\0"],
|
|
['./dev/wd3s11', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:11:device:@/dev/disk:wd3\0"],
|
|
['./dev/wd3s12', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:12:device:@/dev/disk:wd3\0"],
|
|
['./dev/wd3s13', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:13:device:@/dev/disk:wd3\0"],
|
|
['./dev/wd3s14', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:14:device:@/dev/disk:wd3\0"],
|
|
['./dev/wd3s15', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:15:device:@/dev/disk:wd3\0"],
|
|
['./dev/wd3s16', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:16:device:@/dev/disk:wd3\0"],
|
|
['./dev/wd3s2', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:2:device:@/dev/disk:wd3\0"],
|
|
['./dev/wd3s3', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:3:device:@/dev/disk:wd3\0"],
|
|
['./dev/wd3s4', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:4:device:@/dev/disk:wd3\0"],
|
|
['./dev/wd3s5', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:5:device:@/dev/disk:wd3\0"],
|
|
['./dev/wd3s6', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:6:device:@/dev/disk:wd3\0"],
|
|
['./dev/wd3s7', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:7:device:@/dev/disk:wd3\0"],
|
|
['./dev/wd3s8', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:8:device:@/dev/disk:wd3\0"],
|
|
['./dev/wd3s9', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:9:device:@/dev/disk:wd3\0"],
|
|
['./dev/wd4', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0@/dev/disk:wd4\0"],
|
|
['./dev/wd4s1', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:1:device:@/dev/disk:wd4\0"],
|
|
['./dev/wd4s10', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:10:device:@/dev/disk:wd4\0"],
|
|
['./dev/wd4s11', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:11:device:@/dev/disk:wd4\0"],
|
|
['./dev/wd4s12', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:12:device:@/dev/disk:wd4\0"],
|
|
['./dev/wd4s13', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:13:device:@/dev/disk:wd4\0"],
|
|
['./dev/wd4s14', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:14:device:@/dev/disk:wd4\0"],
|
|
['./dev/wd4s15', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:15:device:@/dev/disk:wd4\0"],
|
|
['./dev/wd4s16', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:16:device:@/dev/disk:wd4\0"],
|
|
['./dev/wd4s2', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:2:device:@/dev/disk:wd4\0"],
|
|
['./dev/wd4s3', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:3:device:@/dev/disk:wd4\0"],
|
|
['./dev/wd4s4', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:4:device:@/dev/disk:wd4\0"],
|
|
['./dev/wd4s5', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:5:device:@/dev/disk:wd4\0"],
|
|
['./dev/wd4s6', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:6:device:@/dev/disk:wd4\0"],
|
|
['./dev/wd4s7', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:7:device:@/dev/disk:wd4\0"],
|
|
['./dev/wd4s8', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:8:device:@/dev/disk:wd4\0"],
|
|
['./dev/wd4s9', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:9:device:@/dev/disk:wd4\0"],
|
|
['./dev/wd5', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0@/dev/disk:wd5\0"],
|
|
['./dev/wd5s1', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:1:device:@/dev/disk:wd5\0"],
|
|
['./dev/wd5s10', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:10:device:@/dev/disk:wd5\0"],
|
|
['./dev/wd5s11', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:11:device:@/dev/disk:wd5\0"],
|
|
['./dev/wd5s12', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:12:device:@/dev/disk:wd5\0"],
|
|
['./dev/wd5s13', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:13:device:@/dev/disk:wd5\0"],
|
|
['./dev/wd5s14', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:14:device:@/dev/disk:wd5\0"],
|
|
['./dev/wd5s15', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:15:device:@/dev/disk:wd5\0"],
|
|
['./dev/wd5s16', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:16:device:@/dev/disk:wd5\0"],
|
|
['./dev/wd5s2', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:2:device:@/dev/disk:wd5\0"],
|
|
['./dev/wd5s3', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:3:device:@/dev/disk:wd5\0"],
|
|
['./dev/wd5s4', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:4:device:@/dev/disk:wd5\0"],
|
|
['./dev/wd5s5', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:5:device:@/dev/disk:wd5\0"],
|
|
['./dev/wd5s6', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:6:device:@/dev/disk:wd5\0"],
|
|
['./dev/wd5s7', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:7:device:@/dev/disk:wd5\0"],
|
|
['./dev/wd5s8', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:8:device:@/dev/disk:wd5\0"],
|
|
['./dev/wd5s9', oct(640), '0', undef, undef, undef,
|
|
"/hurd/storeio\0-T\0typed\0part:9:device:@/dev/disk:wd5\0"],
|
|
['./dev/xconsole', oct(755), '2', '/run/xconsole', undef, undef, undef],
|
|
['./dev/zero', oct(666), '0', undef, undef, undef,
|
|
"/bin/nullauth\0--\0/hurd/storeio\0-Tzero\0"],
|
|
# file name mode type link tgt major minor transl.
|
|
['./servers/', oct(755), '5', undef, undef, undef, undef],
|
|
['./servers/acpi', oct(644), '0', undef, undef, undef,
|
|
"/hurd/acpi\0"],
|
|
['./servers/bus/', oct(755), '5', undef, undef, undef, undef],
|
|
['./servers/bus/pci/', oct(755), '5', undef, undef, undef,
|
|
"/hurd/pci-arbiter\0"],
|
|
['./servers/crash', oct(644), '2', 'crash-dump-core', undef, undef,
|
|
undef],
|
|
['./servers/crash-dump-core', oct(644), '0', undef, undef, undef,
|
|
"/hurd/crash\0--dump-core\0"],
|
|
['./servers/crash-kill', oct(644), '0', undef, undef, undef,
|
|
"/hurd/crash\0--kill\0"],
|
|
['./servers/crash-suspend', oct(644), '0', undef, undef, undef,
|
|
"/hurd/crash\0--suspend\0"],
|
|
['./servers/default-pager', oct(755), '0', undef, undef, undef,
|
|
"/hurd/proxy-defpager\0"],
|
|
['./servers/exec', oct(644), '0', undef, undef, undef,
|
|
"/hurd/exec\0"],
|
|
['./servers/password', oct(644), '0', undef, undef, undef,
|
|
"/hurd/password\0"],
|
|
['./servers/shutdown', oct(644), '0', undef, undef, undef,
|
|
"/hurd/shutdown\0"],
|
|
['./servers/socket/', oct(755), '5', undef, undef, undef, undef],
|
|
['./servers/socket/1', oct(644), '0', undef, undef, undef,
|
|
"/hurd/pflocal\0"],
|
|
['./servers/socket/2', oct(644), '0', undef, undef, undef,
|
|
"/hurd/pfinet\0-6\0/servers/socket/26\0"],
|
|
['./servers/socket/26', oct(644), '0', undef, undef, undef,
|
|
"/hurd/pfinet\0-4\0/servers/socket/2\0"],
|
|
['./servers/socket/inet', oct(644), '2', "2", undef, undef, undef],
|
|
['./servers/socket/inet6', oct(644), '2', "26", undef, undef, undef],
|
|
['./servers/socket/local', oct(644), '2', "1", undef, undef, undef],
|
|
['./servers/startup', oct(644), '0', undef, undef, undef, undef]
|
|
);
|
|
#>>>
|
|
|
|
# verbosity levels:
|
|
# 0 -> print nothing
|
|
# 1 -> normal output and progress bars
|
|
# 2 -> verbose output
|
|
# 3 -> debug output
|
|
my $verbosity_level = 1;
|
|
|
|
my $is_covering = 0;
|
|
{
|
|
# make $@ local, so we don't print "Undefined subroutine called"
|
|
# in other parts where we evaluate $@
|
|
local $@ = '';
|
|
$is_covering = !!(eval { Devel::Cover::get_coverage() });
|
|
}
|
|
|
|
# the reason why Perl::Critic warns about this is, that it suspects that the
|
|
# programmer wants to implement a test whether the terminal is interactive or
|
|
# not, in which case, complex interactions with the magic *ARGV indeed make it
|
|
# advisable to use IO::Interactive. In our case, we do not want to create an
|
|
# interactivity check but just want to check whether STDERR is opened to a tty,
|
|
# so our use of -t is fine and not "fragile and complicated" as is written in
|
|
# the description of InputOutput::ProhibitInteractiveTest. Also see
|
|
# https://github.com/Perl-Critic/Perl-Critic/issues/918
|
|
sub stderr_is_tty() {
|
|
## no critic (InputOutput::ProhibitInteractiveTest)
|
|
if (-t STDERR) {
|
|
return 1;
|
|
} else {
|
|
return 0;
|
|
}
|
|
}
|
|
|
|
sub debug {
|
|
if ($verbosity_level < 3) {
|
|
return;
|
|
}
|
|
my $msg = shift;
|
|
my ($package, $filename, $line) = caller;
|
|
$msg = "D: $PID $line $msg";
|
|
if (stderr_is_tty()) {
|
|
$msg = colored($msg, 'clear');
|
|
}
|
|
print STDERR "$msg\n";
|
|
return;
|
|
}
|
|
|
|
sub info {
|
|
if ($verbosity_level == 0) {
|
|
return;
|
|
}
|
|
my $msg = shift;
|
|
if ($verbosity_level >= 3) {
|
|
my ($package, $filename, $line) = caller;
|
|
$msg = "$PID $line $msg";
|
|
}
|
|
$msg = "I: $msg";
|
|
if (stderr_is_tty()) {
|
|
$msg = colored($msg, 'green');
|
|
}
|
|
print STDERR "$msg\n";
|
|
return;
|
|
}
|
|
|
|
sub warning {
|
|
if ($verbosity_level == 0) {
|
|
return;
|
|
}
|
|
my $msg = shift;
|
|
$msg = "W: $msg";
|
|
if (stderr_is_tty()) {
|
|
$msg = colored($msg, 'bold yellow');
|
|
}
|
|
print STDERR "$msg\n";
|
|
return;
|
|
}
|
|
|
|
sub error {
|
|
# if error() is called with the string from a previous error() that was
|
|
# caught inside an eval(), then the string will have a newline which we
|
|
# are stripping here
|
|
chomp(my $msg = shift);
|
|
$msg = "E: $msg";
|
|
if (stderr_is_tty()) {
|
|
$msg = colored($msg, 'bold red');
|
|
}
|
|
if ($verbosity_level == 3) {
|
|
croak $msg; # produces a backtrace
|
|
} else {
|
|
die "$msg\n";
|
|
}
|
|
}
|
|
|
|
# The encoding of dev_t is MMMM Mmmm mmmM MMmm, where M is a hex digit of
|
|
# the major number and m is a hex digit of the minor number.
|
|
sub major {
|
|
my $rdev = shift;
|
|
my $right
|
|
= Math::BigInt->from_hex("0x00000000000fff00")->band($rdev)->brsft(8);
|
|
my $left
|
|
= Math::BigInt->from_hex("0xfffff00000000000")->band($rdev)->brsft(32);
|
|
return $right->bior($left);
|
|
}
|
|
|
|
sub minor {
|
|
my $rdev = shift;
|
|
my $right = Math::BigInt->from_hex("0x00000000000000ff")->band($rdev);
|
|
my $left
|
|
= Math::BigInt->from_hex("0x00000ffffff00000")->band($rdev)->brsft(12);
|
|
return $right->bior($left);
|
|
}
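# For example, /dev/null is created with "mknod /dev/null c 1 3", so stat()
# reports an rdev of 0x103 and the two functions above recover the halves
# again (a rough usage sketch):
#
#   major(Math::BigInt->new(0x103));   # 1
#   minor(Math::BigInt->new(0x103));   # 3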
|
|
|
|
sub can_execute {
|
|
my $tool = shift;
|
|
my $verbose = shift // '--version';
|
|
my $pid = open(my $fh, '-|') // return 0;
|
|
if ($pid == 0) {
|
|
open(STDERR, '>&', STDOUT) or die;
|
|
exec {$tool} $tool, $verbose or die;
|
|
}
|
|
chomp(
|
|
my $content = do { local $/; <$fh> }
|
|
);
|
|
close $fh;
|
|
if ($? != 0) {
|
|
return 0;
|
|
}
|
|
if (length $content == 0) {
|
|
return 0;
|
|
}
|
|
return 1;
|
|
}
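# For example, can_execute('tar') forks, runs "tar --version" with stderr
# redirected to stdout and returns 1 only if the command exited with status
# zero and printed something. A different probe argument can be passed as
# the second parameter, e.g. can_execute('dpkg', '--print-architecture').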
|
|
|
|
# check whether a directory is mounted by comparing the device number of the
|
|
# directory itself with its parent
|
|
sub is_mountpoint {
|
|
my $dir = shift;
|
|
if (!-e $dir) {
|
|
return 0;
|
|
}
|
|
my @a = stat "$dir/.";
|
|
my @b = stat "$dir/..";
|
|
# if the device number is different, then the directory must be mounted
|
|
if ($a[0] != $b[0]) {
|
|
return 1;
|
|
}
|
|
# if the inode number is the same, then the directory must be mounted
|
|
if ($a[1] == $b[1]) {
|
|
return 1;
|
|
}
|
|
return 0;
|
|
}
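# For example, on a typical Linux system is_mountpoint("/proc") returns 1
# because /proc and its parent live on different devices, while
# is_mountpoint("/root") usually returns 0. The inode comparison treats a
# directory whose "." and ".." resolve to the same inode (such as "/") as a
# mountpoint as well.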
|
|
|
|
# tar cannot figure out the decompression program when receiving data on
|
|
# standard input, thus we do it ourselves. This is copied from tar's
|
|
# src/suffix.c
|
|
sub get_tar_compressor {
|
|
my $filename = shift;
|
|
if ($filename eq '-') {
|
|
return;
|
|
} elsif ($filename =~ /\.tar$/) {
|
|
return;
|
|
} elsif ($filename =~ /\.(gz|tgz|taz)$/) {
|
|
return ['gzip'];
|
|
} elsif ($filename =~ /\.(Z|taZ)$/) {
|
|
return ['compress'];
|
|
} elsif ($filename =~ /\.(bz2|tbz|tbz2|tz2)$/) {
|
|
return ['bzip2'];
|
|
} elsif ($filename =~ /\.lz$/) {
|
|
return ['lzip'];
|
|
} elsif ($filename =~ /\.(lzma|tlz)$/) {
|
|
return ['lzma'];
|
|
} elsif ($filename =~ /\.lzo$/) {
|
|
return ['lzop'];
|
|
} elsif ($filename =~ /\.lz4$/) {
|
|
return ['lz4'];
|
|
} elsif ($filename =~ /\.(xz|txz)$/) {
|
|
return ['xz'];
|
|
} elsif ($filename =~ /\.zst$/) {
|
|
return ['zstd'];
|
|
}
|
|
return;
|
|
}
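# For example, get_tar_compressor("rootfs.tar.xz") returns ['xz'] and
# get_tar_compressor("rootfs.tar.zst") returns ['zstd'], while "-" (standard
# output) and plain ".tar" filenames return nothing so that the tarball is
# written uncompressed.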
|
|
|
|
# avoid dependency on String::ShellQuote by implementing the mechanism
|
|
# from python's shlex.quote function
|
|
sub shellescape {
|
|
my $string = shift;
|
|
if (length $string == 0) {
|
|
return "''";
|
|
}
|
|
# search for occurrences of characters that are not safe
|
|
# the 'a' regex modifier makes sure that \w only matches ASCII
|
|
if ($string !~ m/[^\w@\%+=:,.\/-]/a) {
|
|
return $string;
|
|
}
|
|
# wrap the string in single quotes and handle existing single quotes by
|
|
# putting them outside of the single-quoted string
|
|
$string =~ s/'/'"'"'/g;
|
|
return "'$string'";
|
|
}
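# For example, shellescape("hello.txt") is returned unchanged,
# shellescape("hello world") becomes 'hello world' and an embedded single
# quote as in "it's" becomes 'it'"'"'s', the same scheme that Python's
# shlex.quote() uses.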
|
|
|
|
sub create_v5_uuid {
|
|
use bytes;
|
|
my $ns_uuid = shift;
|
|
my $name = shift;
|
|
my $version = 0x50;
|
|
# convert the namespace uuid to binary
|
|
$ns_uuid =~ tr/-//d;
|
|
$ns_uuid = pack 'H*', $ns_uuid;
|
|
# concatenate namespace and name and take sha1
|
|
my $digest = Digest::SHA->new(1);
|
|
$digest->add($ns_uuid);
|
|
$digest->add($name);
|
|
# only the first 16 bytes matter
|
|
my $uuid = substr($digest->digest(), 0, 16);
|
|
# set the version
|
|
substr $uuid, 6, 1, chr(ord(substr($uuid, 6, 1)) & 0x0f | $version);
|
|
substr $uuid, 8, 1, chr(ord(substr $uuid, 8, 1) & 0x3f | 0x80);
|
|
# convert binary back to uuid formatting
|
|
return join '-', map { unpack 'H*', $_ }
|
|
map { substr $uuid, 0, $_, '' } (4, 2, 2, 2, 6);
|
|
}
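# The result is a deterministic, name-based (version 5) UUID as described in
# RFC 4122: for example create_v5_uuid($UUID_NS_DNS, "example.com") always
# yields the same UUID for the same inputs, with the high nibble of the
# seventh byte carrying the version (5) and the ninth byte carrying the
# RFC 4122 variant bits.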
|
|
|
|
sub test_unshare_userns {
|
|
my $verbose = shift;
|
|
|
|
local *maybe_error = sub {
|
|
my $msg = shift;
|
|
if ($verbose) {
|
|
error $msg;
|
|
} else {
|
|
debug $msg;
|
|
}
|
|
};
|
|
|
|
if ($EFFECTIVE_USER_ID == 0) {
|
|
maybe_error("cannot unshare user namespace when executing as root");
|
|
return 0;
|
|
}
|
|
# arguments to syscalls have to be stored in their own variable or
|
|
# otherwise we will get "Modification of a read-only value attempted"
|
|
my $unshare_flags = $CLONE_NEWUSER;
|
|
# we spawn a new process because if unshare succeeds, we would
|
|
# otherwise have unshared the mmdebstrap process itself which we don't want
|
|
my $pid = fork() // error "fork() failed: $!";
|
|
if ($pid == 0) {
|
|
my $ret = syscall(&SYS_unshare, $unshare_flags);
|
|
if ($ret == 0) {
|
|
exit 0;
|
|
} else {
|
|
maybe_error("unshare syscall failed: $!");
|
|
exit 1;
|
|
}
|
|
}
|
|
waitpid($pid, 0);
|
|
if (($? >> 8) != 0) {
|
|
return 0;
|
|
}
|
|
# if newuidmap and newgidmap exist, the exit status will be 1 when
|
|
# executed without parameters
|
|
system "newuidmap 2>/dev/null";
|
|
if (($? >> 8) != 1) {
|
|
if (($? >> 8) == 127) {
|
|
maybe_error("cannot find newuidmap");
|
|
} else {
|
|
maybe_error("newuidmap returned unknown exit status: $?");
|
|
}
|
|
return 0;
|
|
}
|
|
system "newgidmap 2>/dev/null";
|
|
if (($? >> 8) != 1) {
|
|
if (($? >> 8) == 127) {
|
|
maybe_error("cannot find newgidmap");
|
|
} else {
|
|
maybe_error("newgidmap returned unknown exit status: $?");
|
|
}
|
|
return 0;
|
|
}
|
|
my @idmap = read_subuid_subgid($verbose);
|
|
if (scalar @idmap == 0) {
|
|
maybe_error("failed to parse /etc/subuid and /etc/subgid");
|
|
return 0;
|
|
}
|
|
# too much can go wrong when doing the dance required to unshare the user
|
|
# namespace, so instead of adding more complexity to support maybe_error()
|
|
# to a function that is already too complex, we use eval()
|
|
eval {
|
|
$pid = get_unshare_cmd(
|
|
sub {
|
|
if ($EFFECTIVE_USER_ID == 0) {
|
|
exit 0;
|
|
} else {
|
|
exit 1;
|
|
}
|
|
},
|
|
\@idmap
|
|
);
|
|
waitpid $pid, 0;
|
|
if ($? != 0) {
|
|
maybe_error("failed to unshare the user namespace");
|
|
return 0;
|
|
}
|
|
};
|
|
if ($@) {
|
|
maybe_error($@);
|
|
return 0;
|
|
}
|
|
return 1;
|
|
}
|
|
|
|
sub read_subuid_subgid {
|
|
my $verbose = shift;
|
|
my @result = ();
|
|
my $username = getpwuid $REAL_USER_ID;
|
|
my ($subid, $num_subid, $fh, $n);
|
|
|
|
local *maybe_warn = sub {
|
|
my $msg = shift;
|
|
if ($verbose) {
|
|
warning $msg;
|
|
} else {
|
|
debug $msg;
|
|
}
|
|
};
|
|
if (!-e "/etc/subuid") {
|
|
maybe_warn("/etc/subuid doesn't exist");
|
|
return;
|
|
}
|
|
if (!-r "/etc/subuid") {
|
|
maybe_warn("/etc/subuid is not readable");
|
|
return;
|
|
}
|
|
|
|
open $fh, "<", "/etc/subuid"
|
|
or maybe_warn("cannot open /etc/subuid for reading: $!");
|
|
if (!$fh) {
|
|
return;
|
|
}
|
|
while (my $line = <$fh>) {
|
|
($n, $subid, $num_subid) = split(/:/, $line, 3);
|
|
last if ($n eq $username);
|
|
}
|
|
close $fh;
|
|
if (!length $subid) {
|
|
maybe_warn("/etc/subuid is empty");
|
|
return;
|
|
}
|
|
if ($n ne $username) {
|
|
maybe_warn("no entry in /etc/subuid for $username");
|
|
return;
|
|
}
|
|
push @result, ["u", 0, $subid, $num_subid];
|
|
|
|
if (scalar(@result) < 1) {
|
|
maybe_warn("/etc/subuid does not contain an entry for $username");
|
|
return;
|
|
}
|
|
if (scalar(@result) > 1) {
|
|
maybe_warn("/etc/subuid contains multiple entries for $username");
|
|
return;
|
|
}
|
|
|
|
if (!-e "/etc/subgid") {
|
|
maybe_warn("/etc/subgid doesn't exist");
|
|
return;
|
|
}
|
|
if (!-r "/etc/subgid") {
|
|
maybe_warn("/etc/subgid is not readable");
|
|
return;
|
|
}
|
|
|
|
open $fh, "<", "/etc/subgid"
|
|
or maybe_warn("cannot open /etc/subgid for reading: $!");
|
|
if (!$fh) {
|
|
return;
|
|
}
|
|
while (my $line = <$fh>) {
|
|
($n, $subid, $num_subid) = split(/:/, $line, 3);
|
|
last if ($n eq $username);
|
|
}
|
|
close $fh;
|
|
if (!length $subid) {
|
|
maybe_warn("/etc/subgid is empty");
|
|
return;
|
|
}
|
|
if ($n ne $username) {
|
|
maybe_warn("no entry in /etc/subgid for $username");
|
|
return;
|
|
}
|
|
push @result, ["g", 0, $subid, $num_subid];
|
|
|
|
if (scalar(@result) < 2) {
|
|
maybe_warn("/etc/subgid does not contain an entry for $username");
|
|
return;
|
|
}
|
|
if (scalar(@result) > 2) {
|
|
maybe_warn("/etc/subgid contains multiple entries for $username");
|
|
return;
|
|
}
|
|
|
|
return @result;
|
|
}
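# For example, assuming a user "josch" with the line "josch:100000:65536" in
# both /etc/subuid and /etc/subgid, this returns the two entries
# ["u", 0, 100000, 65536] and ["g", 0, 100000, 65536], i.e. uid/gid 0 and
# following inside the namespace get mapped to the subordinate host ids
# starting at 100000.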
|
|
|
|
# This function spawns two child processes forming the following process tree
|
|
#
|
|
# A
|
|
# |
|
|
# fork()
|
|
# | \
|
|
# B C
|
|
# | |
|
|
# | fork()
|
|
# | | \
|
|
# | D E
|
|
# | | |
|
|
# |unshare()
|
|
# | close()
|
|
# | | |
|
|
# | | read()
|
|
# | | newuidmap(D)
|
|
# | | newgidmap(D)
|
|
# | | /
|
|
# | waitpid()
|
|
# | |
|
|
# | fork()
|
|
# | | \
|
|
# | F G
|
|
# | | |
|
|
# | | exec()
|
|
# | | /
|
|
# | waitpid()
|
|
# | /
|
|
# waitpid()
|
|
#
|
|
# To better refer to each individual part, we give each process a new
|
|
# identifier after calling fork(). Process A is the main process. After
|
|
# executing fork() we call the parent and child B and C, respectively. This
|
|
# first fork() is done because we do not want to modify A. B then remains
|
|
# waiting for its child C to finish. C calls fork() again, splitting into
|
|
# the parent D and its child E. In the parent D we call unshare() and close a
|
|
# pipe shared by D and E to signal to E that D is done with calling unshare().
|
|
# E notices this by using read() and follows up with executing the tools
|
|
# new[ug]idmap on D. E finishes and D continues with doing another fork().
|
|
# This is because when unsharing the PID namespace, we need a PID 1 to be kept
|
|
# alive or otherwise any child processes cannot fork() anymore themselves. So
|
|
# we keep F as PID 1 and finally call exec() in G.
|
|
sub get_unshare_cmd {
|
|
my $cmd = shift;
|
|
my $idmap = shift;
|
|
|
|
# unsharing the mount namespace (NEWNS) requires CAP_SYS_ADMIN
|
|
my $unshare_flags
|
|
= $CLONE_NEWNS | $CLONE_NEWPID | $CLONE_NEWUTS | $CLONE_NEWIPC;
|
|
|
|
# we only need to add CLONE_NEWUSER if we are not yet root
|
|
if ($EFFECTIVE_USER_ID != 0) {
|
|
$unshare_flags |= $CLONE_NEWUSER;
|
|
}
|
|
|
|
if (0) {
|
|
$unshare_flags |= $CLONE_NEWNET;
|
|
}
|
|
|
|
# fork a new process and let the child get unshare()ed
|
|
# we don't want to unshare the parent process
|
|
my $gcpid = fork() // error "fork() failed: $!";
|
|
if ($gcpid == 0) {
|
|
# Create a pipe for the parent process to signal the child process that
|
|
# it is done with calling unshare() so that the child can go ahead
|
|
# setting up uid_map and gid_map.
|
|
pipe my $rfh, my $wfh;
|
|
# We have to do this dance with forking a process and then modifying
|
|
# the parent from the child because:
|
|
# - new[ug]idmap can only be called on a process id after that process
|
|
# has unshared the user namespace
|
|
# - a process loses its capabilities if it performs an execve() with
|
|
# nonzero user ids; see the capabilities(7) man page for details.
|
|
# - a process that unshared the user namespace by default does not
|
|
# have the privileges to call new[ug]idmap on itself
|
|
#
|
|
# this also works the other way around (the child setting up a user
|
|
# namespace and being modified from the parent) but that way, the
|
|
# parent would have to stay around until the child exited (so a pid
|
|
# would be wasted). Additionally, that variant would require an
|
|
# additional pipe to let the parent signal the child that it is done
|
|
# with calling new[ug]idmap. The way it is done here, this signaling
|
|
# can instead be done by wait()-ing for the exit of the child.
|
|
|
|
my $ppid = $$;
|
|
my $cpid = fork() // error "fork() failed: $!";
|
|
if ($cpid == 0) {
|
|
# child
|
|
|
|
# Close the writing descriptor at our end of the pipe so that we
|
|
# see EOF when parent closes its descriptor.
|
|
close $wfh;
|
|
|
|
# Wait for the parent process to finish its unshare() call by
|
|
# waiting for an EOF.
|
|
0 == sysread $rfh, my $c, 1 or error "read() did not receive EOF";
|
|
|
|
# the process is already root, so no need for newuidmap/newgidmap
|
|
if ($EFFECTIVE_USER_ID == 0) {
|
|
exit 0;
|
|
}
|
|
|
|
# The program's new[ug]idmap have to be used because they are
|
|
# setuid root. These privileges are needed to map the ids from
|
|
# /etc/sub[ug]id to the user namespace set up by the parent.
|
|
# Without these privileges, only the id of the user itself can be
|
|
# mapped into the new namespace.
|
|
#
|
|
# Since new[ug]idmap is setuid root we also don't need to write
|
|
# "deny" to /proc/$$/setgroups beforehand (this is otherwise
|
|
# required for unprivileged processes trying to write to
|
|
# /proc/$$/gid_map since kernel version 3.19 for security reasons)
|
|
# and therefore the parent process keeps its ability to change its
|
|
# own group here.
|
|
#
|
|
# Since /proc/$ppid/[ug]id_map can only be written to once,
|
|
# respectively, instead of making multiple calls to new[ug]idmap,
|
|
# we assemble a command line that makes one call each.
|
|
my $uidmapcmd = "";
|
|
my $gidmapcmd = "";
|
|
foreach (@{$idmap}) {
|
|
my ($t, $hostid, $nsid, $range) = @{$_};
|
|
if ($t ne "u" and $t ne "g" and $t ne "b") {
|
|
error "invalid idmap type: $t";
|
|
}
|
|
if ($t eq "u" or $t eq "b") {
|
|
$uidmapcmd .= " $hostid $nsid $range";
|
|
}
|
|
if ($t eq "g" or $t eq "b") {
|
|
$gidmapcmd .= " $hostid $nsid $range";
|
|
}
|
|
}
|
|
my $idmapcmd = '';
|
|
if ($uidmapcmd ne "") {
|
|
0 == system "newuidmap $ppid $uidmapcmd"
|
|
or error "newuidmap $ppid $uidmapcmd failed: $!";
|
|
}
|
|
if ($gidmapcmd ne "") {
|
|
0 == system "newgidmap $ppid $gidmapcmd"
|
|
or error "newgidmap $ppid $gidmapcmd failed: $!";
|
|
}
|
|
exit 0;
|
|
}
|
|
|
|
# parent
|
|
|
|
# After fork()-ing, the parent immediately calls unshare...
|
|
0 == syscall &SYS_unshare, $unshare_flags
|
|
or error "unshare() failed: $!";
|
|
|
|
# .. and then signals the child process that we are done with the
|
|
# unshare() call by sending an EOF.
|
|
close $wfh;
|
|
|
|
# Wait for the child process to finish its setup by waiting for its
|
|
# exit.
|
|
$cpid == waitpid $cpid, 0 or error "waitpid() failed: $!";
|
|
my $exit = $? >> 8;
|
|
if ($exit != 0) {
|
|
error "child had a non-zero exit status: $exit";
|
|
}
|
|
|
|
# Currently we are nobody (uid and gid are 65534). So we become root
|
|
# user and group instead.
|
|
#
|
|
# We are using direct syscalls instead of setting $(, $), $< and $>
|
|
# because then perl would do additional stuff which we don't need or
|
|
# want here, like checking /proc/sys/kernel/ngroups_max (which might
|
|
# not exist). It would also call setgroups() in a way that makes
|
|
# the root user be part of the group unknown.
|
|
if ($EFFECTIVE_USER_ID != 0) {
|
|
0 == syscall &SYS_setgid, 0 or error "setgid failed: $!";
|
|
0 == syscall &SYS_setuid, 0 or error "setuid failed: $!";
|
|
0 == syscall &SYS_setgroups, 0, 0 or error "setgroups failed: $!";
|
|
}
|
|
|
|
if (1) {
|
|
# When the pid namespace is also unshared, then processes expect a
|
|
# master pid to always be alive within the namespace. To achieve
|
|
# this, we fork() here instead of exec() to always have one dummy
|
|
# process running as pid 1 inside the namespace. This is also what
|
|
# the unshare tool does when used with the --fork option.
|
|
#
|
|
# Otherwise, without a pid 1, new processes cannot be forked
|
|
# anymore after pid 1 finished.
|
|
my $cpid = fork() // error "fork() failed: $!";
|
|
if ($cpid != 0) {
|
|
# The parent process will stay alive as pid 1 in this
|
|
# namespace until the child finishes executing. This is
|
|
# important because pid 1 must never die or otherwise nothing
|
|
# new can be forked.
|
|
$cpid == waitpid $cpid, 0 or error "waitpid() failed: $!";
|
|
exit($? >> 8);
|
|
}
|
|
}
|
|
|
|
&{$cmd}();
|
|
|
|
exit 0;
|
|
}
|
|
|
|
# parent
|
|
return $gcpid;
|
|
}
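# A rough usage sketch (this mirrors test_unshare_userns() above): pass a
# code reference and the idmap from read_subuid_subgid() and wait for the
# returned pid:
#
#   my @idmap = read_subuid_subgid(1);
#   my $pid = get_unshare_cmd(
#       sub { exit($EFFECTIVE_USER_ID == 0 ? 0 : 1) }, \@idmap);
#   waitpid $pid, 0;
#
# The callback runs with uid and gid 0 inside the new user namespace.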
|
|
|
|
sub havemknod {
|
|
my $root = shift;
|
|
my $havemknod = 0;
|
|
if (-e "$root/test-dev-null") {
|
|
error "/test-dev-null already exists";
|
|
}
|
|
TEST: {
|
|
# we fork so that we can read STDERR
|
|
my $pid = open(my $fh, '-|') // error "failed to fork(): $!";
|
|
if ($pid == 0) {
|
|
open(STDERR, '>&', STDOUT) or error "cannot open STDERR: $!";
|
|
# we use mknod(1) instead of the system call because creating the
|
|
# right dev_t argument requires makedev(3)
|
|
exec 'mknod', "$root/test-dev-null", 'c', '1', '3';
|
|
}
|
|
chomp(
|
|
my $content = do { local $/; <$fh> }
|
|
);
|
|
close $fh;
|
|
{
|
|
last TEST unless $? == 0 and $content eq '';
|
|
last TEST unless -c "$root/test-dev-null";
|
|
last TEST unless open my $fh, '>', "$root/test-dev-null";
|
|
last TEST unless print $fh 'test';
|
|
}
|
|
$havemknod = 1;
|
|
}
|
|
if (-e "$root/test-dev-null") {
|
|
unlink "$root/test-dev-null"
|
|
or error "cannot unlink /test-dev-null: $!";
|
|
}
|
|
return $havemknod;
|
|
}
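# Whether mknod works depends on the mode: as real root the test device node
# can usually be created and this returns 1, while in unshare mode (where
# the mapped ids lack the privilege to create character devices) mknod fails
# and this returns 0, which later decides whether device nodes are created
# or bind-mounted.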
|
|
|
|
# inspired by /usr/share/perl/5.34/pod/perlfaq8.pod
|
|
sub terminal_width {
|
|
if (!stderr_is_tty()) {
|
|
return -1;
|
|
}
|
|
if (!defined &TIOCGWINSZ) {
|
|
return -1;
|
|
}
|
|
if (!-e "/dev/tty") {
|
|
return -1;
|
|
}
|
|
my $tty_fh;
|
|
if (!open($tty_fh, "+<", "/dev/tty")) {
|
|
return -1;
|
|
}
|
|
my $winsize = '';
|
|
if (!ioctl($tty_fh, &TIOCGWINSZ, $winsize)) {
|
|
return -1;
|
|
}
|
|
my (undef, $col, undef, undef) = unpack('S4', $winsize);
|
|
return $col;
|
|
}
|
|
|
|
# Prints the current status, the percentage and a progress bar on STDERR if
|
|
# it is an interactive tty and if verbosity is set to 1.
|
|
#
|
|
# * first 12 chars: status
|
|
# * following 7 chars: percentage
|
|
# * progress bar until 79 chars are filled
|
|
sub print_progress {
|
|
if ($verbosity_level != 1) {
|
|
return;
|
|
}
|
|
if (!stderr_is_tty()) {
|
|
return;
|
|
}
|
|
my $perc = shift;
|
|
my $status = shift;
|
|
my $len_status = 12;
|
|
my $len_perc = 7;
|
|
my $len_prog_min = 10;
|
|
my $len_prog_max = 60;
|
|
my $twidth = terminal_width();
|
|
|
|
if ($twidth <= $len_status) {
|
|
return;
|
|
}
|
|
# \e[2K clears everything on the current line (i.e. the progress bar)
|
|
print STDERR "\e[2K";
|
|
if ($perc eq "done") {
|
|
print STDERR "done\n";
|
|
return;
|
|
}
|
|
if (defined $status) {
|
|
printf STDERR "%*s", -$len_status, "$status:";
|
|
} else {
|
|
print STDERR (" " x $len_status);
|
|
}
|
|
if ($twidth <= $len_status + $len_perc) {
|
|
print STDERR "\r";
|
|
return;
|
|
}
|
|
if ($perc >= 100) {
|
|
$perc = 100;
|
|
}
|
|
printf STDERR "%*.2f", $len_perc, $perc;
|
|
if ($twidth <= $len_status + $len_perc + $len_prog_min) {
|
|
print STDERR "\r";
|
|
return;
|
|
}
|
|
my $len_prog = $twidth - $len_perc - $len_status;
|
|
if ($len_prog > $len_prog_max) {
|
|
$len_prog = $len_prog_max;
|
|
}
|
|
my $num_x = int($perc * ($len_prog - 3) / 100);
|
|
my $bar = '=' x $num_x;
|
|
if ($num_x != ($len_prog - 3)) {
|
|
$bar .= '>';
|
|
$bar .= ' ' x ($len_prog - $num_x - 4);
|
|
}
|
|
print STDERR " [$bar]\r";
|
|
return;
|
|
}
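# With the defaults above, a progress line looks roughly like this on an
# 80-column terminal (status padded to 12 columns, percentage to 7, then the
# bar, shortened here for brevity):
#
#   installing:   42.00 [=======>              ]
#
# and the trailing "\r" makes the next call overwrite it in place.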
|
|
|
|
sub run_progress {
|
|
my ($get_exec, $line_handler, $line_has_error, $chdir) = @_;
|
|
pipe my $rfh, my $wfh;
|
|
my $got_signal = 0;
|
|
my $ignore = sub {
|
|
info "run_progress() received signal $_[0]: waiting for child...";
|
|
};
|
|
|
|
debug("run_progress: exec " . (join ' ', ($get_exec->('${FD}'))));
|
|
|
|
# delay signals so that we can fork and change behaviour of the signal
|
|
# handler in parent and child without getting interrupted
|
|
my $sigset = POSIX::SigSet->new(SIGINT, SIGHUP, SIGPIPE, SIGTERM);
|
|
POSIX::sigprocmask(SIG_BLOCK, $sigset) or error "Can't block signals: $!";
|
|
|
|
my $pid1 = open(my $pipe, '-|') // error "failed to fork(): $!";
|
|
|
|
if ($pid1 == 0) {
|
|
# child: default signal handlers
|
|
local $SIG{'INT'} = 'DEFAULT';
|
|
local $SIG{'HUP'} = 'DEFAULT';
|
|
local $SIG{'PIPE'} = 'DEFAULT';
|
|
local $SIG{'TERM'} = 'DEFAULT';
|
|
|
|
# unblock all delayed signals (and possibly handle them)
|
|
POSIX::sigprocmask(SIG_UNBLOCK, $sigset)
|
|
or error "Can't unblock signals: $!";
|
|
|
|
close $rfh;
|
|
# Unset the close-on-exec flag, so that the file descriptor does not
|
|
# get closed when we exec
|
|
my $flags = fcntl($wfh, F_GETFD, 0) or error "fcntl F_GETFD: $!";
|
|
fcntl($wfh, F_SETFD, $flags & ~FD_CLOEXEC)
|
|
or error "fcntl F_SETFD: $!";
|
|
my $fd = fileno $wfh;
|
|
# redirect stderr to stdout so that we can capture it
|
|
open(STDERR, '>&', STDOUT) or error "cannot open STDOUT: $!";
|
|
my @execargs = $get_exec->($fd);
|
|
# before apt 1.5, "apt-get update" attempted to chdir() into the
|
|
# working directory. This will fail if the current working directory
|
|
# is not accessible by the user (for example in unshare mode). See
|
|
# Debian bug #860738
|
|
if (defined $chdir) {
|
|
chdir $chdir or error "failed chdir() to $chdir: $!";
|
|
}
|
|
eval { Devel::Cover::set_coverage("none") } if $is_covering;
|
|
exec { $execargs[0] } @execargs
|
|
or error 'cannot exec() ' . (join ' ', @execargs);
|
|
}
|
|
close $wfh;
|
|
|
|
# spawn two processes:
|
|
# parent will parse stdout to look for errors
|
|
# child will parse $rfh for the progress meter
|
|
my $pid2 = fork() // error "failed to fork(): $!";
|
|
if ($pid2 == 0) {
|
|
# child: ignore signals while parsing the progress stream
|
|
local $SIG{'INT'} = 'IGNORE';
|
|
local $SIG{'HUP'} = 'IGNORE';
|
|
local $SIG{'PIPE'} = 'IGNORE';
|
|
local $SIG{'TERM'} = 'IGNORE';
|
|
|
|
# unblock all delayed signals (and possibly handle them)
|
|
POSIX::sigprocmask(SIG_UNBLOCK, $sigset)
|
|
or error "Can't unblock signals: $!";
|
|
|
|
if ($verbosity_level != 1 || !stderr_is_tty()) {
|
|
# no need to print any progress
|
|
# we still need to consume everything from $rfh or otherwise apt
|
|
# will block forever if there is too much output
|
|
local $/;
|
|
<$rfh>;
|
|
close $rfh;
|
|
exit 0;
|
|
}
|
|
my $progress = 0.0;
|
|
my $status = undef;
|
|
print_progress($progress);
|
|
while (my $line = <$rfh>) {
|
|
my ($newprogress, $newstatus) = $line_handler->($line);
|
|
next unless $newprogress;
|
|
# start a new line if the new progress value is less than the
|
|
# previous one
|
|
if ($newprogress < $progress) {
|
|
print_progress("done");
|
|
}
|
|
if (defined $newstatus) {
|
|
$status = $newstatus;
|
|
}
|
|
print_progress($newprogress, $status);
|
|
$progress = $newprogress;
|
|
}
|
|
print_progress("done");
|
|
|
|
exit 0;
|
|
}
|
|
|
|
# parent: ignore signals
|
|
# by using "local", the original is automatically restored once the
|
|
# function returns
|
|
local $SIG{'INT'} = $ignore;
|
|
local $SIG{'HUP'} = $ignore;
|
|
local $SIG{'PIPE'} = $ignore;
|
|
local $SIG{'TERM'} = $ignore;
|
|
|
|
# unblock all delayed signals (and possibly handle them)
|
|
POSIX::sigprocmask(SIG_UNBLOCK, $sigset)
|
|
or error "Can't unblock signals: $!";
|
|
|
|
my $output = '';
|
|
my $has_error = 0;
|
|
while (my $line = <$pipe>) {
|
|
$has_error = $line_has_error->($line);
|
|
if ($verbosity_level >= 2) {
|
|
print STDERR $line;
|
|
} else {
|
|
# forward captured apt output
|
|
$output .= $line;
|
|
}
|
|
}
|
|
|
|
close($pipe);
|
|
my $fail = 0;
|
|
if ($? != 0 or $has_error) {
|
|
$fail = 1;
|
|
}
|
|
|
|
waitpid $pid2, 0;
|
|
$? == 0 or error "progress parsing failed";
|
|
|
|
if ($got_signal) {
|
|
error "run_progress() received signal: $got_signal";
|
|
}
|
|
|
|
# only print failure after progress output finished or otherwise it
|
|
# might interfere with the remaining output
|
|
if ($fail) {
|
|
if ($verbosity_level >= 1) {
|
|
print STDERR $output;
|
|
}
|
|
error((join ' ', $get_exec->('<$fd>')) . ' failed');
|
|
}
|
|
return;
|
|
}
|
|
|
|
sub run_dpkg_progress {
|
|
my $options = shift;
|
|
my @debs = @{ $options->{PKGS} // [] };
|
|
my $get_exec
|
|
= sub { return @{ $options->{ARGV} }, "--status-fd=$_[0]", @debs; };
|
|
my $line_has_error = sub { return 0; };
|
|
my $num = 0;
|
|
# each package has one install and one configure step, thus the total
|
|
# number is twice the number of packages
|
|
my $total = (scalar @debs) * 2;
|
|
my $line_handler = sub {
|
|
my $status = undef;
|
|
if ($_[0] =~ /^processing: (install|configure): /) {
|
|
if ($1 eq 'install') {
|
|
$status = 'installing';
|
|
} elsif ($1 eq 'configure') {
|
|
$status = 'configuring';
|
|
} else {
|
|
error "unknown status: $1";
|
|
}
|
|
$num += 1;
|
|
}
|
|
if ($total == 0) {
|
|
return 0, $status;
|
|
} else {
|
|
return $num / $total * 100, $status;
|
|
}
|
|
};
|
|
run_progress $get_exec, $line_handler, $line_has_error;
|
|
return;
|
|
}
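# A minimal sketch of how this is meant to be called (real invocations pass
# a longer ARGV):
#
#   run_dpkg_progress({
#       ARGV => ['dpkg', '--install'],
#       PKGS => ['/tmp/foo.deb', '/tmp/bar.deb'],
#   });
#
# dpkg then writes "processing: install: ..." and "processing: configure:
# ..." lines to the passed --status-fd and each such line advances the
# progress bar by one of the 2 * #packages steps.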
|
|
|
|
sub run_apt_progress {
|
|
my $options = shift;
|
|
my @debs = @{ $options->{PKGS} // [] };
|
|
|
|
if ($verbosity_level >= 3) {
|
|
my @apt_debug_opts = qw(
|
|
-oDebug::pkgProblemResolver=true
|
|
-oDebug::pkgDepCache::Marker=1
|
|
-oDebug::pkgDepCache::AutoInstall=1
|
|
);
|
|
push @{ $options->{ARGV} }, @apt_debug_opts;
|
|
}
|
|
|
|
my $get_exec = sub {
|
|
my @prefix = ();
|
|
my @opts = ();
|
|
return (
|
|
@prefix,
|
|
@{ $options->{ARGV} },
|
|
@opts,
|
|
"-oAPT::Status-Fd=$_[0]",
|
|
# prevent apt from messing up the terminal and allow dpkg to
|
|
# receive SIGINT and quit immediately without waiting for
|
|
# maintainer script to finish
|
|
'-oDpkg::Use-Pty=false',
|
|
@debs
|
|
);
|
|
};
|
|
my $line_has_error = sub { return 0; };
|
|
if ($options->{FIND_APT_WARNINGS}) {
|
|
$line_has_error = sub {
|
|
# apt-get doesn't report a non-zero exit if the update failed.
|
|
# Thus, we have to parse its output. See #778357, #776152, #696335
|
|
# and #745735 for the parsing bugs as well as #594813, #696335,
|
|
# #776152, #778357 and #953726 for non-zero exit on transient
|
|
# network errors.
|
|
#
|
|
# For example, we want to fail with the following warning:
|
|
# W: Some index files failed to download. They have been ignored,
|
|
# or old ones used instead.
|
|
# But since this message is meant for human consumption it is not
|
|
# guaranteed to be stable across different apt versions and may
|
|
# change arbitrarily in the future. Thus, we error out on any W:
|
|
# lines as well. The downside is, that apt also unconditionally
|
|
# and by design prints a warning for unsigned repositories, even
|
|
# if they were allowed with Acquire::AllowInsecureRepositories "1"
|
|
# or with trusted=yes.
|
|
#
|
|
# A workaround was introduced by apt 2.1.16 with the --error-on=any
|
|
# option to apt-get update.
|
|
if ($_[0] =~ /^(W: |Err:)/) {
|
|
return 1;
|
|
}
|
|
return 0;
|
|
};
|
|
}
|
|
my $line_handler = sub {
|
|
if ($_[0] =~ /(pmstatus|dlstatus):[^:]+:(\d+\.\d+):.*/) {
|
|
my $status = undef;
|
|
if ($1 eq 'pmstatus') {
|
|
$status = "installing";
|
|
} elsif ($1 eq 'dlstatus') {
|
|
$status = "downloading";
|
|
} else {
|
|
error "unknown status: $1";
|
|
}
|
|
return $2, $status;
|
|
}
|
|
};
|
|
run_progress $get_exec, $line_handler, $line_has_error, $options->{CHDIR};
|
|
return;
|
|
}
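# A hedged example of a typical invocation (the options hash accepts ARGV,
# PKGS, CHDIR and FIND_APT_WARNINGS):
#
#   run_apt_progress({
#       ARGV => ['apt-get', 'update'],
#       FIND_APT_WARNINGS => 1,
#   });
#
# apt reports its progress on the APT::Status-Fd as "dlstatus:..." and
# "pmstatus:..." lines which the line handler above turns into the
# "downloading" and "installing" progress bars.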
|
|
|
|
sub run_apt_download_progress {
|
|
my $options = shift;
|
|
if ($options->{dryrun}) {
|
|
info "simulate downloading packages with apt...";
|
|
} else {
|
|
info "downloading packages with apt...";
|
|
}
|
|
|
|
pipe my $rfh, my $wfh;
|
|
my $pid = open(my $fh, '-|') // error "fork() failed: $!";
|
|
if ($pid == 0) {
|
|
close $wfh;
|
|
# read until parent process closes $wfh
|
|
my $content = do { local $/; <$rfh> };
|
|
close $rfh;
|
|
# the parent is done -- pass what we read back to it
|
|
print $content;
|
|
exit 0;
|
|
}
|
|
close $rfh;
|
|
# Unset the close-on-exec flag, so that the file descriptor does not
|
|
# get closed when we exec
|
|
my $flags = fcntl($wfh, F_GETFD, 0) or error "fcntl F_GETFD: $!";
|
|
fcntl($wfh, F_SETFD, $flags & ~FD_CLOEXEC) or error "fcntl F_SETFD: $!";
|
|
my $fd = fileno $wfh;
|
|
# run_apt_progress() can raise an exception which would leave this function
|
|
# without cleaning up the other process we started, making mmdebstrap hang
|
|
# in case run_apt_progress() fails -- so wrap this in eval() instead
|
|
eval {
|
|
# 2022-05-02, #debian-apt on OFTC, times in UTC+2
|
|
# 16:57 < josch> DonKult: how is -oDebug::pkgDpkgPm=1
|
|
# -oDir::Log=/dev/null a "fancy no-op"?
|
|
# 11:52 < DonKult> josch: "fancy no-op" in sofar as it does nothing to
|
|
# the system even through its not in a special mode
|
|
# ala simulation or download-only. It does all the
|
|
# things it normally does, except that it just prints
|
|
# the dpkg calls instead of execv() them which in
|
|
# practice amounts means it does nothing (the Dir::Log
|
|
# just prevents libapt from creating the /var/log/apt
|
|
# directories. As the code creates them even if no
|
|
# logs will be placed there…). As said, midterm an apt
|
|
# --print-install-packages or something would be nice
|
|
# to avoid running everything.
|
|
run_apt_progress({
|
|
ARGV => [
|
|
'apt-get',
|
|
'--yes',
|
|
'-oDebug::pkgDpkgPm=1',
|
|
'-oDir::Log=/dev/null',
|
|
$options->{dryrun}
|
|
? '-oAPT::Get::Simulate=true'
|
|
: (
|
|
"-oAPT::Keep-Fds::=$fd",
|
|
"-oDPkg::Tools::options::'cat >&$fd'::InfoFD=$fd",
|
|
"-oDpkg::Pre-Install-Pkgs::=cat >&$fd",
|
|
# no need to lock the database if we are just downloading
|
|
"-oDebug::NoLocking=1",
|
|
# no need for pty magic if we write no log
|
|
"-oDpkg::Use-Pty=0",
|
|
# unset this or otherwise "cat >&$fd" will fail
|
|
"-oDPkg::Chroot-Directory=",
|
|
),
|
|
@{ $options->{APT_ARGV} },
|
|
],
|
|
});
|
|
};
|
|
my $err = '';
|
|
if ($@) {
|
|
$err = "apt download failed: $@";
|
|
}
|
|
# signal the child process that we are done
|
|
close $wfh;
|
|
# and then read from it what it got
|
|
my @listofdebs = <$fh>;
|
|
close $fh;
|
|
if ($? != 0) {
|
|
$err = "status child failed";
|
|
}
|
|
if ($err) {
|
|
error $err;
|
|
}
|
|
# remove trailing newlines
|
|
chomp @listofdebs;
|
|
return @listofdebs;
|
|
}
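# The returned list contains the paths of the .deb files that apt would
# install, printed line by line through the "cat >&$fd" hook above,
# typically paths of the form
# /var/cache/apt/archives/<package>_<version>_<architecture>.deb.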
|
|
|
|
sub setup_mounts {
|
|
my $options = shift;
|
|
|
|
my @cleanup_tasks = ();
|
|
|
|
eval {
|
|
if (any { $_ eq $options->{mode} } ('root', 'unshare')) {
|
|
0 == system('mount', "--make-rprivate", "/")
|
|
or warning("mount --make-rprivate / failed: $?");
|
|
# if more than essential should be installed, make the system look
|
|
# more like a real one by creating or bind-mounting the device
|
|
# nodes
|
|
foreach my $file (@linuxdevfiles) {
|
|
my ($fname, $mode, $type, $linkname, $devmajor, $devminor,
|
|
undef)
|
|
= @{$file};
|
|
next if $fname eq './dev/';
|
|
if ($type eq '0') { # normal file
|
|
error "type 0 not implemented";
|
|
} elsif ($type eq '1') { # hardlink
|
|
error "type 1 not implemented";
|
|
} elsif ($type eq '2') { # symlink
|
|
if (!$options->{havemknod}) {
|
|
# If we had mknod, then the symlink was already created
|
|
# in the run_setup function.
|
|
if (!-d "$options->{root}/dev") {
|
|
warning(
|
|
"skipping creation of $fname because the"
|
|
. " /dev directory is missing in the target"
|
|
);
|
|
next;
|
|
}
|
|
if (-e "$options->{root}/$fname") {
|
|
warning(
|
|
"skipping creation of $fname because it"
|
|
. " already exists in the target");
|
|
next;
|
|
}
|
|
push @cleanup_tasks, sub {
|
|
unlink "$options->{root}/$fname"
|
|
or warning("cannot unlink ./dev/$fname: $!");
|
|
};
|
|
symlink $linkname, "$options->{root}/$fname"
|
|
or warning
|
|
"cannot create symlink $fname -> $linkname";
|
|
}
|
|
} elsif ($type eq '3' or $type eq '4') {
|
|
# character/block special
|
|
if (any { $_ =~ '^chroot/mount(?:/dev)?$' }
|
|
@{ $options->{skip} }) {
|
|
info "skipping chroot/mount/dev as requested";
|
|
} elsif (!$options->{canmount}) {
|
|
warning "skipping bind-mounting $fname";
|
|
} elsif (!$options->{havemknod}) {
|
|
if (!-d "$options->{root}/dev") {
|
|
warning(
|
|
"skipping creation of $fname because the"
|
|
. " /dev directory is missing in the target"
|
|
);
|
|
next;
|
|
}
|
|
if ($fname eq "./dev/ptmx") {
|
|
# We must not bind-mount ptmx from the outside or
|
|
# otherwise posix_openpt() will fail. Instead
|
|
# /dev/ptmx must refer to /dev/pts/ptmx either by
|
|
# symlink or by bind-mounting. We choose a symlink.
|
|
symlink '/dev/pts/ptmx',
|
|
"$options->{root}/dev/ptmx"
|
|
or error "cannot create /dev/pts/ptmx symlink";
|
|
push @cleanup_tasks, sub {
|
|
unlink "$options->{root}/dev/ptmx"
|
|
or warning "unlink /dev/ptmx";
|
|
};
|
|
next;
|
|
}
|
|
if (!-e "/$fname") {
|
|
warning("skipping creation of $fname because"
|
|
. " $fname does not exist"
|
|
. " on the outside");
|
|
next;
|
|
}
|
|
if (!-c "/$fname") {
|
|
warning("skipping creation of $fname because"
|
|
. " $fname on the outside is not a"
|
|
. " character special file");
|
|
next;
|
|
}
|
|
open my $fh, '>', "$options->{root}/$fname"
|
|
or error "cannot open $options->{root}/$fname: $!";
|
|
close $fh;
|
|
my @umountopts = ();
|
|
if ($options->{mode} eq 'unshare') {
|
|
push @umountopts, '--no-mtab';
|
|
}
|
|
push @cleanup_tasks, sub {
|
|
0 == system('umount', @umountopts,
|
|
"$options->{root}/$fname")
|
|
or warning("umount $fname failed: $?");
|
|
unlink "$options->{root}/$fname"
|
|
or warning("cannot unlink $fname: $!");
|
|
};
|
|
0 == system('mount', '-o', 'bind', "/$fname",
|
|
"$options->{root}/$fname")
|
|
or error "mount $fname failed: $?";
|
|
}
|
|
} elsif ($type eq '5') {
|
|
# directory
|
|
if (any { $_ =~ '^chroot/mount(?:/dev)?$' }
|
|
@{ $options->{skip} }) {
|
|
info "skipping chroot/mount/dev as requested";
|
|
} elsif (!$options->{canmount}) {
|
|
warning "skipping bind-mounting $fname";
|
|
} else {
|
|
if (!-d "$options->{root}/dev") {
|
|
warning(
|
|
"skipping creation of $fname because the"
|
|
. " /dev directory is missing in the target"
|
|
);
|
|
next;
|
|
}
|
|
if (!-e "/$fname" && $fname ne "./dev/pts/") {
|
|
warning("skipping creation of $fname because"
|
|
. " $fname does not exist"
|
|
. " on the outside");
|
|
next;
|
|
}
|
|
if (!-d "/$fname" && $fname ne "./dev/pts/") {
|
|
warning("skipping creation of $fname because"
|
|
. " $fname on the outside is not a"
|
|
. " directory");
|
|
next;
|
|
}
|
|
if (!$options->{havemknod}) {
|
|
# If we had mknod, then the directory to bind-mount
|
|
# into was already created in the run_setup
|
|
# function.
|
|
push @cleanup_tasks, sub {
|
|
rmdir "$options->{root}/$fname"
|
|
or warning("cannot rmdir $fname: $!");
|
|
};
|
|
if (-e "$options->{root}/$fname") {
|
|
if (!-d "$options->{root}/$fname") {
|
|
error "$fname already exists but is not"
|
|
. " a directory";
|
|
}
|
|
} else {
|
|
my $num_created
|
|
= make_path "$options->{root}/$fname",
|
|
{ error => \my $err };
|
|
if ($err && @$err) {
|
|
error(
|
|
join "; ",
|
|
(
|
|
map {
|
|
"cannot create "
|
|
. (join ": ", %{$_})
|
|
} @$err
|
|
));
|
|
} elsif ($num_created == 0) {
|
|
error( "cannot create $options->{root}"
|
|
. "$fname");
|
|
}
|
|
}
|
|
chmod $mode, "$options->{root}/$fname"
|
|
or error "cannot chmod $fname: $!";
|
|
}
|
|
my @umountopts = ();
|
|
if ($options->{mode} eq 'unshare') {
|
|
push @umountopts, '--no-mtab';
|
|
}
|
|
push @cleanup_tasks, sub {
|
|
0 == system('umount', @umountopts,
|
|
"$options->{root}/$fname")
|
|
or warning("umount $fname failed: $?");
|
|
};
|
|
if ($fname eq "./dev/pts/") {
|
|
# We cannot just bind-mount /dev/pts from the host
|
|
# as doing so will make posix_openpt() fail.
|
|
# Instead, we need to mount a new devpts.
|
|
# We need ptmxmode=666 because /dev/ptmx is a
|
|
# symlink to /dev/pts/ptmx and without it
|
|
# posix_openpt() will fail if we are not the root
|
|
# user. See also:
|
|
# kernel.org/doc/Documentation/filesystems/devpts.txt
|
|
# salsa.d.o/debian/schroot/-/merge_requests/2
|
|
# https://bugs.debian.org/856877
|
|
# https://bugs.debian.org/817236
|
|
0 == system(
|
|
'mount',
|
|
'-t',
|
|
'devpts',
|
|
'none',
|
|
"$options->{root}/dev/pts",
|
|
'-o',
|
|
'noexec,nosuid,uid=5,mode=620,ptmxmode=666'
|
|
) or error "mount /dev/pts failed";
|
|
} else {
|
|
0 == system('mount', '-o', 'bind', "/$fname",
|
|
"$options->{root}/$fname")
|
|
or error "mount $fname failed: $?";
|
|
}
|
|
}
|
|
} else {
|
|
error "unsupported type: $type";
|
|
}
|
|
}
|
|
} elsif (any { $_ eq $options->{mode} } ('fakechroot', 'chrootless')) {
|
|
# we cannot mount in fakechroot mode
|
|
} else {
|
|
error "unknown mode: $options->{mode}";
|
|
}
|
|
# We can only mount /proc and /sys after extracting the essential
|
|
# set because if we mount it before, then base-files will not be able
|
|
# to extract those
|
|
if ( (any { $_ eq $options->{mode} } ('root', 'unshare'))
|
|
&& (any { $_ =~ '^chroot/mount(?:/sys)?$' } @{ $options->{skip} }))
|
|
{
|
|
info "skipping chroot/mount/sys as requested";
|
|
} elsif ((any { $_ eq $options->{mode} } ('root', 'unshare'))
|
|
&& !$options->{canmount}) {
|
|
warning "skipping mount sysfs";
|
|
} elsif ((any { $_ eq $options->{mode} } ('root', 'unshare'))
|
|
&& !-d "$options->{root}/sys") {
|
|
warning("skipping mounting of sysfs because the"
|
|
. " /sys directory is missing in the target");
|
|
} elsif ((any { $_ eq $options->{mode} } ('root', 'unshare'))
|
|
&& !-e "/sys") {
|
|
warning("skipping mounting /sys because"
|
|
. " /sys does not exist on the outside");
|
|
} elsif ((any { $_ eq $options->{mode} } ('root', 'unshare'))
|
|
&& !-d "/sys") {
|
|
warning("skipping mounting /sys because"
|
|
. " /sys on the outside is not a directory");
|
|
} elsif ($options->{mode} eq 'root') {
|
|
# we don't know whether we run in root mode inside an unshared
|
|
# user namespace or as real root so we first try the real mount and
|
|
# then fall back to mounting in a way that works in unshared mode
|
|
if (
|
|
0 == system(
|
|
'mount', '-t',
|
|
'sysfs', '-o',
|
|
'ro,nosuid,nodev,noexec', 'sys',
|
|
"$options->{root}/sys"
|
|
)
|
|
) {
|
|
push @cleanup_tasks, sub {
|
|
0 == system('umount', "$options->{root}/sys")
|
|
or warning("umount /sys failed: $?");
|
|
};
|
|
} elsif (
|
|
0 == system('mount', '-o', 'rbind', '/sys',
|
|
"$options->{root}/sys")) {
|
|
push @cleanup_tasks, sub {
|
|
# since we cannot write to /etc/mtab we need --no-mtab
|
|
# unmounting /sys only seems to be successful with --lazy
|
|
0 == system(
|
|
'umount', '--no-mtab',
|
|
'--lazy', "$options->{root}/sys"
|
|
) or warning("umount /sys failed: $?");
|
|
};
|
|
} else {
|
|
error "mount /sys failed: $?";
|
|
}
|
|
} elsif ($options->{mode} eq 'unshare') {
|
|
# naturally we have to clean up after ourselves in sudo mode where
|
|
# we do a real mount. But we also need to unmount in unshare mode
|
|
# because otherwise, even with the --one-file-system tar option,
|
|
# the permissions of the mount source will be stored and not the
|
|
# mount target (the directory)
|
|
push @cleanup_tasks, sub {
|
|
# since we cannot write to /etc/mtab we need --no-mtab
|
|
# unmounting /sys only seems to be successful with --lazy
|
|
0 == system('umount', '--no-mtab', '--lazy',
|
|
"$options->{root}/sys")
|
|
or warning("umount /sys failed: $?");
|
|
};
|
|
# without the network namespace unshared, we cannot mount a new
|
|
# sysfs. Since we need network, we just bind-mount.
|
|
#
|
|
# we have to rbind because just using bind results in "wrong fs
|
|
# type, bad option, bad superblock" error
|
|
0 == system('mount', '-o', 'rbind', '/sys', "$options->{root}/sys")
|
|
or error "mount /sys failed: $?";
|
|
} elsif (any { $_ eq $options->{mode} } ('fakechroot', 'chrootless')) {
|
|
# we cannot mount in fakechroot mode
|
|
} else {
|
|
error "unknown mode: $options->{mode}";
|
|
}
|
|
if (
|
|
(any { $_ eq $options->{mode} } ('root', 'unshare'))
|
|
&& (any { $_ =~ '^chroot/mount(?:/proc)?$' } @{ $options->{skip} })
|
|
) {
|
|
info "skipping chroot/mount/proc as requested";
|
|
} elsif ((any { $_ eq $options->{mode} } ('root', 'unshare'))
|
|
&& !$options->{canmount}) {
|
|
warning "skipping mount proc";
|
|
} elsif ((any { $_ eq $options->{mode} } ('root', 'unshare'))
|
|
&& !-d "$options->{root}/proc") {
|
|
warning("skipping mounting of proc because the"
|
|
. " /proc directory is missing in the target");
|
|
} elsif ((any { $_ eq $options->{mode} } ('root', 'unshare'))
|
|
&& !-e "/proc") {
|
|
warning("skipping mounting /proc because"
|
|
. " /proc does not exist on the outside");
|
|
} elsif ((any { $_ eq $options->{mode} } ('root', 'unshare'))
|
|
&& !-d "/proc") {
|
|
warning("skipping mounting /proc because"
|
|
. " /proc on the outside is not a directory");
|
|
} elsif (any { $_ eq $options->{mode} } ('root', 'unshare')) {
|
|
# we don't know whether we run in root mode inside an unshared
|
|
# user namespace or as real root so we first try the real mount and
|
|
# then fall back to mounting in a way that works in unshared mode
|
|
if (
|
|
$options->{mode} eq 'root'
|
|
&& 0 == system(
|
|
'mount', '-t', 'proc', '-o', 'ro', 'proc',
|
|
"$options->{root}/proc"
|
|
)
|
|
) {
|
|
push @cleanup_tasks, sub {
|
|
# some maintainer scripts mount additional stuff into /proc
|
|
# which we need to unmount beforehand
|
|
if (
|
|
is_mountpoint(
|
|
$options->{root} . "/proc/sys/fs/binfmt_misc"
|
|
)
|
|
) {
|
|
0 == system('umount',
|
|
"$options->{root}/proc/sys/fs/binfmt_misc")
|
|
or warning(
|
|
"umount /proc/sys/fs/binfmt_misc failed: $?");
|
|
}
|
|
0 == system('umount', "$options->{root}/proc")
|
|
or warning("umount /proc failed: $?");
|
|
};
|
|
} elsif (
|
|
0 == system('mount', '-t', 'proc', 'proc',
|
|
"$options->{root}/proc")) {
|
|
push @cleanup_tasks, sub {
|
|
# since we cannot write to /etc/mtab we need --no-mtab
|
|
0 == system('umount', '--no-mtab', "$options->{root}/proc")
|
|
or warning("umount /proc failed: $?");
|
|
};
|
|
} elsif (
|
|
# if mounting proc failed, try bind-mounting it as a
|
|
# last resort
|
|
0 == system(
|
|
'mount', '-o',
|
|
'rbind', '/proc',
|
|
"$options->{root}/proc"
|
|
)
|
|
) {
|
|
warning("since mounting /proc normally failed, /proc is now "
|
|
. "bind-mounted instead");
|
|
# to make sure that changes (like unmounting) to the
|
|
# bind-mounted /proc do not affect the outside /proc, change
|
|
# all the bind-mounts under /proc to be a slave mount.
|
|
if (
|
|
0 != system('mount', '--make-rslave',
|
|
"$options->{root}/proc")) {
|
|
warning("mount --make-rslave /proc failed");
|
|
}
|
|
push @cleanup_tasks, sub {
|
|
# since we cannot write to /etc/mtab we need --no-mtab
|
|
0 == system(
|
|
'umount', '--no-mtab',
|
|
'--lazy', "$options->{root}/proc"
|
|
) or warning("umount /proc failed: $?");
|
|
};
|
|
} else {
|
|
error "mount /proc failed: $?";
|
|
}
|
|
} elsif (any { $_ eq $options->{mode} } ('fakechroot', 'chrootless')) {
|
|
# we cannot mount in fakechroot mode
|
|
} else {
|
|
error "unknown mode: $options->{mode}";
|
|
}
|
|
|
|
# prevent daemons from starting
|
|
# the directory might not exist in custom variant, for example
|
|
#
|
|
# ideally, we should use update-alternatives but we cannot rely on it
|
|
# existing inside the chroot
|
|
#
|
|
# See #911290 for more problems of this interface
|
|
if (any { $_ eq 'chroot/policy-rc.d' } @{ $options->{skip} }) {
|
|
info "skipping chroot/policy-rc.d as requested";
|
|
} else {
|
|
push @cleanup_tasks, sub {
|
|
if (-f "$options->{root}/usr/sbin/policy-rc.d") {
|
|
unlink "$options->{root}/usr/sbin/policy-rc.d"
|
|
or error "cannot unlink policy-rc.d: $!";
|
|
}
|
|
};
|
|
if (-d "$options->{root}/usr/sbin/") {
|
|
open my $fh, '>', "$options->{root}/usr/sbin/policy-rc.d"
|
|
or error "cannot open policy-rc.d: $!";
|
|
print $fh "#!/bin/sh\n";
|
|
print $fh "exit 101\n";
|
|
close $fh;
|
|
chmod 0755, "$options->{root}/usr/sbin/policy-rc.d"
|
|
or error "cannot chmod policy-rc.d: $!";
|
|
}
|
|
}
|
|
|
|
# the file might not exist if it was removed in a hook
|
|
if (any { $_ eq 'chroot/start-stop-daemon' } @{ $options->{skip} }) {
|
|
info "skipping chroot/start-stop-daemon as requested";
|
|
} else {
|
|
# $options->{root} must not be part of $ssdloc but must instead be
|
|
# evaluated at the time the cleanup is run or otherwise, when
|
|
# performing a pivot-root, the ssd location will still be prefixed
|
|
# with the chroot path even though we changed root
|
|
my $ssdloc;
|
|
if (-f "$options->{root}/sbin/start-stop-daemon") {
|
|
$ssdloc = "/sbin/start-stop-daemon";
|
|
} elsif (-f "$options->{root}/usr/sbin/start-stop-daemon") {
|
|
$ssdloc = "/usr/sbin/start-stop-daemon";
|
|
}
|
|
push @cleanup_tasks, sub {
|
|
return unless length $ssdloc;
|
|
if (-e "$options->{root}/$ssdloc.REAL") {
|
|
move(
|
|
"$options->{root}/$ssdloc.REAL",
|
|
"$options->{root}/$ssdloc"
|
|
) or error "cannot move start-stop-daemon: $!";
|
|
}
|
|
};
|
|
if (length $ssdloc) {
|
|
if (-e "$options->{root}/$ssdloc.REAL") {
|
|
error "$options->{root}/$ssdloc.REAL already exists";
|
|
}
|
|
move(
|
|
"$options->{root}/$ssdloc",
|
|
"$options->{root}/$ssdloc.REAL"
|
|
) or error "cannot move start-stop-daemon: $!";
|
|
open my $fh, '>', "$options->{root}/$ssdloc"
|
|
or error "cannot open start-stop-daemon: $!";
|
|
print $fh "#!/bin/sh\n";
|
|
print $fh
|
|
"echo \"Warning: Fake start-stop-daemon called, doing"
|
|
. " nothing\">&2\n";
|
|
close $fh;
|
|
chmod 0755, "$options->{root}/$ssdloc"
|
|
or error "cannot chmod start-stop-daemon: $!";
|
|
}
|
|
}
|
|
};
|
|
|
|
if ($@) {
|
|
error "setup_mounts failed: $@";
|
|
}
|
|
return @cleanup_tasks;
|
|
}
|
|
|
|
sub run_hooks {
|
|
my $name = shift;
|
|
my $options = shift;
|
|
my $essential_pkgs = shift;
|
|
|
|
if (scalar @{ $options->{"${name}_hook"} } == 0) {
|
|
return;
|
|
}
|
|
|
|
if ($options->{dryrun}) {
|
|
info "not running ${name}-hooks because of --dry-run";
|
|
return;
|
|
}
|
|
|
|
my @env_opts = ();
|
|
# At this point TMPDIR is set to "$options->{root}/tmp". This is to have a
|
|
# writable TMPDIR even in unshare mode. But if TMPDIR is still set when
|
|
# running hooks, then every hook script calling chroot will have to wrap
|
|
# that into an "env --unset=TMPDIR". To avoid this, we unset TMPDIR here.
|
|
# If the hook script needs a writable TMPDIR, then it can always use /tmp
|
|
# inside the chroot. This is also why we do not set a new MMDEBSTRAP_TMPDIR
|
|
# environment variable.
|
|
if (length $ENV{TMPDIR}) {
|
|
push @env_opts, '--unset=TMPDIR';
|
|
}
|
|
# The APT_CONFIG variable, if set, will confuse any manual calls to
|
|
# apt-get. If you want to use the same config used by mmdebstrap, the
|
|
# original value is stored in MMDEBSTRAP_APT_CONFIG.
|
|
if (length $ENV{APT_CONFIG}) {
|
|
push @env_opts, '--unset=APT_CONFIG';
|
|
}
|
|
if (length $ENV{APT_CONFIG}) {
|
|
push @env_opts, "MMDEBSTRAP_APT_CONFIG=$ENV{APT_CONFIG}";
|
|
}
|
|
# A hook script that wants to call mmdebstrap with --hook-helper needs to
|
|
# know how mmdebstrap was executed
|
|
push @env_opts, "MMDEBSTRAP_ARGV0=$PROGRAM_NAME";
|
|
# Storing the mode is important for hook scripts to potentially change
|
|
# their behavior depending on the mode. It's also important for when the
|
|
# hook wants to use the mmdebstrap --hook-helper.
|
|
push @env_opts, "MMDEBSTRAP_MODE=$options->{mode}";
|
|
if (defined $options->{suite}) {
|
|
push @env_opts, "MMDEBSTRAP_SUITE=$options->{suite}";
|
|
}
|
|
push @env_opts, "MMDEBSTRAP_FORMAT=$options->{format}";
|
|
# Storing the hook name is important for hook scripts to potentially change
|
|
# their behavior depending on the hook. It's also important for when the
|
|
# hook wants to use the mmdebstrap --hook-helper.
|
|
push @env_opts, "MMDEBSTRAP_HOOK=$name";
|
|
# This is the file descriptor of the socket that the mmdebstrap
|
|
# --hook-helper can write to and read from to communicate with the outside.
|
|
push @env_opts, ("MMDEBSTRAP_HOOKSOCK=" . fileno($options->{hooksock}));
|
|
# Store the verbosity of mmdebstrap so that hooks can be just as verbose
|
|
# as the mmdebstrap invocation that called them.
|
|
push @env_opts, ("MMDEBSTRAP_VERBOSITY=" . $verbosity_level);
|
|
# Store the packages given via --include in an environment variable so that
|
|
# hooks can, for example, make .deb files available inside the chroot.
|
|
{
|
|
my @escaped_includes = @{ $options->{include} };
|
|
foreach my $incl (@escaped_includes) {
|
|
# We have to encode commas so that values containing commas can
|
|
# be stored in the list. Since we encode using percent-encoding
|
|
# (urlencoding) we also have to encode the percent sign.
|
|
$incl =~ s/%/%25/g;
|
|
$incl =~ s/,/%2C/g;
|
|
}
|
|
push @env_opts,
|
|
("MMDEBSTRAP_INCLUDE=" . (join ",", @escaped_includes));
|
|
}
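# A hook that wants the original values back can undo this encoding by
# splitting on "," first and decoding afterwards, for example in shell:
#
#   echo "$MMDEBSTRAP_INCLUDE" | tr ',' '\n' | sed 's/%2C/,/g; s/%25/%/g'
#
# (decoding %2C before %25 so that an encoded percent sign cannot turn into
# a comma).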
|
|
# Give the extract hook access to the essential packages that are about to
|
|
# be installed
|
|
if ($name eq "extract" and scalar @{$essential_pkgs} > 0) {
|
|
push @env_opts,
|
|
("MMDEBSTRAP_ESSENTIAL=" . (join " ", @{$essential_pkgs}));
|
|
}
|
|
if ($options->{mode} eq 'unshare') {
|
|
push @env_opts, "container=mmdebstrap-unshare";
|
|
}
|
|
|
|
# Unset the close-on-exec flag, so that the file descriptor does not
|
|
# get closed when we exec
|
|
my $flags = fcntl($options->{hooksock}, F_GETFD, 0)
|
|
or error "fcntl F_GETFD: $!";
|
|
fcntl($options->{hooksock}, F_SETFD, $flags & ~FD_CLOEXEC)
|
|
or error "fcntl F_SETFD: $!";
|
|
|
|
{
|
|
foreach my $script (@{ $options->{"${name}_hook"} }) {
|
|
my $type = $script->[0];
|
|
$script = $script->[1];
|
|
|
|
if ($type eq "pivoted") {
|
|
info "running --chrooted-$name-hook in shell: sh -c "
|
|
. "'$script'";
|
|
my $pid = fork() // error "fork() failed: $!";
|
|
if ($pid == 0) {
|
|
# child
|
|
my @cmdprefix = ();
|
|
if ($options->{mode} eq 'fakechroot') {
|
|
# we are calling the chroot executable instead of
|
|
# chrooting the process so that fakechroot can handle
|
|
# it
|
|
@cmdprefix = ('chroot', $options->{root});
|
|
} elsif ($options->{mode} eq 'root') {
|
|
# unsharing the mount namespace is not enough for
|
|
# pivot_root to work as root (why?) unsharing the user
|
|
# namespace as well (but without remapping) makes
|
|
# pivot_root work (why??) but still makes later lazy
|
|
# umounts fail (why???). Since pivot_root is mainly
|
|
# useful for being able to run unshare mode inside
|
|
# unshare mode, we fall back to just calling chroot()
|
|
# until somebody has motivation and time to figure out
|
|
# what is going on.
|
|
chroot $options->{root}
|
|
or error "failed to chroot(): $!";
|
|
$options->{root} = "/";
|
|
chdir "/" or error "failed chdir() to /: $!";
|
|
} elsif ($options->{mode} eq 'unshare') {
|
|
0 == syscall &SYS_unshare, $CLONE_NEWNS
|
|
or error "unshare() failed: $!";
|
|
pivot_root($options->{root});
|
|
} else {
|
|
error "unknown mode: $options->{mode}";
|
|
}
|
|
0 == system(@cmdprefix, 'env', @env_opts, 'sh', '-c',
|
|
$script)
|
|
or error "command failed: $script";
|
|
exit 0;
|
|
}
|
|
waitpid($pid, 0);
|
|
$? == 0 or error "chrooted hook failed with exit code $?";
|
|
next;
|
|
}
|
|
|
|
# inode and device number of chroot before
|
|
my ($dev_before, $ino_before, undef) = stat($options->{root});
|
|
|
|
if (
|
|
$script =~ /^(
|
|
copy-in|copy-out
|
|
|tar-in|tar-out
|
|
|upload|download
|
|
|sync-in|sync-out
|
|
)\ /x
|
|
) {
|
|
info "running special hook: $script";
|
|
if ((any { $_ eq $options->{variant} } ('extract', 'custom'))
|
|
and $options->{mode} eq 'fakechroot'
|
|
and $name ne 'setup') {
|
|
info "the copy-in, copy-out, tar-in and tar-out commands"
|
|
. " in fakechroot mode might fail in"
|
|
. " extract and custom variants because there might be"
|
|
. " no tar inside the chroot";
|
|
}
|
|
|
|
my $pid = fork() // error "fork() failed: $!";
|
|
if ($pid == 0) {
|
|
# whatever the script writes on stdout is sent to the
|
|
# socket
|
|
# whatever is written to the socket, send to stdin
|
|
open(STDOUT, '>&', $options->{hooksock})
|
|
or error "cannot open STDOUT: $!";
|
|
open(STDIN, '<&', $options->{hooksock})
|
|
or error "cannot open STDIN: $!";
|
|
|
|
# Text::ParseWords::shellwords does for perl what shlex
|
|
# does for python
|
|
my @args = shellwords $script;
|
|
hookhelper($options->{root}, $options->{mode}, $name,
|
|
(join ',', @{ $options->{skip} }),
|
|
$verbosity_level, @args);
|
|
exit 0;
|
|
}
|
|
waitpid($pid, 0);
|
|
$? == 0 or error "special hook failed with exit code $?";
|
|
} elsif (-x $script || $script !~ m/[^\w@\%+=:,.\/-]/a) {
|
|
info "running --$name-hook directly: $script $options->{root}";
|
|
# execute it directly if it's an executable file
|
|
# or if there are no shell metacharacters
|
|
# (the /a regex modifier makes \w match only ASCII)
|
|
0 == system('env', @env_opts, $script, $options->{root})
|
|
or error "command failed: $script";
|
|
} else {
|
|
info "running --$name-hook in shell: sh -c '$script' exec"
|
|
. " $options->{root}";
|
|
# otherwise, wrap everything in sh -c
|
|
0 == system('env', @env_opts,
|
|
'sh', '-c', $script, 'exec', $options->{root})
|
|
or error "command failed: $script";
|
|
}
|
|
|
|
# If the chroot directory vanished, check if pivot_root was
|
|
# performed.
|
|
#
|
|
# Running pivot_root is only really useful in the customize-hooks
|
|
# because mmdebstrap uses apt from the outside to install packages
|
|
# and that will fail after pivot_root because the process doesn't
|
|
# have access to the system on the outside anymore.
|
|
if (!-e $options->{root}) {
|
|
my ($dev_root, $ino_root, undef) = stat("/");
|
|
if ($dev_before == $dev_root and $ino_before == $ino_root) {
|
|
info "detected pivot_root, changing chroot directory to /";
|
|
# the old chroot directory is now /
|
|
# the hook probably executed pivot_root
|
|
$options->{root} = "/";
|
|
chdir "/" or error "failed chdir() to /: $!";
|
|
} else {
|
|
error "chroot directory $options->{root} vanished";
|
|
}
|
|
}
|
|
}
|
|
};
|
|
|
|
# Restore flags
|
|
fcntl($options->{hooksock}, F_SETFD, $flags) or error "fcntl F_SETFD: $!";
|
|
return;
|
|
}
|
|
|
|
sub setup {
|
|
my $options = shift;
|
|
|
|
foreach my $key (sort keys %{$options}) {
|
|
my $value = $options->{$key};
|
|
if (!defined $value) {
|
|
next;
|
|
}
|
|
if (ref $value eq '') {
|
|
debug "$key: $options->{$key}";
|
|
} elsif (ref $value eq 'ARRAY') {
|
|
debug "$key: [" . (join ', ', @{$value}) . "]";
|
|
} elsif (ref $value eq 'GLOB') {
|
|
debug "$key: GLOB";
|
|
} else {
|
|
error "unknown type for key $key: " . (ref $value);
|
|
}
|
|
}
|
|
|
|
if (-e $options->{apttrusted} && !-r $options->{apttrusted}) {
|
|
warning "cannot read $options->{apttrusted}";
|
|
}
|
|
if (-e $options->{apttrustedparts} && !-r $options->{apttrustedparts}) {
|
|
warning "cannot read $options->{apttrustedparts}";
|
|
}
|
|
|
|
if (any { $_ eq 'setup' } @{ $options->{skip} }) {
|
|
info "skipping setup as requested";
|
|
} else {
|
|
run_setup($options);
|
|
}
|
|
|
|
run_hooks('setup', $options);
|
|
|
|
# apt runs dpkg from inside the chroot and directly passes the filename to
|
|
# dpkg. Hence, the included files on the outside must be present under the
|
|
# same path on the inside. If they are not, dpkg cannot find them.
|
|
if (scalar(grep { /^\// } @{ $options->{include} }) > 0) {
|
|
my $ret = 0;
|
|
foreach my $f (grep { /^\// } @{ $options->{include} }) {
|
|
next if -e "$options->{root}/$f";
|
|
warning
|
|
"path given via --include is not present inside the chroot: $f";
|
|
$ret = 1;
|
|
}
|
|
if ($ret != 0) {
|
|
warning("apt runs chrooted dpkg which needs access to the "
|
|
. "package paths given via --include inside the chroot.");
|
|
warning "maybe try running mmdebstrap with "
|
|
. "--hook-dir=/usr/share/mmdebstrap/hooks/file-mirror-automount";
|
|
}
|
|
}
|
|
|
|
if (any { $_ eq 'update' } @{ $options->{skip} }) {
|
|
info "skipping update as requested";
|
|
} else {
|
|
run_update($options);
|
|
}
|
|
|
|
(my $essential_pkgs, my $cached_debs) = run_download($options);
|
|
|
|
# in theory, we don't have to extract the packages in chrootless mode
|
|
# but we do it anyways because otherwise directory creation timestamps
|
|
# will differ compared to non-chrootless and we want to create bit-by-bit
|
|
# identical tar output
|
|
#
|
|
# FIXME: dpkg could be changed to produce the same results
|
|
run_extract($options, $essential_pkgs);
|
|
|
|
# setup mounts
|
|
my @cleanup_tasks = ();
|
|
my $cleanup = sub {
|
|
my $signal = $_[0];
|
|
while (my $task = pop @cleanup_tasks) {
|
|
$task->();
|
|
}
|
|
if ($signal) {
|
|
warning "pid $PID cought signal: $signal";
|
|
exit 1;
|
|
}
|
|
};
|
|
|
|
# we only need to setup the mounts if there is anything to do
|
|
if ( $options->{variant} ne 'custom'
|
|
or scalar @{ $options->{include} } > 0
|
|
or scalar @{ $options->{"extract_hook"} } > 0
|
|
or scalar @{ $options->{"essential_hook"} } > 0
|
|
or scalar @{ $options->{"customize_hook"} } > 0) {
|
|
local $SIG{INT} = $cleanup;
|
|
local $SIG{HUP} = $cleanup;
|
|
local $SIG{PIPE} = $cleanup;
|
|
local $SIG{TERM} = $cleanup;
|
|
|
|
@cleanup_tasks = setup_mounts($options);
|
|
}
|
|
|
|
eval {
|
|
my $chrootcmd = [];
|
|
if ($options->{variant} ne 'extract') {
|
|
if ($options->{mode} ne 'chrootless') {
|
|
$chrootcmd = run_prepare($options);
|
|
}
|
|
}
|
|
|
|
run_hooks('extract', $options, $essential_pkgs);
|
|
|
|
if ($options->{variant} ne 'extract') {
|
|
run_essential($options, $essential_pkgs, $chrootcmd, $cached_debs);
|
|
|
|
run_hooks('essential', $options);
|
|
|
|
run_install($options);
|
|
|
|
run_hooks('customize', $options);
|
|
}
|
|
};
|
|
|
|
my $msg = $@;
|
|
|
|
    # Reap potential zombies and wait for long-running background processes.
    # Otherwise they might keep resources like /dev/null busy, so that it
    # cannot be unmounted, its mountpoint (a regular file) cannot be removed
    # and the removal of device nodes in run_cleanup (if mmdebstrap is run
    # with --skip=output/dev) will fail.
|
|
if (any { $_ eq 'zombie-reaping' } @{ $options->{skip} }) {
|
|
info "skipping zombie-reaping as requested";
|
|
} else {
|
|
if (waitpid(-1, POSIX::WNOHANG) >= 0) {
|
|
info "waiting for background processes to finish...";
|
|
}
|
|
while ((my $child = waitpid(-1, 0)) > 0) {
|
|
my $status = $? >> 8;
|
|
info "PID $child exited with exit code $status";
|
|
}
|
|
}
|
|
|
|
$cleanup->(0);
|
|
if ($msg) {
|
|
error "setup failed: $msg";
|
|
}
|
|
|
|
if (any { $_ eq 'cleanup' } @{ $options->{skip} }) {
|
|
info "skipping cleanup as requested";
|
|
} else {
|
|
run_cleanup($options);
|
|
}
|
|
|
|
return;
|
|
}
|
|
|
|
sub run_setup() {
|
|
my $options = shift;
|
|
|
|
{
|
|
my @directories = (
|
|
'/etc/apt/apt.conf.d', '/etc/apt/sources.list.d',
|
|
'/etc/apt/preferences.d', '/var/cache/apt',
|
|
'/var/lib/apt/lists/partial', '/tmp'
|
|
);
|
|
# we need /var/lib/dpkg in case we need to write to /var/lib/dpkg/arch
|
|
push @directories, '/var/lib/dpkg';
|
|
# since we do not know the dpkg version inside the chroot at this
|
|
# point, we can only omit it in chrootless mode
|
|
if ($options->{mode} ne 'chrootless'
|
|
or length $options->{dpkgopts} > 0) {
|
|
push @directories, '/etc/dpkg/dpkg.cfg.d/';
|
|
}
|
|
# if dpkg and apt operate from the outside we need some more
|
|
# directories because dpkg and apt might not even be installed inside
|
|
# the chroot. Thus, the following block is not strictly necessary in
|
|
# chrootless mode. We unconditionally add it anyways, so that the
|
|
# output with and without chrootless mode is equal.
|
|
{
|
|
push @directories, '/var/log/apt';
|
|
# since we do not know the dpkg version inside the chroot at this
|
|
# point, we can only omit it in chrootless mode
|
|
if ($options->{mode} ne 'chrootless') {
|
|
push @directories, '/var/lib/dpkg/triggers',
|
|
'/var/lib/dpkg/info', '/var/lib/dpkg/alternatives',
|
|
'/var/lib/dpkg/updates';
|
|
}
|
|
}
|
|
foreach my $dir (@directories) {
|
|
if (-e "$options->{root}/$dir") {
|
|
if (!-d "$options->{root}/$dir") {
|
|
error "$dir already exists but is not a directory";
|
|
}
|
|
} else {
|
|
my $num_created = make_path "$options->{root}/$dir",
|
|
{ error => \my $err };
|
|
if ($err && @$err) {
|
|
error(
|
|
join "; ",
|
|
(map { "cannot create " . (join ": ", %{$_}) } @$err));
|
|
} elsif ($num_created == 0) {
|
|
error "cannot create $options->{root}/$dir";
|
|
}
|
|
}
|
|
}
|
|
# make sure /tmp is not 0755 like the rest
|
|
chmod 01777, "$options->{root}/tmp" or error "cannot chmod /tmp: $!";
|
|
}
|
|
|
|
# The TMPDIR set by the user or even /tmp might be inaccessible by the
|
|
# unshared user. Thus, we place all temporary files in /tmp inside the new
|
|
# rootfs.
|
|
#
|
|
# This will affect calls to tempfile() as well as runs of "apt-get update"
|
|
# which will create temporary clearsigned.message.XXXXXX files to verify
|
|
# signatures.
|
|
#
|
|
# Setting TMPDIR to inside the chroot is also necessary for when packages
|
|
# are installed with apt from outside the chroot with
|
|
# DPkg::Chroot-Directory
|
|
{
|
|
## no critic (Variables::RequireLocalizedPunctuationVars)
|
|
$ENV{"TMPDIR"} = "$options->{root}/tmp";
|
|
}
|
|
|
|
my ($conf, $tmpfile)
|
|
= tempfile("mmdebstrap.apt.conf.XXXXXXXXXXXX", TMPDIR => 1)
|
|
or error "cannot open apt.conf: $!";
|
|
print $conf "Apt::Architecture \"$options->{nativearch}\";\n";
|
|
# the host system might have configured additional architectures
|
|
# force only the native architecture
|
|
if (scalar @{ $options->{foreignarchs} } > 0) {
|
|
print $conf "Apt::Architectures { \"$options->{nativearch}\"; ";
|
|
foreach my $arch (@{ $options->{foreignarchs} }) {
|
|
print $conf "\"$arch\"; ";
|
|
}
|
|
print $conf "};\n";
|
|
} else {
|
|
print $conf "Apt::Architectures \"$options->{nativearch}\";\n";
|
|
}
|
|
print $conf "Dir \"$options->{root}\";\n";
|
|
print $conf "DPkg::Chroot-Directory \"$options->{root}\";\n";
|
|
# not needed anymore for apt 1.3 and newer
|
|
print $conf
|
|
"Dir::State::Status \"$options->{root}/var/lib/dpkg/status\";\n";
|
|
# for authentication, use the keyrings from the host
|
|
print $conf "Dir::Etc::Trusted \"$options->{apttrusted}\";\n";
|
|
print $conf "Dir::Etc::TrustedParts \"$options->{apttrustedparts}\";\n";
|
|
# apt considers itself essential. Thus, when generating an EDSP document
|
|
# for an external solver, it will add the Essential:yes field to the apt
|
|
# package stanza. This is unnecessary because we compile the set of
|
|
# packages we consider essential ourselves and for the 'essential' variant
|
|
# it would even be wrong to add apt. This workaround is only needed when
|
|
# apt is used with an external solver but doesn't hurt otherwise and we
|
|
# don't have a good way to figure out whether apt is using an external
|
|
# solver or not short of parsing the --aptopt options.
|
|
print $conf "pkgCacheGen::ForceEssential \",\";\n";
|
|
|
|
close $conf;
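    # The resulting configuration (with illustrative values for a chroot at
    # /path/to/chroot, native architecture amd64 and the host keyrings)
    # looks roughly like this:
    #
    #   Apt::Architecture "amd64";
    #   Apt::Architectures "amd64";
    #   Dir "/path/to/chroot";
    #   DPkg::Chroot-Directory "/path/to/chroot";
    #   Dir::State::Status "/path/to/chroot/var/lib/dpkg/status";
    #   Dir::Etc::Trusted "/etc/apt/trusted.gpg";
    #   Dir::Etc::TrustedParts "/etc/apt/trusted.gpg.d";
    #   pkgCacheGen::ForceEssential ",";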
|
|
|
|
# We put certain configuration items in their own configuration file
|
|
# because they have to be valid for apt invocation from outside as well as
|
|
# from inside the chroot.
|
|
# The config filename is chosen such that any settings in it will be
|
|
# overridden by what the user specified with --aptopt.
|
|
if (!-e "$options->{root}/etc/apt/apt.conf.d/00mmdebstrap") {
|
|
open my $fh, '>', "$options->{root}/etc/apt/apt.conf.d/00mmdebstrap"
|
|
or error "cannot open /etc/apt/apt.conf.d/00mmdebstrap: $!";
|
|
print $fh "Apt::Install-Recommends false;\n";
|
|
print $fh "Acquire::Languages \"none\";\n";
|
|
close $fh;
|
|
}
|
|
|
|
# apt-get update requires this
|
|
if (!-e "$options->{root}/var/lib/dpkg/status") {
|
|
open my $fh, '>', "$options->{root}/var/lib/dpkg/status"
|
|
or error "failed to open(): $!";
|
|
close $fh;
|
|
}
|
|
|
|
# In theory, /var/lib/dpkg/arch is only useful if there are foreign
|
|
# architectures configured or if the architecture of a chrootless chroot
|
|
# is different from the native architecture outside the chroot.
|
|
# We nevertheless always add /var/lib/dpkg/arch to make a chroot built the
|
|
# normal way bit-by-bit identical to a foreign arch chroot built in
|
|
# chrootless mode.
|
|
chomp(my $hostarch = `dpkg --print-architecture`);
|
|
if ((!-e "$options->{root}/var/lib/dpkg/arch")) {
|
|
open my $fh, '>', "$options->{root}/var/lib/dpkg/arch"
|
|
or error "cannot open /var/lib/dpkg/arch: $!";
|
|
print $fh "$options->{nativearch}\n";
|
|
foreach my $arch (@{ $options->{foreignarchs} }) {
|
|
print $fh "$arch\n";
|
|
}
|
|
close $fh;
|
|
}
|
|
|
|
if (length $options->{aptopts} > 0
|
|
and (!-e "$options->{root}/etc/apt/apt.conf.d/99mmdebstrap")) {
|
|
open my $fh, '>', "$options->{root}/etc/apt/apt.conf.d/99mmdebstrap"
|
|
or error "cannot open /etc/apt/apt.conf.d/99mmdebstrap: $!";
|
|
print $fh $options->{aptopts};
|
|
close $fh;
|
|
if ($verbosity_level >= 3) {
|
|
debug "content of /etc/apt/apt.conf.d/99mmdebstrap:";
|
|
copy("$options->{root}/etc/apt/apt.conf.d/99mmdebstrap", \*STDERR);
|
|
}
|
|
}
|
|
|
|
if (length $options->{dpkgopts} > 0
|
|
and (!-e "$options->{root}/etc/dpkg/dpkg.cfg.d/99mmdebstrap")) {
|
|
# FIXME: in chrootless mode, dpkg will only read the configuration
|
|
# from the host -- see #808203
|
|
if ($options->{mode} eq 'chrootless') {
|
|
            warning('dpkg is unable to read an alternative configuration in'
                . ' chrootless mode -- see Debian bug #808203');
|
|
}
|
|
open my $fh, '>', "$options->{root}/etc/dpkg/dpkg.cfg.d/99mmdebstrap"
|
|
or error "cannot open /etc/dpkg/dpkg.cfg.d/99mmdebstrap: $!";
|
|
print $fh $options->{dpkgopts};
|
|
close $fh;
|
|
if ($verbosity_level >= 3) {
|
|
debug "content of /etc/dpkg/dpkg.cfg.d/99mmdebstrap:";
|
|
copy("$options->{root}/etc/dpkg/dpkg.cfg.d/99mmdebstrap",
|
|
\*STDERR);
|
|
}
|
|
}
|
|
|
|
if (!-e "$options->{root}/etc/fstab") {
|
|
open my $fh, '>', "$options->{root}/etc/fstab"
|
|
or error "cannot open fstab: $!";
|
|
print $fh "# UNCONFIGURED FSTAB FOR BASE SYSTEM\n";
|
|
close $fh;
|
|
chmod 0644, "$options->{root}/etc/fstab"
|
|
or error "cannot chmod fstab: $!";
|
|
}
|
|
|
|
# write /etc/apt/sources.list and files in /etc/apt/sources.list.d/
|
|
if (scalar @{ $options->{sourceslists} } > 0) {
|
|
my $firstentry = $options->{sourceslists}->[0];
|
|
# if the first sources.list entry is of one-line type and without
|
|
# explicit filename, then write out an actual /etc/apt/sources.list
|
|
# otherwise everything goes into /etc/apt/sources.list.d
|
|
my $fname;
|
|
if ($firstentry->{type} eq 'one-line'
|
|
&& !defined $firstentry->{fname}) {
|
|
$fname = "$options->{root}/etc/apt/sources.list";
|
|
} else {
|
|
$fname = "$options->{root}/etc/apt/sources.list.d/0000";
|
|
if (defined $firstentry->{fname}) {
|
|
$fname .= $firstentry->{fname};
|
|
if ( $firstentry->{fname} !~ /\.list/
|
|
&& $firstentry->{fname} !~ /\.sources/) {
|
|
if ($firstentry->{type} eq 'one-line') {
|
|
$fname .= '.list';
|
|
} elsif ($firstentry->{type} eq 'deb822') {
|
|
$fname .= '.sources';
|
|
} else {
|
|
error "invalid type: $firstentry->{type}";
|
|
}
|
|
}
|
|
} else {
|
|
# if no filename is given, then this must be a deb822 file
|
|
# because if it was a one-line type file, then it would've been
|
|
# written to /etc/apt/sources.list
|
|
$fname .= 'main.sources';
|
|
}
|
|
}
|
|
if (!-e $fname) {
|
|
open my $fh, '>', "$fname" or error "cannot open $fname: $!";
|
|
print $fh $firstentry->{content};
|
|
close $fh;
|
|
}
|
|
# everything else goes into /etc/apt/sources.list.d/
|
|
for (my $i = 1 ; $i < scalar @{ $options->{sourceslists} } ; $i++) {
|
|
my $entry = $options->{sourceslists}->[$i];
|
|
my $fname = "$options->{root}/etc/apt/sources.list.d/"
|
|
. sprintf("%04d", $i);
|
|
if (defined $entry->{fname}) {
|
|
$fname .= $entry->{fname};
|
|
if ( $entry->{fname} !~ /\.list/
|
|
&& $entry->{fname} !~ /\.sources/) {
|
|
if ($entry->{type} eq 'one-line') {
|
|
$fname .= '.list';
|
|
} elsif ($entry->{type} eq 'deb822') {
|
|
$fname .= '.sources';
|
|
} else {
|
|
error "invalid type: $entry->{type}";
|
|
}
|
|
}
|
|
} else {
|
|
if ($entry->{type} eq 'one-line') {
|
|
$fname .= 'main.list';
|
|
} elsif ($entry->{type} eq 'deb822') {
|
|
$fname .= 'main.sources';
|
|
} else {
|
|
error "invalid type: $entry->{type}";
|
|
}
|
|
}
|
|
if (!-e $fname) {
|
|
open my $fh, '>', "$fname" or error "cannot open $fname: $!";
|
|
print $fh $entry->{content};
|
|
close $fh;
|
|
}
|
|
}
|
|
}
|
|
|
|
# allow network access from within
|
|
foreach my $file ("/etc/resolv.conf", "/etc/hostname") {
|
|
if (-e $file && !-e "$options->{root}/$file") {
|
|
# this will create a new file with 644 permissions and copy
|
|
# contents only even if $file was a symlink
|
|
copy($file, "$options->{root}/$file")
|
|
or error "cannot copy $file: $!";
|
|
# if the source was a regular file, preserve the permissions
|
|
if (-f $file) {
|
|
my $mode = (stat($file))[2];
|
|
$mode &= oct(7777); # mask off bits that aren't the mode
|
|
chmod $mode, "$options->{root}/$file"
|
|
or error "cannot chmod $file: $!";
|
|
}
|
|
} elsif (-e $file && -e "$options->{root}/$file") {
|
|
info "rootfs alreday contains $file";
|
|
} else {
|
|
warning("Host system does not have a $file to copy into the"
|
|
. " rootfs.");
|
|
}
|
|
}
|
|
|
|
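    # Create the static device nodes listed in @linuxdevfiles. A character
    # or block special entry is created with the equivalent of this shell
    # command (illustrative):
    #
    #   mknod "$root/dev/NAME" c MAJOR MINOR && chmod MODE "$root/dev/NAME"
    #
    # Symlink entries are created with symlink() and directory entries with
    # make_path(); this only runs if mmdebstrap determined earlier that
    # mknod works (havemknod).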
if ($options->{havemknod}) {
|
|
foreach my $file (@linuxdevfiles) {
|
|
my ($fname, $mode, $type, $linkname, $devmajor, $devminor, undef)
|
|
= @{$file};
|
|
if ($type eq '0') { # normal file
|
|
error "type 0 not implemented";
|
|
} elsif ($type eq '1') { # hardlink
|
|
error "type 1 not implemented";
|
|
} elsif ($type eq '2') { # symlink
|
|
if ( $options->{mode} eq 'fakechroot'
|
|
and $linkname =~ /^\/proc/) {
|
|
# there is no /proc in fakechroot mode
|
|
next;
|
|
}
|
|
symlink $linkname, "$options->{root}/$fname"
|
|
or error "cannot create symlink $fname";
|
|
next; # chmod cannot work on symlinks
|
|
} elsif ($type eq '3') { # character special
|
|
0 == system('mknod', "$options->{root}/$fname", 'c',
|
|
$devmajor, $devminor)
|
|
or error "mknod failed: $?";
|
|
} elsif ($type eq '4') { # block special
|
|
0 == system('mknod', "$options->{root}/$fname", 'b',
|
|
$devmajor, $devminor)
|
|
or error "mknod failed: $?";
|
|
} elsif ($type eq '5') { # directory
|
|
if (-e "$options->{root}/$fname") {
|
|
if (!-d "$options->{root}/$fname") {
|
|
error "$fname already exists but is not a directory";
|
|
}
|
|
} else {
|
|
my $num_created = make_path "$options->{root}/$fname",
|
|
{ error => \my $err };
|
|
if ($err && @$err) {
|
|
error(
|
|
join "; ",
|
|
(
|
|
map { "cannot create " . (join ": ", %{$_}) }
|
|
@$err
|
|
));
|
|
} elsif ($num_created == 0) {
|
|
error "cannot create $options->{root}/$fname";
|
|
}
|
|
}
|
|
} else {
|
|
error "unsupported type: $type";
|
|
}
|
|
chmod $mode, "$options->{root}/$fname"
|
|
or error "cannot chmod $fname: $!";
|
|
}
|
|
}
|
|
|
|
# we tell apt about the configuration via a config file passed via the
|
|
# APT_CONFIG environment variable instead of using the --option command
|
|
# line arguments because configuration settings like Dir::Etc have already
|
|
# been evaluated at the time that apt takes its command line arguments
|
|
# into account.
|
|
{
|
|
## no critic (Variables::RequireLocalizedPunctuationVars)
|
|
$ENV{"APT_CONFIG"} = "$tmpfile";
|
|
}
|
|
# we have to make the config file world readable so that a possible
|
|
# /usr/lib/apt/solvers/apt process which is run by the _apt user is also
|
|
# able to read it
|
|
chmod 0644, "$tmpfile" or error "cannot chmod $tmpfile: $!";
|
|
if ($verbosity_level >= 3) {
|
|
0 == system('apt-get', '--version')
|
|
or error "apt-get --version failed: $?";
|
|
0 == system('apt-config', 'dump') or error "apt-config failed: $?";
|
|
debug "content of $tmpfile:";
|
|
copy($tmpfile, \*STDERR);
|
|
}
|
|
|
|
if ($options->{mode} ne 'fakechroot') {
|
|
# Apt dropping privileges to another user than root is not useful in
|
|
# fakechroot mode because all users are faked and thus there is no real
|
|
# privilege difference anyways. We could set APT::Sandbox::User "root"
|
|
# in fakechroot mode but we don't because if we would, then
|
|
# /var/cache/apt/archives/partial/ and /var/lib/apt/lists/partial/
|
|
# would not be owned by the _apt user if mmdebstrap was run in
|
|
# fakechroot mode.
|
|
#
|
|
# when apt-get update is run by the root user, then apt will attempt to
|
|
# drop privileges to the _apt user. This will fail if the _apt user
|
|
# does not have permissions to read the root directory. In that case,
|
|
# we have to disable apt sandboxing. This can for example happen in
|
|
# root mode when the path of the chroot is not in a world-readable
|
|
# location.
|
|
my $partial = '/var/lib/apt/lists/partial';
|
|
my @testcmd = (
|
|
'/usr/lib/apt/apt-helper', 'drop-privs', '--', 'test',
|
|
'-r', "$options->{root}$partial"
|
|
);
|
|
my $pid = fork() // error "fork() failed: $!";
|
|
if ($pid == 0) {
|
|
open(STDOUT, '>', '/dev/null')
|
|
or error "cannot open /dev/null for writing: $!";
|
|
open(STDERR, '>', '/dev/null')
|
|
or error "cannot open /dev/null for writing: $!";
|
|
exec { $testcmd[0] } @testcmd
|
|
or error("cannot exec " . (join " ", @testcmd) . ": $!");
|
|
}
|
|
waitpid $pid, 0;
|
|
if ($? != 0) {
|
|
warning "Download is performed unsandboxed as root as file"
|
|
. " $options->{root}$partial couldn't be accessed by user _apt";
|
|
open my $fh, '>>', $tmpfile
|
|
or error "cannot open $tmpfile for appending: $!";
|
|
print $fh "APT::Sandbox::User \"root\";\n";
|
|
close $fh;
|
|
}
|
|
}
|
|
|
|
return;
|
|
}
|
|
|
|
sub run_update() {
|
|
my $options = shift;
|
|
|
|
my $aptopts = {
|
|
ARGV => ['apt-get', 'update', '--error-on=any'],
|
|
CHDIR => $options->{root},
|
|
};
|
|
|
|
# Maybe "apt-get update" was already run in the setup hook? If yes, skip
|
|
    # running it here. We are overly strict on purpose because it is better
    # to run it twice by accident than not at all.
|
|
if ( !-d "$options->{root}/var/lib/apt/lists/auxfiles"
|
|
|| !-d "$options->{root}/var/lib/apt/lists/partial"
|
|
|| !-e "$options->{root}/var/lib/apt/lists/lock"
|
|
|| !-e "$options->{root}/var/cache/apt/pkgcache.bin"
|
|
|| !-e "$options->{root}/var/cache/apt/srcpkgcache.bin") {
|
|
info "running apt-get update...";
|
|
run_apt_progress($aptopts);
|
|
} else {
|
|
info "skipping apt-get update because it was already run";
|
|
}
|
|
|
|
# check if anything was downloaded at all
|
|
{
|
|
        open(my $fh, '-|', 'apt-get', 'indextargets')
          // error "failed to fork(): $!";
|
|
chomp(
|
|
my $indextargets = do { local $/; <$fh> }
|
|
);
|
|
close $fh;
|
|
if ($indextargets eq '') {
|
|
warning("apt-get indextargets output is empty");
|
|
if (scalar @{ $options->{sourceslists} } == 0) {
|
|
warning "no known apt sources.list entry";
|
|
}
|
|
for my $list (@{ $options->{sourceslists} }) {
|
|
if (defined $list->{fname}) {
|
|
info("Filename: $list->{fname}");
|
|
}
|
|
info("Type: $list->{type}");
|
|
info("Content:");
|
|
for my $line (split "\n", $list->{content}) {
|
|
info(" $line");
|
|
}
|
|
}
|
|
open(my $fh, '-|', 'apt-cache', 'policy')
|
|
// error "failed to fork(): $!";
|
|
while (my $line = <$fh>) {
|
|
chomp $line;
|
|
info $line;
|
|
}
|
|
close $fh;
|
|
my $msg
|
|
= "apt-get update did not find any indices "
|
|
. "for architecture '$options->{nativearch}' in ";
|
|
if (length $options->{suite}) {
|
|
$msg .= "suite '$options->{suite}'";
|
|
} else {
|
|
$msg .= "the configured apt sources";
|
|
}
|
|
error $msg;
|
|
}
|
|
}
|
|
|
|
return;
|
|
}
|
|
|
|
sub run_download() {
|
|
my $options = shift;
|
|
|
|
# In the future we want to replace downloading packages with "apt-get
|
|
# install" and installing them with dpkg by just installing the essential
|
|
# packages with apt from the outside with DPkg::Chroot-Directory.
|
|
# We are not doing that because then the preinst script of base-passwd will
|
|
# not be called early enough and packages will fail to install because they
|
|
# are missing /etc/passwd.
|
|
my @cached_debs = ();
|
|
my @dl_debs = ();
|
|
if (
|
|
!$options->{dryrun}
|
|
&& ((none { $_ eq $options->{variant} } ('extract', 'custom'))
|
|
|| scalar @{ $options->{include} } != 0)
|
|
&& -d "$options->{root}/var/cache/apt/archives/"
|
|
) {
|
|
my $apt_archives = "/var/cache/apt/archives/";
|
|
opendir my $dh, "$options->{root}/$apt_archives"
|
|
or error "cannot read $apt_archives";
|
|
while (my $deb = readdir $dh) {
|
|
if ($deb !~ /\.deb$/) {
|
|
next;
|
|
}
|
|
if (!-f "$options->{root}/$apt_archives/$deb") {
|
|
next;
|
|
}
|
|
push @cached_debs, $deb;
|
|
}
|
|
closedir $dh;
|
|
}
|
|
|
|
# To figure out the right package set for the apt variant we can use:
|
|
# $ apt-get dist-upgrade -o dir::state::status=/dev/null
|
|
    # This is because that variant only contains essential packages and
    # apt, and libapt treats apt as essential. If we want to install less
|
|
# (essential variant) then we have to compute the package set ourselves.
|
|
# Same if we want to install priority based variants.
|
|
if (any { $_ eq $options->{variant} } ('extract', 'custom')) {
|
|
if (scalar @{ $options->{include} } == 0) {
|
|
info "nothing to download -- skipping...";
|
|
return ([], \@cached_debs);
|
|
}
|
|
my @apt_argv = ('install', @{ $options->{include} });
|
|
|
|
@dl_debs = run_apt_download_progress({
|
|
APT_ARGV => [@apt_argv],
|
|
dryrun => $options->{dryrun},
|
|
},
|
|
);
|
|
} elsif (any { $_ eq $options->{variant} }
|
|
('essential', 'apt', 'standard', 'important', 'required', 'buildd')) {
|
|
# 2021-06-07, #debian-apt on OFTC, times in UTC+2
|
|
# 17:27 < DonKult> (?essential includes 'apt' through)
|
|
# 17:30 < josch> DonKult: no, because pkgCacheGen::ForceEssential ",";
|
|
# 17:32 < DonKult> touché
|
|
@dl_debs = run_apt_download_progress({
|
|
APT_ARGV => [
|
|
'install',
|
|
'?narrow('
|
|
. (
|
|
length($options->{suite})
|
|
? '?or(?archive(^'
|
|
. $options->{suite}
|
|
. '$),?codename(^'
|
|
. $options->{suite} . '$)),'
|
|
: ''
|
|
)
|
|
. '?architecture('
|
|
. $options->{nativearch}
|
|
. '),?essential)'
|
|
],
|
|
dryrun => $options->{dryrun},
|
|
},
|
|
);
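        # With an illustrative suite "bookworm" and native architecture
        # amd64, the apt pattern built above expands to:
        #
        #   ?narrow(?or(?archive(^bookworm$),?codename(^bookworm$)),
        #           ?architecture(amd64),?essential)
        #
        # i.e. all Essential:yes packages of the native architecture from
        # the requested suite.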
|
|
} else {
|
|
error "unknown variant: $options->{variant}";
|
|
}
|
|
|
|
my @essential_pkgs;
|
|
# strip the chroot directory from the filenames
|
|
foreach my $deb (@dl_debs) {
|
|
# if filename does not start with chroot directory then the user
|
|
# might've used a file:// mirror and we check whether the path is
|
|
# accessible inside the chroot
|
|
if (rindex $deb, $options->{root}, 0) {
|
|
if (!-e "$options->{root}/$deb") {
|
|
error "package file $deb not accessible from chroot directory"
|
|
. " -- use copy:// instead of file:// or a bind-mount. You"
|
|
. " can also try using --hook-dir=/usr/share/mmdebstrap/"
|
|
. "hooks/file-mirror-automount to automatically create"
|
|
. " bind-mounts or copy the files as necessary.";
|
|
}
|
|
push @essential_pkgs, $deb;
|
|
next;
|
|
}
|
|
# filename starts with chroot directory, strip it off
|
|
# this is the normal case
|
|
if (!-e $deb) {
|
|
error "cannot find package file $deb";
|
|
}
|
|
push @essential_pkgs, substr($deb, length($options->{root}));
|
|
}
|
|
|
|
return (\@essential_pkgs, \@cached_debs);
|
|
}
|
|
|
|
sub run_extract() {
|
|
my $options = shift;
|
|
my $essential_pkgs = shift;
|
|
|
|
if ($options->{dryrun}) {
|
|
info "skip extracting packages because of --dry-run";
|
|
return;
|
|
}
|
|
|
|
if (scalar @{$essential_pkgs} == 0) {
|
|
info "nothing to extract -- skipping...";
|
|
return;
|
|
}
|
|
|
|
info "extracting archives...";
|
|
print_progress 0.0;
|
|
my $counter = 0;
|
|
my $total = scalar @{$essential_pkgs};
|
|
foreach my $deb (@{$essential_pkgs}) {
|
|
$counter += 1;
|
|
|
|
my $tarfilter;
|
|
my @tarfilterargs;
|
|
# if the path-excluded option was added to the dpkg config,
|
|
# insert the tarfilter between dpkg-deb and tar
|
|
if (-e "$options->{root}/etc/dpkg/dpkg.cfg.d/99mmdebstrap") {
|
|
open(my $fh, '<',
|
|
"$options->{root}/etc/dpkg/dpkg.cfg.d/99mmdebstrap")
|
|
or error "cannot open /etc/dpkg/dpkg.cfg.d/99mmdebstrap: $!";
|
|
my @matches = grep { /^path-(?:exclude|include)=/ } <$fh>;
|
|
close $fh;
|
|
chop @matches; # remove trailing newline
|
|
@tarfilterargs = map { "--" . $_ } @matches;
|
|
}
|
|
if (scalar @tarfilterargs > 0) {
|
|
if (-x "./tarfilter") {
|
|
$tarfilter = "./tarfilter";
|
|
} else {
|
|
$tarfilter = "mmtarfilter";
|
|
}
|
|
}
|
|
|
|
my $dpkg_writer;
|
|
my $tar_reader;
|
|
my $filter_reader;
|
|
my $filter_writer;
|
|
if (scalar @tarfilterargs > 0) {
|
|
pipe $filter_reader, $dpkg_writer or error "pipe failed: $!";
|
|
pipe $tar_reader, $filter_writer or error "pipe failed: $!";
|
|
} else {
|
|
pipe $tar_reader, $dpkg_writer or error "pipe failed: $!";
|
|
}
|
|
# not using dpkg-deb --extract as that would replace the
|
|
# merged-usr symlinks with plain directories
|
|
# even after switching from pre-merging to post-merging, dpkg-deb
|
|
# will ignore filter rules from dpkg.cfg.d
|
|
# https://bugs.debian.org/989602
|
|
# not using dpkg --unpack because that would try running preinst
|
|
# maintainer scripts
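        # The pipeline set up below is roughly equivalent to this shell
        # command (illustrative):
        #
        #   dpkg-deb --fsys-tarfile pkg.deb \
        #     | mmtarfilter --path-exclude=... \
        #     | tar -C "$root" --keep-directory-symlink --extract --file -
        #
        # where the tarfilter step is only inserted if path-exclude or
        # path-include options were found in dpkg.cfg.d/99mmdebstrap.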
|
|
my $pid1 = fork() // error "fork() failed: $!";
|
|
if ($pid1 == 0) {
|
|
open(STDOUT, '>&', $dpkg_writer) or error "cannot open STDOUT: $!";
|
|
close($tar_reader) or error "cannot close tar_reader: $!";
|
|
if (scalar @tarfilterargs > 0) {
|
|
close($filter_reader)
|
|
or error "cannot close filter_reader: $!";
|
|
close($filter_writer)
|
|
or error "cannot close filter_writer: $!";
|
|
}
|
|
debug("running dpkg-deb --fsys-tarfile $options->{root}/$deb");
|
|
eval { Devel::Cover::set_coverage("none") } if $is_covering;
|
|
exec 'dpkg-deb', '--fsys-tarfile', "$options->{root}/$deb";
|
|
}
|
|
my $pid2;
|
|
if (scalar @tarfilterargs > 0) {
|
|
$pid2 = fork() // error "fork() failed: $!";
|
|
if ($pid2 == 0) {
|
|
open(STDIN, '<&', $filter_reader)
|
|
or error "cannot open STDIN: $!";
|
|
open(STDOUT, '>&', $filter_writer)
|
|
or error "cannot open STDOUT: $!";
|
|
close($dpkg_writer) or error "cannot close dpkg_writer: $!";
|
|
close($tar_reader) or error "cannot close tar_reader: $!";
|
|
debug("running $tarfilter " . (join " ", @tarfilterargs));
|
|
eval { Devel::Cover::set_coverage("none") } if $is_covering;
|
|
exec $tarfilter, @tarfilterargs;
|
|
}
|
|
}
|
|
my $pid3 = fork() // error "fork() failed: $!";
|
|
if ($pid3 == 0) {
|
|
open(STDIN, '<&', $tar_reader) or error "cannot open STDIN: $!";
|
|
close($dpkg_writer) or error "cannot close dpkg_writer: $!";
|
|
if (scalar @tarfilterargs > 0) {
|
|
close($filter_reader)
|
|
or error "cannot close filter_reader: $!";
|
|
close($filter_writer)
|
|
or error "cannot close filter_writer: $!";
|
|
}
|
|
debug( "running tar -C $options->{root}"
|
|
. " --keep-directory-symlink --extract --file -");
|
|
eval { Devel::Cover::set_coverage("none") } if $is_covering;
|
|
exec 'tar', '-C', $options->{root},
|
|
'--keep-directory-symlink', '--extract', '--file', '-';
|
|
}
|
|
close($dpkg_writer) or error "cannot close dpkg_writer: $!";
|
|
close($tar_reader) or error "cannot close tar_reader: $!";
|
|
if (scalar @tarfilterargs > 0) {
|
|
close($filter_reader) or error "cannot close filter_reader: $!";
|
|
close($filter_writer) or error "cannot close filter_writer: $!";
|
|
}
|
|
waitpid($pid1, 0);
|
|
$? == 0 or error "dpkg-deb --fsys-tarfile failed: $?";
|
|
if (scalar @tarfilterargs > 0) {
|
|
waitpid($pid2, 0);
|
|
$? == 0 or error "tarfilter failed: $?";
|
|
}
|
|
waitpid($pid3, 0);
|
|
$? == 0 or error "tar --extract failed: $?";
|
|
print_progress($counter / $total * 100, "extracting");
|
|
}
|
|
print_progress "done";
|
|
|
|
return;
|
|
}
|
|
|
|
sub run_prepare {
|
|
my $options = shift;
|
|
|
|
if ($options->{mode} eq 'fakechroot') {
|
|
# this borrows from and extends
|
|
# /etc/fakechroot/debootstrap.env and
|
|
# /etc/fakechroot/chroot.env
|
|
{
|
|
my %subst = (
|
|
chroot => "/usr/sbin/chroot.fakechroot",
|
|
mkfifo => "/bin/true",
|
|
ldconfig => (getcwd() . '/ldconfig.fakechroot'),
|
|
ldd => "/usr/bin/ldd.fakechroot",
|
|
ischroot => "/bin/true"
|
|
);
|
|
if (!-x $subst{ldconfig}) {
|
|
$subst{ldconfig}
|
|
= '/usr/libexec/mmdebstrap/ldconfig.fakechroot';
|
|
}
|
|
my %mergedusrmap = (
|
|
"/bin" => "/usr/bin",
|
|
"/sbin" => "/usr/sbin",
|
|
"/usr/bin/" => "/bin",
|
|
"/usr/sbin" => "/sbin"
|
|
);
|
|
my %fakechrootsubst;
|
|
foreach my $d (split ':', $ENV{PATH}) {
|
|
                foreach my $k (sort keys %subst) {
|
|
my $mapped_path = $mergedusrmap{$d} // $d;
|
|
next if !-e "$d/$k" && !-e "$mapped_path/$k";
|
|
$fakechrootsubst{"$d/$k=$subst{$k}"} = 1;
|
|
$fakechrootsubst{"$mapped_path/$k=$subst{$k}"} = 1;
|
|
}
|
|
}
|
|
if (defined $ENV{FAKECHROOT_CMD_SUBST}
|
|
&& $ENV{FAKECHROOT_CMD_SUBST} ne "") {
|
|
foreach my $e (split /:/, $ENV{FAKECHROOT_CMD_SUBST}) {
|
|
$fakechrootsubst{$e} = 1;
|
|
}
|
|
}
|
|
## no critic (Variables::RequireLocalizedPunctuationVars)
|
|
$ENV{FAKECHROOT_CMD_SUBST} = join ':',
|
|
(sort keys %fakechrootsubst);
|
|
}
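        # The resulting FAKECHROOT_CMD_SUBST is a colon-separated list of
        # original=replacement pairs, for example (illustrative):
        #
        #   /usr/bin/ldd=/usr/bin/ldd.fakechroot:/usr/sbin/chroot=/usr/sbin/chroot.fakechroot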
|
|
if (defined $ENV{FAKECHROOT_EXCLUDE_PATH}
|
|
&& $ENV{FAKECHROOT_EXCLUDE_PATH} ne "") {
|
|
## no critic (Variables::RequireLocalizedPunctuationVars)
|
|
$ENV{FAKECHROOT_EXCLUDE_PATH}
|
|
= "$ENV{FAKECHROOT_EXCLUDE_PATH}:/dev:/proc:/sys";
|
|
} else {
|
|
## no critic (Variables::RequireLocalizedPunctuationVars)
|
|
$ENV{FAKECHROOT_EXCLUDE_PATH} = '/dev:/proc:/sys';
|
|
}
|
|
# workaround for long unix socket path if FAKECHROOT_BASE
|
|
# exceeds the limit of 108 bytes
|
|
{
|
|
## no critic (Variables::RequireLocalizedPunctuationVars)
|
|
$ENV{FAKECHROOT_AF_UNIX_PATH} = "/tmp";
|
|
}
|
|
{
|
|
my @ldlibpath = ();
|
|
if (defined $ENV{LD_LIBRARY_PATH}
|
|
&& $ENV{LD_LIBRARY_PATH} ne "") {
|
|
push @ldlibpath, (split /:/, $ENV{LD_LIBRARY_PATH});
|
|
}
|
|
# FIXME: workaround allowing installation of systemd should
|
|
# live in fakechroot, see #917920
|
|
push @ldlibpath, "$options->{root}/lib/systemd";
|
|
my $parse_ld_so_conf;
|
|
$parse_ld_so_conf = sub {
|
|
foreach my $conf (@_) {
|
|
next if !-r $conf;
|
|
open my $fh, '<', "$conf" or error "can't read $conf: $!";
|
|
while (my $line = <$fh>) {
|
|
chomp $line;
|
|
if ($line eq "") {
|
|
next;
|
|
}
|
|
if ($line =~ /^#/) {
|
|
next;
|
|
}
|
|
if ($line =~ /include (.*)/) {
|
|
$parse_ld_so_conf->(glob("$options->{root}/$1"));
|
|
next;
|
|
}
|
|
if (!-d "$options->{root}/$line") {
|
|
next;
|
|
}
|
|
push @ldlibpath, "$options->{root}/$line";
|
|
}
|
|
close $fh;
|
|
}
|
|
};
|
|
if (-e "$options->{root}/etc/ld.so.conf") {
|
|
$parse_ld_so_conf->("$options->{root}/etc/ld.so.conf");
|
|
}
|
|
## no critic (Variables::RequireLocalizedPunctuationVars)
|
|
$ENV{LD_LIBRARY_PATH} = join ':', @ldlibpath;
|
|
}
|
|
}
|
|
|
|
# make sure that APT_CONFIG and TMPDIR are not set when executing
|
|
# anything inside the chroot
|
|
my @chrootcmd = ('env', '--unset=APT_CONFIG', '--unset=TMPDIR');
|
|
if (any { $_ eq $options->{mode} } ('root', 'unshare', 'fakechroot')) {
|
|
push @chrootcmd, ('chroot', $options->{root});
|
|
} else {
|
|
error "unknown mode: $options->{mode}";
|
|
}
|
|
|
|
# foreign architecture setup for fakechroot mode
|
|
if (defined $options->{qemu} && $options->{mode} eq 'fakechroot') {
|
|
# Make sure that the fakeroot and fakechroot shared libraries exist for
|
|
# the right architecture
|
|
        open(my $fh, '-|', 'dpkg-architecture', '-a',
            $options->{nativearch}, '-qDEB_HOST_MULTIARCH')
          // error "failed to fork(): $!";
|
|
chomp(
|
|
my $deb_host_multiarch = do { local $/; <$fh> }
|
|
);
|
|
close $fh;
|
|
if (($? != 0) or (!$deb_host_multiarch)) {
|
|
error "dpkg-architecture failed: $?";
|
|
}
|
|
my $fakechrootdir = "/usr/lib/$deb_host_multiarch/fakechroot";
|
|
if (!-e "$fakechrootdir/libfakechroot.so") {
|
|
error "$fakechrootdir/libfakechroot.so doesn't exist."
|
|
. " Install libfakechroot:$options->{nativearch}"
|
|
. " outside the chroot";
|
|
}
|
|
my $fakerootdir = "/usr/lib/$deb_host_multiarch/libfakeroot";
|
|
if (!-e "$fakerootdir/libfakeroot-sysv.so") {
|
|
error "$fakerootdir/libfakeroot-sysv.so doesn't exist."
|
|
. " Install libfakeroot:$options->{nativearch}"
|
|
. " outside the chroot";
|
|
}
|
|
|
|
# The rest of this block sets environment variables, so we have to add
|
|
# the "no critic" statement to stop perlcritic from complaining about
|
|
# setting global variables
|
|
## no critic (Variables::RequireLocalizedPunctuationVars)
|
|
# fakechroot only fills LD_LIBRARY_PATH with the directories of the
|
|
# host's architecture. We append the directories of the chroot
|
|
# architecture.
|
|
$ENV{LD_LIBRARY_PATH}
|
|
= "$ENV{LD_LIBRARY_PATH}:$fakechrootdir:$fakerootdir";
|
|
# The binfmt support on the outside is used, so qemu needs to know
|
|
# where it has to look for shared libraries
|
|
if (defined $ENV{QEMU_LD_PREFIX}
|
|
&& $ENV{QEMU_LD_PREFIX} ne "") {
|
|
$ENV{QEMU_LD_PREFIX} = "$ENV{QEMU_LD_PREFIX}:$options->{root}";
|
|
} else {
|
|
$ENV{QEMU_LD_PREFIX} = $options->{root};
|
|
}
|
|
}
|
|
|
|
# some versions of coreutils use the renameat2 system call in mv.
|
|
# This breaks certain versions of fakechroot. Here we do
|
|
# a sanity check and warn the user in case things might break.
|
|
if ($options->{mode} eq 'fakechroot'
|
|
and -e "$options->{root}/bin/mv") {
|
|
mkdir "$options->{root}/000-move-me"
|
|
or error "cannot create directory: $!";
|
|
my $ret = system @chrootcmd, '/bin/mv', '/000-move-me',
|
|
'/001-delete-me';
|
|
if ($ret != 0) {
|
|
info "the /bin/mv binary inside the chroot doesn't"
|
|
. " work under fakechroot";
|
|
info "with certain versions of coreutils and glibc,"
|
|
. " this is due to missing support for renameat2 in"
|
|
. " fakechroot";
|
|
info "see https://github.com/dex4er/fakechroot/issues/60";
|
|
info "expect package post installation scripts not to work";
|
|
rmdir "$options->{root}/000-move-me"
|
|
or error "cannot rmdir: $!";
|
|
} else {
|
|
rmdir "$options->{root}/001-delete-me"
|
|
or error "cannot rmdir: $!";
|
|
}
|
|
}
|
|
|
|
return \@chrootcmd;
|
|
}
|
|
|
|
sub run_essential() {
|
|
my $options = shift;
|
|
my $essential_pkgs = shift;
|
|
my $chrootcmd = shift;
|
|
my $cached_debs = shift;
|
|
|
|
if (scalar @{$essential_pkgs} == 0) {
|
|
info "no essential packages -- skipping...";
|
|
return;
|
|
}
|
|
|
|
if ($options->{mode} eq 'chrootless') {
|
|
if ($options->{dryrun}) {
|
|
info "simulate installing essential packages...";
|
|
} else {
|
|
info "installing essential packages...";
|
|
}
|
|
# FIXME: the dpkg config from the host is parsed before the command
|
|
# line arguments are parsed and might break this mode
|
|
# Example: if the host has --path-exclude set, then this will also
|
|
# affect the chroot. See #808203
|
|
my @chrootless_opts = (
|
|
'-oDPkg::Chroot-Directory=',
|
|
'-oDPkg::Options::=--force-not-root',
|
|
'-oDPkg::Options::=--force-script-chrootless',
|
|
'-oDPkg::Options::=--root=' . $options->{root},
|
|
'-oDPkg::Options::=--log=' . "$options->{root}/var/log/dpkg.log",
|
|
$options->{dryrun} ? '-oAPT::Get::Simulate=true' : (),
|
|
);
|
|
if (defined $options->{qemu}) {
|
|
# The binfmt support on the outside is used, so qemu needs to know
|
|
# where it has to look for shared libraries
|
|
if (defined $ENV{QEMU_LD_PREFIX}
|
|
&& $ENV{QEMU_LD_PREFIX} ne "") {
|
|
## no critic (Variables::RequireLocalizedPunctuationVars)
|
|
$ENV{QEMU_LD_PREFIX} = "$ENV{QEMU_LD_PREFIX}:$options->{root}";
|
|
} else {
|
|
## no critic (Variables::RequireLocalizedPunctuationVars)
|
|
$ENV{QEMU_LD_PREFIX} = $options->{root};
|
|
}
|
|
}
|
|
# we don't use apt because that will not run the base-passwd preinst
|
|
# early enough
|
|
#run_apt_progress({
|
|
# ARGV => ['apt-get', '--yes', @chrootless_opts, 'install'],
|
|
# PKGS => [map { "$options->{root}/$_" } @{$essential_pkgs}],
|
|
#});
|
|
run_dpkg_progress({
|
|
ARGV => [
|
|
'dpkg',
|
|
'--force-not-root',
|
|
'--force-script-chrootless',
|
|
"--root=$options->{root}",
|
|
"--log=$options->{root}/var/log/dpkg.log",
|
|
'--install',
|
|
'--force-depends'
|
|
],
|
|
PKGS => [map { "$options->{root}/$_" } @{$essential_pkgs}] });
|
|
} elsif (any { $_ eq $options->{mode} } ('root', 'unshare', 'fakechroot'))
|
|
{
|
|
# install the extracted packages properly
|
|
# we need --force-depends because dpkg does not take Pre-Depends
|
|
# into account and thus doesn't install them in the right order
|
|
# And the --predep-package option is broken: #539133
|
|
#
|
|
# We could use apt from outside the chroot using DPkg::Chroot-Directory
|
|
# but then the preinst script of base-passwd will not be called early
|
|
# enough and packages will fail to install because they are missing
|
|
# /etc/passwd. Also, with plain dpkg the essential variant can finish
|
|
# within 9 seconds. If we use apt instead, it becomes 12 seconds. We
|
|
# prefer speed here.
|
|
if ($options->{dryrun}) {
|
|
info "simulate installing essential packages...";
|
|
} else {
|
|
info "installing essential packages...";
|
|
run_dpkg_progress({
|
|
ARGV =>
|
|
[@{$chrootcmd}, 'dpkg', '--install', '--force-depends'],
|
|
PKGS => $essential_pkgs,
|
|
});
|
|
}
|
|
} else {
|
|
error "unknown mode: $options->{mode}";
|
|
}
|
|
|
|
if (any { $_ eq 'essential/unlink' } @{ $options->{skip} }) {
|
|
info "skipping essential/unlink as requested";
|
|
} else {
|
|
foreach my $deb (@{$essential_pkgs}) {
|
|
            # do not unlink those packages that were in /var/cache/apt/archives
|
|
# before the download phase
|
|
next
|
|
if any { "/var/cache/apt/archives/$_" eq $deb } @{$cached_debs};
|
|
# do not unlink those packages that were not in
|
|
            # /var/cache/apt/archives (for example because they were provided by
|
|
# a file:// mirror)
|
|
next if $deb !~ /\/var\/cache\/apt\/archives\//;
|
|
unlink "$options->{root}/$deb"
|
|
or error "cannot unlink $deb: $!";
|
|
}
|
|
}
|
|
|
|
return;
|
|
}
|
|
|
|
sub run_install() {
|
|
my $options = shift;
|
|
|
|
my @pkgs_to_install = (@{ $options->{include} });
|
|
if ($options->{variant} eq 'extract') {
|
|
error "must not be called with variant extract";
|
|
}
|
|
if (none { $_ eq $options->{variant} } ('custom', 'essential')) {
|
|
push @pkgs_to_install, 'apt';
|
|
}
|
|
if ($options->{variant} eq 'buildd') {
|
|
push @pkgs_to_install, 'build-essential';
|
|
}
|
|
if (any { $_ eq $options->{variant} }
|
|
('required', 'important', 'standard')) {
|
|
# Many of the priority:required packages are also essential:yes. We
|
|
# make sure not to select those here to avoid useless "xxx is already
|
|
# the newest version" messages.
|
|
my $priority;
|
|
if (any { $_ eq $options->{variant} } ('required')) {
|
|
$priority = '?and(?priority(required),?not(?essential))';
|
|
} elsif ($options->{variant} eq 'important') {
|
|
$priority = '?and(?or(?priority(required),?priority(important)),'
|
|
. '?not(?essential))';
|
|
} elsif ($options->{variant} eq 'standard') {
|
|
$priority = '?and(?or(~prequired,~pimportant,~pstandard),'
|
|
. '?not(?essential))';
|
|
}
|
|
push @pkgs_to_install,
|
|
(
|
|
"?narrow("
|
|
. (
|
|
length($options->{suite})
|
|
? '?or(?archive(^'
|
|
. $options->{suite}
|
|
. '$),?codename(^'
|
|
. $options->{suite} . '$)),'
|
|
: ''
|
|
)
|
|
. "?architecture($options->{nativearch}),"
|
|
. "$priority)"
|
|
);
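        # For the 'required' variant with an illustrative suite "bookworm"
        # and native architecture amd64, the pattern pushed above reads:
        #
        #   ?narrow(?or(?archive(^bookworm$),?codename(^bookworm$)),
        #           ?architecture(amd64),
        #           ?and(?priority(required),?not(?essential)))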
|
|
}
|
|
|
|
if ($options->{mode} eq 'chrootless') {
|
|
if (scalar @pkgs_to_install > 0) {
|
|
my @chrootless_opts = (
|
|
'-oDPkg::Chroot-Directory=',
|
|
'-oDPkg::Options::=--force-not-root',
|
|
'-oDPkg::Options::=--force-script-chrootless',
|
|
'-oDPkg::Options::=--root=' . $options->{root},
|
|
'-oDPkg::Options::=--log='
|
|
. "$options->{root}/var/log/dpkg.log",
|
|
$options->{dryrun} ? '-oAPT::Get::Simulate=true' : (),
|
|
);
|
|
run_apt_progress({
|
|
ARGV => ['apt-get', '--yes', @chrootless_opts, 'install'],
|
|
PKGS => [@pkgs_to_install],
|
|
});
|
|
}
|
|
} elsif (any { $_ eq $options->{mode} } ('root', 'unshare', 'fakechroot'))
|
|
{
|
|
if ($options->{variant} ne 'custom'
|
|
and scalar @pkgs_to_install > 0) {
|
|
# Advantage of running apt on the outside instead of inside the
|
|
# chroot:
|
|
#
|
|
# - we can build chroots without apt (for example from buildinfo
|
|
# files)
|
|
#
|
|
# - we do not need to install additional packages like
|
|
# apt-transport-* or ca-certificates inside the chroot
|
|
#
|
|
            # - we do not need additional key material inside the chroot
|
|
#
|
|
# - we can make use of file:// and copy://
|
|
#
|
|
# - we can use EDSP solvers without installing apt-utils or other
|
|
# solvers inside the chroot
|
|
#
|
|
# The DPkg::Install::Recursive::force=true workaround can be
|
|
# dropped after this issue is fixed:
|
|
# https://salsa.debian.org/apt-team/apt/-/merge_requests/189
|
|
#
|
|
# We could also move the dpkg call to the outside and run dpkg with
|
|
# --root but this would only make sense in situations where there
|
|
# is no dpkg inside the chroot.
|
|
if (!$options->{dryrun}) {
|
|
info "installing remaining packages inside the chroot...";
|
|
run_apt_progress({
|
|
ARGV => [
|
|
'apt-get',
|
|
'-o',
|
|
'Dir::Bin::dpkg=env',
|
|
'-o',
|
|
'DPkg::Options::=--unset=TMPDIR',
|
|
'-o',
|
|
'DPkg::Options::=dpkg',
|
|
$options->{mode} eq 'fakechroot'
|
|
? ('-o', 'DPkg::Install::Recursive::force=true')
|
|
: (),
|
|
'--yes',
|
|
'install'
|
|
],
|
|
PKGS => [@pkgs_to_install],
|
|
});
|
|
} else {
|
|
info "simulate installing remaining packages inside the"
|
|
. " chroot...";
|
|
run_apt_progress({
|
|
ARGV => [
|
|
'apt-get', '--yes',
|
|
'-oAPT::Get::Simulate=true', 'install'
|
|
],
|
|
PKGS => [@pkgs_to_install],
|
|
});
|
|
}
|
|
}
|
|
} else {
|
|
error "unknown mode: $options->{mode}";
|
|
}
|
|
|
|
return;
|
|
}
|
|
|
|
sub run_cleanup() {
|
|
my $options = shift;
|
|
|
|
if (any { $_ eq 'cleanup/apt' } @{ $options->{skip} }) {
|
|
info "skipping cleanup/apt as requested";
|
|
} else {
|
|
if ( none { $_ eq 'cleanup/apt/lists' } @{ $options->{skip} }
|
|
and none { $_ eq 'cleanup/apt/cache' } @{ $options->{skip} }) {
|
|
info "cleaning package lists and apt cache...";
|
|
}
|
|
if (any { $_ eq 'cleanup/apt/lists' } @{ $options->{skip} }) {
|
|
info "skipping cleanup/apt/lists as requested";
|
|
} else {
|
|
if (any { $_ eq 'cleanup/apt/cache' } @{ $options->{skip} }) {
|
|
info "cleaning package lists...";
|
|
}
|
|
run_apt_progress({
|
|
ARGV => [
|
|
'apt-get', '--option',
|
|
'Dir::Etc::SourceList=/dev/null', '--option',
|
|
'Dir::Etc::SourceParts=/dev/null', 'update'
|
|
],
|
|
CHDIR => $options->{root},
|
|
});
|
|
}
|
|
if (any { $_ eq 'cleanup/apt/cache' } @{ $options->{skip} }) {
|
|
info "skipping cleanup/apt/cache as requested";
|
|
} else {
|
|
if (any { $_ eq 'cleanup/apt/lists' } @{ $options->{skip} }) {
|
|
info "cleaning apt cache...";
|
|
}
|
|
run_apt_progress(
|
|
{ ARGV => ['apt-get', 'clean'], CHDIR => $options->{root} });
|
|
}
|
|
|
|
# apt since 1.6 creates the auxfiles directory. If apt inside the
|
|
# chroot is older than that, then it will not know how to clean it.
|
|
if (-e "$options->{root}/var/lib/apt/lists/auxfiles") {
|
|
0 == system(
|
|
'rm',
|
|
'--interactive=never',
|
|
'--recursive',
|
|
'--preserve-root',
|
|
'--one-file-system',
|
|
"$options->{root}/var/lib/apt/lists/auxfiles"
|
|
) or error "rm failed: $?";
|
|
}
|
|
}
|
|
|
|
if (any { $_ eq 'cleanup/mmdebstrap' } @{ $options->{skip} }) {
|
|
info "skipping cleanup/mmdebstrap as requested";
|
|
} else {
|
|
# clean up temporary configuration file
|
|
unlink "$options->{root}/etc/apt/apt.conf.d/00mmdebstrap"
|
|
or warning "failed to unlink /etc/apt/apt.conf.d/00mmdebstrap: $!";
|
|
|
|
if (defined $ENV{APT_CONFIG} && -e $ENV{APT_CONFIG}) {
|
|
unlink $ENV{APT_CONFIG}
|
|
or error "failed to unlink $ENV{APT_CONFIG}: $!";
|
|
}
|
|
}
|
|
|
|
if (any { $_ eq 'cleanup/reproducible' } @{ $options->{skip} }) {
|
|
info "skipping cleanup/reproducible as requested";
|
|
} else {
|
|
# clean up certain files to make output reproducible
|
|
foreach my $fname (
|
|
'/var/log/dpkg.log', '/var/log/apt/history.log',
|
|
'/var/log/apt/term.log', '/var/log/alternatives.log',
|
|
'/var/cache/ldconfig/aux-cache', '/var/log/apt/eipp.log.xz',
|
|
'/var/lib/dbus/machine-id'
|
|
) {
|
|
my $path = "$options->{root}$fname";
|
|
if (!-e $path) {
|
|
next;
|
|
}
|
|
unlink $path or error "cannot unlink $path: $!";
|
|
}
|
|
|
|
if (-e "$options->{root}/etc/machine-id") {
|
|
# from machine-id(5):
|
|
# For operating system images which are created once and used on
|
|
# multiple machines, for example for containers or in the cloud,
|
|
# /etc/machine-id should be an empty file in the generic file
|
|
# system image. An ID will be generated during boot and saved to
|
|
# this file if possible. Having an empty file in place is useful
|
|
# because it allows a temporary file to be bind-mounted over the
|
|
# real file, in case the image is used read-only.
|
|
if (any { $_ eq 'cleanup/reproducible/machine-id' }
|
|
@{ $options->{skip} }) {
|
|
info "skipping cleanup/reproducible/machine-id as requested";
|
|
} else {
|
|
unlink "$options->{root}/etc/machine-id"
|
|
or error "cannot unlink /etc/machine-id: $!";
|
|
open my $fh, '>', "$options->{root}/etc/machine-id"
|
|
or error "failed to open(): $!";
|
|
close $fh;
|
|
}
|
|
}
|
|
}
|
|
|
|
if (any { $_ eq 'cleanup/run' } @{ $options->{skip} }) {
|
|
info "skipping cleanup/run as requested";
|
|
} else {
|
|
# remove any possible leftovers in /run
|
|
if (-d "$options->{root}/run") {
|
|
opendir(my $dh, "$options->{root}/run")
|
|
or error "Can't opendir($options->{root}/run): $!";
|
|
while (my $entry = readdir $dh) {
|
|
# skip the "." and ".." entries
|
|
next if $entry eq ".";
|
|
next if $entry eq "..";
|
|
# skip deleting /run/lock as /var/lock is a symlink to it
|
|
# according to Debian policy §9.1.4
|
|
next if $entry eq "lock";
|
|
debug "deleting files in /run: $entry";
|
|
0 == system(
|
|
'rm', '--interactive=never',
|
|
'--recursive', '--preserve-root',
|
|
'--one-file-system', "$options->{root}/run/$entry"
|
|
) or error "rm failed: $?";
|
|
}
|
|
closedir($dh);
|
|
}
|
|
}
|
|
if (any { $_ eq 'cleanup/tmp' } @{ $options->{skip} }) {
|
|
info "skipping cleanup/tmp as requested";
|
|
} else {
|
|
# remove any possible leftovers in /tmp
|
|
if (-d "$options->{root}/tmp") {
|
|
opendir(my $dh, "$options->{root}/tmp")
|
|
or error "Can't opendir($options->{root}/tmp): $!";
|
|
while (my $entry = readdir $dh) {
|
|
# skip the "." and ".." entries
|
|
next if $entry eq ".";
|
|
next if $entry eq "..";
|
|
debug "deleting files in /tmp: $entry";
|
|
0 == system(
|
|
'rm', '--interactive=never',
|
|
'--recursive', '--preserve-root',
|
|
'--one-file-system', "$options->{root}/tmp/$entry"
|
|
) or error "rm failed: $?";
|
|
}
|
|
closedir($dh);
|
|
}
|
|
}
|
|
|
|
if (any { $_ eq 'cleanup/dev' } @{ $options->{skip} }) {
|
|
info "skipping cleanup/dev as requested";
|
|
} else {
|
|
|
|
# By default, tar is run with --exclude=./dev because we create the
|
|
        # ./dev entries ourselves using @linuxdevfiles. But if --skip=output/dev is
|
|
# used, --exclude=./dev is not passed so that the chroot includes ./dev
|
|
# as created by base-files. But if mknod was available (for example
|
|
# when running as root) then ./dev will also include the @devfiles
|
|
# entries created by run_setup() and thus the resulting tarball will
|
|
# include things inside ./dev despite the user having supplied
|
|
# --skip=output/dev. So if --skip=output/dev was passed and if a
|
|
# tarball is to be created, we need to make sure to clean up the
|
|
# ./dev entries that were created in run_setup(). This is not done
|
|
# when creating a directory because in that case we want to do the
|
|
# same as debootstrap and create a directory including device nodes.
|
|
if ($options->{format} ne 'directory' && any { $_ eq 'output/dev' }
|
|
@{ $options->{skip} }) {
|
|
foreach my $file (@linuxdevfiles) {
|
|
my ($fname, $mode, $type, $linkname, $devmajor, $devminor,
|
|
undef)
|
|
= @{$file};
|
|
if (!-e "$options->{root}/$fname") {
|
|
next;
|
|
}
|
|
# do not remove ./dev itself
|
|
if ($fname eq "./dev/") {
|
|
next;
|
|
}
|
|
if ($type == 0) { # normal file
|
|
error "type 0 not implemented";
|
|
} elsif ($type == 1) { # hardlink
|
|
error "type 1 not implemented";
|
|
} elsif (any { $_ eq $type } (2, 3, 4))
|
|
{ # symlink, char, block
|
|
unlink "$options->{root}/$fname"
|
|
or error "failed to unlink $fname: $!";
|
|
} elsif ($type == 5) { # directory
|
|
rmdir "$options->{root}/$fname"
|
|
or error "failed to unlink $fname: $!";
|
|
} else {
|
|
error "unsupported type: $type";
|
|
}
|
|
}
|
|
}
|
|
}
|
|
return;
|
|
}
|
|
|
|
# messages from process inside unshared namespace to the outside
|
|
# openw -- open file for writing
|
|
# untar -- extract tar into directory
|
|
# write -- write data to last opened file or tar process
|
|
# close -- finish file writing or tar extraction
|
|
# adios -- last message and tear-down
|
|
# messages from process outside unshared namespace to the inside
|
|
# okthx -- success
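# Each message consists of a 2-byte big-endian payload length, a 5-byte
# command name and then the payload, i.e. it is framed like (illustrative):
#
#   pack("n", length $payload) . "write" . $payload
#
# Control messages such as "okthx" or "close" carry a zero-length payload.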
|
|
sub checkokthx {
|
|
my $fh = shift;
|
|
my $ret = read($fh, my $buf, 2 + 5) // error "cannot read from socket: $!";
|
|
if ($ret == 0) { error "received eof on socket"; }
|
|
my ($len, $msg) = unpack("nA5", $buf);
|
|
if ($msg ne "okthx") { error "expected okthx but got: $msg"; }
|
|
if ($len != 0) { error "expected no payload but got $len bytes"; }
|
|
return;
|
|
}
|
|
|
|
# resolve a path inside a chroot
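# For example (illustrative, assuming no symlinks are involved):
#
#   chrooted_realpath("/chroot", "/usr/../etc//passwd")
#
# returns "/chroot/etc/passwd". Symlinks encountered along the way are
# resolved relative to the chroot and the result never escapes above $root.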
|
|
sub chrooted_realpath {
|
|
my $root = shift;
|
|
my $src = shift;
|
|
my $result = $root;
|
|
my $prefix;
|
|
|
|
# relative paths are relative to the root of the chroot
|
|
# remove prefixed slashes
|
|
$src =~ s{^/+}{};
|
|
my $loop = 0;
|
|
while (length $src) {
|
|
if ($loop > 25) {
|
|
error "too many levels of symbolic links";
|
|
}
|
|
# Get the first directory component.
|
|
($prefix, $src) = split m{/+}, $src, 2;
|
|
# Resolve the first directory component.
|
|
if ($prefix eq ".") {
|
|
# Ignore, stay at the same directory.
|
|
} elsif ($prefix eq "..") {
|
|
# Go up one directory.
|
|
$result =~ s{(.*)/[^/]*}{$1};
|
|
# but not further than the root
|
|
if ($result !~ m/^\Q$root\E/) {
|
|
$result = $root;
|
|
}
|
|
} elsif (-l "$result/$prefix") {
|
|
my $dst = readlink "$result/$prefix";
|
|
if ($dst =~ s{^/+}{}) {
|
|
# Absolute pathname, reset result back to $root.
|
|
$result = $root;
|
|
}
|
|
$src = length $src ? "$dst/$src" : $dst;
|
|
$loop++;
|
|
} else {
|
|
# Otherwise append the prefix.
|
|
$result = "$result/$prefix";
|
|
}
|
|
}
|
|
return $result;
|
|
}
|
|
|
|
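# Make the chroot directory the new root of this mount namespace:
# bind-mount it onto /mnt, pivot_root(2) into it with the old root
# temporarily attached at tmp/ inside the new root, then lazily detach
# the old root (tmp) and sys.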
sub pivot_root {
|
|
my $root = shift;
|
|
my $target = "/mnt";
|
|
my $put_old = "tmp";
|
|
0 == syscall &SYS_mount, $root, $target, 0, $MS_REC | $MS_BIND, 0
|
|
or error "mount failed: $!";
|
|
chdir "/mnt" or error "failed chdir() to /mnt: $!";
|
|
0 == syscall &SYS_pivot_root, my $new_root = ".", $put_old
|
|
or error "pivot_root failed: $!";
|
|
chroot "." or error "failed to chroot() to .: $!";
|
|
0 == syscall &SYS_umount2, $put_old, $MNT_DETACH
|
|
or error "umount2 failed: $!";
|
|
0 == syscall &SYS_umount2, my $sys = "sys", $MNT_DETACH
|
|
or error "umount2 failed: $!";
|
|
return;
|
|
}
|
|
|
|
sub hookhelper {
|
|
my ($root, $mode, $hook, $skipopt, $verbosity, $command, @args) = @_;
|
|
$verbosity_level = $verbosity;
|
|
my @skipopts = ();
|
|
if (length $skipopt) {
|
|
for my $skip (split /[,\s]+/, $skipopt) {
|
|
# strip leading and trailing whitespace
|
|
$skip =~ s/^\s+|\s+$//g;
|
|
# skip if the remainder is an empty string
|
|
if ($skip eq '') {
|
|
next;
|
|
}
|
|
push @skipopts, $skip;
|
|
}
|
|
}
|
|
# we put everything in an eval block because that way we can easily handle
|
|
# errors without goto labels or much code duplication: the error handler
|
|
# has to send an "error" message to the other side
|
|
eval {
|
|
|
|
my @cmdprefix = ();
|
|
my @tarcmd = (
|
|
'tar', '--numeric-owner', '--xattrs', '--format=pax',
|
|
'--pax-option=exthdr.name=%d/PaxHeaders/%f,'
|
|
. 'delete=atime,delete=ctime'
|
|
);
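        # The pax options are presumably there to keep the generated
        # tarballs reproducible: exthdr.name derives extended header names
        # from the member name (the default embeds the process id) and the
        # varying atime/ctime records are dropped.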
|
|
if ($hook eq 'setup') {
|
|
} elsif (any { $_ eq $hook } ('extract', 'essential', 'customize')) {
|
|
if ($mode eq 'fakechroot') {
|
|
# Fakechroot requires tar to run inside the chroot or
|
|
# otherwise absolute symlinks will include the path to the
|
|
# root directory
|
|
push @cmdprefix, 'chroot', $root;
|
|
} elsif (any { $_ eq $mode } ('root', 'chrootless', 'unshare')) {
|
|
# not chrooting in this case
|
|
} else {
|
|
error "unknown mode: $mode";
|
|
}
|
|
} else {
|
|
error "unknown hook: $hook";
|
|
}
|
|
|
|
if (any { $_ eq $command } ('copy-in', 'tar-in', 'upload', 'sync-in'))
|
|
{
|
|
if (scalar @args < 2) {
|
|
error "$command needs at least one path on the"
|
|
. " outside and the output path inside the chroot";
|
|
}
|
|
my $outpath = pop @args;
|
|
foreach my $file (@args) {
|
|
# the right argument for tar's --directory argument depends on
|
|
# whether tar is called from inside the chroot or from the
|
|
# outside
|
|
my $directory;
|
|
if ($hook eq 'setup') {
|
|
# tar runs outside, so acquire the correct path
|
|
$directory = chrooted_realpath $root, $outpath;
|
|
} elsif (any { $_ eq $hook }
|
|
('extract', 'essential', 'customize')) {
|
|
if ($mode eq 'fakechroot') {
|
|
# tar will run inside the chroot
|
|
$directory = $outpath;
|
|
} elsif (any { $_ eq $mode }
|
|
('root', 'chrootless', 'unshare')) {
|
|
$directory = chrooted_realpath $root, $outpath;
|
|
} else {
|
|
error "unknown mode: $mode";
|
|
}
|
|
} else {
|
|
error "unknown hook: $hook";
|
|
}
|
|
|
|
                # Unless fakechroot is used (in which case absolute
                # symlinks inside the chroot cannot be resolved reliably
                # from the outside), chrooted_realpath gave us a path that
                # can be checked from here, so we can potentially fail
                # early if the target does not exist.
|
|
if ($mode ne 'fakechroot') {
|
|
my $dirtocheck = $directory;
|
|
if ($command eq 'upload') {
|
|
# check the parent directory instead
|
|
$dirtocheck =~ s/(.*)\/[^\/]*/$1/;
|
|
}
|
|
if (!-e $dirtocheck) {
|
|
error "path does not exist: $dirtocheck";
|
|
}
|
|
if (!-d $dirtocheck) {
|
|
error "path is not a directory: $dirtocheck";
|
|
}
|
|
}
|
|
|
|
my $fh;
|
|
if ($command eq 'upload') {
|
|
# open the requested file for writing
|
|
                    open($fh, '|-', @cmdprefix, 'sh', '-c', 'cat > "$1"',
                        'exec', $directory)
                      // error "failed to fork(): $!";
|
|
} elsif (any { $_ eq $command }
|
|
('copy-in', 'tar-in', 'sync-in')) {
|
|
# open a tar process that extracts the tarfile that we
|
|
# supply it with on stdin to the output directory inside
|
|
# the chroot
|
|
my @cmd = (
|
|
@cmdprefix, @tarcmd, '--xattrs-include=*',
|
|
'--directory', $directory, '--extract', '--file', '-'
|
|
);
|
|
# go via mmtarfilter if copy-in/mknod, tar-in/mknod or
|
|
# sync-in/mknod were part of the skip options
|
|
if (any { $_ eq "$command/mknod" } @skipopts) {
|
|
info "skipping $command/mknod as requested";
|
|
my $tarfilter = "mmtarfilter";
|
|
if (-x "./tarfilter") {
|
|
$tarfilter = "./tarfilter";
|
|
}
|
|
pipe my $filter_reader, $fh or error "pipe failed: $!";
|
|
pipe my $tar_reader, my $filter_writer
|
|
or error "pipe failed: $!";
|
|
my $pid1 = fork() // error "fork() failed: $!";
|
|
if ($pid1 == 0) {
|
|
open(STDIN, '<&', $filter_reader)
|
|
or error "cannot open STDIN: $!";
|
|
open(STDOUT, '>&', $filter_writer)
|
|
or error "cannot open STDOUT: $!";
|
|
close($tar_reader)
|
|
or error "cannot close tar_reader: $!";
|
|
debug(
|
|
"helper: running $tarfilter --type-exclude=3 "
|
|
. "--type-exclude=4");
|
|
eval { Devel::Cover::set_coverage("none") }
|
|
if $is_covering;
|
|
exec $tarfilter, '--type-exclude=3',
|
|
'--type-exclude=4';
|
|
}
|
|
my $pid2 = fork() // error "fork() failed: $!";
|
|
if ($pid2 == 0) {
|
|
open(STDIN, '<&', $tar_reader)
|
|
or error "cannot open STDIN: $!";
|
|
close($filter_writer)
|
|
or error "cannot close filter_writer: $!";
|
|
debug("helper: running " . (join " ", @cmd));
|
|
eval { Devel::Cover::set_coverage("none") }
|
|
if $is_covering;
|
|
exec { $cmd[0] } @cmd;
|
|
}
|
|
} else {
|
|
debug("helper: running " . (join " ", @cmd));
|
|
open($fh, '|-', @cmd) // error "failed to fork(): $!";
|
|
}
|
|
} else {
|
|
error "unknown command: $command";
|
|
}
|
|
|
|
if ($command eq 'copy-in') {
|
|
# instruct the parent process to create a tarball of the
|
|
# requested path outside the chroot
|
|
debug "helper: sending mktar";
|
|
print STDOUT (pack("n", length $file) . "mktar" . $file);
|
|
} elsif ($command eq 'sync-in') {
|
|
# instruct the parent process to create a tarball of the
|
|
# content of the requested path outside the chroot
|
|
debug "helper: sending mktac";
|
|
print STDOUT (pack("n", length $file) . "mktac" . $file);
|
|
} elsif (any { $_ eq $command } ('upload', 'tar-in')) {
|
|
# instruct parent process to open a tarball of the
|
|
# requested path outside the chroot for reading
|
|
debug "helper: sending openr";
|
|
print STDOUT (pack("n", length $file) . "openr" . $file);
|
|
} else {
|
|
error "unknown command: $command";
|
|
}
|
|
STDOUT->flush();
|
|
debug "helper: waiting for okthx";
|
|
checkokthx \*STDIN;
|
|
|
|
# handle "write" messages from the parent process and feed
|
|
# their payload into the tar process until a "close" message
|
|
# is encountered
|
|
while (1) {
|
|
# receive the next message
|
|
my $ret = read(STDIN, my $buf, 2 + 5)
|
|
// error "cannot read from socket: $!";
|
|
if ($ret == 0) {
|
|
error "received eof on socket";
|
|
}
|
|
my ($len, $msg) = unpack("nA5", $buf);
|
|
debug "helper: received message: $msg";
|
|
if ($msg eq "close") {
|
|
# finish the loop
|
|
if ($len != 0) {
|
|
error "expected no payload but got $len bytes";
|
|
}
|
|
debug "helper: sending okthx";
|
|
print STDOUT (pack("n", 0) . "okthx")
|
|
or error "cannot write to socket: $!";
|
|
STDOUT->flush();
|
|
last;
|
|
} elsif ($msg ne "write") {
|
|
error "expected write but got: $msg";
|
|
}
|
|
# read the payload
|
|
my $content;
|
|
{
|
|
my $ret = read(STDIN, $content, $len)
|
|
// error "error cannot read from socket: $!";
|
|
if ($ret == 0) {
|
|
error "received eof on socket";
|
|
}
|
|
}
|
|
# write the payload to the tar process
|
|
print $fh $content
|
|
or error "cannot write to tar process: $!";
|
|
debug "helper: sending okthx";
|
|
print STDOUT (pack("n", 0) . "okthx")
|
|
or error "cannot write to socket: $!";
|
|
STDOUT->flush();
|
|
}
|
|
close $fh;
|
|
if ($command ne 'upload' and $? != 0) {
|
|
error "tar failed";
|
|
}
|
|
}
|
|
} elsif (any { $_ eq $command }
|
|
('copy-out', 'tar-out', 'download', 'sync-out')) {
|
|
if (scalar @args < 2) {
|
|
error "$command needs at least one path inside the chroot and"
|
|
. " the output path on the outside";
|
|
}
|
|
my $outpath = pop @args;
|
|
foreach my $file (@args) {
|
|
# the right argument for tar's --directory argument depends on
|
|
# whether tar is called from inside the chroot or from the
|
|
# outside
|
|
my $directory;
|
|
if ($hook eq 'setup') {
|
|
# tar runs outside, so acquire the correct path
|
|
$directory = chrooted_realpath $root, $file;
|
|
} elsif (any { $_ eq $hook }
|
|
('extract', 'essential', 'customize')) {
|
|
if ($mode eq 'fakechroot') {
|
|
# tar will run inside the chroot
|
|
$directory = $file;
|
|
} elsif (any { $_ eq $mode }
|
|
('root', 'chrootless', 'unshare')) {
|
|
$directory = chrooted_realpath $root, $file;
|
|
} else {
|
|
error "unknown mode: $mode";
|
|
}
|
|
} else {
|
|
error "unknown hook: $hook";
|
|
}
|
|
|
|
# if chrooted_realpath was used (that is, in all modes except fakechroot,
# where absolute symlinks would be broken) we can check and potentially
# fail early if the source does not exist
|
|
if ($mode ne 'fakechroot') {
|
|
if (!-e $directory) {
|
|
error "path does not exist: $directory";
|
|
}
|
|
if ($command eq 'download') {
|
|
if (!-f $directory) {
|
|
error "path is not a file: $directory";
|
|
}
|
|
}
|
|
}
|
|
|
|
my $fh;
|
|
if ($command eq 'download') {
|
|
# open the requested file for reading
|
|
open $fh, '-|', @cmdprefix, 'sh', '-c', 'cat "$1"',
|
|
'exec', $directory // error "failed to fork(): $!";
|
|
} elsif ($command eq 'sync-out') {
|
|
# Open a tar process that creates a tarfile of everything
|
|
# inside the requested directory inside the chroot and
|
|
# writes it to stdout.
|
|
my @cmd = (
|
|
@cmdprefix, @tarcmd, '--directory',
|
|
$directory, '--create', '--file', '-', '.'
|
|
);
|
|
debug("helper: running " . (join " ", @cmd));
|
|
open($fh, '-|', @cmd) // error "failed to fork(): $!";
|
|
} elsif (any { $_ eq $command } ('copy-out', 'tar-out')) {
|
|
# Open a tar process that creates a tarfile of the
|
|
# requested directory inside the chroot and writes it to
|
|
# stdout. To emulate the behaviour of cp, change to the
|
|
# dirname of the requested path first.
|
|
my @cmd = (
|
|
@cmdprefix, @tarcmd, '--directory',
|
|
dirname($directory), '--create', '--file', '-',
|
|
basename($directory));
|
|
debug("helper: running " . (join " ", @cmd));
|
|
open($fh, '-|', @cmd) // error "failed to fork(): $!";
|
|
} else {
|
|
error "unknown command: $command";
|
|
}
|
|
|
|
if (any { $_ eq $command } ('copy-out', 'sync-out')) {
|
|
# instruct the parent process to extract a tarball to a
|
|
# certain path outside the chroot
|
|
debug "helper: sending untar";
|
|
print STDOUT (
|
|
pack("n", length $outpath) . "untar" . $outpath);
|
|
} elsif (any { $_ eq $command } ('download', 'tar-out')) {
|
|
# instruct parent process to open a tarball of the
|
|
# requested path outside the chroot for writing
|
|
debug "helper: sending openw";
|
|
print STDOUT (
|
|
pack("n", length $outpath) . "openw" . $outpath);
|
|
} else {
|
|
error "unknown command: $command";
|
|
}
|
|
STDOUT->flush();
|
|
debug "helper: waiting for okthx";
|
|
checkokthx \*STDIN;
|
|
|
|
# read from the tar process and send as payload to the parent
|
|
# process
|
|
while (1) {
|
|
# read from tar
|
|
my $ret = read($fh, my $cont, 4096)
|
|
// error "cannot read from pipe: $!";
|
|
if ($ret == 0) { last; }
|
|
debug "helper: sending write";
|
|
# send to parent
|
|
print STDOUT pack("n", $ret) . "write" . $cont;
|
|
STDOUT->flush();
|
|
debug "helper: waiting for okthx";
|
|
checkokthx \*STDIN;
|
|
if ($ret < 4096) { last; }
|
|
}
|
|
|
|
# signal to the parent process that we are done
|
|
debug "helper: sending close";
|
|
print STDOUT pack("n", 0) . "close";
|
|
STDOUT->flush();
|
|
debug "helper: waiting for okthx";
|
|
checkokthx \*STDIN;
|
|
|
|
close $fh;
|
|
if ($? != 0) {
|
|
error "$command failed";
|
|
}
|
|
}
|
|
} else {
|
|
error "unknown command: $command";
|
|
}
|
|
};
|
|
if ($@) {
|
|
# inform the other side that something went wrong
|
|
print STDOUT (pack("n", 0) . "error");
|
|
STDOUT->flush();
|
|
error "hookhelper failed: $@";
|
|
}
|
|
return;
|
|
}
|
|
|
|
sub hooklistener {
|
|
$verbosity_level = shift;
|
|
# we put everything in an eval block because that way we can easily handle
|
|
# errors without goto labels or much code duplication: the error handler
|
|
# has to send an "error" message to the other side
|
|
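# Messages on the socket consist of a 2-byte big-endian length field,
# a 5-character command name and $len bytes of payload; most commands
# are acknowledged with an "okthx" message.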
eval {
|
|
while (1) {
|
|
# get the next message
|
|
my $msg = "error";
|
|
my $len = -1;
|
|
{
|
|
debug "listener: reading next command";
|
|
my $ret = read(STDIN, my $buf, 2 + 5)
|
|
// error "cannot read from socket: $!";
|
|
debug "listener: finished reading command";
|
|
if ($ret == 0) {
|
|
error "received eof on socket";
|
|
}
|
|
($len, $msg) = unpack("nA5", $buf);
|
|
}
|
|
if ($msg eq "adios") {
|
|
debug "listener: received message: adios";
|
|
# setup finished, so we break out of the loop
|
|
if ($len != 0) {
|
|
error "expected no payload but got $len bytes";
|
|
}
|
|
last;
|
|
} elsif ($msg eq "openr") {
|
|
# handle the openr message
|
|
debug "listener: received message: openr";
|
|
my $infile;
|
|
{
|
|
my $ret = read(STDIN, $infile, $len)
|
|
// error "cannot read from socket: $!";
|
|
if ($ret == 0) {
|
|
error "received eof on socket";
|
|
}
|
|
}
|
|
# make sure that the requested path exists outside the chroot
|
|
if (!-e $infile) {
|
|
error "$infile does not exist";
|
|
}
|
|
debug "listener: sending okthx";
|
|
print STDOUT (pack("n", 0) . "okthx")
|
|
or error "cannot write to socket: $!";
|
|
STDOUT->flush();
|
|
|
|
open my $fh, '<', $infile
|
|
or error "failed to open $infile for reading: $!";
|
|
|
|
# read from the file and send as payload to the child process
|
|
while (1) {
|
|
# read from file
|
|
my $ret = read($fh, my $cont, 4096)
|
|
// error "cannot read from pipe: $!";
|
|
if ($ret == 0) { last; }
|
|
debug "listener: sending write";
|
|
# send to child
|
|
print STDOUT pack("n", $ret) . "write" . $cont;
|
|
STDOUT->flush();
|
|
debug "listener: waiting for okthx";
|
|
checkokthx \*STDIN;
|
|
if ($ret < 4096) { last; }
|
|
}
|
|
|
|
# signal to the child process that we are done
|
|
debug "listener: sending close";
|
|
print STDOUT pack("n", 0) . "close";
|
|
STDOUT->flush();
|
|
debug "listener: waiting for okthx";
|
|
checkokthx \*STDIN;
|
|
|
|
close $fh;
|
|
} elsif ($msg eq "openw") {
|
|
debug "listener: received message: openw";
|
|
# payload is the output directory
|
|
my $outfile;
|
|
{
|
|
my $ret = read(STDIN, $outfile, $len)
|
|
// error "cannot read from socket: $!";
|
|
if ($ret == 0) {
|
|
error "received eof on socket";
|
|
}
|
|
}
|
|
# make sure that the directory exists
|
|
my $outdir = dirname($outfile);
|
|
if (-e $outdir) {
|
|
if (!-d $outdir) {
|
|
error "$outdir already exists but is not a directory";
|
|
}
|
|
} else {
|
|
my $num_created = make_path $outdir, { error => \my $err };
|
|
if ($err && @$err) {
|
|
error(
|
|
join "; ",
|
|
(
|
|
map { "cannot create " . (join ": ", %{$_}) }
|
|
@$err
|
|
));
|
|
} elsif ($num_created == 0) {
|
|
error "cannot create $outdir";
|
|
}
|
|
}
|
|
debug "listener: sending okthx";
|
|
print STDOUT (pack("n", 0) . "okthx")
|
|
or error "cannot write to socket: $!";
|
|
STDOUT->flush();
|
|
|
|
# now we expect one or more "write" messages containing the
|
|
# tarball to write
|
|
open my $fh, '>', $outfile
|
|
or error "failed to open $outfile for writing: $!";
|
|
|
|
# handle "write" messages from the child process and feed
|
|
# their payload into the file handle until a "close" message
|
|
# is encountered
|
|
while (1) {
|
|
# receive the next message
|
|
my $ret = read(STDIN, my $buf, 2 + 5)
|
|
// error "cannot read from socket: $!";
|
|
if ($ret == 0) {
|
|
error "received eof on socket";
|
|
}
|
|
my ($len, $msg) = unpack("nA5", $buf);
|
|
debug "listener: received message: $msg";
|
|
if ($msg eq "close") {
|
|
# finish the loop
|
|
if ($len != 0) {
|
|
error "expected no payload but got $len bytes";
|
|
}
|
|
debug "listener: sending okthx";
|
|
print STDOUT (pack("n", 0) . "okthx")
|
|
or error "cannot write to socket: $!";
|
|
STDOUT->flush();
|
|
last;
|
|
} elsif ($msg ne "write") {
|
|
# we should not receive this message at this point
|
|
error "expected write but got: $msg";
|
|
}
|
|
# read the payload
|
|
my $content;
|
|
{
|
|
my $ret = read(STDIN, $content, $len)
|
|
// error "error cannot read from socket: $!";
|
|
if ($ret == 0) {
|
|
error "received eof on socket";
|
|
}
|
|
}
|
|
# write the payload to the file handle
|
|
print $fh $content
|
|
or error "cannot write to file handle: $!";
|
|
debug "listener: sending okthx";
|
|
print STDOUT (pack("n", 0) . "okthx")
|
|
or error "cannot write to socket: $!";
|
|
STDOUT->flush();
|
|
}
|
|
close $fh;
|
|
} elsif (any { $_ eq $msg } ('mktar', 'mktac')) {
|
|
# handle the mktar message
|
|
debug "listener: received message: $msg";
|
|
my $indir;
|
|
{
|
|
my $ret = read(STDIN, $indir, $len)
|
|
// error "cannot read from socket: $!";
|
|
if ($ret == 0) {
|
|
error "received eof on socket";
|
|
}
|
|
}
|
|
# make sure that the requested path exists outside the chroot
|
|
if (!-e $indir) {
|
|
error "$indir does not exist";
|
|
}
|
|
debug "listener: sending okthx";
|
|
print STDOUT (pack("n", 0) . "okthx")
|
|
or error "cannot write to socket: $!";
|
|
STDOUT->flush();
|
|
|
|
# Open a tar process creating a tarfile of the instructed
|
|
# path. To emulate the behaviour of cp, change to the
|
|
# dirname of the requested path first.
|
|
my @cmd = (
|
|
'tar',
|
|
'--numeric-owner',
|
|
'--xattrs',
|
|
'--format=pax',
|
|
'--pax-option=exthdr.name=%d/PaxHeaders/%f,'
|
|
. 'delete=atime,delete=ctime',
|
|
'--directory',
|
|
$msg eq 'mktar' ? dirname($indir) : $indir,
|
|
'--create',
|
|
'--file',
|
|
'-',
|
|
$msg eq 'mktar' ? basename($indir) : '.'
|
|
);
|
|
debug("listener: running " . (join " ", @cmd));
|
|
open(my $fh, '-|', @cmd) // error "failed to fork(): $!";
|
|
|
|
# read from the tar process and send as payload to the child
|
|
# process
|
|
while (1) {
|
|
# read from tar
|
|
my $ret = read($fh, my $cont, 4096)
|
|
// error "cannot read from pipe: $!";
|
|
if ($ret == 0) { last; }
|
|
debug "listener: sending write ($ret bytes)";
|
|
# send to child
|
|
print STDOUT pack("n", $ret) . "write" . $cont;
|
|
STDOUT->flush();
|
|
debug "listener: waiting for okthx";
|
|
checkokthx \*STDIN;
|
|
if ($ret < 4096) { last; }
|
|
}
|
|
|
|
# signal to the child process that we are done
|
|
debug "listener: sending close";
|
|
print STDOUT pack("n", 0) . "close";
|
|
STDOUT->flush();
|
|
debug "listener: waiting for okthx";
|
|
checkokthx \*STDIN;
|
|
|
|
close $fh;
|
|
if ($? != 0) {
|
|
error "tar failed";
|
|
}
|
|
} elsif ($msg eq "untar") {
|
|
debug "listener: received message: untar";
|
|
# payload is the output directory
|
|
my $outdir;
|
|
{
|
|
my $ret = read(STDIN, $outdir, $len)
|
|
// error "cannot read from socket: $!";
|
|
if ($ret == 0) {
|
|
error "received eof on socket";
|
|
}
|
|
}
|
|
# make sure that the directory exists
|
|
if (-e $outdir) {
|
|
if (!-d $outdir) {
|
|
error "$outdir already exists but is not a directory";
|
|
}
|
|
} else {
|
|
my $num_created = make_path $outdir, { error => \my $err };
|
|
if ($err && @$err) {
|
|
error(
|
|
join "; ",
|
|
(
|
|
map { "cannot create " . (join ": ", %{$_}) }
|
|
@$err
|
|
));
|
|
} elsif ($num_created == 0) {
|
|
error "cannot create $outdir";
|
|
}
|
|
}
|
|
debug "listener: sending okthx";
|
|
print STDOUT (pack("n", 0) . "okthx")
|
|
or error "cannot write to socket: $!";
|
|
STDOUT->flush();
|
|
|
|
# now we expect one or more "write" messages containing the
|
|
# tarball to unpack
|
|
open my $fh, '|-', 'tar', '--numeric-owner', '--xattrs',
|
|
'--xattrs-include=*', '--directory', $outdir,
|
|
'--extract', '--file',
|
|
'-' // error "failed to fork(): $!";
|
|
|
|
# handle "write" messages from the child process and feed
|
|
# their payload into the tar process until a "close" message
|
|
# is encountered
|
|
while (1) {
|
|
# receive the next message
|
|
my $ret = read(STDIN, my $buf, 2 + 5)
|
|
// error "cannot read from socket: $!";
|
|
if ($ret == 0) {
|
|
error "received eof on socket";
|
|
}
|
|
my ($len, $msg) = unpack("nA5", $buf);
|
|
debug "listener: received message: $msg";
|
|
if ($msg eq "close") {
|
|
# finish the loop
|
|
if ($len != 0) {
|
|
error "expected no payload but got $len bytes";
|
|
}
|
|
debug "listener: sending okthx";
|
|
print STDOUT (pack("n", 0) . "okthx")
|
|
or error "cannot write to socket: $!";
|
|
STDOUT->flush();
|
|
last;
|
|
} elsif ($msg ne "write") {
|
|
# we should not receive this message at this point
|
|
error "expected write but got: $msg";
|
|
}
|
|
# read the payload
|
|
my $content;
|
|
{
|
|
my $ret = read(STDIN, $content, $len)
|
|
// error "error cannot read from socket: $!";
|
|
if ($ret == 0) {
|
|
error "received eof on socket";
|
|
}
|
|
}
|
|
# write the payload to the tar process
|
|
print $fh $content
|
|
or error "cannot write to tar process: $!";
|
|
debug "listener: sending okthx";
|
|
print STDOUT (pack("n", 0) . "okthx")
|
|
or error "cannot write to socket: $!";
|
|
STDOUT->flush();
|
|
}
|
|
close $fh;
|
|
if ($? != 0) {
|
|
error "tar failed";
|
|
}
|
|
} elsif ($msg eq "error") {
|
|
error "received error on socket";
|
|
} else {
|
|
error "unknown message: $msg";
|
|
}
|
|
}
|
|
};
|
|
if ($@) {
|
|
warning("hooklistener errored out: $@");
|
|
# inform the other side that something went wrong
|
|
print STDOUT (pack("n", 0) . "error")
|
|
or error "cannot write to socket: $!";
|
|
STDOUT->flush();
|
|
}
|
|
return;
|
|
}
|
|
|
|
# parse files of the format found in /usr/share/distro-info/ and return two
|
|
# lists: the first contains codenames of end-of-life distros and the second
|
|
# list contains codenames of currently active distros
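# The first csv line must carry the header
# version,codename,series,created,release,eol; rows without an eol date
# are treated as currently active.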
|
|
sub parse_distro_info {
|
|
my $file = shift;
|
|
my @eol = ();
|
|
my @current = ();
|
|
my $today = POSIX::strftime "%Y-%m-%d", localtime;
|
|
open my $fh, '<', $file or error "cannot open $file: $!";
|
|
my $i = 0;
|
|
while (my $line = <$fh>) {
|
|
chomp($line);
|
|
$i++;
|
|
my @cells = split /,/, $line;
|
|
if (scalar @cells < 4) {
|
|
error "cannot parse line $i of $file";
|
|
}
|
|
if (
|
|
$i == 1
|
|
and ( scalar @cells < 6
|
|
or $cells[0] ne 'version'
|
|
or $cells[1] ne 'codename'
|
|
or $cells[2] ne 'series'
|
|
or $cells[3] ne 'created'
|
|
or $cells[4] ne 'release'
|
|
or $cells[5] ne 'eol')
|
|
) {
|
|
error "cannot find correct header in $file";
|
|
}
|
|
if ($i == 1) {
|
|
next;
|
|
}
|
|
if (scalar @cells == 6) {
|
|
if ($cells[5] !~ m/^\d\d\d\d-\d\d-\d\d$/) {
|
|
error "invalid eof date format in $file:$i: $cells[5]";
|
|
}
|
|
# since the date format is iso8601, we can use lexicographic string
|
|
# comparison to compare dates
|
|
if ($cells[5] lt $today) {
|
|
push @eol, $cells[2];
|
|
} else {
|
|
push @current, $cells[2];
|
|
}
|
|
} else {
|
|
push @current, $cells[2];
|
|
}
|
|
}
|
|
close $fh;
|
|
return ([@eol], [@current]);
|
|
}
|
|
|
|
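# return a hash mapping each known vendor to a hash of suite names where the
# value is 1 for end-of-life suites and 0 for currently active ones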
sub get_suite_by_vendor {
|
|
my %suite_by_vendor = (
|
|
'debian' => {},
|
|
'ubuntu' => {},
|
|
'tanglu' => {},
|
|
'kali' => {},
|
|
);
|
|
|
|
# pre-fill with some known values
|
|
foreach my $suite (
|
|
'potato', 'woody', 'sarge', 'etch',
|
|
'lenny', 'squeeze', 'wheezy', 'jessie'
|
|
) {
|
|
$suite_by_vendor{'debian'}->{$suite} = 1;
|
|
}
|
|
foreach my $suite (
|
|
'unstable', 'stable', 'oldstable', 'stretch',
|
|
'buster', 'bullseye', 'bookworm', 'trixie'
|
|
) {
|
|
$suite_by_vendor{'debian'}->{$suite} = 0;
|
|
}
|
|
foreach my $suite ('aequorea', 'bartholomea', 'chromodoris', 'dasyatis') {
|
|
$suite_by_vendor{'tanglu'}->{$suite} = 0;
|
|
}
|
|
foreach my $suite ('kali-dev', 'kali-rolling', 'kali-bleeding-edge') {
|
|
$suite_by_vendor{'kali'}->{$suite} = 0;
|
|
}
|
|
foreach
|
|
my $suite ('trusty', 'xenial', 'zesty', 'artful', 'bionic', 'cosmic') {
|
|
$suite_by_vendor{'ubuntu'}->{$suite} = 0;
|
|
}
|
|
# if the Debian package distro-info-data is installed, then we can use it
# to get better data about new distros or EOL distros
|
|
if (-e '/usr/share/distro-info/debian.csv') {
|
|
my ($eol, $current)
|
|
= parse_distro_info('/usr/share/distro-info/debian.csv');
|
|
foreach my $suite (@{$eol}) {
|
|
$suite_by_vendor{'debian'}->{$suite} = 1;
|
|
}
|
|
foreach my $suite (@{$current}) {
|
|
$suite_by_vendor{'debian'}->{$suite} = 0;
|
|
}
|
|
}
|
|
if (-e '/usr/share/distro-info/ubuntu.csv') {
|
|
my ($eol, $current)
|
|
= parse_distro_info('/usr/share/distro-info/ubuntu.csv');
|
|
foreach my $suite (@{$eol}, @{$current}) {
|
|
$suite_by_vendor{'ubuntu'}->{$suite} = 0;
|
|
}
|
|
}
|
|
# if debootstrap is installed we infer distro names from the symlink
|
|
# targets of the scripts in /usr/share/debootstrap/scripts/
|
|
my $debootstrap_scripts = '/usr/share/debootstrap/scripts/';
|
|
if (-d $debootstrap_scripts) {
|
|
opendir(my $dh, $debootstrap_scripts)
|
|
or error "Can't opendir($debootstrap_scripts): $!";
|
|
while (my $suite = readdir $dh) {
|
|
# this is only a heuristic -- don't overwrite anything but instead
|
|
# just update anything that was missing
|
|
if (!-l "$debootstrap_scripts/$suite") {
|
|
next;
|
|
}
|
|
my $target = readlink "$debootstrap_scripts/$suite";
|
|
if ($target eq "sid"
|
|
and not exists $suite_by_vendor{'debian'}->{$suite}) {
|
|
$suite_by_vendor{'debian'}->{$suite} = 0;
|
|
} elsif ($target eq "gutsy"
|
|
and not exists $suite_by_vendor{'ubuntu'}->{$suite}) {
|
|
$suite_by_vendor{'ubuntu'}->{$suite} = 0;
|
|
} elsif ($target eq "aequorea"
|
|
and not exists $suite_by_vendor{'tanglu'}->{$suite}) {
|
|
$suite_by_vendor{'tanglu'}->{$suite} = 0;
|
|
} elsif ($target eq "kali"
|
|
and not exists $suite_by_vendor{'kali'}->{$suite}) {
|
|
$suite_by_vendor{'kali'}->{$suite} = 0;
|
|
}
|
|
}
|
|
closedir($dh);
|
|
}
|
|
|
|
return %suite_by_vendor;
|
|
}
|
|
|
|
# try to guess the right keyring path for the given suite
|
|
sub get_keyring_by_suite {
|
|
my $query = shift;
|
|
my $suite_by_vendor = shift;
|
|
|
|
my $debianvendor;
|
|
my $ubuntuvendor;
|
|
# make $@ local, so we don't print "Can't locate Dpkg/Vendor/Debian.pm"
|
|
# in other parts where we evaluate $@
|
|
local $@ = '';
|
|
eval {
|
|
require Dpkg::Vendor::Debian;
|
|
require Dpkg::Vendor::Ubuntu;
|
|
$debianvendor = Dpkg::Vendor::Debian->new();
|
|
$ubuntuvendor = Dpkg::Vendor::Ubuntu->new();
|
|
};
|
|
|
|
my $keyring_by_vendor = sub {
|
|
my $vendor = shift;
|
|
my $eol = shift;
|
|
if ($vendor eq 'debian') {
|
|
if ($eol) {
|
|
if (defined $debianvendor) {
|
|
return $debianvendor->run_hook(
|
|
'archive-keyrings-historic');
|
|
} else {
|
|
return
|
|
'/usr/share/keyrings/debian-archive-removed-keys.gpg';
|
|
}
|
|
} else {
|
|
if (defined $debianvendor) {
|
|
return $debianvendor->run_hook('archive-keyrings');
|
|
} else {
|
|
return '/usr/share/keyrings/debian-archive-keyring.gpg';
|
|
}
|
|
}
|
|
} elsif ($vendor eq 'ubuntu') {
|
|
if (defined $ubuntuvendor) {
|
|
return $ubuntuvendor->run_hook('archive-keyrings');
|
|
} else {
|
|
return '/usr/share/keyrings/ubuntu-archive-keyring.gpg';
|
|
}
|
|
} elsif ($vendor eq 'tanglu') {
|
|
return '/usr/share/keyrings/tanglu-archive-keyring.gpg';
|
|
} elsif ($vendor eq 'kali') {
|
|
return '/usr/share/keyrings/kali-archive-keyring.gpg';
|
|
} else {
|
|
error "unknown vendor: $vendor";
|
|
}
|
|
};
|
|
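# map every known suite to the keyring of its vendor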
my %keyrings = ();
|
|
foreach my $vendor (keys %{$suite_by_vendor}) {
|
|
foreach my $suite (keys %{ $suite_by_vendor->{$vendor} }) {
|
|
my $keyring = $keyring_by_vendor->(
|
|
$vendor, $suite_by_vendor->{$vendor}->{$suite});
|
|
debug "suite $suite with keyring $keyring";
|
|
$keyrings{$suite} = $keyring;
|
|
}
|
|
}
|
|
|
|
if (exists $keyrings{$query}) {
|
|
return $keyrings{$query};
|
|
} else {
|
|
return;
|
|
}
|
|
}
|
|
|
|
sub get_sourceslist_by_suite {
|
|
my $suite = shift;
|
|
my $arch = shift;
|
|
my $signedby = shift;
|
|
my $compstr = shift;
|
|
my $suite_by_vendor = shift;
|
|
|
|
my @debstable = keys %{ $suite_by_vendor->{'debian'} };
|
|
my @ubuntustable = keys %{ $suite_by_vendor->{'ubuntu'} };
|
|
my @tanglustable = keys %{ $suite_by_vendor->{'tanglu'} };
|
|
my @kali = keys %{ $suite_by_vendor->{'kali'} };
|
|
|
|
my $mirror = 'http://deb.debian.org/debian';
|
|
my $secmirror = 'http://security.debian.org/debian-security';
|
|
if (any { $_ eq $suite } @ubuntustable) {
|
|
if (any { $_ eq $arch } ('amd64', 'i386')) {
|
|
$mirror = 'http://archive.ubuntu.com/ubuntu';
|
|
$secmirror = 'http://security.ubuntu.com/ubuntu';
|
|
} else {
|
|
$mirror = 'http://ports.ubuntu.com/ubuntu-ports';
|
|
$secmirror = 'http://ports.ubuntu.com/ubuntu-ports';
|
|
}
|
|
if (-e '/usr/share/debootstrap/scripts/gutsy') {
|
|
# try running the debootstrap script but ignore errors
|
|
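# By stubbing out the functions that the debootstrap script expects and
# making default_mirror() print its argument, sourcing the script makes
# it output the mirror it would use for the requested architecture and
# suite.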
my $script = 'set -eu;
|
|
default_mirror() { echo $1; };
|
|
mirror_style() { :; };
|
|
download_style() { :; };
|
|
finddebs_style() { :; };
|
|
variants() { :; };
|
|
keyring() { :; };
|
|
doing_variant() { false; };
|
|
info() { fmt="$2"; shift; shift; printf "I: $fmt\n" "$@" >&2; };
|
|
. /usr/share/debootstrap/scripts/gutsy;';
|
|
open my $fh, '-|', 'env', "ARCH=$arch", "SUITE=$suite",
|
|
'sh', '-c', $script // last;
|
|
chomp(
|
|
my $output = do { local $/; <$fh> }
|
|
);
|
|
close $fh;
|
|
if ($? == 0 && $output ne '') {
|
|
$mirror = $output;
|
|
}
|
|
}
|
|
} elsif (any { $_ eq $suite } @tanglustable) {
|
|
$mirror = 'http://archive.tanglu.org/tanglu';
|
|
} elsif (any { $_ eq $suite } @kali) {
|
|
$mirror = 'https://http.kali.org/kali';
|
|
}
|
|
my $sourceslist = '';
|
|
$sourceslist .= "deb$signedby $mirror $suite $compstr\n";
|
|
if (any { $_ eq $suite } @ubuntustable) {
|
|
$sourceslist .= "deb$signedby $mirror $suite-updates $compstr\n";
|
|
$sourceslist .= "deb$signedby $secmirror $suite-security $compstr\n";
|
|
} elsif (any { $_ eq $suite } @tanglustable) {
|
|
$sourceslist .= "deb$signedby $secmirror $suite-updates $compstr\n";
|
|
} elsif (any { $_ eq $suite } @debstable
|
|
and none { $_ eq $suite } ('testing', 'unstable', 'sid')) {
|
|
$sourceslist .= "deb$signedby $mirror $suite-updates $compstr\n";
|
|
# the security mirror changes, starting with bullseye
|
|
# https://lists.debian.org/87r26wqr2a.fsf@43-1.org
|
|
my $bullseye_or_later = 0;
|
|
if (any { $_ eq $suite }
|
|
('oldstable', 'stable', 'bullseye', 'bookworm', 'trixie')) {
|
|
$bullseye_or_later = 1;
|
|
}
|
|
my $distro_info = '/usr/share/distro-info/debian.csv';
|
|
# make $@ local, so we don't print "Can't locate Debian/DistroInfo.pm"
|
|
# in other parts where we evaluate $@
|
|
local $@ = '';
|
|
eval { require Debian::DistroInfo; };
|
|
if (!$@) {
|
|
debug "libdistro-info-perl is installed";
|
|
my $debinfo = DebianDistroInfo->new();
|
|
if ($debinfo->version($suite, 0) >= 11) {
|
|
$bullseye_or_later = 1;
|
|
}
|
|
} elsif (-f $distro_info) {
|
|
debug "distro-info-data is installed";
|
|
open my $fh, '<', $distro_info
|
|
or error "cannot open $distro_info: $!";
|
|
my $i = 0;
|
|
my $matching_version;
|
|
my @releases;
|
|
my $today = POSIX::strftime "%Y-%m-%d", localtime;
|
|
while (my $line = <$fh>) {
|
|
chomp($line);
|
|
$i++;
|
|
my @cells = split /,/, $line;
|
|
if (scalar @cells < 4) {
|
|
error "cannot parse line $i of $distro_info";
|
|
}
|
|
if (
|
|
$i == 1
|
|
and ( scalar @cells < 6
|
|
or $cells[0] ne 'version'
|
|
or $cells[1] ne 'codename'
|
|
or $cells[2] ne 'series'
|
|
or $cells[3] ne 'created'
|
|
or $cells[4] ne 'release'
|
|
or $cells[5] ne 'eol')
|
|
) {
|
|
error "cannot find correct header in $distro_info";
|
|
}
|
|
if ($i == 1) {
|
|
next;
|
|
}
|
|
if ( scalar @cells > 4
|
|
and $cells[4] =~ m/^\d\d\d\d-\d\d-\d\d$/
|
|
and $cells[4] lt $today) {
|
|
push @releases, $cells[0];
|
|
}
|
|
if (lc $cells[1] eq $suite or lc $cells[2] eq $suite) {
|
|
$matching_version = $cells[0];
|
|
last;
|
|
}
|
|
}
|
|
close $fh;
|
|
if (defined $matching_version and $matching_version >= 11) {
|
|
$bullseye_or_later = 1;
|
|
}
|
|
if ($suite eq "stable" and $releases[-1] >= 11) {
|
|
$bullseye_or_later = 1;
|
|
}
|
|
} else {
|
|
debug "neither libdistro-info-perl nor distro-info-data installed";
|
|
}
|
|
if ($bullseye_or_later) {
|
|
# starting with bullseye, the security suite is called
# $suite-security instead of $suite/updates
|
|
$sourceslist
|
|
.= "deb$signedby $secmirror $suite-security" . " $compstr\n";
|
|
} else {
|
|
$sourceslist
|
|
.= "deb$signedby $secmirror $suite/updates" . " $compstr\n";
|
|
}
|
|
}
|
|
return $sourceslist;
|
|
}
|
|
|
|
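# guess whether the given sources.list content is in the classic one-line
# format or in the deb822 format by looking for "deb " lines as opposed to
# "Field: value" lines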
sub guess_sources_format {
|
|
my $content = shift;
|
|
my $is_deb822 = 0;
|
|
my $is_oneline = 0;
|
|
for my $line (split "\n", $content) {
|
|
if ($line =~ /^deb(-src)? /) {
|
|
$is_oneline = 1;
|
|
last;
|
|
}
|
|
if ($line =~ /^[^#:\s]+:/) {
|
|
$is_deb822 = 1;
|
|
last;
|
|
}
|
|
}
|
|
if ($is_deb822) {
|
|
return 'deb822';
|
|
}
|
|
if ($is_oneline) {
|
|
return 'one-line';
|
|
}
|
|
return;
|
|
}
|
|
|
|
sub approx_disk_usage {
|
|
my $directory = shift;
|
|
my $block_size = shift;
|
|
info "approximating disk usage...";
|
|
# the "du" utility reports different results depending on the underlying
|
|
# filesystem, see https://bugs.debian.org/650077 for a discussion
|
|
#
|
|
# we use code similar to the one used by dpkg-gencontrol instead
|
|
#
|
|
# Regular files are measured in number of $block_size byte blocks. All
|
|
# other entries are assumed to take one block of space.
|
|
#
|
|
# We ignore /dev because depending on the mode, the directory might be
|
|
# populated or not and we want consistent disk usage results independent
|
|
# of the mode.
|
|
my $installed_size = 0;
|
|
my %hardlink;
|
|
my $scan_installed_size = sub {
|
|
if ($File::Find::name eq "$directory/dev") {
|
|
# add all entries of @linuxdevfiles once
|
|
$installed_size += scalar @linuxdevfiles;
|
|
return;
|
|
} elsif ($File::Find::name =~ /^$directory\/dev\//) {
|
|
# ignore everything below /dev
|
|
return;
|
|
}
|
|
|
|
lstat or error "cannot stat $File::Find::name";
|
|
|
|
if (-f _ or -l _) {
|
|
my ($dev, $ino, $nlink) = (lstat _)[0, 1, 3];
|
|
return if exists $hardlink{"$dev:$ino"};
|
|
# Track hardlinks to avoid repeated additions.
|
|
$hardlink{"$dev:$ino"} = 1 if $nlink > 1;
|
|
# add file size in $block_size byte blocks, rounded up
|
|
$installed_size += int(((-s _) + $block_size) / $block_size);
|
|
} else {
|
|
# all other entries are assumed to only take up one block
|
|
$installed_size += 1;
|
|
}
|
|
};
|
|
# We use no_chdir because otherwise the unshared user has to have read
|
|
# permissions for the current working directory when producing an ext2
|
|
# image. See https://bugs.debian.org/1005857
|
|
find({ wanted => $scan_installed_size, no_chdir => 1 }, $directory);
|
|
|
|
# the above is only a heuristic and especially ext4 will consume quite a
|
|
# few more blocks than the heuristic above is going to compute
|
|
return int($installed_size * 1.2);
|
|
}
|
|
|
|
sub main() {
|
|
my $before = Time::HiRes::time;
|
|
|
|
umask 022;
|
|
|
|
if (scalar @ARGV >= 7 && $ARGV[0] eq "--hook-helper") {
|
|
shift @ARGV; # shift off "--hook-helper"
|
|
hookhelper(@ARGV);
|
|
exit 0;
|
|
}
|
|
|
|
# this is the counterpart to --hook-helper and will receive and carry
|
|
# out its instructions
|
|
if (scalar @ARGV == 2 && $ARGV[0] eq "--hook-listener") {
|
|
hooklistener($ARGV[1]);
|
|
exit 0;
|
|
}
|
|
|
|
# this is like:
|
|
# lxc-usernsexec -- lxc-unshare -s 'MOUNT|PID|UTSNAME|IPC' ...
|
|
# but without needing lxc
|
|
if (scalar @ARGV >= 1 && $ARGV[0] eq "--unshare-helper") {
|
|
if ($EFFECTIVE_USER_ID != 0) {
|
|
test_unshare_userns(1);
|
|
}
|
|
my @idmap = ();
|
|
if ($EFFECTIVE_USER_ID != 0) {
|
|
@idmap = read_subuid_subgid 1;
|
|
}
|
|
my $pid = get_unshare_cmd(
|
|
sub {
|
|
0 == system @ARGV[1 .. $#ARGV] or error "system failed: $?";
|
|
},
|
|
\@idmap
|
|
);
|
|
waitpid $pid, 0;
|
|
$? == 0 or error "unshared command failed";
|
|
exit 0;
|
|
}
|
|
|
|
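# allow SOURCE_DATE_EPOCH to override the timestamp, for example to
# create reproducible output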
my $mtime = time;
|
|
if (exists $ENV{SOURCE_DATE_EPOCH}) {
|
|
$mtime = $ENV{SOURCE_DATE_EPOCH} + 0;
|
|
}
|
|
|
|
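# run apt, dpkg and debconf in a non-interactive and language-neutral
# environment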
{
|
|
## no critic (Variables::RequireLocalizedPunctuationVars)
|
|
$ENV{DEBIAN_FRONTEND} = 'noninteractive';
|
|
$ENV{DEBCONF_NONINTERACTIVE_SEEN} = 'true';
|
|
$ENV{LC_ALL} = 'C.UTF-8';
|
|
$ENV{LANGUAGE} = 'C.UTF-8';
|
|
$ENV{LANG} = 'C.UTF-8';
|
|
}
|
|
|
|
# copy ARGV because getopt modifies it
|
|
my @ARGVORIG = @ARGV;
|
|
|
|
# obtain the correct defaults for the keyring locations that apt knows
|
|
# about
|
|
my $apttrusted
|
|
= `eval \$(apt-config shell v Dir::Etc::trusted/f); printf \$v`;
|
|
my $apttrustedparts
|
|
= `eval \$(apt-config shell v Dir::Etc::trustedparts/d); printf \$v`;
|
|
|
|
chomp(my $hostarch = `dpkg --print-architecture`);
|
|
my $options = {
|
|
components => ["main"],
|
|
variant => "important",
|
|
include => [],
|
|
architectures => [$hostarch],
|
|
mode => 'auto',
|
|
format => 'auto',
|
|
dpkgopts => '',
|
|
aptopts => '',
|
|
apttrusted => $apttrusted,
|
|
apttrustedparts => $apttrustedparts,
|
|
noop => [],
|
|
setup_hook => [],
|
|
extract_hook => [],
|
|
essential_hook => [],
|
|
customize_hook => [],
|
|
dryrun => 0,
|
|
skip => [],
|
|
};
|
|
my $logfile = undef;
|
|
Getopt::Long::Configure('default', 'bundling', 'auto_abbrev',
|
|
'ignore_case_always');
|
|
GetOptions(
|
|
'h|help' => sub { pod2usage(-exitval => 0, -verbose => 1) },
|
|
'man' => sub { pod2usage(-exitval => 0, -verbose => 2) },
|
|
'version' => sub { print STDOUT "mmdebstrap $VERSION\n"; exit 0; },
|
|
'components=s@' => \$options->{components},
|
|
'variant=s' => \$options->{variant},
|
|
'include=s' => sub {
|
|
my ($opt_name, $opt_value) = @_;
|
|
my $sanitize_path = sub {
|
|
my $pkg = shift;
|
|
$pkg = abs_path($pkg)
|
|
// error "cannot resolve absolute path of $pkg: $!";
|
|
if ($pkg !~ /^\//) {
|
|
error "absolute path of $pkg doesn't start with a slash";
|
|
}
|
|
if (!-f $pkg) {
|
|
error "$pkg is not an existing file";
|
|
}
|
|
if (!-r $pkg) {
|
|
error "$pkg is not readable";
|
|
}
|
|
return $pkg;
|
|
};
|
|
if ($opt_value =~ /^[?~!(]/) {
|
|
# Treat option as a single apt pattern and don't split by comma
|
|
# or whitespace -- append it verbatim.
|
|
push @{ $options->{include} }, $opt_value;
|
|
} elsif ($opt_value =~ /^\.?\.?\//) {
|
|
# Treat option as a single path name and don't split by comma
|
|
# or whitespace -- append the normalized path.
|
|
push @{ $options->{include} }, &{$sanitize_path}($opt_value);
|
|
} else {
|
|
for my $pkg (split /[,\s]+/, $opt_value) {
|
|
# strip leading and trailing whitespace
|
|
$pkg =~ s/^\s+|\s+$//g;
|
|
# skip if the remainder is an empty string
|
|
if ($pkg eq '') {
|
|
next;
|
|
}
|
|
# Make paths canonical absolute paths, resolve symlinks
|
|
# and check if it's an existing file.
|
|
if ($pkg =~ /^\.?\.?\//) {
|
|
$pkg = &{$sanitize_path}($pkg);
|
|
}
|
|
push @{ $options->{include} }, $pkg;
|
|
}
|
|
}
|
|
# We are not sorting or otherwise normalizing the order of
|
|
# arguments to apt because package order matters for "apt install"
|
|
# since https://salsa.debian.org/apt-team/apt/-/merge_requests/256
|
|
},
|
|
'architectures=s@' => \$options->{architectures},
|
|
'mode=s' => \$options->{mode},
|
|
'dpkgopt=s' => sub {
|
|
my ($opt_name, $opt_value) = @_;
|
|
if (-r $opt_value) {
|
|
open my $fh, '<', $opt_value
|
|
or error "failed to open $opt_value: $!";
|
|
$options->{dpkgopts} .= do { local $/; <$fh> };
|
|
if ($options->{dpkgopts} !~ /\n$/) {
|
|
print $fh "\n";
|
|
}
|
|
close $fh;
|
|
} else {
|
|
$options->{dpkgopts} .= $opt_value;
|
|
if ($opt_value !~ /\n$/) {
|
|
$options->{dpkgopts} .= "\n";
|
|
}
|
|
}
|
|
},
|
|
'aptopt=s' => sub {
|
|
my ($opt_name, $opt_value) = @_;
|
|
if (-r $opt_value) {
|
|
open my $fh, '<', $opt_value
|
|
or error "failed to open $opt_value: $!";
|
|
$options->{aptopts} .= do { local $/; <$fh> };
|
|
if ($options->{aptopts} !~ /\n$/) {
|
|
print $fh "\n";
|
|
}
|
|
close $fh;
|
|
} else {
|
|
$options->{aptopts} .= $opt_value;
|
|
if ($opt_value !~ /;$/) {
|
|
$options->{aptopts} .= ';';
|
|
}
|
|
if ($opt_value !~ /\n$/) {
|
|
$options->{aptopts} .= "\n";
|
|
}
|
|
}
|
|
},
|
|
'keyring=s' => sub {
|
|
my ($opt_name, $opt_value) = @_;
|
|
if ($opt_value =~ /"/) {
|
|
error "--keyring: apt cannot handle paths with double quotes:"
|
|
. " $opt_value";
|
|
}
|
|
if (!-e $opt_value) {
|
|
error "keyring \"$opt_value\" does not exist";
|
|
}
|
|
my $abs_path = abs_path($opt_value);
|
|
if (!defined $abs_path) {
|
|
error "unable to get absolute path of --keyring: $opt_value";
|
|
}
|
|
# since abs_path resolved all symlinks for us, we can now test
|
|
# what the actual target is
|
|
if (-d $abs_path) {
|
|
$options->{apttrustedparts} = $abs_path;
|
|
} else {
|
|
$options->{apttrusted} = $abs_path;
|
|
}
|
|
},
|
|
's|silent' => sub { $verbosity_level = 0; },
|
|
'q|quiet' => sub { $verbosity_level = 0; },
|
|
'v|verbose' => sub { $verbosity_level = 2; },
|
|
'd|debug' => sub { $verbosity_level = 3; },
|
|
'format=s' => \$options->{format},
|
|
'logfile=s' => \$logfile,
|
|
# no-op options so that mmdebstrap can be used with
|
|
# sbuild-createchroot --debootstrap=mmdebstrap
|
|
'resolve-deps' => sub { push @{ $options->{noop} }, 'resolve-deps'; },
|
|
'merged-usr' => sub { push @{ $options->{noop} }, 'merged-usr'; },
|
|
'no-merged-usr' =>
|
|
sub { push @{ $options->{noop} }, 'no-merged-usr'; },
|
|
'force-check-gpg' =>
|
|
sub { push @{ $options->{noop} }, 'force-check-gpg'; },
|
|
'setup-hook=s' => sub {
|
|
push @{ $options->{setup_hook} }, ["normal", $_[1]];
|
|
},
|
|
'extract-hook=s' => sub {
|
|
push @{ $options->{extract_hook} }, ["normal", $_[1]];
|
|
},
|
|
'chrooted-extract-hook=s' => sub {
|
|
push @{ $options->{extract_hook} }, ["pivoted", $_[1]];
|
|
},
|
|
'essential-hook=s' => sub {
|
|
push @{ $options->{essential_hook} }, ["normal", $_[1]];
|
|
},
|
|
'chrooted-essential-hook=s' => sub {
|
|
push @{ $options->{essential_hook} }, ["pivoted", $_[1]];
|
|
},
|
|
'customize-hook=s' => sub {
|
|
push @{ $options->{customize_hook} }, ["normal", $_[1]];
|
|
},
|
|
'chrooted-customize-hook=s' => sub {
|
|
push @{ $options->{customize_hook} }, ["pivoted", $_[1]];
|
|
},
|
|
'hook-directory=s' => sub {
|
|
my ($opt_name, $opt_value) = @_;
|
|
if (!-e $opt_value) {
|
|
error "hook directory \"$opt_value\" does not exist";
|
|
}
|
|
my $abs_path = abs_path($opt_value);
|
|
if (!defined $abs_path) {
|
|
error( "unable to get absolute path of "
|
|
. "--hook-directory: $opt_value");
|
|
}
|
|
# since abs_path resolved all symlinks for us, we can now test
|
|
# what the actual target is
|
|
if (!-d $opt_value) {
|
|
error "hook directory \"$opt_value\" is not a directory";
|
|
}
|
|
# gather all files starting with special prefixes into the
|
|
# respective keys of a hash
|
|
my %scripts;
|
|
my $count = 0;
|
|
opendir(my $dh, $opt_value)
|
|
or error "Can't opendir($opt_value): $!";
|
|
while (my $entry = readdir $dh) {
|
|
# skip the "." and ".." entries
|
|
next if $entry eq ".";
|
|
next if $entry eq "..";
|
|
my $found = 0;
|
|
foreach
|
|
my $hook ('setup', 'extract', 'essential', 'customize') {
|
|
if ($entry =~ m/^\Q$hook\E/) {
|
|
if (-x "$opt_value/$entry") {
|
|
push @{ $scripts{$hook} }, "$opt_value/$entry";
|
|
$count += 1;
|
|
$found = 1;
|
|
} else {
|
|
warning("$opt_value/$entry is named like a "
|
|
. "hook but not executable");
|
|
}
|
|
}
|
|
}
|
|
if (!$found && -x "$opt_value/$entry") {
|
|
warning("$opt_value/$entry: is executable "
|
|
. "but not prefixed with a hook name");
|
|
}
|
|
}
|
|
closedir($dh);
|
|
if ($count == 0) {
|
|
warning "No executable hook scripts found in $opt_value";
|
|
return;
|
|
}
|
|
# add the sorted list associated with each key to the respective
|
|
# list of hooks
|
|
foreach my $hook (keys %scripts) {
|
|
push @{ $options->{"${hook}_hook"} },
|
|
(map { ["normal", $_] } (sort @{ $scripts{$hook} }));
|
|
}
|
|
},
|
|
# Sometimes --simulate fails even though non-simulate succeeds because
|
|
# in simulate mode, apt cannot rely on dpkg to figure out tricky
|
|
# dependency situations and will give up instead when it cannot find
|
|
# a solution.
|
|
#
|
|
# 2020-02-06, #debian-apt on OFTC, times in UTC+1
|
|
# 12:52 < DonKult> [...] It works in non-simulation because simulate is
|
|
# more picky. If you wanna know why simulate complains
|
|
# here prepare for long suffering in dependency hell.
|
|
'simulate' => \$options->{dryrun},
|
|
'dry-run' => \$options->{dryrun},
|
|
'skip=s' => sub {
|
|
my ($opt_name, $opt_value) = @_;
|
|
for my $skip (split /[,\s]+/, $opt_value) {
|
|
# strip leading and trailing whitespace
|
|
$skip =~ s/^\s+|\s+$//g;
|
|
# skip if the remainder is an empty string
|
|
if ($skip eq '') {
|
|
next;
|
|
}
|
|
push @{ $options->{skip} }, $skip;
|
|
}
|
|
}) or pod2usage(-exitval => 2, -verbose => 0);
|
|
|
|
if (defined($logfile)) {
|
|
open(STDERR, '>', $logfile) or error "cannot open $logfile: $!";
|
|
}
|
|
|
|
foreach my $arg (@{ $options->{noop} }) {
|
|
info "the option --$arg is a no-op. It only exists for compatibility"
|
|
. " with some debootstrap wrappers.";
|
|
}
|
|
|
|
if ($options->{dryrun}) {
|
|
foreach my $hook ('setup', 'extract', 'essential', 'customize') {
|
|
if (scalar @{ $options->{"${hook}_hook"} } > 0) {
|
|
warning "In dry-run mode, --$hook-hook options have no effect";
|
|
}
|
|
if ($options->{mode} eq 'chrootless') {
|
|
foreach my $script (@{ $options->{"${hook}_hook"} }) {
|
|
if ($script->[0] eq "pivoted") {
|
|
error "--chrooted-$hook-hook are illegal in "
|
|
. "chrootless mode";
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
my @valid_variants = (
|
|
'extract', 'custom', 'essential', 'apt',
|
|
'required', 'minbase', 'buildd', 'important',
|
|
'debootstrap', '-', 'standard'
|
|
);
|
|
if (none { $_ eq $options->{variant} } @valid_variants) {
|
|
error "invalid variant. Choose from " . (join ', ', @valid_variants);
|
|
}
|
|
# debootstrap and - are an alias for important
|
|
if (any { $_ eq $options->{variant} } ('-', 'debootstrap')) {
|
|
$options->{variant} = 'important';
|
|
}
|
|
# minbase is an alias for required
|
|
if ($options->{variant} eq 'minbase') {
|
|
$options->{variant} = 'required';
|
|
}
|
|
|
|
# fakeroot is an alias for fakechroot
|
|
if ($options->{mode} eq 'fakeroot') {
|
|
$options->{mode} = 'fakechroot';
|
|
}
|
|
# sudo is an alias for root
|
|
if ($options->{mode} eq 'sudo') {
|
|
$options->{mode} = 'root';
|
|
}
|
|
my @valid_modes = ('auto', 'root', 'unshare', 'fakechroot', 'chrootless');
|
|
if (none { $_ eq $options->{mode} } @valid_modes) {
|
|
error "invalid mode. Choose from " . (join ', ', @valid_modes);
|
|
}
|
|
|
|
# sqfs is an alias for squashfs
|
|
if ($options->{format} eq 'sqfs') {
|
|
$options->{format} = 'squashfs';
|
|
}
|
|
# dir is an alias for directory
|
|
if ($options->{format} eq 'dir') {
|
|
$options->{format} = 'directory';
|
|
}
|
|
my @valid_formats
|
|
= ('auto', 'directory', 'tar', 'squashfs', 'ext2', 'ext4', 'null');
|
|
if (none { $_ eq $options->{format} } @valid_formats) {
|
|
error "invalid format. Choose from " . (join ', ', @valid_formats);
|
|
}
|
|
|
|
# setting PATH for chroot, ldconfig, start-stop-daemon...
|
|
my $defaultpath = `eval \$(apt-config shell v DPkg::Path); printf \$v`;
|
|
if (length $ENV{PATH}) {
|
|
## no critic (Variables::RequireLocalizedPunctuationVars)
|
|
$ENV{PATH} = "$ENV{PATH}:$defaultpath";
|
|
} else {
|
|
## no critic (Variables::RequireLocalizedPunctuationVars)
|
|
$ENV{PATH} = $defaultpath;
|
|
}
|
|
|
|
foreach my $tool (
|
|
'dpkg', 'dpkg-deb', 'apt-get', 'apt-cache',
|
|
'apt-config', 'tar', 'rm', 'find',
|
|
'env'
|
|
) {
|
|
if (!can_execute $tool) {
|
|
error "cannot find $tool";
|
|
}
|
|
}
|
|
|
|
{
|
|
my $dpkgversion = version->new(0);
|
|
my $pid = open my $fh, '-|' // error "failed to fork(): $!";
|
|
if ($pid == 0) {
|
|
# redirect stderr to /dev/null to hide error messages from dpkg
|
|
# versions before 1.20.0
|
|
open(STDERR, '>', '/dev/null')
|
|
or error "cannot open /dev/null for writing: $!";
|
|
exec 'dpkg', '--robot', '--version';
|
|
}
|
|
chomp(
|
|
my $content = do { local $/; <$fh> }
|
|
);
|
|
close $fh;
|
|
# the --robot option was introduced in 1.20.0 but until 1.20.2 the
|
|
# output contained a string after the version, separated by a
|
|
# whitespace -- since then, it's only the version
|
|
if ($? == 0 and $content =~ /^([0-9.]+).*$/) {
|
|
# dpkg is new enough for the --robot option
|
|
$dpkgversion = version->new($1);
|
|
}
|
|
if ($dpkgversion < "1.20.0") {
|
|
error "need dpkg >= 1.20.0 but have $dpkgversion";
|
|
}
|
|
}
|
|
|
|
{
|
|
my $aptversion = version->new(0);
|
|
my $pid = open my $fh, '-|', 'apt-get',
|
|
'--version' // error "failed to fork(): $!";
|
|
chomp(
|
|
my $content = do { local $/; <$fh> }
|
|
);
|
|
close $fh;
|
|
if ( $? == 0
|
|
and $content =~ /^apt (\d+\.\d+\.\d+)\S* \(\S+\)$/am) {
|
|
$aptversion = version->new($1);
|
|
}
|
|
if ($aptversion < "2.3.14") {
|
|
error "need apt >= 2.3.14 but have $aptversion";
|
|
}
|
|
}
|
|
|
|
my $check_fakechroot_running = sub {
|
|
# test if we are inside fakechroot already
|
|
# We fork a child process because setting FAKECHROOT_DETECT seems to
|
|
# be an irreversible operation for fakechroot.
|
|
my $pid = open my $rfh, '-|' // error "failed to fork(): $!";
|
|
if ($pid == 0) {
|
|
# with the FAKECHROOT_DETECT environment variable set, any program
|
|
# execution will be replaced with the output "fakeroot [version]"
|
|
local $ENV{FAKECHROOT_DETECT} = 0;
|
|
exec 'echo', 'If fakechroot is running, this will not be printed';
|
|
}
|
|
my $content = do { local $/; <$rfh> };
|
|
waitpid $pid, 0;
|
|
my $result = 0;
|
|
if ($? == 0 and $content =~ /^fakechroot [0-9.]+$/) {
|
|
$result = 1;
|
|
}
|
|
return $result;
|
|
};
|
|
|
|
# figure out the mode to use or test whether the chosen mode is legal
|
|
if ($options->{mode} eq 'auto') {
|
|
if (&{$check_fakechroot_running}()) {
|
|
# if mmdebstrap is executed inside fakechroot, then we assume the
|
|
# user expects fakechroot mode
|
|
$options->{mode} = 'fakechroot';
|
|
} elsif ($EFFECTIVE_USER_ID == 0) {
|
|
# if mmdebstrap is executed as root, we assume the user wants root
|
|
# mode
|
|
$options->{mode} = 'root';
|
|
} elsif (test_unshare_userns(0)) {
|
|
# if we are not root, unshare mode is our best option if
|
|
# test_unshare_userns() succeeds
|
|
$options->{mode} = 'unshare';
|
|
} elsif (can_execute 'fakechroot') {
|
|
# the next fallback is fakechroot
|
|
# exec ourselves again but within fakechroot
|
|
my @prefix = ();
|
|
if ($is_covering) {
|
|
@prefix = ($EXECUTABLE_NAME, '-MDevel::Cover=-silent,-nogcov');
|
|
}
|
|
exec 'fakechroot', 'fakeroot', @prefix, $PROGRAM_NAME, @ARGVORIG;
|
|
} else {
|
|
error( "unable to pick chroot mode automatically (use --mode for "
|
|
. "manual selection)");
|
|
}
|
|
info "automatically chosen mode: $options->{mode}";
|
|
} elsif ($options->{mode} eq 'root') {
|
|
if ($EFFECTIVE_USER_ID != 0) {
|
|
error "need to be root";
|
|
}
|
|
} elsif ($options->{mode} eq 'fakechroot') {
|
|
if (&{$check_fakechroot_running}()) {
|
|
# fakechroot is already running
|
|
} elsif (!can_execute 'fakechroot') {
|
|
error "need working fakechroot binary";
|
|
} else {
|
|
# exec ourselves again but within fakechroot
|
|
my @prefix = ();
|
|
if ($is_covering) {
|
|
@prefix = ($EXECUTABLE_NAME, '-MDevel::Cover=-silent,-nogcov');
|
|
}
|
|
exec 'fakechroot', 'fakeroot', @prefix, $PROGRAM_NAME, @ARGVORIG;
|
|
}
|
|
} elsif ($options->{mode} eq 'unshare') {
|
|
# For unshare mode to work we either need to already be the root user
|
|
# and then we do not have to unshare the user namespace anymore but we
|
|
# need to be able to unshare the mount namespace...
|
|
#
|
|
# We need to call unshare with "--propagation unchanged" or otherwise
|
|
# we get 'cannot change root filesystem propagation' when running
|
|
# mmdebstrap inside a chroot for which the root of the chroot is not
|
|
# its own mount point.
|
|
if ($EFFECTIVE_USER_ID == 0
|
|
&& 0 != system 'unshare --mount --propagation unchanged -- true') {
|
|
error "unable to unshare the mount namespace";
|
|
}
|
|
# ...or we are not root and then we need to be able to unshare the user
|
|
# namespace.
|
|
if ($EFFECTIVE_USER_ID != 0) {
|
|
test_unshare_userns(1);
|
|
}
|
|
} elsif ($options->{mode} eq 'chrootless') {
|
|
if (any { $_ eq 'check/chrootless' } @{ $options->{skip} }) {
|
|
info "skipping check/chrootless as requested";
|
|
} else {
|
|
my $ischroot = 0 == system 'ischroot';
|
|
if ( $EFFECTIVE_USER_ID == 0
|
|
&& !exists $ENV{FAKEROOTKEY}
|
|
&& !$ischroot) {
|
|
error
|
|
"running chrootless mode as root without fakeroot might "
|
|
. "damage the host system if not run inside a chroot";
|
|
}
|
|
}
|
|
} else {
|
|
error "unknown mode: $options->{mode}";
|
|
}
|
|
|
|
$options->{canmount} = 1;
|
|
if ($options->{mode} eq 'root') {
|
|
# It's possible to be root but not be able to mount anything.
|
|
# This is for example the case when running under docker.
|
|
# Mounting needs CAP_SYS_ADMIN which might not be available.
|
|
#
|
|
# We test for CAP_SYS_ADMIN using the capget syscall.
|
|
# We cannot use cap_get_proc from sys/capability.h because it cannot be
# called from Perl without extra bindings.
|
|
# We don't use capsh because we don't want to depend on libcap2-bin
|
|
my $hdrp = pack(
|
|
"Li", # __u32 followed by int
|
|
$_LINUX_CAPABILITY_VERSION_3, # available since Linux 2.6.26
|
|
0 # caps of this process
|
|
);
|
|
my $datap = pack("LLLLLL", 0, 0, 0, 0, 0, 0); # six __u32
|
|
0 == syscall &SYS_capget, $hdrp, $datap
|
|
or error "capget failed: $!";
|
|
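# the first __u32 of the returned data contains the effective
# capability bits 0 through 31, which covers CAP_SYS_ADMIN (bit 21)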
my ($effective, undef) = unpack "LLLLLL", $datap;
|
|
if ((($effective >> $CAP_SYS_ADMIN) & 1) != 1) {
|
|
warning
|
|
"cannot mount because CAP_SYS_ADMIN is not in the effective set";
|
|
$options->{canmount} = 0;
|
|
}
|
|
if (0 == syscall &SYS_prctl, $PR_CAPBSET_READ, $CAP_SYS_ADMIN) {
|
|
warning
|
|
"cannot mount because CAP_SYS_ADMIN is not in the bounding set";
|
|
$options->{canmount} = 0;
|
|
}
|
|
# To test whether we can use mount without actually trying to mount
|
|
# something we try unsharing the mount namespace. If this is allowed,
|
|
# then we are also allowed to mount.
|
|
#
|
|
# We need to call unshare with "--propagation unchanged" or otherwise
|
|
# we get 'cannot change root filesystem propagation' when running
|
|
# mmdebstrap inside a chroot for which the root of the chroot is not
|
|
# its own mount point.
|
|
if (0 != system 'unshare --mount --propagation unchanged -- true') {
|
|
# if we cannot unshare the mount namespace as root, then we also
|
|
# cannot mount
|
|
warning "cannot mount because unshare --mount failed";
|
|
$options->{canmount} = 0;
|
|
}
|
|
}
|
|
|
|
if (any { $_ eq $options->{mode} } ('root', 'unshare')) {
|
|
if (!can_execute 'mount') {
|
|
warning "cannot execute mount";
|
|
$options->{canmount} = 0;
|
|
}
|
|
}
|
|
|
|
# we can only possibly mount in root and unshare mode
|
|
if (none { $_ eq $options->{mode} } ('root', 'unshare')) {
|
|
$options->{canmount} = 0;
|
|
}
|
|
|
|
my @architectures = ();
|
|
foreach my $archs (@{ $options->{architectures} }) {
|
|
foreach my $arch (split /[,\s]+/, $archs) {
|
|
# strip leading and trailing whitespace
|
|
$arch =~ s/^\s+|\s+$//g;
|
|
# skip if the remainder is an empty string
|
|
if ($arch eq '') {
|
|
next;
|
|
}
|
|
# do not append architecture if it's already in the list
|
|
if (any { $_ eq $arch } @architectures) {
|
|
next;
|
|
}
|
|
push @architectures, $arch;
|
|
}
|
|
}
|
|
|
|
$options->{nativearch} = $hostarch;
|
|
$options->{foreignarchs} = [];
|
|
if (scalar @architectures == 0) {
|
|
warning "empty architecture list: falling back to native architecture"
|
|
. " $hostarch";
|
|
} elsif (scalar @architectures == 1) {
|
|
$options->{nativearch} = $architectures[0];
|
|
} else {
|
|
$options->{nativearch} = $architectures[0];
|
|
push @{ $options->{foreignarchs} },
|
|
@architectures[1 .. $#architectures];
|
|
}
|
|
|
|
debug "Native architecture (outside): $hostarch";
|
|
debug "Native architecture (inside): $options->{nativearch}";
|
|
debug("Foreign architectures (inside): "
|
|
. (join ', ', @{ $options->{foreignarchs} }));
|
|
|
|
{
|
|
# FIXME: autogenerate this list
|
|
my $deb2qemu = {
|
|
alpha => 'alpha',
|
|
amd64 => 'x86_64',
|
|
arm => 'arm',
|
|
arm64 => 'aarch64',
|
|
armel => 'arm',
|
|
armhf => 'arm',
|
|
hppa => 'hppa',
|
|
i386 => 'i386',
|
|
m68k => 'm68k',
|
|
mips => 'mips',
|
|
mips64 => 'mips64',
|
|
mips64el => 'mips64el',
|
|
mipsel => 'mipsel',
|
|
powerpc => 'ppc',
|
|
ppc64 => 'ppc64',
|
|
ppc64el => 'ppc64le',
|
|
riscv64 => 'riscv64',
|
|
s390x => 's390x',
|
|
sh4 => 'sh4',
|
|
sparc => 'sparc',
|
|
sparc64 => 'sparc64',
|
|
};
|
|
if (any { $_ eq 'check/qemu' } @{ $options->{skip} }) {
|
|
info "skipping check/qemu as requested";
|
|
} elsif ($options->{mode} eq "chrootless") {
|
|
info "skipping emulation check in chrootless mode";
|
|
} elsif ($options->{variant} eq "extract") {
|
|
info "skipping emulation check for extract variant";
|
|
} elsif ($hostarch ne $options->{nativearch}) {
|
|
if (!can_execute 'arch-test') {
|
|
error "install arch-test for foreign architecture support";
|
|
}
|
|
my $withemu = 0;
|
|
my $noemu = 0;
|
|
{
|
|
my $pid = open my $fh, '-|' // error "failed to fork(): $!";
|
|
if ($pid == 0) {
|
|
{
|
|
## no critic (TestingAndDebugging::ProhibitNoWarnings)
|
|
# don't print a warning if the following fails
|
|
no warnings;
|
|
exec 'arch-test', $options->{nativearch};
|
|
}
|
|
# if exec didn't work (for example because the arch-test
|
|
# program is missing) prepare for the worst and assume that
|
|
# the architecture cannot be executed
|
|
print "$options->{nativearch}: not supported on this"
|
|
. " machine/kernel\n";
|
|
exit 1;
|
|
}
|
|
chomp(
|
|
my $content = do { local $/; <$fh> }
|
|
);
|
|
close $fh;
|
|
if ($? == 0 and $content eq "$options->{nativearch}: ok") {
|
|
$withemu = 1;
|
|
}
|
|
}
|
|
{
|
|
my $pid = open my $fh, '-|' // error "failed to fork(): $!";
|
|
if ($pid == 0) {
|
|
{
|
|
## no critic (TestingAndDebugging::ProhibitNoWarnings)
|
|
# don't print a warning if the following fails
|
|
no warnings;
|
|
exec 'arch-test', '-n', $options->{nativearch};
|
|
}
|
|
# if exec didn't work (for example because the arch-test
|
|
# program is missing) prepare for the worst and assume that
|
|
# the architecture cannot be executed
|
|
print "$options->{nativearch}: not supported on this"
|
|
. " machine/kernel\n";
|
|
exit 1;
|
|
}
|
|
chomp(
|
|
my $content = do { local $/; <$fh> }
|
|
);
|
|
close $fh;
|
|
if ($? == 0 and $content eq "$options->{nativearch}: ok") {
|
|
$noemu = 1;
|
|
}
|
|
}
|
|
# four different outcomes, depending on whether arch-test
|
|
# succeeded with or without emulation
|
|
#
|
|
# withemu | noemu |
|
|
# --------+-------+-----------------
|
|
# 0 | 0 | test why emu doesn't work and quit
|
|
# 0 | 1 | should never happen
|
|
# 1 | 0 | use qemu emulation
|
|
# 1 | 1 | don't use qemu emulation
|
|
if ($withemu == 0 and $noemu == 0) {
|
|
{
|
|
open my $fh, '<', '/proc/filesystems'
|
|
or error "failed to open /proc/filesystems: $!";
|
|
unless (grep { /^nodev\tbinfmt_misc$/ } (<$fh>)) {
|
|
warning "binfmt_misc not found in /proc/filesystems --"
|
|
. " is the module loaded?";
|
|
}
|
|
close $fh;
|
|
}
|
|
{
|
|
open my $fh, '<', '/proc/mounts'
|
|
or error "failed to open /proc/mounts: $!";
|
|
unless (
|
|
grep {
|
|
/^binfmt_misc\s+
|
|
\/proc\/sys\/fs\/binfmt_misc\s+
|
|
binfmt_misc\s+/x
|
|
} (<$fh>)
|
|
) {
|
|
warning "binfmt_misc not found in /proc/mounts -- not"
|
|
. " mounted?";
|
|
}
|
|
close $fh;
|
|
}
|
|
{
|
|
if (!exists $deb2qemu->{ $options->{nativearch} }) {
|
|
warning "no mapping from $options->{nativearch} to"
|
|
. " qemu-user binary";
|
|
} elsif (!can_execute 'update-binfmts') {
|
|
warning "cannot find update-binfmts";
|
|
} else {
|
|
my $binfmt_identifier
|
|
= 'qemu-' . $deb2qemu->{ $options->{nativearch} };
|
|
open my $fh, '-|', 'update-binfmts', '--display',
|
|
$binfmt_identifier // error "failed to fork(): $!";
|
|
chomp(
|
|
my $binfmts = do { local $/; <$fh> }
|
|
);
|
|
close $fh;
|
|
if ($? != 0 || $binfmts eq '') {
|
|
warning "$binfmt_identifier is not a supported"
|
|
. " binfmt name";
|
|
}
|
|
}
|
|
}
|
|
error "$options->{nativearch} can neither be executed natively"
|
|
. " nor via qemu user emulation with binfmt_misc";
|
|
} elsif ($withemu == 0 and $noemu == 1) {
|
|
error "arch-test succeeded without emu but not with emu";
|
|
} elsif ($withemu == 1 and $noemu == 0) {
|
|
info "$options->{nativearch} cannot be executed natively, but"
|
|
. " transparently using qemu-user binfmt emulation";
|
|
if (!exists $deb2qemu->{ $options->{nativearch} }) {
|
|
error "no mapping from $options->{nativearch} to qemu-user"
|
|
. " binary";
|
|
}
|
|
$options->{qemu} = $deb2qemu->{ $options->{nativearch} };
|
|
} elsif ($withemu == 1 and $noemu == 1) {
|
|
info "$options->{nativearch} is different from $hostarch but"
|
|
. " can be executed natively";
|
|
} else {
|
|
error "logic error";
|
|
}
|
|
} else {
|
|
info "chroot architecture $options->{nativearch} is equal to the"
|
|
. " host's architecture";
|
|
}
|
|
}
|
|
|
|
if (defined $options->{qemu} && $options->{mode} eq 'fakechroot') {
|
|
if (!can_execute 'dpkg-architecture') {
|
|
error "cannot find dpkg-architecture";
|
|
}
|
|
}
|
|
|
|
{
|
|
$options->{suite} = undef;
|
|
if (scalar @ARGV > 0) {
|
|
$options->{suite} = shift @ARGV;
|
|
if (scalar @ARGV > 0) {
|
|
$options->{target} = shift @ARGV;
|
|
} else {
|
|
$options->{target} = '-';
|
|
}
|
|
} else {
|
|
info
|
|
"No SUITE specified, expecting sources.list on standard input";
|
|
$options->{target} = '-';
|
|
}
|
|
|
|
my $sourceslists = [];
|
|
if (!defined $options->{suite}) {
|
|
# If no suite was specified, then the whole sources.list has to
|
|
# come from standard input
|
|
info "reading sources.list from standard input...";
|
|
my $content = do {
|
|
local $/;
|
|
## no critic (InputOutput::ProhibitExplicitStdin)
|
|
<STDIN>;
|
|
};
|
|
if ($content eq "") {
|
|
warning "sources.list from standard input is empty";
|
|
} else {
|
|
my $type = guess_sources_format($content);
|
|
if (!defined $type
|
|
|| ($type ne "deb822" and $type ne "one-line")) {
|
|
error "cannot determine sources.list format";
|
|
}
|
|
push @{$sourceslists},
|
|
{
|
|
type => $type,
|
|
fname => undef,
|
|
content => $content,
|
|
};
|
|
}
|
|
} else {
|
|
my @components = ();
|
|
foreach my $comp (@{ $options->{components} }) {
|
|
my @comps = split /[,\s]+/, $comp;
|
|
foreach my $c (@comps) {
|
|
# strip leading and trailing whitespace
|
|
$c =~ s/^\s+|\s+$//g;
|
|
# skip if the remainder is an empty string
|
|
if ($c eq "") {
|
|
next;
|
|
}
|
|
# do not append component if it's already in the list
|
|
if (any { $_ eq $c } @components) {
|
|
next;
|
|
}
|
|
push @components, $c;
|
|
}
|
|
}
|
|
my $compstr = join " ", @components;
|
|
# From the suite name we can maybe infer which key we need. If we
|
|
# can infer this information, then we need to check whether the
|
|
# currently running apt actually trusts this key or not. If it
|
|
# doesn't, then we need to add a signed-by line to the sources.list
|
|
# entry.
|
|
my $signedby = '';
|
|
my %suite_by_vendor = get_suite_by_vendor();
|
|
my $gpgproc = sub {
|
|
my $keyring
|
|
= get_keyring_by_suite($options->{suite}, \%suite_by_vendor);
|
|
if (!defined $keyring) {
|
|
debug "get_keyring_by_suite() cannot find keyring";
|
|
return '';
|
|
}
|
|
|
|
# we can only check whether we need the signed-by entry if the
# automatically chosen keyring exists
|
|
if (!defined $keyring || !-e $keyring) {
|
|
debug "found keyring does not exist";
|
|
return '';
|
|
}
|
|
|
|
# we can only check key material if gpg is installed
|
|
my $gpghome = tempdir(
|
|
"mmdebstrap.gpghome.XXXXXXXXXXXX",
|
|
TMPDIR => 1,
|
|
CLEANUP => 1
|
|
);
|
|
my @gpgcmd = (
|
|
'gpg', '--quiet',
|
|
'--ignore-time-conflict', '--no-options',
|
|
'--no-default-keyring', '--homedir',
|
|
$gpghome, '--no-auto-check-trustdb',
|
|
);
|
|
my ($ret, $message);
|
|
{
|
|
my $fh;
|
|
{
|
|
# change warning handler to prevent message
|
|
# Can't exec "gpg": No such file or directory
|
|
local $SIG{__WARN__} = sub { $message = shift; };
|
|
$ret = open $fh, '-|', @gpgcmd, '--version';
|
|
}
|
|
# we only want to check if the gpg command exists
|
|
close $fh;
|
|
}
|
|
if ($? != 0 || !defined $ret || defined $message) {
|
|
warning
|
|
"gpg --version failed: cannot infer signed-by value";
|
|
return '';
|
|
}
|
|
# initialize gpg trustdb with empty one
|
|
{
|
|
0 == system(@gpgcmd, '--update-trustdb')
|
|
or error "gpg failed to initialize trustdb:: $?";
|
|
}
|
|
if (!-d $options->{apttrustedparts}) {
|
|
warning "$options->{apttrustedparts} doesn't exist";
|
|
return '';
|
|
}
|
|
# find all the fingerprints of the keys apt currently
|
|
# knows about
|
|
my @keyrings = ();
|
|
opendir my $dh, $options->{apttrustedparts}
|
|
or error "cannot read $options->{apttrustedparts}";
|
|
while (my $filename = readdir $dh) {
|
|
if ($filename !~ /\.(asc|gpg)$/) {
|
|
next;
|
|
}
|
|
$filename = "$options->{apttrustedparts}/$filename";
|
|
# skip empty keyrings
|
|
-s "$filename" || next;
|
|
push @keyrings, $filename;
|
|
}
|
|
closedir $dh;
|
|
if (-s $options->{apttrusted}) {
|
|
push @keyrings, $options->{apttrusted};
|
|
}
|
|
my @aptfingerprints = ();
|
|
if (scalar @keyrings == 0) {
|
|
debug "no keyring is trusted by apt";
|
|
return " [signed-by=\"$keyring\"]";
|
|
}
|
|
info "finding correct signed-by value...";
|
|
my $progress = 0.0;
|
|
print_progress($progress);
|
|
for (my $i = 0 ; $i < scalar @keyrings ; $i++) {
|
|
my $k = $keyrings[$i];
|
|
open(my $fh, '-|', @gpgcmd, '--with-colons',
|
|
'--show-keys', $k) // error "failed to fork(): $!";
|
|
while (my $line = <$fh>) {
|
|
if ($line !~ /^fpr:::::::::([^:]+):/) {
|
|
next;
|
|
}
|
|
push @aptfingerprints, $1;
|
|
}
|
|
close $fh;
|
|
if ($? != 0) {
|
|
warning("gpg failed to read $k");
|
|
}
|
|
print_progress($i / (scalar @keyrings) * 100.0, undef);
|
|
}
|
|
print_progress("done");
|
|
if (scalar @aptfingerprints == 0) {
|
|
debug "no fingerprints found";
|
|
return " [signed-by=\"$keyring\"]";
|
|
}
|
|
# check if all fingerprints from the keyring that we guessed
|
|
# are known by apt and only add signed-by option if that's not
|
|
# the case
|
|
my @suitefingerprints = ();
|
|
{
|
|
open(my $fh, '-|', @gpgcmd, '--with-colons', '--show-keys',
|
|
$keyring) // error "failed to fork(): $!";
|
|
while (my $line = <$fh>) {
|
|
if ($line !~ /^fpr:::::::::([^:]+):/) {
|
|
next;
|
|
}
|
|
# if this fingerprint is not known by apt, then we need
|
|
# to add the signed-by option
|
|
if (none { $_ eq $1 } @aptfingerprints) {
|
|
debug "fingerprint $1 is not trusted by apt";
|
|
return " [signed-by=\"$keyring\"]";
|
|
}
|
|
}
|
|
close $fh;
|
|
if ($? != 0) {
|
|
warning "gpg failed -- cannot infer signed-by value";
|
|
}
|
|
}
|
|
return '';
|
|
};
|
|
if (any { $_ eq 'check/signed-by' } @{ $options->{skip} }) {
|
|
info "skipping check/signed-by as requested";
|
|
} else {
|
|
$signedby = $gpgproc->();
|
|
}
|
|
if (scalar @ARGV > 0) {
|
|
for my $arg (@ARGV) {
|
|
if ($arg eq '-') {
|
|
info 'reading sources.list from standard input...';
|
|
my $content = do {
|
|
local $/;
|
|
## no critic (InputOutput::ProhibitExplicitStdin)
|
|
<STDIN>;
|
|
};
|
|
if ($content eq "") {
|
|
warning
|
|
"sources.list from standard input is empty";
|
|
} else {
|
|
my $type = guess_sources_format($content);
|
|
if (!defined $type
|
|
|| ($type ne 'deb822' and $type ne 'one-line'))
|
|
{
|
|
error "cannot determine sources.list format";
|
|
}
|
|
# if last entry is of same type and without filename,
|
|
# then append
|
|
if ( scalar @{$sourceslists} > 0
|
|
&& $sourceslists->[-1]{type} eq $type
|
|
&& !defined $sourceslists->[-1]{fname}) {
|
|
$sourceslists->[-1]{content}
|
|
.= ($type eq 'one-line' ? "\n" : "\n\n")
|
|
. $content;
|
|
} else {
|
|
push @{$sourceslists},
|
|
{
|
|
type => $type,
|
|
fname => undef,
|
|
content => $content,
|
|
};
|
|
}
|
|
}
|
|
} elsif ($arg =~ /^deb(-src)? /) {
|
|
my $content = "$arg\n";
|
|
# if last entry is of same type and without filename,
|
|
# then append
|
|
if ( scalar @{$sourceslists} > 0
|
|
&& $sourceslists->[-1]{type} eq 'one-line'
|
|
&& !defined $sourceslists->[-1]{fname}) {
|
|
$sourceslists->[-1]{content} .= "\n" . $content;
|
|
} else {
|
|
push @{$sourceslists},
|
|
{
|
|
type => 'one-line',
|
|
fname => undef,
|
|
content => $content,
|
|
};
|
|
}
|
|
} elsif ($arg =~ /:\/\//) {
|
|
my $content = join ' ',
|
|
(
|
|
"deb$signedby",
|
|
$arg, $options->{suite}, "$compstr\n"
|
|
);
|
|
# if last entry is of same type and without filename,
|
|
# then append
|
|
if ( scalar @{$sourceslists} > 0
|
|
&& $sourceslists->[-1]{type} eq 'one-line'
|
|
&& !defined $sourceslists->[-1]{fname}) {
|
|
$sourceslists->[-1]{content} .= "\n" . $content;
|
|
} else {
|
|
push @{$sourceslists},
|
|
{
|
|
type => 'one-line',
|
|
fname => undef,
|
|
content => $content,
|
|
};
|
|
}
|
|
} elsif (-f $arg) {
|
|
my $content = '';
|
|
open my $fh, '<', $arg or error "cannot open $arg: $!";
|
|
while (my $line = <$fh>) {
|
|
$content .= $line;
|
|
}
|
|
close $fh;
|
|
if ($content eq "") {
|
|
warning "$arg is empty";
|
|
} else {
|
|
my $type = undef;
|
|
if ($arg =~ /\.list$/) {
|
|
$type = 'one-line';
|
|
} elsif ($arg =~ /\.sources$/) {
|
|
$type = 'deb822';
|
|
} else {
|
|
$type = guess_sources_format($content);
|
|
}
|
|
if (!defined $type
|
|
|| ($type ne 'deb822' and $type ne 'one-line'))
|
|
{
|
|
error "cannot determine sources.list format";
|
|
}
|
|
push @{$sourceslists},
|
|
{
|
|
type => $type,
|
|
fname => basename($arg),
|
|
content => $content,
|
|
};
|
|
}
|
|
} elsif ($arg eq '') {
|
|
# empty
|
|
} else {
|
|
error "invalid mirror: $arg";
|
|
}
|
|
}
|
|
} else {
|
|
my $sourceslist
|
|
= get_sourceslist_by_suite($options->{suite},
|
|
$options->{nativearch},
|
|
$signedby, $compstr, \%suite_by_vendor);
|
|
push @{$sourceslists},
|
|
{
|
|
type => 'one-line',
|
|
fname => undef,
|
|
content => $sourceslist,
|
|
};
|
|
}
|
|
}
|
|
if (scalar @{$sourceslists} == 0) {
|
|
warning "empty apt sources.list";
|
|
}
|
|
debug("sources list entries:");
|
|
for my $list (@{$sourceslists}) {
|
|
if (defined $list->{fname}) {
|
|
debug("fname: $list->{fname}");
|
|
}
|
|
debug("type: $list->{type}");
|
|
debug("content:");
|
|
for my $line (split "\n", $list->{content}) {
|
|
debug(" $line");
|
|
}
|
|
}
|
|
$options->{sourceslists} = $sourceslists;
|
|
}
|
|
|
|
if ($options->{target} eq '-') {
|
|
if (POSIX::isatty STDOUT) {
|
|
error "stdout is a an interactive tty";
|
|
}
|
|
} else {
|
|
my $abs_path = abs_path($options->{target});
|
|
if (!defined $abs_path) {
|
|
error "unable to get absolute path of target directory"
|
|
. " $options->{target}";
|
|
}
|
|
$options->{target} = $abs_path;
|
|
}
|
|
|
|
if ($options->{target} eq '/') {
|
|
error "refusing to use the filesystem root as output directory";
|
|
}
|
|
|
|
my $tar_compressor = get_tar_compressor($options->{target});
|
|
|
|
# figure out the right format
|
|
if ($options->{format} eq 'auto') {
|
|
# (stat(...))[6] is the device identifier which contains the major and
|
|
# minor numbers for character special files
|
|
# major 1 and minor 3 is /dev/null on Linux
|
|
if ( $options->{target} eq '/dev/null'
|
|
and $OSNAME eq 'linux'
|
|
and -c '/dev/null'
|
|
and major((stat("/dev/null"))[6]) == 1
|
|
and minor((stat("/dev/null"))[6]) == 3) {
|
|
$options->{format} = 'null';
|
|
} elsif ($options->{target} eq '-'
|
|
and $OSNAME eq 'linux'
|
|
and major((stat(STDOUT))[6]) == 1
|
|
and minor((stat(STDOUT))[6]) == 3) {
|
|
# by checking the major and minor number of the STDOUT fd we also
|
|
# can detect redirections to /dev/null and choose the null format
|
|
# accordingly
|
|
$options->{format} = 'null';
|
|
} elsif ($options->{target} ne '-' and -d $options->{target}) {
|
|
$options->{format} = 'directory';
|
|
} elsif (
|
|
defined $tar_compressor
|
|
or $options->{target} =~ /\.tar$/
|
|
or $options->{target} eq '-'
|
|
or -p $options->{target} # named pipe (fifo)
|
|
or -c $options->{target} # character special like /dev/null
|
|
) {
|
|
$options->{format} = 'tar';
|
|
# check if the compressor is installed
|
|
if (defined $tar_compressor) {
|
|
my $pid = fork() // error "fork() failed: $!";
|
|
if ($pid == 0) {
|
|
open(STDOUT, '>', '/dev/null')
|
|
or error "cannot open /dev/null for writing: $!";
|
|
open(STDIN, '<', '/dev/null')
|
|
or error "cannot open /dev/null for reading: $!";
|
|
exec { $tar_compressor->[0] } @{$tar_compressor}
|
|
or error("cannot exec "
|
|
. (join " ", @{$tar_compressor})
|
|
. ": $!");
|
|
}
|
|
waitpid $pid, 0;
|
|
if ($? != 0) {
|
|
error("failed to start " . (join " ", @{$tar_compressor}));
|
|
}
|
|
}
|
|
} elsif ($options->{target} =~ /\.(squashfs|sqfs)$/) {
|
|
$options->{format} = 'squashfs';
|
|
# check if tar2sqfs is installed
|
|
my $pid = fork() // error "fork() failed: $!";
|
|
if ($pid == 0) {
|
|
open(STDOUT, '>', '/dev/null')
|
|
or error "cannot open /dev/null for writing: $!";
|
|
open(STDIN, '<', '/dev/null')
|
|
or error "cannot open /dev/null for reading: $!";
|
|
exec('tar2sqfs', '--version')
|
|
or error("cannot exec tar2sqfs --version: $!");
|
|
}
|
|
waitpid $pid, 0;
|
|
if ($? != 0) {
|
|
error("failed to start tar2sqfs --version");
|
|
}
|
|
} elsif ($options->{target} =~ /\.ext2$/) {
|
|
$options->{format} = 'ext2';
|
|
# check if the installed version of genext2fs supports tarballs on
|
|
# stdin
|
|
(undef, my $filename) = tempfile(
|
|
"mmdebstrap.ext2.XXXXXXXXXXXX",
|
|
OPEN => 0,
|
|
TMPDIR => 1
|
|
);
|
|
open(my $fh, '|-', 'genext2fs', '-B', '1024', '-b', '8', '-N',
'11', '-a', '-', $filename) // error "failed to fork(): $!";
|
|
# write 10240 null-bytes to genext2fs -- this represents an empty
|
|
# tar archive
|
|
print $fh ("\0" x 10240)
|
|
or error "cannot write to genext2fs process";
|
|
close $fh;
|
|
my $exitstatus = $?;
|
|
unlink $filename or die "cannot unlink $filename: $!";
|
|
if ($exitstatus != 0) {
|
|
error "genext2fs failed with exit status: $exitstatus";
|
|
}
|
|
} elsif ($options->{target} =~ /\.ext4$/) {
|
|
$options->{format} = 'ext4';
|
|
# check if the installed version of e2fsprogs supports tarballs on
|
|
# stdin
|
|
(undef, my $filename) = tempfile(
|
|
"mmdebstrap.ext4.XXXXXXXXXXXX",
|
|
OPEN => 0,
|
|
TMPDIR => 1
|
|
);
|
|
# creating file to suppress message "Creating regular file ..."
|
|
{ open my $fh, '>', $filename; }
|
|
open(my $fh, '|-', 'mke2fs', '-q', '-F', '-o', 'Linux', '-T',
'ext4', '-b', '4096', '-d', '-', $filename,
'16384') // error "failed to fork(): $!";
|
|
# write 10240 null-bytes to mke2fs -- this represents an empty
|
|
# tar archive
|
|
print $fh ("\0" x 10240)
|
|
or error "cannot write to mke2fs process";
|
|
close $fh;
|
|
my $exitstatus = $?;
|
|
unlink $filename or die "cannot unlink $filename: $!";
|
|
if ($exitstatus != 0) {
|
|
error "mke2fs failed with exit status: $exitstatus";
|
|
}
|
|
} else {
|
|
$options->{format} = 'directory';
|
|
}
|
|
info "automatically chosen format: $options->{format}";
|
|
}
|
|
|
|
if ( $options->{target} eq '-'
|
|
and $options->{format} ne 'tar'
|
|
and $options->{format} ne 'null') {
|
|
error "the $options->{format} format is unable to write to stdout";
|
|
}
|
|
|
|
if ($options->{format} eq 'null'
|
|
and none { $_ eq $options->{target} } ('-', '/dev/null')) {
|
|
info "ignoring target $options->{target} with null format";
|
|
}
|
|
|
|
my $blocksize = -1;
|
|
if ($options->{format} eq 'ext2') {
|
|
if (!can_execute 'genext2fs') {
|
|
error "need genext2fs for ext2 format";
|
|
}
|
|
$blocksize = 1024;
|
|
} elsif ($options->{format} eq 'ext4') {
|
|
if (!can_execute 'mke2fs', '-V') {
|
|
error "need mke2fs for ext4 format";
|
|
}
|
|
require DynaLoader;
|
|
my $libarchive = DynaLoader::dl_load_file("libarchive.so.13", 0)
|
|
or error "need libarchive for ext4 format";
|
|
$blocksize = 4096;
|
|
} elsif ($options->{format} eq 'squashfs') {
|
|
if (!can_execute 'tar2sqfs') {
|
|
error "need tar2sqfs binary from the squashfs-tools-ng package";
|
|
}
|
|
$blocksize = 1048576;
|
|
}
|
|
|
|
my $rootdir_handle;
|
|
if (any { $_ eq $options->{format} }
|
|
('tar', 'squashfs', 'ext2', 'ext4', 'null')) {
|
|
if ($options->{format} ne 'null') {
|
|
if (any { $_ eq $options->{variant} } ('extract', 'custom')
|
|
and $options->{mode} eq 'fakechroot') {
|
|
info "creating a tarball, squashfs, ext2 or ext4 image in"
|
|
. " fakechroot mode might fail in extract and"
|
|
. " custom variants because there might be no tar inside the"
|
|
. " chroot";
|
|
}
|
|
# try to fail early if target tarball or squashfs image cannot be
|
|
# opened for writing
|
|
if ($options->{target} ne '-') {
|
|
if ($options->{dryrun}) {
|
|
if (-e $options->{target}) {
|
|
info "not overwriting $options->{target} because in"
|
|
. " dry-run mode";
|
|
}
|
|
} else {
|
|
open my $fh, '>', $options->{target}
|
|
or error
|
|
"cannot open $options->{target} for writing: $!";
|
|
close $fh;
|
|
}
|
|
}
|
|
}
|
|
# since the output is a tarball, we create the rootfs in a temporary
|
|
# directory
|
|
$options->{root} = tempdir('mmdebstrap.XXXXXXXXXX', TMPDIR => 1);
|
|
info "using $options->{root} as tempdir";
|
|
# add an flock on the temporary directory to prevent cleanup by systemd
|
|
# see section Age in tmpfiles.d(5)
|
|
sysopen($rootdir_handle, $options->{root}, O_RDONLY | O_DIRECTORY)
|
|
or error "Failed to sysopen $options->{root}: $!\n";
|
|
flock($rootdir_handle, LOCK_EX)
|
|
or error "Unable to flock $options->{root}: $!\n";
|
|
|
|
# in unshare and root mode, other users than the current user need to
|
|
# access the rootfs, most prominently, the _apt user. Thus, make the
|
|
# temporary directory world readable.
|
|
if (
|
|
any { $_ eq $options->{mode} } ('unshare', 'root')
|
|
or ($EFFECTIVE_USER_ID == 0 and $options->{mode} eq 'chrootless')
|
|
) {
|
|
chmod 0755, $options->{root} or error "cannot chmod root: $!";
|
|
}
|
|
} elsif ($options->{format} eq 'directory') {
|
|
# user does not seem to have specified a tarball as output, thus work
|
|
# directly in the supplied directory
|
|
$options->{root} = $options->{target};
|
|
if (-e $options->{root}) {
|
|
if (!-d $options->{root}) {
|
|
error "$options->{root} exists and is not a directory";
|
|
}
|
|
if (any { $_ eq 'check/empty' } @{ $options->{skip} }) {
|
|
info "skipping check/empty as requested";
|
|
} else {
|
|
# check if the directory is empty or contains nothing more than
|
|
# an empty lost+found directory. The latter exists on freshly
|
|
# created ext3 and ext4 partitions.
|
|
# rationale for requiring an empty directory:
|
|
# https://bugs.debian.org/833525
|
|
opendir(my $dh, $options->{root})
|
|
or error "Can't opendir($options->{root}): $!";
|
|
while (my $entry = readdir $dh) {
|
|
# skip the "." and ".." entries
|
|
next if $entry eq ".";
|
|
next if $entry eq "..";
|
|
# if the entry is a directory named "lost+found" then skip
|
|
# it, if it's empty
|
|
if ($entry eq "lost+found"
|
|
and -d "$options->{root}/$entry") {
|
|
opendir(my $dh2, "$options->{root}/$entry");
|
|
# Attempt reading the directory thrice. If the third
|
|
# time succeeds, then it has more entries than just "."
|
|
# and ".." and must thus not be empty.
|
|
readdir $dh2;
|
|
readdir $dh2;
|
|
# rationale for requiring an empty directory:
|
|
# https://bugs.debian.org/833525
|
|
if (readdir $dh2) {
|
|
error "$options->{root} contains a non-empty"
|
|
. " lost+found directory";
|
|
}
|
|
closedir($dh2);
|
|
} else {
|
|
error "$options->{root} is not empty";
|
|
}
|
|
}
|
|
closedir($dh);
|
|
}
|
|
} else {
|
|
my $num_created = make_path "$options->{root}",
|
|
{ error => \my $err };
|
|
if ($err && @$err) {
|
|
error(join "; ",
|
|
(map { "cannot create " . (join ": ", %{$_}) } @$err));
|
|
} elsif ($num_created == 0) {
|
|
error "cannot create $options->{root}";
|
|
}
|
|
}
|
|
} else {
|
|
error "unknown format: $options->{format}";
|
|
}
|
|
|
|
# check for double quotes because apt doesn't allow to escape them and
|
|
# thus paths with double quotes are invalid in the apt config
|
|
if ($options->{root} =~ /"/) {
|
|
error "apt cannot handle paths with double quotes";
|
|
}
|
|
|
|
my @idmap;
|
|
# for unshare mode the rootfs directory has to have appropriate
|
|
# permissions
|
|
if ($EFFECTIVE_USER_ID != 0 and $options->{mode} eq 'unshare') {
|
|
@idmap = read_subuid_subgid 1;
|
|
# sanity check
|
|
if ( scalar(@idmap) != 2
|
|
|| $idmap[0][0] ne 'u'
|
|
|| $idmap[1][0] ne 'g'
|
|
|| !length $idmap[0][2]
|
|
|| !length $idmap[1][2]) {
|
|
error "invalid idmap";
|
|
}
|
|
|
|
my $outer_gid = $REAL_GROUP_ID + 0;
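# ($REAL_GROUP_ID, i.e. $(, stringifies to a space separated list of
# gids; adding zero forces numeric context and keeps only the leading
# real gid)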
|
|
|
|
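# inside the new user namespace, the caller's uid/gid are mapped to 0
# and the first subordinate uid/gid to 1, so chown'ing the root
# directory to 1:1 makes it owned by the first subuid/subgid outside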
my $pid = get_unshare_cmd(
|
|
sub { chown 1, 1, $options->{root} },
|
|
[
|
|
['u', '0', $REAL_USER_ID, '1'],
|
|
['g', '0', $outer_gid, '1'],
|
|
['u', '1', $idmap[0][2], '1'],
|
|
['g', '1', $idmap[1][2], '1']]);
|
|
waitpid $pid, 0;
|
|
$? == 0 or error "chown failed";
|
|
}
|
|
|
|
# check if .deb files given by --include are readable by the unshared user
|
|
if ($options->{mode} eq 'unshare'
|
|
and scalar(grep { /^\// } @{ $options->{include} }) > 0) {
|
|
my $pid = get_unshare_cmd(
|
|
sub {
|
|
my $ret = 0;
|
|
foreach my $f (grep { /^\// } @{ $options->{include} }) {
|
|
# open the file for real because -r will report the file as
|
|
# readable even though open will fail (in contrast to the
|
|
# coreutils test utility, perl doesn't use faccessat)
|
|
my $res = open(my $fh, '<', $f);
|
|
if (!$res) {
|
|
warning "unshared user cannot access $f for reading";
|
|
$ret = 1;
|
|
} else {
|
|
close $fh;
|
|
}
|
|
}
|
|
exit $ret;
|
|
},
|
|
\@idmap
|
|
);
|
|
waitpid $pid, 0;
|
|
if ($? != 0) {
|
|
warning("apt on the outside is run as the unshared user and "
|
|
. "needs read access to packages outside the chroot given "
|
|
. "via --include");
|
|
}
|
|
}
|
|
|
|
# figure out whether we have mknod
|
|
$options->{havemknod} = 0;
|
|
if ($options->{mode} eq 'unshare') {
|
|
my $pid = get_unshare_cmd(
|
|
sub {
|
|
$options->{havemknod} = havemknod($options->{root});
|
|
},
|
|
\@idmap
|
|
);
|
|
waitpid $pid, 0;
|
|
$? == 0 or error "havemknod failed";
|
|
} elsif (any { $_ eq $options->{mode} }
|
|
('root', 'fakechroot', 'chrootless')) {
|
|
$options->{havemknod} = havemknod($options->{root});
|
|
} else {
|
|
error "unknown mode: $options->{mode}";
|
|
}
|
|
|
|
# If a tarball is to be created, we always (except if --skip=output/dev is
|
|
# passed) craft the /dev entries ourselves.
|
|
# Why do we put /dev entries in the final tarball?
|
|
# - because debootstrap does it
|
|
# - because schroot (#856877) and pbuilder rely on it and we care about
|
|
# Debian buildds (using schroot) and reproducible builds infra (using
|
|
# pbuilder)
|
|
# If both of the above assertions change, we can stop creating /dev entries as
|
|
# well.
|
|
my $devtar = '';
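# $devtar accumulates raw 512-byte ustar blocks (pax extended headers
# plus file headers) for the /dev entries; it is written out before the
# tar output of the chroot so that these entries come first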
|
|
if (any { $_ eq $options->{format} } ('tar', 'squashfs', 'ext2', 'ext4')) {
|
|
my @entries = ();
|
|
|
|
my @paxentries;
|
|
if ($options->{nativearch} eq "hurd-i386") {
|
|
@paxentries = @hurdfiles;
|
|
} else {
|
|
@paxentries = @linuxdevfiles;
|
|
}
|
|
|
|
foreach my $paxentry (@paxentries) {
|
|
my ($name, $mode, $type, $linktarget, $major, $minor, $content)
|
|
= @{$paxentry};
|
|
if (defined $content) {
|
|
$content = "SCHILY.xattr.gnu.translator=$content";
|
|
|
|
# In the beginning the decimal length of the field is recorded.
|
|
# But the length includes the length of the number itself.
|
|
# Luckily we only need to support decimal numbers with two digits.
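# (for example, a 38 byte payload yields the record "42 <payload>\n":
# 2 length digits + 1 space + 38 payload bytes + 1 newline = 42 bytes)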
|
|
if ((length $content) + 4 > 99 || (length $content) + 4 < 10) {
|
|
exit 1; # not supported yet
|
|
}
|
|
my $len = (length $content) + 4;
|
|
my $dirname = dirname $name;
|
|
my $basename = basename $name;
|
|
push @entries,
|
|
[
|
|
"$dirname/PaxHeaders/$basename",
|
|
0, 'x', undef, undef, undef, "$len $content\n"
|
|
];
|
|
}
|
|
push @entries,
|
|
[$name, $mode, $type, $linktarget, $major, $minor, undef];
|
|
}
|
|
|
|
foreach my $file (@entries) {
|
|
my ($fname, $mode, $type, $linktarget, $devmajor, $devminor,
|
|
$content)
|
|
= @{$file};
|
|
if (length "$fname" > 100) {
|
|
error "tar entry cannot exceed 100 characters";
|
|
}
|
|
if ($type eq '3'
|
|
and any { $_ eq 'output/mknod' } @{ $options->{skip} }) {
|
|
info "skipping output/mknod as requested for $fname";
|
|
next;
|
|
}
|
|
my $size = defined $content ? length $content : 0;
|
|
my $etime = $type eq 'x' ? 0 : $mtime;
|
|
my $entry = pack(
|
|
# name mode uid gid size mtime type linktarget
|
|
'a100 a8 a8 a8 a12 a12 A8 a1 a100 '
|
|
# magic version username groupname major minor prefix
|
|
. 'a6 a2 a32 a32 a8 a8 a155 x12',
|
|
$fname,
|
|
sprintf('%07o', $mode),
|
|
sprintf('%07o', 0), # uid
|
|
sprintf('%07o', 0), # gid
|
|
sprintf('%011o', $size), # size
|
|
sprintf('%011o', $etime),
|
|
'', # checksum
|
|
$type, # type
|
|
$linktarget // '', # linktarget
|
|
"ustar", # magic
|
|
"00", # version
|
|
'', # username
|
|
'', # groupname
|
|
defined($devmajor) ? sprintf('%07o', $devmajor) : '',
|
|
defined($devminor) ? sprintf('%07o', $devminor) : '',
|
|
'', # prefix
|
|
);
|
|
|
|
# compute and insert checksum
|
|
substr($entry, 148, 7)
|
|
= sprintf("%06o\0", unpack("%16C*", $entry));
|
|
$devtar .= $entry;
|
|
|
|
if (length $content) {
|
|
$devtar .= (pack 'a512', $content);
|
|
}
|
|
}
|
|
} elsif (any { $_ eq $options->{format} } ('directory', 'null')) {
|
|
# nothing to do
|
|
} else {
|
|
error "unknown format: $options->{format}";
|
|
}
|
|
|
|
my $exitstatus = 0;
|
|
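# tar options for packing the chroot: deterministic member order and
# timestamps, numeric ownership and pax format with atime/ctime dropped
# so that the resulting archive is reproducible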
my @taropts = (
|
|
'--sort=name',
|
|
"--mtime=\@$mtime",
|
|
'--clamp-mtime',
|
|
'--numeric-owner',
|
|
'--one-file-system',
|
|
'--format=pax',
|
|
'--pax-option=exthdr.name=%d/PaxHeaders/%f,delete=atime,delete=ctime',
|
|
'-c',
|
|
'--exclude=./lost+found'
|
|
);
|
|
# only exclude ./dev if device nodes are written out (the default)
|
|
if (none { $_ eq 'output/dev' } @{ $options->{skip} }) {
|
|
push @taropts, '--exclude=./dev', '--exclude=./servers',
|
|
'--exclude=./servers/*';
|
|
}
|
|
# tar2sqfs and genext2fs do not support extended attributes
|
|
if ($options->{format} eq "squashfs") {
|
|
# tar2sqfs supports user.*, trusted.* and security.* but not system.*
|
|
# https://bugs.debian.org/988100
|
|
# lib/sqfs/xattr/xattr.c of https://github.com/AgentD/squashfs-tools-ng
|
|
# https://github.com/AgentD/squashfs-tools-ng/issues/83
|
|
# https://github.com/AgentD/squashfs-tools-ng/issues/25
|
|
warning("tar2sqfs does not support extended attributes"
|
|
. " from the 'system' namespace");
|
|
push @taropts, '--xattrs', '--xattrs-exclude=system.*';
|
|
} elsif ($options->{format} eq "ext2") {
|
|
warning "genext2fs does not support extended attributes";
|
|
warning "ext2 does not support sub-second precision timestamps";
|
|
warning "ext2 does not support timestamps beyond 2038 January 18";
|
|
warning "ext2 inode size of 128 prevents removing these limitations";
|
|
} else {
|
|
push @taropts, '--xattrs';
|
|
}
|
|
|
|
# disable signals so that we can fork and change behaviour of the signal
|
|
# handler in the parent and child without getting interrupted
|
|
my $sigset = POSIX::SigSet->new(SIGINT, SIGHUP, SIGPIPE, SIGTERM);
|
|
POSIX::sigprocmask(SIG_BLOCK, $sigset) or error "Can't block signals: $!";
|
|
|
|
# a pipe to transfer the final tarball from the child to the parent
|
|
pipe my $rfh, my $wfh;
|
|
|
|
# instead of two pipe calls, creating four file handles, we use socketpair
|
|
socketpair my $childsock, my $parentsock, AF_UNIX, SOCK_STREAM, PF_UNSPEC
|
|
or error "socketpair failed: $!";
|
|
$options->{hooksock} = $childsock;
|
|
# for communicating the required number of blocks, we don't need
|
|
# bidirectional communication, so a pipe() is enough
|
|
# we don't communicate this via the hook communication because
|
|
# a) this would abuse the functionality exclusively for hooks
|
|
# b) it puts code writing the protocol outside of the helper/listener
|
|
# c) the forked listener process cannot communicate to its parent
|
|
pipe my $nblkreader, my $nblkwriter or error "pipe failed: $!";
|
|
|
|
my $worker = sub {
|
|
# child
|
|
local $SIG{'INT'} = 'DEFAULT';
|
|
local $SIG{'HUP'} = 'DEFAULT';
|
|
local $SIG{'PIPE'} = 'DEFAULT';
|
|
local $SIG{'TERM'} = 'DEFAULT';
|
|
|
|
# unblock all delayed signals (and possibly handle them)
|
|
POSIX::sigprocmask(SIG_UNBLOCK, $sigset)
|
|
or error "Can't unblock signals: $!";
|
|
|
|
close $rfh;
|
|
close $parentsock;
|
|
open(STDOUT, '>&', STDERR) or error "cannot open STDOUT: $!";
|
|
|
|
setup($options);
|
|
|
|
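# tell the hook listener that setup is finished and that no further
# hook commands will follow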
print $childsock (pack('n', 0) . 'adios');
|
|
$childsock->flush();
|
|
|
|
close $childsock;
|
|
|
|
close $nblkreader;
|
|
if (!$options->{dryrun} && any { $_ eq $options->{format} }
|
|
('ext2', 'ext4')) {
|
|
my $numblocks = approx_disk_usage($options->{root}, $blocksize);
|
|
print $nblkwriter "$numblocks\n";
|
|
$nblkwriter->flush();
|
|
}
|
|
close $nblkwriter;
|
|
|
|
if ($options->{dryrun}) {
|
|
info "simulate creating tarball...";
|
|
} elsif (any { $_ eq $options->{format} }
|
|
('tar', 'squashfs', 'ext2', 'ext4')) {
|
|
info "creating tarball...";
|
|
|
|
# redirect tar output to the writing end of the pipe so
|
|
# that the parent process can capture the output
|
|
open(STDOUT, '>&', $wfh) or error "cannot open STDOUT: $!";
|
|
|
|
# Add ./dev as the first entries of the tar file.
|
|
# We cannot add them after calling tar, because there is no
|
|
# way to prevent tar from writing NULL entries at the end.
|
|
if (any { $_ eq 'output/dev' } @{ $options->{skip} }) {
|
|
info "skipping output/dev as requested";
|
|
} else {
|
|
print $devtar;
|
|
}
|
|
|
|
if ($options->{mode} eq 'unshare') {
|
|
# pack everything except ./dev
|
|
0 == system('tar', @taropts, '-C', $options->{root}, '.')
|
|
or error "tar failed: $?";
|
|
} elsif ($options->{mode} eq 'fakechroot') {
|
|
# By default, FAKECHROOT_EXCLUDE_PATH includes /proc and /sys
|
|
# which means that the resulting tarball will contain the
|
|
# permission and ownership information of /proc and /sys from
|
|
# the outside, which we want to avoid.
|
|
## no critic (Variables::RequireLocalizedPunctuationVars)
|
|
$ENV{FAKECHROOT_EXCLUDE_PATH} = "/dev";
|
|
# Fakechroot requires tar to run inside the chroot or otherwise
|
|
# absolute symlinks will include the path to the root directory
|
|
0 == system('chroot', $options->{root}, 'tar',
|
|
@taropts, '-C', '/', '.')
|
|
or error "tar failed: $?";
|
|
} elsif (any { $_ eq $options->{mode} } ('root', 'chrootless')) {
|
|
# If the chroot directory is not owned by the root user, then
|
|
# we assume that no measure was taken to fake root permissions.
|
|
# Since the final tarball should contain entries with root
|
|
# ownership, we instruct tar to do so.
|
|
my @owneropts = ();
|
|
if ((stat $options->{root})[4] != 0) {
|
|
push @owneropts, '--owner=0', '--group=0',
|
|
'--numeric-owner';
|
|
}
|
|
0 == system('tar', @taropts, @owneropts, '-C',
|
|
$options->{root}, '.')
|
|
or error "tar failed: $?";
|
|
} else {
|
|
error "unknown mode: $options->{mode}";
|
|
}
|
|
|
|
info "done";
|
|
} elsif (any { $_ eq $options->{format} } ('directory', 'null')) {
|
|
# nothing to do
|
|
} else {
|
|
error "unknown format: $options->{format}";
|
|
}
|
|
|
|
exit 0;
|
|
};
|
|
|
|
my $pid;
|
|
if ($options->{mode} eq 'unshare') {
|
|
$pid = get_unshare_cmd($worker, \@idmap);
|
|
} elsif (any { $_ eq $options->{mode} }
|
|
('root', 'fakechroot', 'chrootless')) {
|
|
$pid = fork() // error "fork() failed: $!";
|
|
if ($pid == 0) {
|
|
$worker->();
|
|
}
|
|
} else {
|
|
error "unknown mode: $options->{mode}";
|
|
}
|
|
|
|
# parent
|
|
|
|
my $got_signal = 0;
|
|
my $waiting_for = "setup";
|
|
my $ignore = sub {
|
|
$got_signal = shift;
|
|
info "main() received signal $got_signal: waiting for $waiting_for...";
|
|
};
|
|
|
|
local $SIG{'INT'} = $ignore;
|
|
local $SIG{'HUP'} = $ignore;
|
|
local $SIG{'PIPE'} = $ignore;
|
|
local $SIG{'TERM'} = $ignore;
|
|
|
|
# unblock all delayed signals (and possibly handle them)
|
|
POSIX::sigprocmask(SIG_UNBLOCK, $sigset)
|
|
or error "Can't unblock signals: $!";
|
|
|
|
close $wfh;
|
|
close $childsock;
|
|
|
|
debug "starting to listen for hooks";
|
|
# handle special hook commands via parentsock
|
|
my $lpid = fork() // error "fork() failed: $!";
|
|
if ($lpid == 0) {
|
|
# whatever the script writes on stdout is sent to the
|
|
# socket
|
|
# whatever is written to the socket, send to stdin
|
|
open(STDOUT, '>&', $parentsock)
|
|
or error "cannot open STDOUT: $!";
|
|
open(STDIN, '<&', $parentsock)
|
|
or error "cannot open STDIN: $!";
|
|
|
|
hooklistener($verbosity_level);
|
|
exit 0;
|
|
}
|
|
waitpid($lpid, 0);
|
|
if ($? != 0) {
|
|
# we cannot die here because that would leave the other thread
|
|
# running without a parent
|
|
warning "listening on child socket failed: $@";
|
|
$exitstatus = 1;
|
|
}
|
|
debug "finish to listen for hooks";
|
|
|
|
close $parentsock;
|
|
|
|
my $numblocks = 0;
|
|
close $nblkwriter;
|
|
if (!$options->{dryrun} && any { $_ eq $options->{format} }
|
|
('ext2', 'ext4')) {
|
|
$numblocks = <$nblkreader>;
|
|
if (defined $numblocks) {
|
|
chomp $numblocks;
|
|
} else {
|
|
# This can happen if the setup process died early and thus closes
|
|
# the pipe from the other end. The EOF is turned into undef.
|
|
# we cannot die here because that would skip the cleanup task
|
|
warning "failed to read required number of blocks";
|
|
$exitstatus = 1;
|
|
$numblocks = -1;
|
|
}
|
|
}
|
|
close $nblkreader;
|
|
|
|
if ($options->{dryrun}) {
|
|
# nothing to do
|
|
} elsif (any { $_ eq $options->{format} } ('directory', 'null')) {
|
|
# nothing to do
|
|
} elsif ((any { $_ eq $options->{format} } ('ext2', 'ext4'))
|
|
&& $numblocks <= 0) {
|
|
# nothing to do because of invalid $numblocks
|
|
} elsif (any { $_ eq $options->{format} }
|
|
('tar', 'squashfs', 'ext2', 'ext4')) {
|
|
# we use eval() so that error() doesn't take this process down and
|
|
# thus leaves the setup() process without a parent
|
|
eval {
|
|
if ($options->{target} eq '-') {
|
|
if (!copy($rfh, *STDOUT)) {
|
|
error "cannot copy to standard output: $!";
|
|
}
|
|
} else {
|
|
if (any { $_ eq $options->{format} }
|
|
('squashfs', 'ext2', 'ext4')
|
|
or defined $tar_compressor) {
|
|
my @argv = ();
|
|
if ($options->{format} eq 'squashfs') {
|
|
push @argv, 'tar2sqfs',
|
|
'--quiet', '--no-skip', '--force',
|
|
'--exportable',
|
|
'--compressor', 'xz',
|
|
'--block-size', $blocksize,
|
|
$options->{target};
|
|
} elsif ($options->{format} eq 'ext2') {
|
|
if ($numblocks <= 0) {
|
|
error "invalid number of blocks: $numblocks";
|
|
}
|
|
push @argv, 'genext2fs', '-B', 1024, '-b', $numblocks,
|
|
'-i', '16384', '-a', '-', $options->{target};
|
|
} elsif ($options->{format} eq 'ext4') {
|
|
if ($numblocks <= 0) {
|
|
error "invalid number of blocks: $numblocks";
|
|
}
|
|
push @argv, 'mke2fs', '-q', '-F', '-o', 'Linux', '-T',
|
|
'ext4';
|
|
if (exists $ENV{SOURCE_DATE_EPOCH}) {
|
|
# if SOURCE_DATE_EPOCH was set, make the image
|
|
# reproducible by setting a fixed uuid and
|
|
# hash_seed
|
|
my $uuid = create_v5_uuid(
|
|
create_v5_uuid(
|
|
$UUID_NS_DNS, "mister-muffin.de"
|
|
),
|
|
$mtime
|
|
);
|
|
push @argv, '-U', $uuid, '-E', "hash_seed=$uuid";
|
|
}
|
|
push @argv, '-b', $blocksize, '-d', '-',
|
|
$options->{target}, $numblocks;
|
|
} elsif ($options->{format} eq 'tar') {
|
|
push @argv, @{$tar_compressor};
|
|
} else {
|
|
error "unknown format: $options->{format}";
|
|
}
|
|
POSIX::sigprocmask(SIG_BLOCK, $sigset)
|
|
or error "Can't block signals: $!";
|
|
my $cpid = fork() // error "fork() failed: $!";
|
|
if ($cpid == 0) {
|
|
# child: default signal handlers
|
|
local $SIG{'INT'} = 'DEFAULT';
|
|
local $SIG{'HUP'} = 'DEFAULT';
|
|
local $SIG{'PIPE'} = 'DEFAULT';
|
|
local $SIG{'TERM'} = 'DEFAULT';
|
|
|
|
# unblock all delayed signals (and possibly handle
|
|
# them)
|
|
POSIX::sigprocmask(SIG_UNBLOCK, $sigset)
|
|
or error "Can't unblock signals: $!";
|
|
|
|
# redirect stdout to file or /dev/null
|
|
if (any { $_ eq $options->{format} }
|
|
('squashfs', 'ext2', 'ext4')) {
|
|
open(STDOUT, '>', '/dev/null')
|
|
or error "cannot open /dev/null for writing: $!";
|
|
} elsif ($options->{format} eq 'tar') {
|
|
open(STDOUT, '>', $options->{target})
|
|
or error
|
|
"cannot open $options->{target} for writing: $!";
|
|
} else {
|
|
error "unknown format: $options->{format}";
|
|
}
|
|
open(STDIN, '<&', $rfh)
|
|
or error "cannot open file handle for reading: $!";
|
|
eval { Devel::Cover::set_coverage("none") }
|
|
if $is_covering;
|
|
exec { $argv[0] } @argv
|
|
or
|
|
error("cannot exec " . (join " ", @argv) . ": $!");
|
|
}
|
|
POSIX::sigprocmask(SIG_UNBLOCK, $sigset)
|
|
or error "Can't unblock signals: $!";
|
|
waitpid $cpid, 0;
|
|
if ($? != 0) {
|
|
error("failed to run " . (join " ", @argv));
|
|
}
|
|
} else {
|
|
# somehow, when running under qemu, writing to a virtio
|
|
# device will not result in an ENOSPC but just stall forever
|
|
if (!copy($rfh, $options->{target})) {
|
|
error "cannot copy to $options->{target}: $!";
|
|
}
|
|
}
|
|
}
|
|
};
|
|
if ($@) {
|
|
# we cannot die here because that would leave the other thread
|
|
# running without a parent
|
|
# We send SIGHUP to all our processes (including eventually
|
|
# running tar and this process itself) to reliably tear down
|
|
# all running child processes. The main process is not affected
|
|
# because we are ignoring SIGHUP.
|
|
#
|
|
# FIXME: this codepath becomes dangerous in case mmdebstrap is not
|
|
# run in its own process group. When run from the terminal, the
|
|
# shell creates a new process group as part of its job control, so
|
|
# sending SIGHUP to all processes in our own process group should
|
|
# not be dangerous. But for example, on debci, lxc will run in the
|
|
# same process group as mmdebstrap and sending SIGHUP to the whole
|
|
# process group will also kill lxc. Creating a new process group
|
|
# for $pid will break things because only the foreground job is
|
|
# allowed to read from the terminal. If a background job does it,
|
|
# it will be suspended with SIGTTIN. Even though apt could be told
|
|
# to not read from the terminal by opening STDIN from /dev/null,
|
|
# this would make --chrooted-customize-hook=bash impossible.
|
|
# Making the $pid process group the foreground job will destroy all
|
|
# the signal handling we have set up for when the user presses
|
|
# ctrl+c in a terminal. Even if we fix the signal handling we now
|
|
# find ourselves in the opposite situation: the $pid process must
|
|
# now clean up the former main process tree reliably. And we cannot
|
|
# create a new process group for everything all-in-one because that
|
|
# would also destroy CTRL+C handling from the terminal.
|
|
warning "creating tarball failed: $@";
|
|
my $pgroup = getpgrp();
|
|
warning "sending SIGHUP to all processes in process group $pgroup";
|
|
kill HUP => -$pgroup;
|
|
$exitstatus = 1;
|
|
}
|
|
} else {
|
|
error "unknown format: $options->{format}";
|
|
}
|
|
close($rfh);
|
|
waitpid $pid, 0;
|
|
if ($? != 0) {
|
|
$exitstatus = 1;
|
|
}
|
|
|
|
# change signal handler message
|
|
$waiting_for = "cleanup";
|
|
|
|
if (any { $_ eq $options->{format} } ('directory')) {
|
|
# nothing to do
|
|
} elsif (any { $_ eq $options->{format} }
|
|
('tar', 'squashfs', 'ext2', 'ext4', 'null')) {
|
|
if (!-e $options->{root}) {
|
|
error "$options->{root} does not exist";
|
|
}
|
|
info "removing tempdir $options->{root}...";
|
|
if ($options->{mode} eq 'unshare') {
|
|
# We don't have permissions to remove the directory outside
|
|
# the unshared namespace, so we remove it here.
|
|
# Since this is still inside the unshared namespace, there is
|
|
# no risk of removing anything important.
|
|
$pid = get_unshare_cmd(
|
|
sub {
|
|
# change CWD to chroot directory because find tries to
|
|
# chdir to the current directory which might not be
|
|
# accessible by the unshared user:
|
|
# find: Failed to restore initial working directory
|
|
0 == system('env', "--chdir=$options->{root}", 'find',
|
|
$options->{root}, '-mount',
|
|
'-mindepth', '1', '-delete')
|
|
or error "rm failed: $?";
|
|
# ignore failure in case the unshared user doesn't have the
|
|
# required permissions -- we attempt again later if
|
|
# necessary
|
|
rmdir "$options->{root}";
|
|
},
|
|
\@idmap
|
|
);
|
|
waitpid $pid, 0;
|
|
$? == 0 or error "remove_tree failed";
|
|
# in unshare mode, the toplevel directory might've been created in
|
|
# a directory that the unshared user cannot change and thus cannot
|
|
# delete. We attempt its removal again outside as the normal user.
|
|
if (-e $options->{root}) {
|
|
rmdir "$options->{root}"
|
|
or error "cannot rmdir $options->{root}: $!";
|
|
}
|
|
} elsif (any { $_ eq $options->{mode} }
|
|
('root', 'fakechroot', 'chrootless')) {
|
|
# without unshare, we use the system's rm to recursively remove the
|
|
# temporary directory just to make sure that we do not accidentally
|
|
# remove more than we should by using --one-file-system.
|
|
0 == system('rm', '--interactive=never', '--recursive',
|
|
'--preserve-root', '--one-file-system', $options->{root})
|
|
or error "rm failed: $?";
|
|
} else {
|
|
error "unknown mode: $options->{mode}";
|
|
}
|
|
} else {
|
|
error "unknown format: $options->{format}";
|
|
}
|
|
|
|
if ($got_signal) {
|
|
$exitstatus = 1;
|
|
}
|
|
|
|
if ($exitstatus == 0) {
|
|
my $duration = Time::HiRes::time - $before;
|
|
info "success in " . (sprintf "%.04f", $duration) . " seconds";
|
|
exit 0;
|
|
}
|
|
|
|
error "mmdebstrap failed to run";
|
|
return 1;
|
|
}
|
|
|
|
main();
|
|
|
|
__END__
|
|
|
|
=head1 NAME
|
|
|
|
mmdebstrap - multi-mirror Debian chroot creation
|
|
|
|
=head1 SYNOPSIS
|
|
|
|
B<mmdebstrap> [B<OPTION...>] [I<SUITE> [I<TARGET> [I<MIRROR>...]]]
|
|
|
|
=head1 DESCRIPTION
|
|
|
|
B<mmdebstrap> creates a Debian chroot of I<SUITE> into I<TARGET> from one or
|
|
more I<MIRROR>s. It is meant as an alternative to the debootstrap tool (see
|
|
section B<DEBOOTSTRAP>). In contrast to debootstrap it uses apt to resolve
|
|
dependencies and is thus able to use more than one mirror and resolve more
|
|
complex dependency relationships. See section B<OPERATION> for an overview of
|
|
how B<mmdebstrap> works internally.
|
|
|
|
The I<SUITE> option may either be a valid release code name (eg, sid, bookworm,
|
|
trixie) or a symbolic name (eg, unstable, testing, stable, oldstable). Any
|
|
suite name that works with apt on the given mirror will work. The I<SUITE>
|
|
option is optional if no I<TARGET> and no I<MIRROR> option is provided. If
|
|
I<SUITE> is missing, then the information of the desired suite has to come from
|
|
standard input as part of a valid apt sources.list file or be set up via hooks.
|
|
The value of the I<SUITE> argument will be used to determine which apt index to
|
|
use for finding out the set of C<Essential:yes> packages and/or the set of
|
|
packages with the right priority for the selected variant. This functionality
|
|
can be disabled by choosing the empty string for I<SUITE>. See the section
|
|
B<VARIANTS> for more information.
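For example, I<SUITE> can be omitted entirely and the desired sources can be
fed in on standard input instead (assuming a prepared deb822 file named
F<my.sources>):

mmdebstrap < my.sources > chroot.tar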
|
|
|
|
The I<TARGET> option may either be the path to a directory, the path to a
|
|
tarball filename, the path to a squashfs image, the path to an ext2 or ext4
|
|
image, a FIFO, a character special device, or C<->. The I<TARGET> option is
|
|
optional if no I<MIRROR> option is provided. If I<TARGET> is missing or if
|
|
I<TARGET> is C<->, an uncompressed tarball will be sent to standard output.
|
|
Without the B<--format> option, I<TARGET> will be used to choose the format.
|
|
See the section B<FORMATS> for more information.
|
|
|
|
The I<MIRROR> option may either be provided as a URI, in apt one-line format,
|
|
as a path to a file in apt's one-line or deb822-format, or C<->. If no
|
|
I<MIRROR> option is provided, then L<http://deb.debian.org/debian> is used as
|
|
the default. If I<SUITE> does not refer to "unstable" or "testing", then
|
|
I<SUITE>-updates and I<SUITE>-security mirrors are automatically added. If a
|
|
I<MIRROR> option starts with "deb " or "deb-src " then it is used as a one-line
|
|
format entry for apt's sources.list inside the chroot. If a I<MIRROR> option
|
|
contains a "://" then it is interpreted as a mirror URI and the apt line inside
|
|
the chroot is assembled as "deb [arch=A] B C D" where A is the host's native
|
|
architecture, B is the I<MIRROR>, C is the given I<SUITE> and D is the
|
|
components given via B<--components> (defaults to "main"). If a I<MIRROR>
|
|
option happens to be an existing file, then its contents are written into the
|
|
chroot's sources.list (if the first I<MIRROR> is a file in one-line format) or
|
|
into the chroot's sources.list.d directory, named with the extension .list or
|
|
.sources, depending on whether the file is in one-line or deb822 format,
|
|
respectively. If I<MIRROR> is C<-> then standard input is pasted into the
|
|
chroot's sources.list. More than one mirror can be specified and are appended
|
|
to the chroot's sources.list in the given order. If you specify a https or tor
|
|
I<MIRROR> and you want the chroot to be able to update itself, don't forget to
|
|
also install the ca-certificates package, the apt-transport-https package for
|
|
apt versions less than 1.5 and/or the apt-transport-tor package using the
|
|
B<--include> option, as necessary.
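For example, the following invocations pass the mirror as a plain URI, as a
one-line apt entry and as a path to an existing deb822 file (the file name
F<my-mirror.sources> is just an illustration), respectively:

mmdebstrap unstable chroot.tar http://deb.debian.org/debian
mmdebstrap unstable chroot.tar "deb http://deb.debian.org/debian unstable main"
mmdebstrap unstable chroot.tar ./my-mirror.sources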
|
|
|
|
All status output is printed to standard error unless B<--logfile> is used to
|
|
redirect it to a file or B<--quiet> or B<--silent> is used to suppress any
|
|
output on standard error. Help and version information will be printed to
|
|
standard error with the B<--help> and B<--version> options, respectively.
|
|
Otherwise, an uncompressed tarball might be sent to standard output if
|
|
I<TARGET> is C<-> or if no I<TARGET> was specified.
|
|
|
|
=head1 OPTIONS
|
|
|
|
Options are case insensitive. Short options may be bundled. Long options
|
|
require a double dash and may be abbreviated to uniqueness. Options can be
|
|
placed anywhere on the command line, even before or mixed with the I<SUITE>,
|
|
I<TARGET>, and I<MIRROR> arguments. A double dash C<--> can be used to stop
|
|
interpreting command line arguments as options to allow I<SUITE>, I<TARGET> and
|
|
I<MIRROR> arguments that start with a single or double dash. Option order only
|
|
matters for options that can be passed multiple times as documented below.
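Example: a I<TARGET> filename that starts with a dash (a deliberately odd
name, only for illustration) can be passed after a double dash so that it is
not parsed as an option:

mmdebstrap --variant=apt unstable -- -chroot.tar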
|
|
|
|
=over 8
|
|
|
|
=item B<-h,--help>
|
|
|
|
Print synopsis and options of this man page and exit.
|
|
|
|
=item B<--man>
|
|
|
|
Show the full man page as generated from Perl POD in a pager. This requires
|
|
the perldoc program from the perl-doc package. This is the same as running:
|
|
|
|
pod2man /usr/bin/mmdebstrap | man -l -
|
|
|
|
=item B<--version>
|
|
|
|
Print the B<mmdebstrap> version and exit.
|
|
|
|
=item B<--variant>=I<name>
|
|
|
|
Choose which package set to install. Valid variant I<name>s are B<extract>,
|
|
B<custom>, B<essential>, B<apt>, B<required>, B<minbase>, B<buildd>,
|
|
B<important>, B<debootstrap>, B<->, and B<standard>. The default variant is
|
|
B<debootstrap>. See the section B<VARIANTS> for more information.
|
|
|
|
=item B<--mode>=I<name>
|
|
|
|
Choose how to perform the chroot operation and create a filesystem with
|
|
ownership information different from the current user. Valid mode I<name>s are
|
|
B<auto>, B<sudo>, B<root>, B<unshare>, B<fakeroot>, B<fakechroot> and
|
|
B<chrootless>. The default mode is B<auto>. See the section B<MODES> for more
|
|
information.
|
|
|
|
=item B<--format>=I<name>
|
|
|
|
Choose the output format. Valid format I<name>s are B<auto>, B<directory>,
|
|
B<tar>, B<squashfs>, B<ext2>, B<ext4> and B<null>. The default format is
|
|
B<auto>. See the section B<FORMATS> for more information.
|
|
|
|
=item B<--aptopt>=I<option>|I<file>
|
|
|
|
Pass arbitrary I<option>s to apt. Will be permanently added to
|
|
F</etc/apt/apt.conf.d/99mmdebstrap> inside the chroot. Use hooks for temporary
|
|
configuration options. Can be specified multiple times. Each I<option> will be
|
|
appended to 99mmdebstrap. A semicolon will be added at the end of the option if
|
|
necessary. If the command line argument is an existing I<file>, the content of
|
|
the file will be appended to 99mmdebstrap verbatim.
|
|
|
|
Example: This is necessary for allowing old timestamps from snapshot.debian.org
|
|
|
|
--aptopt='Acquire::Check-Valid-Until "false"'
|
|
--aptopt='Apt::Key::gpgvcommand "/usr/libexec/mmdebstrap/gpgvnoexpkeysig"'
|
|
|
|
Example: Settings controlling download of package description translations
|
|
|
|
--aptopt='Acquire::Languages { "environment"; "en"; }'
|
|
--aptopt='Acquire::Languages "none"'
|
|
|
|
Example: Enable installing Recommends (by default B<mmdebstrap> doesn't)
|
|
|
|
--aptopt='Apt::Install-Recommends "true"'
|
|
|
|
Example: Configure apt-cacher or apt-cacher-ng as an apt proxy
|
|
|
|
--aptopt='Acquire::http { Proxy "http://127.0.0.1:3142"; }'
|
|
|
|
Example: For situations in which the apt sandbox user cannot access the chroot
|
|
|
|
--aptopt='APT::Sandbox::User "root"'
|
|
|
|
Example: Minimizing the number of packages installed from experimental
|
|
|
|
--aptopt='APT::Solver "aspcud"'
|
|
--aptopt='APT::Solver::aspcud::Preferences
|
|
"-count(solution,APT-Release:=/a=experimental/),-removed,-changed,-new"'
|
|
|
|
=item B<--keyring>=I<file>|I<directory>
|
|
|
|
Change the default keyring to use by apt during the initial setup. This is
|
|
similar to setting B<Dir::Etc::Trusted> and B<Dir::Etc::TrustedParts> using
|
|
B<--aptopt> except that the latter setting will be permanently stored in the
|
|
chroot while the keyrings passed via B<--keyring> will only be visible to apt
|
|
as run by B<mmdebstrap>. Do not use B<--keyring> if apt inside the chroot needs
|
|
to know about your keys after the initial chroot creation by B<mmdebstrap>.
|
|
This option is mainly intended for users who use B<mmdebstrap> as a
|
|
B<debootstrap> drop-in replacement. As such, it is probably not what you want to
|
|
use if you use B<mmdebstrap> with more than a single mirror unless you pass it
|
|
a directory containing all the keyrings you need.
|
|
|
|
By default, the local setting of B<Dir::Etc::Trusted> and
|
|
B<Dir::Etc::TrustedParts> are used to choose the keyring used by apt as run by
|
|
B<mmdebstrap>. These two locations are set to F</etc/apt/trusted.gpg> and
|
|
F</etc/apt/trusted.gpg.d> by default. Depending on whether a file or directory
|
|
is passed to this option, the former and latter default can be changed,
|
|
respectively. Since apt only supports a single keyring file and directory,
|
|
respectively, you can B<not> use this option to pass multiple files and/or
|
|
directories. Using the C<--keyring> argument in the following way is equal to
|
|
keeping the default:
|
|
|
|
--keyring=/etc/apt/trusted.gpg --keyring=/etc/apt/trusted.gpg.d
|
|
|
|
If you need to pass multiple keyrings, use the C<signed-by> option when
|
|
specifying the mirror like this:
|
|
|
|
mmdebstrap mysuite out.tar "deb [signed-by=/path/to/key.gpg] http://..."
|
|
|
|
Another reason to use C<signed-by> instead of B<--keyring> is if apt inside the
|
|
chroot needs to know by what key the repository is signed even after the
|
|
initial chroot creation.
|
|
|
|
The C<signed-by> option will automatically be added to the final
|
|
C<sources.list> if the keyring required for the selected I<SUITE> is not yet
|
|
trusted by apt. Automatically adding the C<signed-by> option in these cases
|
|
requires C<gpg> to be installed. If C<gpg> and C<ubuntu-archive-keyring> are
|
|
installed, then you can create a Ubuntu Bionic chroot on Debian like this:
|
|
|
|
mmdebstrap bionic ubuntu-bionic.tar
|
|
|
|
The resulting chroot will have a C<sources.list> with a C<signed-by> option
|
|
pointing to F</usr/share/keyrings/ubuntu-archive-keyring.gpg>.
|
|
|
|
You do not need to use B<--keyring> or C<signed-by> if you placed the keys that
|
|
apt needs to know about into F</etc/apt/trusted.gpg.d> in the B<--setup-hook>
|
|
(which is before C<apt update> runs), for example by using the B<copy-in>
|
|
special hook. You also need to copy your keys into the chroot explicitly if the
|
|
key you passed via C<signed-by> points to a location that is not otherwise
|
|
populated during chroot creation (for example by installing a keyring package).
|
|
|
|
=item B<--dpkgopt>=I<option>|I<file>
|
|
|
|
Pass arbitrary I<option>s to dpkg. Will be permanently added to
|
|
F</etc/dpkg/dpkg.cfg.d/99mmdebstrap> inside the chroot. Use hooks for temporary
|
|
configuration options. Can be specified multiple times. Each I<option> will be
|
|
appended to 99mmdebstrap. If the command line argument is an existing I<file>,
|
|
the content of the file will be appended to 99mmdebstrap verbatim.
|
|
|
|
Example: Exclude paths to reduce chroot size
|
|
|
|
--dpkgopt='path-exclude=/usr/share/man/*'
|
|
--dpkgopt='path-include=/usr/share/man/man[1-9]/*'
|
|
--dpkgopt='path-exclude=/usr/share/locale/*'
|
|
--dpkgopt='path-include=/usr/share/locale/locale.alias'
|
|
--dpkgopt='path-exclude=/usr/share/doc/*'
|
|
--dpkgopt='path-include=/usr/share/doc/*/copyright'
|
|
--dpkgopt='path-include=/usr/share/doc/*/changelog.Debian.*'
|
|
|
|
=item B<--include>=I<pkg1>[,I<pkg2>,...]
|
|
|
|
Comma or whitespace separated list of packages which will be installed in
|
|
addition to the packages installed by the specified variant. The direct and
|
|
indirect hard dependencies will also be installed. The behaviour of this
|
|
option depends on the selected variant. The B<extract> and B<custom> variants
|
|
install no packages by default, so for these variants, the packages specified
|
|
by this option will be the only ones that get either extracted or installed by
|
|
dpkg, respectively. For all other variants, apt is used to install the
|
|
additional packages. Package names are directly passed to apt and thus, you
|
|
can use apt features like C<pkg/suite>, C<pkg=version>, C<pkg->, use a glob or
|
|
regex for C<pkg>, use apt patterns or pass a path to a .deb package file (see
|
|
below for notes concerning passing the path to a .deb package file in
|
|
B<unshare> mode). See L<apt(8)> for the supported syntax.
|
|
|
|
The option can be specified multiple times and the packages are concatenated in
|
|
the order in which they are given on the command line. If later list items are
|
|
repeated, then they get dropped so that the resulting package list is free of
|
|
duplicates. So the following are equivalent:
|
|
|
|
--include="pkg1/stable pkg2=1.0 pkg3-"
|
|
--include=pkg1/stable,pkg2=1.0,pkg3-,,,
|
|
--incl=pkg1/stable --incl="pkg2=1.0 pkg3-" --incl=pkg2=1.0,pkg3-
|
|
|
|
Since the list of packages is separated by comma or whitespace, it is not
|
|
possible to mix apt patterns or .deb package file paths containing either
|
|
commas or whitespace with normal package names. If you do, your patterns and
|
|
paths will be split by comma and whitespace as well and become useless. To pass
|
|
such a pattern or package file path, put them into their own B<--include>
|
|
option. If the argument to B<--include> starts with an apt pattern or with a
|
|
file path, then it will not be split:
|
|
|
|
--include="?or(?priority(required), ?priority(important))"
|
|
--include="./path/to/deb with spaces/and,commas/foo.deb"
|
|
|
|
Specifically, all arguments to B<--include> that start with a C<?>, C<!>, C<~>,
|
|
C<(>, C</>, C<./> or C<../> are not split and treated as single arguments to
|
|
apt. To add more packages, use multiple B<--include> options. To disable this
|
|
detection of patterns and paths, start the argument to B<--include> with a
|
|
comma or whitespace.
|
|
|
|
If you pass the path to a .deb package file using B<--include>, B<mmdebstrap>
|
|
will ensure that the path exists. If the path is a relative path, it will
|
|
internally by converted to an absolute path. Since apt (outside the chroot)
|
|
passes paths to dpkg (on the inside) verbatim, you have to make the .deb
|
|
package available under the same path inside the chroot as well or otherwise
|
|
dpkg inside the chroot will be unable to access it. This can be achieved using
|
|
a setup-hook. A hook that automatically makes the contents of C<file://>
|
|
mirrors as well as .deb packages given with B<--include> available inside the
|
|
chroot is provided by B<mmdebstrap> as
|
|
B<--hook-dir=/usr/share/mmdebstrap/hooks/file-mirror-automount>. This hook
|
|
takes care of copying all relevant files to their correct locations and cleans
|
|
up those files at the end. In B<unshare> mode, the .deb package paths have to
|
|
be accessible by the unshared user as well. This means that the package itself
|
|
likely must be made world-readable and all directory components on the path to
|
|
it world-executable.
|
|
|
|
=item B<--components>=I<comp1>[,I<comp2>,...]
|
|
|
|
Comma or whitespace separated list of components like main, contrib, non-free
|
|
and non-free-firmware which will be used for all URI-only I<MIRROR> arguments.
|
|
The option can be specified multiple times and the components are concatenated
|
|
in the order in which they are given on the command line. If later list items
|
|
are repeated, then they get dropped so that the resulting component list is
|
|
free of duplicates. So the following are equivalent:
|
|
|
|
--components="main contrib non-free non-free-firmware"
|
|
--components=main,contrib,non-free,non-free-firmware
|
|
--comp=main --comp="contrib non-free" --comp="main,non-free-firmware"
|
|
|
|
=item B<--architectures>=I<native>[,I<foreign1>,...]
|
|
|
|
Comma or whitespace separated list of architectures. The first architecture is
|
|
the I<native> architecture inside the chroot. The remaining architectures will
|
|
be added to the foreign dpkg architectures. Without this option, the I<native>
|
|
architecture of the chroot defaults to the native architecture of the system
|
|
running B<mmdebstrap>. The option can be specified multiple times and values
|
|
are concatenated. If later list items are repeated, then they get dropped so
|
|
that the resulting list is free of duplicates. So the following are
|
|
equivalent:
|
|
|
|
--architectures="amd64 armhf mipsel"
|
|
--architectures=amd64,armhf,mipsel
|
|
--arch=amd64 --arch="armhf mipsel" --arch=armhf,mipsel
|
|
|
|
=item B<--simulate>, B<--dry-run>
|
|
|
|
Run apt-get with B<--simulate>. Only the package cache is initialized but no
|
|
binary packages are downloaded or installed. Use this option to quickly check
|
|
whether a package selection within a certain suite and variant can in principle
|
|
be installed as far as their dependencies go. If the output is a tarball, then
|
|
no output is produced. If the output is a directory, then the directory will be
|
|
left populated with the skeleton files and directories necessary for apt to run
|
|
in it. No hooks are executed with B<--simulate> or B<--dry-run>.
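Example: check whether the B<buildd> variant of unstable is currently
installable without writing any output:

mmdebstrap --dry-run --variant=buildd unstable /dev/null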
|
|
|
|
=item B<--setup-hook>=I<command>
|
|
|
|
Execute arbitrary I<command>s right after initial setup (directory creation,
|
|
configuration of apt and dpkg, ...) but before any packages are downloaded or
|
|
installed. At that point, the chroot directory does not contain any
|
|
executables and thus cannot be chroot-ed into. See section B<HOOKS> for more
|
|
information.
|
|
|
|
Example: add additional apt sources entries on top of the default ones:
|
|
|
|
--setup-hook='echo "deb http..." > "$1"/etc/apt/sources.list.d/custom.list'
|
|
|
|
Example: Setup chroot for installing a sub-essential busybox-based chroot
|
|
with --variant=custom
|
|
--include=dpkg,busybox,libc-bin,base-files,base-passwd,debianutils
|
|
|
|
--setup-hook='mkdir -p "$1/bin"'
|
|
--setup-hook='for p in awk cat chmod chown cp diff echo env grep less ln
|
|
mkdir mount rm rmdir sed sh sleep sort touch uname mktemp; do
|
|
ln -s busybox "$1/bin/$p"; done'
|
|
--setup-hook='echo root:x:0:0:root:/root:/bin/sh > "$1/etc/passwd"'
|
|
--setup-hook='printf "root:x:0:\nmail:x:8:\nutmp:x:43:\n" > "$1/etc/group"'
|
|
|
|
For a more elegant way for setting up a sub-essential busybox-based chroot, see
|
|
the B<--hook-dir> option below.
|
|
|
|
=item B<--extract-hook>=I<command>
|
|
|
|
Execute arbitrary I<command>s after the Essential:yes packages have been
|
|
extracted but before installing them. See section B<HOOKS> for more
|
|
information.
|
|
|
|
Example: Install busybox symlinks
|
|
|
|
--extract-hook='chroot "$1" /bin/busybox --install -s'
|
|
|
|
=item B<--essential-hook>=I<command>
|
|
|
|
Execute arbitrary I<command>s after the Essential:yes packages have been
|
|
installed but before installing the remaining packages. The hook is not
|
|
executed for the B<extract> and B<custom> variants. See section B<HOOKS> for
|
|
more information.
|
|
|
|
Example: Enable unattended upgrades
|
|
|
|
--essential-hook='echo unattended-upgrades
|
|
unattended-upgrades/enable_auto_updates boolean true
|
|
| chroot "$1" debconf-set-selections'
|
|
|
|
Example: Select Europe/Berlin as the timezone
|
|
|
|
--essential-hook='echo tzdata tzdata/Areas select Europe
|
|
| chroot "$1" debconf-set-selections'
|
|
--essential-hook='echo tzdata tzdata/Zones/Europe select Berlin
|
|
| chroot "$1" debconf-set-selections'
|
|
|
|
=item B<--customize-hook>=I<command>
|
|
|
|
Execute arbitrary I<command>s after the chroot is set up and all packages have
been installed but before final cleanup actions are carried out. See section
|
|
B<HOOKS> for more information.
|
|
|
|
Example: Add a user without a password
|
|
|
|
--customize-hook='chroot "$1" useradd --home-dir /home/user
|
|
--create-home user'
|
|
--customize-hook='chroot "$1" passwd --delete user'
|
|
|
|
Example: set up F</etc/hostname> and F</etc/hosts>
|
|
|
|
--customize-hook='echo host > "$1/etc/hostname"'
|
|
--customize-hook='echo "127.0.0.1 localhost host" > "$1/etc/hosts"'
|
|
|
|
Example: to mimic B<debootstrap> behaviour, B<mmdebstrap> copies the host's
F</etc/resolv.conf> and F</etc/hostname> into the chroot. Remove them in a
B<--customize-hook> to make the chroot reproducible across multiple hosts:
|
|
|
|
--customize-hook='rm "$1"/etc/resolv.conf'
|
|
--customize-hook='rm "$1"/etc/hostname'
|
|
|
|
=item B<--hook-directory>=I<directory>
|
|
|
|
Execute scripts in I<directory> with filenames starting with C<setup>,
|
|
C<extract>, C<essential> or C<customize>, at the respective stages during an
|
|
mmdebstrap run. The files must be marked executable. Their extension is
|
|
ignored. Subdirectories are not traversed. This option is a short-hand for
|
|
specifying the remaining four hook options individually for each file in the
|
|
directory. If there is more than one script for a stage, then they are added
alphabetically. This is useful in cases where a user wants to run the same
|
|
hooks frequently. For example, given a directory C<./hooks> with two scripts
|
|
C<setup01-foo.sh> and C<setup02-bar.sh>, this call:
|
|
|
|
mmdebstrap --customize=./scriptA --hook-dir=./hooks --setup=./scriptB
|
|
|
|
is equivalent to this call:
|
|
|
|
mmdebstrap --customize=./scriptA --setup=./hooks/setup01-foo.sh \
|
|
--setup=./hooks/setup02-bar.sh --setup=./scriptB
|
|
|
|
The option can be specified multiple times and scripts are added to the
|
|
respective hooks in the order the options are given on the command line. Thus,
|
|
if the scripts in two directories depend upon each other, the scripts must be
|
|
placed into a common directory and be named such that they get added in the
|
|
correct order.
|
|
|
|
Example 1: Run mmdebstrap with eatmydata
|
|
|
|
--hook-dir=/usr/share/mmdebstrap/hooks/eatmydata
|
|
|
|
Example 2: Setup chroot for installing a sub-essential busybox-based chroot
|
|
|
|
--hook-dir=/usr/share/mmdebstrap/hooks/busybox
|
|
|
|
Example 3: Automatically mount all directories referenced by C<file://> mirrors
|
|
into the chroot
|
|
|
|
--hook-dir=/usr/share/mmdebstrap/hooks/file-mirror-automount
|
|
|
|
=item B<--skip>=I<stage>[,I<stage>,...]
|
|
|
|
B<mmdebstrap> tries hard to implement sensible defaults and will try to stop
|
|
you before shooting yourself in the foot. This option is for when you are sure
|
|
you know what you are doing and allows one to skip certain actions and safety
|
|
checks. See section B<OPERATION> for a list of possible arguments and their
|
|
context. The option can be specified multiple times or you can separate
|
|
multiple values by comma or whitespace.
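
For example, the following two invocations both keep the apt package lists and
the apt cache in the output (see the B<cleanup> items in section B<OPERATION>)
and are equivalent:

    $ mmdebstrap --skip=cleanup/apt/lists --skip=cleanup/apt/cache unstable chroot.tar
    $ mmdebstrap --skip=cleanup/apt/lists,cleanup/apt/cache unstable chroot.tar
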
=item B<-q,--quiet>, B<-s,--silent>
|
|
|
|
Do not write anything to standard error. If used together with B<--verbose> or
|
|
B<--debug>, only the last option will take effect.
|
|
|
|
=item B<-v,--verbose>
|
|
|
|
Instead of progress bars, write the dpkg and apt output directly to standard
|
|
error. If used together with B<--quiet> or B<--debug>, only the last option
|
|
will take effect.
|
|
|
|
=item B<-d,--debug>
|
|
|
|
In addition to the output produced by B<--verbose>, write detailed debugging
|
|
information to standard error. Errors will print a backtrace. If used together
|
|
with B<--quiet> or B<--verbose>, only the last option will take effect.
|
|
|
|
=item B<--logfile>=I<filename>
|
|
|
|
Instead of writing status information to standard error, write it into the
|
|
file given by I<filename>.
|
|
|
|
=back
|
|
|
|
=head1 MODES
|
|
|
|
Creating a Debian chroot requires not only permissions for running chroot but
|
|
also the ability to create files owned by the superuser. The selected mode
|
|
decides which way this is achieved.
|
|
|
|
=over 8
|
|
|
|
=item B<auto>
|
|
|
|
This mode automatically selects a fitting mode. If the effective user id is
that of the superuser, then the B<sudo> mode is chosen. Otherwise, the
|
|
B<unshare> mode is picked if F</etc/subuid> and F</etc/subgid> are set up
|
|
correctly. Should that not be the case and if the fakechroot binary exists, the
|
|
B<fakechroot> mode is chosen.
|
|
|
|
=item B<sudo>, B<root>
|
|
|
|
This mode directly executes chroot and is the same mode of operation as is
|
|
used by debootstrap. It is the only mode that can directly create a directory
|
|
chroot with the right permissions. If the chroot directory is not accessible
|
|
by the _apt user, then apt sandboxing will be automatically disabled. This mode
|
|
needs to be able to mount and thus requires C<CAP_SYS_ADMIN>.
|
|
|
|
=item B<unshare>
|
|
|
|
When used as a normal (not root) user, this mode uses Linux user namespaces to
|
|
allow unprivileged use of chroot and creation of files that appear to be owned
|
|
by the superuser inside the unshared namespace. A tarball created in this mode
|
|
will be bit-by-bit identical to a tarball created with the B<root> mode. With
|
|
this mode, the only binaries that will run as the root user will be
|
|
L<newuidmap(1)> and L<newgidmap(1)> via their setuid bit. Running those
|
|
successfully requires F</etc/subuid> and F</etc/subgid> to have an entry for
|
|
your username. This entry was usually created by L<adduser(8)> already.
|
|
|
|
The unshared user will not automatically have access to the same files as you
|
|
do. This is intentional and an additional security against unintended changes
|
|
to your files that could theoretically result from running B<mmdebstrap> and
|
|
package maintainer scripts. To copy files in and out of the chroot, either use
|
|
globally readable or writable directories or use special hooks like B<copy-in>
|
|
and B<copy-out>.
|
|
|
|
Besides the user namespace, the mount, pid (process ids), uts (hostname) and
|
|
ipc namespaces will be unshared as well. See the man pages of L<namespaces(7)>
|
|
and L<unshare(2)> as well as the manual pages they are linking to.
|
|
|
|
A directory chroot created with this mode will end up with wrong ownership
|
|
information (seen from outside the unshared user namespace). For correct
|
|
ownership information, the directory must be accessed from a user namespace
|
|
with the right subuid/subgid offset, like so:
|
|
|
|
$ lxc-usernsexec -- lxc-unshare -s 'MOUNT|PID|UTSNAME|IPC' -- \
|
|
> /usr/sbin/chroot ./debian-rootfs /bin/bash
|
|
|
|
Or without LXC:
|
|
|
|
$ mmdebstrap --unshare-helper /usr/sbin/chroot ./debian-rootfs /bin/bash
|
|
|
|
Or without mmdebstrap:
|
|
|
|
$ unshare --map-auto --map-user=65536 --map-group=65536 --keep-caps -- \
|
|
> /usr/sbin/chroot ./debian-rootfs /bin/bash
|
|
|
|
The above uses C<--map-auto> to map the block of user/group ids for the
|
|
effective user/group to a block starting at user/group ID 0. We also want to
|
|
map the current effective user/group ID into the subuid/subgid range using
|
|
C<--map-user> and C<--map-group>, respectively. But if that uid/gid overlaps
|
|
with the respective range, a "hole" will be punched into the mapping by the
|
|
L<unshare(1)> utility and the remaining uid/gid values will get shifted. Thus,
|
|
we map the current effective user/group ID to the highest possible uid/gid,
|
|
putting them at the end. Since that means that the user/group will be "nobody"
|
|
and not "root" inside the namespace, C<--keep-caps> propagate permitted
|
|
capabilities into the ambient set and thus give the user C<CAP_DAC_OVERRIDE>
|
|
and other capabilities that it would've had. The following does B<NOT> work:
|
|
|
|
    $ unshare --map-root-user --map-auto ... # or equivalent:
|
|
$ unshare --map-user=0 --map-group=0 --map-users=auto --map-groups=auto ...
|
|
|
|
Using the format of L<lxc-usernsexec(1)>, these options will result in the
|
|
following mapping:
|
|
|
|
0:$UID:1 + 1:$SUBUIDBASE:65535
|
|
|
|
So a hole is punched in the automatically mapped range at the very beginning to
|
|
make space for the user id of the current user as root inside the namespace and
|
|
the remaining uids are shifted. With C<--map-user=65536> the mapping is as
|
|
follows and places the current user at the end of the range, preventing any
|
|
shifting:
|
|
|
|
0:$SUBUIDBASE:65536 + 65536:$UID:1
|
|
|
|
Lastly, if you don't mind using superuser privileges and have systemd-nspawn
|
|
available and you know your subuid/subgid offset (100000 in this example):
|
|
|
|
$ sudo systemd-nspawn --private-users=100000 \
|
|
> --directory=./debian-rootfs /bin/bash
|
|
|
|
A directory created in B<unshare> mode cannot be removed the normal way.
|
|
Instead, use something like this:
|
|
|
|
$ unshare --map-root-user --map-auto rm -rf ./debian-rootfs
|
|
|
|
The above L<unshare(1)> command will map user and group ids into different
|
|
ranges compared to the mapping used by B<mmdebstrap> (effectively shifting them
|
|
one up) but it will provide the required capabilities for the removal
|
|
operation.
|
|
|
|
If this mode is used as the root user, the user namespace is not unshared (but
|
|
the mount namespace and others still are) and created directories will have
|
|
correct ownership information. This is also useful in cases where the root user
|
|
wants the benefits of an unshared mount namespace to prevent accidentally
|
|
messing up the system.
|
|
|
|
=item B<fakeroot>, B<fakechroot>
|
|
|
|
This mode will exec B<mmdebstrap> again under C<fakechroot fakeroot>. A
|
|
directory chroot created with this mode will end up with wrong permissions. If
|
|
you need a directory then run B<mmdebstrap> under C<fakechroot fakeroot -s
|
|
fakeroot.env> and use C<fakeroot.env> later when entering the chroot with
|
|
C<fakechroot fakeroot -i fakeroot.env chroot ...>. This mode will not work if
|
|
maintainer scripts are unable to handle C<LD_PRELOAD> correctly, as was the
case with the package B<initramfs-tools> until version 0.132. This mode will
also not work
|
|
with a different libc inside the chroot than on the outside. See the section
|
|
B<LIMITATIONS> in L<fakechroot(1)>.
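
Putting the commands quoted above together, a sketch of the directory workflow
looks like this: first create the chroot while saving the fake ownership state
to F<fakeroot.env>, then re-use that state when entering the chroot later:

    $ fakechroot fakeroot -s fakeroot.env mmdebstrap unstable ./unstable-chroot
    $ fakechroot fakeroot -i fakeroot.env chroot ./unstable-chroot /bin/bash
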
|
|
|
|
=item B<chrootless>
|
|
|
|
Uses the dpkg option C<--force-script-chrootless> to install packages into
|
|
I<TARGET> without dpkg and apt inside I<TARGET> but using apt and dpkg from the
|
|
machine running B<mmdebstrap>. Maintainer scripts are run without chrooting
|
|
into I<TARGET> and rely on their dependencies being installed on the machine
|
|
running B<mmdebstrap>. Only very few packages support this mode. Notably, as of
2022, not all essential packages support it. See
|
|
https://wiki.debian.org/Teams/Dpkg/Spec/InstallBootstrap or the
|
|
dpkg-root-support usertag of debian-dpkg@lists.debian.org in the Debian bug
|
|
tracking system. B<WARNING>: if this option is used carelessly with packages
|
|
that do not support C<DPKG_ROOT>, this mode can result in undesired changes to
|
|
the system running B<mmdebstrap> because maintainer-scripts will be run without
|
|
L<chroot(1)>. Make sure to run this mode without superuser privileges and/or
|
|
inside a throw-away chroot environment like so:
|
|
|
|
mmdebstrap --variant=apt --include=mmdebstrap \
|
|
--customize-hook='chroot "$1" mmdebstrap --mode=chrootless
|
|
--variant=apt unstable chrootless.tar' \
|
|
--customize-hook='copy-out chrootless.tar .' unstable /dev/null
|
|
|
|
=back
|
|
|
|
=head1 VARIANTS
|
|
|
|
All package sets also include the direct and indirect hard dependencies (but
|
|
not recommends) of the selected package sets. The variants B<minbase>,
|
|
B<buildd> and B<-> resemble the package sets that debootstrap would install
|
|
with the same I<--variant> argument. The release with a name matching the
|
|
I<SUITE> argument as well as the native architecture will be used to determine
|
|
the C<Essential:yes> and priority values. To select packages with matching
|
|
priority from any suite, specify the empty string for I<SUITE>. The default
|
|
variant is B<debootstrap>.
|
|
|
|
=over 8
|
|
|
|
=item B<extract>
|
|
|
|
Installs nothing by default (not even C<Essential:yes> packages). Packages
|
|
given by the C<--include> option are extracted but will not be installed.
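
For example (the package name is only an illustration), the following extracts
busybox and its dependencies without configuring them:

    $ mmdebstrap --variant=extract --include=busybox unstable busybox-extract.tar
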
|
|
|
|
=item B<custom>
|
|
|
|
Installs nothing by default (not even C<Essential:yes> packages). Packages
|
|
given by the C<--include> option will be installed. If another mode than
|
|
B<chrootless> was selected and dpkg was not part of the included package set,
|
|
then this variant will fail because it cannot configure the packages.
|
|
|
|
=item B<essential>
|
|
|
|
C<Essential:yes> packages. If I<SUITE> is a non-empty string, then only
|
|
packages from the archive with suite or codename matching I<SUITE> will be
|
|
considered for selection of C<Essential:yes> packages.
|
|
|
|
=item B<apt>
|
|
|
|
The B<essential> set plus apt.
|
|
It is roughly equivalent to running mmdebstrap with
|
|
|
|
--variant=essential --include="apt"
|
|
|
|
=item B<buildd>
|
|
|
|
The B<essential> set plus apt and build-essential.
|
|
It is roughly equivalent to running mmdebstrap with
|
|
|
|
--variant=essential --include="apt,build-essential"
|
|
|
|
=item B<required>, B<minbase>
|
|
|
|
The B<essential> set plus all packages with Priority:required.
|
|
It is roughly equivalent to running mmdebstrap with
|
|
|
|
--variant=essential --include="?priority(required)"
|
|
|
|
=item B<important>, B<debootstrap>, B<->
|
|
|
|
The B<required> set plus all packages with Priority:important. This is the
|
|
default of debootstrap. It is roughly equivalent to running mmdebstrap with
|
|
|
|
--variant=essential --include="~prequired|~pimportant"
|
|
|
|
=item B<standard>
|
|
|
|
The B<important> set plus all packages with Priority:standard.
|
|
It is roughly equivalent to running mmdebstrap with
|
|
|
|
--variant=essential --include="~prequired|~pimportant|~pstandard"
|
|
|
|
=back
|
|
|
|
=head1 FORMATS
|
|
|
|
The output format of B<mmdebstrap> is specified using the B<--format> option.
|
|
Without that option the default format is I<auto>. The following formats exist:
|
|
|
|
=over 8
|
|
|
|
=item B<auto>
|
|
|
|
When selecting this format (the default), the actual format will be inferred
|
|
from the I<TARGET> positional argument. If I<TARGET> was not specified, then
|
|
the B<tar> format will be chosen. If I<TARGET> happens to be F</dev/null> or if
|
|
standard output is F</dev/null>, then the B<null> format will be chosen. If
|
|
I<TARGET> is an existing directory and does not equal C<->, then the
|
|
B<directory> format will be chosen. If I<TARGET> ends with C<.tar> or with one
|
|
of the filename extensions listed in the section B<COMPRESSION>, or if
|
|
I<TARGET> equals C<->, or if I<TARGET> is a named pipe (fifo) or if I<TARGET>
|
|
is a character special file, then the B<tar> format will be chosen. If
|
|
I<TARGET> ends with C<.squashfs> or C<.sqfs>, then the B<squashfs> format will
|
|
be chosen. If I<TARGET> ends with C<.ext2> then the B<ext2> format will be
|
|
chosen. If I<TARGET> ends with C<.ext4> then the B<ext4> format will be
|
|
chosen. If none of these conditions apply, the B<directory> format will be
|
|
chosen.
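
As an illustration of these rules, the following invocations select the
B<tar>, B<directory> and B<squashfs> formats, respectively:

    $ mmdebstrap unstable unstable-chroot.tar.xz
    $ mmdebstrap unstable ./unstable-chroot
    $ mmdebstrap unstable unstable-chroot.sqfs
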
=item B<directory>, B<dir>
|
|
|
|
A chroot directory will be created in I<TARGET>. If the directory already
|
|
exists, it must either be empty or only contain an empty C<lost+found>
|
|
directory. The special I<TARGET> C<-> does not work with this format because a
|
|
directory cannot be written to standard output. If you need your directory to be
|
|
named C<->, then just explicitly pass the relative path to it like F<./->. If
|
|
a directory is chosen as output in any other mode than B<sudo>, then its
|
|
contents will have wrong ownership information and special device files will be
|
|
missing. Refer to the section B<MODES> for more information.
|
|
|
|
=item B<tar>
|
|
|
|
A temporary chroot directory will be created in C<$TMPDIR> or F</tmp> if
|
|
C<$TMPDIR> is not set. A tarball of that directory will be stored in I<TARGET>
|
|
or sent to standard output if I<TARGET> was omitted or if I<TARGET> equals
|
|
C<->. If I<TARGET> ends with one of the filename extensions listed in the
|
|
section B<COMPRESSION>, then a compressed tarball will be created. The tarball
|
|
will be in POSIX 1003.1-2001 (pax) format and will contain extended attributes.
|
|
To preserve the extended attributes, you have to pass B<--xattrs
|
|
--xattrs-include='*'> to tar when extracting the tarball.
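
For example, extracting such a tarball into a directory while preserving the
extended attributes could be done with GNU tar like this:

    $ mkdir ./unstable-chroot
    $ sudo tar --xattrs --xattrs-include='*' \
          --directory ./unstable-chroot --extract --file unstable-chroot.tar
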
|
|
|
|
=item B<squashfs>, B<sqfs>
|
|
|
|
A temporary chroot directory will be created in C<$TMPDIR> or F</tmp> if
|
|
C<$TMPDIR> is not set. A tarball of that directory will be piped to the
|
|
C<tar2sqfs> utility, which will create an xz compressed squashfs image with a
|
|
blocksize of 1048576 bytes in I<TARGET>. The special I<TARGET> C<-> does not
|
|
work with this format because C<tar2sqfs> can only write to a regular file. If
|
|
you need your squashfs image to be named C<->, then just explicitly pass the
|
|
relative path to it like F<./->. The C<tar2sqfs> tool only supports a limited
|
|
set of extended attribute prefixes. Therefore, extended attributes are disabled
|
|
in the resulting image. If you need them, create a tarball first and remove the
|
|
extended attributes from its pax headers. Refer to the B<EXAMPLES> section for
|
|
how to achieve this.
|
|
|
|
=item B<ext2>
|
|
|
|
A temporary chroot directory will be created in C<$TMPDIR> or F</tmp> if
|
|
C<$TMPDIR> is not set. A tarball of that directory will be piped to the
|
|
C<genext2fs> utility, which will create an ext2 image that will be
|
|
approximately 90% full in I<TARGET>. The special I<TARGET> C<-> does not work
|
|
with this format because C<genext2fs> can only write to a regular file. If you
|
|
need your ext2 image to be named C<->, then just explicitly pass the relative path
|
|
to it like F<./->. To convert the result to an ext3 image, use C<tune2fs -O
|
|
has_journal TARGET> and to convert it to ext4, use C<tune2fs -O
|
|
extents,uninit_bg,dir_index,has_journal TARGET>.
|
|
|
|
B<CAUTION>: the ext2 format does not support timestamps beyond 2038 January 19,
|
|
does not support sub-second precision timestamps and does not support extended
|
|
attributes. Its inode size of 128 prevents adding these features with tune2fs
|
|
later on.
|
|
|
|
=item B<ext4>
|
|
|
|
A temporary chroot directory will be created in C<$TMPDIR> or F</tmp> if
|
|
C<$TMPDIR> is not set. A tarball of that directory will be piped to the
|
|
C<mke2fs> utility, which will create an ext4 image that will be approximately
|
|
90% full in I<TARGET>. The special I<TARGET> C<-> does not work with this
|
|
format because C<mke2fs> can only write to a regular file. If you need your
|
|
ext4 image to be named C<->, then just explicitly pass the relative path to it
|
|
like F<./->. If C<SOURCE_DATE_EPOCH> is set, the filesystem UUID and hash_seed
|
|
will be set to a UUID derived from SOURCE_DATE_EPOCH to create reproducible
|
|
images.
|
|
|
|
=item B<null>
|
|
|
|
A temporary chroot directory will be created in C<$TMPDIR> or F</tmp> if
|
|
C<$TMPDIR> is not set. After the bootstrap is complete, the temporary chroot
|
|
will be deleted without being part of the output. This is most useful when the
|
|
desired artifact is generated inside the chroot and it is transferred using
|
|
special hooks such as B<sync-out>. It is also useful in situations where only
|
|
the exit code or stdout or stderr of a process run in a hook is of interest.
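
For example (a sketch), the following runs a command inside a throw-away
chroot where only the hook's output is of interest and nothing is written to
disk afterwards:

    $ mmdebstrap --variant=apt --customize-hook='chroot "$1" dpkg -l' \
          unstable /dev/null
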
|
|
|
|
=back
|
|
|
|
=head1 HOOKS
|
|
|
|
This section describes properties of the hook options B<--setup-hook>,
|
|
B<--extract-hook>, B<--essential-hook> and B<--customize-hook> which are common
|
|
to all four of them. Any information specific to each hook is documented under
|
|
the specific hook options in the section B<OPTIONS>.
|
|
|
|
The options can be specified multiple times and the commands are executed in
|
|
the order in which they are given on the command line. There are four different
|
|
types of hook option arguments. If the argument passed to the hook option
|
|
starts with C<copy-in>, C<copy-out>, C<tar-in>, C<tar-out>, C<upload> or
|
|
C<download> followed by a space, then the hook is interpreted as a special
|
|
hook. Otherwise, if I<command> is an existing executable file from C<$PATH> or
|
|
if I<command> does not contain any shell metacharacters, then I<command> is
|
|
directly exec-ed with the path to the chroot directory passed as the first
|
|
argument. Otherwise, I<command> is executed under I<sh> and the chroot
|
|
directory can be accessed via I<$1>. Background (daemon) processes spawned in
|
|
a hook are not guaranteed to persist beyond the hook that created them.
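
To illustrate the two forms (the script path and file name are placeholders):
the first hook below contains no shell metacharacters and is exec-ed directly
with the chroot directory as its first argument, while the second contains
shell metacharacters and is therefore run under I<sh> with the chroot
directory available as C<$1>:

    --customize-hook=./my-hook.sh
    --customize-hook='echo hello > "$1/tmp/hello"'
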
|
|
|
|
Most environment variables set by B<mmdebstrap> (like C<DEBIAN_FRONTEND>,
|
|
C<LC_ALL> and C<PATH>) are preserved. Most notably, C<APT_CONFIG> is
|
|
unset. If you need the path to C<APT_CONFIG> as written by mmdebstrap it can be
|
|
found in the C<MMDEBSTRAP_APT_CONFIG> environment variable. All environment
|
|
variables set by the user are preserved, except for C<TMPDIR> which is cleared.
|
|
See section B<TMPDIR>. Furthermore, C<MMDEBSTRAP_MODE> will store the mode set
|
|
by B<--mode>, C<MMDEBSTRAP_FORMAT> stores the format chosen by B<--format>,
|
|
C<MMDEBSTRAP_HOOK> stores which hook is currently run (setup, extract,
|
|
essential, customize), C<MMDEBSTRAP_ARGV0> stores the name of the binary with
|
|
which B<mmdebstrap> was executed and C<MMDEBSTRAP_VERBOSITY> stores the
|
|
numerical verbosity level (0 for no output, 1 for normal, 2 for verbose and 3
|
|
for debug output). The C<MMDEBSTRAP_INCLUDE> variable stores the list of
|
|
packages, apt patterns or file paths given by the B<--include> option,
|
|
separated by a comma and with commas and percent signs in the option values
|
|
urlencoded. If a I<SUITE> name was supplied, it is stored in C<MMDEBSTRAP_SUITE>.
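
For example, a hook command can use these variables to adjust its behaviour,
as in this sketch which merely reports the current stage and mode on standard
error:

    --customize-hook='echo "running $MMDEBSTRAP_HOOK hook in $MMDEBSTRAP_MODE mode" >&2'
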
|
|
|
|
In special hooks, the paths inside the chroot are relative to the root
|
|
directory of the chroot. The path on the outside is relative to current
|
|
directory of the original B<mmdebstrap> invocation. The path inside the chroot
|
|
must already exist. Paths outside the chroot are created as necessary.
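
For example (the file names are placeholders), the following uploads a
configuration file from the current directory into F</etc> inside the chroot
and later copies the dpkg log back out:

    --essential-hook='upload ./local.conf /etc/local.conf'
    --customize-hook='copy-out /var/log/dpkg.log .'
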
|
|
|
|
In B<fakechroot> mode, C<tar>, or C<sh> and C<cat> have to be run inside the
|
|
chroot; otherwise, symlinks will be wrongly resolved and/or permissions will
|
|
be off. This means that the special hooks might fail in B<fakechroot> mode for
|
|
the B<setup> hook or for the B<extract> and B<custom> variants if no C<tar> or
|
|
C<sh> and C<cat> is available inside the chroot.
|
|
|
|
=over 8
|
|
|
|
=item B<copy-out> I<pathinside> [I<pathinside> ...] I<pathoutside>
|
|
|
|
Recursively copies one or more files and directories from
|
|
I<pathinside> inside the chroot to I<pathoutside> outside of the chroot.
|
|
|
|
=item B<copy-in> I<pathoutside> [I<pathoutside> ...] I<pathinside>
|
|
|
|
Recursively copies one or more files and directories from outside the chroot,
placing them into I<pathinside> inside of the chroot.
|
|
|
|
=item B<sync-out> I<pathinside> I<pathoutside>
|
|
|
|
Recursively copy everything inside I<pathinside> inside the chroot into
|
|
I<pathoutside>. In contrast to B<copy-out>, this command synchronizes the
|
|
content of I<pathinside> with the content of I<pathoutside> without deleting
|
|
anything from I<pathoutside> but overwriting content as necessary. Use this
|
|
command over B<copy-out> if you don't want to create a new directory outside
|
|
the chroot but only update the content of an existing directory.
|
|
|
|
=item B<sync-in> I<pathoutside> I<pathinside>
|
|
|
|
Recursively copy everything inside I<pathoutside> into I<pathinside> inside the
|
|
chroot. In contrast to B<copy-in>, this command synchronizes the content of
|
|
I<pathoutside> with the content of I<pathinside> without deleting anything from
|
|
I<pathinside> but overwriting content as necessary. Use this command over
|
|
B<copy-in> if you don't want to create a new directory inside the chroot but
|
|
only update the content of an existing directory.
|
|
|
|
=item B<tar-in> I<outside.tar> I<pathinside>
|
|
|
|
Unpacks a tarball I<outside.tar> from outside the chroot into a certain
|
|
location I<pathinside> inside the chroot. In B<unshare> mode, device nodes
|
|
cannot be created. To ignore device nodes in tarballs, use
|
|
B<--skip=tar-in/mknod>.
|
|
|
|
=item B<tar-out> I<pathinside> I<outside.tar>
|
|
|
|
Packs the path I<pathinside> from inside the chroot into a tarball, placing it
|
|
into a certain location I<outside.tar> outside the chroot. To emulate behaviour
|
|
of C<cp> and to provide control over the path which gets put into the tarball,
|
|
a C<chdir()> is performed to the C<dirname()> of I<pathinside> and then the
|
|
C<basename()> of I<pathinside> is packaged as a tarball. For example, if
|
|
I<pathinside> is C</boot/.> then first a C<chdir()> into C</boot> will be
|
|
performed before packing up the contents of C<.> inside C</boot>.
|
|
|
|
=item B<download> I<fileinside> I<fileoutside>
|
|
|
|
Copy the file given by I<fileinside> from inside the chroot to outside the
|
|
chroot as I<fileoutside>. In contrast to B<copy-out>, this command only
|
|
handles files and not directories. To copy a directory recursively out of the
|
|
chroot, use B<copy-out> or B<tar-out>. Its advantage is that by being able to
|
|
specify the full path on the outside, including the filename, the file on the
|
|
outside can have a different name from the file on the inside. In contrast to
|
|
B<copy-out> and B<tar-out>, this command follows symlinks.
|
|
|
|
=item B<upload> I<fileoutside> I<fileinside>
|
|
|
|
Copy the file given by I<fileoutside> from outside the chroot to inside the
|
|
chroot as I<fileinside>. In contrast to B<copy-in>, this command only
|
|
handles files and not directories. To copy a directory recursively into the
|
|
chroot, use B<copy-in> or B<tar-in>. Its advantage is that by being able to
|
|
specify the full path on the inside, including the filename, the file on the
|
|
inside can have a different name from the file on the outside. In contrast to
|
|
B<copy-in> and B<tar-in>, permission and ownership information will not be
|
|
retained.
|
|
|
|
=back
|
|
|
|
=head1 OPERATION
|
|
|
|
This section gives an overview of the different steps to create a chroot. At
|
|
its core, what B<mmdebstrap> does can be put into a 14 line shell script:
|
|
|
|
mkdir -p "$2/etc/apt" "$2/var/cache" "$2/var/lib"
|
|
cat << END > "$2/apt.conf"
|
|
Apt::Architecture "$(dpkg --print-architecture)";
|
|
Apt::Architectures "$(dpkg --print-architecture)";
|
|
Dir "$(cd "$2" && pwd)";
|
|
Dir::Etc::Trusted "$(eval "$(apt-config shell v Dir::Etc::Trusted/f)"; printf "$v")";
|
|
Dir::Etc::TrustedParts "$(eval "$(apt-config shell v Dir::Etc::TrustedParts/d)"; printf "$v")";
|
|
END
|
|
echo "deb http://deb.debian.org/debian/ $1 main" > "$2/etc/apt/sources.list"
|
|
APT_CONFIG="$2/apt.conf" apt-get update
|
|
APT_CONFIG="$2/apt.conf" apt-get --yes --download-only install '?essential'
|
|
for f in "$2"/var/cache/apt/archives/*.deb; do dpkg-deb --extract "$f" "$2"; done
|
|
chroot "$2" sh -c "dpkg --install --force-depends /var/cache/apt/archives/*.deb"
|
|
|
|
The additional complexity of B<mmdebstrap> is to support operation without
|
|
superuser privileges, bit-by-bit reproducible output, hooks and foreign
|
|
architecture support.
|
|
|
|
The remainder of this section explains what B<mmdebstrap> does step-by-step.
|
|
|
|
=over 8
|
|
|
|
=item B<check>
|
|
|
|
Upon startup, several checks are carried out, like:
|
|
|
|
=over 4
|
|
|
|
=item * whether required utilities (apt, dpkg, tar) are installed
|
|
|
|
=item * which mode to use and whether prerequisites are met
|
|
|
|
=item * do not allow chrootless mode as root (without fakeroot) unless inside a chroot. This check can be disabled using B<--skip=check/chrootless>
|
|
|
|
=item * whether the requested architecture can be executed using qemu binfmt_misc support. This requires arch-test and can be disabled using B<--skip=check/qemu>
|
|
|
|
=item * how the apt sources can be assembled from I<SUITE>, I<MIRROR> and B<--components> and/or from standard input as deb822 or one-line format and whether the required GPG keys exist.
|
|
|
|
=item * which output format to pick depending on the B<--format> argument or name of I<TARGET> or its type.
|
|
|
|
=item * whether the output directory is empty. This check can be disabled using B<--skip=check/empty>
|
|
|
|
=item * whether adding a C<signed-by> to C<apt/sources.list> is necessary. This requires gpg and can be disabled using B<--skip=check/signed-by>
|
|
|
|
=back
|
|
|
|
=item B<setup>
|
|
|
|
The following tasks are carried out unless B<--skip=setup> is used:
|
|
|
|
=over 4
|
|
|
|
=item * create required directories
|
|
|
|
=item * write out the temporary apt config file
|
|
|
|
=item * populates F</etc/apt/apt.conf.d/99mmdebstrap> and F</etc/dpkg/dpkg.cfg.d/99mmdebstrap> with config options from B<--aptopt> and B<--dpkgopt>, respectively
|
|
|
|
=item * write out F</etc/apt/sources.list>
|
|
|
|
=item * copy over F</etc/resolv.conf> and F</etc/hostname>
|
|
|
|
=item * populate F</dev> if mknod is possible
|
|
|
|
=back
|
|
|
|
=item B<setup-hook>
|
|
|
|
Run B<--setup-hook> options and all F<setup*> scripts in B<--hook-dir>.
|
|
|
|
=item B<update>
|
|
|
|
Runs C<apt-get update> using the temporary apt configuration file created in
|
|
the B<setup> step. This can be disabled using B<--skip=update>.
|
|
|
|
=item B<download>
|
|
|
|
In the B<extract> and B<custom> variants, C<apt-get install> is used to
|
|
download all the packages requested via the B<--include> option. In the
|
|
remaining variants, apt patterns are used to find the C<Essential:yes> packages
|
|
from the native architecture. If I<SUITE> is a non-empty string, then only
|
|
packages from the archive with suite or codename matching I<SUITE> will be
|
|
considered for selection of C<Essential:yes> packages.
|
|
|
|
=item B<mount>
|
|
|
|
Mount relevant device nodes, F</proc> and F</sys> into the chroot and unmount
|
|
them afterwards. This can be disabled using B<--skip=chroot/mount> or
|
|
specifically by B<--skip=chroot/mount/dev>, B<--skip=chroot/mount/proc> and
|
|
B<--skip=chroot/mount/sys>, respectively. B<mmdebstrap> will disable running
|
|
services by temporarily moving F</usr/sbin/policy-rc.d> and
|
|
F</usr/sbin/start-stop-daemon> if they exist. This can be disabled with
|
|
B<--skip=chroot/policy-rc.d> and B<--skip=chroot/start-stop-daemon>,
|
|
respectively.
|
|
|
|
=item B<extract>
|
|
|
|
Extract the downloaded packages into the rootfs.
|
|
|
|
=item B<prepare>
|
|
|
|
In B<fakechroot> mode, the environment variable C<LD_LIBRARY_PATH> will be set up
|
|
correctly. For foreign B<fakechroot> environments, C<LD_LIBRARY_PATH> and
|
|
C<QEMU_LD_PREFIX> are set up accordingly. This step is not carried out in
|
|
B<extract> mode and neither for the B<chrootless> variant.
|
|
|
|
=item B<extract-hook>
|
|
|
|
Run B<--extract-hook> options and all F<extract*> scripts in B<--hook-dir>.
|
|
|
|
=item B<essential>
|
|
|
|
Uses C<dpkg --install> to properly install all packages that have been
|
|
extracted before. Removes all packages downloaded in the B<download> step,
|
|
except those which were present in F</var/cache/apt/archives/> before (if any).
|
|
This can be disabled using B<--skip=essential/unlink>. This step is not carried
|
|
out in B<extract> mode.
|
|
|
|
=item B<essential-hook>
|
|
|
|
Run B<--essential-hook> options and all F<essential*> scripts in B<--hook-dir>.
|
|
This step is not carried out in B<extract> mode.
|
|
|
|
=item B<install>
|
|
|
|
Install the apt package into the chroot if necessary, and then run apt from
|
|
inside the chroot to install all remaining packages. This step is not carried
|
|
out in B<extract> mode.
|
|
|
|
=item B<customize-hook>
|
|
|
|
Run B<--customize-hook> options and all F<customize*> scripts in B<--hook-dir>.
|
|
This step is not carried out in B<extract> mode.
|
|
|
|
=item B<unmount>
|
|
|
|
Unmount everything that was mounted during the B<mount> stage and restore
|
|
F</usr/sbin/policy-rc.d> and F</usr/sbin/start-stop-daemon> if necessary.
|
|
|
|
=item B<zombie-reaping>
|
|
|
|
Wait for (reap) still running processes (background processes or zombie
|
|
processes), unless B<--skip=zombie-reaping> is used.
|
|
|
|
=item B<cleanup>
|
|
|
|
Performs cleanup tasks, unless B<--skip=cleanup> is used:
|
|
|
|
=over 4
|
|
|
|
=item * Removes the package lists (unless B<--skip=cleanup/apt/lists>) and apt cache (unless B<--skip=cleanup/apt/cache>). Both removals can be disabled by using B<--skip=cleanup/apt>.
|
|
|
|
=item * Remove all files that were put into the chroot for setup purposes, like F</etc/apt/apt.conf.d/00mmdebstrap> and the temporary apt config. This can be disabled using B<--skip=cleanup/mmdebstrap>.
|
|
|
|
=item * Remove files that make the result unreproducible and write the empty string to /etc/machine-id if it exists. This can be disabled using B<--skip=cleanup/reproducible>. Note that this will not remove files that make the result unreproducible on machines with differing F</etc/resolv.conf> or F</etc/hostname>. Use a B<--customize-hook> to make those two files reproducible across multiple hosts. See section C<SOURCE_DATE_EPOCH> for more information. The following files will be removed:
|
|
|
|
=over 4
|
|
|
|
=item * F</var/log/dpkg.log>
|
|
|
|
=item * F</var/log/apt/history.log>
|
|
|
|
=item * F</var/log/apt/term.log>
|
|
|
|
=item * F</var/log/alternatives.log>
|
|
|
|
=item * F</var/cache/ldconfig/aux-cache>
|
|
|
|
=item * F</var/log/apt/eipp.log.xz>
|
|
|
|
=item * F</var/lib/dbus/machine-id>
|
|
|
|
=back
|
|
|
|
=item * Remove everything in F</run> inside the chroot. This can be disabled using B<--skip=cleanup/run>.
|
|
|
|
=item * Remove everything in F</tmp> inside the chroot. This can be disabled using B<--skip=cleanup/tmp>.
|
|
|
|
=back
|
|
|
|
=item B<output>
|
|
|
|
For formats other than B<directory>, pack up the temporary chroot directory
|
|
into a tarball, ext2 image, ext4 image or squashfs image and delete the
|
|
temporary chroot directory.
|
|
|
|
If B<--skip=output/dev> is added, the resulting chroot will not contain the
|
|
device nodes, directories and symlinks that B<debootstrap> creates but just
|
|
an empty /dev as created by B<base-files>.
|
|
|
|
If B<--skip=output/mknod> is added, the resulting chroot will not contain
|
|
device nodes (neither block nor character special devices). This is useful
|
|
if the chroot tarball is to be extracted in environments where mknod does
|
|
not function like in unshared user namespaces.
|
|
|
|
=back
|
|
|
|
=head1 EXAMPLES
|
|
|
|
Use like debootstrap:
|
|
|
|
$ sudo mmdebstrap unstable ./unstable-chroot
|
|
|
|
Without superuser privileges:
|
|
|
|
$ mmdebstrap unstable unstable-chroot.tar
|
|
|
|
With no command line arguments at all. The chroot content is entirely defined
|
|
by a sources.list file on standard input.
|
|
|
|
$ mmdebstrap < /etc/apt/sources.list > unstable-chroot.tar
|
|
|
|
Since the tarball is output on stdout, members of it can be excluded using tar
|
|
on-the-fly. For example, the F</dev> directory can be removed from the final
tarball in cases where it is to be extracted by a non-root user who cannot
|
|
create device nodes:
|
|
|
|
$ mmdebstrap unstable | tar --delete ./dev > unstable-chroot.tar
|
|
|
|
Create a tarball for use with C<sbuild --chroot-mode=unshare>:
|
|
|
|
$ mmdebstrap --variant=buildd unstable ~/.cache/sbuild/unstable-amd64.tar
|
|
|
|
Instead of a tarball, a squashfs image can be created:
|
|
|
|
$ mmdebstrap unstable unstable-chroot.squashfs
|
|
|
|
By default, B<mmdebstrap> runs B<tar2sqfs> with C<--no-skip --exportable
|
|
--compressor xz --block-size 1048576>. To choose a different set of options,
|
|
and to filter out all extended attributes not supported by B<tar2sqfs>, pipe
|
|
the output of B<mmdebstrap> into B<tar2sqfs> manually like so:
|
|
|
|
$ mmdebstrap unstable \
|
|
| mmtarfilter --pax-exclude='*' \
|
|
--pax-include='SCHILY.xattr.user.*' \
|
|
--pax-include='SCHILY.xattr.trusted.*' \
|
|
--pax-include='SCHILY.xattr.security.*' \
|
|
| tar2sqfs --quiet --no-skip --force --exportable --compressor xz \
|
|
--block-size 1048576 unstable-chroot.squashfs
|
|
|
|
By default, debootstrapping a stable distribution will add mirrors for security
|
|
and updates to the sources.list.
|
|
|
|
$ mmdebstrap stable stable-chroot.tar
|
|
|
|
If you don't want this behaviour, you can override it by manually specifying a
|
|
mirror in various different ways:
|
|
|
|
$ mmdebstrap stable stable-chroot.tar http://deb.debian.org/debian
|
|
$ mmdebstrap stable stable-chroot.tar "deb http://deb.debian.org/debian stable main"
|
|
$ mmdebstrap stable stable-chroot.tar /path/to/sources.list
|
|
$ mmdebstrap stable stable-chroot.tar - < /path/to/sources.list
|
|
|
|
Drop locales (but not the symlink to the locale name alias database),
|
|
translated manual packages (but not the untranslated ones), and documentation
|
|
(but not copyright and Debian changelog).
|
|
|
|
$ mmdebstrap --variant=essential \
|
|
--dpkgopt='path-exclude=/usr/share/man/*' \
|
|
--dpkgopt='path-include=/usr/share/man/man[1-9]/*' \
|
|
--dpkgopt='path-exclude=/usr/share/locale/*' \
|
|
--dpkgopt='path-include=/usr/share/locale/locale.alias' \
|
|
--dpkgopt='path-exclude=/usr/share/doc/*' \
|
|
--dpkgopt='path-include=/usr/share/doc/*/copyright' \
|
|
--dpkgopt='path-include=/usr/share/doc/*/changelog.Debian.*' \
|
|
unstable debian-unstable.tar
|
|
|
|
Create a bootable USB Stick that boots into a full Debian desktop:
|
|
|
|
$ mmdebstrap --aptopt='Apt::Install-Recommends "true"' --customize-hook \
|
|
'chroot "$1" adduser --comment user --disabled-password user' \
|
|
--customize-hook='echo 'user:live' | chroot "$1" chpasswd' \
|
|
--customize-hook='echo host > "$1/etc/hostname"' \
|
|
--customize-hook='echo "127.0.0.1 localhost host" > "$1/etc/hosts"' \
|
|
--include=linux-image-amd64,task-desktop unstable debian-unstable.tar
|
|
$ cat << END > extlinux.conf
|
|
> default linux
|
|
> timeout 0
|
|
>
|
|
> label linux
|
|
> kernel /vmlinuz
|
|
> append initrd=/initrd.img root=LABEL=rootfs
|
|
END
|
|
# You can use $(sudo blockdev --getsize64 /dev/sdXXX) to get the right
|
|
# image size for the target medium in bytes
|
|
$ guestfish -N debian-unstable.img=disk:8G -- \
|
|
part-disk /dev/sda mbr : \
|
|
part-set-bootable /dev/sda 1 true : \
|
|
mkfs ext4 /dev/sda1 : \
|
|
set-label /dev/sda1 rootfs : \
|
|
mount /dev/sda1 / : \
|
|
tar-in debian-unstable.tar / xattrs:true : \
|
|
upload /usr/lib/EXTLINUX/mbr.bin /boot/mbr.bin : \
|
|
copy-file-to-device /boot/mbr.bin /dev/sda size:440 : \
|
|
extlinux / : copy-in extlinux.conf / : sync : umount / : shutdown
|
|
$ qemu-system-x86_64 -m 1G -enable-kvm debian-unstable.img
|
|
$ sudo dd if=debian-unstable.img of=/dev/sdXXX status=progress
|
|
|
|
On architectures without extlinux you can also boot using grub2:
|
|
|
|
$ mmdebstrap --include=linux-image-amd64,grub2,systemd-sysv unstable fs.tar
|
|
$ guestfish -N debian-unstable.img=disk:2G -- \
|
|
part-disk /dev/sda mbr : \
|
|
part-set-bootable /dev/sda 1 true : \
|
|
mkfs ext4 /dev/sda1 : \
|
|
set-label /dev/sda1 rootfs : \
|
|
mount /dev/sda1 / : \
|
|
tar-in fs.tar / xattrs:true : \
|
|
command "grub-install /dev/sda" : \
|
|
command update-grub : \
|
|
sync : umount / : shutdown
|
|
$ qemu-system-x86_64 -m 1G -enable-kvm debian-unstable.img
|
|
|
|
Build libdvdcss2.deb without installing anything or changing apt
|
|
sources on the current system:
|
|
|
|
$ mmdebstrap --variant=apt --components=main,contrib --include=libdvd-pkg \
|
|
--customize-hook='chroot $1 /usr/lib/libdvd-pkg/b-i_libdvdcss.sh' \
|
|
| tar --extract --verbose --strip-components=4 \
|
|
--wildcards './usr/src/libdvd-pkg/libdvdcss2_*_*.deb'
|
|
$ ls libdvdcss2_*_*.deb
|
|
|
|
Use as replacement for autopkgtest-build-qemu and vmdb2 for all architectures
|
|
supporting EFI booting (amd64, arm64, armhf, i386, riscv64), using a
convenience wrapper around B<mmdebstrap>:
|
|
|
|
$ mmdebstrap-autopkgtest-build-qemu unstable ./autopkgtest.img
|
|
|
|
Use as replacement for autopkgtest-build-qemu and vmdb2 on architectures
|
|
supporting extlinux (amd64 and i386):
|
|
|
|
$ mmdebstrap --variant=important --include=linux-image-amd64 \
|
|
--customize-hook='chroot "$1" passwd --delete root' \
|
|
--customize-hook='chroot "$1" useradd --home-dir /home/user --create-home user' \
|
|
--customize-hook='chroot "$1" passwd --delete user' \
|
|
--customize-hook='echo host > "$1/etc/hostname"' \
|
|
--customize-hook='echo "127.0.0.1 localhost host" > "$1/etc/hosts"' \
|
|
--customize-hook=/usr/share/autopkgtest/setup-commands/setup-testbed \
|
|
unstable debian-unstable.tar
|
|
$ cat << END > extlinux.conf
|
|
> default linux
|
|
> timeout 0
|
|
>
|
|
> label linux
|
|
> kernel /vmlinuz
|
|
> append initrd=/initrd.img root=/dev/vda1 rw console=ttyS0
|
|
END
|
|
$ guestfish -N debian-unstable.img=disk:8G -- \
|
|
part-disk /dev/sda mbr : \
|
|
part-set-bootable /dev/sda 1 true : \
|
|
mkfs ext4 /dev/sda1 : mount /dev/sda1 / : \
|
|
tar-in debian-unstable.tar / xattrs:true : \
|
|
upload /usr/lib/EXTLINUX/mbr.bin /boot/mbr.bin : \
|
|
copy-file-to-device /boot/mbr.bin /dev/sda size:440 : \
|
|
extlinux / : copy-in extlinux.conf / : sync : umount / : shutdown
|
|
$ qemu-img convert -O qcow2 debian-unstable.img debian-unstable.qcow2
|
|
|
|
As a debootstrap wrapper to run it without superuser privileges but using Linux
|
|
user namespaces instead. This fixes Debian bug #829134.
|
|
|
|
$ mmdebstrap --variant=custom --mode=unshare \
|
|
--setup-hook='debootstrap unstable "$1"' \
|
|
- debian-debootstrap.tar
|
|
|
|
Build a non-Debian chroot like Ubuntu bionic:
|
|
|
|
$ mmdebstrap --aptopt='Dir::Etc::Trusted
|
|
"/usr/share/keyrings/ubuntu-keyring-2012-archive.gpg"' bionic bionic.tar
|
|
|
|
If, for some reason, you cannot use a caching proxy like apt-cacher or
|
|
apt-cacher-ng, you can use the B<sync-in> and B<sync-out> special hooks to
|
|
synchronize a directory outside the chroot with F</var/cache/apt/archives>
|
|
inside the chroot.
|
|
|
|
$ mmdebstrap --variant=apt --skip=essential/unlink \
|
|
--setup-hook='mkdir -p ./cache "$1"/var/cache/apt/archives/' \
|
|
--setup-hook='sync-in ./cache /var/cache/apt/archives/' \
|
|
--customize-hook='sync-out /var/cache/apt/archives ./cache' \
|
|
unstable /dev/null
|
|
|
|
Instead of copying potentially large amounts of data with B<sync-in> you can
|
|
also use a bind-mount in combination with a C<file://> mirror to make packages
|
|
from the outside available inside the chroot:
|
|
|
|
$ mmdebstrap --variant=apt --skip=essential/unlink \
|
|
--setup-hook='mkdir "$1/tmp/mirror"' \
|
|
--setup-hook='mount -o ro,bind /tmp/mirror "$1/tmp/mirror"' \
|
|
--customize-hook='sync-out /var/cache/apt/archives ./cache' \
|
|
--customize-hook='umount "$1/tmp/mirror"; rmdir "$1/tmp/mirror";' \
|
|
unstable /dev/null file:///tmp/mirror http://deb.debian.org/debian
|
|
|
|
To automatically mount all directories referenced by C<file://> mirrors
|
|
into the chroot you can use a hook:
|
|
|
|
$ mmdebstrap --variant=apt \
|
|
--hook-dir=/usr/share/mmdebstrap/hooks/file-mirror-automount \
|
|
unstable /dev/null file:///tmp/mirror1 file:///tmp/mirror2
|
|
|
|
Create a system that can be used with docker:
|
|
|
|
$ mmdebstrap unstable | sudo docker import - debian
|
|
[...]
|
|
$ sudo docker run -it --rm debian whoami
|
|
root
|
|
$ sudo docker rmi debian
|
|
|
|
Create and boot a qemu virtual machine for an arbitrary architecture using
|
|
the B<debvm-create> wrapper script around B<mmdebstrap>:
|
|
|
|
$ debvm-create -r stable -- --architecture=riscv64
|
|
$ debvm-run
|
|
|
|
Create a system that can be used with podman:
|
|
|
|
$ mmdebstrap unstable | podman import - debian
|
|
[...]
|
|
$ podman run --network=none -it --rm debian whoami
|
|
root
|
|
$ podman rmi debian
|
|
|
|
As a docker/podman replacement:
|
|
|
|
$ mmdebstrap unstable chroot.tar
|
|
[...]
|
|
$ mmdebstrap --variant=custom --skip=update,tar-in/mknod \
|
|
--setup-hook='tar-in chroot.tar /' \
|
|
--customize-hook='chroot "$1" whoami' unstable /dev/null
|
|
[...]
|
|
root
|
|
$ rm chroot.tar
|
|
|
|
You can re-use a chroot tarball created with mmdebstrap for further refinement.
|
|
Say you want to create a minimal chroot and a chroot with more packages
|
|
installed, then instead of downloading and installing the essential packages
twice, you can build on top of the already present minimal chroot:
|
|
|
|
$ mmdebstrap --variant=apt unstable chroot.tar
|
|
$ mmdebstrap --variant=custom --skip=update,setup,cleanup,tar-in/mknod \
|
|
--setup-hook='tar-in chroot.tar /' \
|
|
--customize-hook='chroot "$1" apt-get install --yes pkg1 pkg2' \
|
|
'' chroot-full.tar
|
|
|
|
=head1 ENVIRONMENT VARIABLES
|
|
|
|
=over 8
|
|
|
|
=item C<SOURCE_DATE_EPOCH>
|
|
|
|
By setting C<SOURCE_DATE_EPOCH> the result will be reproducible across multiple
|
|
runs with the same options and mirror content. Note that for debootstrap
|
|
compatibility, B<mmdebstrap> will copy the host's F</etc/resolv.conf> and
|
|
F</etc/hostname> into the chroot. This means that the B<mmdebstrap> output will
|
|
differ if it is run on machines with differing F</etc/resolv.conf> and
|
|
F</etc/hostname> contents. To make the result reproducible across different
|
|
hosts, you need to manually either delete both files from the output:
|
|
|
|
$ mmdebstrap --customize-hook='rm "$1"/etc/resolv.conf' \
|
|
--customize-hook='rm "$1"/etc/hostname' ...
|
|
|
|
or fill them with reproducible content:
|
|
|
|
$ mmdebstrap --customize-hook='echo nameserver X > "$1"/etc/resolv.conf' \
|
|
--customize-hook='echo host > "$1"/etc/hostname' ...
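
For example, assuming the build is driven from a git checkout, a common
reproducible-builds convention is to derive the timestamp from the latest
commit before invoking B<mmdebstrap>:

    $ export SOURCE_DATE_EPOCH=$(git log -1 --pretty=%ct)
    $ mmdebstrap unstable unstable-chroot.tar
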
|
|
|
|
=item C<TMPDIR>
|
|
|
|
When creating a tarball, a temporary directory is populated with the rootfs
|
|
before the tarball is packed. The location of that temporary directory will be
|
|
in F</tmp> or the location pointed to by C<TMPDIR> if that environment variable
|
|
is set. Setting C<TMPDIR> to a different directory than F</tmp> is useful if
|
|
you have F</tmp> on a tmpfs that is too small for your rootfs.
|
|
|
|
If you set C<TMPDIR> in B<unshare> mode, then the unshared user must be able to
|
|
access the directory. This means that the directory itself must be
|
|
world-writable and all its ancestors must be at least world-executable.
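
For example, to use F</var/tmp> (which is world-writable with the sticky bit
on typical systems) instead of a possibly too small tmpfs-backed F</tmp>:

    $ TMPDIR=/var/tmp mmdebstrap --mode=unshare unstable unstable-chroot.tar
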
|
|
|
|
Since C<TMPDIR> is only valid outside the chroot, the variable is unset
|
|
when running hook scripts. If you need a valid temporary directory in a hook,
|
|
consider using F</tmp> inside your target directory.
|
|
|
|
=back
|
|
|
|
=head1 DEBOOTSTRAP
|
|
|
|
This section lists some differences to debootstrap.
|
|
|
|
=over 8
|
|
|
|
=item * More than one mirror possible
|
|
|
|
=item * Default mirrors for stable releases include updates and security mirror
|
|
|
|
=item * Multiple ways to operate as non-root: fakechroot and unshare
|
|
|
|
=item * Twice as fast
|
|
|
|
=item * Can create a chroot with only C<Essential:yes> packages and their deps
|
|
|
|
=item * Reproducible output by default if $SOURCE_DATE_EPOCH is set
|
|
|
|
=item * Can create output on filesystems with nodev set
|
|
|
|
=item * apt cache and lists are cleaned at the end
|
|
|
|
=item * foreign architecture chroots using qemu-user
|
|
|
|
=back
|
|
|
|
Limitations in comparison to debootstrap:
|
|
|
|
=over 8
|
|
|
|
=item * Only runs on systems with apt installed (Debian and derivatives)
|
|
|
|
=item * No I<SCRIPT> argument (use hooks instead)
|
|
|
|
=item * Some debootstrap options don't exist, namely:
|
|
|
|
I<--second-stage>, I<--exclude>, I<--resolve-deps>, I<--force-check-gpg>,
|
|
I<--merged-usr>, I<--no-merged-usr> and I<--cache-dir>.
|
|
|
|
=back
|
|
|
|
=head1 MERGED-/USR
|
|
|
|
B<mmdebstrap> will create a merged-/usr chroot or not depending on whether
|
|
packages setting up merged-/usr (i.e. the B<usrmerge> package) are installed or
|
|
not. In Debian, the essential package B<init-system-helpers> depends on the
|
|
B<usrmerge> package, starting with Debian 12 (Bookworm).
|
|
|
|
Before Debian 12 (Bookworm), to force B<mmdebstrap> to create a chroot with
|
|
merged-/usr using symlinks, either explicitly install the B<usrmerge> package:
|
|
|
|
--include=usrmerge
|
|
|
|
or set up merged-/usr using the debootstrap method, which takes care of the
|
|
architecture specific symlinks and installs the B<usr-is-merged> package.
|
|
|
|
--hook-dir=/usr/share/mmdebstrap/hooks/merged-usr
|
|
|
|
To force B<mmdebstrap> to create a chroot without merged-/usr even after the
|
|
Debian 12 (Bookworm) release, you can use the following hook:
|
|
|
|
--hook-dir=/usr/share/mmdebstrap/hooks/no-merged-usr
|
|
|
|
This will write "this system will not be supported in the future" into
|
|
F</etc/unsupported-skip-usrmerge-conversion> inside the chroot and install the
|
|
B<usr-is-merged> package to avoid the installation of the B<usrmerge> package
|
|
and its dependencies.
|
|
|
|
If you are using B<mmdebstrap> in a setup where you do not know upfront whether
|
|
the chroot you are creating should be merged-/usr or not and you want to avoid
|
|
installation of the B<usrmerge> package and its dependencies, you can use:
|
|
|
|
--hook-dir=/usr/share/mmdebstrap/hooks/maybe-merged-usr
|
|
|
|
That hook will use the availability of the B<usr-is-merged> package to decide
|
|
whether to call the B<merged-usr> hook or not.
|
|
|
|
=head1 COMPRESSION
|
|
|
|
B<mmdebstrap> will choose a suitable compressor for the output tarball
|
|
depending on the filename extension. The following mapping from filename
|
|
extension to compressor applies:
|
|
|
|
extension compressor
|
|
--------------------
|
|
.tar none
|
|
.gz gzip
|
|
.tgz gzip
|
|
.taz gzip
|
|
.Z compress
|
|
.taZ compress
|
|
.bz2 bzip2
|
|
.tbz bzip2
|
|
.tbz2 bzip2
|
|
.tz2 bzip2
|
|
.lz lzip
|
|
.lzma lzma
|
|
.tlz lzma
|
|
.lzo lzop
|
|
.lz4 lz4
|
|
.xz xz
|
|
.txz xz
|
|
.zst zstd
|
|
|
|
To change compression-specific options, either use the respective environment
|
|
variables like B<XZ_OPT> or send B<mmdebstrap> output to your compressor of
|
|
choice with a pipe.
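
For example, to pass additional options to xz via its environment variable, or
to choose zstd and its options explicitly via a pipe:

    $ XZ_OPT=-9 mmdebstrap unstable unstable-chroot.tar.xz
    $ mmdebstrap unstable - | zstd -19 > unstable-chroot.tar.zst
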
=head1 WRAPPERS
|
|
|
|
=head2 debvm
|
|
|
|
B<debvm> helps create and run virtual machines for various Debian releases and
|
|
architectures. The tool B<debvm-create> can be used to create a virtual
|
|
machine image and the tool B<debvm-run> can be used to run such a machine
|
|
image. Their purpose primarily is testing software using qemu as a containment
|
|
technology. These are relatively thin wrappers around B<mmdebstrap> and
|
|
B<qemu>.
|
|
|
|
=head2 bdebstrap
|
|
|
|
B<bdebstrap> is a YAML config based multi-mirror Debian chroot creation tool.
|
|
B<bdebstrap> is an alternative to B<debootstrap> and a wrapper around
|
|
B<mmdebstrap> to support YAML based configuration files. It inherits all
|
|
benefits from B<mmdebstrap>. The support for configuration allows storing all
|
|
customization in a YAML file instead of having to use a very long one-liner
|
|
call to B<mmdebstrap>. It also allows layering multiple customizations on top
of each other, e.g. to support flavors of an image.
|
|
|
|
=head1 BUGS
|
|
|
|
https://gitlab.mister-muffin.de/josch/mmdebstrap/issues
|
|
|
|
https://bugs.debian.org/src:mmdebstrap
|
|
|
|
As of version 1.20.9, dpkg does not provide facilities preventing it from
|
|
reading the dpkg configuration of the machine running B<mmdebstrap>.
|
|
Therefore, until this dpkg limitation is fixed, a default dpkg configuration is
|
|
recommended on machines running B<mmdebstrap>. If you are using B<mmdebstrap>
|
|
as the non-root user, then as a workaround you could run C<chmod 600
|
|
/etc/dpkg/dpkg.cfg.d/*> so that the config files are only accessible by the
|
|
root user. See Debian bug #808203.
|
|
|
|
With apt versions before 2.1.16, setting C<[trusted=yes]> or
|
|
C<Acquire::AllowInsecureRepositories "1"> to allow signed archives without a
|
|
known public key or unsigned archives will fail because of a gpg warning in the
|
|
apt output. Since apt does not communicate its status via any other means than
|
|
human readable strings, and because B<mmdebstrap> wants to treat transient
|
|
network errors as errors, B<mmdebstrap> treats any warning from "apt-get
|
|
update" as an error.
|
|
|
|
=head1 SEE ALSO
|
|
|
|
L<debootstrap(8)>, L<debvm(1)>, L<bdebstrap(1)>
|
|
|
|
=cut
|
|
|
|
# vim: expandtab tabstop=4 softtabstop=4 shiftwidth=4 ft=perl tw=79
|