forked from josch/mmdebstrap
Compare commits
9 commits
2767b051bc ... 594ea3c72e

Commits (SHA1):
594ea3c72e
3f79c18a0d
0378c101bb
88a031477a
c51fb24c7b
236b84a486
bd5d3c3dab
ebfac91738
ccd4b5c163
2 changed files with 239 additions and 70 deletions
mmdebstrap (211)
@@ -42,6 +42,7 @@ use Carp;
 use Term::ANSIColor;
 use Socket;
 use Time::HiRes;
+use Math::BigInt;
 use version;

 ## no critic (InputOutput::RequireBriefOpen)
@@ -175,6 +176,25 @@ sub error {
     }
 }

+# The encoding of dev_t is MMMM Mmmm mmmM MMmm, where M is a hex digit of
+# the major number and m is a hex digit of the minor number.
+sub major {
+    my $rdev = shift;
+    my $right
+      = Math::BigInt->from_hex("0x00000000000fff00")->band($rdev)->brsft(8);
+    my $left
+      = Math::BigInt->from_hex("0xfffff00000000000")->band($rdev)->brsft(32);
+    return $right->bior($left);
+}
+
+sub minor {
+    my $rdev = shift;
+    my $right = Math::BigInt->from_hex("0x00000000000000ff")->band($rdev);
+    my $left
+      = Math::BigInt->from_hex("0x00000ffffff00000")->band($rdev)->brsft(12);
+    return $right->bior($left);
+}
+
 # check whether a directory is mounted by comparing the device number of the
 # directory itself with its parent
 sub is_mountpoint {
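The new major() and minor() helpers decode st_rdev the same way the glibc macros do; Math::BigInt is presumably used so the 64-bit masks stay exact on perls without native 64-bit integers. A quick cross-check in Python (illustrative only, not part of the patch; assumes a Linux system where /dev/null is the character device 1:3):

    import os
    import stat

    # Cross-check of the bit layout: on Linux, /dev/null is the character
    # device with major 1 and minor 3.
    st = os.stat("/dev/null")
    assert stat.S_ISCHR(st.st_mode)
    rdev = st.st_rdev

    print(os.major(rdev), os.minor(rdev))   # 1 3

    # Same result computed with the masks used in the Perl helpers above:
    major = ((rdev & 0x00000000000FFF00) >> 8) | ((rdev & 0xFFFFF00000000000) >> 32)
    minor = (rdev & 0x00000000000000FF) | ((rdev & 0x00000FFFFFF00000) >> 12)
    print(major, minor)                     # 1 3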
@@ -221,7 +241,7 @@ sub get_tar_compressor {
     } elsif ($filename =~ /\.(xz|txz)$/) {
         return ['xz', '--threads=0'];
     } elsif ($filename =~ /\.zst$/) {
-        return ['zstd'];
+        return ['zstd', '--threads=0'];
     }
     return;
 }
@@ -837,7 +857,23 @@ sub run_apt_progress {
     $line_has_error = sub {
         # apt-get doesn't report a non-zero exit if the update failed.
         # Thus, we have to parse its output. See #778357, #776152, #696335
-        # and #745735
+        # and #745735 for the parsing bugs as well as #594813, #696335,
+        # #776152, #778357 and #953726 for non-zero exit on transient
+        # network errors.
+        #
+        # For example, we want to fail with the following warning:
+        # W: Some index files failed to download. They have been ignored,
+        # or old ones used instead.
+        # But since this message is meant for human consumption it is not
+        # guaranteed to be stable across different apt versions and may
+        # change arbitrarily in the future. Thus, we error out on any W:
+        # lines as well. The downside is, that apt also unconditionally
+        # and by design prints a warning for unsigned repositories, even
+        # if they were allowed with Acquire::AllowInsecureRepositories "1"
+        # or with trusted=yes.
+        #
+        # A workaround was introduced by apt 2.1.16 with the --error-on=any
+        # option to apt-get update.
         if ($_[0] =~ /^(W: |Err:)/) {
             return 1;
         }
@@ -1936,12 +1972,30 @@ sub run_setup() {
 sub run_update() {
     my $options = shift;

-    info "running apt-get update...";
-    run_apt_progress({
+    my $aptversion = version->new(0);
+    {
+        my $pid = open my $fh, '-|', 'apt-get',
+          '--version' // error "failed to fork(): $!";
+        chomp(my $firstline = <$fh>);
+        close $fh;
+        if ( $? == 0
+            and $firstline =~ /^apt ([0-9]+\.[0-9]+\.[0-9]+) \([a-z0-9-]+\)$/)
+        {
+            $aptversion = version->new($1);
+        }
+    }
+    my $aptopts = {
         ARGV => ['apt-get', 'update'],
         CHDIR => $options->{root},
-        FIND_APT_WARNINGS => 1
-    });
+    };
+    if ($aptversion < "2.1.16") {
+        $aptopts->{FIND_APT_WARNINGS} = 1;
+    } else {
+        push @{ $aptopts->{ARGV} }, '--error-on=any';
+    }
+
+    info "running apt-get update...";
+    run_apt_progress($aptopts);
+

     # check if anything was downloaded at all
     {
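run_update() now feature-detects the apt version first and only falls back to scraping warnings when --error-on=any is unavailable. A stand-alone sketch of the same gate in Python (hypothetical; mmdebstrap itself does this in Perl as shown above, and the first line of `apt-get --version` looks like "apt 2.6.1 (amd64)"):

    import re
    import subprocess

    # Parse the apt version, then decide between --error-on=any (apt >= 2.1.16)
    # and scanning the output for "W: " / "Err:" lines.
    out = subprocess.run(["apt-get", "--version"], capture_output=True, text=True)
    m = re.match(r"^apt ([0-9]+)\.([0-9]+)\.([0-9]+)", out.stdout)
    aptversion = tuple(map(int, m.groups())) if m else (0, 0, 0)

    argv = ["apt-get", "update"]
    find_apt_warnings = aptversion < (2, 1, 16)
    if not find_apt_warnings:
        argv.append("--error-on=any")  # apt fails on download errors by itself
    print(argv, find_apt_warnings)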
@@ -2995,7 +3049,16 @@ sub run_cleanup() {
     if (any { $_ eq 'cleanup/apt' } @{ $options->{skip} }) {
         info "skipping cleanup/apt as requested";
     } else {
+        if ( none { $_ eq 'cleanup/apt/lists' } @{ $options->{skip} }
+            and none { $_ eq 'cleanup/apt/cache' } @{ $options->{skip} }) {
             info "cleaning package lists and apt cache...";
+        }
+        if (any { $_ eq 'cleanup/apt/lists' } @{ $options->{skip} }) {
+            info "skipping cleanup/apt/lists as requested";
+        } else {
+            if (any { $_ eq 'cleanup/apt/cache' } @{ $options->{skip} }) {
+                info "cleaning package lists...";
+            }
             run_apt_progress({
                 ARGV => [
                     'apt-get', '--option',
@@ -3004,8 +3067,16 @@ sub run_cleanup() {
                 ],
                 CHDIR => $options->{root},
             });
+        }
+        if (any { $_ eq 'cleanup/apt/cache' } @{ $options->{skip} }) {
+            info "skipping cleanup/apt/cache as requested";
+        } else {
+            if (any { $_ eq 'cleanup/apt/lists' } @{ $options->{skip} }) {
+                info "cleaning apt cache...";
+            }
             run_apt_progress(
                 { ARGV => ['apt-get', 'clean'], CHDIR => $options->{root} });
+        }

         # apt since 1.6 creates the auxfiles directory. If apt inside the
         # chroot is older than that, then it will not know how to clean it.
@@ -4937,30 +5008,37 @@ sub main() {
               . " signed-by value";
             last;
         }
+        # initialize gpg trustdb with empty one
+        {
+            `@gpgcmd --update-trustdb >/dev/null 2>/dev/null`;
+            $? == 0 or error "gpg failed to initialize trustdb: $?";
+        }
         # find all the fingerprints of the keys apt currently
         # knows about
-        my @keyringopts = ();
+        my @keyrings = ();
        opendir my $dh, "$options->{apttrustedparts}"
          or error "cannot read $options->{apttrustedparts}";
        while (my $filename = readdir $dh) {
            if ($filename !~ /\.(asc|gpg)$/) {
                next;
            }
-           push @keyringopts, '--keyring',
-             "$options->{apttrustedparts}/$filename";
+           $filename = "$options->{apttrustedparts}/$filename";
+           # skip empty keyrings
+           -s "$filename" || next;
+           push @keyrings, "$filename";
        }
        closedir $dh;
-       if (-e $options->{apttrusted}) {
-           push @keyringopts, '--keyring', $options->{apttrusted};
+       if (-s $options->{apttrusted}) {
+           push @keyrings, $options->{apttrusted};
        }
        my @aptfingerprints = ();
-       if (scalar @keyringopts == 0) {
+       if (scalar @keyrings == 0) {
            $signedby = " [signed-by=\"$keyring\"]";
            last;
        }
        {
-           open my $fh, '-|', @gpgcmd, @keyringopts, '--with-colons',
-             '--list-keys' // error "failed to fork(): $!";
+           open(my $fh, '-|', @gpgcmd, '--with-colons', '--show-keys',
+               @keyrings) // error "failed to fork(): $!";
            while (my $line = <$fh>) {
                if ($line !~ /^fpr:::::::::([^:]+):/) {
                    next;
@@ -4981,9 +5059,8 @@ sub main() {
        # the case
        my @suitefingerprints = ();
        {
-           open my $fh, '-|', @gpgcmd, '--keyring', $keyring,
-             '--with-colons',
-             '--list-keys' // error "failed to fork(): $!";
+           open(my $fh, '-|', @gpgcmd, '--with-colons', '--show-keys',
+               $keyring) // error "failed to fork(): $!";
            while (my $line = <$fh>) {
                if ($line !~ /^fpr:::::::::([^:]+):/) {
                    next;
@@ -5143,7 +5220,22 @@ sub main() {

     # figure out the right format
     if ($format eq 'auto') {
-        if ($options->{target} eq '/dev/null') {
+        # (stat(...))[6] is the device identifier which contains the major and
+        # minor numbers for character special files
+        # major 1 and minor 3 is /dev/null on Linux
+        if ( $options->{target} eq '/dev/null'
+            and $OSNAME eq 'linux'
+            and -c '/dev/null'
+            and major((stat("/dev/null"))[6]) == 1
+            and minor((stat("/dev/null"))[6]) == 3) {
+            $format = 'null';
+        } elsif ($options->{target} eq '-'
+            and $OSNAME eq 'linux'
+            and major((stat(STDOUT))[6]) == 1
+            and minor((stat(STDOUT))[6]) == 3) {
+            # by checking the major and minor number of the STDOUT fd we also
+            # can detect redirections to /dev/null and choose the null format
+            # accordingly
             $format = 'null';
         } elsif ($options->{target} ne '-' and -d $options->{target}) {
             $format = 'directory';
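The second branch selects the null format by inspecting the device number behind the standard output file descriptor, which also catches "mmdebstrap ... - > /dev/null". The same check, sketched in Python (illustrative; assumes Linux, where /dev/null is device 1:3):

    import os
    import stat

    # Is standard output already connected to /dev/null? If so, writing a
    # tarball there would be pointless and the 'null' format can be chosen.
    def stdout_is_dev_null():
        st = os.fstat(1)             # fstat() of file descriptor 1 (stdout)
        return (
            stat.S_ISCHR(st.st_mode)
            and os.major(st.st_rdev) == 1
            and os.minor(st.st_rdev) == 3
        )

    print(stdout_is_dev_null())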
@@ -5220,6 +5312,11 @@ sub main() {
         error "the $format format is unable to write to standard output";
     }

+    if ($format eq 'null'
+        and none { $_ eq $options->{target} } ('-', '/dev/null')) {
+        info "ignoring target $options->{target} with null format";
+    }
+
     if (any { $_ eq $format } ('tar', 'squashfs', 'ext2', 'null')) {
         if ($format ne 'null') {
             if ( any { $_ eq $options->{variant} } ('extract', 'custom')
@@ -5417,7 +5514,9 @@ sub main() {
     );
     # tar2sqfs and genext2fs do not support extended attributes
     if ($format eq "squashfs") {
-        warning "tar2sqfs does not support extended attributes";
+        warning("tar2sqfs does not support extended attributes"
+              . " from the 'system' namespace");
+        push @taropts, '--xattrs', '--xattrs-exclude=system.*';
     } elsif ($format eq "ext2") {
         warning "genext2fs does not support extended attributes";
     } else {
@@ -5771,9 +5870,9 @@ sub main() {
     # change signal handler message
     $waiting_for = "cleanup";

-    if (any { $_ eq $format } ('directory', 'null')) {
+    if (any { $_ eq $format } ('directory')) {
         # nothing to do
-    } elsif (any { $_ eq $format } ('tar', 'squashfs', 'ext2')) {
+    } elsif (any { $_ eq $format } ('tar', 'squashfs', 'ext2', 'null')) {
         if (!-e $options->{root}) {
             error "$options->{root} does not exist";
         }
@@ -6125,11 +6224,14 @@ Example: Setup chroot for installing a sub-essential busybox-based chroot with

     --setup-hook='mkdir -p "$1/bin"'
     --setup-hook='for p in awk cat chmod chown cp diff echo env grep less ln
-        mkdir mount rm rmdir sed sh sleep sort touch uname; do
+        mkdir mount rm rmdir sed sh sleep sort touch uname mktemp; do
         ln -s busybox "$1/bin/$p"; done'
     --setup-hook='echo root:x:0:0:root:/root:/bin/sh > "$1/etc/passwd"'
     --setup-hook='printf "root:x:0:\nmail:x:8:\nutmp:x:43:\n" > "$1/etc/group"'

+For a more elegant way to setup merged-/usr via symlinks and for setting up a
+sub-essential busybox-based chroot, see the B<--hook-dir> option below.
+
 =item B<--extract-hook>=I<command>

 Execute arbitrary I<command>s after the Essential:yes packages have been
@@ -6201,10 +6303,14 @@ if the scripts in two directories depend upon each other, the scripts must be
 placed into a common directory and be named such that they get added in the
 correct order.

-Example: Run mmdebstrap with eatmydata
+Example 1: Run mmdebstrap with eatmydata

     --hook-dir=/usr/share/mmdebstrap/hooks/eatmydata

+Example 2: Setup chroot for installing a sub-essential busybox-based chroot
+
+    --hook-dir=/usr/share/mmdebstrap/hooks/busybox
+
 =item B<--skip>=I<stage>[,I<stage>,...]

 B<mmdebstrap> tries hard to implement sensible defaults and will try to stop
@@ -6399,16 +6505,17 @@ Without that option the default format is I<auto>. The following formats exist:

 When selecting this format (the default), the actual format will be inferred
 from the I<TARGET> positional argument. If I<TARGET> was not specified, then
-the B<tar> format will be chosen. If I<TARGET> happens to be F</dev/null>, then
-the B<null> format will be chosen. If I<TARGET> is an existing directory, and
-does not equal to C<->, then the B<directory> format will be chosen. If
-I<TARGET> ends with C<.tar> or with one of the filename extensions listed in
-the section B<COMPRESSION>, or if I<TARGET> equals C<->, or if I<TARGET> is a
-named pipe (fifo) or if I<TARGET> is a character special file like
-F</dev/null>, then the B<tar> format will be chosen. If I<TARGET> ends with
-C<.squashfs> or C<.sqfs>, then the B<squashfs> format will be chosen. If
-<TARGET> ends with C<.ext2> then the B<ext2> format will be chosen. If none of
-these conditions apply, the B<directory> format will be chosen.
+the B<tar> format will be chosen. If I<TARGET> happens to be F</dev/null> or if
+standard output is F</dev/null>, then the B<null> format will be chosen. If
+I<TARGET> is an existing directory, and does not equal to C<->, then the
+B<directory> format will be chosen. If I<TARGET> ends with C<.tar> or with one
+of the filename extensions listed in the section B<COMPRESSION>, or if
+I<TARGET> equals C<->, or if I<TARGET> is a named pipe (fifo) or if I<TARGET>
+is a character special file, then the B<tar> format will be chosen. If
+I<TARGET> ends with C<.squashfs> or C<.sqfs>, then the B<squashfs> format will
+be chosen. If <TARGET> ends with C<.ext2> then the B<ext2> format will be
+chosen. If none of these conditions apply, the B<directory> format will be
+chosen.

 =item B<directory>, B<dir>

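The inference order above can be summarised as a small decision function. The following Python sketch is illustrative only: the helper names, the abridged extension list and the stdout check are assumptions for this illustration, not mmdebstrap's actual code:

    import os
    import stat

    # Abridged; see the COMPRESSION section for the full extension set.
    COMPRESSED_TAR_EXTS = (".tar", ".tar.gz", ".tgz", ".tar.xz", ".txz", ".tar.zst")

    def stdout_is_dev_null():
        st = os.fstat(1)
        return stat.S_ISCHR(st.st_mode) and \
            (os.major(st.st_rdev), os.minor(st.st_rdev)) == (1, 3)

    def infer_format(target):
        if target is None:
            return "tar"             # no TARGET given
        if target == "/dev/null" or (target == "-" and stdout_is_dev_null()):
            return "null"
        if target != "-" and os.path.isdir(target):
            return "directory"
        st = os.stat(target) if os.path.exists(target) else None
        special = st is not None and \
            (stat.S_ISFIFO(st.st_mode) or stat.S_ISCHR(st.st_mode))
        if target.endswith(COMPRESSED_TAR_EXTS) or target == "-" or special:
            return "tar"
        if target.endswith((".squashfs", ".sqfs")):
            return "squashfs"
        if target.endswith(".ext2"):
            return "ext2"
        return "directory"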
@@ -6440,8 +6547,11 @@ C<tar2sqfs> utility, which will create an xz compressed squashfs image with a
 blocksize of 1048576 bytes in I<TARGET>. The special I<TARGET> C<-> does not
 work with this format because C<tar2sqfs> can only write to a regular file. If
 you need your squashfs image be named C<->, then just explicitly pass the
-relative path to it like F<./->. Since C<tar2sqfs> does not support extended
-attributes, the resulting image will not contain them.
+relative path to it like F<./->. The C<tar2sqfs> tool only supports a limited
+set of extended attribute prefixes. Therefore, extended attributes are disabled
+in the resulting image. If you need them, create a tarball first and remove the
+extended attributes from its pax headers. Refer to the B<EXAMPLES> section for
+how to achieve this.

 =item B<ext2>

@@ -6462,7 +6572,8 @@ A temporary chroot directory will be created in C<$TMPDIR> or F</tmp> if
 C<$TMPDIR> is not set. After the bootstrap is complete, the temporary chroot
 will be deleted without being part of the output. This is most useful when the
 desired artifact is generated inside the chroot and it is transferred using
-special hooks such as B<sync-out>.
+special hooks such as B<sync-out>. It is also useful in situations where only
+the exit code or stdout or stderr of a process run in a hook is of interest.

 =back

@@ -6675,7 +6786,7 @@ Performs cleanup tasks like:

 =over 4

-=item * Removes the package lists and apt cache. This can be disabled using B<--skip=cleanup/apt>.
+=item * Removes the package lists (unless B<--skip=cleanup/apt/lists>) and apt cache (unless B<--skip=cleanup/apt/cache>). Both removals can be disabled by using B<--skip=cleanup/apt>.

 =item * Remove all files that were put into the chroot for setup purposes, like F</etc/apt/apt.conf.d/00mmdebstrap>, the temporary apt config and the qemu-user-static binary. This can be disabled using B<--skip=cleanup/mmdebstrap>.

@@ -6719,7 +6830,16 @@ Instead of a tarball, a squashfs image can be created:

 By default, B<mmdebstrap> runs B<tar2sqfs> with C<--no-skip --exportable
 --compressor xz --block-size 1048576>. To choose a different set of options,
-pipe the output of B<mmdebstrap> into B<tar2sqfs> manually.
+and to filter out all extended attributes not supported by B<tar2sqfs>, pipe
+the output of B<mmdebstrap> into B<tar2sqfs> manually like so:
+
+    $ mmdebstrap unstable \
+        | mmtarfilter --pax-exclude='*' \
+                      --pax-include='SCHILY.xattr.user.*' \
+                      --pax-include='SCHILY.xattr.trusted.*' \
+                      --pax-include='SCHILY.xattr.security.*' \
+        | tar2sqfs --quiet --no-skip --force --exportable --compressor xz \
+                   --block-size 1048576 unstable-chroot.squashfs

 By default, debootstrapping a stable distribution will add mirrors for security
 and updates to the sources.list.
@@ -6943,7 +7063,7 @@ https://gitlab.mister-muffin.de/josch/mmdebstrap/issues

 https://bugs.debian.org/src:mmdebstrap

-As of version 1.19.5, dpkg does not provide facilities preventing it from
+As of version 1.20.9, dpkg does not provide facilities preventing it from
 reading the dpkg configuration of the machine running B<mmdebstrap>.
 Therefore, until this dpkg limitation is fixed, a default dpkg configuration is
 recommended on machines running B<mmdebstrap>. If you are using B<mmdebstrap>
@@ -6951,12 +7071,13 @@ as the non-root user, then as a workaround you could run C<chmod 600
 /etc/dpkg/dpkg.cfg.d/*> so that the config files are only accessible by the
 root user.

-Setting [trusted=yes] to allow signed archives without a known public key will
-fail because of a gpg warning in the apt output. Since apt does not
-communicate its status via any other means than human readable strings,
-B<mmdebstrap> treats any warning from "apt-get update" as an error. Fixing
-this will require apt to provide a machine readable status interface. See
-Debian bugs #778357, #776152, #696335, and #745735.
+With apt versions before 2.1.16, setting C<[trusted=yes]> or
+C<Acquire::AllowInsecureRepositories "1"> to allow signed archives without a
+known public key or unsigned archives will fail because of a gpg warning in the
+apt output. Since apt does not communicate its status via any other means than
+human readable strings, and because B<mmdebstrap> wants to treat transient
+network errors as errors, B<mmdebstrap> treats any warning from "apt-get
+update" as an error.

 =head1 SEE ALSO

tarfilter (74)
@@ -25,12 +25,20 @@ import fnmatch
 import re


-class FilterAction(argparse.Action):
+class PathFilterAction(argparse.Action):
     def __call__(self, parser, namespace, values, option_string=None):
-        items = getattr(namespace, "filter", [])
+        items = getattr(namespace, "pathfilter", [])
         regex = re.compile(fnmatch.translate(values))
         items.append((self.dest, regex))
-        setattr(namespace, "filter", items)
+        setattr(namespace, "pathfilter", items)
+
+
+class PaxFilterAction(argparse.Action):
+    def __call__(self, parser, namespace, values, option_string=None):
+        items = getattr(namespace, "paxfilter", [])
+        regex = re.compile(fnmatch.translate(values))
+        items.append((self.dest, regex))
+        setattr(namespace, "paxfilter", items)


 def main():
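PaxFilterAction stores one (dest, compiled regex) pair per command-line occurrence, with fnmatch.translate() turning the shell-style pattern into an anchored regular expression that is later matched against each pax header key. A minimal illustration:

    import fnmatch
    import re

    # fnmatch.translate() turns the shell-style pattern into an anchored regex.
    regex = re.compile(fnmatch.translate("SCHILY.xattr.user.*"))
    print(bool(regex.match("SCHILY.xattr.user.mykey")))               # True
    print(bool(regex.match("SCHILY.xattr.system.posix_acl_access")))  # False

Because each occurrence appends one pair, the filters are evaluated in command-line order, so a later --pax-include can re-include headers that an earlier --pax-exclude matched.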
@@ -39,22 +47,46 @@ def main():
 Filters a tarball on standard input by the same rules as the dpkg --path-exclude
 and --path-include options and writes resulting tarball to standard output. See
 dpkg(1) for information on how these two options work in detail.
+
+Similarly, filter out unwanted pax extended headers. This is useful in cases
+where a tool only accepts certain xattr prefixes. For example tar2sqfs only
+supports SCHILY.xattr.user.*, SCHILY.xattr.trusted.* and
+SCHILY.xattr.security.* but not SCHILY.xattr.system.posix_acl_default.*.
+
+Both types of options use Unix shell-style wildcards:
+
+    *      matches everything
+    ?      matches any single character
+    [seq]  matches any character in seq
+    [!seq] matches any character not in seq
 """
     )
     parser.add_argument(
         "--path-exclude",
         metavar="pattern",
-        action=FilterAction,
+        action=PathFilterAction,
         help="Exclude path matching the given shell pattern.",
     )
     parser.add_argument(
         "--path-include",
         metavar="pattern",
-        action=FilterAction,
+        action=PathFilterAction,
         help="Re-include a pattern after a previous exclusion.",
     )
+    parser.add_argument(
+        "--pax-exclude",
+        metavar="pattern",
+        action=PaxFilterAction,
+        help="Exclude pax header matching the given globbing pattern.",
+    )
+    parser.add_argument(
+        "--pax-include",
+        metavar="pattern",
+        action=PaxFilterAction,
+        help="Re-include a pax header after a previous exclusion.",
+    )
     args = parser.parse_args()
-    if not hasattr(args, "filter"):
+    if not hasattr(args, "pathfilter") and not hasattr(args, "paxfilter"):
         from shutil import copyfileobj

         copyfileobj(sys.stdin.buffer, sys.stdout.buffer)
@@ -63,36 +95,52 @@ dpkg(1) for information on how these two options work in detail.
     # same logic as in dpkg/src/filters.c/filter_should_skip()
     prefix_prog = re.compile(r"^([^*?[\\]*).*")

-    def filter_should_skip(member):
+    def path_filter_should_skip(member):
         skip = False
-        if not args.filter:
+        if not hasattr(args, "pathfilter"):
             return False
-        for (t, r) in args.filter:
+        for (t, r) in args.pathfilter:
             if r.match(member.name[1:]) is not None:
                 if t == "path_include":
                     skip = False
                 else:
                     skip = True
         if skip and (member.isdir() or member.issym()):
-            for (t, r) in args.filter:
+            for (t, r) in args.pathfilter:
                 if t != "path_include":
                     continue
                 prefix = prefix_prog.sub(r"\1", r.pattern)
                 prefix = prefix.rstrip("/")
                 if member.name[1:].startswith(prefix):
-                    if member.name == "./usr/share/doc/doc-debian":
-                        print("foo", prefix, "bar", file=sys.stderr)
                     return False
         return skip

+    def pax_filter_should_skip(header):
+        if not hasattr(args, "paxfilter"):
+            return False
+        skip = False
+        for (t, r) in args.paxfilter:
+            if r.match(header) is None:
+                continue
+            if t == "pax_include":
+                skip = False
+            else:
+                skip = True
+        return skip
+
     # starting with Python 3.8, the default format became PAX_FORMAT, so this
     # is only for compatibility with older versions of Python 3
     with tarfile.open(fileobj=sys.stdin.buffer, mode="r|*") as in_tar, tarfile.open(
         fileobj=sys.stdout.buffer, mode="w|", format=tarfile.PAX_FORMAT
     ) as out_tar:
         for member in in_tar:
-            if filter_should_skip(member):
+            if path_filter_should_skip(member):
                 continue
+            member.pax_headers = {
+                k: v
+                for k, v in member.pax_headers.items()
+                if not pax_filter_should_skip(k)
+            }
             if member.isfile():
                 with in_tar.extractfile(member) as file:
                     out_tar.addfile(member, file)
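The dict comprehension at the end keeps only those pax headers that survive the filters. A self-contained sketch using the filter combination from the man page example; the header keys and values below are made up for illustration:

    import fnmatch
    import re

    # Exclude everything, then re-include the three xattr namespaces that
    # tar2sqfs understands.
    paxfilter = [
        ("pax_exclude", re.compile(fnmatch.translate("*"))),
        ("pax_include", re.compile(fnmatch.translate("SCHILY.xattr.user.*"))),
        ("pax_include", re.compile(fnmatch.translate("SCHILY.xattr.trusted.*"))),
        ("pax_include", re.compile(fnmatch.translate("SCHILY.xattr.security.*"))),
    ]

    def pax_filter_should_skip(header):
        skip = False
        for t, r in paxfilter:
            if r.match(header) is None:
                continue
            skip = t != "pax_include"
        return skip

    headers = {
        "SCHILY.xattr.user.origin": "example",
        "SCHILY.xattr.security.capability": "cap_net_raw+ep",
        "SCHILY.xattr.system.posix_acl_access": "acl blob",
    }
    print({k: v for k, v in headers.items() if not pax_filter_should_skip(k)})
    # keeps the user.* and security.* keys, drops the system.* key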