Compare commits

...

5 commits

Author SHA1 Message Date
d3952de003 Let apt decide unpack order instead of sorting filenames
Now that the deb files can reside in different places, sorting them leads
to subtle differences in the order and hence in the created chroot. apt's
unpack order, on the other hand, might not be a good order (but why would
one sorted from a to z be any better?), but it is far more stable as it is
independent of the filenames.
2022-04-24 11:56:50 +02:00
e9fa78c438 Support file:// mirrors the same way copy:// is supported
As long as you can make it so that the same path to the deb file works
inside and outside of the chroot, using file:// as a mirror is no longer
a problem after the previous work.
2022-04-24 11:56:50 +02:00
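
A rough sketch of that precondition (illustrative Perl only, not code from this changeset; the standalone helper and its usage are assumptions): a .deb fetched via a file:// mirror must resolve under the same absolute path both outside and inside the chroot, for example via a bind-mount of the mirror directory.

#!/usr/bin/perl
# Illustrative check only, not part of mmdebstrap: verify that a .deb from a
# file:// mirror is visible under the same absolute path outside and inside
# the chroot (e.g. because the mirror directory is bind-mounted there too).
use strict;
use warnings;

my ($root, $deb) = @ARGV;
die "usage: $0 <chroot-dir> <absolute-path-to-deb>\n"
  unless defined $root && defined $deb;

if (-e $deb && -e "$root/$deb") {
    print "$deb is reachable from outside and inside the chroot\n";
} else {
    die "$deb not accessible from the chroot directory"
      . " -- use copy:// instead of file:// or a bind-mount\n";
}
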
71067316ee Ask apt to give us the deb filenames directly
Guessing filenames is boring. What if we could ask apt to tell us which
debs it downloaded (or found lying around elsewhere) directly? Turns out
we can, rather easily, avoiding a bunch of guesswork.
2022-04-24 11:56:48 +02:00
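
A minimal standalone sketch of this mechanism (illustrative only, not the mmdebstrap code; the example package name and temp-file handling are assumptions): apt plans an installation with Debug::pkgDpkgPm enabled so dpkg is never actually run, while a Dpkg::Pre-Install-Pkgs hook receives the .deb file names apt would hand to dpkg and writes them to a file, much like the run_apt_download_progress helper introduced in the diff below.

#!/usr/bin/perl
# Sketch: let apt report the .deb file names it would install, via a
# Dpkg::Pre-Install-Pkgs hook that receives those names on stdin.
use strict;
use warnings;
use File::Temp qw(tempfile);

my (undef, $listfile) = tempfile("listofdebs.XXXXXXXX", OPEN => 0, TMPDIR => 1);

# Debug::pkgDpkgPm=1 makes apt print the dpkg invocations instead of running
# them, while the Pre-Install-Pkgs hook still gets the package file names.
# "doc-debian" is only an example package.
system('apt-get', '--yes',
    '-oDebug::pkgDpkgPm=1',
    '-oDir::Log=/dev/null',
    "-oDpkg::Pre-Install-Pkgs::=cat > $listfile",
    'install', 'doc-debian') == 0
  or die "apt-get failed: $?";

open my $fh, '<', $listfile or die "failed to open $listfile for reading: $!";
chomp(my @debs = <$fh>);
close $fh;
unlink $listfile;
print "$_\n" for @debs;
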
487237f9ae Factor out downloading packages with apt 2022-04-24 11:55:30 +02:00
8b58dc583e Replace EDSP with EIPP usage obsoleting proxysolver
EIPP stands for "External Installation Planner Protocol" and is rather
similar to EDSP, but with the clear advantage that we can extract the
information we need more easily, as we can tell apt to write the file for
us rather than playing solver-in-the-middle. The problem space is also
much smaller, meaning less data for apt to generate and to pass through
our hands.

The idea here is simply that every package which doesn't have a Status
field in EIPP has the uninstalled status, and the only reason it is part
of the EIPP request is that we want to change this by installing it.
That could be verified via the Install header at the start of the
request, but this commit doesn't implement that.

Note that this means we need "more" than the download-only mode can
provide: either a simulation or "the real deal". Except we modify the
latter to be a fancy no-op.
2022-04-24 09:29:17 +02:00
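
To make the EIPP idea concrete, here is an illustrative parser (not code from this diff; the input path and field handling are simplified assumptions): stanzas without a Status field name packages that are currently uninstalled and are only part of the request because they are about to be installed.

#!/usr/bin/perl
# Illustrative only: pick the to-be-installed set out of an (uncompressed)
# EIPP request. apt normally logs these requests as /var/log/apt/eipp.log.xz.
use strict;
use warnings;

my $eipp = shift // 'eipp.request';
open my $fh, '<', $eipp or die "cannot open $eipp: $!";
local $/ = '';    # paragraph mode: one deb822 stanza per read

my @to_install;
while (my $stanza = <$fh>) {
    # the leading Request stanza carries no Package field and is skipped here
    my ($pkg) = $stanza =~ /^Package:\s*(\S+)/m or next;
    my ($ver) = $stanza =~ /^Version:\s*(\S+)/m;
    # no Status field => currently uninstalled => only in the request because
    # it is going to be installed
    push @to_install, [$pkg, $ver // ''] unless $stanza =~ /^Status:/m;
}
close $fh;
printf "%s %s\n", @$_ for @to_install;
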
3 changed files with 88 additions and 287 deletions

View file

@@ -89,13 +89,6 @@ if [ ! -e shared/tarfilter ] || [ tarfilter -nt shared/tarfilter ]; then
cp -a /usr/bin/mmtarfilter shared/tarfilter
fi
fi
if [ ! -e shared/proxysolver ] || [ proxysolver -nt shared/proxysolver ]; then
if [ -e ./proxysolver ]; then
cp -a proxysolver shared
else
cp -a /usr/lib/apt/solvers/mmdebstrap-dump-solution shared/proxysolver
fi
fi
if [ ! -e shared/ldconfig.fakechroot ] || [ ldconfig.fakechroot -nt shared/ldconfig.fakechroot ]; then
if [ -e ./ldconfig.fakechroot ]; then
cp -a ldconfig.fakechroot shared
@@ -1533,7 +1526,7 @@ else
skipped=$((skipped+1))
fi
print_header "mode=$defaultmode,variant=apt: fail with file:// mirror"
print_header "mode=$defaultmode,variant=apt: file:// mirror"
cat << END > shared/test.sh
#!/bin/sh
set -eu
@@ -1542,13 +1535,9 @@ if [ ! -e /mmdebstrap-testenv ]; then
echo "this test requires the cache directory to be mounted on /mnt and should only be run inside a container" >&2
exit 1
fi
ret=0
$CMD --mode=$defaultmode --variant=apt $DEFAULT_DIST /tmp/debian-chroot.tar "deb file:///mnt/cache/debian unstable main" || ret=\$?
$CMD --mode=$defaultmode --variant=apt $DEFAULT_DIST /tmp/debian-chroot.tar "deb file:///mnt/cache/debian $DEFAULT_DIST main"
tar -tf /tmp/debian-chroot.tar | sort | diff -u tar1.txt -
rm /tmp/debian-chroot.tar
if [ "\$ret" = 0 ]; then
echo expected failure but got exit \$ret >&2
exit 1
fi
END
if [ "$HAVE_QEMU" = "yes" ]; then
./run_qemu.sh
@@ -3079,7 +3068,7 @@ $CMD \$include --mode=$defaultmode --variant=$variant \
--setup-hook='sync-in "'"\$tmpdir"'" /var/cache/apt/archives/partial' \
$DEFAULT_DIST - $mirror > test1.tar
cmp orig.tar test1.tar
$CMD \$include --mode=$defaultmode --variant=$variant --skip=download/empty \
$CMD \$include --mode=$defaultmode --variant=$variant \
--customize-hook='touch "\$1"/var/cache/apt/archives/partial' \
--setup-hook='mkdir -p "\$1"/var/cache/apt/archives/' \
--setup-hook='sync-in "'"\$tmpdir"'" /var/cache/apt/archives/' \
@@ -3760,4 +3749,4 @@ if [ "$((skipped+runtests))" -ne "$total" ]; then
exit 1
fi
rm shared/test.sh shared/tar1.txt shared/tar2.txt shared/pkglist.txt shared/doc-debian.tar.list shared/mmdebstrap shared/taridshift shared/tarfilter shared/proxysolver
rm shared/test.sh shared/tar1.txt shared/tar2.txt shared/pkglist.txt shared/doc-debian.tar.list shared/mmdebstrap shared/taridshift shared/tarfilter

View file

@@ -874,28 +874,9 @@ sub run_dpkg_progress {
sub run_apt_progress {
my $options = shift;
my @debs = @{ $options->{PKGS} // [] };
my $tmpedsp;
if (exists $options->{EDSP_RES}) {
(undef, $tmpedsp) = tempfile(
"mmdebstrap.edsp.XXXXXXXXXXXX",
OPEN => 0,
TMPDIR => 1
);
}
my $get_exec = sub {
my @prefix = ();
my @opts = ();
if (exists $options->{EDSP_RES}) {
push @prefix, 'env', "APT_EDSP_DUMP_FILENAME=$tmpedsp";
if (-e "./proxysolver") {
# for development purposes, use the current directory if it
# contains a file called proxysolver
push @opts, ("-oDir::Bin::solvers=" . getcwd()),
'--solver=proxysolver';
} else {
push @opts, '--solver=mmdebstrap-dump-solution';
}
}
return (
@prefix,
@{ $options->{ARGV} },
@@ -950,38 +931,46 @@ sub run_apt_progress {
}
};
run_progress $get_exec, $line_handler, $line_has_error, $options->{CHDIR};
if (exists $options->{EDSP_RES}) {
info "parsing EDSP results...";
open my $fh, '<', $tmpedsp
or error "failed to open $tmpedsp for reading: $!";
my $inst = 0;
my $pkg;
my $ver;
while (my $line = <$fh>) {
chomp $line;
if ($line ne "") {
if ($line =~ /^Install: \d+/) {
$inst = 1;
} elsif ($line =~ /^Package: (.*)/) {
$pkg = $1;
} elsif ($line =~ /^Version: (.*)/) {
$ver = $1;
}
next;
}
if ($inst == 1 && defined $pkg && defined $ver) {
push @{ $options->{EDSP_RES} }, [$pkg, $ver];
}
$inst = 0;
undef $pkg;
undef $ver;
}
close $fh;
unlink $tmpedsp;
}
return;
}
sub run_apt_download_progress {
my $options = shift;
my $tmplistofdebs;
if ($options->{dryrun}) {
info "simulate downloading packages with apt...";
} else {
info "downloading packages with apt...";
(undef, $tmplistofdebs) = tempfile(
"mmdebstrap.listofdebs.XXXXXXXXXXXX",
OPEN => 0,
TMPDIR => 1
);
}
run_apt_progress({
ARGV => [
'apt-get',
'--yes',
'-oDebug::pkgDpkgPm=1',
'-oDir::Log=/dev/null',
$options->{dryrun}
? '-oAPT::Get::Simulate=true'
: "-oDpkg::Pre-Install-Pkgs::=cat > $tmplistofdebs",
@{ $options->{APT_ARGV} },
],
});
if ($tmplistofdebs) {
open my $fh, '<', $tmplistofdebs
or error "failed to open $tmplistofdebs for reading: $!";
my @listofdebs = <$fh>;
close $fh;
unlink $tmplistofdebs;
chomp(@listofdebs);
return @listofdebs;
}
return [];
}
sub run_chroot {
my $cmd = shift;
my $options = shift;
@@ -2034,26 +2023,14 @@ sub run_update() {
sub run_download() {
my $options = shift;
# We use /var/cache/apt/archives/ to figure out which packages apt chooses
# to install. That's why the directory must be empty if:
# - /var/cache/apt/archives exists, and
# - no simulation run is done, and
# - the variant is not extract or custom or the number to be
# installed packages not zero
#
# We could also unconditionally use the proxysolver and then "apt-get
# download" any missing packages but using the proxysolver requires
# /usr/lib/apt/solvers/apt from the apt-utils package and we want to avoid
# that dependency.
#
# In the future we want to replace downloading packages with "apt-get
# install --download-only" and installing them with dpkg by just installing
# the essential packages with apt from the outside with
# DPkg::Chroot-Directory. We are not doing that because then the preinst
# script of base-passwd will not be called early enough and packages will
# fail to install because they are missing /etc/passwd.
# install" and installing them with dpkg by just installing the essential
# packages with apt from the outside with DPkg::Chroot-Directory.
# We are not doing that because then the preinst script of base-passwd will
# not be called early enough and packages will fail to install because they
# are missing /etc/passwd.
my @cached_debs = ();
my @dl_debs = ();
my @dl_debs;
if (
!$options->{dryrun}
&& ((none { $_ eq $options->{variant} } ('extract', 'custom'))
@@ -2073,14 +2050,6 @@ sub run_download() {
push @cached_debs, $deb;
}
closedir $dh;
if (scalar @cached_debs > 0) {
if (any { $_ eq 'download/empty' } @{ $options->{skip} }) {
info "skipping download/empty as requested";
} else {
error("/var/cache/apt/archives/ inside the chroot contains: "
. (join ', ', (sort @cached_debs)));
}
}
}
# To figure out the right package set for the apt variant we can use:
@@ -2094,7 +2063,7 @@ sub run_download() {
info "nothing to download -- skipping...";
return ([], []);
}
my %pkgs_to_install;
my @apt_argv = ['install'];
for my $incl (@{ $options->{include} }) {
for my $pkg (split /[,\s]+/, $incl) {
# strip leading and trailing whitespace
@@ -2103,32 +2072,15 @@ sub run_download() {
if ($pkg eq '') {
next;
}
$pkgs_to_install{$pkg} = ();
push @apt_argv, $pkg;
}
}
my %result = ();
if ($options->{dryrun}) {
info "simulate downloading packages with apt...";
} else {
# if there are already packages in /var/cache/apt/archives/, we
# need to use our proxysolver to obtain the solution chosen by apt
if (scalar @cached_debs > 0) {
$result{EDSP_RES} = \@dl_debs;
}
info "downloading packages with apt...";
}
run_apt_progress({
ARGV => [
'apt-get',
'--yes',
'-oApt::Get::Download-Only=true',
$options->{dryrun} ? '-oAPT::Get::Simulate=true' : (),
'install'
],
PKGS => [keys %pkgs_to_install],
%result
});
@dl_debs = run_apt_download_progress({
APT_ARGV => @apt_argv,
dryrun => $options->{dryrun},
},
);
} elsif ($options->{variant} eq 'apt') {
# if we just want to install Essential:yes packages, apt and their
# dependencies then we can make use of libapt treating apt as
@@ -2143,27 +2095,11 @@ sub run_download() {
# remind me in 5+ years that I said that after I wrote
# in the bugreport: "Are you crazy?!? Nobody in his
# right mind would even suggest depending on it!")
my %result = ();
if ($options->{dryrun}) {
info "simulate downloading packages with apt...";
} else {
# if there are already packages in /var/cache/apt/archives/, we
# need to use our proxysolver to obtain the solution chosen by apt
if (scalar @cached_debs > 0) {
$result{EDSP_RES} = \@dl_debs;
}
info "downloading packages with apt...";
}
run_apt_progress({
ARGV => [
'apt-get',
'--yes',
'-oApt::Get::Download-Only=true',
$options->{dryrun} ? '-oAPT::Get::Simulate=true' : (),
'dist-upgrade'
],
%result
});
@dl_debs = run_apt_download_progress({
APT_ARGV => ['dist-upgrade'],
dryrun => $options->{dryrun},
},
);
} elsif (
any { $_ eq $options->{variant} }
('essential', 'standard', 'important', 'required', 'buildd')
@@ -2172,23 +2108,8 @@ sub run_download() {
# 17:27 < DonKult> (?essential includes 'apt' through)
# 17:30 < josch> DonKult: no, because pkgCacheGen::ForceEssential ",";
# 17:32 < DonKult> touché
my %result = ();
if ($options->{dryrun}) {
info "simulate downloading packages with apt...";
} else {
# if there are already packages in /var/cache/apt/archives/, we
# need to use our proxysolver to obtain the solution chosen by apt
if (scalar @cached_debs > 0) {
$result{EDSP_RES} = \@dl_debs;
}
info "downloading packages with apt...";
}
run_apt_progress({
ARGV => [
'apt-get',
'--yes',
'-oApt::Get::Download-Only=true',
$options->{dryrun} ? '-oAPT::Get::Simulate=true' : (),
@dl_debs = run_apt_download_progress({
APT_ARGV => [
'install',
'?narrow('
. (
@@ -2203,76 +2124,31 @@ sub run_download() {
. $options->{nativearch}
. '),?essential)'
],
%result
});
dryrun => $options->{dryrun},
},
);
} else {
error "unknown variant: $options->{variant}";
}
my @essential_pkgs;
if (scalar @cached_debs > 0 && scalar @dl_debs > 0) {
my $archives = "/var/cache/apt/archives/";
# for each package in @dl_debs, check if it's in
# /var/cache/apt/archives/ and add it to @essential_pkgs
foreach my $p (@dl_debs) {
my ($pkg, $ver_epoch) = @{$p};
# apt appends the architecture at the end of the package name
($pkg, my $arch) = split ':', $pkg, 2;
# apt replaces the colon by its percent encoding %3a
my $ver = $ver_epoch;
$ver =~ s/:/%3a/;
# the architecture returned by apt is the native architecture.
# Since we don't know whether the package is architecture
# independent or not, we first try with the native arch and then
# with "all" and only error out if neither exists.
if (-e "$options->{root}/$archives/${pkg}_${ver}_$arch.deb") {
push @essential_pkgs, "$archives/${pkg}_${ver}_$arch.deb";
} elsif (-e "$options->{root}/$archives/${pkg}_${ver}_all.deb") {
push @essential_pkgs, "$archives/${pkg}_${ver}_all.deb";
} else {
error( "cannot find package for $pkg:$arch (= $ver_epoch) "
. "in /var/cache/apt/archives/");
}
}
} else {
# collect the .deb files that were downloaded by apt from the content
# of /var/cache/apt/archives/
if (!$options->{dryrun}) {
my $apt_archives = "/var/cache/apt/archives/";
opendir my $dh, "$options->{root}/$apt_archives"
or error "cannot read $apt_archives";
while (my $deb = readdir $dh) {
if ($deb !~ /\.deb$/) {
next;
}
$deb = "$apt_archives/$deb";
if (!-f "$options->{root}/$deb") {
next;
}
# strip the chroot directory from the filenames
foreach my $deb (@dl_debs) {
if (rindex $deb, $options->{root}, 0) {
if (-e "$options->{root}/$deb") {
push @essential_pkgs, $deb;
} else {
error "package file $deb not accessible from chroot directory"
. " -- use copy:// instead of file:// or a bind-mount";
}
closedir $dh;
if (scalar @essential_pkgs == 0) {
# check if a file:// URI was used
open(my $pipe_apt, '-|', 'apt-get', 'indextargets', '--format',
'$(URI)', 'Created-By: Packages')
or error "cannot start apt-get indextargets: $!";
while (my $uri = <$pipe_apt>) {
if ($uri =~ /^file:\/\//) {
error
"nothing got downloaded -- use copy:// instead of"
. " file://";
next;
}
if (-e $deb) {
push @essential_pkgs, substr($deb, length($options->{root}));
} else {
error "cannot find package file $deb";
}
}
error "nothing got downloaded";
}
}
}
# Unpack order matters. Since we create this list using two different
# methods but we want both methods to have the same result, we sort the
# list before returning it.
@essential_pkgs = sort @essential_pkgs;
return (\@essential_pkgs, \@cached_debs);
}
@@ -6703,15 +6579,13 @@ the B<setup> step. This can be disabled using B<--skip=update>.
=item B<download>
Checks whether F</var/cache/apt/archives/> is empty. This can be disabled with
B<--skip=download/empty>. In the B<extract> and B<custom> variants, C<apt-get
--download-only install> is used to download all the packages requested via the
B<--include> option. The B<apt> variant uses the fact that libapt treats the
C<apt> packages as implicitly essential to download only all C<Essential:yes>
packages plus apt using C<apt-get --download-only dist-upgrade>. In the
remaining variants, all Packages files downloaded by the B<update> step are
inspected to find the C<Essential:yes> package set as well as all packages of
the required priority.
In the B<extract> and B<custom> variants, C<apt-get install> is used to
download all the packages requested via the B<--include> option. The B<apt>
variant uses the fact that libapt treats the C<apt> packages as implicitly
essential to download only all C<Essential:yes> packages plus apt using
C<apt-get dist-upgrade>. In the remaining variants, all Packages files
downloaded by the B<update> step are inspected to find the C<Essential:yes>
package set as well as all packages of the required priority.
=item B<extract>
@@ -6939,7 +6813,7 @@ apt-cacher-ng, you can use the B<sync-in> and B<sync-out> special hooks to
synchronize a directory outside the chroot with F</var/cache/apt/archives>
inside the chroot.
$ mmdebstrap --variant=apt --skip=download/empty --skip=essential/unlink \
$ mmdebstrap --variant=apt --skip=essential/unlink \
--setup-hook='mkdir -p ./cache "$1"/var/cache/apt/archives/' \
--setup-hook='sync-in ./cache /var/cache/apt/archives/' \
--customize-hook='sync-out /var/cache/apt/archives ./cache' \
@@ -7080,12 +6954,6 @@ as the non-root user, then as a workaround you could run C<chmod 600
/etc/dpkg/dpkg.cfg.d/*> so that the config files are only accessible by the
root user. See Debian bug #808203.
The C<file://> URI type cannot be used to install the essential packages. This
is because B<mmdebstrap> uses dpkg to install the packages that apt places into
F</var/cache/apt/archives> but with C<file://> apt will not copy the files even
with C<--download-only>. Use C<copy://> instead, which is equivalent to
C<file://> but copies the archives into F</var/cache/apt/archives>.
With apt versions before 2.1.16, setting C<[trusted=yes]> or
C<Acquire::AllowInsecureRepositories "1"> to allow signed archives without a
known public key or unsigned archives will fail because of a gpg warning in the

View file

@@ -1,56 +0,0 @@
#!/usr/bin/env python3
#
# This script is in the public domain
#
# Author: Johannes Schauer Marin Rodrigues <josch@mister-muffin.de>
#
# thin layer around /usr/lib/apt/solvers/apt, so that we can capture the solver
# result
#
# we set Debug::EDSP::WriteSolution=yes so that Install stanzas also come with
# Package and Version fields. That way, we do not also have to parse the EDSP
# request and spend time matching ID numbers
import subprocess
import sys
import os
import getpass
if not os.path.exists("/usr/lib/apt/solvers/apt"):
    print(
        """Error: ERR_NO_SOLVER
Message: The external apt solver doesn't exist. You must install the apt-utils package.
"""
    )
    exit()

fname = os.environ.get("APT_EDSP_DUMP_FILENAME")
if fname is None:
    print(
        """Error: ERR_NO_FILENAME
Message: You have to set the environment variable APT_EDSP_DUMP_FILENAME
to a valid filename to store the dump of EDSP solver input in.
For example with: export APT_EDSP_DUMP_FILENAME=/tmp/dump.edsp
"""
    )
    exit()

try:
    with open(fname, "w") as f:
        with subprocess.Popen(
            ["/usr/lib/apt/solvers/apt", "-oDebug::EDSP::WriteSolution=yes"],
            stdin=sys.stdin.fileno(),
            stdout=subprocess.PIPE,
            bufsize=0,  # unbuffered
            text=True,  # open in text mode
        ) as p:
            for line in p.stdout:
                print(line, end="")
                f.write(line)
except (FileNotFoundError, PermissionError) as e:
    print(
        """Error: ERR_CREATE_FILE
Message: Writing EDSP solver input to file '%s' failed as it couldn't be created!
"""
        % fname
    )