Commit 86855715 authored by Lorenzo "Palinuro" Faletra's avatar Lorenzo "Palinuro" Faletra Committed by Lorenzo "Palinuro" Faletra
Browse files

Import Debian version 2.2.3+parrot1

apt (2.2.3+parrot1) rolling; urgency=medium
.
  * Import new Debian release.
  * Patch methods/basehttp.cc to allow https-to-http redirects.
.
apt (2.2.3) unstable; urgency=medium
.
  * tests: Check for and discard expected warning from MaybeAddAuth. For some
    reason, this was only noticed with LTO enabled, but should be a general
    issue.
  * Fix downloads of unsized files that are largest in pipeline (LP: #1921626)
  * Warn on packages without a Size field. Such repositories are broken and
    need to be fixed, as we do not test apt against them, see the bug above
    for more details. Set Acquire::AllowUnsizedPackages to disable the
    warning.
.
apt (2.2.2+parrot1) rolling; urgency=medium
.
  * Import new Debian release.
  * Patch methods/basehttp.cc to allow https-to-http redirects.
.
apt (2.2.2) unstable; urgency=medium
.
  [ David Kalnischkies ]
  * Deal with rred shortcomings around empty patch files (LP: #1918112)
    - Allow merging with empty pdiff patches
    - Rename pdiff merge patches only after they are all downloaded
    - Start pdiff patching from the last possible starting point
    - Ensure all index files sent custom tags to the methods
  * Harden test for no new acquires after transaction abort (Closes: #984966)
.
  [ Julian Andres Klode ]
  * Make ADDARG{,C}() macros expand to single statements
parent 54085fd5
Pipeline #3019 failed with stages
in 0 seconds
......@@ -200,7 +200,7 @@ endif()
# Configure some variables like package, version and architecture.
set(PACKAGE ${PROJECT_NAME})
set(PACKAGE_MAIL "APT Development Team <deity@lists.debian.org>")
set(PACKAGE_VERSION "2.2.1")
set(PACKAGE_VERSION "2.2.3")
string(REGEX MATCH "^[0-9.]+" PROJECT_VERSION ${PACKAGE_VERSION})
if (NOT DEFINED DPKG_DATADIR)
......
......@@ -2564,26 +2564,16 @@ bool pkgAcqDiffIndex::ParseDiffIndex(string const &IndexDiffFile) /*{{{*/
}
}
bool foundStart = false;
for (std::vector<DiffInfo>::iterator cur = available_patches.begin();
cur != available_patches.end(); ++cur)
{
if (LocalHashes != cur->result_hashes)
continue;
available_patches.erase(available_patches.begin(), cur);
foundStart = true;
break;
}
if (foundStart == false || unlikely(available_patches.empty() == true))
{
ErrorText = "Couldn't find the start of the patch series";
return false;
}
auto const foundStart = std::find_if(available_patches.rbegin(), available_patches.rend(),
[&](auto const &cur) { return LocalHashes == cur.result_hashes; });
if (foundStart == available_patches.rend() || unlikely(available_patches.empty()))
{
ErrorText = "Couldn't find the start of the patch series";
return false;
}
available_patches.erase(available_patches.begin(), std::prev(foundStart.base()));
{
auto const patch = std::find_if(available_patches.cbegin(), available_patches.cend(), [](auto const &patch) {
return not patch.result_hashes.usable() ||
not patch.patch_hashes.usable() ||
......@@ -3050,14 +3040,11 @@ void pkgAcqIndexMergeDiffs::Done(string const &Message, HashStringList const &Ha
State = StateErrorDiff;
return;
}
std::string const PatchFile = GetMergeDiffsPatchFileName(UnpatchedFile, patch.file);
std::string const PatchedFile = GetKeepCompressedFileName(UncompressedUnpatchedFile, Target);
switch (State)
{
case StateFetchDiff:
Rename(DestFile, PatchFile);
// check if this is the last completed diff
State = StateDoneDiff;
for (std::vector<pkgAcqIndexMergeDiffs *>::const_iterator I = allPatches->begin();
......@@ -3068,6 +3055,8 @@ void pkgAcqIndexMergeDiffs::Done(string const &Message, HashStringList const &Ha
std::clog << "Not the last done diff in the batch: " << Desc.URI << std::endl;
return;
}
for (auto * diff : *allPatches)
Rename(diff->DestFile, GetMergeDiffsPatchFileName(UnpatchedFile, diff->patch.file));
// this is the last completed diff, so we are ready to apply now
DestFile = GetKeepCompressedFileName(UncompressedUnpatchedFile + "-patched", Target);
if(Debug)
......@@ -3098,8 +3087,8 @@ void pkgAcqIndexMergeDiffs::Done(string const &Message, HashStringList const &Ha
if(Debug)
std::clog << "allDone: " << DestFile << "\n" << std::endl;
return;
case StateDoneDiff: _error->Fatal("Done called for %s which is in an invalid Done state", PatchFile.c_str()); break;
case StateErrorDiff: _error->Fatal("Done called for %s which is in an invalid Error state", PatchFile.c_str()); break;
case StateDoneDiff: _error->Fatal("Done called for %s which is in an invalid Done state", patch.file.c_str()); break;
case StateErrorDiff: _error->Fatal("Done called for %s which is in an invalid Error state", patch.file.c_str()); break;
}
}
/*}}}*/
......@@ -3188,8 +3177,8 @@ void pkgAcqIndex::Init(string const &URI, string const &URIDesc,
/* The only header we use is the last-modified header. */
string pkgAcqIndex::Custom600Headers() const
{
string msg = "\nIndex-File: true";
std::string msg = pkgAcqBaseIndex::Custom600Headers();
msg.append("\nIndex-File: true");
if (TransactionManager->LastMetaIndexParser == NULL)
{
......@@ -3480,6 +3469,12 @@ pkgAcqArchive::pkgAcqArchive(pkgAcquire *const Owner, pkgSourceList *const Sourc
Version.VerStr(), Version.ParentPkg().FullName(false).c_str());
return;
}
if (FileSize == 0 && not _config->FindB("Acquire::AllowUnsizedPackages", false))
{
_error->Warning("Repository is broken: %s (= %s) has no Size information",
Version.ParentPkg().FullName(false).c_str(),
Version.VerStr());
}
// Check if we already downloaded the file
struct stat Buf;
......@@ -3941,9 +3936,10 @@ void pkgAcqFile::Done(string const &Message,HashStringList const &CalcHashes,
/*}}}*/
string pkgAcqFile::Custom600Headers() const /*{{{*/
{
if (IsIndexFile)
return "\nIndex-File: true";
return "";
string Header = pkgAcquire::Item::Custom600Headers();
if (not IsIndexFile)
return Header;
return Header + "\nIndex-File: true";
}
/*}}}*/
pkgAcqFile::~pkgAcqFile() {}
......
......@@ -1765,8 +1765,8 @@ bool pkgDPkgPM::Go(APT::Progress::PackageManager *progress)
if (pipe(fd) != 0)
return _error->Errno("pipe","Failed to create IPC pipe to dpkg");
#define ADDARG(X) Args.push_back(X); Size += strlen(X)
#define ADDARGC(X) Args.push_back(X); Size += sizeof(X) - 1
#define ADDARG(X) do { const char *arg = (X); Args.push_back(arg); Size += strlen(arg); } while (0)
#define ADDARGC(X) ADDARG(X)
ADDARGC("--status-fd");
char status_fd_buf[20];
......
apt (2.2.3+parrot1) rolling; urgency=medium
* Import new Debian release.
* Patch methods/basehttp.cc to allow https-to-http redirects.
-- Lorenzo "Palinuro" Faletra <palinuro@parrotsec.org> Wed, 28 Apr 2021 22:24:52 +0200
apt (2.2.3) unstable; urgency=medium
* tests: Check for and discard expected warning from MaybeAddAuth. For some
reason, this was only noticed with LTO enabled, but should be a general
issue.
* Fix downloads of unsized files that are largest in pipeline (LP: #1921626)
* Warn on packages without a Size field. Such repositories are broken and
need to be fixed, as we do not test apt against them, see the bug above
for more details. Set Acquire::AllowUnsizedPackages to disable the
warning.
-- Julian Andres Klode <jak@debian.org> Tue, 13 Apr 2021 17:53:32 +0200
apt (2.2.2+parrot1) rolling; urgency=medium
* Import new Debian release.
* Patch methods/basehttp.cc to allow https-to-http redirects.
-- Lorenzo "Palinuro" Faletra <palinuro@localhost.localdomain> Sat, 20 Mar 2021 20:50:10 +0100
apt (2.2.2) unstable; urgency=medium
[ David Kalnischkies ]
* Deal with rred shortcomings around empty patch files (LP: #1918112)
- Allow merging with empty pdiff patches
- Rename pdiff merge patches only after they are all downloaded
- Start pdiff patching from the last possible starting point
- Ensure all index files sent custom tags to the methods
* Harden test for no new acquires after transaction abort (Closes: #984966)
[ Julian Andres Klode ]
* Make ADDARG{,C}() macros expand to single statements
-- Julian Andres Klode <jak@debian.org> Fri, 12 Mar 2021 09:15:59 +0100
apt (2.2.1+parrot1) rolling; urgency=medium
* Import new Debian release.
......
......@@ -274,7 +274,7 @@
">
<!-- this will be updated by 'prepare-release' -->
<!ENTITY apt-product-version "2.2.1">
<!ENTITY apt-product-version "2.2.3">
<!-- (Code)names for various things used all over the place -->
<!ENTITY debian-oldstable-codename "buster">
......
......@@ -257,6 +257,7 @@ Acquire
AllowInsecureRepositories "<BOOL>";
AllowWeakRepositories "<BOOL>";
AllowDowngradeToInsecureRepositories "<BOOL>";
AllowUnsizedPackages "<BOOL>";
// allow repositories to change information potentially breaking user config like pinning
AllowReleaseInfoChange "<BOOL>"
{
......
......@@ -5,9 +5,9 @@
#, fuzzy
msgid ""
msgstr ""
"Project-Id-Version: apt-doc 2.2.1\n"
"Project-Id-Version: apt-doc 2.2.3\n"
"Report-Msgid-Bugs-To: APT Development Team <deity@lists.debian.org>\n"
"POT-Creation-Date: 2021-03-01 22:28+0100\n"
"POT-Creation-Date: 2021-04-13 15:38+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
......
......@@ -391,9 +391,9 @@ BaseHttpMethod::DealWithHeaders(FetchResult &Res, RequestState &Req)
// as well as http to https
else if ((Uri.Access == "http" || Uri.Access == "https+http") && tmpURI.Access == "https")
return TRY_AGAIN_OR_REDIRECT;
// allow https to http redirects (for https mirrordirectors with http mirrors)
else if ((Uri.Access == "https" || Uri.Access == "https+http") && tmpURI.Access == "http")
return TRY_AGAIN_OR_REDIRECT;
// allow https to http redirects (for https mirrordirectors with http mirrors)
else if ((Uri.Access == "https" || Uri.Access == "https+http") && tmpURI.Access == "http")
return TRY_AGAIN_OR_REDIRECT;
else
{
auto const tmpplus = tmpURI.Access.find('+');
......@@ -885,7 +885,11 @@ unsigned long long BaseHttpMethod::FindMaximumObjectSizeInQueue() const /*{{{*/
{
unsigned long long MaxSizeInQueue = 0;
for (FetchItem *I = Queue; I != 0 && I != QueueBack; I = I->Next)
{
if (I->MaximumSize == 0)
return 0;
MaxSizeInQueue = std::max(MaxSizeInQueue, I->MaximumSize);
}
return MaxSizeInQueue;
}
/*}}}*/
......
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################
HTTP and HTTPS share a lot of common code and these classes are
exactly the dumping ground for this common code
##################################################################### */
/*}}}*/
// Include Files /*{{{*/
#include <config.h>
#include <apt-pkg/configuration.h>
#include <apt-pkg/error.h>
#include <apt-pkg/fileutl.h>
#include <apt-pkg/strutl.h>
#include <iostream>
#include <limits>
#include <map>
#include <string>
#include <vector>
#include <ctype.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>
#include "basehttp.h"
#include <apti18n.h>
/*}}}*/
using namespace std;
string BaseHttpMethod::FailFile;
int BaseHttpMethod::FailFd = -1;
time_t BaseHttpMethod::FailTime = 0;
// Number of successful requests in a pipeline needed to continue
// pipelining after a connection reset.
constexpr int PIPELINE_MIN_SUCCESSFUL_ANSWERS_TO_CONTINUE = 3;
// ServerState::RunHeaders - Get the headers before the data /*{{{*/
// ---------------------------------------------------------------------
/* Returns 0 if things are OK, 1 if an IO error occurred and 2 if a header
parse error occurred */
ServerState::RunHeadersResult ServerState::RunHeaders(RequestState &Req,
const std::string &Uri)
{
Owner->Status(_("Waiting for headers"));
do
{
string Data;
if (ReadHeaderLines(Data) == false)
continue;
if (Owner->Debug == true)
clog << "Answer for: " << Uri << endl << Data;
for (string::const_iterator I = Data.begin(); I < Data.end(); ++I)
{
string::const_iterator J = I;
for (; J != Data.end() && *J != '\n' && *J != '\r'; ++J);
if (Req.HeaderLine(string(I,J)) == false)
return RUN_HEADERS_PARSE_ERROR;
I = J;
}
// 100 Continue is a Nop...
if (Req.Result == 100)
continue;
// Tidy up the connection persistence state.
if (Req.Encoding == RequestState::Closes && Req.HaveContent == true)
Persistent = false;
return RUN_HEADERS_OK;
} while (LoadNextResponse(false, Req) == ResultState::SUCCESSFUL);
return RUN_HEADERS_IO_ERROR;
}
/*}}}*/
bool RequestState::HeaderLine(string const &Line) /*{{{*/
{
if (Line.empty() == true)
return true;
if (Result == 0 && Line.size() > 4 && stringcasecmp(Line.data(), Line.data() + 4, "HTTP") == 0)
{
// Evil servers return no version
if (Line[4] == '/')
{
int const elements = sscanf(Line.c_str(),"HTTP/%3u.%3u %3u%359[^\n]",&Major,&Minor,&Result,Code);
if (elements == 3)
{
Code[0] = '\0';
if (Owner != NULL && Owner->Debug == true)
clog << "HTTP server doesn't give Reason-Phrase for " << std::to_string(Result) << std::endl;
}
else if (elements != 4)
return _error->Error(_("The HTTP server sent an invalid reply header"));
}
else
{
Major = 0;
Minor = 9;
if (sscanf(Line.c_str(),"HTTP %3u%359[^\n]",&Result,Code) != 2)
return _error->Error(_("The HTTP server sent an invalid reply header"));
}
auto const CodeLen = strlen(Code);
auto const CodeEnd = std::remove_if(Code, Code + CodeLen, [](char c) { return isprint(c) == 0; });
*CodeEnd = '\0';
/* Check the HTTP response header to get the default persistence
state. */
if (Major < 1)
Server->Persistent = false;
else
{
if (Major == 1 && Minor == 0)
{
Server->Persistent = false;
}
else
{
Server->Persistent = true;
if (Server->PipelineAllowed)
Server->Pipeline = true;
}
}
return true;
}
// Blah, some servers use "connection:closes", evil.
// and some even send empty header fields…
string::size_type Pos = Line.find(':');
if (Pos == string::npos)
return _error->Error(_("Bad header line"));
++Pos;
// Parse off any trailing spaces between the : and the next word.
string::size_type Pos2 = Pos;
while (Pos2 < Line.length() && isspace_ascii(Line[Pos2]) != 0)
Pos2++;
string const Tag(Line,0,Pos);
string const Val(Line,Pos2);
if (stringcasecmp(Tag,"Content-Length:") == 0)
{
auto ContentLength = strtoull(Val.c_str(), NULL, 10);
if (ContentLength == 0)
return true;
if (Encoding == Closes)
Encoding = Stream;
HaveContent = true;
unsigned long long * DownloadSizePtr = &DownloadSize;
if (Result == 416 || (Result >= 300 && Result < 400))
DownloadSizePtr = &JunkSize;
*DownloadSizePtr = ContentLength;
if (*DownloadSizePtr >= std::numeric_limits<unsigned long long>::max())
return _error->Errno("HeaderLine", _("The HTTP server sent an invalid Content-Length header"));
else if (*DownloadSizePtr == 0)
HaveContent = false;
// On partial content (206) the Content-Length less than the real
// size, so do not set it here but leave that to the Content-Range
// header instead
if(Result != 206 && TotalFileSize == 0)
TotalFileSize = DownloadSize;
return true;
}
if (stringcasecmp(Tag,"Content-Type:") == 0)
{
HaveContent = true;
return true;
}
// The Content-Range field only has a meaning in HTTP/1.1 for the
// 206 (Partial Content) and 416 (Range Not Satisfiable) responses
// according to RFC7233 "Range Requests", §4.2, so only consider it
// for such responses.
if ((Result == 416 || Result == 206) && stringcasecmp(Tag,"Content-Range:") == 0)
{
HaveContent = true;
// §14.16 says 'byte-range-resp-spec' should be a '*' in case of 416
if (Result == 416 && sscanf(Val.c_str(), "bytes */%llu",&TotalFileSize) == 1)
; // we got the expected filesize which is all we wanted
else if (sscanf(Val.c_str(),"bytes %llu-%*u/%llu",&StartPos,&TotalFileSize) != 2)
return _error->Error(_("The HTTP server sent an invalid Content-Range header"));
if (StartPos > TotalFileSize)
return _error->Error(_("This HTTP server has broken range support"));
// figure out what we will download
DownloadSize = TotalFileSize - StartPos;
return true;
}
if (stringcasecmp(Tag,"Transfer-Encoding:") == 0)
{
HaveContent = true;
if (stringcasecmp(Val,"chunked") == 0)
Encoding = Chunked;
return true;
}
if (stringcasecmp(Tag,"Connection:") == 0)
{
if (stringcasecmp(Val,"close") == 0)
{
Server->Persistent = false;
Server->Pipeline = false;
/* Some servers send error pages (as they are dynamically generated)
for simplicity via a connection close instead of e.g. chunked,
so assuming an always closing server only if we get a file + close */
if (Result >= 200 && Result < 300 && Server->PipelineAnswersReceived < PIPELINE_MIN_SUCCESSFUL_ANSWERS_TO_CONTINUE)
{
Server->PipelineAllowed = false;
Server->PipelineAnswersReceived = 0;
}
}
else if (stringcasecmp(Val,"keep-alive") == 0)
Server->Persistent = true;
return true;
}
if (stringcasecmp(Tag,"Last-Modified:") == 0)
{
if (RFC1123StrToTime(Val, Date) == false)
return _error->Error(_("Unknown date format"));
return true;
}
if (stringcasecmp(Tag,"Location:") == 0)
{
Location = Val;
return true;
}
if (stringcasecmp(Tag, "Accept-Ranges:") == 0)
{
std::string ranges = ',' + Val + ',';
ranges.erase(std::remove(ranges.begin(), ranges.end(), ' '), ranges.end());
if (ranges.find(",bytes,") == std::string::npos)
Server->RangesAllowed = false;
return true;
}
return true;
}
/*}}}*/
// ServerState::ServerState - Constructor /*{{{*/
ServerState::ServerState(URI Srv, BaseHttpMethod *Owner) :
ServerName(Srv), TimeOut(30), Owner(Owner)
{
Reset();
}
/*}}}*/
bool RequestState::AddPartialFileToHashes(FileFd &File) /*{{{*/
{
File.Truncate(StartPos);
return Server->GetHashes()->AddFD(File, StartPos);
}
/*}}}*/
void ServerState::Reset() /*{{{*/
{
Persistent = false;
Pipeline = false;
PipelineAllowed = true;
RangesAllowed = true;
PipelineAnswersReceived = 0;
}
/*}}}*/
// BaseHttpMethod::DealWithHeaders - Handle the retrieved header data /*{{{*/
// ---------------------------------------------------------------------
/* We look at the header data we got back from the server and decide what
to do. Returns DealWithHeadersResult (see http.h for details).
*/
static std::string fixURIEncoding(std::string const &part)
{
// if the server sends a space this is not an encoded URI
// so other clients seem to encode it and we do it as well
if (part.find_first_of(" ") != std::string::npos)
return aptMethod::URIEncode(part);
return part;
}
BaseHttpMethod::DealWithHeadersResult
BaseHttpMethod::DealWithHeaders(FetchResult &Res, RequestState &Req)
{
// Not Modified
if (Req.Result == 304)
{
RemoveFile("server", Queue->DestFile);
Res.IMSHit = true;
Res.LastModified = Queue->LastModified;
Res.Size = 0;
return IMS_HIT;
}
/* Note that it is only OK for us to treat all redirection the same
because we *always* use GET, not other HTTP methods.
Codes not mentioned are handled as errors later as required by the
HTTP spec to handle unknown codes the same as the x00 code. */
constexpr unsigned int RedirectCodes[] = {
301, // Moved Permanently
302, // Found
303, // See Other
307, // Temporary Redirect
308, // Permanent Redirect
};
if (AllowRedirect && std::find(std::begin(RedirectCodes), std::end(RedirectCodes), Req.Result) != std::end(RedirectCodes))
{
if (Req.Location.empty() == true)
;
else if (Req.Location[0] == '/' && Queue->Uri.empty() == false)
{
URI Uri(Queue->Uri);
if (Uri.Host.empty() == false)
NextURI = URI::SiteOnly(Uri);
else
NextURI.clear();
if (_config->FindB("Acquire::Send-URI-Encoded", false))
NextURI.append(fixURIEncoding(Req.Location));
else
NextURI.append(DeQuoteString(Req.Location));
if (Queue->Uri == NextURI)
{
SetFailReason("RedirectionLoop");
_error->Error("Redirection loop encountered");
if (Req.HaveContent == true)
return ERROR_WITH_CONTENT_PAGE;
return ERROR_UNRECOVERABLE;
}
return TRY_AGAIN_OR_REDIRECT;
}
else
{
bool const SendURIEncoded = _config->FindB("Acquire::Send-URI-Encoded", false);
if (not SendURIEncoded)
Req.Location = DeQuoteString(Req.Location);
URI tmpURI(Req.Location);
if (SendURIEncoded)
tmpURI.Path = fixURIEncoding(tmpURI.Path);
if (tmpURI.Access.find('+') != std::string::npos)
{
_error->Error("Server tried to trick us into using a specific implementation: %s", tmpURI.Access.c_str());
if (Req.HaveContent == true)
return ERROR_WITH_CONTENT_PAGE;
return ERROR_UNRECOVERABLE;
}
NextURI = tmpURI;
URI Uri(Queue->Uri);
if (Binary.find('+') != std::string::npos)
{
auto base = Binary.substr(0, Binary.find('+'));
if (base != tmpURI.Access)
{
tmpURI.Access = base + '+' + tmpURI.Access;
if (tmpURI.Access == Binary)
{
std::swap(tmpURI.Access, Uri.Access);
NextURI = tmpURI;
std::swap(tmpURI.Access, Uri.Access);
}
else
NextURI = tmpURI;
}
}
if (Queue->Uri == NextURI)
{
SetFailReason("RedirectionLoop");
_error->Error("Redirection loop encountered");
if (Req.HaveContent == true)
return ERROR_WITH_CONTENT_PAGE;
return ERROR_UNRECOVERABLE;
}
Uri.Access = Binary;
// same protocol redirects are okay
if (tmpURI.Access == Uri.Access)
return TRY_AGAIN_OR_REDIRECT;
// as well as http to https
else if ((Uri.Access == "http" || Uri.Access == "https+http") && tmpURI.Access == "https")
return TRY_AGAIN_OR_REDIRECT;
else
{