commit:     6c9477001d99393a7fc4f610d0ef25a4fef0ce60
Author:     Matt Jolly <kangie <AT> gentoo <DOT> org>
AuthorDate: Sat Apr 20 03:51:05 2024 +0000
Commit:     Matt Jolly <kangie <AT> gentoo <DOT> org>
CommitDate: Sat Apr 20 04:04:42 2024 +0000
URL:        https://gitweb.gentoo.org/repo/gentoo.git/commit/?id=6c947700

net-misc/curl: add 8.7.1-r2

Closes: https://bugs.gentoo.org/930194
Signed-off-by: Matt Jolly <kangie <AT> gentoo.org>

 .../{curl-8.7.1-r1.ebuild => curl-8.7.1-r2.ebuild} |   0
 net-misc/curl/files/curl-8.7.1-chunked-post.patch  |  57 ++++++++
 .../files/curl-8.7.1-fix-compress-option.patch     | 153 +++++++++++++++++++++
 3 files changed, 210 insertions(+)

diff --git a/net-misc/curl/curl-8.7.1-r1.ebuild b/net-misc/curl/curl-8.7.1-r2.ebuild
similarity index 100%
rename from net-misc/curl/curl-8.7.1-r1.ebuild
rename to net-misc/curl/curl-8.7.1-r2.ebuild

diff --git a/net-misc/curl/files/curl-8.7.1-chunked-post.patch b/net-misc/curl/files/curl-8.7.1-chunked-post.patch
new file mode 100644
index 000000000000..9d1fef73d383
--- /dev/null
+++ b/net-misc/curl/files/curl-8.7.1-chunked-post.patch
@@ -0,0 +1,57 @@
+https://github.com/curl/curl/commit/721941aadf4adf4f6aeb3f4c0ab489bb89610c36
+From: Stefan Eissing <[email protected]>
+Date: Mon, 1 Apr 2024 15:41:18 +0200
+Subject: [PATCH] http: with chunked POST forced, disable length check on read
+ callback
+
+- when an application forces HTTP/1.1 chunked transfer encoding
+  by setting the corresponding header and instructs curl to use
+  the CURLOPT_READFUNCTION, disregard any POST length information.
+- this establishes backward compatibility with previous curl versions
+
+Applications are encouraged to not force "chunked", but rather
+set length information for a POST. By setting -1, curl will
+auto-select chunked on HTTP/1.1 and work properly on other HTTP
+versions.
+
+Reported-by: Jeff King
+Fixes #13229
+Closes #13257
+--- a/lib/http.c
++++ b/lib/http.c
+@@ -2046,8 +2046,19 @@ static CURLcode set_reader(struct Curl_easy *data, Curl_HttpReq httpreq)
+       else
+         result = Curl_creader_set_null(data);
+     }
+-    else { /* we read the bytes from the callback */
+-      result = Curl_creader_set_fread(data, postsize);
++    else {
++      /* we read the bytes from the callback. In case "chunked" encoding
++       * is forced by the application, we disregard `postsize`. This is
++       * a backward compatibility decision to earlier versions where
++       * chunking disregarded this. See issue #13229. */
++      bool chunked = FALSE;
++      char *ptr = Curl_checkheaders(data, STRCONST("Transfer-Encoding"));
++      if(ptr) {
++        /* Some kind of TE is requested, check if 'chunked' is chosen */
++        chunked = Curl_compareheader(ptr, STRCONST("Transfer-Encoding:"),
++                                     STRCONST("chunked"));
++      }
++      result = Curl_creader_set_fread(data, chunked? -1 : postsize);
+     }
+     return result;
+ 
+@@ -2115,6 +2126,13 @@ CURLcode Curl_http_req_set_reader(struct Curl_easy *data,
+     data->req.upload_chunky =
+       Curl_compareheader(ptr,
+                          STRCONST("Transfer-Encoding:"), STRCONST("chunked"));
++    if(data->req.upload_chunky &&
++       Curl_use_http_1_1plus(data, data->conn) &&
++       (data->conn->httpversion >= 20)) {
++       infof(data, "suppressing chunked transfer encoding on connection "
++             "using HTTP version 2 or higher");
++       data->req.upload_chunky = FALSE;
++    }
+   }
+   else {
+     curl_off_t req_clen = Curl_creader_total_length(data);

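The upstream advice quoted in the patch message above (give curl a length for the POST rather than forcing a "Transfer-Encoding: chunked" header) looks roughly like this from the application side. This is a minimal, hypothetical sketch, not part of this commit or of curl's sources; the URL, payload and helper names are made up for illustration:

#include <curl/curl.h>
#include <string.h>

struct upload { const char *data; size_t len; size_t off; };

/* read callback handed to libcurl; copies the remaining payload into buf */
static size_t read_cb(char *buf, size_t size, size_t nitems, void *userdata)
{
  struct upload *u = userdata;
  size_t room = size * nitems;
  size_t left = u->len - u->off;
  size_t n = left < room ? left : room;
  memcpy(buf, u->data + u->off, n);
  u->off += n;
  return n;
}

int main(void)
{
  struct upload u = { "hello=world", 11, 0 };
  CURL *curl;
  CURLcode rc = CURLE_FAILED_INIT;

  curl_global_init(CURL_GLOBAL_DEFAULT);
  curl = curl_easy_init();
  if(curl) {
    curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/post"); /* made-up URL */
    curl_easy_setopt(curl, CURLOPT_POST, 1L);
    curl_easy_setopt(curl, CURLOPT_READFUNCTION, read_cb);
    curl_easy_setopt(curl, CURLOPT_READDATA, &u);
    /* Known size, so there is no need to force chunked via a header. Per the
     * patch message, passing (curl_off_t)-1 instead lets curl auto-select
     * chunked on HTTP/1.1 while staying correct on HTTP/2 and HTTP/3. */
    curl_easy_setopt(curl, CURLOPT_POSTFIELDSIZE_LARGE, (curl_off_t)u.len);
    rc = curl_easy_perform(curl);
    curl_easy_cleanup(curl);
  }
  curl_global_cleanup();
  return rc != CURLE_OK;
}

Applications that keep forcing the chunked header and relying on CURLOPT_READFUNCTION without a length are what the patch restores compatibility for.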
diff --git a/net-misc/curl/files/curl-8.7.1-fix-compress-option.patch b/net-misc/curl/files/curl-8.7.1-fix-compress-option.patch
new file mode 100644
index 000000000000..a06a53729533
--- /dev/null
+++ b/net-misc/curl/files/curl-8.7.1-fix-compress-option.patch
@@ -0,0 +1,153 @@
+https://github.com/curl/curl/commit/b30d694a027eb771c02a3db0dee0ca03ccab7377
+From: Stefan Eissing <[email protected]>
+Date: Thu, 28 Mar 2024 11:08:15 +0100
+Subject: [PATCH] content_encoding: brotli and others, pass through 0-length
+ writes
+
+- curl's transfer handling may write 0-length chunks at the end of the
+  download with an EOS flag. (HTTP/2 does this commonly)
+
+- content encoders need to pass-through such a write and not count this
+  as error in case they are finished decoding
+
+Fixes #13209
+Fixes #13212
+Closes #13219
+--- a/lib/content_encoding.c
++++ b/lib/content_encoding.c
+@@ -300,7 +300,7 @@ static CURLcode deflate_do_write(struct Curl_easy *data,
+   struct zlib_writer *zp = (struct zlib_writer *) writer;
+   z_stream *z = &zp->z;     /* zlib state structure */
+ 
+-  if(!(type & CLIENTWRITE_BODY))
++  if(!(type & CLIENTWRITE_BODY) || !nbytes)
+     return Curl_cwriter_write(data, writer->next, type, buf, nbytes);
+ 
+   /* Set the compressed input when this function is called */
+@@ -457,7 +457,7 @@ static CURLcode gzip_do_write(struct Curl_easy *data,
+   struct zlib_writer *zp = (struct zlib_writer *) writer;
+   z_stream *z = &zp->z;     /* zlib state structure */
+ 
+-  if(!(type & CLIENTWRITE_BODY))
++  if(!(type & CLIENTWRITE_BODY) || !nbytes)
+     return Curl_cwriter_write(data, writer->next, type, buf, nbytes);
+ 
+   if(zp->zlib_init == ZLIB_INIT_GZIP) {
+@@ -669,7 +669,7 @@ static CURLcode brotli_do_write(struct Curl_easy *data,
+   CURLcode result = CURLE_OK;
+   BrotliDecoderResult r = BROTLI_DECODER_RESULT_NEEDS_MORE_OUTPUT;
+ 
+-  if(!(type & CLIENTWRITE_BODY))
++  if(!(type & CLIENTWRITE_BODY) || !nbytes)
+     return Curl_cwriter_write(data, writer->next, type, buf, nbytes);
+ 
+   if(!bp->br)
+@@ -762,7 +762,7 @@ static CURLcode zstd_do_write(struct Curl_easy *data,
+   ZSTD_outBuffer out;
+   size_t errorCode;
+ 
+-  if(!(type & CLIENTWRITE_BODY))
++  if(!(type & CLIENTWRITE_BODY) || !nbytes)
+     return Curl_cwriter_write(data, writer->next, type, buf, nbytes);
+ 
+   if(!zp->decomp) {
+@@ -916,7 +916,7 @@ static CURLcode error_do_write(struct Curl_easy *data,
+   (void) buf;
+   (void) nbytes;
+ 
+-  if(!(type & CLIENTWRITE_BODY))
++  if(!(type & CLIENTWRITE_BODY) || !nbytes)
+     return Curl_cwriter_write(data, writer->next, type, buf, nbytes);
+ 
+   failf(data, "Unrecognized content encoding type. "
+--- a/tests/http/test_02_download.py
++++ b/tests/http/test_02_download.py
+@@ -394,6 +394,19 @@ def test_02_27_paused_no_cl(self, env: Env, httpd, nghttpx, repeat):
+         r = client.run(args=[url])
+         r.check_exit_code(0)
+ 
++    @pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
++    def test_02_28_get_compressed(self, env: Env, httpd, nghttpx, repeat, proto):
++        if proto == 'h3' and not env.have_h3():
++            pytest.skip("h3 not supported")
++        count = 1
++        urln = f'https://{env.authority_for(env.domain1brotli, proto)}/data-100k?[0-{count-1}]'
++        curl = CurlClient(env=env)
++        r = curl.http_download(urls=[urln], alpn_proto=proto, extra_args=[
++            '--compressed'
++        ])
++        r.check_exit_code(code=0)
++        r.check_response(count=count, http_status=200)
++
+     def check_downloads(self, client, srcfile: str, count: int,
+                         complete: bool = True):
+         for i in range(count):
+--- a/tests/http/testenv/env.py
++++ b/tests/http/testenv/env.py
+@@ -129,10 +129,11 @@ def __init__(self):
+         self.htdocs_dir = os.path.join(self.gen_dir, 'htdocs')
+         self.tld = 'http.curl.se'
+         self.domain1 = f"one.{self.tld}"
++        self.domain1brotli = f"brotli.one.{self.tld}"
+         self.domain2 = f"two.{self.tld}"
+         self.proxy_domain = f"proxy.{self.tld}"
+         self.cert_specs = [
+-            CertificateSpec(domains=[self.domain1, 'localhost'], key_type='rsa2048'),
++            CertificateSpec(domains=[self.domain1, self.domain1brotli, 'localhost'], key_type='rsa2048'),
+            CertificateSpec(domains=[self.domain2], key_type='rsa2048'),
+            CertificateSpec(domains=[self.proxy_domain, '127.0.0.1'], key_type='rsa2048'),
+             CertificateSpec(name="clientsX", sub_specs=[
+@@ -376,6 +377,10 @@ def htdocs_dir(self) -> str:
+     def domain1(self) -> str:
+         return self.CONFIG.domain1
+ 
++    @property
++    def domain1brotli(self) -> str:
++        return self.CONFIG.domain1brotli
++
+     @property
+     def domain2(self) -> str:
+         return self.CONFIG.domain2
+--- a/tests/http/testenv/httpd.py
++++ b/tests/http/testenv/httpd.py
+@@ -50,6 +50,7 @@ class Httpd:
+         'alias', 'env', 'filter', 'headers', 'mime', 'setenvif',
+         'socache_shmcb',
+         'rewrite', 'http2', 'ssl', 'proxy', 'proxy_http', 'proxy_connect',
++        'brotli',
+         'mpm_event',
+     ]
+     COMMON_MODULES_DIRS = [
+@@ -203,6 +204,7 @@ def _mkpath(self, path):
+ 
+     def _write_config(self):
+         domain1 = self.env.domain1
++        domain1brotli = self.env.domain1brotli
+         creds1 = self.env.get_credentials(domain1)
+         domain2 = self.env.domain2
+         creds2 = self.env.get_credentials(domain2)
+@@ -285,6 +287,24 @@ def _write_config(self):
+                 f'</VirtualHost>',
+                 f'',
+             ])
++            # Alternate to domain1 with BROTLI compression
++            conf.extend([  # https host for domain1, h1 + h2
++                f'<VirtualHost *:{self.env.https_port}>',
++                f'    ServerName {domain1brotli}',
++                f'    Protocols h2 http/1.1',
++                f'    SSLEngine on',
++                f'    SSLCertificateFile {creds1.cert_file}',
++                f'    SSLCertificateKeyFile {creds1.pkey_file}',
++                f'    DocumentRoot "{self._docs_dir}"',
++                f'    SetOutputFilter BROTLI_COMPRESS',
++            ])
++            conf.extend(self._curltest_conf(domain1))
++            if domain1 in self._extra_configs:
++                conf.extend(self._extra_configs[domain1])
++            conf.extend([
++                f'</VirtualHost>',
++                f'',
++            ])
+             conf.extend([  # https host for domain2, no h2
+                 f'<VirtualHost *:{self.env.https_port}>',
+                 f'    ServerName {domain2}',

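For reference, the decoder paths touched by the second patch are only exercised when the application asks libcurl to decode the response body, which is what the new test_02_28_get_compressed test does via the curl tool's --compressed option. A minimal, hypothetical libcurl equivalent (not part of the commit; the URL is made up):

#include <curl/curl.h>

int main(void)
{
  CURL *curl;
  CURLcode rc = CURLE_FAILED_INIT;

  curl_global_init(CURL_GLOBAL_DEFAULT);
  curl = curl_easy_init();
  if(curl) {
    curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/data-100k"); /* made-up URL */
    /* An empty string offers every encoding this libcurl build supports
     * (gzip, deflate, brotli, zstd) and decodes the response, routing the
     * body through the content_encoding.c writers patched above. */
    curl_easy_setopt(curl, CURLOPT_ACCEPT_ENCODING, "");
    rc = curl_easy_perform(curl);
    curl_easy_cleanup(curl);
  }
  curl_global_cleanup();
  return rc != CURLE_OK;
}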