Control: tags 674447 + patch
Control: tags 704611 + patch

Attached is a possible debdiff for these two (but not yet tested).
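For context: #674447 only bites when the buffers are enlarged beyond the compiled-in default, i.e. when the configuration carries something like the following in its global section (illustrative value, untested, not taken from the Debian package):

  global
      # Larger than the compiled-in BUFSIZE default; on unpatched 1.4.15 the
      # static trash buffer keeps its old size, which is what makes the
      # header-capture overflow (CVE-2012-2942) reachable.
      tune.bufsize 32768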
TODO remaining:

 - Is #674447 considered to be RC, or should we downgrade it, as triggering
   it needs extra tuning of tune.bufsize?
 - In case of an upload, will the Release Team also accept a patch for
   #704611? Otherwise it should be removed again.

Regards,
Salvatore
diff -Nru haproxy-1.4.15/debian/changelog haproxy-1.4.15/debian/changelog
--- haproxy-1.4.15/debian/changelog	2011-07-14 18:19:25.000000000 +0200
+++ haproxy-1.4.15/debian/changelog	2013-04-05 15:45:35.000000000 +0200
@@ -1,3 +1,14 @@
+haproxy (1.4.15-1.1) unstable; urgency=low
+
+  * Non-maintainer upload.
+  * Add CVE-2012-2942 patch.
+    CVE-2012-2942: Fix buffer overflow in the trash buffer in the header
+    capture functionality. (Closes: #674447)
+  * Add CVE-2013-1912 patch.
+    CVE-2013-1912: Fix crash on TCP content inspection rules. (Closes: #704611)
+
+ -- Salvatore Bonaccorso <car...@debian.org>  Fri, 05 Apr 2013 15:45:20 +0200
+
 haproxy (1.4.15-1) unstable; urgency=low
 
   * New upstream release with critical bug fix (Closes: #631351)
diff -Nru haproxy-1.4.15/debian/patches/CVE-2012-2942 haproxy-1.4.15/debian/patches/CVE-2012-2942
--- haproxy-1.4.15/debian/patches/CVE-2012-2942	1970-01-01 01:00:00.000000000 +0100
+++ haproxy-1.4.15/debian/patches/CVE-2012-2942	2013-04-05 15:45:35.000000000 +0200
@@ -0,0 +1,350 @@
+From: David du Colombier <dducolomb...@exceliance.fr>
+Date: Wed, 16 May 2012 12:16:48 +0000 (+0200)
+Subject: BUG/MAJOR: trash must always be the size of a buffer
+X-Git-Tag: v1.4.21~7
+X-Git-Url: http://haproxy.1wt.eu:81/git?p=haproxy-1.4.git;a=commitdiff_plain;h=30297cb17147a8d339eb160226bcc08c91d9530b
+
+BUG/MAJOR: trash must always be the size of a buffer
+
+Before it was possible to resize the buffers using global.tune.bufsize,
+the trash has always been the size of a buffer by design. Unfortunately,
+the recent buffer sizing at runtime forgot to adjust the trash, resulting
+in it being too short for content rewriting if buffers were enlarged from
+the default value.
+
+The bug was encountered in 1.4 so the fix must be backported there.
+(cherry picked from commit 7af4605ef76f6929835eefd13664beab379101b5)
+---
+
+--- a/include/types/global.h
++++ b/include/types/global.h
+@@ -104,7 +104,8 @@
+ extern int relative_pid; /* process id starting at 1 */
+ extern int actconn; /* # of active sessions */
+ extern int listeners;
+-extern char trash[BUFSIZE];
++extern char *trash;
++extern int trashlen;
+ extern char *swap_buffer;
+ extern int nb_oldpids; /* contains the number of old pids found */
+ extern const int zero;
+--- a/src/acl.c
++++ b/src/acl.c
+@@ -776,8 +776,7 @@
+ opaque = 0;
+ pattern = NULL;
+ args[1] = "";
+- while (fgets(trash, sizeof(trash), file) != NULL) {
+-
++ while (fgets(trash, trashlen, file) != NULL) {
+ c = trash;
+
+ /* ignore lines beginning with a dash */
+--- a/src/cfgparse.c
++++ b/src/cfgparse.c
+@@ -511,6 +511,8 @@
+ global.tune.bufsize = atol(args[1]);
+ if (global.tune.maxrewrite >= global.tune.bufsize / 2)
+ global.tune.maxrewrite = global.tune.bufsize / 2;
++ trashlen = global.tune.bufsize;
++ trash = realloc(trash, trashlen);
+ }
+ else if (!strcmp(args[0], "tune.maxrewrite")) {
+ if (*(args[1]) == 0) {
+@@ -904,9 +906,9 @@
+ continue;
+ if (strcmp(kwl->kw[index].kw, args[0]) == 0) {
+ /* prepare error message just in case */
+- snprintf(trash, sizeof(trash),
++ snprintf(trash, trashlen,
+ "error near '%s' in '%s' section", args[0], "global");
+- rc = kwl->kw[index].parse(args, CFG_GLOBAL, NULL, NULL, trash, sizeof(trash));
++ rc = kwl->kw[index].parse(args, CFG_GLOBAL, NULL, NULL, trash, trashlen);
+ if (rc < 0) {
+ Alert("parsing [%s:%d] : %s\n", file, linenum, trash);
+ err_code |= ERR_ALERT | ERR_FATAL;
+@@ -3302,7 +3304,7 @@
+ err_code |= ERR_WARN;
+
+ memcpy(trash, "error near 'balance'", 21);
+- if (backend_parse_balance((const char **)args + 1, trash, sizeof(trash), curproxy) < 0) {
++ if (backend_parse_balance((const char **)args + 1, trash, trashlen, curproxy) < 0) {
+ Alert("parsing [%s:%d] : %s\n", file, linenum, trash);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+@@ -4529,9 +4531,9 @@
+ continue;
+ if (strcmp(kwl->kw[index].kw, args[0]) == 0) {
+ /* prepare error message just in case */
+- snprintf(trash, sizeof(trash),
++ snprintf(trash, trashlen,
+ "error near '%s' in %s section", args[0], cursection);
+- rc = kwl->kw[index].parse(args, CFG_LISTEN, curproxy, &defproxy, trash, sizeof(trash));
++ rc = kwl->kw[index].parse(args, CFG_LISTEN, curproxy, &defproxy, trash, trashlen);
+ if (rc < 0) {
+ Alert("parsing [%s:%d] : %s\n", file, linenum, trash);
+ err_code |= ERR_ALERT | ERR_FATAL;
+--- a/src/checks.c
++++ b/src/checks.c
+@@ -235,7 +235,7 @@
+
+ int health, rise, fall, state;
+
+- chunk_init(&msg, trash, sizeof(trash));
++ chunk_init(&msg, trash, trashlen);
+
+ /* FIXME begin: calculate local version of the health/rise/fall/state */
+ health = s->health;
+@@ -385,7 +385,7 @@
+ */
+ xferred = redistribute_pending(s);
+
+- chunk_init(&msg, trash, sizeof(trash));
++ chunk_init(&msg, trash, trashlen);
+
+ if (s->state & SRV_MAINTAIN) {
+ chunk_printf(&msg,
+@@ -464,7 +464,7 @@
+ */
+ xferred = check_for_pending(s);
+
+- chunk_init(&msg, trash, sizeof(trash));
++ chunk_init(&msg, trash, trashlen);
+
+ if (s->state & SRV_MAINTAIN) {
+ chunk_printf(&msg,
+@@ -512,7 +512,7 @@
+ */
+ xferred = redistribute_pending(s);
+
+- chunk_init(&msg, trash, sizeof(trash));
++ chunk_init(&msg, trash, trashlen);
+
+ chunk_printf(&msg,
+ "Load-balancing on %sServer %s/%s is disabled",
+@@ -548,7 +548,7 @@
+ */
+ xferred = check_for_pending(s);
+
+- chunk_init(&msg, trash, sizeof(trash));
++ chunk_init(&msg, trash, trashlen);
+
+ chunk_printf(&msg,
+ "Load-balancing on %sServer %s/%s is enabled again",
+--- a/src/dumpstats.c
++++ b/src/dumpstats.c
+@@ -441,7 +441,7 @@
+ }
+
+ /* return server's effective weight at the moment */
+- snprintf(trash, sizeof(trash), "%d (initial %d)\n", sv->uweight, sv->iweight);
++ snprintf(trash, trashlen, "%d (initial %d)\n", sv->uweight, sv->iweight);
+ buffer_feed(si->ib, trash);
+ return 1;
+ }
+@@ -721,7 +721,7 @@
+ if (buffer_almost_full(si->ib))
+ break;
+
+- reql = buffer_si_peekline(si->ob, trash, sizeof(trash));
++ reql = buffer_si_peekline(si->ob, trash, trashlen);
+ if (reql <= 0) { /* closed or EOL not found */
+ if (reql == 0)
+ break;
+@@ -890,7 +890,7 @@
+ struct chunk msg;
+ unsigned int up;
+
+- chunk_init(&msg, trash, sizeof(trash));
++ chunk_init(&msg, trash, trashlen);
+
+ switch (s->data_state) {
+ case DATA_ST_INIT:
+@@ -1002,7 +1002,7 @@
+ {
+ struct chunk msg;
+
+- chunk_init(&msg, trash, sizeof(trash));
++ chunk_init(&msg, trash, trashlen);
+
+ switch (s->data_state) {
+ case DATA_ST_INIT:
+@@ -1103,7 +1103,7 @@
+ struct chunk msg;
+ unsigned int up;
+
+- chunk_init(&msg, trash, sizeof(trash));
++ chunk_init(&msg, trash, trashlen);
+
+ switch (s->data_state) {
+ case DATA_ST_INIT:
+@@ -1451,7 +1451,7 @@
+ struct listener *l;
+ struct chunk msg;
+
+- chunk_init(&msg, trash, sizeof(trash));
++ chunk_init(&msg, trash, trashlen);
+
+ switch (s->data_ctx.stats.px_st) {
+ case DATA_ST_PX_INIT:
+@@ -2502,7 +2502,7 @@
+ extern const char *monthname[12];
+ char pn[INET6_ADDRSTRLEN];
+
+- chunk_init(&msg, trash, sizeof(trash));
++ chunk_init(&msg, trash, trashlen);
+ sess = s->data_ctx.sess.target;
+
+ if (s->data_ctx.sess.section > 0 && s->data_ctx.sess.uid != sess->uniq_id) {
+@@ -2713,7 +2713,7 @@
+ return 1;
+ }
+
+- chunk_init(&msg, trash, sizeof(trash));
++ chunk_init(&msg, trash, trashlen);
+
+ switch (s->data_state) {
+ case DATA_ST_INIT:
+@@ -2983,7 +2983,7 @@
+ if (unlikely(rep->flags & (BF_WRITE_ERROR|BF_SHUTW)))
+ return 1;
+
+- chunk_init(&msg, trash, sizeof(trash));
++ chunk_init(&msg, trash, trashlen);
+
+ if (!s->data_ctx.errors.px) {
+ /* the function had not been called yet, let's prepare the
+--- a/src/haproxy.c
++++ b/src/haproxy.c
+@@ -135,7 +135,8 @@
+ static int oldpids_sig; /* use USR1 or TERM */
+
+ /* this is used to drain data, and as a temporary buffer for sprintf()... */
+-char trash[BUFSIZE];
++char *trash = NULL;
++int trashlen = BUFSIZE;
+
+ /* this buffer is always the same size as standard buffers and is used for
+ * swapping data inside a buffer.
+@@ -284,7 +285,7 @@
+
+ send_log(p, LOG_NOTICE, "SIGHUP received, dumping servers states for proxy %s.\n", p->id);
+ while (s) {
+- snprintf(trash, sizeof(trash),
++ snprintf(trash, trashlen,
+ "SIGHUP: Server %s/%s is %s. Conn: %d act, %d pend, %lld tot.",
+ p->id, s->id,
+ (s->state & SRV_RUNNING) ? "UP" : "DOWN",
+@@ -296,18 +297,18 @@
+
+ /* FIXME: those info are a bit outdated. We should be able to distinguish between FE and BE. */
+ if (!p->srv) {
+- snprintf(trash, sizeof(trash),
++ snprintf(trash, trashlen,
+ "SIGHUP: Proxy %s has no servers. Conn: act(FE+BE): %d+%d, %d pend (%d unass), tot(FE+BE): %lld+%lld.",
+ p->id,
+ p->feconn, p->beconn, p->totpend, p->nbpend, p->counters.cum_feconn, p->counters.cum_beconn);
+ } else if (p->srv_act == 0) {
+- snprintf(trash, sizeof(trash),
++ snprintf(trash, trashlen,
+ "SIGHUP: Proxy %s %s ! Conn: act(FE+BE): %d+%d, %d pend (%d unass), tot(FE+BE): %lld+%lld.",
+ p->id,
+ (p->srv_bck) ? "is running on backup servers" : "has no server available",
+ p->feconn, p->beconn, p->totpend, p->nbpend, p->counters.cum_feconn, p->counters.cum_beconn);
+ } else {
+- snprintf(trash, sizeof(trash),
++ snprintf(trash, trashlen,
+ "SIGHUP: Proxy %s has %d active servers and %d backup servers available."
+ " Conn: act(FE+BE): %d+%d, %d pend (%d unass), tot(FE+BE): %lld+%lld.",
+ p->id, p->srv_act, p->srv_bck,
+@@ -404,7 +405,7 @@
+ */
+
+ totalconn = actconn = maxfd = listeners = stopping = 0;
+-
++ trash = malloc(trashlen);
+
+ #ifdef HAPROXY_MEMMAX
+ global.rlimit_memmax = HAPROXY_MEMMAX;
+--- a/src/proto_http.c
++++ b/src/proto_http.c
+@@ -379,14 +379,14 @@
+ static void http_silent_debug(int line, struct session *s)
+ {
+ int size = 0;
+- size += snprintf(trash + size, sizeof(trash) - size,
++ size += snprintf(trash + size, trashlen - size,
+ "[%04d] req: p=%d(%d) s=%d bf=%08x an=%08x data=%p size=%d l=%d w=%p r=%p lr=%p sm=%d fw=%ld tf=%08x\n",
+ line,
+ s->si[0].state, s->si[0].fd, s->txn.req.msg_state, s->req->flags, s->req->analysers,
+ s->req->data, s->req->size, s->req->l, s->req->w, s->req->r, s->req->lr, s->req->send_max, s->req->to_forward, s->txn.flags);
+ write(-1, trash, size);
+ size = 0;
+- size += snprintf(trash + size, sizeof(trash) - size,
++ size += snprintf(trash + size, trashlen - size,
+ " %04d rep: p=%d(%d) s=%d bf=%08x an=%08x data=%p size=%d l=%d w=%p r=%p lr=%p sm=%d fw=%ld\n",
+ line,
+ s->si[1].state, s->si[1].fd, s->txn.rsp.msg_state, s->rep->flags, s->rep->analysers,
+@@ -751,7 +751,7 @@
+ /* 1: create the response header */
+ rdr.len = strlen(HTTP_302);
+ rdr.str = trash;
+- rdr.size = sizeof(trash);
++ rdr.size = trashlen;
+ memcpy(rdr.str, HTTP_302, rdr.len);
+
+ /* 2: add the server's prefix */
+@@ -3129,7 +3129,7 @@
+ realm = do_stats?STATS_DEFAULT_REALM:px->id;
+
+ sprintf(trash, (txn->flags & TX_USE_PX_CONN) ? HTTP_407_fmt : HTTP_401_fmt, realm);
+- chunk_initlen(&msg, trash, sizeof(trash), strlen(trash));
++ chunk_initlen(&msg, trash, trashlen, strlen(trash));
+ txn->status = 401;
+ stream_int_retnclose(req->prod, &msg);
+ goto return_prx_cond;
+@@ -3216,7 +3216,7 @@
+ }
+
+ if (ret) {
+- struct chunk rdr = { .str = trash, .size = sizeof(trash), .len = 0 };
++ struct chunk rdr = { .str = trash, .size = trashlen, .len = 0 };
+ const char *msg_fmt;
+
+ /* build redirect message */
+@@ -7359,7 +7359,7 @@
+ len = sprintf(trash, "%08x:%s.%s[%04x:%04x]: ", t->uniq_id, t->be->id,
+ dir, (unsigned short)t->req->prod->fd, (unsigned short)t->req->cons->fd);
+ max = end - start;
+- UBOUND(max, sizeof(trash) - len - 1);
++ UBOUND(max, trashlen - len - 1);
+ len += strlcpy2(trash + len, start, max + 1);
+ trash[len++] = '\n';
+ write(1, trash, len);
+--- a/tests/0000-debug-stats.diff
++++ b/tests/0000-debug-stats.diff
+@@ -35,12 +35,12 @@
+ +
+ + if (1) {
+ + for (i=0; i<16096; i++)
+-+ chunk_printf(&msg, sizeof(trash), "*");
+++ chunk_printf(&msg, trashlen, "*");
+ +
+-+ chunk_printf(&msg, sizeof(trash), "\n");
+++ chunk_printf(&msg, trashlen, "\n");
+ +#if 0
+ if (!(s->data_ctx.stats.flags & STAT_FMT_CSV)) {
+- chunk_printf(&msg, sizeof(trash),
++ chunk_printf(&msg, trashlen,
+ /* name, queue */
+ @@ -694,6 +702,7 @@ int stats_dump_proxy(struct session *s, struct proxy *px, struct uri_auth *uri)
+ px->failed_req,
+@@ -48,7 +48,7 @@
+ px->state == PR_STIDLE ? "FULL" : "STOP");
+ +#endif
+ } else {
+- chunk_printf(&msg, sizeof(trash),
++ chunk_printf(&msg, trashlen,
+ /* pxid, name, queue cur, queue max, */
+
+
diff -Nru haproxy-1.4.15/debian/patches/CVE-2013-1912 haproxy-1.4.15/debian/patches/CVE-2013-1912
--- haproxy-1.4.15/debian/patches/CVE-2013-1912	1970-01-01 01:00:00.000000000 +0100
+++ haproxy-1.4.15/debian/patches/CVE-2013-1912	2013-04-05 15:45:35.000000000 +0200
@@ -0,0 +1,99 @@
+From: Willy Tarreau <w...@1wt.eu>
+Date: Fri, 29 Mar 2013 11:31:49 +0000 (+0100)
+Subject: BUG/CRITICAL: using HTTP information in tcp-request content may crash the process
+X-Git-Tag: v1.4.23~1
+X-Git-Url: http://git.1wt.eu:81/web?p=haproxy-1.4.git;a=commitdiff_plain;h=dc80672211
+
+BUG/CRITICAL: using HTTP information in tcp-request content may crash the process
+
+During normal HTTP request processing, request buffers are realigned if
+there are less than global.maxrewrite bytes available after them, in
+order to leave enough room for rewriting headers after the request. This
+is done in http_wait_for_request().
+
+However, if some HTTP inspection happens during a "tcp-request content"
+rule, this realignment is not performed. In theory this is not a problem
+because empty buffers are always aligned and TCP inspection happens at
+the beginning of a connection. But with HTTP keep-alive, it also happens
+at the beginning of each subsequent request. So if a second request was
+pipelined by the client before the first one had a chance to be forwarded,
+the second request will not be realigned. Then, http_wait_for_request()
+will not perform such a realignment either because the request was
+already parsed and marked as such. The consequence of this, is that the
+rewrite of a sufficient number of such pipelined, unaligned requests may
+leave less room past the request been processed than the configured
+reserve, which can lead to a buffer overflow if request processing appends
+some data past the end of the buffer.
+
+A number of conditions are required for the bug to be triggered :
+ - HTTP keep-alive must be enabled ;
+ - HTTP inspection in TCP rules must be used ;
+ - some request appending rules are needed (reqadd, x-forwarded-for)
+ - since empty buffers are always realigned, the client must pipeline
+   enough requests so that the buffer always contains something till
+   the point where there is no more room for rewriting.
+
+While such a configuration is quite unlikely to be met (which is
+confirmed by the bug's lifetime), a few people do use these features
+together for very specific usages. And more importantly, writing such
+a configuration and the request to attack it is trivial.
+
+A quick workaround consists in forcing keep-alive off by adding
+"option httpclose" or "option forceclose" in the frontend. Alternatively,
+disabling HTTP-based TCP inspection rules enough if the application
+supports it.
+
+At first glance, this bug does not look like it could lead to remote code
+execution, as the overflowing part is controlled by the configuration and
+not by the user. But some deeper analysis should be performed to confirm
+this. And anyway, corrupting the process' memory and crashing it is quite
+trivial.
+
+Special thanks go to Yves Lafon from the W3C who reported this bug and
+deployed significant efforts to collect the relevant data needed to
+understand it in less than one week.
+
+CVE-2013-1912 was assigned to this issue.
+
+Note that 1.4 is also affected so the fix must be backported.
+(cherry picked from commit aae75e3279c6c9bd136413a72dafdcd4986bb89a)
+---
+
+--- a/src/proto_http.c
++++ b/src/proto_http.c
+@@ -8055,6 +8055,14 @@
+ return 1;
+ }
+
++ /* If the buffer does not leave enough free space at the end,
++ * we must first realign it.
++ */
++ if (unlikely(req->lr > req->data &&
++ (req->r < req->lr || req->r > req->data + req->size - global.tune.maxrewrite)) &&
++ (req->l <= req->size - global.tune.maxrewrite))
++ http_buffer_heavy_realign(req, msg);
++
+ /* Try to decode HTTP request */
+ if (likely(req->lr < req->r))
+ http_msg_analyzer(req, msg, &txn->hdr_idx);
+@@ -8072,6 +8080,20 @@
+ /* OK we got a valid HTTP request. We have some minor preparation to
+ * perform so that further checks can rely on HTTP tests.
+ */
++
++ /* If the request was parsed but was too large, we must absolutely
++ * return an error so that it is not processed. At the moment this
++ * cannot happen, but if the parsers are to change in the future,
++ * we want this check to be maintained.
++ */
++ if (unlikely(req->lr > req->data &&
++ (req->r < req->lr || req->l > req->size - global.tune.maxrewrite ||
++ req->r > req->data + req->size - global.tune.maxrewrite))) {
++ msg->msg_state = HTTP_MSG_ERROR;
++ test->flags |= ACL_TEST_F_SET_RES_PASS;
++ return 1;
++ }
++
+ txn->meth = find_http_meth(msg->sol, msg->sl.rq.m_l);
+ if (txn->meth == HTTP_METH_GET || txn->meth == HTTP_METH_HEAD)
+ s->flags |= SN_REDIRECTABLE;
diff -Nru haproxy-1.4.15/debian/patches/series haproxy-1.4.15/debian/patches/series
--- haproxy-1.4.15/debian/patches/series	2011-03-11 01:05:02.000000000 +0100
+++ haproxy-1.4.15/debian/patches/series	2013-04-05 15:45:35.000000000 +0200
@@ -1 +1,3 @@
 bashism
+CVE-2012-2942
+CVE-2013-1912
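Note (added for context, not part of the debdiff): the upstream commit message quoted above suggests "option httpclose" or "option forceclose" as a stop-gap for CVE-2013-1912 until a fixed package is available. In a frontend that combines tcp-request content inspection with keep-alive, that would look roughly like the following sketch; the proxy names and backend are made up and the snippet is untested:

  frontend fe_http
      bind :80
      # HTTP inspection from a tcp-request content rule -- one of the
      # preconditions listed in the advisory above.
      tcp-request inspect-delay 5s
      tcp-request content accept if HTTP
      # Workaround from the commit message: disable keep-alive so pipelined
      # requests cannot pile up in an unaligned request buffer.
      option httpclose
      default_backend be_http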