Apply patch for CVE-2023-6516:
To keep its cache database efficient, `named` running as a recursive resolver occasionally attempts to clean up the database. It uses several methods, including some that are asynchronous: a small chunk of memory pointing to the cache element that can be cleaned up is first allocated and then queued for later processing. It was discovered that if the resolver is continuously processing query patterns triggering this type of cache-database maintenance, `named` may not be able to handle the cleanup events in a timely manner. This in turn enables the list of queued cleanup events to grow infinitely large over time, allowing the configured `max-cache-size` limit to be significantly exceeded. This issue affects BIND 9 versions 9.16.0 through 9.16.45 and 9.16.8-S1 through 9.16.45-S1.
This commit is contained in:
parent 5a76d1fd2a
commit f120c4bc4d
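The essence of the fix is visible in the rbtdb.c changes below: instead of allocating and queueing a separate prune event for every node that becomes prunable (which lets the event queue, and with it memory use, grow without bound under a hostile query pattern), each candidate node is linked into a per-database `prunenodes` list, and a single prune event is queued only when that list goes from empty to non-empty. The standalone C sketch below models that pattern with simplified types; `node_t`, `db_t` and `enqueue_prune_event()` are illustrative stand-ins, not BIND's actual API, and locking is omitted.

/*
 * Minimal, self-contained model of the CVE-2023-6516 fix: coalesce
 * per-node cleanup work into one list and at most one queued event.
 * All types and helpers here are illustrative stand-ins, not BIND APIs.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct node {
	struct node *prunelink;	/* intrusive link: next node awaiting pruning */
	bool linked;		/* guards against double insertion */
} node_t;

typedef struct db {
	node_t *prunehead;	/* head of the list of prune starting points */
	node_t **prunetail;	/* tail pointer for O(1) append */
	unsigned int events_queued;	/* stands in for the task queue */
} db_t;

/* Stand-in for queueing a prune event to a worker task: just count it. */
static void
enqueue_prune_event(db_t *db) {
	db->events_queued++;
}

/* Patched behaviour: append the node, queue an event only if none pending. */
static void
send_to_prune_tree(db_t *db, node_t *node) {
	bool pruning_queued = (db->prunehead != NULL);

	if (node->linked) {
		return;		/* already awaiting pruning */
	}
	node->prunelink = NULL;
	node->linked = true;
	*db->prunetail = node;
	db->prunetail = &node->prunelink;

	if (!pruning_queued) {
		enqueue_prune_event(db);	/* at most one event in flight */
	}
}

/* The (single) prune event drains the whole list in one pass. */
static void
prune_tree(db_t *db) {
	while (db->prunehead != NULL) {
		node_t *node = db->prunehead;

		db->prunehead = node->prunelink;
		node->linked = false;
		/* ... free the node and walk up toward the tree root ... */
	}
	db->prunetail = &db->prunehead;
}

int
main(void) {
	static node_t nodes[100000];
	db_t db = { .prunehead = NULL, .prunetail = &db.prunehead,
		    .events_queued = 0 };

	for (size_t i = 0; i < sizeof(nodes) / sizeof(nodes[0]); i++) {
		send_to_prune_tree(&db, &nodes[i]);
	}
	/* Only one event was queued for 100000 prunable nodes. */
	printf("events queued: %u\n", db.events_queued);
	prune_tree(&db);
	return 0;
}

The actual patch below implements the same idea with ISC's intrusive-list macros (ISC_LINK/ISC_LIST) and task events.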
@@ -1,4 +1,4 @@
-/* $NetBSD: rbt.h,v 1.6 2022/09/23 12:15:30 christos Exp $ */
+/* $NetBSD: rbt.h,v 1.7 2024/02/13 15:24:47 christos Exp $ */
 
 /*
  * Copyright (C) Internet Systems Consortium, Inc. ("ISC")
@@ -142,6 +142,12 @@ struct dns_rbtnode {
 	 */
 	ISC_LINK(dns_rbtnode_t) deadlink;
 
+	/*%
+	 * This linked list is used to store nodes from which tree pruning can
+	 * be started.
+	 */
+	ISC_LINK(dns_rbtnode_t) prunelink;
+
 	/*@{*/
 	/*!
 	 * These values are used in the RBT DB implementation. The appropriate
@@ -13,4 +13,4 @@
 # Whenever releasing a new major release of BIND9, set this value
 # back to 1.0 when releasing the first alpha. Map files are *never*
 # compatible across major releases.
-MAPAPI=3.0
+MAPAPI=4.0
@@ -1,4 +1,4 @@
-/* $NetBSD: rbt.c,v 1.13 2023/06/26 22:03:00 christos Exp $ */
+/* $NetBSD: rbt.c,v 1.14 2024/02/13 15:24:47 christos Exp $ */
 
 /*
  * Copyright (C) Internet Systems Consortium, Inc. ("ISC")
@@ -2310,6 +2310,7 @@ create_node(isc_mem_t *mctx, const dns_name_t *name, dns_rbtnode_t **nodep) {
 	HASHVAL(node) = 0;
 
 	ISC_LINK_INIT(node, deadlink);
+	ISC_LINK_INIT(node, prunelink);
 
 	LOCKNUM(node) = 0;
 	WILD(node) = 0;
@@ -1,4 +1,4 @@
-/* $NetBSD: rbtdb.c,v 1.17 2023/06/26 22:03:00 christos Exp $ */
+/* $NetBSD: rbtdb.c,v 1.18 2024/02/13 15:24:47 christos Exp $ */
 
 /*
  * Copyright (C) Internet Systems Consortium, Inc. ("ISC")
@@ -523,6 +523,10 @@ struct dns_rbtdb {
 	 */
 	rbtnodelist_t *deadnodes;
 
+	/* List of nodes from which recursive tree pruning can be started from.
+	 * Locked by tree_lock. */
+	rbtnodelist_t prunenodes;
+
 	/*
 	 * Heaps. These are used for TTL based expiry in a cache,
 	 * or for zone resigning in a zone DB. hmctx is the memory
@@ -1069,6 +1073,7 @@ free_rbtdb(dns_rbtdb_t *rbtdb, bool log, isc_event_t *event) {
 	unsigned int i;
 	isc_result_t result;
 	char buf[DNS_NAME_FORMATSIZE];
+	dns_rbtnode_t *node = NULL;
 	dns_rbt_t **treep;
 	isc_time_t start;
 
@@ -1094,8 +1099,6 @@ free_rbtdb(dns_rbtdb_t *rbtdb, bool log, isc_event_t *event) {
 	 * the overhead of unlinking all nodes here should be negligible.
 	 */
 	for (i = 0; i < rbtdb->node_lock_count; i++) {
-		dns_rbtnode_t *node;
-
 		node = ISC_LIST_HEAD(rbtdb->deadnodes[i]);
 		while (node != NULL) {
 			ISC_LIST_UNLINK(rbtdb->deadnodes[i], node, deadlink);
@@ -1103,6 +1106,12 @@ free_rbtdb(dns_rbtdb_t *rbtdb, bool log, isc_event_t *event) {
 		}
 	}
 
+	node = ISC_LIST_HEAD(rbtdb->prunenodes);
+	while (node != NULL) {
+		ISC_LIST_UNLINK(rbtdb->prunenodes, node, prunelink);
+		node = ISC_LIST_HEAD(rbtdb->prunenodes);
+	}
+
 	if (event == NULL) {
 		rbtdb->quantum = (rbtdb->task != NULL) ? 100 : 0;
 	}
@@ -1937,19 +1946,32 @@ is_leaf(dns_rbtnode_t *node) {
 			node->left == NULL && node->right == NULL);
 }
 
+/*%
+ * The tree lock must be held when this function is called as it reads and
+ * updates rbtdb->prunenodes.
+ */
 static void
 send_to_prune_tree(dns_rbtdb_t *rbtdb, dns_rbtnode_t *node,
 		   isc_rwlocktype_t locktype) {
-	isc_event_t *ev;
-	dns_db_t *db;
+	bool pruning_queued = (ISC_LIST_HEAD(rbtdb->prunenodes) != NULL);
 
-	ev = isc_event_allocate(rbtdb->common.mctx, NULL, DNS_EVENT_RBTPRUNE,
-				prune_tree, node, sizeof(isc_event_t));
+	INSIST(locktype == isc_rwlocktype_write);
+
 	new_reference(rbtdb, node, locktype);
-	db = NULL;
-	attach((dns_db_t *)rbtdb, &db);
-	ev->ev_sender = db;
-	isc_task_send(rbtdb->task, &ev);
+	INSIST(!ISC_LINK_LINKED(node, prunelink));
+	ISC_LIST_APPEND(rbtdb->prunenodes, node, prunelink);
+
+	if (!pruning_queued) {
+		isc_event_t *ev = NULL;
+		dns_db_t *db = NULL;
+
+		attach((dns_db_t *)rbtdb, &db);
+
+		ev = isc_event_allocate(rbtdb->common.mctx, NULL,
+					DNS_EVENT_RBTPRUNE, prune_tree, db,
+					sizeof(isc_event_t));
+		isc_task_send(rbtdb->task, &ev);
+	}
 }
 
 /*%
@@ -2224,17 +2246,26 @@ restore_locks:
 }
 
 /*
- * Prune the tree by recursively cleaning-up single leaves. In the worst
- * case, the number of iteration is the number of tree levels, which is at
- * most the maximum number of domain name labels, i.e, 127. In practice, this
- * should be much smaller (only a few times), and even the worst case would be
- * acceptable for a single event.
+ * Prune the tree by recursively cleaning up single leaves. Go through all
+ * nodes stored in the rbtdb->prunenodes list; for each of them, in the worst
+ * case, it will be necessary to traverse a number of tree levels equal to the
+ * maximum legal number of domain name labels (127); in practice, the number of
+ * tree levels to traverse will virtually always be much smaller (a few levels
+ * at most). While holding the tree lock throughout this entire operation is
+ * less than ideal, so is splitting the latter up by queueing a separate
+ * prune_tree() run for each node to start pruning from (as queueing requires
+ * allocating memory and can therefore potentially be exploited to exhaust
+ * available memory). Also note that actually freeing up the memory used by
+ * RBTDB nodes (which is what this function does) is essential to keeping cache
+ * memory use in check, so since the tree lock needs to be acquired anyway,
+ * freeing as many nodes as possible before the tree lock gets released is
+ * prudent.
  */
 static void
 prune_tree(isc_task_t *task, isc_event_t *event) {
-	dns_rbtdb_t *rbtdb = event->ev_sender;
-	dns_rbtnode_t *node = event->ev_arg;
-	dns_rbtnode_t *parent;
+	dns_rbtdb_t *rbtdb = (dns_rbtdb_t *)event->ev_arg;
+	dns_rbtnode_t *node = NULL;
+	dns_rbtnode_t *parent = NULL;
 	unsigned int locknum;
 
 	UNUSED(task);
@@ -2242,44 +2273,60 @@ prune_tree(isc_task_t *task, isc_event_t *event) {
 	isc_event_free(&event);
 
 	RWLOCK(&rbtdb->tree_lock, isc_rwlocktype_write);
-	locknum = node->locknum;
-	NODE_LOCK(&rbtdb->node_locks[locknum].lock, isc_rwlocktype_write);
-	do {
-		parent = node->parent;
-		decrement_reference(rbtdb, node, 0, isc_rwlocktype_write,
-				    isc_rwlocktype_write, true);
+
+	while ((node = ISC_LIST_HEAD(rbtdb->prunenodes)) != NULL) {
+		locknum = node->locknum;
+		NODE_LOCK(&rbtdb->node_locks[locknum].lock,
+			  isc_rwlocktype_write);
+		do {
+			if (ISC_LINK_LINKED(node, prunelink)) {
+				ISC_LIST_UNLINK(rbtdb->prunenodes, node,
+						prunelink);
+			}
+
+			parent = node->parent;
+			decrement_reference(rbtdb, node, 0,
+					    isc_rwlocktype_write,
+					    isc_rwlocktype_write, true);
 
-		if (parent != NULL && parent->down == NULL) {
-			/*
-			 * node was the only down child of the parent and has
-			 * just been removed. We'll then need to examine the
-			 * parent. Keep the lock if possible; otherwise,
-			 * release the old lock and acquire one for the parent.
-			 */
-			if (parent->locknum != locknum) {
-				NODE_UNLOCK(&rbtdb->node_locks[locknum].lock,
-					    isc_rwlocktype_write);
-				locknum = parent->locknum;
-				NODE_LOCK(&rbtdb->node_locks[locknum].lock,
-					  isc_rwlocktype_write);
-			}
+			if (parent != NULL && parent->down == NULL) {
+				/*
+				 * node was the only down child of the parent
+				 * and has just been removed. We'll then need
+				 * to examine the parent. Keep the lock if
+				 * possible; otherwise, release the old lock and
+				 * acquire one for the parent.
+				 */
+				if (parent->locknum != locknum) {
+					NODE_UNLOCK(
+						&rbtdb->node_locks[locknum].lock,
+						isc_rwlocktype_write);
+					locknum = parent->locknum;
+					NODE_LOCK(
+						&rbtdb->node_locks[locknum].lock,
+						isc_rwlocktype_write);
+				}
 
-			/*
-			 * We need to gain a reference to the node before
-			 * decrementing it in the next iteration.
-			 */
-			if (ISC_LINK_LINKED(parent, deadlink)) {
-				ISC_LIST_UNLINK(rbtdb->deadnodes[locknum],
-						parent, deadlink);
-			}
-			new_reference(rbtdb, parent, isc_rwlocktype_write);
-		} else {
-			parent = NULL;
-		}
+				/*
+				 * We need to gain a reference to the node
+				 * before decrementing it in the next iteration.
+				 */
+				if (ISC_LINK_LINKED(parent, deadlink)) {
+					ISC_LIST_UNLINK(
+						rbtdb->deadnodes[locknum],
+						parent, deadlink);
+				}
+				new_reference(rbtdb, parent,
+					      isc_rwlocktype_write);
+			} else {
+				parent = NULL;
+			}
 
-		node = parent;
-	} while (node != NULL);
-	NODE_UNLOCK(&rbtdb->node_locks[locknum].lock, isc_rwlocktype_write);
+			node = parent;
+		} while (node != NULL);
+		NODE_UNLOCK(&rbtdb->node_locks[locknum].lock,
+			    isc_rwlocktype_write);
+	}
 	RWUNLOCK(&rbtdb->tree_lock, isc_rwlocktype_write);
 
 	detach((dns_db_t **)(void *)&rbtdb);
@@ -8737,6 +8784,8 @@ dns_rbtdb_create(isc_mem_t *mctx, const dns_name_t *origin, dns_dbtype_t type,
 		ISC_LIST_INIT(rbtdb->deadnodes[i]);
 	}
 
+	ISC_LIST_INIT(rbtdb->prunenodes);
+
 	rbtdb->active = rbtdb->node_lock_count;
 
 	for (i = 0; i < (int)(rbtdb->node_lock_count); i++) {
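As a closing note on the reworked prune_tree() above: it drains the whole prunenodes list under a single acquisition of the tree lock, and for each starting node it walks parent pointers upward, deleting every ancestor that has just lost its only child. The toy model below sketches that upward walk under the assumption of a simple parent-pointer tree; tnode_t, child_count and delete_node() are hypothetical stand-ins rather than the RBT structures, and node locking and reference counting are omitted for brevity.

/*
 * Toy model of recursive leaf pruning by walking parent pointers.
 * Types and helpers are illustrative only; they are not BIND's RBT code.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct tnode {
	struct tnode *parent;
	unsigned int child_count;	/* how many children hang off this node */
	const char *label;
} tnode_t;

static void
delete_node(tnode_t *node) {
	printf("pruned %s\n", node->label);
	free(node);
}

/*
 * Starting from a leaf, delete it, then keep deleting ancestors that have
 * become childless, mirroring the do/while loop in prune_tree().
 */
static void
prune_upward(tnode_t *leaf) {
	tnode_t *node = leaf;

	while (node != NULL) {
		tnode_t *parent = node->parent;

		delete_node(node);
		if (parent != NULL && --parent->child_count == 0) {
			node = parent;	/* parent is now a leaf; prune it too */
		} else {
			node = NULL;	/* parent still has other children */
		}
	}
}

static tnode_t *
make_node(tnode_t *parent, const char *label) {
	tnode_t *node = malloc(sizeof(*node));

	node->parent = parent;
	node->child_count = 0;
	node->label = label;
	if (parent != NULL) {
		parent->child_count++;
	}
	return node;
}

int
main(void) {
	/* "org" has children "example" and "isc"; "example" has child "www". */
	tnode_t *org = make_node(NULL, "org");
	tnode_t *example = make_node(org, "example");
	tnode_t *www = make_node(example, "www");
	tnode_t *isc = make_node(org, "isc");

	prune_upward(www);	/* removes www and example, stops at org */
	prune_upward(isc);	/* removes isc and then org itself */
	return 0;
}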