From cda67ac5ac69b839c4b4d77baf4e413bad9224e6 Mon Sep 17 00:00:00 2001
From: Richard Fuchs
Date: Tue, 27 Jul 2021 11:14:13 -0400
Subject: [PATCH] TT#14008 fix call iterator race condition

The contents of the ->next element cannot be accessed completely
lock-free as they're zeroed out during call removal. Instead grab a
reference to the linked next call before releasing the lock, and also
lock the next element before moving on. This requires more granular
locking so as not to interfere with call removal: one lock to protect
the contained call and the ->next, and another to protect the ->prev.

Change-Id: I5474ea3f88e3276f93ba62a952b3be13c0c182e9
---
 daemon/call.c  | 30 +++++++++++++++++-------------
 include/call.h | 24 +++++++++++++++++++-----
 2 files changed, 36 insertions(+), 18 deletions(-)

diff --git a/daemon/call.c b/daemon/call.c
index 02c4abdb8..3619d48a7 100644
--- a/daemon/call.c
+++ b/daemon/call.c
@@ -2822,21 +2822,22 @@ void call_destroy(struct call *c) {
 		while (1) {
 			mutex_lock(&rtpe_call_iterators[i].lock);
 			// lock this entry
-			mutex_lock(&c->iterator[i].lock);
+			mutex_lock(&c->iterator[i].next_lock);
+			mutex_lock(&c->iterator[i].prev_lock);
 			// try lock adjacent entries
 			prev_call = c->iterator[i].link.prev ? c->iterator[i].link.prev->data : NULL;
 			next_call = c->iterator[i].link.next ? c->iterator[i].link.next->data : NULL;
 			if (prev_call) {
-				if (mutex_trylock(&prev_call->iterator[i].lock)) {
-					mutex_unlock(&c->iterator[i].lock);
+				if (mutex_trylock(&prev_call->iterator[i].next_lock)) {
+					mutex_unlock(&c->iterator[i].next_lock);
 					mutex_unlock(&rtpe_call_iterators[i].lock);
 					continue; // try again
 				}
 			}
 			if (next_call) {
-				if (mutex_trylock(&next_call->iterator[i].lock)) {
-					mutex_unlock(&prev_call->iterator[i].lock);
-					mutex_unlock(&c->iterator[i].lock);
+				if (mutex_trylock(&next_call->iterator[i].prev_lock)) {
+					mutex_unlock(&prev_call->iterator[i].next_lock);
+					mutex_unlock(&c->iterator[i].next_lock);
 					mutex_unlock(&rtpe_call_iterators[i].lock);
 					continue; // try again
 				}
@@ -2849,10 +2850,11 @@ void call_destroy(struct call *c) {
 					&c->iterator[i].link);
 			ZERO(c->iterator[i].link);
 			if (prev_call)
-				mutex_unlock(&prev_call->iterator[i].lock);
+				mutex_unlock(&prev_call->iterator[i].next_lock);
 			if (next_call)
-				mutex_unlock(&next_call->iterator[i].lock);
-			mutex_unlock(&c->iterator[i].lock);
+				mutex_unlock(&next_call->iterator[i].prev_lock);
+			mutex_unlock(&c->iterator[i].next_lock);
+			mutex_unlock(&c->iterator[i].prev_lock);
 			mutex_unlock(&rtpe_call_iterators[i].lock);
 		}
 
@@ -3106,8 +3108,10 @@ static struct call *call_create(const str *callid) {
 
 	c->dtls_cert = dtls_cert();
 	c->tos = rtpe_config.default_tos;
-	for (int i = 0; i < NUM_CALL_ITERATORS; i++)
-		mutex_init(&c->iterator[i].lock);
+	for (int i = 0; i < NUM_CALL_ITERATORS; i++) {
+		mutex_init(&c->iterator[i].next_lock);
+		mutex_init(&c->iterator[i].prev_lock);
+	}
 
 	return c;
 }
@@ -3150,7 +3154,7 @@ restart:
 		if (rtpe_call_iterators[i].first) {
 			first_call = rtpe_call_iterators[i].first->data;
 			// coverity[lock_order : FALSE]
-			if (mutex_trylock(&first_call->iterator[i].lock)) {
+			if (mutex_trylock(&first_call->iterator[i].prev_lock)) {
 				mutex_unlock(&rtpe_call_iterators[i].lock);
 				continue; // retry
 			}
@@ -3162,7 +3166,7 @@ restart:
 			= g_list_insert_before_link(rtpe_call_iterators[i].first,
 					rtpe_call_iterators[i].first, &c->iterator[i].link);
 		if (first_call)
-			mutex_unlock(&first_call->iterator[i].lock);
+			mutex_unlock(&first_call->iterator[i].prev_lock);
 		mutex_unlock(&rtpe_call_iterators[i].lock);
 	}
 
diff --git a/include/call.h b/include/call.h
index 6c9441f95..0fe59e1f8 100644
--- a/include/call.h
+++ b/include/call.h
@@ -427,7 +427,8 @@ struct call_iterator_list {
 };
 struct call_iterator_entry {
 	GList link; // .data is protected by the list's main lock
-	mutex_t lock; // held while the link is in use, protects link.prev and link.next
+	mutex_t next_lock; // held while the link is in use, protects link.data and link.next
+	mutex_t prev_lock; // held while the link is in use, protects link.prev
 };
 
 #define ITERATE_CALL_LIST_START(which, varname) \
@@ -436,15 +437,28 @@ struct call_iterator_entry {
 		mutex_lock(&rtpe_call_iterators[__which].lock); \
 		\
 		GList *__l = rtpe_call_iterators[__which].first; \
+		struct call *next_ ## varname = NULL; \
 		while (__l) { \
-			struct call *varname = __l->data; \
-			obj_hold(varname); \
-			mutex_lock(&varname->iterator[__which].lock); \
+			struct call *varname = NULL; \
+			if (next_ ## varname) \
+				varname = next_ ## varname; \
+			else { \
+				varname = __l->data; \
+				obj_hold(varname); \
+				mutex_lock(&varname->iterator[__which].next_lock); \
+			} \
 			mutex_unlock(&rtpe_call_iterators[__which].lock)
 
 #define ITERATE_CALL_LIST_NEXT_END(varname) \
 		GList *__next = varname->iterator[__which].link.next; \
-		mutex_unlock(&varname->iterator[__which].lock); \
+		if (__next) { \
+			next_ ## varname = __next->data; \
+			obj_hold(next_ ## varname); \
+			mutex_lock(&next_ ## varname->iterator[__which].next_lock); \
+		} \
+		else \
+			next_ ## varname = NULL; \
+		mutex_unlock(&varname->iterator[__which].next_lock); \
 		__l = __next; \
 		obj_put(varname); \
 		mutex_lock(&rtpe_call_iterators[__which].lock); \
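
The commit message describes a hand-over-hand (lock-coupling) walk: the iterator keeps the current entry's next_lock held while it takes a reference on and locks the next entry, and only then releases the current one, so the next entry cannot be unlinked and zeroed out in between. Below is a minimal standalone sketch of that scheme using plain pthreads and a simplified node type; all names in it (struct node, iterate_nodes, visit, refs) are hypothetical stand-ins for illustration and are not part of the rtpengine code.

/*
 * Illustrative sketch only -- a simplified model of the scheme in the
 * commit message, not rtpengine code. Each node carries two locks:
 * next_lock protects the node's payload and its ->next pointer, prev_lock
 * protects ->prev. Removal (not shown) is assumed to hold the list lock,
 * the node's own locks, the previous node's next_lock and the next node's
 * prev_lock, so an iterator holding cur->next_lock pins both cur and
 * cur->next.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

struct node {
	struct node *prev;
	struct node *next;
	pthread_mutex_t next_lock;  /* protects payload and ->next */
	pthread_mutex_t prev_lock;  /* protects ->prev */
	atomic_int refs;            /* stand-in for obj_hold()/obj_put() */
};

/* Hand-over-hand iteration: while still holding cur->next_lock, take a
 * reference on and lock the next node, and only then release cur. */
void iterate_nodes(pthread_mutex_t *list_lock, struct node *first,
		void (*visit)(struct node *))
{
	pthread_mutex_lock(list_lock);
	struct node *cur = first;
	if (cur) {
		atomic_fetch_add(&cur->refs, 1);          /* "obj_hold" */
		pthread_mutex_lock(&cur->next_lock);
	}
	pthread_mutex_unlock(list_lock);

	while (cur) {
		visit(cur);                               /* payload safe under next_lock */

		struct node *next = cur->next;            /* ->next safe under next_lock */
		if (next) {
			atomic_fetch_add(&next->refs, 1);
			pthread_mutex_lock(&next->next_lock); /* lock next before dropping cur */
		}
		pthread_mutex_unlock(&cur->next_lock);
		atomic_fetch_sub(&cur->refs, 1);          /* "obj_put" */
		cur = next;
	}
}

Splitting the per-entry lock in two keeps the iterator's footprint to the ->next side only, while removal, which also has to touch the neighbours' ->prev side, acquires the adjacent locks with trylock-and-retry (as in call_destroy above) so it cannot deadlock against a concurrent iterator.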