pjproject-bundled: Add upstream timer fixes

Fixed #2191:
  - Stricter prevention of double scheduling of the same timer entry
    (a short sketch follows below).
  - Integrate group lock in SIP transport, e.g. for add/dec ref and
    for timer scheduling.

ASTERISK-28161
Reported-by: Ross Beer

Change-Id: I2e09aa66de0dda9414d8a8259a649c4d2d96a9f5
Branch: 17.0
Author: Joshua Colp
parent 0575a8923b
commit 3853fab3f5
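
For context, a minimal sketch of the scheduling guard the first patch adds inside pjlib's schedule_w_grp_lock(): an entry that is already running is now rejected with PJ_EINVALIDOP (and logged) instead of tripping an assert. The wrapper name schedule_once and its parameters are illustrative, not part of the patch:

    #include <pjlib.h>

    /* Illustrative wrapper: refuse to schedule an entry that is already
     * pending, mirroring the check the patch adds inside the timer heap. */
    static pj_status_t schedule_once(pj_timer_heap_t *ht,
                                     pj_timer_entry *entry,
                                     const pj_time_val *delay,
                                     pj_grp_lock_t *grp_lock)
    {
        if (pj_timer_entry_running(entry))
            return PJ_EINVALIDOP;   /* same error the patched heap returns */

        /* id_val of PJ_TRUE is stored into entry->id once it is queued */
        return pj_timer_heap_schedule_w_grp_lock(ht, entry, delay,
                                                 PJ_TRUE, grp_lock);
    }

Note that this caller-side check is only an early out: it runs outside the heap lock, so the patched heap re-checks pj_timer_entry_running() again under its own lock before queuing the entry.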

@@ -0,0 +1,372 @@
From 27a076f2f6c6007c0ba41d2868a803c4d841e815 Mon Sep 17 00:00:00 2001
From: nanang <nanang@localhost>
Date: Tue, 23 Apr 2019 08:42:45 +0000
Subject: [PATCH] Fixed #2191: - Stricter double timer entry scheduling
prevention. - Integrate group lock in SIP transport, e.g: for add/dec ref,
for timer scheduling.
---
pjlib/include/pj/timer.h | 2 +-
pjlib/src/pj/timer.c | 11 +++++++-
pjsip/include/pjsip/sip_endpoint.h | 39 +++++++++++++++++++++++++++++
pjsip/include/pjsip/sip_transport.h | 2 ++
pjsip/src/pjsip/sip_endpoint.c | 36 ++++++++++++++++++++++++++
pjsip/src/pjsip/sip_transport.c | 36 +++++++++++++++++++++-----
pjsip/src/pjsip/sip_transport_tcp.c | 10 +++++---
pjsip/src/pjsip/sip_transport_tls.c | 14 ++++++++---
pjsip/src/pjsip/sip_transport_udp.c | 2 ++
9 files changed, 137 insertions(+), 15 deletions(-)
diff --git a/pjlib/include/pj/timer.h b/pjlib/include/pj/timer.h
index df6155a81..14857b872 100644
--- a/pjlib/include/pj/timer.h
+++ b/pjlib/include/pj/timer.h
@@ -252,9 +252,9 @@ PJ_DECL(pj_status_t) pj_timer_heap_schedule( pj_timer_heap_t *ht,
*
* @param ht The timer heap.
* @param entry The entry to be registered.
+ * @param delay The interval to expire.
* @param id_val The value to be set to the "id" field of the timer entry
* once the timer is scheduled.
- * @param delay The interval to expire.
* @param grp_lock The group lock.
*
* @return PJ_SUCCESS, or the appropriate error code.
diff --git a/pjlib/src/pj/timer.c b/pjlib/src/pj/timer.c
index f0a2cbbc9..cbdd9791f 100644
--- a/pjlib/src/pj/timer.c
+++ b/pjlib/src/pj/timer.c
@@ -502,7 +502,7 @@ static pj_status_t schedule_w_grp_lock(pj_timer_heap_t *ht,
PJ_ASSERT_RETURN(entry->cb != NULL, PJ_EINVAL);
/* Prevent same entry from being scheduled more than once */
- PJ_ASSERT_RETURN(entry->_timer_id < 1, PJ_EINVALIDOP);
+ //PJ_ASSERT_RETURN(entry->_timer_id < 1, PJ_EINVALIDOP);
#if PJ_TIMER_DEBUG
entry->src_file = src_file;
@@ -512,6 +512,15 @@ static pj_status_t schedule_w_grp_lock(pj_timer_heap_t *ht,
PJ_TIME_VAL_ADD(expires, *delay);
lock_timer_heap(ht);
+
+ /* Prevent same entry from being scheduled more than once */
+ if (pj_timer_entry_running(entry)) {
+ unlock_timer_heap(ht);
+ PJ_LOG(3,(THIS_FILE, "Bug! Rescheduling outstanding entry (%p)",
+ entry));
+ return PJ_EINVALIDOP;
+ }
+
status = schedule_entry(ht, entry, &expires);
if (status == PJ_SUCCESS) {
if (set_id)
diff --git a/pjsip/include/pjsip/sip_endpoint.h b/pjsip/include/pjsip/sip_endpoint.h
index 99683fbe1..ee967f8d9 100644
--- a/pjsip/include/pjsip/sip_endpoint.h
+++ b/pjsip/include/pjsip/sip_endpoint.h
@@ -138,6 +138,7 @@ PJ_DECL(pj_status_t) pjsip_endpt_handle_events( pjsip_endpoint *endpt,
PJ_DECL(pj_status_t) pjsip_endpt_handle_events2(pjsip_endpoint *endpt,
const pj_time_val *max_timeout,
unsigned *count);
+
/**
* Schedule timer to endpoint's timer heap. Application must poll the endpoint
* periodically (by calling #pjsip_endpt_handle_events) to ensure that the
@@ -166,6 +167,44 @@ PJ_DECL(pj_status_t) pjsip_endpt_schedule_timer( pjsip_endpoint *endpt,
const pj_time_val *delay );
#endif
+/**
+ * Schedule timer to endpoint's timer heap with group lock. Application must
+ * poll the endpoint periodically (by calling #pjsip_endpt_handle_events) to
+ * ensure that the timer events are handled in a timely manner. When the
+ * timeout for the timer has elapsed, the callback specified in the entry
+ * argument will be called. This function, like all other endpoint functions,
+ * is thread safe.
+ *
+ * @param endpt The endpoint.
+ * @param entry The timer entry.
+ * @param delay The relative delay of the timer.
+ * @param id_val The value to be set to the "id" field of the timer entry
+ * once the timer is scheduled.
+ * @param grp_lock The group lock.
+ * @return PJ_OK (zero) if successful.
+ */
+#if PJ_TIMER_DEBUG
+#define pjsip_endpt_schedule_timer_w_grp_lock(ept,ent,d,id,gl) \
+ pjsip_endpt_schedule_timer_w_grp_lock_dbg(ept,ent,d,id,gl,\
+ __FILE__, __LINE__)
+
+PJ_DECL(pj_status_t) pjsip_endpt_schedule_timer_w_grp_lock_dbg(
+ pjsip_endpoint *endpt,
+ pj_timer_entry *entry,
+ const pj_time_val *delay,
+ int id_val,
+ pj_grp_lock_t *grp_lock,
+ const char *src_file,
+ int src_line);
+#else
+PJ_DECL(pj_status_t) pjsip_endpt_schedule_timer_w_grp_lock(
+ pjsip_endpoint *endpt,
+ pj_timer_entry *entry,
+ const pj_time_val *delay,
+ int id_val,
+ pj_grp_lock_t *grp_lock );
+#endif
+
/**
* Cancel the previously registered timer.
* This function, like all other endpoint functions, is thread safe.
diff --git a/pjsip/include/pjsip/sip_transport.h b/pjsip/include/pjsip/sip_transport.h
index addc8d521..d1ff3618b 100644
--- a/pjsip/include/pjsip/sip_transport.h
+++ b/pjsip/include/pjsip/sip_transport.h
@@ -810,6 +810,8 @@ struct pjsip_transport
pj_pool_t *pool; /**< Pool used by transport. */
pj_atomic_t *ref_cnt; /**< Reference counter. */
pj_lock_t *lock; /**< Lock object. */
+ pj_grp_lock_t *grp_lock; /**< Group lock for sync with
+ ioqueue and timer. */
pj_bool_t tracing; /**< Tracing enabled? */
pj_bool_t is_shutdown; /**< Being shutdown? */
pj_bool_t is_destroying; /**< Destroy in progress? */
diff --git a/pjsip/src/pjsip/sip_endpoint.c b/pjsip/src/pjsip/sip_endpoint.c
index d810781d5..71bc761c2 100644
--- a/pjsip/src/pjsip/sip_endpoint.c
+++ b/pjsip/src/pjsip/sip_endpoint.c
@@ -802,6 +802,42 @@ PJ_DEF(pj_status_t) pjsip_endpt_schedule_timer( pjsip_endpoint *endpt,
}
#endif
+/*
+ * Schedule timer with group lock.
+ */
+#if PJ_TIMER_DEBUG
+PJ_DEF(pj_status_t) pjsip_endpt_schedule_timer_w_grp_lock_dbg(
+ pjsip_endpoint *endpt,
+ pj_timer_entry *entry,
+ const pj_time_val *delay,
+ int id_val,
+ pj_grp_lock_t *grp_lock,
+ const char *src_file,
+ int src_line)
+{
+ PJ_LOG(6, (THIS_FILE, "pjsip_endpt_schedule_timer_w_grp_lock"
+ "(entry=%p, delay=%u.%u, grp_lock=%p)",
+ entry, delay->sec, delay->msec, grp_lock));
+ return pj_timer_heap_schedule_w_grp_lock_dbg(endpt->timer_heap, entry,
+ delay, id_val, grp_lock,
+ src_file, src_line);
+}
+#else
+PJ_DEF(pj_status_t) pjsip_endpt_schedule_timer_w_grp_lock(
+ pjsip_endpoint *endpt,
+ pj_timer_entry *entry,
+ const pj_time_val *delay,
+ int id_val,
+ pj_grp_lock_t *grp_lock )
+{
+ PJ_LOG(6, (THIS_FILE, "pjsip_endpt_schedule_timer_w_grp_lock"
+ "(entry=%p, delay=%u.%u, grp_lock=%p)",
+ entry, delay->sec, delay->msec, grp_lock));
+ return pj_timer_heap_schedule_w_grp_lock( endpt->timer_heap, entry,
+ delay, id_val, grp_lock );
+}
+#endif
+
/*
* Cancel the previously registered timer.
*/
diff --git a/pjsip/src/pjsip/sip_transport.c b/pjsip/src/pjsip/sip_transport.c
index 67e235a39..529604399 100644
--- a/pjsip/src/pjsip/sip_transport.c
+++ b/pjsip/src/pjsip/sip_transport.c
@@ -1012,6 +1012,9 @@ static void transport_idle_callback(pj_timer_heap_t *timer_heap,
PJ_UNUSED_ARG(timer_heap);
+ if (entry->id == PJ_FALSE)
+ return;
+
entry->id = PJ_FALSE;
pjsip_transport_destroy(tp);
}
@@ -1049,6 +1052,10 @@ PJ_DEF(pj_status_t) pjsip_transport_add_ref( pjsip_transport *tp )
PJ_ASSERT_RETURN(tp != NULL, PJ_EINVAL);
+ /* Add ref transport group lock, if any */
+ if (tp->grp_lock)
+ pj_grp_lock_add_ref(tp->grp_lock);
+
/* Cache some vars for checking transport validity later */
tpmgr = tp->tpmgr;
key_len = sizeof(tp->key.type) + tp->addr_len;
@@ -1063,8 +1070,8 @@ PJ_DEF(pj_status_t) pjsip_transport_add_ref( pjsip_transport *tp )
pj_atomic_get(tp->ref_cnt) == 1)
{
if (tp->idle_timer.id != PJ_FALSE) {
- pjsip_endpt_cancel_timer(tp->tpmgr->endpt, &tp->idle_timer);
tp->idle_timer.id = PJ_FALSE;
+ pjsip_endpt_cancel_timer(tp->tpmgr->endpt, &tp->idle_timer);
}
}
pj_lock_release(tpmgr->lock);
@@ -1114,14 +1121,23 @@ PJ_DEF(pj_status_t) pjsip_transport_dec_ref( pjsip_transport *tp )
delay.msec = 0;
}
- pj_assert(tp->idle_timer.id == 0);
- tp->idle_timer.id = PJ_TRUE;
- pjsip_endpt_schedule_timer(tp->tpmgr->endpt, &tp->idle_timer,
- &delay);
+ /* Avoid double timer entry scheduling */
+ if (pj_timer_entry_running(&tp->idle_timer))
+ pjsip_endpt_cancel_timer(tp->tpmgr->endpt, &tp->idle_timer);
+
+ pjsip_endpt_schedule_timer_w_grp_lock(tp->tpmgr->endpt,
+ &tp->idle_timer,
+ &delay,
+ PJ_TRUE,
+ tp->grp_lock);
}
pj_lock_release(tpmgr->lock);
}
+ /* Dec ref transport group lock, if any */
+ if (tp->grp_lock)
+ pj_grp_lock_dec_ref(tp->grp_lock);
+
return PJ_SUCCESS;
}
@@ -1168,6 +1184,10 @@ PJ_DEF(pj_status_t) pjsip_transport_register( pjsip_tpmgr *mgr,
/* Register new entry */
pj_hash_set(tp->pool, mgr->table, &tp->key, key_len, hval, tp);
+ /* Add ref transport group lock, if any */
+ if (tp->grp_lock)
+ pj_grp_lock_add_ref(tp->grp_lock);
+
pj_lock_release(mgr->lock);
TRACE_((THIS_FILE,"Transport %s registered: type=%s, remote=%s:%d",
@@ -1199,8 +1219,8 @@ static pj_status_t destroy_transport( pjsip_tpmgr *mgr,
*/
//pj_assert(tp->idle_timer.id == PJ_FALSE);
if (tp->idle_timer.id != PJ_FALSE) {
- pjsip_endpt_cancel_timer(mgr->endpt, &tp->idle_timer);
tp->idle_timer.id = PJ_FALSE;
+ pjsip_endpt_cancel_timer(mgr->endpt, &tp->idle_timer);
}
/*
@@ -1226,6 +1246,10 @@ static pj_status_t destroy_transport( pjsip_tpmgr *mgr,
pj_lock_release(mgr->lock);
pj_lock_release(tp->lock);
+ /* Dec ref transport group lock, if any */
+ if (tp->grp_lock)
+ pj_grp_lock_dec_ref(tp->grp_lock);
+
/* Destroy. */
return tp->destroy(tp);
}
diff --git a/pjsip/src/pjsip/sip_transport_tcp.c b/pjsip/src/pjsip/sip_transport_tcp.c
index fe327459e..374bf461b 100644
--- a/pjsip/src/pjsip/sip_transport_tcp.c
+++ b/pjsip/src/pjsip/sip_transport_tcp.c
@@ -692,6 +692,8 @@ static pj_status_t tcp_create( struct tcp_listener *listener,
pj_grp_lock_add_ref(tcp->grp_lock);
pj_grp_lock_add_handler(tcp->grp_lock, pool, tcp, &tcp_on_destroy);
+ tcp->base.grp_lock = tcp->grp_lock;
+
/* Create active socket */
pj_activesock_cfg_default(&asock_cfg);
asock_cfg.async_cnt = 1;
@@ -746,7 +748,11 @@ static pj_status_t tcp_create( struct tcp_listener *listener,
return PJ_SUCCESS;
on_error:
- tcp_destroy(&tcp->base, status);
+ if (tcp->grp_lock && pj_grp_lock_get_ref(tcp->grp_lock))
+ tcp_destroy(&tcp->base, status);
+ else
+ tcp_on_destroy(tcp);
+
return status;
}
@@ -867,8 +873,6 @@ static pj_status_t tcp_destroy(pjsip_transport *transport,
tcp->grp_lock = NULL;
pj_grp_lock_dec_ref(grp_lock);
/* Transport may have been deleted at this point */
- } else {
- tcp_on_destroy(tcp);
}
return PJ_SUCCESS;
diff --git a/pjsip/src/pjsip/sip_transport_tls.c b/pjsip/src/pjsip/sip_transport_tls.c
index d3afae5e9..dd3a4d639 100644
--- a/pjsip/src/pjsip/sip_transport_tls.c
+++ b/pjsip/src/pjsip/sip_transport_tls.c
@@ -165,6 +165,10 @@ static pj_status_t tls_create(struct tls_listener *listener,
struct tls_transport **p_tls);
+/* Clean up TLS resources */
+static void tls_on_destroy(void *arg);
+
+
static void tls_perror(const char *sender, const char *title,
pj_status_t status)
{
@@ -893,7 +897,11 @@ static pj_status_t tls_create( struct tls_listener *listener,
return PJ_SUCCESS;
on_error:
- tls_destroy(&tls->base, status);
+ if (tls->grp_lock && pj_grp_lock_get_ref(tls->grp_lock))
+ tls_destroy(&tls->base, status);
+ else
+ tls_on_destroy(tls);
+
return status;
}
@@ -1048,8 +1056,6 @@ static pj_status_t tls_destroy(pjsip_transport *transport,
tls->grp_lock = NULL;
pj_grp_lock_dec_ref(grp_lock);
/* Transport may have been deleted at this point */
- } else {
- tls_on_destroy(tls);
}
return PJ_SUCCESS;
@@ -1235,7 +1241,7 @@ static pj_status_t lis_create_transport(pjsip_tpfactory *factory,
pj_ssl_sock_set_user_data(tls->ssock, tls);
/* Set up the group lock */
- tls->grp_lock = glock;
+ tls->grp_lock = tls->base.grp_lock = glock;
pj_grp_lock_add_ref(tls->grp_lock);
pj_grp_lock_add_handler(tls->grp_lock, pool, tls, &tls_on_destroy);
diff --git a/pjsip/src/pjsip/sip_transport_udp.c b/pjsip/src/pjsip/sip_transport_udp.c
index dbda474cf..b82d519c9 100644
--- a/pjsip/src/pjsip/sip_transport_udp.c
+++ b/pjsip/src/pjsip/sip_transport_udp.c
@@ -691,6 +691,8 @@ static pj_status_t register_to_ioqueue(struct udp_transport *tp)
pj_grp_lock_add_ref(tp->grp_lock);
pj_grp_lock_add_handler(tp->grp_lock, tp->base.pool, tp,
&udp_on_destroy);
+
+ tp->base.grp_lock = tp->grp_lock;
}
/* Register to ioqueue. */
--
2.20.1
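
With the first patch applied, the transport idle timer is scheduled through the endpoint using the transport's group lock, as sip_transport.c now does in pjsip_transport_dec_ref(). A hedged usage sketch of the new API; restart_idle_timer is a hypothetical helper and the 5-second delay is illustrative (inside pjsip the manager derives the delay from the transport type):

    #include <pjsip.h>

    /* Hypothetical helper: (re)start a transport's idle timer. */
    static pj_status_t restart_idle_timer(pjsip_endpoint *endpt,
                                          pjsip_transport *tp)
    {
        pj_time_val delay = { 5, 0 };   /* illustrative idle timeout */

        /* Avoid double scheduling: cancel any outstanding timer first. */
        if (pj_timer_entry_running(&tp->idle_timer))
            pjsip_endpt_cancel_timer(endpt, &tp->idle_timer);

        /* The group lock keeps the transport alive until the timer
         * fires or is cancelled; PJ_TRUE is stored into idle_timer.id
         * once the entry is queued. */
        return pjsip_endpt_schedule_timer_w_grp_lock(endpt,
                                                     &tp->idle_timer,
                                                     &delay, PJ_TRUE,
                                                     tp->grp_lock);
    }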

@@ -0,0 +1,141 @@
From 0de79e4bb4114b60155fe3641ff410f48d99bc1d Mon Sep 17 00:00:00 2001
From: nanang <nanang@localhost>
Date: Wed, 15 May 2019 02:54:52 +0000
Subject: [PATCH] Re #2191: Fixed crash in SIP transport destroy due to a bug
introduced by r5971, i.e. group lock is set after registering tp to tpmgr, so
tpmgr won't call pj_grp_lock_add_ref(), but in unregistration, group lock is
set, so tpmgr will call pj_grp_lock_dec_ref().
---
pjsip/src/pjsip/sip_transport_tls.c | 29 +++++++++++------------------
pjsip/src/pjsip/sip_transport_udp.c | 21 +++++++++++++++------
2 files changed, 26 insertions(+), 24 deletions(-)
diff --git a/pjsip/src/pjsip/sip_transport_tls.c b/pjsip/src/pjsip/sip_transport_tls.c
index a8468e092..466877edf 100644
--- a/pjsip/src/pjsip/sip_transport_tls.c
+++ b/pjsip/src/pjsip/sip_transport_tls.c
@@ -162,6 +162,7 @@ static pj_status_t tls_create(struct tls_listener *listener,
const pj_sockaddr *local,
const pj_sockaddr *remote,
const pj_str_t *remote_name,
+ pj_grp_lock_t *glock,
struct tls_transport **p_tls);
@@ -786,6 +787,7 @@ static pj_status_t tls_create( struct tls_listener *listener,
const pj_sockaddr *local,
const pj_sockaddr *remote,
const pj_str_t *remote_name,
+ pj_grp_lock_t *glock,
struct tls_transport **p_tls)
{
struct tls_transport *tls;
@@ -870,6 +872,11 @@ static pj_status_t tls_create( struct tls_listener *listener,
tls->ssock = ssock;
+ /* Set up the group lock */
+ tls->grp_lock = tls->base.grp_lock = glock;
+ pj_grp_lock_add_ref(tls->grp_lock);
+ pj_grp_lock_add_handler(tls->grp_lock, pool, tls, &tls_on_destroy);
+
/* Register transport to transport manager */
status = pjsip_transport_register(listener->tpmgr, &tls->base);
if (status != PJ_SUCCESS) {
@@ -1226,20 +1233,13 @@ static pj_status_t lis_create_transport(pjsip_tpfactory *factory,
/* Create the transport descriptor */
status = tls_create(listener, pool, ssock, PJ_FALSE, &local_addr,
- rem_addr, &remote_name, &tls);
- if (status != PJ_SUCCESS) {
- pj_grp_lock_destroy(glock);
+ rem_addr, &remote_name, glock, &tls);
+ if (status != PJ_SUCCESS)
return status;
- }
/* Set the "pending" SSL socket user data */
pj_ssl_sock_set_user_data(tls->ssock, tls);
- /* Set up the group lock */
- tls->grp_lock = tls->base.grp_lock = glock;
- pj_grp_lock_add_ref(tls->grp_lock);
- pj_grp_lock_add_handler(tls->grp_lock, pool, tls, &tls_on_destroy);
-
/* Start asynchronous connect() operation */
tls->has_pending_connect = PJ_TRUE;
status = pj_ssl_sock_start_connect(tls->ssock, tls->base.pool,
@@ -1393,7 +1393,8 @@ static pj_bool_t on_accept_complete2(pj_ssl_sock_t *ssock,
* Create TLS transport for the new socket.
*/
status = tls_create( listener, NULL, new_ssock, PJ_TRUE,
- &ssl_info.local_addr, &tmp_src_addr, NULL, &tls);
+ &ssl_info.local_addr, &tmp_src_addr, NULL,
+ ssl_info.grp_lock, &tls);
if (status != PJ_SUCCESS) {
if (listener->tls_setting.on_accept_fail_cb) {
@@ -1410,14 +1411,6 @@ static pj_bool_t on_accept_complete2(pj_ssl_sock_t *ssock,
/* Set the "pending" SSL socket user data */
pj_ssl_sock_set_user_data(new_ssock, tls);
- /* Set up the group lock */
- if (ssl_info.grp_lock) {
- tls->grp_lock = ssl_info.grp_lock;
- pj_grp_lock_add_ref(tls->grp_lock);
- pj_grp_lock_add_handler(tls->grp_lock, tls->base.pool, tls,
- &tls_on_destroy);
- }
-
/* Prevent immediate transport destroy as application may access it
* (getting info, etc) in transport state notification callback.
*/
diff --git a/pjsip/src/pjsip/sip_transport_udp.c b/pjsip/src/pjsip/sip_transport_udp.c
index c02c48a03..905487dd9 100644
--- a/pjsip/src/pjsip/sip_transport_udp.c
+++ b/pjsip/src/pjsip/sip_transport_udp.c
@@ -470,6 +470,16 @@ static pj_status_t udp_destroy( pjsip_transport *transport )
break;
}
+ /* When creating this transport, reference count was incremented to flag
+ * this transport as permanent so it will not be destroyed by transport
+ * manager whenever idle. Application may or may not have cleared the
+ * flag (by calling pjsip_transport_dec_ref()), so in case it has not,
+ * let's do it now, so this transport can be destroyed.
+ */
+ if (pj_atomic_get(tp->base.ref_cnt) > 0)
+ pjsip_transport_dec_ref(&tp->base);
+
+ /* Destroy transport */
if (tp->grp_lock) {
pj_grp_lock_t *grp_lock = tp->grp_lock;
tp->grp_lock = NULL;
@@ -844,18 +854,17 @@ static pj_status_t transport_attach( pjsip_endpoint *endpt,
tp->base.do_shutdown = &udp_shutdown;
tp->base.destroy = &udp_destroy;
- /* This is a permanent transport, so we initialize the ref count
- * to one so that transport manager don't destroy this transport
- * when there's no user!
- */
- pj_atomic_inc(tp->base.ref_cnt);
-
/* Register to transport manager. */
tp->base.tpmgr = pjsip_endpt_get_tpmgr(endpt);
status = pjsip_transport_register( tp->base.tpmgr, (pjsip_transport*)tp);
if (status != PJ_SUCCESS)
goto on_error;
+ /* This is a permanent transport, so we initialize the ref count
+ * to one so that transport manager won't destroy this transport
+ * when there's no user!
+ */
+ pjsip_transport_add_ref(&tp->base);
/* Create rdata and put it in the array. */
tp->rdata_cnt = 0;
--
2.21.0
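
The essence of the second patch is ordering: the group lock must be published on the base transport before pjsip_transport_register(), so that registration's pj_grp_lock_add_ref() and unregistration's pj_grp_lock_dec_ref() stay balanced. A condensed sketch of the corrected sequence, not a standalone compile unit; the names (tls, glock, pool, listener, tls_on_destroy) follow the TLS transport code in the patch:

    /* 1. Set up the group lock BEFORE registering with the transport
     *    manager, so pjsip_transport_register() sees base.grp_lock and
     *    takes its reference. */
    tls->grp_lock = tls->base.grp_lock = glock;
    pj_grp_lock_add_ref(tls->grp_lock);
    pj_grp_lock_add_handler(tls->grp_lock, pool, tls, &tls_on_destroy);

    /* 2. Only then register; unregistration will drop the same ref. */
    status = pjsip_transport_register(listener->tpmgr, &tls->base);

Setting the lock after registration, as r5971 did, left the ref counts unbalanced: registration skipped the add_ref, but unregistration (where the lock was by then set) still performed the dec_ref, destroying the transport early.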