From cfeb8a59eb892e7f72b5ce0acfc0dbde1a51d3b8 Mon Sep 17 00:00:00 2001
From: George Joseph
Date: Fri, 3 May 2019 07:27:43 -0600
Subject: [PATCH] pjproject-bundled: Add upstream timer fixes

Fixed #2191:
 - Stricter double timer entry scheduling prevention.
 - Integrate group lock in SIP transport, e.g.: for add/dec ref, for
   timer scheduling.

ASTERISK-28161
Reported-by: Ross Beer

Change-Id: I02a791fd1570a1e594a132b36c4ff72441108c17
---
 .../patches/0031-r2191-timer-fixes.patch      | 372 ++++++++++++++++++
 1 file changed, 372 insertions(+)
 create mode 100644 third-party/pjproject/patches/0031-r2191-timer-fixes.patch
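Reviewer note, not part of the patch ("git am" ignores text placed
between the diffstat and the first "diff --git" line): the core of the
upstream fix is that schedule_w_grp_lock() now re-checks, while holding
the timer heap lock, whether the entry is already scheduled, and fails
with PJ_EINVALIDOP instead of relying on a debug-only assertion. Below
is a minimal, self-contained C sketch of that pattern; the types and
names are hypothetical stand-ins, not pjlib's.

#include <pthread.h>
#include <stdio.h>

struct timer_entry { int _timer_id; };          /* <= 0: not scheduled */
struct timer_heap  { pthread_mutex_t lock; };

/* Analogous to pj_timer_entry_running(): scheduled entries have id >= 1. */
static int entry_running(const struct timer_entry *e)
{
    return e->_timer_id >= 1;
}

/* Returns 0 on success, -1 if the entry is already scheduled. */
static int schedule_once(struct timer_heap *ht, struct timer_entry *e)
{
    pthread_mutex_lock(&ht->lock);
    /* The check must happen under the heap lock: a racing thread may be
     * scheduling the same entry concurrently. */
    if (entry_running(e)) {
        pthread_mutex_unlock(&ht->lock);
        fprintf(stderr, "Bug! Rescheduling outstanding entry (%p)\n",
                (void *)e);
        return -1;                              /* cf. PJ_EINVALIDOP */
    }
    e->_timer_id = 1;                           /* heap insert goes here */
    pthread_mutex_unlock(&ht->lock);
    return 0;
}

int main(void)
{
    struct timer_heap  ht = { PTHREAD_MUTEX_INITIALIZER };
    struct timer_entry te = { 0 };

    printf("first:  %d\n", schedule_once(&ht, &te));   /* prints 0  */
    printf("second: %d\n", schedule_once(&ht, &te));   /* prints -1 */
    return 0;
}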
diff --git a/third-party/pjproject/patches/0031-r2191-timer-fixes.patch b/third-party/pjproject/patches/0031-r2191-timer-fixes.patch
new file mode 100644
index 0000000000..e1205f03ce
--- /dev/null
+++ b/third-party/pjproject/patches/0031-r2191-timer-fixes.patch
@@ -0,0 +1,372 @@
+From 27a076f2f6c6007c0ba41d2868a803c4d841e815 Mon Sep 17 00:00:00 2001
+From: nanang
+Date: Tue, 23 Apr 2019 08:42:45 +0000
+Subject: [PATCH] Fixed #2191: - Stricter double timer entry scheduling
+ prevention. - Integrate group lock in SIP transport, e.g.: for add/dec ref,
+ for timer scheduling.
+
+---
+ pjlib/include/pj/timer.h            |  2 +-
+ pjlib/src/pj/timer.c                | 11 +++++++-
+ pjsip/include/pjsip/sip_endpoint.h  | 39 +++++++++++++++++++++++++++++
+ pjsip/include/pjsip/sip_transport.h |  2 ++
+ pjsip/src/pjsip/sip_endpoint.c      | 36 ++++++++++++++++++++++++++
+ pjsip/src/pjsip/sip_transport.c     | 36 +++++++++++++++++++++++++-----
+ pjsip/src/pjsip/sip_transport_tcp.c | 10 +++++---
+ pjsip/src/pjsip/sip_transport_tls.c | 14 ++++++++---
+ pjsip/src/pjsip/sip_transport_udp.c |  2 ++
+ 9 files changed, 137 insertions(+), 15 deletions(-)
+
+diff --git a/pjlib/include/pj/timer.h b/pjlib/include/pj/timer.h
+index df6155a81..14857b872 100644
+--- a/pjlib/include/pj/timer.h
++++ b/pjlib/include/pj/timer.h
+@@ -252,9 +252,9 @@ PJ_DECL(pj_status_t) pj_timer_heap_schedule( pj_timer_heap_t *ht,
+  *
+  * @param ht        The timer heap.
+  * @param entry     The entry to be registered.
++ * @param delay     The interval to expire.
+  * @param id_val    The value to be set to the "id" field of the timer entry
+  *                  once the timer is scheduled.
+- * @param delay     The interval to expire.
+  * @param grp_lock  The group lock.
+  *
+  * @return          PJ_SUCCESS, or the appropriate error code.
+diff --git a/pjlib/src/pj/timer.c b/pjlib/src/pj/timer.c
+index f0a2cbbc9..cbdd9791f 100644
+--- a/pjlib/src/pj/timer.c
++++ b/pjlib/src/pj/timer.c
+@@ -502,7 +502,7 @@ static pj_status_t schedule_w_grp_lock(pj_timer_heap_t *ht,
+     PJ_ASSERT_RETURN(entry->cb != NULL, PJ_EINVAL);
+ 
+     /* Prevent same entry from being scheduled more than once */
+-    PJ_ASSERT_RETURN(entry->_timer_id < 1, PJ_EINVALIDOP);
++    //PJ_ASSERT_RETURN(entry->_timer_id < 1, PJ_EINVALIDOP);
+ 
+ #if PJ_TIMER_DEBUG
+     entry->src_file = src_file;
+@@ -512,6 +512,15 @@ static pj_status_t schedule_w_grp_lock(pj_timer_heap_t *ht,
+     PJ_TIME_VAL_ADD(expires, *delay);
+ 
+     lock_timer_heap(ht);
++
++    /* Prevent same entry from being scheduled more than once */
++    if (pj_timer_entry_running(entry)) {
++        unlock_timer_heap(ht);
++        PJ_LOG(3,(THIS_FILE, "Bug! Rescheduling outstanding entry (%p)",
++                  entry));
++        return PJ_EINVALIDOP;
++    }
++
+     status = schedule_entry(ht, entry, &expires);
+     if (status == PJ_SUCCESS) {
+         if (set_id)
+diff --git a/pjsip/include/pjsip/sip_endpoint.h b/pjsip/include/pjsip/sip_endpoint.h
+index 99683fbe1..ee967f8d9 100644
+--- a/pjsip/include/pjsip/sip_endpoint.h
++++ b/pjsip/include/pjsip/sip_endpoint.h
+@@ -138,6 +138,7 @@ PJ_DECL(pj_status_t) pjsip_endpt_handle_events( pjsip_endpoint *endpt,
+ PJ_DECL(pj_status_t) pjsip_endpt_handle_events2(pjsip_endpoint *endpt,
+                                                 const pj_time_val *max_timeout,
+                                                 unsigned *count);
++
+ /**
+  * Schedule timer to endpoint's timer heap. Application must poll the endpoint
+  * periodically (by calling #pjsip_endpt_handle_events) to ensure that the
+@@ -166,6 +167,44 @@ PJ_DECL(pj_status_t) pjsip_endpt_schedule_timer( pjsip_endpoint *endpt,
+                                                  const pj_time_val *delay );
+ #endif
+ 
++/**
++ * Schedule timer to endpoint's timer heap with group lock. Application must
++ * poll the endpoint periodically (by calling #pjsip_endpt_handle_events) to
++ * ensure that the timer events are handled in a timely manner. When the
++ * timeout for the timer has elapsed, the callback specified in the entry
++ * argument will be called. This function, like all other endpoint functions,
++ * is thread safe.
++ *
++ * @param endpt     The endpoint.
++ * @param entry     The timer entry.
++ * @param delay     The relative delay of the timer.
++ * @param id_val    The value to be set to the "id" field of the timer entry
++ *                  once the timer is scheduled.
++ * @param grp_lock  The group lock.
++ * @return          PJ_SUCCESS (zero) if successful.
++ */
++#if PJ_TIMER_DEBUG
++#define pjsip_endpt_schedule_timer_w_grp_lock(ept,ent,d,id,gl) \
++            pjsip_endpt_schedule_timer_w_grp_lock_dbg(ept,ent,d,id,gl,\
++                                                      __FILE__, __LINE__)
++
++PJ_DECL(pj_status_t) pjsip_endpt_schedule_timer_w_grp_lock_dbg(
++                                                    pjsip_endpoint *endpt,
++                                                    pj_timer_entry *entry,
++                                                    const pj_time_val *delay,
++                                                    int id_val,
++                                                    pj_grp_lock_t *grp_lock,
++                                                    const char *src_file,
++                                                    int src_line);
++#else
++PJ_DECL(pj_status_t) pjsip_endpt_schedule_timer_w_grp_lock(
++                                                    pjsip_endpoint *endpt,
++                                                    pj_timer_entry *entry,
++                                                    const pj_time_val *delay,
++                                                    int id_val,
++                                                    pj_grp_lock_t *grp_lock );
++#endif
++
+ /**
+  * Cancel the previously registered timer.
+  * This function, like all other endpoint functions, is thread safe.
+diff --git a/pjsip/include/pjsip/sip_transport.h b/pjsip/include/pjsip/sip_transport.h
+index addc8d521..d1ff3618b 100644
+--- a/pjsip/include/pjsip/sip_transport.h
++++ b/pjsip/include/pjsip/sip_transport.h
+@@ -810,6 +810,8 @@ struct pjsip_transport
+     pj_pool_t          *pool;       /**< Pool used by transport.    */
+     pj_atomic_t        *ref_cnt;    /**< Reference counter.         */
+     pj_lock_t          *lock;       /**< Lock object.               */
++    pj_grp_lock_t      *grp_lock;   /**< Group lock for sync with
++                                         ioqueue and timer.         */
+     pj_bool_t           tracing;    /**< Tracing enabled?           */
+     pj_bool_t           is_shutdown;    /**< Being shutdown?        */
+     pj_bool_t           is_destroying;  /**< Destroy in progress?   */
+diff --git a/pjsip/src/pjsip/sip_endpoint.c b/pjsip/src/pjsip/sip_endpoint.c
+index d810781d5..71bc761c2 100644
+--- a/pjsip/src/pjsip/sip_endpoint.c
++++ b/pjsip/src/pjsip/sip_endpoint.c
+@@ -802,6 +802,42 @@ PJ_DEF(pj_status_t) pjsip_endpt_schedule_timer( pjsip_endpoint *endpt,
+ }
+ #endif
+ 
++/*
++ * Schedule timer with group lock.
++ */
++#if PJ_TIMER_DEBUG
++PJ_DEF(pj_status_t) pjsip_endpt_schedule_timer_w_grp_lock_dbg(
++                                                    pjsip_endpoint *endpt,
++                                                    pj_timer_entry *entry,
++                                                    const pj_time_val *delay,
++                                                    int id_val,
++                                                    pj_grp_lock_t *grp_lock,
++                                                    const char *src_file,
++                                                    int src_line)
++{
++    PJ_LOG(6, (THIS_FILE, "pjsip_endpt_schedule_timer_w_grp_lock"
++               "(entry=%p, delay=%u.%u, grp_lock=%p)",
++               entry, delay->sec, delay->msec, grp_lock));
++    return pj_timer_heap_schedule_w_grp_lock_dbg(endpt->timer_heap, entry,
++                                                 delay, id_val, grp_lock,
++                                                 src_file, src_line);
++}
++#else
++PJ_DEF(pj_status_t) pjsip_endpt_schedule_timer_w_grp_lock(
++                                                    pjsip_endpoint *endpt,
++                                                    pj_timer_entry *entry,
++                                                    const pj_time_val *delay,
++                                                    int id_val,
++                                                    pj_grp_lock_t *grp_lock )
++{
++    PJ_LOG(6, (THIS_FILE, "pjsip_endpt_schedule_timer_w_grp_lock"
++               "(entry=%p, delay=%u.%u, grp_lock=%p)",
++               entry, delay->sec, delay->msec, grp_lock));
++    return pj_timer_heap_schedule_w_grp_lock( endpt->timer_heap, entry,
++                                              delay, id_val, grp_lock );
++}
++#endif
++
+ /*
+  * Cancel the previously registered timer.
+  */
+diff --git a/pjsip/src/pjsip/sip_transport.c b/pjsip/src/pjsip/sip_transport.c
+index 67e235a39..529604399 100644
+--- a/pjsip/src/pjsip/sip_transport.c
++++ b/pjsip/src/pjsip/sip_transport.c
+@@ -1012,6 +1012,9 @@ static void transport_idle_callback(pj_timer_heap_t *timer_heap,
+ 
+     PJ_UNUSED_ARG(timer_heap);
+ 
++    if (entry->id == PJ_FALSE)
++        return;
++
+     entry->id = PJ_FALSE;
+     pjsip_transport_destroy(tp);
+ }
+@@ -1049,6 +1052,10 @@ PJ_DEF(pj_status_t) pjsip_transport_add_ref( pjsip_transport *tp )
+ 
+     PJ_ASSERT_RETURN(tp != NULL, PJ_EINVAL);
+ 
++    /* Add ref transport group lock, if any */
++    if (tp->grp_lock)
++        pj_grp_lock_add_ref(tp->grp_lock);
++
+     /* Cache some vars for checking transport validity later */
+     tpmgr = tp->tpmgr;
+     key_len = sizeof(tp->key.type) + tp->addr_len;
+@@ -1063,8 +1070,8 @@ PJ_DEF(pj_status_t) pjsip_transport_add_ref( pjsip_transport *tp )
+         pj_atomic_get(tp->ref_cnt) == 1)
+     {
+         if (tp->idle_timer.id != PJ_FALSE) {
+-            pjsip_endpt_cancel_timer(tp->tpmgr->endpt, &tp->idle_timer);
+             tp->idle_timer.id = PJ_FALSE;
++            pjsip_endpt_cancel_timer(tp->tpmgr->endpt, &tp->idle_timer);
+         }
+     }
+     pj_lock_release(tpmgr->lock);
+@@ -1114,14 +1121,23 @@ PJ_DEF(pj_status_t) pjsip_transport_dec_ref( pjsip_transport *tp )
+             delay.msec = 0;
+         }
+ 
+-        pj_assert(tp->idle_timer.id == 0);
+-        tp->idle_timer.id = PJ_TRUE;
+-        pjsip_endpt_schedule_timer(tp->tpmgr->endpt, &tp->idle_timer,
+-                                   &delay);
++        /* Avoid double timer entry scheduling */
++        if (pj_timer_entry_running(&tp->idle_timer))
++            pjsip_endpt_cancel_timer(tp->tpmgr->endpt, &tp->idle_timer);
++
++        pjsip_endpt_schedule_timer_w_grp_lock(tp->tpmgr->endpt,
++                                              &tp->idle_timer,
++                                              &delay,
++                                              PJ_TRUE,
++                                              tp->grp_lock);
+     }
+     pj_lock_release(tpmgr->lock);
+ 
++    /* Dec ref transport group lock, if any */
++    if (tp->grp_lock)
++        pj_grp_lock_dec_ref(tp->grp_lock);
++
+     return PJ_SUCCESS;
+ }
+ 
+@@ -1168,6 +1184,10 @@ PJ_DEF(pj_status_t) pjsip_transport_register( pjsip_tpmgr *mgr,
+     /* Register new entry */
+     pj_hash_set(tp->pool, mgr->table, &tp->key, key_len, hval, tp);
+ 
++    /* Add ref transport group lock, if any */
++    if (tp->grp_lock)
++        pj_grp_lock_add_ref(tp->grp_lock);
++
+     pj_lock_release(mgr->lock);
+ 
+     TRACE_((THIS_FILE,"Transport %s registered: type=%s, remote=%s:%d",
+@@ -1199,8 +1219,8 @@ static pj_status_t destroy_transport( pjsip_tpmgr *mgr,
+      */
+     //pj_assert(tp->idle_timer.id == PJ_FALSE);
+     if (tp->idle_timer.id != PJ_FALSE) {
+-        pjsip_endpt_cancel_timer(mgr->endpt, &tp->idle_timer);
+         tp->idle_timer.id = PJ_FALSE;
++        pjsip_endpt_cancel_timer(mgr->endpt, &tp->idle_timer);
+     }
+ 
+     /*
+@@ -1226,6 +1246,10 @@ static pj_status_t destroy_transport( pjsip_tpmgr *mgr,
+     pj_lock_release(mgr->lock);
+     pj_lock_release(tp->lock);
+ 
++    /* Dec ref transport group lock, if any */
++    if (tp->grp_lock)
++        pj_grp_lock_dec_ref(tp->grp_lock);
++
+     /* Destroy. */
+     return tp->destroy(tp);
+ }
+diff --git a/pjsip/src/pjsip/sip_transport_tcp.c b/pjsip/src/pjsip/sip_transport_tcp.c
+index fe327459e..374bf461b 100644
+--- a/pjsip/src/pjsip/sip_transport_tcp.c
++++ b/pjsip/src/pjsip/sip_transport_tcp.c
+@@ -692,6 +692,8 @@ static pj_status_t tcp_create( struct tcp_listener *listener,
+     pj_grp_lock_add_ref(tcp->grp_lock);
+     pj_grp_lock_add_handler(tcp->grp_lock, pool, tcp, &tcp_on_destroy);
+ 
++    tcp->base.grp_lock = tcp->grp_lock;
++
+     /* Create active socket */
+     pj_activesock_cfg_default(&asock_cfg);
+     asock_cfg.async_cnt = 1;
+@@ -746,7 +748,11 @@ static pj_status_t tcp_create( struct tcp_listener *listener,
+     return PJ_SUCCESS;
+ 
+ on_error:
+-    tcp_destroy(&tcp->base, status);
++    if (tcp->grp_lock && pj_grp_lock_get_ref(tcp->grp_lock))
++        tcp_destroy(&tcp->base, status);
++    else
++        tcp_on_destroy(tcp);
++
+     return status;
+ }
+ 
+@@ -867,8 +873,6 @@ static pj_status_t tcp_destroy(pjsip_transport *transport,
+         tcp->grp_lock = NULL;
+         pj_grp_lock_dec_ref(grp_lock);
+         /* Transport may have been deleted at this point */
+-    } else {
+-        tcp_on_destroy(tcp);
+     }
+ 
+     return PJ_SUCCESS;
+diff --git a/pjsip/src/pjsip/sip_transport_tls.c b/pjsip/src/pjsip/sip_transport_tls.c
+index d3afae5e9..dd3a4d639 100644
+--- a/pjsip/src/pjsip/sip_transport_tls.c
++++ b/pjsip/src/pjsip/sip_transport_tls.c
+@@ -165,6 +165,10 @@ static pj_status_t tls_create(struct tls_listener *listener,
+                               struct tls_transport **p_tls);
+ 
+ 
++/* Clean up TLS resources */
++static void tls_on_destroy(void *arg);
++
++
+ static void tls_perror(const char *sender, const char *title,
+                        pj_status_t status)
+ {
+@@ -893,7 +897,11 @@ static pj_status_t tls_create( struct tls_listener *listener,
+     return PJ_SUCCESS;
+ 
+ on_error:
+-    tls_destroy(&tls->base, status);
++    if (tls->grp_lock && pj_grp_lock_get_ref(tls->grp_lock))
++        tls_destroy(&tls->base, status);
++    else
++        tls_on_destroy(tls);
++
+     return status;
+ }
+ 
+@@ -1048,8 +1056,6 @@ static pj_status_t tls_destroy(pjsip_transport *transport,
+         tls->grp_lock = NULL;
+         pj_grp_lock_dec_ref(grp_lock);
+         /* Transport may have been deleted at this point */
+-    } else {
+-        tls_on_destroy(tls);
+     }
+ 
+     return PJ_SUCCESS;
+@@ -1235,7 +1241,7 @@ static pj_status_t lis_create_transport(pjsip_tpfactory *factory,
+     pj_ssl_sock_set_user_data(tls->ssock, tls);
+ 
+     /* Set up the group lock */
+-    tls->grp_lock = glock;
++    tls->grp_lock = tls->base.grp_lock = glock;
+     pj_grp_lock_add_ref(tls->grp_lock);
+     pj_grp_lock_add_handler(tls->grp_lock, pool, tls, &tls_on_destroy);
+ 
+diff --git a/pjsip/src/pjsip/sip_transport_udp.c b/pjsip/src/pjsip/sip_transport_udp.c
+index dbda474cf..b82d519c9 100644
+--- a/pjsip/src/pjsip/sip_transport_udp.c
++++ b/pjsip/src/pjsip/sip_transport_udp.c
+@@ -691,6 +691,8 @@ static pj_status_t register_to_ioqueue(struct udp_transport *tp)
+         pj_grp_lock_add_ref(tp->grp_lock);
+         pj_grp_lock_add_handler(tp->grp_lock, tp->base.pool, tp,
+                                 &udp_on_destroy);
++
++        tp->base.grp_lock = tp->grp_lock;
+     }
+ 
+     /* Register to ioqueue. */
+-- 
+2.20.1
-- 
2.20.1
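Postscript, outside the patch proper: the dec_ref path above is the
model for callers of the new API: cancel a still-running entry first,
then schedule with the owner's group lock so the owner cannot be
destroyed while the timer is pending. A sketch of that usage, assuming
pjproject built with this patch, an initialized pjsip_endpoint, and a
caller-provided pj_grp_lock_t; the function and callback names here are
illustrative, not part of the patch.

#include <pjlib.h>
#include <pjsip.h>

/* Fires when the idle delay elapses; mirrors transport_idle_callback(). */
static void idle_cb(pj_timer_heap_t *timer_heap, pj_timer_entry *entry)
{
    PJ_UNUSED_ARG(timer_heap);
    if (entry->id == PJ_FALSE)          /* lost a race with cancellation */
        return;
    entry->id = PJ_FALSE;
    /* ... tear down the owning object here ... */
}

/* (Re)arm an idle timer safely under the new double-scheduling rules. */
static pj_status_t arm_idle_timer(pjsip_endpoint *endpt,
                                  pj_timer_entry *timer,
                                  void *owner,
                                  pj_grp_lock_t *grp_lock)
{
    pj_time_val delay = { 5, 0 };       /* illustrative 5s idle delay */

    if (timer->cb == NULL)
        pj_timer_entry_init(timer, PJ_FALSE, owner, &idle_cb);

    /* The heap now rejects a running entry with PJ_EINVALIDOP, so
     * cancel before rescheduling, as pjsip_transport_dec_ref() does. */
    if (pj_timer_entry_running(timer))
        pjsip_endpt_cancel_timer(endpt, timer);

    /* grp_lock keeps the owner referenced until the timer fires or is
     * cancelled; entry->id becomes PJ_TRUE once scheduled. */
    return pjsip_endpt_schedule_timer_w_grp_lock(endpt, timer, &delay,
                                                 PJ_TRUE, grp_lock);
}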