Implementation of Re #1628: Modify SIP transaction to use group lock to avoid deadlock etc.


git-svn-id: https://svn.pjsip.org/repos/pjproject/trunk@4420 74dad513-b988-da41-8d7b-12977e46ad98
diff --git a/pjsip-apps/src/samples/stateful_proxy.c b/pjsip-apps/src/samples/stateful_proxy.c
index 379d28a..3e87036 100644
--- a/pjsip-apps/src/samples/stateful_proxy.c
+++ b/pjsip-apps/src/samples/stateful_proxy.c
@@ -273,17 +273,17 @@
 	if (uas_data->uac_tsx && uas_data->uac_tsx->status_code < 200) {
 	    pjsip_tx_data *cancel;
 
-	    pj_mutex_lock(uas_data->uac_tsx->mutex);
+	    pj_grp_lock_acquire(uas_data->uac_tsx->grp_lock);
 
 	    pjsip_endpt_create_cancel(global.endpt, uas_data->uac_tsx->last_tx,
 				      &cancel);
 	    pjsip_endpt_send_request(global.endpt, cancel, -1, NULL, NULL);
 
-	    pj_mutex_unlock(uas_data->uac_tsx->mutex);
+	    pj_grp_lock_release(uas_data->uac_tsx->grp_lock);
 	}
 
 	/* Unlock UAS tsx because it is locked in find_tsx() */
-	pj_mutex_unlock(invite_uas->mutex);
+	pj_grp_lock_release(invite_uas->grp_lock);
     }
 
     return PJ_TRUE;
diff --git a/pjsip/include/pjsip/sip_transaction.h b/pjsip/include/pjsip/sip_transaction.h
index 5be37ac..e60f214 100644
--- a/pjsip/include/pjsip/sip_transaction.h
+++ b/pjsip/include/pjsip/sip_transaction.h
@@ -85,7 +85,8 @@
     pj_pool_t		       *pool;           /**< Pool owned by the tsx. */
     pjsip_module	       *tsx_user;	/**< Transaction user.	    */
     pjsip_endpoint	       *endpt;          /**< Endpoint instance.     */
-    pj_mutex_t		       *mutex;          /**< Mutex for this tsx.    */
+    pj_bool_t			terminating;	/**< terminate() was called */
+    pj_grp_lock_t	       *grp_lock;       /**< Transaction grp lock.  */
     pj_mutex_t		       *mutex_b;	/**< Second mutex to avoid
 						     deadlock. It is used to
 						     protect timer.	    */
@@ -213,6 +214,30 @@
 					   pjsip_transaction **p_tsx);
 
 /**
+ * Variant of pjsip_tsx_create_uac() with additional parameter to specify
+ * the group lock to use. Group lock can be used to synchronize locking
+ * among several objects to prevent deadlock, and to synchronize the
+ * lifetime of objects sharing the same group lock.
+ *
+ * See pjsip_tsx_create_uac() for general info about this function.
+ *
+ * @param tsx_user  Module to be registered as transaction user of the new
+ *		    transaction, which will receive notification from the
+ *		    transaction via on_tsx_state() callback.
+ * @param tdata     The outgoing request message.
+ * @param grp_lock  Optional group lock to use by this transaction. If
+ * 		    the value is NULL, the transaction will create its
+ * 		    own group lock.
+ * @param p_tsx	    On return will contain the new transaction instance.
+ *
+ * @return          PJ_SUCCESS if successful.
+ */
+PJ_DECL(pj_status_t) pjsip_tsx_create_uac2(pjsip_module *tsx_user,
+					   pjsip_tx_data *tdata,
+					   pj_grp_lock_t *grp_lock,
+					   pjsip_transaction **p_tsx);
+
+/**
  * Create, initialize, and register a new transaction as UAS from the
  * specified incoming request in \c rdata. After calling this function,
  * application MUST call #pjsip_tsx_recv_msg() so that transaction
@@ -230,6 +255,29 @@
 					   pjsip_rx_data *rdata,
 					   pjsip_transaction **p_tsx );
 
+/**
+ * Variant of pjsip_tsx_create_uas() with additional parameter to specify
+ * the group lock to use. Group lock can be used to synchronize locking
+ * among several objects to prevent deadlock, and to synchronize the
+ * lifetime of objects sharing the same group lock.
+ *
+ * See pjsip_tsx_create_uas() for general info about this function.
+ *
+ * @param tsx_user  Module to be registered as transaction user of the new
+ *		    transaction, which will receive notification from the
+ *		    transaction via on_tsx_state() callback.
+ * @param rdata     The received incoming request.
+ * @param grp_lock  Optional group lock to use by this transaction. If
+ * 		    the value is NULL, the transaction will create its
+ * 		    own group lock.
+ * @param p_tsx	    On return will contain the new transaction instance.
+ *
+ * @return	    PJ_SUCCESS if successful.
+ */
+PJ_DECL(pj_status_t) pjsip_tsx_create_uas2(pjsip_module *tsx_user,
+					   pjsip_rx_data *rdata,
+					   pj_grp_lock_t *grp_lock,
+					   pjsip_transaction **p_tsx );
 
 /**
  * Lock/bind transaction to a specific transport/listener. This is optional,
diff --git a/pjsip/src/pjsip-ua/sip_inv.c b/pjsip/src/pjsip-ua/sip_inv.c
index 6276497..7d45894 100644
--- a/pjsip/src/pjsip-ua/sip_inv.c
+++ b/pjsip/src/pjsip-ua/sip_inv.c
@@ -2910,7 +2910,7 @@
     }
 
     if (invite_tsx)
-	pj_mutex_unlock(invite_tsx->mutex);
+	pj_grp_lock_release(invite_tsx->grp_lock);
 }
 
 
diff --git a/pjsip/src/pjsip/sip_transaction.c b/pjsip/src/pjsip/sip_transaction.c
index b0cad3c..eff4856 100644
--- a/pjsip/src/pjsip/sip_transaction.c
+++ b/pjsip/src/pjsip/sip_transaction.c
@@ -27,6 +27,7 @@
 #include <pj/hash.h>
 #include <pj/pool.h>
 #include <pj/os.h>
+#include <pj/rand.h>
 #include <pj/string.h>
 #include <pj/assert.h>
 #include <pj/guid.h>
@@ -90,9 +91,6 @@
     }
 };
 
-/* Thread Local Storage ID for transaction lock */
-static long pjsip_tsx_lock_tls_id;
-
 /* Transaction state names */
 static const char *state_str[] = 
 {
@@ -123,14 +121,6 @@
     TSX_HAS_RESOLVED_SERVER	= 16,
 };
 
-/* Transaction lock. */
-typedef struct tsx_lock_data {
-    struct tsx_lock_data *prev;
-    pjsip_transaction    *tsx;
-    int			  is_alive;
-} tsx_lock_data;
-
-
 /* Timer timeout value constants */
 static pj_time_val t1_timer_val = { PJSIP_T1_TIMEOUT/1000, 
                                     PJSIP_T1_TIMEOUT%1000 };
@@ -148,9 +138,6 @@
 
 
 /* Prototypes. */
-static void	   lock_tsx(pjsip_transaction *tsx, struct tsx_lock_data *lck);
-static pj_status_t unlock_tsx( pjsip_transaction *tsx, 
-                               struct tsx_lock_data *lck);
 static pj_status_t tsx_on_state_null(		pjsip_transaction *tsx, 
 				                pjsip_event *event);
 static pj_status_t tsx_on_state_calling(	pjsip_transaction *tsx, 
@@ -178,8 +165,10 @@
 				       pjsip_transport_state state,
 				       const pjsip_transport_state_info *info);
 static pj_status_t tsx_create( pjsip_module *tsx_user,
+                               pj_grp_lock_t *grp_lock,
 			       pjsip_transaction **p_tsx);
-static pj_status_t tsx_destroy( pjsip_transaction *tsx );
+static void	   tsx_on_destroy(void *arg);
+static pj_status_t tsx_shutdown( pjsip_transaction *tsx );
 static void	   tsx_resched_retransmission( pjsip_transaction *tsx );
 static pj_status_t tsx_retransmit( pjsip_transaction *tsx, int resched);
 static int         tsx_send_msg( pjsip_transaction *tsx, 
@@ -270,10 +259,9 @@
 			                const pjsip_rx_data *rdata )
 {
 #define SEPARATOR   '$'
-    char *key, *p, *end;
+    char *key, *p;
     int len;
     pj_size_t len_required;
-    pjsip_uri *req_uri;
     pj_str_t *host;
 
     PJ_ASSERT_RETURN(pool && str && method && rdata, PJ_EINVAL);
@@ -283,7 +271,6 @@
     PJ_ASSERT_RETURN(rdata->msg_info.from, PJSIP_EMISSINGHDR);
 
     host = &rdata->msg_info.via->sent_by.host;
-    req_uri = (pjsip_uri*)rdata->msg_info.msg->line.req.uri;
 
     /* Calculate length required. */
     len_required = 9 +			    /* CSeq number */
@@ -293,7 +280,6 @@
 		   9 +			    /* Via port. */
 		   16;			    /* Separator+Allowance. */
     key = p = (char*) pj_pool_alloc(pool, len_required);
-    end = p + len_required;
 
     /* Add role. */
     *p++ = (char)(role==PJSIP_ROLE_UAC ? 'c' : 's');
@@ -451,13 +437,6 @@
     //timeout_timer_val.msec = (64 * pjsip_cfg()->tsx.t1) % 1000;
     timeout_timer_val = td_timer_val;
 
-    /* Initialize TLS ID for transaction lock. */
-    status = pj_thread_local_alloc(&pjsip_tsx_lock_tls_id);
-    if (status != PJ_SUCCESS)
-	return status;
-
-    pj_thread_local_set(pjsip_tsx_lock_tls_id, NULL);
-
     /*
      * Initialize transaction layer structure.
      */
@@ -482,7 +461,7 @@
 	return PJ_ENOMEM;
     }
 
-    /* Create mutex. */
+    /* Create group lock. */
     status = pj_mutex_create_recursive(pool, "tsxlayer", &mod_tsx_layer.mutex);
     if (status != PJ_SUCCESS) {
 	pjsip_endpt_release_pool(endpt, pool);
@@ -665,8 +644,10 @@
      * Transaction may gets deleted before we have chance to lock it.
      */
     PJ_TODO(FIX_RACE_CONDITION_HERE);
+    PJ_RACE_ME(5);
+
     if (tsx && lock)
-	pj_mutex_lock(tsx->mutex);
+	pj_grp_lock_acquire(tsx->grp_lock);
 
     return tsx;
 }
@@ -711,7 +692,7 @@
 	if (tsx) {
 	    pjsip_tsx_terminate(tsx, PJSIP_SC_SERVICE_UNAVAILABLE);
 	    mod_tsx_layer_unregister_tsx(tsx);
-	    tsx_destroy(tsx);
+	    tsx_shutdown(tsx);
 	}
 	it = next;
     }
@@ -735,9 +716,6 @@
     /* Release pool. */
     pjsip_endpt_release_pool(mod_tsx_layer.endpt, mod_tsx_layer.pool);
 
-    /* Free TLS */
-    pj_thread_local_free(pjsip_tsx_lock_tls_id);
-
     /* Mark as unregistered. */
     mod_tsx_layer.endpt = NULL;
 
@@ -813,6 +791,7 @@
      * in pjsip_tsx_recv_msg().
      */
     PJ_TODO(FIX_RACE_CONDITION_HERE);
+    PJ_RACE_ME(5);
 
     /* Pass the message to the transaction. */
     pjsip_tsx_recv_msg(tsx, rdata );
@@ -862,6 +841,7 @@
      * in pjsip_tsx_recv_msg().
      */
     PJ_TODO(FIX_RACE_CONDITION_HERE);
+    PJ_RACE_ME(5);
 
     /* Pass the message to the transaction. */
     pjsip_tsx_recv_msg(tsx, rdata );
@@ -928,43 +908,6 @@
  **
  *****************************************************************************
  **/
-/*
- * Lock transaction and set the value of Thread Local Storage.
- */
-static void lock_tsx(pjsip_transaction *tsx, struct tsx_lock_data *lck)
-{
-    struct tsx_lock_data *prev_data;
-
-    pj_mutex_lock(tsx->mutex);
-    prev_data = (struct tsx_lock_data *) 
-                    pj_thread_local_get(pjsip_tsx_lock_tls_id);
-    lck->prev = prev_data;
-    lck->tsx = tsx;
-    lck->is_alive = 1;
-    pj_thread_local_set(pjsip_tsx_lock_tls_id, lck);
-}
-
-
-/*
- * Unlock transaction.
- * This will selectively unlock the mutex ONLY IF the transaction has not been 
- * destroyed. The function knows whether the transaction has been destroyed
- * because when transaction is destroyed the is_alive flag for the transaction
- * will be set to zero.
- */
-static pj_status_t unlock_tsx( pjsip_transaction *tsx, 
-                               struct tsx_lock_data *lck)
-{
-    pj_assert( (void*)pj_thread_local_get(pjsip_tsx_lock_tls_id) == lck);
-    pj_assert( lck->tsx == tsx );
-    pj_thread_local_set(pjsip_tsx_lock_tls_id, lck->prev);
-    if (lck->is_alive)
-	pj_mutex_unlock(tsx->mutex);
-
-    return lck->is_alive ? PJ_SUCCESS : PJSIP_ETSXDESTROYED;
-}
-
-
 /* Lock transaction for accessing the timeout timer only. */
 static void lock_timer(pjsip_transaction *tsx)
 {
@@ -981,6 +924,7 @@
  * This function is called by both UAC and UAS creation.
  */
 static pj_status_t tsx_create( pjsip_module *tsx_user,
+                               pj_grp_lock_t *grp_lock,
 			       pjsip_transaction **p_tsx)
 {
     pj_pool_t *pool;
@@ -1009,16 +953,22 @@
     tsx->timeout_timer.user_data = tsx;
     tsx->timeout_timer.cb = &tsx_timer_callback;
     
-    status = pj_mutex_create_recursive(pool, tsx->obj_name, &tsx->mutex);
-    if (status != PJ_SUCCESS) {
-	pjsip_endpt_release_pool(mod_tsx_layer.endpt, pool);
-	return status;
+    if (grp_lock) {
+	tsx->grp_lock = grp_lock;
+    } else {
+	status = pj_grp_lock_create(pool, NULL, &tsx->grp_lock);
+	if (status != PJ_SUCCESS) {
+	    pjsip_endpt_release_pool(mod_tsx_layer.endpt, pool);
+	    return status;
+	}
     }
 
+    pj_grp_lock_add_ref(tsx->grp_lock);
+    pj_grp_lock_add_handler(tsx->grp_lock, tsx->pool, tsx, &tsx_on_destroy);
+
     status = pj_mutex_create_simple(pool, tsx->obj_name, &tsx->mutex_b);
     if (status != PJ_SUCCESS) {
-	pj_mutex_destroy(tsx->mutex);
-	pjsip_endpt_release_pool(mod_tsx_layer.endpt, pool);
+	tsx_shutdown(tsx);
 	return status;
     }
 
@@ -1026,17 +976,28 @@
     return PJ_SUCCESS;
 }
 
-
-/* Destroy transaction. */
-static pj_status_t tsx_destroy( pjsip_transaction *tsx )
+/* Really destroy transaction, when grp_lock reference is zero */
+static void tsx_on_destroy( void *arg )
 {
-    struct tsx_lock_data *lck;
+    pjsip_transaction *tsx = (pjsip_transaction*)arg;
 
+    PJ_LOG(5,(tsx->obj_name, "Transaction destroyed!"));
+
+    pj_mutex_destroy(tsx->mutex_b);
+    pjsip_endpt_release_pool(tsx->endpt, tsx->pool);
+}
+
+/* Shutdown transaction. */
+static pj_status_t tsx_shutdown( pjsip_transaction *tsx )
+{
     /* Release the transport */
     tsx_update_transport(tsx, NULL);
 
-    /* Decrement reference counter in transport selector */
-    pjsip_tpselector_dec_ref(&tsx->tp_sel);
+    /* Decrement reference counter in transport selector, only if
+     * we haven't been called before */
+    if (!tsx->terminating) {
+	pjsip_tpselector_dec_ref(&tsx->tp_sel);
+    }
 
     /* Free last transmitted message. */
     if (tsx->last_tx) {
@@ -1057,30 +1018,21 @@
     /* Clear some pending flags. */
     tsx->transport_flag &= ~(TSX_HAS_PENDING_RESCHED | TSX_HAS_PENDING_SEND);
 
+
     /* Refuse to destroy transaction if it has pending resolving. */
     if (tsx->transport_flag & TSX_HAS_PENDING_TRANSPORT) {
 	tsx->transport_flag |= TSX_HAS_PENDING_DESTROY;
 	tsx->tsx_user = NULL;
 	PJ_LOG(4,(tsx->obj_name, "Will destroy later because transport is "
 				 "in progress"));
-	return PJ_EBUSY;
     }
 
-    /* Clear TLS, so that mutex will not be unlocked */
-    lck = (struct tsx_lock_data*) pj_thread_local_get(pjsip_tsx_lock_tls_id);
-    while (lck) {
-	if (lck->tsx == tsx) {
-	    lck->is_alive = 0;
-	}
-	lck = lck->prev;
+    if (!tsx->terminating) {
+	tsx->terminating = PJ_TRUE;
+	pj_grp_lock_dec_ref(tsx->grp_lock);
     }
 
-    pj_mutex_destroy(tsx->mutex_b);
-    pj_mutex_destroy(tsx->mutex);
-
-    PJ_LOG(5,(tsx->obj_name, "Transaction destroyed!"));
-
-    pjsip_endpt_release_pool(tsx->endpt, tsx->pool);
+    /* No access to tsx after this, it may have been destroyed */
 
     return PJ_SUCCESS;
 }
@@ -1093,7 +1045,6 @@
 {
     pjsip_event event;
     pjsip_transaction *tsx = (pjsip_transaction*) entry->user_data;
-    struct tsx_lock_data lck;
 
     PJ_UNUSED_ARG(theap);
 
@@ -1107,9 +1058,9 @@
     PJSIP_EVENT_INIT_TIMER(event, entry);
 
     /* Dispatch event to transaction. */
-    lock_tsx(tsx, &lck);
+    pj_grp_lock_acquire(tsx->grp_lock);
     (*tsx->state_handler)(tsx, &event);
-    unlock_tsx(tsx, &lck);
+    pj_grp_lock_release(tsx->grp_lock);
 
     pj_log_pop_indent();
 }
@@ -1208,7 +1159,7 @@
 	mod_tsx_layer_unregister_tsx(tsx);
 
 	/* Destroy transaction. */
-	tsx_destroy(tsx);
+	tsx_shutdown(tsx);
     }
 
     pj_log_pop_indent();
@@ -1222,12 +1173,19 @@
 					  pjsip_tx_data *tdata,
 					  pjsip_transaction **p_tsx)
 {
+    return pjsip_tsx_create_uac2(tsx_user, tdata, NULL, p_tsx);
+}
+
+PJ_DEF(pj_status_t) pjsip_tsx_create_uac2(pjsip_module *tsx_user,
+					  pjsip_tx_data *tdata,
+					  pj_grp_lock_t *grp_lock,
+					  pjsip_transaction **p_tsx)
+{
     pjsip_transaction *tsx;
     pjsip_msg *msg;
     pjsip_cseq_hdr *cseq;
     pjsip_via_hdr *via;
     pjsip_host_info dst_info;
-    struct tsx_lock_data lck;
     pj_status_t status;
 
     /* Validate arguments. */
@@ -1251,13 +1209,13 @@
 
 
     /* Create transaction instance. */
-    status = tsx_create( tsx_user, &tsx);
+    status = tsx_create( tsx_user, grp_lock, &tsx);
     if (status != PJ_SUCCESS)
 	return status;
 
 
     /* Lock transaction. */
-    lock_tsx(tsx, &lck);
+    pj_grp_lock_acquire(tsx->grp_lock);
 
     /* Role is UAC. */
     tsx->role = PJSIP_ROLE_UAC;
@@ -1323,8 +1281,8 @@
      */
     status = pjsip_get_request_dest(tdata, &dst_info);
     if (status != PJ_SUCCESS) {
-	unlock_tsx(tsx, &lck);
-	tsx_destroy(tsx);
+	pj_grp_lock_release(tsx->grp_lock);
+	tsx_shutdown(tsx);
 	return status;
     }
     tsx->is_reliable = (dst_info.flag & PJSIP_TRANSPORT_RELIABLE);
@@ -1335,14 +1293,14 @@
 	/* The assertion is removed by #1090:
 	pj_assert(!"Bug in branch_param generator (i.e. not unique)");
 	*/
-	unlock_tsx(tsx, &lck);
-	tsx_destroy(tsx);
+	pj_grp_lock_release(tsx->grp_lock);
+	tsx_shutdown(tsx);
 	return status;
     }
 
 
     /* Unlock transaction and return. */
-    unlock_tsx(tsx, &lck);
+    pj_grp_lock_release(tsx->grp_lock);
 
     pj_log_push_indent();
     PJ_LOG(5,(tsx->obj_name, "Transaction created for %s",
@@ -1361,12 +1319,19 @@
 					  pjsip_rx_data *rdata,
 					  pjsip_transaction **p_tsx)
 {
+    return pjsip_tsx_create_uas2(tsx_user, rdata, NULL, p_tsx);
+}
+
+PJ_DEF(pj_status_t) pjsip_tsx_create_uas2(pjsip_module *tsx_user,
+					  pjsip_rx_data *rdata,
+					  pj_grp_lock_t *grp_lock,
+					  pjsip_transaction **p_tsx)
+{
     pjsip_transaction *tsx;
     pjsip_msg *msg;
     pj_str_t *branch;
     pjsip_cseq_hdr *cseq;
     pj_status_t status;
-    struct tsx_lock_data lck;
 
     /* Validate arguments. */
     PJ_ASSERT_RETURN(rdata && rdata->msg_info.msg && p_tsx, PJ_EINVAL);
@@ -1404,13 +1369,13 @@
     /* 
      * Create transaction instance. 
      */
-    status = tsx_create( tsx_user, &tsx);
+    status = tsx_create( tsx_user, grp_lock, &tsx);
     if (status != PJ_SUCCESS)
 	return status;
 
 
     /* Lock transaction. */
-    lock_tsx(tsx, &lck);
+    pj_grp_lock_acquire(tsx->grp_lock);
 
     /* Role is UAS */
     tsx->role = PJSIP_ROLE_UAS;
@@ -1427,8 +1392,8 @@
     status = pjsip_tsx_create_key(tsx->pool, &tsx->transaction_key, 
                                   PJSIP_ROLE_UAS, &tsx->method, rdata);
     if (status != PJ_SUCCESS) {
-	unlock_tsx(tsx, &lck);
-        tsx_destroy(tsx);
+	pj_grp_lock_release(tsx->grp_lock);
+	tsx_shutdown(tsx);
         return status;
     }
 
@@ -1454,8 +1419,8 @@
     /* Get response address. */
     status = pjsip_get_response_addr( tsx->pool, rdata, &tsx->res_addr );
     if (status != PJ_SUCCESS) {
-	unlock_tsx(tsx, &lck);
-	tsx_destroy(tsx);
+	pj_grp_lock_release(tsx->grp_lock);
+	tsx_shutdown(tsx);
 	return status;
     }
 
@@ -1476,8 +1441,8 @@
     /* Register the transaction. */
     status = mod_tsx_layer_register_tsx(tsx);
     if (status != PJ_SUCCESS) {
-	unlock_tsx(tsx, &lck);
-	tsx_destroy(tsx);
+	pj_grp_lock_release(tsx->grp_lock);
+	tsx_shutdown(tsx);
 	return status;
     }
 
@@ -1485,7 +1450,7 @@
     rdata->endpt_info.mod_data[mod_tsx_layer.mod.id] = tsx;
 
     /* Unlock transaction and return. */
-    unlock_tsx(tsx, &lck);
+    pj_grp_lock_release(tsx->grp_lock);
 
     pj_log_push_indent();
     PJ_LOG(5,(tsx->obj_name, "Transaction created for %s",
@@ -1504,13 +1469,11 @@
 PJ_DEF(pj_status_t) pjsip_tsx_set_transport(pjsip_transaction *tsx,
 					    const pjsip_tpselector *sel)
 {
-    struct tsx_lock_data lck;
-
     /* Must be UAC transaction */
     PJ_ASSERT_RETURN(tsx && sel, PJ_EINVAL);
 
     /* Start locking the transaction. */
-    lock_tsx(tsx, &lck);
+    pj_grp_lock_acquire(tsx->grp_lock);
 
     /* Decrement reference counter of previous transport selector */
     pjsip_tpselector_dec_ref(&tsx->tp_sel);
@@ -1522,7 +1485,7 @@
     pjsip_tpselector_add_ref(&tsx->tp_sel);
 
     /* Unlock transaction. */
-    unlock_tsx(tsx, &lck);
+    pj_grp_lock_release(tsx->grp_lock);
 
     return PJ_SUCCESS;
 }
@@ -1547,8 +1510,6 @@
  */
 PJ_DEF(pj_status_t) pjsip_tsx_terminate( pjsip_transaction *tsx, int code )
 {
-    struct tsx_lock_data lck;
-
     PJ_ASSERT_RETURN(tsx != NULL, PJ_EINVAL);
 
     PJ_LOG(5,(tsx->obj_name, "Request to terminate transaction"));
@@ -1560,10 +1521,10 @@
 
     pj_log_push_indent();
 
-    lock_tsx(tsx, &lck);
+    pj_grp_lock_acquire(tsx->grp_lock);
     tsx_set_status_code(tsx, code, NULL);
     tsx_set_state( tsx, PJSIP_TSX_STATE_TERMINATED, PJSIP_EVENT_USER, NULL);
-    unlock_tsx(tsx, &lck);
+    pj_grp_lock_release(tsx->grp_lock);
 
     pj_log_pop_indent();
 
@@ -1578,8 +1539,6 @@
  */
 PJ_DEF(pj_status_t) pjsip_tsx_stop_retransmit(pjsip_transaction *tsx)
 {
-    struct tsx_lock_data lck;
-
     PJ_ASSERT_RETURN(tsx != NULL, PJ_EINVAL);
     PJ_ASSERT_RETURN(tsx->role == PJSIP_ROLE_UAC &&
 		     tsx->method.id == PJSIP_INVITE_METHOD,
@@ -1589,13 +1548,13 @@
 
     pj_log_push_indent();
 
-    lock_tsx(tsx, &lck);
+    pj_grp_lock_acquire(tsx->grp_lock);
     /* Cancel retransmission timer. */
     if (tsx->retransmit_timer.id != 0) {
 	pjsip_endpt_cancel_timer(tsx->endpt, &tsx->retransmit_timer);
 	tsx->retransmit_timer.id = 0;
     }
-    unlock_tsx(tsx, &lck);
+    pj_grp_lock_release(tsx->grp_lock);
 
     pj_log_pop_indent();
 
@@ -1617,8 +1576,8 @@
 		     tsx->method.id == PJSIP_INVITE_METHOD,
 		     PJ_EINVALIDOP);
 
-    /* Note: must not call lock_tsx() as that would introduce deadlock.
-     * See #1121.
+    /* Note: must not call pj_grp_lock_acquire(tsx->grp_lock) as
+     * that would introduce deadlock. See #1121.
      */
     lock_timer(tsx);
 
@@ -1659,7 +1618,6 @@
 				        pjsip_tx_data *tdata )
 {
     pjsip_event event;
-    struct tsx_lock_data lck;
     pj_status_t status;
 
     if (tdata == NULL)
@@ -1675,7 +1633,7 @@
     PJSIP_EVENT_INIT_TX_MSG(event, tdata);
 
     /* Dispatch to transaction. */
-    lock_tsx(tsx, &lck);
+    pj_grp_lock_acquire(tsx->grp_lock);
 
     /* Set transport selector to tdata */
     pjsip_tx_data_set_transport(tdata, &tsx->tp_sel);
@@ -1683,7 +1641,7 @@
     /* Dispatch to state handler */
     status = (*tsx->state_handler)(tsx, &event);
 
-    unlock_tsx(tsx, &lck);
+    pj_grp_lock_release(tsx->grp_lock);
 
     /* Only decrement reference counter when it returns success.
      * (This is the specification from the .PDF design document).
@@ -1706,8 +1664,6 @@
 				 pjsip_rx_data *rdata)
 {
     pjsip_event event;
-    struct tsx_lock_data lck;
-    pj_status_t status;
 
     PJ_LOG(5,(tsx->obj_name, "Incoming %s in state %s", 
 	      pjsip_rx_data_get_info(rdata), state_str[tsx->state]));
@@ -1720,9 +1676,9 @@
     PJSIP_EVENT_INIT_RX_MSG(event, rdata);
 
     /* Dispatch to transaction. */
-    lock_tsx(tsx, &lck);
-    status = (*tsx->state_handler)(tsx, &event);
-    unlock_tsx(tsx, &lck);
+    pj_grp_lock_acquire(tsx->grp_lock);
+    (*tsx->state_handler)(tsx, &event);
+    pj_grp_lock_release(tsx->grp_lock);
 
     pj_log_pop_indent();
 }
@@ -1734,7 +1690,6 @@
 {
     pjsip_transaction *tsx = (pjsip_transaction*) send_state->token;
     pjsip_tx_data *tdata = send_state->tdata;
-    struct tsx_lock_data lck;
 
     /* Check if transaction has cancelled itself from this transmit
      * notification (https://trac.pjsip.org/repos/ticket/1033).
@@ -1748,12 +1703,12 @@
 	return;
     }
 
+    pj_grp_lock_acquire(tsx->grp_lock);
+
     /* Reset */
     tdata->mod_data[mod_tsx_layer.mod.id] = NULL;
     tsx->pending_tx = NULL;
 
-    lock_tsx(tsx, &lck);
-
     if (sent > 0) {
 	/* Successfully sent! */
 	pj_assert(send_state->cur_transport != NULL);
@@ -1782,7 +1737,7 @@
 	if (tsx->transport_flag & TSX_HAS_PENDING_DESTROY) {
 	    tsx_set_state( tsx, PJSIP_TSX_STATE_DESTROYED, 
 			   PJSIP_EVENT_UNKNOWN, NULL );
-	    unlock_tsx(tsx, &lck);
+	    pj_grp_lock_release(tsx->grp_lock);
 	    return;
 	}
 
@@ -1824,7 +1779,7 @@
 
 	    err =pj_strerror(-sent, errmsg, sizeof(errmsg));
 
-	    PJ_LOG(2,(tsx->obj_name, 
+	    PJ_LOG(2,(tsx->obj_name,
 		      "Failed to send %s! err=%d (%s)",
 		      pjsip_tx_data_get_info(send_state->tdata), -sent,
 		      errmsg));
@@ -1862,13 +1817,10 @@
 	    }
 
 	} else {
-	    char errmsg[PJ_ERR_MSG_SIZE];
-
-	    PJ_LOG(2,(tsx->obj_name, 
-		      "Temporary failure in sending %s, "
-		      "will try next server. Err=%d (%s)",
-		      pjsip_tx_data_get_info(send_state->tdata), -sent,
-		      pj_strerror(-sent, errmsg, sizeof(errmsg)).ptr));
+	    PJ_PERROR(2,(tsx->obj_name, -sent,
+		         "Temporary failure in sending %s, "
+		         "will try next server",
+		         pjsip_tx_data_get_info(send_state->tdata)));
 
 	    /* Reset retransmission count */
 	    tsx->retransmit_count = 0;
@@ -1893,7 +1845,7 @@
 	}
     }
 
-    unlock_tsx(tsx, &lck);
+    pj_grp_lock_release(tsx->grp_lock);
 }
 
 
@@ -1903,7 +1855,6 @@
 {
     if (sent < 0) {
 	pjsip_transaction *tsx = (pjsip_transaction*) token;
-	struct tsx_lock_data lck;
 	char errmsg[PJ_ERR_MSG_SIZE];
 	pj_str_t err;
 
@@ -1912,9 +1863,9 @@
 	err = pj_strerror(-sent, errmsg, sizeof(errmsg));
 
 	PJ_LOG(2,(tsx->obj_name, "Transport failed to send %s! Err=%d (%s)",
-		  pjsip_tx_data_get_info(tdata), -sent, errmsg));
+		pjsip_tx_data_get_info(tdata), -sent, errmsg));
 
-	lock_tsx(tsx, &lck);
+	pj_grp_lock_acquire(tsx->grp_lock);
 
 	/* Release transport. */
 	tsx_update_transport(tsx, NULL);
@@ -1924,7 +1875,7 @@
 	tsx_set_state( tsx, PJSIP_TSX_STATE_TERMINATED, 
 		       PJSIP_EVENT_TRANSPORT_ERROR, tdata );
 
-	unlock_tsx(tsx, &lck);
+	pj_grp_lock_release(tsx->grp_lock);
    }
 }
 
@@ -1940,13 +1891,12 @@
 
     if (state == PJSIP_TP_STATE_DISCONNECTED) {
 	pjsip_transaction *tsx;
-	struct tsx_lock_data lck;
 
 	pj_assert(tp && info && info->user_data);
 
 	tsx = (pjsip_transaction*)info->user_data;
 
-	lock_tsx(tsx, &lck);
+	pj_grp_lock_acquire(tsx->grp_lock);
 
 	/* Terminate transaction when transport disconnected */
 	if (tsx->state < PJSIP_TSX_STATE_TERMINATED) {
@@ -1959,7 +1909,7 @@
 			   PJSIP_EVENT_TRANSPORT_ERROR, NULL);
 	}
 
-	unlock_tsx(tsx, &lck);
+	pj_grp_lock_release(tsx->grp_lock);
     }
 }
 
@@ -1991,12 +1941,9 @@
 	    status = PJ_SUCCESS;
 
 	if (status != PJ_SUCCESS) {
-	    char errmsg[PJ_ERR_MSG_SIZE];
-
-	    PJ_LOG(2,(tsx->obj_name, 
-		      "Error sending %s: Err=%d (%s)",
-		      pjsip_tx_data_get_info(tdata), status, 
-		      pj_strerror(status, errmsg, sizeof(errmsg)).ptr));
+	    PJ_PERROR(2,(tsx->obj_name, status,
+		         "Error sending %s",
+		         pjsip_tx_data_get_info(tdata)));
 
 	    /* On error, release transport to force using full transport
 	     * resolution procedure.
@@ -2109,15 +2056,14 @@
 PJ_DEF(pj_status_t) pjsip_tsx_retransmit_no_state(pjsip_transaction *tsx,
 						  pjsip_tx_data *tdata)
 {
-    struct tsx_lock_data lck;
     pj_status_t status;
 
-    lock_tsx(tsx, &lck);
+    pj_grp_lock_acquire(tsx->grp_lock);
     if (tdata == NULL) {
 	tdata = tsx->last_tx;
     }
     status = tsx_send_msg(tsx, tdata);
-    unlock_tsx(tsx, &lck);
+    pj_grp_lock_release(tsx->grp_lock);
 
     /* Only decrement reference counter when it returns success.
      * (This is the specification from the .PDF design document).
diff --git a/pjsip/src/pjsip/sip_ua_layer.c b/pjsip/src/pjsip/sip_ua_layer.c
index 1279d73..073b25e 100644
--- a/pjsip/src/pjsip/sip_ua_layer.c
+++ b/pjsip/src/pjsip/sip_ua_layer.c
@@ -545,7 +545,7 @@
 	/* We should find the dialog attached to the INVITE transaction */
 	if (tsx) {
 	    dlg = (pjsip_dialog*) tsx->mod_data[mod_ua.mod.id];
-	    pj_mutex_unlock(tsx->mutex);
+	    pj_grp_lock_release(tsx->grp_lock);
 
 	    /* Dlg may be NULL on some extreme condition
 	     * (e.g. during debugging where initially there is a dialog)
diff --git a/pjsip/src/test/regc_test.c b/pjsip/src/test/regc_test.c
index 7a5b806..17cb399 100644
--- a/pjsip/src/test/regc_test.c
+++ b/pjsip/src/test/regc_test.c
@@ -347,7 +347,10 @@
 		  client_cfg->error, client_result.error));
 	return -210;
     }
-    if (client_result.code != client_cfg->code) {
+    if (client_result.code != client_cfg->code &&
+	client_cfg->code != 502 && client_cfg->code != 503 &&
+	client_result.code != 502 && client_result.code != 503)
+    {
 	PJ_LOG(3,(THIS_FILE, "    error: expecting code=%d, got code=%d",
 		  client_cfg->code, client_result.code));
 	return -220;
diff --git a/pjsip/src/test/test.c b/pjsip/src/test/test.c
index 8ffe566..0361e61 100644
--- a/pjsip/src/test/test.c
+++ b/pjsip/src/test/test.c
@@ -47,9 +47,10 @@
 
 
 pjsip_endpoint *endpt;
+pj_caching_pool caching_pool;
 int log_level = 3;
 int param_log_decor = PJ_LOG_HAS_NEWLINE | PJ_LOG_HAS_TIME | 
-		      PJ_LOG_HAS_MICRO_SEC;
+		      PJ_LOG_HAS_MICRO_SEC | PJ_LOG_HAS_INDENT;
 
 static pj_oshandle_t fd_report;
 const char *system_name = "Unknown";
@@ -223,7 +224,6 @@
 int test_main(void)
 {
     pj_status_t rc;
-    pj_caching_pool caching_pool;
     const char *filename;
     unsigned tsx_test_cnt=0;
     struct tsx_test_param tsx_test[10];
@@ -369,6 +369,12 @@
     DO_TEST(regc_test());
 #endif
 
+    /*
+     * Better be last because it recreates the endpt
+     */
+#if INCLUDE_TSX_DESTROY_TEST
+    DO_TEST(tsx_destroy_test());
+#endif
 
 on_return:
     flush_events(500);
diff --git a/pjsip/src/test/test.h b/pjsip/src/test/test.h
index 358f930..690598b 100644
--- a/pjsip/src/test/test.h
+++ b/pjsip/src/test/test.h
@@ -23,6 +23,7 @@
 #include <pjsip/sip_types.h>
 
 extern pjsip_endpoint *endpt;
+extern pj_caching_pool caching_pool;
 
 #define TEST_UDP_PORT	    15060
 #define TEST_UDP_PORT_STR   "15060"
@@ -64,6 +65,7 @@
 #define INCLUDE_TCP_TEST	INCLUDE_TRANSPORT_GROUP
 #define INCLUDE_RESOLVE_TEST	INCLUDE_TRANSPORT_GROUP
 #define INCLUDE_TSX_TEST	INCLUDE_TSX_GROUP
+#define INCLUDE_TSX_DESTROY_TEST INCLUDE_TSX_GROUP
 #define INCLUDE_INV_OA_TEST	INCLUDE_INV_GROUP
 #define INCLUDE_REGC_TEST	INCLUDE_REGC_GROUP
 
@@ -75,6 +77,7 @@
 int multipart_test(void);
 int txdata_test(void);
 int tsx_bench(void);
+int tsx_destroy_test(void);
 int transport_udp_test(void);
 int transport_loop_test(void);
 int transport_tcp_test(void);
diff --git a/pjsip/src/test/tsx_basic_test.c b/pjsip/src/test/tsx_basic_test.c
index 163c3b9..2de73d7 100644
--- a/pjsip/src/test/tsx_basic_test.c
+++ b/pjsip/src/test/tsx_basic_test.c
@@ -125,7 +125,7 @@
 	    app_perror("   error: unable to terminate transaction", status);
 	    return -40;
 	}
-	pj_mutex_unlock(tsx->mutex);
+	pj_grp_lock_release(tsx->grp_lock);
     }
 
     flush_events(500);
@@ -155,3 +155,189 @@
 
     return 0;
 }
+
+/**************************************************************************/
+
+struct tsx_test_state
+{
+    int pool_cnt;
+};
+
+static void save_tsx_test_state(struct tsx_test_state *st)
+{
+    st->pool_cnt = caching_pool.used_count;
+}
+
+static pj_status_t check_tsx_test_state(struct tsx_test_state *st)
+{
+    if (caching_pool.used_count > st->pool_cnt)
+	return -1;
+
+    return 0;
+}
+
+static void destroy_endpt()
+{
+    pjsip_endpt_destroy(endpt);
+    endpt = NULL;
+}
+
+static pj_status_t init_endpt()
+{
+    pj_str_t ns = { "10.187.27.172", 13};	/* just a random, unreachable IP */
+    pj_dns_resolver *resolver;
+    pj_status_t rc;
+
+    rc = pjsip_endpt_create(&caching_pool.factory, "endpt", &endpt);
+    if (rc != PJ_SUCCESS) {
+	app_perror("pjsip_endpt_create", rc);
+	return rc;
+    }
+
+    /* Start transaction layer module. */
+    rc = pjsip_tsx_layer_init_module(endpt);
+    if (rc != PJ_SUCCESS) {
+	app_perror("tsx_layer_init", rc);
+	return rc;
+    }
+
+    rc = pjsip_udp_transport_start(endpt, NULL, NULL, 1,  NULL);
+    if (rc != PJ_SUCCESS) {
+	app_perror("udp init", rc);
+	return rc;
+    }
+
+    rc = pjsip_tcp_transport_start(endpt, NULL, 1, NULL);
+    if (rc != PJ_SUCCESS) {
+	app_perror("tcp init", rc);
+	return rc;
+    }
+
+    rc = pjsip_endpt_create_resolver(endpt, &resolver);
+    if (rc != PJ_SUCCESS) {
+	app_perror("create resolver", rc);
+	return rc;
+    }
+
+    pj_dns_resolver_set_ns(resolver, 1, &ns, NULL);
+
+    rc = pjsip_endpt_set_resolver(endpt, resolver);
+    if (rc != PJ_SUCCESS) {
+	app_perror("set resolver", rc);
+	return rc;
+    }
+
+    return PJ_SUCCESS;
+}
+
+static int tsx_create_and_send_req(void *arg)
+{
+    pj_str_t dst_uri = pj_str((char*)arg);
+    pj_str_t from_uri = pj_str((char*)"<sip:user@host>");
+    pjsip_tx_data *tdata;
+    pj_status_t status;
+
+    status = pjsip_endpt_create_request(endpt, &pjsip_options_method,
+                                        &dst_uri, &from_uri, &dst_uri,
+                                        NULL, NULL, -1, NULL,
+                                        &tdata);
+    if (status != PJ_SUCCESS)
+	return status;
+
+    status = pjsip_endpt_send_request(endpt, tdata, -1, NULL, NULL);
+    if (status != PJ_SUCCESS)
+	return status;
+
+    return PJ_SUCCESS;
+}
+
+int tsx_destroy_test()
+{
+    struct tsx_test_state state;
+    struct test_desc
+    {
+	const char *title;
+	int (*func)(void*);
+	void *arg;
+	int sleep_before_unload;
+	int sleep_after_unload;
+    } test_entries[] =
+    {
+	{
+	    "normal unable to resolve",
+	    &tsx_create_and_send_req,
+	    "sip:user@somehost",
+	    10000,
+	    1
+	},
+	{
+	    "resolve and destroy, wait",
+	    &tsx_create_and_send_req,
+	    "sip:user@somehost",
+	    1,
+	    10000
+	},
+	{
+	    "tcp connect and destroy",
+	    &tsx_create_and_send_req,
+	    "sip:user@10.125.36.63:58517;transport=tcp",
+	    60000,
+	    1000
+	},
+	{
+	    "tcp connect and destroy",
+	    &tsx_create_and_send_req,
+	    "sip:user@10.125.36.63:58517;transport=tcp",
+	    1,
+	    60000
+	},
+
+    };
+    int rc;
+    unsigned i;
+    const int INDENT = 2;
+
+    pj_log_add_indent(INDENT);
+    destroy_endpt();
+
+    for (i=0; i<PJ_ARRAY_SIZE(test_entries); ++i) {
+	struct test_desc *td = &test_entries[i];
+
+	PJ_LOG(3,(THIS_FILE, "%s", td->title));
+
+	pj_log_add_indent(INDENT);
+	save_tsx_test_state(&state);
+
+	rc = init_endpt();
+	if (rc != PJ_SUCCESS) {
+	    pj_log_add_indent(-INDENT*2);
+	    return -10;
+	}
+
+	rc = td->func(td->arg);
+	if (rc != PJ_SUCCESS) {
+	    pj_log_add_indent(-INDENT*2);
+	    return -20;
+	}
+
+	flush_events(td->sleep_before_unload);
+	pjsip_tsx_layer_destroy();
+	flush_events(td->sleep_after_unload);
+	destroy_endpt();
+
+	rc = check_tsx_test_state(&state);
+	if (rc != PJ_SUCCESS) {
+	    init_endpt();
+	    pj_log_add_indent(-INDENT*2);
+	    return -30;
+	}
+
+	pj_log_add_indent(-INDENT);
+    }
+
+    init_endpt();
+
+    pj_log_add_indent(-INDENT);
+    return 0;
+}
+
diff --git a/pjsip/src/test/tsx_bench.c b/pjsip/src/test/tsx_bench.c
index 8ae5778..2282b6a 100644
--- a/pjsip/src/test/tsx_bench.c
+++ b/pjsip/src/test/tsx_bench.c
@@ -79,8 +79,13 @@
 on_error:
     for (i=0; i<working_set; ++i) {
 	if (tsx[i]) {
+	    pj_timer_heap_t *th;
+
 	    pjsip_tsx_terminate(tsx[i], 601);
 	    tsx[i] = NULL;
+
+	    th = pjsip_endpt_get_timer_heap(endpt);
+	    pj_timer_heap_poll(th, NULL);
 	}
     }
     pjsip_tx_data_dec_ref(request);
@@ -177,8 +182,14 @@
 on_error:
     for (i=0; i<working_set; ++i) {
 	if (tsx[i]) {
+	    pj_timer_heap_t *th;
+
 	    pjsip_tsx_terminate(tsx[i], 601);
 	    tsx[i] = NULL;
+
+	    th = pjsip_endpt_get_timer_heap(endpt);
+	    pj_timer_heap_poll(th, NULL);
+
 	}
     }
     pjsip_tx_data_dec_ref(request);
@@ -208,6 +219,8 @@
     for (i=0; i<REPEAT; ++i) {
 	PJ_LOG(3,(THIS_FILE, "    test %d of %d..",
 		  i+1, REPEAT));
+	PJ_LOG(3,(THIS_FILE, "    number of current tsx: %d",
+		  pjsip_tsx_layer_get_tsx_count()));
 	status = uac_tsx_bench(WORKING_SET, &usec[i]);
 	if (status != PJ_SUCCESS)
 	    return status;
@@ -246,6 +259,8 @@
     for (i=0; i<REPEAT; ++i) {
 	PJ_LOG(3,(THIS_FILE, "    test %d of %d..",
 		  i+1, REPEAT));
+	PJ_LOG(3,(THIS_FILE, "    number of current tsx: %d",
+		  pjsip_tsx_layer_get_tsx_count()));
 	status = uas_tsx_bench(WORKING_SET, &usec[i]);
 	if (status != PJ_SUCCESS)
 	    return status;
diff --git a/pjsip/src/test/tsx_uac_test.c b/pjsip/src/test/tsx_uac_test.c
index 0677da1..d978df8 100644
--- a/pjsip/src/test/tsx_uac_test.c
+++ b/pjsip/src/test/tsx_uac_test.c
@@ -220,10 +220,13 @@
 	if (tsx->state == PJSIP_TSX_STATE_TERMINATED) {
 
 	    /* Test the status code. */
-	    if (tsx->status_code != PJSIP_SC_TSX_TRANSPORT_ERROR) {
+	    if (tsx->status_code != PJSIP_SC_TSX_TRANSPORT_ERROR &&
+		tsx->status_code != PJSIP_SC_BAD_GATEWAY)
+	    {
 		PJ_LOG(3,(THIS_FILE, 
-			  "    error: status code is %d instead of %d",
-			  tsx->status_code, PJSIP_SC_TSX_TRANSPORT_ERROR));
+			  "    error: status code is %d instead of %d or %d",
+			  tsx->status_code, PJSIP_SC_TSX_TRANSPORT_ERROR,
+			  PJSIP_SC_BAD_GATEWAY));
 		test_complete = -720;
 	    }
 
@@ -688,7 +691,7 @@
 	    tsx = pjsip_tsx_layer_find_tsx(&key, PJ_TRUE);
 	    if (tsx) {
 		pjsip_tsx_terminate(tsx, PJSIP_SC_REQUEST_TERMINATED);
-		pj_mutex_unlock(tsx->mutex);
+		pj_grp_lock_release(tsx->grp_lock);
 	    } else {
 		PJ_LOG(3,(THIS_FILE, "    error: uac transaction not found!"));
 		test_complete = -633;
@@ -1027,7 +1030,7 @@
 	tsx = pjsip_tsx_layer_find_tsx(&tsx_key, PJ_TRUE);
 	if (tsx) {
 	    pjsip_tsx_terminate(tsx, PJSIP_SC_REQUEST_TERMINATED);
-	    pj_mutex_unlock(tsx->mutex);
+	    pj_grp_lock_release(tsx->grp_lock);
 	    flush_events(1000);
 	}
 	pjsip_tx_data_dec_ref(tdata);
diff --git a/pjsip/src/test/tsx_uas_test.c b/pjsip/src/test/tsx_uas_test.c
index 973f331..3c43f53 100644
--- a/pjsip/src/test/tsx_uas_test.c
+++ b/pjsip/src/test/tsx_uas_test.c
@@ -225,12 +225,12 @@
     if (status != PJ_SUCCESS) {
 	// Some tests do expect failure!
 	//PJ_LOG(3,(THIS_FILE,"    error: timer unable to send response"));
-	pj_mutex_unlock(tsx->mutex);
+	pj_grp_lock_release(tsx->grp_lock);
 	pjsip_tx_data_dec_ref(r->tdata);
 	return;
     }
 
-    pj_mutex_unlock(tsx->mutex);
+    pj_grp_lock_release(tsx->grp_lock);
 }
 
 /* Utility to send response. */
@@ -313,7 +313,7 @@
     }
 
     pjsip_tsx_terminate(tsx, status_code);
-    pj_mutex_unlock(tsx->mutex);
+    pj_grp_lock_release(tsx->grp_lock);
 }
 
 #if 0	/* Unused for now */
@@ -1259,7 +1259,7 @@
 	tsx = pjsip_tsx_layer_find_tsx(&tsx_key, PJ_TRUE);
 	if (tsx) {
 	    pjsip_tsx_terminate(tsx, PJSIP_SC_REQUEST_TERMINATED);
-	    pj_mutex_unlock(tsx->mutex);
+	    pj_grp_lock_release(tsx->grp_lock);
 	    flush_events(1000);
 	}
 	pjsip_tx_data_dec_ref(tdata);