Author: mturk
Date: Tue Nov 15 22:55:41 2011
New Revision: 1202459

URL: http://svn.apache.org/viewvc?rev=1202459&view=rev
Log:
Axe the thread lock result check. Its usage wasn't consistent and it's basically useless.
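
[Not part of the commit: a minimal sketch of the calling-convention change this revision makes, using the JK_ENTER_CS/JK_LEAVE_CS forms from the jk_mt.h hunk below; variable names follow the surrounding code.]

    /* Before: callers had to supply an int result and check it. */
    int rc;
    JK_ENTER_CS(&aw->cs, rc);
    if (rc) {
        /* ... critical section ... */
        JK_LEAVE_CS(&aw->cs, rc);
    }
    else {
        jk_log(l, JK_LOG_ERROR, "locking thread (errno=%d)", errno);
    }

    /* After: the macros take only the mutex; there is no result to check. */
    JK_ENTER_CS(&aw->cs);
    /* ... critical section ... */
    JK_LEAVE_CS(&aw->cs);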

Modified:
    tomcat/jk/trunk/native/common/jk_ajp_common.c
    tomcat/jk/trunk/native/common/jk_lb_worker.c
    tomcat/jk/trunk/native/common/jk_mt.h
    tomcat/jk/trunk/native/common/jk_shm.c
    tomcat/jk/trunk/native/common/jk_uri_worker_map.c
    tomcat/jk/trunk/native/common/jk_worker.c

Modified: tomcat/jk/trunk/native/common/jk_ajp_common.c
URL: http://svn.apache.org/viewvc/tomcat/jk/trunk/native/common/jk_ajp_common.c?rev=1202459&r1=1202458&r2=1202459&view=diff
==============================================================================
--- tomcat/jk/trunk/native/common/jk_ajp_common.c (original)
+++ tomcat/jk/trunk/native/common/jk_ajp_common.c Tue Nov 15 22:55:41 2011
@@ -823,7 +823,7 @@ void ajp_close_endpoint(ajp_endpoint_t *
  */
 static int ajp_next_connection(ajp_endpoint_t *ae, jk_logger_t *l)
 {
-    int rc;
+    unsigned int i;
     int ret = JK_FALSE;
     ajp_worker_t *aw = ae->worker;
 
@@ -834,27 +834,24 @@ static int ajp_next_connection(ajp_endpo
         jk_shutdown_socket(ae->sd, l);
     /* Mark existing endpoint socket as closed */
     ae->sd = JK_INVALID_SOCKET;
-    JK_ENTER_CS(&aw->cs, rc);
-    if (rc) {
-        unsigned int i;
-        for (i = 0; i < aw->ep_cache_sz; i++) {
-            /* Find cache slot with usable socket */
-            if (IS_SLOT_AVAIL(aw->ep_cache[i]) &&
-                IS_VALID_SOCKET(aw->ep_cache[i]->sd)) {
-                ae->sd = aw->ep_cache[i]->sd;
-                aw->ep_cache[i]->sd = JK_INVALID_SOCKET;
-                break;
-            }
-        }
-        JK_LEAVE_CS(&aw->cs, rc);
-        if (IS_VALID_SOCKET(ae->sd)) {
-            ret = JK_TRUE;
-            if (JK_IS_DEBUG_LEVEL(l))
-                jk_log(l, JK_LOG_DEBUG,
-                       "(%s) Will try pooled connection socket %d from slot %d",
-                        ae->worker->name, ae->sd, i);
+    JK_ENTER_CS(&aw->cs);
+    for (i = 0; i < aw->ep_cache_sz; i++) {
+        /* Find cache slot with usable socket */
+        if (IS_SLOT_AVAIL(aw->ep_cache[i]) &&
+            IS_VALID_SOCKET(aw->ep_cache[i]->sd)) {
+            ae->sd = aw->ep_cache[i]->sd;
+            aw->ep_cache[i]->sd = JK_INVALID_SOCKET;
+            break;
         }
     }
+    JK_LEAVE_CS(&aw->cs);
+    if (IS_VALID_SOCKET(ae->sd)) {
+        ret = JK_TRUE;
+        if (JK_IS_DEBUG_LEVEL(l))
+            jk_log(l, JK_LOG_DEBUG,
+                    "(%s) Will try pooled connection socket %d from slot %d",
+                    ae->worker->name, ae->sd, i);
+    }
     JK_TRACE_EXIT(l);
     return ret;
 }
@@ -1063,33 +1060,24 @@ void jk_ajp_pull(ajp_worker_t * aw, int 
                    host, port, aw->name);
         }
         else {
-            int rc;
-            JK_ENTER_CS(&aw->cs, rc);
-            if (rc) {
-                unsigned int i;
-                for (i = 0; i < aw->ep_cache_sz; i++) {
-                    /* Close all avail connections in the cache
-                     * Note that this won't change active connections.
-                     */
-                    if (IS_SLOT_AVAIL(aw->ep_cache[i]) && IS_VALID_SOCKET(aw->ep_cache[i]->sd)) {
-                        int sd = aw->ep_cache[i]->sd;
-                        aw->ep_cache[i]->sd = JK_INVALID_SOCKET;
-                        aw->ep_cache[i]->addr_sequence = aw->addr_sequence;
-                        jk_shutdown_socket(sd, l);
-                        aw->s->connected--;
-                    }
+            unsigned int i;
+            JK_ENTER_CS(&aw->cs);
+            for (i = 0; i < aw->ep_cache_sz; i++) {
+                /* Close all avail connections in the cache
+                 * Note that this won't change active connections.
+                 */
+                if (IS_SLOT_AVAIL(aw->ep_cache[i]) && IS_VALID_SOCKET(aw->ep_cache[i]->sd)) {
+                    int sd = aw->ep_cache[i]->sd;
+                    aw->ep_cache[i]->sd = JK_INVALID_SOCKET;
+                    aw->ep_cache[i]->addr_sequence = aw->addr_sequence;
+                    jk_shutdown_socket(sd, l);
+                    aw->s->connected--;
                 }
             }
             aw->port = port;
             strncpy(aw->host, host, JK_SHM_STR_SIZ);
             memcpy(&(aw->worker_inet_addr), &inet_addr, sizeof(inet_addr));
-            if (rc) {
-                JK_LEAVE_CS(&aw->cs, rc);
-            }
-            else {
-                jk_log(l, JK_LOG_ERROR,
-                       "locking thread (errno=%d)", errno);
-            }
+            JK_LEAVE_CS(&aw->cs);
         }
     }
 
@@ -1130,26 +1118,20 @@ void jk_ajp_push(ajp_worker_t * aw, int 
         jk_shm_unlock();
 
     if (address_change == JK_TRUE) {
-        int rc;
-        JK_ENTER_CS(&aw->cs, rc);
-        if (rc) {
-            unsigned int i;
-            for (i = 0; i < aw->ep_cache_sz; i++) {
-                /* Close all connections in the cache */
-                if (IS_SLOT_AVAIL(aw->ep_cache[i]) && IS_VALID_SOCKET(aw->ep_cache[i]->sd)) {
-                    int sd = aw->ep_cache[i]->sd;
-                    aw->ep_cache[i]->sd = JK_INVALID_SOCKET;
-                    aw->ep_cache[i]->addr_sequence = aw->addr_sequence;
-                    jk_shutdown_socket(sd, l);
-                    aw->s->connected--;
-                }
+        unsigned int i;
+
+        JK_ENTER_CS(&aw->cs);
+        for (i = 0; i < aw->ep_cache_sz; i++) {
+            /* Close all connections in the cache */
+            if (IS_SLOT_AVAIL(aw->ep_cache[i]) && IS_VALID_SOCKET(aw->ep_cache[i]->sd)) {
+                int sd = aw->ep_cache[i]->sd;
+                aw->ep_cache[i]->sd = JK_INVALID_SOCKET;
+                aw->ep_cache[i]->addr_sequence = aw->addr_sequence;
+                jk_shutdown_socket(sd, l);
+                aw->s->connected--;
             }
-            JK_LEAVE_CS(&aw->cs, rc);
-        }
-        else {
-            jk_log(l, JK_LOG_ERROR,
-                   "locking thread (errno=%d)", errno);
         }
+        JK_LEAVE_CS(&aw->cs);
     }
     JK_TRACE_EXIT(l);
 }
@@ -3008,7 +2990,7 @@ int ajp_destroy(jk_worker_t **pThis, jk_
                 ajp_close_endpoint(aw->ep_cache[i], l);
         }
         free(aw->ep_cache);
-        JK_DELETE_CS(&(aw->cs), i);
+        JK_DELETE_CS(&aw->cs);
 
         if (aw->login) {
              /* take care of removing previously allocated data */
@@ -3036,7 +3018,6 @@ int JK_METHOD ajp_done(jk_endpoint_t **e
 
     if (e && *e && (*e)->endpoint_private) {
         ajp_endpoint_t *p = (*e)->endpoint_private;
-        int rc;
         ajp_worker_t *w = p->worker;
 
         /* set last_access only if needed */
@@ -3048,22 +3029,16 @@ int JK_METHOD ajp_done(jk_endpoint_t **e
         }
         ajp_reset_endpoint(p, l);
         *e = NULL;
-        JK_ENTER_CS(&w->cs, rc);
-        if (rc) {
-            p->avail = JK_TRUE;
-            JK_LEAVE_CS(&w->cs, rc);
+        JK_ENTER_CS(&w->cs);
+        p->avail = JK_TRUE;
+        JK_LEAVE_CS(&w->cs);
 
-            if (JK_IS_DEBUG_LEVEL(l))
-                jk_log(l, JK_LOG_DEBUG,
-                        "recycling connection pool for worker %s and socket %d",
-                        p->worker->name, (int)p->sd);
-            JK_TRACE_EXIT(l);
-            return JK_TRUE;
-        }
-        jk_log(l, JK_LOG_ERROR,
-               "locking thread (errno=%d)", errno);
+        if (JK_IS_DEBUG_LEVEL(l))
+            jk_log(l, JK_LOG_DEBUG,
+                    "recycling connection pool for worker %s and socket %d",
+                    p->worker->name, (int)p->sd);
         JK_TRACE_EXIT(l);
-        return JK_FALSE;
+        return JK_TRUE;
     }
 
     JK_LOG_NULL_PARAMS(l);
@@ -3079,77 +3054,67 @@ int ajp_get_endpoint(jk_worker_t *pThis,
     if (pThis && pThis->worker_private && je) {
         ajp_worker_t *aw = pThis->worker_private;
         ajp_endpoint_t *ae = NULL;
-        int rc;
         int retry = 0;
 
         *je = NULL;
         /* Loop until cache_acquire_timeout interval elapses */
         while ((retry * JK_SLEEP_DEF) < aw->cache_acquire_timeout) {
+            unsigned int slot;
 
-            JK_ENTER_CS(&aw->cs, rc);
-            if (rc) {
-                unsigned int slot;
-                /* Try to find connected socket cache entry */
-                for (slot = 0; slot < aw->ep_cache_sz; slot++) {
-                    if (IS_SLOT_AVAIL(aw->ep_cache[slot]) &&
-                        IS_VALID_SOCKET(aw->ep_cache[slot]->sd)) {
-                        ae = aw->ep_cache[slot];
-                        if (ae->reuse) {
-                            aw->ep_cache[slot]->avail = JK_FALSE;
-                            break;
-                        }
-                        else {
-                            /* XXX: We shouldn't have non reusable
-                             * opened socket in the cache
-                             */
-                            ajp_reset_endpoint(ae, l);
-                            ae->avail = JK_TRUE;
-                            ae = NULL;
-                            jk_log(l, JK_LOG_WARNING,
-                                   "closing non reusable pool slot=%d", slot);
-                        }
+            JK_ENTER_CS(&aw->cs);
+            /* Try to find connected socket cache entry */
+            for (slot = 0; slot < aw->ep_cache_sz; slot++) {
+                if (IS_SLOT_AVAIL(aw->ep_cache[slot]) &&
+                    IS_VALID_SOCKET(aw->ep_cache[slot]->sd)) {
+                    ae = aw->ep_cache[slot];
+                    if (ae->reuse) {
+                        aw->ep_cache[slot]->avail = JK_FALSE;
+                        break;
                     }
-                }
-                if (!ae) {
-                    /* No connected cache entry found.
-                     * Use the first free one.
-                     */
-                    for (slot = 0; slot < aw->ep_cache_sz; slot++) {
-                        if (IS_SLOT_AVAIL(aw->ep_cache[slot])) {
-                            ae = aw->ep_cache[slot];
-                            aw->ep_cache[slot]->avail = JK_FALSE;
-                            break;
-                        }
+                    else {
+                        /* XXX: We shouldn't have non reusable
+                         * opened socket in the cache
+                         */
+                        ajp_reset_endpoint(ae, l);
+                        ae->avail = JK_TRUE;
+                        ae = NULL;
+                        jk_log(l, JK_LOG_WARNING,
+                               "closing non reusable pool slot=%d", slot);
                     }
                 }
-                JK_LEAVE_CS(&aw->cs, rc);
-                if (ae) {
-                    if (aw->cache_timeout > 0)
-                        ae->last_access = time(NULL);
-                    *je = &ae->endpoint;
-                    if (JK_IS_DEBUG_LEVEL(l))
-                        jk_log(l, JK_LOG_DEBUG,
-                               "acquired connection pool slot=%u after %d retries",
-                               slot, retry);
-                    JK_TRACE_EXIT(l);
-                    return JK_TRUE;
-                }
-                else {
-                    retry++;
-                    if (JK_IS_DEBUG_LEVEL(l))
-                        jk_log(l, JK_LOG_DEBUG,
-                               "could not get free endpoint for worker %s"
-                               " (retry %d, sleeping for %d ms)",
-                               aw->name, retry, JK_SLEEP_DEF);
-                    jk_sleep(JK_SLEEP_DEF);
+            }
+            if (!ae) {
+                /* No connected cache entry found.
+                 * Use the first free one.
+                 */
+                for (slot = 0; slot < aw->ep_cache_sz; slot++) {
+                    if (IS_SLOT_AVAIL(aw->ep_cache[slot])) {
+                        ae = aw->ep_cache[slot];
+                        aw->ep_cache[slot]->avail = JK_FALSE;
+                        break;
+                    }
                 }
             }
-            else {
-               jk_log(l, JK_LOG_ERROR,
-                      "locking thread (errno=%d)", errno);
+            JK_LEAVE_CS(&aw->cs);
+            if (ae) {
+                if (aw->cache_timeout > 0)
+                    ae->last_access = time(NULL);
+                *je = &ae->endpoint;
+                if (JK_IS_DEBUG_LEVEL(l))
+                    jk_log(l, JK_LOG_DEBUG,
+                           "acquired connection pool slot=%u after %d retries",
+                           slot, retry);
                 JK_TRACE_EXIT(l);
-                return JK_FALSE;
-
+                return JK_TRUE;
+            }
+            else {
+                retry++;
+                if (JK_IS_DEBUG_LEVEL(l))
+                    jk_log(l, JK_LOG_DEBUG,
+                            "could not get free endpoint for worker %s"
+                            " (retry %d, sleeping for %d ms)",
+                            aw->name, retry, JK_SLEEP_DEF);
+                jk_sleep(JK_SLEEP_DEF);
             }
         }
         jk_log(l, JK_LOG_WARNING,
@@ -3171,8 +3136,11 @@ int JK_METHOD ajp_maintain(jk_worker_t *
     if (pThis && pThis->worker_private) {
         ajp_worker_t *aw = pThis->worker_private;
         time_t now = mstarted;
-        int rc;
+        int i;
         long delta;
+        unsigned int n = 0, k = 0, cnt = 0;
+        unsigned int m, m_count = 0;
+        jk_sock_t   *m_sock;
 
         jk_shm_lock();
 
@@ -3201,114 +3169,100 @@ int JK_METHOD ajp_maintain(jk_worker_t *
             return JK_TRUE;
         }
 
-        JK_ENTER_CS(&aw->cs, rc);
-        if (rc) {
-            unsigned int n = 0, k = 0, cnt = 0;
-            int i;
-            unsigned int m, m_count = 0;
-            jk_sock_t   *m_sock;
-            /* Count open slots */
-            for (i = (int)aw->ep_cache_sz - 1; i >= 0; i--) {
-                if (aw->ep_cache[i] && IS_VALID_SOCKET(aw->ep_cache[i]->sd))
-                    cnt++;
+        JK_ENTER_CS(&aw->cs);
+        /* Count open slots */
+        for (i = (int)aw->ep_cache_sz - 1; i >= 0; i--) {
+            if (aw->ep_cache[i] && IS_VALID_SOCKET(aw->ep_cache[i]->sd))
+                cnt++;
+        }
+        m_sock = (jk_sock_t *)malloc((cnt + 1) * sizeof(jk_sock_t));
+        /* Handle worker cache timeouts */
+        if (aw->cache_timeout > 0) {
+            for (i = (int)aw->ep_cache_sz - 1;
+                    i >= 0; i--) {
+                /* Skip the closed sockets */
+                if (IS_SLOT_AVAIL(aw->ep_cache[i]) &&
+                    IS_VALID_SOCKET(aw->ep_cache[i]->sd)) {
+                    int elapsed = (int)difftime(mstarted, aw->ep_cache[i]->last_access);
+                    if (elapsed > aw->cache_timeout) {
+                        time_t rt = 0;
+                        n++;
+                        if (JK_IS_DEBUG_LEVEL(l))
+                            rt = time(NULL);
+                        aw->ep_cache[i]->reuse = JK_FALSE;
+                        m_sock[m_count++] = aw->ep_cache[i]->sd;
+                        aw->ep_cache[i]->sd = JK_INVALID_SOCKET;
+                        ajp_reset_endpoint(aw->ep_cache[i], l);
+                        if (JK_IS_DEBUG_LEVEL(l))
+                            jk_log(l, JK_LOG_DEBUG,
+                                   "cleaning pool slot=%d elapsed %d in %d",
+                                   i, elapsed, (int)(difftime(time(NULL), rt)));
+                    }
+                }
+                if (cnt <= aw->ep_mincache_sz + n) {
+                    if (JK_IS_DEBUG_LEVEL(l)) {
+                        jk_log(l, JK_LOG_DEBUG,
+                        "reached pool min size %u from %u cache slots",
+                        aw->ep_mincache_sz, aw->ep_cache_sz);
+                    }
+                    break;
+                }
             }
-            m_sock = (jk_sock_t *)malloc((cnt + 1) * sizeof(jk_sock_t));
-            /* Handle worker cache timeouts */
-            if (aw->cache_timeout > 0) {
-                for (i = (int)aw->ep_cache_sz - 1;
-                     i >= 0; i--) {
-                    /* Skip the closed sockets */
-                    if (IS_SLOT_AVAIL(aw->ep_cache[i]) &&
-                        IS_VALID_SOCKET(aw->ep_cache[i]->sd)) {
-                        int elapsed = (int)difftime(mstarted, aw->ep_cache[i]->last_access);
-                        if (elapsed > aw->cache_timeout) {
-                            time_t rt = 0;
-                            n++;
-                            if (JK_IS_DEBUG_LEVEL(l))
-                                rt = time(NULL);
+        }
+        /* Handle worker connection keepalive */
+        if (aw->conn_ping_interval > 0 && aw->ping_timeout > 0) {
+            for (i = (int)aw->ep_cache_sz - 1; i >= 0; i--) {
+                /* Skip the closed sockets */
+                if (IS_SLOT_AVAIL(aw->ep_cache[i]) &&
+                    IS_VALID_SOCKET(aw->ep_cache[i]->sd)) {
+                    int elapsed = (int)difftime(now, aw->ep_cache[i]->last_access);
+                    if (elapsed > aw->conn_ping_interval) {
+                        k++;
+                        /* handle cping/cpong.
+                         */
+                        if (ajp_handle_cping_cpong(aw->ep_cache[i],
+                            aw->ping_timeout, l) == JK_FALSE) {
+                            jk_log(l, JK_LOG_INFO,
+                                   "(%s) failed sending request, "
+                                   "socket %d keepalive cping/cpong "
+                                   "failure (errno=%d)",
+                                   aw->name,
+                                   aw->ep_cache[i]->sd,
+                                   aw->ep_cache[i]->last_errno);
                             aw->ep_cache[i]->reuse = JK_FALSE;
                             m_sock[m_count++] = aw->ep_cache[i]->sd;
                             aw->ep_cache[i]->sd = JK_INVALID_SOCKET;
                             ajp_reset_endpoint(aw->ep_cache[i], l);
-                            if (JK_IS_DEBUG_LEVEL(l))
-                                jk_log(l, JK_LOG_DEBUG,
-                                        "cleaning pool slot=%d elapsed %d in %d",
-                                        i, elapsed, (int)(difftime(time(NULL), rt)));
                         }
-                    }
-                    if (cnt <= aw->ep_mincache_sz + n) {
-                        if (JK_IS_DEBUG_LEVEL(l)) {
-                            jk_log(l, JK_LOG_DEBUG,
-                            "reached pool min size %u from %u cache slots",
-                            aw->ep_mincache_sz, aw->ep_cache_sz);
-                        }
-                        break;
-                    }
-                }
-            }
-            /* Handle worker connection keepalive */
-            if (aw->conn_ping_interval > 0 && aw->ping_timeout > 0) {
-                for (i = (int)aw->ep_cache_sz - 1; i >= 0; i--) {
-                    /* Skip the closed sockets */
-                    if (IS_SLOT_AVAIL(aw->ep_cache[i]) &&
-                        IS_VALID_SOCKET(aw->ep_cache[i]->sd)) {
-                        int elapsed = (int)difftime(now, aw->ep_cache[i]->last_access);
-                        if (elapsed > aw->conn_ping_interval) {
-                            k++;
-                            /* handle cping/cpong.
-                             */
-                            if (ajp_handle_cping_cpong(aw->ep_cache[i],
-                                aw->ping_timeout, l) == JK_FALSE) {
-                                jk_log(l, JK_LOG_INFO,
-                                       "(%s) failed sending request, "
-                                       "socket %d keepalive cping/cpong "
-                                       "failure (errno=%d)",
-                                       aw->name,
-                                       aw->ep_cache[i]->sd,
-                                       aw->ep_cache[i]->last_errno);
-                                aw->ep_cache[i]->reuse = JK_FALSE;
-                                m_sock[m_count++] = aw->ep_cache[i]->sd;
-                                aw->ep_cache[i]->sd = JK_INVALID_SOCKET;
-                                ajp_reset_endpoint(aw->ep_cache[i], l);
-                            }
-                            else {
-                                now = time(NULL);
-                                aw->ep_cache[i]->last_access = now;
-                            }
+                        else {
+                            now = time(NULL);
+                            aw->ep_cache[i]->last_access = now;
                         }
                     }
                 }
             }
-            JK_LEAVE_CS(&aw->cs, rc);
-            /* Shutdown sockets outside of the lock.
-             * This has benefits only if maintain was
-             * called from the watchdog thread.
-             */
-            for (m = 0; m < m_count; m++) {
-                jk_shutdown_socket(m_sock[m], l);
-            }
-            free(m_sock);
-            if (n && JK_IS_DEBUG_LEVEL(l))
-                jk_log(l, JK_LOG_DEBUG,
-                        "recycled %u sockets in %d seconds from %u pool slots",
-                        n, (int)(difftime(time(NULL), mstarted)),
-                        aw->ep_cache_sz);
-            if (k && JK_IS_DEBUG_LEVEL(l))
-                jk_log(l, JK_LOG_DEBUG,
-                        "pinged %u sockets in %d seconds from %u pool slots",
-                        k, (int)(difftime(time(NULL), mstarted)),
-                        aw->ep_cache_sz);
-            JK_TRACE_EXIT(l);
-            return JK_TRUE;
         }
-        else {
-           jk_log(l, JK_LOG_ERROR,
-                  "locking thread (errno=%d)",
-                  errno);
-            JK_TRACE_EXIT(l);
-            return JK_FALSE;
-
+        JK_LEAVE_CS(&aw->cs);
+        /* Shutdown sockets outside of the lock.
+         * This has benefits only if maintain was
+         * called from the watchdog thread.
+         */
+        for (m = 0; m < m_count; m++) {
+            jk_shutdown_socket(m_sock[m], l);
         }
+        free(m_sock);
+        if (n && JK_IS_DEBUG_LEVEL(l))
+            jk_log(l, JK_LOG_DEBUG,
+                   "recycled %u sockets in %d seconds from %u pool slots",
+                   n, (int)(difftime(time(NULL), mstarted)),
+                   aw->ep_cache_sz);
+        if (k && JK_IS_DEBUG_LEVEL(l))
+            jk_log(l, JK_LOG_DEBUG,
+                   "pinged %u sockets in %d seconds from %u pool slots",
+                   k, (int)(difftime(time(NULL), mstarted)),
+                   aw->ep_cache_sz);
+        JK_TRACE_EXIT(l);
+        return JK_TRUE;
     }
     else {
         JK_LOG_NULL_PARAMS(l);
@@ -3325,26 +3279,17 @@ int ajp_has_endpoint(jk_worker_t *pThis,
 
     if (pThis && pThis->worker_private) {
         ajp_worker_t *aw = pThis->worker_private;
-        int rc;
+        unsigned int slot;
 
-        JK_ENTER_CS(&aw->cs, rc);
-        if (rc) {
-            unsigned int slot;
-            /* Try to find connected socket cache entry */
-            for (slot = 0; slot < aw->ep_cache_sz; slot++) {
-                if (IS_SLOT_AVAIL(aw->ep_cache[slot])) {
-                    JK_LEAVE_CS(&aw->cs, rc);
-                    return JK_TRUE;
-                }
+        JK_ENTER_CS(&aw->cs);
+        /* Try to find connected socket cache entry */
+        for (slot = 0; slot < aw->ep_cache_sz; slot++) {
+            if (IS_SLOT_AVAIL(aw->ep_cache[slot])) {
+                JK_LEAVE_CS(&aw->cs);
+                return JK_TRUE;
             }
-            JK_LEAVE_CS(&aw->cs, rc);
-        }
-        else {
-            jk_log(l, JK_LOG_ERROR,
-                    "locking thread (errno=%d)", errno);
-            JK_TRACE_EXIT(l);
-            return JK_FALSE;
         }
+        JK_LEAVE_CS(&aw->cs);
     }
     else {
         JK_LOG_NULL_PARAMS(l);

Modified: tomcat/jk/trunk/native/common/jk_lb_worker.c
URL: http://svn.apache.org/viewvc/tomcat/jk/trunk/native/common/jk_lb_worker.c?rev=1202459&r1=1202458&r2=1202459&view=diff
==============================================================================
--- tomcat/jk/trunk/native/common/jk_lb_worker.c (original)
+++ tomcat/jk/trunk/native/common/jk_lb_worker.c Tue Nov 15 22:55:41 2011
@@ -954,7 +954,6 @@ static int get_most_suitable_worker(jk_w
                                     jk_logger_t *l)
 {
     int rc = -1;
-    int r;
 
     JK_TRACE_ENTER(l);
     if (p->num_of_workers == 1) {
@@ -978,15 +977,14 @@ static int get_most_suitable_worker(jk_w
         }
     }
     if (p->lblock == JK_LB_LOCK_PESSIMISTIC) {
-        r = jk_shm_lock();
+        if (!jk_shm_lock()) {
+            jk_log(l, JK_LOG_ERROR, "locking failed (errno=%d)", errno);
+            JK_TRACE_EXIT(l);
+            return -1;            
+        }        
     }
     else {
-        JK_ENTER_CS(&(p->cs), r);
-    }
-    if (!r) {
-       jk_log(l, JK_LOG_ERROR,
-              "locking failed (errno=%d)",
-              errno);
+        JK_ENTER_CS(&p->cs);
     }
     if (sessionid) {
         char *session = sessionid;
@@ -1016,7 +1014,7 @@ static int get_most_suitable_worker(jk_w
                         jk_shm_unlock();
                     }
                     else {
-                        JK_LEAVE_CS(&(p->cs), r);
+                        JK_LEAVE_CS(&p->cs);
                     }
                     if (JK_IS_DEBUG_LEVEL(l))
                         jk_log(l, JK_LOG_DEBUG,
@@ -1035,7 +1033,7 @@ static int get_most_suitable_worker(jk_w
                 jk_shm_unlock();
             }
             else {
-                JK_LEAVE_CS(&(p->cs), r);
+                JK_LEAVE_CS(&p->cs);
             }
             jk_log(l, JK_LOG_INFO,
                    "all workers are in error state for session %s",
@@ -1049,7 +1047,7 @@ static int get_most_suitable_worker(jk_w
         jk_shm_unlock();
     }
     else {
-        JK_LEAVE_CS(&(p->cs), r);
+        JK_LEAVE_CS(&p->cs);
     }
     if (rc >= 0) {
         lb_sub_worker_t *wr = &(p->lb_workers[rc]);
@@ -1824,11 +1822,10 @@ static int JK_METHOD destroy(jk_worker_t
     JK_TRACE_ENTER(l);
 
     if (pThis && *pThis && (*pThis)->worker_private) {
-        unsigned int i;
         lb_worker_t *private_data = (*pThis)->worker_private;
 
         close_workers(private_data, private_data->num_of_workers, l);
-        JK_DELETE_CS(&(private_data->cs), i);
+        JK_DELETE_CS(&private_data->cs);
         jk_close_pool(&private_data->p);
         free(private_data);
 

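[Not part of the commit: a condensed view of the new locking prologue in get_most_suitable_worker(), per the hunks above. A failed jk_shm_lock() now aborts worker selection with -1 instead of only logging, while JK_ENTER_CS is no longer checked at all.]

    if (p->lblock == JK_LB_LOCK_PESSIMISTIC) {
        if (!jk_shm_lock()) {
            /* shm lock failure now aborts worker selection */
            jk_log(l, JK_LOG_ERROR, "locking failed (errno=%d)", errno);
            JK_TRACE_EXIT(l);
            return -1;
        }
    }
    else {
        JK_ENTER_CS(&p->cs);   /* in-process lock, result no longer checked */
    }
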
Modified: tomcat/jk/trunk/native/common/jk_mt.h
URL: http://svn.apache.org/viewvc/tomcat/jk/trunk/native/common/jk_mt.h?rev=1202459&r1=1202458&r2=1202459&view=diff
==============================================================================
--- tomcat/jk/trunk/native/common/jk_mt.h (original)
+++ tomcat/jk/trunk/native/common/jk_mt.h Tue Nov 15 22:55:41 2011
@@ -48,9 +48,9 @@
 
 typedef CRITICAL_SECTION JK_CRIT_SEC;
 #define JK_INIT_CS(x, rc)   InitializeCriticalSection(x); rc = JK_TRUE
-#define JK_DELETE_CS(x, rc) DeleteCriticalSection(x);    rc = JK_TRUE
-#define JK_ENTER_CS(x, rc)  EnterCriticalSection(x);     rc = JK_TRUE
-#define JK_LEAVE_CS(x, rc)  LeaveCriticalSection(x);     rc = JK_TRUE
+#define JK_DELETE_CS(x)     DeleteCriticalSection(x)
+#define JK_ENTER_CS(x)      EnterCriticalSection(x)
+#define JK_LEAVE_CS(x)      LeaveCriticalSection(x)
 
 #else /* !WIN32 */
 #define _MT_CODE_PTHREAD
@@ -59,17 +59,12 @@ typedef CRITICAL_SECTION JK_CRIT_SEC;
 #include <fcntl.h>
 
 typedef pthread_mutex_t JK_CRIT_SEC;
-#define JK_INIT_CS(x, rc)\
+#define JK_INIT_CS(x, rc)       \
             if (pthread_mutex_init(x, NULL)) rc = JK_FALSE; else rc = JK_TRUE
 
-#define JK_DELETE_CS(x, rc)\
-            if (pthread_mutex_destroy(x))    rc = JK_FALSE; else rc = JK_TRUE
-
-#define JK_ENTER_CS(x, rc)\
-            if (pthread_mutex_lock(x))       rc = JK_FALSE; else rc = JK_TRUE
-
-#define JK_LEAVE_CS(x, rc)\
-            if (pthread_mutex_unlock(x))     rc = JK_FALSE; else rc = JK_TRUE
+#define JK_DELETE_CS(x) pthread_mutex_destroy(x)
+#define JK_ENTER_CS(x)  pthread_mutex_lock(x)
+#define JK_LEAVE_CS(x)  pthread_mutex_unlock(x)
 
 #if defined(AS400) || defined(NETWARE)
 #define jk_pthread_t   jk_uint32_t
@@ -81,9 +76,9 @@ jk_pthread_t jk_gettid(void);
 
 typedef void *JK_CRIT_SEC;
 #define JK_INIT_CS(x, rc)   rc = JK_TRUE
-#define JK_DELETE_CS(x, rc) rc = JK_TRUE
-#define JK_ENTER_CS(x, rc)  rc = JK_TRUE
-#define JK_LEAVE_CS(x, rc)  rc = JK_TRUE
+#define JK_DELETE_CS(x)     (void)0
+#define JK_ENTER_CS(x)      (void)0
+#define JK_LEAVE_CS(x)      (void)0
 #define jk_gettid()         0
 #endif /* MT_CODE */
 

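[Not part of the commit: JK_INIT_CS keeps its (x, rc) form, as used in jk_shm_open() below; only the ENTER/LEAVE/DELETE macros lose the result argument. A minimal usage sketch against the definitions above:]

    JK_CRIT_SEC cs;
    int rc;

    JK_INIT_CS(&cs, rc);         /* still reports JK_TRUE / JK_FALSE */
    if (rc == JK_TRUE) {
        JK_ENTER_CS(&cs);        /* no result to check any more */
        /* ... protected work ... */
        JK_LEAVE_CS(&cs);
        JK_DELETE_CS(&cs);
    }
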
Modified: tomcat/jk/trunk/native/common/jk_shm.c
URL: http://svn.apache.org/viewvc/tomcat/jk/trunk/native/common/jk_shm.c?rev=1202459&r1=1202458&r2=1202459&view=diff
==============================================================================
--- tomcat/jk/trunk/native/common/jk_shm.c (original)
+++ tomcat/jk/trunk/native/common/jk_shm.c Tue Nov 15 22:55:41 2011
@@ -159,12 +159,12 @@ int jk_shm_open(const char *fname, size_
         jk_shm_inited_cs = 1;
         JK_INIT_CS(&jk_shmem.cs, rc);
     }
-    JK_ENTER_CS(&jk_shmem.cs, rc);
+    JK_ENTER_CS(&jk_shmem.cs);
     if (jk_shmem.hdr) {
         if (JK_IS_DEBUG_LEVEL(l))
             jk_log(l, JK_LOG_DEBUG, "Shared memory is already opened");
         JK_TRACE_EXIT(l);
-        JK_LEAVE_CS(&jk_shmem.cs, rc);
+        JK_LEAVE_CS(&jk_shmem.cs);
         return 0;
     }
     jk_shmem.size = JK_SHM_ALIGN(sizeof(jk_shm_header_t) + sz);
@@ -179,7 +179,7 @@ int jk_shm_open(const char *fname, size_
             }
         }
         if (jk_shm_hlock == NULL) {
-            JK_LEAVE_CS(&jk_shmem.cs, rc);
+            JK_LEAVE_CS(&jk_shmem.cs);
             JK_TRACE_EXIT(l);
             return -1;
         }
@@ -188,7 +188,7 @@ int jk_shm_open(const char *fname, size_
             if (ws == WAIT_FAILED) {
                 CloseHandle(jk_shm_hlock);
                 jk_shm_hlock = NULL;
-                JK_LEAVE_CS(&jk_shmem.cs, rc);
+                JK_LEAVE_CS(&jk_shmem.cs);
                 JK_TRACE_EXIT(l);
                 return -1;
             }
@@ -309,8 +309,7 @@ int jk_shm_attach(const char *fname, siz
 void jk_shm_close()
 {
     if (jk_shm_inited_cs) {
-        int rc;
-        JK_ENTER_CS(&jk_shmem.cs, rc);
+        JK_ENTER_CS(&jk_shmem.cs);
     }
     if (jk_shmem.hdr) {
 #if defined (WIN32)
@@ -334,8 +333,7 @@ void jk_shm_close()
         jk_shmem.filename = NULL;
     }
     if (jk_shm_inited_cs) {
-        int rc;
-        JK_LEAVE_CS(&jk_shmem.cs, rc);
+        JK_LEAVE_CS(&jk_shmem.cs);
     }
 }
 
@@ -761,20 +759,18 @@ void jk_shm_sync_access_time()
 
 int jk_shm_lock()
 {
-    int rc;
+    int rc = JK_TRUE;
 
     if (!jk_shm_inited_cs)
         return JK_FALSE;
-    JK_ENTER_CS(&jk_shmem.cs, rc);
+    JK_ENTER_CS(&jk_shmem.cs);
 #if defined (WIN32)
-    if (rc == JK_TRUE && jk_shm_hlock != NULL) {
-        if (WaitForSingleObject(jk_shm_hlock, INFINITE) != WAIT_FAILED)
-            rc = JK_TRUE;
-        else
+    if (jk_shm_hlock != NULL) {
+        if (WaitForSingleObject(jk_shm_hlock, INFINITE) == WAIT_FAILED)
             rc = JK_FALSE;
     }
 #else
-    if (rc == JK_TRUE && jk_shmem.fd_lock != -1) {
+    if (jk_shmem.fd_lock != -1) {
         JK_ENTER_LOCK(jk_shmem.fd_lock, rc);
     }
 #endif
@@ -783,7 +779,7 @@ int jk_shm_lock()
 
 int jk_shm_unlock()
 {
-    int rc;
+    int rc = JK_TRUE;
 
     if (!jk_shm_inited_cs)
         return JK_FALSE;
@@ -796,7 +792,7 @@ int jk_shm_unlock()
         JK_LEAVE_LOCK(jk_shmem.fd_lock, rc);
     }
 #endif
-    JK_LEAVE_CS(&jk_shmem.cs, rc);
+    JK_LEAVE_CS(&jk_shmem.cs);
     return rc;
 }
 

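[Not part of the commit: with this change jk_shm_lock() always enters the thread critical section, and its return value reflects only the cross-process lock. Condensed flow per the hunks above:]

    int jk_shm_lock()
    {
        int rc = JK_TRUE;

        if (!jk_shm_inited_cs)
            return JK_FALSE;
        JK_ENTER_CS(&jk_shmem.cs);            /* thread mutex, unchecked now */
    #if defined (WIN32)
        if (jk_shm_hlock != NULL &&
            WaitForSingleObject(jk_shm_hlock, INFINITE) == WAIT_FAILED)
            rc = JK_FALSE;                    /* only the global lock can fail */
    #else
        if (jk_shmem.fd_lock != -1)
            JK_ENTER_LOCK(jk_shmem.fd_lock, rc);
    #endif
        return rc;
    }
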
Modified: tomcat/jk/trunk/native/common/jk_uri_worker_map.c
URL: http://svn.apache.org/viewvc/tomcat/jk/trunk/native/common/jk_uri_worker_map.c?rev=1202459&r1=1202458&r2=1202459&view=diff
==============================================================================
--- tomcat/jk/trunk/native/common/jk_uri_worker_map.c (original)
+++ tomcat/jk/trunk/native/common/jk_uri_worker_map.c Tue Nov 15 22:55:41 2011
@@ -256,8 +256,7 @@ static int uri_worker_map_close(jk_uri_w
     JK_TRACE_ENTER(l);
 
     if (uw_map) {
-        int i;
-        JK_DELETE_CS(&(uw_map->cs), i);
+        JK_DELETE_CS(&uw_map->cs);
         jk_close_pool(&uw_map->p_dyn[0]);
         jk_close_pool(&uw_map->p_dyn[1]);
         jk_close_pool(&uw_map->p);
@@ -1254,10 +1253,10 @@ int uri_worker_map_update(jk_uri_worker_
                        uw_map->fname);
             return JK_TRUE;
         }
-        JK_ENTER_CS(&(uw_map->cs), rc);
+        JK_ENTER_CS(&uw_map->cs);
         /* Check if some other thread updated status */
         if (statbuf.st_mtime == uw_map->modified) {
-            JK_LEAVE_CS(&(uw_map->cs), rc);
+            JK_LEAVE_CS(&uw_map->cs);
             if (JK_IS_DEBUG_LEVEL(l))
                 jk_log(l, JK_LOG_DEBUG,
                        "File %s  is not modified",
@@ -1267,7 +1266,7 @@ int uri_worker_map_update(jk_uri_worker_
         rc = uri_worker_map_load(uw_map, l);
         uri_worker_map_ext(uw_map, l);
         uri_worker_map_switch(uw_map, l);
-        JK_LEAVE_CS(&(uw_map->cs), rc);
+        JK_LEAVE_CS(&uw_map->cs);
         jk_log(l, JK_LOG_INFO,
                "Reloaded urimaps from %s", uw_map->fname);
     }

Modified: tomcat/jk/trunk/native/common/jk_worker.c
URL: http://svn.apache.org/viewvc/tomcat/jk/trunk/native/common/jk_worker.c?rev=1202459&r1=1202458&r2=1202459&view=diff
==============================================================================
--- tomcat/jk/trunk/native/common/jk_worker.c (original)
+++ tomcat/jk/trunk/native/common/jk_worker.c Tue Nov 15 22:55:41 2011
@@ -92,9 +92,8 @@ int wc_open(jk_map_t *init_data, jk_work
 
 void wc_close(jk_logger_t *l)
 {
-    int rc;
     JK_TRACE_ENTER(l);
-    JK_DELETE_CS(&worker_lock, rc);
+    JK_DELETE_CS(&worker_lock);
     close_workers(l);
     JK_TRACE_EXIT(l);
 }
@@ -318,11 +317,11 @@ void wc_maintain(jk_logger_t *l)
     if (sz > 0 && worker_maintain_time > 0 &&
         difftime(time(NULL), last_maintain) >= worker_maintain_time) {
         int i;
-        JK_ENTER_CS(&worker_lock, i);
+        JK_ENTER_CS(&worker_lock);
         if (running_maintain ||
             difftime(time(NULL), last_maintain) < worker_maintain_time) {
             /* Already in maintain */
-            JK_LEAVE_CS(&worker_lock, i);
+            JK_LEAVE_CS(&worker_lock);
             JK_TRACE_EXIT(l);
             return;
         }
@@ -330,7 +329,7 @@ void wc_maintain(jk_logger_t *l)
          * the maintain until we are finished.
          */
         running_maintain = 1;
-        JK_LEAVE_CS(&worker_lock, i);
+        JK_LEAVE_CS(&worker_lock);
 
         for (i = 0; i < sz; i++) {
             jk_worker_t *w = jk_map_value_at(worker_map, i);
@@ -342,10 +341,10 @@ void wc_maintain(jk_logger_t *l)
                 w->maintain(w, time(NULL), l);
             }
         }
-        JK_ENTER_CS(&worker_lock, i);
+        JK_ENTER_CS(&worker_lock);
         last_maintain = time(NULL);
         running_maintain = 0;
-        JK_LEAVE_CS(&worker_lock, i);
+        JK_LEAVE_CS(&worker_lock);
     }
     JK_TRACE_EXIT(l);
 }


