}
 
 void
-ksocknal_free_tx (ksock_tx_t *tx)
+ksocknal_free_tx(ksock_tx_t *tx)
 {
        atomic_dec(&ksocknal_data.ksnd_nactive_txs);
 
 }
 
 static int
-ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_send_iov(ksock_conn_t *conn, ksock_tx_t *tx)
 {
        struct kvec *iov = tx->tx_iov;
        int nob;
                return rc;
 
        nob = rc;
-       LASSERT (nob <= tx->tx_resid);
+       LASSERT(nob <= tx->tx_resid);
        tx->tx_resid -= nob;
 
        /* "consume" iov */
 }
 
 static int
-ksocknal_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx)
 {
        lnet_kiov_t *kiov = tx->tx_kiov;
        int nob;
                return rc;
 
        nob = rc;
-       LASSERT (nob <= tx->tx_resid);
+       LASSERT(nob <= tx->tx_resid);
        tx->tx_resid -= nob;
 
        /* "consume" kiov */
 }
 
 static int
-ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_transmit(ksock_conn_t *conn, ksock_tx_t *tx)
 {
        int rc;
        int bufnob;
 
        rc = ksocknal_connsock_addref(conn);
        if (rc != 0) {
-               LASSERT (conn->ksnc_closing);
+               LASSERT(conn->ksnc_closing);
                return -ESHUTDOWN;
        }
 
                        ksocknal_data.ksnd_enomem_tx--;
                        rc = -EAGAIN;
                } else if (tx->tx_niov != 0) {
-                       rc = ksocknal_send_iov (conn, tx);
+                       rc = ksocknal_send_iov(conn, tx);
                } else {
-                       rc = ksocknal_send_kiov (conn, tx);
+                       rc = ksocknal_send_kiov(conn, tx);
                }
 
                bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
                }
 
                /* socket's wmem_queued now includes 'rc' bytes */
-               atomic_sub (rc, &conn->ksnc_tx_nob);
+               atomic_sub(rc, &conn->ksnc_tx_nob);
                rc = 0;
 
        } while (tx->tx_resid != 0);
 }
 
 static int
-ksocknal_recv_iov (ksock_conn_t *conn)
+ksocknal_recv_iov(ksock_conn_t *conn)
 {
        struct kvec *iov = conn->ksnc_rx_iov;
        int nob;
 }
 
 static int
-ksocknal_recv_kiov (ksock_conn_t *conn)
+ksocknal_recv_kiov(ksock_conn_t *conn)
 {
        lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
        int nob;
 }
 
 static int
-ksocknal_receive (ksock_conn_t *conn)
+ksocknal_receive(ksock_conn_t *conn)
 {
        /*
         * Return 1 on success, 0 on EOF, < 0 on error.
 
        rc = ksocknal_connsock_addref(conn);
        if (rc != 0) {
-               LASSERT (conn->ksnc_closing);
+               LASSERT(conn->ksnc_closing);
                return -ESHUTDOWN;
        }
 
        for (;;) {
                if (conn->ksnc_rx_niov != 0)
-                       rc = ksocknal_recv_iov (conn);
+                       rc = ksocknal_recv_iov(conn);
                else
-                       rc = ksocknal_recv_kiov (conn);
+                       rc = ksocknal_recv_kiov(conn);
 
                if (rc <= 0) {
                        /* error/EOF or partial receive */
 }
 
 void
-ksocknal_tx_done (lnet_ni_t *ni, ksock_tx_t *tx)
+ksocknal_tx_done(lnet_ni_t *ni, ksock_tx_t *tx)
 {
        lnet_msg_t *lnetmsg = tx->tx_lnetmsg;
        int rc = (tx->tx_resid == 0 && !tx->tx_zc_aborted) ? 0 : -EIO;
        if (ni == NULL && tx->tx_conn != NULL)
                ni = tx->tx_conn->ksnc_peer->ksnp_ni;
 
-       ksocknal_free_tx (tx);
+       ksocknal_free_tx(tx);
        if (lnetmsg != NULL) /* KSOCK_MSG_NOOP go without lnetmsg */
-               lnet_finalize (ni, lnetmsg, rc);
+               lnet_finalize(ni, lnetmsg, rc);
 }
 
 void
-ksocknal_txlist_done (lnet_ni_t *ni, struct list_head *txlist, int error)
+ksocknal_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int error)
 {
        ksock_tx_t *tx;
 
-       while (!list_empty (txlist)) {
+       while (!list_empty(txlist)) {
                tx = list_entry(txlist->next, ksock_tx_t, tx_list);
 
                if (error && tx->tx_lnetmsg != NULL) {
                        CNETERR("Deleting packet type %d len %d %s->%s\n",
-                               le32_to_cpu (tx->tx_lnetmsg->msg_hdr.type),
-                               le32_to_cpu (tx->tx_lnetmsg->msg_hdr.payload_length),
+                               le32_to_cpu(tx->tx_lnetmsg->msg_hdr.type),
+                               le32_to_cpu(tx->tx_lnetmsg->msg_hdr.payload_length),
                                libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.src_nid)),
                                libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.dest_nid)));
                } else if (error) {
 }
 
 static int
-ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_process_transmit(ksock_conn_t *conn, ksock_tx_t *tx)
 {
        int rc;
 
        if (tx->tx_zc_capable && !tx->tx_zc_checked)
                ksocknal_check_zc_req(tx);
 
-       rc = ksocknal_transmit (conn, tx);
+       rc = ksocknal_transmit(conn, tx);
 
        CDEBUG(D_NET, "send(%d) %d\n", tx->tx_resid, rc);
 
        if (tx->tx_resid == 0) {
                /* Sent everything OK */
-               LASSERT (rc == 0);
+               LASSERT(rc == 0);
 
                return 0;
        }
                spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
 
                /* enomem list takes over scheduler's ref... */
-               LASSERT (conn->ksnc_tx_scheduled);
+               LASSERT(conn->ksnc_tx_scheduled);
                list_add_tail(&conn->ksnc_tx_list,
                              &ksocknal_data.ksnd_enomem_conns);
                if (!cfs_time_aftereq(cfs_time_add(cfs_time_current(),
                                                   SOCKNAL_ENOMEM_RETRY),
                                   ksocknal_data.ksnd_reaper_waketime))
-                       wake_up (&ksocknal_data.ksnd_reaper_waitq);
+                       wake_up(&ksocknal_data.ksnd_reaper_waitq);
 
                spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
                return rc;
                ksocknal_uncheck_zc_req(tx);
 
        /* it's not an error if conn is being closed */
-       ksocknal_close_conn_and_siblings (conn,
-                                         (conn->ksnc_closing) ? 0 : rc);
+       ksocknal_close_conn_and_siblings(conn, (conn->ksnc_closing) ? 0 : rc);
 
        return rc;
 }
 
 static void
-ksocknal_launch_connection_locked (ksock_route_t *route)
+ksocknal_launch_connection_locked(ksock_route_t *route)
 {
        /* called holding write lock on ksnd_global_lock */
 
 }
 
 void
-ksocknal_launch_all_connections_locked (ksock_peer_t *peer)
+ksocknal_launch_all_connections_locked(ksock_peer_t *peer)
 {
        ksock_route_t *route;
 
        int tnob = 0;
        int fnob = 0;
 
-       list_for_each (tmp, &peer->ksnp_conns) {
+       list_for_each(tmp, &peer->ksnp_conns) {
                ksock_conn_t *c  = list_entry(tmp, ksock_conn_t, ksnc_list);
                int nob = atomic_read(&c->ksnc_tx_nob) +
                        c->ksnc_sock->sk->sk_wmem_queued;
 {
        conn->ksnc_proto->pro_pack(tx);
 
-       atomic_add (tx->tx_nob, &conn->ksnc_tx_nob);
+       atomic_add(tx->tx_nob, &conn->ksnc_tx_nob);
        ksocknal_conn_addref(conn); /* +1 ref for tx */
        tx->tx_conn = conn;
 }
 
 void
-ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
+ksocknal_queue_tx_locked(ksock_tx_t *tx, ksock_conn_t *conn)
 {
        ksock_sched_t *sched = conn->ksnc_scheduler;
        ksock_msg_t *msg = &tx->tx_msg;
         * We always expect at least 1 mapped fragment containing the
         * complete ksocknal message header.
         */
-       LASSERT(lnet_iov_nob (tx->tx_niov, tx->tx_iov) +
+       LASSERT(lnet_iov_nob(tx->tx_niov, tx->tx_iov) +
                lnet_kiov_nob(tx->tx_nkiov, tx->tx_kiov) ==
                (unsigned int)tx->tx_nob);
        LASSERT(tx->tx_niov >= 1);
        LASSERT(tx->tx_resid == tx->tx_nob);
 
-       CDEBUG (D_NET, "Packet %p type %d, nob %d niov %d nkiov %d\n",
-               tx, (tx->tx_lnetmsg != NULL) ? tx->tx_lnetmsg->msg_hdr.type :
-                                              KSOCK_MSG_NOOP,
-               tx->tx_nob, tx->tx_niov, tx->tx_nkiov);
+       CDEBUG(D_NET, "Packet %p type %d, nob %d niov %d nkiov %d\n",
+              tx, (tx->tx_lnetmsg != NULL) ? tx->tx_lnetmsg->msg_hdr.type :
+                                             KSOCK_MSG_NOOP,
+              tx->tx_nob, tx->tx_niov, tx->tx_nkiov);
 
        /*
         * FIXME: SOCK_WMEM_QUEUED and SOCK_ERROR could block in __DARWIN8__
        }
 
        if (ztx != NULL) {
-               atomic_sub (ztx->tx_nob, &conn->ksnc_tx_nob);
+               atomic_sub(ztx->tx_nob, &conn->ksnc_tx_nob);
                list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs);
        }
 
                ksocknal_conn_addref(conn);
                list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns);
                conn->ksnc_tx_scheduled = 1;
-               wake_up (&sched->kss_waitq);
+               wake_up(&sched->kss_waitq);
        }
 
        spin_unlock_bh(&sched->kss_lock);
 }
 
 ksock_route_t *
-ksocknal_find_connectable_route_locked (ksock_peer_t *peer)
+ksocknal_find_connectable_route_locked(ksock_peer_t *peer)
 {
        unsigned long now = cfs_time_current();
        struct list_head *tmp;
        ksock_route_t *route;
 
-       list_for_each (tmp, &peer->ksnp_routes) {
-               route = list_entry (tmp, ksock_route_t, ksnr_list);
+       list_for_each(tmp, &peer->ksnp_routes) {
+               route = list_entry(tmp, ksock_route_t, ksnr_list);
 
                LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);
 
 }
 
 ksock_route_t *
-ksocknal_find_connecting_route_locked (ksock_peer_t *peer)
+ksocknal_find_connecting_route_locked(ksock_peer_t *peer)
 {
        struct list_head *tmp;
        ksock_route_t *route;
 
-       list_for_each (tmp, &peer->ksnp_routes) {
-               route = list_entry (tmp, ksock_route_t, ksnr_list);
+       list_for_each(tmp, &peer->ksnp_routes) {
+               route = list_entry(tmp, ksock_route_t, ksnr_list);
 
                LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);
 
 }
 
 int
-ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
+ksocknal_launch_packet(lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
 {
        ksock_peer_t *peer;
        ksock_conn_t *conn;
                                         * connecting and I do have an actual
                                         * connection...
                                         */
-                                       ksocknal_queue_tx_locked (tx, conn);
+                                       ksocknal_queue_tx_locked(tx, conn);
                                        read_unlock(g_lock);
                                        return 0;
                                }
        conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk);
        if (conn != NULL) {
                /* Connection exists; queue message on it */
-               ksocknal_queue_tx_locked (tx, conn);
+               ksocknal_queue_tx_locked(tx, conn);
                write_unlock_bh(g_lock);
                return 0;
        }
 
        if (peer->ksnp_accepting > 0 ||
-           ksocknal_find_connecting_route_locked (peer) != NULL) {
+           ksocknal_find_connecting_route_locked(peer) != NULL) {
                /* the message is going to be pinned to the peer */
                tx->tx_deadline =
                        cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
 
                /* Queue the message until a connection is established */
-               list_add_tail (&tx->tx_list, &peer->ksnp_tx_queue);
+               list_add_tail(&tx->tx_list, &peer->ksnp_tx_queue);
                write_unlock_bh(g_lock);
                return 0;
        }
        LASSERT(payload_nob == 0 || payload_niov > 0);
        LASSERT(payload_niov <= LNET_MAX_IOV);
        /* payload is either all vaddrs or all pages */
-       LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
-       LASSERT (!in_interrupt ());
+       LASSERT(!(payload_kiov != NULL && payload_iov != NULL));
+       LASSERT(!in_interrupt());
 
        if (payload_iov != NULL)
                desc_size = offsetof(ksock_tx_t,
 }
 
 void
-ksocknal_thread_fini (void)
+ksocknal_thread_fini(void)
 {
        write_lock_bh(&ksocknal_data.ksnd_global_lock);
        ksocknal_data.ksnd_nthreads--;
 }
 
 int
-ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
+ksocknal_new_packet(ksock_conn_t *conn, int nob_to_skip)
 {
        static char ksocknal_slop_buffer[4096];
 
 
                        conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
                        conn->ksnc_rx_iov[0].iov_base = &conn->ksnc_msg.ksm_u.lnetmsg;
-                       conn->ksnc_rx_iov[0].iov_len = sizeof (lnet_hdr_t);
+                       conn->ksnc_rx_iov[0].iov_len = sizeof(lnet_hdr_t);
                        break;
 
                default:
-                       LBUG ();
+                       LBUG();
                }
                conn->ksnc_rx_niov = 1;
 
                nob_to_skip -= nob;
 
        } while (nob_to_skip != 0 &&    /* mustn't overflow conn's rx iov */
-                niov < sizeof(conn->ksnc_rx_iov_space) / sizeof (struct iovec));
+                niov < sizeof(conn->ksnc_rx_iov_space) / sizeof(struct kvec));
 
        conn->ksnc_rx_niov = niov;
        conn->ksnc_rx_kiov = NULL;
 }
 
 static int
-ksocknal_process_receive (ksock_conn_t *conn)
+ksocknal_process_receive(ksock_conn_t *conn)
 {
        lnet_hdr_t *lhdr;
        lnet_process_id_t *id;
        int rc;
 
-       LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0);
+       LASSERT(atomic_read(&conn->ksnc_conn_refcount) > 0);
 
        /* NB: sched lock NOT held */
        /* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */
                rc = ksocknal_receive(conn);
 
                if (rc <= 0) {
-                       LASSERT (rc != -EAGAIN);
+                       LASSERT(rc != -EAGAIN);
 
                        if (rc == 0)
                                CDEBUG(D_NET, "[%p] EOF from %s ip %pI4h:%d\n",
                                       conn->ksnc_port);
 
                        /* it's not an error if conn is being closed */
-                       ksocknal_close_conn_and_siblings (conn,
-                                                         (conn->ksnc_closing) ? 0 : rc);
+                       ksocknal_close_conn_and_siblings(conn,
+                                                        (conn->ksnc_closing) ? 0 : rc);
                        return (rc == 0 ? -ESHUTDOWN : rc);
                }
 
                if (conn->ksnc_msg.ksm_zc_cookies[1] != 0) {
                        __u64 cookie = 0;
 
-                       LASSERT (conn->ksnc_proto != &ksocknal_protocol_v1x);
+                       LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x);
 
                        if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP)
                                cookie = conn->ksnc_msg.ksm_zc_cookies[0];
                }
 
                if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP) {
-                       ksocknal_new_packet (conn, 0);
+                       ksocknal_new_packet(conn, 0);
                        return 0;       /* NOOP is done and just return */
                }
 
                if (rc < 0) {
                        /* I just received garbage: give up on this conn */
                        ksocknal_new_packet(conn, 0);
-                       ksocknal_close_conn_and_siblings (conn, rc);
+                       ksocknal_close_conn_and_siblings(conn, rc);
                        ksocknal_conn_decref(conn);
                        return -EPROTO;
                }
 
                /* I'm racing with ksocknal_recv() */
-               LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_PARSE ||
-                        conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD);
+               LASSERT(conn->ksnc_rx_state == SOCKNAL_RX_PARSE ||
+                       conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD);
 
                if (conn->ksnc_rx_state != SOCKNAL_RX_LNET_PAYLOAD)
                        return 0;
 
                if (rc != 0) {
                        ksocknal_new_packet(conn, 0);
-                       ksocknal_close_conn_and_siblings (conn, rc);
+                       ksocknal_close_conn_and_siblings(conn, rc);
                        return -EPROTO;
                }
                /* Fall through */
 
        case SOCKNAL_RX_SLOP:
                /* starting new packet? */
-               if (ksocknal_new_packet (conn, conn->ksnc_rx_nob_left))
+               if (ksocknal_new_packet(conn, conn->ksnc_rx_nob_left))
                        return 0;       /* come back later */
                goto again;          /* try to finish reading slop now */
 
 }
 
 int
-ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
-              unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
-              unsigned int offset, unsigned int mlen, unsigned int rlen)
+ksocknal_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
+             unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
+             unsigned int offset, unsigned int mlen, unsigned int rlen)
 {
        ksock_conn_t *conn = private;
        ksock_sched_t *sched = conn->ksnc_scheduler;
        switch (conn->ksnc_rx_state) {
        case SOCKNAL_RX_PARSE_WAIT:
                list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
-               wake_up (&sched->kss_waitq);
-               LASSERT (conn->ksnc_rx_ready);
+               wake_up(&sched->kss_waitq);
+               LASSERT(conn->ksnc_rx_ready);
                break;
 
        case SOCKNAL_RX_PARSE:
 
                /* Ensure I progress everything semi-fairly */
 
-               if (!list_empty (&sched->kss_rx_conns)) {
+               if (!list_empty(&sched->kss_rx_conns)) {
                        conn = list_entry(sched->kss_rx_conns.next,
                                          ksock_conn_t, ksnc_rx_list);
                        list_del(&conn->ksnc_rx_list);
                        did_something = 1;
                }
 
-               if (!list_empty (&sched->kss_tx_conns)) {
+               if (!list_empty(&sched->kss_tx_conns)) {
                        LIST_HEAD(zlist);
 
                        if (!list_empty(&sched->kss_zombie_noop_txs)) {
 
                        conn = list_entry(sched->kss_tx_conns.next,
                                          ksock_conn_t, ksnc_tx_list);
-                       list_del (&conn->ksnc_tx_list);
+                       list_del(&conn->ksnc_tx_list);
 
                        LASSERT(conn->ksnc_tx_scheduled);
                        LASSERT(conn->ksnc_tx_ready);
                                rc = wait_event_interruptible_exclusive(
                                        sched->kss_waitq,
                                        !ksocknal_sched_cansleep(sched));
-                               LASSERT (rc == 0);
+                               LASSERT(rc == 0);
                        } else {
                                cond_resched();
                        }
  * Add connection to kss_rx_conns of scheduler
  * and wakeup the scheduler.
  */
-void ksocknal_read_callback (ksock_conn_t *conn)
+void ksocknal_read_callback(ksock_conn_t *conn)
 {
        ksock_sched_t *sched;
 
                /* extra ref for scheduler */
                ksocknal_conn_addref(conn);
 
-               wake_up (&sched->kss_waitq);
+               wake_up(&sched->kss_waitq);
        }
        spin_unlock_bh(&sched->kss_lock);
 }
  * Add connection to kss_tx_conns of scheduler
  * and wakeup the scheduler.
  */
-void ksocknal_write_callback (ksock_conn_t *conn)
+void ksocknal_write_callback(ksock_conn_t *conn)
 {
        ksock_sched_t *sched;
 
                /* extra ref for scheduler */
                ksocknal_conn_addref(conn);
 
-               wake_up (&sched->kss_waitq);
+               wake_up(&sched->kss_waitq);
        }
 
        spin_unlock_bh(&sched->kss_lock);
 }
 
 static ksock_proto_t *
-ksocknal_parse_proto_version (ksock_hello_msg_t *hello)
+ksocknal_parse_proto_version(ksock_hello_msg_t *hello)
 {
        __u32 version = 0;
 
        if (hello->kshm_magic == le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
                lnet_magicversion_t *hmv = (lnet_magicversion_t *)hello;
 
-               CLASSERT(sizeof (lnet_magicversion_t) ==
-                        offsetof (ksock_hello_msg_t, kshm_src_nid));
+               CLASSERT(sizeof(lnet_magicversion_t) ==
+                        offsetof(ksock_hello_msg_t, kshm_src_nid));
 
-               if (hmv->version_major == cpu_to_le16 (KSOCK_PROTO_V1_MAJOR) &&
-                   hmv->version_minor == cpu_to_le16 (KSOCK_PROTO_V1_MINOR))
+               if (hmv->version_major == cpu_to_le16(KSOCK_PROTO_V1_MAJOR) &&
+                   hmv->version_minor == cpu_to_le16(KSOCK_PROTO_V1_MINOR))
                        return &ksocknal_protocol_v1x;
        }
 
 }
 
 int
-ksocknal_send_hello (lnet_ni_t *ni, ksock_conn_t *conn,
-                    lnet_nid_t peer_nid, ksock_hello_msg_t *hello)
+ksocknal_send_hello(lnet_ni_t *ni, ksock_conn_t *conn,
+                   lnet_nid_t peer_nid, ksock_hello_msg_t *hello)
 {
        /* CAVEAT EMPTOR: this byte flips 'ipaddrs' */
        ksock_net_t *net = (ksock_net_t *)ni->ni_data;
 }
 
 int
-ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
-                    ksock_hello_msg_t *hello, lnet_process_id_t *peerid,
-                    __u64 *incarnation)
+ksocknal_recv_hello(lnet_ni_t *ni, ksock_conn_t *conn,
+                   ksock_hello_msg_t *hello, lnet_process_id_t *peerid,
+                   __u64 *incarnation)
 {
        /* Return < 0   fatal error
         *      0         success
        timeout = active ? *ksocknal_tunables.ksnd_timeout :
                            lnet_acceptor_timeout();
 
-       rc = lnet_sock_read(sock, &hello->kshm_magic, sizeof (hello->kshm_magic), timeout);
+       rc = lnet_sock_read(sock, &hello->kshm_magic,
+                           sizeof(hello->kshm_magic), timeout);
        if (rc != 0) {
                CERROR("Error %d reading HELLO from %pI4h\n",
                       rc, &conn->ksnc_ipaddr);
-               LASSERT (rc < 0);
+               LASSERT(rc < 0);
                return rc;
        }
 
        if (hello->kshm_magic != LNET_PROTO_MAGIC &&
            hello->kshm_magic != __swab32(LNET_PROTO_MAGIC) &&
-           hello->kshm_magic != le32_to_cpu (LNET_PROTO_TCP_MAGIC)) {
+           hello->kshm_magic != le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
                /* Unexpected magic! */
                CERROR("Bad magic(1) %#08x (%#08x expected) from %pI4h\n",
-                      __cpu_to_le32 (hello->kshm_magic),
+                      __cpu_to_le32(hello->kshm_magic),
                       LNET_PROTO_TCP_MAGIC,
                       &conn->ksnc_ipaddr);
                return -EPROTO;
 }
 
 static int
-ksocknal_connect (ksock_route_t *route)
+ksocknal_connect(ksock_route_t *route)
 {
        LIST_HEAD(zombies);
        ksock_peer_t *peer = route->ksnr_peer;
                } else if ((wanted & (1 << SOCKLND_CONN_BULK_IN)) != 0) {
                        type = SOCKLND_CONN_BULK_IN;
                } else {
-                       LASSERT ((wanted & (1 << SOCKLND_CONN_BULK_OUT)) != 0);
+                       LASSERT((wanted & (1 << SOCKLND_CONN_BULK_OUT)) != 0);
                        type = SOCKLND_CONN_BULK_OUT;
                }
 
                min(route->ksnr_retry_interval,
                    cfs_time_seconds(*ksocknal_tunables.ksnd_max_reconnectms) / 1000);
 
-       LASSERT (route->ksnr_retry_interval != 0);
+       LASSERT(route->ksnr_retry_interval != 0);
        route->ksnr_timeout = cfs_time_add(cfs_time_current(),
                                           route->ksnr_retry_interval);
 
                 * ksnp_tx_queue is queued on a conn on successful
                 * connection for V1.x and V2.x
                 */
-               if (!list_empty (&peer->ksnp_conns)) {
+               if (!list_empty(&peer->ksnp_conns)) {
                        conn = list_entry(peer->ksnp_conns.next,
                                          ksock_conn_t, ksnc_list);
-                       LASSERT (conn->ksnc_proto == &ksocknal_protocol_v3x);
+                       LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
                }
 
                /*
 }
 
 int
-ksocknal_connd (void *arg)
+ksocknal_connd(void *arg)
 {
        spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
        ksock_connreq_t *cr;
                        route = ksocknal_connd_get_route_locked(&timeout);
                }
                if (route != NULL) {
-                       list_del (&route->ksnr_connd_list);
+                       list_del(&route->ksnr_connd_list);
                        ksocknal_data.ksnd_connd_connecting++;
                        spin_unlock_bh(connd_lock);
                        dropped_lock = 1;
 }
 
 static ksock_conn_t *
-ksocknal_find_timed_out_conn (ksock_peer_t *peer)
+ksocknal_find_timed_out_conn(ksock_peer_t *peer)
 {
        /* We're called with a shared lock on ksnd_global_lock */
        ksock_conn_t *conn;
        struct list_head *ctmp;
 
-       list_for_each (ctmp, &peer->ksnp_conns) {
+       list_for_each(ctmp, &peer->ksnp_conns) {
                int error;
 
-               conn = list_entry (ctmp, ksock_conn_t, ksnc_list);
+               conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
 
                /* Don't need the {get,put}connsock dance to deref ksnc_sock */
                LASSERT(!conn->ksnc_closing);
 
        write_lock_bh(&ksocknal_data.ksnd_global_lock);
 
-       while (!list_empty (&peer->ksnp_tx_queue)) {
+       while (!list_empty(&peer->ksnp_tx_queue)) {
                tx = list_entry(peer->ksnp_tx_queue.next, ksock_tx_t, tx_list);
 
                if (!cfs_time_aftereq(cfs_time_current(),
                                      tx->tx_deadline))
                        break;
 
-               list_del (&tx->tx_list);
-               list_add_tail (&tx->tx_list, &stale_txs);
+               list_del(&tx->tx_list);
+               list_add_tail(&tx->tx_list, &stale_txs);
        }
 
        write_unlock_bh(&ksocknal_data.ksnd_global_lock);
 }
 
 static void
-ksocknal_check_peer_timeouts (int idx)
+ksocknal_check_peer_timeouts(int idx)
 {
        struct list_head *peers = &ksocknal_data.ksnd_peers[idx];
        ksock_peer_t *peer;
                        goto again;
                }
 
-               conn = ksocknal_find_timed_out_conn (peer);
+               conn = ksocknal_find_timed_out_conn(peer);
 
                if (conn != NULL) {
                        read_unlock(&ksocknal_data.ksnd_global_lock);
 
-                       ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT);
+                       ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT);
 
                        /*
                         * NB we won't find this one again, but we can't
                 * we can't process stale txs right here because we're
                 * holding only shared lock
                 */
-               if (!list_empty (&peer->ksnp_tx_queue)) {
+               if (!list_empty(&peer->ksnp_tx_queue)) {
                        ksock_tx_t *tx = list_entry(peer->ksnp_tx_queue.next,
                                                    ksock_tx_t, tx_list);
 
                       cfs_duration_sec(cfs_time_current() - deadline),
                       resid, conn->ksnc_sock->sk->sk_wmem_queued);
 
-               ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT);
+               ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT);
                ksocknal_conn_decref(conn);
                goto again;
        }
 }
 
 int
-ksocknal_reaper (void *arg)
+ksocknal_reaper(void *arg)
 {
        wait_queue_t wait;
        ksock_conn_t *conn;
        spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
 
        while (!ksocknal_data.ksnd_shuttingdown) {
-               if (!list_empty (&ksocknal_data.ksnd_deathrow_conns)) {
-                       conn = list_entry (ksocknal_data. \
-                                              ksnd_deathrow_conns.next,
-                                              ksock_conn_t, ksnc_list);
-                       list_del (&conn->ksnc_list);
+               if (!list_empty(&ksocknal_data.ksnd_deathrow_conns)) {
+                       conn = list_entry(ksocknal_data.ksnd_deathrow_conns.next,
+                                         ksock_conn_t, ksnc_list);
+                       list_del(&conn->ksnc_list);
 
                        spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
 
                        continue;
                }
 
-               if (!list_empty (&ksocknal_data.ksnd_zombie_conns)) {
-                       conn = list_entry (ksocknal_data.ksnd_zombie_conns.\
-                                              next, ksock_conn_t, ksnc_list);
-                       list_del (&conn->ksnc_list);
+               if (!list_empty(&ksocknal_data.ksnd_zombie_conns)) {
+                       conn = list_entry(ksocknal_data.ksnd_zombie_conns.next,
+                                         ksock_conn_t, ksnc_list);
+                       list_del(&conn->ksnc_list);
 
                        spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
 
                        continue;
                }
 
-               if (!list_empty (&ksocknal_data.ksnd_enomem_conns)) {
+               if (!list_empty(&ksocknal_data.ksnd_enomem_conns)) {
                        list_add(&enomem_conns,
                                 &ksocknal_data.ksnd_enomem_conns);
                        list_del_init(&ksocknal_data.ksnd_enomem_conns);
 
                /* reschedule all the connections that stalled with ENOMEM... */
                nenomem_conns = 0;
-               while (!list_empty (&enomem_conns)) {
+               while (!list_empty(&enomem_conns)) {
                        conn = list_entry(enomem_conns.next, ksock_conn_t,
                                          ksnc_tx_list);
-                       list_del (&conn->ksnc_tx_list);
+                       list_del(&conn->ksnc_tx_list);
 
                        sched = conn->ksnc_scheduler;
 
                                chunk = 1;
 
                        for (i = 0; i < chunk; i++) {
-                               ksocknal_check_peer_timeouts (peer_index);
+                               ksocknal_check_peer_timeouts(peer_index);
                                peer_index = (peer_index + 1) %
                                             ksocknal_data.ksnd_peer_hash_size;
                        }
                ksocknal_data.ksnd_reaper_waketime =
                        cfs_time_add(cfs_time_current(), timeout);
 
-               set_current_state (TASK_INTERRUPTIBLE);
-               add_wait_queue (&ksocknal_data.ksnd_reaper_waitq, &wait);
+               set_current_state(TASK_INTERRUPTIBLE);
+               add_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);
 
                if (!ksocknal_data.ksnd_shuttingdown &&
-                   list_empty (&ksocknal_data.ksnd_deathrow_conns) &&
-                   list_empty (&ksocknal_data.ksnd_zombie_conns))
+                   list_empty(&ksocknal_data.ksnd_deathrow_conns) &&
+                   list_empty(&ksocknal_data.ksnd_zombie_conns))
                        schedule_timeout(timeout);
 
-               set_current_state (TASK_RUNNING);
-               remove_wait_queue (&ksocknal_data.ksnd_reaper_waitq, &wait);
+               set_current_state(TASK_RUNNING);
+               remove_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);
 
                spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
        }