return fixup_copy_count;
 }
 
-void
-rpcrdma_connect_worker(struct work_struct *work)
-{
-       struct rpcrdma_ep *ep =
-               container_of(work, struct rpcrdma_ep, rep_connect_worker.work);
-       struct rpcrdma_xprt *r_xprt =
-               container_of(ep, struct rpcrdma_xprt, rx_ep);
-       struct rpc_xprt *xprt = &r_xprt->rx_xprt;
-
-       spin_lock_bh(&xprt->transport_lock);
-       if (++xprt->connect_cookie == 0)        /* maintain a reserved value */
-               ++xprt->connect_cookie;
-       if (ep->rep_connected > 0) {
-               if (!xprt_test_and_set_connected(xprt))
-                       xprt_wake_pending_tasks(xprt, 0);
-       } else {
-               if (xprt_test_and_clear_connected(xprt))
-                       xprt_wake_pending_tasks(xprt, -ENOTCONN);
-       }
-       spin_unlock_bh(&xprt->transport_lock);
-}
-
 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
 /* By convention, backchannel calls arrive via rdma_msg type
  * messages, and never populate the chunk lists. This makes
 }
 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
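
The convention described in the comment above means a backchannel call is detectable from a fixed-size header prefix alone. A minimal sketch of such a test, assuming the RPC/RDMA v1 transport header layout (XID, version, credits, proc, then three chunk-list discriminators); demo_is_bcall and DEMO_RDMA_MSG are illustrative names, not the kernel's:

    /* Sketch only: recognize a backchannel call from the fixed-size
     * RPC/RDMA transport header. Word offsets follow the RPC/RDMA v1
     * layout; helper and constant names are made up.
     */
    enum { DEMO_RDMA_MSG = 0 };		/* procedure: rdma_msg */

    static bool demo_is_bcall(__be32 *p)
    {
    	if (p[3] != cpu_to_be32(DEMO_RDMA_MSG))
    		return false;		/* not an rdma_msg message */
    	if (p[4] != xdr_zero)
    		return false;		/* read list present */
    	if (p[5] != xdr_zero)
    		return false;		/* write list present */
    	if (p[6] != xdr_zero)
    		return false;		/* reply chunk present */
    	return true;
    }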
 
-/*
- * This function is called when an async event is posted to
- * the connection which changes the connection state. All it
- * does at this point is mark the connection up/down, the rpc
- * timers do the rest.
- */
-void
-rpcrdma_conn_func(struct rpcrdma_ep *ep)
-{
-       schedule_delayed_work(&ep->rep_connect_worker, 0);
-}
-
 /* Process received RPC/RDMA messages.
  *
  * Errors must result in the RPC task either being awakened, or
 
                }
 }
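
The comment above states the reply handler's contract: no error may strand a sleeping RPC task. Either the request is completed now, waking the task with a status, or it is left queued so the retransmit timer discovers the problem. A hedged sketch of the wake-up side using the generic xprt_complete_rqst() call; demo_complete is a made-up helper, and the locking shown follows the transport_lock rules of this era:

    /* Sketch only: a fatal decoding error completes the rqst with a
     * negative status so the task wakes and observes it immediately;
     * a silently dropped reply is simply never completed, and the RPC
     * retransmit timer fires later.
     */
    static void demo_complete(struct rpc_xprt *xprt,
    			      struct rpc_rqst *rqst, int status)
    {
    	spin_lock_bh(&xprt->transport_lock);
    	xprt_complete_rqst(rqst->rq_task, status);
    	spin_unlock_bh(&xprt->transport_lock);
    }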
 
+/* This function is called when an async event is posted to
+ * the connection which changes the connection state. All it
+ * does at this point is mark the connection up or down; the
+ * RPC timers do the rest.
+ */
+void
+rpcrdma_conn_func(struct rpcrdma_ep *ep)
+{
+       schedule_delayed_work(&ep->rep_connect_worker, 0);
+}
+
+void
+rpcrdma_connect_worker(struct work_struct *work)
+{
+       struct rpcrdma_ep *ep =
+               container_of(work, struct rpcrdma_ep, rep_connect_worker.work);
+       struct rpcrdma_xprt *r_xprt =
+               container_of(ep, struct rpcrdma_xprt, rx_ep);
+       struct rpc_xprt *xprt = &r_xprt->rx_xprt;
+
+       spin_lock_bh(&xprt->transport_lock);
+       if (++xprt->connect_cookie == 0)        /* maintain a reserved value */
+               ++xprt->connect_cookie;
+       if (ep->rep_connected > 0) {
+               if (!xprt_test_and_set_connected(xprt))
+                       xprt_wake_pending_tasks(xprt, 0);
+       } else {
+               if (xprt_test_and_clear_connected(xprt))
+                       xprt_wake_pending_tasks(xprt, -ENOTCONN);
+       }
+       spin_unlock_bh(&xprt->transport_lock);
+}
+
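
The connect_cookie increment in the worker above is a generation counter that skips one reserved value on wraparound, so a snapshot taken at send time can always be compared against the live cookie to detect an intervening reconnect. A minimal sketch of the pattern, with illustrative names:

    /* Sketch only: a wrapping generation counter with one reserved
     * value. Send paths snapshot the cookie; if it has changed by the
     * time an error is examined, the connection was lost and re-formed
     * in the meantime.
     */
    static u32 demo_bump_cookie(u32 *cookie)
    {
    	if (++(*cookie) == 0)	/* skip the reserved value */
    		++(*cookie);
    	return *cookie;
    }

Reserving one value means a snapshot field that was never set against a live connection can never spuriously match the current cookie.
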
 static void
 xprt_rdma_connect_worker(struct work_struct *work)
 {
 
                                struct rpcrdma_create_data_internal *);
 void rpcrdma_ep_destroy(struct rpcrdma_ep *, struct rpcrdma_ia *);
 int rpcrdma_ep_connect(struct rpcrdma_ep *, struct rpcrdma_ia *);
+void rpcrdma_conn_func(struct rpcrdma_ep *ep);
 void rpcrdma_ep_disconnect(struct rpcrdma_ep *, struct rpcrdma_ia *);
 
 int rpcrdma_ep_post(struct rpcrdma_ia *, struct rpcrdma_ep *,
        return writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
 }
 
-/*
- * RPC/RDMA connection management calls - xprtrdma/rpc_rdma.c
- */
-void rpcrdma_connect_worker(struct work_struct *);
-void rpcrdma_conn_func(struct rpcrdma_ep *);
-void rpcrdma_reply_handler(struct work_struct *);
-
 /*
  * RPC/RDMA protocol calls - xprtrdma/rpc_rdma.c
  */
 void rpcrdma_unmap_sges(struct rpcrdma_ia *, struct rpcrdma_req *);
 int rpcrdma_marshal_req(struct rpc_rqst *);
 void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *);
+void rpcrdma_reply_handler(struct work_struct *work);
 
 /* RPC/RDMA module init - xprtrdma/transport.c
  */
 extern unsigned int xprt_rdma_max_inline_read;
 void xprt_rdma_format_addresses(struct rpc_xprt *xprt, struct sockaddr *sap);
 void xprt_rdma_free_addresses(struct rpc_xprt *xprt);
+void rpcrdma_connect_worker(struct work_struct *work);
 void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq);
 int xprt_rdma_init(void);
 void xprt_rdma_cleanup(void);