/******************************************************************************

(c) 2007 Network Appliance, Inc.  All Rights Reserved.
(c) 2009 NetApp.  All Rights Reserved.

NetApp provides this source code under the GPL v2 License.
The GPL v2 license is available at
http://opensource.org/licenses/gpl-license.php.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/

#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/sunrpc/xprt.h>
#include <linux/export.h>
#include <linux/sunrpc/bc_xprt.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
#define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/*
 * Helper routines that track the number of preallocation elements
 * on the transport.
 */
static inline int xprt_need_to_requeue(struct rpc_xprt *xprt)
{
        return xprt->bc_alloc_count < atomic_read(&xprt->bc_free_slots);
}

static inline void xprt_inc_alloc_count(struct rpc_xprt *xprt, unsigned int n)
{
        atomic_add(n, &xprt->bc_free_slots);
        xprt->bc_alloc_count += n;
}

static inline int xprt_dec_alloc_count(struct rpc_xprt *xprt, unsigned int n)
{
        atomic_sub(n, &xprt->bc_free_slots);
        return xprt->bc_alloc_count -= n;
}

/*
 * Free the preallocated rpc_rqst structure and the memory
 * buffers hanging off of it.
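 * The caller must ensure the request is no longer in use by a callback;
 * the WARN_ON_ONCE() below fires if RPC_BC_PA_IN_USE is still set.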
 */
static void xprt_free_allocation(struct rpc_rqst *req)
{
        struct xdr_buf *xbufp;

        dprintk("RPC: free allocations for req= %p\n", req);
        WARN_ON_ONCE(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
        xbufp = &req->rq_rcv_buf;
        free_page((unsigned long)xbufp->head[0].iov_base);
        xbufp = &req->rq_snd_buf;
        free_page((unsigned long)xbufp->head[0].iov_base);
        kfree(req);
}

static int xprt_alloc_xdr_buf(struct xdr_buf *buf, gfp_t gfp_flags)
{
        struct page *page;
        /* Preallocate one XDR receive buffer */
        page = alloc_page(gfp_flags);
        if (page == NULL)
                return -ENOMEM;
        buf->head[0].iov_base = page_address(page);
        buf->head[0].iov_len = PAGE_SIZE;
        buf->tail[0].iov_base = NULL;
        buf->tail[0].iov_len = 0;
        buf->page_len = 0;
        buf->len = 0;
        buf->buflen = PAGE_SIZE;
        return 0;
}

static
struct rpc_rqst *xprt_alloc_bc_req(struct rpc_xprt *xprt, gfp_t gfp_flags)
{
        struct rpc_rqst *req;

        /* Pre-allocate one backchannel rpc_rqst */
        req = kzalloc(sizeof(*req), gfp_flags);
        if (req == NULL)
                return NULL;

        req->rq_xprt = xprt;
        INIT_LIST_HEAD(&req->rq_list);
        INIT_LIST_HEAD(&req->rq_bc_list);

        /* Preallocate one XDR receive buffer */
        if (xprt_alloc_xdr_buf(&req->rq_rcv_buf, gfp_flags) < 0) {
                printk(KERN_ERR "Failed to create bc receive xbuf\n");
                goto out_free;
        }
        req->rq_rcv_buf.len = PAGE_SIZE;

        /* Preallocate one XDR send buffer */
        if (xprt_alloc_xdr_buf(&req->rq_snd_buf, gfp_flags) < 0) {
                printk(KERN_ERR "Failed to create bc snd xbuf\n");
                goto out_free;
        }
        return req;
out_free:
        xprt_free_allocation(req);
        return NULL;
}

/*
 * Preallocate up to min_reqs structures and related buffers for use
 * by the backchannel.  This function can be called multiple times
 * when creating new sessions that use the same rpc_xprt.  The
 * preallocated buffers are added to the pool of resources used by
 * the rpc_xprt.  Any one of these resources may be used by an
 * incoming callback request.  It's up to the higher levels in the
 * stack to enforce that the maximum number of session slots is not
 * being exceeded.
 *
 * Some callback arguments can be large.  For example, a pNFS server
 * using multiple deviceids.  The list can be unbounded, but the client
 * has the ability to tell the server the maximum size of the callback
 * requests.  Each deviceID is 16 bytes, so allocate one page
 * for the arguments to have enough room to receive a number of these
 * deviceIDs.  The NFS client indicates to the pNFS server that its
 * callback requests can be up to 4096 bytes in size.
 */
int xprt_setup_backchannel(struct rpc_xprt *xprt, unsigned int min_reqs)
{
        if (!xprt->ops->bc_setup)
                return 0;
        return xprt->ops->bc_setup(xprt, min_reqs);
}
EXPORT_SYMBOL_GPL(xprt_setup_backchannel);

int xprt_setup_bc(struct rpc_xprt *xprt, unsigned int min_reqs)
{
        struct rpc_rqst *req;
        struct list_head tmp_list;
        int i;

        dprintk("RPC: setup backchannel transport\n");

        /*
         * We use a temporary list to keep track of the preallocated
         * buffers.  Once we're done building the list we splice it
         * into the backchannel preallocation list off of the rpc_xprt
         * struct.  This helps minimize the amount of time the list
         * lock is held on the rpc_xprt struct.  It also makes cleanup
         * easier in case of memory allocation errors.
         */
        INIT_LIST_HEAD(&tmp_list);
        for (i = 0; i < min_reqs; i++) {
                /* Pre-allocate one backchannel rpc_rqst */
                req = xprt_alloc_bc_req(xprt, GFP_KERNEL);
                if (req == NULL) {
                        printk(KERN_ERR "Failed to create bc rpc_rqst\n");
                        goto out_free;
                }

                /* Add the allocated buffer to the tmp list */
                dprintk("RPC: adding req= %p\n", req);
                list_add(&req->rq_bc_pa_list, &tmp_list);
        }

        /*
         * Add the temporary list to the backchannel preallocation list
         */
        spin_lock_bh(&xprt->bc_pa_lock);
        list_splice(&tmp_list, &xprt->bc_pa_list);
        xprt_inc_alloc_count(xprt, min_reqs);
        spin_unlock_bh(&xprt->bc_pa_lock);

        dprintk("RPC: setup backchannel transport done\n");
        return 0;

out_free:
        /*
         * Memory allocation failed, free the temporary list
         */
        while (!list_empty(&tmp_list)) {
                req = list_first_entry(&tmp_list,
                                struct rpc_rqst,
                                rq_bc_pa_list);
                list_del(&req->rq_bc_pa_list);
                xprt_free_allocation(req);
        }

        dprintk("RPC: setup backchannel transport failed\n");
        return -ENOMEM;
}

/**
 * xprt_destroy_backchannel - Destroys the backchannel preallocated structures.
 * @xprt: the transport holding the preallocated structures
 * @max_reqs: the maximum number of preallocated structures to destroy
 *
 * Since these structures may have been allocated by multiple calls
 * to xprt_setup_backchannel, we only destroy up to the maximum number
 * of reqs specified by the caller.
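 *
 * Note: the actual work is delegated to the transport's ->bc_destroy()
 * method; if the transport does not provide one, this call is a no-op.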
 */
void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
{
        if (xprt->ops->bc_destroy)
                xprt->ops->bc_destroy(xprt, max_reqs);
}
EXPORT_SYMBOL_GPL(xprt_destroy_backchannel);

void xprt_destroy_bc(struct rpc_xprt *xprt, unsigned int max_reqs)
{
        struct rpc_rqst *req = NULL, *tmp = NULL;

        dprintk("RPC: destroy backchannel transport\n");

        if (max_reqs == 0)
                goto out;

        spin_lock_bh(&xprt->bc_pa_lock);
        xprt_dec_alloc_count(xprt, max_reqs);
        list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
                dprintk("RPC: req=%p\n", req);
                list_del(&req->rq_bc_pa_list);
                xprt_free_allocation(req);
                if (--max_reqs == 0)
                        break;
        }
        spin_unlock_bh(&xprt->bc_pa_lock);

out:
        dprintk("RPC: backchannel list empty= %s\n",
                list_empty(&xprt->bc_pa_list) ? "true" : "false");
}

static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid)
{
        struct rpc_rqst *req = NULL;

        dprintk("RPC: allocate a backchannel request\n");
        if (atomic_read(&xprt->bc_free_slots) <= 0)
                goto not_found;
        if (list_empty(&xprt->bc_pa_list)) {
                req = xprt_alloc_bc_req(xprt, GFP_ATOMIC);
                if (!req)
                        goto not_found;
                list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
                xprt->bc_alloc_count++;
        }
        req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
                                rq_bc_pa_list);
        req->rq_reply_bytes_recvd = 0;
        req->rq_bytes_sent = 0;
        memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
                        sizeof(req->rq_private_buf));
        req->rq_xid = xid;
        req->rq_connect_cookie = xprt->connect_cookie;
not_found:
        dprintk("RPC: backchannel req=%p\n", req);
        return req;
}

/*
 * Return the preallocated rpc_rqst structure and XDR buffers
 * associated with this rpc_task.
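 * The work is delegated to the transport's ->bc_free_rqst() method;
 * xprt_free_bc_rqst() below provides a generic implementation.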
 */
void xprt_free_bc_request(struct rpc_rqst *req)
{
        struct rpc_xprt *xprt = req->rq_xprt;

        xprt->ops->bc_free_rqst(req);
}

void xprt_free_bc_rqst(struct rpc_rqst *req)
{
        struct rpc_xprt *xprt = req->rq_xprt;

        dprintk("RPC: free backchannel req=%p\n", req);

        req->rq_connect_cookie = xprt->connect_cookie - 1;
        smp_mb__before_atomic();
        clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
        smp_mb__after_atomic();

        /*
         * Return it to the list of preallocations so that it
         * may be reused by a new callback request.
         */
        spin_lock_bh(&xprt->bc_pa_lock);
        if (xprt_need_to_requeue(xprt)) {
                list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
                xprt->bc_alloc_count++;
                req = NULL;
        }
        spin_unlock_bh(&xprt->bc_pa_lock);
        if (req != NULL) {
                /*
                 * The last remaining session was destroyed while this
                 * entry was in use.  Free the entry and don't attempt
                 * to add back to the list because there is no need to
                 * have any more preallocated entries.
                 */
                dprintk("RPC: Last session removed req=%p\n", req);
                xprt_free_allocation(req);
                return;
        }
}

/*
 * One or more rpc_rqst structures have been preallocated during the
 * backchannel setup.  Buffer space for the send and private XDR buffers
 * has been preallocated as well.  Use xprt_alloc_bc_request to allocate
 * one of these structures; use xprt_free_bc_request to return it.
 *
 * We know that we're called in soft interrupt context, so grab the
 * spin_lock, since there is no need to grab the bottom-half spin_lock.
 *
 * Return an available rpc_rqst, or NULL if none are available.
 */
struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid)
{
        struct rpc_rqst *req;

        spin_lock(&xprt->bc_pa_lock);
        list_for_each_entry(req, &xprt->bc_pa_list, rq_bc_pa_list) {
                if (req->rq_connect_cookie != xprt->connect_cookie)
                        continue;
                if (req->rq_xid == xid)
                        goto found;
        }
        req = xprt_alloc_bc_request(xprt, xid);
found:
        spin_unlock(&xprt->bc_pa_lock);
        return req;
}

/*
 * Add callback request to callback list.  The callback
 * service sleeps on the sv_cb_waitq waiting for new
 * requests.  Wake it up after enqueuing the request.
 */
void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
{
        struct rpc_xprt *xprt = req->rq_xprt;
        struct svc_serv *bc_serv = xprt->bc_serv;

        spin_lock(&xprt->bc_pa_lock);
        list_del(&req->rq_bc_pa_list);
        xprt_dec_alloc_count(xprt, 1);
        spin_unlock(&xprt->bc_pa_lock);

        req->rq_private_buf.len = copied;
        set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);

        dprintk("RPC: add callback request to list\n");
        spin_lock(&bc_serv->sv_cb_lock);
        list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
        wake_up(&bc_serv->sv_cb_waitq);
        spin_unlock(&bc_serv->sv_cb_lock);
}
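
/*
 * Illustrative sketch only (not compiled): one way a transport could plug
 * the generic helpers in this file into its rpc_xprt_ops.  The ->bc_setup,
 * ->bc_free_rqst and ->bc_destroy method names are taken from the
 * xprt->ops calls above; the structure name and the omitted fields are
 * placeholders, not part of any real transport.
 */
#if 0
static struct rpc_xprt_ops example_bc_enabled_ops = {
        /* ...the transport's usual connect/send/receive methods... */
        .bc_setup       = xprt_setup_bc,        /* preallocate rpc_rqsts */
        .bc_free_rqst   = xprt_free_bc_rqst,    /* return a rqst to the pool */
        .bc_destroy     = xprt_destroy_bc,      /* tear the pool down */
};
#endif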