Release 4.11 net/sunrpc/backchannel_rqst.c
/******************************************************************************
(c) 2007 Network Appliance, Inc. All Rights Reserved.
(c) 2009 NetApp. All Rights Reserved.
NetApp provides this source code under the GPL v2 License.
The GPL v2 license is available at
http://opensource.org/licenses/gpl-license.php.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/sunrpc/xprt.h>
#include <linux/export.h>
#include <linux/sunrpc/bc_xprt.h>
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
#define RPCDBG_FACILITY RPCDBG_TRANS
#endif
/*
* Helper routines that track the number of preallocation elements
* on the transport.
*/
static inline int xprt_need_to_requeue(struct rpc_xprt *xprt)
{
        return xprt->bc_alloc_count < atomic_read(&xprt->bc_free_slots);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ricardo Labiaga | 17 | 68.00% | 1 | 50.00% |
Trond Myklebust | 8 | 32.00% | 1 | 50.00% |
Total | 25 | 100.00% | 2 | 100.00% |
static inline void xprt_inc_alloc_count(struct rpc_xprt *xprt, unsigned int n)
{
        atomic_add(n, &xprt->bc_free_slots);
        xprt->bc_alloc_count += n;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ricardo Labiaga | 22 | 68.75% | 1 | 50.00% |
Trond Myklebust | 10 | 31.25% | 1 | 50.00% |
Total | 32 | 100.00% | 2 | 100.00% |
static inline int xprt_dec_alloc_count(struct rpc_xprt *xprt, unsigned int n)
{
        atomic_sub(n, &xprt->bc_free_slots);
        return xprt->bc_alloc_count -= n;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ricardo Labiaga | 23 | 69.70% | 1 | 50.00% |
Trond Myklebust | 10 | 30.30% | 1 | 50.00% |
Total | 33 | 100.00% | 2 | 100.00% |
/*
* Free the preallocated rpc_rqst structure and the memory
* buffers hanging off of it.
*/
static void xprt_free_allocation(struct rpc_rqst *req)
{
        struct xdr_buf *xbufp;

        dprintk("RPC: free allocations for req= %p\n", req);
        WARN_ON_ONCE(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
        xbufp = &req->rq_rcv_buf;
        free_page((unsigned long)xbufp->head[0].iov_base);
        xbufp = &req->rq_snd_buf;
        free_page((unsigned long)xbufp->head[0].iov_base);
        kfree(req);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ricardo Labiaga | 85 | 97.70% | 1 | 33.33% |
Weston Andros Adamson | 1 | 1.15% | 1 | 33.33% |
Trond Myklebust | 1 | 1.15% | 1 | 33.33% |
Total | 87 | 100.00% | 3 | 100.00% |
static int xprt_alloc_xdr_buf(struct xdr_buf *buf, gfp_t gfp_flags)
{
        struct page *page;

        /* Preallocate one XDR receive buffer */
        page = alloc_page(gfp_flags);
        if (page == NULL)
                return -ENOMEM;
        xdr_buf_init(buf, page_address(page), PAGE_SIZE);
        return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ricardo Labiaga | 27 | 51.92% | 1 | 33.33% |
Trond Myklebust | 20 | 38.46% | 1 | 33.33% |
Chuck Lever | 5 | 9.62% | 1 | 33.33% |
Total | 52 | 100.00% | 3 | 100.00% |
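xdr_buf_init() is not defined in this file; it lives in include/linux/sunrpc/xdr.h. As a rough sketch of what the call above amounts to (my paraphrase, under the assumption that the helper simply resets the buffer and points its head iovec at the freshly allocated page):

/* Approximate effect of xdr_buf_init(buf, page_address(page), PAGE_SIZE):
 * zero the xdr_buf and make its head iovec cover the new page, so each
 * preallocated send/receive buffer starts out as one page of space. */
static void example_xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
{
        memset(buf, 0, sizeof(*buf));
        buf->head[0].iov_base = start;
        buf->head[0].iov_len = len;
        buf->buflen = len;
}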
static
struct rpc_rqst *xprt_alloc_bc_req(struct rpc_xprt *xprt, gfp_t gfp_flags)
{
        struct rpc_rqst *req;

        /* Pre-allocate one backchannel rpc_rqst */
        req = kzalloc(sizeof(*req), gfp_flags);
        if (req == NULL)
                return NULL;

        req->rq_xprt = xprt;
        INIT_LIST_HEAD(&req->rq_list);
        INIT_LIST_HEAD(&req->rq_bc_list);

        /* Preallocate one XDR receive buffer */
        if (xprt_alloc_xdr_buf(&req->rq_rcv_buf, gfp_flags) < 0) {
                printk(KERN_ERR "Failed to create bc receive xbuf\n");
                goto out_free;
        }
        req->rq_rcv_buf.len = PAGE_SIZE;

        /* Preallocate one XDR send buffer */
        if (xprt_alloc_xdr_buf(&req->rq_snd_buf, gfp_flags) < 0) {
                printk(KERN_ERR "Failed to create bc snd xbuf\n");
                goto out_free;
        }
        return req;
out_free:
        xprt_free_allocation(req);
        return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Trond Myklebust | 109 | 78.42% | 1 | 50.00% |
Ricardo Labiaga | 30 | 21.58% | 1 | 50.00% |
Total | 139 | 100.00% | 2 | 100.00% |
/*
 * Preallocate up to min_reqs structures and related buffers for use
 * by the backchannel.  This function can be called multiple times
 * when creating new sessions that use the same rpc_xprt.  The
 * preallocated buffers are added to the pool of resources used by
 * the rpc_xprt.  Any one of these resources may be used by an
 * incoming callback request.  It's up to the higher levels in the
 * stack to enforce that the maximum number of session slots is not
 * being exceeded.
 *
 * Some callback arguments can be large.  For example, a pNFS server
 * using multiple deviceids.  The list can be unbounded, but the client
 * has the ability to tell the server the maximum size of the callback
 * requests.  Each deviceID is 16 bytes, so allocating one page for the
 * arguments leaves enough room to receive a number of these deviceIDs
 * (on the order of 256 per request).  The NFS client indicates to the
 * pNFS server that its callback requests can be up to 4096 bytes in size.
 */
int xprt_setup_backchannel(struct rpc_xprt *xprt, unsigned int min_reqs)
{
        if (!xprt->ops->bc_setup)
                return 0;
        return xprt->ops->bc_setup(xprt, min_reqs);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chuck Lever | 25 | 65.79% | 1 | 50.00% |
Trond Myklebust | 13 | 34.21% | 1 | 50.00% |
Total | 38 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL_GPL(xprt_setup_backchannel);
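A caller-side sketch may help here: xprt_setup_backchannel() and xprt_destroy_backchannel() are the entry points a session owner uses (the NFSv4.1 client does something similar when it negotiates callback slots). The function names and the max_cb_slots parameter below are hypothetical; only the two xprt_* calls come from this file.

#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/bc_xprt.h>

/* Hypothetical session owner: reserve one preallocated rpc_rqst (plus one
 * send page and one receive page) per advertised callback slot. */
static int example_session_setup(struct rpc_xprt *xprt, unsigned int max_cb_slots)
{
        return xprt_setup_backchannel(xprt, max_cb_slots);     /* 0 or -ENOMEM */
}

static void example_session_teardown(struct rpc_xprt *xprt, unsigned int max_cb_slots)
{
        /* Frees up to max_cb_slots of the preallocated entries. */
        xprt_destroy_backchannel(xprt, max_cb_slots);
}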
int xprt_setup_bc(struct rpc_xprt *xprt, unsigned int min_reqs)
{
        struct rpc_rqst *req;
        struct list_head tmp_list;
        int i;

        dprintk("RPC: setup backchannel transport\n");

        /*
         * We use a temporary list to keep track of the preallocated
         * buffers.  Once we're done building the list we splice it
         * into the backchannel preallocation list off of the rpc_xprt
         * struct.  This helps minimize the amount of time the list
         * lock is held on the rpc_xprt struct.  It also makes cleanup
         * easier in case of memory allocation errors.
         */
        INIT_LIST_HEAD(&tmp_list);
        for (i = 0; i < min_reqs; i++) {
                /* Pre-allocate one backchannel rpc_rqst */
                req = xprt_alloc_bc_req(xprt, GFP_KERNEL);
                if (req == NULL) {
                        printk(KERN_ERR "Failed to create bc rpc_rqst\n");
                        goto out_free;
                }

                /* Add the allocated buffer to the tmp list */
                dprintk("RPC: adding req= %p\n", req);
                list_add(&req->rq_bc_pa_list, &tmp_list);
        }

        /*
         * Add the temporary list to the backchannel preallocation list
         */
        spin_lock_bh(&xprt->bc_pa_lock);
        list_splice(&tmp_list, &xprt->bc_pa_list);
        xprt_inc_alloc_count(xprt, min_reqs);
        spin_unlock_bh(&xprt->bc_pa_lock);

        dprintk("RPC: setup backchannel transport done\n");
        return 0;

out_free:
        /*
         * Memory allocation failed, free the temporary list
         */
        while (!list_empty(&tmp_list)) {
                req = list_first_entry(&tmp_list,
                                       struct rpc_rqst,
                                       rq_bc_pa_list);
                list_del(&req->rq_bc_pa_list);
                xprt_free_allocation(req);
        }

        dprintk("RPC: setup backchannel transport failed\n");
        return -ENOMEM;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Trond Myklebust | 104 | 54.45% | 2 | 40.00% |
Ricardo Labiaga | 73 | 38.22% | 1 | 20.00% |
Chuck Lever | 13 | 6.81% | 1 | 20.00% |
Weston Andros Adamson | 1 | 0.52% | 1 | 20.00% |
Total | 191 | 100.00% | 5 | 100.00% |
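xprt_setup_bc() itself is not called by upper layers; a transport publishes it (together with the other generic helpers in this file) through its rpc_xprt_ops so that xprt_setup_backchannel() can dispatch to it. The abridged initializer below is a sketch with a made-up structure name; the TCP socket transport in net/sunrpc/xprtsock.c wires these helpers in essentially this way.

static struct rpc_xprt_ops example_bc_capable_ops = {
        /* ...forechannel operations elided... */
        .bc_setup       = xprt_setup_bc,        /* preallocate rqsts and buffers */
        .bc_free_rqst   = xprt_free_bc_rqst,    /* requeue or free a used rqst */
        .bc_destroy     = xprt_destroy_bc,      /* tear down the preallocated pool */
};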
/**
 * xprt_destroy_backchannel - Destroys the backchannel preallocated structures.
 * @xprt: the transport holding the preallocated structures
 * @max_reqs: the maximum number of preallocated structures to destroy
 *
 * Since these structures may have been allocated by multiple calls
 * to xprt_setup_backchannel, we only destroy up to the maximum number
 * of reqs specified by the caller.
 */
void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
{
        if (xprt->ops->bc_destroy)
                xprt->ops->bc_destroy(xprt, max_reqs);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chuck Lever | 33 | 100.00% | 1 | 100.00% |
Total | 33 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(xprt_destroy_backchannel);
void xprt_destroy_bc(struct rpc_xprt *xprt, unsigned int max_reqs)
{
        struct rpc_rqst *req = NULL, *tmp = NULL;

        dprintk("RPC: destroy backchannel transport\n");

        if (max_reqs == 0)
                goto out;

        spin_lock_bh(&xprt->bc_pa_lock);
        xprt_dec_alloc_count(xprt, max_reqs);
        list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
                dprintk("RPC: req=%p\n", req);
                list_del(&req->rq_bc_pa_list);
                xprt_free_allocation(req);
                if (--max_reqs == 0)
                        break;
        }
        spin_unlock_bh(&xprt->bc_pa_lock);

out:
        dprintk("RPC: backchannel list empty= %s\n",
                list_empty(&xprt->bc_pa_list) ? "true" : "false");
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ricardo Labiaga | 105 | 86.07% | 1 | 25.00% |
Trond Myklebust | 8 | 6.56% | 1 | 25.00% |
Weston Andros Adamson | 8 | 6.56% | 1 | 25.00% |
Chuck Lever | 1 | 0.82% | 1 | 25.00% |
Total | 122 | 100.00% | 4 | 100.00% |
static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid)
{
        struct rpc_rqst *req = NULL;

        dprintk("RPC: allocate a backchannel request\n");
        if (atomic_read(&xprt->bc_free_slots) <= 0)
                goto not_found;
        if (list_empty(&xprt->bc_pa_list)) {
                req = xprt_alloc_bc_req(xprt, GFP_ATOMIC);
                if (!req)
                        goto not_found;
                list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
                xprt->bc_alloc_count++;
        }
        req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
                               rq_bc_pa_list);
        req->rq_reply_bytes_recvd = 0;
        req->rq_bytes_sent = 0;
        memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
               sizeof(req->rq_private_buf));
        req->rq_xid = xid;
        req->rq_connect_cookie = xprt->connect_cookie;
not_found:
        dprintk("RPC: backchannel req=%p\n", req);
        return req;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ricardo Labiaga | 89 | 54.60% | 2 | 40.00% |
Trond Myklebust | 74 | 45.40% | 3 | 60.00% |
Total | 163 | 100.00% | 5 | 100.00% |
/*
* Return the preallocated rpc_rqst structure and XDR buffers
* associated with this rpc_task.
*/
void xprt_free_bc_request(struct rpc_rqst *req)
{
        struct rpc_xprt *xprt = req->rq_xprt;

        xprt->ops->bc_free_rqst(req);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ricardo Labiaga | 18 | 64.29% | 1 | 50.00% |
Chuck Lever | 10 | 35.71% | 1 | 50.00% |
Total | 28 | 100.00% | 2 | 100.00% |
void xprt_free_bc_rqst(struct rpc_rqst *req)
{
        struct rpc_xprt *xprt = req->rq_xprt;

        dprintk("RPC: free backchannel req=%p\n", req);

        req->rq_connect_cookie = xprt->connect_cookie - 1;
        smp_mb__before_atomic();
        clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
        smp_mb__after_atomic();

        /*
         * Return it to the list of preallocations so that it
         * may be reused by a new callback request.
         */
        spin_lock_bh(&xprt->bc_pa_lock);
        if (xprt_need_to_requeue(xprt)) {
                list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
                xprt->bc_alloc_count++;
                req = NULL;
        }
        spin_unlock_bh(&xprt->bc_pa_lock);
        if (req != NULL) {
                /*
                 * The last remaining session was destroyed while this
                 * entry was in use.  Free the entry and don't attempt
                 * to add back to the list because there is no need to
                 * have any more preallocated entries.
                 */
                dprintk("RPC: Last session removed req=%p\n", req);
                xprt_free_allocation(req);
                return;
        }
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ricardo Labiaga | 51 | 41.80% | 1 | 20.00% |
Trond Myklebust | 51 | 41.80% | 2 | 40.00% |
Chuck Lever | 18 | 14.75% | 1 | 20.00% |
Peter Zijlstra | 2 | 1.64% | 1 | 20.00% |
Total | 122 | 100.00% | 5 | 100.00% |
/*
 * One or more rpc_rqst structures have been preallocated during the
 * backchannel setup.  Buffer space for the send and private XDR buffers
 * has been preallocated as well.  Use xprt_alloc_bc_request to obtain
 * one of these structures, and use xprt_free_bc_request to return it.
 *
 * We know that we're called in soft interrupt context, so we can take
 * the plain spin_lock; there is no need for the bottom-half variant.
 *
 * Return an available rpc_rqst, otherwise NULL if none are available.
 */
struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid)
{
        struct rpc_rqst *req;

        spin_lock(&xprt->bc_pa_lock);
        list_for_each_entry(req, &xprt->bc_pa_list, rq_bc_pa_list) {
                if (req->rq_connect_cookie != xprt->connect_cookie)
                        continue;
                if (req->rq_xid == xid)
                        goto found;
        }
        req = xprt_alloc_bc_request(xprt, xid);
found:
        spin_unlock(&xprt->bc_pa_lock);
        return req;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Trond Myklebust | 82 | 100.00% | 1 | 100.00% |
Total | 82 | 100.00% | 1 | 100.00% |
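On the receive path, a transport that detects an RPC call arriving in the callback direction uses xprt_lookup_bc_request() to claim a preallocated rpc_rqst and, once the call data has been copied into rq_rcv_buf, hands it to the callback service via xprt_complete_bc_request(). The condensed sketch below is hypothetical (the copy and error handling are elided; the real TCP version lives in net/sunrpc/xprtsock.c):

static void example_bc_receive(struct rpc_xprt *xprt, __be32 xid, size_t copied)
{
        struct rpc_rqst *req;

        req = xprt_lookup_bc_request(xprt, xid);
        if (req == NULL)
                return;         /* no backchannel slot available; drop the call */

        /* ...the transport copies the callback's XDR data into
         * req->rq_rcv_buf here, tracking the byte count in "copied"... */

        xprt_complete_bc_request(req, copied);  /* queue it for the bc service */
}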
/*
 * Add a callback request to the callback list.  The callback
 * service sleeps on the sv_cb_waitq waiting for new
 * requests.  Wake it up after enqueuing the request.
 */
void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
{
        struct rpc_xprt *xprt = req->rq_xprt;
        struct svc_serv *bc_serv = xprt->bc_serv;

        spin_lock(&xprt->bc_pa_lock);
        list_del(&req->rq_bc_pa_list);
        xprt_dec_alloc_count(xprt, 1);
        spin_unlock(&xprt->bc_pa_lock);

        req->rq_private_buf.len = copied;
        set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);

        dprintk("RPC: add callback request to list\n");
        spin_lock(&bc_serv->sv_cb_lock);
        list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
        wake_up(&bc_serv->sv_cb_waitq);
        spin_unlock(&bc_serv->sv_cb_lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Trond Myklebust | 98 | 80.33% | 3 | 75.00% |
Chuck Lever | 24 | 19.67% | 1 | 25.00% |
Total | 122 | 100.00% | 4 | 100.00% |
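The consumer of sv_cb_list is a callback service thread: it sleeps on sv_cb_waitq, dequeues requests under sv_cb_lock, and dispatches them with bc_svc_process(); the rpc_rqst is eventually returned through xprt_free_bc_request(). The loop below is a simplified, illustrative sketch loosely modeled on the NFSv4 callback service thread, not a verbatim copy of it.

static int example_callback_svc(void *vrqstp)
{
        struct svc_rqst *rqstp = vrqstp;
        struct svc_serv *serv = rqstp->rq_server;
        struct rpc_rqst *req;
        DEFINE_WAIT(wq);

        while (!kthread_should_stop()) {
                prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE);
                spin_lock_bh(&serv->sv_cb_lock);
                if (!list_empty(&serv->sv_cb_list)) {
                        /* Oldest request queued by xprt_complete_bc_request() */
                        req = list_first_entry(&serv->sv_cb_list,
                                               struct rpc_rqst, rq_bc_list);
                        list_del(&req->rq_bc_list);
                        spin_unlock_bh(&serv->sv_cb_lock);
                        finish_wait(&serv->sv_cb_waitq, &wq);
                        /* Decode and run the callback procedure */
                        bc_svc_process(serv, req, rqstp);
                } else {
                        spin_unlock_bh(&serv->sv_cb_lock);
                        if (!kthread_should_stop())
                                schedule();
                        finish_wait(&serv->sv_cb_waitq, &wq);
                }
        }
        return 0;
}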
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Trond Myklebust | 595 | 45.28% | 9 | 42.86% |
Ricardo Labiaga | 559 | 42.54% | 2 | 9.52% |
Chuck Lever | 136 | 10.35% | 3 | 14.29% |
Weston Andros Adamson | 10 | 0.76% | 3 | 14.29% |
Jeff Layton | 6 | 0.46% | 1 | 4.76% |
Paul Gortmaker | 3 | 0.23% | 1 | 4.76% |
Tejun Heo | 3 | 0.23% | 1 | 4.76% |
Peter Zijlstra | 2 | 0.15% | 1 | 4.76% |
Total | 1314 | 100.00% | 21 | 100.00% |