cregit-Linux: how code gets into the kernel

Release 4.11 net/sunrpc/xdr.c

Directory: net/sunrpc
/*
 * linux/net/sunrpc/xdr.c
 *
 * Generic XDR support.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>

/*
 * XDR functions for basic NFS types
 */

/*
 * xdr_encode_netobj - encode a counted netobj into an XDR buffer
 * @p: current position in the XDR buffer
 * @obj: netobj (length + data) to encode
 *
 * Writes the 32-bit length word, then the data, zero-padding the
 * final quad.  Returns the updated buffer position.
 */
__be32 *
xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
{
	unsigned int quadlen = XDR_QUADLEN(obj->len);

	p[quadlen] = 0;		/* zero trailing bytes */
	*p++ = cpu_to_be32(obj->len);
	memcpy(p, obj->data, obj->len);
	return p + quadlen;
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)6695.65%250.00%
Alexey Dobriyan22.90%125.00%
Benny Halevy11.45%125.00%
Total69100.00%4100.00%

EXPORT_SYMBOL_GPL(xdr_encode_netobj);
/*
 * xdr_decode_netobj - decode a counted netobj from an XDR buffer
 * @p: current position in the XDR buffer
 * @obj: netobj to fill in; obj->data points into the buffer (no copy)
 *
 * Returns the updated buffer position, or NULL if the encoded length
 * exceeds XDR_MAX_NETOBJ.
 */
__be32 *
xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
{
	unsigned int len = be32_to_cpu(*p++);

	if (len > XDR_MAX_NETOBJ)
		return NULL;
	obj->len = len;
	obj->data = (u8 *) p;
	return p + XDR_QUADLEN(len);
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)5895.08%133.33%
Alexey Dobriyan23.28%133.33%
Benny Halevy11.64%133.33%
Total61100.00%3100.00%

EXPORT_SYMBOL_GPL(xdr_decode_netobj); /** * xdr_encode_opaque_fixed - Encode fixed length opaque data * @p: pointer to current position in XDR buffer. * @ptr: pointer to data to encode (or NULL) * @nbytes: size of data. * * Copy the array of data of length nbytes at ptr to the XDR buffer * at position p, then align to the next 32-bit boundary by padding * with zero bytes (see RFC1832). * Note: if ptr is NULL, only the padding is performed. * * Returns the updated current XDR buffer position * */
__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
{
	unsigned int quadlen, padding;

	/* Nothing to copy and nothing to pad for a zero-length object. */
	if (unlikely(nbytes == 0))
		return p;

	quadlen = XDR_QUADLEN(nbytes);
	padding = (quadlen << 2) - nbytes;

	/* A NULL ptr means the caller only wants the zero padding. */
	if (ptr != NULL)
		memcpy(p, ptr, nbytes);
	if (padding != 0)
		memset((char *)p + nbytes, 0, padding);

	return p + quadlen;
}

Contributors

PersonTokensPropCommitsCommitProp
Trond Myklebust6973.40%240.00%
Linus Torvalds (pre-git)2324.47%240.00%
Alexey Dobriyan22.13%120.00%
Total94100.00%5100.00%

EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed); /** * xdr_encode_opaque - Encode variable length opaque data * @p: pointer to current position in XDR buffer. * @ptr: pointer to data to encode (or NULL) * @nbytes: size of data. * * Returns the updated current XDR buffer position */
__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
{
	/* Length word first, then the zero-padded opaque body. */
	*p++ = cpu_to_be32(nbytes);
	return xdr_encode_opaque_fixed(p, ptr, nbytes);
}

Contributors

PersonTokensPropCommitsCommitProp
Trond Myklebust2155.26%125.00%
Linus Torvalds (pre-git)1436.84%125.00%
Alexey Dobriyan25.26%125.00%
Benny Halevy12.63%125.00%
Total38100.00%4100.00%

EXPORT_SYMBOL_GPL(xdr_encode_opaque);
/*
 * xdr_encode_string - encode a NUL-terminated C string
 * @p: current position in the XDR buffer
 * @string: string to encode
 *
 * An XDR string is encoded exactly like a variable-length opaque.
 */
__be32 *
xdr_encode_string(__be32 *p, const char *string)
{
	size_t len = strlen(string);

	return xdr_encode_array(p, string, len);
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)2692.86%266.67%
Alexey Dobriyan27.14%133.33%
Total28100.00%3100.00%

EXPORT_SYMBOL_GPL(xdr_encode_string);
/*
 * xdr_decode_string_inplace - decode a string without copying it
 * @p: current position in the XDR buffer
 * @sp: on success, points at the string bytes inside the buffer
 * @lenp: on success, the string length in bytes
 * @maxlen: largest acceptable length
 *
 * Returns the updated buffer position, or NULL if the encoded length
 * exceeds @maxlen.
 */
__be32 *
xdr_decode_string_inplace(__be32 *p, char **sp,
			  unsigned int *lenp, unsigned int maxlen)
{
	u32 len = be32_to_cpu(*p++);

	if (len > maxlen)
		return NULL;
	/* Hand the caller a pointer into the buffer itself - no copy. */
	*lenp = len;
	*sp = (char *) p;
	return p + XDR_QUADLEN(len);
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds5683.58%233.33%
Chuck Lever710.45%116.67%
Alexey Dobriyan22.99%116.67%
Benny Halevy11.49%116.67%
Linus Torvalds (pre-git)11.49%116.67%
Total67100.00%6100.00%

EXPORT_SYMBOL_GPL(xdr_decode_string_inplace); /** * xdr_terminate_string - '\0'-terminate a string residing in an xdr_buf * @buf: XDR buffer where string resides * @len: length of string, in bytes * */
void xdr_terminate_string(struct xdr_buf *buf, const u32 len) { char *kaddr; kaddr = kmap_atomic(buf->pages[0]); kaddr[buf->page_base + len] = '\0'; kunmap_atomic(kaddr); }

Contributors

PersonTokensPropCommitsCommitProp
Chuck Lever46100.00%1100.00%
Total46100.00%1100.00%

EXPORT_SYMBOL_GPL(xdr_terminate_string);
void xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset, struct page **pages, unsigned int base, unsigned int len) { struct kvec *head = xdr->head; struct kvec *tail = xdr->tail; char *buf = (char *)head->iov_base; unsigned int buflen = head->iov_len; head->iov_len = offset; xdr->pages = pages; xdr->page_base = base; xdr->page_len = len; tail->iov_base = buf + offset; tail->iov_len = buflen - offset; xdr->buflen += len; }

Contributors

PersonTokensPropCommitsCommitProp
Adrian Bunk7264.29%125.00%
Trond Myklebust4035.71%375.00%
Total112100.00%4100.00%

EXPORT_SYMBOL_GPL(xdr_inline_pages); /* * Helper routines for doing 'memmove' like operations on a struct xdr_buf */ /** * _shift_data_right_pages * @pages: vector of pages containing both the source and dest memory area. * @pgto_base: page vector address of destination * @pgfrom_base: page vector address of source * @len: number of bytes to copy * * Note: the addresses pgto_base and pgfrom_base are both calculated in * the same way: * if a memory area starts at byte 'base' in page 'pages[i]', * then its address is given as (i << PAGE_SHIFT) + base * Also note: pgfrom_base must be < pgto_base, but the memory areas * they point to may overlap. */
/* Shift a region of page-vector data toward higher addresses; source
 * and destination may overlap, so the copy runs back-to-front. */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
		size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgto_base <= pgfrom_base);

	/* Start from the ends of both regions and work backwards, so an
	 * overlapping destination never clobbers unread source bytes. */
	pgto_base += len;
	pgfrom_base += len;

	pgto = pages + (pgto_base >> PAGE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_SHIFT);

	pgto_base &= ~PAGE_MASK;
	pgfrom_base &= ~PAGE_MASK;

	do {
		/* Are any pointers crossing a page boundary? */
		if (pgto_base == 0) {
			pgto_base = PAGE_SIZE;
			pgto--;
		}
		if (pgfrom_base == 0) {
			pgfrom_base = PAGE_SIZE;
			pgfrom--;
		}

		/* Copy at most to the start of whichever page runs out first. */
		copy = len;
		if (copy > pgto_base)
			copy = pgto_base;
		if (copy > pgfrom_base)
			copy = pgfrom_base;
		pgto_base -= copy;
		pgfrom_base -= copy;

		vto = kmap_atomic(*pgto);
		if (*pgto != *pgfrom) {
			vfrom = kmap_atomic(*pgfrom);
			memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
			kunmap_atomic(vfrom);
		} else
			/* Same page: ranges may overlap, memmove is required. */
			memmove(vto + pgto_base, vto + pgfrom_base, copy);
		flush_dcache_page(*pgto);
		kunmap_atomic(vto);

	} while ((len -= copy) != 0);
}

Contributors

PersonTokensPropCommitsCommitProp
Trond Myklebust22697.41%375.00%
Kirill A. Shutemov62.59%125.00%
Total232100.00%4100.00%

/** * _copy_to_pages * @pages: array of pages * @pgbase: page vector address of destination * @p: pointer to source data * @len: length * * Copies data from an arbitrary memory location into an array of pages * The copy is assumed to be non-overlapping. */
/* Copy a flat memory region into a page vector (non-overlapping). */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
	struct page **pgto;
	char *vto;
	size_t copy;

	pgto = pages + (pgbase >> PAGE_SHIFT);
	pgbase &= ~PAGE_MASK;

	for (;;) {
		/* Never copy past the end of the current page. */
		copy = PAGE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vto = kmap_atomic(*pgto);
		memcpy(vto + pgbase, p, copy);
		kunmap_atomic(vto);

		len -= copy;
		if (len == 0)
			break;

		/* Flush each fully-written page before moving to the next. */
		pgbase += copy;
		if (pgbase == PAGE_SIZE) {
			flush_dcache_page(*pgto);
			pgbase = 0;
			pgto++;
		}
		p += copy;
	}
	/* Flush the final (possibly partial) page as well. */
	flush_dcache_page(*pgto);
}

Contributors

PersonTokensPropCommitsCommitProp
Trond Myklebust14097.22%375.00%
Kirill A. Shutemov42.78%125.00%
Total144100.00%4100.00%

/** * _copy_from_pages * @p: pointer to destination * @pages: array of pages * @pgbase: offset of source data * @len: length * * Copies data into an arbitrary memory location from an array of pages * The copy is assumed to be non-overlapping. */
/* Copy data out of a page vector into a flat destination buffer.
 * Source and destination are assumed not to overlap. */
void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
	struct page **page = pages + (pgbase >> PAGE_SHIFT);
	char *src;
	size_t chunk;

	pgbase &= ~PAGE_MASK;

	do {
		/* Never read past the end of the current page. */
		chunk = PAGE_SIZE - pgbase;
		if (chunk > len)
			chunk = len;

		src = kmap_atomic(*page);
		memcpy(p, src + pgbase, chunk);
		kunmap_atomic(src);

		pgbase += chunk;
		if (pgbase == PAGE_SIZE) {
			pgbase = 0;
			page++;
		}
		p += chunk;

	} while ((len -= chunk) != 0);
}

Contributors

PersonTokensPropCommitsCommitProp
Trond Myklebust12296.83%150.00%
Kirill A. Shutemov43.17%150.00%
Total126100.00%2100.00%

EXPORT_SYMBOL_GPL(_copy_from_pages); /** * xdr_shrink_bufhead * @buf: xdr_buf * @len: bytes to remove from buf->head[0] * * Shrinks XDR buffer's header kvec buf->head[0] by * 'len' bytes. The extra data is not lost, but is instead * moved into the inlined pages and/or the tail. */
/* Shrink buf->head[0] by @len bytes.  The displaced bytes are not
 * lost: the tail of head spills into the page data, and the tail of
 * the page data spills into buf->tail, preserving the byte stream. */
static void
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
	struct kvec *head, *tail;
	size_t copy, offs;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	head = buf->head;

	/* Clamp len so we never shrink past the head's actual length. */
	WARN_ON_ONCE(len > head->iov_len);
	if (len > head->iov_len)
		len = head->iov_len;

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		/* Make room in the tail for the incoming len bytes. */
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove((char *)tail->iov_base + len,
					tail->iov_base, copy);
		}
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > pglen)
			copy = pglen;
		offs = len - copy;
		/* Clip to what actually fits in the tail buffer. */
		if (offs >= tail->iov_len)
			copy = 0;
		else if (copy > tail->iov_len - offs)
			copy = tail->iov_len - offs;
		if (copy != 0)
			_copy_from_pages((char *)tail->iov_base + offs,
					buf->pages,
					buf->page_base + pglen + offs - len,
					copy);
		/* Do we also need to copy data from the head into the tail ? */
		if (len > pglen) {
			offs = copy = len - pglen;
			if (copy > tail->iov_len)
				copy = tail->iov_len;
			memcpy(tail->iov_base,
					(char *)head->iov_base +
					head->iov_len - offs,
					copy);
		}
	}
	/* Now handle pages */
	if (pglen != 0) {
		/* Shift existing page data right to open a gap of len bytes. */
		if (pglen > len)
			_shift_data_right_pages(buf->pages,
					buf->page_base + len,
					buf->page_base,
					pglen - len);
		copy = len;
		if (len > pglen)
			copy = pglen;
		/* Fill the gap with the end of the head. */
		_copy_to_pages(buf->pages, buf->page_base,
				(char *)head->iov_base + head->iov_len - len,
				copy);
	}
	head->iov_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}

Contributors

PersonTokensPropCommitsCommitProp
Trond Myklebust35095.37%240.00%
Weston Andros Adamson154.09%120.00%
Al Viro10.27%120.00%
Adrian Bunk10.27%120.00%
Total367100.00%5100.00%

/** * xdr_shrink_pagelen * @buf: xdr_buf * @len: bytes to remove from buf->pages * * Shrinks XDR buffer's page array buf->pages by * 'len' bytes. The extra data is not lost, but is instead * moved into the tail. */
/* Shrink buf->pages by @len bytes, moving the displaced page data
 * into buf->tail so the byte stream is preserved. */
static void
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
	struct kvec *tail;
	size_t copy;
	unsigned int pglen = buf->page_len;
	unsigned int tailbuf_len;

	tail = buf->tail;
	BUG_ON (len > pglen);

	/* Space in the underlying buffer reserved for the tail kvec. */
	tailbuf_len = buf->buflen - buf->head->iov_len - buf->page_len;

	/* Shift the tail first */
	if (tailbuf_len != 0) {
		/* Grow the tail (up to len bytes) into its free space. */
		unsigned int free_space = tailbuf_len - tail->iov_len;

		if (len < free_space)
			free_space = len;
		tail->iov_len += free_space;

		copy = len;
		if (tail->iov_len > len) {
			/* Slide existing tail data right to make room. */
			char *p = (char *)tail->iov_base + len;
			memmove(p, tail->iov_base, tail->iov_len - len);
		} else
			/* Tail too small: only tail->iov_len bytes survive. */
			copy = tail->iov_len;
		/* Copy from the inlined pages into the tail */
		_copy_from_pages((char *)tail->iov_base,
				buf->pages, buf->page_base + pglen - len,
				copy);
	}
	buf->page_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}

Contributors

PersonTokensPropCommitsCommitProp
Trond Myklebust17987.32%337.50%
Benny Halevy2411.71%337.50%
Al Viro10.49%112.50%
Adrian Bunk10.49%112.50%
Total205100.00%8100.00%


/*
 * xdr_shift_buf - shift @len bytes out of the buffer head
 * @buf: XDR buffer to adjust
 * @len: number of bytes to remove from buf->head[0]
 *
 * Thin public wrapper around xdr_shrink_bufhead().
 */
void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
	xdr_shrink_bufhead(buf, len);
}

Contributors

PersonTokensPropCommitsCommitProp
Trond Myklebust20100.00%1100.00%
Total20100.00%1100.00%

EXPORT_SYMBOL_GPL(xdr_shift_buf); /** * xdr_stream_pos - Return the current offset from the start of the xdr_stream * @xdr: pointer to struct xdr_stream */
unsigned int xdr_stream_pos(const struct xdr_stream *xdr)
{
	/* Words consumed so far = total quad-words minus words remaining. */
	unsigned int consumed = (unsigned int)(XDR_QUADLEN(xdr->buf->len) - xdr->nwords);

	return consumed << 2;	/* convert quad-words to bytes */
}

Contributors

PersonTokensPropCommitsCommitProp
Trond Myklebust34100.00%1100.00%
Total34100.00%1100.00%

EXPORT_SYMBOL_GPL(xdr_stream_pos); /** * xdr_init_encode - Initialize a struct xdr_stream for sending data. * @xdr: pointer to xdr_stream struct * @buf: pointer to XDR buffer in which to encode data * @p: current pointer inside XDR buffer * * Note: at the moment the RPC client only passes the length of our * scratch buffer in the xdr_buf's header kvec. Previously this * meant we needed to call xdr_adjust_iovec() after encoding the * data. With the new scheme, the xdr_stream manages the details * of the buffer length, and takes care of adjusting the kvec * length for us. */
/* Initialize @xdr for encoding into @buf; @p, if non-NULL, sets the
 * initial write position past data already present in the head. */
void
xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	struct kvec *iov = buf->head;
	/* Bytes of buflen attributable to the head kvec alone. */
	int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

	xdr_set_scratch_buffer(xdr, NULL, 0);
	BUG_ON(scratch_len < 0);
	xdr->buf = buf;
	xdr->iov = iov;
	/* Encoding starts where the head's current data ends ... */
	xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
	/* ... and may run to the end of the head's reserved space. */
	xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
	BUG_ON(iov->iov_len > scratch_len);

	if (p != xdr->p && p != NULL) {
		size_t len;

		/* Caller-supplied position must lie within the head. */
		BUG_ON(p < xdr->p || p > xdr->end);
		len = (char *)p - (char *)xdr->p;
		xdr->p = p;
		/* Account for the bytes the caller has already written. */
		buf->len += len;
		iov->iov_len += len;
	}
}

Contributors

PersonTokensPropCommitsCommitProp
Trond Myklebust18093.26%350.00%
J. Bruce Fields94.66%116.67%
Alexey Dobriyan31.55%116.67%
Al Viro10.52%116.67%
Total193100.00%6100.00%

EXPORT_SYMBOL_GPL(xdr_init_encode); /** * xdr_commit_encode - Ensure all data is written to buffer * @xdr: pointer to xdr_stream * * We handle encoding across page boundaries by giving the caller a * temporary location to write to, then later copying the data into * place; xdr_commit_encode does that copying. * * Normally the caller doesn't need to call this directly, as the * following xdr_reserve_space will do it. But an explicit call may be * required at the end of encoding, or any other time when the xdr_buf * data might be read. */
/*
 * xdr_commit_encode - flush straddling encode data back into place
 * @xdr: pointer to xdr_stream
 *
 * xdr_get_next_encode_buffer() lets callers encode across a page
 * boundary by writing into the next page and remembering the unused
 * fragment in xdr->scratch.  This routine copies that fragment back
 * and closes the resulting gap; it is a no-op when nothing is pending.
 */
void xdr_commit_encode(struct xdr_stream *xdr)
{
	int pending = xdr->scratch.iov_len;
	void *pgstart;

	if (pending == 0)
		return;
	pgstart = page_address(*xdr->page_ptr);
	/* Restore the deferred fragment, then slide the page contents
	 * down over the hole it leaves. */
	memcpy(xdr->scratch.iov_base, pgstart, pending);
	memmove(pgstart, pgstart + pending, (void *)xdr->p - pgstart);
	xdr->scratch.iov_len = 0;
}

Contributors

PersonTokensPropCommitsCommitProp
J. Bruce Fields80100.00%1100.00%
Total80100.00%1100.00%

EXPORT_SYMBOL_GPL(xdr_commit_encode);
/*
 * xdr_get_next_encode_buffer - reserve space straddling a page boundary
 * @xdr: pointer to xdr_stream
 * @nbytes: number of bytes (already quad-aligned) to reserve
 *
 * Called by xdr_reserve_space() when the request does not fit in the
 * current buffer fragment.  The caller encodes into the start of the
 * next page; the unused fragment at the end of the previous buffer is
 * recorded in xdr->scratch so xdr_commit_encode() can later shift the
 * data back into place.
 *
 * Returns a pointer to @nbytes of encode space, or NULL if the
 * request is too large or the buffer is out of space.
 */
static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
		size_t nbytes)
{
	/*
	 * FIX: 'p' was declared 'static', which serves no purpose and
	 * makes this function racy - concurrent encoders on different
	 * xdr_streams would share (and corrupt) one pointer.  It must
	 * be an ordinary automatic variable.
	 */
	__be32 *p;
	int space_left;
	int frag1bytes, frag2bytes;

	if (nbytes > PAGE_SIZE)
		return NULL; /* Bigger buffers require special handling */
	if (xdr->buf->len + nbytes > xdr->buf->buflen)
		return NULL; /* Sorry, we're totally out of space */
	/* frag1 = unused space left in the current fragment,
	 * frag2 = remainder that will land in the next page. */
	frag1bytes = (xdr->end - xdr->p) << 2;
	frag2bytes = nbytes - frag1bytes;
	if (xdr->iov)
		xdr->iov->iov_len += frag1bytes;
	else
		xdr->buf->page_len += frag1bytes;
	xdr->page_ptr++;
	xdr->iov = NULL;
	/*
	 * If the last encode didn't end exactly on a page boundary, the
	 * next one will straddle boundaries.  Encode into the next
	 * page, then copy it back later in xdr_commit_encode.  We use
	 * the "scratch" iov to track any temporarily unused fragment of
	 * space at the end of the previous buffer:
	 */
	xdr->scratch.iov_base = xdr->p;
	xdr->scratch.iov_len = frag1bytes;
	p = page_address(*xdr->page_ptr);
	/*
	 * Note this is where the next encode will start after we've
	 * shifted this one back:
	 */
	xdr->p = (void *)p + frag2bytes;
	space_left = xdr->buf->buflen - xdr->buf->len;
	xdr->end = (void *)p + min_t(int, space_left, PAGE_SIZE);
	xdr->buf->page_len += frag2bytes;
	xdr->buf->len += nbytes;
	return p;
}

Contributors

PersonTokensPropCommitsCommitProp
J. Bruce Fields20599.51%150.00%
Trond Myklebust10.49%150.00%
Total206100.00%2100.00%

/** * xdr_reserve_space - Reserve buffer space for sending * @xdr: pointer to xdr_stream * @nbytes: number of bytes to reserve * * Checks that we have enough buffer space to encode 'nbytes' more * bytes of data. If so, update the total xdr_buf length, and * adjust the length of the current kvec. */
/* Reserve @nbytes of encode space, rounding up to a quad boundary. */
__be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p = xdr->p;
	__be32 *q;

	/* Flush any fragment deferred by a previous boundary-straddling
	 * reservation before handing out new space. */
	xdr_commit_encode(xdr);
	/* align nbytes on the next 32-bit boundary */
	nbytes += 3;
	nbytes &= ~3;
	q = p + (nbytes >> 2);
	/* q < p catches pointer wrap-around from a huge nbytes. */
	if (unlikely(q > xdr->end || q < p))
		return xdr_get_next_encode_buffer(xdr, nbytes);
	xdr->p = q;
	/* Charge the reservation to the head kvec or to the pages. */
	if (xdr->iov)
		xdr->iov->iov_len += nbytes;
	else
		xdr->buf->page_len += nbytes;
	xdr->buf->len += nbytes;
	return p;
}

Contributors

PersonTokensPropCommitsCommitProp
Trond Myklebust8574.56%133.33%
J. Bruce Fields2622.81%133.33%
Alexey Dobriyan32.63%133.33%
Total114100.00%3100.00%

EXPORT_SYMBOL_GPL(xdr_reserve_space); /** * xdr_truncate_encode - truncate an encode buffer * @xdr: pointer to xdr_stream * @len: new length of buffer * * Truncates the xdr stream, so that xdr->buf->len == len, * and xdr->p points at offset len from the start of the buffer, and * head, tail, and page lengths are adjusted to correspond. * * If this means moving xdr->p to a different buffer, we assume that * that the end pointer should be set to the end of the current page, * except in the case of the head buffer when we assume the head * buffer's current length represents the end of the available buffer. * * This is *not* safe to use on a buffer that already has inlined page * cache pages (as in a zero-copy server read reply), except for the * simple case of truncating from one position in the tail to another. * */
/* Truncate the encode stream so buf->len == len, peeling bytes off
 * the tail first, then the pages, then the head, and repositioning
 * xdr->p / xdr->end / xdr->iov / xdr->page_ptr to match. */
void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *head = buf->head;
	struct kvec *tail = buf->tail;
	int fraglen;
	int new;

	/* This routine only truncates; growing is not supported. */
	if (len > buf->len) {
		WARN_ON_ONCE(1);
		return;
	}
	xdr_commit_encode(xdr);

	/* First trim as much as possible from the tail. */
	fraglen = min_t(int, buf->len - len, tail->iov_len);
	tail->iov_len -= fraglen;
	buf->len -= fraglen;
	if (tail->iov_len) {
		/* Still inside the tail: just move the write pointer back. */
		xdr->p = tail->iov_base + tail->iov_len;
		WARN_ON_ONCE(!xdr->end);
		WARN_ON_ONCE(!xdr->iov);
		return;
	}
	/* Tail fully consumed; now trim from the page data. */
	WARN_ON_ONCE(fraglen);
	fraglen = min_t(int, buf->len - len, buf->page_len);
	buf->page_len -= fraglen;
	buf->len -= fraglen;

	new = buf->page_base + buf->page_len;

	xdr->page_ptr = buf->pages + (new >> PAGE_SHIFT);

	if (buf->page_len) {
		/* New position lands inside the page array. */
		xdr->p = page_address(*xdr->page_ptr);
		xdr->end = (void *)xdr->p + PAGE_SIZE;
		xdr->p = (void *)xdr->p + (new % PAGE_SIZE);
		WARN_ON_ONCE(xdr->iov);
		return;
	}
	/* New position lands back in the head. */
	if (fraglen) {
		xdr->end = head->iov_base + head->iov_len;
		xdr->page_ptr--;
	}
	/* (otherwise assume xdr->end is already set) */
	head->iov_len = len;
	buf->len = len;
	xdr->p = head->iov_base + head->iov_len;
	xdr->iov = buf->head;
}

Contributors

PersonTokensPropCommitsCommitProp
J. Bruce Fields307100.00%5100.00%
Total307100.00%5100.00%

EXPORT_SYMBOL(xdr_truncate_encode); /** * xdr_restrict_buflen - decrease available buffer space * @xdr: pointer to xdr_stream * @newbuflen: new maximum number of bytes available * * Adjust our idea of how much space is available in the buffer. * If we've already used too much space in the buffer, returns -1. * If the available space is already smaller than newbuflen, returns 0 * and does nothing. Otherwise, adjusts xdr->buf->buflen to newbuflen * and ensures xdr->end is set at most offset newbuflen from the start * of the buffer. */
int xdr_restrict_buflen(struct xdr_stream *xdr, int newbuflen)
{
	struct xdr_buf *buf = xdr->buf;
	/* Unused encode space remaining in the current fragment. */
	int remaining = (void *)xdr->end - (void *)xdr->p;
	/* Buffer offset that xdr->end currently corresponds to. */
	int end_offset = buf->len + remaining;

	if (newbuflen < 0 || newbuflen < buf->len)
		return -1;	/* already consumed more than the new limit */
	if (newbuflen > buf->buflen)
		return 0;	/* existing limit is already tighter: no-op */
	/* Pull xdr->end back so it sits at most newbuflen into the buffer. */
	if (newbuflen < end_offset)
		xdr->end = (void *)xdr->end + newbuflen - end_offset;
	buf->buflen = newbuflen;
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
J. Bruce Fields108100.00%1100.00%
Total108100.00%1100.00%

EXPORT_SYMBOL(xdr_restrict_buflen); /** * xdr_write_pages - Insert a list of pages into an XDR buffer for sending * @xdr: pointer to xdr_stream * @pages: list of pages * @base: offset of first byte * @len: length of data in bytes * */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,