// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/net/sunrpc/xdr.c
 *
 * Generic XDR support.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>
#include <linux/bvec.h>
#include <trace/events/sunrpc.h>

static void _copy_to_pages(struct page **, size_t, const char *, size_t);

/*
 * XDR functions for basic NFS types
 */
__be32 *
xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
{
	unsigned int	quadlen = XDR_QUADLEN(obj->len);

	p[quadlen] = 0;		/* zero trailing bytes */
	*p++ = cpu_to_be32(obj->len);
	memcpy(p, obj->data, obj->len);
	return p + XDR_QUADLEN(obj->len);
}
EXPORT_SYMBOL_GPL(xdr_encode_netobj);
__be32 *
xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
{
	unsigned int	len;

	if ((len = be32_to_cpu(*p++)) > XDR_MAX_NETOBJ)
		return NULL;
	obj->len  = len;
	obj->data = (u8 *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_netobj);
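
/*
 * Illustrative sketch (not part of the original file): a typical netobj
 * round trip.  The buffer and cookie names here are invented.
 *
 *	struct xdr_netobj out = { .len = 8, .data = cookie };
 *	struct xdr_netobj in;
 *
 *	p = xdr_encode_netobj(p, &out);	// length word, data, zero pad
 *	q = xdr_decode_netobj(q, &in);	// NULL if length > XDR_MAX_NETOBJ
 */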
/**
 * xdr_encode_opaque_fixed - Encode fixed length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Copy the array of data of length nbytes at ptr to the XDR buffer
 * at position p, then align to the next 32-bit boundary by padding
 * with zero bytes (see RFC1832).
 * Note: if ptr is NULL, only the padding is performed.
 *
 * Returns the updated current XDR buffer position
 *
 */
__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
{
	if (likely(nbytes != 0)) {
		unsigned int quadlen = XDR_QUADLEN(nbytes);
		unsigned int padding = (quadlen << 2) - nbytes;

		if (ptr != NULL)
			memcpy(p, ptr, nbytes);
		if (padding != 0)
			memset((char *)p + nbytes, 0, padding);
		p += quadlen;
	}
	return p;
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);
/**
 * xdr_encode_opaque - Encode variable length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
{
	*p++ = cpu_to_be32(nbytes);
	return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque);
__be32 *
xdr_encode_string(__be32 *p, const char *string)
{
	return xdr_encode_array(p, string, strlen(string));
}
EXPORT_SYMBOL_GPL(xdr_encode_string);
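
/*
 * Illustrative sketch (not from the original file): a string is just a
 * counted opaque, so encoding "hello" consumes 4 bytes of length plus
 * 8 bytes of data-and-pad (5 data, 3 zero fill):
 *
 *	p = xdr_encode_string(p, "hello");
 */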
__be32 *
xdr_decode_string_inplace(__be32 *p, char **sp,
			  unsigned int *lenp, unsigned int maxlen)
{
	u32 len;

	len = be32_to_cpu(*p++);
	if (len > maxlen)
		return NULL;
	*lenp = len;
	*sp = (char *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);
/**
 * xdr_terminate_string - '\0'-terminate a string residing in an xdr_buf
 * @buf: XDR buffer where string resides
 * @len: length of string, in bytes
 *
 */
void
xdr_terminate_string(struct xdr_buf *buf, const u32 len)
{
	char *kaddr;

	kaddr = kmap_atomic(buf->pages[0]);
	kaddr[buf->page_base + len] = '\0';
	kunmap_atomic(kaddr);
}
EXPORT_SYMBOL_GPL(xdr_terminate_string);
static size_t
xdr_buf_pagecount(struct xdr_buf *buf)
{
	if (!buf->page_len)
		return 0;
	return (buf->page_base + buf->page_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
}
int
xdr_alloc_bvec(struct xdr_buf *buf, gfp_t gfp)
{
	size_t i, n = xdr_buf_pagecount(buf);

	if (n != 0 && buf->bvec == NULL) {
		buf->bvec = kmalloc_array(n, sizeof(buf->bvec[0]), gfp);
		if (!buf->bvec)
			return -ENOMEM;
		for (i = 0; i < n; i++) {
			buf->bvec[i].bv_page = buf->pages[i];
			buf->bvec[i].bv_len = PAGE_SIZE;
			buf->bvec[i].bv_offset = 0;
		}
	}
	return 0;
}
void
xdr_free_bvec(struct xdr_buf *buf)
{
	kfree(buf->bvec);
	buf->bvec = NULL;
}
/**
 * xdr_inline_pages - Prepare receive buffer for a large reply
 * @xdr: xdr_buf into which reply will be placed
 * @offset: expected offset where data payload will start, in bytes
 * @pages: vector of struct page pointers
 * @base: offset in first page where receive should start, in bytes
 * @len: expected size of the upper layer data payload, in bytes
 *
 */
void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
		 struct page **pages, unsigned int base, unsigned int len)
{
	struct kvec *head = xdr->head;
	struct kvec *tail = xdr->tail;
	char *buf = (char *)head->iov_base;
	unsigned int buflen = head->iov_len;

	head->iov_len  = offset;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	tail->iov_base = buf + offset;
	tail->iov_len = buflen - offset;
	xdr->buflen += len;
}
EXPORT_SYMBOL_GPL(xdr_inline_pages);
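
/*
 * Illustrative sketch (not from the original file): an NFS READ call
 * might splice the caller's pages in behind the reply header it has
 * reserved; replen and args are hypothetical here.
 *
 *	xdr_inline_pages(&req->rq_rcv_buf, replen << 2,
 *			 args->pages, args->pgbase, args->count);
 */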
/*
 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
 */
/**
 * _shift_data_left_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_SHIFT) + base
 * Also note: pgto_base must be < pgfrom_base, but the memory areas
 *	they point to may overlap.
 */
static void
_shift_data_left_pages(struct page **pages, size_t pgto_base,
			size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgfrom_base <= pgto_base);

	pgto = pages + (pgto_base >> PAGE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_SHIFT);

	pgto_base &= ~PAGE_MASK;
	pgfrom_base &= ~PAGE_MASK;

	do {
		if (pgto_base >= PAGE_SIZE) {
			pgto_base = 0;
			pgto++;
		}
		if (pgfrom_base >= PAGE_SIZE){
			pgfrom_base = 0;
			pgfrom++;
		}

		copy = len;
		if (copy > (PAGE_SIZE - pgto_base))
			copy = PAGE_SIZE - pgto_base;
		if (copy > (PAGE_SIZE - pgfrom_base))
			copy = PAGE_SIZE - pgfrom_base;

		vto = kmap_atomic(*pgto);
		if (*pgto != *pgfrom) {
			vfrom = kmap_atomic(*pgfrom);
			memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
			kunmap_atomic(vfrom);
		} else
			memmove(vto + pgto_base, vto + pgfrom_base, copy);
		flush_dcache_page(*pgto);
		kunmap_atomic(vto);

		pgto_base += copy;
		pgfrom_base += copy;

	} while ((len -= copy) != 0);
}
static void
_shift_data_left_tail(struct xdr_buf *buf, unsigned int pgto, size_t len)
{
	struct kvec *tail = buf->tail;

	if (len > tail->iov_len)
		len = tail->iov_len;

	_copy_to_pages(buf->pages,
		       buf->page_base + pgto,
		       (char *)tail->iov_base,
		       len);
	tail->iov_len -= len;

	if (tail->iov_len > 0)
		memmove((char *)tail->iov_base,
				tail->iov_base + len,
				tail->iov_len);
}
/**
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 *	they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
			size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgto_base <= pgfrom_base);

	pgto_base += len;
	pgfrom_base += len;

	pgto = pages + (pgto_base >> PAGE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_SHIFT);

	pgto_base &= ~PAGE_MASK;
	pgfrom_base &= ~PAGE_MASK;

	do {
		/* Are any pointers crossing a page boundary? */
		if (pgto_base == 0) {
			pgto_base = PAGE_SIZE;
			pgto--;
		}
		if (pgfrom_base == 0) {
			pgfrom_base = PAGE_SIZE;
			pgfrom--;
		}

		copy = len;
		if (copy > pgto_base)
			copy = pgto_base;
		if (copy > pgfrom_base)
			copy = pgfrom_base;
		pgto_base -= copy;
		pgfrom_base -= copy;

		vto = kmap_atomic(*pgto);
		if (*pgto != *pgfrom) {
			vfrom = kmap_atomic(*pgfrom);
			memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
			kunmap_atomic(vfrom);
		} else
			memmove(vto + pgto_base, vto + pgfrom_base, copy);
		flush_dcache_page(*pgto);
		kunmap_atomic(vto);

	} while ((len -= copy) != 0);
}
static unsigned int
_shift_data_right_tail(struct xdr_buf *buf, unsigned int pgfrom, size_t len)
{
	struct kvec *tail = buf->tail;
	unsigned int tailbuf_len;
	unsigned int result = 0;
	size_t copy;

	tailbuf_len = buf->buflen - buf->head->iov_len - buf->page_len;

	/* Shift the tail first */
	if (tailbuf_len != 0) {
		unsigned int free_space = tailbuf_len - tail->iov_len;

		if (len < free_space)
			free_space = len;
		if (len > free_space)
			len = free_space;

		tail->iov_len += free_space;
		copy = len;

		if (tail->iov_len > len) {
			char *p = (char *)tail->iov_base + len;
			memmove(p, tail->iov_base, tail->iov_len - free_space);
			result += tail->iov_len - free_space;
		} else
			copy = tail->iov_len;

		/* Copy from the inlined pages into the tail */
		_copy_from_pages((char *)tail->iov_base,
				 buf->pages,
				 buf->page_base + pgfrom,
				 copy);
		result += copy;
	}

	return result;
}
/**
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
	struct page **pgto;
	char *vto;
	size_t copy;

	pgto = pages + (pgbase >> PAGE_SHIFT);
	pgbase &= ~PAGE_MASK;

	for (;;) {
		copy = PAGE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vto = kmap_atomic(*pgto);
		memcpy(vto + pgbase, p, copy);
		kunmap_atomic(vto);

		len -= copy;
		if (len == 0)
			break;

		pgbase += copy;
		if (pgbase == PAGE_SIZE) {
			flush_dcache_page(*pgto);
			pgbase = 0;
			pgto++;
		}
		p += copy;
	}
	flush_dcache_page(*pgto);
}
/**
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: offset of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages
 * The copy is assumed to be non-overlapping.
 */
void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
	struct page **pgfrom;
	char *vfrom;
	size_t copy;

	pgfrom = pages + (pgbase >> PAGE_SHIFT);
	pgbase &= ~PAGE_MASK;

	do {
		copy = PAGE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vfrom = kmap_atomic(*pgfrom);
		memcpy(p, vfrom + pgbase, copy);
		kunmap_atomic(vfrom);

		pgbase += copy;
		if (pgbase == PAGE_SIZE) {
			pgbase = 0;
			pgfrom++;
		}
		p += copy;

	} while ((len -= copy) != 0);
}
EXPORT_SYMBOL_GPL(_copy_from_pages);
/**
 * _zero_pages
 * @pages: array of pages
 * @pgbase: beginning page vector address
 * @len: length
 */
static void
_zero_pages(struct page **pages, size_t pgbase, size_t len)
{
	struct page **page;
	char *vpage;
	size_t zero;

	page = pages + (pgbase >> PAGE_SHIFT);
	pgbase &= ~PAGE_MASK;

	do {
		zero = PAGE_SIZE - pgbase;
		if (zero > len)
			zero = len;

		vpage = kmap_atomic(*page);
		memset(vpage + pgbase, 0, zero);
		kunmap_atomic(vpage);

		flush_dcache_page(*page);
		pgbase = 0;
		page++;

	} while ((len -= zero) != 0);
}
/**
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: bytes to remove from buf->head[0]
 *
 * Shrinks XDR buffer's header kvec buf->head[0] by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 */
static unsigned int
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
	struct kvec *head, *tail;
	size_t copy, offs;
	unsigned int pglen = buf->page_len;
	unsigned int result;

	result = 0;
	tail = buf->tail;
	head = buf->head;

	WARN_ON_ONCE(len > head->iov_len);
	if (len > head->iov_len)
		len = head->iov_len;

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove((char *)tail->iov_base + len,
					tail->iov_base, copy);
			result += copy;
		}
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > pglen)
			copy = pglen;
		offs = len - copy;
		if (offs >= tail->iov_len)
			copy = 0;
		else if (copy > tail->iov_len - offs)
			copy = tail->iov_len - offs;
		if (copy != 0) {
			_copy_from_pages((char *)tail->iov_base + offs,
					buf->pages,
					buf->page_base + pglen + offs - len,
					copy);
			result += copy;
		}
		/* Do we also need to copy data from the head into the tail ? */
		if (len > pglen) {
			offs = copy = len - pglen;
			if (copy > tail->iov_len)
				copy = tail->iov_len;
			memcpy(tail->iov_base,
					(char *)head->iov_base +
					head->iov_len - offs,
					copy);
			result += copy;
		}
	}
	/* Now handle pages */
	if (pglen != 0) {
		if (pglen > len)
			_shift_data_right_pages(buf->pages,
					buf->page_base + len,
					buf->page_base,
					pglen - len);
		copy = len;
		if (len > pglen)
			copy = pglen;
		_copy_to_pages(buf->pages, buf->page_base,
				(char *)head->iov_base + head->iov_len - len,
				copy);
		result += copy;
	}
	head->iov_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;

	return result;
}
/**
 * xdr_shrink_pagelen - shrinks buf->pages by up to @len bytes
 * @buf: xdr_buf
 * @len: bytes to remove from buf->pages
 *
 * The extra data is not lost, but is instead moved into buf->tail.
 * Returns the actual number of bytes moved.
 */
static unsigned int
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
	unsigned int pglen = buf->page_len;
	unsigned int result;

	if (len > buf->page_len)
		len = buf->page_len;

	result = _shift_data_right_tail(buf, pglen - len, len);
	buf->page_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;

	return result;
}
void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
	xdr_shrink_bufhead(buf, len);
}
EXPORT_SYMBOL_GPL(xdr_shift_buf);
/**
 * xdr_stream_pos - Return the current offset from the start of the xdr_stream
 * @xdr: pointer to struct xdr_stream
 */
unsigned int xdr_stream_pos(const struct xdr_stream *xdr)
{
	return (unsigned int)(XDR_QUADLEN(xdr->buf->len) - xdr->nwords) << 2;
}
EXPORT_SYMBOL_GPL(xdr_stream_pos);
/**
 * xdr_page_pos - Return the current offset from the start of the xdr pages
 * @xdr: pointer to struct xdr_stream
 */
unsigned int xdr_page_pos(const struct xdr_stream *xdr)
{
	unsigned int pos = xdr_stream_pos(xdr);

	WARN_ON(pos < xdr->buf->head[0].iov_len);
	return pos - xdr->buf->head[0].iov_len;
}
EXPORT_SYMBOL_GPL(xdr_page_pos);
/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 * @rqst: pointer to controlling rpc_rqst, for debugging
 *
 * Note: at the moment the RPC client only passes the length of our
 *	 scratch buffer in the xdr_buf's header kvec. Previously this
 *	 meant we needed to call xdr_adjust_iovec() after encoding the
 *	 data. With the new scheme, the xdr_stream manages the details
 *	 of the buffer length, and takes care of adjusting the kvec
 *	 length for us.
 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p,
		     struct rpc_rqst *rqst)
{
	struct kvec *iov = buf->head;
	int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

	xdr_set_scratch_buffer(xdr, NULL, 0);
	BUG_ON(scratch_len < 0);
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
	xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
	BUG_ON(iov->iov_len > scratch_len);

	if (p != xdr->p && p != NULL) {
		size_t len;

		BUG_ON(p < xdr->p || p > xdr->end);
		len = (char *)p - (char *)xdr->p;
		xdr->p = p;
		buf->len += len;
		iov->iov_len += len;
	}
	xdr->rqst = rqst;
}
EXPORT_SYMBOL_GPL(xdr_init_encode);
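
/*
 * Illustrative sketch (not from the original file): typical encode-side
 * setup; req and the argument names below are hypothetical.
 *
 *	xdr_init_encode(&xdr, &req->rq_snd_buf, p, req);
 *	p = xdr_reserve_space(&xdr, 2 * sizeof(__be32));
 *	if (!p)
 *		return -EMSGSIZE;
 *	*p++ = cpu_to_be32(arg->mode);
 *	*p = cpu_to_be32(arg->mask);
 */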
/**
 * xdr_commit_encode - Ensure all data is written to buffer
 * @xdr: pointer to xdr_stream
 *
 * We handle encoding across page boundaries by giving the caller a
 * temporary location to write to, then later copying the data into
 * place; xdr_commit_encode does that copying.
 *
 * Normally the caller doesn't need to call this directly, as the
 * following xdr_reserve_space will do it.  But an explicit call may be
 * required at the end of encoding, or any other time when the xdr_buf
 * data might be read.
 */
inline void xdr_commit_encode(struct xdr_stream *xdr)
{
	int shift = xdr->scratch.iov_len;
	void *page;

	if (shift == 0)
		return;
	page = page_address(*xdr->page_ptr);
	memcpy(xdr->scratch.iov_base, page, shift);
	memmove(page, page + shift, (void *)xdr->p - page);
	xdr->scratch.iov_len = 0;
}
EXPORT_SYMBOL_GPL(xdr_commit_encode);
static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
		size_t nbytes)
{
	__be32 *p;
	int space_left;
	int frag1bytes, frag2bytes;

	if (nbytes > PAGE_SIZE)
		goto out_overflow; /* Bigger buffers require special handling */
	if (xdr->buf->len + nbytes > xdr->buf->buflen)
		goto out_overflow; /* Sorry, we're totally out of space */
	frag1bytes = (xdr->end - xdr->p) << 2;
	frag2bytes = nbytes - frag1bytes;
	if (xdr->iov)
		xdr->iov->iov_len += frag1bytes;
	else
		xdr->buf->page_len += frag1bytes;
	xdr->page_ptr++;
	xdr->iov = NULL;
	/*
	 * If the last encode didn't end exactly on a page boundary, the
	 * next one will straddle boundaries.  Encode into the next
	 * page, then copy it back later in xdr_commit_encode.  We use
	 * the "scratch" iov to track any temporarily unused fragment of
	 * space at the end of the previous buffer:
	 */
	xdr->scratch.iov_base = xdr->p;
	xdr->scratch.iov_len = frag1bytes;
	p = page_address(*xdr->page_ptr);
	/*
	 * Note this is where the next encode will start after we've
	 * shifted this one back:
	 */
	xdr->p = (void *)p + frag2bytes;
	space_left = xdr->buf->buflen - xdr->buf->len;
	xdr->end = (void *)p + min_t(int, space_left, PAGE_SIZE);
	xdr->buf->page_len += frag2bytes;
	xdr->buf->len += nbytes;
	return p;
out_overflow:
	trace_rpc_xdr_overflow(xdr, nbytes);
	return NULL;
}
/**
 * xdr_reserve_space - Reserve buffer space for sending
 * @xdr: pointer to xdr_stream
 * @nbytes: number of bytes to reserve
 *
 * Checks that we have enough buffer space to encode 'nbytes' more
 * bytes of data. If so, update the total xdr_buf length, and
 * adjust the length of the current kvec.
 */
__be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p = xdr->p;
	__be32 *q;

	xdr_commit_encode(xdr);
	/* align nbytes on the next 32-bit boundary */
	nbytes += 3;
	nbytes &= ~3;
	q = p + (nbytes >> 2);
	if (unlikely(q > xdr->end || q < p))
		return xdr_get_next_encode_buffer(xdr, nbytes);
	xdr->p = q;
	if (xdr->iov)
		xdr->iov->iov_len += nbytes;
	else
		xdr->buf->page_len += nbytes;
	xdr->buf->len += nbytes;
	return p;
}
EXPORT_SYMBOL_GPL(xdr_reserve_space);
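
/*
 * Illustrative sketch (not from the original file): the reservation is
 * rounded up to a quad boundary, so asking for 5 bytes consumes 8 in
 * the stream:
 *
 *	p = xdr_reserve_space(xdr, 5);
 *	if (!p)
 *		return -EMSGSIZE;
 *	p = xdr_encode_opaque(p, data, 1);	// 4-byte length + 1 + pad
 */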
/**
 * xdr_reserve_space_vec - Reserves a large amount of buffer space for sending
 * @xdr: pointer to xdr_stream
 * @vec: pointer to a kvec array
 * @nbytes: number of bytes to reserve
 *
 * Reserves enough buffer space to encode 'nbytes' of data and stores the
 * pointers in 'vec'. The size argument passed to xdr_reserve_space() is
 * determined based on the number of bytes remaining in the current page to
 * avoid invalidating iov_base pointers when xdr_commit_encode() is called.
 */
int xdr_reserve_space_vec(struct xdr_stream *xdr, struct kvec *vec, size_t nbytes)
{
	int thislen;
	int v = 0;
	__be32 *p;

	/*
	 * svcrdma requires every READ payload to start somewhere
	 * in xdr->pages.
	 */
	if (xdr->iov == xdr->buf->head) {
		xdr->iov = NULL;
		xdr->end = xdr->p;
	}

	while (nbytes) {
		thislen = xdr->buf->page_len % PAGE_SIZE;
		thislen = min_t(size_t, nbytes, PAGE_SIZE - thislen);

		p = xdr_reserve_space(xdr, thislen);
		if (!p)
			return -EIO;

		vec[v].iov_base = p;
		vec[v].iov_len = thislen;
		v++;
		nbytes -= thislen;
	}

	return v;
}
EXPORT_SYMBOL_GPL(xdr_reserve_space_vec);
/**
 * xdr_truncate_encode - truncate an encode buffer
 * @xdr: pointer to xdr_stream
 * @len: new length of buffer
 *
 * Truncates the xdr stream, so that xdr->buf->len == len,
 * and xdr->p points at offset len from the start of the buffer, and
 * head, tail, and page lengths are adjusted to correspond.
 *
 * If this means moving xdr->p to a different buffer, we assume that
 * the end pointer should be set to the end of the current page,
 * except in the case of the head buffer when we assume the head
 * buffer's current length represents the end of the available buffer.
 *
 * This is *not* safe to use on a buffer that already has inlined page
 * cache pages (as in a zero-copy server read reply), except for the
 * simple case of truncating from one position in the tail to another.
 *
 */
void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *head = buf->head;
	struct kvec *tail = buf->tail;
	int fraglen;
	int new;

	if (len > buf->len) {
		WARN_ON_ONCE(1);
		return;
	}
	xdr_commit_encode(xdr);

	fraglen = min_t(int, buf->len - len, tail->iov_len);
	tail->iov_len -= fraglen;
	buf->len -= fraglen;
	if (tail->iov_len) {
		xdr->p = tail->iov_base + tail->iov_len;
		WARN_ON_ONCE(!xdr->end);
		WARN_ON_ONCE(!xdr->iov);
		return;
	}
	WARN_ON_ONCE(fraglen);
	fraglen = min_t(int, buf->len - len, buf->page_len);
	buf->page_len -= fraglen;
	buf->len -= fraglen;

	new = buf->page_base + buf->page_len;

	xdr->page_ptr = buf->pages + (new >> PAGE_SHIFT);

	if (buf->page_len) {
		xdr->p = page_address(*xdr->page_ptr);
		xdr->end = (void *)xdr->p + PAGE_SIZE;
		xdr->p = (void *)xdr->p + (new % PAGE_SIZE);
		WARN_ON_ONCE(xdr->iov);
		return;
	}
	if (fraglen)
		xdr->end = head->iov_base + head->iov_len;
	/* (otherwise assume xdr->end is already set) */
	xdr->page_ptr--;
	head->iov_len = len;
	buf->len = len;
	xdr->p = head->iov_base + head->iov_len;
	xdr->iov = buf->head;
}
EXPORT_SYMBOL(xdr_truncate_encode);
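
/*
 * Illustrative sketch (not from the original file): the usual pattern,
 * as in nfsd's directory encoders, is to note the length before an
 * entry and roll back if the entry turns out not to fit; the names
 * below are hypothetical.
 *
 *	len_before = xdr->buf->len;
 *	if (encode_entry(xdr, entry) < 0)
 *		xdr_truncate_encode(xdr, len_before);
 */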
/**
 * xdr_restrict_buflen - decrease available buffer space
 * @xdr: pointer to xdr_stream
 * @newbuflen: new maximum number of bytes available
 *
 * Adjust our idea of how much space is available in the buffer.
 * If we've already used too much space in the buffer, returns -1.
 * If the available space is already smaller than newbuflen, returns 0
 * and does nothing.  Otherwise, adjusts xdr->buf->buflen to newbuflen
 * and ensures xdr->end is set at most offset newbuflen from the start
 * of the buffer.
 */
int xdr_restrict_buflen(struct xdr_stream *xdr, int newbuflen)
{
	struct xdr_buf *buf = xdr->buf;
	int left_in_this_buf = (void *)xdr->end - (void *)xdr->p;
	int end_offset = buf->len + left_in_this_buf;

	if (newbuflen < 0 || newbuflen < buf->len)
		return -1;
	if (newbuflen > buf->buflen)
		return 0;
	if (newbuflen < end_offset)
		xdr->end = (void *)xdr->end + newbuflen - end_offset;
	buf->buflen = newbuflen;
	return 0;
}
EXPORT_SYMBOL(xdr_restrict_buflen);
/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: list of pages
 * @base: offset of first byte
 * @len: length of data in bytes
 *
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov = buf->tail;
	buf->pages = pages;
	buf->page_base = base;
	buf->page_len = len;

	iov->iov_base = (char *)xdr->p;
	iov->iov_len = 0;
	xdr->iov = iov;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		BUG_ON(xdr->p >= xdr->end);
		iov->iov_base = (char *)xdr->p + (len & 3);
		iov->iov_len += pad;
		len += pad;
		*xdr->p++ = 0;
	}
	buf->buflen += len;
	buf->len += len;
}
EXPORT_SYMBOL_GPL(xdr_write_pages);
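
/*
 * Illustrative sketch (not from the original file): a WRITE-style call
 * hands its payload pages to the send buffer after encoding the fixed
 * header; args is hypothetical.
 *
 *	xdr_write_pages(xdr, args->pages, args->pgbase, args->count);
 */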
static unsigned int xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
				unsigned int base, unsigned int len)
{
	if (len > iov->iov_len)
		len = iov->iov_len;
	if (unlikely(base > len))
		base = len;
	xdr->p = (__be32*)(iov->iov_base + base);
	xdr->end = (__be32*)(iov->iov_base + len);
	xdr->iov = iov;
	xdr->page_ptr = NULL;
	return len - base;
}
static unsigned int xdr_set_page_base(struct xdr_stream *xdr,
				      unsigned int base, unsigned int len)
{
	unsigned int pgnr;
	unsigned int maxlen;
	unsigned int pgoff;
	unsigned int pgend;
	void *kaddr;

	maxlen = xdr->buf->page_len;
	if (base >= maxlen) {
		base = maxlen;
		maxlen = 0;
	} else
		maxlen -= base;
	if (len > maxlen)
		len = maxlen;

	base += xdr->buf->page_base;

	pgnr = base >> PAGE_SHIFT;
	xdr->page_ptr = &xdr->buf->pages[pgnr];
	kaddr = page_address(*xdr->page_ptr);

	pgoff = base & ~PAGE_MASK;
	xdr->p = (__be32*)(kaddr + pgoff);

	pgend = pgoff + len;
	if (pgend > PAGE_SIZE)
		pgend = PAGE_SIZE;
	xdr->end = (__be32*)(kaddr + pgend);
	xdr->iov = NULL;
	return len;
}
static void xdr_set_page(struct xdr_stream *xdr, unsigned int base,
			 unsigned int len)
{
	if (xdr_set_page_base(xdr, base, len) == 0)
		xdr_set_iov(xdr, xdr->buf->tail, 0, xdr_stream_remaining(xdr));
}
static void xdr_set_next_page(struct xdr_stream *xdr)
{
	unsigned int newbase;

	newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
	newbase -= xdr->buf->page_base;

	xdr_set_page(xdr, newbase, PAGE_SIZE);
}
static bool xdr_set_next_buffer(struct xdr_stream *xdr)
{
	if (xdr->page_ptr != NULL)
		xdr_set_next_page(xdr);
	else if (xdr->iov == xdr->buf->head) {
		xdr_set_page(xdr, 0, PAGE_SIZE);
	}
	return xdr->p != xdr->end;
}
/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 * @rqst: pointer to controlling rpc_rqst, for debugging
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p,
		     struct rpc_rqst *rqst)
{
	xdr->buf = buf;
	xdr->scratch.iov_base = NULL;
	xdr->scratch.iov_len = 0;
	xdr->nwords = XDR_QUADLEN(buf->len);
	if (xdr_set_iov(xdr, buf->head, 0, buf->len) == 0 &&
	    xdr_set_page_base(xdr, 0, buf->len) == 0)
		xdr_set_iov(xdr, buf->tail, 0, buf->len);
	if (p != NULL && p > xdr->p && xdr->end >= p) {
		xdr->nwords -= p - xdr->p;
		xdr->p = p;
	}
	xdr->rqst = rqst;
}
EXPORT_SYMBOL_GPL(xdr_init_decode);
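
/*
 * Illustrative sketch (not from the original file): decode-side setup
 * mirrors the encode side; req here is hypothetical.
 *
 *	xdr_init_decode(&xdr, &req->rq_rcv_buf, p, req);
 *	p = xdr_inline_decode(&xdr, sizeof(__be32));
 *	if (!p)
 *		return -EBADMSG;
 *	status = be32_to_cpup(p);
 */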
/**
 * xdr_init_decode_pages - Initialize an xdr_stream for decoding into pages
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @pages: list of pages to decode into
 * @len: length in bytes of buffer in pages
 */
void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
			   struct page **pages, unsigned int len)
{
	memset(buf, 0, sizeof(*buf));
	buf->pages = pages;
	buf->page_len = len;
	buf->buflen = len;
	buf->len = len;
	xdr_init_decode(xdr, buf, NULL, NULL);
}
EXPORT_SYMBOL_GPL(xdr_init_decode_pages);
static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	unsigned int nwords = XDR_QUADLEN(nbytes);
	__be32 *p = xdr->p;
	__be32 *q = p + nwords;

	if (unlikely(nwords > xdr->nwords || q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->nwords -= nwords;
	return p;
}
/**
 * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to an empty buffer
 * @buflen: size of 'buf'
 *
 * The scratch buffer is used when decoding from an array of pages.
 * If an xdr_inline_decode() call spans across page boundaries, then
 * we copy the data into the scratch buffer in order to allow linear
 * access.
 */
void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen)
{
	xdr->scratch.iov_base = buf;
	xdr->scratch.iov_len = buflen;
}
EXPORT_SYMBOL_GPL(xdr_set_scratch_buffer);
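
/*
 * Illustrative sketch (not from the original file): decoders that expect
 * objects to straddle page boundaries usually attach a whole scratch
 * page; scratch_page is hypothetical.
 *
 *	xdr_set_scratch_buffer(xdr, page_address(scratch_page), PAGE_SIZE);
 */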
static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p;
	char *cpdest = xdr->scratch.iov_base;
	size_t cplen = (char *)xdr->end - (char *)xdr->p;

	if (nbytes > xdr->scratch.iov_len)
		goto out_overflow;
	p = __xdr_inline_decode(xdr, cplen);
	if (p == NULL)
		return NULL;
	memcpy(cpdest, p, cplen);
	if (!xdr_set_next_buffer(xdr))
		goto out_overflow;
	cpdest += cplen;
	nbytes -= cplen;
	p = __xdr_inline_decode(xdr, nbytes);
	if (p == NULL)
		return NULL;
	memcpy(cpdest, p, nbytes);
	return xdr->scratch.iov_base;
out_overflow:
	trace_rpc_xdr_overflow(xdr, nbytes);
	return NULL;
}
/**
 * xdr_inline_decode - Retrieve XDR data to decode
 * @xdr: pointer to xdr_stream struct
 * @nbytes: number of bytes of data to decode
 *
 * Check if the input buffer is long enough to enable us to decode
 * 'nbytes' more bytes of data starting at the current position.
 * If so return the current pointer, then update the current
 * pointer position.
 */
__be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p;

	if (unlikely(nbytes == 0))
		return xdr->p;
	if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
		goto out_overflow;
	p = __xdr_inline_decode(xdr, nbytes);
	if (p != NULL)
		return p;
	return xdr_copy_to_scratch(xdr, nbytes);
out_overflow:
	trace_rpc_xdr_overflow(xdr, nbytes);
	return NULL;
}
EXPORT_SYMBOL_GPL(xdr_inline_decode);
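
/*
 * Illustrative sketch (not from the original file): pulling two fixed
 * words from the stream.
 *
 *	p = xdr_inline_decode(xdr, 2 * sizeof(__be32));
 *	if (unlikely(!p))
 *		return -EBADMSG;
 *	type = be32_to_cpup(p++);
 *	count = be32_to_cpup(p);
 */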
static void xdr_realign_pages(struct xdr_stream *xdr)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov = buf->head;
	unsigned int cur = xdr_stream_pos(xdr);
	unsigned int copied, offset;

	/* Realign pages to current pointer position */
	if (iov->iov_len > cur) {
		offset = iov->iov_len - cur;
		copied = xdr_shrink_bufhead(buf, offset);
		trace_rpc_xdr_alignment(xdr, offset, copied);
		xdr->nwords = XDR_QUADLEN(buf->len - cur);
	}
}
static unsigned int xdr_align_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	unsigned int nwords = XDR_QUADLEN(len);
	unsigned int cur = xdr_stream_pos(xdr);
	unsigned int copied, offset;

	if (xdr->nwords == 0)
		return 0;

	xdr_realign_pages(xdr);
	if (nwords > xdr->nwords) {
		nwords = xdr->nwords;
		len = nwords << 2;
	}
	if (buf->page_len <= len)
		len = buf->page_len;
	else if (nwords < xdr->nwords) {
		/* Truncate page data and move it into the tail */
		offset = buf->page_len - len;
		copied = xdr_shrink_pagelen(buf, offset);
		trace_rpc_xdr_alignment(xdr, offset, copied);
		xdr->nwords = XDR_QUADLEN(buf->len - cur);
	}
	return len;
}
/**
 * xdr_read_pages - align page-based XDR data to current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + @len
 * bytes is moved into the XDR tail[]. The xdr_stream current position is
 * then advanced past that data to align to the next XDR object in the tail.
 *
 * Returns the number of XDR encoded bytes now contained in the pages
 */
unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
	unsigned int nwords = XDR_QUADLEN(len);
	unsigned int base, end, pglen;

	pglen = xdr_align_pages(xdr, nwords << 2);
	if (pglen == 0)
		return 0;

	xdr->nwords -= nwords;
	base = (nwords << 2) - pglen;
	end = xdr_stream_remaining(xdr) - pglen;

	if (xdr_set_iov(xdr, xdr->buf->tail, base, end) == 0)
		xdr->nwords = 0;
	return len <= pglen ? len : pglen;
}
EXPORT_SYMBOL_GPL(xdr_read_pages);
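
/*
 * Illustrative sketch (not from the original file): a READ reply decoder
 * clamps the returned count to what actually arrived in the pages:
 *
 *	count = be32_to_cpup(p);
 *	recvd = xdr_read_pages(xdr, count);
 *	if (count > recvd)
 *		count = recvd;		// server sent a short read
 */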
uint64_t xdr_align_data(struct xdr_stream *xdr, uint64_t offset, uint32_t length)
{
	struct xdr_buf *buf = xdr->buf;
	unsigned int from, bytes;
	unsigned int shift = 0;

	if ((offset + length) < offset ||
	    (offset + length) > buf->page_len)
		length = buf->page_len - offset;

	xdr_realign_pages(xdr);
	from = xdr_page_pos(xdr);
	bytes = xdr->nwords << 2;
	if (length < bytes)
		bytes = length;

	/* Move page data to the left */
	if (from > offset) {
		shift = min_t(unsigned int, bytes, buf->page_len - from);
		_shift_data_left_pages(buf->pages,
				       buf->page_base + offset,
				       buf->page_base + from,
				       shift);
		bytes -= shift;

		/* Move tail data into the pages, if necessary */
		if (bytes > 0)
			_shift_data_left_tail(buf, offset + shift, bytes);
	}

	xdr->nwords -= XDR_QUADLEN(length);
	xdr_set_page(xdr, from + length, PAGE_SIZE);
	return length;
}
EXPORT_SYMBOL_GPL(xdr_align_data);
uint64_t xdr_expand_hole(struct xdr_stream *xdr, uint64_t offset, uint64_t length)
{
	struct xdr_buf *buf = xdr->buf;
	unsigned int bytes;
	unsigned int from;
	unsigned int truncated = 0;

	if ((offset + length) < offset ||
	    (offset + length) > buf->page_len)
		length = buf->page_len - offset;

	xdr_realign_pages(xdr);
	from = xdr_page_pos(xdr);
	bytes = xdr->nwords << 2;

	if (offset + length + bytes > buf->page_len) {
		unsigned int shift = (offset + length + bytes) - buf->page_len;
		unsigned int res = _shift_data_right_tail(buf, from + bytes - shift, shift);
		truncated = shift - res;
		xdr->nwords -= XDR_QUADLEN(truncated);
		bytes -= shift;
	}

	/* Now move the page data over and zero pages */
	if (bytes > 0)
		_shift_data_right_pages(buf->pages,
					buf->page_base + offset + length,
					buf->page_base + from,
					bytes);
	_zero_pages(buf->pages, buf->page_base + offset, length);

	buf->len += length - (from - offset) - truncated;
	xdr_set_page(xdr, offset + length, PAGE_SIZE);
	return length;
}
EXPORT_SYMBOL_GPL(xdr_expand_hole);
/**
 * xdr_enter_page - decode data from the XDR page
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the first XDR page.
 */
void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
{
	len = xdr_align_pages(xdr, len);
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	if (len != 0)
		xdr_set_page_base(xdr, 0, len);
}
EXPORT_SYMBOL_GPL(xdr_enter_page);
static const struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};

void
xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
{
	buf->head[0] = *iov;
	buf->tail[0] = empty_iov;
	buf->page_len = 0;
	buf->buflen = buf->len = iov->iov_len;
}
EXPORT_SYMBOL_GPL(xdr_buf_from_iov);
/**
 * xdr_buf_subsegment - set subbuf to a portion of buf
 * @buf: an xdr buffer
 * @subbuf: the result buffer
 * @base: beginning of range in bytes
 * @len: length of range in bytes
 *
 * sets @subbuf to an xdr buffer representing the portion of @buf of
 * length @len starting at offset @base.
 *
 * @buf and @subbuf may be pointers to the same struct xdr_buf.
 *
 * Returns -1 if base or length are out of bounds.
 */
int
xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
			unsigned int base, unsigned int len)
{
	subbuf->buflen = subbuf->len = len;
	if (base < buf->head[0].iov_len) {
		subbuf->head[0].iov_base = buf->head[0].iov_base + base;
		subbuf->head[0].iov_len = min_t(unsigned int, len,
						buf->head[0].iov_len - base);
		len -= subbuf->head[0].iov_len;
		base = 0;
	} else {
		base -= buf->head[0].iov_len;
		subbuf->head[0].iov_base = buf->head[0].iov_base;
		subbuf->head[0].iov_len = 0;
	}

	if (base < buf->page_len) {
		subbuf->page_len = min(buf->page_len - base, len);
		base += buf->page_base;
		subbuf->page_base = base & ~PAGE_MASK;
		subbuf->pages = &buf->pages[base >> PAGE_SHIFT];
		len -= subbuf->page_len;
		base = 0;
	} else {
		base -= buf->page_len;
		subbuf->pages = buf->pages;
		subbuf->page_base = 0;
		subbuf->page_len = 0;
	}

	if (base < buf->tail[0].iov_len) {
		subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
		subbuf->tail[0].iov_len = min_t(unsigned int, len,
						buf->tail[0].iov_len - base);
		len -= subbuf->tail[0].iov_len;
		base = 0;
	} else {
		base -= buf->tail[0].iov_len;
		subbuf->tail[0].iov_base = buf->tail[0].iov_base;
		subbuf->tail[0].iov_len = 0;
	}

	if (base || len)
		return -1;
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
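
/*
 * Illustrative sketch (not from the original file): carving out the
 * region covered by an integrity checksum, in the style of the GSS
 * code; the offset and length names are hypothetical.
 *
 *	struct xdr_buf integ_buf;
 *
 *	if (xdr_buf_subsegment(buf, &integ_buf, integ_offset, integ_len))
 *		return -EFAULT;
 */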
/**
 * xdr_buf_trim - lop at most "len" bytes off the end of "buf"
 * @buf: buf to be trimmed
 * @len: number of bytes to reduce "buf" by
 *
 * Trim an xdr_buf by the given number of bytes by fixing up the lengths. Note
 * that it's possible that we'll trim less than that amount if the xdr_buf is
 * too small, or if (for instance) it's all in the head and the parser has
 * already read too far into it.
 */
void xdr_buf_trim(struct xdr_buf *buf, unsigned int len)
{
	size_t cur;
	unsigned int trim = len;

	if (buf->tail[0].iov_len) {
		cur = min_t(size_t, buf->tail[0].iov_len, trim);
		buf->tail[0].iov_len -= cur;
		trim -= cur;
		if (!trim)
			goto fix_len;
	}

	if (buf->page_len) {
		cur = min_t(unsigned int, buf->page_len, trim);
		buf->page_len -= cur;
		trim -= cur;
		if (!trim)
			goto fix_len;
	}

	if (buf->head[0].iov_len) {
		cur = min_t(size_t, buf->head[0].iov_len, trim);
		buf->head[0].iov_len -= cur;
		trim -= cur;
	}
fix_len:
	buf->len -= (len - trim);
}
EXPORT_SYMBOL_GPL(xdr_buf_trim);
static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(obj, subbuf->head[0].iov_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(obj, subbuf->tail[0].iov_base, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__read_bytes_from_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);
static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(subbuf->head[0].iov_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(subbuf->tail[0].iov_base, obj, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__write_bytes_to_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf);
int
xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
{
	__be32	raw;
	int	status;

	status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
	if (status)
		return status;
	*obj = be32_to_cpu(raw);
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_decode_word);

int
xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
{
	__be32	raw = cpu_to_be32(obj);

	return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
}
EXPORT_SYMBOL_GPL(xdr_encode_word);
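
/*
 * Illustrative sketch (not from the original file): patching a count at
 * a known offset without caring whether it lives in head, pages or tail:
 *
 *	u32 count;
 *
 *	if (xdr_decode_word(buf, offset, &count))
 *		return -EIO;
 *	if (xdr_encode_word(buf, offset, count + 1))
 *		return -EIO;
 */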
/* Returns 0 on success, or else a negative error code. */
static int
xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
		 struct xdr_array2_desc *desc, int encode)
{
	char *elem = NULL, *c;
	unsigned int copied = 0, todo, avail_here;
	struct page **ppages = NULL;
	int err;

	if (encode) {
		if (xdr_encode_word(buf, base, desc->array_len) != 0)
			return -EINVAL;
	} else {
		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
		    desc->array_len > desc->array_maxlen ||
		    (unsigned long) base + 4 + desc->array_len *
				    desc->elem_size > buf->len)
			return -EINVAL;
	}
	base += 4;

	if (!desc->xcode)
		return 0;

	todo = desc->array_len * desc->elem_size;

	/* process head */
	if (todo && base < buf->head->iov_len) {
		c = buf->head->iov_base + base;
		avail_here = min_t(unsigned int, todo,
				   buf->head->iov_len - base);
		todo -= avail_here;

		while (avail_here >= desc->elem_size) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			avail_here -= desc->elem_size;
		}
		if (avail_here) {
			if (!elem) {
				elem = kmalloc(desc->elem_size, GFP_KERNEL);
				err = -ENOMEM;
				if (!elem)
					goto out;
			}
			if (encode) {
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
				memcpy(c, elem, avail_here);
			} else
				memcpy(elem, c, avail_here);
			copied = avail_here;
		}
		base = buf->head->iov_len;  /* align to start of pages */
	}

	/* process pages array */
	base -= buf->head->iov_len;
	if (todo && base < buf->page_len) {
		unsigned int avail_page;

		avail_here = min(todo, buf->page_len - base);
		todo -= avail_here;

		base += buf->page_base;
		ppages = buf->pages + (base >> PAGE_SHIFT);
		base &= ~PAGE_MASK;
		avail_page = min_t(unsigned int, PAGE_SIZE - base,
					avail_here);
		c = kmap(*ppages) + base;

		while (avail_here) {
			avail_here -= avail_page;
			if (copied || avail_page < desc->elem_size) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
				avail_page -= l;
				c += l;
			}
			while (avail_page >= desc->elem_size) {
				err = desc->xcode(desc, c);
				if (err)
					goto out;
				c += desc->elem_size;
				avail_page -= desc->elem_size;
			}
			if (avail_page) {
				unsigned int l = min(avail_page,
					    desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
			}
			if (avail_here) {
				kunmap(*ppages);
				ppages++;
				c = kmap(*ppages);
			}

			avail_page = min(avail_here,
				 (unsigned int) PAGE_SIZE);
		}
		base = buf->page_len;  /* align to start of tail */
	}

	/* process tail */
	base -= buf->page_len;
	if (todo) {
		c = buf->tail->iov_base + base;
		if (copied) {
			unsigned int l = desc->elem_size - copied;

			if (encode)
				memcpy(c, elem + copied, l);
			else {
				memcpy(elem + copied, c, l);
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
			}
			todo -= l;
			c += l;
		}
		while (todo) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			todo -= desc->elem_size;
		}
	}
	err = 0;

out:
	kfree(elem);
	if (ppages)
		kunmap(*ppages);
	return err;
}
int
xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if (base >= buf->len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 0);
}
EXPORT_SYMBOL_GPL(xdr_decode_array2);

int
xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
	    buf->head->iov_len + buf->page_len + buf->tail->iov_len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 1);
}
EXPORT_SYMBOL_GPL(xdr_encode_array2);
int
xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
		int (*actor)(struct scatterlist *, void *), void *data)
{
	int i, ret = 0;
	unsigned int page_len, thislen, page_offset;
	struct scatterlist sg[1];

	sg_init_table(sg, 1);

	if (offset >= buf->head[0].iov_len) {
		offset -= buf->head[0].iov_len;
	} else {
		thislen = buf->head[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		if (ret)
			goto out;
		offset = 0;
		len -= thislen;
	}
	if (len == 0)
		goto out;

	if (offset >= buf->page_len) {
		offset -= buf->page_len;
	} else {
		page_len = buf->page_len - offset;
		if (page_len > len)
			page_len = len;
		len -= page_len;
		page_offset = (offset + buf->page_base) & (PAGE_SIZE - 1);
		i = (offset + buf->page_base) >> PAGE_SHIFT;
		thislen = PAGE_SIZE - page_offset;
		do {
			if (thislen > page_len)
				thislen = page_len;
			sg_set_page(sg, buf->pages[i], thislen, page_offset);
			ret = actor(sg, data);
			if (ret)
				goto out;
			page_len -= thislen;
			i++;
			page_offset = 0;
			thislen = PAGE_SIZE;
		} while (page_len != 0);
		offset = 0;
	}
	if (len == 0)
		goto out;
	if (offset < buf->tail[0].iov_len) {
		thislen = buf->tail[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		len -= thislen;
	}
	if (len != 0)
		ret = -EINVAL;
out:
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_process_buf);
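
/*
 * Illustrative sketch (not from the original file): an actor that feeds
 * each fragment to a crypto hash, modelled on the GSS checksumming code;
 * checksummer() appears here only as an example.
 *
 *	static int checksummer(struct scatterlist *sg, void *data)
 *	{
 *		struct ahash_request *req = data;
 *
 *		ahash_request_set_crypt(req, sg, NULL, sg->length);
 *		return crypto_ahash_update(req);
 *	}
 *
 *	err = xdr_process_buf(buf, offset, len, checksummer, req);
 */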
/**
 * xdr_stream_decode_opaque - Decode variable length opaque
 * @xdr: pointer to xdr_stream
 * @ptr: location to store opaque data
 * @size: size of storage buffer @ptr
 *
 * Return values:
 *   On success, returns size of object stored in *@ptr
 *   %-EBADMSG on XDR buffer overflow
 *   %-EMSGSIZE on overflow of storage buffer @ptr
 */
ssize_t xdr_stream_decode_opaque(struct xdr_stream *xdr, void *ptr, size_t size)
{
	ssize_t ret;
	void *p;

	ret = xdr_stream_decode_opaque_inline(xdr, &p, size);
	if (ret <= 0)
		return ret;
	memcpy(ptr, p, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque);
/**
 * xdr_stream_decode_opaque_dup - Decode and duplicate variable length opaque
 * @xdr: pointer to xdr_stream
 * @ptr: location to store pointer to opaque data
 * @maxlen: maximum acceptable object size
 * @gfp_flags: GFP mask to use
 *
 * Return values:
 *   On success, returns size of object stored in *@ptr
 *   %-EBADMSG on XDR buffer overflow
 *   %-EMSGSIZE if the size of the object would exceed @maxlen
 *   %-ENOMEM on memory allocation failure
 */
ssize_t xdr_stream_decode_opaque_dup(struct xdr_stream *xdr, void **ptr,
		size_t maxlen, gfp_t gfp_flags)
{
	ssize_t ret;
	void *p;

	ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen);
	if (ret > 0) {
		*ptr = kmemdup(p, ret, gfp_flags);
		if (*ptr != NULL)
			return ret;
		ret = -ENOMEM;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque_dup);
/**
 * xdr_stream_decode_string - Decode variable length string
 * @xdr: pointer to xdr_stream
 * @str: location to store string
 * @size: size of storage buffer @str
 *
 * Return values:
 *   On success, returns length of NUL-terminated string stored in *@str
 *   %-EBADMSG on XDR buffer overflow
 *   %-EMSGSIZE on overflow of storage buffer @str
 */
ssize_t xdr_stream_decode_string(struct xdr_stream *xdr, char *str, size_t size)
{
	ssize_t ret;
	void *p;

	ret = xdr_stream_decode_opaque_inline(xdr, &p, size);
	if (ret > 0) {
		memcpy(str, p, ret);
		str[ret] = '\0';
		return strlen(str);
	}
	*str = '\0';
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_stream_decode_string);
/**
 * xdr_stream_decode_string_dup - Decode and duplicate variable length string
 * @xdr: pointer to xdr_stream
 * @str: location to store pointer to string
 * @maxlen: maximum acceptable string length
 * @gfp_flags: GFP mask to use
 *
 * Return values:
 *   On success, returns length of NUL-terminated string stored in *@str
 *   %-EBADMSG on XDR buffer overflow
 *   %-EMSGSIZE if the size of the string would exceed @maxlen
 *   %-ENOMEM on memory allocation failure
 */
ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str,
		size_t maxlen, gfp_t gfp_flags)
{
	void *p;
	ssize_t ret;

	ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen);
	if (ret > 0) {
		char *s = kmemdup_nul(p, ret, gfp_flags);
		if (s != NULL) {
			*str = s;
			return strlen(s);
		}
		ret = -ENOMEM;
	}
	*str = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_stream_decode_string_dup);
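
/*
 * Illustrative sketch (not from the original file): duplicating a
 * client-supplied name with a sanity cap; the caller owns the result.
 *
 *	char *name;
 *	ssize_t len;
 *
 *	len = xdr_stream_decode_string_dup(xdr, &name, NAME_MAX, GFP_KERNEL);
 *	if (len < 0)
 *		return len;
 *	...
 *	kfree(name);
 */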