/*
 * Berkeley style UIO structures - Alan Cox 1994.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/thread_info.h>
#include <crypto/hash.h>
#include <uapi/linux/uio.h>
struct page;
struct pipe_inode_info;
struct kvec {
	void *iov_base; /* and that should *never* hold a userland pointer */
	size_t iov_len;
};
enum iter_type {
	/* set if ITER_BVEC doesn't hold a bv_page ref */
	ITER_BVEC_FLAG_NO_REF = 2,

	/* iter types */
	ITER_IOVEC = 4,
	ITER_KVEC = 8,
	ITER_BVEC = 16,
	ITER_PIPE = 32,
	ITER_DISCARD = 64,
};
struct iov_iter {
	/*
	 * Bit 0 is the read/write bit, set if we're writing.
	 * Bit 1 is the BVEC_FLAG_NO_REF bit, set if type is a bvec and
	 * the caller isn't expecting to drop a page reference when done.
	 */
	unsigned int type;
	size_t iov_offset;
	size_t count;
	union {
		const struct iovec *iov;
		const struct kvec *kvec;
		const struct bio_vec *bvec;
		struct pipe_inode_info *pipe;
	};
	union {
		unsigned long nr_segs;
		struct {
			int idx;
			int start_idx;
		};
	};
};
static inline enum iter_type iov_iter_type(const struct iov_iter *i)
{
	return i->type & ~(READ | WRITE | ITER_BVEC_FLAG_NO_REF);
}

static inline bool iter_is_iovec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_IOVEC;
}

static inline bool iov_iter_is_kvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_KVEC;
}

static inline bool iov_iter_is_bvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_BVEC;
}

static inline bool iov_iter_is_pipe(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_PIPE;
}

static inline bool iov_iter_is_discard(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_DISCARD;
}

static inline unsigned char iov_iter_rw(const struct iov_iter *i)
{
	return i->type & (READ | WRITE);
}

static inline bool iov_iter_bvec_no_ref(const struct iov_iter *i)
{
	return (i->type & ITER_BVEC_FLAG_NO_REF) != 0;
}
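
/*
 * A minimal usage sketch (illustrative, not part of this header): the
 * accessors above decode a type word such as
 * (ITER_BVEC | WRITE | ITER_BVEC_FLAG_NO_REF) without callers masking
 * bits by hand. Here the branch is taken for a bvec-backed source whose
 * pages the caller keeps referenced, so the consumer must not drop page
 * references when done; consume_without_page_refs() is a hypothetical
 * helper named only for illustration:
 *
 *	if (iov_iter_is_bvec(i) && iov_iter_rw(i) == WRITE &&
 *	    iov_iter_bvec_no_ref(i))
 *		consume_without_page_refs(i);
 */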
/*
 * Total number of bytes covered by an iovec.
 *
 * NOTE that it is not safe to use this function until all the iovec's
 * segment lengths have been validated, because the individual lengths can
 * overflow a size_t when added together.
 */
static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
	unsigned long seg;
	size_t ret = 0;

	for (seg = 0; seg < nr_segs; seg++)
		ret += iov[seg].iov_len;
	return ret;
}
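
/*
 * Illustrative sketch, assuming both segment lengths were already
 * validated (e.g. by the import paths below) so the sum cannot overflow
 * a size_t; "hdr", "body" and the lengths are assumptions:
 *
 *	struct iovec v[2] = {
 *		{ .iov_base = hdr,  .iov_len = hdr_len  },
 *		{ .iov_base = body, .iov_len = body_len },
 *	};
 *	size_t total = iov_length(v, 2);	hdr_len + body_len
 */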
static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
{
	return (struct iovec) {
		.iov_base = iter->iov->iov_base + iter->iov_offset,
		.iov_len = min(iter->count,
			       iter->iov->iov_len - iter->iov_offset),
	};
}
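
/*
 * What iov_iter_iovec() returns, sketched with hypothetical numbers: for
 * an ITER_IOVEC iterator that is 10 bytes into a 100-byte first segment
 * with 25 bytes left overall,
 *
 *	struct iovec cur = iov_iter_iovec(iter);
 *
 * yields cur.iov_base == iter->iov->iov_base + 10 and
 * cur.iov_len == min(25, 100 - 10) == 25, i.e. the unconsumed part of
 * the current segment, clamped to the remaining count.
 */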
size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i);
static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, true)))
		return 0;
	return _copy_to_iter(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	return _copy_from_iter(addr, bytes, i);
}

static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return false;
	return _copy_from_iter_full(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	return _copy_from_iter_nocache(addr, bytes, i);
}

static __always_inline __must_check
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return false;
	return _copy_from_iter_full_nocache(addr, bytes, i);
}
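
/*
 * Typical use of the checked wrappers, sketched ("st" and "iter" are
 * assumptions for illustration): copy a kernel structure into whatever
 * buffers the iterator describes and treat a short copy as a fault.
 *
 *	size_t copied = copy_to_iter(&st, sizeof(st), iter);
 *
 *	if (copied != sizeof(st))
 *		return -EFAULT;
 */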
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note, users like pmem that depend on copy_from_iter_flushcache()'s
 * stricter semantics relative to copy_from_iter_nocache() must check for
 * IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the
 * destination is flushed from the cache on return.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif

#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_to_iter_mcsafe _copy_to_iter
#endif
static __always_inline __must_check
size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	return _copy_from_iter_flushcache(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_to_iter_mcsafe(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, true)))
		return 0;
	return _copy_to_iter_mcsafe(addr, bytes, i);
}
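
/*
 * Sketch of the check the flushcache note above asks pmem-style users to
 * make; flush_dst_explicitly() is a hypothetical helper, named only for
 * illustration:
 *
 *	size_t n = copy_from_iter_flushcache(dst, bytes, iter);
 *
 *	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE))
 *		flush_dst_explicitly(dst, n);	destination may still be
 *						dirty in the cache here
 */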
size_t iov_iter_zero(size_t bytes, struct iov_iter *);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
			unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec,
			unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
			unsigned long nr_segs, size_t count);
void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe,
			size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
			size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
			size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);
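
/*
 * Constructing an iterator over a single kernel buffer, sketched ("buf"
 * and "len" are assumptions for illustration). READ marks the iterator
 * as the destination of the transfer:
 *
 *	struct kvec kv = { .iov_base = buf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, READ, &kv, 1, len);
 *	npages = iov_iter_npages(&iter, INT_MAX);	pages buf spans
 */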
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);

static inline size_t iov_iter_count(const struct iov_iter *i)
{
	return i->count;
}
/*
 * Cap the iov_iter by the given limit; note that the second argument is
 * *not* the new size - it is the upper limit for it. Passing a value
 * greater than the amount of data in the iov_iter is fine - it will just
 * do nothing in that case.
 */
static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
{
	/*
	 * count doesn't have to fit in size_t - the comparison extends both
	 * operands to u64 here, and any value that would be truncated by
	 * the conversion in the assignment is by definition greater than
	 * all values of size_t, including the old i->count.
	 */
	if (i->count > count)
		i->count = count;
}
/*
 * reexpand a previously truncated iterator; count must be no more than
 * how much we had shrunk it.
 */
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
	i->count = count;
}
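
/*
 * The usual truncate/reexpand pairing, sketched (do_write() and "limit"
 * are assumptions for illustration): clamp the iterator for one
 * operation, then give back whatever was clamped off.
 *
 *	size_t shorted = 0;
 *
 *	if (iov_iter_count(iter) > limit) {
 *		shorted = iov_iter_count(iter) - limit;
 *		iov_iter_truncate(iter, limit);
 *	}
 *	ret = do_write(iter);
 *	iov_iter_reexpand(iter, iov_iter_count(iter) + shorted);
 */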
size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
		struct iov_iter *i);
int import_iovec(int type, const struct iovec __user * uvector,
		unsigned nr_segs, unsigned fast_segs,
		struct iovec **iov, struct iov_iter *i);
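
/*
 * Sketch of the calling convention the readv()/writev() paths use: point
 * *iov at an on-stack fast array, let import_iovec() replace it with a
 * kmalloc'd copy when nr_segs exceeds fast_segs, and kfree() the result
 * either way (it is NULL when the fast array was used):
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	int ret;
 *
 *	ret = import_iovec(READ, uvector, nr_segs, UIO_FASTIOV, &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	... use iter; the validated total is iov_iter_count(&iter) ...
 *	kfree(iov);
 */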
#ifdef CONFIG_COMPAT
struct compat_iovec;
int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
		unsigned nr_segs, unsigned fast_segs,
		struct iovec **iov, struct iov_iter *i);
#endif
int import_single_range(int type, void __user *buf, size_t len,
		struct iovec *iov, struct iov_iter *i);
int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
			    int (*f)(struct kvec *vec, void *context),
			    void *context);
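
/*
 * Hedged sketch of a callback for iov_iter_for_each_range(): each mapped
 * range arrives as a kvec, and the callback's return value is reported
 * back through the walk. The byte sum here is purely an example:
 *
 *	static int sum_range(struct kvec *vec, void *context)
 *	{
 *		u32 *sum = context;
 *		size_t n;
 *
 *		for (n = 0; n < vec->iov_len; n++)
 *			*sum += ((u8 *)vec->iov_base)[n];
 *		return 0;
 *	}
 *
 *	u32 sum = 0;
 *	int err = iov_iter_for_each_range(iter, bytes, sum_range, &sum);
 */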