// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>
#include <linux/ceph/striper.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/falloc.h>
#include <linux/iversion.h>
#include <linux/ktime.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include "io.h"
#include "metric.h"

static __le32 ceph_flags_sys2wire(u32 flags)
{
	u32 wire_flags = 0;

	switch (flags & O_ACCMODE) {
	case O_RDONLY:
		wire_flags |= CEPH_O_RDONLY;
		break;
	case O_WRONLY:
		wire_flags |= CEPH_O_WRONLY;
		break;
	case O_RDWR:
		wire_flags |= CEPH_O_RDWR;
		break;
	}

	flags &= ~O_ACCMODE;

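	/*
	 * Each remaining O_* flag that has a CEPH_O_* counterpart of the
	 * same name is translated to its wire value and cleared; anything
	 * left over is reported via dout() below.  For example, an
	 * O_WRONLY|O_CREAT|O_TRUNC open goes out on the wire as
	 * CEPH_O_WRONLY|CEPH_O_CREAT|CEPH_O_TRUNC.
	 */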
#define ceph_sys2wire(a) if (flags & a) { wire_flags |= CEPH_##a; flags &= ~a; }

	ceph_sys2wire(O_CREAT);
	ceph_sys2wire(O_EXCL);
	ceph_sys2wire(O_TRUNC);
	ceph_sys2wire(O_DIRECTORY);
	ceph_sys2wire(O_NOFOLLOW);

#undef ceph_sys2wire

	if (flags)
		dout("unused open flags: %x\n", flags);

	return cpu_to_le32(wire_flags);
}

/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_{read,write}_iter helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */

/*
 * How many pages to get in one call to iov_iter_get_pages().  This
 * determines the size of the on-stack array used as a buffer.
 */
#define ITER_GET_BVECS_PAGES	64

static ssize_t __iter_get_bvecs(struct iov_iter *iter, size_t maxsize,
				struct bio_vec *bvecs)
{
	size_t size = 0;
	int bvec_idx = 0;

	if (maxsize > iov_iter_count(iter))
		maxsize = iov_iter_count(iter);

	while (size < maxsize) {
		struct page *pages[ITER_GET_BVECS_PAGES];
		ssize_t bytes;
		size_t start;
		int idx = 0;

		bytes = iov_iter_get_pages(iter, pages, maxsize - size,
					   ITER_GET_BVECS_PAGES, &start);
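		/*
		 * On error, report any bytes already pinned instead of the
		 * error ("size ?: bytes" is the GNU ?: extension: it
		 * evaluates to size if nonzero, else bytes).
		 */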
		if (bytes < 0)
			return size ?: bytes;

		iov_iter_advance(iter, bytes);
		size += bytes;

		for ( ; bytes; idx++, bvec_idx++) {
			struct bio_vec bv = {
				.bv_page = pages[idx],
				.bv_len = min_t(int, bytes, PAGE_SIZE - start),
				.bv_offset = start,
			};

			bvecs[bvec_idx] = bv;
			bytes -= bv.bv_len;
			start = 0;
		}
	}

	return size;
}

/*
 * iov_iter_get_pages() only considers one iov_iter segment, no matter
 * what maxsize or maxpages are given.  For ITER_BVEC that is a single
 * page.
 *
 * Attempt to get up to @maxsize bytes worth of pages from @iter.
 * Return the number of bytes in the created bio_vec array, or an error.
 */
static ssize_t iter_get_bvecs_alloc(struct iov_iter *iter, size_t maxsize,
				    struct bio_vec **bvecs, int *num_bvecs)
{
	struct bio_vec *bv;
	size_t orig_count = iov_iter_count(iter);
	ssize_t bytes;
	int npages;

	iov_iter_truncate(iter, maxsize);
	npages = iov_iter_npages(iter, INT_MAX);
	iov_iter_reexpand(iter, orig_count);

	/*
	 * __iter_get_bvecs() may populate only part of the array -- zero it
	 * out.
	 */
	bv = kvmalloc_array(npages, sizeof(*bv), GFP_KERNEL | __GFP_ZERO);
	if (!bv)
		return -ENOMEM;

	bytes = __iter_get_bvecs(iter, maxsize, bv);
	if (bytes < 0) {
		/*
		 * No pages were pinned -- just free the array.
		 */
		kvfree(bv);
		return bytes;
	}

	*bvecs = bv;
	*num_bvecs = npages;
	return bytes;
}

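/*
 * Undo __iter_get_bvecs(): drop the page references it took.  should_dirty
 * is set when the pages were the destination of a read (i.e. user memory
 * was written to), so they must be marked dirty before being released.
 */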
static void put_bvecs(struct bio_vec *bvecs, int num_bvecs, bool should_dirty)
{
	int i;

	for (i = 0; i < num_bvecs; i++) {
		if (bvecs[i].bv_page) {
			if (should_dirty)
				set_page_dirty_lock(bvecs[i].bv_page);
			put_page(bvecs[i].bv_page);
		}
	}
	kvfree(bvecs);
}

/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(sb);
	struct ceph_mds_request *req;
	int want_auth = USE_ANY_MDS;
	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
		want_auth = USE_AUTH_MDS;

	req = ceph_mdsc_create_request(mdsc, op, want_auth);
	if (IS_ERR(req))
		goto out;
	req->r_fmode = ceph_flags_to_mode(flags);
	req->r_args.open.flags = ceph_flags_sys2wire(flags);
	req->r_args.open.mode = cpu_to_le32(create_mode);
out:
	return req;
}

static int ceph_init_file_info(struct inode *inode, struct file *file,
					int fmode, bool isdir)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *fi;

	dout("%s %p %p 0%o (%s)\n", __func__, inode, file,
			inode->i_mode, isdir ? "dir" : "regular");
	BUG_ON(inode->i_fop->release != ceph_release);

	if (isdir) {
		struct ceph_dir_file_info *dfi =
			kmem_cache_zalloc(ceph_dir_file_cachep, GFP_KERNEL);
		if (!dfi)
			return -ENOMEM;

		file->private_data = dfi;
		fi = &dfi->file_info;
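		/*
		 * readdir offsets 0 and 1 are reserved for "." and "..";
		 * real entries start at offset 2.
		 */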
		dfi->next_offset = 2;
		dfi->readdir_cache_idx = -1;
	} else {
		fi = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
		if (!fi)
			return -ENOMEM;

		file->private_data = fi;
	}

	ceph_get_fmode(ci, fmode, 1);
	fi->fmode = fmode;

	spin_lock_init(&fi->rw_contexts_lock);
	INIT_LIST_HEAD(&fi->rw_contexts);
	fi->meta_err = errseq_sample(&ci->i_meta_err);
	fi->filp_gen = READ_ONCE(ceph_inode_to_client(inode)->filp_gen);

	return 0;
}

/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
	int ret = 0;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		ceph_fscache_register_inode_cookie(inode);
		ceph_fscache_file_set_cookie(inode, file);
		fallthrough;
	case S_IFDIR:
		ret = ceph_init_file_info(inode, file, fmode,
						S_ISDIR(inode->i_mode));
		break;

	case S_IFLNK:
		dout("init_file %p %p 0%o (symlink)\n", inode, file,
		     inode->i_mode);
		break;

	default:
		dout("init_file %p %p 0%o (special)\n", inode, file,
		     inode->i_mode);
		/*
		 * we need to drop the open ref now, since we don't
		 * have .release set to ceph_release.
		 */
		BUG_ON(inode->i_fop->release == ceph_release);

		/* call the proper open fop */
		ret = inode->i_fop->open(inode, file);
	}
	return ret;
}

/*
 * try to renew caps after the session gets killed.
 */
int ceph_renew_caps(struct inode *inode, int fmode)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_request *req;
	int err, flags, wanted;

	spin_lock(&ci->i_ceph_lock);
	__ceph_touch_fmode(ci, mdsc, fmode);
	wanted = __ceph_caps_file_wanted(ci);
	if (__ceph_is_any_real_caps(ci) &&
	    (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
		int issued = __ceph_caps_issued(ci, NULL);
		spin_unlock(&ci->i_ceph_lock);
		dout("renew caps %p want %s issued %s updating mds_wanted\n",
		     inode, ceph_cap_string(wanted), ceph_cap_string(issued));
		ceph_check_caps(ci, 0, NULL);
		return 0;
	}
	spin_unlock(&ci->i_ceph_lock);

	flags = 0;
	if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR))
		flags = O_RDWR;
	else if (wanted & CEPH_CAP_FILE_RD)
		flags = O_RDONLY;
	else if (wanted & CEPH_CAP_FILE_WR)
		flags = O_WRONLY;
#ifdef O_LAZY
	if (wanted & CEPH_CAP_FILE_LAZYIO)
		flags |= O_LAZY;
#endif

	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;

	err = ceph_mdsc_do_request(mdsc, NULL, req);
	ceph_mdsc_put_request(req);
out:
	dout("renew caps %p open result=%d\n", inode, err);
	return err < 0 ? err : 0;
}

/*
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_file_info *fi = file->private_data;
	int err;
	int flags, fmode, wanted;

	if (fi) {
		dout("open file %p is already opened\n", file);
		return 0;
	}

	/* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
	flags = file->f_flags & ~(O_CREAT|O_EXCL);
	if (S_ISDIR(inode->i_mode))
		flags = O_DIRECTORY;  /* mds likes to know */

	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
	     ceph_vinop(inode), file, flags, file->f_flags);
	fmode = ceph_flags_to_mode(flags);
	wanted = ceph_caps_for_mode(fmode);

	/* snapped files are read-only */
	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
		return -EROFS;

	/* trivially open snapdir */
	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		return ceph_init_file(inode, file, fmode);
	}

	/*
	 * No need to block if we have caps on the auth MDS (for
	 * write) or any MDS (for read).  Update wanted set
	 * asynchronously.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (__ceph_is_any_real_caps(ci) &&
	    (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
		int mds_wanted = __ceph_caps_mds_wanted(ci, true);
		int issued = __ceph_caps_issued(ci, NULL);

		dout("open %p fmode %d want %s issued %s using existing\n",
		     inode, fmode, ceph_cap_string(wanted),
		     ceph_cap_string(issued));
		__ceph_touch_fmode(ci, mdsc, fmode);
		spin_unlock(&ci->i_ceph_lock);

		/* adjust wanted? */
		if ((issued & wanted) != wanted &&
		    (mds_wanted & wanted) != wanted &&
		    ceph_snap(inode) != CEPH_SNAPDIR)
			ceph_check_caps(ci, 0, NULL);

		return ceph_init_file(inode, file, fmode);
	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
		   (ci->i_snap_caps & wanted) == wanted) {
		__ceph_touch_fmode(ci, mdsc, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	spin_unlock(&ci->i_ceph_lock);

	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_inode = inode;
	ihold(inode);

	req->r_num_caps = 1;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (!err)
		err = ceph_init_file(inode, file, req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
	return err;
}

/* Clone the layout from a synchronous create, if the dir now has Dc caps */
static void
cache_file_layout(struct inode *dst, struct inode *src)
{
	struct ceph_inode_info *cdst = ceph_inode(dst);
	struct ceph_inode_info *csrc = ceph_inode(src);

	spin_lock(&cdst->i_ceph_lock);
	if ((__ceph_caps_issued(cdst, NULL) & CEPH_CAP_DIR_CREATE) &&
	    !ceph_file_layout_is_valid(&cdst->i_cached_layout)) {
		memcpy(&cdst->i_cached_layout, &csrc->i_layout,
			sizeof(cdst->i_cached_layout));
		rcu_assign_pointer(cdst->i_cached_layout.pool_ns,
				   ceph_try_get_string(csrc->i_layout.pool_ns));
	}
	spin_unlock(&cdst->i_ceph_lock);
}

/*
 * Try to set up an async create.  We need caps, a file layout, and an
 * inode number, and either a lease on the dentry or complete dir info.
 * If any of those criteria are not satisfied, then return 0 and the
 * caller can go synchronous.
 */
static int try_prep_async_create(struct inode *dir, struct dentry *dentry,
				 struct ceph_file_layout *lo, u64 *pino)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	int got = 0, want = CEPH_CAP_FILE_EXCL | CEPH_CAP_DIR_CREATE;
	u64 ino;

	spin_lock(&ci->i_ceph_lock);
	/* No auth cap means no chance for Dc caps */
	if (!ci->i_auth_cap)
		goto no_async;

	/* Any delegated inos? */
	if (xa_empty(&ci->i_auth_cap->session->s_delegated_inos))
		goto no_async;

	if (!ceph_file_layout_is_valid(&ci->i_cached_layout))
		goto no_async;

	if ((__ceph_caps_issued(ci, NULL) & want) != want)
		goto no_async;

	if (d_in_lookup(dentry)) {
		if (!__ceph_dir_is_complete(ci))
			goto no_async;
		spin_lock(&dentry->d_lock);
		di->lease_shared_gen = atomic_read(&ci->i_shared_gen);
		spin_unlock(&dentry->d_lock);
	} else if (atomic_read(&ci->i_shared_gen) !=
		   READ_ONCE(di->lease_shared_gen)) {
		goto no_async;
	}

	ino = ceph_get_deleg_ino(ci->i_auth_cap->session);
	if (!ino)
		goto no_async;

	*pino = ino;
	ceph_take_cap_refs(ci, want, false);
	memcpy(lo, &ci->i_cached_layout, sizeof(*lo));
	rcu_assign_pointer(lo->pool_ns,
			   ceph_try_get_string(ci->i_cached_layout.pool_ns));
	got = want;
no_async:
	spin_unlock(&ci->i_ceph_lock);
	return got;
}

static void restore_deleg_ino(struct inode *dir, u64 ino)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_mds_session *s = NULL;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_auth_cap)
		s = ceph_get_mds_session(ci->i_auth_cap->session);
	spin_unlock(&ci->i_ceph_lock);
	if (s) {
		int err = ceph_restore_deleg_ino(s, ino);
		if (err)
			pr_warn("ceph: unable to restore delegated ino 0x%llx to session: %d\n",
				ino, err);
		ceph_put_mds_session(s);
	}
}

static void ceph_async_create_cb(struct ceph_mds_client *mdsc,
				 struct ceph_mds_request *req)
{
	int result = req->r_err ? req->r_err :
			le32_to_cpu(req->r_reply_info.head->result);

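	/*
	 * -EJUKEBOX is the MDS's cue to redo the create synchronously;
	 * ceph_atomic_open() restores the delegated ino and retries, so
	 * don't treat it as a failure here -- just release the dir caps.
	 */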
	if (result == -EJUKEBOX)
		goto out;

	mapping_set_error(req->r_parent->i_mapping, result);

	if (result) {
		struct dentry *dentry = req->r_dentry;
		int pathlen = 0;
		u64 base = 0;
		char *path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
						  &base, 0);

		ceph_dir_clear_complete(req->r_parent);
		if (!d_unhashed(dentry))
			d_drop(dentry);

		/* FIXME: start returning I/O errors on all accesses? */
		pr_warn("ceph: async create failure path=(%llx)%s result=%d!\n",
			base, IS_ERR(path) ? "<<bad>>" : path, result);
		ceph_mdsc_free_path(path, pathlen);
	}

	if (req->r_target_inode) {
		struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
		u64 ino = ceph_vino(req->r_target_inode).ino;

		if (req->r_deleg_ino != ino)
			pr_warn("%s: inode number mismatch! err=%d deleg_ino=0x%llx target=0x%llx\n",
				__func__, req->r_err, req->r_deleg_ino, ino);
		mapping_set_error(req->r_target_inode->i_mapping, result);

		spin_lock(&ci->i_ceph_lock);
		if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE) {
			ci->i_ceph_flags &= ~CEPH_I_ASYNC_CREATE;
			wake_up_bit(&ci->i_ceph_flags, CEPH_ASYNC_CREATE_BIT);
		}
		ceph_kick_flushing_inode_caps(req->r_session, ci);
		spin_unlock(&ci->i_ceph_lock);
	} else {
		pr_warn("%s: no req->r_target_inode for 0x%llx\n", __func__,
			req->r_deleg_ino);
	}
out:
	ceph_mdsc_release_dir_caps(req);
}

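/*
 * Finish an async create locally: synthesize the MDS reply (the server
 * hasn't actually answered yet), fill in the new inode from it, and
 * complete the open without waiting for the MDS round trip.
 */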
static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
				    struct file *file, umode_t mode,
				    struct ceph_mds_request *req,
				    struct ceph_acl_sec_ctx *as_ctx,
				    struct ceph_file_layout *lo)
{
	int ret;
	char xattr_buf[4];
	struct ceph_mds_reply_inode in = { };
	struct ceph_mds_reply_info_in iinfo = { .in = &in };
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct inode *inode;
	struct timespec64 now;
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
	struct ceph_vino vino = { .ino = req->r_deleg_ino,
				  .snap = CEPH_NOSNAP };

	ktime_get_real_ts64(&now);

	inode = ceph_get_inode(dentry->d_sb, vino);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	iinfo.inline_version = CEPH_INLINE_NONE;
	iinfo.change_attr = 1;
	ceph_encode_timespec64(&iinfo.btime, &now);

	iinfo.xattr_len = ARRAY_SIZE(xattr_buf);
	iinfo.xattr_data = xattr_buf;
	memset(iinfo.xattr_data, 0, iinfo.xattr_len);

	in.ino = cpu_to_le64(vino.ino);
	in.snapid = cpu_to_le64(CEPH_NOSNAP);
	in.version = cpu_to_le64(1);	// ???
	in.cap.caps = in.cap.wanted = cpu_to_le32(CEPH_CAP_ALL_FILE);
	in.cap.cap_id = cpu_to_le64(1);
	in.cap.realm = cpu_to_le64(ci->i_snap_realm->ino);
	in.cap.flags = CEPH_CAP_FLAG_AUTH;
	in.ctime = in.mtime = in.atime = iinfo.btime;
	in.mode = cpu_to_le32((u32)mode);
	in.truncate_seq = cpu_to_le32(1);
	in.truncate_size = cpu_to_le64(-1ULL);
	in.xattr_version = cpu_to_le64(1);
	in.uid = cpu_to_le32(from_kuid(&init_user_ns, current_fsuid()));
	in.gid = cpu_to_le32(from_kgid(&init_user_ns, dir->i_mode & S_ISGID ?
				dir->i_gid : current_fsgid()));
	in.nlink = cpu_to_le32(1);
	in.max_size = cpu_to_le64(lo->stripe_unit);

	ceph_file_layout_to_legacy(lo, &in.layout);

	down_read(&mdsc->snap_rwsem);
	ret = ceph_fill_inode(inode, NULL, &iinfo, NULL, req->r_session,
			      req->r_fmode, NULL);
	up_read(&mdsc->snap_rwsem);
	if (ret) {
		dout("%s failed to fill inode: %d\n", __func__, ret);
		ceph_dir_clear_complete(dir);
		if (!d_unhashed(dentry))
			d_drop(dentry);
		if (inode->i_state & I_NEW)
			discard_new_inode(inode);
	} else {
		struct dentry *dn;

		dout("%s d_adding new inode 0x%llx to 0x%llx/%s\n", __func__,
			vino.ino, ceph_ino(dir), dentry->d_name.name);
		ceph_dir_clear_ordered(dir);
		ceph_init_inode_acls(inode, as_ctx);
		if (inode->i_state & I_NEW) {
			/*
			 * If it's not I_NEW, then someone created this before
			 * we got here. Assume the server is aware of it at
			 * that point and don't worry about setting
			 * CEPH_I_ASYNC_CREATE.
			 */
			ceph_inode(inode)->i_ceph_flags = CEPH_I_ASYNC_CREATE;
			unlock_new_inode(inode);
		}
		if (d_in_lookup(dentry) || d_really_is_negative(dentry)) {
			if (!d_unhashed(dentry))
				d_drop(dentry);
			dn = d_splice_alias(inode, dentry);
			WARN_ON_ONCE(dn && dn != dentry);
		}
		file->f_mode |= FMODE_CREATED;
		ret = finish_open(file, dentry, ceph_open);
	}
	return ret;
}

/*
 * Do a lookup + open with a single request.  If we get a non-existent
 * file or symlink, return 1 so the VFS can retry.
 */
int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
		     struct file *file, unsigned flags, umode_t mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct dentry *dn;
	struct ceph_acl_sec_ctx as_ctx = {};
	bool try_async = ceph_test_mount_opt(fsc, ASYNC_DIROPS);
	int mask;
	int err;

	dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
	     dir, dentry, dentry,
	     d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);

	if (dentry->d_name.len > NAME_MAX)
		return -ENAMETOOLONG;

	if (flags & O_CREAT) {
		if (ceph_quota_is_max_files_exceeded(dir))
			return -EDQUOT;
		err = ceph_pre_init_acls(dir, &mode, &as_ctx);
		if (err < 0)
			return err;
		err = ceph_security_init_secctx(dentry, mode, &as_ctx);
		if (err < 0)
			goto out_ctx;
	} else if (!d_in_lookup(dentry)) {
		/* If it's not being looked up, it's negative */
		return -ENOENT;
	}
retry:
	/* do the open */
	req = prepare_open_request(dir->i_sb, flags, mode);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out_ctx;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
	if (ceph_security_xattr_wanted(dir))
		mask |= CEPH_CAP_XATTR_SHARED;
	req->r_args.open.mask = cpu_to_le32(mask);
	req->r_parent = dir;
	ihold(dir);

	if (flags & O_CREAT) {
		struct ceph_file_layout lo;

		req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
		if (as_ctx.pagelist) {
			req->r_pagelist = as_ctx.pagelist;
			as_ctx.pagelist = NULL;
		}
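		/*
		 * If we hold the necessary caps and were given a delegated
		 * inode number, create the file locally and submit the
		 * request to the MDS without waiting for the reply; fall
		 * back to a synchronous create on -EJUKEBOX.
		 */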
		if (try_async &&
		    (req->r_dir_caps =
		      try_prep_async_create(dir, dentry, &lo,
					    &req->r_deleg_ino))) {
			set_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags);
			req->r_args.open.flags |= cpu_to_le32(CEPH_O_EXCL);
			req->r_callback = ceph_async_create_cb;
			err = ceph_mdsc_submit_request(mdsc, dir, req);
			if (!err) {
				err = ceph_finish_async_create(dir, dentry,
							file, mode, req,
							&as_ctx, &lo);
			} else if (err == -EJUKEBOX) {
				restore_deleg_ino(dir, req->r_deleg_ino);
				ceph_mdsc_put_request(req);
				try_async = false;
				goto retry;
			}
			goto out_req;
		}
	}

	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
	err = ceph_mdsc_do_request(mdsc,
				   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
				   req);
	if (err == -ENOENT) {
		dentry = ceph_handle_snapdir(req, dentry);
		if (IS_ERR(dentry)) {
			err = PTR_ERR(dentry);
			goto out_req;
		}
		err = 0;
	}

	if (!err && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);

	if (d_in_lookup(dentry)) {
		dn = ceph_finish_lookup(req, dentry, err);
		if (IS_ERR(dn))
			err = PTR_ERR(dn);
	} else {
		/* we were given a hashed negative dentry */
		dn = NULL;
	}
	if (err)
		goto out_req;
	if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
		/* make vfs retry on splice, ENOENT, or symlink */
		dout("atomic_open finish_no_open on dn %p\n", dn);
		err = finish_no_open(file, dn);
	} else {
		dout("atomic_open finish_open on dn %p\n", dn);
		if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
			struct inode *newino = d_inode(dentry);

			cache_file_layout(dir, newino);
			ceph_init_inode_acls(newino, &as_ctx);
			file->f_mode |= FMODE_CREATED;
		}
		err = finish_open(file, dentry, ceph_open);
	}
out_req:
	ceph_mdsc_put_request(req);
out_ctx:
	ceph_release_acl_sec_ctx(&as_ctx);
	dout("atomic_open result=%d\n", err);
	return err;
}

int ceph_release(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (S_ISDIR(inode->i_mode)) {
		struct ceph_dir_file_info *dfi = file->private_data;
		dout("release inode %p dir file %p\n", inode, file);
		WARN_ON(!list_empty(&dfi->file_info.rw_contexts));

		ceph_put_fmode(ci, dfi->file_info.fmode, 1);

		if (dfi->last_readdir)
			ceph_mdsc_put_request(dfi->last_readdir);
		kfree(dfi->last_name);
		kfree(dfi->dir_info);
		kmem_cache_free(ceph_dir_file_cachep, dfi);
	} else {
		struct ceph_file_info *fi = file->private_data;
		dout("release inode %p regular file %p\n", inode, file);
		WARN_ON(!list_empty(&fi->rw_contexts));

		ceph_put_fmode(ci, fi->fmode, 1);

		kmem_cache_free(ceph_file_cachep, fi);
	}

	/* wake up anyone waiting for caps on this inode */
	wake_up_all(&ci->i_cap_wq);
	return 0;
}

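/*
 * retry_op values handed back to the caller of the sync read path:
 * HAVE_RETRIED means a retry was already done, CHECK_EOF means recheck
 * i_size before deciding whether a short read really hit EOF, and
 * READ_INLINE means fall back to reading the inlined file data.
 */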
enum {
	HAVE_RETRIED = 1,
	CHECK_EOF =    2,
	READ_INLINE =  3,
};

/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans an object boundary, just do multiple reads.  (That's
 * not atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we should
 * only return a short read to the caller if we hit EOF.
 */
static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
			      int *retry_op)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_client *osdc = &fsc->client->osdc;
	ssize_t ret;
	u64 off = iocb->ki_pos;
	u64 len = iov_iter_count(to);

	dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len,
	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	if (!len)
		return 0;
	/*
	 * flush any page cache pages in this range.  this
	 * will make concurrent normal and sync io slow,
	 * but it will at least behave sensibly when they are
	 * in sequence.
	 */
	ret = filemap_write_and_wait_range(inode->i_mapping,
					   off, off + len - 1);
	if (ret < 0)
		return ret;

	ret = 0;
	while ((len = iov_iter_count(to)) > 0) {
		struct ceph_osd_request *req;
		struct page **pages;
		int num_pages;
		size_t page_off;
		u64 i_size;
		bool more;
		int idx;
		size_t left;

		req = ceph_osdc_new_request(osdc, &ci->i_layout,
					ci->i_vino, off, &len, 0, 1,
					CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
					NULL, ci->i_truncate_seq,
					ci->i_truncate_size, false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

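		/*
		 * ceph_osdc_new_request() may have trimmed 'len' down to
		 * the object boundary, in which case there is more left
		 * to read once this request completes.
		 */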
		more = len < iov_iter_count(to);

		num_pages = calc_pages_for(off, len);
		page_off = off & ~PAGE_MASK;
		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		if (IS_ERR(pages)) {
			ceph_osdc_put_request(req);
			ret = PTR_ERR(pages);
			break;
		}

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_off,
						 false, false);
		ret = ceph_osdc_start_request(osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(osdc, req);

		ceph_update_read_metrics(&fsc->mdsc->metric,
					 req->r_start_latency,
					 req->r_end_latency,
					 len, ret);

		ceph_osdc_put_request(req);

		i_size = i_size_read(inode);
		dout("sync_read %llu~%llu got %zd i_size %llu%s\n",
		     off, len, ret, i_size, (more ? " MORE" : ""));

		if (ret == -ENOENT)
			ret = 0;
		if (ret >= 0 && ret < len && (off + ret < i_size)) {
			int zlen = min(len - ret, i_size - off - ret);
			int zoff = page_off + ret;
			dout("sync_read zero gap %llu~%llu\n",
			     off + ret, off + ret + zlen);
			ceph_zero_page_vector_range(zoff, zlen, pages);
			ret += zlen;
		}

		idx = 0;
		left = ret > 0 ? ret : 0;
		while (left > 0) {
			size_t len, copied;
			page_off = off & ~PAGE_MASK;
			len = min_t(size_t, left, PAGE_SIZE - page_off);
			SetPageUptodate(pages[idx]);
			copied = copy_page_to_iter(pages[idx++],
						   page_off, len, to);
			off += copied;
			left -= copied;
			if (copied < len) {
				ret = -EFAULT;
				break;
			}
		}
		ceph_release_page_vector(pages, num_pages);

		if (ret < 0) {
			if (ret == -EBLOCKLISTED)
				fsc->blocklisted = true;
			break;
		}

		if (off >= i_size || !more)
			break;
	}

	if (off > iocb->ki_pos) {
		if (ret >= 0 &&
		    iov_iter_count(to) > 0 && off >= i_size_read(inode))
			*retry_op = CHECK_EOF;
		ret = off - iocb->ki_pos;
		iocb->ki_pos = off;
	}

	dout("sync_read result %zd retry_op %d\n", ret, *retry_op);
	return ret;
}

struct ceph_aio_request {
	struct kiocb *iocb;
	size_t total_len;
	bool write;
	bool should_dirty;
	int error;
	struct list_head osd_reqs;
	unsigned num_reqs;
	atomic_t pending_reqs;
	struct timespec64 mtime;
	struct ceph_cap_flush *prealloc_cf;
};

struct ceph_aio_work {
	struct work_struct work;
	struct ceph_osd_request *req;
};

static void ceph_aio_retry_work(struct work_struct *work);

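/*
 * Called once per finished OSD sub-request; only the final call (when
 * pending_reqs drops to zero) actually completes the kiocb.
 */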
static void ceph_aio_complete(struct inode *inode,
			      struct ceph_aio_request *aio_req)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	if (!atomic_dec_and_test(&aio_req->pending_reqs))
		return;

	if (aio_req->iocb->ki_flags & IOCB_DIRECT)
		inode_dio_end(inode);

	ret = aio_req->error;
	if (!ret)
		ret = aio_req->total_len;

	dout("ceph_aio_complete %p rc %d\n", inode, ret);

	if (ret >= 0 && aio_req->write) {
		int dirty;

		loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
		if (endoff > i_size_read(inode)) {
			if (ceph_inode_set_size(inode, endoff))
				ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
		}

		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &aio_req->prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);

	}

	ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
						CEPH_CAP_FILE_RD));

	aio_req->iocb->ki_complete(aio_req->iocb, ret, 0);

	ceph_free_cap_flush(aio_req->prealloc_cf);
	kfree(aio_req);
}

static void ceph_aio_complete_req(struct ceph_osd_request *req)
{
	int rc = req->r_result;
	struct inode *inode = req->r_inode;
	struct ceph_aio_request *aio_req = req->r_priv;
	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
	struct ceph_client_metric *metric = &ceph_sb_to_mdsc(inode->i_sb)->metric;
	unsigned int len = osd_data->bvec_pos.iter.bi_size;

	BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS);
	BUG_ON(!osd_data->num_bvecs);

	dout("ceph_aio_complete_req %p rc %d bytes %u\n", inode, rc, len);

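	/*
	 * -EOLDSNAPC means the write raced with a snapshot: punt the
	 * request to a workqueue so it can be redone against the current
	 * snap context (see ceph_aio_retry_work()).
	 */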
	if (rc == -EOLDSNAPC) {
		struct ceph_aio_work *aio_work;
		BUG_ON(!aio_req->write);

		aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
		if (aio_work) {
			INIT_WORK(&aio_work->work, ceph_aio_retry_work);
			aio_work->req = req;
			queue_work(ceph_inode_to_client(inode)->inode_wq,
				   &aio_work->work);
			return;
		}
		rc = -ENOMEM;
	} else if (!aio_req->write) {
		if (rc == -ENOENT)
			rc = 0;
		if (rc >= 0 && len > rc) {
			struct iov_iter i;
			int zlen = len - rc;

			/*
			 * If the read is satisfied by a single OSD request,
			 * it can pass EOF.  Otherwise the read is within
			 * i_size.
			 */
			if (aio_req->num_reqs == 1) {
				loff_t i_size = i_size_read(inode);
				loff_t endoff = aio_req->iocb->ki_pos + rc;
				if (endoff < i_size)
					zlen = min_t(size_t, zlen,
						     i_size - endoff);
				aio_req->total_len = rc + zlen;
			}

			iov_iter_bvec(&i, READ, osd_data->bvec_pos.bvecs,
				      osd_data->num_bvecs, len);
			iov_iter_advance(&i, rc);
			iov_iter_zero(zlen, &i);
		}
	}

	/* r_start_latency == 0 means the request was not submitted */
	if (req->r_start_latency) {
		if (aio_req->write)
			ceph_update_write_metrics(metric, req->r_start_latency,
						  req->r_end_latency, len, rc);
		else
			ceph_update_read_metrics(metric, req->r_start_latency,
						 req->r_end_latency, len, rc);
	}

	put_bvecs(osd_data->bvec_pos.bvecs, osd_data->num_bvecs,
		  aio_req->should_dirty);
	ceph_osdc_put_request(req);

	if (rc < 0)
		cmpxchg(&aio_req->error, 0, rc);

	ceph_aio_complete(inode, aio_req);
	return;
}

static void ceph_aio_retry_work(struct work_struct *work)
{
	struct ceph_aio_work *aio_work =
		container_of(work, struct ceph_aio_work, work);
	struct ceph_osd_request *orig_req = aio_work->req;
	struct ceph_aio_request *aio_req = orig_req->r_priv;
	struct inode *inode = orig_req->r_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc;
	struct ceph_osd_request *req;
	int ret;

	spin_lock(&ci->i_ceph_lock);
	if (__ceph_have_pending_cap_snap(ci)) {
		struct ceph_cap_snap *capsnap =
			list_last_entry(&ci->i_cap_snaps,
					struct ceph_cap_snap,
					ci_item);
		snapc = ceph_get_snap_context(capsnap->context);
	} else {
		BUG_ON(!ci->i_head_snapc);
		snapc = ceph_get_snap_context(ci->i_head_snapc);
	}
	spin_unlock(&ci->i_ceph_lock);

	req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 1,
			false, GFP_NOFS);
	if (!req) {
		ret = -ENOMEM;
		req = orig_req;
		goto out;
	}

	req->r_flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
	ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
	ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);

	req->r_ops[0] = orig_req->r_ops[0];

	req->r_mtime = aio_req->mtime;
	req->r_data_offset = req->r_ops[0].extent.offset;

	ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
	if (ret) {
		ceph_osdc_put_request(req);
		req = orig_req;
		goto out;
	}

	ceph_osdc_put_request(orig_req);

	req->r_callback = ceph_aio_complete_req;
	req->r_inode = inode;
	req->r_priv = aio_req;

	ret = ceph_osdc_start_request(req->r_osdc, req, false);
out:
	if (ret < 0) {
		req->r_result = ret;
		ceph_aio_complete_req(req);
	}

	ceph_put_snap_context(snapc);
	kfree(aio_work);
}

static ssize_t
ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
		       struct ceph_snap_context *snapc,
		       struct ceph_cap_flush **pcf)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_client_metric *metric = &fsc->mdsc->metric;
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct bio_vec *bvecs;
	struct ceph_aio_request *aio_req = NULL;
	int num_pages = 0;
	int flags;
	int ret = 0;
	struct timespec64 mtime = current_time(inode);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos;
	bool write = iov_iter_rw(iter) == WRITE;
	bool should_dirty = !write && iter_is_iovec(iter);

	if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n",
	     (write ? "write" : "read"), file, pos, (unsigned)count,
	     snapc, snapc ? snapc->seq : 0);

	if (write) {
		int ret2 = invalidate_inode_pages2_range(inode->i_mapping,
					pos >> PAGE_SHIFT,
					(pos + count - 1) >> PAGE_SHIFT);
		if (ret2 < 0)
			dout("invalidate_inode_pages2_range returned %d\n", ret2);

		flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
	} else {
		flags = CEPH_OSD_FLAG_READ;
	}

	while (iov_iter_count(iter) > 0) {
		u64 size = iov_iter_count(iter);
		ssize_t len;

		if (write)
			size = min_t(u64, size, fsc->mount_options->wsize);
		else
			size = min_t(u64, size, fsc->mount_options->rsize);

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &size, 0,
					    1,
					    write ? CEPH_OSD_OP_WRITE :
						    CEPH_OSD_OP_READ,
					    flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
		if (len < 0) {
			ceph_osdc_put_request(req);
			ret = len;
			break;
		}
		if (len != size)
			osd_req_op_extent_update(req, 0, len);

		/*
		 * To simplify error handling, allow AIO only when the IO is
		 * within i_size or can be satisfied by a single OSD request.
		 */
		if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
		    (len == count || pos + count <= i_size_read(inode))) {
			aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
			if (aio_req) {
				aio_req->iocb = iocb;
				aio_req->write = write;
				aio_req->should_dirty = should_dirty;
				INIT_LIST_HEAD(&aio_req->osd_reqs);
				if (write) {
					aio_req->mtime = mtime;
					swap(aio_req->prealloc_cf, *pcf);
				}
			}
			/* ignore error */
		}

		if (write) {
			/*
			 * throw out any page cache pages in this range. this
			 * may block.
			 */
			truncate_inode_pages_range(inode->i_mapping, pos,
						   PAGE_ALIGN(pos + len) - 1);

			req->r_mtime = mtime;
		}

		osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);

		if (aio_req) {
			aio_req->total_len += len;
			aio_req->num_reqs++;
			atomic_inc(&aio_req->pending_reqs);

			req->r_callback = ceph_aio_complete_req;
			req->r_inode = inode;
			req->r_priv = aio_req;
			list_add_tail(&req->r_private_item, &aio_req->osd_reqs);

			pos += len;
			continue;
		}

		ret = ceph_osdc_start_request(req->r_osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

		if (write)
			ceph_update_write_metrics(metric, req->r_start_latency,
						  req->r_end_latency, len, ret);
		else
			ceph_update_read_metrics(metric, req->r_start_latency,
						 req->r_end_latency, len, ret);

		size = i_size_read(inode);
		if (!write) {
			if (ret == -ENOENT)
				ret = 0;
			if (ret >= 0 && ret < len && pos + ret < size) {
				struct iov_iter i;
				int zlen = min_t(size_t, len - ret,
						 size - pos - ret);

				iov_iter_bvec(&i, READ, bvecs, num_pages, len);
				iov_iter_advance(&i, ret);
				iov_iter_zero(zlen, &i);
				ret += zlen;
			}
			if (ret >= 0)
				len = ret;
		}

		put_bvecs(bvecs, num_pages, should_dirty);
		ceph_osdc_put_request(req);
		if (ret < 0)
			break;

		pos += len;
		if (!write && pos >= size)
			break;

		if (write && pos > size) {
			if (ceph_inode_set_size(inode, pos))
				ceph_check_caps(ceph_inode(inode),
						CHECK_CAPS_AUTHONLY,
						NULL);
		}
	}

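	/*
	 * Async case: the sub-requests were only queued up above.  Take a
	 * cap reference, submit them all, and return -EIOCBQUEUED so the
	 * caller knows completion arrives via ->ki_complete().
	 */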
	if (aio_req) {
		LIST_HEAD(osd_reqs);

		if (aio_req->num_reqs == 0) {
			kfree(aio_req);
			return ret;
		}

		ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
					      CEPH_CAP_FILE_RD);

		list_splice(&aio_req->osd_reqs, &osd_reqs);
		inode_dio_begin(inode);
		while (!list_empty(&osd_reqs)) {
			req = list_first_entry(&osd_reqs,
					       struct ceph_osd_request,
					       r_private_item);
			list_del_init(&req->r_private_item);
			if (ret >= 0)
				ret = ceph_osdc_start_request(req->r_osdc,
							      req, false);
			if (ret < 0) {
				req->r_result = ret;
				ceph_aio_complete_req(req);
			}
		}
		return -EIOCBQUEUED;
	}

	if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
		ret = pos - iocb->ki_pos;
		iocb->ki_pos = pos;
	}
	return ret;
}

/*
 * Synchronous write, straight from __user pointer or user pages.
 *
 * If the write spans an object boundary, just do multiple writes.  (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t
ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
		struct ceph_snap_context *snapc)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct page **pages;
	u64 len;
	int num_pages;
	int written = 0;
	int flags;
	int ret;
	bool check_caps = false;
	struct timespec64 mtime = current_time(inode);
	size_t count = iov_iter_count(from);

	if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_write on file %p %lld~%u snapc %p seq %lld\n",
	     file, pos, (unsigned)count, snapc, snapc->seq);

	ret = filemap_write_and_wait_range(inode->i_mapping,
					   pos, pos + count - 1);
	if (ret < 0)
		return ret;

	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    pos >> PAGE_SHIFT,
					    (pos + count - 1) >> PAGE_SHIFT);
	if (ret < 0)
		dout("invalidate_inode_pages2_range returned %d\n", ret);

	flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;

	while ((len = iov_iter_count(from)) > 0) {
		size_t left;
		int n;

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &len, 0, 1,
					    CEPH_OSD_OP_WRITE, flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		/*
		 * write from beginning of first page,
		 * regardless of io alignment
		 */
		num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;

		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		left = len;
		for (n = 0; n < num_pages; n++) {
			size_t plen = min_t(size_t, left, PAGE_SIZE);
			ret = copy_page_from_iter(pages[n], 0, plen, from);
			if (ret != plen) {
				ret = -EFAULT;
				break;
			}
			left -= ret;
		}

		if (ret < 0) {
			ceph_release_page_vector(pages, num_pages);
			goto out;
		}

		req->r_inode = inode;

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
						false, true);

		req->r_mtime = mtime;
		ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

		ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
1479                                           req->r_end_latency, len, ret);
1480 out:
1481                 ceph_osdc_put_request(req);
1482                 if (ret != 0) {
1483                         ceph_set_error_write(ci);
1484                         break;
1485                 }
1486
1487                 ceph_clear_error_write(ci);
1488                 pos += len;
1489                 written += len;
1490                 if (pos > i_size_read(inode)) {
1491                         check_caps = ceph_inode_set_size(inode, pos);
1492                         if (check_caps)
1493                                 ceph_check_caps(ceph_inode(inode),
1494                                                 CHECK_CAPS_AUTHONLY,
1495                                                 NULL);
1496                 }
1497
1498         }
1499
1500         if (ret != -EOLDSNAPC && written > 0) {
1501                 ret = written;
1502                 iocb->ki_pos = pos;
1503         }
1504         return ret;
1505 }
1506
1507 /*
1508  * Wrap generic_file_aio_read with checks for cap bits on the inode.
1509  * Atomically grab references, so that those bits are not released
1510  * back to the MDS mid-read.
1511  *
1512  * Hmm, the sync read case isn't actually async... should it be?
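      *
      * Three paths below: O_DIRECT reads go through
      * ceph_direct_read_write(), uncached or CEPH_F_SYNC reads through
      * ceph_sync_read(), and everything else through
      * generic_file_read_iter() with an rw context registered so the
      * cap refs stay held for the duration.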
1513  */
1514 static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
1515 {
1516         struct file *filp = iocb->ki_filp;
1517         struct ceph_file_info *fi = filp->private_data;
1518         size_t len = iov_iter_count(to);
1519         struct inode *inode = file_inode(filp);
1520         struct ceph_inode_info *ci = ceph_inode(inode);
1521         bool direct_lock = iocb->ki_flags & IOCB_DIRECT;
1522         ssize_t ret;
1523         int want, got = 0;
1524         int retry_op = 0, read = 0;
1525
1526 again:
1527         dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
1528              inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);
1529
1530         if (direct_lock)
1531                 ceph_start_io_direct(inode);
1532         else
1533                 ceph_start_io_read(inode);
1534
1535         if (fi->fmode & CEPH_FILE_MODE_LAZY)
1536                 want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
1537         else
1538                 want = CEPH_CAP_FILE_CACHE;
1539         ret = ceph_get_caps(filp, CEPH_CAP_FILE_RD, want, -1, &got);
1540         if (ret < 0) {
1541                 if (iocb->ki_flags & IOCB_DIRECT)
1542                         ceph_end_io_direct(inode);
1543                 else
1544                         ceph_end_io_read(inode);
1545                 return ret;
1546         }
1547
1548         if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
1549             (iocb->ki_flags & IOCB_DIRECT) ||
1550             (fi->flags & CEPH_F_SYNC)) {
1551
1552                 dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
1553                      inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
1554                      ceph_cap_string(got));
1555
1556                 if (ci->i_inline_version == CEPH_INLINE_NONE) {
1557                         if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
1558                                 ret = ceph_direct_read_write(iocb, to,
1559                                                              NULL, NULL);
1560                                 if (ret >= 0 && ret < len)
1561                                         retry_op = CHECK_EOF;
1562                         } else {
1563                                 ret = ceph_sync_read(iocb, to, &retry_op);
1564                         }
1565                 } else {
1566                         retry_op = READ_INLINE;
1567                 }
1568         } else {
1569                 CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
1570                 dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
1571                      inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
1572                      ceph_cap_string(got));
1573                 ceph_add_rw_context(fi, &rw_ctx);
1574                 ret = generic_file_read_iter(iocb, to);
1575                 ceph_del_rw_context(fi, &rw_ctx);
1576         }
1577
1578         dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
1579              inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
1580         ceph_put_cap_refs(ci, got);
1581
1582         if (direct_lock)
1583                 ceph_end_io_direct(inode);
1584         else
1585                 ceph_end_io_read(inode);
1586
1587         if (retry_op > HAVE_RETRIED && ret >= 0) {
1588                 int statret;
1589                 struct page *page = NULL;
1590                 loff_t i_size;
1591                 if (retry_op == READ_INLINE) {
1592                         page = __page_cache_alloc(GFP_KERNEL);
1593                         if (!page)
1594                                 return -ENOMEM;
1595                 }
1596
1597                 statret = __ceph_do_getattr(inode, page,
1598                                             CEPH_STAT_CAP_INLINE_DATA, !!page);
1599                 if (statret < 0) {
1600                         if (page)
1601                                 __free_page(page);
1602                         if (statret == -ENODATA) {
1603                                 BUG_ON(retry_op != READ_INLINE);
1604                                 goto again;
1605                         }
1606                         return statret;
1607                 }
1608
1609                 i_size = i_size_read(inode);
1610                 if (retry_op == READ_INLINE) {
1611                         BUG_ON(ret > 0 || read > 0);
1612                         if (iocb->ki_pos < i_size &&
1613                             iocb->ki_pos < PAGE_SIZE) {
1614                                 loff_t end = min_t(loff_t, i_size,
1615                                                    iocb->ki_pos + len);
1616                                 end = min_t(loff_t, end, PAGE_SIZE);
1617                                 if (statret < end)
1618                                         zero_user_segment(page, statret, end);
1619                                 ret = copy_page_to_iter(page,
1620                                                 iocb->ki_pos & ~PAGE_MASK,
1621                                                 end - iocb->ki_pos, to);
1622                                 iocb->ki_pos += ret;
1623                                 read += ret;
1624                         }
1625                         if (iocb->ki_pos < i_size && read < len) {
1626                                 size_t zlen = min_t(size_t, len - read,
1627                                                     i_size - iocb->ki_pos);
1628                                 ret = iov_iter_zero(zlen, to);
1629                                 iocb->ki_pos += ret;
1630                                 read += ret;
1631                         }
1632                         __free_pages(page, 0);
1633                         return read;
1634                 }
1635
1636                 /* hit EOF or hole? */
1637                 if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
1638                     ret < len) {
1639                         dout("sync_read hit hole, ppos %lld < size %lld"
1640                              ", reading more\n", iocb->ki_pos, i_size);
1641
1642                         read += ret;
1643                         len -= ret;
1644                         retry_op = HAVE_RETRIED;
1645                         goto again;
1646                 }
1647         }
1648
1649         if (ret >= 0)
1650                 ret += read;
1651
1652         return ret;
1653 }
1654
1655 /*
1656  * Take cap references to avoid releasing caps to MDS mid-write.
1657  *
1658  * If we are synchronous, and write with an old snap context, the OSD
1659  * may return EOLDSNAPC.  In that case, retry the write _after_
1660  * dropping our cap refs and allowing the pending snap to logically
1661  * complete _before_ this write occurs.
1662  *
1663  * If we are near ENOSPC, write synchronously.
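      *
      * ("Near ENOSPC" means the osdmap or the pool has the NEARFULL
      * flag set; in that case a successful write is also followed by a
      * sync, see the IOCB_DSYNC handling below.)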
1664  */
1665 static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
1666 {
1667         struct file *file = iocb->ki_filp;
1668         struct ceph_file_info *fi = file->private_data;
1669         struct inode *inode = file_inode(file);
1670         struct ceph_inode_info *ci = ceph_inode(inode);
1671         struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1672         struct ceph_osd_client *osdc = &fsc->client->osdc;
1673         struct ceph_cap_flush *prealloc_cf;
1674         ssize_t count, written = 0;
1675         int err, want, got;
1676         bool direct_lock = false;
1677         u32 map_flags;
1678         u64 pool_flags;
1679         loff_t pos;
1680         loff_t limit = max(i_size_read(inode), fsc->max_file_size);
1681
1682         if (ceph_snap(inode) != CEPH_NOSNAP)
1683                 return -EROFS;
1684
1685         prealloc_cf = ceph_alloc_cap_flush();
1686         if (!prealloc_cf)
1687                 return -ENOMEM;
1688
1689         if ((iocb->ki_flags & (IOCB_DIRECT | IOCB_APPEND)) == IOCB_DIRECT)
1690                 direct_lock = true;
1691
1692 retry_snap:
1693         if (direct_lock)
1694                 ceph_start_io_direct(inode);
1695         else
1696                 ceph_start_io_write(inode);
1697
1698         /* We can write back this queue in page reclaim */
1699         current->backing_dev_info = inode_to_bdi(inode);
1700
1701         if (iocb->ki_flags & IOCB_APPEND) {
1702                 err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
1703                 if (err < 0)
1704                         goto out;
1705         }
1706
1707         err = generic_write_checks(iocb, from);
1708         if (err <= 0)
1709                 goto out;
1710
1711         pos = iocb->ki_pos;
1712         if (unlikely(pos >= limit)) {
1713                 err = -EFBIG;
1714                 goto out;
1715         } else {
1716                 iov_iter_truncate(from, limit - pos);
1717         }
1718
1719         count = iov_iter_count(from);
1720         if (ceph_quota_is_max_bytes_exceeded(inode, pos + count)) {
1721                 err = -EDQUOT;
1722                 goto out;
1723         }
1724
1725         down_read(&osdc->lock);
1726         map_flags = osdc->osdmap->flags;
1727         pool_flags = ceph_pg_pool_flags(osdc->osdmap, ci->i_layout.pool_id);
1728         up_read(&osdc->lock);
1729         if ((map_flags & CEPH_OSDMAP_FULL) ||
1730             (pool_flags & CEPH_POOL_FLAG_FULL)) {
1731                 err = -ENOSPC;
1732                 goto out;
1733         }
1734
1735         err = file_remove_privs(file);
1736         if (err)
1737                 goto out;
1738
1739         if (ci->i_inline_version != CEPH_INLINE_NONE) {
1740                 err = ceph_uninline_data(file, NULL);
1741                 if (err < 0)
1742                         goto out;
1743         }
1744
1745         dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
1746              inode, ceph_vinop(inode), pos, count, i_size_read(inode));
1747         if (fi->fmode & CEPH_FILE_MODE_LAZY)
1748                 want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
1749         else
1750                 want = CEPH_CAP_FILE_BUFFER;
1751         got = 0;
1752         err = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, pos + count, &got);
1753         if (err < 0)
1754                 goto out;
1755
1756         err = file_update_time(file);
1757         if (err)
1758                 goto out_caps;
1759
1760         inode_inc_iversion_raw(inode);
1761
1762         dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
1763              inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));
1764
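             /*
              * Take the sync/direct path when we lack Fb caps, when the
              * caller asked for O_DIRECT or sync I/O, or after a previous
              * write error; otherwise fall through to buffered writes.
              */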
1765         if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
1766             (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) ||
1767             (ci->i_ceph_flags & CEPH_I_ERROR_WRITE)) {
1768                 struct ceph_snap_context *snapc;
1769                 struct iov_iter data;
1770
1771                 spin_lock(&ci->i_ceph_lock);
1772                 if (__ceph_have_pending_cap_snap(ci)) {
1773                         struct ceph_cap_snap *capsnap =
1774                                         list_last_entry(&ci->i_cap_snaps,
1775                                                         struct ceph_cap_snap,
1776                                                         ci_item);
1777                         snapc = ceph_get_snap_context(capsnap->context);
1778                 } else {
1779                         BUG_ON(!ci->i_head_snapc);
1780                         snapc = ceph_get_snap_context(ci->i_head_snapc);
1781                 }
1782                 spin_unlock(&ci->i_ceph_lock);
1783
1784                 /* we might need to revert back to that point */
1785                 data = *from;
1786                 if (iocb->ki_flags & IOCB_DIRECT)
1787                         written = ceph_direct_read_write(iocb, &data, snapc,
1788                                                          &prealloc_cf);
1789                 else
1790                         written = ceph_sync_write(iocb, &data, pos, snapc);
1791                 if (direct_lock)
1792                         ceph_end_io_direct(inode);
1793                 else
1794                         ceph_end_io_write(inode);
1795                 if (written > 0)
1796                         iov_iter_advance(from, written);
1797                 ceph_put_snap_context(snapc);
1798         } else {
1799                 /*
1800                  * No need to acquire the i_truncate_mutex: the MDS
1801                  * revokes Fwb caps before sending a truncate message
1802                  * to us, and we can't get the Fwb cap while there is
1803                  * a pending vmtruncate.  So write and vmtruncate
1804                  * cannot run at the same time.
1805                  */
1806                 written = generic_perform_write(file, from, pos);
1807                 if (likely(written >= 0))
1808                         iocb->ki_pos = pos + written;
1809                 ceph_end_io_write(inode);
1810         }
1811
1812         if (written >= 0) {
1813                 int dirty;
1814
1815                 spin_lock(&ci->i_ceph_lock);
1816                 ci->i_inline_version = CEPH_INLINE_NONE;
1817                 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
1818                                                &prealloc_cf);
1819                 spin_unlock(&ci->i_ceph_lock);
1820                 if (dirty)
1821                         __mark_inode_dirty(inode, dirty);
1822                 if (ceph_quota_is_max_bytes_approaching(inode, iocb->ki_pos))
1823                         ceph_check_caps(ci, 0, NULL);
1824         }
1825
1826         dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
1827              inode, ceph_vinop(inode), pos, (unsigned)count,
1828              ceph_cap_string(got));
1829         ceph_put_cap_refs(ci, got);
1830
1831         if (written == -EOLDSNAPC) {
1832                 dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
1833                      inode, ceph_vinop(inode), pos, (unsigned)count);
1834                 goto retry_snap;
1835         }
1836
1837         if (written >= 0) {
1838                 if ((map_flags & CEPH_OSDMAP_NEARFULL) ||
1839                     (pool_flags & CEPH_POOL_FLAG_NEARFULL))
1840                         iocb->ki_flags |= IOCB_DSYNC;
1841                 written = generic_write_sync(iocb, written);
1842         }
1843
1844         goto out_unlocked;
1845 out_caps:
1846         ceph_put_cap_refs(ci, got);
1847 out:
1848         if (direct_lock)
1849                 ceph_end_io_direct(inode);
1850         else
1851                 ceph_end_io_write(inode);
1852 out_unlocked:
1853         ceph_free_cap_flush(prealloc_cf);
1854         current->backing_dev_info = NULL;
1855         return written ? written : err;
1856 }
1857
1858 /*
1859  * llseek.  be sure to verify file size on SEEK_END.
1860  */
1861 static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
1862 {
1863         struct inode *inode = file->f_mapping->host;
1864         struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1865         loff_t i_size;
1866         loff_t ret;
1867
1868         inode_lock(inode);
1869
1870         if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
1871                 ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
1872                 if (ret < 0)
1873                         goto out;
1874         }
1875
1876         i_size = i_size_read(inode);
1877         switch (whence) {
1878         case SEEK_END:
1879                 offset += i_size;
1880                 break;
1881         case SEEK_CUR:
1882                 /*
1883                  * Here we special-case the lseek(fd, 0, SEEK_CUR)
1884                  * position-querying operation.  Avoid rewriting the "same"
1885                  * f_pos value back to the file because a concurrent read(),
1886                  * write() or lseek() might have altered it.
1887                  */
1888                 if (offset == 0) {
1889                         ret = file->f_pos;
1890                         goto out;
1891                 }
1892                 offset += file->f_pos;
1893                 break;
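             /*
              * SEEK_DATA/SEEK_HOLE are implemented naively: the file is
              * treated as a single extent of data, so SEEK_DATA returns
              * the offset itself and SEEK_HOLE returns i_size.
              */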
1894         case SEEK_DATA:
1895                 if (offset < 0 || offset >= i_size) {
1896                         ret = -ENXIO;
1897                         goto out;
1898                 }
1899                 break;
1900         case SEEK_HOLE:
1901                 if (offset < 0 || offset >= i_size) {
1902                         ret = -ENXIO;
1903                         goto out;
1904                 }
1905                 offset = i_size;
1906                 break;
1907         }
1908
1909         ret = vfs_setpos(file, offset, max(i_size, fsc->max_file_size));
1910
1911 out:
1912         inode_unlock(inode);
1913         return ret;
1914 }
1915
1916 static inline void ceph_zero_partial_page(
1917         struct inode *inode, loff_t offset, unsigned size)
1918 {
1919         struct page *page;
1920         pgoff_t index = offset >> PAGE_SHIFT;
1921
1922         page = find_lock_page(inode->i_mapping, index);
1923         if (page) {
1924                 wait_on_page_writeback(page);
1925                 zero_user(page, offset & (PAGE_SIZE - 1), size);
1926                 unlock_page(page);
1927                 put_page(page);
1928         }
1929 }
1930
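     /*
      * Zero a range of the page cache in up to three steps: a partial
      * page at the front, whole pages in the middle (which are simply
      * dropped from the cache), and a partial page at the tail.
      */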
1931 static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
1932                                       loff_t length)
1933 {
1934         loff_t nearly = round_up(offset, PAGE_SIZE);
1935         if (offset < nearly) {
1936                 loff_t size = nearly - offset;
1937                 if (length < size)
1938                         size = length;
1939                 ceph_zero_partial_page(inode, offset, size);
1940                 offset += size;
1941                 length -= size;
1942         }
1943         if (length >= PAGE_SIZE) {
1944                 loff_t size = round_down(length, PAGE_SIZE);
1945                 truncate_pagecache_range(inode, offset, offset + size - 1);
1946                 offset += size;
1947                 length -= size;
1948         }
1949         if (length)
1950                 ceph_zero_partial_page(inode, offset, length);
1951 }
1952
1953 static int ceph_zero_partial_object(struct inode *inode,
1954                                     loff_t offset, loff_t *length)
1955 {
1956         struct ceph_inode_info *ci = ceph_inode(inode);
1957         struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1958         struct ceph_osd_request *req;
1959         int ret = 0;
1960         loff_t zero = 0;
1961         int op;
1962
1963         if (!length) {
1964                 op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
1965                 length = &zero;
1966         } else {
1967                 op = CEPH_OSD_OP_ZERO;
1968         }
1969
1970         req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1971                                         ceph_vino(inode),
1972                                         offset, length,
1973                                         0, 1, op,
1974                                         CEPH_OSD_FLAG_WRITE,
1975                                         NULL, 0, 0, false);
1976         if (IS_ERR(req)) {
1977                 ret = PTR_ERR(req);
1978                 goto out;
1979         }
1980
1981         req->r_mtime = inode->i_mtime;
1982         ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
1983         if (!ret) {
1984                 ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1985                 if (ret == -ENOENT)
1986                         ret = 0;
1987         }
1988         ceph_osdc_put_request(req);
1989
1990 out:
1991         return ret;
1992 }
1993
1994 static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
1995 {
1996         int ret = 0;
1997         struct ceph_inode_info *ci = ceph_inode(inode);
1998         s32 stripe_unit = ci->i_layout.stripe_unit;
1999         s32 stripe_count = ci->i_layout.stripe_count;
2000         s32 object_size = ci->i_layout.object_size;
2001         u64 object_set_size = (u64) object_size * stripe_count;
2002         u64 nearly, t;
2003
2004         /* round offset up to next period boundary */
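             /* e.g. object_set_size = 16M, offset = 5M  ->  nearly = 16M */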
2005         nearly = offset + object_set_size - 1;
2006         t = nearly;
2007         nearly -= do_div(t, object_set_size);
2008
2009         while (length && offset < nearly) {
2010                 loff_t size = length;
2011                 ret = ceph_zero_partial_object(inode, offset, &size);
2012                 if (ret < 0)
2013                         return ret;
2014                 offset += size;
2015                 length -= size;
2016         }
2017         while (length >= object_set_size) {
2018                 int i;
2019                 loff_t pos = offset;
2020                 for (i = 0; i < stripe_count; ++i) {
2021                         ret = ceph_zero_partial_object(inode, pos, NULL);
2022                         if (ret < 0)
2023                                 return ret;
2024                         pos += stripe_unit;
2025                 }
2026                 offset += object_set_size;
2027                 length -= object_set_size;
2028         }
2029         while (length) {
2030                 loff_t size = length;
2031                 ret = ceph_zero_partial_object(inode, offset, &size);
2032                 if (ret < 0)
2033                         return ret;
2034                 offset += size;
2035                 length -= size;
2036         }
2037         return ret;
2038 }
2039
2040 static long ceph_fallocate(struct file *file, int mode,
2041                                 loff_t offset, loff_t length)
2042 {
2043         struct ceph_file_info *fi = file->private_data;
2044         struct inode *inode = file_inode(file);
2045         struct ceph_inode_info *ci = ceph_inode(inode);
2046         struct ceph_cap_flush *prealloc_cf;
2047         int want, got = 0;
2048         int dirty;
2049         int ret = 0;
2050         loff_t endoff = 0;
2051         loff_t size;
2052
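             /* Only size-preserving hole punching is supported */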
2053         if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2054                 return -EOPNOTSUPP;
2055
2056         if (!S_ISREG(inode->i_mode))
2057                 return -EOPNOTSUPP;
2058
2059         prealloc_cf = ceph_alloc_cap_flush();
2060         if (!prealloc_cf)
2061                 return -ENOMEM;
2062
2063         inode_lock(inode);
2064
2065         if (ceph_snap(inode) != CEPH_NOSNAP) {
2066                 ret = -EROFS;
2067                 goto unlock;
2068         }
2069
2070         if (ci->i_inline_version != CEPH_INLINE_NONE) {
2071                 ret = ceph_uninline_data(file, NULL);
2072                 if (ret < 0)
2073                         goto unlock;
2074         }
2075
2076         size = i_size_read(inode);
2077
2078         /* Are we punching a hole beyond EOF? */
2079         if (offset >= size)
2080                 goto unlock;
2081         if ((offset + length) > size)
2082                 length = size - offset;
2083
2084         if (fi->fmode & CEPH_FILE_MODE_LAZY)
2085                 want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
2086         else
2087                 want = CEPH_CAP_FILE_BUFFER;
2088
2089         ret = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, endoff, &got);
2090         if (ret < 0)
2091                 goto unlock;
2092
2093         filemap_invalidate_lock(inode->i_mapping);
2094         ceph_zero_pagecache_range(inode, offset, length);
2095         ret = ceph_zero_objects(inode, offset, length);
2096
2097         if (!ret) {
2098                 spin_lock(&ci->i_ceph_lock);
2099                 ci->i_inline_version = CEPH_INLINE_NONE;
2100                 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
2101                                                &prealloc_cf);
2102                 spin_unlock(&ci->i_ceph_lock);
2103                 if (dirty)
2104                         __mark_inode_dirty(inode, dirty);
2105         }
2106         filemap_invalidate_unlock(inode->i_mapping);
2107
2108         ceph_put_cap_refs(ci, got);
2109 unlock:
2110         inode_unlock(inode);
2111         ceph_free_cap_flush(prealloc_cf);
2112         return ret;
2113 }
2114
2115 /*
2116  * This function tries to get FILE_WR capabilities for dst_ci and FILE_RD for
2117  * src_ci.  Two attempts are made to obtain both caps, and an error is returned
2118  * if this fails; a positive value is returned on success.
2119  */
2120 static int get_rd_wr_caps(struct file *src_filp, int *src_got,
2121                           struct file *dst_filp,
2122                           loff_t dst_endoff, int *dst_got)
2123 {
2124         int ret = 0;
2125         bool retrying = false;
2126
2127 retry_caps:
2128         ret = ceph_get_caps(dst_filp, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER,
2129                             dst_endoff, dst_got);
2130         if (ret < 0)
2131                 return ret;
2132
2133         /*
2134          * Since we're already holding the FILE_WR capability for the dst file,
2135          * we would risk a deadlock by using ceph_get_caps.  Thus, we'll do some
2136          * retry dance instead to try to get both capabilities.
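              *
              * (At most one retry is made; if the second attempt also
              * fails we give up, returning -EAGAIN when ceph_try_get_caps
              * reports no caps without an error.)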
2137          */
2138         ret = ceph_try_get_caps(file_inode(src_filp),
2139                                 CEPH_CAP_FILE_RD, CEPH_CAP_FILE_SHARED,
2140                                 false, src_got);
2141         if (ret <= 0) {
2142                 /* Start by dropping dst_ci caps and getting src_ci caps */
2143                 ceph_put_cap_refs(ceph_inode(file_inode(dst_filp)), *dst_got);
2144                 if (retrying) {
2145                         if (!ret)
2146                                 /* ceph_try_get_caps masks EAGAIN */
2147                                 ret = -EAGAIN;
2148                         return ret;
2149                 }
2150                 ret = ceph_get_caps(src_filp, CEPH_CAP_FILE_RD,
2151                                     CEPH_CAP_FILE_SHARED, -1, src_got);
2152                 if (ret < 0)
2153                         return ret;
2154                 /* ... drop src_ci caps too, and retry */
2155                 ceph_put_cap_refs(ceph_inode(file_inode(src_filp)), *src_got);
2156                 retrying = true;
2157                 goto retry_caps;
2158         }
2159         return ret;
2160 }
2161
2162 static void put_rd_wr_caps(struct ceph_inode_info *src_ci, int src_got,
2163                            struct ceph_inode_info *dst_ci, int dst_got)
2164 {
2165         ceph_put_cap_refs(src_ci, src_got);
2166         ceph_put_cap_refs(dst_ci, dst_got);
2167 }
2168
2169 /*
2170  * This function does several size-related checks, returning an error if:
2171  *  - source file is smaller than off+len
2172  *  - destination file size is not OK (inode_newsize_ok())
2173  *  - max bytes quota is exceeded
2174  */
2175 static int is_file_size_ok(struct inode *src_inode, struct inode *dst_inode,
2176                            loff_t src_off, loff_t dst_off, size_t len)
2177 {
2178         loff_t size, endoff;
2179
2180         size = i_size_read(src_inode);
2181         /*
2182          * Don't copy beyond source file EOF.  Instead of simply setting length
2183          * to (size - src_off), just drop to VFS default implementation, as the
2184          * local i_size may be stale due to other clients writing to the source
2185          * inode.
2186          */
2187         if (src_off + len > size) {
2188                 dout("Copy beyond EOF (%llu + %zu > %llu)\n",
2189                      src_off, len, size);
2190                 return -EOPNOTSUPP;
2191         }
2192         size = i_size_read(dst_inode);
2193
2194         endoff = dst_off + len;
2195         if (inode_newsize_ok(dst_inode, endoff))
2196                 return -EOPNOTSUPP;
2197
2198         if (ceph_quota_is_max_bytes_exceeded(dst_inode, endoff))
2199                 return -EDQUOT;
2200
2201         return 0;
2202 }
2203
2204 static ssize_t ceph_do_objects_copy(struct ceph_inode_info *src_ci, u64 *src_off,
2205                                     struct ceph_inode_info *dst_ci, u64 *dst_off,
2206                                     struct ceph_fs_client *fsc,
2207                                     size_t len, unsigned int flags)
2208 {
2209         struct ceph_object_locator src_oloc, dst_oloc;
2210         struct ceph_object_id src_oid, dst_oid;
2211         size_t bytes = 0;
2212         u64 src_objnum, src_objoff, dst_objnum, dst_objoff;
2213         u32 src_objlen, dst_objlen;
2214         u32 object_size = src_ci->i_layout.object_size;
2215         int ret;
2216
2217         src_oloc.pool = src_ci->i_layout.pool_id;
2218         src_oloc.pool_ns = ceph_try_get_string(src_ci->i_layout.pool_ns);
2219         dst_oloc.pool = dst_ci->i_layout.pool_id;
2220         dst_oloc.pool_ns = ceph_try_get_string(dst_ci->i_layout.pool_ns);
2221
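             /*
              * Copy one full object per iteration: map the current src/dst
              * offsets to object names and issue a remote copy-from op for
              * each pair.
              */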
2222         while (len >= object_size) {
2223                 ceph_calc_file_object_mapping(&src_ci->i_layout, *src_off,
2224                                               object_size, &src_objnum,
2225                                               &src_objoff, &src_objlen);
2226                 ceph_calc_file_object_mapping(&dst_ci->i_layout, *dst_off,
2227                                               object_size, &dst_objnum,
2228                                               &dst_objoff, &dst_objlen);
2229                 ceph_oid_init(&src_oid);
2230                 ceph_oid_printf(&src_oid, "%llx.%08llx",
2231                                 src_ci->i_vino.ino, src_objnum);
2232                 ceph_oid_init(&dst_oid);
2233                 ceph_oid_printf(&dst_oid, "%llx.%08llx",
2234                                 dst_ci->i_vino.ino, dst_objnum);
2235                 /* Do an object remote copy */
2236                 ret = ceph_osdc_copy_from(&fsc->client->osdc,
2237                                           src_ci->i_vino.snap, 0,
2238                                           &src_oid, &src_oloc,
2239                                           CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
2240                                           CEPH_OSD_OP_FLAG_FADVISE_NOCACHE,
2241                                           &dst_oid, &dst_oloc,
2242                                           CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
2243                                           CEPH_OSD_OP_FLAG_FADVISE_DONTNEED,
2244                                           dst_ci->i_truncate_seq,
2245                                           dst_ci->i_truncate_size,
2246                                           CEPH_OSD_COPY_FROM_FLAG_TRUNCATE_SEQ);
2247                 if (ret) {
2248                         if (ret == -EOPNOTSUPP) {
2249                                 fsc->have_copy_from2 = false;
2250                                 pr_notice("OSDs don't support copy-from2; disabling copy offload\n");
2251                         }
2252                         dout("ceph_osdc_copy_from returned %d\n", ret);
2253                         if (!bytes)
2254                                 bytes = ret;
2255                         goto out;
2256                 }
2257                 len -= object_size;
2258                 bytes += object_size;
2259                 *src_off += object_size;
2260                 *dst_off += object_size;
2261         }
2262
2263 out:
2264         ceph_oloc_destroy(&src_oloc);
2265         ceph_oloc_destroy(&dst_oloc);
2266         return bytes;
2267 }
2268
2269 static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
2270                                       struct file *dst_file, loff_t dst_off,
2271                                       size_t len, unsigned int flags)
2272 {
2273         struct inode *src_inode = file_inode(src_file);
2274         struct inode *dst_inode = file_inode(dst_file);
2275         struct ceph_inode_info *src_ci = ceph_inode(src_inode);
2276         struct ceph_inode_info *dst_ci = ceph_inode(dst_inode);
2277         struct ceph_cap_flush *prealloc_cf;
2278         struct ceph_fs_client *src_fsc = ceph_inode_to_client(src_inode);
2279         loff_t size;
2280         ssize_t ret = -EIO, bytes;
2281         u64 src_objnum, dst_objnum, src_objoff, dst_objoff;
2282         u32 src_objlen, dst_objlen;
2283         int src_got = 0, dst_got = 0, err, dirty;
2284
2285         if (src_inode->i_sb != dst_inode->i_sb) {
2286                 struct ceph_fs_client *dst_fsc = ceph_inode_to_client(dst_inode);
2287
2288                 if (ceph_fsid_compare(&src_fsc->client->fsid,
2289                                       &dst_fsc->client->fsid)) {
2290                         dout("Copying files across clusters: src: %pU dst: %pU\n",
2291                              &src_fsc->client->fsid, &dst_fsc->client->fsid);
2292                         return -EXDEV;
2293                 }
2294         }
2295         if (ceph_snap(dst_inode) != CEPH_NOSNAP)
2296                 return -EROFS;
2297
2298         /*
2299          * Some of the checks below will return -EOPNOTSUPP, which will force a
2300          * fallback to the default VFS copy_file_range implementation.  This is
2301          * desirable in several cases (for ex, the 'len' is smaller than the
2302          * desirable in several cases (for example, when 'len' is smaller than the
2303          * efficient).
2304          */
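              *
              * Overall flow: splice a partial head segment until the
              * offsets are object-aligned, offload full-object copies to
              * the OSDs, then splice any partial tail segment.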
2305
2306         if (ceph_test_mount_opt(src_fsc, NOCOPYFROM))
2307                 return -EOPNOTSUPP;
2308
2309         if (!src_fsc->have_copy_from2)
2310                 return -EOPNOTSUPP;
2311
2312         /*
2313          * Striped file layouts require that we copy partial objects, but the
2314          * OSD copy-from operation only supports full-object copies.  Limit
2315          * this to non-striped file layouts for now.
2316          */
2317         if ((src_ci->i_layout.stripe_unit != dst_ci->i_layout.stripe_unit) ||
2318             (src_ci->i_layout.stripe_count != 1) ||
2319             (dst_ci->i_layout.stripe_count != 1) ||
2320             (src_ci->i_layout.object_size != dst_ci->i_layout.object_size)) {
2321                 dout("Invalid src/dst files layout\n");
2322                 return -EOPNOTSUPP;
2323         }
2324
2325         if (len < src_ci->i_layout.object_size)
2326                 return -EOPNOTSUPP; /* no remote copy will be done */
2327
2328         prealloc_cf = ceph_alloc_cap_flush();
2329         if (!prealloc_cf)
2330                 return -ENOMEM;
2331
2332         /* Start by sync'ing the source and destination files */
2333         ret = file_write_and_wait_range(src_file, src_off, (src_off + len));
2334         if (ret < 0) {
2335                 dout("failed to write src file (%zd)\n", ret);
2336                 goto out;
2337         }
2338         ret = file_write_and_wait_range(dst_file, dst_off, (dst_off + len));
2339         if (ret < 0) {
2340                 dout("failed to write dst file (%zd)\n", ret);
2341                 goto out;
2342         }
2343
2344         /*
2345          * We need FILE_WR caps for dst_ci and FILE_RD for src_ci as other
2346          * clients may have dirty data in their caches.  And OSDs know nothing
2347          * about caps, so they can't safely do the remote object copies.
2348          */
2349         err = get_rd_wr_caps(src_file, &src_got,
2350                              dst_file, (dst_off + len), &dst_got);
2351         if (err < 0) {
2352                 dout("get_rd_wr_caps returned %d\n", err);
2353                 ret = -EOPNOTSUPP;
2354                 goto out;
2355         }
2356
2357         ret = is_file_size_ok(src_inode, dst_inode, src_off, dst_off, len);
2358         if (ret < 0)
2359                 goto out_caps;
2360
2361         /* Drop dst file cached pages */
2362         ret = invalidate_inode_pages2_range(dst_inode->i_mapping,
2363                                             dst_off >> PAGE_SHIFT,
2364                                             (dst_off + len) >> PAGE_SHIFT);
2365         if (ret < 0) {
2366                 dout("Failed to invalidate inode pages (%zd)\n", ret);
2367                 ret = 0; /* XXX */
2368         }
2369         ceph_calc_file_object_mapping(&src_ci->i_layout, src_off,
2370                                       src_ci->i_layout.object_size,
2371                                       &src_objnum, &src_objoff, &src_objlen);
2372         ceph_calc_file_object_mapping(&dst_ci->i_layout, dst_off,
2373                                       dst_ci->i_layout.object_size,
2374                                       &dst_objnum, &dst_objoff, &dst_objlen);
2375         /* object-level offsets need to be the same */
2376         if (src_objoff != dst_objoff) {
2377                 ret = -EOPNOTSUPP;
2378                 goto out_caps;
2379         }
2380
2381         /*
2382          * Do a manual copy if the object offset isn't object aligned.
2383          * 'src_objlen' contains the bytes left until the end of the object,
2384          * starting at src_off.
2385          */
2386         if (src_objoff) {
2387                 dout("Initial partial copy of %u bytes\n", src_objlen);
2388
2389                 /*
2390                  * we need to temporarily drop all caps as we'll be calling
2391                  * {read,write}_iter, which will get caps again.
2392                  */
2393                 put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
2394                 ret = do_splice_direct(src_file, &src_off, dst_file,
2395                                        &dst_off, src_objlen, flags);
2396                 /* Abort on short copies or on error */
2397                 if (ret < src_objlen) {
2398                         dout("Failed partial copy (%zd)\n", ret);
2399                         goto out;
2400                 }
2401                 len -= ret;
2402                 err = get_rd_wr_caps(src_file, &src_got,
2403                                      dst_file, (dst_off + len), &dst_got);
2404                 if (err < 0)
2405                         goto out;
2406                 err = is_file_size_ok(src_inode, dst_inode,
2407                                       src_off, dst_off, len);
2408                 if (err < 0)
2409                         goto out_caps;
2410         }
2411
2412         size = i_size_read(dst_inode);
2413         bytes = ceph_do_objects_copy(src_ci, &src_off, dst_ci, &dst_off,
2414                                      src_fsc, len, flags);
2415         if (bytes <= 0) {
2416                 if (!ret)
2417                         ret = bytes;
2418                 goto out_caps;
2419         }
2420         dout("Copied %zu bytes out of %zu\n", bytes, len);
2421         len -= bytes;
2422         ret += bytes;
2423
2424         file_update_time(dst_file);
2425         inode_inc_iversion_raw(dst_inode);
2426
2427         if (dst_off > size) {
2428                 /* Let the MDS know about dst file size change */
2429                 if (ceph_inode_set_size(dst_inode, dst_off) ||
2430                     ceph_quota_is_max_bytes_approaching(dst_inode, dst_off))
2431                         ceph_check_caps(dst_ci, CHECK_CAPS_AUTHONLY, NULL);
2432         }
2433         /* Mark Fw dirty */
2434         spin_lock(&dst_ci->i_ceph_lock);
2435         dst_ci->i_inline_version = CEPH_INLINE_NONE;
2436         dirty = __ceph_mark_dirty_caps(dst_ci, CEPH_CAP_FILE_WR, &prealloc_cf);
2437         spin_unlock(&dst_ci->i_ceph_lock);
2438         if (dirty)
2439                 __mark_inode_dirty(dst_inode, dirty);
2440
2441 out_caps:
2442         put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
2443
2444         /*
2445          * Do the final manual copy if we still have some bytes left, unless
2446          * the remote object copies failed (in which case len >= object_size).
2447          */
2448         if (len && (len < src_ci->i_layout.object_size)) {
2449                 dout("Final partial copy of %zu bytes\n", len);
2450                 bytes = do_splice_direct(src_file, &src_off, dst_file,
2451                                          &dst_off, len, flags);
2452                 if (bytes > 0)
2453                         ret += bytes;
2454                 else
2455                         dout("Failed partial copy (%zd)\n", bytes);
2456         }
2457
2458 out:
2459         ceph_free_cap_flush(prealloc_cf);
2460
2461         return ret;
2462 }
2463
2464 static ssize_t ceph_copy_file_range(struct file *src_file, loff_t src_off,
2465                                     struct file *dst_file, loff_t dst_off,
2466                                     size_t len, unsigned int flags)
2467 {
2468         ssize_t ret;
2469
2470         ret = __ceph_copy_file_range(src_file, src_off, dst_file, dst_off,
2471                                      len, flags);
2472
2473         if (ret == -EOPNOTSUPP || ret == -EXDEV)
2474                 ret = generic_copy_file_range(src_file, src_off, dst_file,
2475                                               dst_off, len, flags);
2476         return ret;
2477 }
2478
2479 const struct file_operations ceph_file_fops = {
2480         .open = ceph_open,
2481         .release = ceph_release,
2482         .llseek = ceph_llseek,
2483         .read_iter = ceph_read_iter,
2484         .write_iter = ceph_write_iter,
2485         .mmap = ceph_mmap,
2486         .fsync = ceph_fsync,
2487         .lock = ceph_lock,
2488         .setlease = simple_nosetlease,
2489         .flock = ceph_flock,
2490         .splice_read = generic_file_splice_read,
2491         .splice_write = iter_file_splice_write,
2492         .unlocked_ioctl = ceph_ioctl,
2493         .compat_ioctl = compat_ptr_ioctl,
2494         .fallocate      = ceph_fallocate,
2495         .copy_file_range = ceph_copy_file_range,
2496 };