iov_iter: advancing variants of iov_iter_get_pages{,_alloc}()
[linux-2.6-microblaze.git] / fs / ceph / file.c
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/ceph/ceph_debug.h>
3 #include <linux/ceph/striper.h>
4
5 #include <linux/module.h>
6 #include <linux/sched.h>
7 #include <linux/slab.h>
8 #include <linux/file.h>
9 #include <linux/mount.h>
10 #include <linux/namei.h>
11 #include <linux/writeback.h>
12 #include <linux/falloc.h>
13 #include <linux/iversion.h>
14 #include <linux/ktime.h>
15
16 #include "super.h"
17 #include "mds_client.h"
18 #include "cache.h"
19 #include "io.h"
20 #include "metric.h"
21
22 static __le32 ceph_flags_sys2wire(u32 flags)
23 {
24         u32 wire_flags = 0;
25
26         switch (flags & O_ACCMODE) {
27         case O_RDONLY:
28                 wire_flags |= CEPH_O_RDONLY;
29                 break;
30         case O_WRONLY:
31                 wire_flags |= CEPH_O_WRONLY;
32                 break;
33         case O_RDWR:
34                 wire_flags |= CEPH_O_RDWR;
35                 break;
36         }
37
38         flags &= ~O_ACCMODE;
39
40 #define ceph_sys2wire(a) if (flags & a) { wire_flags |= CEPH_##a; flags &= ~a; }
41
42         ceph_sys2wire(O_CREAT);
43         ceph_sys2wire(O_EXCL);
44         ceph_sys2wire(O_TRUNC);
45         ceph_sys2wire(O_DIRECTORY);
46         ceph_sys2wire(O_NOFOLLOW);
47
48 #undef ceph_sys2wire
49
50         if (flags)
51                 dout("unused open flags: %x\n", flags);
52
53         return cpu_to_le32(wire_flags);
54 }
55
56 /*
57  * Ceph file operations
58  *
59  * Implement basic open/close functionality, and implement
60  * read/write.
61  *
62  * We implement three modes of file I/O:
63  *  - buffered uses the generic_file_aio_{read,write} helpers
64  *
65  *  - synchronous is used when there is multi-client read/write
66  *    sharing, avoids the page cache, and synchronously waits for an
67  *    ack from the OSD.
68  *
69  *  - direct io takes the variant of the sync path that references
70  *    user pages directly.
71  *
72  * fsync() flushes and waits on dirty pages, but just queues metadata
73  * for writeback: since the MDS can recover size and mtime there is no
74  * need to wait for MDS acknowledgement.
75  */
76
77 /*
78  * How many pages to get in one call to iov_iter_get_pages().  This
79  * determines the size of the on-stack array used as a buffer.
80  */
81 #define ITER_GET_BVECS_PAGES    64
82
83 static ssize_t __iter_get_bvecs(struct iov_iter *iter, size_t maxsize,
84                                 struct bio_vec *bvecs)
85 {
86         size_t size = 0;
87         int bvec_idx = 0;
88
89         if (maxsize > iov_iter_count(iter))
90                 maxsize = iov_iter_count(iter);
91
92         while (size < maxsize) {
93                 struct page *pages[ITER_GET_BVECS_PAGES];
94                 ssize_t bytes;
95                 size_t start;
96                 int idx = 0;
97
98                 bytes = iov_iter_get_pages2(iter, pages, maxsize - size,
99                                            ITER_GET_BVECS_PAGES, &start);
100                 if (bytes < 0)
101                         return size ?: bytes;
102
103                 size += bytes;
104
105                 for ( ; bytes; idx++, bvec_idx++) {
106                         struct bio_vec bv = {
107                                 .bv_page = pages[idx],
108                                 .bv_len = min_t(int, bytes, PAGE_SIZE - start),
109                                 .bv_offset = start,
110                         };
111
112                         bvecs[bvec_idx] = bv;
113                         bytes -= bv.bv_len;
114                         start = 0;
115                 }
116         }
117
118         return size;
119 }
120
121 /*
122  * iov_iter_get_pages() only considers one iov_iter segment, no matter
123  * what maxsize or maxpages are given.  For ITER_BVEC that is a single
124  * page.
125  *
126  * Attempt to get up to @maxsize bytes worth of pages from @iter.
127  * Return the number of bytes in the created bio_vec array, or an error.
128  */
129 static ssize_t iter_get_bvecs_alloc(struct iov_iter *iter, size_t maxsize,
130                                     struct bio_vec **bvecs, int *num_bvecs)
131 {
132         struct bio_vec *bv;
133         size_t orig_count = iov_iter_count(iter);
134         ssize_t bytes;
135         int npages;
136
137         iov_iter_truncate(iter, maxsize);
138         npages = iov_iter_npages(iter, INT_MAX);
139         iov_iter_reexpand(iter, orig_count);
140
141         /*
142          * __iter_get_bvecs() may populate only part of the array -- zero it
143          * out.
144          */
145         bv = kvmalloc_array(npages, sizeof(*bv), GFP_KERNEL | __GFP_ZERO);
146         if (!bv)
147                 return -ENOMEM;
148
149         bytes = __iter_get_bvecs(iter, maxsize, bv);
150         if (bytes < 0) {
151                 /*
152                  * No pages were pinned -- just free the array.
153                  */
154                 kvfree(bv);
155                 return bytes;
156         }
157
158         *bvecs = bv;
159         *num_bvecs = npages;
160         return bytes;
161 }
162
163 static void put_bvecs(struct bio_vec *bvecs, int num_bvecs, bool should_dirty)
164 {
165         int i;
166
167         for (i = 0; i < num_bvecs; i++) {
168                 if (bvecs[i].bv_page) {
169                         if (should_dirty)
170                                 set_page_dirty_lock(bvecs[i].bv_page);
171                         put_page(bvecs[i].bv_page);
172                 }
173         }
174         kvfree(bvecs);
175 }
176
177 /*
178  * Prepare an open request.  Preallocate ceph_cap to avoid an
179  * inopportune ENOMEM later.
180  */
181 static struct ceph_mds_request *
182 prepare_open_request(struct super_block *sb, int flags, int create_mode)
183 {
184         struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(sb);
185         struct ceph_mds_request *req;
186         int want_auth = USE_ANY_MDS;
187         int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;
188
189         if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
190                 want_auth = USE_AUTH_MDS;
191
192         req = ceph_mdsc_create_request(mdsc, op, want_auth);
193         if (IS_ERR(req))
194                 goto out;
195         req->r_fmode = ceph_flags_to_mode(flags);
196         req->r_args.open.flags = ceph_flags_sys2wire(flags);
197         req->r_args.open.mode = cpu_to_le32(create_mode);
198 out:
199         return req;
200 }
201
202 static int ceph_init_file_info(struct inode *inode, struct file *file,
203                                         int fmode, bool isdir)
204 {
205         struct ceph_inode_info *ci = ceph_inode(inode);
206         struct ceph_mount_options *opt =
207                 ceph_inode_to_client(&ci->netfs.inode)->mount_options;
208         struct ceph_file_info *fi;
209         int ret;
210
211         dout("%s %p %p 0%o (%s)\n", __func__, inode, file,
212                         inode->i_mode, isdir ? "dir" : "regular");
213         BUG_ON(inode->i_fop->release != ceph_release);
214
215         if (isdir) {
216                 struct ceph_dir_file_info *dfi =
217                         kmem_cache_zalloc(ceph_dir_file_cachep, GFP_KERNEL);
218                 if (!dfi)
219                         return -ENOMEM;
220
221                 file->private_data = dfi;
222                 fi = &dfi->file_info;
223                 dfi->next_offset = 2;
224                 dfi->readdir_cache_idx = -1;
225         } else {
226                 fi = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
227                 if (!fi)
228                         return -ENOMEM;
229
230                 if (opt->flags & CEPH_MOUNT_OPT_NOPAGECACHE)
231                         fi->flags |= CEPH_F_SYNC;
232
233                 file->private_data = fi;
234         }
235
236         ceph_get_fmode(ci, fmode, 1);
237         fi->fmode = fmode;
238
239         spin_lock_init(&fi->rw_contexts_lock);
240         INIT_LIST_HEAD(&fi->rw_contexts);
241         fi->filp_gen = READ_ONCE(ceph_inode_to_client(inode)->filp_gen);
242
243         if ((file->f_mode & FMODE_WRITE) &&
244             ci->i_inline_version != CEPH_INLINE_NONE) {
245                 ret = ceph_uninline_data(file);
246                 if (ret < 0)
247                         goto error;
248         }
249
250         return 0;
251
252 error:
253         ceph_fscache_unuse_cookie(inode, file->f_mode & FMODE_WRITE);
254         ceph_put_fmode(ci, fi->fmode, 1);
255         kmem_cache_free(ceph_file_cachep, fi);
256         /* wake up anyone waiting for caps on this inode */
257         wake_up_all(&ci->i_cap_wq);
258         return ret;
259 }
260
261 /*
262  * initialize private struct file data.
263  * if we fail, clean up by dropping fmode reference on the ceph_inode
264  */
265 static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
266 {
267         int ret = 0;
268
269         switch (inode->i_mode & S_IFMT) {
270         case S_IFREG:
271                 ceph_fscache_use_cookie(inode, file->f_mode & FMODE_WRITE);
272                 fallthrough;
273         case S_IFDIR:
274                 ret = ceph_init_file_info(inode, file, fmode,
275                                                 S_ISDIR(inode->i_mode));
276                 break;
277
278         case S_IFLNK:
279                 dout("init_file %p %p 0%o (symlink)\n", inode, file,
280                      inode->i_mode);
281                 break;
282
283         default:
284                 dout("init_file %p %p 0%o (special)\n", inode, file,
285                      inode->i_mode);
286                 /*
287                  * we need to drop the open ref now, since we don't
288                  * have .release set to ceph_release.
289                  */
290                 BUG_ON(inode->i_fop->release == ceph_release);
291
292                 /* call the proper open fop */
293                 ret = inode->i_fop->open(inode, file);
294         }
295         return ret;
296 }
297
298 /*
299  * try renew caps after session gets killed.
300  */
301 int ceph_renew_caps(struct inode *inode, int fmode)
302 {
303         struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
304         struct ceph_inode_info *ci = ceph_inode(inode);
305         struct ceph_mds_request *req;
306         int err, flags, wanted;
307
308         spin_lock(&ci->i_ceph_lock);
309         __ceph_touch_fmode(ci, mdsc, fmode);
310         wanted = __ceph_caps_file_wanted(ci);
311         if (__ceph_is_any_real_caps(ci) &&
312             (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
313                 int issued = __ceph_caps_issued(ci, NULL);
314                 spin_unlock(&ci->i_ceph_lock);
315                 dout("renew caps %p want %s issued %s updating mds_wanted\n",
316                      inode, ceph_cap_string(wanted), ceph_cap_string(issued));
317                 ceph_check_caps(ci, 0, NULL);
318                 return 0;
319         }
320         spin_unlock(&ci->i_ceph_lock);
321
322         flags = 0;
323         if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR))
324                 flags = O_RDWR;
325         else if (wanted & CEPH_CAP_FILE_RD)
326                 flags = O_RDONLY;
327         else if (wanted & CEPH_CAP_FILE_WR)
328                 flags = O_WRONLY;
329 #ifdef O_LAZY
330         if (wanted & CEPH_CAP_FILE_LAZYIO)
331                 flags |= O_LAZY;
332 #endif
333
334         req = prepare_open_request(inode->i_sb, flags, 0);
335         if (IS_ERR(req)) {
336                 err = PTR_ERR(req);
337                 goto out;
338         }
339
340         req->r_inode = inode;
341         ihold(inode);
342         req->r_num_caps = 1;
343
344         err = ceph_mdsc_do_request(mdsc, NULL, req);
345         ceph_mdsc_put_request(req);
346 out:
347         dout("renew caps %p open result=%d\n", inode, err);
348         return err < 0 ? err : 0;
349 }
350
351 /*
352  * If we already have the requisite capabilities, we can satisfy
353  * the open request locally (no need to request new caps from the
354  * MDS).  We do, however, need to inform the MDS (asynchronously)
355  * if our wanted caps set expands.
356  */
357 int ceph_open(struct inode *inode, struct file *file)
358 {
359         struct ceph_inode_info *ci = ceph_inode(inode);
360         struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
361         struct ceph_mds_client *mdsc = fsc->mdsc;
362         struct ceph_mds_request *req;
363         struct ceph_file_info *fi = file->private_data;
364         int err;
365         int flags, fmode, wanted;
366
367         if (fi) {
368                 dout("open file %p is already opened\n", file);
369                 return 0;
370         }
371
372         /* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
373         flags = file->f_flags & ~(O_CREAT|O_EXCL);
374         if (S_ISDIR(inode->i_mode))
375                 flags = O_DIRECTORY;  /* mds likes to know */
376
377         dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
378              ceph_vinop(inode), file, flags, file->f_flags);
379         fmode = ceph_flags_to_mode(flags);
380         wanted = ceph_caps_for_mode(fmode);
381
382         /* snapped files are read-only */
383         if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
384                 return -EROFS;
385
386         /* trivially open snapdir */
387         if (ceph_snap(inode) == CEPH_SNAPDIR) {
388                 return ceph_init_file(inode, file, fmode);
389         }
390
391         /*
392          * No need to block if we have caps on the auth MDS (for
393          * write) or any MDS (for read).  Update wanted set
394          * asynchronously.
395          */
396         spin_lock(&ci->i_ceph_lock);
397         if (__ceph_is_any_real_caps(ci) &&
398             (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
399                 int mds_wanted = __ceph_caps_mds_wanted(ci, true);
400                 int issued = __ceph_caps_issued(ci, NULL);
401
402                 dout("open %p fmode %d want %s issued %s using existing\n",
403                      inode, fmode, ceph_cap_string(wanted),
404                      ceph_cap_string(issued));
405                 __ceph_touch_fmode(ci, mdsc, fmode);
406                 spin_unlock(&ci->i_ceph_lock);
407
408                 /* adjust wanted? */
409                 if ((issued & wanted) != wanted &&
410                     (mds_wanted & wanted) != wanted &&
411                     ceph_snap(inode) != CEPH_SNAPDIR)
412                         ceph_check_caps(ci, 0, NULL);
413
414                 return ceph_init_file(inode, file, fmode);
415         } else if (ceph_snap(inode) != CEPH_NOSNAP &&
416                    (ci->i_snap_caps & wanted) == wanted) {
417                 __ceph_touch_fmode(ci, mdsc, fmode);
418                 spin_unlock(&ci->i_ceph_lock);
419                 return ceph_init_file(inode, file, fmode);
420         }
421
422         spin_unlock(&ci->i_ceph_lock);
423
424         dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
425         req = prepare_open_request(inode->i_sb, flags, 0);
426         if (IS_ERR(req)) {
427                 err = PTR_ERR(req);
428                 goto out;
429         }
430         req->r_inode = inode;
431         ihold(inode);
432
433         req->r_num_caps = 1;
434         err = ceph_mdsc_do_request(mdsc, NULL, req);
435         if (!err)
436                 err = ceph_init_file(inode, file, req->r_fmode);
437         ceph_mdsc_put_request(req);
438         dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
439 out:
440         return err;
441 }
442
443 /* Clone the layout from a synchronous create, if the dir now has Dc caps */
444 static void
445 cache_file_layout(struct inode *dst, struct inode *src)
446 {
447         struct ceph_inode_info *cdst = ceph_inode(dst);
448         struct ceph_inode_info *csrc = ceph_inode(src);
449
450         spin_lock(&cdst->i_ceph_lock);
451         if ((__ceph_caps_issued(cdst, NULL) & CEPH_CAP_DIR_CREATE) &&
452             !ceph_file_layout_is_valid(&cdst->i_cached_layout)) {
453                 memcpy(&cdst->i_cached_layout, &csrc->i_layout,
454                         sizeof(cdst->i_cached_layout));
455                 rcu_assign_pointer(cdst->i_cached_layout.pool_ns,
456                                    ceph_try_get_string(csrc->i_layout.pool_ns));
457         }
458         spin_unlock(&cdst->i_ceph_lock);
459 }
460
461 /*
462  * Try to set up an async create. We need caps, a file layout, and inode number,
463  * and either a lease on the dentry or complete dir info. If any of those
464  * criteria are not satisfied, then return false and the caller can go
465  * synchronous.
466  */
467 static int try_prep_async_create(struct inode *dir, struct dentry *dentry,
468                                  struct ceph_file_layout *lo, u64 *pino)
469 {
470         struct ceph_inode_info *ci = ceph_inode(dir);
471         struct ceph_dentry_info *di = ceph_dentry(dentry);
472         int got = 0, want = CEPH_CAP_FILE_EXCL | CEPH_CAP_DIR_CREATE;
473         u64 ino;
474
475         spin_lock(&ci->i_ceph_lock);
476         /* No auth cap means no chance for Dc caps */
477         if (!ci->i_auth_cap)
478                 goto no_async;
479
480         /* Any delegated inos? */
481         if (xa_empty(&ci->i_auth_cap->session->s_delegated_inos))
482                 goto no_async;
483
484         if (!ceph_file_layout_is_valid(&ci->i_cached_layout))
485                 goto no_async;
486
487         if ((__ceph_caps_issued(ci, NULL) & want) != want)
488                 goto no_async;
489
490         if (d_in_lookup(dentry)) {
491                 if (!__ceph_dir_is_complete(ci))
492                         goto no_async;
493                 spin_lock(&dentry->d_lock);
494                 di->lease_shared_gen = atomic_read(&ci->i_shared_gen);
495                 spin_unlock(&dentry->d_lock);
496         } else if (atomic_read(&ci->i_shared_gen) !=
497                    READ_ONCE(di->lease_shared_gen)) {
498                 goto no_async;
499         }
500
501         ino = ceph_get_deleg_ino(ci->i_auth_cap->session);
502         if (!ino)
503                 goto no_async;
504
505         *pino = ino;
506         ceph_take_cap_refs(ci, want, false);
507         memcpy(lo, &ci->i_cached_layout, sizeof(*lo));
508         rcu_assign_pointer(lo->pool_ns,
509                            ceph_try_get_string(ci->i_cached_layout.pool_ns));
510         got = want;
511 no_async:
512         spin_unlock(&ci->i_ceph_lock);
513         return got;
514 }
515
516 static void restore_deleg_ino(struct inode *dir, u64 ino)
517 {
518         struct ceph_inode_info *ci = ceph_inode(dir);
519         struct ceph_mds_session *s = NULL;
520
521         spin_lock(&ci->i_ceph_lock);
522         if (ci->i_auth_cap)
523                 s = ceph_get_mds_session(ci->i_auth_cap->session);
524         spin_unlock(&ci->i_ceph_lock);
525         if (s) {
526                 int err = ceph_restore_deleg_ino(s, ino);
527                 if (err)
528                         pr_warn("ceph: unable to restore delegated ino 0x%llx to session: %d\n",
529                                 ino, err);
530                 ceph_put_mds_session(s);
531         }
532 }
533
534 static void wake_async_create_waiters(struct inode *inode,
535                                       struct ceph_mds_session *session)
536 {
537         struct ceph_inode_info *ci = ceph_inode(inode);
538
539         spin_lock(&ci->i_ceph_lock);
540         if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE) {
541                 ci->i_ceph_flags &= ~CEPH_I_ASYNC_CREATE;
542                 wake_up_bit(&ci->i_ceph_flags, CEPH_ASYNC_CREATE_BIT);
543         }
544         ceph_kick_flushing_inode_caps(session, ci);
545         spin_unlock(&ci->i_ceph_lock);
546 }
547
548 static void ceph_async_create_cb(struct ceph_mds_client *mdsc,
549                                  struct ceph_mds_request *req)
550 {
551         struct dentry *dentry = req->r_dentry;
552         struct inode *dinode = d_inode(dentry);
553         struct inode *tinode = req->r_target_inode;
554         int result = req->r_err ? req->r_err :
555                         le32_to_cpu(req->r_reply_info.head->result);
556
557         WARN_ON_ONCE(dinode && tinode && dinode != tinode);
558
559         /* MDS changed -- caller must resubmit */
560         if (result == -EJUKEBOX)
561                 goto out;
562
563         mapping_set_error(req->r_parent->i_mapping, result);
564
565         if (result) {
566                 int pathlen = 0;
567                 u64 base = 0;
568                 char *path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
569                                                   &base, 0);
570
571                 pr_warn("ceph: async create failure path=(%llx)%s result=%d!\n",
572                         base, IS_ERR(path) ? "<<bad>>" : path, result);
573                 ceph_mdsc_free_path(path, pathlen);
574
575                 ceph_dir_clear_complete(req->r_parent);
576                 if (!d_unhashed(dentry))
577                         d_drop(dentry);
578
579                 if (dinode) {
580                         mapping_set_error(dinode->i_mapping, result);
581                         ceph_inode_shutdown(dinode);
582                         wake_async_create_waiters(dinode, req->r_session);
583                 }
584         }
585
586         if (tinode) {
587                 u64 ino = ceph_vino(tinode).ino;
588
589                 if (req->r_deleg_ino != ino)
590                         pr_warn("%s: inode number mismatch! err=%d deleg_ino=0x%llx target=0x%llx\n",
591                                 __func__, req->r_err, req->r_deleg_ino, ino);
592
593                 mapping_set_error(tinode->i_mapping, result);
594                 wake_async_create_waiters(tinode, req->r_session);
595         } else if (!result) {
596                 pr_warn("%s: no req->r_target_inode for 0x%llx\n", __func__,
597                         req->r_deleg_ino);
598         }
599 out:
600         ceph_mdsc_release_dir_caps(req);
601 }
602
603 static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
604                                     struct file *file, umode_t mode,
605                                     struct ceph_mds_request *req,
606                                     struct ceph_acl_sec_ctx *as_ctx,
607                                     struct ceph_file_layout *lo)
608 {
609         int ret;
610         char xattr_buf[4];
611         struct ceph_mds_reply_inode in = { };
612         struct ceph_mds_reply_info_in iinfo = { .in = &in };
613         struct ceph_inode_info *ci = ceph_inode(dir);
614         struct inode *inode;
615         struct timespec64 now;
616         struct ceph_string *pool_ns;
617         struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
618         struct ceph_vino vino = { .ino = req->r_deleg_ino,
619                                   .snap = CEPH_NOSNAP };
620
621         ktime_get_real_ts64(&now);
622
623         inode = ceph_get_inode(dentry->d_sb, vino);
624         if (IS_ERR(inode))
625                 return PTR_ERR(inode);
626
627         iinfo.inline_version = CEPH_INLINE_NONE;
628         iinfo.change_attr = 1;
629         ceph_encode_timespec64(&iinfo.btime, &now);
630
631         if (req->r_pagelist) {
632                 iinfo.xattr_len = req->r_pagelist->length;
633                 iinfo.xattr_data = req->r_pagelist->mapped_tail;
634         } else {
635                 /* fake it */
636                 iinfo.xattr_len = ARRAY_SIZE(xattr_buf);
637                 iinfo.xattr_data = xattr_buf;
638                 memset(iinfo.xattr_data, 0, iinfo.xattr_len);
639         }
640
641         in.ino = cpu_to_le64(vino.ino);
642         in.snapid = cpu_to_le64(CEPH_NOSNAP);
643         in.version = cpu_to_le64(1);    // ???
644         in.cap.caps = in.cap.wanted = cpu_to_le32(CEPH_CAP_ALL_FILE);
645         in.cap.cap_id = cpu_to_le64(1);
646         in.cap.realm = cpu_to_le64(ci->i_snap_realm->ino);
647         in.cap.flags = CEPH_CAP_FLAG_AUTH;
648         in.ctime = in.mtime = in.atime = iinfo.btime;
649         in.truncate_seq = cpu_to_le32(1);
650         in.truncate_size = cpu_to_le64(-1ULL);
651         in.xattr_version = cpu_to_le64(1);
652         in.uid = cpu_to_le32(from_kuid(&init_user_ns, current_fsuid()));
653         if (dir->i_mode & S_ISGID) {
654                 in.gid = cpu_to_le32(from_kgid(&init_user_ns, dir->i_gid));
655
656                 /* Directories always inherit the setgid bit. */
657                 if (S_ISDIR(mode))
658                         mode |= S_ISGID;
659                 else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) &&
660                          !in_group_p(dir->i_gid) &&
661                          !capable_wrt_inode_uidgid(&init_user_ns, dir, CAP_FSETID))
662                         mode &= ~S_ISGID;
663         } else {
664                 in.gid = cpu_to_le32(from_kgid(&init_user_ns, current_fsgid()));
665         }
666         in.mode = cpu_to_le32((u32)mode);
667
668         in.nlink = cpu_to_le32(1);
669         in.max_size = cpu_to_le64(lo->stripe_unit);
670
671         ceph_file_layout_to_legacy(lo, &in.layout);
672         /* lo is private, so pool_ns can't change */
673         pool_ns = rcu_dereference_raw(lo->pool_ns);
674         if (pool_ns) {
675                 iinfo.pool_ns_len = pool_ns->len;
676                 iinfo.pool_ns_data = pool_ns->str;
677         }
678
679         down_read(&mdsc->snap_rwsem);
680         ret = ceph_fill_inode(inode, NULL, &iinfo, NULL, req->r_session,
681                               req->r_fmode, NULL);
682         up_read(&mdsc->snap_rwsem);
683         if (ret) {
684                 dout("%s failed to fill inode: %d\n", __func__, ret);
685                 ceph_dir_clear_complete(dir);
686                 if (!d_unhashed(dentry))
687                         d_drop(dentry);
688                 if (inode->i_state & I_NEW)
689                         discard_new_inode(inode);
690         } else {
691                 struct dentry *dn;
692
693                 dout("%s d_adding new inode 0x%llx to 0x%llx/%s\n", __func__,
694                         vino.ino, ceph_ino(dir), dentry->d_name.name);
695                 ceph_dir_clear_ordered(dir);
696                 ceph_init_inode_acls(inode, as_ctx);
697                 if (inode->i_state & I_NEW) {
698                         /*
699                          * If it's not I_NEW, then someone created this before
700                          * we got here. Assume the server is aware of it at
701                          * that point and don't worry about setting
702                          * CEPH_I_ASYNC_CREATE.
703                          */
704                         ceph_inode(inode)->i_ceph_flags = CEPH_I_ASYNC_CREATE;
705                         unlock_new_inode(inode);
706                 }
707                 if (d_in_lookup(dentry) || d_really_is_negative(dentry)) {
708                         if (!d_unhashed(dentry))
709                                 d_drop(dentry);
710                         dn = d_splice_alias(inode, dentry);
711                         WARN_ON_ONCE(dn && dn != dentry);
712                 }
713                 file->f_mode |= FMODE_CREATED;
714                 ret = finish_open(file, dentry, ceph_open);
715         }
716         return ret;
717 }
718
719 /*
720  * Do a lookup + open with a single request.  If we get a non-existent
721  * file or symlink, return 1 so the VFS can retry.
722  */
723 int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
724                      struct file *file, unsigned flags, umode_t mode)
725 {
726         struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
727         struct ceph_mds_client *mdsc = fsc->mdsc;
728         struct ceph_mds_request *req;
729         struct dentry *dn;
730         struct ceph_acl_sec_ctx as_ctx = {};
731         bool try_async = ceph_test_mount_opt(fsc, ASYNC_DIROPS);
732         int mask;
733         int err;
734
735         dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
736              dir, dentry, dentry,
737              d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);
738
739         if (dentry->d_name.len > NAME_MAX)
740                 return -ENAMETOOLONG;
741
742         if (flags & O_CREAT) {
743                 if (ceph_quota_is_max_files_exceeded(dir))
744                         return -EDQUOT;
745                 err = ceph_pre_init_acls(dir, &mode, &as_ctx);
746                 if (err < 0)
747                         return err;
748                 err = ceph_security_init_secctx(dentry, mode, &as_ctx);
749                 if (err < 0)
750                         goto out_ctx;
751                 /* Async create can't handle more than a page of xattrs */
752                 if (as_ctx.pagelist &&
753                     !list_is_singular(&as_ctx.pagelist->head))
754                         try_async = false;
755         } else if (!d_in_lookup(dentry)) {
756                 /* If it's not being looked up, it's negative */
757                 return -ENOENT;
758         }
759 retry:
760         /* do the open */
761         req = prepare_open_request(dir->i_sb, flags, mode);
762         if (IS_ERR(req)) {
763                 err = PTR_ERR(req);
764                 goto out_ctx;
765         }
766         req->r_dentry = dget(dentry);
767         req->r_num_caps = 2;
768         mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
769         if (ceph_security_xattr_wanted(dir))
770                 mask |= CEPH_CAP_XATTR_SHARED;
771         req->r_args.open.mask = cpu_to_le32(mask);
772         req->r_parent = dir;
773         ihold(dir);
774
775         if (flags & O_CREAT) {
776                 struct ceph_file_layout lo;
777
778                 req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
779                 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
780                 if (as_ctx.pagelist) {
781                         req->r_pagelist = as_ctx.pagelist;
782                         as_ctx.pagelist = NULL;
783                 }
784                 if (try_async &&
785                     (req->r_dir_caps =
786                       try_prep_async_create(dir, dentry, &lo,
787                                             &req->r_deleg_ino))) {
788                         set_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags);
789                         req->r_args.open.flags |= cpu_to_le32(CEPH_O_EXCL);
790                         req->r_callback = ceph_async_create_cb;
791                         err = ceph_mdsc_submit_request(mdsc, dir, req);
792                         if (!err) {
793                                 err = ceph_finish_async_create(dir, dentry,
794                                                         file, mode, req,
795                                                         &as_ctx, &lo);
796                         } else if (err == -EJUKEBOX) {
797                                 restore_deleg_ino(dir, req->r_deleg_ino);
798                                 ceph_mdsc_put_request(req);
799                                 try_async = false;
800                                 ceph_put_string(rcu_dereference_raw(lo.pool_ns));
801                                 goto retry;
802                         }
803                         ceph_put_string(rcu_dereference_raw(lo.pool_ns));
804                         goto out_req;
805                 }
806         }
807
808         set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
809         err = ceph_mdsc_do_request(mdsc,
810                                    (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
811                                    req);
812         if (err == -ENOENT) {
813                 dentry = ceph_handle_snapdir(req, dentry);
814                 if (IS_ERR(dentry)) {
815                         err = PTR_ERR(dentry);
816                         goto out_req;
817                 }
818                 err = 0;
819         }
820
821         if (!err && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
822                 err = ceph_handle_notrace_create(dir, dentry);
823
824         if (d_in_lookup(dentry)) {
825                 dn = ceph_finish_lookup(req, dentry, err);
826                 if (IS_ERR(dn))
827                         err = PTR_ERR(dn);
828         } else {
829                 /* we were given a hashed negative dentry */
830                 dn = NULL;
831         }
832         if (err)
833                 goto out_req;
834         if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
835                 /* make vfs retry on splice, ENOENT, or symlink */
836                 dout("atomic_open finish_no_open on dn %p\n", dn);
837                 err = finish_no_open(file, dn);
838         } else {
839                 dout("atomic_open finish_open on dn %p\n", dn);
840                 if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
841                         struct inode *newino = d_inode(dentry);
842
843                         cache_file_layout(dir, newino);
844                         ceph_init_inode_acls(newino, &as_ctx);
845                         file->f_mode |= FMODE_CREATED;
846                 }
847                 err = finish_open(file, dentry, ceph_open);
848         }
849 out_req:
850         ceph_mdsc_put_request(req);
851 out_ctx:
852         ceph_release_acl_sec_ctx(&as_ctx);
853         dout("atomic_open result=%d\n", err);
854         return err;
855 }
856
857 int ceph_release(struct inode *inode, struct file *file)
858 {
859         struct ceph_inode_info *ci = ceph_inode(inode);
860
861         if (S_ISDIR(inode->i_mode)) {
862                 struct ceph_dir_file_info *dfi = file->private_data;
863                 dout("release inode %p dir file %p\n", inode, file);
864                 WARN_ON(!list_empty(&dfi->file_info.rw_contexts));
865
866                 ceph_put_fmode(ci, dfi->file_info.fmode, 1);
867
868                 if (dfi->last_readdir)
869                         ceph_mdsc_put_request(dfi->last_readdir);
870                 kfree(dfi->last_name);
871                 kfree(dfi->dir_info);
872                 kmem_cache_free(ceph_dir_file_cachep, dfi);
873         } else {
874                 struct ceph_file_info *fi = file->private_data;
875                 dout("release inode %p regular file %p\n", inode, file);
876                 WARN_ON(!list_empty(&fi->rw_contexts));
877
878                 ceph_fscache_unuse_cookie(inode, file->f_mode & FMODE_WRITE);
879                 ceph_put_fmode(ci, fi->fmode, 1);
880
881                 kmem_cache_free(ceph_file_cachep, fi);
882         }
883
884         /* wake up anyone waiting for caps on this inode */
885         wake_up_all(&ci->i_cap_wq);
886         return 0;
887 }
888
889 enum {
890         HAVE_RETRIED = 1,
891         CHECK_EOF =    2,
892         READ_INLINE =  3,
893 };
894
895 /*
896  * Completely synchronous read and write methods.  Direct from __user
897  * buffer to osd, or directly to user pages (if O_DIRECT).
898  *
899  * If the read spans object boundary, just do multiple reads.  (That's not
900  * atomic, but good enough for now.)
901  *
902  * If we get a short result from the OSD, check against i_size; we need to
903  * only return a short read to the caller if we hit EOF.
904  */
905 static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
906                               int *retry_op)
907 {
908         struct file *file = iocb->ki_filp;
909         struct inode *inode = file_inode(file);
910         struct ceph_inode_info *ci = ceph_inode(inode);
911         struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
912         struct ceph_osd_client *osdc = &fsc->client->osdc;
913         ssize_t ret;
914         u64 off = iocb->ki_pos;
915         u64 len = iov_iter_count(to);
916         u64 i_size = i_size_read(inode);
917
918         dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len,
919              (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
920
921         if (!len)
922                 return 0;
923         /*
924          * flush any page cache pages in this range.  this
925          * will make concurrent normal and sync io slow,
926          * but it will at least behave sensibly when they are
927          * in sequence.
928          */
929         ret = filemap_write_and_wait_range(inode->i_mapping,
930                                            off, off + len - 1);
931         if (ret < 0)
932                 return ret;
933
934         ret = 0;
935         while ((len = iov_iter_count(to)) > 0) {
936                 struct ceph_osd_request *req;
937                 struct page **pages;
938                 int num_pages;
939                 size_t page_off;
940                 bool more;
941                 int idx;
942                 size_t left;
943
944                 req = ceph_osdc_new_request(osdc, &ci->i_layout,
945                                         ci->i_vino, off, &len, 0, 1,
946                                         CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
947                                         NULL, ci->i_truncate_seq,
948                                         ci->i_truncate_size, false);
949                 if (IS_ERR(req)) {
950                         ret = PTR_ERR(req);
951                         break;
952                 }
953
954                 more = len < iov_iter_count(to);
955
956                 num_pages = calc_pages_for(off, len);
957                 page_off = off & ~PAGE_MASK;
958                 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
959                 if (IS_ERR(pages)) {
960                         ceph_osdc_put_request(req);
961                         ret = PTR_ERR(pages);
962                         break;
963                 }
964
965                 osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_off,
966                                                  false, false);
967                 ret = ceph_osdc_start_request(osdc, req, false);
968                 if (!ret)
969                         ret = ceph_osdc_wait_request(osdc, req);
970
971                 ceph_update_read_metrics(&fsc->mdsc->metric,
972                                          req->r_start_latency,
973                                          req->r_end_latency,
974                                          len, ret);
975
976                 ceph_osdc_put_request(req);
977
978                 i_size = i_size_read(inode);
979                 dout("sync_read %llu~%llu got %zd i_size %llu%s\n",
980                      off, len, ret, i_size, (more ? " MORE" : ""));
981
982                 if (ret == -ENOENT)
983                         ret = 0;
984                 if (ret >= 0 && ret < len && (off + ret < i_size)) {
985                         int zlen = min(len - ret, i_size - off - ret);
986                         int zoff = page_off + ret;
987                         dout("sync_read zero gap %llu~%llu\n",
988                              off + ret, off + ret + zlen);
989                         ceph_zero_page_vector_range(zoff, zlen, pages);
990                         ret += zlen;
991                 }
992
993                 idx = 0;
994                 left = ret > 0 ? ret : 0;
995                 while (left > 0) {
996                         size_t len, copied;
997                         page_off = off & ~PAGE_MASK;
998                         len = min_t(size_t, left, PAGE_SIZE - page_off);
999                         SetPageUptodate(pages[idx]);
1000                         copied = copy_page_to_iter(pages[idx++],
1001                                                    page_off, len, to);
1002                         off += copied;
1003                         left -= copied;
1004                         if (copied < len) {
1005                                 ret = -EFAULT;
1006                                 break;
1007                         }
1008                 }
1009                 ceph_release_page_vector(pages, num_pages);
1010
1011                 if (ret < 0) {
1012                         if (ret == -EBLOCKLISTED)
1013                                 fsc->blocklisted = true;
1014                         break;
1015                 }
1016
1017                 if (off >= i_size || !more)
1018                         break;
1019         }
1020
1021         if (off > iocb->ki_pos) {
1022                 if (off >= i_size) {
1023                         *retry_op = CHECK_EOF;
1024                         ret = i_size - iocb->ki_pos;
1025                         iocb->ki_pos = i_size;
1026                 } else {
1027                         ret = off - iocb->ki_pos;
1028                         iocb->ki_pos = off;
1029                 }
1030         }
1031
1032         dout("sync_read result %zd retry_op %d\n", ret, *retry_op);
1033         return ret;
1034 }
1035
1036 struct ceph_aio_request {
1037         struct kiocb *iocb;
1038         size_t total_len;
1039         bool write;
1040         bool should_dirty;
1041         int error;
1042         struct list_head osd_reqs;
1043         unsigned num_reqs;
1044         atomic_t pending_reqs;
1045         struct timespec64 mtime;
1046         struct ceph_cap_flush *prealloc_cf;
1047 };
1048
1049 struct ceph_aio_work {
1050         struct work_struct work;
1051         struct ceph_osd_request *req;
1052 };
1053
1054 static void ceph_aio_retry_work(struct work_struct *work);
1055
1056 static void ceph_aio_complete(struct inode *inode,
1057                               struct ceph_aio_request *aio_req)
1058 {
1059         struct ceph_inode_info *ci = ceph_inode(inode);
1060         int ret;
1061
1062         if (!atomic_dec_and_test(&aio_req->pending_reqs))
1063                 return;
1064
1065         if (aio_req->iocb->ki_flags & IOCB_DIRECT)
1066                 inode_dio_end(inode);
1067
1068         ret = aio_req->error;
1069         if (!ret)
1070                 ret = aio_req->total_len;
1071
1072         dout("ceph_aio_complete %p rc %d\n", inode, ret);
1073
1074         if (ret >= 0 && aio_req->write) {
1075                 int dirty;
1076
1077                 loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
1078                 if (endoff > i_size_read(inode)) {
1079                         if (ceph_inode_set_size(inode, endoff))
1080                                 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
1081                 }
1082
1083                 spin_lock(&ci->i_ceph_lock);
1084                 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
1085                                                &aio_req->prealloc_cf);
1086                 spin_unlock(&ci->i_ceph_lock);
1087                 if (dirty)
1088                         __mark_inode_dirty(inode, dirty);
1089
1090         }
1091
1092         ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
1093                                                 CEPH_CAP_FILE_RD));
1094
1095         aio_req->iocb->ki_complete(aio_req->iocb, ret);
1096
1097         ceph_free_cap_flush(aio_req->prealloc_cf);
1098         kfree(aio_req);
1099 }
1100
1101 static void ceph_aio_complete_req(struct ceph_osd_request *req)
1102 {
1103         int rc = req->r_result;
1104         struct inode *inode = req->r_inode;
1105         struct ceph_aio_request *aio_req = req->r_priv;
1106         struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
1107         struct ceph_client_metric *metric = &ceph_sb_to_mdsc(inode->i_sb)->metric;
1108         unsigned int len = osd_data->bvec_pos.iter.bi_size;
1109
1110         BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS);
1111         BUG_ON(!osd_data->num_bvecs);
1112
1113         dout("ceph_aio_complete_req %p rc %d bytes %u\n", inode, rc, len);
1114
1115         if (rc == -EOLDSNAPC) {
1116                 struct ceph_aio_work *aio_work;
1117                 BUG_ON(!aio_req->write);
1118
1119                 aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
1120                 if (aio_work) {
1121                         INIT_WORK(&aio_work->work, ceph_aio_retry_work);
1122                         aio_work->req = req;
1123                         queue_work(ceph_inode_to_client(inode)->inode_wq,
1124                                    &aio_work->work);
1125                         return;
1126                 }
1127                 rc = -ENOMEM;
1128         } else if (!aio_req->write) {
1129                 if (rc == -ENOENT)
1130                         rc = 0;
1131                 if (rc >= 0 && len > rc) {
1132                         struct iov_iter i;
1133                         int zlen = len - rc;
1134
1135                         /*
1136                          * If read is satisfied by single OSD request,
1137                          * it can pass EOF. Otherwise read is within
1138                          * i_size.
1139                          */
1140                         if (aio_req->num_reqs == 1) {
1141                                 loff_t i_size = i_size_read(inode);
1142                                 loff_t endoff = aio_req->iocb->ki_pos + rc;
1143                                 if (endoff < i_size)
1144                                         zlen = min_t(size_t, zlen,
1145                                                      i_size - endoff);
1146                                 aio_req->total_len = rc + zlen;
1147                         }
1148
1149                         iov_iter_bvec(&i, READ, osd_data->bvec_pos.bvecs,
1150                                       osd_data->num_bvecs, len);
1151                         iov_iter_advance(&i, rc);
1152                         iov_iter_zero(zlen, &i);
1153                 }
1154         }
1155
1156         /* r_start_latency == 0 means the request was not submitted */
1157         if (req->r_start_latency) {
1158                 if (aio_req->write)
1159                         ceph_update_write_metrics(metric, req->r_start_latency,
1160                                                   req->r_end_latency, len, rc);
1161                 else
1162                         ceph_update_read_metrics(metric, req->r_start_latency,
1163                                                  req->r_end_latency, len, rc);
1164         }
1165
1166         put_bvecs(osd_data->bvec_pos.bvecs, osd_data->num_bvecs,
1167                   aio_req->should_dirty);
1168         ceph_osdc_put_request(req);
1169
1170         if (rc < 0)
1171                 cmpxchg(&aio_req->error, 0, rc);
1172
1173         ceph_aio_complete(inode, aio_req);
1174         return;
1175 }
1176
1177 static void ceph_aio_retry_work(struct work_struct *work)
1178 {
1179         struct ceph_aio_work *aio_work =
1180                 container_of(work, struct ceph_aio_work, work);
1181         struct ceph_osd_request *orig_req = aio_work->req;
1182         struct ceph_aio_request *aio_req = orig_req->r_priv;
1183         struct inode *inode = orig_req->r_inode;
1184         struct ceph_inode_info *ci = ceph_inode(inode);
1185         struct ceph_snap_context *snapc;
1186         struct ceph_osd_request *req;
1187         int ret;
1188
1189         spin_lock(&ci->i_ceph_lock);
1190         if (__ceph_have_pending_cap_snap(ci)) {
1191                 struct ceph_cap_snap *capsnap =
1192                         list_last_entry(&ci->i_cap_snaps,
1193                                         struct ceph_cap_snap,
1194                                         ci_item);
1195                 snapc = ceph_get_snap_context(capsnap->context);
1196         } else {
1197                 BUG_ON(!ci->i_head_snapc);
1198                 snapc = ceph_get_snap_context(ci->i_head_snapc);
1199         }
1200         spin_unlock(&ci->i_ceph_lock);
1201
1202         req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 1,
1203                         false, GFP_NOFS);
1204         if (!req) {
1205                 ret = -ENOMEM;
1206                 req = orig_req;
1207                 goto out;
1208         }
1209
1210         req->r_flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
1211         ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
1212         ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);
1213
1214         req->r_ops[0] = orig_req->r_ops[0];
1215
1216         req->r_mtime = aio_req->mtime;
1217         req->r_data_offset = req->r_ops[0].extent.offset;
1218
1219         ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
1220         if (ret) {
1221                 ceph_osdc_put_request(req);
1222                 req = orig_req;
1223                 goto out;
1224         }
1225
1226         ceph_osdc_put_request(orig_req);
1227
1228         req->r_callback = ceph_aio_complete_req;
1229         req->r_inode = inode;
1230         req->r_priv = aio_req;
1231
1232         ret = ceph_osdc_start_request(req->r_osdc, req, false);
1233 out:
1234         if (ret < 0) {
1235                 req->r_result = ret;
1236                 ceph_aio_complete_req(req);
1237         }
1238
1239         ceph_put_snap_context(snapc);
1240         kfree(aio_work);
1241 }
1242
1243 static ssize_t
1244 ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
1245                        struct ceph_snap_context *snapc,
1246                        struct ceph_cap_flush **pcf)
1247 {
1248         struct file *file = iocb->ki_filp;
1249         struct inode *inode = file_inode(file);
1250         struct ceph_inode_info *ci = ceph_inode(inode);
1251         struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1252         struct ceph_client_metric *metric = &fsc->mdsc->metric;
1253         struct ceph_vino vino;
1254         struct ceph_osd_request *req;
1255         struct bio_vec *bvecs;
1256         struct ceph_aio_request *aio_req = NULL;
1257         int num_pages = 0;
1258         int flags;
1259         int ret = 0;
1260         struct timespec64 mtime = current_time(inode);
1261         size_t count = iov_iter_count(iter);
1262         loff_t pos = iocb->ki_pos;
1263         bool write = iov_iter_rw(iter) == WRITE;
1264         bool should_dirty = !write && user_backed_iter(iter);
1265
1266         if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
1267                 return -EROFS;
1268
1269         dout("sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n",
1270              (write ? "write" : "read"), file, pos, (unsigned)count,
1271              snapc, snapc ? snapc->seq : 0);
1272
1273         if (write) {
1274                 int ret2;
1275
1276                 ceph_fscache_invalidate(inode, true);
1277
1278                 ret2 = invalidate_inode_pages2_range(inode->i_mapping,
1279                                         pos >> PAGE_SHIFT,
1280                                         (pos + count - 1) >> PAGE_SHIFT);
1281                 if (ret2 < 0)
1282                         dout("invalidate_inode_pages2_range returned %d\n", ret2);
1283
1284                 flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
1285         } else {
1286                 flags = CEPH_OSD_FLAG_READ;
1287         }
1288
1289         while (iov_iter_count(iter) > 0) {
1290                 u64 size = iov_iter_count(iter);
1291                 ssize_t len;
1292
1293                 if (write)
1294                         size = min_t(u64, size, fsc->mount_options->wsize);
1295                 else
1296                         size = min_t(u64, size, fsc->mount_options->rsize);
1297
1298                 vino = ceph_vino(inode);
1299                 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1300                                             vino, pos, &size, 0,
1301                                             1,
1302                                             write ? CEPH_OSD_OP_WRITE :
1303                                                     CEPH_OSD_OP_READ,
1304                                             flags, snapc,
1305                                             ci->i_truncate_seq,
1306                                             ci->i_truncate_size,
1307                                             false);
1308                 if (IS_ERR(req)) {
1309                         ret = PTR_ERR(req);
1310                         break;
1311                 }
1312
1313                 len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
1314                 if (len < 0) {
1315                         ceph_osdc_put_request(req);
1316                         ret = len;
1317                         break;
1318                 }
1319                 if (len != size)
1320                         osd_req_op_extent_update(req, 0, len);
1321
1322                 /*
1323                  * To simplify error handling, allow AIO when IO within i_size
1324                  * or IO can be satisfied by single OSD request.
1325                  */
1326                 if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
1327                     (len == count || pos + count <= i_size_read(inode))) {
1328                         aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
1329                         if (aio_req) {
1330                                 aio_req->iocb = iocb;
1331                                 aio_req->write = write;
1332                                 aio_req->should_dirty = should_dirty;
1333                                 INIT_LIST_HEAD(&aio_req->osd_reqs);
1334                                 if (write) {
1335                                         aio_req->mtime = mtime;
1336                                         swap(aio_req->prealloc_cf, *pcf);
1337                                 }
1338                         }
1339                         /* ignore error */
1340                 }
1341
1342                 if (write) {
1343                         /*
1344                          * throw out any page cache pages in this range. this
1345                          * may block.
1346                          */
1347                         truncate_inode_pages_range(inode->i_mapping, pos,
1348                                                    PAGE_ALIGN(pos + len) - 1);
1349
1350                         req->r_mtime = mtime;
1351                 }
1352
1353                 osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);
1354
1355                 if (aio_req) {
1356                         aio_req->total_len += len;
1357                         aio_req->num_reqs++;
1358                         atomic_inc(&aio_req->pending_reqs);
1359
1360                         req->r_callback = ceph_aio_complete_req;
1361                         req->r_inode = inode;
1362                         req->r_priv = aio_req;
1363                         list_add_tail(&req->r_private_item, &aio_req->osd_reqs);
1364
1365                         pos += len;
1366                         continue;
1367                 }
1368
1369                 ret = ceph_osdc_start_request(req->r_osdc, req, false);
1370                 if (!ret)
1371                         ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1372
1373                 if (write)
1374                         ceph_update_write_metrics(metric, req->r_start_latency,
1375                                                   req->r_end_latency, len, ret);
1376                 else
1377                         ceph_update_read_metrics(metric, req->r_start_latency,
1378                                                  req->r_end_latency, len, ret);
1379
1380                 size = i_size_read(inode);
1381                 if (!write) {
1382                         if (ret == -ENOENT)
1383                                 ret = 0;
1384                         if (ret >= 0 && ret < len && pos + ret < size) {
1385                                 struct iov_iter i;
1386                                 int zlen = min_t(size_t, len - ret,
1387                                                  size - pos - ret);
1388
1389                                 iov_iter_bvec(&i, READ, bvecs, num_pages, len);
1390                                 iov_iter_advance(&i, ret);
1391                                 iov_iter_zero(zlen, &i);
1392                                 ret += zlen;
1393                         }
1394                         if (ret >= 0)
1395                                 len = ret;
1396                 }
1397
1398                 put_bvecs(bvecs, num_pages, should_dirty);
1399                 ceph_osdc_put_request(req);
1400                 if (ret < 0)
1401                         break;
1402
1403                 pos += len;
1404                 if (!write && pos >= size)
1405                         break;
1406
1407                 if (write && pos > size) {
1408                         if (ceph_inode_set_size(inode, pos))
1409                                 ceph_check_caps(ceph_inode(inode),
1410                                                 CHECK_CAPS_AUTHONLY,
1411                                                 NULL);
1412                 }
1413         }
1414
1415         if (aio_req) {
1416                 LIST_HEAD(osd_reqs);
1417
1418                 if (aio_req->num_reqs == 0) {
1419                         kfree(aio_req);
1420                         return ret;
1421                 }
1422
1423                 ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
1424                                               CEPH_CAP_FILE_RD);
1425
1426                 list_splice(&aio_req->osd_reqs, &osd_reqs);
1427                 inode_dio_begin(inode);
1428                 while (!list_empty(&osd_reqs)) {
1429                         req = list_first_entry(&osd_reqs,
1430                                                struct ceph_osd_request,
1431                                                r_private_item);
1432                         list_del_init(&req->r_private_item);
1433                         if (ret >= 0)
1434                                 ret = ceph_osdc_start_request(req->r_osdc,
1435                                                               req, false);
1436                         if (ret < 0) {
1437                                 req->r_result = ret;
1438                                 ceph_aio_complete_req(req);
1439                         }
1440                 }
1441                 return -EIOCBQUEUED;
1442         }
1443
1444         if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
1445                 ret = pos - iocb->ki_pos;
1446                 iocb->ki_pos = pos;
1447         }
1448         return ret;
1449 }
1450
1451 /*
1452  * Synchronous write, straight from __user pointer or user pages.
1453  *
1454  * If write spans object boundary, just do multiple writes.  (For a
1455  * correct atomic write, we should e.g. take write locks on all
1456  * objects, rollback on failure, etc.)
1457  */
1458 static ssize_t
1459 ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
1460                 struct ceph_snap_context *snapc)
1461 {
1462         struct file *file = iocb->ki_filp;
1463         struct inode *inode = file_inode(file);
1464         struct ceph_inode_info *ci = ceph_inode(inode);
1465         struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1466         struct ceph_vino vino;
1467         struct ceph_osd_request *req;
1468         struct page **pages;
1469         u64 len;
1470         int num_pages;
1471         int written = 0;
1472         int flags;
1473         int ret;
1474         bool check_caps = false;
1475         struct timespec64 mtime = current_time(inode);
1476         size_t count = iov_iter_count(from);
1477
1478         if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
1479                 return -EROFS;
1480
1481         dout("sync_write on file %p %lld~%u snapc %p seq %lld\n",
1482              file, pos, (unsigned)count, snapc, snapc->seq);
1483
1484         ret = filemap_write_and_wait_range(inode->i_mapping,
1485                                            pos, pos + count - 1);
1486         if (ret < 0)
1487                 return ret;
1488
1489         ceph_fscache_invalidate(inode, false);
1490         ret = invalidate_inode_pages2_range(inode->i_mapping,
1491                                             pos >> PAGE_SHIFT,
1492                                             (pos + count - 1) >> PAGE_SHIFT);
1493         if (ret < 0)
1494                 dout("invalidate_inode_pages2_range returned %d\n", ret);
1495
1496         flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
1497
1498         while ((len = iov_iter_count(from)) > 0) {
1499                 size_t left;
1500                 int n;
1501
1502                 vino = ceph_vino(inode);
1503                 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1504                                             vino, pos, &len, 0, 1,
1505                                             CEPH_OSD_OP_WRITE, flags, snapc,
1506                                             ci->i_truncate_seq,
1507                                             ci->i_truncate_size,
1508                                             false);
1509                 if (IS_ERR(req)) {
1510                         ret = PTR_ERR(req);
1511                         break;
1512                 }
1513
1514                 /*
1515                  * write from beginning of first page,
1516                  * regardless of io alignment
1517                  */
1518                 num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1519
1520                 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
1521                 if (IS_ERR(pages)) {
1522                         ret = PTR_ERR(pages);
1523                         goto out;
1524                 }
1525
1526                 left = len;
1527                 for (n = 0; n < num_pages; n++) {
1528                         size_t plen = min_t(size_t, left, PAGE_SIZE);
1529                         ret = copy_page_from_iter(pages[n], 0, plen, from);
1530                         if (ret != plen) {
1531                                 ret = -EFAULT;
1532                                 break;
1533                         }
1534                         left -= ret;
1535                 }
1536
1537                 if (ret < 0) {
1538                         ceph_release_page_vector(pages, num_pages);
1539                         goto out;
1540                 }
1541
1542                 req->r_inode = inode;
1543
1544                 osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
1545                                                 false, true);
1546
1547                 req->r_mtime = mtime;
1548                 ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
1549                 if (!ret)
1550                         ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1551
1552                 ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
1553                                           req->r_end_latency, len, ret);
1554 out:
1555                 ceph_osdc_put_request(req);
1556                 if (ret != 0) {
1557                         ceph_set_error_write(ci);
1558                         break;
1559                 }
1560
1561                 ceph_clear_error_write(ci);
1562                 pos += len;
1563                 written += len;
1564                 if (pos > i_size_read(inode)) {
1565                         check_caps = ceph_inode_set_size(inode, pos);
1566                         if (check_caps)
1567                                 ceph_check_caps(ceph_inode(inode),
1568                                                 CHECK_CAPS_AUTHONLY,
1569                                                 NULL);
1570                 }
1571
1572         }
1573
1574         if (ret != -EOLDSNAPC && written > 0) {
1575                 ret = written;
1576                 iocb->ki_pos = pos;
1577         }
1578         return ret;
1579 }
1580
1581 /*
1582  * Wrap generic_file_aio_read with checks for cap bits on the inode.
1583  * Atomically grab references, so that those bits are not released
1584  * back to the MDS mid-read.
1585  *
1586  * Hmm, the sync read case isn't actually async... should it be?
1587  */
1588 static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
1589 {
1590         struct file *filp = iocb->ki_filp;
1591         struct ceph_file_info *fi = filp->private_data;
1592         size_t len = iov_iter_count(to);
1593         struct inode *inode = file_inode(filp);
1594         struct ceph_inode_info *ci = ceph_inode(inode);
1595         bool direct_lock = iocb->ki_flags & IOCB_DIRECT;
1596         ssize_t ret;
1597         int want = 0, got = 0;
1598         int retry_op = 0, read = 0;
1599
1600 again:
1601         dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
1602              inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);
1603
1604         if (ceph_inode_is_shutdown(inode))
1605                 return -ESTALE;
1606
1607         if (direct_lock)
1608                 ceph_start_io_direct(inode);
1609         else
1610                 ceph_start_io_read(inode);
1611
1612         if (!(fi->flags & CEPH_F_SYNC) && !direct_lock)
1613                 want |= CEPH_CAP_FILE_CACHE;
1614         if (fi->fmode & CEPH_FILE_MODE_LAZY)
1615                 want |= CEPH_CAP_FILE_LAZYIO;
1616
1617         ret = ceph_get_caps(filp, CEPH_CAP_FILE_RD, want, -1, &got);
1618         if (ret < 0) {
1619                 if (direct_lock)
1620                         ceph_end_io_direct(inode);
1621                 else
1622                         ceph_end_io_read(inode);
1623                 return ret;
1624         }
1625
1626         if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
1627             (iocb->ki_flags & IOCB_DIRECT) ||
1628             (fi->flags & CEPH_F_SYNC)) {
1629
1630                 dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
1631                      inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
1632                      ceph_cap_string(got));
1633
1634                 if (ci->i_inline_version == CEPH_INLINE_NONE) {
1635                         if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
1636                                 ret = ceph_direct_read_write(iocb, to,
1637                                                              NULL, NULL);
1638                                 if (ret >= 0 && ret < len)
1639                                         retry_op = CHECK_EOF;
1640                         } else {
1641                                 ret = ceph_sync_read(iocb, to, &retry_op);
1642                         }
1643                 } else {
1644                         retry_op = READ_INLINE;
1645                 }
1646         } else {
1647                 CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
1648                 dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
1649                      inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
1650                      ceph_cap_string(got));
1651                 ceph_add_rw_context(fi, &rw_ctx);
1652                 ret = generic_file_read_iter(iocb, to);
1653                 ceph_del_rw_context(fi, &rw_ctx);
1654         }
1655
1656         dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
1657              inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
1658         ceph_put_cap_refs(ci, got);
1659
1660         if (direct_lock)
1661                 ceph_end_io_direct(inode);
1662         else
1663                 ceph_end_io_read(inode);
1664
1665         if (retry_op > HAVE_RETRIED && ret >= 0) {
1666                 int statret;
1667                 struct page *page = NULL;
1668                 loff_t i_size;
1669                 if (retry_op == READ_INLINE) {
1670                         page = __page_cache_alloc(GFP_KERNEL);
1671                         if (!page)
1672                                 return -ENOMEM;
1673                 }
1674
1675                 statret = __ceph_do_getattr(inode, page,
1676                                             CEPH_STAT_CAP_INLINE_DATA, !!page);
1677                 if (statret < 0) {
1678                         if (page)
1679                                 __free_page(page);
1680                         if (statret == -ENODATA) {
1681                                 BUG_ON(retry_op != READ_INLINE);
1682                                 goto again;
1683                         }
1684                         return statret;
1685                 }
1686
1687                 i_size = i_size_read(inode);
1688                 if (retry_op == READ_INLINE) {
1689                         BUG_ON(ret > 0 || read > 0);
1690                         if (iocb->ki_pos < i_size &&
1691                             iocb->ki_pos < PAGE_SIZE) {
1692                                 loff_t end = min_t(loff_t, i_size,
1693                                                    iocb->ki_pos + len);
1694                                 end = min_t(loff_t, end, PAGE_SIZE);
1695                                 if (statret < end)
1696                                         zero_user_segment(page, statret, end);
1697                                 ret = copy_page_to_iter(page,
1698                                                 iocb->ki_pos & ~PAGE_MASK,
1699                                                 end - iocb->ki_pos, to);
1700                                 iocb->ki_pos += ret;
1701                                 read += ret;
1702                         }
1703                         if (iocb->ki_pos < i_size && read < len) {
1704                                 size_t zlen = min_t(size_t, len - read,
1705                                                     i_size - iocb->ki_pos);
1706                                 ret = iov_iter_zero(zlen, to);
1707                                 iocb->ki_pos += ret;
1708                                 read += ret;
1709                         }
1710                         __free_pages(page, 0);
1711                         return read;
1712                 }
1713
1714                 /* hit EOF or hole? */
1715                 if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
1716                     ret < len) {
1717                         dout("sync_read hit hole, ppos %lld < size %lld"
1718                              ", reading more\n", iocb->ki_pos, i_size);
1719
1720                         read += ret;
1721                         len -= ret;
1722                         retry_op = HAVE_RETRIED;
1723                         goto again;
1724                 }
1725         }
1726
1727         if (ret >= 0)
1728                 ret += read;
1729
1730         return ret;
1731 }
1732
1733 /*
1734  * Take cap references to avoid releasing caps to MDS mid-write.
1735  *
1736  * If we are synchronous, and write with an old snap context, the OSD
1737  * may return EOLDSNAPC.  In that case, retry the write.. _after_
1738  * dropping our cap refs and allowing the pending snap to logically
1739  * complete _before_ this write occurs.
1740  *
1741  * If we are near ENOSPC, write synchronously.
1742  */
1743 static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
1744 {
1745         struct file *file = iocb->ki_filp;
1746         struct ceph_file_info *fi = file->private_data;
1747         struct inode *inode = file_inode(file);
1748         struct ceph_inode_info *ci = ceph_inode(inode);
1749         struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1750         struct ceph_osd_client *osdc = &fsc->client->osdc;
1751         struct ceph_cap_flush *prealloc_cf;
1752         ssize_t count, written = 0;
1753         int err, want = 0, got;
1754         bool direct_lock = false;
1755         u32 map_flags;
1756         u64 pool_flags;
1757         loff_t pos;
1758         loff_t limit = max(i_size_read(inode), fsc->max_file_size);
1759
1760         if (ceph_inode_is_shutdown(inode))
1761                 return -ESTALE;
1762
1763         if (ceph_snap(inode) != CEPH_NOSNAP)
1764                 return -EROFS;
1765
1766         prealloc_cf = ceph_alloc_cap_flush();
1767         if (!prealloc_cf)
1768                 return -ENOMEM;
1769
1770         if ((iocb->ki_flags & (IOCB_DIRECT | IOCB_APPEND)) == IOCB_DIRECT)
1771                 direct_lock = true;
1772
1773 retry_snap:
1774         if (direct_lock)
1775                 ceph_start_io_direct(inode);
1776         else
1777                 ceph_start_io_write(inode);
1778
1779         /* We can write back this queue in page reclaim */
1780         current->backing_dev_info = inode_to_bdi(inode);
1781
1782         if (iocb->ki_flags & IOCB_APPEND) {
1783                 err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
1784                 if (err < 0)
1785                         goto out;
1786         }
1787
1788         err = generic_write_checks(iocb, from);
1789         if (err <= 0)
1790                 goto out;
1791
1792         pos = iocb->ki_pos;
1793         if (unlikely(pos >= limit)) {
1794                 err = -EFBIG;
1795                 goto out;
1796         } else {
1797                 iov_iter_truncate(from, limit - pos);
1798         }
1799
1800         count = iov_iter_count(from);
1801         if (ceph_quota_is_max_bytes_exceeded(inode, pos + count)) {
1802                 err = -EDQUOT;
1803                 goto out;
1804         }
1805
1806         down_read(&osdc->lock);
1807         map_flags = osdc->osdmap->flags;
1808         pool_flags = ceph_pg_pool_flags(osdc->osdmap, ci->i_layout.pool_id);
1809         up_read(&osdc->lock);
1810         if ((map_flags & CEPH_OSDMAP_FULL) ||
1811             (pool_flags & CEPH_POOL_FLAG_FULL)) {
1812                 err = -ENOSPC;
1813                 goto out;
1814         }
1815
1816         err = file_remove_privs(file);
1817         if (err)
1818                 goto out;
1819
1820         dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
1821              inode, ceph_vinop(inode), pos, count, i_size_read(inode));
1822         if (!(fi->flags & CEPH_F_SYNC) && !direct_lock)
1823                 want |= CEPH_CAP_FILE_BUFFER;
1824         if (fi->fmode & CEPH_FILE_MODE_LAZY)
1825                 want |= CEPH_CAP_FILE_LAZYIO;
1826         got = 0;
1827         err = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, pos + count, &got);
1828         if (err < 0)
1829                 goto out;
1830
1831         err = file_update_time(file);
1832         if (err)
1833                 goto out_caps;
1834
1835         inode_inc_iversion_raw(inode);
1836
1837         dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
1838              inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));
1839
1840         if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
1841             (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) ||
1842             (ci->i_ceph_flags & CEPH_I_ERROR_WRITE)) {
1843                 struct ceph_snap_context *snapc;
1844                 struct iov_iter data;
1845
1846                 spin_lock(&ci->i_ceph_lock);
1847                 if (__ceph_have_pending_cap_snap(ci)) {
1848                         struct ceph_cap_snap *capsnap =
1849                                         list_last_entry(&ci->i_cap_snaps,
1850                                                         struct ceph_cap_snap,
1851                                                         ci_item);
1852                         snapc = ceph_get_snap_context(capsnap->context);
1853                 } else {
1854                         BUG_ON(!ci->i_head_snapc);
1855                         snapc = ceph_get_snap_context(ci->i_head_snapc);
1856                 }
1857                 spin_unlock(&ci->i_ceph_lock);
1858
1859                 /* we might need to revert back to that point */
1860                 data = *from;
1861                 if (iocb->ki_flags & IOCB_DIRECT)
1862                         written = ceph_direct_read_write(iocb, &data, snapc,
1863                                                          &prealloc_cf);
1864                 else
1865                         written = ceph_sync_write(iocb, &data, pos, snapc);
1866                 if (direct_lock)
1867                         ceph_end_io_direct(inode);
1868                 else
1869                         ceph_end_io_write(inode);
1870                 if (written > 0)
1871                         iov_iter_advance(from, written);
1872                 ceph_put_snap_context(snapc);
1873         } else {
1874                 /*
1875                  * No need to acquire the i_truncate_mutex. Because
1876                  * the MDS revokes Fwb caps before sending truncate
1877                  * message to us. We can't get Fwb cap while there
1878                  * are pending vmtruncate. So write and vmtruncate
1879                  * can not run at the same time
1880                  */
1881                 written = generic_perform_write(iocb, from);
1882                 if (likely(written >= 0))
1883                         iocb->ki_pos = pos + written;
1884                 ceph_end_io_write(inode);
1885         }
1886
1887         if (written >= 0) {
1888                 int dirty;
1889
1890                 spin_lock(&ci->i_ceph_lock);
1891                 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
1892                                                &prealloc_cf);
1893                 spin_unlock(&ci->i_ceph_lock);
1894                 if (dirty)
1895                         __mark_inode_dirty(inode, dirty);
1896                 if (ceph_quota_is_max_bytes_approaching(inode, iocb->ki_pos))
1897                         ceph_check_caps(ci, 0, NULL);
1898         }
1899
1900         dout("aio_write %p %llx.%llx %llu~%u  dropping cap refs on %s\n",
1901              inode, ceph_vinop(inode), pos, (unsigned)count,
1902              ceph_cap_string(got));
1903         ceph_put_cap_refs(ci, got);
1904
1905         if (written == -EOLDSNAPC) {
1906                 dout("aio_write %p %llx.%llx %llu~%u" "got EOLDSNAPC, retrying\n",
1907                      inode, ceph_vinop(inode), pos, (unsigned)count);
1908                 goto retry_snap;
1909         }
1910
1911         if (written >= 0) {
1912                 if ((map_flags & CEPH_OSDMAP_NEARFULL) ||
1913                     (pool_flags & CEPH_POOL_FLAG_NEARFULL))
1914                         iocb->ki_flags |= IOCB_DSYNC;
1915                 written = generic_write_sync(iocb, written);
1916         }
1917
1918         goto out_unlocked;
1919 out_caps:
1920         ceph_put_cap_refs(ci, got);
1921 out:
1922         if (direct_lock)
1923                 ceph_end_io_direct(inode);
1924         else
1925                 ceph_end_io_write(inode);
1926 out_unlocked:
1927         ceph_free_cap_flush(prealloc_cf);
1928         current->backing_dev_info = NULL;
1929         return written ? written : err;
1930 }
1931
1932 /*
1933  * llseek.  be sure to verify file size on SEEK_END.
1934  */
1935 static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
1936 {
1937         struct inode *inode = file->f_mapping->host;
1938         struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1939         loff_t i_size;
1940         loff_t ret;
1941
1942         inode_lock(inode);
1943
1944         if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
1945                 ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
1946                 if (ret < 0)
1947                         goto out;
1948         }
1949
1950         i_size = i_size_read(inode);
1951         switch (whence) {
1952         case SEEK_END:
1953                 offset += i_size;
1954                 break;
1955         case SEEK_CUR:
1956                 /*
1957                  * Here we special-case the lseek(fd, 0, SEEK_CUR)
1958                  * position-querying operation.  Avoid rewriting the "same"
1959                  * f_pos value back to the file because a concurrent read(),
1960                  * write() or lseek() might have altered it
1961                  */
1962                 if (offset == 0) {
1963                         ret = file->f_pos;
1964                         goto out;
1965                 }
1966                 offset += file->f_pos;
1967                 break;
1968         case SEEK_DATA:
1969                 if (offset < 0 || offset >= i_size) {
1970                         ret = -ENXIO;
1971                         goto out;
1972                 }
1973                 break;
1974         case SEEK_HOLE:
1975                 if (offset < 0 || offset >= i_size) {
1976                         ret = -ENXIO;
1977                         goto out;
1978                 }
1979                 offset = i_size;
1980                 break;
1981         }
1982
1983         ret = vfs_setpos(file, offset, max(i_size, fsc->max_file_size));
1984
1985 out:
1986         inode_unlock(inode);
1987         return ret;
1988 }
1989
1990 static inline void ceph_zero_partial_page(
1991         struct inode *inode, loff_t offset, unsigned size)
1992 {
1993         struct page *page;
1994         pgoff_t index = offset >> PAGE_SHIFT;
1995
1996         page = find_lock_page(inode->i_mapping, index);
1997         if (page) {
1998                 wait_on_page_writeback(page);
1999                 zero_user(page, offset & (PAGE_SIZE - 1), size);
2000                 unlock_page(page);
2001                 put_page(page);
2002         }
2003 }
2004
2005 static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
2006                                       loff_t length)
2007 {
2008         loff_t nearly = round_up(offset, PAGE_SIZE);
2009         if (offset < nearly) {
2010                 loff_t size = nearly - offset;
2011                 if (length < size)
2012                         size = length;
2013                 ceph_zero_partial_page(inode, offset, size);
2014                 offset += size;
2015                 length -= size;
2016         }
2017         if (length >= PAGE_SIZE) {
2018                 loff_t size = round_down(length, PAGE_SIZE);
2019                 truncate_pagecache_range(inode, offset, offset + size - 1);
2020                 offset += size;
2021                 length -= size;
2022         }
2023         if (length)
2024                 ceph_zero_partial_page(inode, offset, length);
2025 }
2026
2027 static int ceph_zero_partial_object(struct inode *inode,
2028                                     loff_t offset, loff_t *length)
2029 {
2030         struct ceph_inode_info *ci = ceph_inode(inode);
2031         struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
2032         struct ceph_osd_request *req;
2033         int ret = 0;
2034         loff_t zero = 0;
2035         int op;
2036
2037         if (!length) {
2038                 op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
2039                 length = &zero;
2040         } else {
2041                 op = CEPH_OSD_OP_ZERO;
2042         }
2043
2044         req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
2045                                         ceph_vino(inode),
2046                                         offset, length,
2047                                         0, 1, op,
2048                                         CEPH_OSD_FLAG_WRITE,
2049                                         NULL, 0, 0, false);
2050         if (IS_ERR(req)) {
2051                 ret = PTR_ERR(req);
2052                 goto out;
2053         }
2054
2055         req->r_mtime = inode->i_mtime;
2056         ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
2057         if (!ret) {
2058                 ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
2059                 if (ret == -ENOENT)
2060                         ret = 0;
2061         }
2062         ceph_osdc_put_request(req);
2063
2064 out:
2065         return ret;
2066 }
2067
2068 static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
2069 {
2070         int ret = 0;
2071         struct ceph_inode_info *ci = ceph_inode(inode);
2072         s32 stripe_unit = ci->i_layout.stripe_unit;
2073         s32 stripe_count = ci->i_layout.stripe_count;
2074         s32 object_size = ci->i_layout.object_size;
2075         u64 object_set_size = object_size * stripe_count;
2076         u64 nearly, t;
2077
2078         /* round offset up to next period boundary */
2079         nearly = offset + object_set_size - 1;
2080         t = nearly;
2081         nearly -= do_div(t, object_set_size);
2082
2083         while (length && offset < nearly) {
2084                 loff_t size = length;
2085                 ret = ceph_zero_partial_object(inode, offset, &size);
2086                 if (ret < 0)
2087                         return ret;
2088                 offset += size;
2089                 length -= size;
2090         }
2091         while (length >= object_set_size) {
2092                 int i;
2093                 loff_t pos = offset;
2094                 for (i = 0; i < stripe_count; ++i) {
2095                         ret = ceph_zero_partial_object(inode, pos, NULL);
2096                         if (ret < 0)
2097                                 return ret;
2098                         pos += stripe_unit;
2099                 }
2100                 offset += object_set_size;
2101                 length -= object_set_size;
2102         }
2103         while (length) {
2104                 loff_t size = length;
2105                 ret = ceph_zero_partial_object(inode, offset, &size);
2106                 if (ret < 0)
2107                         return ret;
2108                 offset += size;
2109                 length -= size;
2110         }
2111         return ret;
2112 }
2113
2114 static long ceph_fallocate(struct file *file, int mode,
2115                                 loff_t offset, loff_t length)
2116 {
2117         struct ceph_file_info *fi = file->private_data;
2118         struct inode *inode = file_inode(file);
2119         struct ceph_inode_info *ci = ceph_inode(inode);
2120         struct ceph_cap_flush *prealloc_cf;
2121         int want, got = 0;
2122         int dirty;
2123         int ret = 0;
2124         loff_t endoff = 0;
2125         loff_t size;
2126
2127         if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2128                 return -EOPNOTSUPP;
2129
2130         if (!S_ISREG(inode->i_mode))
2131                 return -EOPNOTSUPP;
2132
2133         prealloc_cf = ceph_alloc_cap_flush();
2134         if (!prealloc_cf)
2135                 return -ENOMEM;
2136
2137         inode_lock(inode);
2138
2139         if (ceph_snap(inode) != CEPH_NOSNAP) {
2140                 ret = -EROFS;
2141                 goto unlock;
2142         }
2143
2144         size = i_size_read(inode);
2145
2146         /* Are we punching a hole beyond EOF? */
2147         if (offset >= size)
2148                 goto unlock;
2149         if ((offset + length) > size)
2150                 length = size - offset;
2151
2152         if (fi->fmode & CEPH_FILE_MODE_LAZY)
2153                 want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
2154         else
2155                 want = CEPH_CAP_FILE_BUFFER;
2156
2157         ret = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, endoff, &got);
2158         if (ret < 0)
2159                 goto unlock;
2160
2161         filemap_invalidate_lock(inode->i_mapping);
2162         ceph_fscache_invalidate(inode, false);
2163         ceph_zero_pagecache_range(inode, offset, length);
2164         ret = ceph_zero_objects(inode, offset, length);
2165
2166         if (!ret) {
2167                 spin_lock(&ci->i_ceph_lock);
2168                 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
2169                                                &prealloc_cf);
2170                 spin_unlock(&ci->i_ceph_lock);
2171                 if (dirty)
2172                         __mark_inode_dirty(inode, dirty);
2173         }
2174         filemap_invalidate_unlock(inode->i_mapping);
2175
2176         ceph_put_cap_refs(ci, got);
2177 unlock:
2178         inode_unlock(inode);
2179         ceph_free_cap_flush(prealloc_cf);
2180         return ret;
2181 }
2182
2183 /*
2184  * This function tries to get FILE_WR capabilities for dst_ci and FILE_RD for
2185  * src_ci.  Two attempts are made to obtain both caps, and an error is return if
2186  * this fails; zero is returned on success.
2187  */
2188 static int get_rd_wr_caps(struct file *src_filp, int *src_got,
2189                           struct file *dst_filp,
2190                           loff_t dst_endoff, int *dst_got)
2191 {
2192         int ret = 0;
2193         bool retrying = false;
2194
2195 retry_caps:
2196         ret = ceph_get_caps(dst_filp, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER,
2197                             dst_endoff, dst_got);
2198         if (ret < 0)
2199                 return ret;
2200
2201         /*
2202          * Since we're already holding the FILE_WR capability for the dst file,
2203          * we would risk a deadlock by using ceph_get_caps.  Thus, we'll do some
2204          * retry dance instead to try to get both capabilities.
2205          */
2206         ret = ceph_try_get_caps(file_inode(src_filp),
2207                                 CEPH_CAP_FILE_RD, CEPH_CAP_FILE_SHARED,
2208                                 false, src_got);
2209         if (ret <= 0) {
2210                 /* Start by dropping dst_ci caps and getting src_ci caps */
2211                 ceph_put_cap_refs(ceph_inode(file_inode(dst_filp)), *dst_got);
2212                 if (retrying) {
2213                         if (!ret)
2214                                 /* ceph_try_get_caps masks EAGAIN */
2215                                 ret = -EAGAIN;
2216                         return ret;
2217                 }
2218                 ret = ceph_get_caps(src_filp, CEPH_CAP_FILE_RD,
2219                                     CEPH_CAP_FILE_SHARED, -1, src_got);
2220                 if (ret < 0)
2221                         return ret;
2222                 /*... drop src_ci caps too, and retry */
2223                 ceph_put_cap_refs(ceph_inode(file_inode(src_filp)), *src_got);
2224                 retrying = true;
2225                 goto retry_caps;
2226         }
2227         return ret;
2228 }
2229
2230 static void put_rd_wr_caps(struct ceph_inode_info *src_ci, int src_got,
2231                            struct ceph_inode_info *dst_ci, int dst_got)
2232 {
2233         ceph_put_cap_refs(src_ci, src_got);
2234         ceph_put_cap_refs(dst_ci, dst_got);
2235 }
2236
2237 /*
2238  * This function does several size-related checks, returning an error if:
2239  *  - source file is smaller than off+len
2240  *  - destination file size is not OK (inode_newsize_ok())
2241  *  - max bytes quotas is exceeded
2242  */
2243 static int is_file_size_ok(struct inode *src_inode, struct inode *dst_inode,
2244                            loff_t src_off, loff_t dst_off, size_t len)
2245 {
2246         loff_t size, endoff;
2247
2248         size = i_size_read(src_inode);
2249         /*
2250          * Don't copy beyond source file EOF.  Instead of simply setting length
2251          * to (size - src_off), just drop to VFS default implementation, as the
2252          * local i_size may be stale due to other clients writing to the source
2253          * inode.
2254          */
2255         if (src_off + len > size) {
2256                 dout("Copy beyond EOF (%llu + %zu > %llu)\n",
2257                      src_off, len, size);
2258                 return -EOPNOTSUPP;
2259         }
2260         size = i_size_read(dst_inode);
2261
2262         endoff = dst_off + len;
2263         if (inode_newsize_ok(dst_inode, endoff))
2264                 return -EOPNOTSUPP;
2265
2266         if (ceph_quota_is_max_bytes_exceeded(dst_inode, endoff))
2267                 return -EDQUOT;
2268
2269         return 0;
2270 }
2271
2272 static struct ceph_osd_request *
2273 ceph_alloc_copyfrom_request(struct ceph_osd_client *osdc,
2274                             u64 src_snapid,
2275                             struct ceph_object_id *src_oid,
2276                             struct ceph_object_locator *src_oloc,
2277                             struct ceph_object_id *dst_oid,
2278                             struct ceph_object_locator *dst_oloc,
2279                             u32 truncate_seq, u64 truncate_size)
2280 {
2281         struct ceph_osd_request *req;
2282         int ret;
2283         u32 src_fadvise_flags =
2284                 CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
2285                 CEPH_OSD_OP_FLAG_FADVISE_NOCACHE;
2286         u32 dst_fadvise_flags =
2287                 CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
2288                 CEPH_OSD_OP_FLAG_FADVISE_DONTNEED;
2289
2290         req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
2291         if (!req)
2292                 return ERR_PTR(-ENOMEM);
2293
2294         req->r_flags = CEPH_OSD_FLAG_WRITE;
2295
2296         ceph_oloc_copy(&req->r_t.base_oloc, dst_oloc);
2297         ceph_oid_copy(&req->r_t.base_oid, dst_oid);
2298
2299         ret = osd_req_op_copy_from_init(req, src_snapid, 0,
2300                                         src_oid, src_oloc,
2301                                         src_fadvise_flags,
2302                                         dst_fadvise_flags,
2303                                         truncate_seq,
2304                                         truncate_size,
2305                                         CEPH_OSD_COPY_FROM_FLAG_TRUNCATE_SEQ);
2306         if (ret)
2307                 goto out;
2308
2309         ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
2310         if (ret)
2311                 goto out;
2312
2313         return req;
2314
2315 out:
2316         ceph_osdc_put_request(req);
2317         return ERR_PTR(ret);
2318 }
2319
2320 static ssize_t ceph_do_objects_copy(struct ceph_inode_info *src_ci, u64 *src_off,
2321                                     struct ceph_inode_info *dst_ci, u64 *dst_off,
2322                                     struct ceph_fs_client *fsc,
2323                                     size_t len, unsigned int flags)
2324 {
2325         struct ceph_object_locator src_oloc, dst_oloc;
2326         struct ceph_object_id src_oid, dst_oid;
2327         struct ceph_osd_client *osdc;
2328         struct ceph_osd_request *req;
2329         size_t bytes = 0;
2330         u64 src_objnum, src_objoff, dst_objnum, dst_objoff;
2331         u32 src_objlen, dst_objlen;
2332         u32 object_size = src_ci->i_layout.object_size;
2333         int ret;
2334
2335         src_oloc.pool = src_ci->i_layout.pool_id;
2336         src_oloc.pool_ns = ceph_try_get_string(src_ci->i_layout.pool_ns);
2337         dst_oloc.pool = dst_ci->i_layout.pool_id;
2338         dst_oloc.pool_ns = ceph_try_get_string(dst_ci->i_layout.pool_ns);
2339         osdc = &fsc->client->osdc;
2340
2341         while (len >= object_size) {
2342                 ceph_calc_file_object_mapping(&src_ci->i_layout, *src_off,
2343                                               object_size, &src_objnum,
2344                                               &src_objoff, &src_objlen);
2345                 ceph_calc_file_object_mapping(&dst_ci->i_layout, *dst_off,
2346                                               object_size, &dst_objnum,
2347                                               &dst_objoff, &dst_objlen);
2348                 ceph_oid_init(&src_oid);
2349                 ceph_oid_printf(&src_oid, "%llx.%08llx",
2350                                 src_ci->i_vino.ino, src_objnum);
2351                 ceph_oid_init(&dst_oid);
2352                 ceph_oid_printf(&dst_oid, "%llx.%08llx",
2353                                 dst_ci->i_vino.ino, dst_objnum);
2354                 /* Do an object remote copy */
2355                 req = ceph_alloc_copyfrom_request(osdc, src_ci->i_vino.snap,
2356                                                   &src_oid, &src_oloc,
2357                                                   &dst_oid, &dst_oloc,
2358                                                   dst_ci->i_truncate_seq,
2359                                                   dst_ci->i_truncate_size);
2360                 if (IS_ERR(req))
2361                         ret = PTR_ERR(req);
2362                 else {
2363                         ceph_osdc_start_request(osdc, req, false);
2364                         ret = ceph_osdc_wait_request(osdc, req);
2365                         ceph_update_copyfrom_metrics(&fsc->mdsc->metric,
2366                                                      req->r_start_latency,
2367                                                      req->r_end_latency,
2368                                                      object_size, ret);
2369                         ceph_osdc_put_request(req);
2370                 }
2371                 if (ret) {
2372                         if (ret == -EOPNOTSUPP) {
2373                                 fsc->have_copy_from2 = false;
2374                                 pr_notice("OSDs don't support copy-from2; disabling copy offload\n");
2375                         }
2376                         dout("ceph_osdc_copy_from returned %d\n", ret);
2377                         if (!bytes)
2378                                 bytes = ret;
2379                         goto out;
2380                 }
2381                 len -= object_size;
2382                 bytes += object_size;
2383                 *src_off += object_size;
2384                 *dst_off += object_size;
2385         }
2386
2387 out:
2388         ceph_oloc_destroy(&src_oloc);
2389         ceph_oloc_destroy(&dst_oloc);
2390         return bytes;
2391 }
2392
2393 static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
2394                                       struct file *dst_file, loff_t dst_off,
2395                                       size_t len, unsigned int flags)
2396 {
2397         struct inode *src_inode = file_inode(src_file);
2398         struct inode *dst_inode = file_inode(dst_file);
2399         struct ceph_inode_info *src_ci = ceph_inode(src_inode);
2400         struct ceph_inode_info *dst_ci = ceph_inode(dst_inode);
2401         struct ceph_cap_flush *prealloc_cf;
2402         struct ceph_fs_client *src_fsc = ceph_inode_to_client(src_inode);
2403         loff_t size;
2404         ssize_t ret = -EIO, bytes;
2405         u64 src_objnum, dst_objnum, src_objoff, dst_objoff;
2406         u32 src_objlen, dst_objlen;
2407         int src_got = 0, dst_got = 0, err, dirty;
2408
2409         if (src_inode->i_sb != dst_inode->i_sb) {
2410                 struct ceph_fs_client *dst_fsc = ceph_inode_to_client(dst_inode);
2411
2412                 if (ceph_fsid_compare(&src_fsc->client->fsid,
2413                                       &dst_fsc->client->fsid)) {
2414                         dout("Copying files across clusters: src: %pU dst: %pU\n",
2415                              &src_fsc->client->fsid, &dst_fsc->client->fsid);
2416                         return -EXDEV;
2417                 }
2418         }
2419         if (ceph_snap(dst_inode) != CEPH_NOSNAP)
2420                 return -EROFS;
2421
2422         /*
2423          * Some of the checks below will return -EOPNOTSUPP, which will force a
2424          * fallback to the default VFS copy_file_range implementation.  This is
2425          * desirable in several cases (for ex, the 'len' is smaller than the
2426          * size of the objects, or in cases where that would be more
2427          * efficient).
2428          */
2429
2430         if (ceph_test_mount_opt(src_fsc, NOCOPYFROM))
2431                 return -EOPNOTSUPP;
2432
2433         if (!src_fsc->have_copy_from2)
2434                 return -EOPNOTSUPP;
2435
2436         /*
2437          * Striped file layouts require that we copy partial objects, but the
2438          * OSD copy-from operation only supports full-object copies.  Limit
2439          * this to non-striped file layouts for now.
2440          */
2441         if ((src_ci->i_layout.stripe_unit != dst_ci->i_layout.stripe_unit) ||
2442             (src_ci->i_layout.stripe_count != 1) ||
2443             (dst_ci->i_layout.stripe_count != 1) ||
2444             (src_ci->i_layout.object_size != dst_ci->i_layout.object_size)) {
2445                 dout("Invalid src/dst files layout\n");
2446                 return -EOPNOTSUPP;
2447         }
2448
2449         if (len < src_ci->i_layout.object_size)
2450                 return -EOPNOTSUPP; /* no remote copy will be done */
2451
2452         prealloc_cf = ceph_alloc_cap_flush();
2453         if (!prealloc_cf)
2454                 return -ENOMEM;
2455
2456         /* Start by sync'ing the source and destination files */
2457         ret = file_write_and_wait_range(src_file, src_off, (src_off + len));
2458         if (ret < 0) {
2459                 dout("failed to write src file (%zd)\n", ret);
2460                 goto out;
2461         }
2462         ret = file_write_and_wait_range(dst_file, dst_off, (dst_off + len));
2463         if (ret < 0) {
2464                 dout("failed to write dst file (%zd)\n", ret);
2465                 goto out;
2466         }
2467
2468         /*
2469          * We need FILE_WR caps for dst_ci and FILE_RD for src_ci as other
2470          * clients may have dirty data in their caches.  And OSDs know nothing
2471          * about caps, so they can't safely do the remote object copies.
2472          */
2473         err = get_rd_wr_caps(src_file, &src_got,
2474                              dst_file, (dst_off + len), &dst_got);
2475         if (err < 0) {
2476                 dout("get_rd_wr_caps returned %d\n", err);
2477                 ret = -EOPNOTSUPP;
2478                 goto out;
2479         }
2480
2481         ret = is_file_size_ok(src_inode, dst_inode, src_off, dst_off, len);
2482         if (ret < 0)
2483                 goto out_caps;
2484
2485         /* Drop dst file cached pages */
2486         ceph_fscache_invalidate(dst_inode, false);
2487         ret = invalidate_inode_pages2_range(dst_inode->i_mapping,
2488                                             dst_off >> PAGE_SHIFT,
2489                                             (dst_off + len) >> PAGE_SHIFT);
2490         if (ret < 0) {
2491                 dout("Failed to invalidate inode pages (%zd)\n", ret);
2492                 ret = 0; /* XXX */
2493         }
2494         ceph_calc_file_object_mapping(&src_ci->i_layout, src_off,
2495                                       src_ci->i_layout.object_size,
2496                                       &src_objnum, &src_objoff, &src_objlen);
2497         ceph_calc_file_object_mapping(&dst_ci->i_layout, dst_off,
2498                                       dst_ci->i_layout.object_size,
2499                                       &dst_objnum, &dst_objoff, &dst_objlen);
2500         /* object-level offsets need to the same */
2501         if (src_objoff != dst_objoff) {
2502                 ret = -EOPNOTSUPP;
2503                 goto out_caps;
2504         }
2505
2506         /*
2507          * Do a manual copy if the object offset isn't object aligned.
2508          * 'src_objlen' contains the bytes left until the end of the object,
2509          * starting at the src_off
2510          */
2511         if (src_objoff) {
2512                 dout("Initial partial copy of %u bytes\n", src_objlen);
2513
2514                 /*
2515                  * we need to temporarily drop all caps as we'll be calling
2516                  * {read,write}_iter, which will get caps again.
2517                  */
2518                 put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
2519                 ret = do_splice_direct(src_file, &src_off, dst_file,
2520                                        &dst_off, src_objlen, flags);
2521                 /* Abort on short copies or on error */
2522                 if (ret < src_objlen) {
2523                         dout("Failed partial copy (%zd)\n", ret);
2524                         goto out;
2525                 }
2526                 len -= ret;
2527                 err = get_rd_wr_caps(src_file, &src_got,
2528                                      dst_file, (dst_off + len), &dst_got);
2529                 if (err < 0)
2530                         goto out;
2531                 err = is_file_size_ok(src_inode, dst_inode,
2532                                       src_off, dst_off, len);
2533                 if (err < 0)
2534                         goto out_caps;
2535         }
2536
2537         size = i_size_read(dst_inode);
2538         bytes = ceph_do_objects_copy(src_ci, &src_off, dst_ci, &dst_off,
2539                                      src_fsc, len, flags);
2540         if (bytes <= 0) {
2541                 if (!ret)
2542                         ret = bytes;
2543                 goto out_caps;
2544         }
2545         dout("Copied %zu bytes out of %zu\n", bytes, len);
2546         len -= bytes;
2547         ret += bytes;
2548
2549         file_update_time(dst_file);
2550         inode_inc_iversion_raw(dst_inode);
2551
2552         if (dst_off > size) {
2553                 /* Let the MDS know about dst file size change */
2554                 if (ceph_inode_set_size(dst_inode, dst_off) ||
2555                     ceph_quota_is_max_bytes_approaching(dst_inode, dst_off))
2556                         ceph_check_caps(dst_ci, CHECK_CAPS_AUTHONLY, NULL);
2557         }
2558         /* Mark Fw dirty */
2559         spin_lock(&dst_ci->i_ceph_lock);
2560         dirty = __ceph_mark_dirty_caps(dst_ci, CEPH_CAP_FILE_WR, &prealloc_cf);
2561         spin_unlock(&dst_ci->i_ceph_lock);
2562         if (dirty)
2563                 __mark_inode_dirty(dst_inode, dirty);
2564
2565 out_caps:
2566         put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
2567
2568         /*
2569          * Do the final manual copy if we still have some bytes left, unless
2570          * there were errors in remote object copies (len >= object_size).
2571          */
2572         if (len && (len < src_ci->i_layout.object_size)) {
2573                 dout("Final partial copy of %zu bytes\n", len);
2574                 bytes = do_splice_direct(src_file, &src_off, dst_file,
2575                                          &dst_off, len, flags);
2576                 if (bytes > 0)
2577                         ret += bytes;
2578                 else
2579                         dout("Failed partial copy (%zd)\n", bytes);
2580         }
2581
2582 out:
2583         ceph_free_cap_flush(prealloc_cf);
2584
2585         return ret;
2586 }
2587
2588 static ssize_t ceph_copy_file_range(struct file *src_file, loff_t src_off,
2589                                     struct file *dst_file, loff_t dst_off,
2590                                     size_t len, unsigned int flags)
2591 {
2592         ssize_t ret;
2593
2594         ret = __ceph_copy_file_range(src_file, src_off, dst_file, dst_off,
2595                                      len, flags);
2596
2597         if (ret == -EOPNOTSUPP || ret == -EXDEV)
2598                 ret = generic_copy_file_range(src_file, src_off, dst_file,
2599                                               dst_off, len, flags);
2600         return ret;
2601 }
2602
2603 const struct file_operations ceph_file_fops = {
2604         .open = ceph_open,
2605         .release = ceph_release,
2606         .llseek = ceph_llseek,
2607         .read_iter = ceph_read_iter,
2608         .write_iter = ceph_write_iter,
2609         .mmap = ceph_mmap,
2610         .fsync = ceph_fsync,
2611         .lock = ceph_lock,
2612         .setlease = simple_nosetlease,
2613         .flock = ceph_flock,
2614         .splice_read = generic_file_splice_read,
2615         .splice_write = iter_file_splice_write,
2616         .unlocked_ioctl = ceph_ioctl,
2617         .compat_ioctl = compat_ptr_ioctl,
2618         .fallocate      = ceph_fallocate,
2619         .copy_file_range = ceph_copy_file_range,
2620 };