cifs: Fix unbuffered read
[linux-2.6-microblaze.git] fs/cifs/file.c
// SPDX-License-Identifier: LGPL-2.1
/*
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 */
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/mm.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "smb2proto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"
#include "smbdirect.h"
#include "fs_context.h"
#include "cifs_ioctl.h"
#include "cached_dir.h"

/*
 * Remove the dirty flags from a span of pages.
 */
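/*
 * folio_lock() can sleep, so the walk below drops the RCU read lock around
 * it; xas_pause() marks the iteration so that the XArray walk can be resumed
 * safely once the RCU lock is retaken.
 */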
static void cifs_undirty_folios(struct inode *inode, loff_t start, unsigned int len)
{
        struct address_space *mapping = inode->i_mapping;
        struct folio *folio;
        pgoff_t end;

        XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE);

        rcu_read_lock();

        end = (start + len - 1) / PAGE_SIZE;
        xas_for_each_marked(&xas, folio, end, PAGECACHE_TAG_DIRTY) {
                if (xas_retry(&xas, folio))
                        continue;
                xas_pause(&xas);
                rcu_read_unlock();
                folio_lock(folio);
                folio_clear_dirty_for_io(folio);
                folio_unlock(folio);
                rcu_read_lock();
        }

        rcu_read_unlock();
}

/*
 * Completion of write to server.
 */
void cifs_pages_written_back(struct inode *inode, loff_t start, unsigned int len)
{
        struct address_space *mapping = inode->i_mapping;
        struct folio *folio;
        pgoff_t end;

        XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE);

        if (!len)
                return;

        rcu_read_lock();

        end = (start + len - 1) / PAGE_SIZE;
        xas_for_each(&xas, folio, end) {
                if (xas_retry(&xas, folio))
                        continue;
                if (!folio_test_writeback(folio)) {
                        WARN_ONCE(1, "bad %x @%llx page %lx %lx\n",
                                  len, start, folio_index(folio), end);
                        continue;
                }

                folio_detach_private(folio);
                folio_end_writeback(folio);
        }

        rcu_read_unlock();
}
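/*
 * A sketch of typical use (illustrative only; names such as wdata->offset
 * and wdata->bytes are assumptions, not taken from this file): a write
 * completion handler would call
 *
 *         cifs_pages_written_back(inode, wdata->offset, wdata->bytes);
 *
 * for the byte span the server acknowledged, ending writeback on every
 * folio that the span covers.
 */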

/*
 * Failure of write to server.
 */
void cifs_pages_write_failed(struct inode *inode, loff_t start, unsigned int len)
{
        struct address_space *mapping = inode->i_mapping;
        struct folio *folio;
        pgoff_t end;

        XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE);

        if (!len)
                return;

        rcu_read_lock();

        end = (start + len - 1) / PAGE_SIZE;
        xas_for_each(&xas, folio, end) {
                if (xas_retry(&xas, folio))
                        continue;
                if (!folio_test_writeback(folio)) {
                        WARN_ONCE(1, "bad %x @%llx page %lx %lx\n",
                                  len, start, folio_index(folio), end);
                        continue;
                }

                folio_set_error(folio);
                folio_end_writeback(folio);
        }

        rcu_read_unlock();
}

/*
 * Redirty pages after a temporary failure.
 */
void cifs_pages_write_redirty(struct inode *inode, loff_t start, unsigned int len)
{
        struct address_space *mapping = inode->i_mapping;
        struct folio *folio;
        pgoff_t end;

        XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE);

        if (!len)
                return;

        rcu_read_lock();

        end = (start + len - 1) / PAGE_SIZE;
        xas_for_each(&xas, folio, end) {
                if (!folio_test_writeback(folio)) {
                        WARN_ONCE(1, "bad %x @%llx page %lx %lx\n",
                                  len, start, folio_index(folio), end);
                        continue;
                }

                filemap_dirty_folio(folio->mapping, folio);
                folio_end_writeback(folio);
        }

        rcu_read_unlock();
}

/*
 * Mark all open files on the tree connection as invalid, since they were
 * closed when the session to the server was lost.
 */
void
cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
{
        struct cifsFileInfo *open_file = NULL;
        struct list_head *tmp;
        struct list_head *tmp1;

        /* only send once per connect */
        spin_lock(&tcon->tc_lock);
        if (tcon->status != TID_NEED_RECON) {
                spin_unlock(&tcon->tc_lock);
                return;
        }
        tcon->status = TID_IN_FILES_INVALIDATE;
        spin_unlock(&tcon->tc_lock);

        /* list all files open on tree connection and mark them invalid */
        spin_lock(&tcon->open_file_lock);
        list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
                open_file = list_entry(tmp, struct cifsFileInfo, tlist);
                open_file->invalidHandle = true;
                open_file->oplock_break_cancelled = true;
        }
        spin_unlock(&tcon->open_file_lock);

        invalidate_all_cached_dirs(tcon);
        spin_lock(&tcon->tc_lock);
        if (tcon->status == TID_IN_FILES_INVALIDATE)
                tcon->status = TID_NEED_TCON;
        spin_unlock(&tcon->tc_lock);

        /*
         * BB Add call to invalidate_inodes(sb) for all superblocks mounted
         * to this tcon.
         */
}

static inline int cifs_convert_flags(unsigned int flags)
{
        if ((flags & O_ACCMODE) == O_RDONLY)
                return GENERIC_READ;
        else if ((flags & O_ACCMODE) == O_WRONLY)
                return GENERIC_WRITE;
        else if ((flags & O_ACCMODE) == O_RDWR) {
                /* GENERIC_ALL is too much permission to request; it can
                   cause an unnecessary access-denied error on create */
                /* return GENERIC_ALL; */
                return (GENERIC_READ | GENERIC_WRITE);
        }

        return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
                FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
                FILE_READ_DATA);
}
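/*
 * For example: O_RDONLY maps to GENERIC_READ, O_WRONLY to GENERIC_WRITE and
 * O_RDWR to GENERIC_READ | GENERIC_WRITE.  The final return is only reached
 * for an invalid O_ACCMODE value and asks for a conservative set of rights
 * instead of GENERIC_ALL.
 */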

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
static u32 cifs_posix_convert_flags(unsigned int flags)
{
        u32 posix_flags = 0;

        if ((flags & O_ACCMODE) == O_RDONLY)
                posix_flags = SMB_O_RDONLY;
        else if ((flags & O_ACCMODE) == O_WRONLY)
                posix_flags = SMB_O_WRONLY;
        else if ((flags & O_ACCMODE) == O_RDWR)
                posix_flags = SMB_O_RDWR;

        if (flags & O_CREAT) {
                posix_flags |= SMB_O_CREAT;
                if (flags & O_EXCL)
                        posix_flags |= SMB_O_EXCL;
        } else if (flags & O_EXCL)
                cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
                         current->comm, current->tgid);

        if (flags & O_TRUNC)
                posix_flags |= SMB_O_TRUNC;
        /* be safe and imply O_SYNC for O_DSYNC */
        if (flags & O_DSYNC)
                posix_flags |= SMB_O_SYNC;
        if (flags & O_DIRECTORY)
                posix_flags |= SMB_O_DIRECTORY;
        if (flags & O_NOFOLLOW)
                posix_flags |= SMB_O_NOFOLLOW;
        if (flags & O_DIRECT)
                posix_flags |= SMB_O_DIRECT;

        return posix_flags;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
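/*
 * For example, an open with O_WRONLY | O_CREAT | O_EXCL translates to
 * SMB_O_WRONLY | SMB_O_CREAT | SMB_O_EXCL, and O_DSYNC is conservatively
 * widened to the stronger SMB_O_SYNC, per the comment above.
 */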

static inline int cifs_get_disposition(unsigned int flags)
{
        if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
                return FILE_CREATE;
        else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
                return FILE_OVERWRITE_IF;
        else if ((flags & O_CREAT) == O_CREAT)
                return FILE_OPEN_IF;
        else if ((flags & O_TRUNC) == O_TRUNC)
                return FILE_OVERWRITE;
        else
                return FILE_OPEN;
}
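/*
 * For example, open(path, O_CREAT | O_TRUNC) yields FILE_OVERWRITE_IF and a
 * plain open(path, O_RDWR) yields FILE_OPEN.  The full flag-to-disposition
 * table appears in the comment inside cifs_nt_open() below; note that
 * FILE_SUPERSEDE is never produced.
 */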

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
int cifs_posix_open(const char *full_path, struct inode **pinode,
                        struct super_block *sb, int mode, unsigned int f_flags,
                        __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
        int rc;
        FILE_UNIX_BASIC_INFO *presp_data;
        __u32 posix_flags = 0;
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifs_fattr fattr;
        struct tcon_link *tlink;
        struct cifs_tcon *tcon;

        cifs_dbg(FYI, "posix open %s\n", full_path);

        presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
        if (presp_data == NULL)
                return -ENOMEM;

        tlink = cifs_sb_tlink(cifs_sb);
        if (IS_ERR(tlink)) {
                rc = PTR_ERR(tlink);
                goto posix_open_ret;
        }

        tcon = tlink_tcon(tlink);
        mode &= ~current_umask();

        posix_flags = cifs_posix_convert_flags(f_flags);
        rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
                             poplock, full_path, cifs_sb->local_nls,
                             cifs_remap(cifs_sb));
        cifs_put_tlink(tlink);

        if (rc)
                goto posix_open_ret;

        if (presp_data->Type == cpu_to_le32(-1))
                goto posix_open_ret; /* open ok, caller does qpathinfo */

        if (!pinode)
                goto posix_open_ret; /* caller does not need info */

        cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

        /* get new inode and set it up */
        if (*pinode == NULL) {
                cifs_fill_uniqueid(sb, &fattr);
                *pinode = cifs_iget(sb, &fattr);
                if (!*pinode) {
                        rc = -ENOMEM;
                        goto posix_open_ret;
                }
        } else {
                cifs_revalidate_mapping(*pinode);
                rc = cifs_fattr_to_inode(*pinode, &fattr);
        }

posix_open_ret:
        kfree(presp_data);
        return rc;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
                        struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
                        struct cifs_fid *fid, unsigned int xid, struct cifs_open_info_data *buf)
{
        int rc;
        int desired_access;
        int disposition;
        int create_options = CREATE_NOT_DIR;
        struct TCP_Server_Info *server = tcon->ses->server;
        struct cifs_open_parms oparms;

        if (!server->ops->open)
                return -ENOSYS;

        desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *      POSIX Flag            CIFS Disposition
 *      ----------            ----------------
 *      O_CREAT               FILE_OPEN_IF
 *      O_CREAT | O_EXCL      FILE_CREATE
 *      O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *      O_TRUNC               FILE_OVERWRITE
 *      none of the above     FILE_OPEN
 *
 *      Note that there is not a direct match for the disposition
 *      FILE_SUPERSEDE (ie create whether or not the file exists);
 *      O_CREAT | O_TRUNC is similar, but it truncates the existing
 *      file rather than creating a new file as FILE_SUPERSEDE does
 *      (which uses the attributes / metadata passed in on the open call).
 *
 *      O_SYNC is a reasonable match to the CIFS writethrough flag
 *      and the read/write flags match reasonably.  O_LARGEFILE
 *      is irrelevant because largefile support is always used
 *      by this client.  Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *      O_FASYNC, O_NOFOLLOW and O_NONBLOCK need further investigation.
 *********************************************************************/

        disposition = cifs_get_disposition(f_flags);

        /* BB pass O_SYNC flag through on file attributes .. BB */

        /* O_SYNC also has bit for O_DSYNC so following check picks up either */
        if (f_flags & O_SYNC)
                create_options |= CREATE_WRITE_THROUGH;

        if (f_flags & O_DIRECT)
                create_options |= CREATE_NO_BUFFER;

        oparms = (struct cifs_open_parms) {
                .tcon = tcon,
                .cifs_sb = cifs_sb,
                .desired_access = desired_access,
                .create_options = cifs_create_options(cifs_sb, create_options),
                .disposition = disposition,
                .path = full_path,
                .fid = fid,
        };

        rc = server->ops->open(xid, &oparms, oplock, buf);
        if (rc)
                return rc;

        /* TODO: Add support for calling posix query info but with passing in fid */
        if (tcon->unix_ext)
                rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
                                              xid);
        else
                rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
                                         xid, fid);

        if (rc) {
                server->ops->close(xid, tcon, fid);
                if (rc == -ESTALE)
                        rc = -EOPENSTALE;
        }

        return rc;
}

static bool
cifs_has_mand_locks(struct cifsInodeInfo *cinode)
{
        struct cifs_fid_locks *cur;
        bool has_locks = false;

        down_read(&cinode->lock_sem);
        list_for_each_entry(cur, &cinode->llist, llist) {
                if (!list_empty(&cur->locks)) {
                        has_locks = true;
                        break;
                }
        }
        up_read(&cinode->lock_sem);
        return has_locks;
}

void
cifs_down_write(struct rw_semaphore *sem)
{
        while (!down_write_trylock(sem))
                msleep(10);
}
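/*
 * cifs_down_write() polls with down_write_trylock() plus a 10ms sleep rather
 * than blocking in down_write().  The git history suggests this avoids a
 * lock_sem deadlock seen during reconnect; treat that rationale as background
 * information rather than something stated in this file.
 */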

static void cifsFileInfo_put_work(struct work_struct *work);

struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
                                       struct tcon_link *tlink, __u32 oplock,
                                       const char *symlink_target)
{
        struct dentry *dentry = file_dentry(file);
        struct inode *inode = d_inode(dentry);
        struct cifsInodeInfo *cinode = CIFS_I(inode);
        struct cifsFileInfo *cfile;
        struct cifs_fid_locks *fdlocks;
        struct cifs_tcon *tcon = tlink_tcon(tlink);
        struct TCP_Server_Info *server = tcon->ses->server;

        cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
        if (cfile == NULL)
                return cfile;

        fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
        if (!fdlocks) {
                kfree(cfile);
                return NULL;
        }

        if (symlink_target) {
                cfile->symlink_target = kstrdup(symlink_target, GFP_KERNEL);
                if (!cfile->symlink_target) {
                        kfree(fdlocks);
                        kfree(cfile);
                        return NULL;
                }
        }

        INIT_LIST_HEAD(&fdlocks->locks);
        fdlocks->cfile = cfile;
        cfile->llist = fdlocks;

        cfile->count = 1;
        cfile->pid = current->tgid;
        cfile->uid = current_fsuid();
        cfile->dentry = dget(dentry);
        cfile->f_flags = file->f_flags;
        cfile->invalidHandle = false;
        cfile->deferred_close_scheduled = false;
        cfile->tlink = cifs_get_tlink(tlink);
        INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
        INIT_WORK(&cfile->put, cifsFileInfo_put_work);
        INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
        mutex_init(&cfile->fh_mutex);
        spin_lock_init(&cfile->file_info_lock);

        cifs_sb_active(inode->i_sb);

        /*
         * If the server returned a read oplock and we have mandatory brlocks,
         * set oplock level to None.
         */
        if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
                cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
                oplock = 0;
        }

        cifs_down_write(&cinode->lock_sem);
        list_add(&fdlocks->llist, &cinode->llist);
        up_write(&cinode->lock_sem);

        spin_lock(&tcon->open_file_lock);
        if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
                oplock = fid->pending_open->oplock;
        list_del(&fid->pending_open->olist);

        fid->purge_cache = false;
        server->ops->set_fid(cfile, fid, oplock);

        list_add(&cfile->tlist, &tcon->openFileList);
        atomic_inc(&tcon->num_local_opens);
        /* if readable file instance, put it first in the list */
        spin_lock(&cinode->open_file_lock);
        if (file->f_mode & FMODE_READ)
                list_add(&cfile->flist, &cinode->openFileList);
        else
                list_add_tail(&cfile->flist, &cinode->openFileList);
        spin_unlock(&cinode->open_file_lock);
        spin_unlock(&tcon->open_file_lock);

        if (fid->purge_cache)
                cifs_zap_mapping(inode);

        file->private_data = cfile;
        return cfile;
}
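/*
 * The structure starts life with cfile->count = 1; that initial reference
 * belongs to the open file (file->private_data) and is dropped through
 * cifsFileInfo_put() / _cifsFileInfo_put() when the file is released.
 */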

struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
        spin_lock(&cifs_file->file_info_lock);
        cifsFileInfo_get_locked(cifs_file);
        spin_unlock(&cifs_file->file_info_lock);
        return cifs_file;
}

static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
{
        struct inode *inode = d_inode(cifs_file->dentry);
        struct cifsInodeInfo *cifsi = CIFS_I(inode);
        struct cifsLockInfo *li, *tmp;
        struct super_block *sb = inode->i_sb;

        /*
         * Delete any outstanding lock records. We'll lose them when the file
         * is closed anyway.
         */
        cifs_down_write(&cifsi->lock_sem);
        list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
                list_del(&li->llist);
                cifs_del_lock_waiters(li);
                kfree(li);
        }
        list_del(&cifs_file->llist->llist);
        kfree(cifs_file->llist);
        up_write(&cifsi->lock_sem);

        cifs_put_tlink(cifs_file->tlink);
        dput(cifs_file->dentry);
        cifs_sb_deactive(sb);
        kfree(cifs_file->symlink_target);
        kfree(cifs_file);
}

static void cifsFileInfo_put_work(struct work_struct *work)
{
        struct cifsFileInfo *cifs_file = container_of(work,
                        struct cifsFileInfo, put);

        cifsFileInfo_put_final(cifs_file);
}

/**
 * cifsFileInfo_put - release a reference of file priv data
 *
 * Always potentially wait for oplock handler. See _cifsFileInfo_put().
 *
 * @cifs_file:  cifs/smb3 specific info (eg refcounts) for an open file
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
        _cifsFileInfo_put(cifs_file, true, true);
}

/**
 * _cifsFileInfo_put - release a reference of file priv data
 *
 * This may involve closing the filehandle @cifs_file out on the
 * server. Must be called without holding tcon->open_file_lock,
 * cinode->open_file_lock and cifs_file->file_info_lock.
 *
 * If @wait_oplock_handler is true and we are releasing the last
 * reference, wait for any running oplock break handler of the file
 * and cancel any pending one.
 *
 * @cifs_file:  cifs/smb3 specific info (eg refcounts) for an open file
 * @wait_oplock_handler: must be false if called from oplock_break_handler
 * @offload:    if true, offload the final release to a workqueue; the close
 *              and oplock break paths pass false
 *
 */
void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
                       bool wait_oplock_handler, bool offload)
{
        struct inode *inode = d_inode(cifs_file->dentry);
        struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
        struct TCP_Server_Info *server = tcon->ses->server;
        struct cifsInodeInfo *cifsi = CIFS_I(inode);
        struct super_block *sb = inode->i_sb;
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifs_fid fid = {};
        struct cifs_pending_open open;
        bool oplock_break_cancelled;

        spin_lock(&tcon->open_file_lock);
        spin_lock(&cifsi->open_file_lock);
        spin_lock(&cifs_file->file_info_lock);
        if (--cifs_file->count > 0) {
                spin_unlock(&cifs_file->file_info_lock);
                spin_unlock(&cifsi->open_file_lock);
                spin_unlock(&tcon->open_file_lock);
                return;
        }
        spin_unlock(&cifs_file->file_info_lock);

        if (server->ops->get_lease_key)
                server->ops->get_lease_key(inode, &fid);

        /* store open in pending opens to make sure we don't miss lease break */
        cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

        /* remove it from the lists */
        list_del(&cifs_file->flist);
        list_del(&cifs_file->tlist);
        atomic_dec(&tcon->num_local_opens);

        if (list_empty(&cifsi->openFileList)) {
                cifs_dbg(FYI, "closing last open instance for inode %p\n",
                         d_inode(cifs_file->dentry));
                /*
                 * In strict cache mode we need to invalidate the mapping on
                 * the last close because it may cause an error when we open
                 * this file again and get at least a level II oplock.
                 */
                if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
                        set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
                cifs_set_oplock_level(cifsi, 0);
        }

        spin_unlock(&cifsi->open_file_lock);
        spin_unlock(&tcon->open_file_lock);

        oplock_break_cancelled = wait_oplock_handler ?
                cancel_work_sync(&cifs_file->oplock_break) : false;

        if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
                struct TCP_Server_Info *server = tcon->ses->server;
                unsigned int xid;

                xid = get_xid();
                if (server->ops->close_getattr)
                        server->ops->close_getattr(xid, tcon, cifs_file);
                else if (server->ops->close)
                        server->ops->close(xid, tcon, &cifs_file->fid);
                _free_xid(xid);
        }

        if (oplock_break_cancelled)
                cifs_done_oplock_break(cifsi);

        cifs_del_pending_open(&open);

        if (offload)
                queue_work(fileinfo_put_wq, &cifs_file->put);
        else
                cifsFileInfo_put_final(cifs_file);
}

int cifs_open(struct inode *inode, struct file *file)
{
        int rc = -EACCES;
        unsigned int xid;
        __u32 oplock;
        struct cifs_sb_info *cifs_sb;
        struct TCP_Server_Info *server;
        struct cifs_tcon *tcon;
        struct tcon_link *tlink;
        struct cifsFileInfo *cfile = NULL;
        void *page;
        const char *full_path;
        bool posix_open_ok = false;
        struct cifs_fid fid = {};
        struct cifs_pending_open open;
        struct cifs_open_info_data data = {};

        xid = get_xid();

        cifs_sb = CIFS_SB(inode->i_sb);
        if (unlikely(cifs_forced_shutdown(cifs_sb))) {
                free_xid(xid);
                return -EIO;
        }

        tlink = cifs_sb_tlink(cifs_sb);
        if (IS_ERR(tlink)) {
                free_xid(xid);
                return PTR_ERR(tlink);
        }
        tcon = tlink_tcon(tlink);
        server = tcon->ses->server;

        page = alloc_dentry_path();
        full_path = build_path_from_dentry(file_dentry(file), page);
        if (IS_ERR(full_path)) {
                rc = PTR_ERR(full_path);
                goto out;
        }

        cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
                 inode, file->f_flags, full_path);

        if (file->f_flags & O_DIRECT &&
            cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
                if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
                        file->f_op = &cifs_file_direct_nobrl_ops;
                else
                        file->f_op = &cifs_file_direct_ops;
        }

        /* Get the cached handle as SMB2 close is deferred */
        rc = cifs_get_readable_path(tcon, full_path, &cfile);
        if (rc == 0) {
                if (file->f_flags == cfile->f_flags) {
                        file->private_data = cfile;
                        spin_lock(&CIFS_I(inode)->deferred_lock);
                        cifs_del_deferred_close(cfile);
                        spin_unlock(&CIFS_I(inode)->deferred_lock);
                        goto use_cache;
                } else {
                        _cifsFileInfo_put(cfile, true, false);
                }
        }

        if (server->oplocks)
                oplock = REQ_OPLOCK;
        else
                oplock = 0;

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
        if (!tcon->broken_posix_open && tcon->unix_ext &&
            cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
                                le64_to_cpu(tcon->fsUnixInfo.Capability))) {
                /* can not refresh inode info since size could be stale */
                rc = cifs_posix_open(full_path, &inode, inode->i_sb,
                                cifs_sb->ctx->file_mode /* ignored */,
                                file->f_flags, &oplock, &fid.netfid, xid);
                if (rc == 0) {
                        cifs_dbg(FYI, "posix open succeeded\n");
                        posix_open_ok = true;
                } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
                        if (tcon->ses->serverNOS)
                                cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
                                         tcon->ses->ip_addr,
                                         tcon->ses->serverNOS);
                        tcon->broken_posix_open = true;
                } else if ((rc != -EIO) && (rc != -EREMOTE) &&
                         (rc != -EOPNOTSUPP)) /* path not found or net err */
                        goto out;
                /*
                 * Else fallthrough to retry open the old way on network i/o
                 * or DFS errors.
                 */
        }
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

        if (server->ops->get_lease_key)
                server->ops->get_lease_key(inode, &fid);

        cifs_add_pending_open(&fid, tlink, &open);

        if (!posix_open_ok) {
                if (server->ops->get_lease_key)
                        server->ops->get_lease_key(inode, &fid);

                rc = cifs_nt_open(full_path, inode, cifs_sb, tcon, file->f_flags, &oplock, &fid,
                                  xid, &data);
                if (rc) {
                        cifs_del_pending_open(&open);
                        goto out;
                }
        }

        cfile = cifs_new_fileinfo(&fid, file, tlink, oplock, data.symlink_target);
        if (cfile == NULL) {
                if (server->ops->close)
                        server->ops->close(xid, tcon, &fid);
                cifs_del_pending_open(&open);
                rc = -ENOMEM;
                goto out;
        }

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
        if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
                /*
                 * Time to set mode which we can not set earlier due to
                 * problems creating new read-only files.
                 */
                struct cifs_unix_set_info_args args = {
                        .mode   = inode->i_mode,
                        .uid    = INVALID_UID, /* no change */
                        .gid    = INVALID_GID, /* no change */
                        .ctime  = NO_CHANGE_64,
                        .atime  = NO_CHANGE_64,
                        .mtime  = NO_CHANGE_64,
                        .device = 0,
                };
                CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
                                       cfile->pid);
        }
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

use_cache:
        fscache_use_cookie(cifs_inode_cookie(file_inode(file)),
                           file->f_mode & FMODE_WRITE);
        if (file->f_flags & O_DIRECT &&
            (!((file->f_flags & O_ACCMODE) != O_RDONLY) ||
             file->f_flags & O_APPEND))
                cifs_invalidate_cache(file_inode(file),
                                      FSCACHE_INVAL_DIO_WRITE);

out:
        free_dentry_path(page);
        free_xid(xid);
        cifs_put_tlink(tlink);
        cifs_free_open_info(&data);
        return rc;
}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
        struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
        int rc = 0;
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
        struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

        down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
        if (cinode->can_cache_brlcks) {
                /* can cache locks - no need to relock */
                up_read(&cinode->lock_sem);
                return rc;
        }

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
        if (cap_unix(tcon->ses) &&
            (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
            ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
                rc = cifs_push_posix_locks(cfile);
        else
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
                rc = tcon->ses->server->ops->push_mand_locks(cfile);

        up_read(&cinode->lock_sem);
        return rc;
}

static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
        int rc = -EACCES;
        unsigned int xid;
        __u32 oplock;
        struct cifs_sb_info *cifs_sb;
        struct cifs_tcon *tcon;
        struct TCP_Server_Info *server;
        struct cifsInodeInfo *cinode;
        struct inode *inode;
        void *page;
        const char *full_path;
        int desired_access;
        int disposition = FILE_OPEN;
        int create_options = CREATE_NOT_DIR;
        struct cifs_open_parms oparms;

        xid = get_xid();
        mutex_lock(&cfile->fh_mutex);
        if (!cfile->invalidHandle) {
                mutex_unlock(&cfile->fh_mutex);
                free_xid(xid);
                return 0;
        }

        inode = d_inode(cfile->dentry);
        cifs_sb = CIFS_SB(inode->i_sb);
        tcon = tlink_tcon(cfile->tlink);
        server = tcon->ses->server;
        /*
         * Cannot grab the rename sem here, because various ops, including
         * some that already hold it, can end up causing writepage to get
         * called; if the server was down, that means we end up here, and we
         * can never tell whether the caller already holds the rename_sem.
         */
        page = alloc_dentry_path();
        full_path = build_path_from_dentry(cfile->dentry, page);
        if (IS_ERR(full_path)) {
                mutex_unlock(&cfile->fh_mutex);
                free_dentry_path(page);
                free_xid(xid);
                return PTR_ERR(full_path);
        }

        cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
                 inode, cfile->f_flags, full_path);

        if (tcon->ses->server->oplocks)
                oplock = REQ_OPLOCK;
        else
                oplock = 0;

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
        if (tcon->unix_ext && cap_unix(tcon->ses) &&
            (CIFS_UNIX_POSIX_PATH_OPS_CAP &
                                le64_to_cpu(tcon->fsUnixInfo.Capability))) {
                /*
                 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
                 * original open. Must mask them off for a reopen.
                 */
                unsigned int oflags = cfile->f_flags &
                                                ~(O_CREAT | O_EXCL | O_TRUNC);

                rc = cifs_posix_open(full_path, NULL, inode->i_sb,
                                     cifs_sb->ctx->file_mode /* ignored */,
                                     oflags, &oplock, &cfile->fid.netfid, xid);
                if (rc == 0) {
                        cifs_dbg(FYI, "posix reopen succeeded\n");
                        oparms.reconnect = true;
                        goto reopen_success;
                }
                /*
                 * Fall through to retry the open the old way on errors;
                 * especially in the reconnect path it is important to
                 * retry hard.
                 */
        }
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

        desired_access = cifs_convert_flags(cfile->f_flags);

        /* O_SYNC also has bit for O_DSYNC so following check picks up either */
        if (cfile->f_flags & O_SYNC)
                create_options |= CREATE_WRITE_THROUGH;

        if (cfile->f_flags & O_DIRECT)
                create_options |= CREATE_NO_BUFFER;

        if (server->ops->get_lease_key)
                server->ops->get_lease_key(inode, &cfile->fid);

        oparms = (struct cifs_open_parms) {
                .tcon = tcon,
                .cifs_sb = cifs_sb,
                .desired_access = desired_access,
                .create_options = cifs_create_options(cifs_sb, create_options),
                .disposition = disposition,
                .path = full_path,
                .fid = &cfile->fid,
                .reconnect = true,
        };

        /*
         * Can not refresh inode by passing in file_info buf to be returned by
         * ops->open and then calling get_inode_info with returned buf since
         * file might have write behind data that needs to be flushed and server
         * version of file size can be stale. If we knew for sure that inode was
         * not dirty locally we could do this.
         */
        rc = server->ops->open(xid, &oparms, &oplock, NULL);
        if (rc == -ENOENT && oparms.reconnect == false) {
                /* durable handle timeout is expired - open the file again */
                rc = server->ops->open(xid, &oparms, &oplock, NULL);
                /* indicate that we need to relock the file */
                oparms.reconnect = true;
        }

        if (rc) {
                mutex_unlock(&cfile->fh_mutex);
                cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
                cifs_dbg(FYI, "oplock: %d\n", oplock);
                goto reopen_error_exit;
        }

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
reopen_success:
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
        cfile->invalidHandle = false;
        mutex_unlock(&cfile->fh_mutex);
        cinode = CIFS_I(inode);

        if (can_flush) {
                rc = filemap_write_and_wait(inode->i_mapping);
                if (!is_interrupt_error(rc))
                        mapping_set_error(inode->i_mapping, rc);

                if (tcon->posix_extensions)
                        rc = smb311_posix_get_inode_info(&inode, full_path, inode->i_sb, xid);
                else if (tcon->unix_ext)
                        rc = cifs_get_inode_info_unix(&inode, full_path,
                                                      inode->i_sb, xid);
                else
                        rc = cifs_get_inode_info(&inode, full_path, NULL,
                                                 inode->i_sb, xid, NULL);
        }
        /*
         * Else we are writing out data to server already and could deadlock if
         * we tried to flush data, and since we do not know if we have data that
         * would invalidate the current end of file on the server we can not go
         * to the server to get the new inode info.
         */

        /*
         * If the server returned a read oplock and we have mandatory brlocks,
         * set oplock level to None.
         */
        if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
                cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
                oplock = 0;
        }

        server->ops->set_fid(cfile, &cfile->fid, oplock);
        if (oparms.reconnect)
                cifs_relock_file(cfile);

reopen_error_exit:
        free_dentry_path(page);
        free_xid(xid);
        return rc;
}

void smb2_deferred_work_close(struct work_struct *work)
{
        struct cifsFileInfo *cfile = container_of(work,
                        struct cifsFileInfo, deferred.work);

        spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
        cifs_del_deferred_close(cfile);
        cfile->deferred_close_scheduled = false;
        spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
        _cifsFileInfo_put(cfile, true, false);
}

int cifs_close(struct inode *inode, struct file *file)
{
        struct cifsFileInfo *cfile;
        struct cifsInodeInfo *cinode = CIFS_I(inode);
        struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
        struct cifs_deferred_close *dclose;

        cifs_fscache_unuse_inode_cookie(inode, file->f_mode & FMODE_WRITE);

        if (file->private_data != NULL) {
                cfile = file->private_data;
                file->private_data = NULL;
                dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
                if ((cinode->oplock == CIFS_CACHE_RHW_FLG) &&
                    cinode->lease_granted &&
                    !test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags) &&
                    dclose) {
                        if (test_and_clear_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) {
                                inode->i_ctime = inode->i_mtime = current_time(inode);
                        }
                        spin_lock(&cinode->deferred_lock);
                        cifs_add_deferred_close(cfile, dclose);
                        if (cfile->deferred_close_scheduled &&
                            delayed_work_pending(&cfile->deferred)) {
                                /*
                                 * If there is no pending work, mod_delayed_work
                                 * queues new work.  So, increase the ref count
                                 * to avoid use-after-free.
                                 */
                                if (!mod_delayed_work(deferredclose_wq,
                                                &cfile->deferred, cifs_sb->ctx->closetimeo))
                                        cifsFileInfo_get(cfile);
                        } else {
                                /* Deferred close for files */
                                queue_delayed_work(deferredclose_wq,
                                                &cfile->deferred, cifs_sb->ctx->closetimeo);
                                cfile->deferred_close_scheduled = true;
                                spin_unlock(&cinode->deferred_lock);
                                return 0;
                        }
                        spin_unlock(&cinode->deferred_lock);
                        _cifsFileInfo_put(cfile, true, false);
                } else {
                        _cifsFileInfo_put(cfile, true, false);
                        kfree(dclose);
                }
        }

        /* return code from the ->release op is always ignored */
        return 0;
}
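/*
 * With deferred close, the server handle is kept open for closetimeo after
 * the last user-space close; a quick re-open of the same path can then reuse
 * it via cifs_get_readable_path() (see the cached-handle check near the top
 * of cifs_open() above) instead of doing a fresh open on the wire.
 */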

void
cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
{
        struct cifsFileInfo *open_file, *tmp;
        struct list_head tmp_list;

        if (!tcon->use_persistent || !tcon->need_reopen_files)
                return;

        tcon->need_reopen_files = false;

        cifs_dbg(FYI, "Reopen persistent handles\n");
        INIT_LIST_HEAD(&tmp_list);
        /* list all files open on tree connection, reopen persistent handles */
        spin_lock(&tcon->open_file_lock);
        list_for_each_entry(open_file, &tcon->openFileList, tlist) {
                if (!open_file->invalidHandle)
                        continue;
                cifsFileInfo_get(open_file);
                list_add_tail(&open_file->rlist, &tmp_list);
        }
        spin_unlock(&tcon->open_file_lock);

        list_for_each_entry_safe(open_file, tmp, &tmp_list, rlist) {
                if (cifs_reopen_file(open_file, false /* do not flush */))
                        tcon->need_reopen_files = true;
                list_del_init(&open_file->rlist);
                cifsFileInfo_put(open_file);
        }
}
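/*
 * Note the reference dance above: each file is pinned with
 * cifsFileInfo_get() while open_file_lock is held, so it cannot be freed
 * once the lock is dropped, and the matching cifsFileInfo_put() only
 * happens after the reopen attempt completes.
 */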

int cifs_closedir(struct inode *inode, struct file *file)
{
        int rc = 0;
        unsigned int xid;
        struct cifsFileInfo *cfile = file->private_data;
        struct cifs_tcon *tcon;
        struct TCP_Server_Info *server;
        char *buf;

        cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

        if (cfile == NULL)
                return rc;

        xid = get_xid();
        tcon = tlink_tcon(cfile->tlink);
        server = tcon->ses->server;

        cifs_dbg(FYI, "Freeing private data in close dir\n");
        spin_lock(&cfile->file_info_lock);
        if (server->ops->dir_needs_close(cfile)) {
                cfile->invalidHandle = true;
                spin_unlock(&cfile->file_info_lock);
                if (server->ops->close_dir)
                        rc = server->ops->close_dir(xid, tcon, &cfile->fid);
                else
                        rc = -ENOSYS;
                cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
                /* not much we can do if it fails anyway, ignore rc */
                rc = 0;
        } else
                spin_unlock(&cfile->file_info_lock);

        buf = cfile->srch_inf.ntwrk_buf_start;
        if (buf) {
                cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
                cfile->srch_inf.ntwrk_buf_start = NULL;
                if (cfile->srch_inf.smallBuf)
                        cifs_small_buf_release(buf);
                else
                        cifs_buf_release(buf);
        }

        cifs_put_tlink(cfile->tlink);
        kfree(file->private_data);
        file->private_data = NULL;
        /* BB can we lock the filestruct while this is going on? */
        free_xid(xid);
        return rc;
}

static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
{
        struct cifsLockInfo *lock =
                kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
        if (!lock)
                return lock;
        lock->offset = offset;
        lock->length = length;
        lock->type = type;
        lock->pid = current->tgid;
        lock->flags = flags;
        INIT_LIST_HEAD(&lock->blist);
        init_waitqueue_head(&lock->block_q);
        return lock;
}

void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
        struct cifsLockInfo *li, *tmp;
        list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
                list_del_init(&li->blist);
                wake_up(&li->block_q);
        }
}
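/*
 * A blocked waiter queues itself on the conflicting lock's blist in
 * cifs_lock_add_if() below and sleeps until its entry is unlinked;
 * cifs_del_lock_waiters() unlinks every waiter (list_del_init() empties the
 * entry, which is exactly the wake condition) and wakes its block_q so the
 * waiter can retry.
 */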

#define CIFS_LOCK_OP    0
#define CIFS_READ_OP    1
#define CIFS_WRITE_OP   2

/* @rw_check : CIFS_LOCK_OP - no op, CIFS_READ_OP - read, CIFS_WRITE_OP - write */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
                            __u64 length, __u8 type, __u16 flags,
                            struct cifsFileInfo *cfile,
                            struct cifsLockInfo **conf_lock, int rw_check)
{
        struct cifsLockInfo *li;
        struct cifsFileInfo *cur_cfile = fdlocks->cfile;
        struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

        list_for_each_entry(li, &fdlocks->locks, llist) {
                if (offset + length <= li->offset ||
                    offset >= li->offset + li->length)
                        continue;
                if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
                    server->ops->compare_fids(cfile, cur_cfile)) {
                        /* shared lock prevents write op through the same fid */
                        if (!(li->type & server->vals->shared_lock_type) ||
                            rw_check != CIFS_WRITE_OP)
                                continue;
                }
                if ((type & server->vals->shared_lock_type) &&
                    ((server->ops->compare_fids(cfile, cur_cfile) &&
                     current->tgid == li->pid) || type == li->type))
                        continue;
                if (rw_check == CIFS_LOCK_OP &&
                    (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
                    server->ops->compare_fids(cfile, cur_cfile))
                        continue;
                if (conf_lock)
                        *conf_lock = li;
                return true;
        }
        return false;
}

bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
                        __u8 type, __u16 flags,
                        struct cifsLockInfo **conf_lock, int rw_check)
{
        bool rc = false;
        struct cifs_fid_locks *cur;
        struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));

        list_for_each_entry(cur, &cinode->llist, llist) {
                rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
                                                 flags, cfile, conf_lock,
                                                 rw_check);
                if (rc)
                        break;
        }

        return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
 * request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
               __u8 type, struct file_lock *flock)
{
        int rc = 0;
        struct cifsLockInfo *conf_lock;
        struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
        struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
        bool exist;

        down_read(&cinode->lock_sem);

        exist = cifs_find_lock_conflict(cfile, offset, length, type,
                                        flock->fl_flags, &conf_lock,
                                        CIFS_LOCK_OP);
        if (exist) {
                flock->fl_start = conf_lock->offset;
                flock->fl_end = conf_lock->offset + conf_lock->length - 1;
                flock->fl_pid = conf_lock->pid;
                if (conf_lock->type & server->vals->shared_lock_type)
                        flock->fl_type = F_RDLCK;
                else
                        flock->fl_type = F_WRLCK;
        } else if (!cinode->can_cache_brlcks)
                rc = 1;
        else
                flock->fl_type = F_UNLCK;

        up_read(&cinode->lock_sem);
        return rc;
}

static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
        struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
        cifs_down_write(&cinode->lock_sem);
        list_add_tail(&lock->llist, &cfile->llist->locks);
        up_write(&cinode->lock_sem);
}

/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
                 bool wait)
{
        struct cifsLockInfo *conf_lock;
        struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
        bool exist;
        int rc = 0;

try_again:
        exist = false;
        cifs_down_write(&cinode->lock_sem);

        exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
                                        lock->type, lock->flags, &conf_lock,
                                        CIFS_LOCK_OP);
        if (!exist && cinode->can_cache_brlcks) {
                list_add_tail(&lock->llist, &cfile->llist->locks);
                up_write(&cinode->lock_sem);
                return rc;
        }

        if (!exist)
                rc = 1;
        else if (!wait)
                rc = -EACCES;
        else {
                list_add_tail(&lock->blist, &conf_lock->blist);
                up_write(&cinode->lock_sem);
                rc = wait_event_interruptible(lock->block_q,
                                        (lock->blist.prev == &lock->blist) &&
                                        (lock->blist.next == &lock->blist));
                if (!rc)
                        goto try_again;
                cifs_down_write(&cinode->lock_sem);
                list_del_init(&lock->blist);
        }

        up_write(&cinode->lock_sem);
        return rc;
}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
/*
 * Check if there is another lock that prevents us from setting the lock
 * (posix style). If such a lock exists, update the flock structure with its
 * properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
 * request to the server or 1 otherwise.
 */
1396 static int
1397 cifs_posix_lock_test(struct file *file, struct file_lock *flock)
1398 {
1399         int rc = 0;
1400         struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1401         unsigned char saved_type = flock->fl_type;
1402
1403         if ((flock->fl_flags & FL_POSIX) == 0)
1404                 return 1;
1405
1406         down_read(&cinode->lock_sem);
1407         posix_test_lock(file, flock);
1408
1409         if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
1410                 flock->fl_type = saved_type;
1411                 rc = 1;
1412         }
1413
1414         up_read(&cinode->lock_sem);
1415         return rc;
1416 }
1417
1418 /*
1419  * Set the byte-range lock (posix style). Returns:
1420  * 1) <0, if the error occurs while setting the lock;
1421  * 2) 0, if we set the lock and don't need to request to the server;
1422  * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock;
1423  * 4) FILE_LOCK_DEFERRED + 1, if we need to request to the server.
1424  */
1425 static int
1426 cifs_posix_lock_set(struct file *file, struct file_lock *flock)
1427 {
1428         struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1429         int rc = FILE_LOCK_DEFERRED + 1;
1430
1431         if ((flock->fl_flags & FL_POSIX) == 0)
1432                 return rc;
1433
1434         cifs_down_write(&cinode->lock_sem);
1435         if (!cinode->can_cache_brlcks) {
1436                 up_write(&cinode->lock_sem);
1437                 return rc;
1438         }
1439
1440         rc = posix_lock_file(file, flock, NULL);
1441         up_write(&cinode->lock_sem);
1442         return rc;
1443 }
1444
1445 int
1446 cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
1447 {
1448         unsigned int xid;
1449         int rc = 0, stored_rc;
1450         struct cifsLockInfo *li, *tmp;
1451         struct cifs_tcon *tcon;
1452         unsigned int num, max_num, max_buf;
1453         LOCKING_ANDX_RANGE *buf, *cur;
1454         static const int types[] = {
1455                 LOCKING_ANDX_LARGE_FILES,
1456                 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
1457         };
1458         int i;
1459
1460         xid = get_xid();
1461         tcon = tlink_tcon(cfile->tlink);
1462
1463         /*
1464          * Accessing maxBuf is racy with cifs_reconnect - need to store the
1465          * value and check it before use.
1466          */
1467         max_buf = tcon->ses->server->maxBuf;
1468         if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
1469                 free_xid(xid);
1470                 return -EINVAL;
1471         }
1472
1473         BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1474                      PAGE_SIZE);
1475         max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1476                         PAGE_SIZE);
1477         max_num = (max_buf - sizeof(struct smb_hdr)) /
1478                                                 sizeof(LOCKING_ANDX_RANGE);
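             /*
              * Illustrative arithmetic (not in the original source): with a
              * 4096-byte page and the 20-byte large-file LOCKING_ANDX_RANGE,
              * max_num comes to roughly (4096 - sizeof(struct smb_hdr)) / 20,
              * i.e. about two hundred ranges per SMB_COM_LOCKING_ANDX request.
              */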
1479         buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1480         if (!buf) {
1481                 free_xid(xid);
1482                 return -ENOMEM;
1483         }
1484
1485         for (i = 0; i < 2; i++) {
1486                 cur = buf;
1487                 num = 0;
1488                 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
1489                         if (li->type != types[i])
1490                                 continue;
1491                         cur->Pid = cpu_to_le16(li->pid);
1492                         cur->LengthLow = cpu_to_le32((u32)li->length);
1493                         cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1494                         cur->OffsetLow = cpu_to_le32((u32)li->offset);
1495                         cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1496                         if (++num == max_num) {
1497                                 stored_rc = cifs_lockv(xid, tcon,
1498                                                        cfile->fid.netfid,
1499                                                        (__u8)li->type, 0, num,
1500                                                        buf);
1501                                 if (stored_rc)
1502                                         rc = stored_rc;
1503                                 cur = buf;
1504                                 num = 0;
1505                         } else
1506                                 cur++;
1507                 }
1508
1509                 if (num) {
1510                         stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
1511                                                (__u8)types[i], 0, num, buf);
1512                         if (stored_rc)
1513                                 rc = stored_rc;
1514                 }
1515         }
1516
1517         kfree(buf);
1518         free_xid(xid);
1519         return rc;
1520 }
1521
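     /*
      * Derive the on-the-wire lock "pid" from the local lock owner.  XORing
      * in cifs_lock_secret (randomised when the module initialises) avoids
      * exposing raw kernel pointer values to the server while still giving
      * each fl_owner_t a stable hash.
      */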
1522 static __u32
1523 hash_lockowner(fl_owner_t owner)
1524 {
1525         return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1526 }
1527 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1528
1529 struct lock_to_push {
1530         struct list_head llist;
1531         __u64 offset;
1532         __u64 length;
1533         __u32 pid;
1534         __u16 netfid;
1535         __u8 type;
1536 };
1537
1538 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1539 static int
1540 cifs_push_posix_locks(struct cifsFileInfo *cfile)
1541 {
1542         struct inode *inode = d_inode(cfile->dentry);
1543         struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1544         struct file_lock *flock;
1545         struct file_lock_context *flctx = locks_inode_context(inode);
1546         unsigned int count = 0, i;
1547         int rc = 0, xid, type;
1548         struct list_head locks_to_send, *el;
1549         struct lock_to_push *lck, *tmp;
1550         __u64 length;
1551
1552         xid = get_xid();
1553
1554         if (!flctx)
1555                 goto out;
1556
1557         spin_lock(&flctx->flc_lock);
1558         list_for_each(el, &flctx->flc_posix) {
1559                 count++;
1560         }
1561         spin_unlock(&flctx->flc_lock);
1562
1563         INIT_LIST_HEAD(&locks_to_send);
1564
1565         /*
1566          * Allocating count locks is enough because no FL_POSIX locks can be
1567          * added to the list while we are holding cinode->lock_sem, which
1568          * protects the locking operations on this inode.
1569          */
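             /*
              * Count first, allocate afterwards: flc_lock is a spinlock, so
              * the GFP_KERNEL allocations below must not be made while it is
              * held.
              */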
1570         for (i = 0; i < count; i++) {
1571                 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1572                 if (!lck) {
1573                         rc = -ENOMEM;
1574                         goto err_out;
1575                 }
1576                 list_add_tail(&lck->llist, &locks_to_send);
1577         }
1578
1579         el = locks_to_send.next;
1580         spin_lock(&flctx->flc_lock);
1581         list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
1582                 if (el == &locks_to_send) {
1583                         /*
1584                          * The list ended. We don't have enough allocated
1585                          * structures - something is really wrong.
1586                          */
1587                         cifs_dbg(VFS, "Can't push all brlocks!\n");
1588                         break;
1589                 }
1590                 length = cifs_flock_len(flock);
1591                 if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
1592                         type = CIFS_RDLCK;
1593                 else
1594                         type = CIFS_WRLCK;
1595                 lck = list_entry(el, struct lock_to_push, llist);
1596                 lck->pid = hash_lockowner(flock->fl_owner);
1597                 lck->netfid = cfile->fid.netfid;
1598                 lck->length = length;
1599                 lck->type = type;
1600                 lck->offset = flock->fl_start;
1601         }
1602         spin_unlock(&flctx->flc_lock);
1603
1604         list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1605                 int stored_rc;
1606
1607                 stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
1608                                              lck->offset, lck->length, NULL,
1609                                              lck->type, 0);
1610                 if (stored_rc)
1611                         rc = stored_rc;
1612                 list_del(&lck->llist);
1613                 kfree(lck);
1614         }
1615
1616 out:
1617         free_xid(xid);
1618         return rc;
1619 err_out:
1620         list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1621                 list_del(&lck->llist);
1622                 kfree(lck);
1623         }
1624         goto out;
1625 }
1626 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1627
1628 static int
1629 cifs_push_locks(struct cifsFileInfo *cfile)
1630 {
1631         struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1632         struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1633         int rc = 0;
1634 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1635         struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1636 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1637
1638         /* we are going to update can_cache_brlcks here - need write access */
1639         cifs_down_write(&cinode->lock_sem);
1640         if (!cinode->can_cache_brlcks) {
1641                 up_write(&cinode->lock_sem);
1642                 return rc;
1643         }
1644
1645 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1646         if (cap_unix(tcon->ses) &&
1647             (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1648             ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1649                 rc = cifs_push_posix_locks(cfile);
1650         else
1651 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1652                 rc = tcon->ses->server->ops->push_mand_locks(cfile);
1653
1654         cinode->can_cache_brlcks = false;
1655         up_write(&cinode->lock_sem);
1656         return rc;
1657 }
1658
1659 static void
1660 cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
1661                 bool *wait_flag, struct TCP_Server_Info *server)
1662 {
1663         if (flock->fl_flags & FL_POSIX)
1664                 cifs_dbg(FYI, "Posix\n");
1665         if (flock->fl_flags & FL_FLOCK)
1666                 cifs_dbg(FYI, "Flock\n");
1667         if (flock->fl_flags & FL_SLEEP) {
1668                 cifs_dbg(FYI, "Blocking lock\n");
1669                 *wait_flag = true;
1670         }
1671         if (flock->fl_flags & FL_ACCESS)
1672                 cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
1673         if (flock->fl_flags & FL_LEASE)
1674                 cifs_dbg(FYI, "Lease on file - not implemented yet\n");
1675         if (flock->fl_flags &
1676             (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
1677                FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
1678                 cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);
1679
1680         *type = server->vals->large_lock_type;
1681         if (flock->fl_type == F_WRLCK) {
1682                 cifs_dbg(FYI, "F_WRLCK\n");
1683                 *type |= server->vals->exclusive_lock_type;
1684                 *lock = 1;
1685         } else if (flock->fl_type == F_UNLCK) {
1686                 cifs_dbg(FYI, "F_UNLCK\n");
1687                 *type |= server->vals->unlock_lock_type;
1688                 *unlock = 1;
1689                 /* Check if unlock includes more than one lock range */
1690         } else if (flock->fl_type == F_RDLCK) {
1691                 cifs_dbg(FYI, "F_RDLCK\n");
1692                 *type |= server->vals->shared_lock_type;
1693                 *lock = 1;
1694         } else if (flock->fl_type == F_EXLCK) {
1695                 cifs_dbg(FYI, "F_EXLCK\n");
1696                 *type |= server->vals->exclusive_lock_type;
1697                 *lock = 1;
1698         } else if (flock->fl_type == F_SHLCK) {
1699                 cifs_dbg(FYI, "F_SHLCK\n");
1700                 *type |= server->vals->shared_lock_type;
1701                 *lock = 1;
1702         } else
1703                 cifs_dbg(FYI, "Unknown type of lock\n");
1704 }
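     /*
      * Worked example (illustrative): a blocking write lock taken via
      * fcntl(F_SETLKW) arrives with fl_flags = FL_POSIX | FL_SLEEP and
      * fl_type = F_WRLCK, so cifs_read_flock() sets *wait_flag = true,
      * *type = large_lock_type | exclusive_lock_type and *lock = 1; the
      * unlock and shared cases decode analogously.
      */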
1705
1706 static int
1707 cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
1708            bool wait_flag, bool posix_lck, unsigned int xid)
1709 {
1710         int rc = 0;
1711         __u64 length = cifs_flock_len(flock);
1712         struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1713         struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1714         struct TCP_Server_Info *server = tcon->ses->server;
1715 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1716         __u16 netfid = cfile->fid.netfid;
1717
1718         if (posix_lck) {
1719                 int posix_lock_type;
1720
1721                 rc = cifs_posix_lock_test(file, flock);
1722                 if (!rc)
1723                         return rc;
1724
1725                 if (type & server->vals->shared_lock_type)
1726                         posix_lock_type = CIFS_RDLCK;
1727                 else
1728                         posix_lock_type = CIFS_WRLCK;
1729                 rc = CIFSSMBPosixLock(xid, tcon, netfid,
1730                                       hash_lockowner(flock->fl_owner),
1731                                       flock->fl_start, length, flock,
1732                                       posix_lock_type, wait_flag);
1733                 return rc;
1734         }
1735 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1736
1737         rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
1738         if (!rc)
1739                 return rc;
1740
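             /*
              * No conflict was found locally.  For mandatory-style servers,
              * GETLK is emulated by probing: try to take the range
              * exclusively and, if that succeeds, unlock it again and report
              * F_UNLCK; otherwise fall back to a shared probe to distinguish
              * a read conflict from a write one.
              */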
1741         /* BB we could chain these into one lock request BB */
1742         rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
1743                                     1, 0, false);
1744         if (rc == 0) {
1745                 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1746                                             type, 0, 1, false);
1747                 flock->fl_type = F_UNLCK;
1748                 if (rc != 0)
1749                         cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
1750                                  rc);
1751                 return 0;
1752         }
1753
1754         if (type & server->vals->shared_lock_type) {
1755                 flock->fl_type = F_WRLCK;
1756                 return 0;
1757         }
1758
1759         type &= ~server->vals->exclusive_lock_type;
1760
1761         rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1762                                     type | server->vals->shared_lock_type,
1763                                     1, 0, false);
1764         if (rc == 0) {
1765                 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1766                         type | server->vals->shared_lock_type, 0, 1, false);
1767                 flock->fl_type = F_RDLCK;
1768                 if (rc != 0)
1769                         cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
1770                                  rc);
1771         } else
1772                 flock->fl_type = F_WRLCK;
1773
1774         return 0;
1775 }
1776
1777 void
1778 cifs_move_llist(struct list_head *source, struct list_head *dest)
1779 {
1780         struct list_head *li, *tmp;
1781         list_for_each_safe(li, tmp, source)
1782                 list_move(li, dest);
1783 }
1784
1785 void
1786 cifs_free_llist(struct list_head *llist)
1787 {
1788         struct cifsLockInfo *li, *tmp;
1789         list_for_each_entry_safe(li, tmp, llist, llist) {
1790                 cifs_del_lock_waiters(li);
1791                 list_del(&li->llist);
1792                 kfree(li);
1793         }
1794 }
1795
1796 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1797 int
1798 cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
1799                   unsigned int xid)
1800 {
1801         int rc = 0, stored_rc;
1802         static const int types[] = {
1803                 LOCKING_ANDX_LARGE_FILES,
1804                 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
1805         };
1806         unsigned int i;
1807         unsigned int max_num, num, max_buf;
1808         LOCKING_ANDX_RANGE *buf, *cur;
1809         struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1810         struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1811         struct cifsLockInfo *li, *tmp;
1812         __u64 length = cifs_flock_len(flock);
1813         struct list_head tmp_llist;
1814
1815         INIT_LIST_HEAD(&tmp_llist);
1816
1817         /*
1818          * Accessing maxBuf is racy with cifs_reconnect - need to store the
1819          * value and check it before use.
1820          */
1821         max_buf = tcon->ses->server->maxBuf;
1822         if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
1823                 return -EINVAL;
1824
1825         BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1826                      PAGE_SIZE);
1827         max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1828                         PAGE_SIZE);
1829         max_num = (max_buf - sizeof(struct smb_hdr)) /
1830                                                 sizeof(LOCKING_ANDX_RANGE);
1831         buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1832         if (!buf)
1833                 return -ENOMEM;
1834
1835         cifs_down_write(&cinode->lock_sem);
1836         for (i = 0; i < 2; i++) {
1837                 cur = buf;
1838                 num = 0;
1839                 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
1840                         if (flock->fl_start > li->offset ||
1841                             (flock->fl_start + length) <
1842                             (li->offset + li->length))
1843                                 continue;
1844                         if (current->tgid != li->pid)
1845                                 continue;
1846                         if (types[i] != li->type)
1847                                 continue;
1848                         if (cinode->can_cache_brlcks) {
1849                                 /*
1850                                  * We can cache brlock requests - simply remove
1851                                  * the lock from the file's list.
1852                                  */
1853                                 list_del(&li->llist);
1854                                 cifs_del_lock_waiters(li);
1855                                 kfree(li);
1856                                 continue;
1857                         }
1858                         cur->Pid = cpu_to_le16(li->pid);
1859                         cur->LengthLow = cpu_to_le32((u32)li->length);
1860                         cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1861                         cur->OffsetLow = cpu_to_le32((u32)li->offset);
1862                         cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1863                         /*
1864                          * We need to save the lock here so that we can add it back
1865                          * to the file's list if the unlock range request fails on
1866                          * the server.
1867                          */
1868                         list_move(&li->llist, &tmp_llist);
1869                         if (++num == max_num) {
1870                                 stored_rc = cifs_lockv(xid, tcon,
1871                                                        cfile->fid.netfid,
1872                                                        li->type, num, 0, buf);
1873                                 if (stored_rc) {
1874                                         /*
1875                                          * We failed on the unlock range
1876                                          * request - add all locks from the tmp
1877                                          * list to the head of the file's list.
1878                                          */
1879                                         cifs_move_llist(&tmp_llist,
1880                                                         &cfile->llist->locks);
1881                                         rc = stored_rc;
1882                                 } else
1883                                         /*
1884                                          * The unlock range request succeeded -
1885                                          * free the tmp list.
1886                                          */
1887                                         cifs_free_llist(&tmp_llist);
1888                                 cur = buf;
1889                                 num = 0;
1890                         } else
1891                                 cur++;
1892                 }
1893                 if (num) {
1894                         stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
1895                                                types[i], num, 0, buf);
1896                         if (stored_rc) {
1897                                 cifs_move_llist(&tmp_llist,
1898                                                 &cfile->llist->locks);
1899                                 rc = stored_rc;
1900                         } else
1901                                 cifs_free_llist(&tmp_llist);
1902                 }
1903         }
1904
1905         up_write(&cinode->lock_sem);
1906         kfree(buf);
1907         return rc;
1908 }
1909 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1910
1911 static int
1912 cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
1913            bool wait_flag, bool posix_lck, int lock, int unlock,
1914            unsigned int xid)
1915 {
1916         int rc = 0;
1917         __u64 length = cifs_flock_len(flock);
1918         struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1919         struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1920         struct TCP_Server_Info *server = tcon->ses->server;
1921         struct inode *inode = d_inode(cfile->dentry);
1922
1923 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1924         if (posix_lck) {
1925                 int posix_lock_type;
1926
1927                 rc = cifs_posix_lock_set(file, flock);
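                     /*
                      * See the return contract above cifs_posix_lock_set():
                      * only FILE_LOCK_DEFERRED + 1 means the request still
                      * has to be sent to the server; everything else was
                      * handled (or failed) locally.
                      */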
1928                 if (rc <= FILE_LOCK_DEFERRED)
1929                         return rc;
1930
1931                 if (type & server->vals->shared_lock_type)
1932                         posix_lock_type = CIFS_RDLCK;
1933                 else
1934                         posix_lock_type = CIFS_WRLCK;
1935
1936                 if (unlock == 1)
1937                         posix_lock_type = CIFS_UNLCK;
1938
1939                 rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
1940                                       hash_lockowner(flock->fl_owner),
1941                                       flock->fl_start, length,
1942                                       NULL, posix_lock_type, wait_flag);
1943                 goto out;
1944         }
1945 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1946         if (lock) {
1947                 struct cifsLockInfo *lock;
1948
1949                 lock = cifs_lock_init(flock->fl_start, length, type,
1950                                       flock->fl_flags);
1951                 if (!lock)
1952                         return -ENOMEM;
1953
1954                 rc = cifs_lock_add_if(cfile, lock, wait_flag);
1955                 if (rc < 0) {
1956                         kfree(lock);
1957                         return rc;
1958                 }
1959                 if (!rc)
1960                         goto out;
1961
1962                 /*
1963                  * A Windows 7 server can delay breaking a lease from read to None
1964                  * if we set a byte-range lock on a file - break it explicitly
1965                  * before sending the lock to the server to be sure the next
1966                  * read won't conflict with non-overlapping locks due to
1967                  * page reading.
1968                  */
1969                 if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
1970                                         CIFS_CACHE_READ(CIFS_I(inode))) {
1971                         cifs_zap_mapping(inode);
1972                         cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
1973                                  inode);
1974                         CIFS_I(inode)->oplock = 0;
1975                 }
1976
1977                 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1978                                             type, 1, 0, wait_flag);
1979                 if (rc) {
1980                         kfree(lock);
1981                         return rc;
1982                 }
1983
1984                 cifs_lock_add(cfile, lock);
1985         } else if (unlock)
1986                 rc = server->ops->mand_unlock_range(cfile, flock, xid);
1987
1988 out:
1989         if ((flock->fl_flags & FL_POSIX) || (flock->fl_flags & FL_FLOCK)) {
1990                 /*
1991                  * If this is a request to remove all locks because we
1992                  * are closing the file, it doesn't matter if the
1993                  * unlocking failed as both cifs.ko and the SMB server
1994                  * remove the lock on file close
1995                  */
1996                 if (rc) {
1997                         cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
1998                         if (!(flock->fl_flags & FL_CLOSE))
1999                                 return rc;
2000                 }
2001                 rc = locks_lock_file_wait(file, flock);
2002         }
2003         return rc;
2004 }
2005
2006 int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
2007 {
2008         int rc, xid;
2009         int lock = 0, unlock = 0;
2010         bool wait_flag = false;
2011         bool posix_lck = false;
2012         struct cifs_sb_info *cifs_sb;
2013         struct cifs_tcon *tcon;
2014         struct cifsFileInfo *cfile;
2015         __u32 type;
2016
2017         xid = get_xid();
2018
2019         if (!(fl->fl_flags & FL_FLOCK)) {
2020                 rc = -ENOLCK;
2021                 free_xid(xid);
2022                 return rc;
2023         }
2024
2025         cfile = (struct cifsFileInfo *)file->private_data;
2026         tcon = tlink_tcon(cfile->tlink);
2027
2028         cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag,
2029                         tcon->ses->server);
2030         cifs_sb = CIFS_FILE_SB(file);
2031
2032         if (cap_unix(tcon->ses) &&
2033             (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2034             ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2035                 posix_lck = true;
2036
2037         if (!lock && !unlock) {
2038                 /*
2039                  * The request is neither a lock nor an unlock, so there is
2040                  * nothing we can do with it.
2041                  */
2042                 rc = -EOPNOTSUPP;
2043                 free_xid(xid);
2044                 return rc;
2045         }
2046
2047         rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock,
2048                         xid);
2049         free_xid(xid);
2050         return rc;
2053 }
2054
2055 int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
2056 {
2057         int rc, xid;
2058         int lock = 0, unlock = 0;
2059         bool wait_flag = false;
2060         bool posix_lck = false;
2061         struct cifs_sb_info *cifs_sb;
2062         struct cifs_tcon *tcon;
2063         struct cifsFileInfo *cfile;
2064         __u32 type;
2065
2066         rc = -EACCES;
2067         xid = get_xid();
2068
2069         cifs_dbg(FYI, "%s: %pD2 cmd=0x%x type=0x%x flags=0x%x r=%lld:%lld\n", __func__, file, cmd,
2070                  flock->fl_type, flock->fl_flags, (long long)flock->fl_start,
2071                  (long long)flock->fl_end);
2072
2073         cfile = (struct cifsFileInfo *)file->private_data;
2074         tcon = tlink_tcon(cfile->tlink);
2075
2076         cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
2077                         tcon->ses->server);
2078         cifs_sb = CIFS_FILE_SB(file);
2079         set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);
2080
2081         if (cap_unix(tcon->ses) &&
2082             (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2083             ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2084                 posix_lck = true;
2085         /*
2086          * BB add code here to normalize offset and length to account for
2087          * negative length, which we cannot accept over the wire.
2088          */
2089         if (IS_GETLK(cmd)) {
2090                 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
2091                 free_xid(xid);
2092                 return rc;
2093         }
2094
2095         if (!lock && !unlock) {
2096                 /*
2097                  * The request is neither a lock nor an unlock, so there is
2098                  * nothing we can do with it.
2099                  */
2100                 free_xid(xid);
2101                 return -EOPNOTSUPP;
2102         }
2103
2104         rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
2105                         xid);
2106         free_xid(xid);
2107         return rc;
2108 }
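     /*
      * Userspace view (an illustrative sketch, not part of the original
      * file): byte-range fcntl() locks reach cifs_lock() via the VFS
      * ->lock hook, while whole-file flock() requests reach cifs_flock()
      * via ->flock, e.g.:
      *
      *	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET,
      *			    .l_start = 0, .l_len = 4096 };
      *	fcntl(fd, F_SETLKW, &fl);	// -> cifs_lock(file, F_SETLKW, ...)
      *	flock(fd, LOCK_EX);		// -> cifs_flock(file, ..., FL_FLOCK set)
      */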
2109
2110 /*
2111  * update the file size (if needed) after a write. Should be called with
2112  * the inode->i_lock held
2113  */
2114 void
2115 cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
2116                       unsigned int bytes_written)
2117 {
2118         loff_t end_of_write = offset + bytes_written;
2119
2120         if (end_of_write > cifsi->server_eof)
2121                 cifsi->server_eof = end_of_write;
2122 }
2123
2124 static ssize_t
2125 cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
2126            size_t write_size, loff_t *offset)
2127 {
2128         int rc = 0;
2129         unsigned int bytes_written = 0;
2130         unsigned int total_written;
2131         struct cifs_tcon *tcon;
2132         struct TCP_Server_Info *server;
2133         unsigned int xid;
2134         struct dentry *dentry = open_file->dentry;
2135         struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry));
2136         struct cifs_io_parms io_parms = {0};
2137
2138         cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
2139                  write_size, *offset, dentry);
2140
2141         tcon = tlink_tcon(open_file->tlink);
2142         server = tcon->ses->server;
2143
2144         if (!server->ops->sync_write)
2145                 return -ENOSYS;
2146
2147         xid = get_xid();
2148
2149         for (total_written = 0; write_size > total_written;
2150              total_written += bytes_written) {
2151                 rc = -EAGAIN;
2152                 while (rc == -EAGAIN) {
2153                         struct kvec iov[2];
2154                         unsigned int len;
2155
2156                         if (open_file->invalidHandle) {
2157                                 /* We could deadlock if we called
2158                                  * filemap_fdatawait from here, so tell
2159                                  * cifs_reopen_file not to flush data
2160                                  * to the server now. */
2161                                 rc = cifs_reopen_file(open_file, false);
2162                                 if (rc != 0)
2163                                         break;
2164                         }
2165
2166                         len = min(server->ops->wp_retry_size(d_inode(dentry)),
2167                                   (unsigned int)write_size - total_written);
2168                         /* iov[0] is reserved for smb header */
2169                         iov[1].iov_base = (char *)write_data + total_written;
2170                         iov[1].iov_len = len;
2171                         io_parms.pid = pid;
2172                         io_parms.tcon = tcon;
2173                         io_parms.offset = *offset;
2174                         io_parms.length = len;
2175                         rc = server->ops->sync_write(xid, &open_file->fid,
2176                                         &io_parms, &bytes_written, iov, 1);
2177                 }
2178                 if (rc || (bytes_written == 0)) {
2179                         if (total_written)
2180                                 break;
2181                         else {
2182                                 free_xid(xid);
2183                                 return rc;
2184                         }
2185                 } else {
2186                         spin_lock(&d_inode(dentry)->i_lock);
2187                         cifs_update_eof(cifsi, *offset, bytes_written);
2188                         spin_unlock(&d_inode(dentry)->i_lock);
2189                         *offset += bytes_written;
2190                 }
2191         }
2192
2193         cifs_stats_bytes_written(tcon, total_written);
2194
2195         if (total_written > 0) {
2196                 spin_lock(&d_inode(dentry)->i_lock);
2197                 if (*offset > d_inode(dentry)->i_size) {
2198                         i_size_write(d_inode(dentry), *offset);
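                             /* i_blocks is in 512-byte units; round the new size up */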
2199                         d_inode(dentry)->i_blocks = (512 - 1 + *offset) >> 9;
2200                 }
2201                 spin_unlock(&d_inode(dentry)->i_lock);
2202         }
2203         mark_inode_dirty_sync(d_inode(dentry));
2204         free_xid(xid);
2205         return total_written;
2206 }
2207
2208 struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
2209                                         bool fsuid_only)
2210 {
2211         struct cifsFileInfo *open_file = NULL;
2212         struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
2213
2214         /* only filter by fsuid on multiuser mounts */
2215         if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
2216                 fsuid_only = false;
2217
2218         spin_lock(&cifs_inode->open_file_lock);
2219         /* We could simply take the first list entry since write-only
2220          * entries are always at the end of the list, but the first entry
2221          * might have a close pending, so we walk the whole list. */
2222         list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2223                 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2224                         continue;
2225                 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
2226                         if (!open_file->invalidHandle) {
2227                                 /* found a good file */
2228                                 /* lock it so it will not be closed on us */
2229                                 cifsFileInfo_get(open_file);
2230                                 spin_unlock(&cifs_inode->open_file_lock);
2231                                 return open_file;
2232                         } /* else might as well continue, and look for
2233                              another, or simply have the caller reopen it
2234                              again rather than trying to fix this handle */
2235                 } else /* write only file */
2236                         break; /* write only files are last so must be done */
2237         }
2238         spin_unlock(&cifs_inode->open_file_lock);
2239         return NULL;
2240 }
2241
2242 /* Return -EBADF if no handle is found and general rc otherwise */
2243 int
2244 cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
2245                        struct cifsFileInfo **ret_file)
2246 {
2247         struct cifsFileInfo *open_file, *inv_file = NULL;
2248         struct cifs_sb_info *cifs_sb;
2249         bool any_available = false;
2250         int rc = -EBADF;
2251         unsigned int refind = 0;
2252         bool fsuid_only = flags & FIND_WR_FSUID_ONLY;
2253         bool with_delete = flags & FIND_WR_WITH_DELETE;
2254         *ret_file = NULL;
2255
2256         /*
2257          * Having a null inode here (because mapping->host was set to zero by
2258          * the VFS or MM) should not happen, but we had reports of an oops (due
2259          * to it being zero) during stress test cases, so we need to check for it.
2260          */
2261
2262         if (cifs_inode == NULL) {
2263                 cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
2264                 dump_stack();
2265                 return rc;
2266         }
2267
2268         cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
2269
2270         /* only filter by fsuid on multiuser mounts */
2271         if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
2272                 fsuid_only = false;
2273
2274         spin_lock(&cifs_inode->open_file_lock);
2275 refind_writable:
2276         if (refind > MAX_REOPEN_ATT) {
2277                 spin_unlock(&cifs_inode->open_file_lock);
2278                 return rc;
2279         }
2280         list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2281                 if (!any_available && open_file->pid != current->tgid)
2282                         continue;
2283                 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2284                         continue;
2285                 if (with_delete && !(open_file->fid.access & DELETE))
2286                         continue;
2287                 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
2288                         if (!open_file->invalidHandle) {
2289                                 /* found a good writable file */
2290                                 cifsFileInfo_get(open_file);
2291                                 spin_unlock(&cifs_inode->open_file_lock);
2292                                 *ret_file = open_file;
2293                                 return 0;
2294                         } else {
2295                                 if (!inv_file)
2296                                         inv_file = open_file;
2297                         }
2298                 }
2299         }
2300         /* couldn't find a usable FH with the same pid, try any available */
2301         if (!any_available) {
2302                 any_available = true;
2303                 goto refind_writable;
2304         }
2305
2306         if (inv_file) {
2307                 any_available = false;
2308                 cifsFileInfo_get(inv_file);
2309         }
2310
2311         spin_unlock(&cifs_inode->open_file_lock);
2312
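             /*
              * Every matching handle was invalid (e.g. after a reconnect), so
              * try to reopen one; if that fails, demote it to the tail of the
              * list and rescan, giving up after MAX_REOPEN_ATT attempts.
              */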
2313         if (inv_file) {
2314                 rc = cifs_reopen_file(inv_file, false);
2315                 if (!rc) {
2316                         *ret_file = inv_file;
2317                         return 0;
2318                 }
2319
2320                 spin_lock(&cifs_inode->open_file_lock);
2321                 list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
2322                 spin_unlock(&cifs_inode->open_file_lock);
2323                 cifsFileInfo_put(inv_file);
2324                 ++refind;
2325                 inv_file = NULL;
2326                 spin_lock(&cifs_inode->open_file_lock);
2327                 goto refind_writable;
2328         }
2329
2330         return rc;
2331 }
2332
2333 struct cifsFileInfo *
2334 find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
2335 {
2336         struct cifsFileInfo *cfile;
2337         int rc;
2338
2339         rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
2340         if (rc)
2341                 cifs_dbg(FYI, "Couldn't find writable handle rc=%d\n", rc);
2342
2343         return cfile;
2344 }
2345
2346 int
2347 cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
2348                        int flags,
2349                        struct cifsFileInfo **ret_file)
2350 {
2351         struct cifsFileInfo *cfile;
2352         void *page = alloc_dentry_path();
2353
2354         *ret_file = NULL;
2355
2356         spin_lock(&tcon->open_file_lock);
2357         list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2358                 struct cifsInodeInfo *cinode;
2359                 const char *full_path = build_path_from_dentry(cfile->dentry, page);
2360                 if (IS_ERR(full_path)) {
2361                         spin_unlock(&tcon->open_file_lock);
2362                         free_dentry_path(page);
2363                         return PTR_ERR(full_path);
2364                 }
2365                 if (strcmp(full_path, name))
2366                         continue;
2367
2368                 cinode = CIFS_I(d_inode(cfile->dentry));
2369                 spin_unlock(&tcon->open_file_lock);
2370                 free_dentry_path(page);
2371                 return cifs_get_writable_file(cinode, flags, ret_file);
2372         }
2373
2374         spin_unlock(&tcon->open_file_lock);
2375         free_dentry_path(page);
2376         return -ENOENT;
2377 }
2378
2379 int
2380 cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
2381                        struct cifsFileInfo **ret_file)
2382 {
2383         struct cifsFileInfo *cfile;
2384         void *page = alloc_dentry_path();
2385
2386         *ret_file = NULL;
2387
2388         spin_lock(&tcon->open_file_lock);
2389         list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2390                 struct cifsInodeInfo *cinode;
2391                 const char *full_path = build_path_from_dentry(cfile->dentry, page);
2392                 if (IS_ERR(full_path)) {
2393                         spin_unlock(&tcon->open_file_lock);
2394                         free_dentry_path(page);
2395                         return PTR_ERR(full_path);
2396                 }
2397                 if (strcmp(full_path, name))
2398                         continue;
2399
2400                 cinode = CIFS_I(d_inode(cfile->dentry));
2401                 spin_unlock(&tcon->open_file_lock);
2402                 free_dentry_path(page);
2403                 *ret_file = find_readable_file(cinode, 0);
2404                 return *ret_file ? 0 : -ENOENT;
2405         }
2406
2407         spin_unlock(&tcon->open_file_lock);
2408         free_dentry_path(page);
2409         return -ENOENT;
2410 }
2411
2412 void
2413 cifs_writedata_release(struct kref *refcount)
2414 {
2415         struct cifs_writedata *wdata = container_of(refcount,
2416                                         struct cifs_writedata, refcount);
2417 #ifdef CONFIG_CIFS_SMB_DIRECT
2418         if (wdata->mr) {
2419                 smbd_deregister_mr(wdata->mr);
2420                 wdata->mr = NULL;
2421         }
2422 #endif
2423
2424         if (wdata->cfile)
2425                 cifsFileInfo_put(wdata->cfile);
2426
2427         kfree(wdata);
2428 }
2429
2430 /*
2431  * Write failed with a retryable error. Resend the write request. It's also
2432  * possible that the page was redirtied so re-clean the page.
2433  */
2434 static void
2435 cifs_writev_requeue(struct cifs_writedata *wdata)
2436 {
2437         int rc = 0;
2438         struct inode *inode = d_inode(wdata->cfile->dentry);
2439         struct TCP_Server_Info *server;
2440         unsigned int rest_len = wdata->bytes;
2441         loff_t fpos = wdata->offset;
2442
2443         server = tlink_tcon(wdata->cfile->tlink)->ses->server;
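             /*
              * Re-issue the failed write in chunks no larger than the
              * server's current wp_retry_size(), rounded down to a whole
              * number of pages, looking up a writable handle afresh for
              * each chunk.
              */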
2444         do {
2445                 struct cifs_writedata *wdata2;
2446                 unsigned int wsize, cur_len;
2447
2448                 wsize = server->ops->wp_retry_size(inode);
2449                 if (wsize < rest_len) {
2450                         if (wsize < PAGE_SIZE) {
2451                                 rc = -EOPNOTSUPP;
2452                                 break;
2453                         }
2454                         cur_len = min(round_down(wsize, PAGE_SIZE), rest_len);
2455                 } else {
2456                         cur_len = rest_len;
2457                 }
2458
2459                 wdata2 = cifs_writedata_alloc(cifs_writev_complete);
2460                 if (!wdata2) {
2461                         rc = -ENOMEM;
2462                         break;
2463                 }
2464
2465                 wdata2->sync_mode = wdata->sync_mode;
2466                 wdata2->offset  = fpos;
2467                 wdata2->bytes   = cur_len;
2468                 wdata2->iter    = wdata->iter;
2469
2470                 iov_iter_advance(&wdata2->iter, fpos - wdata->offset);
2471                 iov_iter_truncate(&wdata2->iter, wdata2->bytes);
2472
2473                 if (iov_iter_is_xarray(&wdata2->iter))
2474                         /* Check for pages having been redirtied and clean
2475                          * them.  We can do this by walking the xarray.  If
2476                          * it's not an xarray, then it's a DIO and we shouldn't
2477                          * be mucking around with the page bits.
2478                          */
2479                         cifs_undirty_folios(inode, fpos, cur_len);
2480
2481                 rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY,
2482                                             &wdata2->cfile);
2483                 if (!wdata2->cfile) {
2484                         cifs_dbg(VFS, "No writable handle to retry writepages rc=%d\n",
2485                                  rc);
2486                         if (!is_retryable_error(rc))
2487                                 rc = -EBADF;
2488                 } else {
2489                         wdata2->pid = wdata2->cfile->pid;
2490                         rc = server->ops->async_writev(wdata2,
2491                                                        cifs_writedata_release);
2492                 }
2493
2494                 kref_put(&wdata2->refcount, cifs_writedata_release);
2495                 if (rc) {
2496                         if (is_retryable_error(rc))
2497                                 continue;
2498                         fpos += cur_len;
2499                         rest_len -= cur_len;
2500                         break;
2501                 }
2502
2503                 fpos += cur_len;
2504                 rest_len -= cur_len;
2505         } while (rest_len > 0);
2506
2507         /* Clean up remaining pages from the original wdata */
2508         if (iov_iter_is_xarray(&wdata->iter))
2509                 cifs_pages_write_failed(inode, fpos, rest_len);
2510
2511         if (rc != 0 && !is_retryable_error(rc))
2512                 mapping_set_error(inode->i_mapping, rc);
2513         kref_put(&wdata->refcount, cifs_writedata_release);
2514 }
2515
2516 void
2517 cifs_writev_complete(struct work_struct *work)
2518 {
2519         struct cifs_writedata *wdata = container_of(work,
2520                                                 struct cifs_writedata, work);
2521         struct inode *inode = d_inode(wdata->cfile->dentry);
2522
2523         if (wdata->result == 0) {
2524                 spin_lock(&inode->i_lock);
2525                 cifs_update_eof(CIFS_I(inode), wdata->offset, wdata->bytes);
2526                 spin_unlock(&inode->i_lock);
2527                 cifs_stats_bytes_written(tlink_tcon(wdata->cfile->tlink),
2528                                          wdata->bytes);
2529         } else if (wdata->sync_mode == WB_SYNC_ALL && wdata->result == -EAGAIN)
2530                 return cifs_writev_requeue(wdata);
2531
2532         if (wdata->result == -EAGAIN)
2533                 cifs_pages_write_redirty(inode, wdata->offset, wdata->bytes);
2534         else if (wdata->result < 0)
2535                 cifs_pages_write_failed(inode, wdata->offset, wdata->bytes);
2536         else
2537                 cifs_pages_written_back(inode, wdata->offset, wdata->bytes);
2538
2539         if (wdata->result != -EAGAIN)
2540                 mapping_set_error(inode->i_mapping, wdata->result);
2541         kref_put(&wdata->refcount, cifs_writedata_release);
2542 }
2543
2544 struct cifs_writedata *cifs_writedata_alloc(work_func_t complete)
2545 {
2546         struct cifs_writedata *wdata;
2547
2548         wdata = kzalloc(sizeof(*wdata), GFP_NOFS);
2549         if (wdata != NULL) {
2550                 kref_init(&wdata->refcount);
2551                 INIT_LIST_HEAD(&wdata->list);
2552                 init_completion(&wdata->done);
2553                 INIT_WORK(&wdata->work, complete);
2554         }
2555         return wdata;
2556 }
2557
2558 static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
2559 {
2560         struct address_space *mapping = page->mapping;
2561         loff_t offset = (loff_t)page->index << PAGE_SHIFT;
2562         char *write_data;
2563         int rc = -EFAULT;
2564         int bytes_written = 0;
2565         struct inode *inode;
2566         struct cifsFileInfo *open_file;
2567
2568         if (!mapping || !mapping->host)
2569                 return -EFAULT;
2570
2571         inode = page->mapping->host;
2572
2573         offset += (loff_t)from;
2574         write_data = kmap(page);
2575         write_data += from;
2576
2577         if ((to > PAGE_SIZE) || (from > to)) {
2578                 kunmap(page);
2579                 return -EIO;
2580         }
2581
2582         /* racing with truncate? */
2583         if (offset > mapping->host->i_size) {
2584                 kunmap(page);
2585                 return 0; /* don't care */
2586         }
2587
2588         /* check to make sure that we are not extending the file */
2589         if (mapping->host->i_size - offset < (loff_t)to)
2590                 to = (unsigned)(mapping->host->i_size - offset);
2591
2592         rc = cifs_get_writable_file(CIFS_I(mapping->host), FIND_WR_ANY,
2593                                     &open_file);
2594         if (!rc) {
2595                 bytes_written = cifs_write(open_file, open_file->pid,
2596                                            write_data, to - from, &offset);
2597                 cifsFileInfo_put(open_file);
2598                 /* Does mm or vfs already set times? */
2599                 inode->i_atime = inode->i_mtime = current_time(inode);
2600                 if ((bytes_written > 0) && (offset))
2601                         rc = 0;
2602                 else if (bytes_written < 0)
2603                         rc = bytes_written;
2604                 else
2605                         rc = -EFAULT;
2606         } else {
2607                 cifs_dbg(FYI, "No writable handle for write page rc=%d\n", rc);
2608                 if (!is_retryable_error(rc))
2609                         rc = -EIO;
2610         }
2611
2612         kunmap(page);
2613         return rc;
2614 }
2615
2616 /*
2617  * Extend the region to be written back to include subsequent contiguously
2618  * dirty pages if possible, but don't sleep while doing so.
2619  */
2620 static void cifs_extend_writeback(struct address_space *mapping,
2621                                   long *_count,
2622                                   loff_t start,
2623                                   int max_pages,
2624                                   size_t max_len,
2625                                   unsigned int *_len)
2626 {
2627         struct folio_batch batch;
2628         struct folio *folio;
2629         unsigned int psize, nr_pages;
2630         size_t len = *_len;
2631         pgoff_t index = (start + len) / PAGE_SIZE;
2632         bool stop = true;
2633         unsigned int i;
2634         XA_STATE(xas, &mapping->i_pages, index);
2635
2636         folio_batch_init(&batch);
2637
2638         do {
2639                 /* Firstly, we gather up a batch of contiguous dirty pages
2640                  * under the RCU read lock - but we can't clear the dirty flags
2641                  * there if any of those pages are mapped.
2642                  */
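                     /*
                      * Each folio is checked under RCU: take a reference with
                      * folio_try_get_rcu() (retrying the slot if that races),
                      * confirm via xas_reload() that it is still the folio at
                      * this index, then trylock it; if it has moved, cannot
                      * be locked, or is no longer simply dirty, the
                      * contiguous run ends there.
                      */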
2643                 rcu_read_lock();
2644
2645                 xas_for_each(&xas, folio, ULONG_MAX) {
2646                         stop = true;
2647                         if (xas_retry(&xas, folio))
2648                                 continue;
2649                         if (xa_is_value(folio))
2650                                 break;
2651                         if (folio_index(folio) != index)
2652                                 break;
2653                         if (!folio_try_get_rcu(folio)) {
2654                                 xas_reset(&xas);
2655                                 continue;
2656                         }
2657                         nr_pages = folio_nr_pages(folio);
2658                         if (nr_pages > max_pages)
2659                                 break;
2660
2661                         /* Has the page moved or been split? */
2662                         if (unlikely(folio != xas_reload(&xas))) {
2663                                 folio_put(folio);
2664                                 break;
2665                         }
2666
2667                         if (!folio_trylock(folio)) {
2668                                 folio_put(folio);
2669                                 break;
2670                         }
2671                         if (!folio_test_dirty(folio) || folio_test_writeback(folio)) {
2672                                 folio_unlock(folio);
2673                                 folio_put(folio);
2674                                 break;
2675                         }
2676
2677                         max_pages -= nr_pages;
2678                         psize = folio_size(folio);
2679                         len += psize;
2680                         stop = false;
2681                         if (max_pages <= 0 || len >= max_len || *_count <= 0)
2682                                 stop = true;
2683
2684                         index += nr_pages;
2685                         if (!folio_batch_add(&batch, folio))
2686                                 break;
2687                         if (stop)
2688                                 break;
2689                 }
2690
2691                 if (!stop)
2692                         xas_pause(&xas);
2693                 rcu_read_unlock();
2694
2695                 /* Now, if we obtained any pages, we can shift them to being
2696                  * writable and mark them for caching.
2697                  */
2698                 if (!folio_batch_count(&batch))
2699                         break;
2700
2701                 for (i = 0; i < folio_batch_count(&batch); i++) {
2702                         folio = batch.folios[i];
2703                         /* The folio should be locked, dirty and not undergoing
2704                          * writeback from the loop above.
2705                          */
2706                         if (!folio_clear_dirty_for_io(folio))
2707                                 WARN_ON(1);
2708                         if (folio_start_writeback(folio))
2709                                 WARN_ON(1);
2710
2711                         *_count -= folio_nr_pages(folio);
2712                         folio_unlock(folio);
2713                 }
2714
2715                 folio_batch_release(&batch);
2716                 cond_resched();
2717         } while (!stop);
2718
2719         *_len = len;
2720 }
2721
2722 /*
2723  * Write back the locked page and any subsequent non-locked dirty pages.
2724  */
2725 static ssize_t cifs_write_back_from_locked_folio(struct address_space *mapping,
2726                                                  struct writeback_control *wbc,
2727                                                  struct folio *folio,
2728                                                  loff_t start, loff_t end)
2729 {
2730         struct inode *inode = mapping->host;
2731         struct TCP_Server_Info *server;
2732         struct cifs_writedata *wdata;
2733         struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2734         struct cifs_credits credits_on_stack;
2735         struct cifs_credits *credits = &credits_on_stack;
2736         struct cifsFileInfo *cfile = NULL;
2737         unsigned int xid, wsize, len;
2738         loff_t i_size = i_size_read(inode);
2739         size_t max_len;
2740         long count = wbc->nr_to_write;
2741         int rc;
2742
2743         /* The folio should be locked, dirty and not undergoing writeback. */
2744         if (folio_start_writeback(folio))
2745                 WARN_ON(1);
2746
2747         count -= folio_nr_pages(folio);
2748         len = folio_size(folio);
2749
2750         xid = get_xid();
2751         server = cifs_pick_channel(cifs_sb_master_tcon(cifs_sb)->ses);
2752
2753         rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY, &cfile);
2754         if (rc) {
2755                 cifs_dbg(VFS, "No writable handle in writepages rc=%d\n", rc);
2756                 goto err_xid;
2757         }
2758
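             /*
              * Reserve send credits for a write of up to wsize bytes; they
              * are either consumed by the async write issued below or handed
              * back via add_credits_and_wake_if() on the error paths.
              */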
2759         rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->wsize,
2760                                            &wsize, credits);
2761         if (rc != 0)
2762                 goto err_close;
2763
2764         wdata = cifs_writedata_alloc(cifs_writev_complete);
2765         if (!wdata) {
2766                 rc = -ENOMEM;
2767                 goto err_uncredit;
2768         }
2769
2770         wdata->sync_mode = wbc->sync_mode;
2771         wdata->offset = folio_pos(folio);
2772         wdata->pid = cfile->pid;
2773         wdata->credits = credits_on_stack;
2774         wdata->cfile = cfile;
2775         wdata->server = server;
2776         cfile = NULL;
2777
2778         /* Find all consecutive lockable dirty pages, stopping when we find a
2779          * page that is not immediately lockable, is not dirty or is missing,
2780          * or we reach the end of the range.
2781          */
2782         if (start < i_size) {
2783                 /* Trim the write to the EOF; the extra data is ignored.  Also
2784                  * put an upper limit on the size of a single write op.
2785                  */
2786                 max_len = wsize;
2787                 max_len = min_t(unsigned long long, max_len, end - start + 1);
2788                 max_len = min_t(unsigned long long, max_len, i_size - start);
2789
2790                 if (len < max_len) {
2791                         int max_pages = INT_MAX;
2792
2793 #ifdef CONFIG_CIFS_SMB_DIRECT
2794                         if (server->smbd_conn)
2795                                 max_pages = server->smbd_conn->max_frmr_depth;
2796 #endif
2797                         max_pages -= folio_nr_pages(folio);
2798
2799                         if (max_pages > 0)
2800                                 cifs_extend_writeback(mapping, &count, start,
2801                                                       max_pages, max_len, &len);
2802                 }
2803                 len = min_t(loff_t, len, max_len);
2804         }
2805
2806         wdata->bytes = len;
2807
2808         /* We now have a contiguous set of dirty pages, each with writeback
2809          * set; the first page is still locked at this point, but all the rest
2810          * have been unlocked.
2811          */
2812         folio_unlock(folio);
2813
2814         if (start < i_size) {
2815                 iov_iter_xarray(&wdata->iter, ITER_SOURCE, &mapping->i_pages,
2816                                 start, len);
2817
2818                 rc = adjust_credits(wdata->server, &wdata->credits, wdata->bytes);
2819                 if (rc)
2820                         goto err_wdata;
2821
2822                 if (wdata->cfile->invalidHandle)
2823                         rc = -EAGAIN;
2824                 else
2825                         rc = wdata->server->ops->async_writev(wdata,
2826                                                               cifs_writedata_release);
2827                 if (rc >= 0) {
2828                         kref_put(&wdata->refcount, cifs_writedata_release);
2829                         goto err_close;
2830                 }
2831         } else {
2832                 /* The dirty region was entirely beyond the EOF. */
2833                 cifs_pages_written_back(inode, start, len);
2834                 rc = 0;
2835         }
2836
2837 err_wdata:
2838         kref_put(&wdata->refcount, cifs_writedata_release);
2839 err_uncredit:
2840         add_credits_and_wake_if(server, credits, 0);
2841 err_close:
2842         if (cfile)
2843                 cifsFileInfo_put(cfile);
2844 err_xid:
2845         free_xid(xid);
2846         if (rc == 0) {
2847                 wbc->nr_to_write = count;
2848                 rc = len;
2849         } else if (is_retryable_error(rc)) {
2850                 cifs_pages_write_redirty(inode, start, len);
2851         } else {
2852                 cifs_pages_write_failed(inode, start, len);
2853                 mapping_set_error(mapping, rc);
2854         }
2855         /* Indication to update ctime and mtime as close is deferred */
2856         set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags);
2857         return rc;
2858 }
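
/*
 * For clarity, the trim above clamps a single write op to three limits:
 * the negotiated wsize, the remaining writeback range and the file size.
 * A worked example (values illustrative only): with wsize = 0x10000,
 * start = 0x32000, end = 0x3ffff and i_size = 0x34800,
 *
 *      max_len = min(wsize, end - start + 1, i_size - start)
 *              = min(0x10000, 0xe000, 0x2800) = 0x2800
 *
 * so only the 10KiB up to the EOF is sent; the extra data is ignored.
 */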
2859
2860 /*
2861  * write a region of pages back to the server
2862  */
2863 static int cifs_writepages_region(struct address_space *mapping,
2864                                   struct writeback_control *wbc,
2865                                   loff_t start, loff_t end, loff_t *_next)
2866 {
2867         struct folio_batch fbatch;
2868         int skips = 0;
2869
2870         folio_batch_init(&fbatch);
2871         do {
2872                 int nr;
2873                 pgoff_t index = start / PAGE_SIZE;
2874
2875                 nr = filemap_get_folios_tag(mapping, &index, end / PAGE_SIZE,
2876                                             PAGECACHE_TAG_DIRTY, &fbatch);
2877                 if (!nr)
2878                         break;
2879
2880                 for (int i = 0; i < nr; i++) {
2881                         ssize_t ret;
2882                         struct folio *folio = fbatch.folios[i];
2883
2884 redo_folio:
2885                         start = folio_pos(folio); /* May regress with THPs */
2886
2887                         /* At this point we hold neither the i_pages lock nor the
2888                          * page lock: the page may be truncated or invalidated
2889                          * (changing page->mapping to NULL), or even swizzled
2890                          * back from swapper_space to tmpfs file mapping
2891                          */
2892                         if (wbc->sync_mode != WB_SYNC_NONE) {
2893                                 ret = folio_lock_killable(folio);
2894                                 if (ret < 0)
2895                                         goto write_error;
2896                         } else {
2897                                 if (!folio_trylock(folio))
2898                                         goto skip_write;
2899                         }
2900
2901                         if (folio_mapping(folio) != mapping ||
2902                             !folio_test_dirty(folio)) {
2903                                 start += folio_size(folio);
2904                                 folio_unlock(folio);
2905                                 continue;
2906                         }
2907
2908                         if (folio_test_writeback(folio) ||
2909                             folio_test_fscache(folio)) {
2910                                 folio_unlock(folio);
2911                                 if (wbc->sync_mode == WB_SYNC_NONE)
2912                                         goto skip_write;
2913
2914                                 folio_wait_writeback(folio);
2915 #ifdef CONFIG_CIFS_FSCACHE
2916                                 folio_wait_fscache(folio);
2917 #endif
2918                                 goto redo_folio;
2919                         }
2920
2921                         if (!folio_clear_dirty_for_io(folio))
2922                                 /* We hold the page lock - it should've been dirty. */
2923                                 WARN_ON(1);
2924
2925                         ret = cifs_write_back_from_locked_folio(mapping, wbc, folio, start, end);
2926                         if (ret < 0)
2927                                 goto write_error;
2928
2929                         start += ret;
2930                         continue;
2931
2932 write_error:
2933                         folio_batch_release(&fbatch);
2934                         *_next = start;
2935                         return ret;
2936
2937 skip_write:
2938                         /*
2939                          * Too many skipped writes, or need to reschedule?
2940                          * Treat it as a write error without an error code.
2941                          */
2942                         if (skips >= 5 || need_resched()) {
2943                                 ret = 0;
2944                                 goto write_error;
2945                         }
2946
2947                         /* Otherwise, just skip that folio and go on to the next */
2948                         skips++;
2949                         start += folio_size(folio);
2950                         continue;
2951                 }
2952
2953                 folio_batch_release(&fbatch);
2954                 cond_resched();
2955         } while (wbc->nr_to_write > 0);
2956
2957         *_next = start;
2958         return 0;
2959 }
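
/*
 * Note the retry pattern above for folios already under writeback or
 * fscache: in WB_SYNC_ALL mode they cannot simply be skipped, so the
 * folio lock is dropped, we sleep until the in-flight I/O finishes and
 * then re-validate the folio from scratch.  Condensed (illustrative):
 *
 *      folio_unlock(folio);
 *      folio_wait_writeback(folio);    /* sleeps until writeback ends */
 *      goto redo_folio;                /* state may have changed */
 */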
2960
2961 /*
2962  * Write some of the pending data back to the server
2963  */
2964 static int cifs_writepages(struct address_space *mapping,
2965                            struct writeback_control *wbc)
2966 {
2967         loff_t start, next;
2968         int ret;
2969
2970         /* We have to be careful as we can end up racing with setattr()
2971          * truncating the pagecache since the caller doesn't take a lock here
2972          * to prevent it.
2973          */
2974
2975         if (wbc->range_cyclic) {
2976                 start = mapping->writeback_index * PAGE_SIZE;
2977                 ret = cifs_writepages_region(mapping, wbc, start, LLONG_MAX, &next);
2978                 if (ret == 0) {
2979                         mapping->writeback_index = next / PAGE_SIZE;
2980                         if (start > 0 && wbc->nr_to_write > 0) {
2981                                 ret = cifs_writepages_region(mapping, wbc, 0,
2982                                                              start, &next);
2983                                 if (ret == 0)
2984                                         mapping->writeback_index =
2985                                                 next / PAGE_SIZE;
2986                         }
2987                 }
2988         } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
2989                 ret = cifs_writepages_region(mapping, wbc, 0, LLONG_MAX, &next);
2990                 if (wbc->nr_to_write > 0 && ret == 0)
2991                         mapping->writeback_index = next / PAGE_SIZE;
2992         } else {
2993                 ret = cifs_writepages_region(mapping, wbc,
2994                                              wbc->range_start, wbc->range_end, &next);
2995         }
2996
2997         return ret;
2998 }
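
/*
 * A worked example of the range_cyclic case above (numbers illustrative):
 * if the last cycle stopped at writeback_index = 100, the first pass
 * covers [100 * PAGE_SIZE, LLONG_MAX) and, if it succeeds with some of
 * the nr_to_write quota left, a second pass wraps around and covers
 * [0, 100 * PAGE_SIZE).  In both passes writeback_index is advanced so
 * the next cycle resumes where this one stopped.
 */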
2999
3000 static int
3001 cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
3002 {
3003         int rc;
3004         unsigned int xid;
3005
3006         xid = get_xid();
3007 /* BB add check for wbc flags */
3008         get_page(page);
3009         if (!PageUptodate(page))
3010                 cifs_dbg(FYI, "ppw - page not up to date\n");
3011
3012         /*
3013          * Set the "writeback" flag, and clear "dirty" in the radix tree.
3014          *
3015          * A writepage() implementation always needs to do either this,
3016          * or re-dirty the page with "redirty_page_for_writepage()" in
3017          * the case of a failure.
3018          *
3019          * Just unlocking the page will cause the radix tree tag-bits
3020          * to fail to update with the state of the page correctly.
3021          */
3022         set_page_writeback(page);
3023 retry_write:
3024         rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
3025         if (is_retryable_error(rc)) {
3026                 if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
3027                         goto retry_write;
3028                 redirty_page_for_writepage(wbc, page);
3029         } else if (rc != 0) {
3030                 SetPageError(page);
3031                 mapping_set_error(page->mapping, rc);
3032         } else {
3033                 SetPageUptodate(page);
3034         }
3035         end_page_writeback(page);
3036         put_page(page);
3037         free_xid(xid);
3038         return rc;
3039 }
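
/*
 * The contract implemented above: a writepage-style function must either
 * put the page under writeback or explicitly re-dirty it - just unlocking
 * would leave the xarray dirty tag out of step with the page state.
 * Condensed shape (write_to_server() is a stand-in for the actual RPC):
 *
 *      set_page_writeback(page);
 *      rc = write_to_server(page);
 *      if (is_retryable_error(rc))
 *              redirty_page_for_writepage(wbc, page);
 *      end_page_writeback(page);
 */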
3040
3041 static int cifs_write_end(struct file *file, struct address_space *mapping,
3042                         loff_t pos, unsigned len, unsigned copied,
3043                         struct page *page, void *fsdata)
3044 {
3045         int rc;
3046         struct inode *inode = mapping->host;
3047         struct cifsFileInfo *cfile = file->private_data;
3048         struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
3049         struct folio *folio = page_folio(page);
3050         __u32 pid;
3051
3052         if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3053                 pid = cfile->pid;
3054         else
3055                 pid = current->tgid;
3056
3057         cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
3058                  page, pos, copied);
3059
3060         if (folio_test_checked(folio)) {
3061                 if (copied == len)
3062                         folio_mark_uptodate(folio);
3063                 folio_clear_checked(folio);
3064         } else if (!folio_test_uptodate(folio) && copied == PAGE_SIZE)
3065                 folio_mark_uptodate(folio);
3066
3067         if (!folio_test_uptodate(folio)) {
3068                 char *page_data;
3069                 unsigned offset = pos & (PAGE_SIZE - 1);
3070                 unsigned int xid;
3071
3072                 xid = get_xid();
3073                 /* this is probably better than directly calling
3074                    partialpage_write since in this function the file handle is
3075                    known, which we might as well leverage */
3076                 /* BB check if anything else missing out of ppw
3077                    such as updating last write time */
3078                 page_data = kmap(page);
3079                 rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
3080                 /* if (rc < 0) should we set writebehind rc? */
3081                 kunmap(page);
3082
3083                 free_xid(xid);
3084         } else {
3085                 rc = copied;
3086                 pos += copied;
3087                 set_page_dirty(page);
3088         }
3089
3090         if (rc > 0) {
3091                 spin_lock(&inode->i_lock);
3092                 if (pos > inode->i_size) {
3093                         i_size_write(inode, pos);
3094                         inode->i_blocks = (512 - 1 + pos) >> 9;
3095                 }
3096                 spin_unlock(&inode->i_lock);
3097         }
3098
3099         unlock_page(page);
3100         put_page(page);
3101         /* Indication to update ctime and mtime as close is deferred */
3102         set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags);
3103
3104         return rc;
3105 }
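
/*
 * A note on the i_blocks update above: (512 - 1 + pos) >> 9 rounds the
 * new size up to 512-byte blocks, i.e. DIV_ROUND_UP(pos, 512).  For
 * example, pos = 1000 gives (511 + 1000) >> 9 = 2 blocks, and
 * pos = 1024 gives (511 + 1024) >> 9 = 2 blocks exactly.
 */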
3106
3107 int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
3108                       int datasync)
3109 {
3110         unsigned int xid;
3111         int rc = 0;
3112         struct cifs_tcon *tcon;
3113         struct TCP_Server_Info *server;
3114         struct cifsFileInfo *smbfile = file->private_data;
3115         struct inode *inode = file_inode(file);
3116         struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3117
3118         rc = file_write_and_wait_range(file, start, end);
3119         if (rc) {
3120                 trace_cifs_fsync_err(inode->i_ino, rc);
3121                 return rc;
3122         }
3123
3124         xid = get_xid();
3125
3126         cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
3127                  file, datasync);
3128
3129         if (!CIFS_CACHE_READ(CIFS_I(inode))) {
3130                 rc = cifs_zap_mapping(inode);
3131                 if (rc) {
3132                         cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
3133                         rc = 0; /* don't care about it in fsync */
3134                 }
3135         }
3136
3137         tcon = tlink_tcon(smbfile->tlink);
3138         if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
3139                 server = tcon->ses->server;
3140                 if (server->ops->flush == NULL) {
3141                         rc = -ENOSYS;
3142                         goto strict_fsync_exit;
3143                 }
3144
3145                 if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
3146                         smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
3147                         if (smbfile) {
3148                                 rc = server->ops->flush(xid, tcon, &smbfile->fid);
3149                                 cifsFileInfo_put(smbfile);
3150                         } else
3151                                 cifs_dbg(FYI, "ignore fsync for file not open for write\n");
3152                 } else
3153                         rc = server->ops->flush(xid, tcon, &smbfile->fid);
3154         }
3155
3156 strict_fsync_exit:
3157         free_xid(xid);
3158         return rc;
3159 }
3160
3161 int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
3162 {
3163         unsigned int xid;
3164         int rc = 0;
3165         struct cifs_tcon *tcon;
3166         struct TCP_Server_Info *server;
3167         struct cifsFileInfo *smbfile = file->private_data;
3168         struct inode *inode = file_inode(file);
3169         struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
3170
3171         rc = file_write_and_wait_range(file, start, end);
3172         if (rc) {
3173                 trace_cifs_fsync_err(file_inode(file)->i_ino, rc);
3174                 return rc;
3175         }
3176
3177         xid = get_xid();
3178
3179         cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
3180                  file, datasync);
3181
3182         tcon = tlink_tcon(smbfile->tlink);
3183         if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
3184                 server = tcon->ses->server;
3185                 if (server->ops->flush == NULL) {
3186                         rc = -ENOSYS;
3187                         goto fsync_exit;
3188                 }
3189
3190                 if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
3191                         smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
3192                         if (smbfile) {
3193                                 rc = server->ops->flush(xid, tcon, &smbfile->fid);
3194                                 cifsFileInfo_put(smbfile);
3195                         } else
3196                                 cifs_dbg(FYI, "ignore fsync for file not open for write\n");
3197                 } else
3198                         rc = server->ops->flush(xid, tcon, &smbfile->fid);
3199         }
3200
3201 fsync_exit:
3202         free_xid(xid);
3203         return rc;
3204 }
3205
3206 /*
3207  * As the file closes, flush all cached write data for this inode,
3208  * checking for write-behind errors.
3209  */
3210 int cifs_flush(struct file *file, fl_owner_t id)
3211 {
3212         struct inode *inode = file_inode(file);
3213         int rc = 0;
3214
3215         if (file->f_mode & FMODE_WRITE)
3216                 rc = filemap_write_and_wait(inode->i_mapping);
3217
3218         cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
3219         if (rc) {
3220                 /* get more nuanced writeback errors */
3221                 rc = filemap_check_wb_err(file->f_mapping, 0);
3222                 trace_cifs_flush_err(inode->i_ino, rc);
3223         }
3224         return rc;
3225 }
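
/*
 * A note on the error path above: the value returned by
 * filemap_write_and_wait() can be imprecise, so on failure we re-sample
 * for a more nuanced error via the errseq machinery.  Minimal sketch of
 * that pattern (illustrative only):
 *
 *      errseq_t since = 0;     /* i.e. any error since the beginning */
 *      rc = filemap_check_wb_err(file->f_mapping, since);
 */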
3226
3227 static void
3228 cifs_uncached_writedata_release(struct kref *refcount)
3229 {
3230         struct cifs_writedata *wdata = container_of(refcount,
3231                                         struct cifs_writedata, refcount);
3232
3233         kref_put(&wdata->ctx->refcount, cifs_aio_ctx_release);
3234         cifs_writedata_release(refcount);
3235 }
3236
3237 static void collect_uncached_write_data(struct cifs_aio_ctx *ctx);
3238
3239 static void
3240 cifs_uncached_writev_complete(struct work_struct *work)
3241 {
3242         struct cifs_writedata *wdata = container_of(work,
3243                                         struct cifs_writedata, work);
3244         struct inode *inode = d_inode(wdata->cfile->dentry);
3245         struct cifsInodeInfo *cifsi = CIFS_I(inode);
3246
3247         spin_lock(&inode->i_lock);
3248         cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
3249         if (cifsi->server_eof > inode->i_size)
3250                 i_size_write(inode, cifsi->server_eof);
3251         spin_unlock(&inode->i_lock);
3252
3253         complete(&wdata->done);
3254         collect_uncached_write_data(wdata->ctx);
3255         /* the below call can possibly free the last ref to aio ctx */
3256         kref_put(&wdata->refcount, cifs_uncached_writedata_release);
3257 }
3258
3259 static int
3260 cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list,
3261         struct cifs_aio_ctx *ctx)
3262 {
3263         unsigned int wsize;
3264         struct cifs_credits credits;
3265         int rc;
3266         struct TCP_Server_Info *server = wdata->server;
3267
3268         do {
3269                 if (wdata->cfile->invalidHandle) {
3270                         rc = cifs_reopen_file(wdata->cfile, false);
3271                         if (rc == -EAGAIN)
3272                                 continue;
3273                         else if (rc)
3274                                 break;
3275                 }
3276
3277
3279                  * Wait for credits to resend this wdata.
3280                  * Note: we are attempting to resend the whole wdata not in
3281                  * Note: we attempt to resend the whole wdata rather than
3282                  * in segments.
3283                 do {
3284                         rc = server->ops->wait_mtu_credits(server, wdata->bytes,
3285                                                 &wsize, &credits);
3286                         if (rc)
3287                                 goto fail;
3288
3289                         if (wsize < wdata->bytes) {
3290                                 add_credits_and_wake_if(server, &credits, 0);
3291                                 msleep(1000);
3292                         }
3293                 } while (wsize < wdata->bytes);
3294                 wdata->credits = credits;
3295
3296                 rc = adjust_credits(server, &wdata->credits, wdata->bytes);
3297
3298                 if (!rc) {
3299                         if (wdata->cfile->invalidHandle)
3300                                 rc = -EAGAIN;
3301                         else {
3302 #ifdef CONFIG_CIFS_SMB_DIRECT
3303                                 if (wdata->mr) {
3304                                         wdata->mr->need_invalidate = true;
3305                                         smbd_deregister_mr(wdata->mr);
3306                                         wdata->mr = NULL;
3307                                 }
3308 #endif
3309                                 rc = server->ops->async_writev(wdata,
3310                                         cifs_uncached_writedata_release);
3311                         }
3312                 }
3313
3314                 /* If the write was successfully sent, we are done */
3315                 if (!rc) {
3316                         list_add_tail(&wdata->list, wdata_list);
3317                         return 0;
3318                 }
3319
3320                 /* Roll back credits and retry if needed */
3321                 add_credits_and_wake_if(server, &wdata->credits, 0);
3322         } while (rc == -EAGAIN);
3323
3324 fail:
3325         kref_put(&wdata->refcount, cifs_uncached_writedata_release);
3326         return rc;
3327 }
3328
3329 /*
3330  * Select span of a bvec iterator we're going to use.  Limit it by both maximum
3331  * size and maximum number of segments.
3332  */
3333 static size_t cifs_limit_bvec_subset(const struct iov_iter *iter, size_t max_size,
3334                                      size_t max_segs, unsigned int *_nsegs)
3335 {
3336         const struct bio_vec *bvecs = iter->bvec;
3337         unsigned int nbv = iter->nr_segs, ix = 0, nsegs = 0;
3338         size_t len, span = 0, n = iter->count;
3339         size_t skip = iter->iov_offset;
3340
3341         if (WARN_ON(!iov_iter_is_bvec(iter)) || n == 0)
3342                 return 0;
3343
3344         while (n && ix < nbv && skip) {
3345                 len = bvecs[ix].bv_len;
3346                 if (skip < len)
3347                         break;
3348                 skip -= len;
3349                 n -= len;
3350                 ix++;
3351         }
3352
3353         while (n && ix < nbv) {
3354                 len = min3(n, bvecs[ix].bv_len - skip, max_size - span);
3355                 span += len;
3356                 nsegs++;
3357                 ix++;
3358                 if (span >= max_size || nsegs >= max_segs)
3359                         break;
3360                 skip = 0;
3361                 n -= len;
3362         }
3363
3364         *_nsegs = nsegs;
3365         return span;
3366 }
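
/*
 * A worked example of the span selection above (values illustrative):
 * three 4096-byte bvecs with iov_offset = 1024, max_size = 6000 and
 * max_segs = 8.  The skip loop stops at ix = 0 with skip = 1024; the
 * main loop then takes 3072 bytes from bvec 0 and, because each len is
 * capped by the budget still left in max_size, only 2928 bytes from
 * bvec 1, returning span = 6000 with *_nsegs = 2 - never a span larger
 * than max_size.
 */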
3367
3368 static int
3369 cifs_write_from_iter(loff_t fpos, size_t len, struct iov_iter *from,
3370                      struct cifsFileInfo *open_file,
3371                      struct cifs_sb_info *cifs_sb, struct list_head *wdata_list,
3372                      struct cifs_aio_ctx *ctx)
3373 {
3374         int rc = 0;
3375         size_t cur_len, max_len;
3376         struct cifs_writedata *wdata;
3377         pid_t pid;
3378         struct TCP_Server_Info *server;
3379         unsigned int xid, max_segs = INT_MAX;
3380
3381         if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3382                 pid = open_file->pid;
3383         else
3384                 pid = current->tgid;
3385
3386         server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
3387         xid = get_xid();
3388
3389 #ifdef CONFIG_CIFS_SMB_DIRECT
3390         if (server->smbd_conn)
3391                 max_segs = server->smbd_conn->max_frmr_depth;
3392 #endif
3393
3394         do {
3395                 struct cifs_credits credits_on_stack;
3396                 struct cifs_credits *credits = &credits_on_stack;
3397                 unsigned int wsize, nsegs = 0;
3398
3399                 if (signal_pending(current)) {
3400                         rc = -EINTR;
3401                         break;
3402                 }
3403
3404                 if (open_file->invalidHandle) {
3405                         rc = cifs_reopen_file(open_file, false);
3406                         if (rc == -EAGAIN)
3407                                 continue;
3408                         else if (rc)
3409                                 break;
3410                 }
3411
3412                 rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->wsize,
3413                                                    &wsize, credits);
3414                 if (rc)
3415                         break;
3416
3417                 max_len = min_t(const size_t, len, wsize);
3418                 if (!max_len) {
3419                         rc = -EAGAIN;
3420                         add_credits_and_wake_if(server, credits, 0);
3421                         break;
3422                 }
3423
3424                 cur_len = cifs_limit_bvec_subset(from, max_len, max_segs, &nsegs);
3425                 cifs_dbg(FYI, "write_from_iter len=%zx/%zx nsegs=%u/%lu/%u\n",
3426                          cur_len, max_len, nsegs, from->nr_segs, max_segs);
3427                 if (cur_len == 0) {
3428                         rc = -EIO;
3429                         add_credits_and_wake_if(server, credits, 0);
3430                         break;
3431                 }
3432
3433                 wdata = cifs_writedata_alloc(cifs_uncached_writev_complete);
3434                 if (!wdata) {
3435                         rc = -ENOMEM;
3436                         add_credits_and_wake_if(server, credits, 0);
3437                         break;
3438                 }
3439
3440                 wdata->sync_mode = WB_SYNC_ALL;
3441                 wdata->offset   = (__u64)fpos;
3442                 wdata->cfile    = cifsFileInfo_get(open_file);
3443                 wdata->server   = server;
3444                 wdata->pid      = pid;
3445                 wdata->bytes    = cur_len;
3446                 wdata->credits  = credits_on_stack;
3447                 wdata->iter     = *from;
3448                 wdata->ctx      = ctx;
3449                 kref_get(&ctx->refcount);
3450
3451                 iov_iter_truncate(&wdata->iter, cur_len);
3452
3453                 rc = adjust_credits(server, &wdata->credits, wdata->bytes);
3454
3455                 if (!rc) {
3456                         if (wdata->cfile->invalidHandle)
3457                                 rc = -EAGAIN;
3458                         else
3459                                 rc = server->ops->async_writev(wdata,
3460                                         cifs_uncached_writedata_release);
3461                 }
3462
3463                 if (rc) {
3464                         add_credits_and_wake_if(server, &wdata->credits, 0);
3465                         kref_put(&wdata->refcount,
3466                                  cifs_uncached_writedata_release);
3467                         if (rc == -EAGAIN)
3468                                 continue;
3469                         break;
3470                 }
3471
3472                 list_add_tail(&wdata->list, wdata_list);
3473                 iov_iter_advance(from, cur_len);
3474                 fpos += cur_len;
3475                 len -= cur_len;
3476         } while (len > 0);
3477
3478         free_xid(xid);
3479         return rc;
3480 }
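
/*
 * The iterator slicing used above, condensed (illustrative only): each
 * wdata gets a private copy of the source iterator, truncated to its
 * slice, and the shared iterator is then advanced past that slice:
 *
 *      wdata->iter = *from;                    /* cheap struct copy */
 *      iov_iter_truncate(&wdata->iter, cur_len);
 *      ...
 *      iov_iter_advance(from, cur_len);
 */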
3481
3482 static void collect_uncached_write_data(struct cifs_aio_ctx *ctx)
3483 {
3484         struct cifs_writedata *wdata, *tmp;
3485         struct cifs_tcon *tcon;
3486         struct cifs_sb_info *cifs_sb;
3487         struct dentry *dentry = ctx->cfile->dentry;
3488         ssize_t rc;
3489
3490         tcon = tlink_tcon(ctx->cfile->tlink);
3491         cifs_sb = CIFS_SB(dentry->d_sb);
3492
3493         mutex_lock(&ctx->aio_mutex);
3494
3495         if (list_empty(&ctx->list)) {
3496                 mutex_unlock(&ctx->aio_mutex);
3497                 return;
3498         }
3499
3500         rc = ctx->rc;
3501         /*
3502          * Wait for and collect replies for any successful sends in order of
3503          * increasing offset. Once an error is hit, then return without waiting
3504          * for any more replies.
3505          */
3506 restart_loop:
3507         list_for_each_entry_safe(wdata, tmp, &ctx->list, list) {
3508                 if (!rc) {
3509                         if (!try_wait_for_completion(&wdata->done)) {
3510                                 mutex_unlock(&ctx->aio_mutex);
3511                                 return;
3512                         }
3513
3514                         if (wdata->result)
3515                                 rc = wdata->result;
3516                         else
3517                                 ctx->total_len += wdata->bytes;
3518
3519                         /* resend call if it's a retryable error */
3520                         if (rc == -EAGAIN) {
3521                                 struct list_head tmp_list;
3522                                 struct iov_iter tmp_from = ctx->iter;
3523
3524                                 INIT_LIST_HEAD(&tmp_list);
3525                                 list_del_init(&wdata->list);
3526
3527                                 if (ctx->direct_io)
3528                                         rc = cifs_resend_wdata(
3529                                                 wdata, &tmp_list, ctx);
3530                                 else {
3531                                         iov_iter_advance(&tmp_from,
3532                                                  wdata->offset - ctx->pos);
3533
3534                                         rc = cifs_write_from_iter(wdata->offset,
3535                                                 wdata->bytes, &tmp_from,
3536                                                 ctx->cfile, cifs_sb, &tmp_list,
3537                                                 ctx);
3538
3539                                         kref_put(&wdata->refcount,
3540                                                 cifs_uncached_writedata_release);
3541                                 }
3542
3543                                 list_splice(&tmp_list, &ctx->list);
3544                                 goto restart_loop;
3545                         }
3546                 }
3547                 list_del_init(&wdata->list);
3548                 kref_put(&wdata->refcount, cifs_uncached_writedata_release);
3549         }
3550
3551         cifs_stats_bytes_written(tcon, ctx->total_len);
3552         set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(dentry->d_inode)->flags);
3553
3554         ctx->rc = (rc == 0) ? ctx->total_len : rc;
3555
3556         mutex_unlock(&ctx->aio_mutex);
3557
3558         if (ctx->iocb && ctx->iocb->ki_complete)
3559                 ctx->iocb->ki_complete(ctx->iocb, ctx->rc);
3560         else
3561                 complete(&ctx->done);
3562 }
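
/*
 * Note the non-blocking collection above: try_wait_for_completion()
 * only consumes a completion that has already fired, so if the next
 * wdata in offset order is still in flight we just drop the mutex and
 * return - the final completing write re-enters this function and
 * finishes the job.  Condensed:
 *
 *      if (!try_wait_for_completion(&wdata->done)) {
 *              mutex_unlock(&ctx->aio_mutex);
 *              return;         /* a later completion resumes this */
 *      }
 */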
3563
3564 static ssize_t __cifs_writev(
3565         struct kiocb *iocb, struct iov_iter *from, bool direct)
3566 {
3567         struct file *file = iocb->ki_filp;
3568         ssize_t total_written = 0;
3569         struct cifsFileInfo *cfile;
3570         struct cifs_tcon *tcon;
3571         struct cifs_sb_info *cifs_sb;
3572         struct cifs_aio_ctx *ctx;
3573         int rc;
3574
3575         rc = generic_write_checks(iocb, from);
3576         if (rc <= 0)
3577                 return rc;
3578
3579         cifs_sb = CIFS_FILE_SB(file);
3580         cfile = file->private_data;
3581         tcon = tlink_tcon(cfile->tlink);
3582
3583         if (!tcon->ses->server->ops->async_writev)
3584                 return -ENOSYS;
3585
3586         ctx = cifs_aio_ctx_alloc();
3587         if (!ctx)
3588                 return -ENOMEM;
3589
3590         ctx->cfile = cifsFileInfo_get(cfile);
3591
3592         if (!is_sync_kiocb(iocb))
3593                 ctx->iocb = iocb;
3594
3595         ctx->pos = iocb->ki_pos;
3596         ctx->direct_io = direct;
3597         ctx->nr_pinned_pages = 0;
3598
3599         if (user_backed_iter(from)) {
3600                 /*
3601                  * Extract IOVEC/UBUF-type iterators to a BVEC-type iterator as
3602                  * they contain references to the calling process's virtual
3603                  * memory layout which won't be available in an async worker
3604                  * thread.  This also takes a pin on every folio involved.
3605                  */
3606                 rc = netfs_extract_user_iter(from, iov_iter_count(from),
3607                                              &ctx->iter, 0);
3608                 if (rc < 0) {
3609                         kref_put(&ctx->refcount, cifs_aio_ctx_release);
3610                         return rc;
3611                 }
3612
3613                 ctx->nr_pinned_pages = rc;
3614                 ctx->bv = (void *)ctx->iter.bvec;
3615                 ctx->bv_need_unpin = iov_iter_extract_will_pin(from);
3616         } else if ((iov_iter_is_bvec(from) || iov_iter_is_kvec(from)) &&
3617                    !is_sync_kiocb(iocb)) {
3618                 /*
3619                  * If the op is asynchronous, we need to copy the list attached
3620                  * to a BVEC/KVEC-type iterator, but we assume that the storage
3621                  * will be pinned by the caller; in any case, we may or may not
3622                  * be able to pin the pages, so we don't try.
3623                  */
3624                 ctx->bv = (void *)dup_iter(&ctx->iter, from, GFP_KERNEL);
3625                 if (!ctx->bv) {
3626                         kref_put(&ctx->refcount, cifs_aio_ctx_release);
3627                         return -ENOMEM;
3628                 }
3629         } else {
3630                 /*
3631                  * Otherwise, we just pass the iterator down as-is and rely on
3632                  * the caller to make sure the pages referred to by the
3633                  * iterator don't evaporate.
3634                  */
3635                 ctx->iter = *from;
3636         }
3637
3638         ctx->len = iov_iter_count(&ctx->iter);
3639
3640         /* grab a lock here because write response handlers can access ctx */
3641         mutex_lock(&ctx->aio_mutex);
3642
3643         rc = cifs_write_from_iter(iocb->ki_pos, ctx->len, &ctx->iter,
3644                                   cfile, cifs_sb, &ctx->list, ctx);
3645
3646         /*
3647          * If at least one write was successfully sent, then discard any rc
3648          * value from the later writes. If the other writes succeed, then
3649          * we'll end up returning whatever was written. If they fail, then
3650          * we'll get a new rc value from them.
3651          */
3652         if (!list_empty(&ctx->list))
3653                 rc = 0;
3654
3655         mutex_unlock(&ctx->aio_mutex);
3656
3657         if (rc) {
3658                 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3659                 return rc;
3660         }
3661
3662         if (!is_sync_kiocb(iocb)) {
3663                 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3664                 return -EIOCBQUEUED;
3665         }
3666
3667         rc = wait_for_completion_killable(&ctx->done);
3668         if (rc) {
3669                 mutex_lock(&ctx->aio_mutex);
3670                 ctx->rc = rc = -EINTR;
3671                 total_written = ctx->total_len;
3672                 mutex_unlock(&ctx->aio_mutex);
3673         } else {
3674                 rc = ctx->rc;
3675                 total_written = ctx->total_len;
3676         }
3677
3678         kref_put(&ctx->refcount, cifs_aio_ctx_release);
3679
3680         if (unlikely(!total_written))
3681                 return rc;
3682
3683         iocb->ki_pos += total_written;
3684         return total_written;
3685 }
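
/*
 * How the source iterator is captured above depends on what backs it,
 * since the write may complete in a worker thread.  Condensed decision
 * tree (illustrative only):
 *
 *      if (user_backed_iter(from))             /* pin the user pages */
 *              rc = netfs_extract_user_iter(from, count, &ctx->iter, 0);
 *      else if (async && (bvec || kvec))       /* copy the segment list */
 *              ctx->bv = (void *)dup_iter(&ctx->iter, from, GFP_KERNEL);
 *      else                                    /* caller keeps pages alive */
 *              ctx->iter = *from;
 */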
3686
3687 ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from)
3688 {
3689         struct file *file = iocb->ki_filp;
3690
3691         cifs_revalidate_mapping(file->f_inode);
3692         return __cifs_writev(iocb, from, true);
3693 }
3694
3695 ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
3696 {
3697         return __cifs_writev(iocb, from, false);
3698 }
3699
3700 static ssize_t
3701 cifs_writev(struct kiocb *iocb, struct iov_iter *from)
3702 {
3703         struct file *file = iocb->ki_filp;
3704         struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
3705         struct inode *inode = file->f_mapping->host;
3706         struct cifsInodeInfo *cinode = CIFS_I(inode);
3707         struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
3708         ssize_t rc;
3709
3710         inode_lock(inode);
3711         /*
3712          * We need to hold the sem to be sure nobody modifies the lock list
3713          * with a brlock that prevents writing.
3714          */
3715         down_read(&cinode->lock_sem);
3716
3717         rc = generic_write_checks(iocb, from);
3718         if (rc <= 0)
3719                 goto out;
3720
3721         if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
3722                                      server->vals->exclusive_lock_type, 0,
3723                                      NULL, CIFS_WRITE_OP))
3724                 rc = __generic_file_write_iter(iocb, from);
3725         else
3726                 rc = -EACCES;
3727 out:
3728         up_read(&cinode->lock_sem);
3729         inode_unlock(inode);
3730
3731         if (rc > 0)
3732                 rc = generic_write_sync(iocb, rc);
3733         return rc;
3734 }
3735
3736 ssize_t
3737 cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
3738 {
3739         struct inode *inode = file_inode(iocb->ki_filp);
3740         struct cifsInodeInfo *cinode = CIFS_I(inode);
3741         struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3742         struct cifsFileInfo *cfile = (struct cifsFileInfo *)
3743                                                 iocb->ki_filp->private_data;
3744         struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
3745         ssize_t written;
3746
3747         written = cifs_get_writer(cinode);
3748         if (written)
3749                 return written;
3750
3751         if (CIFS_CACHE_WRITE(cinode)) {
3752                 if (cap_unix(tcon->ses) &&
3753                 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
3754                   && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
3755                         written = generic_file_write_iter(iocb, from);
3756                         goto out;
3757                 }
3758                 written = cifs_writev(iocb, from);
3759                 goto out;
3760         }
3761         /*
3762          * For non-oplocked files in strict cache mode we need to write the data
3763          * to the server exactly from pos to pos+len-1 rather than flush all
3764          * affected pages because that may cause an error with mandatory locks on
3765          * these pages but not on the region from pos to pos+len-1.
3766          */
3767         written = cifs_user_writev(iocb, from);
3768         if (CIFS_CACHE_READ(cinode)) {
3769                 /*
3770                  * We have read level caching and we have just sent a write
3771                  * request to the server thus making data in the cache stale.
3772                  * Zap the cache and set oplock/lease level to NONE to avoid
3773                  * reading stale data from the cache. All subsequent read
3774                  * operations will read new data from the server.
3775                  */
3776                 cifs_zap_mapping(inode);
3777                 cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
3778                          inode);
3779                 cinode->oplock = 0;
3780         }
3781 out:
3782         cifs_put_writer(cinode);
3783         return written;
3784 }
3785
3786 static struct cifs_readdata *cifs_readdata_alloc(work_func_t complete)
3787 {
3788         struct cifs_readdata *rdata;
3789
3790         rdata = kzalloc(sizeof(*rdata), GFP_KERNEL);
3791         if (rdata) {
3792                 kref_init(&rdata->refcount);
3793                 INIT_LIST_HEAD(&rdata->list);
3794                 init_completion(&rdata->done);
3795                 INIT_WORK(&rdata->work, complete);
3796         }
3797
3798         return rdata;
3799 }
3800
3801 void
3802 cifs_readdata_release(struct kref *refcount)
3803 {
3804         struct cifs_readdata *rdata = container_of(refcount,
3805                                         struct cifs_readdata, refcount);
3806
3807         if (rdata->ctx)
3808                 kref_put(&rdata->ctx->refcount, cifs_aio_ctx_release);
3809 #ifdef CONFIG_CIFS_SMB_DIRECT
3810         if (rdata->mr) {
3811                 smbd_deregister_mr(rdata->mr);
3812                 rdata->mr = NULL;
3813         }
3814 #endif
3815         if (rdata->cfile)
3816                 cifsFileInfo_put(rdata->cfile);
3817
3818         kfree(rdata);
3819 }
3820
3821 static void collect_uncached_read_data(struct cifs_aio_ctx *ctx);
3822
3823 static void
3824 cifs_uncached_readv_complete(struct work_struct *work)
3825 {
3826         struct cifs_readdata *rdata = container_of(work,
3827                                                 struct cifs_readdata, work);
3828
3829         complete(&rdata->done);
3830         collect_uncached_read_data(rdata->ctx);
3831         /* the below call can possibly free the last ref to aio ctx */
3832         kref_put(&rdata->refcount, cifs_readdata_release);
3833 }
3834
3835 static int cifs_resend_rdata(struct cifs_readdata *rdata,
3836                         struct list_head *rdata_list,
3837                         struct cifs_aio_ctx *ctx)
3838 {
3839         unsigned int rsize;
3840         struct cifs_credits credits;
3841         int rc;
3842         struct TCP_Server_Info *server;
3843
3844         /* XXX: should we pick a new channel here? */
3845         server = rdata->server;
3846
3847         do {
3848                 if (rdata->cfile->invalidHandle) {
3849                         rc = cifs_reopen_file(rdata->cfile, true);
3850                         if (rc == -EAGAIN)
3851                                 continue;
3852                         else if (rc)
3853                                 break;
3854                 }
3855
3856                 /*
3857                  * Wait for credits to resend this rdata.
3858                  * Note: we attempt to resend the whole rdata rather than
3859                  * in segments.
3860                  */
3861                 do {
3862                         rc = server->ops->wait_mtu_credits(server, rdata->bytes,
3863                                                 &rsize, &credits);
3864
3865                         if (rc)
3866                                 goto fail;
3867
3868                         if (rsize < rdata->bytes) {
3869                                 add_credits_and_wake_if(server, &credits, 0);
3870                                 msleep(1000);
3871                         }
3872                 } while (rsize < rdata->bytes);
3873                 rdata->credits = credits;
3874
3875                 rc = adjust_credits(server, &rdata->credits, rdata->bytes);
3876                 if (!rc) {
3877                         if (rdata->cfile->invalidHandle)
3878                                 rc = -EAGAIN;
3879                         else {
3880 #ifdef CONFIG_CIFS_SMB_DIRECT
3881                                 if (rdata->mr) {
3882                                         rdata->mr->need_invalidate = true;
3883                                         smbd_deregister_mr(rdata->mr);
3884                                         rdata->mr = NULL;
3885                                 }
3886 #endif
3887                                 rc = server->ops->async_readv(rdata);
3888                         }
3889                 }
3890
3891                 /* If the read was successfully sent, we are done */
3892                 if (!rc) {
3893                         /* Add to aio pending list */
3894                         list_add_tail(&rdata->list, rdata_list);
3895                         return 0;
3896                 }
3897
3898                 /* Roll back credits and retry if needed */
3899                 add_credits_and_wake_if(server, &rdata->credits, 0);
3900         } while (rc == -EAGAIN);
3901
3902 fail:
3903         kref_put(&rdata->refcount, cifs_readdata_release);
3904         return rc;
3905 }
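
/*
 * A worked example of the credit loop above (numbers illustrative): to
 * resend an rdata of 0x100000 bytes when wait_mtu_credits() can only
 * offer 0x10000, the partial grant is handed back with
 * add_credits_and_wake_if(), we sleep for a second and ask again,
 * repeating until the whole rdata can be covered by a single request
 * rather than splitting the resend into segments.
 */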
3906
3907 static int
3908 cifs_send_async_read(loff_t fpos, size_t len, struct cifsFileInfo *open_file,
3909                      struct cifs_sb_info *cifs_sb, struct list_head *rdata_list,
3910                      struct cifs_aio_ctx *ctx)
3911 {
3912         struct cifs_readdata *rdata;
3913         unsigned int rsize, nsegs, max_segs = INT_MAX;
3914         struct cifs_credits credits_on_stack;
3915         struct cifs_credits *credits = &credits_on_stack;
3916         size_t cur_len, max_len;
3917         int rc;
3918         pid_t pid;
3919         struct TCP_Server_Info *server;
3920
3921         server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
3922
3923 #ifdef CONFIG_CIFS_SMB_DIRECT
3924         if (server->smbd_conn)
3925                 max_segs = server->smbd_conn->max_frmr_depth;
3926 #endif
3927
3928         if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3929                 pid = open_file->pid;
3930         else
3931                 pid = current->tgid;
3932
3933         do {
3934                 if (open_file->invalidHandle) {
3935                         rc = cifs_reopen_file(open_file, true);
3936                         if (rc == -EAGAIN)
3937                                 continue;
3938                         else if (rc)
3939                                 break;
3940                 }
3941
3942                 if (cifs_sb->ctx->rsize == 0)
3943                         cifs_sb->ctx->rsize =
3944                                 server->ops->negotiate_rsize(tlink_tcon(open_file->tlink),
3945                                                              cifs_sb->ctx);
3946
3947                 rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
3948                                                    &rsize, credits);
3949                 if (rc)
3950                         break;
3951
3952                 max_len = min_t(size_t, len, rsize);
3953
3954                 cur_len = cifs_limit_bvec_subset(&ctx->iter, max_len,
3955                                                  max_segs, &nsegs);
3956                 cifs_dbg(FYI, "read-to-iter len=%zx/%zx nsegs=%u/%lu/%u\n",
3957                          cur_len, max_len, nsegs, ctx->iter.nr_segs, max_segs);
3958                 if (cur_len == 0) {
3959                         rc = -EIO;
3960                         add_credits_and_wake_if(server, credits, 0);
3961                         break;
3962                 }
3963
3964                 rdata = cifs_readdata_alloc(cifs_uncached_readv_complete);
3965                 if (!rdata) {
3966                         add_credits_and_wake_if(server, credits, 0);
3967                         rc = -ENOMEM;
3968                         break;
3969                 }
3970
3971                 rdata->server   = server;
3972                 rdata->cfile    = cifsFileInfo_get(open_file);
3973                 rdata->offset   = fpos;
3974                 rdata->bytes    = cur_len;
3975                 rdata->pid      = pid;
3976                 rdata->credits  = credits_on_stack;
3977                 rdata->ctx      = ctx;
3978                 kref_get(&ctx->refcount);
3979
3980                 rdata->iter     = ctx->iter;
3981                 iov_iter_truncate(&rdata->iter, cur_len);
3982
3983                 rc = adjust_credits(server, &rdata->credits, rdata->bytes);
3984
3985                 if (!rc) {
3986                         if (rdata->cfile->invalidHandle)
3987                                 rc = -EAGAIN;
3988                         else
3989                                 rc = server->ops->async_readv(rdata);
3990                 }
3991
3992                 if (rc) {
3993                         add_credits_and_wake_if(server, &rdata->credits, 0);
3994                         kref_put(&rdata->refcount, cifs_readdata_release);
3995                         if (rc == -EAGAIN)
3996                                 continue;
3997                         break;
3998                 }
3999
4000                 list_add_tail(&rdata->list, rdata_list);
4001                 iov_iter_advance(&ctx->iter, cur_len);
4002                 fpos += cur_len;
4003                 len -= cur_len;
4004         } while (len > 0);
4005
4006         return rc;
4007 }
4008
4009 static void
4010 collect_uncached_read_data(struct cifs_aio_ctx *ctx)
4011 {
4012         struct cifs_readdata *rdata, *tmp;
4013         struct cifs_sb_info *cifs_sb;
4014         int rc;
4015
4016         cifs_sb = CIFS_SB(ctx->cfile->dentry->d_sb);
4017
4018         mutex_lock(&ctx->aio_mutex);
4019
4020         if (list_empty(&ctx->list)) {
4021                 mutex_unlock(&ctx->aio_mutex);
4022                 return;
4023         }
4024
4025         rc = ctx->rc;
4026         /* the loop below should proceed in the order of increasing offsets */
4027 again:
4028         list_for_each_entry_safe(rdata, tmp, &ctx->list, list) {
4029                 if (!rc) {
4030                         if (!try_wait_for_completion(&rdata->done)) {
4031                                 mutex_unlock(&ctx->aio_mutex);
4032                                 return;
4033                         }
4034
4035                         if (rdata->result == -EAGAIN) {
4036                                 /* resend call if it's a retryable error */
4037                                 struct list_head tmp_list;
4038                                 unsigned int got_bytes = rdata->got_bytes;
4039
4040                                 list_del_init(&rdata->list);
4041                                 INIT_LIST_HEAD(&tmp_list);
4042
4043                                 if (ctx->direct_io) {
4044                                         /*
4045                                          * Re-use rdata as this is a
4046                                          * direct I/O
4047                                          */
4048                                         rc = cifs_resend_rdata(
4049                                                 rdata,
4050                                                 &tmp_list, ctx);
4051                                 } else {
4052                                         rc = cifs_send_async_read(
4053                                                 rdata->offset + got_bytes,
4054                                                 rdata->bytes - got_bytes,
4055                                                 rdata->cfile, cifs_sb,
4056                                                 &tmp_list, ctx);
4057
4058                                         kref_put(&rdata->refcount,
4059                                                 cifs_readdata_release);
4060                                 }
4061
4062                                 list_splice(&tmp_list, &ctx->list);
4063
4064                                 goto again;
4065                         } else if (rdata->result)
4066                                 rc = rdata->result;
4067
4068                         /* if there was a short read -- discard anything left */
4069                         if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
4070                                 rc = -ENODATA;
4071
4072                         ctx->total_len += rdata->got_bytes;
4073                 }
4074                 list_del_init(&rdata->list);
4075                 kref_put(&rdata->refcount, cifs_readdata_release);
4076         }
4077
4078         /* mask nodata case */
4079         if (rc == -ENODATA)
4080                 rc = 0;
4081
4082         ctx->rc = (rc == 0) ? (ssize_t)ctx->total_len : rc;
4083
4084         mutex_unlock(&ctx->aio_mutex);
4085
4086         if (ctx->iocb && ctx->iocb->ki_complete)
4087                 ctx->iocb->ki_complete(ctx->iocb, ctx->rc);
4088         else
4089                 complete(&ctx->done);
4090 }
4091
4092 static ssize_t __cifs_readv(
4093         struct kiocb *iocb, struct iov_iter *to, bool direct)
4094 {
4095         size_t len;
4096         struct file *file = iocb->ki_filp;
4097         struct cifs_sb_info *cifs_sb;
4098         struct cifsFileInfo *cfile;
4099         struct cifs_tcon *tcon;
4100         ssize_t rc, total_read = 0;
4101         loff_t offset = iocb->ki_pos;
4102         struct cifs_aio_ctx *ctx;
4103
4104         len = iov_iter_count(to);
4105         if (!len)
4106                 return 0;
4107
4108         cifs_sb = CIFS_FILE_SB(file);
4109         cfile = file->private_data;
4110         tcon = tlink_tcon(cfile->tlink);
4111
4112         if (!tcon->ses->server->ops->async_readv)
4113                 return -ENOSYS;
4114
4115         if ((file->f_flags & O_ACCMODE) == O_WRONLY)
4116                 cifs_dbg(FYI, "attempting read on write only file instance\n");
4117
4118         ctx = cifs_aio_ctx_alloc();
4119         if (!ctx)
4120                 return -ENOMEM;
4121
4122         ctx->pos        = offset;
4123         ctx->direct_io  = direct;
4124         ctx->len        = len;
4125         ctx->cfile      = cifsFileInfo_get(cfile);
4126         ctx->nr_pinned_pages = 0;
4127
4128         if (!is_sync_kiocb(iocb))
4129                 ctx->iocb = iocb;
4130
4131         if (user_backed_iter(to)) {
4132                 /*
4133                  * Extract IOVEC/UBUF-type iterators to a BVEC-type iterator as
4134                  * they contain references to the calling process's virtual
4135                  * memory layout which won't be available in an async worker
4136                  * thread.  This also takes a pin on every folio involved.
4137                  */
4138                 rc = netfs_extract_user_iter(to, iov_iter_count(to),
4139                                              &ctx->iter, 0);
4140                 if (rc < 0) {
4141                         kref_put(&ctx->refcount, cifs_aio_ctx_release);
4142                         return rc;
4143                 }
4144
4145                 ctx->nr_pinned_pages = rc;
4146                 ctx->bv = (void *)ctx->iter.bvec;
4147                 ctx->bv_need_unpin = iov_iter_extract_will_pin(to);
4148                 ctx->should_dirty = true;
4149         } else if ((iov_iter_is_bvec(to) || iov_iter_is_kvec(to)) &&
4150                    !is_sync_kiocb(iocb)) {
4151                 /*
4152                  * If the op is asynchronous, we need to copy the list attached
4153                  * to a BVEC/KVEC-type iterator, but we assume that the storage
4154                  * will be retained by the caller; in any case, we may or may
4155                  * not be able to pin the pages, so we don't try.
4156                  */
4157                 ctx->bv = (void *)dup_iter(&ctx->iter, to, GFP_KERNEL);
4158                 if (!ctx->bv) {
4159                         kref_put(&ctx->refcount, cifs_aio_ctx_release);
4160                         return -ENOMEM;
4161                 }
4162         } else {
4163                 /*
4164                  * Otherwise, we just pass the iterator down as-is and rely on
4165                  * the caller to make sure the pages referred to by the
4166                  * iterator don't evaporate.
4167                  */
4168                 ctx->iter = *to;
4169         }
4170
4171         if (direct) {
4172                 rc = filemap_write_and_wait_range(file->f_inode->i_mapping,
4173                                                   offset, offset + len - 1);
4174                 if (rc) {
4175                         kref_put(&ctx->refcount, cifs_aio_ctx_release);
4176                         return -EAGAIN;
4177                 }
4178         }
4179
4180         /* grab a lock here because read response handlers can access ctx */
4181         mutex_lock(&ctx->aio_mutex);
4182
4183         rc = cifs_send_async_read(offset, len, cfile, cifs_sb, &ctx->list, ctx);
4184
4185         /* if at least one read request was sent successfully, reset rc */
4186         if (!list_empty(&ctx->list))
4187                 rc = 0;
4188
4189         mutex_unlock(&ctx->aio_mutex);
4190
4191         if (rc) {
4192                 kref_put(&ctx->refcount, cifs_aio_ctx_release);
4193                 return rc;
4194         }
4195
4196         if (!is_sync_kiocb(iocb)) {
4197                 kref_put(&ctx->refcount, cifs_aio_ctx_release);
4198                 return -EIOCBQUEUED;
4199         }
4200
4201         rc = wait_for_completion_killable(&ctx->done);
4202         if (rc) {
4203                 mutex_lock(&ctx->aio_mutex);
4204                 ctx->rc = rc = -EINTR;
4205                 total_read = ctx->total_len;
4206                 mutex_unlock(&ctx->aio_mutex);
4207         } else {
4208                 rc = ctx->rc;
4209                 total_read = ctx->total_len;
4210         }
4211
4212         kref_put(&ctx->refcount, cifs_aio_ctx_release);
4213
4214         if (total_read) {
4215                 iocb->ki_pos += total_read;
4216                 return total_read;
4217         }
4218         return rc;
4219 }
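
/*
 * Summary of the iterator handling in __cifs_readv() (descriptive only):
 *
 *   user-backed UBUF/IOVEC             -> extracted to a pinned BVEC so an
 *                                         async worker thread can use it
 *   kernel BVEC/KVEC on an async kiocb -> segment list duplicated with
 *                                         dup_iter(); pages are not pinned
 *   anything else (e.g. a sync kiocb)  -> the caller's iterator is used as-is
 */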
4220
4221 ssize_t cifs_direct_readv(struct kiocb *iocb, struct iov_iter *to)
4222 {
4223         return __cifs_readv(iocb, to, true);
4224 }
4225
4226 ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
4227 {
4228         return __cifs_readv(iocb, to, false);
4229 }
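
/*
 * Sketch of how these entry points are typically selected per mount
 * (illustrative only; the real file_operations tables live in cifsfs.c and
 * the table names below are placeholders, not the actual identifiers):
 */
#if 0	/* example only, not built */
static const struct file_operations example_direct_ops = {
	.read_iter	= cifs_direct_readv,	/* cache=none: bypass page cache */
};

static const struct file_operations example_strict_ops = {
	.read_iter	= cifs_strict_readv,	/* cache=strict: oplock-gated */
};
#endif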
4230
4231 ssize_t
4232 cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
4233 {
4234         struct inode *inode = file_inode(iocb->ki_filp);
4235         struct cifsInodeInfo *cinode = CIFS_I(inode);
4236         struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
4237         struct cifsFileInfo *cfile = (struct cifsFileInfo *)
4238                                                 iocb->ki_filp->private_data;
4239         struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
4240         int rc = -EACCES;
4241
4242         /*
4243          * In strict cache mode we need to read from the server every time
4244          * if we don't have a level II oplock, because the server can delay
4245          * mtime changes and so we can't decide whether to invalidate the
4246          * inode. Reading through the page cache can also fail if there are
4247          * mandatory locks on pages affected by this read but not on the
4248          * region from pos to pos+len-1.
4249          */
4250         if (!CIFS_CACHE_READ(cinode))
4251                 return cifs_user_readv(iocb, to);
4252
4253         if (cap_unix(tcon->ses) &&
4254             (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
4255             ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
4256                 return generic_file_read_iter(iocb, to);
4257
4258         /*
4259          * We need to hold the sem to be sure nobody modifies lock list
4260          * with a brlock that prevents reading.
4261          */
4262         down_read(&cinode->lock_sem);
4263         if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
4264                                      tcon->ses->server->vals->shared_lock_type,
4265                                      0, NULL, CIFS_READ_OP))
4266                 rc = generic_file_read_iter(iocb, to);
4267         up_read(&cinode->lock_sem);
4268         return rc;
4269 }
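
/*
 * Decision summary for strict-cache reads (descriptive only):
 *
 *   no read oplock/lease        -> cifs_user_readv(), read from the server
 *   POSIX brlock-capable mount  -> generic_file_read_iter() directly
 *   otherwise                   -> serve from the page cache, but only after
 *                                  checking for conflicting mandatory brlocks
 */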
4270
4271 static ssize_t
4272 cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
4273 {
4274         int rc = -EACCES;
4275         unsigned int bytes_read = 0;
4276         unsigned int total_read;
4277         unsigned int current_read_size;
4278         unsigned int rsize;
4279         struct cifs_sb_info *cifs_sb;
4280         struct cifs_tcon *tcon;
4281         struct TCP_Server_Info *server;
4282         unsigned int xid;
4283         char *cur_offset;
4284         struct cifsFileInfo *open_file;
4285         struct cifs_io_parms io_parms = {0};
4286         int buf_type = CIFS_NO_BUFFER;
4287         __u32 pid;
4288
4289         xid = get_xid();
4290         cifs_sb = CIFS_FILE_SB(file);
4291
4292         /* FIXME: set up handlers for larger reads and/or convert to async */
4293         rsize = min_t(unsigned int, cifs_sb->ctx->rsize, CIFSMaxBufSize);
4294
4295         if (file->private_data == NULL) {
4296                 rc = -EBADF;
4297                 free_xid(xid);
4298                 return rc;
4299         }
4300         open_file = file->private_data;
4301         tcon = tlink_tcon(open_file->tlink);
4302         server = cifs_pick_channel(tcon->ses);
4303
4304         if (!server->ops->sync_read) {
4305                 free_xid(xid);
4306                 return -ENOSYS;
4307         }
4308
4309         if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
4310                 pid = open_file->pid;
4311         else
4312                 pid = current->tgid;
4313
4314         if ((file->f_flags & O_ACCMODE) == O_WRONLY)
4315                 cifs_dbg(FYI, "attempting read on write-only file instance\n");
4316
4317         for (total_read = 0, cur_offset = read_data; read_size > total_read;
4318              total_read += bytes_read, cur_offset += bytes_read) {
4319                 do {
4320                         current_read_size = min_t(uint, read_size - total_read,
4321                                                   rsize);
4322                         /*
4323                          * For Windows ME and 9x we do not want to request
4324                          * more than the server negotiated, since it will
4325                          * refuse the read otherwise.
4326                          */
4327                         if (!(tcon->ses->capabilities &
4328                                 tcon->ses->server->vals->cap_large_files)) {
4329                                 current_read_size = min_t(uint,
4330                                         current_read_size, CIFSMaxBufSize);
4331                         }
4332                         if (open_file->invalidHandle) {
4333                                 rc = cifs_reopen_file(open_file, true);
4334                                 if (rc != 0)
4335                                         break;
4336                         }
4337                         io_parms.pid = pid;
4338                         io_parms.tcon = tcon;
4339                         io_parms.offset = *offset;
4340                         io_parms.length = current_read_size;
4341                         io_parms.server = server;
4342                         rc = server->ops->sync_read(xid, &open_file->fid, &io_parms,
4343                                                     &bytes_read, &cur_offset,
4344                                                     &buf_type);
4345                 } while (rc == -EAGAIN);
4346
4347                 if (rc || (bytes_read == 0)) {
4348                         if (total_read) {
4349                                 break;
4350                         } else {
4351                                 free_xid(xid);
4352                                 return rc;
4353                         }
4354                 } else {
4355                         cifs_stats_bytes_read(tcon, total_read);
4356                         *offset += bytes_read;
4357                 }
4358         }
4359         free_xid(xid);
4360         return total_read;
4361 }
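
#if 0	/* Illustrative userspace analogue of the loop above (hypothetical
	 * helper, not part of this file): read in rsize-like chunks and
	 * tolerate short reads, mirroring how cifs_read() advances *offset
	 * by bytes_read after each successful server read.
	 */
#include <unistd.h>

static ssize_t chunked_read(int fd, char *buf, size_t len, size_t chunk)
{
	size_t total = 0;

	while (total < len) {
		size_t want = len - total < chunk ? len - total : chunk;
		ssize_t got = read(fd, buf + total, want);

		if (got < 0)
			return total ? (ssize_t)total : -1;
		if (got == 0)
			break;		/* EOF */
		total += got;
	}
	return total;
}
#endif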
4362
4363 /*
4364  * If the page is mmap'ed into a process' page tables, then we need to make
4365  * sure that it doesn't change while being written back.
4366  */
4367 static vm_fault_t cifs_page_mkwrite(struct vm_fault *vmf)
4368 {
4369         struct folio *folio = page_folio(vmf->page);
4370
4371         /* Wait for the folio to be written to the cache before we allow it to
4372          * be modified.  We then assume the entire folio will need writing back.
4373          */
4374 #ifdef CONFIG_CIFS_FSCACHE
4375         if (folio_test_fscache(folio) &&
4376             folio_wait_fscache_killable(folio) < 0)
4377                 return VM_FAULT_RETRY;
4378 #endif
4379
4380         folio_wait_writeback(folio);
4381
4382         if (folio_lock_killable(folio) < 0)
4383                 return VM_FAULT_RETRY;
4384         return VM_FAULT_LOCKED;
4385 }
4386
4387 static const struct vm_operations_struct cifs_file_vm_ops = {
4388         .fault = filemap_fault,
4389         .map_pages = filemap_map_pages,
4390         .page_mkwrite = cifs_page_mkwrite,
4391 };
4392
4393 int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
4394 {
4395         int xid, rc = 0;
4396         struct inode *inode = file_inode(file);
4397
4398         xid = get_xid();
4399
4400         if (!CIFS_CACHE_READ(CIFS_I(inode)))
4401                 rc = cifs_zap_mapping(inode);
4402         if (!rc)
4403                 rc = generic_file_mmap(file, vma);
4404         if (!rc)
4405                 vma->vm_ops = &cifs_file_vm_ops;
4406
4407         free_xid(xid);
4408         return rc;
4409 }
4410
4411 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
4412 {
4413         int rc, xid;
4414
4415         xid = get_xid();
4416
4417         rc = cifs_revalidate_file(file);
4418         if (rc)
4419                 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
4420                          rc);
4421         if (!rc)
4422                 rc = generic_file_mmap(file, vma);
4423         if (!rc)
4424                 vma->vm_ops = &cifs_file_vm_ops;
4425
4426         free_xid(xid);
4427         return rc;
4428 }
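
#if 0	/* Hypothetical userspace demo, not part of this file: the first
	 * store to a shared writable mapping of a CIFS file faults into
	 * cifs_page_mkwrite() above, which waits out fscache I/O and
	 * writeback before the page is allowed to become writable.
	 */
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

static int touch_mapped_byte(const char *path)
{
	char *p;
	int fd = open(path, O_RDWR);

	if (fd < 0)
		return -1;
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		close(fd);
		return -1;
	}
	p[0] = 'x';	/* triggers ->page_mkwrite on first dirtying */
	munmap(p, 4096);
	close(fd);
	return 0;
}
#endif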
4429
4430 /*
4431  * Unlock a bunch of folios in the pagecache.
4432  */
4433 static void cifs_unlock_folios(struct address_space *mapping, pgoff_t first, pgoff_t last)
4434 {
4435         struct folio *folio;
4436         XA_STATE(xas, &mapping->i_pages, first);
4437
4438         rcu_read_lock();
4439         xas_for_each(&xas, folio, last) {
4440                 folio_unlock(folio);
4441         }
4442         rcu_read_unlock();
4443 }
4444
4445 static void cifs_readahead_complete(struct work_struct *work)
4446 {
4447         struct cifs_readdata *rdata = container_of(work,
4448                                                    struct cifs_readdata, work);
4449         struct folio *folio;
4450         pgoff_t last;
4451         bool good = rdata->result == 0 || (rdata->result == -EAGAIN && rdata->got_bytes);
4452
4453         XA_STATE(xas, &rdata->mapping->i_pages, rdata->offset / PAGE_SIZE);
4454
4455         if (good)
4456                 cifs_readahead_to_fscache(rdata->mapping->host,
4457                                           rdata->offset, rdata->bytes);
4458
4459         if (iov_iter_count(&rdata->iter) > 0)
4460                 iov_iter_zero(iov_iter_count(&rdata->iter), &rdata->iter);
4461
4462         last = (rdata->offset + rdata->bytes - 1) / PAGE_SIZE;
4463
4464         rcu_read_lock();
4465         xas_for_each(&xas, folio, last) {
4466                 if (good) {
4467                         flush_dcache_folio(folio);
4468                         folio_mark_uptodate(folio);
4469                 }
4470                 folio_unlock(folio);
4471         }
4472         rcu_read_unlock();
4473
4474         kref_put(&rdata->refcount, cifs_readdata_release);
4475 }
4476
4477 static void cifs_readahead(struct readahead_control *ractl)
4478 {
4479         struct cifsFileInfo *open_file = ractl->file->private_data;
4480         struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(ractl->file);
4481         struct TCP_Server_Info *server;
4482         unsigned int xid, nr_pages, cache_nr_pages = 0;
4483         unsigned int ra_pages;
4484         pgoff_t next_cached = ULONG_MAX, ra_index;
4485         bool caching = fscache_cookie_enabled(cifs_inode_cookie(ractl->mapping->host)) &&
4486                 cifs_inode_cookie(ractl->mapping->host)->cache_priv;
4487         bool check_cache = caching;
4488         pid_t pid;
4489         int rc = 0;
4490
4491         /* Note that readahead_count() lags behind our dequeuing of pages from
4492          * the ractl, so we have to keep track for ourselves.
4493          */
4494         ra_pages = readahead_count(ractl);
4495         ra_index = readahead_index(ractl);
4496
4497         xid = get_xid();
4498
4499         if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
4500                 pid = open_file->pid;
4501         else
4502                 pid = current->tgid;
4503
4504         server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
4505
4506         cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
4507                  __func__, ractl->file, ractl->mapping, ra_pages);
4508
4509         /*
4510          * Chop the readahead request up into rsize-sized read requests.
4511          */
4512         while ((nr_pages = ra_pages)) {
4513                 unsigned int i, rsize;
4514                 struct cifs_readdata *rdata;
4515                 struct cifs_credits credits_on_stack;
4516                 struct cifs_credits *credits = &credits_on_stack;
4517                 struct folio *folio;
4518                 pgoff_t fsize;
4519
4520                 /*
4521                  * Find out if we have anything cached in the range of
4522                  * interest, and if so, where the next chunk of cached data is.
4523                  */
4524                 if (caching) {
4525                         if (check_cache) {
4526                                 rc = cifs_fscache_query_occupancy(
4527                                         ractl->mapping->host, ra_index, nr_pages,
4528                                         &next_cached, &cache_nr_pages);
4529                                 if (rc < 0)
4530                                         caching = false;
4531                                 check_cache = false;
4532                         }
4533
4534                         if (ra_index == next_cached) {
4535                                 /*
4536                                  * TODO: Send a whole batch of pages to be read
4537                                  * by the cache.
4538                                  */
4539                                 folio = readahead_folio(ractl);
4540                                 fsize = folio_nr_pages(folio);
4541                                 ra_pages -= fsize;
4542                                 ra_index += fsize;
4543                                 if (cifs_readpage_from_fscache(ractl->mapping->host,
4544                                                                &folio->page) < 0) {
4545                                         /*
4546                                          * TODO: Deal with cache read failure
4547                                          * here, but for the moment, delegate
4548                                          * that to readpage.
4549                                          */
4550                                         caching = false;
4551                                 }
4552                                 folio_unlock(folio);
4553                                 next_cached += fsize;
4554                                 cache_nr_pages -= fsize;
4555                                 if (cache_nr_pages == 0)
4556                                         check_cache = true;
4557                                 continue;
4558                         }
4559                 }
4560
4561                 if (open_file->invalidHandle) {
4562                         rc = cifs_reopen_file(open_file, true);
4563                         if (rc) {
4564                                 if (rc == -EAGAIN)
4565                                         continue;
4566                                 break;
4567                         }
4568                 }
4569
4570                 if (cifs_sb->ctx->rsize == 0)
4571                         cifs_sb->ctx->rsize =
4572                                 server->ops->negotiate_rsize(tlink_tcon(open_file->tlink),
4573                                                              cifs_sb->ctx);
4574
4575                 rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
4576                                                    &rsize, credits);
4577                 if (rc)
4578                         break;
4579                 nr_pages = min_t(size_t, rsize / PAGE_SIZE, ra_pages);
4580                 if (next_cached != ULONG_MAX)
4581                         nr_pages = min_t(size_t, nr_pages, next_cached - ra_index);
4582
4583                 /*
4584                  * Give up immediately if rsize is too small to read an entire
4585                  * page. The VFS will fall back to readpage. We should never
4586                  * reach this point, however, since we set ra_pages to 0 when the
4587                  * rsize is smaller than a cache page.
4588                  */
4589                 if (unlikely(!nr_pages)) {
4590                         add_credits_and_wake_if(server, credits, 0);
4591                         break;
4592                 }
4593
4594                 rdata = cifs_readdata_alloc(cifs_readahead_complete);
4595                 if (!rdata) {
4596                         /* best to give up if we're out of mem */
4597                         add_credits_and_wake_if(server, credits, 0);
4598                         break;
4599                 }
4600
4601                 rdata->offset   = ra_index * PAGE_SIZE;
4602                 rdata->bytes    = nr_pages * PAGE_SIZE;
4603                 rdata->cfile    = cifsFileInfo_get(open_file);
4604                 rdata->server   = server;
4605                 rdata->mapping  = ractl->mapping;
4606                 rdata->pid      = pid;
4607                 rdata->credits  = credits_on_stack;
4608
4609                 /* Dequeue the folios that this slice covers */
4610                 for (i = 0; i < nr_pages; i++)
4611                         WARN_ON(!readahead_folio(ractl));
4612
4613                 ra_pages -= nr_pages;
4614                 ra_index += nr_pages;
4615
4616                 iov_iter_xarray(&rdata->iter, ITER_DEST, &rdata->mapping->i_pages,
4617                                 rdata->offset, rdata->bytes);
4618
4619                 rc = adjust_credits(server, &rdata->credits, rdata->bytes);
4620                 if (!rc) {
4621                         if (rdata->cfile->invalidHandle)
4622                                 rc = -EAGAIN;
4623                         else
4624                                 rc = server->ops->async_readv(rdata);
4625                 }
4626
4627                 if (rc) {
4628                         add_credits_and_wake_if(server, &rdata->credits, 0);
4629                         cifs_unlock_folios(rdata->mapping,
4630                                            rdata->offset / PAGE_SIZE,
4631                                            (rdata->offset + rdata->bytes - 1) / PAGE_SIZE);
4632                         /* Fall back to readpage in error/reconnect cases */
4633                         kref_put(&rdata->refcount, cifs_readdata_release);
4634                         break;
4635                 }
4636
4637                 kref_put(&rdata->refcount, cifs_readdata_release);
4638         }
4639
4640         free_xid(xid);
4641 }
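
#if 0	/* Worked example of the slicing above (hypothetical helper, not
	 * built): with 4K pages, rsize = 64K (16 pages), a 40-page window
	 * and cached data starting 10 pages in, the first slice is capped
	 * at 10 pages so the network read stops where the cached run begins.
	 */
static unsigned int example_ra_slice(unsigned int rsize_pages,
				     unsigned int ra_pages,
				     pgoff_t ra_index, pgoff_t next_cached)
{
	unsigned int nr = min(rsize_pages, ra_pages);

	if (next_cached != ULONG_MAX)
		nr = min_t(pgoff_t, nr, next_cached - ra_index);
	return nr;
}
#endif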
4642
4643 /*
4644  * cifs_readpage_worker must be called with the page pinned
4645  */
4646 static int cifs_readpage_worker(struct file *file, struct page *page,
4647         loff_t *poffset)
4648 {
4649         char *read_data;
4650         int rc;
4651
4652         /* Is the page cached? */
4653         rc = cifs_readpage_from_fscache(file_inode(file), page);
4654         if (rc == 0)
4655                 goto read_complete;
4656
4657         read_data = kmap(page);
4658         /* for reads over a certain size could initiate async read ahead */
4659
4660         rc = cifs_read(file, read_data, PAGE_SIZE, poffset);
4661
4662         if (rc < 0)
4663                 goto io_error;
4664         else
4665                 cifs_dbg(FYI, "Bytes read %d\n", rc);
4666
4667         /* we do not want atime to be less than mtime; it broke some apps */
4668         file_inode(file)->i_atime = current_time(file_inode(file));
4669         if (timespec64_compare(&(file_inode(file)->i_atime), &(file_inode(file)->i_mtime)) < 0)
4670                 file_inode(file)->i_atime = file_inode(file)->i_mtime;
4671         else
4672                 file_inode(file)->i_atime = current_time(file_inode(file));
4673
4674         if (PAGE_SIZE > rc)
4675                 memset(read_data + rc, 0, PAGE_SIZE - rc);
4676
4677         flush_dcache_page(page);
4678         SetPageUptodate(page);
4679         rc = 0;
4680
4681 io_error:
4682         kunmap(page);
4683         unlock_page(page);
4684
4685 read_complete:
4686         return rc;
4687 }
4688
4689 static int cifs_read_folio(struct file *file, struct folio *folio)
4690 {
4691         struct page *page = &folio->page;
4692         loff_t offset = page_file_offset(page);
4693         int rc = -EACCES;
4694         unsigned int xid;
4695
4696         xid = get_xid();
4697
4698         if (file->private_data == NULL) {
4699                 rc = -EBADF;
4700                 free_xid(xid);
4701                 return rc;
4702         }
4703
4704         cifs_dbg(FYI, "read_folio %p at offset %d 0x%x\n",
4705                  page, (int)offset, (int)offset);
4706
4707         rc = cifs_readpage_worker(file, page, &offset);
4708
4709         free_xid(xid);
4710         return rc;
4711 }
4712
4713 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
4714 {
4715         struct cifsFileInfo *open_file;
4716
4717         spin_lock(&cifs_inode->open_file_lock);
4718         list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
4719                 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
4720                         spin_unlock(&cifs_inode->open_file_lock);
4721                         return 1;
4722                 }
4723         }
4724         spin_unlock(&cifs_inode->open_file_lock);
4725         return 0;
4726 }
4727
4728 /*
4729  * We do not want to update the file size from the server for inodes open
4730  * for write, to avoid races with writepage extending the file.  In the
4731  * future we could consider refreshing the inode only on increases in the
4732  * file size, but this is tricky to do without racing with writebehind
4733  * page caching in the current Linux kernel design.
4734  */
4734 bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
4735 {
4736         if (!cifsInode)
4737                 return true;
4738
4739         if (is_inode_writable(cifsInode)) {
4740                 /* This inode is open for write at least once */
4741                 struct cifs_sb_info *cifs_sb;
4742
4743                 cifs_sb = CIFS_SB(cifsInode->netfs.inode.i_sb);
4744                 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
4745                         /* since there is no page cache to corrupt on
4746                            direct I/O we can change the size safely */
4747                         return true;
4748                 }
4749
4750                 if (i_size_read(&cifsInode->netfs.inode) < end_of_file)
4751                         return true;
4752
4753                 return false;
4754         }
4755         return true;
4756 }
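
/*
 * Quick reference for the policy above (descriptive only):
 *
 *   inode not open for write                 -> safe to take the server size
 *   open for write, direct I/O mount         -> safe (no page cache to corrupt)
 *   open for write, server EOF beyond i_size -> safe (pure extension)
 *   open for write, server EOF within i_size -> unsafe (writeback race)
 */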
4757
4758 static int cifs_write_begin(struct file *file, struct address_space *mapping,
4759                         loff_t pos, unsigned len,
4760                         struct page **pagep, void **fsdata)
4761 {
4762         int oncethru = 0;
4763         pgoff_t index = pos >> PAGE_SHIFT;
4764         loff_t offset = pos & (PAGE_SIZE - 1);
4765         loff_t page_start = pos & PAGE_MASK;
4766         loff_t i_size;
4767         struct page *page;
4768         int rc = 0;
4769
4770         cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);
4771
4772 start:
4773         page = grab_cache_page_write_begin(mapping, index);
4774         if (!page) {
4775                 rc = -ENOMEM;
4776                 goto out;
4777         }
4778
4779         if (PageUptodate(page))
4780                 goto out;
4781
4782         /*
4783          * If we write a full page it will be up to date, no need to read from
4784          * the server. If the write is short, we'll end up doing a sync write
4785          * instead.
4786          */
4787         if (len == PAGE_SIZE)
4788                 goto out;
4789
4790         /*
4791          * optimize away the read when we have an oplock, and we're not
4792          * expecting to use any of the data we'd be reading in. That
4793          * is, when the page lies beyond the EOF, or straddles the EOF
4794          * and the write will cover all of the existing data.
4795          */
4796         if (CIFS_CACHE_READ(CIFS_I(mapping->host))) {
4797                 i_size = i_size_read(mapping->host);
4798                 if (page_start >= i_size ||
4799                     (offset == 0 && (pos + len) >= i_size)) {
4800                         zero_user_segments(page, 0, offset,
4801                                            offset + len,
4802                                            PAGE_SIZE);
4803                         /*
4804                          * PageChecked means that the parts of the page
4805                          * to which we're not writing are considered up
4806                          * to date. Once the data is copied to the
4807                          * page, it can be set uptodate.
4808                          */
4809                         SetPageChecked(page);
4810                         goto out;
4811                 }
4812         }
4813
4814         if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
4815                 /*
4816                  * might as well read a page, it is fast enough. If we get
4817                  * an error, we don't need to return it. cifs_write_end will
4818                  * do a sync write instead since PG_uptodate isn't set.
4819                  */
4820                 cifs_readpage_worker(file, page, &page_start);
4821                 put_page(page);
4822                 oncethru = 1;
4823                 goto start;
4824         } else {
4825                 /* we could try using another file handle if there is one,
4826                    but how would we lock it to prevent a close of that
4827                    handle racing with this read? In any case this will be
4828                    written out by write_end so it is fine */
4829         }
4830 out:
4831         *pagep = page;
4832         return rc;
4833 }
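
#if 0	/* Illustration of the read-avoidance test above (hypothetical
	 * helper, not built): no read-modify-write is needed when the page
	 * lies wholly beyond EOF, or starts at the page head and the write
	 * covers everything up to (or past) EOF.
	 */
static bool example_write_needs_no_read(loff_t page_start, loff_t offset,
					loff_t pos, unsigned int len,
					loff_t i_size)
{
	return page_start >= i_size ||
	       (offset == 0 && pos + len >= i_size);
}
#endif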
4834
4835 static bool cifs_release_folio(struct folio *folio, gfp_t gfp)
4836 {
4837         if (folio_test_private(folio))
4838                 return false;
4839         if (folio_test_fscache(folio)) {
4840                 if (current_is_kswapd() || !(gfp & __GFP_FS))
4841                         return false;
4842                 folio_wait_fscache(folio);
4843         }
4844         fscache_note_page_release(cifs_inode_cookie(folio->mapping->host));
4845         return true;
4846 }
4847
4848 static void cifs_invalidate_folio(struct folio *folio, size_t offset,
4849                                  size_t length)
4850 {
4851         folio_wait_fscache(folio);
4852 }
4853
4854 static int cifs_launder_folio(struct folio *folio)
4855 {
4856         int rc = 0;
4857         loff_t range_start = folio_pos(folio);
4858         loff_t range_end = range_start + folio_size(folio);
4859         struct writeback_control wbc = {
4860                 .sync_mode = WB_SYNC_ALL,
4861                 .nr_to_write = 0,
4862                 .range_start = range_start,
4863                 .range_end = range_end,
4864         };
4865
4866         cifs_dbg(FYI, "Launder page: %lu\n", folio->index);
4867
4868         if (folio_clear_dirty_for_io(folio))
4869                 rc = cifs_writepage_locked(&folio->page, &wbc);
4870
4871         folio_wait_fscache(folio);
4872         return rc;
4873 }
4874
4875 void cifs_oplock_break(struct work_struct *work)
4876 {
4877         struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
4878                                                   oplock_break);
4879         struct inode *inode = d_inode(cfile->dentry);
4880         struct cifsInodeInfo *cinode = CIFS_I(inode);
4881         struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
4882         struct TCP_Server_Info *server = tcon->ses->server;
4883         int rc = 0;
4884         bool purge_cache = false;
4885
4886         wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
4887                         TASK_UNINTERRUPTIBLE);
4888
4889         server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
4890                                       cfile->oplock_epoch, &purge_cache);
4891
4892         if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
4893                                                 cifs_has_mand_locks(cinode)) {
4894                 cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
4895                          inode);
4896                 cinode->oplock = 0;
4897         }
4898
4899         if (inode && S_ISREG(inode->i_mode)) {
4900                 if (CIFS_CACHE_READ(cinode))
4901                         break_lease(inode, O_RDONLY);
4902                 else
4903                         break_lease(inode, O_WRONLY);
4904                 rc = filemap_fdatawrite(inode->i_mapping);
4905                 if (!CIFS_CACHE_READ(cinode) || purge_cache) {
4906                         rc = filemap_fdatawait(inode->i_mapping);
4907                         mapping_set_error(inode->i_mapping, rc);
4908                         cifs_zap_mapping(inode);
4909                 }
4910                 cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
4911                 if (CIFS_CACHE_WRITE(cinode))
4912                         goto oplock_break_ack;
4913         }
4914
4915         rc = cifs_push_locks(cfile);
4916         if (rc)
4917                 cifs_dbg(VFS, "Push locks rc = %d\n", rc);
4918
4919 oplock_break_ack:
4920         /*
4921          * Releasing a stale oplock after a recent reconnect of the SMB session,
4922          * using a now-incorrect file handle, is not a data integrity issue; but
4923          * do not bother sending an oplock release if the session to the server
4924          * is still disconnected, since the server already released the oplock.
4925          */
4926         if (!cfile->oplock_break_cancelled) {
4927                 rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
4928                                                              cinode);
4929                 cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
4930         }
4931
4932         _cifsFileInfo_put(cfile, false /* do not wait for ourselves */, false);
4933         cifs_done_oplock_break(cinode);
4934 }
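
/*
 * The oplock-break sequence above, in brief (descriptive only):
 *
 *   1. wait for in-flight writers, then downgrade the cached oplock level
 *   2. drop any delegated lease and start writeback of dirty pages
 *   3. if read caching was lost (or a purge was requested), wait for
 *      writeback to finish and zap the page cache
 *   4. push cached byte-range locks, then ack the break to the server
 *      (unless the ack was cancelled by a reconnect)
 */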
4935
4936 /*
4937  * The presence of cifs_direct_io() in the address space ops vector
4938  * allows open() O_DIRECT flags which would have failed otherwise.
4939  *
4940  * In the non-cached mode (mount with cache=none), we shunt off direct
4941  * read and write requests, so this method should never be called.
4942  *
4943  * Direct IO is not yet supported in the cached mode.
4944  */
4945 static ssize_t
4946 cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
4947 {
4948         /*
4949          * FIXME
4950          * Eventually need to support direct IO for non forcedirectio mounts
4951          */
4952         return -EINVAL;
4953 }
4954
4955 static int cifs_swap_activate(struct swap_info_struct *sis,
4956                               struct file *swap_file, sector_t *span)
4957 {
4958         struct cifsFileInfo *cfile = swap_file->private_data;
4959         struct inode *inode = swap_file->f_mapping->host;
4960         unsigned long blocks;
4961         long long isize;
4962
4963         cifs_dbg(FYI, "swap activate\n");
4964
4965         if (!swap_file->f_mapping->a_ops->swap_rw)
4966                 /* Cannot support swap */
4967                 return -EINVAL;
4968
4969         spin_lock(&inode->i_lock);
4970         blocks = inode->i_blocks;
4971         isize = inode->i_size;
4972         spin_unlock(&inode->i_lock);
4973         if (blocks * 512 < isize) {
4974                 pr_warn("swap activate: swapfile has holes\n");
4975                 return -EINVAL;
4976         }
4977         *span = sis->pages;
4978
4979         pr_warn_once("Swap support over SMB3 is experimental\n");
4980
4981         /*
4982          * TODO: consider adding ACL (or documenting how) to prevent other
4983          * users (on this or other systems) from reading it
4984          */
4985
4986
4987         /* TODO: add sk_set_memalloc(inet) or similar */
4988
4989         if (cfile)
4990                 cfile->swapfile = true;
4991         /*
4992          * TODO: Since file already open, we can't open with DENY_ALL here
4993          * but we could add call to grab a byte range lock to prevent others
4994          * from reading or writing the file
4995          */
4996
4997         sis->flags |= SWP_FS_OPS;
4998         return add_swap_extent(sis, 0, sis->max, 0);
4999 }
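
/*
 * Worked example of the holes check above: a 1 MiB file with only 512 KiB
 * actually allocated reports i_blocks = 1024 (512-byte units), so
 * blocks * 512 = 524288 < i_size = 1048576 and swap activation is refused.
 */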
5000
5001 static void cifs_swap_deactivate(struct file *file)
5002 {
5003         struct cifsFileInfo *cfile = file->private_data;
5004
5005         cifs_dbg(FYI, "swap deactivate\n");
5006
5007         /* TODO: undo sk_set_memalloc(inet) will eventually be needed */
5008
5009         if (cfile)
5010                 cfile->swapfile = false;
5011
5012         /* do we need to unpin (or unlock) the file */
5013 }
5014
5015 /*
5016  * Mark a page as having been made dirty and thus needing writeback.  We also
5017  * need to pin the cache object to write back to.
5018  */
5019 #ifdef CONFIG_CIFS_FSCACHE
5020 static bool cifs_dirty_folio(struct address_space *mapping, struct folio *folio)
5021 {
5022         return fscache_dirty_folio(mapping, folio,
5023                                         cifs_inode_cookie(mapping->host));
5024 }
5025 #else
5026 #define cifs_dirty_folio filemap_dirty_folio
5027 #endif
5028
5029 const struct address_space_operations cifs_addr_ops = {
5030         .read_folio = cifs_read_folio,
5031         .readahead = cifs_readahead,
5032         .writepages = cifs_writepages,
5033         .write_begin = cifs_write_begin,
5034         .write_end = cifs_write_end,
5035         .dirty_folio = cifs_dirty_folio,
5036         .release_folio = cifs_release_folio,
5037         .direct_IO = cifs_direct_io,
5038         .invalidate_folio = cifs_invalidate_folio,
5039         .launder_folio = cifs_launder_folio,
5040         .migrate_folio = filemap_migrate_folio,
5041         /*
5042          * TODO: investigate and if useful we could add an is_dirty_writeback
5043          * helper if needed
5044          */
5045         .swap_activate = cifs_swap_activate,
5046         .swap_deactivate = cifs_swap_deactivate,
5047 };
5048
5049 /*
5050  * cifs_readahead requires the server to support a buffer large enough to
5051  * contain the header plus one complete page of data.  Otherwise, we need
5052  * to leave cifs_readahead out of the address space operations.
5053  */
5054 const struct address_space_operations cifs_addr_ops_smallbuf = {
5055         .read_folio = cifs_read_folio,
5056         .writepages = cifs_writepages,
5057         .write_begin = cifs_write_begin,
5058         .write_end = cifs_write_end,
5059         .dirty_folio = cifs_dirty_folio,
5060         .release_folio = cifs_release_folio,
5061         .invalidate_folio = cifs_invalidate_folio,
5062         .launder_folio = cifs_launder_folio,
5063         .migrate_folio = filemap_migrate_folio,
5064 };
5065
5066 /*
5067  * Splice data from a file into a pipe.
5068  */
5069 ssize_t cifs_splice_read(struct file *in, loff_t *ppos,
5070                          struct pipe_inode_info *pipe, size_t len,
5071                          unsigned int flags)
5072 {
5073         if (unlikely(*ppos >= file_inode(in)->i_sb->s_maxbytes))
5074                 return 0;
5075         if (unlikely(!len))
5076                 return 0;
5077         if (in->f_flags & O_DIRECT)
5078                 return direct_splice_read(in, ppos, pipe, len, flags);
5079         return filemap_splice_read(in, ppos, pipe, len, flags);
5080 }
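
#if 0	/* Hypothetical userspace demo, not part of this file: splicing
	 * from a buffered CIFS file descriptor into a pipe exercises the
	 * filemap_splice_read() branch above; an O_DIRECT descriptor would
	 * take the direct_splice_read() branch instead.
	 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

static ssize_t splice_to_pipe_demo(int cifs_fd, size_t len)
{
	int pfd[2];
	ssize_t n = -1;

	if (pipe(pfd) == 0) {
		n = splice(cifs_fd, NULL, pfd[1], NULL, len, 0);
		close(pfd[0]);
		close(pfd[1]);
	}
	return n;
}
#endif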