/* check for STATUS_NETWORK_SESSION_EXPIRED */
bool (*is_session_expired)(char *);
/* send oplock break response */
- int (*oplock_response)(struct cifs_tcon *tcon, __u64 persistent_fid, __u64 volatile_fid,
- __u16 net_fid, struct cifsInodeInfo *cifs_inode);
+ int (*oplock_response)(struct cifs_tcon *tcon, __u64 persistent_fid,
+ __u64 volatile_fid, __u16 net_fid,
+ struct cifsInodeInfo *cifs_inode,
+ unsigned int oplock);
/* query remote filesystem */
int (*queryfs)(const unsigned int, struct cifs_tcon *,
const char *, struct cifs_sb_info *, struct kstatfs *);
#define CIFS_CACHE_RW_FLG (CIFS_CACHE_READ_FLG | CIFS_CACHE_WRITE_FLG)
#define CIFS_CACHE_RHW_FLG (CIFS_CACHE_RW_FLG | CIFS_CACHE_HANDLE_FLG)
-#define CIFS_CACHE_READ(cinode) ((cinode->oplock & CIFS_CACHE_READ_FLG) || (CIFS_SB(cinode->netfs.inode.i_sb)->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE))
-#define CIFS_CACHE_HANDLE(cinode) (cinode->oplock & CIFS_CACHE_HANDLE_FLG)
-#define CIFS_CACHE_WRITE(cinode) ((cinode->oplock & CIFS_CACHE_WRITE_FLG) || (CIFS_SB(cinode->netfs.inode.i_sb)->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE))
-
/*
* One of these for each file inode
*/
queue_delayed_work(cifsiod_wq, &server->reconnect, delay * HZ);
}
+/*
+ * Common helper behind the CIFS_CACHE_* predicates below: returns true
+ * when any bit of @oplock_flags is set in the inode's cached oplock state,
+ * or any bit of @sb_flags is set in the superblock's mount flags (the
+ * forced RO/RW cache mount modes).  cinode->oplock is sampled with
+ * READ_ONCE() so callers need not hold cinode->open_file_lock; the result
+ * is only a snapshot and may be stale by the time it is used.
+ */
+static inline bool __cifs_cache_state_check(struct cifsInodeInfo *cinode,
+					    unsigned int oplock_flags,
+					    unsigned int sb_flags)
+{
+	struct cifs_sb_info *cifs_sb = CIFS_SB(cinode->netfs.inode.i_sb);
+	unsigned int oplock = READ_ONCE(cinode->oplock);
+	unsigned int sflags = cifs_sb->mnt_cifs_flags;
+
+	return (oplock & oplock_flags) || (sflags & sb_flags);
+}
+
+/* May we cache reads?  Also true when the RO-cache mount flag is set. */
+#define CIFS_CACHE_READ(cinode) \
+	__cifs_cache_state_check(cinode, CIFS_CACHE_READ_FLG, \
+				 CIFS_MOUNT_RO_CACHE)
+/* May we cache the handle?  No mount-flag override exists for this one. */
+#define CIFS_CACHE_HANDLE(cinode) \
+	__cifs_cache_state_check(cinode, CIFS_CACHE_HANDLE_FLG, 0)
+/* May we cache writes?  Also true when the RW-cache mount flag is set. */
+#define CIFS_CACHE_WRITE(cinode) \
+	__cifs_cache_state_check(cinode, CIFS_CACHE_WRITE_FLG, \
+				 CIFS_MOUNT_RW_CACHE)
+
+/*
+ * Drop all cached oplock state.  The store happens under
+ * cinode->open_file_lock (via scoped_guard) and uses WRITE_ONCE() to pair
+ * with the lockless READ_ONCE() readers above.
+ */
+static inline void cifs_reset_oplock(struct cifsInodeInfo *cinode)
+{
+	scoped_guard(spinlock, &cinode->open_file_lock)
+		WRITE_ONCE(cinode->oplock, 0);
+}
+
+
#endif /* _CIFS_GLOB_H */
oplock = fid->pending_open->oplock;
list_del(&fid->pending_open->olist);
- fid->purge_cache = false;
- server->ops->set_fid(cfile, fid, oplock);
-
list_add(&cfile->tlist, &tcon->openFileList);
atomic_inc(&tcon->num_local_opens);
/* if readable file instance put first in list*/
spin_lock(&cinode->open_file_lock);
+ fid->purge_cache = false;
+ server->ops->set_fid(cfile, fid, oplock);
+
if (file->f_mode & FMODE_READ)
list_add(&cfile->flist, &cinode->openFileList);
else
oplock = 0;
}
- server->ops->set_fid(cfile, &cfile->fid, oplock);
+ scoped_guard(spinlock, &cinode->open_file_lock)
+ server->ops->set_fid(cfile, &cfile->fid, oplock);
if (oparms.reconnect)
cifs_relock_file(cfile);
{
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct cifsInodeInfo *cinode = CIFS_I(inode);
+ unsigned int oplock = READ_ONCE(cinode->oplock);
- return (cifs_sb->ctx->closetimeo && cinode->lease_granted && dclose &&
- (cinode->oplock == CIFS_CACHE_RHW_FLG ||
- cinode->oplock == CIFS_CACHE_RH_FLG) &&
- !test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags));
+ return cifs_sb->ctx->closetimeo && cinode->lease_granted && dclose &&
+ (oplock == CIFS_CACHE_RHW_FLG || oplock == CIFS_CACHE_RH_FLG) &&
+ !test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags);
}
cifs_zap_mapping(inode);
cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
inode);
- CIFS_I(inode)->oplock = 0;
+ cifs_reset_oplock(CIFS_I(inode));
}
rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
cifs_zap_mapping(inode);
cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
inode);
- cinode->oplock = 0;
+ cifs_reset_oplock(cinode);
}
out:
cifs_put_writer(cinode);
cifs_dbg(FYI,
"Set no oplock for inode=%p after a write operation\n",
inode);
- cinode->oplock = 0;
+ cifs_reset_oplock(cinode);
}
return written;
}
struct super_block *sb = inode->i_sb;
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
struct cifsInodeInfo *cinode = CIFS_I(inode);
+ bool cache_read, cache_write, cache_handle;
struct cifs_tcon *tcon;
struct TCP_Server_Info *server;
struct tcon_link *tlink;
+ unsigned int oplock;
int rc = 0;
bool purge_cache = false, oplock_break_cancelled;
__u64 persistent_fid, volatile_fid;
tcon = tlink_tcon(tlink);
server = tcon->ses->server;
- server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
- cfile->oplock_epoch, &purge_cache);
+ scoped_guard(spinlock, &cinode->open_file_lock) {
+ unsigned int sbflags = cifs_sb->mnt_cifs_flags;
+
+ server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
+ cfile->oplock_epoch, &purge_cache);
+ oplock = READ_ONCE(cinode->oplock);
+ cache_read = (oplock & CIFS_CACHE_READ_FLG) ||
+ (sbflags & CIFS_MOUNT_RO_CACHE);
+ cache_write = (oplock & CIFS_CACHE_WRITE_FLG) ||
+ (sbflags & CIFS_MOUNT_RW_CACHE);
+ cache_handle = oplock & CIFS_CACHE_HANDLE_FLG;
+ }
- if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
- cifs_has_mand_locks(cinode)) {
+ if (!cache_write && cache_read && cifs_has_mand_locks(cinode)) {
cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
inode);
- cinode->oplock = 0;
+ cifs_reset_oplock(cinode);
+ oplock = 0;
+ cache_read = cache_write = cache_handle = false;
}
if (S_ISREG(inode->i_mode)) {
- if (CIFS_CACHE_READ(cinode))
+ if (cache_read)
break_lease(inode, O_RDONLY);
else
break_lease(inode, O_WRONLY);
rc = filemap_fdatawrite(inode->i_mapping);
- if (!CIFS_CACHE_READ(cinode) || purge_cache) {
+ if (!cache_read || purge_cache) {
rc = filemap_fdatawait(inode->i_mapping);
mapping_set_error(inode->i_mapping, rc);
cifs_zap_mapping(inode);
}
cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
- if (CIFS_CACHE_WRITE(cinode))
+ if (cache_write)
goto oplock_break_ack;
}
* So, new open will not use cached handle.
*/
- if (!CIFS_CACHE_HANDLE(cinode) && !list_empty(&cinode->deferred_closes))
+ if (!cache_handle && !list_empty(&cinode->deferred_closes))
cifs_close_deferred_file(cinode);
persistent_fid = cfile->fid.persistent_fid;
if (!oplock_break_cancelled && !list_empty(&cinode->openFileList)) {
spin_unlock(&cinode->open_file_lock);
rc = server->ops->oplock_response(tcon, persistent_fid,
- volatile_fid, net_fid, cinode);
+ volatile_fid, net_fid,
+ cinode, oplock);
cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
} else
spin_unlock(&cinode->open_file_lock);
struct cifsInodeInfo *cinode, __u32 oplock,
__u16 epoch, bool *purge_cache)
{
+ lockdep_assert_held(&cinode->open_file_lock);
cifs_set_oplock_level(cinode, oplock);
}
cifs_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
{
struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
+
+ lockdep_assert_held(&cinode->open_file_lock);
+
cfile->fid.netfid = fid->netfid;
cifs_set_oplock_level(cinode, oplock);
cinode->can_cache_brlcks = CIFS_CACHE_WRITE(cinode);
return CIFSFindClose(xid, tcon, fid->netfid);
}
-static int
-cifs_oplock_response(struct cifs_tcon *tcon, __u64 persistent_fid,
-		     __u64 volatile_fid, __u16 net_fid, struct cifsInodeInfo *cinode)
+/*
+ * SMB1 ->oplock_response(): acknowledge an oplock break by sending a
+ * LOCKING_ANDX_OPLOCK_RELEASE request.  @oplock is the remaining oplock
+ * state snapshotted by the caller (under cinode->open_file_lock) rather
+ * than re-read from cinode here, so the ack matches the state the break
+ * handler actually observed.
+ */
+static int cifs_oplock_response(struct cifs_tcon *tcon, __u64 persistent_fid,
+				__u64 volatile_fid, __u16 net_fid,
+				struct cifsInodeInfo *cinode, unsigned int oplock)
 {
+	unsigned int sbflags = CIFS_SB(cinode->netfs.inode.i_sb)->mnt_cifs_flags;
+	__u8 op;
+
+	/* Keep a level II (read) oplock if read caching may be retained. */
+	op = !!((oplock & CIFS_CACHE_READ_FLG) || (sbflags & CIFS_MOUNT_RO_CACHE));
 	return CIFSSMBLock(0, tcon, net_fid, current->tgid, 0, 0, 0, 0,
-			   LOCKING_ANDX_OPLOCK_RELEASE, false, CIFS_CACHE_READ(cinode) ? 1 : 0);
+			   LOCKING_ANDX_OPLOCK_RELEASE, false, op);
 }
static int
return to;
}
-__le32
-smb2_get_lease_state(struct cifsInodeInfo *cinode)
+/*
+ * Translate cached caching state into SMB2 lease-state flags for a lease
+ * break acknowledgment.  @oplock is the caller's snapshot of
+ * cinode->oplock; the mount-wide RO/RW cache flags may still force the
+ * read/write caching bits on even when the snapshot has them clear.
+ */
+__le32 smb2_get_lease_state(struct cifsInodeInfo *cinode, unsigned int oplock)
 {
+	unsigned int sbflags = CIFS_SB(cinode->netfs.inode.i_sb)->mnt_cifs_flags;
 	__le32 lease = 0;
-	if (CIFS_CACHE_WRITE(cinode))
+	if ((oplock & CIFS_CACHE_WRITE_FLG) || (sbflags & CIFS_MOUNT_RW_CACHE))
 		lease |= SMB2_LEASE_WRITE_CACHING_LE;
-	if (CIFS_CACHE_HANDLE(cinode))
+	if (oplock & CIFS_CACHE_HANDLE_FLG)
 		lease |= SMB2_LEASE_HANDLE_CACHING_LE;
-	if (CIFS_CACHE_READ(cinode))
+	if ((oplock & CIFS_CACHE_READ_FLG) || (sbflags & CIFS_MOUNT_RO_CACHE))
 		lease |= SMB2_LEASE_READ_CACHING_LE;
 	return lease;
 }
struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
+ lockdep_assert_held(&cinode->open_file_lock);
+
cfile->fid.persistent_fid = fid->persistent_fid;
cfile->fid.volatile_fid = fid->volatile_fid;
cfile->fid.access = fid->access;
return false;
}
-static int
-smb2_oplock_response(struct cifs_tcon *tcon, __u64 persistent_fid,
-		     __u64 volatile_fid, __u16 net_fid, struct cifsInodeInfo *cinode)
+/*
+ * SMB2/3 ->oplock_response(): acknowledge an oplock or lease break.
+ * @oplock is the remaining state snapshotted by the caller under
+ * cinode->open_file_lock, so we no longer re-read cinode->oplock here.
+ *
+ * On leasing-capable servers the break is acked via SMB2_lease_break();
+ * otherwise a plain oplock break ack is sent.  The mount flags are only
+ * consulted on the non-lease path, so they are read at the use-site.
+ */
+static int smb2_oplock_response(struct cifs_tcon *tcon, __u64 persistent_fid,
+				__u64 volatile_fid, __u16 net_fid,
+				struct cifsInodeInfo *cinode, unsigned int oplock)
 {
+	__u8 op;
+
 	if (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LEASING)
 		return SMB2_lease_break(0, tcon, cinode->lease_key,
-					smb2_get_lease_state(cinode));
+					smb2_get_lease_state(cinode, oplock));
-	return SMB2_oplock_break(0, tcon, persistent_fid, volatile_fid,
-				 CIFS_CACHE_READ(cinode) ? 1 : 0);
+	/* Keep a level II (read) oplock if read caching may be retained. */
+	op = (oplock & CIFS_CACHE_READ_FLG) ||
+	     (CIFS_SB(cinode->netfs.inode.i_sb)->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE);
+	return SMB2_oplock_break(0, tcon, persistent_fid, volatile_fid, op);
 }
void
struct cifsInodeInfo *cinode, __u32 oplock,
__u16 epoch, bool *purge_cache)
{
+ lockdep_assert_held(&cinode->open_file_lock);
server->ops->set_oplock_level(cinode, oplock, 0, NULL);
}
if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
return;
if (oplock == SMB2_OPLOCK_LEVEL_BATCH) {
- cinode->oplock = CIFS_CACHE_RHW_FLG;
+ WRITE_ONCE(cinode->oplock, CIFS_CACHE_RHW_FLG);
cifs_dbg(FYI, "Batch Oplock granted on inode %p\n",
&cinode->netfs.inode);
} else if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
- cinode->oplock = CIFS_CACHE_RW_FLG;
+ WRITE_ONCE(cinode->oplock, CIFS_CACHE_RW_FLG);
cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
&cinode->netfs.inode);
} else if (oplock == SMB2_OPLOCK_LEVEL_II) {
- cinode->oplock = CIFS_CACHE_READ_FLG;
+ WRITE_ONCE(cinode->oplock, CIFS_CACHE_READ_FLG);
cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
&cinode->netfs.inode);
} else
- cinode->oplock = 0;
+ WRITE_ONCE(cinode->oplock, 0);
}
static void
if (!new_oplock)
strscpy(message, "None");
- cinode->oplock = new_oplock;
+ WRITE_ONCE(cinode->oplock, new_oplock);
cifs_dbg(FYI, "%s Lease granted on inode %p\n", message,
&cinode->netfs.inode);
}
smb3_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
__u16 epoch, bool *purge_cache)
{
- unsigned int old_oplock = cinode->oplock;
+ unsigned int old_oplock = READ_ONCE(cinode->oplock);
+ unsigned int new_oplock;
smb21_set_oplock_level(cinode, oplock, epoch, purge_cache);
+ new_oplock = READ_ONCE(cinode->oplock);
if (purge_cache) {
*purge_cache = false;
if (old_oplock == CIFS_CACHE_READ_FLG) {
- if (cinode->oplock == CIFS_CACHE_READ_FLG &&
+ if (new_oplock == CIFS_CACHE_READ_FLG &&
(epoch - cinode->epoch > 0))
*purge_cache = true;
- else if (cinode->oplock == CIFS_CACHE_RH_FLG &&
+ else if (new_oplock == CIFS_CACHE_RH_FLG &&
(epoch - cinode->epoch > 1))
*purge_cache = true;
- else if (cinode->oplock == CIFS_CACHE_RHW_FLG &&
+ else if (new_oplock == CIFS_CACHE_RHW_FLG &&
(epoch - cinode->epoch > 1))
*purge_cache = true;
- else if (cinode->oplock == 0 &&
+ else if (new_oplock == 0 &&
(epoch - cinode->epoch > 0))
*purge_cache = true;
} else if (old_oplock == CIFS_CACHE_RH_FLG) {
- if (cinode->oplock == CIFS_CACHE_RH_FLG &&
+ if (new_oplock == CIFS_CACHE_RH_FLG &&
(epoch - cinode->epoch > 0))
*purge_cache = true;
- else if (cinode->oplock == CIFS_CACHE_RHW_FLG &&
+ else if (new_oplock == CIFS_CACHE_RHW_FLG &&
(epoch - cinode->epoch > 1))
*purge_cache = true;
}
struct smb_rqst *rqst);
struct cifs_tcon *smb2_find_smb_tcon(struct TCP_Server_Info *server,
__u64 ses_id, __u32 tid);
-__le32 smb2_get_lease_state(struct cifsInodeInfo *cinode);
+__le32 smb2_get_lease_state(struct cifsInodeInfo *cinode, unsigned int oplock);
bool smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server);
int smb3_handle_read_data(struct TCP_Server_Info *server,
struct mid_q_entry *mid);