1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2014 Anna Schumaker <Anna.Schumaker@Netapp.com>
6 #include <linux/sunrpc/addr.h>
7 #include <linux/sunrpc/sched.h>
9 #include <linux/nfs3.h>
10 #include <linux/nfs4.h>
11 #include <linux/nfs_xdr.h>
12 #include <linux/nfs_fs.h>
17 #include "nfs4session.h"
19 #include "delegation.h"
20 #include "nfs4trace.h"
22 #define NFSDBG_FACILITY NFSDBG_PROC
23 static int nfs42_do_offload_cancel_async(struct file *dst, nfs4_stateid *std);
/*
 * Fill @naddr with the netid and universal address of the server that
 * backs @filep.  The port is hard-coded to 2049 (standard NFS port).
 * NOTE(review): this excerpt is missing lines (function braces, some
 * scnprintf()/rpc_peeraddr2str() arguments) — comments added only.
 */
25 static void nfs42_set_netaddr(struct file *filep, struct nfs42_netaddr *naddr)
27 struct nfs_client *clp = (NFS_SERVER(file_inode(filep)))->nfs_client;
28 unsigned short port = 2049;
31 naddr->netid_len = scnprintf(naddr->netid,
32 sizeof(naddr->netid), "%s",
33 rpc_peeraddr2str(clp->cl_rpcclient,
/* The universal address format encodes the port as two trailing decimal octets. */
35 naddr->addr_len = scnprintf(naddr->addr,
38 rpc_peeraddr2str(clp->cl_rpcclient,
40 port >> 8, port & 255);
/*
 * Issue one ALLOCATE or DEALLOCATE RPC (selected by the caller-supplied
 * @msg) for the range [@offset, @offset + @len) of @filep, under the lock
 * context @lock.  On success the post-op attributes are folded back into
 * the inode.  Returns 0 or a negative errno/NFS4ERR value.
 * NOTE(review): lines are missing from this excerpt (braces, labels,
 * error paths) — comments added only, visible code untouched.
 */
44 static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
45 struct nfs_lock_context *lock, loff_t offset, loff_t len)
47 struct inode *inode = file_inode(filep);
48 struct nfs_server *server = NFS_SERVER(inode);
49 struct nfs42_falloc_args args = {
50 .falloc_fh = NFS_FH(inode),
51 .falloc_offset = offset,
53 .falloc_bitmask = nfs4_fattr_bitmap,
55 struct nfs42_falloc_res res = {
56 .falloc_server = server,
60 msg->rpc_argp = &args;
/* Select a stateid appropriate for writing under this open/lock context. */
63 status = nfs4_set_rw_stateid(&args.falloc_stateid, lock->open_context,
/*
 * -EAGAIN here means "retry with a different stateid"; remap it so the
 * caller's nfs4_handle_exception() loop recovers state and retries.
 */
66 if (status == -EAGAIN)
67 status = -NFS4ERR_BAD_STATEID;
71 res.falloc_fattr = nfs_alloc_fattr();
72 if (!res.falloc_fattr)
75 status = nfs4_call_sync(server->client, server, msg,
76 &args.seq_args, &res.seq_res, 0);
78 status = nfs_post_op_update_inode(inode, res.falloc_fattr);
80 kfree(res.falloc_fattr);
/*
 * Retry wrapper around _nfs42_proc_fallocate(): acquires a lock context
 * for @filep and loops through nfs4_handle_exception() until the RPC
 * succeeds, fails permanently, or the server reports -ENOTSUPP.
 */
84 static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
85 loff_t offset, loff_t len)
87 struct nfs_server *server = NFS_SERVER(file_inode(filep));
88 struct nfs4_exception exception = { };
89 struct nfs_lock_context *lock;
92 lock = nfs_get_lock_context(nfs_file_open_context(filep));
96 exception.inode = file_inode(filep);
97 exception.state = lock->open_context->state;
100 err = _nfs42_proc_fallocate(msg, filep, lock, offset, len);
/* -ENOTSUPP ends the retry loop — this excerpt omits the branch body. */
101 if (err == -ENOTSUPP) {
105 err = nfs4_handle_exception(server, err, &exception);
106 } while (exception.retry);
108 nfs_put_lock_context(lock);
/*
 * fallocate(mode=0) entry point: preallocate @len bytes at @offset.
 * Gated on the NFS_CAP_ALLOCATE capability; the capability bit is
 * cleared if the server answers -EOPNOTSUPP, so later calls short-circuit.
 */
112 int nfs42_proc_allocate(struct file *filep, loff_t offset, loff_t len)
114 struct rpc_message msg = {
115 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ALLOCATE],
117 struct inode *inode = file_inode(filep);
120 if (!nfs_server_capable(inode, NFS_CAP_ALLOCATE))
125 err = nfs42_proc_fallocate(&msg, filep, offset, len);
126 if (err == -EOPNOTSUPP)
127 NFS_SERVER(inode)->caps &= ~NFS_CAP_ALLOCATE;
/*
 * fallocate(PUNCH_HOLE) entry point: deallocate @len bytes at @offset.
 * Dirty data is flushed first (nfs_sync_inode) and the punched range is
 * dropped from the page cache so reads refetch the hole from the server.
 */
133 int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
135 struct rpc_message msg = {
136 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DEALLOCATE],
138 struct inode *inode = file_inode(filep);
141 if (!nfs_server_capable(inode, NFS_CAP_DEALLOCATE))
145 err = nfs_sync_inode(inode);
149 err = nfs42_proc_fallocate(&msg, filep, offset, len);
/* Range end is inclusive, hence the -1. */
151 truncate_pagecache_range(inode, offset, (offset + len) -1);
152 if (err == -EOPNOTSUPP)
153 NFS_SERVER(inode)->caps &= ~NFS_CAP_DEALLOCATE;
/*
 * Wait for completion of an asynchronous COPY.  The copy state is linked
 * onto the destination (and, for inter-server copies, source) server's
 * ss_copies list, then we sleep until the CB_OFFLOAD callback completes
 * it.  If a callback already arrived before we got here, it is found on
 * pending_cb_stateids instead and we do not wait.
 * NOTE(review): every occurrence of "©" below is mojibake for "&copy"
 * (encoding damage in this excerpt); code left byte-identical.  Several
 * lines (kzalloc error check, labels, return) are also missing here.
 */
159 static int handle_async_copy(struct nfs42_copy_res *res,
160 struct nfs_server *dst_server,
161 struct nfs_server *src_server,
164 nfs4_stateid *src_stateid,
167 struct nfs4_copy_state *copy, *tmp_copy;
168 int status = NFS4_OK;
169 bool found_pending = false;
170 struct nfs_open_context *dst_ctx = nfs_file_open_context(dst);
171 struct nfs_open_context *src_ctx = nfs_file_open_context(src);
173 copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS);
/* Did the CB_OFFLOAD callback race ahead of us?  Match on stateid. */
177 spin_lock(&dst_server->nfs_client->cl_lock);
178 list_for_each_entry(tmp_copy,
179 &dst_server->nfs_client->pending_cb_stateids,
181 if (memcmp(&res->write_res.stateid, &tmp_copy->stateid,
184 found_pending = true;
185 list_del(&tmp_copy->copies);
189 spin_unlock(&dst_server->nfs_client->cl_lock);
195 memcpy(©->stateid, &res->write_res.stateid, NFS4_STATEID_SIZE);
196 init_completion(©->completion);
197 copy->parent_dst_state = dst_ctx->state;
198 copy->parent_src_state = src_ctx->state;
/* Publish the copy so the callback (and state recovery) can find it. */
200 list_add_tail(©->copies, &dst_server->ss_copies);
201 spin_unlock(&dst_server->nfs_client->cl_lock);
203 if (dst_server != src_server) {
204 spin_lock(&src_server->nfs_client->cl_lock);
205 list_add_tail(©->src_copies, &src_server->ss_copies);
206 spin_unlock(&src_server->nfs_client->cl_lock);
/* Interruptible wait: a signal yields -ERESTARTSYS and cancels the offload. */
209 status = wait_for_completion_interruptible(©->completion);
210 spin_lock(&dst_server->nfs_client->cl_lock);
211 list_del_init(©->copies);
212 spin_unlock(&dst_server->nfs_client->cl_lock);
213 if (dst_server != src_server) {
214 spin_lock(&src_server->nfs_client->cl_lock);
215 list_del_init(©->src_copies);
216 spin_unlock(&src_server->nfs_client->cl_lock);
218 if (status == -ERESTARTSYS) {
220 } else if (copy->flags || copy->error == NFS4ERR_PARTNER_NO_AUTH) {
226 res->write_res.count = copy->count;
227 memcpy(&res->write_res.verifier, ©->verf, sizeof(copy->verf));
228 status = -copy->error;
/* Error path: cancel the offload on both servers for inter-server copies. */
234 nfs42_do_offload_cancel_async(dst, ©->stateid);
235 if (!nfs42_files_from_same_server(src, dst))
236 nfs42_do_offload_cancel_async(src, src_stateid);
/*
 * COMMIT the destination range after an (uncommitted) COPY and verify
 * that the commit verifier matches the copy's write verifier; a mismatch
 * means the server rebooted between copy and commit.
 * NOTE(review): cleanup/return lines are missing from this excerpt.
 */
240 static int process_copy_commit(struct file *dst, loff_t pos_dst,
241 struct nfs42_copy_res *res)
243 struct nfs_commitres cres;
244 int status = -ENOMEM;
246 cres.verf = kzalloc(sizeof(struct nfs_writeverf), GFP_NOFS);
250 status = nfs4_proc_commit(dst, pos_dst, res->write_res.count, &cres);
/* nfs_write_verifier_cmp() returns non-zero when verifiers differ. */
253 if (nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
254 &cres.verf->verifier)) {
255 dprintk("commit verf differs from copy verf\n");
/*
 * Issue one COPY RPC.  For inter-server (server-to-server) copies the
 * caller supplies @nss/@cnr_stateid from a prior COPY_NOTIFY and the
 * source stateid is taken from there; otherwise a read stateid is
 * selected locally.  Source data is flushed before the copy, the
 * destination inode is synced, and async completion is handled via
 * handle_async_copy().  Returns bytes copied or a negative error.
 * NOTE(review): this excerpt omits multiple lines (guard conditions,
 * labels, page-cache invalidation guard) — comments added only.
 */
264 static ssize_t _nfs42_proc_copy(struct file *src,
265 struct nfs_lock_context *src_lock,
267 struct nfs_lock_context *dst_lock,
268 struct nfs42_copy_args *args,
269 struct nfs42_copy_res *res,
270 struct nl4_server *nss,
271 nfs4_stateid *cnr_stateid,
274 struct rpc_message msg = {
275 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COPY],
279 struct inode *dst_inode = file_inode(dst);
280 struct inode *src_inode = file_inode(src);
281 struct nfs_server *dst_server = NFS_SERVER(dst_inode);
282 struct nfs_server *src_server = NFS_SERVER(src_inode);
283 loff_t pos_src = args->src_pos;
284 loff_t pos_dst = args->dst_pos;
285 size_t count = args->count;
/* Inter-server path: reuse the stateid returned by COPY_NOTIFY. */
290 nfs4_stateid_copy(&args->src_stateid, cnr_stateid);
/* Intra-server path: pick a read stateid for the source. */
292 status = nfs4_set_rw_stateid(&args->src_stateid,
293 src_lock->open_context, src_lock, FMODE_READ);
295 if (status == -EAGAIN)
296 status = -NFS4ERR_BAD_STATEID;
/* Make dirty source pages visible to the server before it reads them. */
300 status = nfs_filemap_write_and_wait_range(file_inode(src)->i_mapping,
301 pos_src, pos_src + (loff_t)count - 1);
305 status = nfs4_set_rw_stateid(&args->dst_stateid, dst_lock->open_context,
306 dst_lock, FMODE_WRITE);
308 if (status == -EAGAIN)
309 status = -NFS4ERR_BAD_STATEID;
313 status = nfs_sync_inode(dst_inode);
317 res->commit_res.verf = NULL;
/* Synchronous copies need a verifier buffer for the embedded commit result. */
319 res->commit_res.verf =
320 kzalloc(sizeof(struct nfs_writeverf), GFP_NOFS);
321 if (!res->commit_res.verf)
/* Flag both states so state recovery knows a server-side copy is in flight. */
324 set_bit(NFS_CLNT_SRC_SSC_COPY_STATE,
325 &src_lock->open_context->state->flags);
326 set_bit(NFS_CLNT_DST_SSC_COPY_STATE,
327 &dst_lock->open_context->state->flags);
329 status = nfs4_call_sync(dst_server->client, dst_server, &msg,
330 &args->seq_args, &res->seq_res, 0);
331 if (status == -ENOTSUPP)
332 dst_server->caps &= ~NFS_CAP_COPY;
337 nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
338 &res->commit_res.verf->verifier)) {
/* Server chose the asynchronous model: wait for CB_OFFLOAD. */
343 if (!res->synchronous) {
344 status = handle_async_copy(res, dst_server, src_server, src,
345 dst, &args->src_stateid, restart);
/* Data not yet NFS_FILE_SYNC-stable: follow up with a COMMIT. */
350 if ((!res->synchronous || !args->sync) &&
351 res->write_res.verifier.committed != NFS_FILE_SYNC) {
352 status = process_copy_commit(dst, pos_dst, res);
/* Invalidate cached destination pages the server just overwrote. */
357 truncate_pagecache_range(dst_inode, pos_dst,
358 pos_dst + res->write_res.count);
360 status = res->write_res.count;
363 kfree(res->commit_res.verf);
/*
 * Public COPY entry point: set up lock contexts and args, then loop on
 * _nfs42_proc_copy() under the standard nfs4 exception handler for both
 * source and destination states.  Returns bytes copied or negative errno.
 * NOTE(review): many lines are absent from this excerpt (args init,
 * -ENOTSUPP/-EAGAIN branch bodies, labels) — comments added only.
 */
367 ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src,
368 struct file *dst, loff_t pos_dst, size_t count,
369 struct nl4_server *nss,
370 nfs4_stateid *cnr_stateid, bool sync)
372 struct nfs_server *server = NFS_SERVER(file_inode(dst));
373 struct nfs_lock_context *src_lock;
374 struct nfs_lock_context *dst_lock;
375 struct nfs42_copy_args args = {
376 .src_fh = NFS_FH(file_inode(src)),
378 .dst_fh = NFS_FH(file_inode(dst)),
383 struct nfs42_copy_res res;
384 struct nfs4_exception src_exception = {
385 .inode = file_inode(src),
386 .stateid = &args.src_stateid,
388 struct nfs4_exception dst_exception = {
389 .inode = file_inode(dst),
390 .stateid = &args.dst_stateid,
393 bool restart = false;
395 src_lock = nfs_get_lock_context(nfs_file_open_context(src));
396 if (IS_ERR(src_lock))
397 return PTR_ERR(src_lock);
399 src_exception.state = src_lock->open_context->state;
401 dst_lock = nfs_get_lock_context(nfs_file_open_context(dst));
402 if (IS_ERR(dst_lock)) {
403 err = PTR_ERR(dst_lock);
404 goto out_put_src_lock;
407 dst_exception.state = dst_lock->open_context->state;
/* Serialize against other writers of the destination inode. */
410 inode_lock(file_inode(dst));
411 err = _nfs42_proc_copy(src, src_lock,
414 nss, cnr_stateid, &restart);
415 inode_unlock(file_inode(dst));
419 if (err == -ENOTSUPP &&
420 nfs42_files_from_same_server(src, dst)) {
423 } else if (err == -EAGAIN) {
425 dst_exception.retry = 1;
/* Server refuses async offload: retry, presumably synchronously. */
429 } else if (err == -NFS4ERR_OFFLOAD_NO_REQS && !args.sync) {
431 dst_exception.retry = 1;
/* Inter-server copy rejected outright: cancel the source offload. */
433 } else if ((err == -ESTALE ||
434 err == -NFS4ERR_OFFLOAD_DENIED ||
436 !nfs42_files_from_same_server(src, dst)) {
437 nfs42_do_offload_cancel_async(src, &args.src_stateid);
442 err2 = nfs4_handle_exception(server, err, &src_exception);
443 err = nfs4_handle_exception(server, err, &dst_exception);
446 } while (src_exception.retry || dst_exception.retry);
448 nfs_put_lock_context(dst_lock);
450 nfs_put_lock_context(src_lock);
/* Per-call state for an async OFFLOAD_CANCEL RPC (freed by rpc_release). */
454 struct nfs42_offloadcancel_data {
455 struct nfs_server *seq_server;
456 struct nfs42_offload_status_args args;
457 struct nfs42_offload_status_res res;
/* rpc_call_prepare: claim a session slot before OFFLOAD_CANCEL is sent. */
460 static void nfs42_offload_cancel_prepare(struct rpc_task *task, void *calldata)
462 struct nfs42_offloadcancel_data *data = calldata;
464 nfs4_setup_sequence(data->seq_server->nfs_client,
465 &data->args.osa_seq_args,
466 &data->res.osr_seq_res, task);
/*
 * rpc_call_done: complete the sequence and restart the RPC if the error
 * handler says it is retryable (session/state errors).
 */
469 static void nfs42_offload_cancel_done(struct rpc_task *task, void *calldata)
471 struct nfs42_offloadcancel_data *data = calldata;
473 nfs41_sequence_done(task, &data->res.osr_seq_res);
474 if (task->tk_status &&
475 nfs4_async_handle_error(task, data->seq_server, NULL,
477 rpc_restart_call_prepare(task);
/* rpc_release: presumably kfree()s @data — body missing from this excerpt; confirm. */
480 static void nfs42_free_offloadcancel_data(void *data)
/* RPC callbacks wiring for the async OFFLOAD_CANCEL task. */
485 static const struct rpc_call_ops nfs42_offload_cancel_ops = {
486 .rpc_call_prepare = nfs42_offload_cancel_prepare,
487 .rpc_call_done = nfs42_offload_cancel_done,
488 .rpc_release = nfs42_free_offloadcancel_data,
/*
 * Fire an asynchronous OFFLOAD_CANCEL for @stateid against the server
 * behind @dst, then wait for task completion.  No-op when the server
 * lacks NFS_CAP_OFFLOAD_CANCEL; the capability is dropped on -ENOTSUPP.
 * NOTE(review): kzalloc error check, rpc_put_task and return lines are
 * missing from this excerpt.
 */
491 static int nfs42_do_offload_cancel_async(struct file *dst,
492 nfs4_stateid *stateid)
494 struct nfs_server *dst_server = NFS_SERVER(file_inode(dst));
495 struct nfs42_offloadcancel_data *data = NULL;
496 struct nfs_open_context *ctx = nfs_file_open_context(dst);
497 struct rpc_task *task;
498 struct rpc_message msg = {
499 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OFFLOAD_CANCEL],
500 .rpc_cred = ctx->cred,
502 struct rpc_task_setup task_setup_data = {
503 .rpc_client = dst_server->client,
505 .callback_ops = &nfs42_offload_cancel_ops,
506 .workqueue = nfsiod_workqueue,
507 .flags = RPC_TASK_ASYNC,
511 if (!(dst_server->caps & NFS_CAP_OFFLOAD_CANCEL))
514 data = kzalloc(sizeof(struct nfs42_offloadcancel_data), GFP_NOFS);
518 data->seq_server = dst_server;
519 data->args.osa_src_fh = NFS_FH(file_inode(dst));
520 memcpy(&data->args.osa_stateid, stateid,
521 sizeof(data->args.osa_stateid));
522 msg.rpc_argp = &data->args;
523 msg.rpc_resp = &data->res;
524 task_setup_data.callback_data = data;
525 nfs4_init_sequence(&data->args.osa_seq_args, &data->res.osr_seq_res,
527 task = rpc_run_task(&task_setup_data);
529 return PTR_ERR(task);
530 status = rpc_wait_for_completion_task(task);
531 if (status == -ENOTSUPP)
532 dst_server->caps &= ~NFS_CAP_OFFLOAD_CANCEL;
/*
 * Issue one COPY_NOTIFY to the *source* server, announcing that @dst's
 * server will be reading from @src.  A read stateid is selected from a
 * temporary lock context; -EAGAIN is remapped to -NFS4ERR_BAD_STATEID
 * so the caller's exception loop retries after state recovery.
 */
537 static int _nfs42_proc_copy_notify(struct file *src, struct file *dst,
538 struct nfs42_copy_notify_args *args,
539 struct nfs42_copy_notify_res *res)
541 struct nfs_server *src_server = NFS_SERVER(file_inode(src));
542 struct rpc_message msg = {
543 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COPY_NOTIFY],
548 struct nfs_open_context *ctx;
549 struct nfs_lock_context *l_ctx;
551 ctx = get_nfs_open_context(nfs_file_open_context(src));
552 l_ctx = nfs_get_lock_context(ctx);
554 return PTR_ERR(l_ctx);
556 status = nfs4_set_rw_stateid(&args->cna_src_stateid, ctx, l_ctx,
558 nfs_put_lock_context(l_ctx);
560 if (status == -EAGAIN)
561 status = -NFS4ERR_BAD_STATEID;
565 status = nfs4_call_sync(src_server->client, src_server, &msg,
566 &args->cna_seq_args, &res->cnr_seq_res, 0);
567 if (status == -ENOTSUPP)
568 src_server->caps &= ~NFS_CAP_COPY_NOTIFY;
/* Drop the reference taken by get_nfs_open_context() above. */
570 put_nfs_open_context(nfs_file_open_context(src));
/*
 * Public COPY_NOTIFY entry point: build args (destination address from
 * nfs42_set_netaddr) and retry _nfs42_proc_copy_notify() under the
 * standard exception loop.  Gated on NFS_CAP_COPY_NOTIFY.
 * NOTE(review): kzalloc error check, kfree and return lines are missing
 * from this excerpt.
 */
574 int nfs42_proc_copy_notify(struct file *src, struct file *dst,
575 struct nfs42_copy_notify_res *res)
577 struct nfs_server *src_server = NFS_SERVER(file_inode(src));
578 struct nfs42_copy_notify_args *args;
579 struct nfs4_exception exception = {
580 .inode = file_inode(src),
584 if (!(src_server->caps & NFS_CAP_COPY_NOTIFY))
587 args = kzalloc(sizeof(struct nfs42_copy_notify_args), GFP_NOFS);
591 args->cna_src_fh = NFS_FH(file_inode(src)),
592 args->cna_dst.nl4_type = NL4_NETADDR;
593 nfs42_set_netaddr(dst, &args->cna_dst.u.nl4_addr);
594 exception.stateid = &args->cna_src_stateid;
597 status = _nfs42_proc_copy_notify(src, dst, args, res);
598 if (status == -ENOTSUPP) {
599 status = -EOPNOTSUPP;
602 status = nfs4_handle_exception(src_server, status, &exception);
603 } while (exception.retry);
/*
 * Issue one SEEK RPC for SEEK_HOLE/SEEK_DATA.  Dirty pages are flushed
 * first so the server's view matches the client's, and on success the
 * returned offset is validated/applied via vfs_setpos().
 * NOTE(review): some lines (args init, write_and_wait range argument,
 * error checks) are missing from this excerpt.
 */
610 static loff_t _nfs42_proc_llseek(struct file *filep,
611 struct nfs_lock_context *lock, loff_t offset, int whence)
613 struct inode *inode = file_inode(filep);
614 struct nfs42_seek_args args = {
615 .sa_fh = NFS_FH(inode),
617 .sa_what = (whence == SEEK_HOLE) ?
618 NFS4_CONTENT_HOLE : NFS4_CONTENT_DATA,
620 struct nfs42_seek_res res;
621 struct rpc_message msg = {
622 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEEK],
626 struct nfs_server *server = NFS_SERVER(inode);
629 if (!nfs_server_capable(inode, NFS_CAP_SEEK))
632 status = nfs4_set_rw_stateid(&args.sa_stateid, lock->open_context,
635 if (status == -EAGAIN)
636 status = -NFS4ERR_BAD_STATEID;
640 status = nfs_filemap_write_and_wait_range(inode->i_mapping,
645 status = nfs4_call_sync(server->client, server, &msg,
646 &args.seq_args, &res.seq_res, 0);
647 if (status == -ENOTSUPP)
648 server->caps &= ~NFS_CAP_SEEK;
652 return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes);
/*
 * Retry wrapper for SEEK: acquires a lock context and loops on
 * _nfs42_proc_llseek() through nfs4_handle_exception().
 */
655 loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
657 struct nfs_server *server = NFS_SERVER(file_inode(filep));
658 struct nfs4_exception exception = { };
659 struct nfs_lock_context *lock;
662 lock = nfs_get_lock_context(nfs_file_open_context(filep));
664 return PTR_ERR(lock);
666 exception.inode = file_inode(filep);
667 exception.state = lock->open_context->state;
670 err = _nfs42_proc_llseek(filep, lock, offset, whence);
/* -ENOTSUPP ends the retry loop — branch body omitted in this excerpt. */
673 if (err == -ENOTSUPP) {
677 err = nfs4_handle_exception(server, err, &exception);
678 } while (exception.retry);
680 nfs_put_lock_context(lock);
/*
 * rpc_call_prepare for LAYOUTSTATS: snapshot the current layout stateid
 * under i_lock (bailing out if the layout became invalid), then claim a
 * session slot.  Return type/static qualifier precede this excerpt.
 */
686 nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata)
688 struct nfs42_layoutstat_data *data = calldata;
689 struct inode *inode = data->inode;
690 struct nfs_server *server = NFS_SERVER(inode);
691 struct pnfs_layout_hdr *lo;
693 spin_lock(&inode->i_lock);
694 lo = NFS_I(inode)->layout;
695 if (!pnfs_layout_is_valid(lo)) {
696 spin_unlock(&inode->i_lock);
700 nfs4_stateid_copy(&data->args.stateid, &lo->plh_stateid);
701 spin_unlock(&inode->i_lock);
702 nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
703 &data->res.seq_res, task);
/*
 * rpc_call_done for LAYOUTSTATS: per-error recovery.
 *  - fatal handle/stateid errors: tear down or invalidate the layout
 *    (only if our stateid still matches the current one);
 *  - -NFS4ERR_OLD_STATEID: restart with the newer stateid if possible;
 *  - unknown errors: drop the LAYOUTSTATS capability.
 * NOTE(review): several case labels, LIST_HEAD(head) declaration and
 * break/goto lines are missing from this excerpt — comments only.
 */
707 nfs42_layoutstat_done(struct rpc_task *task, void *calldata)
709 struct nfs42_layoutstat_data *data = calldata;
710 struct inode *inode = data->inode;
711 struct pnfs_layout_hdr *lo;
713 if (!nfs4_sequence_done(task, &data->res.seq_res))
716 switch (task->tk_status) {
719 case -NFS4ERR_BADHANDLE:
721 pnfs_destroy_layout(NFS_I(inode));
723 case -NFS4ERR_EXPIRED:
724 case -NFS4ERR_ADMIN_REVOKED:
725 case -NFS4ERR_DELEG_REVOKED:
726 case -NFS4ERR_STALE_STATEID:
727 case -NFS4ERR_BAD_STATEID:
728 spin_lock(&inode->i_lock);
729 lo = NFS_I(inode)->layout;
/* Only invalidate if the server rejected the stateid we actually sent. */
730 if (pnfs_layout_is_valid(lo) &&
731 nfs4_stateid_match(&data->args.stateid,
736 * Mark the bad layout state as invalid, then retry
737 * with the current stateid.
739 pnfs_mark_layout_stateid_invalid(lo, &head);
740 spin_unlock(&inode->i_lock);
741 pnfs_free_lseg_list(&head);
742 nfs_commit_inode(inode, 0);
744 spin_unlock(&inode->i_lock);
746 case -NFS4ERR_OLD_STATEID:
747 spin_lock(&inode->i_lock);
748 lo = NFS_I(inode)->layout;
749 if (pnfs_layout_is_valid(lo) &&
750 nfs4_stateid_match_other(&data->args.stateid,
752 /* Do we need to delay before resending? */
753 if (!nfs4_stateid_is_newer(&lo->plh_stateid,
754 &data->args.stateid))
756 rpc_restart_call_prepare(task);
758 spin_unlock(&inode->i_lock);
/* Unrecognized failure: stop sending LAYOUTSTATS to this server. */
762 NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTSTATS;
765 trace_nfs4_layoutstats(inode, &data->args.stateid, task->tk_status);
/*
 * rpc_release for LAYOUTSTATS: free per-device private data, drop the
 * layout header and inode references, and clear NFS_INO_LAYOUTSTATS
 * (with memory barriers) so a new stats RPC may be scheduled.
 */
769 nfs42_layoutstat_release(void *calldata)
771 struct nfs42_layoutstat_data *data = calldata;
772 struct nfs42_layoutstat_devinfo *devinfo = data->args.devinfo;
775 for (i = 0; i < data->args.num_dev; i++) {
776 if (devinfo[i].ld_private.ops && devinfo[i].ld_private.ops->free)
777 devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
780 pnfs_put_layout_hdr(NFS_I(data->args.inode)->layout);
/* Barrier pair ensures the flag clear is ordered against prior frees. */
781 smp_mb__before_atomic();
782 clear_bit(NFS_INO_LAYOUTSTATS, &NFS_I(data->args.inode)->flags);
783 smp_mb__after_atomic();
784 nfs_iput_and_deactive(data->inode);
785 kfree(data->args.devinfo);
/* RPC callbacks wiring for the async LAYOUTSTATS task. */
789 static const struct rpc_call_ops nfs42_layoutstat_ops = {
790 .rpc_call_prepare = nfs42_layoutstat_prepare,
791 .rpc_call_done = nfs42_layoutstat_done,
792 .rpc_release = nfs42_layoutstat_release,
/*
 * Launch an asynchronous LAYOUTSTATS RPC for @data.  Takes an active
 * inode reference first; on failure to grab it, @data is released via
 * the same release path the RPC would have used.
 * NOTE(review): rpc_put_task and return lines are missing from this
 * excerpt.
 */
795 int nfs42_proc_layoutstats_generic(struct nfs_server *server,
796 struct nfs42_layoutstat_data *data)
798 struct rpc_message msg = {
799 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTSTATS],
800 .rpc_argp = &data->args,
801 .rpc_resp = &data->res,
803 struct rpc_task_setup task_setup = {
804 .rpc_client = server->client,
806 .callback_ops = &nfs42_layoutstat_ops,
807 .callback_data = data,
808 .flags = RPC_TASK_ASYNC,
810 struct rpc_task *task;
812 data->inode = nfs_igrab_and_active(data->args.inode);
814 nfs42_layoutstat_release(data);
817 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
818 task = rpc_run_task(&task_setup);
820 return PTR_ERR(task);
/*
 * Allocate and initialize a LAYOUTERROR data blob: takes an active inode
 * reference and a reference on @lseg.  Error-unwind lines (freeing on
 * failed igrab) are missing from this excerpt.
 */
825 static struct nfs42_layouterror_data *
826 nfs42_alloc_layouterror_data(struct pnfs_layout_segment *lseg, gfp_t gfp_flags)
828 struct nfs42_layouterror_data *data;
829 struct inode *inode = lseg->pls_layout->plh_inode;
831 data = kzalloc(sizeof(*data), gfp_flags);
833 data->args.inode = data->inode = nfs_igrab_and_active(inode);
835 data->lseg = pnfs_get_lseg(lseg);
838 nfs_iput_and_deactive(data->inode);
/* Drop the lseg and inode references taken at allocation, then free @data. */
846 nfs42_free_layouterror_data(struct nfs42_layouterror_data *data)
848 pnfs_put_lseg(data->lseg);
849 nfs_iput_and_deactive(data->inode);
/*
 * rpc_call_prepare for LAYOUTERROR: refresh each error entry's stateid
 * from the current layout under i_lock (bailing out if the layout is no
 * longer valid), then claim a session slot.
 */
854 nfs42_layouterror_prepare(struct rpc_task *task, void *calldata)
856 struct nfs42_layouterror_data *data = calldata;
857 struct inode *inode = data->inode;
858 struct nfs_server *server = NFS_SERVER(inode);
859 struct pnfs_layout_hdr *lo = data->lseg->pls_layout;
862 spin_lock(&inode->i_lock);
863 if (!pnfs_layout_is_valid(lo)) {
864 spin_unlock(&inode->i_lock);
868 for (i = 0; i < data->args.num_errors; i++)
869 nfs4_stateid_copy(&data->args.errors[i].stateid,
871 spin_unlock(&inode->i_lock);
872 nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
873 &data->res.seq_res, task);
/*
 * rpc_call_done for LAYOUTERROR: mirrors nfs42_layoutstat_done() —
 * invalidate/destroy the layout on fatal stateid errors, restart on
 * -NFS4ERR_OLD_STATEID when a newer stateid exists, and drop the
 * LAYOUTERROR capability on unknown errors.
 * NOTE(review): several case labels, LIST_HEAD(head) and break lines
 * are missing from this excerpt — comments only, code untouched.
 */
877 nfs42_layouterror_done(struct rpc_task *task, void *calldata)
879 struct nfs42_layouterror_data *data = calldata;
880 struct inode *inode = data->inode;
881 struct pnfs_layout_hdr *lo = data->lseg->pls_layout;
883 if (!nfs4_sequence_done(task, &data->res.seq_res))
886 switch (task->tk_status) {
889 case -NFS4ERR_BADHANDLE:
891 pnfs_destroy_layout(NFS_I(inode));
893 case -NFS4ERR_EXPIRED:
894 case -NFS4ERR_ADMIN_REVOKED:
895 case -NFS4ERR_DELEG_REVOKED:
896 case -NFS4ERR_STALE_STATEID:
897 case -NFS4ERR_BAD_STATEID:
898 spin_lock(&inode->i_lock);
/* First error entry's stateid stands in for the whole batch. */
899 if (pnfs_layout_is_valid(lo) &&
900 nfs4_stateid_match(&data->args.errors[0].stateid,
905 * Mark the bad layout state as invalid, then retry
906 * with the current stateid.
908 pnfs_mark_layout_stateid_invalid(lo, &head);
909 spin_unlock(&inode->i_lock);
910 pnfs_free_lseg_list(&head);
911 nfs_commit_inode(inode, 0);
913 spin_unlock(&inode->i_lock);
915 case -NFS4ERR_OLD_STATEID:
916 spin_lock(&inode->i_lock);
917 if (pnfs_layout_is_valid(lo) &&
918 nfs4_stateid_match_other(&data->args.errors[0].stateid,
920 /* Do we need to delay before resending? */
921 if (!nfs4_stateid_is_newer(&lo->plh_stateid,
922 &data->args.errors[0].stateid))
924 rpc_restart_call_prepare(task);
926 spin_unlock(&inode->i_lock);
930 NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTERROR;
933 trace_nfs4_layouterror(inode, &data->args.errors[0].stateid,
/* rpc_release: hand the data blob back to the common free helper. */
938 nfs42_layouterror_release(void *calldata)
940 struct nfs42_layouterror_data *data = calldata;
942 nfs42_free_layouterror_data(data);
/* RPC callbacks wiring for the async LAYOUTERROR task. */
945 static const struct rpc_call_ops nfs42_layouterror_ops = {
946 .rpc_call_prepare = nfs42_layouterror_prepare,
947 .rpc_call_done = nfs42_layouterror_done,
948 .rpc_release = nfs42_layouterror_release,
/*
 * Report up to NFS42_LAYOUTERROR_MAX layout errors for @lseg's layout
 * via an asynchronous LAYOUTERROR RPC.  Gated on NFS_CAP_LAYOUTERROR.
 * NOTE(review): allocation-failure check, rpc_put_task and return lines
 * are missing from this excerpt.
 */
951 int nfs42_proc_layouterror(struct pnfs_layout_segment *lseg,
952 const struct nfs42_layout_error *errors, size_t n)
954 struct inode *inode = lseg->pls_layout->plh_inode;
955 struct nfs42_layouterror_data *data;
956 struct rpc_task *task;
957 struct rpc_message msg = {
958 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTERROR],
960 struct rpc_task_setup task_setup = {
962 .callback_ops = &nfs42_layouterror_ops,
963 .flags = RPC_TASK_ASYNC,
967 if (!nfs_server_capable(inode, NFS_CAP_LAYOUTERROR))
969 if (n > NFS42_LAYOUTERROR_MAX)
971 data = nfs42_alloc_layouterror_data(lseg, GFP_NOFS);
/* Copy caller's error records; counts drive the XDR encode/decode. */
974 for (i = 0; i < n; i++) {
975 data->args.errors[i] = errors[i];
976 data->args.num_errors++;
977 data->res.num_errors++;
979 msg.rpc_argp = &data->args;
980 msg.rpc_resp = &data->res;
981 task_setup.callback_data = data;
982 task_setup.rpc_client = NFS_SERVER(inode)->client;
983 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
984 task = rpc_run_task(&task_setup);
986 return PTR_ERR(task);
990 EXPORT_SYMBOL_GPL(nfs42_proc_layouterror);
/*
 * Issue one CLONE RPC from @src_f to @dst_f.  Read/write stateids are
 * selected for source/destination; post-op attributes for the
 * destination inode are requested and applied on success.
 * NOTE(review): missing lines in this excerpt include the .count/.
 * dst_bitmask args, fattr-allocation error check and labels.
 */
992 static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
993 struct file *dst_f, struct nfs_lock_context *src_lock,
994 struct nfs_lock_context *dst_lock, loff_t src_offset,
995 loff_t dst_offset, loff_t count)
997 struct inode *src_inode = file_inode(src_f);
998 struct inode *dst_inode = file_inode(dst_f);
999 struct nfs_server *server = NFS_SERVER(dst_inode);
1000 struct nfs42_clone_args args = {
1001 .src_fh = NFS_FH(src_inode),
1002 .dst_fh = NFS_FH(dst_inode),
1003 .src_offset = src_offset,
1004 .dst_offset = dst_offset,
1006 .dst_bitmask = server->cache_consistency_bitmask,
1008 struct nfs42_clone_res res = {
1013 msg->rpc_argp = &args;
1014 msg->rpc_resp = &res;
1016 status = nfs4_set_rw_stateid(&args.src_stateid, src_lock->open_context,
1017 src_lock, FMODE_READ);
/* -EAGAIN → -NFS4ERR_BAD_STATEID so the caller's exception loop retries. */
1019 if (status == -EAGAIN)
1020 status = -NFS4ERR_BAD_STATEID;
1023 status = nfs4_set_rw_stateid(&args.dst_stateid, dst_lock->open_context,
1024 dst_lock, FMODE_WRITE);
1026 if (status == -EAGAIN)
1027 status = -NFS4ERR_BAD_STATEID;
1031 res.dst_fattr = nfs_alloc_fattr();
1035 status = nfs4_call_sync(server->client, server, msg,
1036 &args.seq_args, &res.seq_res, 0);
1038 status = nfs_post_op_update_inode(dst_inode, res.dst_fattr);
1040 kfree(res.dst_fattr);
/*
 * Public CLONE entry point: capability-gated, sets up lock contexts for
 * both files and retries _nfs42_proc_clone() under separate source and
 * destination exception handlers.  -ENOTSUPP/-EOPNOTSUPP clears the
 * CLONE capability.
 */
1044 int nfs42_proc_clone(struct file *src_f, struct file *dst_f,
1045 loff_t src_offset, loff_t dst_offset, loff_t count)
1047 struct rpc_message msg = {
1048 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLONE],
1050 struct inode *inode = file_inode(src_f);
1051 struct nfs_server *server = NFS_SERVER(file_inode(src_f));
1052 struct nfs_lock_context *src_lock;
1053 struct nfs_lock_context *dst_lock;
1054 struct nfs4_exception src_exception = { };
1055 struct nfs4_exception dst_exception = { };
1058 if (!nfs_server_capable(inode, NFS_CAP_CLONE))
1061 src_lock = nfs_get_lock_context(nfs_file_open_context(src_f));
1062 if (IS_ERR(src_lock))
1063 return PTR_ERR(src_lock);
1065 src_exception.inode = file_inode(src_f);
1066 src_exception.state = src_lock->open_context->state;
1068 dst_lock = nfs_get_lock_context(nfs_file_open_context(dst_f));
1069 if (IS_ERR(dst_lock)) {
1070 err = PTR_ERR(dst_lock);
1071 goto out_put_src_lock;
1074 dst_exception.inode = file_inode(dst_f);
1075 dst_exception.state = dst_lock->open_context->state;
1078 err = _nfs42_proc_clone(&msg, src_f, dst_f, src_lock, dst_lock,
1079 src_offset, dst_offset, count);
1080 if (err == -ENOTSUPP || err == -EOPNOTSUPP) {
1081 NFS_SERVER(inode)->caps &= ~NFS_CAP_CLONE;
/* Run both exception handlers; either may request a retry. */
1086 err2 = nfs4_handle_exception(server, err, &src_exception);
1087 err = nfs4_handle_exception(server, err, &dst_exception);
1090 } while (src_exception.retry || dst_exception.retry);
1092 nfs_put_lock_context(dst_lock);
1094 nfs_put_lock_context(src_lock);
/* Max pages needed to hold one xattr value (XATTR_SIZE_MAX bytes). */
1098 #define NFS4XATTR_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)
/*
 * Issue one REMOVEXATTR RPC for @name on @inode and fold the returned
 * change info into the inode's attribute cache on success.
 * NOTE(review): the .xattr_name arg, msg argp/resp wiring and success
 * check are missing from this excerpt.
 */
1100 static int _nfs42_proc_removexattr(struct inode *inode, const char *name)
1102 struct nfs_server *server = NFS_SERVER(inode);
1103 struct nfs42_removexattrargs args = {
1104 .fh = NFS_FH(inode),
1107 struct nfs42_removexattrres res;
1108 struct rpc_message msg = {
1109 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVEXATTR],
/* Timestamp taken before the RPC so change-info aging is conservative. */
1114 unsigned long timestamp = jiffies;
1116 ret = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
1119 nfs4_update_changeattr(inode, &res.cinfo, timestamp, 0);
/*
 * Issue one SETXATTR RPC: the value @buf is mapped into pages with
 * nfs4_buf_to_pages_noslab(), sent, and the pages released afterwards.
 * Rejects values larger than the server's advertised sxasize.
 * NOTE(review): .xattr_name arg, np error check and return lines are
 * missing from this excerpt.
 */
1124 static int _nfs42_proc_setxattr(struct inode *inode, const char *name,
1125 const void *buf, size_t buflen, int flags)
1127 struct nfs_server *server = NFS_SERVER(inode);
1128 struct page *pages[NFS4XATTR_MAXPAGES];
1129 struct nfs42_setxattrargs arg = {
1130 .fh = NFS_FH(inode),
1131 .xattr_pages = pages,
1132 .xattr_len = buflen,
1134 .xattr_flags = flags,
1136 struct nfs42_setxattrres res;
1137 struct rpc_message msg = {
1138 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETXATTR],
1143 unsigned long timestamp = jiffies;
1145 if (buflen > server->sxasize)
1149 np = nfs4_buf_to_pages_noslab(buf, buflen, arg.xattr_pages);
1155 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
/* Release the page references taken by nfs4_buf_to_pages_noslab(). */
1158 for (; np > 0; np--)
1159 put_page(pages[np - 1]);
1162 nfs4_update_changeattr(inode, &res.cinfo, timestamp, 0);
/*
 * Issue one GETXATTR RPC.  The reply lands in @pages; on success it is
 * always inserted into the xattr cache (even for length-only queries or
 * replies larger than @buflen) and then copied into @buf if it fits.
 * Returns the xattr length or a negative errno.
 * NOTE(review): .xattr_name arg, error checks and the page-free loop
 * bounds are partially missing from this excerpt.
 */
1167 static ssize_t _nfs42_proc_getxattr(struct inode *inode, const char *name,
1168 void *buf, size_t buflen)
1170 struct nfs_server *server = NFS_SERVER(inode);
1171 struct page *pages[NFS4XATTR_MAXPAGES] = {};
1172 struct nfs42_getxattrargs arg = {
1173 .fh = NFS_FH(inode),
1174 .xattr_pages = pages,
1175 .xattr_len = buflen,
1178 struct nfs42_getxattrres res;
1179 struct rpc_message msg = {
1180 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETXATTR],
1186 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
1192 * Normally, the caching is done one layer up, but for successful
1193 * RPCS, always cache the result here, even if the caller was
1194 * just querying the length, or if the reply was too big for
1195 * the caller. This avoids a second RPC in the case of the
1196 * common query-alloc-retrieve cycle for xattrs.
1198 * Note that xattr_len is always capped to XATTR_SIZE_MAX.
1201 nfs4_xattr_cache_add(inode, name, NULL, pages, res.xattr_len);
/* Caller's buffer too small: ERANGE path (branch body omitted here). */
1204 if (res.xattr_len > buflen)
1206 _copy_from_pages(buf, pages, 0, res.xattr_len);
/* Free the reply pages actually used. */
1209 np = DIV_ROUND_UP(res.xattr_len, PAGE_SIZE);
1211 __free_page(pages[np]);
1213 return res.xattr_len;
/*
 * Issue one LISTXATTRS RPC.  Allocates a scratch page for XDR decoding
 * plus a page array sized from the requested buffer length (capped at
 * the server's lxasize); on success returns the copied length and the
 * continuation cookie/eof via @cookiep/@eofp.
 * NOTE(review): several lines (arg.count, res fields, error labels,
 * kfree(pages)) are missing from this excerpt.
 */
1216 static ssize_t _nfs42_proc_listxattrs(struct inode *inode, void *buf,
1217 size_t buflen, u64 *cookiep, bool *eofp)
1219 struct nfs_server *server = NFS_SERVER(inode);
1220 struct page **pages;
1221 struct nfs42_listxattrsargs arg = {
1222 .fh = NFS_FH(inode),
1225 struct nfs42_listxattrsres res = {
1228 .xattr_len = buflen,
1230 struct rpc_message msg = {
1231 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LISTXATTRS],
1239 res.scratch = alloc_page(GFP_KERNEL);
1243 xdrlen = nfs42_listxattr_xdrsize(buflen);
1244 if (xdrlen > server->lxasize)
1245 xdrlen = server->lxasize;
/* +1 page of headroom for XDR framing overhead. */
1246 np = xdrlen / PAGE_SIZE + 1;
1248 pages = kcalloc(np, sizeof(struct page *), GFP_KERNEL);
1249 if (pages == NULL) {
1250 __free_page(res.scratch);
1254 arg.xattr_pages = pages;
1257 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
1262 *cookiep = res.cookie;
1268 __free_page(pages[np]);
1271 __free_page(res.scratch);
/* Retry wrapper: loop _nfs42_proc_getxattr() through the nfs4 exception handler. */
1278 ssize_t nfs42_proc_getxattr(struct inode *inode, const char *name,
1279 void *buf, size_t buflen)
1281 struct nfs4_exception exception = { };
1285 err = _nfs42_proc_getxattr(inode, name, buf, buflen);
1288 err = nfs4_handle_exception(NFS_SERVER(inode), err,
1290 } while (exception.retry);
/* Retry wrapper: loop _nfs42_proc_setxattr() through the nfs4 exception handler. */
1295 int nfs42_proc_setxattr(struct inode *inode, const char *name,
1296 const void *buf, size_t buflen, int flags)
1298 struct nfs4_exception exception = { };
1302 err = _nfs42_proc_setxattr(inode, name, buf, buflen, flags);
1305 err = nfs4_handle_exception(NFS_SERVER(inode), err,
1307 } while (exception.retry);
/* Retry wrapper: loop _nfs42_proc_listxattrs() through the nfs4 exception handler. */
1312 ssize_t nfs42_proc_listxattrs(struct inode *inode, void *buf,
1313 size_t buflen, u64 *cookiep, bool *eofp)
1315 struct nfs4_exception exception = { };
1319 err = _nfs42_proc_listxattrs(inode, buf, buflen,
1323 err = nfs4_handle_exception(NFS_SERVER(inode), err,
1325 } while (exception.retry);
1330 int nfs42_proc_removexattr(struct inode *inode, const char *name)
1332 struct nfs4_exception exception = { };
1336 err = _nfs42_proc_removexattr(inode, name);
1339 err = nfs4_handle_exception(NFS_SERVER(inode), err,
1341 } while (exception.retry);