// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014 Anna Schumaker <Anna.Schumaker@Netapp.com>
 */
6 #include <linux/sunrpc/addr.h>
7 #include <linux/sunrpc/sched.h>
9 #include <linux/nfs3.h>
10 #include <linux/nfs4.h>
11 #include <linux/nfs_xdr.h>
12 #include <linux/nfs_fs.h>
17 #include "nfs4session.h"
19 #include "delegation.h"
20 #include "nfs4trace.h"
22 #define NFSDBG_FACILITY NFSDBG_PROC
23 static int nfs42_do_offload_cancel_async(struct file *dst, nfs4_stateid *std);
25 static void nfs42_set_netaddr(struct file *filep, struct nfs42_netaddr *naddr)
27 struct nfs_client *clp = (NFS_SERVER(file_inode(filep)))->nfs_client;
28 unsigned short port = 2049;
31 naddr->netid_len = scnprintf(naddr->netid,
32 sizeof(naddr->netid), "%s",
33 rpc_peeraddr2str(clp->cl_rpcclient,
35 naddr->addr_len = scnprintf(naddr->addr,
38 rpc_peeraddr2str(clp->cl_rpcclient,
40 port >> 8, port & 255);
44 static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
45 struct nfs_lock_context *lock, loff_t offset, loff_t len)
47 struct inode *inode = file_inode(filep);
48 struct nfs_server *server = NFS_SERVER(inode);
49 struct nfs42_falloc_args args = {
50 .falloc_fh = NFS_FH(inode),
51 .falloc_offset = offset,
53 .falloc_bitmask = nfs4_fattr_bitmap,
55 struct nfs42_falloc_res res = {
56 .falloc_server = server,
60 msg->rpc_argp = &args;
63 status = nfs4_set_rw_stateid(&args.falloc_stateid, lock->open_context,
66 if (status == -EAGAIN)
67 status = -NFS4ERR_BAD_STATEID;
71 res.falloc_fattr = nfs_alloc_fattr();
72 if (!res.falloc_fattr)
75 status = nfs4_call_sync(server->client, server, msg,
76 &args.seq_args, &res.seq_res, 0);
78 status = nfs_post_op_update_inode(inode, res.falloc_fattr);
80 kfree(res.falloc_fattr);
84 static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
85 loff_t offset, loff_t len)
87 struct nfs_server *server = NFS_SERVER(file_inode(filep));
88 struct nfs4_exception exception = { };
89 struct nfs_lock_context *lock;
92 lock = nfs_get_lock_context(nfs_file_open_context(filep));
96 exception.inode = file_inode(filep);
97 exception.state = lock->open_context->state;
100 err = _nfs42_proc_fallocate(msg, filep, lock, offset, len);
101 if (err == -ENOTSUPP) {
105 err = nfs4_handle_exception(server, err, &exception);
106 } while (exception.retry);
108 nfs_put_lock_context(lock);
112 int nfs42_proc_allocate(struct file *filep, loff_t offset, loff_t len)
114 struct rpc_message msg = {
115 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ALLOCATE],
117 struct inode *inode = file_inode(filep);
120 if (!nfs_server_capable(inode, NFS_CAP_ALLOCATE))
125 err = nfs42_proc_fallocate(&msg, filep, offset, len);
126 if (err == -EOPNOTSUPP)
127 NFS_SERVER(inode)->caps &= ~NFS_CAP_ALLOCATE;
133 int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
135 struct rpc_message msg = {
136 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DEALLOCATE],
138 struct inode *inode = file_inode(filep);
141 if (!nfs_server_capable(inode, NFS_CAP_DEALLOCATE))
145 err = nfs_sync_inode(inode);
149 err = nfs42_proc_fallocate(&msg, filep, offset, len);
151 truncate_pagecache_range(inode, offset, (offset + len) -1);
152 if (err == -EOPNOTSUPP)
153 NFS_SERVER(inode)->caps &= ~NFS_CAP_DEALLOCATE;
159 static int handle_async_copy(struct nfs42_copy_res *res,
160 struct nfs_server *dst_server,
161 struct nfs_server *src_server,
164 nfs4_stateid *src_stateid,
167 struct nfs4_copy_state *copy, *tmp_copy;
168 int status = NFS4_OK;
169 bool found_pending = false;
170 struct nfs_open_context *dst_ctx = nfs_file_open_context(dst);
171 struct nfs_open_context *src_ctx = nfs_file_open_context(src);
173 copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS);
177 spin_lock(&dst_server->nfs_client->cl_lock);
178 list_for_each_entry(tmp_copy,
179 &dst_server->nfs_client->pending_cb_stateids,
181 if (memcmp(&res->write_res.stateid, &tmp_copy->stateid,
184 found_pending = true;
185 list_del(&tmp_copy->copies);
189 spin_unlock(&dst_server->nfs_client->cl_lock);
195 memcpy(©->stateid, &res->write_res.stateid, NFS4_STATEID_SIZE);
196 init_completion(©->completion);
197 copy->parent_dst_state = dst_ctx->state;
198 copy->parent_src_state = src_ctx->state;
200 list_add_tail(©->copies, &dst_server->ss_copies);
201 spin_unlock(&dst_server->nfs_client->cl_lock);
203 if (dst_server != src_server) {
204 spin_lock(&src_server->nfs_client->cl_lock);
205 list_add_tail(©->src_copies, &src_server->ss_copies);
206 spin_unlock(&src_server->nfs_client->cl_lock);
209 status = wait_for_completion_interruptible(©->completion);
210 spin_lock(&dst_server->nfs_client->cl_lock);
211 list_del_init(©->copies);
212 spin_unlock(&dst_server->nfs_client->cl_lock);
213 if (dst_server != src_server) {
214 spin_lock(&src_server->nfs_client->cl_lock);
215 list_del_init(©->src_copies);
216 spin_unlock(&src_server->nfs_client->cl_lock);
218 if (status == -ERESTARTSYS) {
220 } else if (copy->flags || copy->error == NFS4ERR_PARTNER_NO_AUTH) {
226 res->write_res.count = copy->count;
227 memcpy(&res->write_res.verifier, ©->verf, sizeof(copy->verf));
228 status = -copy->error;
234 nfs42_do_offload_cancel_async(dst, ©->stateid);
235 if (!nfs42_files_from_same_server(src, dst))
236 nfs42_do_offload_cancel_async(src, src_stateid);
240 static int process_copy_commit(struct file *dst, loff_t pos_dst,
241 struct nfs42_copy_res *res)
243 struct nfs_commitres cres;
244 int status = -ENOMEM;
246 cres.verf = kzalloc(sizeof(struct nfs_writeverf), GFP_NOFS);
250 status = nfs4_proc_commit(dst, pos_dst, res->write_res.count, &cres);
253 if (nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
254 &cres.verf->verifier)) {
255 dprintk("commit verf differs from copy verf\n");
264 static ssize_t _nfs42_proc_copy(struct file *src,
265 struct nfs_lock_context *src_lock,
267 struct nfs_lock_context *dst_lock,
268 struct nfs42_copy_args *args,
269 struct nfs42_copy_res *res,
270 struct nl4_server *nss,
271 nfs4_stateid *cnr_stateid,
274 struct rpc_message msg = {
275 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COPY],
279 struct inode *dst_inode = file_inode(dst);
280 struct inode *src_inode = file_inode(src);
281 struct nfs_server *dst_server = NFS_SERVER(dst_inode);
282 struct nfs_server *src_server = NFS_SERVER(src_inode);
283 loff_t pos_src = args->src_pos;
284 loff_t pos_dst = args->dst_pos;
285 size_t count = args->count;
290 nfs4_stateid_copy(&args->src_stateid, cnr_stateid);
292 status = nfs4_set_rw_stateid(&args->src_stateid,
293 src_lock->open_context, src_lock, FMODE_READ);
295 if (status == -EAGAIN)
296 status = -NFS4ERR_BAD_STATEID;
300 status = nfs_filemap_write_and_wait_range(file_inode(src)->i_mapping,
301 pos_src, pos_src + (loff_t)count - 1);
305 status = nfs4_set_rw_stateid(&args->dst_stateid, dst_lock->open_context,
306 dst_lock, FMODE_WRITE);
308 if (status == -EAGAIN)
309 status = -NFS4ERR_BAD_STATEID;
313 status = nfs_sync_inode(dst_inode);
317 res->commit_res.verf = NULL;
319 res->commit_res.verf =
320 kzalloc(sizeof(struct nfs_writeverf), GFP_NOFS);
321 if (!res->commit_res.verf)
324 set_bit(NFS_CLNT_SRC_SSC_COPY_STATE,
325 &src_lock->open_context->state->flags);
326 set_bit(NFS_CLNT_DST_SSC_COPY_STATE,
327 &dst_lock->open_context->state->flags);
329 status = nfs4_call_sync(dst_server->client, dst_server, &msg,
330 &args->seq_args, &res->seq_res, 0);
331 if (status == -ENOTSUPP)
332 dst_server->caps &= ~NFS_CAP_COPY;
337 nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
338 &res->commit_res.verf->verifier)) {
343 if (!res->synchronous) {
344 status = handle_async_copy(res, dst_server, src_server, src,
345 dst, &args->src_stateid, restart);
350 if ((!res->synchronous || !args->sync) &&
351 res->write_res.verifier.committed != NFS_FILE_SYNC) {
352 status = process_copy_commit(dst, pos_dst, res);
357 truncate_pagecache_range(dst_inode, pos_dst,
358 pos_dst + res->write_res.count);
359 spin_lock(&dst_inode->i_lock);
360 nfs_set_cache_invalid(
361 dst_inode, NFS_INO_REVAL_PAGECACHE | NFS_INO_REVAL_FORCED |
362 NFS_INO_INVALID_SIZE | NFS_INO_INVALID_ATTR |
363 NFS_INO_INVALID_DATA);
364 spin_unlock(&dst_inode->i_lock);
365 spin_lock(&src_inode->i_lock);
366 nfs_set_cache_invalid(src_inode, NFS_INO_REVAL_PAGECACHE |
367 NFS_INO_REVAL_FORCED |
368 NFS_INO_INVALID_ATIME);
369 spin_unlock(&src_inode->i_lock);
370 status = res->write_res.count;
373 kfree(res->commit_res.verf);
377 ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src,
378 struct file *dst, loff_t pos_dst, size_t count,
379 struct nl4_server *nss,
380 nfs4_stateid *cnr_stateid, bool sync)
382 struct nfs_server *server = NFS_SERVER(file_inode(dst));
383 struct nfs_lock_context *src_lock;
384 struct nfs_lock_context *dst_lock;
385 struct nfs42_copy_args args = {
386 .src_fh = NFS_FH(file_inode(src)),
388 .dst_fh = NFS_FH(file_inode(dst)),
393 struct nfs42_copy_res res;
394 struct nfs4_exception src_exception = {
395 .inode = file_inode(src),
396 .stateid = &args.src_stateid,
398 struct nfs4_exception dst_exception = {
399 .inode = file_inode(dst),
400 .stateid = &args.dst_stateid,
403 bool restart = false;
405 src_lock = nfs_get_lock_context(nfs_file_open_context(src));
406 if (IS_ERR(src_lock))
407 return PTR_ERR(src_lock);
409 src_exception.state = src_lock->open_context->state;
411 dst_lock = nfs_get_lock_context(nfs_file_open_context(dst));
412 if (IS_ERR(dst_lock)) {
413 err = PTR_ERR(dst_lock);
414 goto out_put_src_lock;
417 dst_exception.state = dst_lock->open_context->state;
420 inode_lock(file_inode(dst));
421 err = _nfs42_proc_copy(src, src_lock,
424 nss, cnr_stateid, &restart);
425 inode_unlock(file_inode(dst));
429 if (err == -ENOTSUPP &&
430 nfs42_files_from_same_server(src, dst)) {
433 } else if (err == -EAGAIN) {
435 dst_exception.retry = 1;
439 } else if (err == -NFS4ERR_OFFLOAD_NO_REQS && !args.sync) {
441 dst_exception.retry = 1;
443 } else if ((err == -ESTALE ||
444 err == -NFS4ERR_OFFLOAD_DENIED ||
446 !nfs42_files_from_same_server(src, dst)) {
447 nfs42_do_offload_cancel_async(src, &args.src_stateid);
452 err2 = nfs4_handle_exception(server, err, &src_exception);
453 err = nfs4_handle_exception(server, err, &dst_exception);
456 } while (src_exception.retry || dst_exception.retry);
458 nfs_put_lock_context(dst_lock);
460 nfs_put_lock_context(src_lock);
464 struct nfs42_offloadcancel_data {
465 struct nfs_server *seq_server;
466 struct nfs42_offload_status_args args;
467 struct nfs42_offload_status_res res;
470 static void nfs42_offload_cancel_prepare(struct rpc_task *task, void *calldata)
472 struct nfs42_offloadcancel_data *data = calldata;
474 nfs4_setup_sequence(data->seq_server->nfs_client,
475 &data->args.osa_seq_args,
476 &data->res.osr_seq_res, task);
479 static void nfs42_offload_cancel_done(struct rpc_task *task, void *calldata)
481 struct nfs42_offloadcancel_data *data = calldata;
483 nfs41_sequence_done(task, &data->res.osr_seq_res);
484 if (task->tk_status &&
485 nfs4_async_handle_error(task, data->seq_server, NULL,
487 rpc_restart_call_prepare(task);
490 static void nfs42_free_offloadcancel_data(void *data)
495 static const struct rpc_call_ops nfs42_offload_cancel_ops = {
496 .rpc_call_prepare = nfs42_offload_cancel_prepare,
497 .rpc_call_done = nfs42_offload_cancel_done,
498 .rpc_release = nfs42_free_offloadcancel_data,
501 static int nfs42_do_offload_cancel_async(struct file *dst,
502 nfs4_stateid *stateid)
504 struct nfs_server *dst_server = NFS_SERVER(file_inode(dst));
505 struct nfs42_offloadcancel_data *data = NULL;
506 struct nfs_open_context *ctx = nfs_file_open_context(dst);
507 struct rpc_task *task;
508 struct rpc_message msg = {
509 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OFFLOAD_CANCEL],
510 .rpc_cred = ctx->cred,
512 struct rpc_task_setup task_setup_data = {
513 .rpc_client = dst_server->client,
515 .callback_ops = &nfs42_offload_cancel_ops,
516 .workqueue = nfsiod_workqueue,
517 .flags = RPC_TASK_ASYNC,
521 if (!(dst_server->caps & NFS_CAP_OFFLOAD_CANCEL))
524 data = kzalloc(sizeof(struct nfs42_offloadcancel_data), GFP_NOFS);
528 data->seq_server = dst_server;
529 data->args.osa_src_fh = NFS_FH(file_inode(dst));
530 memcpy(&data->args.osa_stateid, stateid,
531 sizeof(data->args.osa_stateid));
532 msg.rpc_argp = &data->args;
533 msg.rpc_resp = &data->res;
534 task_setup_data.callback_data = data;
535 nfs4_init_sequence(&data->args.osa_seq_args, &data->res.osr_seq_res,
537 task = rpc_run_task(&task_setup_data);
539 return PTR_ERR(task);
540 status = rpc_wait_for_completion_task(task);
541 if (status == -ENOTSUPP)
542 dst_server->caps &= ~NFS_CAP_OFFLOAD_CANCEL;
547 static int _nfs42_proc_copy_notify(struct file *src, struct file *dst,
548 struct nfs42_copy_notify_args *args,
549 struct nfs42_copy_notify_res *res)
551 struct nfs_server *src_server = NFS_SERVER(file_inode(src));
552 struct rpc_message msg = {
553 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COPY_NOTIFY],
558 struct nfs_open_context *ctx;
559 struct nfs_lock_context *l_ctx;
561 ctx = get_nfs_open_context(nfs_file_open_context(src));
562 l_ctx = nfs_get_lock_context(ctx);
564 return PTR_ERR(l_ctx);
566 status = nfs4_set_rw_stateid(&args->cna_src_stateid, ctx, l_ctx,
568 nfs_put_lock_context(l_ctx);
570 if (status == -EAGAIN)
571 status = -NFS4ERR_BAD_STATEID;
575 status = nfs4_call_sync(src_server->client, src_server, &msg,
576 &args->cna_seq_args, &res->cnr_seq_res, 0);
577 if (status == -ENOTSUPP)
578 src_server->caps &= ~NFS_CAP_COPY_NOTIFY;
580 put_nfs_open_context(nfs_file_open_context(src));
584 int nfs42_proc_copy_notify(struct file *src, struct file *dst,
585 struct nfs42_copy_notify_res *res)
587 struct nfs_server *src_server = NFS_SERVER(file_inode(src));
588 struct nfs42_copy_notify_args *args;
589 struct nfs4_exception exception = {
590 .inode = file_inode(src),
594 if (!(src_server->caps & NFS_CAP_COPY_NOTIFY))
597 args = kzalloc(sizeof(struct nfs42_copy_notify_args), GFP_NOFS);
601 args->cna_src_fh = NFS_FH(file_inode(src)),
602 args->cna_dst.nl4_type = NL4_NETADDR;
603 nfs42_set_netaddr(dst, &args->cna_dst.u.nl4_addr);
604 exception.stateid = &args->cna_src_stateid;
607 status = _nfs42_proc_copy_notify(src, dst, args, res);
608 if (status == -ENOTSUPP) {
609 status = -EOPNOTSUPP;
612 status = nfs4_handle_exception(src_server, status, &exception);
613 } while (exception.retry);
620 static loff_t _nfs42_proc_llseek(struct file *filep,
621 struct nfs_lock_context *lock, loff_t offset, int whence)
623 struct inode *inode = file_inode(filep);
624 struct nfs42_seek_args args = {
625 .sa_fh = NFS_FH(inode),
627 .sa_what = (whence == SEEK_HOLE) ?
628 NFS4_CONTENT_HOLE : NFS4_CONTENT_DATA,
630 struct nfs42_seek_res res;
631 struct rpc_message msg = {
632 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEEK],
636 struct nfs_server *server = NFS_SERVER(inode);
639 if (!nfs_server_capable(inode, NFS_CAP_SEEK))
642 status = nfs4_set_rw_stateid(&args.sa_stateid, lock->open_context,
645 if (status == -EAGAIN)
646 status = -NFS4ERR_BAD_STATEID;
650 status = nfs_filemap_write_and_wait_range(inode->i_mapping,
655 status = nfs4_call_sync(server->client, server, &msg,
656 &args.seq_args, &res.seq_res, 0);
657 if (status == -ENOTSUPP)
658 server->caps &= ~NFS_CAP_SEEK;
662 return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes);
665 loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
667 struct nfs_server *server = NFS_SERVER(file_inode(filep));
668 struct nfs4_exception exception = { };
669 struct nfs_lock_context *lock;
672 lock = nfs_get_lock_context(nfs_file_open_context(filep));
674 return PTR_ERR(lock);
676 exception.inode = file_inode(filep);
677 exception.state = lock->open_context->state;
680 err = _nfs42_proc_llseek(filep, lock, offset, whence);
683 if (err == -ENOTSUPP) {
687 err = nfs4_handle_exception(server, err, &exception);
688 } while (exception.retry);
690 nfs_put_lock_context(lock);
696 nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata)
698 struct nfs42_layoutstat_data *data = calldata;
699 struct inode *inode = data->inode;
700 struct nfs_server *server = NFS_SERVER(inode);
701 struct pnfs_layout_hdr *lo;
703 spin_lock(&inode->i_lock);
704 lo = NFS_I(inode)->layout;
705 if (!pnfs_layout_is_valid(lo)) {
706 spin_unlock(&inode->i_lock);
710 nfs4_stateid_copy(&data->args.stateid, &lo->plh_stateid);
711 spin_unlock(&inode->i_lock);
712 nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
713 &data->res.seq_res, task);
/*
 * rpc_call_done for LAYOUTSTATS: classify the server's status.
 * NOTE(review): this block is extraction-mangled — lines are missing
 * and each line still carries its original file line number; the
 * comments below describe the visible logic only.
 */
717 nfs42_layoutstat_done(struct rpc_task *task, void *calldata)
719 struct nfs42_layoutstat_data *data = calldata;
720 struct inode *inode = data->inode;
721 struct pnfs_layout_hdr *lo;
/* Bail out if the slot sequence wants the call resent. */
723 if (!nfs4_sequence_done(task, &data->res.seq_res))
726 switch (task->tk_status) {
/* Handle gone: the whole layout is useless, tear it down. */
729 case -NFS4ERR_BADHANDLE:
731 pnfs_destroy_layout(NFS_I(inode));
/* Revoked/stale/bad stateid family: invalidate a matching layout. */
733 case -NFS4ERR_EXPIRED:
734 case -NFS4ERR_ADMIN_REVOKED:
735 case -NFS4ERR_DELEG_REVOKED:
736 case -NFS4ERR_STALE_STATEID:
737 case -NFS4ERR_BAD_STATEID:
738 spin_lock(&inode->i_lock);
739 lo = NFS_I(inode)->layout;
/* Only act if the failing stateid is still the layout's stateid. */
740 if (pnfs_layout_is_valid(lo) &&
741 nfs4_stateid_match(&data->args.stateid,
746 * Mark the bad layout state as invalid, then retry
747 * with the current stateid.
749 pnfs_mark_layout_stateid_invalid(lo, &head);
750 spin_unlock(&inode->i_lock);
751 pnfs_free_lseg_list(&head);
752 nfs_commit_inode(inode, 0);
/* else-branch: just drop the lock, nothing to invalidate. */
754 spin_unlock(&inode->i_lock);
/* Old stateid: possibly resend with the up-to-date stateid. */
756 case -NFS4ERR_OLD_STATEID:
757 spin_lock(&inode->i_lock);
758 lo = NFS_I(inode)->layout;
759 if (pnfs_layout_is_valid(lo) &&
760 nfs4_stateid_match_other(&data->args.stateid,
762 /* Do we need to delay before resending? */
763 if (!nfs4_stateid_is_newer(&lo->plh_stateid,
764 &data->args.stateid))
766 rpc_restart_call_prepare(task);
768 spin_unlock(&inode->i_lock);
/* Server doesn't support LAYOUTSTATS at all: drop the capability. */
772 NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTSTATS;
775 trace_nfs4_layoutstats(inode, &data->args.stateid, task->tk_status);
779 nfs42_layoutstat_release(void *calldata)
781 struct nfs42_layoutstat_data *data = calldata;
782 struct nfs42_layoutstat_devinfo *devinfo = data->args.devinfo;
785 for (i = 0; i < data->args.num_dev; i++) {
786 if (devinfo[i].ld_private.ops && devinfo[i].ld_private.ops->free)
787 devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
790 pnfs_put_layout_hdr(NFS_I(data->args.inode)->layout);
791 smp_mb__before_atomic();
792 clear_bit(NFS_INO_LAYOUTSTATS, &NFS_I(data->args.inode)->flags);
793 smp_mb__after_atomic();
794 nfs_iput_and_deactive(data->inode);
795 kfree(data->args.devinfo);
799 static const struct rpc_call_ops nfs42_layoutstat_ops = {
800 .rpc_call_prepare = nfs42_layoutstat_prepare,
801 .rpc_call_done = nfs42_layoutstat_done,
802 .rpc_release = nfs42_layoutstat_release,
805 int nfs42_proc_layoutstats_generic(struct nfs_server *server,
806 struct nfs42_layoutstat_data *data)
808 struct rpc_message msg = {
809 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTSTATS],
810 .rpc_argp = &data->args,
811 .rpc_resp = &data->res,
813 struct rpc_task_setup task_setup = {
814 .rpc_client = server->client,
816 .callback_ops = &nfs42_layoutstat_ops,
817 .callback_data = data,
818 .flags = RPC_TASK_ASYNC,
820 struct rpc_task *task;
822 data->inode = nfs_igrab_and_active(data->args.inode);
824 nfs42_layoutstat_release(data);
827 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
828 task = rpc_run_task(&task_setup);
830 return PTR_ERR(task);
835 static struct nfs42_layouterror_data *
836 nfs42_alloc_layouterror_data(struct pnfs_layout_segment *lseg, gfp_t gfp_flags)
838 struct nfs42_layouterror_data *data;
839 struct inode *inode = lseg->pls_layout->plh_inode;
841 data = kzalloc(sizeof(*data), gfp_flags);
843 data->args.inode = data->inode = nfs_igrab_and_active(inode);
845 data->lseg = pnfs_get_lseg(lseg);
848 nfs_iput_and_deactive(data->inode);
856 nfs42_free_layouterror_data(struct nfs42_layouterror_data *data)
858 pnfs_put_lseg(data->lseg);
859 nfs_iput_and_deactive(data->inode);
864 nfs42_layouterror_prepare(struct rpc_task *task, void *calldata)
866 struct nfs42_layouterror_data *data = calldata;
867 struct inode *inode = data->inode;
868 struct nfs_server *server = NFS_SERVER(inode);
869 struct pnfs_layout_hdr *lo = data->lseg->pls_layout;
872 spin_lock(&inode->i_lock);
873 if (!pnfs_layout_is_valid(lo)) {
874 spin_unlock(&inode->i_lock);
878 for (i = 0; i < data->args.num_errors; i++)
879 nfs4_stateid_copy(&data->args.errors[i].stateid,
881 spin_unlock(&inode->i_lock);
882 nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
883 &data->res.seq_res, task);
/*
 * rpc_call_done for LAYOUTERROR: classify the server's status.
 * NOTE(review): this block is extraction-mangled — lines are missing
 * and each line still carries its original file line number; the
 * comments below describe the visible logic only.
 */
887 nfs42_layouterror_done(struct rpc_task *task, void *calldata)
889 struct nfs42_layouterror_data *data = calldata;
890 struct inode *inode = data->inode;
891 struct pnfs_layout_hdr *lo = data->lseg->pls_layout;
/* Bail out if the slot sequence wants the call resent. */
893 if (!nfs4_sequence_done(task, &data->res.seq_res))
896 switch (task->tk_status) {
/* Handle gone: tear down the whole layout. */
899 case -NFS4ERR_BADHANDLE:
901 pnfs_destroy_layout(NFS_I(inode));
/* Revoked/stale/bad stateid family: invalidate a matching layout. */
903 case -NFS4ERR_EXPIRED:
904 case -NFS4ERR_ADMIN_REVOKED:
905 case -NFS4ERR_DELEG_REVOKED:
906 case -NFS4ERR_STALE_STATEID:
907 case -NFS4ERR_BAD_STATEID:
908 spin_lock(&inode->i_lock);
/* Only act if the first error's stateid still matches the layout. */
909 if (pnfs_layout_is_valid(lo) &&
910 nfs4_stateid_match(&data->args.errors[0].stateid,
915 * Mark the bad layout state as invalid, then retry
916 * with the current stateid.
918 pnfs_mark_layout_stateid_invalid(lo, &head);
919 spin_unlock(&inode->i_lock);
920 pnfs_free_lseg_list(&head);
921 nfs_commit_inode(inode, 0);
/* else-branch: just drop the lock. */
923 spin_unlock(&inode->i_lock);
/* Old stateid: possibly resend with the up-to-date stateid. */
925 case -NFS4ERR_OLD_STATEID:
926 spin_lock(&inode->i_lock);
927 if (pnfs_layout_is_valid(lo) &&
928 nfs4_stateid_match_other(&data->args.errors[0].stateid,
930 /* Do we need to delay before resending? */
931 if (!nfs4_stateid_is_newer(&lo->plh_stateid,
932 &data->args.errors[0].stateid))
934 rpc_restart_call_prepare(task);
936 spin_unlock(&inode->i_lock);
/* Server doesn't support LAYOUTERROR: drop the capability. */
940 NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTERROR;
943 trace_nfs4_layouterror(inode, &data->args.errors[0].stateid,
/* rpc_release: drop all references held by the LAYOUTERROR call. */
static void
nfs42_layouterror_release(void *calldata)
{
	struct nfs42_layouterror_data *data = calldata;

	nfs42_free_layouterror_data(data);
}
955 static const struct rpc_call_ops nfs42_layouterror_ops = {
956 .rpc_call_prepare = nfs42_layouterror_prepare,
957 .rpc_call_done = nfs42_layouterror_done,
958 .rpc_release = nfs42_layouterror_release,
961 int nfs42_proc_layouterror(struct pnfs_layout_segment *lseg,
962 const struct nfs42_layout_error *errors, size_t n)
964 struct inode *inode = lseg->pls_layout->plh_inode;
965 struct nfs42_layouterror_data *data;
966 struct rpc_task *task;
967 struct rpc_message msg = {
968 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTERROR],
970 struct rpc_task_setup task_setup = {
972 .callback_ops = &nfs42_layouterror_ops,
973 .flags = RPC_TASK_ASYNC,
977 if (!nfs_server_capable(inode, NFS_CAP_LAYOUTERROR))
979 if (n > NFS42_LAYOUTERROR_MAX)
981 data = nfs42_alloc_layouterror_data(lseg, GFP_NOFS);
984 for (i = 0; i < n; i++) {
985 data->args.errors[i] = errors[i];
986 data->args.num_errors++;
987 data->res.num_errors++;
989 msg.rpc_argp = &data->args;
990 msg.rpc_resp = &data->res;
991 task_setup.callback_data = data;
992 task_setup.rpc_client = NFS_SERVER(inode)->client;
993 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
994 task = rpc_run_task(&task_setup);
996 return PTR_ERR(task);
1000 EXPORT_SYMBOL_GPL(nfs42_proc_layouterror);
1002 static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
1003 struct file *dst_f, struct nfs_lock_context *src_lock,
1004 struct nfs_lock_context *dst_lock, loff_t src_offset,
1005 loff_t dst_offset, loff_t count)
1007 struct inode *src_inode = file_inode(src_f);
1008 struct inode *dst_inode = file_inode(dst_f);
1009 struct nfs_server *server = NFS_SERVER(dst_inode);
1010 struct nfs42_clone_args args = {
1011 .src_fh = NFS_FH(src_inode),
1012 .dst_fh = NFS_FH(dst_inode),
1013 .src_offset = src_offset,
1014 .dst_offset = dst_offset,
1016 .dst_bitmask = server->cache_consistency_bitmask,
1018 struct nfs42_clone_res res = {
1023 msg->rpc_argp = &args;
1024 msg->rpc_resp = &res;
1026 status = nfs4_set_rw_stateid(&args.src_stateid, src_lock->open_context,
1027 src_lock, FMODE_READ);
1029 if (status == -EAGAIN)
1030 status = -NFS4ERR_BAD_STATEID;
1033 status = nfs4_set_rw_stateid(&args.dst_stateid, dst_lock->open_context,
1034 dst_lock, FMODE_WRITE);
1036 if (status == -EAGAIN)
1037 status = -NFS4ERR_BAD_STATEID;
1041 res.dst_fattr = nfs_alloc_fattr();
1045 status = nfs4_call_sync(server->client, server, msg,
1046 &args.seq_args, &res.seq_res, 0);
1048 status = nfs_post_op_update_inode(dst_inode, res.dst_fattr);
1050 kfree(res.dst_fattr);
1054 int nfs42_proc_clone(struct file *src_f, struct file *dst_f,
1055 loff_t src_offset, loff_t dst_offset, loff_t count)
1057 struct rpc_message msg = {
1058 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLONE],
1060 struct inode *inode = file_inode(src_f);
1061 struct nfs_server *server = NFS_SERVER(file_inode(src_f));
1062 struct nfs_lock_context *src_lock;
1063 struct nfs_lock_context *dst_lock;
1064 struct nfs4_exception src_exception = { };
1065 struct nfs4_exception dst_exception = { };
1068 if (!nfs_server_capable(inode, NFS_CAP_CLONE))
1071 src_lock = nfs_get_lock_context(nfs_file_open_context(src_f));
1072 if (IS_ERR(src_lock))
1073 return PTR_ERR(src_lock);
1075 src_exception.inode = file_inode(src_f);
1076 src_exception.state = src_lock->open_context->state;
1078 dst_lock = nfs_get_lock_context(nfs_file_open_context(dst_f));
1079 if (IS_ERR(dst_lock)) {
1080 err = PTR_ERR(dst_lock);
1081 goto out_put_src_lock;
1084 dst_exception.inode = file_inode(dst_f);
1085 dst_exception.state = dst_lock->open_context->state;
1088 err = _nfs42_proc_clone(&msg, src_f, dst_f, src_lock, dst_lock,
1089 src_offset, dst_offset, count);
1090 if (err == -ENOTSUPP || err == -EOPNOTSUPP) {
1091 NFS_SERVER(inode)->caps &= ~NFS_CAP_CLONE;
1096 err2 = nfs4_handle_exception(server, err, &src_exception);
1097 err = nfs4_handle_exception(server, err, &dst_exception);
1100 } while (src_exception.retry || dst_exception.retry);
1102 nfs_put_lock_context(dst_lock);
1104 nfs_put_lock_context(src_lock);
1108 #define NFS4XATTR_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)
1110 static int _nfs42_proc_removexattr(struct inode *inode, const char *name)
1112 struct nfs_server *server = NFS_SERVER(inode);
1113 struct nfs42_removexattrargs args = {
1114 .fh = NFS_FH(inode),
1117 struct nfs42_removexattrres res;
1118 struct rpc_message msg = {
1119 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVEXATTR],
1124 unsigned long timestamp = jiffies;
1126 ret = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
1129 nfs4_update_changeattr(inode, &res.cinfo, timestamp, 0);
1134 static int _nfs42_proc_setxattr(struct inode *inode, const char *name,
1135 const void *buf, size_t buflen, int flags)
1137 struct nfs_server *server = NFS_SERVER(inode);
1138 struct page *pages[NFS4XATTR_MAXPAGES];
1139 struct nfs42_setxattrargs arg = {
1140 .fh = NFS_FH(inode),
1141 .xattr_pages = pages,
1142 .xattr_len = buflen,
1144 .xattr_flags = flags,
1146 struct nfs42_setxattrres res;
1147 struct rpc_message msg = {
1148 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETXATTR],
1153 unsigned long timestamp = jiffies;
1155 if (buflen > server->sxasize)
1159 np = nfs4_buf_to_pages_noslab(buf, buflen, arg.xattr_pages);
1165 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
1168 for (; np > 0; np--)
1169 put_page(pages[np - 1]);
1172 nfs4_update_changeattr(inode, &res.cinfo, timestamp, 0);
1177 static ssize_t _nfs42_proc_getxattr(struct inode *inode, const char *name,
1178 void *buf, size_t buflen, struct page **pages,
1181 struct nfs_server *server = NFS_SERVER(inode);
1182 struct nfs42_getxattrargs arg = {
1183 .fh = NFS_FH(inode),
1186 struct nfs42_getxattrres res;
1187 struct rpc_message msg = {
1188 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETXATTR],
1194 arg.xattr_len = plen;
1195 arg.xattr_pages = pages;
1197 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
1203 * Normally, the caching is done one layer up, but for successful
1204 * RPCS, always cache the result here, even if the caller was
1205 * just querying the length, or if the reply was too big for
1206 * the caller. This avoids a second RPC in the case of the
1207 * common query-alloc-retrieve cycle for xattrs.
1209 * Note that xattr_len is always capped to XATTR_SIZE_MAX.
1212 nfs4_xattr_cache_add(inode, name, NULL, pages, res.xattr_len);
1215 if (res.xattr_len > buflen)
1217 _copy_from_pages(buf, pages, 0, res.xattr_len);
1220 return res.xattr_len;
1223 static ssize_t _nfs42_proc_listxattrs(struct inode *inode, void *buf,
1224 size_t buflen, u64 *cookiep, bool *eofp)
1226 struct nfs_server *server = NFS_SERVER(inode);
1227 struct page **pages;
1228 struct nfs42_listxattrsargs arg = {
1229 .fh = NFS_FH(inode),
1232 struct nfs42_listxattrsres res = {
1235 .xattr_len = buflen,
1237 struct rpc_message msg = {
1238 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LISTXATTRS],
1247 res.scratch = alloc_page(GFP_KERNEL);
1251 xdrlen = nfs42_listxattr_xdrsize(buflen);
1252 if (xdrlen > server->lxasize)
1253 xdrlen = server->lxasize;
1254 np = xdrlen / PAGE_SIZE + 1;
1256 pages = kcalloc(np, sizeof(struct page *), GFP_KERNEL);
1258 goto out_free_scratch;
1259 for (i = 0; i < np; i++) {
1260 pages[i] = alloc_page(GFP_KERNEL);
1262 goto out_free_pages;
1265 arg.xattr_pages = pages;
1268 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
1273 *cookiep = res.cookie;
1280 __free_page(pages[np]);
1284 __free_page(res.scratch);
1290 ssize_t nfs42_proc_getxattr(struct inode *inode, const char *name,
1291 void *buf, size_t buflen)
1293 struct nfs4_exception exception = { };
1295 struct page **pages;
1297 np = nfs_page_array_len(0, buflen ?: XATTR_SIZE_MAX);
1298 pages = kmalloc_array(np, sizeof(*pages), GFP_KERNEL);
1302 for (i = 0; i < np; i++) {
1303 pages[i] = alloc_page(GFP_KERNEL);
1312 * The GETXATTR op has no length field in the call, and the
1313 * xattr data is at the end of the reply.
1315 * There is no downside in using the page-aligned length. It will
1316 * allow receiving and caching xattrs that are too large for the
1317 * caller but still fit in the page-rounded value.
1320 err = _nfs42_proc_getxattr(inode, name, buf, buflen,
1321 pages, np * PAGE_SIZE);
1324 err = nfs4_handle_exception(NFS_SERVER(inode), err,
1326 } while (exception.retry);
1330 __free_page(pages[np]);
1336 int nfs42_proc_setxattr(struct inode *inode, const char *name,
1337 const void *buf, size_t buflen, int flags)
1339 struct nfs4_exception exception = { };
1343 err = _nfs42_proc_setxattr(inode, name, buf, buflen, flags);
1346 err = nfs4_handle_exception(NFS_SERVER(inode), err,
1348 } while (exception.retry);
1353 ssize_t nfs42_proc_listxattrs(struct inode *inode, void *buf,
1354 size_t buflen, u64 *cookiep, bool *eofp)
1356 struct nfs4_exception exception = { };
1360 err = _nfs42_proc_listxattrs(inode, buf, buflen,
1364 err = nfs4_handle_exception(NFS_SERVER(inode), err,
1366 } while (exception.retry);
1371 int nfs42_proc_removexattr(struct inode *inode, const char *name)
1373 struct nfs4_exception exception = { };
1377 err = _nfs42_proc_removexattr(inode, name);
1380 err = nfs4_handle_exception(NFS_SERVER(inode), err,
1382 } while (exception.retry);