1 // SPDX-License-Identifier: GPL-2.0-only
/*
 * Module for pnfs flexfile layout driver.
 *
 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
 *
 * Tao Peng <bergwolf@primarydata.com>
 */
10 #include <linux/nfs_fs.h>
11 #include <linux/nfs_mount.h>
12 #include <linux/nfs_page.h>
13 #include <linux/module.h>
14 #include <linux/sched/mm.h>
16 #include <linux/sunrpc/metrics.h>
18 #include "flexfilelayout.h"
19 #include "../nfs4session.h"
20 #include "../nfs4idmap.h"
21 #include "../internal.h"
22 #include "../delegation.h"
23 #include "../nfs4trace.h"
24 #include "../iostat.h"
28 #define NFSDBG_FACILITY NFSDBG_PNFS_LD
30 #define FF_LAYOUT_POLL_RETRY_MAX (15*HZ)
31 #define FF_LAYOUTRETURN_MAXERR 20
33 static unsigned short io_maxretrans;
35 static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
36 struct nfs_pgio_header *hdr);
37 static int ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
38 struct nfs42_layoutstat_devinfo *devinfo,
40 static void ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
41 const struct nfs42_layoutstat_devinfo *devinfo,
42 struct nfs4_ff_layout_mirror *mirror);
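/*
 * Allocate a flexfiles layout header and initialize its commit info,
 * DS error list, mirror list and layoutstats report timestamp.
 */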
44 static struct pnfs_layout_hdr *
45 ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
47 struct nfs4_flexfile_layout *ffl;
49 ffl = kzalloc(sizeof(*ffl), gfp_flags);
51 pnfs_init_ds_commit_info(&ffl->commit_info);
52 INIT_LIST_HEAD(&ffl->error_list);
53 INIT_LIST_HEAD(&ffl->mirrors);
54 ffl->last_report_time = ktime_get();
55 return &ffl->generic_hdr;
61 ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
63 struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(lo);
64 struct nfs4_ff_layout_ds_err *err, *n;
66 list_for_each_entry_safe(err, n, &ffl->error_list, list) {
70 kfree_rcu(ffl, generic_hdr.plh_rcu);
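/*
 * Decode a data-server stateid from the XDR stream and tag it with the
 * pNFS DS stateid type.
 */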
73 static int decode_pnfs_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
77 p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
78 if (unlikely(p == NULL))
80 stateid->type = NFS4_PNFS_DS_STATEID_TYPE;
81 memcpy(stateid->data, p, NFS4_STATEID_SIZE);
82 dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
83 p[0], p[1], p[2], p[3]);
87 static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid)
91 p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
94 memcpy(devid, p, NFS4_DEVICEID4_SIZE);
95 nfs4_print_deviceid(devid);
99 static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
103 p = xdr_inline_decode(xdr, 4);
106 fh->size = be32_to_cpup(p++);
107 if (fh->size > sizeof(struct nfs_fh)) {
108 printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
113 p = xdr_inline_decode(xdr, fh->size);
116 memcpy(&fh->data, p, fh->size);
117 dprintk("%s: fh len %d\n", __func__, fh->size);
123 * Currently only stringified uids and gids are accepted.
124 * I.e., Kerberos is not supported to the DSes, so no principals.
126 * That means that one common function will suffice, but when
127 * principals are added, this should be split to accommodate
128 * calls to both nfs_map_name_to_uid() and nfs_map_group_to_gid().
131 decode_name(struct xdr_stream *xdr, u32 *id)
136 /* opaque_length(4)*/
137 p = xdr_inline_decode(xdr, 4);
140 len = be32_to_cpup(p++);
144 dprintk("%s: len %u\n", __func__, len);
147 p = xdr_inline_decode(xdr, len);
151 if (!nfs_map_string_to_numeric((char *)p, len, id))
157 static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
158 const struct nfs4_ff_layout_mirror *m2)
162 if (m1->fh_versions_cnt != m2->fh_versions_cnt)
164 for (i = 0; i < m1->fh_versions_cnt; i++) {
165 bool found_fh = false;
166 for (j = 0; j < m2->fh_versions_cnt; j++) {
167 if (nfs_compare_fh(&m1->fh_versions[i],
168 &m2->fh_versions[j]) == 0) {
179 static struct nfs4_ff_layout_mirror *
180 ff_layout_add_mirror(struct pnfs_layout_hdr *lo,
181 struct nfs4_ff_layout_mirror *mirror)
183 struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
184 struct nfs4_ff_layout_mirror *pos;
185 struct inode *inode = lo->plh_inode;
187 spin_lock(&inode->i_lock);
188 list_for_each_entry(pos, &ff_layout->mirrors, mirrors) {
189 if (memcmp(&mirror->devid, &pos->devid, sizeof(pos->devid)) != 0)
191 if (!ff_mirror_match_fh(mirror, pos))
193 if (refcount_inc_not_zero(&pos->ref)) {
194 spin_unlock(&inode->i_lock);
198 list_add(&mirror->mirrors, &ff_layout->mirrors);
200 spin_unlock(&inode->i_lock);
205 ff_layout_remove_mirror(struct nfs4_ff_layout_mirror *mirror)
208 if (mirror->layout == NULL)
210 inode = mirror->layout->plh_inode;
211 spin_lock(&inode->i_lock);
212 list_del(&mirror->mirrors);
213 spin_unlock(&inode->i_lock);
214 mirror->layout = NULL;
217 static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
219 struct nfs4_ff_layout_mirror *mirror;
221 mirror = kzalloc(sizeof(*mirror), gfp_flags);
222 if (mirror != NULL) {
223 spin_lock_init(&mirror->lock);
224 refcount_set(&mirror->ref, 1);
225 INIT_LIST_HEAD(&mirror->mirrors);
230 static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
232 const struct cred *cred;
234 ff_layout_remove_mirror(mirror);
235 kfree(mirror->fh_versions);
236 cred = rcu_access_pointer(mirror->ro_cred);
238 cred = rcu_access_pointer(mirror->rw_cred);
240 nfs4_ff_layout_put_deviceid(mirror->mirror_ds);
244 static void ff_layout_put_mirror(struct nfs4_ff_layout_mirror *mirror)
246 if (mirror != NULL && refcount_dec_and_test(&mirror->ref))
247 ff_layout_free_mirror(mirror);
250 static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls)
254 for (i = 0; i < fls->mirror_array_cnt; i++)
255 ff_layout_put_mirror(fls->mirror_array[i]);
258 static int ff_layout_check_layout(struct nfs4_layoutget_res *lgr)
262 dprintk("--> %s\n", __func__);
264 /* FIXME: remove this check when layout segment support is added */
265 if (lgr->range.offset != 0 ||
266 lgr->range.length != NFS4_MAX_UINT64) {
267 dprintk("%s Only whole file layouts supported. Use MDS i/o\n",
272 dprintk("--> %s returns %d\n", __func__, ret);
276 static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
279 ff_layout_free_mirror_array(fls);
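/*
 * Ordering predicate used when inserting layout segments: returns true
 * if range @l1 should be placed after @l2 (ranges of different iomodes
 * are kept apart; otherwise ordering is by offset).
 */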
285 ff_lseg_range_is_after(const struct pnfs_layout_range *l1,
286 const struct pnfs_layout_range *l2)
290 if (l1->iomode != l2->iomode)
291 return l1->iomode != IOMODE_READ;
292 end1 = pnfs_calc_offset_end(l1->offset, l1->length);
293 end2 = pnfs_calc_offset_end(l2->offset, l2->length);
294 if (end1 < l2->offset)
296 if (end2 < l1->offset)
298 return l2->offset <= l1->offset;
302 ff_lseg_merge(struct pnfs_layout_segment *new,
303 struct pnfs_layout_segment *old)
305 u64 new_end, old_end;
307 if (test_bit(NFS_LSEG_LAYOUTRETURN, &old->pls_flags))
309 if (new->pls_range.iomode != old->pls_range.iomode)
311 old_end = pnfs_calc_offset_end(old->pls_range.offset,
312 old->pls_range.length);
313 if (old_end < new->pls_range.offset)
315 new_end = pnfs_calc_offset_end(new->pls_range.offset,
316 new->pls_range.length);
317 if (new_end < old->pls_range.offset)
320 /* Mergeable: copy info from 'old' to 'new' */
321 if (new_end < old_end)
323 if (new->pls_range.offset < old->pls_range.offset)
324 new->pls_range.offset = old->pls_range.offset;
325 new->pls_range.length = pnfs_calc_offset_length(new->pls_range.offset,
327 if (test_bit(NFS_LSEG_ROC, &old->pls_flags))
328 set_bit(NFS_LSEG_ROC, &new->pls_flags);
333 ff_layout_add_lseg(struct pnfs_layout_hdr *lo,
334 struct pnfs_layout_segment *lseg,
335 struct list_head *free_me)
337 pnfs_generic_layout_insert_lseg(lo, lseg,
338 ff_lseg_range_is_after,
343 static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
347 for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
348 for (j = i + 1; j < fls->mirror_array_cnt; j++)
349 if (fls->mirror_array[i]->efficiency <
350 fls->mirror_array[j]->efficiency)
351 swap(fls->mirror_array[i],
352 fls->mirror_array[j]);
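/*
 * Decode a flexfiles LAYOUTGET reply into a layout segment: stripe unit
 * and mirror count, then for each mirror its deviceid, efficiency,
 * stateid, filehandle versions and synthetic uid/gid credentials.
 * Mirrors are sorted by efficiency before the layout is validated.
 */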
356 static struct pnfs_layout_segment *
357 ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
358 struct nfs4_layoutget_res *lgr,
361 struct pnfs_layout_segment *ret;
362 struct nfs4_ff_layout_segment *fls = NULL;
363 struct xdr_stream stream;
365 struct page *scratch;
367 u32 mirror_array_cnt;
371 dprintk("--> %s\n", __func__);
372 scratch = alloc_page(gfp_flags);
374 return ERR_PTR(-ENOMEM);
376 xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
378 xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
380 /* stripe unit and mirror_array_cnt */
382 p = xdr_inline_decode(&stream, 8 + 4);
386 p = xdr_decode_hyper(p, &stripe_unit);
387 mirror_array_cnt = be32_to_cpup(p++);
388 dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__,
389 stripe_unit, mirror_array_cnt);
391 if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT ||
392 mirror_array_cnt == 0)
396 fls = kzalloc(struct_size(fls, mirror_array, mirror_array_cnt),
401 fls->mirror_array_cnt = mirror_array_cnt;
402 fls->stripe_unit = stripe_unit;
404 for (i = 0; i < fls->mirror_array_cnt; i++) {
405 struct nfs4_ff_layout_mirror *mirror;
407 const struct cred __rcu *cred;
410 u32 ds_count, fh_count, id;
414 p = xdr_inline_decode(&stream, 4);
417 ds_count = be32_to_cpup(p);
419 /* FIXME: allow for striping? */
423 fls->mirror_array[i] = ff_layout_alloc_mirror(gfp_flags);
424 if (fls->mirror_array[i] == NULL) {
429 fls->mirror_array[i]->ds_count = ds_count;
432 rc = decode_deviceid(&stream, &fls->mirror_array[i]->devid);
438 p = xdr_inline_decode(&stream, 4);
441 fls->mirror_array[i]->efficiency = be32_to_cpup(p);
444 rc = decode_pnfs_stateid(&stream, &fls->mirror_array[i]->stateid);
450 p = xdr_inline_decode(&stream, 4);
453 fh_count = be32_to_cpup(p);
455 fls->mirror_array[i]->fh_versions =
456 kcalloc(fh_count, sizeof(struct nfs_fh),
458 if (fls->mirror_array[i]->fh_versions == NULL) {
463 for (j = 0; j < fh_count; j++) {
464 rc = decode_nfs_fh(&stream,
465 &fls->mirror_array[i]->fh_versions[j]);
470 fls->mirror_array[i]->fh_versions_cnt = fh_count;
473 rc = decode_name(&stream, &id);
477 uid = make_kuid(&init_user_ns, id);
480 rc = decode_name(&stream, &id);
484 gid = make_kgid(&init_user_ns, id);
486 if (gfp_flags & __GFP_FS)
487 kcred = prepare_kernel_cred(NULL);
489 unsigned int nofs_flags = memalloc_nofs_save();
490 kcred = prepare_kernel_cred(NULL);
491 memalloc_nofs_restore(nofs_flags);
498 cred = RCU_INITIALIZER(kcred);
500 if (lgr->range.iomode == IOMODE_READ)
501 rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
503 rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
505 mirror = ff_layout_add_mirror(lh, fls->mirror_array[i]);
506 if (mirror != fls->mirror_array[i]) {
507 /* swap cred ptrs so free_mirror will clean up old */
508 if (lgr->range.iomode == IOMODE_READ) {
509 cred = xchg(&mirror->ro_cred, cred);
510 rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
512 cred = xchg(&mirror->rw_cred, cred);
513 rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
515 ff_layout_free_mirror(fls->mirror_array[i]);
516 fls->mirror_array[i] = mirror;
519 dprintk("%s: iomode %s uid %u gid %u\n", __func__,
520 lgr->range.iomode == IOMODE_READ ? "READ" : "RW",
521 from_kuid(&init_user_ns, uid),
522 from_kgid(&init_user_ns, gid));
525 p = xdr_inline_decode(&stream, 4);
527 goto out_sort_mirrors;
528 fls->flags = be32_to_cpup(p);
530 p = xdr_inline_decode(&stream, 4);
532 goto out_sort_mirrors;
533 for (i = 0; i < fls->mirror_array_cnt; i++)
534 fls->mirror_array[i]->report_interval = be32_to_cpup(p);
537 ff_layout_sort_mirrors(fls);
538 rc = ff_layout_check_layout(lgr);
541 ret = &fls->generic_hdr;
542 dprintk("<-- %s (success)\n", __func__);
544 __free_page(scratch);
547 _ff_layout_free_lseg(fls);
549 dprintk("<-- %s (%d)\n", __func__, rc);
553 static bool ff_layout_has_rw_segments(struct pnfs_layout_hdr *layout)
555 struct pnfs_layout_segment *lseg;
557 list_for_each_entry(lseg, &layout->plh_segs, pls_list)
558 if (lseg->pls_range.iomode == IOMODE_RW)
565 ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
567 struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
569 dprintk("--> %s\n", __func__);
571 if (lseg->pls_range.iomode == IOMODE_RW) {
572 struct nfs4_flexfile_layout *ffl;
575 ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout);
576 inode = ffl->generic_hdr.plh_inode;
577 spin_lock(&inode->i_lock);
578 if (!ff_layout_has_rw_segments(lseg->pls_layout)) {
579 ffl->commit_info.nbuckets = 0;
580 kfree(ffl->commit_info.buckets);
581 ffl->commit_info.buckets = NULL;
583 pnfs_generic_ds_cinfo_release_lseg(&ffl->commit_info, lseg);
584 spin_unlock(&inode->i_lock);
586 _ff_layout_free_lseg(fls);
589 /* Return 1 until we have multiple lsegs support */
591 ff_layout_get_lseg_count(struct nfs4_ff_layout_segment *fls)
597 nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
599 /* first IO request? */
600 if (atomic_inc_return(&timer->n_ops) == 1) {
601 timer->start_time = now;
606 nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
610 if (atomic_dec_return(&timer->n_ops) < 0)
613 start = timer->start_time;
614 timer->start_time = now;
615 return ktime_sub(now, start);
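/*
 * Start the per-mirror busy timer and decide whether enough time has
 * passed since the last layoutstats report (per-mirror report_interval,
 * the layoutstats_timer setting, or the default interval) that a new
 * report should be sent.
 */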
619 nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
620 struct nfs4_ff_layoutstat *layoutstat,
623 s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL;
624 struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(mirror->layout);
626 nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
627 if (!mirror->start_time)
628 mirror->start_time = now;
629 if (mirror->report_interval != 0)
630 report_interval = (s64)mirror->report_interval * 1000LL;
631 else if (layoutstats_timer != 0)
632 report_interval = (s64)layoutstats_timer * 1000LL;
633 if (ktime_to_ms(ktime_sub(now, ffl->last_report_time)) >=
635 ffl->last_report_time = now;
643 nfs4_ff_layout_stat_io_update_requested(struct nfs4_ff_layoutstat *layoutstat,
646 struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
648 iostat->ops_requested++;
649 iostat->bytes_requested += requested;
653 nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat,
656 ktime_t time_completed,
657 ktime_t time_started)
659 struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
660 ktime_t completion_time = ktime_sub(time_completed, time_started);
663 iostat->ops_completed++;
664 iostat->bytes_completed += completed;
665 iostat->bytes_not_delivered += requested - completed;
667 timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer, time_completed);
668 iostat->total_busy_time =
669 ktime_add(iostat->total_busy_time, timer);
670 iostat->aggregate_completion_time =
671 ktime_add(iostat->aggregate_completion_time,
676 nfs4_ff_layout_stat_io_start_read(struct inode *inode,
677 struct nfs4_ff_layout_mirror *mirror,
678 __u64 requested, ktime_t now)
682 spin_lock(&mirror->lock);
683 report = nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat, now);
684 nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested);
685 set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
686 spin_unlock(&mirror->lock);
689 pnfs_report_layoutstat(inode, GFP_KERNEL);
693 nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
694 struct nfs4_ff_layout_mirror *mirror,
698 spin_lock(&mirror->lock);
699 nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat,
700 requested, completed,
701 ktime_get(), task->tk_start);
702 set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
703 spin_unlock(&mirror->lock);
707 nfs4_ff_layout_stat_io_start_write(struct inode *inode,
708 struct nfs4_ff_layout_mirror *mirror,
709 __u64 requested, ktime_t now)
713 spin_lock(&mirror->lock);
714 report = nfs4_ff_layoutstat_start_io(mirror, &mirror->write_stat, now);
715 nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested);
716 set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
717 spin_unlock(&mirror->lock);
720 pnfs_report_layoutstat(inode, GFP_NOIO);
724 nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
725 struct nfs4_ff_layout_mirror *mirror,
728 enum nfs3_stable_how committed)
730 if (committed == NFS_UNSTABLE)
731 requested = completed = 0;
733 spin_lock(&mirror->lock);
734 nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat,
735 requested, completed, ktime_get(), task->tk_start);
736 set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
737 spin_unlock(&mirror->lock);
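/*
 * Lazily allocate the commit buckets for this layout (one per mirror
 * per lseg) the first time a write needs them; if another thread raced
 * in and already set them up, reuse the existing buckets.
 */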
741 ff_layout_alloc_commit_info(struct pnfs_layout_segment *lseg,
742 struct nfs_commit_info *cinfo,
745 struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
746 struct pnfs_commit_bucket *buckets;
749 if (cinfo->ds->nbuckets != 0) {
750 /* This assumes there is only one RW lseg per file.
751 * To support multiple lsegs per file, we need to
752 * change struct pnfs_commit_bucket to allow dynamically
753 * increasing nbuckets.
758 size = ff_layout_get_lseg_count(fls) * FF_LAYOUT_MIRROR_COUNT(lseg);
760 buckets = kcalloc(size, sizeof(struct pnfs_commit_bucket),
767 spin_lock(&cinfo->inode->i_lock);
768 if (cinfo->ds->nbuckets != 0)
771 cinfo->ds->buckets = buckets;
772 cinfo->ds->nbuckets = size;
773 for (i = 0; i < size; i++) {
774 INIT_LIST_HEAD(&buckets[i].written);
775 INIT_LIST_HEAD(&buckets[i].committing);
776 /* mark direct verifier as unset */
777 buckets[i].direct_verf.committed =
778 NFS_INVALID_STABLE_HOW;
781 spin_unlock(&cinfo->inode->i_lock);
787 ff_layout_mark_ds_unreachable(struct pnfs_layout_segment *lseg, int idx)
789 struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
792 nfs4_mark_deviceid_unavailable(devid);
796 ff_layout_mark_ds_reachable(struct pnfs_layout_segment *lseg, int idx)
798 struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
801 nfs4_mark_deviceid_available(devid);
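/*
 * Walk the mirrors (kept sorted by efficiency) from @start_idx and
 * return the first data server usable for a read, storing its index in
 * @best_idx; when @check_device is set, mirrors whose deviceid is
 * marked unavailable are skipped.
 */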
804 static struct nfs4_pnfs_ds *
805 ff_layout_choose_ds_for_read(struct pnfs_layout_segment *lseg,
806 int start_idx, int *best_idx,
809 struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
810 struct nfs4_ff_layout_mirror *mirror;
811 struct nfs4_pnfs_ds *ds;
812 bool fail_return = false;
815 /* mirrors are initially sorted by efficiency */
816 for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {
817 if (idx+1 == fls->mirror_array_cnt)
818 fail_return = !check_device;
820 mirror = FF_LAYOUT_COMP(lseg, idx);
821 ds = nfs4_ff_layout_prepare_ds(lseg, mirror, fail_return);
826 nfs4_test_deviceid_unavailable(&mirror->mirror_ds->id_node))
836 static struct nfs4_pnfs_ds *
837 ff_layout_choose_any_ds_for_read(struct pnfs_layout_segment *lseg,
838 int start_idx, int *best_idx)
840 return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, false);
843 static struct nfs4_pnfs_ds *
844 ff_layout_choose_valid_ds_for_read(struct pnfs_layout_segment *lseg,
845 int start_idx, int *best_idx)
847 return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, true);
850 static struct nfs4_pnfs_ds *
851 ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
852 int start_idx, int *best_idx)
854 struct nfs4_pnfs_ds *ds;
856 ds = ff_layout_choose_valid_ds_for_read(lseg, start_idx, best_idx);
859 return ff_layout_choose_any_ds_for_read(lseg, start_idx, best_idx);
863 ff_layout_pg_get_read(struct nfs_pageio_descriptor *pgio,
864 struct nfs_page *req,
867 pnfs_put_lseg(pgio->pg_lseg);
868 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
869 nfs_req_openctx(req),
875 if (IS_ERR(pgio->pg_lseg)) {
876 pgio->pg_error = PTR_ERR(pgio->pg_lseg);
877 pgio->pg_lseg = NULL;
882 ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
883 struct nfs_page *req)
885 struct nfs_pgio_mirror *pgm;
886 struct nfs4_ff_layout_mirror *mirror;
887 struct nfs4_pnfs_ds *ds;
891 pnfs_generic_pg_check_layout(pgio);
892 /* Use full layout for now */
893 if (!pgio->pg_lseg) {
894 ff_layout_pg_get_read(pgio, req, false);
898 if (ff_layout_avoid_read_on_rw(pgio->pg_lseg)) {
899 ff_layout_pg_get_read(pgio, req, true);
904 ds = ff_layout_choose_best_ds_for_read(pgio->pg_lseg, 0, &ds_idx);
906 if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
908 pnfs_put_lseg(pgio->pg_lseg);
909 pgio->pg_lseg = NULL;
910 /* Sleep for 1 second before retrying */
915 mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);
917 pgio->pg_mirror_idx = ds_idx;
919 /* read always uses only one mirror - idx 0 for pgio layer */
920 pgm = &pgio->pg_mirrors[0];
921 pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
923 if (NFS_SERVER(pgio->pg_inode)->flags &
924 (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
925 pgio->pg_maxretrans = io_maxretrans;
928 if (pgio->pg_error < 0)
931 trace_pnfs_mds_fallback_pg_init_read(pgio->pg_inode,
932 0, NFS4_MAX_UINT64, IOMODE_READ,
933 NFS_I(pgio->pg_inode)->layout,
935 pnfs_put_lseg(pgio->pg_lseg);
936 pgio->pg_lseg = NULL;
937 pgio->pg_maxretrans = 0;
938 nfs_pageio_reset_read_mds(pgio);
942 ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
943 struct nfs_page *req)
945 struct nfs4_ff_layout_mirror *mirror;
946 struct nfs_pgio_mirror *pgm;
947 struct nfs_commit_info cinfo;
948 struct nfs4_pnfs_ds *ds;
953 pnfs_generic_pg_check_layout(pgio);
954 if (!pgio->pg_lseg) {
955 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
956 nfs_req_openctx(req),
962 if (IS_ERR(pgio->pg_lseg)) {
963 pgio->pg_error = PTR_ERR(pgio->pg_lseg);
964 pgio->pg_lseg = NULL;
968 /* If no lseg, fall back to write through mds */
969 if (pgio->pg_lseg == NULL)
972 nfs_init_cinfo(&cinfo, pgio->pg_inode, pgio->pg_dreq);
973 status = ff_layout_alloc_commit_info(pgio->pg_lseg, &cinfo, GFP_NOFS);
977 /* Use a direct mapping of ds_idx to pgio mirror_idx */
978 if (WARN_ON_ONCE(pgio->pg_mirror_count !=
979 FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg)))
982 for (i = 0; i < pgio->pg_mirror_count; i++) {
983 mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
984 ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, mirror, true);
986 if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
988 pnfs_put_lseg(pgio->pg_lseg);
989 pgio->pg_lseg = NULL;
990 /* Sleep for 1 second before retrying */
994 pgm = &pgio->pg_mirrors[i];
995 pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
998 if (NFS_SERVER(pgio->pg_inode)->flags &
999 (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
1000 pgio->pg_maxretrans = io_maxretrans;
1004 trace_pnfs_mds_fallback_pg_init_write(pgio->pg_inode,
1005 0, NFS4_MAX_UINT64, IOMODE_RW,
1006 NFS_I(pgio->pg_inode)->layout,
1008 pnfs_put_lseg(pgio->pg_lseg);
1009 pgio->pg_lseg = NULL;
1010 pgio->pg_maxretrans = 0;
1011 nfs_pageio_reset_write_mds(pgio);
1015 ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
1016 struct nfs_page *req)
1018 if (!pgio->pg_lseg) {
1019 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
1020 nfs_req_openctx(req),
1026 if (IS_ERR(pgio->pg_lseg)) {
1027 pgio->pg_error = PTR_ERR(pgio->pg_lseg);
1028 pgio->pg_lseg = NULL;
1033 return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);
1035 trace_pnfs_mds_fallback_pg_get_mirror_count(pgio->pg_inode,
1036 0, NFS4_MAX_UINT64, IOMODE_RW,
1037 NFS_I(pgio->pg_inode)->layout,
1039 /* no lseg means that pnfs is not in use, so no mirroring here */
1040 nfs_pageio_reset_write_mds(pgio);
1045 static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
1046 .pg_init = ff_layout_pg_init_read,
1047 .pg_test = pnfs_generic_pg_test,
1048 .pg_doio = pnfs_generic_pg_readpages,
1049 .pg_cleanup = pnfs_generic_pg_cleanup,
1052 static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
1053 .pg_init = ff_layout_pg_init_write,
1054 .pg_test = pnfs_generic_pg_test,
1055 .pg_doio = pnfs_generic_pg_writepages,
1056 .pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
1057 .pg_cleanup = pnfs_generic_pg_cleanup,
1060 static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
1062 struct rpc_task *task = &hdr->task;
1064 pnfs_layoutcommit_inode(hdr->inode, false);
1067 dprintk("%s Reset task %5u for i/o through pNFS "
1068 "(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1070 hdr->inode->i_sb->s_id,
1071 (unsigned long long)NFS_FILEID(hdr->inode),
1073 (unsigned long long)hdr->args.offset);
1075 hdr->completion_ops->reschedule_io(hdr);
1079 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1080 dprintk("%s Reset task %5u for i/o through MDS "
1081 "(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1083 hdr->inode->i_sb->s_id,
1084 (unsigned long long)NFS_FILEID(hdr->inode),
1086 (unsigned long long)hdr->args.offset);
1088 trace_pnfs_mds_fallback_write_done(hdr->inode,
1089 hdr->args.offset, hdr->args.count,
1090 IOMODE_RW, NFS_I(hdr->inode)->layout,
1092 task->tk_status = pnfs_write_done_resend_to_mds(hdr);
1096 static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
1098 struct rpc_task *task = &hdr->task;
1100 pnfs_layoutcommit_inode(hdr->inode, false);
1102 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1103 dprintk("%s Reset task %5u for i/o through MDS "
1104 "(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1106 hdr->inode->i_sb->s_id,
1107 (unsigned long long)NFS_FILEID(hdr->inode),
1109 (unsigned long long)hdr->args.offset);
1111 trace_pnfs_mds_fallback_read_done(hdr->inode,
1112 hdr->args.offset, hdr->args.count,
1113 IOMODE_READ, NFS_I(hdr->inode)->layout,
1115 task->tk_status = pnfs_read_done_resend_to_mds(hdr);
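/*
 * NFSv4 error handling for DS I/O: session and state errors kick off
 * recovery and a retry, layout-invalidating errors destroy the layout,
 * and DS connection errors delete the deviceid; the return value tells
 * the caller whether to resend through pNFS or fall back to the MDS.
 */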
1119 static int ff_layout_async_handle_error_v4(struct rpc_task *task,
1120 struct nfs4_state *state,
1121 struct nfs_client *clp,
1122 struct pnfs_layout_segment *lseg,
1125 struct pnfs_layout_hdr *lo = lseg->pls_layout;
1126 struct inode *inode = lo->plh_inode;
1127 struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
1128 struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;
1130 switch (task->tk_status) {
1131 case -NFS4ERR_BADSESSION:
1132 case -NFS4ERR_BADSLOT:
1133 case -NFS4ERR_BAD_HIGH_SLOT:
1134 case -NFS4ERR_DEADSESSION:
1135 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1136 case -NFS4ERR_SEQ_FALSE_RETRY:
1137 case -NFS4ERR_SEQ_MISORDERED:
1138 dprintk("%s ERROR %d, Reset session. Exchangeid "
1139 "flags 0x%x\n", __func__, task->tk_status,
1140 clp->cl_exchange_flags);
1141 nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
1143 case -NFS4ERR_DELAY:
1144 case -NFS4ERR_GRACE:
1145 rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
1147 case -NFS4ERR_RETRY_UNCACHED_REP:
1149 /* Invalidate Layout errors */
1150 case -NFS4ERR_PNFS_NO_LAYOUT:
1151 case -ESTALE: /* mapped NFS4ERR_STALE */
1152 case -EBADHANDLE: /* mapped NFS4ERR_BADHANDLE */
1153 case -EISDIR: /* mapped NFS4ERR_ISDIR */
1154 case -NFS4ERR_FHEXPIRED:
1155 case -NFS4ERR_WRONG_TYPE:
1156 dprintk("%s Invalid layout error %d\n", __func__,
1159 * Destroy layout so new i/o will get a new layout.
1160 * Layout will not be destroyed until all current lseg
1161 * references are put. Mark layout as invalid to resend failed
1162 * i/o and all i/o waiting on the slot table to the MDS until
1163 * layout is destroyed and a new valid layout is obtained.
1165 pnfs_destroy_layout(NFS_I(inode));
1166 rpc_wake_up(&tbl->slot_tbl_waitq);
1168 /* RPC connection errors */
1176 dprintk("%s DS connection error %d\n", __func__,
1178 nfs4_delete_deviceid(devid->ld, devid->nfs_client,
1180 rpc_wake_up(&tbl->slot_tbl_waitq);
1183 if (ff_layout_avoid_mds_available_ds(lseg))
1184 return -NFS4ERR_RESET_TO_PNFS;
1186 dprintk("%s Retry through MDS. Error %d\n", __func__,
1188 return -NFS4ERR_RESET_TO_MDS;
1190 task->tk_status = 0;
1194 /* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
1195 static int ff_layout_async_handle_error_v3(struct rpc_task *task,
1196 struct pnfs_layout_segment *lseg,
1199 struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
1201 switch (task->tk_status) {
1202 /* File access problems. Don't mark the device as unavailable */
1211 nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
1214 dprintk("%s DS connection error %d\n", __func__,
1216 nfs4_delete_deviceid(devid->ld, devid->nfs_client,
1219 /* FIXME: Need to prevent infinite looping here. */
1220 return -NFS4ERR_RESET_TO_PNFS;
1222 task->tk_status = 0;
1223 rpc_restart_call_prepare(task);
1224 rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
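/*
 * Common DS error handling entry point: a successful status marks the
 * DS reachable again, an invalid lseg forces a pNFS resend, and other
 * errors are passed to the v3 or v4 handler according to the DS
 * protocol version.
 */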
1228 static int ff_layout_async_handle_error(struct rpc_task *task,
1229 struct nfs4_state *state,
1230 struct nfs_client *clp,
1231 struct pnfs_layout_segment *lseg,
1234 int vers = clp->cl_nfs_mod->rpc_vers->number;
1236 if (task->tk_status >= 0) {
1237 ff_layout_mark_ds_reachable(lseg, idx);
1241 /* Handle the case of an invalid layout segment */
1242 if (!pnfs_is_valid_lseg(lseg))
1243 return -NFS4ERR_RESET_TO_PNFS;
1247 return ff_layout_async_handle_error_v3(task, lseg, idx);
1249 return ff_layout_async_handle_error_v4(task, state, clp,
1252 /* should never happen */
1258 static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
1259 int idx, u64 offset, u64 length,
1260 u32 *op_status, int opnum, int error)
1262 struct nfs4_ff_layout_mirror *mirror;
1263 u32 status = *op_status;
1270 case -EPROTONOSUPPORT:
1281 *op_status = status = NFS4ERR_NXIO;
1284 *op_status = status = NFS4ERR_ACCESS;
1291 mirror = FF_LAYOUT_COMP(lseg, idx);
1292 err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
1293 mirror, offset, length, status, opnum,
1301 ff_layout_mark_ds_unreachable(lseg, idx);
1304 pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode,
1308 dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
1311 /* NFS_PROTO call done callback routines */
1312 static int ff_layout_read_done_cb(struct rpc_task *task,
1313 struct nfs_pgio_header *hdr)
1315 int new_idx = hdr->pgio_mirror_idx;
1318 if (task->tk_status < 0) {
1319 ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
1320 hdr->args.offset, hdr->args.count,
1321 &hdr->res.op_status, OP_READ,
1323 trace_ff_layout_read_error(hdr);
1326 err = ff_layout_async_handle_error(task, hdr->args.context->state,
1327 hdr->ds_clp, hdr->lseg,
1328 hdr->pgio_mirror_idx);
1330 trace_nfs4_pnfs_read(hdr, err);
1331 clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1332 clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1334 case -NFS4ERR_RESET_TO_PNFS:
1335 if (ff_layout_choose_best_ds_for_read(hdr->lseg,
1336 hdr->pgio_mirror_idx + 1,
1338 goto out_layouterror;
1339 set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1340 return task->tk_status;
1341 case -NFS4ERR_RESET_TO_MDS:
1342 set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1343 return task->tk_status;
1350 ff_layout_read_record_layoutstats_done(task, hdr);
1351 ff_layout_send_layouterror(hdr->lseg);
1352 hdr->pgio_mirror_idx = new_idx;
1354 rpc_restart_call_prepare(task);
1359 ff_layout_need_layoutcommit(struct pnfs_layout_segment *lseg)
1361 return !(FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_LAYOUTCOMMIT);
1365 * We reference the rpc_cred of the first WRITE that triggers the need for
1366 * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
1367 * RFC 5661 is not clear about which credential should be used.
1369 * The flexfiles client should treat a FILE_SYNC reply from the DS as DATA_SYNC,
1370 * so to follow http://www.rfc-editor.org/errata_search.php?rfc=5661&eid=2751
1371 * we always send a layoutcommit after DS writes.
1374 ff_layout_set_layoutcommit(struct inode *inode,
1375 struct pnfs_layout_segment *lseg,
1378 if (!ff_layout_need_layoutcommit(lseg))
1381 pnfs_set_layoutcommit(inode, lseg, end_offset);
1382 dprintk("%s inode %lu pls_end_pos %llu\n", __func__, inode->i_ino,
1383 (unsigned long long) NFS_I(inode)->layout->plh_lwb);
1386 static void ff_layout_read_record_layoutstats_start(struct rpc_task *task,
1387 struct nfs_pgio_header *hdr)
1389 if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
1391 nfs4_ff_layout_stat_io_start_read(hdr->inode,
1392 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1397 static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
1398 struct nfs_pgio_header *hdr)
1400 if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
1402 nfs4_ff_layout_stat_io_end_read(task,
1403 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1406 set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
1409 static int ff_layout_read_prepare_common(struct rpc_task *task,
1410 struct nfs_pgio_header *hdr)
1412 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1413 rpc_exit(task, -EIO);
1417 ff_layout_read_record_layoutstats_start(task, hdr);
1422 * Call ops for the async read/write cases
1423 * In the case of dense layouts, the offset needs to be reset to its original value.
1426 static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data)
1428 struct nfs_pgio_header *hdr = data;
1430 if (ff_layout_read_prepare_common(task, hdr))
1433 rpc_call_start(task);
1436 static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
1438 struct nfs_pgio_header *hdr = data;
1440 if (nfs4_setup_sequence(hdr->ds_clp,
1441 &hdr->args.seq_args,
1446 ff_layout_read_prepare_common(task, hdr);
1449 static void ff_layout_read_call_done(struct rpc_task *task, void *data)
1451 struct nfs_pgio_header *hdr = data;
1453 dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status);
1455 if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1456 task->tk_status == 0) {
1457 nfs4_sequence_done(task, &hdr->res.seq_res);
1461 /* Note this may cause RPC to be resent */
1462 hdr->mds_ops->rpc_call_done(task, hdr);
1465 static void ff_layout_read_count_stats(struct rpc_task *task, void *data)
1467 struct nfs_pgio_header *hdr = data;
1469 ff_layout_read_record_layoutstats_done(task, hdr);
1470 rpc_count_iostats_metrics(task,
1471 &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]);
1474 static void ff_layout_read_release(void *data)
1476 struct nfs_pgio_header *hdr = data;
1478 ff_layout_read_record_layoutstats_done(&hdr->task, hdr);
1479 if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags)) {
1480 ff_layout_send_layouterror(hdr->lseg);
1481 pnfs_read_resend_pnfs(hdr);
1482 } else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
1483 ff_layout_reset_read(hdr);
1484 pnfs_generic_rw_release(data);
1488 static int ff_layout_write_done_cb(struct rpc_task *task,
1489 struct nfs_pgio_header *hdr)
1491 loff_t end_offs = 0;
1494 if (task->tk_status < 0) {
1495 ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
1496 hdr->args.offset, hdr->args.count,
1497 &hdr->res.op_status, OP_WRITE,
1499 trace_ff_layout_write_error(hdr);
1502 err = ff_layout_async_handle_error(task, hdr->args.context->state,
1503 hdr->ds_clp, hdr->lseg,
1504 hdr->pgio_mirror_idx);
1506 trace_nfs4_pnfs_write(hdr, err);
1507 clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1508 clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1510 case -NFS4ERR_RESET_TO_PNFS:
1511 set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1512 return task->tk_status;
1513 case -NFS4ERR_RESET_TO_MDS:
1514 set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1515 return task->tk_status;
1520 if (hdr->res.verf->committed == NFS_FILE_SYNC ||
1521 hdr->res.verf->committed == NFS_DATA_SYNC)
1522 end_offs = hdr->mds_offset + (loff_t)hdr->res.count;
1524 /* Note: if the write is unstable, don't set end_offs until commit */
1525 ff_layout_set_layoutcommit(hdr->inode, hdr->lseg, end_offs);
1527 /* zero out fattr since we don't care about DS attrs at all */
1528 hdr->fattr.valid = 0;
1529 if (task->tk_status >= 0)
1530 nfs_writeback_update_inode(hdr);
1535 static int ff_layout_commit_done_cb(struct rpc_task *task,
1536 struct nfs_commit_data *data)
1540 if (task->tk_status < 0) {
1541 ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index,
1542 data->args.offset, data->args.count,
1543 &data->res.op_status, OP_COMMIT,
1545 trace_ff_layout_commit_error(data);
1548 err = ff_layout_async_handle_error(task, NULL, data->ds_clp,
1549 data->lseg, data->ds_commit_index);
1551 trace_nfs4_pnfs_commit_ds(data, err);
1553 case -NFS4ERR_RESET_TO_PNFS:
1554 pnfs_generic_prepare_to_resend_writes(data);
1556 case -NFS4ERR_RESET_TO_MDS:
1557 pnfs_generic_prepare_to_resend_writes(data);
1560 rpc_restart_call_prepare(task);
1564 ff_layout_set_layoutcommit(data->inode, data->lseg, data->lwb);
1569 static void ff_layout_write_record_layoutstats_start(struct rpc_task *task,
1570 struct nfs_pgio_header *hdr)
1572 if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
1574 nfs4_ff_layout_stat_io_start_write(hdr->inode,
1575 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1580 static void ff_layout_write_record_layoutstats_done(struct rpc_task *task,
1581 struct nfs_pgio_header *hdr)
1583 if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
1585 nfs4_ff_layout_stat_io_end_write(task,
1586 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1587 hdr->args.count, hdr->res.count,
1588 hdr->res.verf->committed);
1589 set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
1592 static int ff_layout_write_prepare_common(struct rpc_task *task,
1593 struct nfs_pgio_header *hdr)
1595 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1596 rpc_exit(task, -EIO);
1600 ff_layout_write_record_layoutstats_start(task, hdr);
1604 static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data)
1606 struct nfs_pgio_header *hdr = data;
1608 if (ff_layout_write_prepare_common(task, hdr))
1611 rpc_call_start(task);
1614 static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
1616 struct nfs_pgio_header *hdr = data;
1618 if (nfs4_setup_sequence(hdr->ds_clp,
1619 &hdr->args.seq_args,
1624 ff_layout_write_prepare_common(task, hdr);
1627 static void ff_layout_write_call_done(struct rpc_task *task, void *data)
1629 struct nfs_pgio_header *hdr = data;
1631 if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1632 task->tk_status == 0) {
1633 nfs4_sequence_done(task, &hdr->res.seq_res);
1637 /* Note this may cause RPC to be resent */
1638 hdr->mds_ops->rpc_call_done(task, hdr);
1641 static void ff_layout_write_count_stats(struct rpc_task *task, void *data)
1643 struct nfs_pgio_header *hdr = data;
1645 ff_layout_write_record_layoutstats_done(task, hdr);
1646 rpc_count_iostats_metrics(task,
1647 &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]);
1650 static void ff_layout_write_release(void *data)
1652 struct nfs_pgio_header *hdr = data;
1654 ff_layout_write_record_layoutstats_done(&hdr->task, hdr);
1655 if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags)) {
1656 ff_layout_send_layouterror(hdr->lseg);
1657 ff_layout_reset_write(hdr, true);
1658 } else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
1659 ff_layout_reset_write(hdr, false);
1660 pnfs_generic_rw_release(data);
1663 static void ff_layout_commit_record_layoutstats_start(struct rpc_task *task,
1664 struct nfs_commit_data *cdata)
1666 if (test_and_set_bit(NFS_IOHDR_STAT, &cdata->flags))
1668 nfs4_ff_layout_stat_io_start_write(cdata->inode,
1669 FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
1673 static void ff_layout_commit_record_layoutstats_done(struct rpc_task *task,
1674 struct nfs_commit_data *cdata)
1676 struct nfs_page *req;
1679 if (!test_and_clear_bit(NFS_IOHDR_STAT, &cdata->flags))
1682 if (task->tk_status == 0) {
1683 list_for_each_entry(req, &cdata->pages, wb_list)
1684 count += req->wb_bytes;
1686 nfs4_ff_layout_stat_io_end_write(task,
1687 FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
1688 count, count, NFS_FILE_SYNC);
1689 set_bit(NFS_LSEG_LAYOUTRETURN, &cdata->lseg->pls_flags);
1692 static void ff_layout_commit_prepare_common(struct rpc_task *task,
1693 struct nfs_commit_data *cdata)
1695 ff_layout_commit_record_layoutstats_start(task, cdata);
1698 static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
1700 ff_layout_commit_prepare_common(task, data);
1701 rpc_call_start(task);
1704 static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
1706 struct nfs_commit_data *wdata = data;
1708 if (nfs4_setup_sequence(wdata->ds_clp,
1709 &wdata->args.seq_args,
1710 &wdata->res.seq_res,
1713 ff_layout_commit_prepare_common(task, data);
1716 static void ff_layout_commit_done(struct rpc_task *task, void *data)
1718 pnfs_generic_write_commit_done(task, data);
1721 static void ff_layout_commit_count_stats(struct rpc_task *task, void *data)
1723 struct nfs_commit_data *cdata = data;
1725 ff_layout_commit_record_layoutstats_done(task, cdata);
1726 rpc_count_iostats_metrics(task,
1727 &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]);
1730 static void ff_layout_commit_release(void *data)
1732 struct nfs_commit_data *cdata = data;
1734 ff_layout_commit_record_layoutstats_done(&cdata->task, cdata);
1735 pnfs_generic_commit_release(data);
1738 static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
1739 .rpc_call_prepare = ff_layout_read_prepare_v3,
1740 .rpc_call_done = ff_layout_read_call_done,
1741 .rpc_count_stats = ff_layout_read_count_stats,
1742 .rpc_release = ff_layout_read_release,
1745 static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
1746 .rpc_call_prepare = ff_layout_read_prepare_v4,
1747 .rpc_call_done = ff_layout_read_call_done,
1748 .rpc_count_stats = ff_layout_read_count_stats,
1749 .rpc_release = ff_layout_read_release,
1752 static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
1753 .rpc_call_prepare = ff_layout_write_prepare_v3,
1754 .rpc_call_done = ff_layout_write_call_done,
1755 .rpc_count_stats = ff_layout_write_count_stats,
1756 .rpc_release = ff_layout_write_release,
1759 static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
1760 .rpc_call_prepare = ff_layout_write_prepare_v4,
1761 .rpc_call_done = ff_layout_write_call_done,
1762 .rpc_count_stats = ff_layout_write_count_stats,
1763 .rpc_release = ff_layout_write_release,
1766 static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = {
1767 .rpc_call_prepare = ff_layout_commit_prepare_v3,
1768 .rpc_call_done = ff_layout_commit_done,
1769 .rpc_count_stats = ff_layout_commit_count_stats,
1770 .rpc_release = ff_layout_commit_release,
1773 static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = {
1774 .rpc_call_prepare = ff_layout_commit_prepare_v4,
1775 .rpc_call_done = ff_layout_commit_done,
1776 .rpc_count_stats = ff_layout_commit_count_stats,
1777 .rpc_release = ff_layout_commit_release,
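/*
 * Issue an asynchronous READ to the mirror's data server using the DS
 * filehandle, stateid and credential; if no usable DS connection can be
 * set up, either ask to be retried or fall back to the MDS.
 */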
1780 static enum pnfs_try_status
1781 ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
1783 struct pnfs_layout_segment *lseg = hdr->lseg;
1784 struct nfs4_pnfs_ds *ds;
1785 struct rpc_clnt *ds_clnt;
1786 struct nfs4_ff_layout_mirror *mirror;
1787 const struct cred *ds_cred;
1788 loff_t offset = hdr->args.offset;
1789 u32 idx = hdr->pgio_mirror_idx;
1793 dprintk("--> %s ino %lu pgbase %u req %zu@%llu\n",
1794 __func__, hdr->inode->i_ino,
1795 hdr->args.pgbase, (size_t)hdr->args.count, offset);
1797 mirror = FF_LAYOUT_COMP(lseg, idx);
1798 ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
1802 ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
1804 if (IS_ERR(ds_clnt))
1807 ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
1811 vers = nfs4_ff_layout_ds_version(mirror);
1813 dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
1814 ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count), vers);
1816 hdr->pgio_done_cb = ff_layout_read_done_cb;
1817 refcount_inc(&ds->ds_clp->cl_count);
1818 hdr->ds_clp = ds->ds_clp;
1819 fh = nfs4_ff_layout_select_ds_fh(mirror);
1823 nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);
1826 * Note that if we ever decide to split across DSes,
1827 * then we may need to handle dense-like offsets.
1829 hdr->args.offset = offset;
1830 hdr->mds_offset = offset;
1832 /* Perform an asynchronous read to ds */
1833 nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1834 vers == 3 ? &ff_layout_read_call_ops_v3 :
1835 &ff_layout_read_call_ops_v4,
1836 0, RPC_TASK_SOFTCONN);
1838 return PNFS_ATTEMPTED;
1841 if (ff_layout_avoid_mds_available_ds(lseg))
1842 return PNFS_TRY_AGAIN;
1843 trace_pnfs_mds_fallback_read_pagelist(hdr->inode,
1844 hdr->args.offset, hdr->args.count,
1845 IOMODE_READ, NFS_I(hdr->inode)->layout, lseg);
1846 return PNFS_NOT_ATTEMPTED;
1849 /* Perform async writes. */
1850 static enum pnfs_try_status
1851 ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
1853 struct pnfs_layout_segment *lseg = hdr->lseg;
1854 struct nfs4_pnfs_ds *ds;
1855 struct rpc_clnt *ds_clnt;
1856 struct nfs4_ff_layout_mirror *mirror;
1857 const struct cred *ds_cred;
1858 loff_t offset = hdr->args.offset;
1861 int idx = hdr->pgio_mirror_idx;
1863 mirror = FF_LAYOUT_COMP(lseg, idx);
1864 ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
1868 ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
1870 if (IS_ERR(ds_clnt))
1873 ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
1877 vers = nfs4_ff_layout_ds_version(mirror);
1879 dprintk("%s ino %lu sync %d req %zu@%llu DS: %s cl_count %d vers %d\n",
1880 __func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
1881 offset, ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count),
1884 hdr->pgio_done_cb = ff_layout_write_done_cb;
1885 refcount_inc(&ds->ds_clp->cl_count);
1886 hdr->ds_clp = ds->ds_clp;
1887 hdr->ds_commit_idx = idx;
1888 fh = nfs4_ff_layout_select_ds_fh(mirror);
1892 nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);
1895 * Note that if we ever decide to split across DSes,
1896 * then we may need to handle dense-like offsets.
1898 hdr->args.offset = offset;
1900 /* Perform an asynchronous write */
1901 nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1902 vers == 3 ? &ff_layout_write_call_ops_v3 :
1903 &ff_layout_write_call_ops_v4,
1904 sync, RPC_TASK_SOFTCONN);
1906 return PNFS_ATTEMPTED;
1909 if (ff_layout_avoid_mds_available_ds(lseg))
1910 return PNFS_TRY_AGAIN;
1911 trace_pnfs_mds_fallback_write_pagelist(hdr->inode,
1912 hdr->args.offset, hdr->args.count,
1913 IOMODE_RW, NFS_I(hdr->inode)->layout, lseg);
1914 return PNFS_NOT_ATTEMPTED;
1917 static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
1922 static struct nfs_fh *
1923 select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
1925 struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
1927 /* FIXME: Assume that there is only one NFS version available
 * for the DS. */
1930 return &flseg->mirror_array[i]->fh_versions[0];
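/*
 * Send a COMMIT to the data server backing this commit bucket; if the
 * DS cannot be used, the pending writes are prepared for resending and
 * the commit data is released.
 */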
1933 static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
1935 struct pnfs_layout_segment *lseg = data->lseg;
1936 struct nfs4_pnfs_ds *ds;
1937 struct rpc_clnt *ds_clnt;
1938 struct nfs4_ff_layout_mirror *mirror;
1939 const struct cred *ds_cred;
1944 if (!lseg || !(pnfs_is_valid_lseg(lseg) ||
1945 test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)))
1948 idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
1949 mirror = FF_LAYOUT_COMP(lseg, idx);
1950 ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
1954 ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
1956 if (IS_ERR(ds_clnt))
1959 ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, data->cred);
1963 vers = nfs4_ff_layout_ds_version(mirror);
1965 dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
1966 data->inode->i_ino, how, refcount_read(&ds->ds_clp->cl_count),
1968 data->commit_done_cb = ff_layout_commit_done_cb;
1969 data->cred = ds_cred;
1970 refcount_inc(&ds->ds_clp->cl_count);
1971 data->ds_clp = ds->ds_clp;
1972 fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
1976 ret = nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
1977 vers == 3 ? &ff_layout_commit_call_ops_v3 :
1978 &ff_layout_commit_call_ops_v4,
1979 how, RPC_TASK_SOFTCONN);
1983 pnfs_generic_prepare_to_resend_writes(data);
1984 pnfs_generic_commit_release(data);
1989 ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
1990 int how, struct nfs_commit_info *cinfo)
1992 return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
1993 ff_layout_initiate_commit);
1996 static struct pnfs_ds_commit_info *
1997 ff_layout_get_ds_info(struct inode *inode)
1999 struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;
2004 return &FF_LAYOUT_FROM_HDR(layout)->commit_info;
2008 ff_layout_release_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
2009 struct inode *inode)
2011 spin_lock(&inode->i_lock);
2012 pnfs_generic_ds_cinfo_destroy(fl_cinfo);
2013 spin_unlock(&inode->i_lock);
2017 ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
2019 nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds,
2023 static int ff_layout_encode_ioerr(struct xdr_stream *xdr,
2024 const struct nfs4_layoutreturn_args *args,
2025 const struct nfs4_flexfile_layoutreturn_args *ff_args)
2029 start = xdr_reserve_space(xdr, 4);
2030 if (unlikely(!start))
2033 *start = cpu_to_be32(ff_args->num_errors);
2034 /* This assumes we always return _ALL_ layouts */
2035 return ff_layout_encode_ds_ioerr(xdr, &ff_args->errors);
2039 encode_opaque_fixed(struct xdr_stream *xdr, const void *buf, size_t len)
2041 WARN_ON_ONCE(xdr_stream_encode_opaque_fixed(xdr, buf, len) < 0);
2045 ff_layout_encode_ff_iostat_head(struct xdr_stream *xdr,
2046 const nfs4_stateid *stateid,
2047 const struct nfs42_layoutstat_devinfo *devinfo)
2051 p = xdr_reserve_space(xdr, 8 + 8);
2052 p = xdr_encode_hyper(p, devinfo->offset);
2053 p = xdr_encode_hyper(p, devinfo->length);
2054 encode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE);
2055 p = xdr_reserve_space(xdr, 4*8);
2056 p = xdr_encode_hyper(p, devinfo->read_count);
2057 p = xdr_encode_hyper(p, devinfo->read_bytes);
2058 p = xdr_encode_hyper(p, devinfo->write_count);
2059 p = xdr_encode_hyper(p, devinfo->write_bytes);
2060 encode_opaque_fixed(xdr, devinfo->dev_id.data, NFS4_DEVICEID4_SIZE);
2064 ff_layout_encode_ff_iostat(struct xdr_stream *xdr,
2065 const nfs4_stateid *stateid,
2066 const struct nfs42_layoutstat_devinfo *devinfo)
2068 ff_layout_encode_ff_iostat_head(xdr, stateid, devinfo);
2069 ff_layout_encode_ff_layoutupdate(xdr, devinfo,
2070 devinfo->ld_private.data);
2073 /* report nothing for now */
2074 static void ff_layout_encode_iostats_array(struct xdr_stream *xdr,
2075 const struct nfs4_layoutreturn_args *args,
2076 struct nfs4_flexfile_layoutreturn_args *ff_args)
2081 p = xdr_reserve_space(xdr, 4);
2082 *p = cpu_to_be32(ff_args->num_dev);
2083 for (i = 0; i < ff_args->num_dev; i++)
2084 ff_layout_encode_ff_iostat(xdr,
2085 &args->layout->plh_stateid,
2086 &ff_args->devinfo[i]);
2090 ff_layout_free_iostats_array(struct nfs42_layoutstat_devinfo *devinfo,
2091 unsigned int num_entries)
2095 for (i = 0; i < num_entries; i++) {
2096 if (!devinfo[i].ld_private.ops)
2098 if (!devinfo[i].ld_private.ops->free)
2100 devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
2104 static struct nfs4_deviceid_node *
2105 ff_layout_alloc_deviceid_node(struct nfs_server *server,
2106 struct pnfs_device *pdev, gfp_t gfp_flags)
2108 struct nfs4_ff_layout_ds *dsaddr;
2110 dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags);
2113 return &dsaddr->id_node;
2117 ff_layout_encode_layoutreturn(struct xdr_stream *xdr,
2118 const void *voidargs,
2119 const struct nfs4_xdr_opaque_data *ff_opaque)
2121 const struct nfs4_layoutreturn_args *args = voidargs;
2122 struct nfs4_flexfile_layoutreturn_args *ff_args = ff_opaque->data;
2123 struct xdr_buf tmp_buf = {
2126 .iov_base = page_address(ff_args->pages[0]),
2129 .buflen = PAGE_SIZE,
2131 struct xdr_stream tmp_xdr;
2134 dprintk("%s: Begin\n", __func__);
2136 xdr_init_encode(&tmp_xdr, &tmp_buf, NULL, NULL);
2138 ff_layout_encode_ioerr(&tmp_xdr, args, ff_args);
2139 ff_layout_encode_iostats_array(&tmp_xdr, args, ff_args);
2141 start = xdr_reserve_space(xdr, 4);
2142 *start = cpu_to_be32(tmp_buf.len);
2143 xdr_write_pages(xdr, ff_args->pages, 0, tmp_buf.len);
2145 dprintk("%s: Return\n", __func__);
2149 ff_layout_free_layoutreturn(struct nfs4_xdr_opaque_data *args)
2151 struct nfs4_flexfile_layoutreturn_args *ff_args;
2155 ff_args = args->data;
2158 ff_layout_free_ds_ioerr(&ff_args->errors);
2159 ff_layout_free_iostats_array(ff_args->devinfo, ff_args->num_dev);
2161 put_page(ff_args->pages[0]);
2165 static const struct nfs4_xdr_opaque_ops layoutreturn_ops = {
2166 .encode = ff_layout_encode_layoutreturn,
2167 .free = ff_layout_free_layoutreturn,
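/*
 * Attach flexfiles-private data to a LAYOUTRETURN: the accumulated DS
 * I/O errors for the returned range plus per-mirror iostats, encoded
 * later via layoutreturn_ops.
 */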
2171 ff_layout_prepare_layoutreturn(struct nfs4_layoutreturn_args *args)
2173 struct nfs4_flexfile_layoutreturn_args *ff_args;
2174 struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(args->layout);
2176 ff_args = kmalloc(sizeof(*ff_args), GFP_KERNEL);
2179 ff_args->pages[0] = alloc_page(GFP_KERNEL);
2180 if (!ff_args->pages[0])
2181 goto out_nomem_free;
2183 INIT_LIST_HEAD(&ff_args->errors);
2184 ff_args->num_errors = ff_layout_fetch_ds_ioerr(args->layout,
2185 &args->range, &ff_args->errors,
2186 FF_LAYOUTRETURN_MAXERR);
2188 spin_lock(&args->inode->i_lock);
2189 ff_args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
2190 &ff_args->devinfo[0], ARRAY_SIZE(ff_args->devinfo));
2191 spin_unlock(&args->inode->i_lock);
2193 args->ld_private->ops = &layoutreturn_ops;
2194 args->ld_private->data = ff_args;
2202 #ifdef CONFIG_NFS_V4_2
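/*
 * If the server supports LAYOUTERROR, report any DS errors recorded for
 * this segment's range, batching up to NFS42_LAYOUTERROR_MAX entries
 * per call.
 */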
2204 ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
2206 struct pnfs_layout_hdr *lo = lseg->pls_layout;
2207 struct nfs42_layout_error *errors;
2210 if (!nfs_server_capable(lo->plh_inode, NFS_CAP_LAYOUTERROR))
2212 ff_layout_fetch_ds_ioerr(lo, &lseg->pls_range, &head, -1);
2213 if (list_empty(&head))
2216 errors = kmalloc_array(NFS42_LAYOUTERROR_MAX,
2217 sizeof(*errors), GFP_NOFS);
2218 if (errors != NULL) {
2219 const struct nfs4_ff_layout_ds_err *pos;
2222 list_for_each_entry(pos, &head, list) {
2223 errors[n].offset = pos->offset;
2224 errors[n].length = pos->length;
2225 nfs4_stateid_copy(&errors[n].stateid, &pos->stateid);
2226 errors[n].errors[0].dev_id = pos->deviceid;
2227 errors[n].errors[0].status = pos->status;
2228 errors[n].errors[0].opnum = pos->opnum;
2230 if (!list_is_last(&pos->list, &head) &&
2231 n < NFS42_LAYOUTERROR_MAX)
2233 if (nfs42_proc_layouterror(lseg, errors, n) < 0)
2239 ff_layout_free_ds_ioerr(&head);
2243 ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
2249 ff_layout_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen)
2251 const struct sockaddr_in *sin = (struct sockaddr_in *)sap;
2253 return snprintf(buf, buflen, "%pI4", &sin->sin_addr);
2257 ff_layout_ntop6_noscopeid(const struct sockaddr *sap, char *buf,
2260 const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
2261 const struct in6_addr *addr = &sin6->sin6_addr;
2264 * RFC 4291, Section 2.2.2
2266 * Shorthanded ANY address
2268 if (ipv6_addr_any(addr))
2269 return snprintf(buf, buflen, "::");
2272 * RFC 4291, Section 2.2.2
2274 * Shorthanded loopback address
2276 if (ipv6_addr_loopback(addr))
2277 return snprintf(buf, buflen, "::1");
2280 * RFC 4291, Section 2.2.3
2282 * Special presentation address format for mapped v4
2285 if (ipv6_addr_v4mapped(addr))
2286 return snprintf(buf, buflen, "::ffff:%pI4",
2287 &addr->s6_addr32[3]);
2290 * RFC 4291, Section 2.2.1
2292 return snprintf(buf, buflen, "%pI6c", addr);
2295 /* Derived from rpc_sockaddr2uaddr */
2297 ff_layout_encode_netaddr(struct xdr_stream *xdr, struct nfs4_pnfs_ds_addr *da)
2299 struct sockaddr *sap = (struct sockaddr *)&da->da_addr;
2300 char portbuf[RPCBIND_MAXUADDRPLEN];
2301 char addrbuf[RPCBIND_MAXUADDRLEN];
2303 unsigned short port;
2307 switch (sap->sa_family) {
2309 if (ff_layout_ntop4(sap, addrbuf, sizeof(addrbuf)) == 0)
2311 port = ntohs(((struct sockaddr_in *)sap)->sin_port);
2316 if (ff_layout_ntop6_noscopeid(sap, addrbuf, sizeof(addrbuf)) == 0)
2318 port = ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
2323 /* we only support tcp and tcp6 */
2328 snprintf(portbuf, sizeof(portbuf), ".%u.%u", port >> 8, port & 0xff);
2329 len = strlcat(addrbuf, portbuf, sizeof(addrbuf));
2331 p = xdr_reserve_space(xdr, 4 + netid_len);
2332 xdr_encode_opaque(p, netid, netid_len);
2334 p = xdr_reserve_space(xdr, 4 + len);
2335 xdr_encode_opaque(p, addrbuf, len);
2339 ff_layout_encode_nfstime(struct xdr_stream *xdr,
2342 struct timespec64 ts;
2345 p = xdr_reserve_space(xdr, 12);
2346 ts = ktime_to_timespec64(t);
2347 p = xdr_encode_hyper(p, ts.tv_sec);
2348 *p++ = cpu_to_be32(ts.tv_nsec);
2352 ff_layout_encode_io_latency(struct xdr_stream *xdr,
2353 struct nfs4_ff_io_stat *stat)
2357 p = xdr_reserve_space(xdr, 5 * 8);
2358 p = xdr_encode_hyper(p, stat->ops_requested);
2359 p = xdr_encode_hyper(p, stat->bytes_requested);
2360 p = xdr_encode_hyper(p, stat->ops_completed);
2361 p = xdr_encode_hyper(p, stat->bytes_completed);
2362 p = xdr_encode_hyper(p, stat->bytes_not_delivered);
2363 ff_layout_encode_nfstime(xdr, stat->total_busy_time);
2364 ff_layout_encode_nfstime(xdr, stat->aggregate_completion_time);
2368 ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
2369 const struct nfs42_layoutstat_devinfo *devinfo,
2370 struct nfs4_ff_layout_mirror *mirror)
2372 struct nfs4_pnfs_ds_addr *da;
2373 struct nfs4_pnfs_ds *ds = mirror->mirror_ds->ds;
2374 struct nfs_fh *fh = &mirror->fh_versions[0];
2377 da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node);
2378 dprintk("%s: DS %s: encoding address %s\n",
2379 __func__, ds->ds_remotestr, da->da_remotestr);
2381 ff_layout_encode_netaddr(xdr, da);
2383 p = xdr_reserve_space(xdr, 4 + fh->size);
2384 xdr_encode_opaque(p, fh->data, fh->size);
2385 /* ff_io_latency4 read */
2386 spin_lock(&mirror->lock);
2387 ff_layout_encode_io_latency(xdr, &mirror->read_stat.io_stat);
2388 /* ff_io_latency4 write */
2389 ff_layout_encode_io_latency(xdr, &mirror->write_stat.io_stat);
2390 spin_unlock(&mirror->lock);
2392 ff_layout_encode_nfstime(xdr, ktime_sub(ktime_get(), mirror->start_time));
2394 p = xdr_reserve_space(xdr, 4);
2395 *p = cpu_to_be32(false);
2399 ff_layout_encode_layoutstats(struct xdr_stream *xdr, const void *args,
2400 const struct nfs4_xdr_opaque_data *opaque)
2402 struct nfs42_layoutstat_devinfo *devinfo = container_of(opaque,
2403 struct nfs42_layoutstat_devinfo, ld_private);
2406 /* layoutupdate length */
2407 start = xdr_reserve_space(xdr, 4);
2408 ff_layout_encode_ff_layoutupdate(xdr, devinfo, opaque->data);
2410 *start = cpu_to_be32((xdr->p - start - 1) * 4);
2414 ff_layout_free_layoutstats(struct nfs4_xdr_opaque_data *opaque)
2416 struct nfs4_ff_layout_mirror *mirror = opaque->data;
2418 ff_layout_put_mirror(mirror);
2421 static const struct nfs4_xdr_opaque_ops layoutstat_ops = {
2422 .encode = ff_layout_encode_layoutstats,
2423 .free = ff_layout_free_layoutstats,
2427 ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
2428 struct nfs42_layoutstat_devinfo *devinfo,
2431 struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
2432 struct nfs4_ff_layout_mirror *mirror;
2433 struct nfs4_deviceid_node *dev;
2436 list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
2439 if (IS_ERR_OR_NULL(mirror->mirror_ds))
2441 if (!test_and_clear_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags))
2443 /* mirror refcount put in cleanup_layoutstats */
2444 if (!refcount_inc_not_zero(&mirror->ref))
2446 dev = &mirror->mirror_ds->id_node;
2447 memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE);
2448 devinfo->offset = 0;
2449 devinfo->length = NFS4_MAX_UINT64;
2450 spin_lock(&mirror->lock);
2451 devinfo->read_count = mirror->read_stat.io_stat.ops_completed;
2452 devinfo->read_bytes = mirror->read_stat.io_stat.bytes_completed;
2453 devinfo->write_count = mirror->write_stat.io_stat.ops_completed;
2454 devinfo->write_bytes = mirror->write_stat.io_stat.bytes_completed;
2455 spin_unlock(&mirror->lock);
2456 devinfo->layout_type = LAYOUT_FLEX_FILES;
2457 devinfo->ld_private.ops = &layoutstat_ops;
2458 devinfo->ld_private.data = mirror;
2467 ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
2469 struct nfs4_flexfile_layout *ff_layout;
2470 const int dev_count = PNFS_LAYOUTSTATS_MAXDEV;
2472 /* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */
2473 args->devinfo = kmalloc_array(dev_count, sizeof(*args->devinfo), GFP_NOIO);
2477 spin_lock(&args->inode->i_lock);
2478 ff_layout = FF_LAYOUT_FROM_HDR(NFS_I(args->inode)->layout);
2479 args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
2480 &args->devinfo[0], dev_count);
2481 spin_unlock(&args->inode->i_lock);
2482 if (!args->num_dev) {
2483 kfree(args->devinfo);
2484 args->devinfo = NULL;
2492 ff_layout_set_layoutdriver(struct nfs_server *server,
2493 const struct nfs_fh *dummy)
2495 #if IS_ENABLED(CONFIG_NFS_V4_2)
2496 server->caps |= NFS_CAP_LAYOUTSTATS;
2501 static struct pnfs_layoutdriver_type flexfilelayout_type = {
2502 .id = LAYOUT_FLEX_FILES,
2503 .name = "LAYOUT_FLEX_FILES",
2504 .owner = THIS_MODULE,
2505 .flags = PNFS_LAYOUTGET_ON_OPEN,
2506 .max_layoutget_response = 4096, /* 1 page or so... */
2507 .set_layoutdriver = ff_layout_set_layoutdriver,
2508 .alloc_layout_hdr = ff_layout_alloc_layout_hdr,
2509 .free_layout_hdr = ff_layout_free_layout_hdr,
2510 .alloc_lseg = ff_layout_alloc_lseg,
2511 .free_lseg = ff_layout_free_lseg,
2512 .add_lseg = ff_layout_add_lseg,
2513 .pg_read_ops = &ff_layout_pg_read_ops,
2514 .pg_write_ops = &ff_layout_pg_write_ops,
2515 .get_ds_info = ff_layout_get_ds_info,
2516 .release_ds_info = ff_layout_release_ds_info,
2517 .free_deviceid_node = ff_layout_free_deviceid_node,
2518 .mark_request_commit = pnfs_layout_mark_request_commit,
2519 .clear_request_commit = pnfs_generic_clear_request_commit,
2520 .scan_commit_lists = pnfs_generic_scan_commit_lists,
2521 .recover_commit_reqs = pnfs_generic_recover_commit_reqs,
2522 .commit_pagelist = ff_layout_commit_pagelist,
2523 .read_pagelist = ff_layout_read_pagelist,
2524 .write_pagelist = ff_layout_write_pagelist,
2525 .alloc_deviceid_node = ff_layout_alloc_deviceid_node,
2526 .prepare_layoutreturn = ff_layout_prepare_layoutreturn,
2527 .sync = pnfs_nfs_generic_sync,
2528 .prepare_layoutstats = ff_layout_prepare_layoutstats,
2531 static int __init nfs4flexfilelayout_init(void)
2533 printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
2535 return pnfs_register_layoutdriver(&flexfilelayout_type);
2538 static void __exit nfs4flexfilelayout_exit(void)
2540 printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
2542 pnfs_unregister_layoutdriver(&flexfilelayout_type);
2545 MODULE_ALIAS("nfs-layouttype4-4");
2547 MODULE_LICENSE("GPL");
2548 MODULE_DESCRIPTION("The NFSv4 flexfile layout driver");
2550 module_init(nfs4flexfilelayout_init);
2551 module_exit(nfs4flexfilelayout_exit);
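/*
 * io_maxretrans can also be tuned at runtime through sysfs, e.g.
 * (assuming the module is loaded as nfs_layout_flexfiles):
 *   echo 3 > /sys/module/nfs_layout_flexfiles/parameters/io_maxretrans
 */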
2553 module_param(io_maxretrans, ushort, 0644);
2554 MODULE_PARM_DESC(io_maxretrans, "The number of times the NFSv4.1 client "
2555 "retries an I/O request before returning an error. ");