4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2015, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
32 * lustre/lov/lov_pack.c
34 * (Un)packing of OST/MDS requests
36 * Author: Andreas Dilger <adilger@clusterfs.com>
39 #define DEBUG_SUBSYSTEM S_LOV
41 #include "../include/lustre/lustre_idl.h"
42 #include "../include/lustre/lustre_user.h"
44 #include "../include/lustre_net.h"
45 #include "../include/lustre_swab.h"
46 #include "../include/obd.h"
47 #include "../include/obd_class.h"
48 #include "../include/obd_support.h"
50 #include "lov_cl_internal.h"
51 #include "lov_internal.h"
/*
 * lov_dump_lmm_common() - log the header fields shared by every on-disk
 * LOV layout (lmm) variant at debug level @level.
 *
 * @lmmp points at the little-endian on-disk image, so each field is
 * byte-swapped (le*_to_cpu()/lmm_oi_le_to_cpu()) before printing.
 */
53 void lov_dump_lmm_common(int level, void *lmmp)
55 struct lov_mds_md *lmm = lmmp;
/* object id is stored LE on disk; swab into a local copy for printing */
58 lmm_oi_le_to_cpu(&oi, &lmm->lmm_oi);
59 CDEBUG(level, "objid "DOSTID", magic 0x%08x, pattern %#x\n",
60 POSTID(&oi), le32_to_cpu(lmm->lmm_magic),
61 le32_to_cpu(lmm->lmm_pattern));
62 CDEBUG(level, "stripe_size %u, stripe_count %u, layout_gen %u\n",
63 le32_to_cpu(lmm->lmm_stripe_size),
64 le16_to_cpu(lmm->lmm_stripe_count),
65 le16_to_cpu(lmm->lmm_layout_gen));
/*
 * lov_dump_lmm_objects() - log @stripe_count per-OST (stripe) entries of
 * an on-disk layout.  @lod entries are little-endian.  A count above
 * LOV_V1_INSANE_STRIPE_COUNT is treated as corruption: it is reported
 * rather than walked, so a bogus count cannot run off the buffer.
 */
68 static void lov_dump_lmm_objects(int level, struct lov_ost_data *lod,
/* sanity-check the count before iterating over the entries */
73 if (stripe_count > LOV_V1_INSANE_STRIPE_COUNT) {
74 CDEBUG(level, "bad stripe_count %u > max_stripe_count %u\n",
75 stripe_count, LOV_V1_INSANE_STRIPE_COUNT);
79 for (i = 0; i < stripe_count; ++i, ++lod) {
/* swab the per-stripe object id out of the LE on-disk entry */
82 ostid_le_to_cpu(&lod->l_ost_oi, &oi);
83 CDEBUG(level, "stripe %u idx %u subobj "DOSTID"\n", i,
84 le32_to_cpu(lod->l_ost_idx), POSTID(&oi));
/* Dump a v1 layout: the common header followed by its per-OST entries. */
88 void lov_dump_lmm_v1(int level, struct lov_mds_md_v1 *lmm)
90 lov_dump_lmm_common(level, lmm);
91 lov_dump_lmm_objects(level, lmm->lmm_objects,
92 le16_to_cpu(lmm->lmm_stripe_count));
/* Dump a v3 layout: like v1 but also prints the OST pool name. */
95 void lov_dump_lmm_v3(int level, struct lov_mds_md_v3 *lmm)
97 lov_dump_lmm_common(level, lmm);
98 CDEBUG(level, "pool_name "LOV_POOLNAMEF"\n", lmm->lmm_pool_name);
99 lov_dump_lmm_objects(level, lmm->lmm_objects,
100 le16_to_cpu(lmm->lmm_stripe_count));
104 * Pack LOV striping metadata for disk storage format (in little
105 * endian byte order).
107 * This follows the getxattr() conventions. If \a buf_size is zero
108 * then return the size needed. If \a buf_size is too small then
109 * return -ERANGE. Otherwise return the size of the result.
111 ssize_t lov_lsm_pack(const struct lov_stripe_md *lsm, void *buf,
/* v1 and v3 views of the same buffer; which one is valid depends on
 * lsm->lsm_magic (v3 = v1 header + pool name before the objects array) */
114 struct lov_ost_data_v1 *lmm_objects;
115 struct lov_mds_md_v1 *lmmv1 = buf;
116 struct lov_mds_md_v3 *lmmv3 = buf;
/* size the on-disk blob from the stripe count and the layout magic */
120 lmm_size = lov_mds_md_size(lsm->lsm_stripe_count, lsm->lsm_magic);
/* caller's buffer must hold the whole layout (per the -ERANGE contract
 * above; the return statement is on a line not shown in this extract) */
124 if (buf_size < lmm_size)
128 * lmmv1 and lmmv3 point to the same struct and have the
/* fill the common header, converting CPU byte order -> LE for disk */
131 lmmv1->lmm_magic = cpu_to_le32(lsm->lsm_magic);
132 lmm_oi_cpu_to_le(&lmmv1->lmm_oi, &lsm->lsm_oi);
133 lmmv1->lmm_stripe_size = cpu_to_le32(lsm->lsm_stripe_size);
134 lmmv1->lmm_stripe_count = cpu_to_le16(lsm->lsm_stripe_count);
135 lmmv1->lmm_pattern = cpu_to_le32(lsm->lsm_pattern);
136 lmmv1->lmm_layout_gen = cpu_to_le16(lsm->lsm_layout_gen);
/* only v3 carries a pool name; its objects array sits after the name */
138 if (lsm->lsm_magic == LOV_MAGIC_V3) {
139 CLASSERT(sizeof(lsm->lsm_pool_name) ==
140 sizeof(lmmv3->lmm_pool_name));
141 strlcpy(lmmv3->lmm_pool_name, lsm->lsm_pool_name,
142 sizeof(lmmv3->lmm_pool_name));
143 lmm_objects = lmmv3->lmm_objects;
145 lmm_objects = lmmv1->lmm_objects;
/* emit one little-endian per-OST entry per stripe */
148 for (i = 0; i < lsm->lsm_stripe_count; i++) {
149 struct lov_oinfo *loi = lsm->lsm_oinfo[i];
151 ostid_cpu_to_le(&loi->loi_oi, &lmm_objects[i].l_ost_oi);
152 lmm_objects[i].l_ost_gen = cpu_to_le32(loi->loi_ost_gen);
153 lmm_objects[i].l_ost_idx = cpu_to_le32(loi->loi_ost_idx);
159 /* Find the max stripecount we should use */
/*
 * Returns the stripe count to actually use: the caller's request (or the
 * filesystem default), clamped to the active OST count and to what the
 * MDS extended attribute can hold for this layout @magic.
 */
160 __u16 lov_get_stripecnt(struct lov_obd *lov, __u32 magic, __u16 stripe_count)
162 __u32 max_stripes = LOV_MAX_STRIPE_COUNT_OLD;
/* NOTE(review): the guard selecting the default (presumably when the
 * requested stripe_count is 0) is on a line not shown in this extract */
165 stripe_count = lov->desc.ld_default_stripe_count;
/* never stripe over more OSTs than are currently active */
166 if (stripe_count > lov->desc.ld_active_tgt_count)
167 stripe_count = lov->desc.ld_active_tgt_count;
171 /* stripe count is based on whether ldiskfs can handle
/* if the MDS advertised its max EA size, derive the real stripe limit
 * from it instead of the conservative LOV_MAX_STRIPE_COUNT_OLD default */
174 if (lov->lov_ocd.ocd_connect_flags & OBD_CONNECT_MAX_EASIZE &&
175 lov->lov_ocd.ocd_max_easize)
176 max_stripes = lov_mds_md_max_stripe_count(
177 lov->lov_ocd.ocd_max_easize, magic);
/* clamp the request to what the EA can actually hold */
179 if (stripe_count > max_stripes)
180 stripe_count = max_stripes;
/*
 * Validate an on-disk layout buffer before unpacking.  The (LE) magic at
 * the start of @lmm selects the per-magic operations table; an unknown
 * magic is logged together with a hex dump of the whole buffer.
 */
185 static int lov_verify_lmm(void *lmm, int lmm_bytes, __u16 *stripe_count)
189 if (!lsm_op_find(le32_to_cpu(*(__u32 *)lmm))) {
190 CERROR("bad disk LOV MAGIC: 0x%08X; dumping LMM (size=%d):\n",
191 le32_to_cpu(*(__u32 *)lmm), lmm_bytes);
/* %*phN prints the buffer as a hex dump for post-mortem analysis */
192 CERROR("%*phN\n", lmm_bytes, lmm);
/* delegate the detailed check to the magic-specific verifier */
195 rc = lsm_op_find(le32_to_cpu(*(__u32 *)lmm))->lsm_lmm_verify(lmm,
/*
 * Allocate and minimally initialize an in-core stripe descriptor (lsm)
 * with one reference held by the caller.  Each per-stripe lov_oinfo slot
 * is loi_init()ed; returns ERR_PTR(-ENOMEM) if allocation fails.
 */
201 struct lov_stripe_md *lov_lsm_alloc(u16 stripe_count, u32 pattern, u32 magic)
203 struct lov_stripe_md *lsm;
206 CDEBUG(D_INFO, "alloc lsm, stripe_count %u\n", stripe_count);
208 lsm = lsm_alloc_plain(stripe_count);
210 CERROR("cannot allocate LSM stripe_count %u\n", stripe_count);
211 return ERR_PTR(-ENOMEM);
/* caller owns the initial reference; dropped via lov_free_memmd() */
214 atomic_set(&lsm->lsm_refc, 1);
215 spin_lock_init(&lsm->lsm_lock);
216 lsm->lsm_magic = magic;
217 lsm->lsm_stripe_count = stripe_count;
/* per-stripe size limit scales linearly with the stripe count */
218 lsm->lsm_maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES * stripe_count;
219 lsm->lsm_pattern = pattern;
220 lsm->lsm_pool_name[0] = '\0';
221 lsm->lsm_layout_gen = 0;
/* ~0 appears to mark stripe 0's OST index as "not assigned yet" --
 * TODO(review) confirm against the consumers of loi_ost_idx */
222 if (stripe_count > 0)
223 lsm->lsm_oinfo[0]->loi_ost_idx = ~0;
225 for (i = 0; i < stripe_count; i++)
226 loi_init(lsm->lsm_oinfo[i]);
/*
 * Drop one reference on *lsmp; the magic-specific lsm_free() method
 * releases the descriptor once the refcount reaches zero.
 */
231 int lov_free_memmd(struct lov_stripe_md **lsmp)
233 struct lov_stripe_md *lsm = *lsmp;
/* catch refcount underflow before decrementing */
237 LASSERT(atomic_read(&lsm->lsm_refc) > 0);
238 refc = atomic_dec_return(&lsm->lsm_refc);
/* NOTE(review): this free is presumably guarded by a refc == 0 test on
 * a line not shown in this extract -- confirm in the full source */
240 lsm_op_find(lsm->lsm_magic)->lsm_free(lsm);
245 /* Unpack LOV object metadata from disk storage. It is packed in LE byte
246 * order and is opaque to the networking layer.
/*
 * Returns a freshly allocated in-core lsm built from the on-disk @lmm,
 * or an error pointer.  The caller owns the returned reference.
 */
248 struct lov_stripe_md *lov_unpackmd(struct lov_obd *lov, struct lov_mds_md *lmm,
251 struct lov_stripe_md *lsm;
/* reject corrupt/unknown layouts and learn the stripe count */
257 rc = lov_verify_lmm(lmm, lmm_size, &stripe_count);
261 magic = le32_to_cpu(lmm->lmm_magic);
262 pattern = le32_to_cpu(lmm->lmm_pattern);
264 lsm = lov_lsm_alloc(stripe_count, pattern, magic);
/* verification above guarantees a known magic, so the lookup must hit */
268 LASSERT(lsm_op_find(magic));
269 rc = lsm_op_find(magic)->lsm_unpackmd(lov, lsm, lmm);
/* on unpack failure drop our reference so the lsm is released */
271 lov_free_memmd(&lsm);
278 /* Retrieve object striping information.
280 * @lump is a pointer to an in-core struct with lmm_ost_count indicating
281 * the maximum number of OST indices which will fit in the user buffer.
282 * lmm_magic must be LOV_USER_MAGIC.
/*
 * NOTE: this function continues beyond the end of this extract; several
 * error paths and the final cleanup are not visible here.
 */
284 int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm,
285 struct lov_user_md __user *lump)
287 /* we use lov_user_md_v3 because it is larger than lov_user_md_v1 */
288 struct lov_user_md_v3 lum;
289 struct lov_mds_md *lmmk;
301 * "Switch to kernel segment" to allow copying from kernel space by
302 * copy_{to,from}_user().
/* only v1/v3 in-core layouts can be translated for user space */
307 if (lsm->lsm_magic != LOV_MAGIC_V1 && lsm->lsm_magic != LOV_MAGIC_V3) {
308 CERROR("bad LSM MAGIC: 0x%08X != 0x%08X nor 0x%08X\n",
309 lsm->lsm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3);
/* released files report no stripes; otherwise use the real count */
314 if (!lsm_is_released(lsm))
315 stripe_count = lsm->lsm_stripe_count;
319 /* we only need the header part from user space to get lmm_magic and
320 * lmm_stripe_count, (the header part is common to v1 and v3)
322 lum_size = sizeof(struct lov_user_md_v1);
323 if (copy_from_user(&lum, lump, lum_size)) {
/* only the user-space magics we know how to fill are accepted */
327 if (lum.lmm_magic != LOV_USER_MAGIC_V1 &&
328 lum.lmm_magic != LOV_USER_MAGIC_V3 &&
329 lum.lmm_magic != LOV_USER_MAGIC_SPECIFIC) {
/* user buffer too small for all stripes: report the needed count back
 * in the header so the caller can retry with a bigger buffer */
334 if (lum.lmm_stripe_count &&
335 (lum.lmm_stripe_count < lsm->lsm_stripe_count)) {
336 /* Return right size of stripe to user */
337 lum.lmm_stripe_count = stripe_count;
338 rc = copy_to_user(lump, &lum, lum_size);
/* build a kernel-side packed copy of the layout to hand to user space */
342 lmmk_size = lov_mds_md_size(stripe_count, lsm->lsm_magic);
345 lmmk = libcfs_kvzalloc(lmmk_size, GFP_NOFS);
351 lmm_size = lov_lsm_pack(lsm, lmmk, lmmk_size);
357 /* FIXME: Bug 1185 - copy fields properly when structs change */
358 /* struct lov_user_md_v3 and struct lov_mds_md_v3 must be the same */
359 CLASSERT(sizeof(lum) == sizeof(struct lov_mds_md_v3));
360 CLASSERT(sizeof(lum.lmm_objects[0]) == sizeof(lmmk->lmm_objects[0]));
/* lov_lsm_pack() produced LE data; on big-endian hosts swab it back to
 * CPU order before fixing up and copying to user space */
362 if (cpu_to_le32(LOV_MAGIC) != LOV_MAGIC &&
363 (lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V1) ||
364 lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V3))) {
365 lustre_swab_lov_mds_md(lmmk);
366 lustre_swab_lov_user_md_objects(
367 (struct lov_user_ost_data *)lmmk->lmm_objects,
368 lmmk->lmm_stripe_count);
371 if (lum.lmm_magic == LOV_USER_MAGIC) {
372 /* User request for v1, we need skip lmm_pool_name */
/* slide the objects array over the pool name to turn v3 into v1 */
373 if (lmmk->lmm_magic == LOV_MAGIC_V3) {
374 memmove(((struct lov_mds_md_v1 *)lmmk)->lmm_objects,
375 ((struct lov_mds_md_v3 *)lmmk)->lmm_objects,
376 lmmk->lmm_stripe_count *
377 sizeof(struct lov_ost_data_v1));
378 lmm_size -= LOV_MAXPOOLNAME;
381 /* if v3 we just have to update the lum_size */
382 lum_size = sizeof(struct lov_user_md_v3);
385 /* User wasn't expecting this many OST entries */
386 if (lum.lmm_stripe_count == 0) {
388 } else if (lum.lmm_stripe_count < lmmk->lmm_stripe_count) {
393 * Have a difference between lov_mds_md & lov_user_md.
394 * So we have to re-order the data before copy to user.
/* patch the user-visible header fields into the packed buffer */
396 lum.lmm_stripe_count = lmmk->lmm_stripe_count;
397 lum.lmm_layout_gen = lmmk->lmm_layout_gen;
398 ((struct lov_user_md *)lmmk)->lmm_layout_gen = lum.lmm_layout_gen;
399 ((struct lov_user_md *)lmmk)->lmm_stripe_count = lum.lmm_stripe_count;
400 if (copy_to_user(lump, lmmk, lmm_size))