1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2015, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lustre/llite/llite_lib.c
33  *
34  * Lustre Light Super operations
35  */
36
37 #define DEBUG_SUBSYSTEM S_LLITE
38
39 #include <linux/module.h>
40 #include <linux/statfs.h>
41 #include <linux/types.h>
42 #include <linux/mm.h>
43
44 #include "../include/lustre/lustre_ioctl.h"
45 #include "../include/lustre_lite.h"
46 #include "../include/lustre_ha.h"
47 #include "../include/lustre_dlm.h"
48 #include "../include/lprocfs_status.h"
49 #include "../include/lustre_disk.h"
50 #include "../include/lustre_param.h"
51 #include "../include/lustre_log.h"
52 #include "../include/cl_object.h"
53 #include "../include/obd_cksum.h"
54 #include "llite_internal.h"
55
56 struct kmem_cache *ll_file_data_slab;
57 struct dentry *llite_root;
58 struct kset *llite_kset;
59
60 #ifndef log2
61 #define log2(n) ffz(~(n))
62 #endif
63
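/*
 * Allocate and initialize the Lustre-specific superblock info (ll_sb_info):
 * locks, a client LRU page cache sized to half of low memory, read-ahead
 * defaults, a freshly generated UUID, the default mount flags (verbose,
 * checksum, LRU resize, AGL), statahead defaults and root-squash state.
 * Returns NULL on allocation failure.
 */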
64 static struct ll_sb_info *ll_init_sbi(struct super_block *sb)
65 {
66         struct ll_sb_info *sbi = NULL;
67         unsigned long pages;
68         unsigned long lru_page_max;
69         struct sysinfo si;
70         class_uuid_t uuid;
71         int i;
72
73         sbi = kzalloc(sizeof(*sbi), GFP_NOFS);
74         if (!sbi)
75                 return NULL;
76
77         spin_lock_init(&sbi->ll_lock);
78         mutex_init(&sbi->ll_lco.lco_lock);
79         spin_lock_init(&sbi->ll_pp_extent_lock);
80         spin_lock_init(&sbi->ll_process_lock);
81         sbi->ll_rw_stats_on = 0;
82
83         si_meminfo(&si);
84         pages = si.totalram - si.totalhigh;
85         lru_page_max = pages / 2;
86
87         sbi->ll_cache = cl_cache_init(lru_page_max);
88         if (!sbi->ll_cache) {
89                 kfree(sbi);
90                 return NULL;
91         }
92
93         sbi->ll_ra_info.ra_max_pages_per_file = min(pages / 32,
94                                            SBI_DEFAULT_READAHEAD_MAX);
95         sbi->ll_ra_info.ra_max_pages = sbi->ll_ra_info.ra_max_pages_per_file;
96         sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
97                                            SBI_DEFAULT_READAHEAD_WHOLE_MAX;
98
99         ll_generate_random_uuid(uuid);
100         class_uuid_unparse(uuid, &sbi->ll_sb_uuid);
101         CDEBUG(D_CONFIG, "generated uuid: %s\n", sbi->ll_sb_uuid.uuid);
102
103         sbi->ll_flags |= LL_SBI_VERBOSE;
104         sbi->ll_flags |= LL_SBI_CHECKSUM;
105
106         sbi->ll_flags |= LL_SBI_LRU_RESIZE;
107
108         for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
109                 spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
110                                pp_r_hist.oh_lock);
111                 spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
112                                pp_w_hist.oh_lock);
113         }
114
115         /* metadata statahead is enabled by default */
116         sbi->ll_sa_max = LL_SA_RPC_DEF;
117         atomic_set(&sbi->ll_sa_total, 0);
118         atomic_set(&sbi->ll_sa_wrong, 0);
119         atomic_set(&sbi->ll_agl_total, 0);
120         sbi->ll_flags |= LL_SBI_AGL_ENABLED;
121
122         /* root squash */
123         sbi->ll_squash.rsi_uid = 0;
124         sbi->ll_squash.rsi_gid = 0;
125         INIT_LIST_HEAD(&sbi->ll_squash.rsi_nosquash_nids);
126         init_rwsem(&sbi->ll_squash.rsi_sem);
127
128         sbi->ll_sb = sb;
129
130         return sbi;
131 }
132
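/*
 * Undo ll_init_sbi(): free the nosquash NID list, drop the reference on the
 * client page cache and free the ll_sb_info itself.
 */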
133 static void ll_free_sbi(struct super_block *sb)
134 {
135         struct ll_sb_info *sbi = ll_s2sbi(sb);
136
137         if (sbi->ll_cache) {
138                 if (!list_empty(&sbi->ll_squash.rsi_nosquash_nids))
139                         cfs_free_nidlist(&sbi->ll_squash.rsi_nosquash_nids);
140                 cl_cache_decref(sbi->ll_cache);
141                 sbi->ll_cache = NULL;
142         }
143
144         kfree(sbi);
145 }
146
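/*
 * Core of the client mount: connect to the metadata (md) and data (dt) obd
 * devices with the negotiated connect flags, verify that the server provides
 * the required features, fill in the VFS superblock from the MDT statfs
 * reply, fetch and instantiate the root inode, start the close thread and
 * register the mount in debugfs.  On failure, the connections and FID
 * infrastructure set up so far are torn down again.
 */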
147 static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
148                                     struct vfsmount *mnt)
149 {
150         struct inode *root = NULL;
151         struct ll_sb_info *sbi = ll_s2sbi(sb);
152         struct obd_device *obd;
153         struct obd_statfs *osfs = NULL;
154         struct ptlrpc_request *request = NULL;
155         struct obd_connect_data *data = NULL;
156         struct obd_uuid *uuid;
157         struct md_op_data *op_data;
158         struct lustre_md lmd;
159         u64 valid;
160         int size, err, checksum;
161
162         obd = class_name2obd(md);
163         if (!obd) {
164                 CERROR("MD %s: not setup or attached\n", md);
165                 return -EINVAL;
166         }
167
168         data = kzalloc(sizeof(*data), GFP_NOFS);
169         if (!data)
170                 return -ENOMEM;
171
172         osfs = kzalloc(sizeof(*osfs), GFP_NOFS);
173         if (!osfs) {
174                 kfree(data);
175                 return -ENOMEM;
176         }
177
178         /* indicate the features supported by this client */
179         data->ocd_connect_flags = OBD_CONNECT_IBITS    | OBD_CONNECT_NODEVOH  |
180                                   OBD_CONNECT_ATTRFID  |
181                                   OBD_CONNECT_VERSION  | OBD_CONNECT_BRW_SIZE |
182                                   OBD_CONNECT_CANCELSET | OBD_CONNECT_FID     |
183                                   OBD_CONNECT_AT       | OBD_CONNECT_LOV_V3   |
184                                   OBD_CONNECT_VBR       | OBD_CONNECT_FULL20  |
185                                   OBD_CONNECT_64BITHASH |
186                                   OBD_CONNECT_EINPROGRESS |
187                                   OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
188                                   OBD_CONNECT_LAYOUTLOCK |
189                                   OBD_CONNECT_PINGLESS |
190                                   OBD_CONNECT_MAX_EASIZE |
191                                   OBD_CONNECT_FLOCK_DEAD |
192                                   OBD_CONNECT_DISP_STRIPE | OBD_CONNECT_LFSCK |
193                                   OBD_CONNECT_OPEN_BY_FID |
194                                   OBD_CONNECT_DIR_STRIPE;
195
196         if (sbi->ll_flags & LL_SBI_SOM_PREVIEW)
197                 data->ocd_connect_flags |= OBD_CONNECT_SOM;
198
199         if (sbi->ll_flags & LL_SBI_LRU_RESIZE)
200                 data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
201 #ifdef CONFIG_FS_POSIX_ACL
202         data->ocd_connect_flags |= OBD_CONNECT_ACL | OBD_CONNECT_UMASK;
203 #endif
204
205         if (OBD_FAIL_CHECK(OBD_FAIL_MDC_LIGHTWEIGHT))
206                 /* flag mdc connection as lightweight, only used for test
207                  * purposes; use with care
208                  */
209                 data->ocd_connect_flags |= OBD_CONNECT_LIGHTWEIGHT;
210
211         data->ocd_ibits_known = MDS_INODELOCK_FULL;
212         data->ocd_version = LUSTRE_VERSION_CODE;
213
214         if (sb->s_flags & MS_RDONLY)
215                 data->ocd_connect_flags |= OBD_CONNECT_RDONLY;
216         if (sbi->ll_flags & LL_SBI_USER_XATTR)
217                 data->ocd_connect_flags |= OBD_CONNECT_XATTR;
218
219         if (sbi->ll_flags & LL_SBI_FLOCK)
220                 sbi->ll_fop = &ll_file_operations_flock;
221         else if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
222                 sbi->ll_fop = &ll_file_operations;
223         else
224                 sbi->ll_fop = &ll_file_operations_noflock;
225
226         /* real client */
227         data->ocd_connect_flags |= OBD_CONNECT_REAL;
228
229         data->ocd_brw_size = MD_MAX_BRW_SIZE;
230
231         err = obd_connect(NULL, &sbi->ll_md_exp, obd, &sbi->ll_sb_uuid,
232                           data, NULL);
233         if (err == -EBUSY) {
234                 LCONSOLE_ERROR_MSG(0x14f, "An MDT (md %s) is performing recovery, of which this client is not a part. Please wait for recovery to complete, abort, or time out.\n",
235                                    md);
236                 goto out;
237         } else if (err) {
238                 CERROR("cannot connect to %s: rc = %d\n", md, err);
239                 goto out;
240         }
241
242         sbi->ll_md_exp->exp_connect_data = *data;
243
244         err = obd_fid_init(sbi->ll_md_exp->exp_obd, sbi->ll_md_exp,
245                            LUSTRE_SEQ_METADATA);
246         if (err) {
247                 CERROR("%s: Can't init metadata layer FID infrastructure, rc = %d\n",
248                        sbi->ll_md_exp->exp_obd->obd_name, err);
249                 goto out_md;
250         }
251
252         /* For mount, we only need fs info from MDT0; in DNE this also
253          * ensures that the client can be mounted as long as MDT0 is
254          * available
255          */
256         err = obd_statfs(NULL, sbi->ll_md_exp, osfs,
257                          cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
258                          OBD_STATFS_FOR_MDT0);
259         if (err)
260                 goto out_md_fid;
261
262         /* This needs to be after statfs to ensure connect has finished.
263          * Note that "data" does NOT contain the valid connect reply.
264          * If connecting to a 1.8 server there will be no LMV device, so
265          * we can access the MDC export directly and exp_connect_flags will
266          * be non-zero, but if accessing an upgraded 2.1 server it will
267          * have the correct flags filled in.
268          * XXX: fill in the LMV exp_connect_flags from MDC(s).
269          */
270         valid = exp_connect_flags(sbi->ll_md_exp) & CLIENT_CONNECT_MDT_REQD;
271         if (exp_connect_flags(sbi->ll_md_exp) != 0 &&
272             valid != CLIENT_CONNECT_MDT_REQD) {
273                 char *buf;
274
275                 buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
276                 if (!buf) {
277                         err = -ENOMEM;
278                         goto out_md_fid;
279                 }
280                 obd_connect_flags2str(buf, PAGE_SIZE,
281                                       valid ^ CLIENT_CONNECT_MDT_REQD, ",");
282                 LCONSOLE_ERROR_MSG(0x170, "Server %s does not support feature(s) needed for correct operation of this client (%s). Please upgrade server or downgrade client.\n",
283                                    sbi->ll_md_exp->exp_obd->obd_name, buf);
284                 kfree(buf);
285                 err = -EPROTO;
286                 goto out_md_fid;
287         }
288
289         size = sizeof(*data);
290         err = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_CONN_DATA),
291                            KEY_CONN_DATA,  &size, data, NULL);
292         if (err) {
293                 CERROR("%s: Get connect data failed: rc = %d\n",
294                        sbi->ll_md_exp->exp_obd->obd_name, err);
295                 goto out_md_fid;
296         }
297
298         LASSERT(osfs->os_bsize);
299         sb->s_blocksize = osfs->os_bsize;
300         sb->s_blocksize_bits = log2(osfs->os_bsize);
301         sb->s_magic = LL_SUPER_MAGIC;
302         sb->s_maxbytes = MAX_LFS_FILESIZE;
303         sbi->ll_namelen = osfs->os_namelen;
304
305         if ((sbi->ll_flags & LL_SBI_USER_XATTR) &&
306             !(data->ocd_connect_flags & OBD_CONNECT_XATTR)) {
307                 LCONSOLE_INFO("Disabling user_xattr feature because it is not supported on the server\n");
308                 sbi->ll_flags &= ~LL_SBI_USER_XATTR;
309         }
310
311         if (data->ocd_connect_flags & OBD_CONNECT_ACL) {
312                 sb->s_flags |= MS_POSIXACL;
313                 sbi->ll_flags |= LL_SBI_ACL;
314         } else {
315                 LCONSOLE_INFO("client wants to enable acl, but mdt does not support it\n");
316                 sb->s_flags &= ~MS_POSIXACL;
317                 sbi->ll_flags &= ~LL_SBI_ACL;
318         }
319
320         if (data->ocd_connect_flags & OBD_CONNECT_64BITHASH)
321                 sbi->ll_flags |= LL_SBI_64BIT_HASH;
322
323         if (data->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
324                 sbi->ll_md_brw_pages = data->ocd_brw_size >> PAGE_SHIFT;
325         else
326                 sbi->ll_md_brw_pages = 1;
327
328         if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK)
329                 sbi->ll_flags |= LL_SBI_LAYOUT_LOCK;
330
331         if (data->ocd_ibits_known & MDS_INODELOCK_XATTR) {
332                 if (!(data->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)) {
333                         LCONSOLE_INFO(
334                                 "%s: disabling xattr cache due to unknown maximum xattr size.\n",
335                                 dt);
336                 } else {
337                         sbi->ll_flags |= LL_SBI_XATTR_CACHE;
338                         sbi->ll_xattr_cache_enabled = 1;
339                 }
340         }
341
342         obd = class_name2obd(dt);
343         if (!obd) {
344                 CERROR("DT %s: not setup or attached\n", dt);
345                 err = -ENODEV;
346                 goto out_md_fid;
347         }
348
349         data->ocd_connect_flags = OBD_CONNECT_GRANT     | OBD_CONNECT_VERSION  |
350                                   OBD_CONNECT_REQPORTAL | OBD_CONNECT_BRW_SIZE |
351                                   OBD_CONNECT_CANCELSET | OBD_CONNECT_FID      |
352                                   OBD_CONNECT_SRVLOCK   | OBD_CONNECT_TRUNCLOCK|
353                                   OBD_CONNECT_AT        | OBD_CONNECT_OSS_CAPA |
354                                   OBD_CONNECT_VBR       | OBD_CONNECT_FULL20   |
355                                   OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES |
356                                   OBD_CONNECT_EINPROGRESS |
357                                   OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
358                                   OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_PINGLESS;
359
360         if (sbi->ll_flags & LL_SBI_SOM_PREVIEW)
361                 data->ocd_connect_flags |= OBD_CONNECT_SOM;
362
363         if (!OBD_FAIL_CHECK(OBD_FAIL_OSC_CONNECT_CKSUM)) {
364                 /* OBD_CONNECT_CKSUM should always be set, even if checksums are
365                  * disabled by default, because it can still be enabled on the
366                  * fly via /sys. As a consequence, we still need to come to an
367                  * agreement on the supported algorithms at connect time
368                  */
369                 data->ocd_connect_flags |= OBD_CONNECT_CKSUM;
370
371                 if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CKSUM_ADLER_ONLY))
372                         data->ocd_cksum_types = OBD_CKSUM_ADLER;
373                 else
374                         data->ocd_cksum_types = cksum_types_supported_client();
375         }
376
377         data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
378
379         CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d ocd_grant: %d\n",
380                data->ocd_connect_flags,
381                data->ocd_version, data->ocd_grant);
382
383         obd->obd_upcall.onu_owner = &sbi->ll_lco;
384         obd->obd_upcall.onu_upcall = cl_ocd_update;
385
386         data->ocd_brw_size = DT_MAX_BRW_SIZE;
387
388         err = obd_connect(NULL, &sbi->ll_dt_exp, obd, &sbi->ll_sb_uuid, data,
389                           NULL);
390         if (err == -EBUSY) {
391                 LCONSOLE_ERROR_MSG(0x150, "An OST (dt %s) is performing recovery, of which this client is not a part.  Please wait for recovery to complete, abort, or time out.\n",
392                                    dt);
393                 goto out_md;
394         } else if (err) {
395                 CERROR("%s: Cannot connect to %s: rc = %d\n",
396                        sbi->ll_dt_exp->exp_obd->obd_name, dt, err);
397                 goto out_md;
398         }
399
400         sbi->ll_dt_exp->exp_connect_data = *data;
401
402         err = obd_fid_init(sbi->ll_dt_exp->exp_obd, sbi->ll_dt_exp,
403                            LUSTRE_SEQ_METADATA);
404         if (err) {
405                 CERROR("%s: Can't init data layer FID infrastructure, rc = %d\n",
406                        sbi->ll_dt_exp->exp_obd->obd_name, err);
407                 goto out_dt;
408         }
409
410         mutex_lock(&sbi->ll_lco.lco_lock);
411         sbi->ll_lco.lco_flags = data->ocd_connect_flags;
412         sbi->ll_lco.lco_md_exp = sbi->ll_md_exp;
413         sbi->ll_lco.lco_dt_exp = sbi->ll_dt_exp;
414         mutex_unlock(&sbi->ll_lco.lco_lock);
415
416         fid_zero(&sbi->ll_root_fid);
417         err = md_getstatus(sbi->ll_md_exp, &sbi->ll_root_fid);
418         if (err) {
419                 CERROR("md_getstatus failed: rc = %d\n", err);
420                 goto out_lock_cn_cb;
421         }
422         if (!fid_is_sane(&sbi->ll_root_fid)) {
423                 CERROR("%s: Invalid root fid "DFID" during mount\n",
424                        sbi->ll_md_exp->exp_obd->obd_name,
425                        PFID(&sbi->ll_root_fid));
426                 err = -EINVAL;
427                 goto out_lock_cn_cb;
428         }
429         CDEBUG(D_SUPER, "rootfid "DFID"\n", PFID(&sbi->ll_root_fid));
430
431         sb->s_op = &lustre_super_operations;
432         sb->s_xattr = ll_xattr_handlers;
433 #if THREAD_SIZE >= 8192 /*b=17630*/
434         sb->s_export_op = &lustre_export_operations;
435 #endif
436
437         /* make root inode
438          * XXX: move this to after cbd setup?
439          */
440         valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS | OBD_MD_FLMODEASIZE;
441         if (sbi->ll_flags & LL_SBI_ACL)
442                 valid |= OBD_MD_FLACL;
443
444         op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
445         if (!op_data) {
446                 err = -ENOMEM;
447                 goto out_lock_cn_cb;
448         }
449
450         op_data->op_fid1 = sbi->ll_root_fid;
451         op_data->op_mode = 0;
452         op_data->op_valid = valid;
453
454         err = md_getattr(sbi->ll_md_exp, op_data, &request);
455         kfree(op_data);
456         if (err) {
457                 CERROR("%s: md_getattr failed for root: rc = %d\n",
458                        sbi->ll_md_exp->exp_obd->obd_name, err);
459                 goto out_lock_cn_cb;
460         }
461
462         err = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
463                                sbi->ll_md_exp, &lmd);
464         if (err) {
465                 CERROR("failed to understand root inode md: rc = %d\n", err);
466                 ptlrpc_req_finished(request);
467                 goto out_lock_cn_cb;
468         }
469
470         LASSERT(fid_is_sane(&sbi->ll_root_fid));
471         root = ll_iget(sb, cl_fid_build_ino(&sbi->ll_root_fid,
472                                             sbi->ll_flags & LL_SBI_32BIT_API),
473                        &lmd);
474         md_free_lustre_md(sbi->ll_md_exp, &lmd);
475         ptlrpc_req_finished(request);
476
477         if (IS_ERR(root)) {
478                 if (lmd.lsm)
479                         obd_free_memmd(sbi->ll_dt_exp, &lmd.lsm);
480 #ifdef CONFIG_FS_POSIX_ACL
481                 if (lmd.posix_acl) {
482                         posix_acl_release(lmd.posix_acl);
483                         lmd.posix_acl = NULL;
484                 }
485 #endif
486                 err = -EBADF;
487                 CERROR("lustre_lite: bad iget4 for root\n");
488                 goto out_root;
489         }
490
491         err = ll_close_thread_start(&sbi->ll_lcq);
492         if (err) {
493                 CERROR("cannot start close thread: rc %d\n", err);
494                 goto out_root;
495         }
496
497         checksum = sbi->ll_flags & LL_SBI_CHECKSUM;
498         err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CHECKSUM),
499                                  KEY_CHECKSUM, sizeof(checksum), &checksum,
500                                  NULL);
501         if (err) {
502                 CERROR("%s: Set checksum failed: rc = %d\n",
503                        sbi->ll_dt_exp->exp_obd->obd_name, err);
504                 goto out_root;
505         }
506         cl_sb_init(sb);
507
508         err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CACHE_SET),
509                                  KEY_CACHE_SET, sizeof(*sbi->ll_cache),
510                                  sbi->ll_cache, NULL);
511         if (err) {
512                 CERROR("%s: Set cache_set failed: rc = %d\n",
513                        sbi->ll_dt_exp->exp_obd->obd_name, err);
514                 goto out_root;
515         }
516
517         sb->s_root = d_make_root(root);
518         if (!sb->s_root) {
519                 CERROR("%s: can't make root dentry\n",
520                        ll_get_fsname(sb, NULL, 0));
521                 err = -ENOMEM;
522                 goto out_lock_cn_cb;
523         }
524
525         sbi->ll_sdev_orig = sb->s_dev;
526
527         /* We set sb->s_dev equal on all lustre clients in order to support
528          * NFS export clustering.  NFSD requires that the FSID be the same
529          * on all clients.
530          */
531         /* s_dev is also used in lt_compare() to compare two fs, but that is
532          * only a node-local comparison.
533          */
534         uuid = obd_get_uuid(sbi->ll_md_exp);
535         if (uuid) {
536                 sb->s_dev = get_uuid2int(uuid->uuid, strlen(uuid->uuid));
537                 get_uuid2fsid(uuid->uuid, strlen(uuid->uuid), &sbi->ll_fsid);
538         }
539
540         kfree(data);
541         kfree(osfs);
542
543         if (llite_root) {
544                 err = ldebugfs_register_mountpoint(llite_root, sb, dt, md);
545                 if (err < 0) {
546                         CERROR("%s: could not register mount in debugfs: rc = %d\n",
547                                ll_get_fsname(sb, NULL, 0), err);
548                         err = 0;
549                 }
550         }
551
552         return err;
553 out_root:
554         iput(root);
555 out_lock_cn_cb:
556         obd_fid_fini(sbi->ll_dt_exp->exp_obd);
557 out_dt:
558         obd_disconnect(sbi->ll_dt_exp);
559         sbi->ll_dt_exp = NULL;
560 out_md_fid:
561         obd_fid_fini(sbi->ll_md_exp->exp_obd);
562 out_md:
563         obd_disconnect(sbi->ll_md_exp);
564         sbi->ll_md_exp = NULL;
565 out:
566         kfree(data);
567         kfree(osfs);
568         return err;
569 }
570
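/*
 * Ask the MDC export for the maximum metadata (EA) size; the companion
 * ll_get_default_mdsize() below returns the default EA size instead.
 */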
571 int ll_get_max_mdsize(struct ll_sb_info *sbi, int *lmmsize)
572 {
573         int size, rc;
574
575         *lmmsize = obd_size_diskmd(sbi->ll_dt_exp, NULL);
576         size = sizeof(int);
577         rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_MAX_EASIZE),
578                           KEY_MAX_EASIZE, &size, lmmsize, NULL);
579         if (rc)
580                 CERROR("Get max mdsize error rc %d\n", rc);
581
582         return rc;
583 }
584
585 int ll_get_default_mdsize(struct ll_sb_info *sbi, int *lmmsize)
586 {
587         int size, rc;
588
589         size = sizeof(int);
590         rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_DEFAULT_EASIZE),
591                           KEY_DEFAULT_EASIZE, &size, lmmsize, NULL);
592         if (rc)
593                 CERROR("Get default mdsize error rc %d\n", rc);
594
595         return rc;
596 }
597
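/*
 * Undo client_common_fill_super(): stop the close thread, finalize the
 * cl_object layer, tear down the FID infrastructure, disconnect from the
 * data and metadata exports and remove the debugfs entries.
 */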
598 static void client_common_put_super(struct super_block *sb)
599 {
600         struct ll_sb_info *sbi = ll_s2sbi(sb);
601
602         ll_close_thread_shutdown(sbi->ll_lcq);
603
604         cl_sb_fini(sb);
605
606         obd_fid_fini(sbi->ll_dt_exp->exp_obd);
607         obd_disconnect(sbi->ll_dt_exp);
608         sbi->ll_dt_exp = NULL;
609
610         ldebugfs_unregister_mountpoint(sbi);
611
612         obd_fid_fini(sbi->ll_md_exp->exp_obd);
613         obd_disconnect(sbi->ll_md_exp);
614         sbi->ll_md_exp = NULL;
615 }
616
617 void ll_kill_super(struct super_block *sb)
618 {
619         struct ll_sb_info *sbi;
620
621         /* sb not initialized? */
622         if (!(sb->s_flags & MS_ACTIVE))
623                 return;
624
625         sbi = ll_s2sbi(sb);
626         /* We need to restore s_dev (changed for clustered NFS) before
627          * put_super, because newer kernels cache s_dev and changing sb->s_dev
628          * in put_super does not affect the real device removal
629          */
630         if (sbi) {
631                 sb->s_dev = sbi->ll_sdev_orig;
632                 sbi->ll_umounting = 1;
633         }
634 }
635
636 static inline int ll_set_opt(const char *opt, char *data, int fl)
637 {
638         if (strncmp(opt, data, strlen(opt)) != 0)
639                 return 0;
640         else
641                 return fl;
642 }
643
644 /* non-client-specific mount options are parsed in lmd_parse */
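/* ll_options() walks the comma-separated option string, translating each
 * recognized token into an LL_SBI_* flag bit (set for "opt", cleared for
 * "noopt") and rejecting unknown options with -EINVAL.  A mount might pass,
 * for example, "flock,user_xattr,lazystatfs".
 */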
645 static int ll_options(char *options, int *flags)
646 {
647         int tmp;
648         char *s1 = options, *s2;
649
650         if (!options)
651                 return 0;
652
653         CDEBUG(D_CONFIG, "Parsing opts %s\n", options);
654
655         while (*s1) {
656                 CDEBUG(D_SUPER, "next opt=%s\n", s1);
657                 tmp = ll_set_opt("nolock", s1, LL_SBI_NOLCK);
658                 if (tmp) {
659                         *flags |= tmp;
660                         goto next;
661                 }
662                 tmp = ll_set_opt("flock", s1, LL_SBI_FLOCK);
663                 if (tmp) {
664                         *flags |= tmp;
665                         goto next;
666                 }
667                 tmp = ll_set_opt("localflock", s1, LL_SBI_LOCALFLOCK);
668                 if (tmp) {
669                         *flags |= tmp;
670                         goto next;
671                 }
672                 tmp = ll_set_opt("noflock", s1,
673                                  LL_SBI_FLOCK | LL_SBI_LOCALFLOCK);
674                 if (tmp) {
675                         *flags &= ~tmp;
676                         goto next;
677                 }
678                 tmp = ll_set_opt("user_xattr", s1, LL_SBI_USER_XATTR);
679                 if (tmp) {
680                         *flags |= tmp;
681                         goto next;
682                 }
683                 tmp = ll_set_opt("nouser_xattr", s1, LL_SBI_USER_XATTR);
684                 if (tmp) {
685                         *flags &= ~tmp;
686                         goto next;
687                 }
688                 tmp = ll_set_opt("user_fid2path", s1, LL_SBI_USER_FID2PATH);
689                 if (tmp) {
690                         *flags |= tmp;
691                         goto next;
692                 }
693                 tmp = ll_set_opt("nouser_fid2path", s1, LL_SBI_USER_FID2PATH);
694                 if (tmp) {
695                         *flags &= ~tmp;
696                         goto next;
697                 }
698
699                 tmp = ll_set_opt("checksum", s1, LL_SBI_CHECKSUM);
700                 if (tmp) {
701                         *flags |= tmp;
702                         goto next;
703                 }
704                 tmp = ll_set_opt("nochecksum", s1, LL_SBI_CHECKSUM);
705                 if (tmp) {
706                         *flags &= ~tmp;
707                         goto next;
708                 }
709                 tmp = ll_set_opt("lruresize", s1, LL_SBI_LRU_RESIZE);
710                 if (tmp) {
711                         *flags |= tmp;
712                         goto next;
713                 }
714                 tmp = ll_set_opt("nolruresize", s1, LL_SBI_LRU_RESIZE);
715                 if (tmp) {
716                         *flags &= ~tmp;
717                         goto next;
718                 }
719                 tmp = ll_set_opt("lazystatfs", s1, LL_SBI_LAZYSTATFS);
720                 if (tmp) {
721                         *flags |= tmp;
722                         goto next;
723                 }
724                 tmp = ll_set_opt("nolazystatfs", s1, LL_SBI_LAZYSTATFS);
725                 if (tmp) {
726                         *flags &= ~tmp;
727                         goto next;
728                 }
729                 tmp = ll_set_opt("som_preview", s1, LL_SBI_SOM_PREVIEW);
730                 if (tmp) {
731                         *flags |= tmp;
732                         goto next;
733                 }
734                 tmp = ll_set_opt("32bitapi", s1, LL_SBI_32BIT_API);
735                 if (tmp) {
736                         *flags |= tmp;
737                         goto next;
738                 }
739                 tmp = ll_set_opt("verbose", s1, LL_SBI_VERBOSE);
740                 if (tmp) {
741                         *flags |= tmp;
742                         goto next;
743                 }
744                 tmp = ll_set_opt("noverbose", s1, LL_SBI_VERBOSE);
745                 if (tmp) {
746                         *flags &= ~tmp;
747                         goto next;
748                 }
749                 LCONSOLE_ERROR_MSG(0x152, "Unknown option '%s', won't mount.\n",
750                                    s1);
751                 return -EINVAL;
752
753 next:
754                 /* Find next opt */
755                 s2 = strchr(s1, ',');
756                 if (!s2)
757                         break;
758                 s1 = s2 + 1;
759         }
760         return 0;
761 }
762
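/*
 * Initialize a freshly allocated ll_inode_info: magic, locks, open-handle
 * bookkeeping, xattr locks, and either the directory-specific (readdir,
 * statahead) or regular-file-specific (size, truncate, glimpse, AGL) fields,
 * depending on the inode mode.
 */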
763 void ll_lli_init(struct ll_inode_info *lli)
764 {
765         lli->lli_inode_magic = LLI_INODE_MAGIC;
766         lli->lli_flags = 0;
767         lli->lli_ioepoch = 0;
768         lli->lli_maxbytes = MAX_LFS_FILESIZE;
769         spin_lock_init(&lli->lli_lock);
770         lli->lli_posix_acl = NULL;
771         /* Do not set lli_fid, it has been initialized already. */
772         fid_zero(&lli->lli_pfid);
773         INIT_LIST_HEAD(&lli->lli_close_list);
774         lli->lli_pending_och = NULL;
775         lli->lli_mds_read_och = NULL;
776         lli->lli_mds_write_och = NULL;
777         lli->lli_mds_exec_och = NULL;
778         lli->lli_open_fd_read_count = 0;
779         lli->lli_open_fd_write_count = 0;
780         lli->lli_open_fd_exec_count = 0;
781         mutex_init(&lli->lli_och_mutex);
782         spin_lock_init(&lli->lli_agl_lock);
783         lli->lli_has_smd = false;
784         spin_lock_init(&lli->lli_layout_lock);
785         ll_layout_version_set(lli, LL_LAYOUT_GEN_NONE);
786         lli->lli_clob = NULL;
787
788         init_rwsem(&lli->lli_xattrs_list_rwsem);
789         mutex_init(&lli->lli_xattrs_enq_lock);
790
791         LASSERT(lli->lli_vfs_inode.i_mode != 0);
792         if (S_ISDIR(lli->lli_vfs_inode.i_mode)) {
793                 mutex_init(&lli->lli_readdir_mutex);
794                 lli->lli_opendir_key = NULL;
795                 lli->lli_sai = NULL;
796                 spin_lock_init(&lli->lli_sa_lock);
797                 lli->lli_opendir_pid = 0;
798         } else {
799                 mutex_init(&lli->lli_size_mutex);
800                 lli->lli_symlink_name = NULL;
801                 init_rwsem(&lli->lli_trunc_sem);
802                 mutex_init(&lli->lli_write_mutex);
803                 init_rwsem(&lli->lli_glimpse_sem);
804                 lli->lli_glimpse_time = 0;
805                 INIT_LIST_HEAD(&lli->lli_agl_list);
806                 lli->lli_agl_index = 0;
807                 lli->lli_async_rc = 0;
808         }
809         mutex_init(&lli->lli_layout_mutex);
810 }
811
812 static inline int ll_bdi_register(struct backing_dev_info *bdi)
813 {
814         static atomic_t ll_bdi_num = ATOMIC_INIT(0);
815
816         bdi->name = "lustre";
817         return bdi_register(bdi, NULL, "lustre-%d",
818                             atomic_inc_return(&ll_bdi_num));
819 }
820
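/*
 * Client mount entry point: allocate the ll_sb_info, parse the client mount
 * options, set up the backing_dev_info and dentry operations, process the
 * configuration llog for this profile (which sets up the client obd devices)
 * and finish the job in client_common_fill_super().
 */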
821 int ll_fill_super(struct super_block *sb, struct vfsmount *mnt)
822 {
823         struct lustre_profile *lprof = NULL;
824         struct lustre_sb_info *lsi = s2lsi(sb);
825         struct ll_sb_info *sbi;
826         char  *dt = NULL, *md = NULL;
827         char  *profilenm = get_profile_name(sb);
828         struct config_llog_instance *cfg;
829         int    err;
830
831         CDEBUG(D_VFSTRACE, "VFS Op: sb %p\n", sb);
832
833         cfg = kzalloc(sizeof(*cfg), GFP_NOFS);
834         if (!cfg)
835                 return -ENOMEM;
836
837         try_module_get(THIS_MODULE);
838
839         /* client additional sb info */
840         sbi = ll_init_sbi(sb);
841         lsi->lsi_llsbi = sbi;
842         if (!sbi) {
843                 module_put(THIS_MODULE);
844                 kfree(cfg);
845                 return -ENOMEM;
846         }
847
848         err = ll_options(lsi->lsi_lmd->lmd_opts, &sbi->ll_flags);
849         if (err)
850                 goto out_free;
851
852         err = bdi_init(&lsi->lsi_bdi);
853         if (err)
854                 goto out_free;
855         lsi->lsi_flags |= LSI_BDI_INITIALIZED;
856         lsi->lsi_bdi.capabilities = 0;
857         err = ll_bdi_register(&lsi->lsi_bdi);
858         if (err)
859                 goto out_free;
860
861         sb->s_bdi = &lsi->lsi_bdi;
862         /* kernels >= 2.6.38 store dentry operations in sb->s_d_op. */
863         sb->s_d_op = &ll_d_ops;
864
865         /* Generate a string unique to this super, in case some joker tries
866          * to mount the same fs at two mount points.
867          * Use the address of the super itself.
868          */
869         cfg->cfg_instance = sb;
870         cfg->cfg_uuid = lsi->lsi_llsbi->ll_sb_uuid;
871         cfg->cfg_callback = class_config_llog_handler;
872         /* set up client obds */
873         err = lustre_process_log(sb, profilenm, cfg);
874         if (err < 0)
875                 goto out_free;
876
877         /* Profile set with LCFG_MOUNTOPT so we can find our mdc and osc obds */
878         lprof = class_get_profile(profilenm);
879         if (!lprof) {
880                 LCONSOLE_ERROR_MSG(0x156, "The client profile '%s' could not be read from the MGS.  Does that filesystem exist?\n",
881                                    profilenm);
882                 err = -EINVAL;
883                 goto out_free;
884         }
885         CDEBUG(D_CONFIG, "Found profile %s: mdc=%s osc=%s\n", profilenm,
886                lprof->lp_md, lprof->lp_dt);
887
888         dt = kasprintf(GFP_NOFS, "%s-%p", lprof->lp_dt, cfg->cfg_instance);
889         if (!dt) {
890                 err = -ENOMEM;
891                 goto out_free;
892         }
893
894         md = kasprintf(GFP_NOFS, "%s-%p", lprof->lp_md, cfg->cfg_instance);
895         if (!md) {
896                 err = -ENOMEM;
897                 goto out_free;
898         }
899
900         /* connections, registrations, sb setup */
901         err = client_common_fill_super(sb, md, dt, mnt);
902
903 out_free:
904         kfree(md);
905         kfree(dt);
906         if (err)
907                 ll_put_super(sb);
908         else if (sbi->ll_flags & LL_SBI_VERBOSE)
909                 LCONSOLE_WARN("Mounted %s\n", profilenm);
910
911         kfree(cfg);
912         return err;
913 } /* ll_fill_super */
914
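/*
 * Unmount path: end the configuration logs, wait for unstable pages to be
 * committed unless the unmount is forced, undo client_common_fill_super(),
 * clean up every obd device in this superblock's group, and release the bdi
 * and the ll_sb_info.
 */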
915 void ll_put_super(struct super_block *sb)
916 {
917         struct config_llog_instance cfg, params_cfg;
918         struct obd_device *obd;
919         struct lustre_sb_info *lsi = s2lsi(sb);
920         struct ll_sb_info *sbi = ll_s2sbi(sb);
921         char *profilenm = get_profile_name(sb);
922         int ccc_count, next, force = 1, rc = 0;
923
924         CDEBUG(D_VFSTRACE, "VFS Op: sb %p - %s\n", sb, profilenm);
925
926         cfg.cfg_instance = sb;
927         lustre_end_log(sb, profilenm, &cfg);
928
929         params_cfg.cfg_instance = sb;
930         lustre_end_log(sb, PARAMS_FILENAME, &params_cfg);
931
932         if (sbi->ll_md_exp) {
933                 obd = class_exp2obd(sbi->ll_md_exp);
934                 if (obd)
935                         force = obd->obd_force;
936         }
937
938         /* Wait for unstable pages to be committed to stable storage */
939         if (!force) {
940                 struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
941
942                 rc = l_wait_event(sbi->ll_cache->ccc_unstable_waitq,
943                                   !atomic_read(&sbi->ll_cache->ccc_unstable_nr),
944                                   &lwi);
945         }
946
947         ccc_count = atomic_read(&sbi->ll_cache->ccc_unstable_nr);
948         if (!force && rc != -EINTR)
949                 LASSERTF(!ccc_count, "count: %i\n", ccc_count);
950
951         /* We need to set force before the lov_disconnect in
952          * lustre_common_put_super, since l_d cleans up osc's as well.
953          */
954         if (force) {
955                 next = 0;
956                 while ((obd = class_devices_in_group(&sbi->ll_sb_uuid,
957                                                      &next)) != NULL) {
958                         obd->obd_force = force;
959                 }
960         }
961
962         if (sbi->ll_lcq) {
963                 /* Only if client_common_fill_super succeeded */
964                 client_common_put_super(sb);
965         }
966
967         next = 0;
968         while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, &next)))
969                 class_manual_cleanup(obd);
970
971         if (sbi->ll_flags & LL_SBI_VERBOSE)
972                 LCONSOLE_WARN("Unmounted %s\n", profilenm ? profilenm : "");
973
974         if (profilenm)
975                 class_del_profile(profilenm);
976
977         if (lsi->lsi_flags & LSI_BDI_INITIALIZED) {
978                 bdi_destroy(&lsi->lsi_bdi);
979                 lsi->lsi_flags &= ~LSI_BDI_INITIALIZED;
980         }
981
982         ll_free_sbi(sb);
983         lsi->lsi_llsbi = NULL;
984
985         lustre_common_put_super(sb);
986
987         cl_env_cache_purge(~0);
988
989         module_put(THIS_MODULE);
990 } /* client_put_super */
991
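/*
 * Return the inode attached to the DLM resource of @lock, with a reference
 * taken via igrab(), or NULL if no inode is attached or its magic marks it
 * as bogus.
 */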
992 struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock)
993 {
994         struct inode *inode = NULL;
995
996         /* NOTE: we depend on atomic igrab() -bzzz */
997         lock_res_and_lock(lock);
998         if (lock->l_resource->lr_lvb_inode) {
999                 struct ll_inode_info *lli;
1000
1001                 lli = ll_i2info(lock->l_resource->lr_lvb_inode);
1002                 if (lli->lli_inode_magic == LLI_INODE_MAGIC) {
1003                         inode = igrab(lock->l_resource->lr_lvb_inode);
1004                 } else {
1005                         inode = lock->l_resource->lr_lvb_inode;
1006                         LDLM_DEBUG_LIMIT(inode->i_state & I_FREEING ?  D_INFO :
1007                                          D_WARNING, lock, "lr_lvb_inode %p is bogus: magic %08x",
1008                                          lock->l_resource->lr_lvb_inode,
1009                                          lli->lli_inode_magic);
1010                         inode = NULL;
1011                 }
1012         }
1013         unlock_res_and_lock(lock);
1014         return inode;
1015 }
1016
1017 static void ll_dir_clear_lsm_md(struct inode *inode)
1018 {
1019         struct ll_inode_info *lli = ll_i2info(inode);
1020
1021         LASSERT(S_ISDIR(inode->i_mode));
1022
1023         if (lli->lli_lsm_md) {
1024                 lmv_free_memmd(lli->lli_lsm_md);
1025                 lli->lli_lsm_md = NULL;
1026         }
1027 }
1028
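/*
 * Get (or create) the anonymous inode backing one slave stripe of a striped
 * directory, identified by @fid.  A newly created inode is set up as a
 * directory and remembers the FID of the master object in lli_pfid.
 */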
1029 static struct inode *ll_iget_anon_dir(struct super_block *sb,
1030                                       const struct lu_fid *fid,
1031                                       struct lustre_md *md)
1032 {
1033         struct ll_sb_info *sbi = ll_s2sbi(sb);
1034         struct mdt_body *body = md->body;
1035         struct inode *inode;
1036         ino_t ino;
1037
1038         ino = cl_fid_build_ino(fid, sbi->ll_flags & LL_SBI_32BIT_API);
1039         inode = iget_locked(sb, ino);
1040         if (!inode) {
1041                 CERROR("%s: failed to get simple inode "DFID": rc = -ENOENT\n",
1042                        ll_get_fsname(sb, NULL, 0), PFID(fid));
1043                 return ERR_PTR(-ENOENT);
1044         }
1045
1046         if (inode->i_state & I_NEW) {
1047                 struct ll_inode_info *lli = ll_i2info(inode);
1048                 struct lmv_stripe_md *lsm = md->lmv;
1049
1050                 inode->i_mode = (inode->i_mode & ~S_IFMT) |
1051                                 (body->mbo_mode & S_IFMT);
1052                 LASSERTF(S_ISDIR(inode->i_mode), "Not slave inode "DFID"\n",
1053                          PFID(fid));
1054
1055                 LTIME_S(inode->i_mtime) = 0;
1056                 LTIME_S(inode->i_atime) = 0;
1057                 LTIME_S(inode->i_ctime) = 0;
1058                 inode->i_rdev = 0;
1059
1060                 inode->i_op = &ll_dir_inode_operations;
1061                 inode->i_fop = &ll_dir_operations;
1062                 lli->lli_fid = *fid;
1063                 ll_lli_init(lli);
1064
1065                 LASSERT(lsm);
1066                 /* master object FID */
1067                 lli->lli_pfid = body->mbo_fid1;
1068                 CDEBUG(D_INODE, "lli %p slave "DFID" master "DFID"\n",
1069                        lli, PFID(fid), PFID(&lli->lli_pfid));
1070                 unlock_new_inode(inode);
1071         }
1072
1073         return inode;
1074 }
1075
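/*
 * Instantiate the slave stripe inodes of a striped directory and hand the
 * layout to the MD layer via md_update_lsm_md().  For a directory being
 * migrated, the first stripe is the master inode itself.
 */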
1076 static int ll_init_lsm_md(struct inode *inode, struct lustre_md *md)
1077 {
1078         struct lmv_stripe_md *lsm = md->lmv;
1079         struct lu_fid *fid;
1080         int i;
1081
1082         LASSERT(lsm);
1083         /*
1084          * XXX sigh, this lsm_root initialization should be in
1085          * LMV layer, but it needs ll_iget right now, so we
1086          * put this here right now.
1087          */
1088         for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
1089                 fid = &lsm->lsm_md_oinfo[i].lmo_fid;
1090                 LASSERT(!lsm->lsm_md_oinfo[i].lmo_root);
1091                 /* Unfortunately ll_iget will call ll_update_inode,
1092                  * where the initialization of a slave inode is slightly
1093                  * different, so it resets lsm_md to NULL to avoid
1094                  * initializing the lsm for a slave inode.
1095                  */
1096                 /* For a migrating inode, master stripe and master object
1097                  * will be the same, so we only need to assign this inode
1098                  */
1099                 if (lsm->lsm_md_hash_type & LMV_HASH_FLAG_MIGRATION && !i)
1100                         lsm->lsm_md_oinfo[i].lmo_root = inode;
1101                 else
1102                         lsm->lsm_md_oinfo[i].lmo_root =
1103                                 ll_iget_anon_dir(inode->i_sb, fid, md);
1104                 if (IS_ERR(lsm->lsm_md_oinfo[i].lmo_root)) {
1105                         int rc = PTR_ERR(lsm->lsm_md_oinfo[i].lmo_root);
1106
1107                         lsm->lsm_md_oinfo[i].lmo_root = NULL;
1108                         return rc;
1109                 }
1110         }
1111
1112         /*
1113          * Here is where the lsm is initialized (fill lmo_info) after the
1114          * client retrieves MD stripe information from the MDT.
1115          */
1116         return md_update_lsm_md(ll_i2mdexp(inode), lsm, md->body,
1117                                 ll_md_blocking_ast);
1118 }
1119
1120 static inline int lli_lsm_md_eq(const struct lmv_stripe_md *lsm_md1,
1121                                 const struct lmv_stripe_md *lsm_md2)
1122 {
1123         return lsm_md1->lsm_md_magic == lsm_md2->lsm_md_magic &&
1124                lsm_md1->lsm_md_stripe_count == lsm_md2->lsm_md_stripe_count &&
1125                lsm_md1->lsm_md_master_mdt_index ==
1126                         lsm_md2->lsm_md_master_mdt_index &&
1127                lsm_md1->lsm_md_hash_type == lsm_md2->lsm_md_hash_type &&
1128                lsm_md1->lsm_md_layout_version ==
1129                         lsm_md2->lsm_md_layout_version &&
1130                !strcmp(lsm_md1->lsm_md_pool_name,
1131                        lsm_md2->lsm_md_pool_name);
1132 }
1133
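/*
 * Install or validate the directory's LMV stripe layout from @md.  A missing
 * layout in the reply is fine (plain directory, finished migration, or a
 * reply without the stripe EA); a cached layout that differs from the one
 * just received is treated as a fatal inconsistency and returns -EIO.
 */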
1134 static int ll_update_lsm_md(struct inode *inode, struct lustre_md *md)
1135 {
1136         struct ll_inode_info *lli = ll_i2info(inode);
1137         struct lmv_stripe_md *lsm = md->lmv;
1138         int rc;
1139
1140         LASSERT(S_ISDIR(inode->i_mode));
1141         CDEBUG(D_INODE, "update lsm %p of "DFID"\n", lli->lli_lsm_md,
1142                PFID(ll_inode2fid(inode)));
1143
1144         /* no striped information from request. */
1145         if (!lsm) {
1146                 if (!lli->lli_lsm_md) {
1147                         return 0;
1148                 } else if (lli->lli_lsm_md->lsm_md_hash_type &
1149                            LMV_HASH_FLAG_MIGRATION) {
1150                         /*
1151                          * migration is done, the temporary MIGRATE layout has
1152                          * been removed
1153                          */
1154                         CDEBUG(D_INODE, DFID" finish migration.\n",
1155                                PFID(ll_inode2fid(inode)));
1156                         lmv_free_memmd(lli->lli_lsm_md);
1157                         lli->lli_lsm_md = NULL;
1158                         return 0;
1159                 } else {
1160                         /*
1161                          * The lustre_md from req does not include stripeEA,
1162                          * see ll_md_setattr
1163                          */
1164                         return 0;
1165                 }
1166         }
1167
1168         /* set the directory layout */
1169         if (!lli->lli_lsm_md) {
1170                 rc = ll_init_lsm_md(inode, md);
1171                 if (rc)
1172                         return rc;
1173
1174                 lli->lli_lsm_md = lsm;
1175                 /*
1176                  * set md->lmv to NULL so that freeing the lustre_md below
1177                  * will not free this lsm
1178                  */
1179                 md->lmv = NULL;
1180                 CDEBUG(D_INODE, "Set lsm %p magic %x to "DFID"\n", lsm,
1181                        lsm->lsm_md_magic, PFID(ll_inode2fid(inode)));
1182                 return 0;
1183         }
1184
1185         /* Compare the old and new stripe information */
1186         if (!lsm_md_eq(lli->lli_lsm_md, lsm)) {
1187                 struct lmv_stripe_md *old_lsm = lli->lli_lsm_md;
1188                 int idx;
1189
1190                 CERROR("%s: inode "DFID"(%p)'s lmv layout mismatch (%p)/(%p) magic:0x%x/0x%x stripe count: %d/%d master_mdt: %d/%d hash_type:0x%x/0x%x layout: 0x%x/0x%x pool:%s/%s\n",
1191                        ll_get_fsname(inode->i_sb, NULL, 0), PFID(&lli->lli_fid),
1192                        inode, lsm, old_lsm,
1193                        lsm->lsm_md_magic, old_lsm->lsm_md_magic,
1194                        lsm->lsm_md_stripe_count,
1195                        old_lsm->lsm_md_stripe_count,
1196                        lsm->lsm_md_master_mdt_index,
1197                        old_lsm->lsm_md_master_mdt_index,
1198                        lsm->lsm_md_hash_type, old_lsm->lsm_md_hash_type,
1199                        lsm->lsm_md_layout_version,
1200                        old_lsm->lsm_md_layout_version,
1201                        lsm->lsm_md_pool_name,
1202                        old_lsm->lsm_md_pool_name);
1203
1204                 for (idx = 0; idx < old_lsm->lsm_md_stripe_count; idx++) {
1205                         CERROR("%s: sub FIDs in old lsm idx %d, old: "DFID"\n",
1206                                ll_get_fsname(inode->i_sb, NULL, 0), idx,
1207                                PFID(&old_lsm->lsm_md_oinfo[idx].lmo_fid));
1208                 }
1209
1210                 for (idx = 0; idx < lsm->lsm_md_stripe_count; idx++) {
1211                         CERROR("%s: sub FIDs in new lsm idx %d, new: "DFID"\n",
1212                                ll_get_fsname(inode->i_sb, NULL, 0), idx,
1213                                PFID(&lsm->lsm_md_oinfo[idx].lmo_fid));
1214                 }
1215
1216                 return -EIO;
1217         }
1218
1219         return 0;
1220 }
1221
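/*
 * Called when the VFS evicts an inode: drop any remaining MDS open handles,
 * the symlink name, the xattr cache, POSIX ACLs and, for directories, the
 * LMV stripe data, then finalize the cl_object.
 */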
1222 void ll_clear_inode(struct inode *inode)
1223 {
1224         struct ll_inode_info *lli = ll_i2info(inode);
1225         struct ll_sb_info *sbi = ll_i2sbi(inode);
1226
1227         CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
1228                PFID(ll_inode2fid(inode)), inode);
1229
1230         if (S_ISDIR(inode->i_mode)) {
1231                 /* these should have been cleared in ll_file_release */
1232                 LASSERT(!lli->lli_opendir_key);
1233                 LASSERT(!lli->lli_sai);
1234                 LASSERT(lli->lli_opendir_pid == 0);
1235         }
1236
1237         spin_lock(&lli->lli_lock);
1238         ll_i2info(inode)->lli_flags &= ~LLIF_MDS_SIZE_LOCK;
1239         spin_unlock(&lli->lli_lock);
1240         md_null_inode(sbi->ll_md_exp, ll_inode2fid(inode));
1241
1242         LASSERT(!lli->lli_open_fd_write_count);
1243         LASSERT(!lli->lli_open_fd_read_count);
1244         LASSERT(!lli->lli_open_fd_exec_count);
1245
1246         if (lli->lli_mds_write_och)
1247                 ll_md_real_close(inode, FMODE_WRITE);
1248         if (lli->lli_mds_exec_och)
1249                 ll_md_real_close(inode, FMODE_EXEC);
1250         if (lli->lli_mds_read_och)
1251                 ll_md_real_close(inode, FMODE_READ);
1252
1253         if (S_ISLNK(inode->i_mode)) {
1254                 kfree(lli->lli_symlink_name);
1255                 lli->lli_symlink_name = NULL;
1256         }
1257
1258         ll_xattr_cache_destroy(inode);
1259
1260 #ifdef CONFIG_FS_POSIX_ACL
1261         if (lli->lli_posix_acl) {
1262                 posix_acl_release(lli->lli_posix_acl);
1263                 lli->lli_posix_acl = NULL;
1264         }
1265 #endif
1266         lli->lli_inode_magic = LLI_INODE_DEAD;
1267
1268         if (S_ISDIR(inode->i_mode))
1269                 ll_dir_clear_lsm_md(inode);
1270         if (S_ISREG(inode->i_mode) && !is_bad_inode(inode))
1271                 LASSERT(list_empty(&lli->lli_agl_list));
1272
1273         /*
1274          * XXX This has to be done before lsm is freed below, because
1275          * cl_object still uses inode lsm.
1276          */
1277         cl_inode_fini(inode);
1278         lli->lli_has_smd = false;
1279 }
1280
1281 #define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)
1282
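/*
 * Send the setattr RPC to the MDS and apply the granted attributes to the
 * local inode.  -ENOENT on a special file is tolerated (the object may
 * already have been unlinked); epoch data from the reply is saved in
 * @op_data for the later Size-on-MDS update.
 */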
1283 static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data,
1284                          struct md_open_data **mod)
1285 {
1286         struct lustre_md md;
1287         struct inode *inode = d_inode(dentry);
1288         struct ll_sb_info *sbi = ll_i2sbi(inode);
1289         struct ptlrpc_request *request = NULL;
1290         int rc, ia_valid;
1291
1292         op_data = ll_prep_md_op_data(op_data, inode, NULL, NULL, 0, 0,
1293                                      LUSTRE_OPC_ANY, NULL);
1294         if (IS_ERR(op_data))
1295                 return PTR_ERR(op_data);
1296
1297         rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, NULL, 0,
1298                         &request, mod);
1299         if (rc) {
1300                 ptlrpc_req_finished(request);
1301                 if (rc == -ENOENT) {
1302                         clear_nlink(inode);
1303                         /* Unlinked special device node? Or just a race?
1304                          * Pretend we did everything.
1305                          */
1306                         if (!S_ISREG(inode->i_mode) &&
1307                             !S_ISDIR(inode->i_mode)) {
1308                                 ia_valid = op_data->op_attr.ia_valid;
1309                                 op_data->op_attr.ia_valid &= ~TIMES_SET_FLAGS;
1310                                 rc = simple_setattr(dentry, &op_data->op_attr);
1311                                 op_data->op_attr.ia_valid = ia_valid;
1312                         }
1313                 } else if (rc != -EPERM && rc != -EACCES && rc != -ETXTBSY) {
1314                         CERROR("md_setattr fails: rc = %d\n", rc);
1315                 }
1316                 return rc;
1317         }
1318
1319         rc = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
1320                               sbi->ll_md_exp, &md);
1321         if (rc) {
1322                 ptlrpc_req_finished(request);
1323                 return rc;
1324         }
1325
1326         ia_valid = op_data->op_attr.ia_valid;
1327         /* inode size will be set in cl_setattr_ost; can't do it now since
1328          * the dirty cache is not cleared yet.
1329          */
1330         op_data->op_attr.ia_valid &= ~(TIMES_SET_FLAGS | ATTR_SIZE);
1331         rc = simple_setattr(dentry, &op_data->op_attr);
1332         op_data->op_attr.ia_valid = ia_valid;
1333
1334         /* Extract epoch data if obtained. */
1335         op_data->op_handle = md.body->mbo_handle;
1336         op_data->op_ioepoch = md.body->mbo_ioepoch;
1337
1338         rc = ll_update_inode(inode, &md);
1339         ptlrpc_req_finished(request);
1340
1341         return rc;
1342 }
1343
1344 /* Close IO epoch and send Size-on-MDS attribute update. */
1345 static int ll_setattr_done_writing(struct inode *inode,
1346                                    struct md_op_data *op_data,
1347                                    struct md_open_data *mod)
1348 {
1349         struct ll_inode_info *lli = ll_i2info(inode);
1350         int rc = 0;
1351
1352         if (!S_ISREG(inode->i_mode))
1353                 return 0;
1354
1355         CDEBUG(D_INODE, "Epoch %llu closed on "DFID" for truncate\n",
1356                op_data->op_ioepoch, PFID(&lli->lli_fid));
1357
1358         op_data->op_flags = MF_EPOCH_CLOSE;
1359         ll_done_writing_attr(inode, op_data);
1360         ll_pack_inode2opdata(inode, op_data, NULL);
1361
1362         rc = md_done_writing(ll_i2sbi(inode)->ll_md_exp, op_data, mod);
1363         if (rc == -EAGAIN)
1364                 /* MDS has instructed us to obtain Size-on-MDS attribute
1365                  * from OSTs and send setattr back to the MDS.
1366                  */
1367                 rc = ll_som_update(inode, op_data);
1368         else if (rc) {
1369                 CERROR("%s: inode "DFID" mdc truncate failed: rc = %d\n",
1370                        ll_i2sbi(inode)->ll_md_exp->exp_obd->obd_name,
1371                        PFID(ll_inode2fid(inode)), rc);
1372         }
1373         return rc;
1374 }
1375
1376 /* If this inode has objects allocated to it (lsm != NULL), then the OST
1377  * object(s) determine the file size and mtime.  Otherwise, the MDS will
1378  * keep these values until such a time that objects are allocated for it.
1379  * We do the MDS operations first, as it is checking permissions for us.
1380  * We don't do the MDS RPC if there is nothing that we want to store there;
1381  * otherwise there is no harm in updating mtime/atime on the MDS if we are
1382  * going to do an RPC anyway.
1383  *
1384  * If we are doing a truncate, we will send the mtime and ctime updates
1385  * to the OST with the punch RPC, otherwise we do an explicit setattr RPC.
1386  * I don't believe it is possible to get e.g. ATTR_MTIME_SET and ATTR_SIZE
1387  * at the same time.
1388  *
1389  * In the case of HSM import, we only set attributes on the MDS.
1390  */
1391 int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
1392 {
1393         struct inode *inode = d_inode(dentry);
1394         struct ll_inode_info *lli = ll_i2info(inode);
1395         struct md_op_data *op_data = NULL;
1396         struct md_open_data *mod = NULL;
1397         bool file_is_released = false;
1398         int rc = 0, rc1 = 0;
1399
1400         CDEBUG(D_VFSTRACE, "%s: setattr inode "DFID"(%p) from %llu to %llu, valid %x, hsm_import %d\n",
1401                ll_get_fsname(inode->i_sb, NULL, 0), PFID(&lli->lli_fid), inode,
1402                i_size_read(inode), attr->ia_size, attr->ia_valid, hsm_import);
1403
1404         if (attr->ia_valid & ATTR_SIZE) {
1405                 /* Check new size against VFS/VM file size limit and rlimit */
1406                 rc = inode_newsize_ok(inode, attr->ia_size);
1407                 if (rc)
1408                         return rc;
1409
1410                 /* The maximum Lustre file size is variable, based on the
1411                  * OST maximum object size and number of stripes.  This
1412                  * needs another check in addition to the VFS check above.
1413                  */
1414                 if (attr->ia_size > ll_file_maxbytes(inode)) {
1415                         CDEBUG(D_INODE, "file "DFID" too large %llu > %llu\n",
1416                                PFID(&lli->lli_fid), attr->ia_size,
1417                                ll_file_maxbytes(inode));
1418                         return -EFBIG;
1419                 }
1420
1421                 attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
1422         }
1423
1424         /* POSIX: check before ATTR_*TIME_SET is set (from inode_change_ok) */
1425         if (attr->ia_valid & TIMES_SET_FLAGS) {
1426                 if ((!uid_eq(current_fsuid(), inode->i_uid)) &&
1427                     !capable(CFS_CAP_FOWNER))
1428                         return -EPERM;
1429         }
1430
1431         /* We mark all of the fields "set" so MDS/OST does not re-set them */
1432         if (attr->ia_valid & ATTR_CTIME) {
1433                 attr->ia_ctime = CURRENT_TIME;
1434                 attr->ia_valid |= ATTR_CTIME_SET;
1435         }
1436         if (!(attr->ia_valid & ATTR_ATIME_SET) &&
1437             (attr->ia_valid & ATTR_ATIME)) {
1438                 attr->ia_atime = CURRENT_TIME;
1439                 attr->ia_valid |= ATTR_ATIME_SET;
1440         }
1441         if (!(attr->ia_valid & ATTR_MTIME_SET) &&
1442             (attr->ia_valid & ATTR_MTIME)) {
1443                 attr->ia_mtime = CURRENT_TIME;
1444                 attr->ia_valid |= ATTR_MTIME_SET;
1445         }
1446
1447         if (attr->ia_valid & (ATTR_MTIME | ATTR_CTIME))
1448                 CDEBUG(D_INODE, "setting mtime %lu, ctime %lu, now = %llu\n",
1449                        LTIME_S(attr->ia_mtime), LTIME_S(attr->ia_ctime),
1450                        (s64)ktime_get_real_seconds());
1451
1452         /* We always do an MDS RPC, even if we're only changing the size;
1453          * only the MDS knows whether truncate() should fail with -ETXTBUSY
1454          */
1455
1456         op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
1457         if (!op_data)
1458                 return -ENOMEM;
1459
1460         if (!S_ISDIR(inode->i_mode))
1461                 inode_unlock(inode);
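        /*
         * The inode lock stays dropped across the MDS and OST RPCs below and
         * is re-acquired for non-directories in the exit path after "out:".
         */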
1462
1463         /* A truncate on a released file must fail with -ENODATA,
1464          * so the size must not be set on the MDS for a released file,
1465          * but the other attributes must still be set.
1466          */
1467         if (S_ISREG(inode->i_mode)) {
1468                 struct lov_stripe_md *lsm;
1469                 __u32 gen;
1470
1471                 ll_layout_refresh(inode, &gen);
1472                 lsm = ccc_inode_lsm_get(inode);
1473                 if (lsm && lsm->lsm_pattern & LOV_PATTERN_F_RELEASED)
1474                         file_is_released = true;
1475                 ccc_inode_lsm_put(inode, lsm);
1476
1477                 if (!hsm_import && attr->ia_valid & ATTR_SIZE) {
1478                         if (file_is_released) {
1479                                 rc = ll_layout_restore(inode, 0, attr->ia_size);
1480                                 if (rc < 0)
1481                                         goto out;
1482
1483                                 file_is_released = false;
1484                                 ll_layout_refresh(inode, &gen);
1485                         }
1486
1487                         /*
1488                          * If we are changing the file size, the file
1489                          * content is modified; flag it.
1490                          */
1491                         attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;
1492                         spin_lock(&lli->lli_lock);
1493                         lli->lli_flags |= LLIF_DATA_MODIFIED;
1494                         spin_unlock(&lli->lli_lock);
1495                         op_data->op_bias |= MDS_DATA_MODIFIED;
1496                 }
1497         }
1498
1499         memcpy(&op_data->op_attr, attr, sizeof(*attr));
1500
1501         /* Open epoch for truncate. */
1502         if (exp_connect_som(ll_i2mdexp(inode)) && !hsm_import &&
1503             (attr->ia_valid & (ATTR_SIZE | ATTR_MTIME | ATTR_MTIME_SET)))
1504                 op_data->op_flags = MF_EPOCH_OPEN;
1505
1506         rc = ll_md_setattr(dentry, op_data, &mod);
1507         if (rc)
1508                 goto out;
1509
1510         /* The RPC to the MDT has been sent; clear the data modification flag */
1511         if (op_data->op_bias & MDS_DATA_MODIFIED) {
1512                 spin_lock(&lli->lli_lock);
1513                 lli->lli_flags &= ~LLIF_DATA_MODIFIED;
1514                 spin_unlock(&lli->lli_lock);
1515         }
1516
1517         ll_ioepoch_open(lli, op_data->op_ioepoch);
1518         if (!S_ISREG(inode->i_mode) || file_is_released) {
1519                 rc = 0;
1520                 goto out;
1521         }
1522
1523         if (attr->ia_valid & (ATTR_SIZE |
1524                               ATTR_ATIME | ATTR_ATIME_SET |
1525                               ATTR_MTIME | ATTR_MTIME_SET)) {
1526                 /* For truncate and utimes, sending the attributes to the
1527                  * OSTs and setting mtime/atime to the past is done under a
1528                  * PW [0:EOF] extent lock ([new_size:EOF] for truncate). It may
1529                  * seem excessive to send mtime/atime updates to the OSTs when
1530                  * not setting times to the past, but it is necessary due to
1531                  * possible time de-synchronization between MDT inode and OST objects.
1532                  */
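                /*
                 * lli_trunc_sem is held for write across the size change so
                 * that it cannot race with I/O paths, which are expected to
                 * hold it for read.
                 */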
1533                 if (attr->ia_valid & ATTR_SIZE)
1534                         down_write(&lli->lli_trunc_sem);
1535                 rc = cl_setattr_ost(inode, attr);
1536                 if (attr->ia_valid & ATTR_SIZE)
1537                         up_write(&lli->lli_trunc_sem);
1538         }
1539 out:
1540         if (op_data->op_ioepoch) {
1541                 rc1 = ll_setattr_done_writing(inode, op_data, mod);
1542                 if (!rc)
1543                         rc = rc1;
1544         }
1545         ll_finish_md_op_data(op_data);
1546
1547         if (!S_ISDIR(inode->i_mode)) {
1548                 inode_lock(inode);
1549                 if ((attr->ia_valid & ATTR_SIZE) && !hsm_import)
1550                         inode_dio_wait(inode);
1551         }
1552
1553         ll_stats_ops_tally(ll_i2sbi(inode), (attr->ia_valid & ATTR_SIZE) ?
1554                         LPROC_LL_TRUNC : LPROC_LL_SETATTR, 1);
1555
1556         return rc;
1557 }
1558
1559 int ll_setattr(struct dentry *de, struct iattr *attr)
1560 {
1561         int mode = d_inode(de)->i_mode;
1562
1563         if ((attr->ia_valid & (ATTR_CTIME | ATTR_SIZE | ATTR_MODE)) ==
1564                               (ATTR_CTIME | ATTR_SIZE | ATTR_MODE))
1565                 attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;
1566
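        /*
         * If the size and mode change together and the new mode drops the
         * setuid or setgid bit, force the update (ATTR_FORCE); a mode change
         * that drops those bits additionally gets ATTR_KILL_SUID /
         * ATTR_KILL_SGID set below.
         */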
1567         if (((attr->ia_valid & (ATTR_MODE | ATTR_FORCE | ATTR_SIZE)) ==
1568                                (ATTR_SIZE | ATTR_MODE)) &&
1569             (((mode & S_ISUID) && !(attr->ia_mode & S_ISUID)) ||
1570              (((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) &&
1571               !(attr->ia_mode & S_ISGID))))
1572                 attr->ia_valid |= ATTR_FORCE;
1573
1574         if ((attr->ia_valid & ATTR_MODE) &&
1575             (mode & S_ISUID) &&
1576             !(attr->ia_mode & S_ISUID) &&
1577             !(attr->ia_valid & ATTR_KILL_SUID))
1578                 attr->ia_valid |= ATTR_KILL_SUID;
1579
1580         if ((attr->ia_valid & ATTR_MODE) &&
1581             ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) &&
1582             !(attr->ia_mode & S_ISGID) &&
1583             !(attr->ia_valid & ATTR_KILL_SGID))
1584                 attr->ia_valid |= ATTR_KILL_SGID;
1585
1586         return ll_setattr_raw(de, attr, false);
1587 }
1588
1589 int ll_statfs_internal(struct super_block *sb, struct obd_statfs *osfs,
1590                        __u64 max_age, __u32 flags)
1591 {
1592         struct ll_sb_info *sbi = ll_s2sbi(sb);
1593         struct obd_statfs obd_osfs;
1594         int rc;
1595
1596         rc = obd_statfs(NULL, sbi->ll_md_exp, osfs, max_age, flags);
1597         if (rc) {
1598                 CERROR("md_statfs fails: rc = %d\n", rc);
1599                 return rc;
1600         }
1601
1602         osfs->os_type = sb->s_magic;
1603
1604         CDEBUG(D_SUPER, "MDC blocks %llu/%llu objects %llu/%llu\n",
1605                osfs->os_bavail, osfs->os_blocks, osfs->os_ffree,
1606                osfs->os_files);
1607
1608         if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
1609                 flags |= OBD_STATFS_NODELAY;
1610
1611         rc = obd_statfs_rqset(sbi->ll_dt_exp, &obd_osfs, max_age, flags);
1612         if (rc) {
1613                 CERROR("obd_statfs fails: rc = %d\n", rc);
1614                 return rc;
1615         }
1616
1617         CDEBUG(D_SUPER, "OSC blocks %llu/%llu objects %llu/%llu\n",
1618                obd_osfs.os_bavail, obd_osfs.os_blocks, obd_osfs.os_ffree,
1619                obd_osfs.os_files);
1620
1621         osfs->os_bsize = obd_osfs.os_bsize;
1622         osfs->os_blocks = obd_osfs.os_blocks;
1623         osfs->os_bfree = obd_osfs.os_bfree;
1624         osfs->os_bavail = obd_osfs.os_bavail;
1625
1626         /* If we don't have as many objects free on the OST as inodes
1627          * on the MDS, we reduce the total number of inodes to
1628          * compensate, so that the "inodes in use" number is correct.
1629          */
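        /*
         * For example, if the MDS reports 100 inodes with 80 free but the
         * OSTs only have 50 objects free, report 70 total and 50 free so
         * that "inodes in use" stays at 20.
         */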
1630         if (obd_osfs.os_ffree < osfs->os_ffree) {
1631                 osfs->os_files = (osfs->os_files - osfs->os_ffree) +
1632                         obd_osfs.os_ffree;
1633                 osfs->os_ffree = obd_osfs.os_ffree;
1634         }
1635
1636         return rc;
1637 }
1638
1639 int ll_statfs(struct dentry *de, struct kstatfs *sfs)
1640 {
1641         struct super_block *sb = de->d_sb;
1642         struct obd_statfs osfs;
1643         int rc;
1644
1645         CDEBUG(D_VFSTRACE, "VFS Op: at %llu jiffies\n", get_jiffies_64());
1646         ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_STAFS, 1);
1647
1648         /* Some amount of caching on the client is allowed */
1649         rc = ll_statfs_internal(sb, &osfs,
1650                                 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
1651                                 0);
1652         if (rc)
1653                 return rc;
1654
1655         statfs_unpack(sfs, &osfs);
1656
1657         /* We need to downshift for all 32-bit kernels, because we can't
1658          * tell if the kernel is being called via sys_statfs64() or not.
1659          * Stop before overflowing f_bsize; at that point it is better
1660          * to just risk EOVERFLOW if the caller is using the old sys_statfs().
1661          */
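        /*
         * For example, with 1KiB blocks and 8TiB of space os_blocks is 2^33,
         * which does not fit in a 32-bit long; two passes of the loop below
         * leave f_bsize at 4KiB and os_blocks at 2^31.
         */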
1662         if (sizeof(long) < 8) {
1663                 while (osfs.os_blocks > ~0UL && sfs->f_bsize < 0x40000000) {
1664                         sfs->f_bsize <<= 1;
1665
1666                         osfs.os_blocks >>= 1;
1667                         osfs.os_bfree >>= 1;
1668                         osfs.os_bavail >>= 1;
1669                 }
1670         }
1671
1672         sfs->f_blocks = osfs.os_blocks;
1673         sfs->f_bfree = osfs.os_bfree;
1674         sfs->f_bavail = osfs.os_bavail;
1675         sfs->f_fsid = ll_s2sbi(sb)->ll_fsid;
1676         return 0;
1677 }
1678
1679 void ll_inode_size_lock(struct inode *inode)
1680 {
1681         struct ll_inode_info *lli;
1682
1683         LASSERT(!S_ISDIR(inode->i_mode));
1684
1685         lli = ll_i2info(inode);
1686         mutex_lock(&lli->lli_size_mutex);
1687 }
1688
1689 void ll_inode_size_unlock(struct inode *inode)
1690 {
1691         struct ll_inode_info *lli;
1692
1693         lli = ll_i2info(inode);
1694         mutex_unlock(&lli->lli_size_mutex);
1695 }
1696
1697 int ll_update_inode(struct inode *inode, struct lustre_md *md)
1698 {
1699         struct ll_inode_info *lli = ll_i2info(inode);
1700         struct mdt_body *body = md->body;
1701         struct lov_stripe_md *lsm = md->lsm;
1702         struct ll_sb_info *sbi = ll_i2sbi(inode);
1703
1704         LASSERT((lsm != NULL) == ((body->mbo_valid & OBD_MD_FLEASIZE) != 0));
1705         if (lsm) {
1706                 if (!lli->lli_has_smd &&
1707                     !(sbi->ll_flags & LL_SBI_LAYOUT_LOCK))
1708                         cl_file_inode_init(inode, md);
1709
1710                 lli->lli_maxbytes = lsm->lsm_maxbytes;
1711                 if (lli->lli_maxbytes > MAX_LFS_FILESIZE)
1712                         lli->lli_maxbytes = MAX_LFS_FILESIZE;
1713         }
1714
1715         if (S_ISDIR(inode->i_mode)) {
1716                 int rc;
1717
1718                 rc = ll_update_lsm_md(inode, md);
1719                 if (rc)
1720                         return rc;
1721         }
1722
1723 #ifdef CONFIG_FS_POSIX_ACL
1724         if (body->mbo_valid & OBD_MD_FLACL) {
1725                 spin_lock(&lli->lli_lock);
1726                 if (lli->lli_posix_acl)
1727                         posix_acl_release(lli->lli_posix_acl);
1728                 lli->lli_posix_acl = md->posix_acl;
1729                 spin_unlock(&lli->lli_lock);
1730         }
1731 #endif
1732         inode->i_ino = cl_fid_build_ino(&body->mbo_fid1,
1733                                         sbi->ll_flags & LL_SBI_32BIT_API);
1734         inode->i_generation = cl_fid_build_gen(&body->mbo_fid1);
1735
1736         if (body->mbo_valid & OBD_MD_FLATIME) {
1737                 if (body->mbo_atime > LTIME_S(inode->i_atime))
1738                         LTIME_S(inode->i_atime) = body->mbo_atime;
1739                 lli->lli_atime = body->mbo_atime;
1740         }
1741         if (body->mbo_valid & OBD_MD_FLMTIME) {
1742                 if (body->mbo_mtime > LTIME_S(inode->i_mtime)) {
1743                         CDEBUG(D_INODE, "setting ino %lu mtime from %lu to %llu\n",
1744                                inode->i_ino, LTIME_S(inode->i_mtime),
1745                                body->mbo_mtime);
1746                         LTIME_S(inode->i_mtime) = body->mbo_mtime;
1747                 }
1748                 lli->lli_mtime = body->mbo_mtime;
1749         }
1750         if (body->mbo_valid & OBD_MD_FLCTIME) {
1751                 if (body->mbo_ctime > LTIME_S(inode->i_ctime))
1752                         LTIME_S(inode->i_ctime) = body->mbo_ctime;
1753                 lli->lli_ctime = body->mbo_ctime;
1754         }
1755         if (body->mbo_valid & OBD_MD_FLMODE)
1756                 inode->i_mode = (inode->i_mode & S_IFMT) |
1757                                 (body->mbo_mode & ~S_IFMT);
1758         if (body->mbo_valid & OBD_MD_FLTYPE)
1759                 inode->i_mode = (inode->i_mode & ~S_IFMT) |
1760                                 (body->mbo_mode & S_IFMT);
1761         LASSERT(inode->i_mode != 0);
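        /*
         * For regular files, advertise a preferred I/O size derived from the
         * maximum bulk RPC size (capped at LL_MAX_BLKSIZE_BITS); other inodes
         * simply inherit the superblock block size.
         */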
1762         if (S_ISREG(inode->i_mode))
1763                 inode->i_blkbits = min(PTLRPC_MAX_BRW_BITS + 1,
1764                                        LL_MAX_BLKSIZE_BITS);
1765         else
1766                 inode->i_blkbits = inode->i_sb->s_blocksize_bits;
1767         if (body->mbo_valid & OBD_MD_FLUID)
1768                 inode->i_uid = make_kuid(&init_user_ns, body->mbo_uid);
1769         if (body->mbo_valid & OBD_MD_FLGID)
1770                 inode->i_gid = make_kgid(&init_user_ns, body->mbo_gid);
1771         if (body->mbo_valid & OBD_MD_FLFLAGS)
1772                 inode->i_flags = ll_ext_to_inode_flags(body->mbo_flags);
1773         if (body->mbo_valid & OBD_MD_FLNLINK)
1774                 set_nlink(inode, body->mbo_nlink);
1775         if (body->mbo_valid & OBD_MD_FLRDEV)
1776                 inode->i_rdev = old_decode_dev(body->mbo_rdev);
1777
1778         if (body->mbo_valid & OBD_MD_FLID) {
1779                 /* FID shouldn't be changed! */
1780                 if (fid_is_sane(&lli->lli_fid)) {
1781                         LASSERTF(lu_fid_eq(&lli->lli_fid, &body->mbo_fid1),
1782                                  "Trying to change FID "DFID" to the "DFID", inode "DFID"(%p)\n",
1783                                  PFID(&lli->lli_fid), PFID(&body->mbo_fid1),
1784                                  PFID(ll_inode2fid(inode)), inode);
1785                 } else {
1786                         lli->lli_fid = body->mbo_fid1;
1787                 }
1788         }
1789
1790         LASSERT(fid_seq(&lli->lli_fid) != 0);
1791
1792         if (body->mbo_valid & OBD_MD_FLSIZE) {
1793                 if (exp_connect_som(ll_i2mdexp(inode)) &&
1794                     S_ISREG(inode->i_mode)) {
1795                         struct lustre_handle lockh;
1796                         enum ldlm_mode mode;
1797
1798                         /* As it is possible a blocking AST has been processed
1799                          * by this time, we need to check that there is an
1800                          * UPDATE lock on the client and set LLIF_MDS_SIZE_LOCK
1801                          * while holding it.
1802                          */
1803                         mode = ll_take_md_lock(inode, MDS_INODELOCK_UPDATE,
1804                                                &lockh, LDLM_FL_CBPENDING,
1805                                                LCK_CR | LCK_CW |
1806                                                LCK_PR | LCK_PW);
1807                         if (mode) {
1808                                 if (lli->lli_flags & (LLIF_DONE_WRITING |
1809                                                       LLIF_EPOCH_PENDING |
1810                                                       LLIF_SOM_DIRTY)) {
1811                                         CERROR("%s: inode "DFID" flags %u still has size authority! do not trust the size returned by the MDS\n",
1812                                                sbi->ll_md_exp->exp_obd->obd_name,
1813                                                PFID(ll_inode2fid(inode)),
1814                                                lli->lli_flags);
1815                                 } else {
1816                                         /* Use old size assignment to avoid
1817                                          * deadlock bz14138 & bz14326
1818                                          */
1819                                         i_size_write(inode, body->mbo_size);
1820                                         spin_lock(&lli->lli_lock);
1821                                         lli->lli_flags |= LLIF_MDS_SIZE_LOCK;
1822                                         spin_unlock(&lli->lli_lock);
1823                                 }
1824                                 ldlm_lock_decref(&lockh, mode);
1825                         }
1826                 } else {
1827                         /* Use old size assignment to avoid
1828                          * deadlock bz14138 & bz14326
1829                          */
1830                         i_size_write(inode, body->mbo_size);
1831
1832                         CDEBUG(D_VFSTRACE, "inode=%lu, updating i_size %llu\n",
1833                                inode->i_ino, (unsigned long long)body->mbo_size);
1834                 }
1835
1836                 if (body->mbo_valid & OBD_MD_FLBLOCKS)
1837                         inode->i_blocks = body->mbo_blocks;
1838         }
1839
1840         if (body->mbo_valid & OBD_MD_TSTATE) {
1841                 if (body->mbo_t_state & MS_RESTORE)
1842                         lli->lli_flags |= LLIF_FILE_RESTORING;
1843         }
1844
1845         return 0;
1846 }
1847
1848 int ll_read_inode2(struct inode *inode, void *opaque)
1849 {
1850         struct lustre_md *md = opaque;
1851         struct ll_inode_info *lli = ll_i2info(inode);
1852         int rc;
1853
1854         CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
1855                PFID(&lli->lli_fid), inode);
1856
1857         LASSERT(!lli->lli_has_smd);
1858
1859         /* Core attributes from the MDS first.  This is a new inode, and
1860          * the VFS doesn't zero times in the core inode so we have to do
1861          * it ourselves.  They will be overwritten by either MDS or OST
1862          * attributes - we just need to make sure they aren't newer.
1863          */
1864         LTIME_S(inode->i_mtime) = 0;
1865         LTIME_S(inode->i_atime) = 0;
1866         LTIME_S(inode->i_ctime) = 0;
1867         inode->i_rdev = 0;
1868         rc = ll_update_inode(inode, md);
1869         if (rc)
1870                 return rc;
1871
1872         /* OIDEBUG(inode); */
1873
1874         if (S_ISREG(inode->i_mode)) {
1875                 struct ll_sb_info *sbi = ll_i2sbi(inode);
1876
1877                 inode->i_op = &ll_file_inode_operations;
1878                 inode->i_fop = sbi->ll_fop;
1879                 inode->i_mapping->a_ops = (struct address_space_operations *)&ll_aops;
1880         } else if (S_ISDIR(inode->i_mode)) {
1881                 inode->i_op = &ll_dir_inode_operations;
1882                 inode->i_fop = &ll_dir_operations;
1883         } else if (S_ISLNK(inode->i_mode)) {
1884                 inode->i_op = &ll_fast_symlink_inode_operations;
1885         } else {
1886                 inode->i_op = &ll_special_inode_operations;
1887
1888                 init_special_inode(inode, inode->i_mode,
1889                                    inode->i_rdev);
1890         }
1891
1892         return 0;
1893 }
1894
1895 void ll_delete_inode(struct inode *inode)
1896 {
1897         struct ll_inode_info *lli = ll_i2info(inode);
1898
1899         if (S_ISREG(inode->i_mode) && lli->lli_clob)
1900                 /* discard all dirty pages before truncating them, required by
1901                  * osc_extent implementation at LU-1030.
1902                  */
1903                 cl_sync_file_range(inode, 0, OBD_OBJECT_EOF,
1904                                    CL_FSYNC_DISCARD, 1);
1905
1906         truncate_inode_pages_final(&inode->i_data);
1907
1908         /* Workaround for LU-118 */
1909         if (inode->i_data.nrpages) {
1910                 spin_lock_irq(&inode->i_data.tree_lock);
1911                 spin_unlock_irq(&inode->i_data.tree_lock);
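                /*
                 * The empty lock/unlock pair above presumably waits out a
                 * racing page removal that still holds tree_lock before the
                 * page count is asserted to be zero below.
                 */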
1912                 LASSERTF(inode->i_data.nrpages == 0,
1913                          "inode="DFID"(%p) nrpages=%lu, see http://jira.whamcloud.com/browse/LU-118\n",
1914                          PFID(ll_inode2fid(inode)), inode,
1915                          inode->i_data.nrpages);
1916         }
1917         /* Workaround end */
1918
1919         ll_clear_inode(inode);
1920         clear_inode(inode);
1921 }
1922
1923 int ll_iocontrol(struct inode *inode, struct file *file,
1924                  unsigned int cmd, unsigned long arg)
1925 {
1926         struct ll_sb_info *sbi = ll_i2sbi(inode);
1927         struct ptlrpc_request *req = NULL;
1928         int rc, flags = 0;
1929
1930         switch (cmd) {
1931         case FSFILT_IOC_GETFLAGS: {
1932                 struct mdt_body *body;
1933                 struct md_op_data *op_data;
1934
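                /*
                 * Fetch the current flags from the MDT with a getattr RPC and
                 * copy them back to user space.
                 */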
1935                 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL,
1936                                              0, 0, LUSTRE_OPC_ANY,
1937                                              NULL);
1938                 if (IS_ERR(op_data))
1939                         return PTR_ERR(op_data);
1940
1941                 op_data->op_valid = OBD_MD_FLFLAGS;
1942                 rc = md_getattr(sbi->ll_md_exp, op_data, &req);
1943                 ll_finish_md_op_data(op_data);
1944                 if (rc) {
1945                         CERROR("%s: failure inode "DFID": rc = %d\n",
1946                                sbi->ll_md_exp->exp_obd->obd_name,
1947                                PFID(ll_inode2fid(inode)), rc);
1948                         return -abs(rc);
1949                 }
1950
1951                 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
1952
1953                 flags = body->mbo_flags;
1954
1955                 ptlrpc_req_finished(req);
1956
1957                 return put_user(flags, (int __user *)arg);
1958         }
1959         case FSFILT_IOC_SETFLAGS: {
1960                 struct lov_stripe_md *lsm;
1961                 struct obd_info oinfo = { };
1962                 struct md_op_data *op_data;
1963
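                /*
                 * Set the flags on the MDT inode first, then propagate them
                 * to every OST object referenced by the stripe metadata, if
                 * there is any.
                 */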
1964                 if (get_user(flags, (int __user *)arg))
1965                         return -EFAULT;
1966
1967                 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
1968                                              LUSTRE_OPC_ANY, NULL);
1969                 if (IS_ERR(op_data))
1970                         return PTR_ERR(op_data);
1971
1972                 op_data->op_attr_flags = flags;
1973                 op_data->op_attr.ia_valid |= ATTR_ATTR_FLAG;
1974                 rc = md_setattr(sbi->ll_md_exp, op_data,
1975                                 NULL, 0, NULL, 0, &req, NULL);
1976                 ll_finish_md_op_data(op_data);
1977                 ptlrpc_req_finished(req);
1978                 if (rc)
1979                         return rc;
1980
1981                 inode->i_flags = ll_ext_to_inode_flags(flags);
1982
1983                 lsm = ccc_inode_lsm_get(inode);
1984                 if (!lsm_has_objects(lsm)) {
1985                         ccc_inode_lsm_put(inode, lsm);
1986                         return 0;
1987                 }
1988
1989                 oinfo.oi_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
1990                 if (!oinfo.oi_oa) {
1991                         ccc_inode_lsm_put(inode, lsm);
1992                         return -ENOMEM;
1993                 }
1994                 oinfo.oi_md = lsm;
1995                 oinfo.oi_oa->o_oi = lsm->lsm_oi;
1996                 oinfo.oi_oa->o_flags = flags;
1997                 oinfo.oi_oa->o_valid = OBD_MD_FLID | OBD_MD_FLFLAGS |
1998                                        OBD_MD_FLGROUP;
1999                 obdo_set_parent_fid(oinfo.oi_oa, &ll_i2info(inode)->lli_fid);
2000                 rc = obd_setattr_rqset(sbi->ll_dt_exp, &oinfo, NULL);
2001                 kmem_cache_free(obdo_cachep, oinfo.oi_oa);
2002                 ccc_inode_lsm_put(inode, lsm);
2003
2004                 if (rc && rc != -EPERM && rc != -EACCES)
2005                         CERROR("osc_setattr_async fails: rc = %d\n", rc);
2006
2007                 return rc;
2008         }
2009         default:
2010                 return -ENOSYS;
2011         }
2012
2013         return 0;
2014 }
2015
2016 int ll_flush_ctx(struct inode *inode)
2017 {
2018         struct ll_sb_info  *sbi = ll_i2sbi(inode);
2019
2020         CDEBUG(D_SEC, "flush context for user %d\n",
2021                from_kuid(&init_user_ns, current_uid()));
2022
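        /*
         * Ask both the metadata (MDC) and data (OSC/LOV) exports to flush
         * the security context of the current user.
         */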
2023         obd_set_info_async(NULL, sbi->ll_md_exp,
2024                            sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
2025                            0, NULL, NULL);
2026         obd_set_info_async(NULL, sbi->ll_dt_exp,
2027                            sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
2028                            0, NULL, NULL);
2029         return 0;
2030 }
2031
2032 /* umount -f client means force down, don't save state */
2033 void ll_umount_begin(struct super_block *sb)
2034 {
2035         struct ll_sb_info *sbi = ll_s2sbi(sb);
2036         struct obd_device *obd;
2037         struct obd_ioctl_data *ioc_data;
2038
2039         CDEBUG(D_VFSTRACE, "VFS Op: superblock %p count %d active %d\n", sb,
2040                sb->s_count, atomic_read(&sb->s_active));
2041
2042         obd = class_exp2obd(sbi->ll_md_exp);
2043         if (!obd) {
2044                 CERROR("Invalid MDC connection handle %#llx\n",
2045                        sbi->ll_md_exp->exp_handle.h_cookie);
2046                 return;
2047         }
2048         obd->obd_force = 1;
2049
2050         obd = class_exp2obd(sbi->ll_dt_exp);
2051         if (!obd) {
2052                 CERROR("Invalid LOV connection handle %#llx\n",
2053                        sbi->ll_dt_exp->exp_handle.h_cookie);
2054                 return;
2055         }
2056         obd->obd_force = 1;
2057
2058         ioc_data = kzalloc(sizeof(*ioc_data), GFP_NOFS);
2059         if (ioc_data) {
2060                 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_md_exp,
2061                               sizeof(*ioc_data), ioc_data, NULL);
2062
2063                 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_dt_exp,
2064                               sizeof(*ioc_data), ioc_data, NULL);
2065
2066                 kfree(ioc_data);
2067         }
2068
2069         /* Really, we'd like to wait until there are no requests outstanding,
2070          * and then continue.  For now, we just invalidate the requests,
2071          * schedule() and sleep one second if needed, and hope.
2072          */
2073         schedule();
2074 }
2075
2076 int ll_remount_fs(struct super_block *sb, int *flags, char *data)
2077 {
2078         struct ll_sb_info *sbi = ll_s2sbi(sb);
2079         char *profilenm = get_profile_name(sb);
2080         int err;
2081         __u32 read_only;
2082
2083         if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY)) {
2084                 read_only = *flags & MS_RDONLY;
2085                 err = obd_set_info_async(NULL, sbi->ll_md_exp,
2086                                          sizeof(KEY_READ_ONLY),
2087                                          KEY_READ_ONLY, sizeof(read_only),
2088                                          &read_only, NULL);
2089                 if (err) {
2090                         LCONSOLE_WARN("Failed to remount %s %s (%d)\n",
2091                                       profilenm, read_only ?
2092                                       "read-only" : "read-write", err);
2093                         return err;
2094                 }
2095
2096                 if (read_only)
2097                         sb->s_flags |= MS_RDONLY;
2098                 else
2099                         sb->s_flags &= ~MS_RDONLY;
2100
2101                 if (sbi->ll_flags & LL_SBI_VERBOSE)
2102                         LCONSOLE_WARN("Remounted %s %s\n", profilenm,
2103                                       read_only ?  "read-only" : "read-write");
2104         }
2105         return 0;
2106 }
2107
2108 /**
2109  * Cleanup the open handle that is cached on MDT-side.
2110  *
2111  * In the open case, the client-side open handling thread may hit an error
2112  * after the MDT has granted the open. In that case, the client should send
2113  * a close RPC to the MDT as cleanup; otherwise, the open handle is leaked
2114  * on the MDT until the client unmounts or is evicted.
2115  *
2116  * Furthermore, if someone unlinks the file, the open handle holds a
2117  * reference on that file/object and will block subsequent threads that
2118  * want to locate the object via its FID.
2119  *
2120  * \param[in] sb        super block for this file-system
2121  * \param[in] open_req  pointer to the original open request
2122  */
2123 void ll_open_cleanup(struct super_block *sb, struct ptlrpc_request *open_req)
2124 {
2125         struct mdt_body                 *body;
2126         struct md_op_data               *op_data;
2127         struct ptlrpc_request           *close_req = NULL;
2128         struct obd_export               *exp       = ll_s2sbi(sb)->ll_md_exp;
2129
2130         body = req_capsule_server_get(&open_req->rq_pill, &RMF_MDT_BODY);
2131         op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
2132         if (!op_data)
2133                 return;
2134
2135         op_data->op_fid1 = body->mbo_fid1;
2136         op_data->op_ioepoch = body->mbo_ioepoch;
2137         op_data->op_handle = body->mbo_handle;
2138         op_data->op_mod_time = get_seconds();
2139         md_close(exp, op_data, NULL, &close_req);
2140         ptlrpc_req_finished(close_req);
2141         ll_finish_md_op_data(op_data);
2142 }
2143
2144 int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
2145                   struct super_block *sb, struct lookup_intent *it)
2146 {
2147         struct ll_sb_info *sbi = NULL;
2148         struct lustre_md md = { NULL };
2149         int rc;
2150
2151         LASSERT(*inode || sb);
2152         sbi = sb ? ll_s2sbi(sb) : ll_i2sbi(*inode);
2153         rc = md_get_lustre_md(sbi->ll_md_exp, req, sbi->ll_dt_exp,
2154                               sbi->ll_md_exp, &md);
2155         if (rc)
2156                 goto cleanup;
2157
2158         if (*inode) {
2159                 rc = ll_update_inode(*inode, &md);
2160                 if (rc)
2161                         goto out;
2162         } else {
2163                 LASSERT(sb);
2164
2165                 /*
2166                  * At this point the server returns the same fid the client
2167                  * generated for the create, so using ->fid1 is okay here.
2168                  */
2169                 if (!fid_is_sane(&md.body->mbo_fid1)) {
2170                         CERROR("%s: Fid is insane " DFID "\n",
2171                                ll_get_fsname(sb, NULL, 0),
2172                                PFID(&md.body->mbo_fid1));
2173                         rc = -EINVAL;
2174                         goto out;
2175                 }
2176
2177                 *inode = ll_iget(sb, cl_fid_build_ino(&md.body->mbo_fid1,
2178                                              sbi->ll_flags & LL_SBI_32BIT_API),
2179                                  &md);
2180                 if (IS_ERR(*inode)) {
2181 #ifdef CONFIG_FS_POSIX_ACL
2182                         if (md.posix_acl) {
2183                                 posix_acl_release(md.posix_acl);
2184                                 md.posix_acl = NULL;
2185                         }
2186 #endif
2187                         rc = -ENOMEM;
2188                         CERROR("new_inode -fatal: rc %d\n", rc);
2189                         goto out;
2190                 }
2191         }
2192
2193         /* Handle a piggybacked layout lock.
2194          * A layout lock can be piggybacked on getattr and open requests.
2195          * The lsm can be applied to the inode only if it comes with a layout
2196          * lock; otherwise the correct layout may be overwritten, for example:
2197          * 1. proc1: the MDT returns an lsm but does not grant the layout lock
2198          * 2. the layout is changed by another client
2199          * 3. proc2: refreshes the layout; the layout lock is granted
2200          * 4. proc1: applies the now-stale layout
2201          */
2202         if (it && it->it_lock_mode != 0) {
2203                 struct lustre_handle lockh;
2204                 struct ldlm_lock *lock;
2205
2206                 lockh.cookie = it->it_lock_handle;
2207                 lock = ldlm_handle2lock(&lockh);
2208                 LASSERT(lock);
2209                 if (ldlm_has_layout(lock)) {
2210                         struct cl_object_conf conf;
2211
2212                         memset(&conf, 0, sizeof(conf));
2213                         conf.coc_opc = OBJECT_CONF_SET;
2214                         conf.coc_inode = *inode;
2215                         conf.coc_lock = lock;
2216                         conf.u.coc_md = &md;
2217                         (void)ll_layout_conf(*inode, &conf);
2218                 }
2219                 LDLM_LOCK_PUT(lock);
2220         }
2221
2222 out:
2223         if (md.lsm)
2224                 obd_free_memmd(sbi->ll_dt_exp, &md.lsm);
2225         md_free_lustre_md(sbi->ll_md_exp, &md);
2226
2227 cleanup:
2228         if (rc != 0 && it && it->it_op & IT_OPEN)
2229                 ll_open_cleanup(sb ? sb : (*inode)->i_sb, req);
2230
2231         return rc;
2232 }
2233
2234 int ll_obd_statfs(struct inode *inode, void __user *arg)
2235 {
2236         struct ll_sb_info *sbi = NULL;
2237         struct obd_export *exp;
2238         char *buf = NULL;
2239         struct obd_ioctl_data *data = NULL;
2240         __u32 type;
2241         int len = 0, rc;
2242
2243         if (!inode) {
2244                 rc = -EINVAL;
2245                 goto out_statfs;
2246         }
2247
2248         sbi = ll_i2sbi(inode);
2249         if (!sbi) {
2250                 rc = -EINVAL;
2251                 goto out_statfs;
2252         }
2253
2254         rc = obd_ioctl_getdata(&buf, &len, arg);
2255         if (rc)
2256                 goto out_statfs;
2257
2258         data = (void *)buf;
2259         if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 ||
2260             !data->ioc_pbuf1 || !data->ioc_pbuf2) {
2261                 rc = -EINVAL;
2262                 goto out_statfs;
2263         }
2264
2265         if (data->ioc_inllen1 != sizeof(__u32) ||
2266             data->ioc_inllen2 != sizeof(__u32) ||
2267             data->ioc_plen1 != sizeof(struct obd_statfs) ||
2268             data->ioc_plen2 != sizeof(struct obd_uuid)) {
2269                 rc = -EINVAL;
2270                 goto out_statfs;
2271         }
2272
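        /*
         * ioc_inlbuf1 selects the target type (LL_STATFS_LMV or LL_STATFS_LOV);
         * the result is returned through ioc_pbuf1 (struct obd_statfs) and
         * ioc_pbuf2 (struct obd_uuid), hence the size checks above.
         */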
2273         memcpy(&type, data->ioc_inlbuf1, sizeof(__u32));
2274         if (type & LL_STATFS_LMV) {
2275                 exp = sbi->ll_md_exp;
2276         } else if (type & LL_STATFS_LOV) {
2277                 exp = sbi->ll_dt_exp;
2278         } else {
2279                 rc = -ENODEV;
2280                 goto out_statfs;
2281         }
2282
2283         rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, buf, NULL);
2284         if (rc)
2285                 goto out_statfs;
2286 out_statfs:
2287         if (buf)
2288                 obd_ioctl_freedata(buf, len);
2289         return rc;
2290 }
2291
2292 int ll_process_config(struct lustre_cfg *lcfg)
2293 {
2294         char *ptr;
2295         void *sb;
2296         struct lprocfs_static_vars lvars;
2297         unsigned long x;
2298         int rc = 0;
2299
2300         lprocfs_llite_init_vars(&lvars);
2301
2302         /* The instance name contains the sb: lustre-client-aacfe000 */
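        /*
         * For example, for "lustre-client-aacfe000" ptr ends up pointing at
         * "aacfe000", which kstrtoul(..., 16, ...) converts back into the
         * superblock pointer.
         */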
2303         ptr = strrchr(lustre_cfg_string(lcfg, 0), '-');
2304         if (!ptr || !*(++ptr))
2305                 return -EINVAL;
2306         rc = kstrtoul(ptr, 16, &x);
2307         if (rc != 0)
2308                 return -EINVAL;
2309         sb = (void *)x;
2310         /* This better be a real Lustre superblock! */
2311         LASSERT(s2lsi((struct super_block *)sb)->lsi_lmd->lmd_magic == LMD_MAGIC);
2312
2313         /* Note we have not called client_common_fill_super yet, so
2314          * proc fns must be able to handle that!
2315          */
2316         rc = class_process_proc_param(PARAM_LLITE, lvars.obd_vars,
2317                                       lcfg, sb);
2318         if (rc > 0)
2319                 rc = 0;
2320         return rc;
2321 }
2322
2323 /* This function prepares the md_op_data hint before passing it down to the MD stack. */
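/*
 * Typically i1 is the primary inode the operation applies to (the parent
 * directory for namespace operations, or the file itself for e.g. setattr)
 * and i2, when non-NULL, is the second inode involved; name and namelen
 * describe the directory entry being operated on, if any.
 */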
2324 struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
2325                                       struct inode *i1, struct inode *i2,
2326                                       const char *name, int namelen,
2327                                       int mode, __u32 opc, void *data)
2328 {
2329         if (!name) {
2330                 /* Do not reuse namelen for something else. */
2331                 if (namelen)
2332                         return ERR_PTR(-EINVAL);
2333         } else {
2334                 if (namelen > ll_i2sbi(i1)->ll_namelen)
2335                         return ERR_PTR(-ENAMETOOLONG);
2336
2337                 if (!lu_name_is_valid_2(name, namelen))
2338                         return ERR_PTR(-EINVAL);
2339         }
2340
2341         if (!op_data)
2342                 op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
2343
2344         if (!op_data)
2345                 return ERR_PTR(-ENOMEM);
2346
2347         ll_i2gids(op_data->op_suppgids, i1, i2);
2348         op_data->op_fid1 = *ll_inode2fid(i1);
2349         if (S_ISDIR(i1->i_mode))
2350                 op_data->op_mea1 = ll_i2info(i1)->lli_lsm_md;
2351
2352         if (i2) {
2353                 op_data->op_fid2 = *ll_inode2fid(i2);
2354                 if (S_ISDIR(i2->i_mode))
2355                         op_data->op_mea2 = ll_i2info(i2)->lli_lsm_md;
2356         } else {
2357                 fid_zero(&op_data->op_fid2);
2358         }
2359
2360         op_data->op_bias = 0;
2361         op_data->op_cli_flags = 0;
2362
2363         if (ll_i2sbi(i1)->ll_flags & LL_SBI_64BIT_HASH)
2364                 op_data->op_cli_flags |= CLI_HASH64;
2365         if (ll_need_32bit_api(ll_i2sbi(i1)))
2366                 op_data->op_cli_flags |= CLI_API32;
2367
2368         op_data->op_name = name;
2369         op_data->op_namelen = namelen;
2370         op_data->op_mode = mode;
2371         op_data->op_mod_time = ktime_get_real_seconds();
2372         op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
2373         op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
2374         op_data->op_cap = cfs_curproc_cap_pack();
2375         if ((opc == LUSTRE_OPC_CREATE) && name &&
2376             filename_is_volatile(name, namelen, NULL))
2377                 op_data->op_bias |= MDS_CREATE_VOLATILE;
2378         op_data->op_mds = 0;
2379         op_data->op_data = data;
2380
2381         /* When called by ll_setattr_raw, file is i1. */
2382         if (ll_i2info(i1)->lli_flags & LLIF_DATA_MODIFIED)
2383                 op_data->op_bias |= MDS_DATA_MODIFIED;
2384
2385         return op_data;
2386 }
2387
2388 void ll_finish_md_op_data(struct md_op_data *op_data)
2389 {
2390         kfree(op_data);
2391 }
2392
2393 int ll_show_options(struct seq_file *seq, struct dentry *dentry)
2394 {
2395         struct ll_sb_info *sbi;
2396
2397         LASSERT(seq && dentry);
2398         sbi = ll_s2sbi(dentry->d_sb);
2399
2400         if (sbi->ll_flags & LL_SBI_NOLCK)
2401                 seq_puts(seq, ",nolock");
2402
2403         if (sbi->ll_flags & LL_SBI_FLOCK)
2404                 seq_puts(seq, ",flock");
2405
2406         if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
2407                 seq_puts(seq, ",localflock");
2408
2409         if (sbi->ll_flags & LL_SBI_USER_XATTR)
2410                 seq_puts(seq, ",user_xattr");
2411
2412         if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
2413                 seq_puts(seq, ",lazystatfs");
2414
2415         if (sbi->ll_flags & LL_SBI_USER_FID2PATH)
2416                 seq_puts(seq, ",user_fid2path");
2417
2418         return 0;
2419 }
2420
2421 /**
2422  * Get the obd name selected by \a cmd and copy it out to user space.
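 *
 * Rough user-space usage sketch (an assumption, not defined in this file):
 * ioctl(fd, OBD_IOC_GETDTNAME, buf) returns the data target name and
 * ioctl(fd, OBD_IOC_GETMDNAME, buf) the metadata target name, where fd is
 * any file opened inside the mount and buf is large enough to hold the
 * NUL-terminated obd name.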
2423  */
2424 int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg)
2425 {
2426         struct ll_sb_info *sbi = ll_i2sbi(inode);
2427         struct obd_device *obd;
2428
2429         if (cmd == OBD_IOC_GETDTNAME)
2430                 obd = class_exp2obd(sbi->ll_dt_exp);
2431         else if (cmd == OBD_IOC_GETMDNAME)
2432                 obd = class_exp2obd(sbi->ll_md_exp);
2433         else
2434                 return -EINVAL;
2435
2436         if (!obd)
2437                 return -ENOENT;
2438
2439         if (copy_to_user((void __user *)arg, obd->obd_name,
2440                          strlen(obd->obd_name) + 1))
2441                 return -EFAULT;
2442
2443         return 0;
2444 }
2445
2446 /**
2447  * Get the Lustre file system name from \a sb. If \a buf is provided (non-NULL),
2448  * the fsname will be returned in this buffer; otherwise, a static buffer will
2449  * be used to store the fsname and returned to the caller.
2450  */
2451 char *ll_get_fsname(struct super_block *sb, char *buf, int buflen)
2452 {
2453         static char fsname_static[MTI_NAME_MAXLEN];
2454         struct lustre_sb_info *lsi = s2lsi(sb);
2455         char *ptr;
2456         int len;
2457
2458         if (!buf) {
2459                 /* This means the caller wants to use the static buffer
2460                  * and doesn't care about races. Usually this is the
2461                  * error reporting path.
2462                  */
2463                 buf = fsname_static;
2464                 buflen = sizeof(fsname_static);
2465         }
2466
2467         len = strlen(lsi->lsi_lmd->lmd_profile);
2468         ptr = strrchr(lsi->lsi_lmd->lmd_profile, '-');
2469         if (ptr && (strcmp(ptr, "-client") == 0))
2470                 len -= 7;
2471
2472         if (unlikely(len >= buflen))
2473                 len = buflen - 1;
2474         strncpy(buf, lsi->lsi_lmd->lmd_profile, len);
2475         buf[len] = '\0';
2476
2477         return buf;
2478 }
2479
2480 void ll_dirty_page_discard_warn(struct page *page, int ioret)
2481 {
2482         char *buf, *path = NULL;
2483         struct dentry *dentry = NULL;
2484         struct vvp_object *obj = cl_inode2vvp(page->mapping->host);
2485
2486         /* This can be called inside a spin lock, so use GFP_ATOMIC. */
2487         buf = (char *)__get_free_page(GFP_ATOMIC);
2488         if (buf) {
2489                 dentry = d_find_alias(page->mapping->host);
2490                 if (dentry)
2491                         path = dentry_path_raw(dentry, buf, PAGE_SIZE);
2492         }
2493
2494         CDEBUG(D_WARNING,
2495                "%s: dirty page discard: %s/fid: " DFID "/%s may get corrupted (rc %d)\n",
2496                ll_get_fsname(page->mapping->host->i_sb, NULL, 0),
2497                s2lsi(page->mapping->host->i_sb)->lsi_lmd->lmd_dev,
2498                PFID(&obj->vob_header.coh_lu.loh_fid),
2499                (path && !IS_ERR(path)) ? path : "", ioret);
2500
2501         if (dentry)
2502                 dput(dentry);
2503
2504         if (buf)
2505                 free_page((unsigned long)buf);
2506 }
2507
2508 /*
2509  * Compute the llite root squash state after a change of the root squash
2510  * configuration setting or after an LNet NID has been added or removed.
2511  */
2512 void ll_compute_rootsquash_state(struct ll_sb_info *sbi)
2513 {
2514         struct root_squash_info *squash = &sbi->ll_squash;
2515         lnet_process_id_t id;
2516         bool matched;
2517         int i;
2518
2519         /* Update norootsquash flag */
2520         down_write(&squash->rsi_sem);
2521         if (list_empty(&squash->rsi_nosquash_nids)) {
2522                 sbi->ll_flags &= ~LL_SBI_NOROOTSQUASH;
2523         } else {
2524                 /*
2525                  * Do not apply root squash if at least one of our NIDs
2526                  * is in the nosquash_nids list.
2527                  */
2528                 matched = false;
2529                 i = 0;
2530
2531                 while (LNetGetId(i++, &id) != -ENOENT) {
2532                         if (LNET_NETTYP(LNET_NIDNET(id.nid)) == LOLND)
2533                                 continue;
2534                         if (cfs_match_nid(id.nid, &squash->rsi_nosquash_nids)) {
2535                                 matched = true;
2536                                 break;
2537                         }
2538                 }
2539                 if (matched)
2540                         sbi->ll_flags |= LL_SBI_NOROOTSQUASH;
2541                 else
2542                         sbi->ll_flags &= ~LL_SBI_NOROOTSQUASH;
2543         }
2544         up_write(&squash->rsi_sem);
2545 }