// SPDX-License-Identifier: GPL-2.0
/*
 * fs/quota/quota.c
 *
 * Quota code necessary even when VFS quota support is not compiled
 * into the kernel.  The interesting stuff is over in dquot.c, here
 * we have symbols for initial quotactl(2) handling, the sysctl(2)
 * variables, etc. - things needed even when quota support is disabled.
 */

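/*
 * Illustrative only (not part of the original file): a minimal userspace
 * sketch of the quotactl(2) call that the code below services.  The device
 * path and the uid are assumptions made up for the example.
 *
 *        #include <stdio.h>
 *        #include <sys/quota.h>
 *
 *        int main(void)
 *        {
 *                struct dqblk dq;
 *
 *                // Q_GETQUOTA for USRQUOTA lands in quota_getquota() below.
 *                if (quotactl(QCMD(Q_GETQUOTA, USRQUOTA), "/dev/sda1", 1000,
 *                             (caddr_t)&dq) == 0)
 *                        printf("space used: %llu bytes\n",
 *                               (unsigned long long)dq.dqb_curspace);
 *                return 0;
 *        }
 */
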
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <asm/current.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/capability.h>
#include <linux/quotaops.h>
#include <linux/types.h>
#include <linux/writeback.h>
#include <linux/nospec.h>

static int check_quotactl_permission(struct super_block *sb, int type, int cmd,
                                     qid_t id)
{
        switch (cmd) {
        /* these commands do not require any special privileges */
        case Q_GETFMT:
        case Q_SYNC:
        case Q_GETINFO:
        case Q_XGETQSTAT:
        case Q_XGETQSTATV:
        case Q_XQUOTASYNC:
                break;
        /* allow querying information for dquots we "own" */
        case Q_GETQUOTA:
        case Q_XGETQUOTA:
                if ((type == USRQUOTA && uid_eq(current_euid(), make_kuid(current_user_ns(), id))) ||
                    (type == GRPQUOTA && in_egroup_p(make_kgid(current_user_ns(), id))))
                        break;
                fallthrough;
        default:
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;
        }

        return security_quotactl(cmd, type, id, sb);
}
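
/*
 * Illustrative only (not part of the original file): with the checks above,
 * an unprivileged process whose euid maps to uid 1000 may issue Q_GETQUOTA
 * for USRQUOTA id 1000 (a dquot it "owns"), while Q_SETQUOTA, or Q_GETQUOTA
 * for another user's id, requires CAP_SYS_ADMIN.  security_quotactl() is
 * consulted in every case that is not rejected with -EPERM.
 */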

static void quota_sync_one(struct super_block *sb, void *arg)
{
        int type = *(int *)arg;

        if (sb->s_qcop && sb->s_qcop->quota_sync &&
            (sb->s_quota_types & (1 << type)))
                sb->s_qcop->quota_sync(sb, type);
}

static int quota_sync_all(int type)
{
        int ret;

        ret = security_quotactl(Q_SYNC, type, 0, NULL);
        if (!ret)
                iterate_supers(quota_sync_one, &type);
        return ret;
}

unsigned int qtype_enforce_flag(int type)
{
        switch (type) {
        case USRQUOTA:
                return FS_QUOTA_UDQ_ENFD;
        case GRPQUOTA:
                return FS_QUOTA_GDQ_ENFD;
        case PRJQUOTA:
                return FS_QUOTA_PDQ_ENFD;
        }
        return 0;
}

static int quota_quotaon(struct super_block *sb, int type, qid_t id,
                         const struct path *path)
{
        if (!sb->s_qcop->quota_on && !sb->s_qcop->quota_enable)
                return -ENOSYS;
        if (sb->s_qcop->quota_enable)
                return sb->s_qcop->quota_enable(sb, qtype_enforce_flag(type));
        if (IS_ERR(path))
                return PTR_ERR(path);
        return sb->s_qcop->quota_on(sb, type, id, path);
}

static int quota_quotaoff(struct super_block *sb, int type)
{
        if (!sb->s_qcop->quota_off && !sb->s_qcop->quota_disable)
                return -ENOSYS;
        if (sb->s_qcop->quota_disable)
                return sb->s_qcop->quota_disable(sb, qtype_enforce_flag(type));
        return sb->s_qcop->quota_off(sb, type);
}

static int quota_getfmt(struct super_block *sb, int type, void __user *addr)
{
        __u32 fmt;

        if (!sb_has_quota_active(sb, type))
                return -ESRCH;
        fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id;
        if (copy_to_user(addr, &fmt, sizeof(fmt)))
                return -EFAULT;
        return 0;
}

static int quota_getinfo(struct super_block *sb, int type, void __user *addr)
{
        struct qc_state state;
        struct qc_type_state *tstate;
        struct if_dqinfo uinfo;
        int ret;

        if (!sb->s_qcop->get_state)
                return -ENOSYS;
        ret = sb->s_qcop->get_state(sb, &state);
        if (ret)
                return ret;
        tstate = state.s_state + type;
        if (!(tstate->flags & QCI_ACCT_ENABLED))
                return -ESRCH;
        memset(&uinfo, 0, sizeof(uinfo));
        uinfo.dqi_bgrace = tstate->spc_timelimit;
        uinfo.dqi_igrace = tstate->ino_timelimit;
        if (tstate->flags & QCI_SYSFILE)
                uinfo.dqi_flags |= DQF_SYS_FILE;
        if (tstate->flags & QCI_ROOT_SQUASH)
                uinfo.dqi_flags |= DQF_ROOT_SQUASH;
        uinfo.dqi_valid = IIF_ALL;
        if (copy_to_user(addr, &uinfo, sizeof(uinfo)))
                return -EFAULT;
        return 0;
}

static int quota_setinfo(struct super_block *sb, int type, void __user *addr)
{
        struct if_dqinfo info;
        struct qc_info qinfo;

        if (copy_from_user(&info, addr, sizeof(info)))
                return -EFAULT;
        if (!sb->s_qcop->set_info)
                return -ENOSYS;
        if (info.dqi_valid & ~(IIF_FLAGS | IIF_BGRACE | IIF_IGRACE))
                return -EINVAL;
        memset(&qinfo, 0, sizeof(qinfo));
        if (info.dqi_valid & IIF_FLAGS) {
                if (info.dqi_flags & ~DQF_SETINFO_MASK)
                        return -EINVAL;
                if (info.dqi_flags & DQF_ROOT_SQUASH)
                        qinfo.i_flags |= QCI_ROOT_SQUASH;
                qinfo.i_fieldmask |= QC_FLAGS;
        }
        if (info.dqi_valid & IIF_BGRACE) {
                qinfo.i_spc_timelimit = info.dqi_bgrace;
                qinfo.i_fieldmask |= QC_SPC_TIMER;
        }
        if (info.dqi_valid & IIF_IGRACE) {
                qinfo.i_ino_timelimit = info.dqi_igrace;
                qinfo.i_fieldmask |= QC_INO_TIMER;
        }
        return sb->s_qcop->set_info(sb, type, &qinfo);
}

static inline qsize_t qbtos(qsize_t blocks)
{
        return blocks << QIF_DQBLKSIZE_BITS;
}

static inline qsize_t stoqb(qsize_t space)
{
        return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS;
}
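
/*
 * Illustrative only (not part of the original file): assuming
 * QIF_DQBLKSIZE_BITS is 10, i.e. 1 KiB quota blocks as in the uapi quota
 * header, qbtos(3) == 3072 bytes and stoqb(3073) == 4 blocks -- stoqb()
 * rounds any partial block up.
 */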

static void copy_to_if_dqblk(struct if_dqblk *dst, struct qc_dqblk *src)
{
        memset(dst, 0, sizeof(*dst));
        dst->dqb_bhardlimit = stoqb(src->d_spc_hardlimit);
        dst->dqb_bsoftlimit = stoqb(src->d_spc_softlimit);
        dst->dqb_curspace = src->d_space;
        dst->dqb_ihardlimit = src->d_ino_hardlimit;
        dst->dqb_isoftlimit = src->d_ino_softlimit;
        dst->dqb_curinodes = src->d_ino_count;
        dst->dqb_btime = src->d_spc_timer;
        dst->dqb_itime = src->d_ino_timer;
        dst->dqb_valid = QIF_ALL;
}

static int quota_getquota(struct super_block *sb, int type, qid_t id,
                          void __user *addr)
{
        struct kqid qid;
        struct qc_dqblk fdq;
        struct if_dqblk idq;
        int ret;

        if (!sb->s_qcop->get_dqblk)
                return -ENOSYS;
        qid = make_kqid(current_user_ns(), type, id);
        if (!qid_has_mapping(sb->s_user_ns, qid))
                return -EINVAL;
        ret = sb->s_qcop->get_dqblk(sb, qid, &fdq);
        if (ret)
                return ret;
        copy_to_if_dqblk(&idq, &fdq);
        if (copy_to_user(addr, &idq, sizeof(idq)))
                return -EFAULT;
        return 0;
}

/*
 * Return quota for next active quota >= this id, if any exists,
 * otherwise return -ENOENT via ->get_nextdqblk.
 */
static int quota_getnextquota(struct super_block *sb, int type, qid_t id,
                          void __user *addr)
{
        struct kqid qid;
        struct qc_dqblk fdq;
        struct if_nextdqblk idq;
        int ret;

        if (!sb->s_qcop->get_nextdqblk)
                return -ENOSYS;
        qid = make_kqid(current_user_ns(), type, id);
        if (!qid_has_mapping(sb->s_user_ns, qid))
                return -EINVAL;
        ret = sb->s_qcop->get_nextdqblk(sb, &qid, &fdq);
        if (ret)
                return ret;
        /* struct if_nextdqblk is a superset of struct if_dqblk */
        copy_to_if_dqblk((struct if_dqblk *)&idq, &fdq);
        idq.dqb_id = from_kqid(current_user_ns(), qid);
        if (copy_to_user(addr, &idq, sizeof(idq)))
                return -EFAULT;
        return 0;
}

static void copy_from_if_dqblk(struct qc_dqblk *dst, struct if_dqblk *src)
{
        dst->d_spc_hardlimit = qbtos(src->dqb_bhardlimit);
        dst->d_spc_softlimit = qbtos(src->dqb_bsoftlimit);
        dst->d_space = src->dqb_curspace;
        dst->d_ino_hardlimit = src->dqb_ihardlimit;
        dst->d_ino_softlimit = src->dqb_isoftlimit;
        dst->d_ino_count = src->dqb_curinodes;
        dst->d_spc_timer = src->dqb_btime;
        dst->d_ino_timer = src->dqb_itime;

        dst->d_fieldmask = 0;
        if (src->dqb_valid & QIF_BLIMITS)
                dst->d_fieldmask |= QC_SPC_SOFT | QC_SPC_HARD;
        if (src->dqb_valid & QIF_SPACE)
                dst->d_fieldmask |= QC_SPACE;
        if (src->dqb_valid & QIF_ILIMITS)
                dst->d_fieldmask |= QC_INO_SOFT | QC_INO_HARD;
        if (src->dqb_valid & QIF_INODES)
                dst->d_fieldmask |= QC_INO_COUNT;
        if (src->dqb_valid & QIF_BTIME)
                dst->d_fieldmask |= QC_SPC_TIMER;
        if (src->dqb_valid & QIF_ITIME)
                dst->d_fieldmask |= QC_INO_TIMER;
}

static int quota_setquota(struct super_block *sb, int type, qid_t id,
                          void __user *addr)
{
        struct qc_dqblk fdq;
        struct if_dqblk idq;
        struct kqid qid;

        if (copy_from_user(&idq, addr, sizeof(idq)))
                return -EFAULT;
        if (!sb->s_qcop->set_dqblk)
                return -ENOSYS;
        qid = make_kqid(current_user_ns(), type, id);
        if (!qid_has_mapping(sb->s_user_ns, qid))
                return -EINVAL;
        copy_from_if_dqblk(&fdq, &idq);
        return sb->s_qcop->set_dqblk(sb, qid, &fdq);
}

static int quota_enable(struct super_block *sb, void __user *addr)
{
        __u32 flags;

        if (copy_from_user(&flags, addr, sizeof(flags)))
                return -EFAULT;
        if (!sb->s_qcop->quota_enable)
                return -ENOSYS;
        return sb->s_qcop->quota_enable(sb, flags);
}

static int quota_disable(struct super_block *sb, void __user *addr)
{
        __u32 flags;

        if (copy_from_user(&flags, addr, sizeof(flags)))
                return -EFAULT;
        if (!sb->s_qcop->quota_disable)
                return -ENOSYS;
        return sb->s_qcop->quota_disable(sb, flags);
}

static int quota_state_to_flags(struct qc_state *state)
{
        int flags = 0;

        if (state->s_state[USRQUOTA].flags & QCI_ACCT_ENABLED)
                flags |= FS_QUOTA_UDQ_ACCT;
        if (state->s_state[USRQUOTA].flags & QCI_LIMITS_ENFORCED)
                flags |= FS_QUOTA_UDQ_ENFD;
        if (state->s_state[GRPQUOTA].flags & QCI_ACCT_ENABLED)
                flags |= FS_QUOTA_GDQ_ACCT;
        if (state->s_state[GRPQUOTA].flags & QCI_LIMITS_ENFORCED)
                flags |= FS_QUOTA_GDQ_ENFD;
        if (state->s_state[PRJQUOTA].flags & QCI_ACCT_ENABLED)
                flags |= FS_QUOTA_PDQ_ACCT;
        if (state->s_state[PRJQUOTA].flags & QCI_LIMITS_ENFORCED)
                flags |= FS_QUOTA_PDQ_ENFD;
        return flags;
}

static int quota_getstate(struct super_block *sb, int type,
                          struct fs_quota_stat *fqs)
{
        struct qc_state state;
        int ret;

        memset(&state, 0, sizeof (struct qc_state));
        ret = sb->s_qcop->get_state(sb, &state);
        if (ret < 0)
                return ret;

        memset(fqs, 0, sizeof(*fqs));
        fqs->qs_version = FS_QSTAT_VERSION;
        fqs->qs_flags = quota_state_to_flags(&state);
        /* No quota enabled? */
        if (!fqs->qs_flags)
                return -ENOSYS;
        fqs->qs_incoredqs = state.s_incoredqs;

        fqs->qs_btimelimit = state.s_state[type].spc_timelimit;
        fqs->qs_itimelimit = state.s_state[type].ino_timelimit;
        fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit;
        fqs->qs_bwarnlimit = state.s_state[type].spc_warnlimit;
        fqs->qs_iwarnlimit = state.s_state[type].ino_warnlimit;

        /* Inodes may be allocated even if inactive; copy out if present */
        if (state.s_state[USRQUOTA].ino) {
                fqs->qs_uquota.qfs_ino = state.s_state[USRQUOTA].ino;
                fqs->qs_uquota.qfs_nblks = state.s_state[USRQUOTA].blocks;
                fqs->qs_uquota.qfs_nextents = state.s_state[USRQUOTA].nextents;
        }
        if (state.s_state[GRPQUOTA].ino) {
                fqs->qs_gquota.qfs_ino = state.s_state[GRPQUOTA].ino;
                fqs->qs_gquota.qfs_nblks = state.s_state[GRPQUOTA].blocks;
                fqs->qs_gquota.qfs_nextents = state.s_state[GRPQUOTA].nextents;
        }
        if (state.s_state[PRJQUOTA].ino) {
                /*
                 * Q_XGETQSTAT doesn't have room for both group and project
                 * quotas.  So, allow the project quota values to be copied out
                 * only if there is no group quota information available.
                 */
                if (!(state.s_state[GRPQUOTA].flags & QCI_ACCT_ENABLED)) {
                        fqs->qs_gquota.qfs_ino = state.s_state[PRJQUOTA].ino;
                        fqs->qs_gquota.qfs_nblks =
                                        state.s_state[PRJQUOTA].blocks;
                        fqs->qs_gquota.qfs_nextents =
                                        state.s_state[PRJQUOTA].nextents;
                }
        }
        return 0;
}

static int quota_getxstate(struct super_block *sb, int type, void __user *addr)
{
        struct fs_quota_stat fqs;
        int ret;

        if (!sb->s_qcop->get_state)
                return -ENOSYS;
        ret = quota_getstate(sb, type, &fqs);
        if (!ret && copy_to_user(addr, &fqs, sizeof(fqs)))
                return -EFAULT;
        return ret;
}

static int quota_getstatev(struct super_block *sb, int type,
                           struct fs_quota_statv *fqs)
{
        struct qc_state state;
        int ret;

        memset(&state, 0, sizeof (struct qc_state));
        ret = sb->s_qcop->get_state(sb, &state);
        if (ret < 0)
                return ret;

        memset(fqs, 0, sizeof(*fqs));
        fqs->qs_version = FS_QSTAT_VERSION;
        fqs->qs_flags = quota_state_to_flags(&state);
        /* No quota enabled? */
        if (!fqs->qs_flags)
                return -ENOSYS;
        fqs->qs_incoredqs = state.s_incoredqs;

        fqs->qs_btimelimit = state.s_state[type].spc_timelimit;
        fqs->qs_itimelimit = state.s_state[type].ino_timelimit;
        fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit;
        fqs->qs_bwarnlimit = state.s_state[type].spc_warnlimit;
        fqs->qs_iwarnlimit = state.s_state[type].ino_warnlimit;

        /* Inodes may be allocated even if inactive; copy out if present */
        if (state.s_state[USRQUOTA].ino) {
                fqs->qs_uquota.qfs_ino = state.s_state[USRQUOTA].ino;
                fqs->qs_uquota.qfs_nblks = state.s_state[USRQUOTA].blocks;
                fqs->qs_uquota.qfs_nextents = state.s_state[USRQUOTA].nextents;
        }
        if (state.s_state[GRPQUOTA].ino) {
                fqs->qs_gquota.qfs_ino = state.s_state[GRPQUOTA].ino;
                fqs->qs_gquota.qfs_nblks = state.s_state[GRPQUOTA].blocks;
                fqs->qs_gquota.qfs_nextents = state.s_state[GRPQUOTA].nextents;
        }
        if (state.s_state[PRJQUOTA].ino) {
                fqs->qs_pquota.qfs_ino = state.s_state[PRJQUOTA].ino;
                fqs->qs_pquota.qfs_nblks = state.s_state[PRJQUOTA].blocks;
                fqs->qs_pquota.qfs_nextents = state.s_state[PRJQUOTA].nextents;
        }
        return 0;
}

static int quota_getxstatev(struct super_block *sb, int type, void __user *addr)
{
        struct fs_quota_statv fqs;
        int ret;

        if (!sb->s_qcop->get_state)
                return -ENOSYS;

        memset(&fqs, 0, sizeof(fqs));
        if (copy_from_user(&fqs, addr, 1)) /* Just read qs_version */
                return -EFAULT;

        /* If this kernel doesn't support user specified version, fail */
        switch (fqs.qs_version) {
        case FS_QSTATV_VERSION1:
                break;
        default:
                return -EINVAL;
        }
        ret = quota_getstatev(sb, type, &fqs);
        if (!ret && copy_to_user(addr, &fqs, sizeof(fqs)))
                return -EFAULT;
        return ret;
}

/*
 * XFS defines BBTOB and BTOBB macros inside fs/xfs/ and we cannot move them
 * out of there as xfsprogs relies on the definitions being in that header
 * file. So just define the same functions here for quota purposes.
 */
#define XFS_BB_SHIFT 9

static inline u64 quota_bbtob(u64 blocks)
{
        return blocks << XFS_BB_SHIFT;
}

static inline u64 quota_btobb(u64 bytes)
{
        return (bytes + (1 << XFS_BB_SHIFT) - 1) >> XFS_BB_SHIFT;
}
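
/*
 * Illustrative only (not part of the original file): with XFS_BB_SHIFT == 9
 * these helpers convert between bytes and 512-byte "basic blocks", e.g.
 * quota_bbtob(3) == 1536 and quota_btobb(1025) == 3 -- quota_btobb() rounds
 * any partial basic block up.
 */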

static void copy_from_xfs_dqblk(struct qc_dqblk *dst, struct fs_disk_quota *src)
{
        dst->d_spc_hardlimit = quota_bbtob(src->d_blk_hardlimit);
        dst->d_spc_softlimit = quota_bbtob(src->d_blk_softlimit);
        dst->d_ino_hardlimit = src->d_ino_hardlimit;
        dst->d_ino_softlimit = src->d_ino_softlimit;
        dst->d_space = quota_bbtob(src->d_bcount);
        dst->d_ino_count = src->d_icount;
        dst->d_ino_timer = src->d_itimer;
        dst->d_spc_timer = src->d_btimer;
        dst->d_ino_warns = src->d_iwarns;
        dst->d_spc_warns = src->d_bwarns;
        dst->d_rt_spc_hardlimit = quota_bbtob(src->d_rtb_hardlimit);
        dst->d_rt_spc_softlimit = quota_bbtob(src->d_rtb_softlimit);
        dst->d_rt_space = quota_bbtob(src->d_rtbcount);
        dst->d_rt_spc_timer = src->d_rtbtimer;
        dst->d_rt_spc_warns = src->d_rtbwarns;
        dst->d_fieldmask = 0;
        if (src->d_fieldmask & FS_DQ_ISOFT)
                dst->d_fieldmask |= QC_INO_SOFT;
        if (src->d_fieldmask & FS_DQ_IHARD)
                dst->d_fieldmask |= QC_INO_HARD;
        if (src->d_fieldmask & FS_DQ_BSOFT)
                dst->d_fieldmask |= QC_SPC_SOFT;
        if (src->d_fieldmask & FS_DQ_BHARD)
                dst->d_fieldmask |= QC_SPC_HARD;
        if (src->d_fieldmask & FS_DQ_RTBSOFT)
                dst->d_fieldmask |= QC_RT_SPC_SOFT;
        if (src->d_fieldmask & FS_DQ_RTBHARD)
                dst->d_fieldmask |= QC_RT_SPC_HARD;
        if (src->d_fieldmask & FS_DQ_BTIMER)
                dst->d_fieldmask |= QC_SPC_TIMER;
        if (src->d_fieldmask & FS_DQ_ITIMER)
                dst->d_fieldmask |= QC_INO_TIMER;
        if (src->d_fieldmask & FS_DQ_RTBTIMER)
                dst->d_fieldmask |= QC_RT_SPC_TIMER;
        if (src->d_fieldmask & FS_DQ_BWARNS)
                dst->d_fieldmask |= QC_SPC_WARNS;
        if (src->d_fieldmask & FS_DQ_IWARNS)
                dst->d_fieldmask |= QC_INO_WARNS;
        if (src->d_fieldmask & FS_DQ_RTBWARNS)
                dst->d_fieldmask |= QC_RT_SPC_WARNS;
        if (src->d_fieldmask & FS_DQ_BCOUNT)
                dst->d_fieldmask |= QC_SPACE;
        if (src->d_fieldmask & FS_DQ_ICOUNT)
                dst->d_fieldmask |= QC_INO_COUNT;
        if (src->d_fieldmask & FS_DQ_RTBCOUNT)
                dst->d_fieldmask |= QC_RT_SPACE;
}

static void copy_qcinfo_from_xfs_dqblk(struct qc_info *dst,
                                       struct fs_disk_quota *src)
{
        memset(dst, 0, sizeof(*dst));
        dst->i_spc_timelimit = src->d_btimer;
        dst->i_ino_timelimit = src->d_itimer;
        dst->i_rt_spc_timelimit = src->d_rtbtimer;
        dst->i_ino_warnlimit = src->d_iwarns;
        dst->i_spc_warnlimit = src->d_bwarns;
        dst->i_rt_spc_warnlimit = src->d_rtbwarns;
        if (src->d_fieldmask & FS_DQ_BWARNS)
                dst->i_fieldmask |= QC_SPC_WARNS;
        if (src->d_fieldmask & FS_DQ_IWARNS)
                dst->i_fieldmask |= QC_INO_WARNS;
        if (src->d_fieldmask & FS_DQ_RTBWARNS)
                dst->i_fieldmask |= QC_RT_SPC_WARNS;
        if (src->d_fieldmask & FS_DQ_BTIMER)
                dst->i_fieldmask |= QC_SPC_TIMER;
        if (src->d_fieldmask & FS_DQ_ITIMER)
                dst->i_fieldmask |= QC_INO_TIMER;
        if (src->d_fieldmask & FS_DQ_RTBTIMER)
                dst->i_fieldmask |= QC_RT_SPC_TIMER;
}

static int quota_setxquota(struct super_block *sb, int type, qid_t id,
                           void __user *addr)
{
        struct fs_disk_quota fdq;
        struct qc_dqblk qdq;
        struct kqid qid;

        if (copy_from_user(&fdq, addr, sizeof(fdq)))
                return -EFAULT;
        if (!sb->s_qcop->set_dqblk)
                return -ENOSYS;
        qid = make_kqid(current_user_ns(), type, id);
        if (!qid_has_mapping(sb->s_user_ns, qid))
                return -EINVAL;
        /* Are we actually setting timer / warning limits for all users? */
        if (from_kqid(sb->s_user_ns, qid) == 0 &&
            fdq.d_fieldmask & (FS_DQ_WARNS_MASK | FS_DQ_TIMER_MASK)) {
                struct qc_info qinfo;
                int ret;

                if (!sb->s_qcop->set_info)
                        return -EINVAL;
                copy_qcinfo_from_xfs_dqblk(&qinfo, &fdq);
                ret = sb->s_qcop->set_info(sb, type, &qinfo);
                if (ret)
                        return ret;
                /* These are already done */
                fdq.d_fieldmask &= ~(FS_DQ_WARNS_MASK | FS_DQ_TIMER_MASK);
        }
        copy_from_xfs_dqblk(&qdq, &fdq);
        return sb->s_qcop->set_dqblk(sb, qid, &qdq);
}

static void copy_to_xfs_dqblk(struct fs_disk_quota *dst, struct qc_dqblk *src,
                              int type, qid_t id)
{
        memset(dst, 0, sizeof(*dst));
        dst->d_version = FS_DQUOT_VERSION;
        dst->d_id = id;
        if (type == USRQUOTA)
                dst->d_flags = FS_USER_QUOTA;
        else if (type == PRJQUOTA)
                dst->d_flags = FS_PROJ_QUOTA;
        else
                dst->d_flags = FS_GROUP_QUOTA;
        dst->d_blk_hardlimit = quota_btobb(src->d_spc_hardlimit);
        dst->d_blk_softlimit = quota_btobb(src->d_spc_softlimit);
        dst->d_ino_hardlimit = src->d_ino_hardlimit;
        dst->d_ino_softlimit = src->d_ino_softlimit;
        dst->d_bcount = quota_btobb(src->d_space);
        dst->d_icount = src->d_ino_count;
        dst->d_itimer = src->d_ino_timer;
        dst->d_btimer = src->d_spc_timer;
        dst->d_iwarns = src->d_ino_warns;
        dst->d_bwarns = src->d_spc_warns;
        dst->d_rtb_hardlimit = quota_btobb(src->d_rt_spc_hardlimit);
        dst->d_rtb_softlimit = quota_btobb(src->d_rt_spc_softlimit);
        dst->d_rtbcount = quota_btobb(src->d_rt_space);
        dst->d_rtbtimer = src->d_rt_spc_timer;
        dst->d_rtbwarns = src->d_rt_spc_warns;
}

static int quota_getxquota(struct super_block *sb, int type, qid_t id,
                           void __user *addr)
{
        struct fs_disk_quota fdq;
        struct qc_dqblk qdq;
        struct kqid qid;
        int ret;

        if (!sb->s_qcop->get_dqblk)
                return -ENOSYS;
        qid = make_kqid(current_user_ns(), type, id);
        if (!qid_has_mapping(sb->s_user_ns, qid))
                return -EINVAL;
        ret = sb->s_qcop->get_dqblk(sb, qid, &qdq);
        if (ret)
                return ret;
        copy_to_xfs_dqblk(&fdq, &qdq, type, id);
        if (copy_to_user(addr, &fdq, sizeof(fdq)))
                return -EFAULT;
        return ret;
}

/*
 * Return quota for next active quota >= this id, if any exists,
 * otherwise return -ENOENT via ->get_nextdqblk.
 */
static int quota_getnextxquota(struct super_block *sb, int type, qid_t id,
                            void __user *addr)
{
        struct fs_disk_quota fdq;
        struct qc_dqblk qdq;
        struct kqid qid;
        qid_t id_out;
        int ret;

        if (!sb->s_qcop->get_nextdqblk)
                return -ENOSYS;
        qid = make_kqid(current_user_ns(), type, id);
        if (!qid_has_mapping(sb->s_user_ns, qid))
                return -EINVAL;
        ret = sb->s_qcop->get_nextdqblk(sb, &qid, &qdq);
        if (ret)
                return ret;
        id_out = from_kqid(current_user_ns(), qid);
        copy_to_xfs_dqblk(&fdq, &qdq, type, id_out);
        if (copy_to_user(addr, &fdq, sizeof(fdq)))
                return -EFAULT;
        return ret;
}

static int quota_rmxquota(struct super_block *sb, void __user *addr)
{
        __u32 flags;

        if (copy_from_user(&flags, addr, sizeof(flags)))
                return -EFAULT;
        if (!sb->s_qcop->rm_xquota)
                return -ENOSYS;
        return sb->s_qcop->rm_xquota(sb, flags);
}

/* Copy parameters and call proper function */
static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
                       void __user *addr, const struct path *path)
{
        int ret;

        type = array_index_nospec(type, MAXQUOTAS);
        /*
         * Quota not supported on this fs? Check this before s_quota_types
         * since they needn't be set if quota is not supported at all.
         */
        if (!sb->s_qcop)
                return -ENOSYS;
        if (!(sb->s_quota_types & (1 << type)))
                return -EINVAL;

        ret = check_quotactl_permission(sb, type, cmd, id);
        if (ret < 0)
                return ret;

        switch (cmd) {
        case Q_QUOTAON:
                return quota_quotaon(sb, type, id, path);
        case Q_QUOTAOFF:
                return quota_quotaoff(sb, type);
        case Q_GETFMT:
                return quota_getfmt(sb, type, addr);
        case Q_GETINFO:
                return quota_getinfo(sb, type, addr);
        case Q_SETINFO:
                return quota_setinfo(sb, type, addr);
        case Q_GETQUOTA:
                return quota_getquota(sb, type, id, addr);
        case Q_GETNEXTQUOTA:
                return quota_getnextquota(sb, type, id, addr);
        case Q_SETQUOTA:
                return quota_setquota(sb, type, id, addr);
        case Q_SYNC:
                if (!sb->s_qcop->quota_sync)
                        return -ENOSYS;
                return sb->s_qcop->quota_sync(sb, type);
        case Q_XQUOTAON:
                return quota_enable(sb, addr);
        case Q_XQUOTAOFF:
                return quota_disable(sb, addr);
        case Q_XQUOTARM:
                return quota_rmxquota(sb, addr);
        case Q_XGETQSTAT:
                return quota_getxstate(sb, type, addr);
        case Q_XGETQSTATV:
                return quota_getxstatev(sb, type, addr);
        case Q_XSETQLIM:
                return quota_setxquota(sb, type, id, addr);
        case Q_XGETQUOTA:
                return quota_getxquota(sb, type, id, addr);
        case Q_XGETNEXTQUOTA:
                return quota_getnextxquota(sb, type, id, addr);
        case Q_XQUOTASYNC:
                if (sb_rdonly(sb))
                        return -EROFS;
                /* XFS quotas are fully coherent now, making this call a noop */
                return 0;
        default:
                return -EINVAL;
        }
}

#ifdef CONFIG_BLOCK

/* Return 1 if 'cmd' will block on a frozen filesystem */
static int quotactl_cmd_write(int cmd)
{
        /*
         * We cannot allow Q_GETQUOTA and Q_GETNEXTQUOTA without write access
         * as dquot_acquire() may allocate space for a new structure and OCFS2
         * needs to increment the on-disk use count.
         */
        switch (cmd) {
        case Q_GETFMT:
        case Q_GETINFO:
        case Q_SYNC:
        case Q_XGETQSTAT:
        case Q_XGETQSTATV:
        case Q_XGETQUOTA:
        case Q_XGETNEXTQUOTA:
        case Q_XQUOTASYNC:
                return 0;
        }
        return 1;
}
#endif /* CONFIG_BLOCK */

/* Return true if quotactl command is manipulating quota on/off state */
static bool quotactl_cmd_onoff(int cmd)
{
        return (cmd == Q_QUOTAON) || (cmd == Q_QUOTAOFF) ||
                 (cmd == Q_XQUOTAON) || (cmd == Q_XQUOTAOFF);
}

/*
 * look up a superblock on which quota ops will be performed
 * - use the name of a block device to find the superblock thereon
 */
static struct super_block *quotactl_block(const char __user *special, int cmd)
{
#ifdef CONFIG_BLOCK
        struct block_device *bdev;
        struct super_block *sb;
        struct filename *tmp = getname(special);

        if (IS_ERR(tmp))
                return ERR_CAST(tmp);
        bdev = lookup_bdev(tmp->name);
        putname(tmp);
        if (IS_ERR(bdev))
                return ERR_CAST(bdev);
        if (quotactl_cmd_onoff(cmd))
                sb = get_super_exclusive_thawed(bdev);
        else if (quotactl_cmd_write(cmd))
                sb = get_super_thawed(bdev);
        else
                sb = get_super(bdev);
        bdput(bdev);
        if (!sb)
                return ERR_PTR(-ENODEV);

        return sb;
#else
        return ERR_PTR(-ENODEV);
#endif
}

/*
 * This is the system call interface. It communicates with the user-level
 * programs. Currently it only supports disk quota calls. Maybe we need to
 * add process quotas etc. in the future, but we probably should use rlimits
 * for that.
 */
int kernel_quotactl(unsigned int cmd, const char __user *special,
                    qid_t id, void __user *addr)
{
        uint cmds, type;
        struct super_block *sb = NULL;
        struct path path, *pathp = NULL;
        int ret;

        cmds = cmd >> SUBCMDSHIFT;
        type = cmd & SUBCMDMASK;
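        /*
         * Illustrative only (not part of the original file): userspace packs
         * both values into a single word with QCMD(cmd, type), i.e.
         * (cmd << SUBCMDSHIFT) | (type & SUBCMDMASK), so e.g.
         * QCMD(Q_GETQUOTA, USRQUOTA) carries the subcommand in the high bits
         * and the quota type in the low byte; the two lines above undo that
         * packing.
         */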

        if (type >= MAXQUOTAS)
                return -EINVAL;

        /*
         * As a special case, Q_SYNC can be called without a specific device.
         * It will iterate all superblocks that have quota enabled and call
         * the sync action on each of them.
         */
        if (!special) {
                if (cmds == Q_SYNC)
                        return quota_sync_all(type);
                return -ENODEV;
        }

        /*
         * Path for quotaon has to be resolved before grabbing superblock
         * because that gets the s_umount sem, which may also be needed by
         * path resolution (think about autofs), and thus deadlocks could
         * arise.
         */
        if (cmds == Q_QUOTAON) {
                ret = user_path_at(AT_FDCWD, addr, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
                if (ret)
                        pathp = ERR_PTR(ret);
                else
                        pathp = &path;
        }

        sb = quotactl_block(special, cmds);
        if (IS_ERR(sb)) {
                ret = PTR_ERR(sb);
                goto out;
        }

        ret = do_quotactl(sb, type, cmds, id, addr, pathp);

        if (!quotactl_cmd_onoff(cmds))
                drop_super(sb);
        else
                drop_super_exclusive(sb);
out:
        if (pathp && !IS_ERR(pathp))
                path_put(pathp);
        return ret;
}

SYSCALL_DEFINE4(quotactl, unsigned int, cmd, const char __user *, special,
                qid_t, id, void __user *, addr)
{
        return kernel_quotactl(cmd, special, id, addr);
}