// SPDX-License-Identifier: GPL-2.0
/*
 *  block/partitions/aix.c
 *
 *  Copyright (C) 2012-2013 Philippe De Muyter <phdm@macqel.be>
 */

#include "check.h"

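/*
 * On-disk LVM information record, read from sector 7 of the disk by
 * aix_partition() below.  It carries the "_LVM" magic, the log2 of the
 * physical partition size, and the location and length of the volume
 * group descriptor area (VGDA).
 */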
struct lvm_rec {
        char lvm_id[4]; /* "_LVM" */
        char reserved4[16];
        __be32 lvmarea_len;
        __be32 vgda_len;
        __be32 vgda_psn[2];
        char reserved36[10];
        __be16 pp_size; /* log2(pp_size) */
        char reserved46[12];
        __be16 version;
        };

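/*
 * Header of the volume group descriptor area (VGDA); only numlvs, the
 * number of logical volumes in the volume group, is used below.
 */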
struct vgda {
        __be32 secs;
        __be32 usec;
        char reserved8[16];
        __be16 numlvs;
        __be16 maxlvs;
        __be16 pp_size;
        __be16 numpvs;
        __be16 total_vgdas;
        __be16 vgda_size;
        };

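/*
 * Logical volume descriptor; num_lps is the number of logical
 * partitions making up the logical volume.
 */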
struct lvd {
        __be16 lv_ix;
        __be16 res2;
        __be16 res4;
        __be16 maxsize;
        __be16 lv_state;
        __be16 mirror;
        __be16 mirror_policy;
        __be16 num_lps;
        __be16 res10[8];
        };

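/* Logical volume name table entry, indexed by logical volume number. */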
struct lvname {
        char name[64];
        };

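/*
 * Physical partition entry: maps one physical partition to a logical
 * volume (lv_ix, 1-based) and a logical partition within it (lp_ix);
 * entries with an lp_ix of 0 are skipped as unallocated.
 */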
struct ppe {
        __be16 lv_ix;
        unsigned short res2;
        unsigned short res4;
        __be16 lp_ix;
        unsigned short res8[12];
        };

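/*
 * Physical volume descriptor: number of physical partitions, first
 * data sector (psn_part1) and the physical partition map.
 */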
struct pvd {
        char reserved0[16];
        __be16 pp_count;
        char reserved18[2];
        __be32 psn_part1;
        char reserved24[8];
        struct ppe ppe[1016];
        };

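/* Maximum number of logical volumes per volume group (size of the name table). */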
#define LVM_MAXLVS 256

/**
 * read_lba(): Read bytes from disk, starting at given LBA
 * @state: partition parsing state, provides the disk to read from
 * @lba: Logical Block Address to start reading at
 * @buffer: destination buffer
 * @count: number of bytes to read
 *
 * Description: Reads @count bytes from @state->disk into @buffer.
 * Returns number of bytes read on success, 0 on error.
 */
static size_t read_lba(struct parsed_partitions *state, u64 lba, u8 *buffer,
                        size_t count)
{
        size_t totalreadcount = 0;

        if (!buffer || lba + count / 512 > get_capacity(state->disk) - 1ULL)
                return 0;

        while (count) {
                int copied = 512;
                Sector sect;
                unsigned char *data = read_part_sector(state, lba++, &sect);
                if (!data)
                        break;
                if (copied > count)
                        copied = count;
                memcpy(buffer, data, copied);
                put_dev_sector(sect);
                buffer += copied;
                totalreadcount += copied;
                count -= copied;
        }
        return totalreadcount;
}

/**
 * alloc_pvd(): reads physical volume descriptor
 * @state: partition parsing state, provides the disk to read from
 * @lba: Logical Block Address of the physical volume descriptor
 *
 * Description: Returns pvd on success, NULL on error.
 * Allocates space for pvd and fills it with disk blocks at @lba.
 * Notes: remember to free pvd when you're done!
 */
static struct pvd *alloc_pvd(struct parsed_partitions *state, u32 lba)
{
        size_t count = sizeof(struct pvd);
        struct pvd *p;

        p = kmalloc(count, GFP_KERNEL);
        if (!p)
                return NULL;

        if (read_lba(state, lba, (u8 *) p, count) < count) {
                kfree(p);
                return NULL;
        }
        return p;
}

/**
 * alloc_lvn(): reads logical volume names
 * @state: partition parsing state, provides the disk to read from
 * @lba: Logical Block Address of the logical volume name table
 *
 * Description: Returns lvn on success, NULL on error.
 * Allocates space for lvn and fills it with disk blocks at @lba.
 * Notes: remember to free lvn when you're done!
 */
static struct lvname *alloc_lvn(struct parsed_partitions *state, u32 lba)
{
        size_t count = sizeof(struct lvname) * LVM_MAXLVS;
        struct lvname *p;

        p = kmalloc(count, GFP_KERNEL);
        if (!p)
                return NULL;

        if (read_lba(state, lba, (u8 *) p, count) < count) {
                kfree(p);
                return NULL;
        }
        return p;
}

int aix_partition(struct parsed_partitions *state)
{
        int ret = 0;
        Sector sect;
        unsigned char *d;
        u32 pp_bytes_size;
        u32 pp_blocks_size = 0;
        u32 vgda_sector = 0;
        u32 vgda_len = 0;
        int numlvs = 0;
        struct pvd *pvd = NULL;
        struct lv_info {
                unsigned short pps_per_lv;
                unsigned short pps_found;
                unsigned char lv_is_contiguous;
        } *lvip;
        struct lvname *n = NULL;

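        /*
         * Sector 7 holds the LVM information record.  Only version 1 is
         * supported; remember the physical partition size and where the
         * VGDA starts.
         */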
        d = read_part_sector(state, 7, &sect);
        if (d) {
                struct lvm_rec *p = (struct lvm_rec *)d;
                u16 lvm_version = be16_to_cpu(p->version);
                char tmp[64];

                if (lvm_version == 1) {
                        int pp_size_log2 = be16_to_cpu(p->pp_size);

                        pp_bytes_size = 1 << pp_size_log2;
                        pp_blocks_size = pp_bytes_size / 512;
                        snprintf(tmp, sizeof(tmp),
                                " AIX LVM header version %u found\n",
                                lvm_version);
                        vgda_len = be32_to_cpu(p->vgda_len);
                        vgda_sector = be32_to_cpu(p->vgda_psn[0]);
                } else {
                        snprintf(tmp, sizeof(tmp),
                                " unsupported AIX LVM version %d found\n",
                                lvm_version);
                }
                strlcat(state->pp_buf, tmp, PAGE_SIZE);
                put_dev_sector(sect);
        }
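        /* The first VGDA sector gives the number of logical volumes. */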
        if (vgda_sector && (d = read_part_sector(state, vgda_sector, &sect))) {
                struct vgda *p = (struct vgda *)d;

                numlvs = be16_to_cpu(p->numlvs);
                put_dev_sector(sect);
        }
        lvip = kcalloc(state->limit, sizeof(struct lv_info), GFP_KERNEL);
        if (!lvip)
                return 0;
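        /*
         * The sector after the VGDA header holds the logical volume
         * descriptors; the logical volume names sit 33 sectors before
         * the end of the VGDA.
         */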
        if (numlvs && (d = read_part_sector(state, vgda_sector + 1, &sect))) {
                struct lvd *p = (struct lvd *)d;
                int i;

                n = alloc_lvn(state, vgda_sector + vgda_len - 33);
                if (n) {
                        int foundlvs = 0;

                        for (i = 0; foundlvs < numlvs && i < state->limit; i += 1) {
                                lvip[i].pps_per_lv = be16_to_cpu(p[i].num_lps);
                                if (lvip[i].pps_per_lv)
                                        foundlvs += 1;
                        }
                        /* pvd loops depend on n[].name and lvip[].pps_per_lv */
                        pvd = alloc_pvd(state, vgda_sector + 17);
                }
                put_dev_sector(sect);
        }
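        /*
         * Walk the physical partition map and report each fully
         * contiguous logical volume as a partition.
         */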
        if (pvd) {
                int numpps = be16_to_cpu(pvd->pp_count);
                int psn_part1 = be32_to_cpu(pvd->psn_part1);
                int i;
                int cur_lv_ix = -1;
                int next_lp_ix = 1;
                int lp_ix;

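                /*
                 * Track runs of consecutive logical partitions; an LV is
                 * emitted only when logical partitions 1..pps_per_lv are
                 * found in order in consecutive physical partitions.
                 */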
                for (i = 0; i < numpps; i += 1) {
                        struct ppe *p = pvd->ppe + i;
                        unsigned int lv_ix;

                        lp_ix = be16_to_cpu(p->lp_ix);
                        if (!lp_ix) {
                                next_lp_ix = 1;
                                continue;
                        }
                        lv_ix = be16_to_cpu(p->lv_ix) - 1;
                        if (lv_ix >= state->limit) {
                                cur_lv_ix = -1;
                                continue;
                        }
                        lvip[lv_ix].pps_found += 1;
                        if (lp_ix == 1) {
                                cur_lv_ix = lv_ix;
                                next_lp_ix = 1;
                        } else if (lv_ix != cur_lv_ix || lp_ix != next_lp_ix) {
                                next_lp_ix = 1;
                                continue;
                        }
                        if (lp_ix == lvip[lv_ix].pps_per_lv) {
                                char tmp[70];

                                put_partition(state, lv_ix + 1,
                                  (i + 1 - lp_ix) * pp_blocks_size + psn_part1,
                                  lvip[lv_ix].pps_per_lv * pp_blocks_size);
                                snprintf(tmp, sizeof(tmp), " <%s>\n",
                                         n[lv_ix].name);
                                strlcat(state->pp_buf, tmp, PAGE_SIZE);
                                lvip[lv_ix].lv_is_contiguous = 1;
                                ret = 1;
                                next_lp_ix = 1;
                        } else
                                next_lp_ix += 1;
                }
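                /* Warn about logical volumes that were seen but are not contiguous. */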
                for (i = 0; i < state->limit; i += 1)
                        if (lvip[i].pps_found && !lvip[i].lv_is_contiguous) {
                                char tmp[sizeof(n[i].name) + 1]; // null char

                                snprintf(tmp, sizeof(tmp), "%s", n[i].name);
                                pr_warn("partition %s (%u pp's found) is "
                                        "not contiguous\n",
                                        tmp, lvip[i].pps_found);
                        }
                kfree(pvd);
        }
        kfree(n);
        kfree(lvip);
        return ret;
}