// SPDX-License-Identifier: GPL-2.0
/*
 *  fs/partitions/aix.c
 *
 *  Copyright (C) 2012-2013 Philippe De Muyter <phdm@macqel.be>
 */

#include "check.h"
9
10 struct lvm_rec {
11 char lvm_id[4]; /* "_LVM" */
12 char reserved4[16];
13 __be32 lvmarea_len;
14 __be32 vgda_len;
15 __be32 vgda_psn[2];
16 char reserved36[10];
17 __be16 pp_size; /* log2(pp_size) */
18 char reserved46[12];
19 __be16 version;
20 };
21
22 struct vgda {
23 __be32 secs;
24 __be32 usec;
25 char reserved8[16];
26 __be16 numlvs;
27 __be16 maxlvs;
28 __be16 pp_size;
29 __be16 numpvs;
30 __be16 total_vgdas;
31 __be16 vgda_size;
32 };
33
34 struct lvd {
35 __be16 lv_ix;
36 __be16 res2;
37 __be16 res4;
38 __be16 maxsize;
39 __be16 lv_state;
40 __be16 mirror;
41 __be16 mirror_policy;
42 __be16 num_lps;
43 __be16 res10[8];
44 };
45
/* Fixed-width logical volume name slot (not guaranteed NUL-terminated). */
struct lvname {
	char name[64];
};
49
50 struct ppe {
51 __be16 lv_ix;
52 unsigned short res2;
53 unsigned short res4;
54 __be16 lp_ix;
55 unsigned short res8[12];
56 };
57
58 struct pvd {
59 char reserved0[16];
60 __be16 pp_count;
61 char reserved18[2];
62 __be32 psn_part1;
63 char reserved24[8];
64 struct ppe ppe[1016];
65 };
66
67 #define LVM_MAXLVS 256
68
69 /**
70 * read_lba(): Read bytes from disk, starting at given LBA
71 * @state
72 * @lba
73 * @buffer
74 * @count
75 *
76 * Description: Reads @count bytes from @state->disk into @buffer.
77 * Returns number of bytes read on success, 0 on error.
78 */
read_lba(struct parsed_partitions * state,u64 lba,u8 * buffer,size_t count)79 static size_t read_lba(struct parsed_partitions *state, u64 lba, u8 *buffer,
80 size_t count)
81 {
82 size_t totalreadcount = 0;
83
84 if (!buffer || lba + count / 512 > get_capacity(state->disk) - 1ULL)
85 return 0;
86
87 while (count) {
88 int copied = 512;
89 Sector sect;
90 unsigned char *data = read_part_sector(state, lba++, §);
91 if (!data)
92 break;
93 if (copied > count)
94 copied = count;
95 memcpy(buffer, data, copied);
96 put_dev_sector(sect);
97 buffer += copied;
98 totalreadcount += copied;
99 count -= copied;
100 }
101 return totalreadcount;
102 }
103
104 /**
105 * alloc_pvd(): reads physical volume descriptor
106 * @state
107 * @lba
108 *
109 * Description: Returns pvd on success, NULL on error.
110 * Allocates space for pvd and fill it with disk blocks at @lba
111 * Notes: remember to free pvd when you're done!
112 */
alloc_pvd(struct parsed_partitions * state,u32 lba)113 static struct pvd *alloc_pvd(struct parsed_partitions *state, u32 lba)
114 {
115 size_t count = sizeof(struct pvd);
116 struct pvd *p;
117
118 p = kmalloc(count, GFP_KERNEL);
119 if (!p)
120 return NULL;
121
122 if (read_lba(state, lba, (u8 *) p, count) < count) {
123 kfree(p);
124 return NULL;
125 }
126 return p;
127 }
128
129 /**
130 * alloc_lvn(): reads logical volume names
131 * @state
132 * @lba
133 *
134 * Description: Returns lvn on success, NULL on error.
135 * Allocates space for lvn and fill it with disk blocks at @lba
136 * Notes: remember to free lvn when you're done!
137 */
alloc_lvn(struct parsed_partitions * state,u32 lba)138 static struct lvname *alloc_lvn(struct parsed_partitions *state, u32 lba)
139 {
140 size_t count = sizeof(struct lvname) * LVM_MAXLVS;
141 struct lvname *p;
142
143 p = kmalloc(count, GFP_KERNEL);
144 if (!p)
145 return NULL;
146
147 if (read_lba(state, lba, (u8 *) p, count) < count) {
148 kfree(p);
149 return NULL;
150 }
151 return p;
152 }
153
aix_partition(struct parsed_partitions * state)154 int aix_partition(struct parsed_partitions *state)
155 {
156 int ret = 0;
157 Sector sect;
158 unsigned char *d;
159 u32 pp_bytes_size;
160 u32 pp_blocks_size = 0;
161 u32 vgda_sector = 0;
162 u32 vgda_len = 0;
163 int numlvs = 0;
164 struct pvd *pvd = NULL;
165 struct lv_info {
166 unsigned short pps_per_lv;
167 unsigned short pps_found;
168 unsigned char lv_is_contiguous;
169 } *lvip;
170 struct lvname *n = NULL;
171
172 d = read_part_sector(state, 7, §);
173 if (d) {
174 struct lvm_rec *p = (struct lvm_rec *)d;
175 u16 lvm_version = be16_to_cpu(p->version);
176
177 if (lvm_version == 1) {
178 int pp_size_log2 = be16_to_cpu(p->pp_size);
179
180 pp_bytes_size = 1 << pp_size_log2;
181 pp_blocks_size = pp_bytes_size / 512;
182 seq_buf_printf(&state->pp_buf,
183 " AIX LVM header version %u found\n",
184 lvm_version);
185 vgda_len = be32_to_cpu(p->vgda_len);
186 vgda_sector = be32_to_cpu(p->vgda_psn[0]);
187 } else {
188 seq_buf_printf(&state->pp_buf,
189 " unsupported AIX LVM version %d found\n",
190 lvm_version);
191 }
192 put_dev_sector(sect);
193 }
194 if (vgda_sector && (d = read_part_sector(state, vgda_sector, §))) {
195 struct vgda *p = (struct vgda *)d;
196
197 numlvs = be16_to_cpu(p->numlvs);
198 put_dev_sector(sect);
199 }
200 lvip = kzalloc_objs(struct lv_info, state->limit);
201 if (!lvip)
202 return 0;
203 if (numlvs && (d = read_part_sector(state, vgda_sector + 1, §))) {
204 struct lvd *p = (struct lvd *)d;
205 int i;
206
207 n = alloc_lvn(state, vgda_sector + vgda_len - 33);
208 if (n) {
209 int foundlvs = 0;
210
211 for (i = 0; foundlvs < numlvs && i < state->limit; i += 1) {
212 lvip[i].pps_per_lv = be16_to_cpu(p[i].num_lps);
213 if (lvip[i].pps_per_lv)
214 foundlvs += 1;
215 }
216 /* pvd loops depend on n[].name and lvip[].pps_per_lv */
217 pvd = alloc_pvd(state, vgda_sector + 17);
218 }
219 put_dev_sector(sect);
220 }
221 if (pvd) {
222 int numpps = be16_to_cpu(pvd->pp_count);
223 int psn_part1 = be32_to_cpu(pvd->psn_part1);
224 int i;
225 int cur_lv_ix = -1;
226 int next_lp_ix = 1;
227 int lp_ix;
228
229 for (i = 0; i < numpps; i += 1) {
230 struct ppe *p = pvd->ppe + i;
231 unsigned int lv_ix;
232
233 lp_ix = be16_to_cpu(p->lp_ix);
234 if (!lp_ix) {
235 next_lp_ix = 1;
236 continue;
237 }
238 lv_ix = be16_to_cpu(p->lv_ix) - 1;
239 if (lv_ix >= state->limit) {
240 cur_lv_ix = -1;
241 continue;
242 }
243 lvip[lv_ix].pps_found += 1;
244 if (lp_ix == 1) {
245 cur_lv_ix = lv_ix;
246 next_lp_ix = 1;
247 } else if (lv_ix != cur_lv_ix || lp_ix != next_lp_ix) {
248 next_lp_ix = 1;
249 continue;
250 }
251 if (lp_ix == lvip[lv_ix].pps_per_lv) {
252 put_partition(state, lv_ix + 1,
253 (i + 1 - lp_ix) * pp_blocks_size + psn_part1,
254 lvip[lv_ix].pps_per_lv * pp_blocks_size);
255 seq_buf_printf(&state->pp_buf, " <%s>\n",
256 n[lv_ix].name);
257 lvip[lv_ix].lv_is_contiguous = 1;
258 ret = 1;
259 next_lp_ix = 1;
260 } else
261 next_lp_ix += 1;
262 }
263 for (i = 0; i < state->limit; i += 1)
264 if (lvip[i].pps_found && !lvip[i].lv_is_contiguous) {
265 char tmp[sizeof(n[i].name) + 1]; // null char
266
267 snprintf(tmp, sizeof(tmp), "%s", n[i].name);
268 pr_warn("partition %s (%u pp's found) is "
269 "not contiguous\n",
270 tmp, lvip[i].pps_found);
271 }
272 kfree(pvd);
273 }
274 kfree(n);
275 kfree(lvip);
276 return ret;
277 }
278