// SPDX-License-Identifier: GPL-2.0
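/*
 * Generic indirect-block mapping code.  In the minix tree this file is
 * not compiled on its own: fs/minix/itree_v1.c and fs/minix/itree_v2.c
 * each define block_t, DEPTH, DIRECT, i_data(), block_to_cpu(),
 * cpu_to_block() and block_to_path(), and then #include this file.
 */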
typedef struct {
	block_t	*p;
	block_t	key;
	struct buffer_head *bh;
} Indirect;
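/*
 * One Indirect per level of a lookup chain: p points at the slot that
 * holds a block number, key caches the value read from it, and bh pins
 * the buffer that slot lives in (NULL for slots in the inode itself).
 * The rwlock below guards the pointers: chain walkers take it shared,
 * pointer writers (splice_branch, find_shared) take it exclusive.
 */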
static DEFINE_RWLOCK(pointers_lock);
static inline void add_chain(Indirect *p, struct buffer_head *bh, block_t *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}
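/*
 * verify_chain() rechecks, under pointers_lock, that every key we
 * sampled still matches the slot it was read from; a mismatch means a
 * concurrent truncate pulled the chain out from under us.
 */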
static inline int verify_chain(Indirect *from, Indirect *to)
{
	while (from <= to && from->key == *from->p)
		from++;
	return (from > to);
}
static inline block_t *block_end(struct buffer_head *bh)
{
	return (block_t *)((char*)bh->b_data + bh->b_size);
}
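/*
 * get_branch() walks the chain described by offsets[] as deep as it
 * can.  It returns NULL when the full depth resolved, or a pointer to
 * the last valid Indirect otherwise, with *err set to 0 (hole), -EIO
 * (read failure) or -EAGAIN (chain changed under us).
 */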
static inline Indirect *get_branch(struct inode *inode,
					int depth,
					int *offsets,
					Indirect chain[DEPTH],
					int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain (chain, NULL, i_data(inode) + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_bread(sb, block_to_cpu(p->key));
		if (!bh)
			goto failure;
		read_lock(&pointers_lock);
		if (!verify_chain(chain, p))
			goto changed;
		add_chain(++p, bh, (block_t *)bh->b_data + *++offsets);
		read_unlock(&pointers_lock);
		if (!p->key)
			goto no_block;
	}
	return NULL;

changed:
	read_unlock(&pointers_lock);
	brelse(bh);
	*err = -EAGAIN;
	goto no_block;
failure:
	*err = -EIO;
no_block:
	return p;
}
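/*
 * alloc_branch() builds the missing tail of a chain: num new blocks,
 * each but the last an indirect block pointing at the next.  The
 * branch is fully linked internally but not yet visible in the inode;
 * on any failure everything allocated so far is released.
 */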
static int alloc_branch(struct inode *inode,
			     int num,
			     int *offsets,
			     Indirect *branch)
{
	int n = 0;
	int i;
	int parent = minix_new_block(inode);

	branch[0].key = cpu_to_block(parent);
	if (parent) for (n = 1; n < num; n++) {
		struct buffer_head *bh;
		/* Allocate the next block */
		int nr = minix_new_block(inode);
		if (!nr)
			break;
		branch[n].key = cpu_to_block(nr);
		bh = sb_getblk(inode->i_sb, parent);
		if (!bh) {
			minix_free_block(inode, nr);
			break;
		}
		lock_buffer(bh);
		memset(bh->b_data, 0, bh->b_size);
		branch[n].bh = bh;
		branch[n].p = (block_t*) bh->b_data + offsets[n];
		*branch[n].p = branch[n].key;
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		mark_buffer_dirty_inode(bh, inode);
		parent = nr;
	}
	if (n == num)
		return 0;

	/* Allocation failed, free what we already allocated */
	for (i = 1; i < n; i++)
		bforget(branch[i].bh);
	for (i = 0; i < n; i++)
		minix_free_block(inode, block_to_cpu(branch[i].key));
	return -ENOSPC;
}
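/*
 * splice_branch() makes the new branch visible by storing its first
 * block number into the parent slot, after re-verifying under the
 * write lock that the slot is still reachable and still empty.
 */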
static inline int splice_branch(struct inode *inode,
				     Indirect chain[DEPTH],
				     Indirect *where,
				     int num)
{
	int i;

	write_lock(&pointers_lock);

	/* Verify that place we are splicing to is still there and vacant */
	if (!verify_chain(chain, where-1) || *where->p)
		goto changed;

	*where->p = where->key;

	write_unlock(&pointers_lock);

	/* We are done with atomic stuff, now do the rest of housekeeping */

	inode->i_ctime = current_time(inode);

	/* had we spliced it onto indirect block? */
	if (where->bh)
		mark_buffer_dirty_inode(where->bh, inode);

	mark_inode_dirty(inode);
	return 0;

changed:
	write_unlock(&pointers_lock);
	for (i = 1; i < num; i++)
		bforget(where[i].bh);
	for (i = 0; i < num; i++)
		minix_free_block(inode, block_to_cpu(where[i].key));
	return -EAGAIN;
}
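/*
 * get_block() is the address_space-level entry point: map an
 * inode-relative block to a device block, allocating the missing part
 * of the branch if 'create' is set.  On a -EAGAIN race with truncate
 * it drops everything it holds and restarts the lookup.
 */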
static int get_block(struct inode * inode, sector_t block,
			struct buffer_head *bh, int create)
{
	int err = -EIO;
	int offsets[DEPTH];
	Indirect chain[DEPTH];
	Indirect *partial;
	int left;
	int depth = block_to_path(inode, block, offsets);

	if (depth == 0)
		goto out;

reread:
	partial = get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
got_it:
		map_bh(bh, inode->i_sb, block_to_cpu(chain[depth-1].key));
		/* Clean up and exit */
		partial = chain+depth-1; /* the whole chain */
		goto cleanup;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if (!create || err == -EIO) {
cleanup:
		while (partial > chain) {
			brelse(partial->bh);
			partial--;
		}
out:
		return err;
	}

	/*
	 * Indirect block might be removed by truncate while we were
	 * reading it. Handling of that case (forget what we've got and
	 * reread) is taken out of the main path.
	 */
	if (err == -EAGAIN)
		goto changed;

	left = (chain + depth) - partial;
	err = alloc_branch(inode, left, offsets+(partial-chain), partial);
	if (err)
		goto cleanup;

	if (splice_branch(inode, chain, partial, left) < 0)
		goto changed;

	set_buffer_new(bh);
	goto got_it;

changed:
	while (partial > chain) {
		brelse(partial->bh);
		partial--;
	}
	goto reread;
}
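/*
 * all_zeroes() reports whether every slot in [p, q) is empty; used by
 * find_shared() to see whether the part of an indirect block before
 * the truncation point is already free.
 */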
static inline int all_zeroes(block_t *p, block_t *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}
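/*
 * find_shared() locates the topmost node of the subtree that truncate
 * may free outright.  It returns the deepest Indirect still shared
 * with data we keep; *top gets the block number unhooked from the
 * tree (or 0), detached under the write lock so concurrent
 * get_block() callers see either the old tree or the new one.
 */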
static Indirect *find_shared(struct inode *inode,
				int depth,
				int offsets[DEPTH],
				Indirect chain[DEPTH],
				block_t *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = get_branch(inode, k, offsets, chain, &err);

	write_lock(&pointers_lock);
	if (!partial)
		partial = chain + k-1;
	if (!partial->key && *partial->p) {
		write_unlock(&pointers_lock);
		goto no_top;
	}
	for (p=partial;p>chain && all_zeroes((block_t*)p->bh->b_data,p->p);p--)
		;
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		*p->p = 0;
	}
	write_unlock(&pointers_lock);

	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}
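/*
 * free_data() clears a range of block-number slots and returns the
 * blocks to the free bitmap; this is the bottom (data) level of
 * free_branches() below.
 */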
static inline void free_data(struct inode *inode, block_t *p, block_t *q)
{
	unsigned long nr;

	for ( ; p < q ; p++) {
		nr = block_to_cpu(*p);
		if (nr) {
			*p = 0;
			minix_free_block(inode, nr);
		}
	}
}
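/*
 * free_branches() recursively frees a range of subtrees: at depth > 0
 * each slot names an indirect block whose children are freed first;
 * at depth 0 it degenerates to free_data().
 */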
static void free_branches(struct inode *inode, block_t *p, block_t *q, int depth)
{
	struct buffer_head * bh;
	unsigned long nr;

	if (depth--) {
		for ( ; p < q ; p++) {
			nr = block_to_cpu(*p);
			if (!nr)
				continue;
			*p = 0;
			bh = sb_bread(inode->i_sb, nr);
			if (!bh)
				continue;
			free_branches(inode, (block_t*)bh->b_data,
					block_end(bh), depth);
			bforget(bh);
			minix_free_block(inode, nr);
			mark_inode_dirty(inode);
		}
	} else
		free_data(inode, p, q);
}
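/*
 * truncate() cuts the tree back to the current i_size: zero the tail
 * of the last partial block, free the partially-shared branch found
 * by find_shared(), then kill every wholly-freed indirect subtree.
 */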
static inline void truncate (struct inode * inode)
{
	struct super_block *sb = inode->i_sb;
	block_t *idata = i_data(inode);
	int offsets[DEPTH];
	Indirect chain[DEPTH];
	Indirect *partial;
	block_t nr = 0;
	int n;
	int first_whole;
	long iblock;

	iblock = (inode->i_size + sb->s_blocksize -1) >> sb->s_blocksize_bits;
	block_truncate_page(inode->i_mapping, inode->i_size, get_block);

	n = block_to_path(inode, iblock, offsets);
	if (!n)
		return;

	if (n == 1) {
		free_data(inode, idata+offsets[0], idata + DIRECT);
		first_whole = 0;
		goto do_indirects;
	}

	first_whole = offsets[0] + 1 - DIRECT;
	partial = find_shared(inode, n, offsets, chain, &nr);
	if (nr) {
		if (partial == chain)
			mark_inode_dirty(inode);
		else
			mark_buffer_dirty_inode(partial->bh, inode);
		free_branches(inode, &nr, &nr+1, (chain+n-1) - partial);
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		free_branches(inode, partial->p + 1, block_end(partial->bh),
				(chain+n-1) - partial);
		mark_buffer_dirty_inode(partial->bh, inode);
		brelse (partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees */
	while (first_whole < DEPTH-1) {
		nr = idata[DIRECT+first_whole];
		if (nr) {
			idata[DIRECT+first_whole] = 0;
			mark_inode_dirty(inode);
			free_branches(inode, &nr, &nr+1, first_whole+1);
		}
		first_whole++;
	}
	inode->i_mtime = inode->i_ctime = current_time(inode);
	mark_inode_dirty(inode);
}
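/*
 * nblocks() estimates how many blocks (data plus indirect) a file of
 * the given size occupies; the fs-specific wrappers expose it (e.g.
 * V2_minix_blocks()) for use when filling in stat information.
 */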
static inline unsigned nblocks(loff_t size, struct super_block *sb)
{
	int k = sb->s_blocksize_bits - 10;
	unsigned blocks, res, direct = DIRECT, i = DEPTH;
	blocks = (size + sb->s_blocksize - 1) >> (BLOCK_SIZE_BITS + k);
	res = blocks;
	while (--i && blocks > direct) {
		blocks -= direct;
		blocks += sb->s_blocksize/sizeof(block_t) - 1;
		blocks /= sb->s_blocksize/sizeof(block_t);
		res += blocks;
		direct = 1;
	}
	return res;
}
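/*
 * A minimal sketch of how the fs-specific side wires this file up,
 * modelled on fs/minix/itree_v2.c.  The constants and the i2_data
 * field shown here are illustrative, not the authoritative upstream
 * definitions:
 *
 *	typedef u32 block_t;	// 32-bit on-disk block pointer (v2)
 *	enum {DIRECT = 7, DEPTH = 4};	// 7 direct slots, 3 indirect levels
 *
 *	static inline unsigned long block_to_cpu(block_t n) { return n; }
 *	static inline block_t cpu_to_block(unsigned long n) { return n; }
 *
 *	static inline block_t *i_data(struct inode *inode)
 *	{
 *		return (block_t *)minix_i(inode)->u.i2_data;
 *	}
 *
 *	static int block_to_path(struct inode *inode, long block,
 *				 int offsets[DEPTH])
 *	{
 *		// split 'block' into one index per tree level,
 *		// return the number of levels used
 *	}
 *
 *	#include "itree_common.c"
 *
 *	int V2_minix_get_block(struct inode *inode, long block,
 *			       struct buffer_head *bh_result, int create)
 *	{
 *		return get_block(inode, block, bh_result, create);
 *	}
 */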