linux-2.6-microblaze.git: fs/udf/balloc.c
/*
 * balloc.c
 *
 * PURPOSE
 *      Block allocation handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *      This file is distributed under the terms of the GNU General Public
 *      License (GPL). Copies of the GPL can be obtained from:
 *              ftp://prep.ai.mit.edu/pub/gnu/GPL
 *      Each contributing author retains all rights to their own work.
 *
 *  (C) 1999-2001 Ben Fennema
 *  (C) 1999 Stelias Computing Inc
 *
 * HISTORY
 *
 *  02/24/99 blf  Created.
 *
 */

#include "udfdecl.h"

#include <linux/bitops.h>

#include "udf_i.h"
#include "udf_sb.h"

#define udf_clear_bit   __test_and_clear_bit_le
#define udf_set_bit     __test_and_set_bit_le
#define udf_test_bit    test_bit_le
#define udf_find_next_one_bit   find_next_bit_le

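/*
 * Read bitmap block number @block of the bitmap extent and cache the
 * buffer head in @bitmap->s_block_bitmap[@bitmap_nr].  Returns 0 on
 * success and -EIO if the block could not be read.
 */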
static int read_block_bitmap(struct super_block *sb,
                             struct udf_bitmap *bitmap, unsigned int block,
                             unsigned long bitmap_nr)
{
        struct buffer_head *bh = NULL;
        int retval = 0;
        struct kernel_lb_addr loc;

        loc.logicalBlockNum = bitmap->s_extPosition;
        loc.partitionReferenceNum = UDF_SB(sb)->s_partition;

        bh = udf_tread(sb, udf_get_lb_pblock(sb, &loc, block));
        if (!bh)
                retval = -EIO;

        bitmap->s_block_bitmap[bitmap_nr] = bh;
        return retval;
}

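/*
 * Make sure the bitmap block for @block_group is loaded and return the
 * cache slot it occupies (currently always @block_group itself), or a
 * negative errno if reading it failed.
 */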
static int __load_block_bitmap(struct super_block *sb,
                               struct udf_bitmap *bitmap,
                               unsigned int block_group)
{
        int retval = 0;
        int nr_groups = bitmap->s_nr_groups;

        if (block_group >= nr_groups) {
                udf_debug("block_group (%u) > nr_groups (%d)\n",
                          block_group, nr_groups);
        }

        if (bitmap->s_block_bitmap[block_group])
                return block_group;

        retval = read_block_bitmap(sb, bitmap, block_group, block_group);
        if (retval < 0)
                return retval;

        return block_group;
}

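/*
 * Wrapper around __load_block_bitmap() that additionally verifies the
 * cached buffer head is present; returns the slot number or -EIO.
 */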
static inline int load_block_bitmap(struct super_block *sb,
                                    struct udf_bitmap *bitmap,
                                    unsigned int block_group)
{
        int slot;

        slot = __load_block_bitmap(sb, bitmap, block_group);

        if (slot < 0)
                return slot;

        if (!bitmap->s_block_bitmap[slot])
                return -EIO;

        return slot;
}

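/*
 * Adjust the free space count for @partition in the Logical Volume
 * Integrity Descriptor by @cnt (negative when blocks are allocated)
 * and mark the LVID as needing to be written out.
 */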
static void udf_add_free_space(struct super_block *sb, u16 partition, u32 cnt)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        struct logicalVolIntegrityDesc *lvid;

        if (!sbi->s_lvid_bh)
                return;

        lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
        le32_add_cpu(&lvid->freeSpaceTable[partition], cnt);
        udf_updated_lvid(sb);
}

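/*
 * Free @count blocks starting at @bloc + @offset by setting the
 * corresponding bits in the space bitmap.  Runs that cross a bitmap
 * block (group) boundary are handled in pieces, and the LVID free
 * space counter is updated for each piece.
 */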
static void udf_bitmap_free_blocks(struct super_block *sb,
                                   struct udf_bitmap *bitmap,
                                   struct kernel_lb_addr *bloc,
                                   uint32_t offset,
                                   uint32_t count)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        struct buffer_head *bh = NULL;
        struct udf_part_map *partmap;
        unsigned long block;
        unsigned long block_group;
        unsigned long bit;
        unsigned long i;
        int bitmap_nr;
        unsigned long overflow;

        mutex_lock(&sbi->s_alloc_mutex);
        partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
        if (bloc->logicalBlockNum + count < count ||
            (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
                udf_debug("%u < %d || %u + %u > %u\n",
                          bloc->logicalBlockNum, 0,
                          bloc->logicalBlockNum, count,
                          partmap->s_partition_len);
                goto error_return;
        }

        block = bloc->logicalBlockNum + offset +
                (sizeof(struct spaceBitmapDesc) << 3);

        do {
                overflow = 0;
                block_group = block >> (sb->s_blocksize_bits + 3);
                bit = block % (sb->s_blocksize << 3);

                /*
                 * Check to see if we are freeing blocks across a group boundary.
                 */
                if (bit + count > (sb->s_blocksize << 3)) {
                        overflow = bit + count - (sb->s_blocksize << 3);
                        count -= overflow;
                }
                bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
                if (bitmap_nr < 0)
                        goto error_return;

                bh = bitmap->s_block_bitmap[bitmap_nr];
                for (i = 0; i < count; i++) {
                        if (udf_set_bit(bit + i, bh->b_data)) {
                                udf_debug("bit %lu already set\n", bit + i);
                                udf_debug("byte=%2x\n",
                                          ((__u8 *)bh->b_data)[(bit + i) >> 3]);
                        }
                }
                udf_add_free_space(sb, sbi->s_partition, count);
                mark_buffer_dirty(bh);
                if (overflow) {
                        block += count;
                        count = overflow;
                }
        } while (overflow);

error_return:
        mutex_unlock(&sbi->s_alloc_mutex);
}

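/*
 * Try to preallocate up to @block_count blocks contiguously from
 * @first_block by clearing their bits in the space bitmap.  Allocation
 * stops at the first block that is already in use; the number of blocks
 * actually allocated is returned.
 */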
static int udf_bitmap_prealloc_blocks(struct super_block *sb,
                                      struct udf_bitmap *bitmap,
                                      uint16_t partition, uint32_t first_block,
                                      uint32_t block_count)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        int alloc_count = 0;
        int bit, block, block_group;
        int bitmap_nr;
        struct buffer_head *bh;
        __u32 part_len;

        mutex_lock(&sbi->s_alloc_mutex);
        part_len = sbi->s_partmaps[partition].s_partition_len;
        if (first_block >= part_len)
                goto out;

        if (first_block + block_count > part_len)
                block_count = part_len - first_block;

        do {
                block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
                block_group = block >> (sb->s_blocksize_bits + 3);

                bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
                if (bitmap_nr < 0)
                        goto out;
                bh = bitmap->s_block_bitmap[bitmap_nr];

                bit = block % (sb->s_blocksize << 3);

                while (bit < (sb->s_blocksize << 3) && block_count > 0) {
                        if (!udf_clear_bit(bit, bh->b_data))
                                goto out;
                        block_count--;
                        alloc_count++;
                        bit++;
                        block++;
                }
                mark_buffer_dirty(bh);
        } while (block_count > 0);

out:
        udf_add_free_space(sb, partition, -alloc_count);
        mutex_unlock(&sbi->s_alloc_mutex);
        return alloc_count;
}

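/*
 * Allocate a single block from the space bitmap, preferring the block
 * @goal.  If the goal is not free, the bitmap is scanned for a nearby
 * free bit, walking back a few bits to the start of a free run.
 * Returns the allocated block number with *err set to 0, or 0 with
 * *err set to an error code.
 */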
static udf_pblk_t udf_bitmap_new_block(struct super_block *sb,
                                struct udf_bitmap *bitmap, uint16_t partition,
                                uint32_t goal, int *err)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        int newbit, bit = 0;
        udf_pblk_t block;
        int block_group, group_start;
        int end_goal, nr_groups, bitmap_nr, i;
        struct buffer_head *bh = NULL;
        char *ptr;
        udf_pblk_t newblock = 0;

        *err = -ENOSPC;
        mutex_lock(&sbi->s_alloc_mutex);

repeat:
        if (goal >= sbi->s_partmaps[partition].s_partition_len)
                goal = 0;

        nr_groups = bitmap->s_nr_groups;
        block = goal + (sizeof(struct spaceBitmapDesc) << 3);
        block_group = block >> (sb->s_blocksize_bits + 3);
        group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

        bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
        if (bitmap_nr < 0)
                goto error_return;
        bh = bitmap->s_block_bitmap[bitmap_nr];
        ptr = memscan((char *)bh->b_data + group_start, 0xFF,
                      sb->s_blocksize - group_start);

        if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
                bit = block % (sb->s_blocksize << 3);
                if (udf_test_bit(bit, bh->b_data))
                        goto got_block;

                end_goal = (bit + 63) & ~63;
                bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);
                if (bit < end_goal)
                        goto got_block;

                ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF,
                              sb->s_blocksize - ((bit + 7) >> 3));
                newbit = (ptr - ((char *)bh->b_data)) << 3;
                if (newbit < sb->s_blocksize << 3) {
                        bit = newbit;
                        goto search_back;
                }

                newbit = udf_find_next_one_bit(bh->b_data,
                                               sb->s_blocksize << 3, bit);
                if (newbit < sb->s_blocksize << 3) {
                        bit = newbit;
                        goto got_block;
                }
        }

        for (i = 0; i < (nr_groups * 2); i++) {
                block_group++;
                if (block_group >= nr_groups)
                        block_group = 0;
                group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

                bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
                if (bitmap_nr < 0)
                        goto error_return;
                bh = bitmap->s_block_bitmap[bitmap_nr];
                if (i < nr_groups) {
                        ptr = memscan((char *)bh->b_data + group_start, 0xFF,
                                      sb->s_blocksize - group_start);
                        if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
                                bit = (ptr - ((char *)bh->b_data)) << 3;
                                break;
                        }
                } else {
                        bit = udf_find_next_one_bit(bh->b_data,
                                                    sb->s_blocksize << 3,
                                                    group_start << 3);
                        if (bit < sb->s_blocksize << 3)
                                break;
                }
        }
        if (i >= (nr_groups * 2)) {
                mutex_unlock(&sbi->s_alloc_mutex);
                return newblock;
        }
        if (bit < sb->s_blocksize << 3)
                goto search_back;
        else
                bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3,
                                            group_start << 3);
        if (bit >= sb->s_blocksize << 3) {
                mutex_unlock(&sbi->s_alloc_mutex);
                return 0;
        }

search_back:
        i = 0;
        while (i < 7 && bit > (group_start << 3) &&
               udf_test_bit(bit - 1, bh->b_data)) {
                ++i;
                --bit;
        }

got_block:
        newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
                (sizeof(struct spaceBitmapDesc) << 3);

        if (newblock >= sbi->s_partmaps[partition].s_partition_len) {
                /*
                 * Ran off the end of the bitmap, and bits following are
                 * non-compliant (not all zero)
                 */
                udf_err(sb, "bitmap for partition %d corrupted (block %u marked"
                        " as free, partition length is %u)\n", partition,
                        newblock, sbi->s_partmaps[partition].s_partition_len);
                goto error_return;
        }

        if (!udf_clear_bit(bit, bh->b_data)) {
                udf_debug("bit already cleared for block %d\n", bit);
                goto repeat;
        }

        mark_buffer_dirty(bh);

        udf_add_free_space(sb, partition, -1);
        mutex_unlock(&sbi->s_alloc_mutex);
        *err = 0;
        return newblock;

error_return:
        *err = -EIO;
        mutex_unlock(&sbi->s_alloc_mutex);
        return 0;
}

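/*
 * Free @count blocks starting at @bloc + @offset into the unallocated
 * space table @table.  The freed range is merged with an adjacent free
 * extent when possible; otherwise a new extent is appended, stealing
 * one of the freed blocks for a new allocation extent block if the
 * current one is full.
 */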
static void udf_table_free_blocks(struct super_block *sb,
                                  struct inode *table,
                                  struct kernel_lb_addr *bloc,
                                  uint32_t offset,
                                  uint32_t count)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        struct udf_part_map *partmap;
        uint32_t start, end;
        uint32_t elen;
        struct kernel_lb_addr eloc;
        struct extent_position oepos, epos;
        int8_t etype;
        struct udf_inode_info *iinfo;

        mutex_lock(&sbi->s_alloc_mutex);
        partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
        if (bloc->logicalBlockNum + count < count ||
            (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
                udf_debug("%u < %d || %u + %u > %u\n",
                          bloc->logicalBlockNum, 0,
                          bloc->logicalBlockNum, count,
                          partmap->s_partition_len);
                goto error_return;
        }

        iinfo = UDF_I(table);
        udf_add_free_space(sb, sbi->s_partition, count);

        start = bloc->logicalBlockNum + offset;
        end = bloc->logicalBlockNum + offset + count - 1;

        epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry);
        elen = 0;
        epos.block = oepos.block = iinfo->i_location;
        epos.bh = oepos.bh = NULL;

        while (count &&
               (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
                if (((eloc.logicalBlockNum +
                        (elen >> sb->s_blocksize_bits)) == start)) {
                        if ((0x3FFFFFFF - elen) <
                                        (count << sb->s_blocksize_bits)) {
                                uint32_t tmp = ((0x3FFFFFFF - elen) >>
                                                        sb->s_blocksize_bits);
                                count -= tmp;
                                start += tmp;
                                elen = (etype << 30) |
                                        (0x40000000 - sb->s_blocksize);
                        } else {
                                elen = (etype << 30) |
                                        (elen +
                                        (count << sb->s_blocksize_bits));
                                start += count;
                                count = 0;
                        }
                        udf_write_aext(table, &oepos, &eloc, elen, 1);
                } else if (eloc.logicalBlockNum == (end + 1)) {
                        if ((0x3FFFFFFF - elen) <
                                        (count << sb->s_blocksize_bits)) {
                                uint32_t tmp = ((0x3FFFFFFF - elen) >>
                                                sb->s_blocksize_bits);
                                count -= tmp;
                                end -= tmp;
                                eloc.logicalBlockNum -= tmp;
                                elen = (etype << 30) |
                                        (0x40000000 - sb->s_blocksize);
                        } else {
                                eloc.logicalBlockNum = start;
                                elen = (etype << 30) |
                                        (elen +
                                        (count << sb->s_blocksize_bits));
                                end -= count;
                                count = 0;
                        }
                        udf_write_aext(table, &oepos, &eloc, elen, 1);
                }

                if (epos.bh != oepos.bh) {
                        oepos.block = epos.block;
                        brelse(oepos.bh);
                        get_bh(epos.bh);
                        oepos.bh = epos.bh;
                        oepos.offset = 0;
                } else {
                        oepos.offset = epos.offset;
                }
        }

        if (count) {
                /*
                 * NOTE: we CANNOT use udf_add_aext here, as it can try to
                 * allocate a new block, and since we hold the super block
                 * lock already very bad things would happen :)
                 *
                 * We copy the behavior of udf_add_aext, but instead of
                 * trying to allocate a new block close to the existing one,
                 * we just steal a block from the extent we are trying to add.
                 *
                 * It would be nice if the blocks were close together, but it
                 * isn't required.
                 */

                int adsize;

                eloc.logicalBlockNum = start;
                elen = EXT_RECORDED_ALLOCATED |
                        (count << sb->s_blocksize_bits);

                if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
                        adsize = sizeof(struct short_ad);
                else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
                        adsize = sizeof(struct long_ad);
                else {
                        brelse(oepos.bh);
                        brelse(epos.bh);
                        goto error_return;
                }

                if (epos.offset + (2 * adsize) > sb->s_blocksize) {
                        /* Steal a block from the extent being free'd */
                        udf_setup_indirect_aext(table, eloc.logicalBlockNum,
                                                &epos);

                        eloc.logicalBlockNum++;
                        elen -= sb->s_blocksize;
                }

                /* It's possible that stealing the block emptied the extent */
                if (elen)
                        __udf_add_aext(table, &epos, &eloc, elen, 1);
        }

        brelse(epos.bh);
        brelse(oepos.bh);

error_return:
        mutex_unlock(&sbi->s_alloc_mutex);
        return;
}

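/*
 * Preallocate up to @block_count blocks starting at @first_block from
 * the unallocated space table.  This only succeeds if a free extent
 * begins exactly at @first_block; that extent is then shortened (or
 * deleted) and the number of allocated blocks is returned.
 */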
static int udf_table_prealloc_blocks(struct super_block *sb,
                                     struct inode *table, uint16_t partition,
                                     uint32_t first_block, uint32_t block_count)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        int alloc_count = 0;
        uint32_t elen, adsize;
        struct kernel_lb_addr eloc;
        struct extent_position epos;
        int8_t etype = -1;
        struct udf_inode_info *iinfo;

        if (first_block >= sbi->s_partmaps[partition].s_partition_len)
                return 0;

        iinfo = UDF_I(table);
        if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
                adsize = sizeof(struct short_ad);
        else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
                adsize = sizeof(struct long_ad);
        else
                return 0;

        mutex_lock(&sbi->s_alloc_mutex);
        epos.offset = sizeof(struct unallocSpaceEntry);
        epos.block = iinfo->i_location;
        epos.bh = NULL;
        eloc.logicalBlockNum = 0xFFFFFFFF;

        while (first_block != eloc.logicalBlockNum &&
               (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
                udf_debug("eloc=%u, elen=%u, first_block=%u\n",
                          eloc.logicalBlockNum, elen, first_block);
                ; /* empty loop body */
        }

        if (first_block == eloc.logicalBlockNum) {
                epos.offset -= adsize;

                alloc_count = (elen >> sb->s_blocksize_bits);
                if (alloc_count > block_count) {
                        alloc_count = block_count;
                        eloc.logicalBlockNum += alloc_count;
                        elen -= (alloc_count << sb->s_blocksize_bits);
                        udf_write_aext(table, &epos, &eloc,
                                        (etype << 30) | elen, 1);
                } else
                        udf_delete_aext(table, epos);
        } else {
                alloc_count = 0;
        }

        brelse(epos.bh);

        if (alloc_count)
                udf_add_free_space(sb, partition, -alloc_count);
        mutex_unlock(&sbi->s_alloc_mutex);
        return alloc_count;
}

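/*
 * Allocate a single block from the unallocated space table.  The free
 * extent whose start is closest to @goal is located and its first
 * block is taken; the extent is then shortened or deleted.
 */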
static udf_pblk_t udf_table_new_block(struct super_block *sb,
                               struct inode *table, uint16_t partition,
                               uint32_t goal, int *err)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
        udf_pblk_t newblock = 0;
        uint32_t adsize;
        uint32_t elen, goal_elen = 0;
        struct kernel_lb_addr eloc, goal_eloc;
        struct extent_position epos, goal_epos;
        int8_t etype;
        struct udf_inode_info *iinfo = UDF_I(table);

        *err = -ENOSPC;

        if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
                adsize = sizeof(struct short_ad);
        else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
                adsize = sizeof(struct long_ad);
        else
                return newblock;

        mutex_lock(&sbi->s_alloc_mutex);
        if (goal >= sbi->s_partmaps[partition].s_partition_len)
                goal = 0;

        /* We search for the closest matching block to goal. If we find
           an exact hit, we stop. Otherwise we keep going till we run out
           of extents. We store the buffer_head, bloc, and extoffset
           of the current closest match and use that when we are done.
         */
        epos.offset = sizeof(struct unallocSpaceEntry);
        epos.block = iinfo->i_location;
        epos.bh = goal_epos.bh = NULL;

        while (spread &&
               (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
                if (goal >= eloc.logicalBlockNum) {
                        if (goal < eloc.logicalBlockNum +
                                        (elen >> sb->s_blocksize_bits))
                                nspread = 0;
                        else
                                nspread = goal - eloc.logicalBlockNum -
                                        (elen >> sb->s_blocksize_bits);
                } else {
                        nspread = eloc.logicalBlockNum - goal;
                }

                if (nspread < spread) {
                        spread = nspread;
                        if (goal_epos.bh != epos.bh) {
                                brelse(goal_epos.bh);
                                goal_epos.bh = epos.bh;
                                get_bh(goal_epos.bh);
                        }
                        goal_epos.block = epos.block;
                        goal_epos.offset = epos.offset - adsize;
                        goal_eloc = eloc;
                        goal_elen = (etype << 30) | elen;
                }
        }

        brelse(epos.bh);

        if (spread == 0xFFFFFFFF) {
                brelse(goal_epos.bh);
                mutex_unlock(&sbi->s_alloc_mutex);
                return 0;
        }

        /* Only allocate blocks from the beginning of the extent.
           That way, we only delete (empty) extents, never have to insert an
           extent because of splitting */
        /* This works, but very poorly.... */

        newblock = goal_eloc.logicalBlockNum;
        goal_eloc.logicalBlockNum++;
        goal_elen -= sb->s_blocksize;

        if (goal_elen)
                udf_write_aext(table, &goal_epos, &goal_eloc, goal_elen, 1);
        else
                udf_delete_aext(table, goal_epos);
        brelse(goal_epos.bh);

        udf_add_free_space(sb, partition, -1);

        mutex_unlock(&sbi->s_alloc_mutex);
        *err = 0;
        return newblock;
}

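/*
 * Free blocks belonging to @inode, dispatching to the bitmap or table
 * based implementation according to the partition's free space flags,
 * and adjust the inode's block accounting.
 */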
void udf_free_blocks(struct super_block *sb, struct inode *inode,
                     struct kernel_lb_addr *bloc, uint32_t offset,
                     uint32_t count)
{
        uint16_t partition = bloc->partitionReferenceNum;
        struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

        if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
                udf_bitmap_free_blocks(sb, map->s_uspace.s_bitmap,
                                       bloc, offset, count);
        } else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
                udf_table_free_blocks(sb, map->s_uspace.s_table,
                                      bloc, offset, count);
        }

        if (inode) {
                inode_sub_bytes(inode,
                                ((sector_t)count) << sb->s_blocksize_bits);
        }
}

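/*
 * Preallocate blocks for @inode, dispatching to the bitmap or table
 * based implementation, and add the allocated bytes to the inode.
 */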
inline int udf_prealloc_blocks(struct super_block *sb,
                               struct inode *inode,
                               uint16_t partition, uint32_t first_block,
                               uint32_t block_count)
{
        struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
        int allocated;

        if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
                allocated = udf_bitmap_prealloc_blocks(sb,
                                                       map->s_uspace.s_bitmap,
                                                       partition, first_block,
                                                       block_count);
        else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
                allocated = udf_table_prealloc_blocks(sb,
                                                      map->s_uspace.s_table,
                                                      partition, first_block,
                                                      block_count);
        else
                return 0;

        if (inode && allocated > 0)
                inode_add_bytes(inode, allocated << sb->s_blocksize_bits);
        return allocated;
}

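/*
 * Allocate a single block for @inode near @goal, dispatching to the
 * bitmap or table based implementation for the partition.
 */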
inline udf_pblk_t udf_new_block(struct super_block *sb,
                         struct inode *inode,
                         uint16_t partition, uint32_t goal, int *err)
{
        struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
        udf_pblk_t block;

        if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
                block = udf_bitmap_new_block(sb,
                                             map->s_uspace.s_bitmap,
                                             partition, goal, err);
        else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
                block = udf_table_new_block(sb,
                                            map->s_uspace.s_table,
                                            partition, goal, err);
        else {
                *err = -EIO;
                return 0;
        }
        if (inode && block)
                inode_add_bytes(inode, sb->s_blocksize);
        return block;
}