/*
 * rfd_ftl.c -- resident flash disk (flash translation layer)
 *
 * Copyright © 2005 Sean Young <sean@mess.org>
 *
 * This type of flash translation layer (FTL) is used by the Embedded BIOS
 * by General Software. It is known as the Resident Flash Disk (RFD), see:
 *
 *	http://www.gensw.com/pages/prod/bios/rfd.htm
 */
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/module.h>

#include <asm/types.h>

static int block_size;
module_param(block_size, int, 0);
MODULE_PARM_DESC(block_size, "Block size to use by RFD, defaults to erase unit size");
#define PREFIX "rfd_ftl: "

/* This major has been assigned by device@lanana.org */
#define RFD_FTL_MAJOR		256

/* Maximum number of partitions in an FTL region */
#define PART_BITS		4

/* An erase unit should start with this value */
#define RFD_MAGIC		0x9193

/* the second value is 0xffff or 0xffc8; function unknown */

/* the third value is always 0xffff, ignored */

/* next is an array of mapping for each corresponding sector */
#define HEADER_MAP_OFFSET	3
#define SECTOR_DELETED		0x0000
#define SECTOR_ZERO		0xfffe
#define SECTOR_FREE		0xffff

#define SECTOR_SIZE		512

#define SECTORS_PER_TRACK	63
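/*
 * On-flash layout of an erase unit: the first three little-endian u16 words
 * are RFD_MAGIC, a value of unknown meaning (0xffff or 0xffc8) and an
 * ignored word (always 0xffff), followed by one u16 map entry per data
 * sector giving the logical sector stored there, or SECTOR_FREE,
 * SECTOR_DELETED or SECTOR_ZERO. The header occupies the first
 * header_sectors_per_block sectors of the unit; the remaining
 * data_sectors_per_block sectors hold user data.
 */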
struct block {
	enum { BLOCK_OK, BLOCK_ERASING, BLOCK_ERASED, BLOCK_UNUSED, BLOCK_FAILED } state;
	int free_sectors;
	int used_sectors;
	int erases;
	u_long offset;
};

struct partition {
	struct mtd_blktrans_dev mbd;

	u_int block_size;		/* size of erase unit */
	u_int total_blocks;		/* number of erase units */
	u_int header_sectors_per_block;	/* header sectors in erase unit */
	u_int data_sectors_per_block;	/* data sectors in erase unit */
	u_int sector_count;		/* sectors in translated disk */
	u_int header_size;		/* bytes in header sector */
	int reserved_block;		/* block next up for reclaim */
	int current_block;		/* block to write to */
	u16 *header_cache;		/* cached header */

	int is_reclaiming;
	int cylinders;
	int errors;
	u_long *sector_map;
	struct block *blocks;
};
static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf);
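/*
 * Parse the cached header of erase unit 'block_no': mark the unit unused if
 * the magic is missing, count its free and used sectors, and record the
 * flash address of every live logical sector in sector_map. A completely
 * free unit becomes the reserved (reclaim spare) block.
 */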
static int build_block_map(struct partition *part, int block_no)
{
	struct block *block = &part->blocks[block_no];
	int i;

	block->offset = part->block_size * block_no;

	if (le16_to_cpu(part->header_cache[0]) != RFD_MAGIC) {
		block->state = BLOCK_UNUSED;
		return -ENOENT;
	}

	block->state = BLOCK_OK;

	for (i = 0; i < part->data_sectors_per_block; i++) {
		u16 entry;

		entry = le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i]);

		if (entry == SECTOR_DELETED)
			continue;

		if (entry == SECTOR_FREE) {
			block->free_sectors++;
			continue;
		}

		if (entry == SECTOR_ZERO)
			entry = 0;

		if (entry >= part->sector_count) {
			printk(KERN_WARNING PREFIX
				"'%s': unit #%d: entry %d corrupt, "
				"sector %d out of range\n",
				part->mbd.mtd->name, block_no, i, entry);
			continue;
		}

		if (part->sector_map[entry] != -1) {
			printk(KERN_WARNING PREFIX
				"'%s': more than one entry for sector %d\n",
				part->mbd.mtd->name, entry);
			part->errors = 1;
			continue;
		}

		part->sector_map[entry] = block->offset +
			(i + part->header_sectors_per_block) * SECTOR_SIZE;

		block->used_sectors++;
	}

	if (block->free_sectors == part->data_sectors_per_block)
		part->reserved_block = block_no;

	return 0;
}
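/*
 * Derive the disk geometry from the MTD size and erase unit size, allocate
 * the header cache, block array and sector map, then build the block map
 * from every erase unit's header. For example, a 64 KiB erase unit holds
 * 128 512-byte sectors; its header needs (3 + 128) * 2 = 262 bytes, which
 * rounds up to one header sector and leaves 127 data sectors per unit. One
 * unit's worth of capacity is withheld so a spare remains for reclaim.
 */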
static int scan_header(struct partition *part)
{
	int sectors_per_block;
	int i, rc = -ENOMEM;
	int blocks_found;
	size_t retlen;

	sectors_per_block = part->block_size / SECTOR_SIZE;
	part->total_blocks = (u32)part->mbd.mtd->size / part->block_size;

	if (part->total_blocks < 2)
		return -ENOENT;

	/* each erase unit has a three-word (u16) header, followed by the map */
	part->header_sectors_per_block =
			((HEADER_MAP_OFFSET + sectors_per_block) *
			sizeof(u16) + SECTOR_SIZE - 1) / SECTOR_SIZE;

	part->data_sectors_per_block = sectors_per_block -
			part->header_sectors_per_block;

	part->header_size = (HEADER_MAP_OFFSET +
			part->data_sectors_per_block) * sizeof(u16);

	part->cylinders = (part->data_sectors_per_block *
			(part->total_blocks - 1) - 1) / SECTORS_PER_TRACK;

	part->sector_count = part->cylinders * SECTORS_PER_TRACK;

	part->current_block = -1;
	part->reserved_block = -1;
	part->is_reclaiming = 0;

	part->header_cache = kmalloc(part->header_size, GFP_KERNEL);
	if (!part->header_cache)
		goto err;

	part->blocks = kcalloc(part->total_blocks, sizeof(struct block),
			GFP_KERNEL);
	if (!part->blocks)
		goto err;

	part->sector_map = vmalloc(array_size(sizeof(u_long),
			part->sector_count));
	if (!part->sector_map) {
		printk(KERN_ERR PREFIX "'%s': unable to allocate memory for "
			"sector map\n", part->mbd.mtd->name);
		goto err;
	}

	for (i = 0; i < part->sector_count; i++)
		part->sector_map[i] = -1;

	for (i = 0, blocks_found = 0; i < part->total_blocks; i++) {
		rc = mtd_read(part->mbd.mtd, i * part->block_size,
			      part->header_size, &retlen,
			      (u_char *)part->header_cache);

		if (!rc && retlen != part->header_size)
			rc = -EIO;

		if (rc)
			goto err;

		if (!build_block_map(part, i))
			blocks_found++;
	}

	if (blocks_found == 0) {
		printk(KERN_NOTICE PREFIX "no RFD magic found in '%s'\n",
				part->mbd.mtd->name);
		rc = -ENOENT;
		goto err;
	}

	if (part->reserved_block == -1) {
		printk(KERN_WARNING PREFIX "'%s': no empty erase unit found\n",
				part->mbd.mtd->name);
		part->errors = 1;
	}

	return 0;

err:
	vfree(part->sector_map);
	kfree(part->header_cache);
	kfree(part->blocks);

	return rc;
}
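/*
 * Read one 512-byte sector: look the logical sector up in sector_map and
 * read it from flash; unmapped sectors read back as zeroes.
 */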
static int rfd_ftl_readsect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
{
	struct partition *part = (struct partition *)dev;
	size_t retlen;
	u_long addr;
	int rc;

	if (sector >= part->sector_count)
		return -EIO;

	addr = part->sector_map[sector];
	if (addr != -1) {
		rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
			      (u_char *)buf);
		if (!rc && retlen != SECTOR_SIZE)
			rc = -EIO;
		if (rc) {
			printk(KERN_WARNING PREFIX "error reading '%s' at "
				"0x%lx\n", part->mbd.mtd->name, addr);
			return rc;
		}
	} else
		memset(buf, 0, SECTOR_SIZE);

	return 0;
}
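/*
 * Erase one unit and, on success, rewrite the RFD_MAGIC marker at its
 * start. A failed erase or marker write leaves the unit in BLOCK_FAILED.
 */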
static int erase_block(struct partition *part, int block)
{
	struct erase_info *erase;
	int rc = -ENOMEM;

	erase = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
	if (!erase)
		goto err;

	erase->addr = part->blocks[block].offset;
	erase->len = part->block_size;

	part->blocks[block].state = BLOCK_ERASING;
	part->blocks[block].free_sectors = 0;

	rc = mtd_erase(part->mbd.mtd, erase);
	if (rc) {
		printk(KERN_ERR PREFIX "erase of region %llx,%llx on '%s' "
				"failed\n", (unsigned long long)erase->addr,
				(unsigned long long)erase->len, part->mbd.mtd->name);
		part->blocks[block].state = BLOCK_FAILED;
		part->blocks[block].free_sectors = 0;
		part->blocks[block].used_sectors = 0;
	} else {
		u16 magic = cpu_to_le16(RFD_MAGIC);
		size_t retlen;

		part->blocks[block].state = BLOCK_ERASED;
		part->blocks[block].free_sectors = part->data_sectors_per_block;
		part->blocks[block].used_sectors = 0;
		part->blocks[block].erases++;

		rc = mtd_write(part->mbd.mtd, part->blocks[block].offset,
			       sizeof(magic), &retlen, (u_char *)&magic);
		if (!rc && retlen != sizeof(magic))
			rc = -EIO;
		if (rc) {
			pr_err(PREFIX "'%s': unable to write RFD header at 0x%lx\n",
			       part->mbd.mtd->name, part->blocks[block].offset);
			part->blocks[block].state = BLOCK_FAILED;
		} else
			part->blocks[block].state = BLOCK_OK;
	}

	kfree(erase);
err:
	return rc;
}
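/*
 * Copy every live sector out of the unit being reclaimed by rewriting it
 * through rfd_ftl_writesect(), skipping free, deleted and out-of-range map
 * entries as well as the sector that is about to be superseded.
 */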
static int move_block_contents(struct partition *part, int block_no, u_long *old_sector)
{
	void *sector_data;
	u16 *map;
	size_t retlen;
	int i, rc = -ENOMEM;

	part->is_reclaiming = 1;

	sector_data = kmalloc(SECTOR_SIZE, GFP_KERNEL);
	if (!sector_data)
		goto err3;

	map = kmalloc(part->header_size, GFP_KERNEL);
	if (!map)
		goto err2;

	rc = mtd_read(part->mbd.mtd, part->blocks[block_no].offset,
		      part->header_size, &retlen, (u_char *)map);

	if (!rc && retlen != part->header_size)
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "error reading '%s' at "
			"0x%lx\n", part->mbd.mtd->name,
			part->blocks[block_no].offset);
		goto err;
	}

	for (i = 0; i < part->data_sectors_per_block; i++) {
		u16 entry = le16_to_cpu(map[HEADER_MAP_OFFSET + i]);
		u_long addr;

		if (entry == SECTOR_FREE || entry == SECTOR_DELETED)
			continue;

		if (entry == SECTOR_ZERO)
			entry = 0;

		/* already warned about and ignored in build_block_map() */
		if (entry >= part->sector_count)
			continue;

		addr = part->blocks[block_no].offset +
			(i + part->header_sectors_per_block) * SECTOR_SIZE;

		if (*old_sector == addr) {
			*old_sector = -1;
			if (!part->blocks[block_no].used_sectors--) {
				rc = erase_block(part, block_no);
				break;
			}
			continue;
		}

		rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
			      sector_data);

		if (!rc && retlen != SECTOR_SIZE)
			rc = -EIO;

		if (rc) {
			printk(KERN_ERR PREFIX "'%s': Unable to "
				"read sector for relocation\n",
				part->mbd.mtd->name);
			goto err;
		}

		rc = rfd_ftl_writesect((struct mtd_blktrans_dev *)part,
				       entry, sector_data);
		if (rc)
			goto err;
	}

err:
	kfree(map);
err2:
	kfree(sector_data);
err3:
	part->is_reclaiming = 0;

	return rc;
}
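/*
 * Pick the cheapest unit to reclaim: any remaining free sector postpones
 * reclaim entirely; otherwise the unit with the fewest live sectors, biased
 * by its erase count for crude wear levelling, is moved or erased and
 * becomes the new reserved block.
 */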
static int reclaim_block(struct partition *part, u_long *old_sector)
{
	int block, best_block, score, old_sector_block;
	int rc;

	/* we have a race if sync doesn't exist */
	mtd_sync(part->mbd.mtd);

	score = 0x7fffffff; /* MAX_INT */
	best_block = -1;
	if (*old_sector != -1)
		old_sector_block = *old_sector / part->block_size;
	else
		old_sector_block = -1;

	for (block = 0; block < part->total_blocks; block++) {
		int this_score;

		if (block == part->reserved_block)
			continue;

		/*
		 * Postpone reclaiming while a free sector remains: the more
		 * sectors have been deleted by then, the less data has to
		 * be moved.
		 */
		if (part->blocks[block].free_sectors)
			return 0;

		this_score = part->blocks[block].used_sectors;

		if (block == old_sector_block)
			this_score--;
		else {
			/* no point in moving a full block */
			if (part->blocks[block].used_sectors ==
					part->data_sectors_per_block)
				continue;
		}

		this_score += part->blocks[block].erases;

		if (this_score < score) {
			best_block = block;
			score = this_score;
		}
	}

	if (best_block == -1)
		return -ENOSPC;

	part->current_block = -1;
	part->reserved_block = best_block;

	pr_debug("reclaim_block: reclaiming block #%d with %d used "
		 "%d free sectors\n", best_block,
		 part->blocks[best_block].used_sectors,
		 part->blocks[best_block].free_sectors);

	if (part->blocks[best_block].used_sectors)
		rc = move_block_contents(part, best_block, old_sector);
	else
		rc = erase_block(part, best_block);

	return rc;
}
/*
 * IMPROVE: It would be best to choose the block with the most deleted sectors,
 * because if we fill that one up first it'll have the most chance of having
 * the least live sectors at reclaim.
 */
static int find_free_block(struct partition *part)
{
	int block, stop;

	block = part->current_block == -1 ?
			jiffies % part->total_blocks : part->current_block;
	stop = block;

	do {
		if (part->blocks[block].free_sectors &&
				block != part->reserved_block)
			return block;

		if (part->blocks[block].state == BLOCK_UNUSED)
			erase_block(part, block);

		if (++block >= part->total_blocks)
			block = 0;

	} while (block != stop);

	return -1;
}
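/*
 * Return an erase unit with at least one free sector, reclaiming space if
 * necessary, and cache that unit's header so free map entries can be found.
 */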
static int find_writable_block(struct partition *part, u_long *old_sector)
{
	int rc, block;
	size_t retlen;

	block = find_free_block(part);

	if (block == -1) {
		if (!part->is_reclaiming) {
			rc = reclaim_block(part, old_sector);
			if (rc)
				goto err;

			block = find_free_block(part);
		}

		if (block == -1) {
			rc = -ENOSPC;
			goto err;
		}
	}

	rc = mtd_read(part->mbd.mtd, part->blocks[block].offset,
		      part->header_size, &retlen,
		      (u_char *)part->header_cache);

	if (!rc && retlen != part->header_size)
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "'%s': unable to read header at "
				"0x%lx\n", part->mbd.mtd->name,
				part->blocks[block].offset);
		goto err;
	}

	part->current_block = block;
err:
	return rc;
}
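/*
 * Clear a sector's map entry to SECTOR_DELETED in place (programming bits
 * to zero needs no erase on NOR flash). A unit left with neither used nor
 * free sectors is erased immediately.
 */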
static int mark_sector_deleted(struct partition *part, u_long old_addr)
{
	int block, offset, rc;
	u_long addr;
	size_t retlen;
	u16 del = cpu_to_le16(SECTOR_DELETED);

	block = old_addr / part->block_size;
	offset = (old_addr % part->block_size) / SECTOR_SIZE -
		part->header_sectors_per_block;

	addr = part->blocks[block].offset +
			(HEADER_MAP_OFFSET + offset) * sizeof(u16);
	rc = mtd_write(part->mbd.mtd, addr, sizeof(del), &retlen,
		       (u_char *)&del);

	if (!rc && retlen != sizeof(del))
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "error writing '%s' at "
			"0x%lx\n", part->mbd.mtd->name, addr);
		goto err;
	}

	if (block == part->current_block)
		part->header_cache[offset + HEADER_MAP_OFFSET] = del;

	part->blocks[block].used_sectors--;

	if (!part->blocks[block].used_sectors &&
	    !part->blocks[block].free_sectors)
		rc = erase_block(part, block);

err:
	return rc;
}
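/*
 * Scan the cached header of 'block' for a SECTOR_FREE map entry, starting
 * just past the sectors already allocated, and return its index or -1.
 */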
static int find_free_sector(const struct partition *part, const struct block *block)
{
	int i, stop;

	i = stop = part->data_sectors_per_block - block->free_sectors;

	do {
		if (le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i])
				== SECTOR_FREE)
			return i;

		if (++i == part->data_sectors_per_block)
			i = 0;
	} while (i != stop);

	return -1;
}
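/*
 * Write one sector's payload: pick a writable unit and a free sector in it,
 * write the data, then update the on-flash map entry together with the
 * cached header and sector_map.
 */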
static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf, ulong *old_addr)
{
	struct partition *part = (struct partition *)dev;
	struct block *block;
	u_long addr;
	size_t retlen;
	u16 entry;
	int i, rc;

	if (part->current_block == -1 ||
	    !part->blocks[part->current_block].free_sectors) {

		rc = find_writable_block(part, old_addr);
		if (rc)
			goto err;
	}

	block = &part->blocks[part->current_block];

	i = find_free_sector(part, block);
	if (i < 0) {
		rc = -ENOSPC;
		goto err;
	}

	addr = (i + part->header_sectors_per_block) * SECTOR_SIZE +
		block->offset;
	rc = mtd_write(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
		       (u_char *)buf);

	if (!rc && retlen != SECTOR_SIZE)
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
				part->mbd.mtd->name, addr);
		goto err;
	}

	part->sector_map[sector] = addr;

	entry = cpu_to_le16(sector == 0 ? SECTOR_ZERO : sector);

	part->header_cache[i + HEADER_MAP_OFFSET] = entry;

	addr = block->offset + (HEADER_MAP_OFFSET + i) * sizeof(u16);
	rc = mtd_write(part->mbd.mtd, addr, sizeof(entry), &retlen,
		       (u_char *)&entry);

	if (!rc && retlen != sizeof(entry))
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
				part->mbd.mtd->name, addr);
		goto err;
	}

	block->used_sectors++;
	block->free_sectors--;

err:
	return rc;
}
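/*
 * Block-translation write entry point. An all-zero sector is not stored at
 * all, since reads of unmapped sectors return zeroes; otherwise the data is
 * written to a new location and any previous copy is marked deleted.
 */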
static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
{
	struct partition *part = (struct partition *)dev;
	u_long old_addr;
	int i;
	int rc = 0;

	pr_debug("rfd_ftl_writesect(sector=0x%lx)\n", sector);

	if (part->reserved_block == -1) {
		rc = -EACCES;
		goto err;
	}

	if (sector >= part->sector_count) {
		rc = -EIO;
		goto err;
	}

	old_addr = part->sector_map[sector];

	for (i = 0; i < SECTOR_SIZE; i++) {
		if (!buf[i])
			continue;

		rc = do_writesect(dev, sector, buf, &old_addr);
		if (rc)
			goto err;
		break;
	}

	if (i == SECTOR_SIZE)
		part->sector_map[sector] = -1;

	if (old_addr != -1)
		rc = mark_sector_deleted(part, old_addr);

err:
	return rc;
}
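/* Report a simple single-head, 63-sectors-per-track CHS geometry. */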
static int rfd_ftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
{
	struct partition *part = (struct partition *)dev;

	geo->heads = 1;
	geo->sectors = SECTORS_PER_TRACK;
	geo->cylinders = part->cylinders;

	return 0;
}
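/*
 * Probe callback: attach to NOR devices no larger than UINT_MAX bytes,
 * using the block_size module parameter or the erase unit size, and
 * register a block device when scan_header() finds valid RFD headers.
 * Read-only media and media with errors are exposed read-only.
 */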
static void rfd_ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
	struct partition *part;

	if (mtd->type != MTD_NORFLASH || mtd->size > UINT_MAX)
		return;

	part = kzalloc(sizeof(struct partition), GFP_KERNEL);
	if (!part)
		return;

	part->mbd.mtd = mtd;

	if (block_size)
		part->block_size = block_size;
	else {
		if (!mtd->erasesize) {
			printk(KERN_WARNING PREFIX "please provide block_size\n");
			goto out;
		}
		part->block_size = mtd->erasesize;
	}

	if (scan_header(part) == 0) {
		part->mbd.size = part->sector_count;
		part->mbd.tr = tr;
		part->mbd.devnum = -1;
		if (!(mtd->flags & MTD_WRITEABLE))
			part->mbd.readonly = 1;
		else if (part->errors) {
			printk(KERN_WARNING PREFIX "'%s': errors found, "
					"setting read-only\n", mtd->name);
			part->mbd.readonly = 1;
		}

		printk(KERN_INFO PREFIX "name: '%s' type: %d flags %x\n",
				mtd->name, mtd->type, mtd->flags);

		if (!add_mtd_blktrans_dev((void *)part))
			return;
	}
out:
	kfree(part);
}
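/* Teardown: log per-unit erase counts and free all per-partition state. */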
static void rfd_ftl_remove_dev(struct mtd_blktrans_dev *dev)
{
	struct partition *part = (struct partition *)dev;
	int i;

	for (i = 0; i < part->total_blocks; i++) {
		pr_debug("rfd_ftl_remove_dev:'%s': erase unit #%02d: %d erases\n",
			 part->mbd.mtd->name, i, part->blocks[i].erases);
	}

	del_mtd_blktrans_dev(dev);
	vfree(part->sector_map);
	kfree(part->header_cache);
	kfree(part->blocks);
}
static struct mtd_blktrans_ops rfd_ftl_tr = {
	.name		= "rfd",
	.major		= RFD_FTL_MAJOR,
	.part_bits	= PART_BITS,
	.blksize	= SECTOR_SIZE,

	.readsect	= rfd_ftl_readsect,
	.writesect	= rfd_ftl_writesect,
	.getgeo		= rfd_ftl_getgeo,
	.add_mtd	= rfd_ftl_add_mtd,
	.remove_dev	= rfd_ftl_remove_dev,
	.owner		= THIS_MODULE,
};
static int __init init_rfd_ftl(void)
{
	return register_mtd_blktrans(&rfd_ftl_tr);
}

static void __exit cleanup_rfd_ftl(void)
{
	deregister_mtd_blktrans(&rfd_ftl_tr);
}

module_init(init_rfd_ftl);
module_exit(cleanup_rfd_ftl);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sean Young <sean@mess.org>");
MODULE_DESCRIPTION("Support code for RFD Flash Translation Layer, "
		"used by General Software's Embedded BIOS");