        void *desc_kaddr, *bitmap_kaddr;
        unsigned long group, maxgroup, ngroups;
        unsigned long group_offset, maxgroup_offset;
-       unsigned long n, entries_per_group, groups_per_desc_block;
+       unsigned long n, entries_per_group;
        unsigned long i, j;
        spinlock_t *lock;
        int pos, ret;

        ngroups = nilfs_palloc_groups_count(inode);
        maxgroup = ngroups - 1;
        group = nilfs_palloc_group(inode, req->pr_entry_nr, &group_offset);
        entries_per_group = nilfs_palloc_entries_per_group(inode);
-       groups_per_desc_block = nilfs_palloc_groups_per_desc_block(inode);
 
        for (i = 0; i < ngroups; i += n) {
                if (group >= ngroups) {
 
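The hunk above, from nilfs_palloc_prepare_alloc_entry() in fs/nilfs2/alloc.c, drops groups_per_desc_block: it was recomputed on every call but never read afterwards, exactly the pattern gcc's -Wunused-but-set-variable flags. For context, the surviving nilfs_palloc_group() call splits an entry number into a group index plus an offset within that group. Below is a minimal userspace sketch of that mapping, assuming the usual div/mod layout; the real code derives entries_per_group from the filesystem geometry rather than taking it as a parameter.

#include <stdio.h>

/*
 * Sketch of the entry-number -> (group, offset) split performed by
 * nilfs_palloc_group(). The div/mod mapping is an assumption about
 * its behavior, and entries_per_group is a plain parameter here.
 */
static unsigned long palloc_group(unsigned long entry_nr,
                                  unsigned long entries_per_group,
                                  unsigned long *offset)
{
        *offset = entry_nr % entries_per_group;
        return entry_nr / entries_per_group;
}

int main(void)
{
        unsigned long offset;
        unsigned long group = palloc_group(10500, 4096, &offset);

        /* entry 10500 with 4096 entries per group -> group 2, offset 2308 */
        printf("group %lu, offset %lu\n", group, offset);
        return 0;
}
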
                              int level, __u64 *keyp, __u64 *ptrp)
 {
        struct nilfs_btree_node *node, *right;
-       __u64 newkey;
-       __u64 newptr;
        int nchildren, n, move, ncblk;
 
        node = nilfs_btree_get_nonroot_node(path, level);

        if (!buffer_dirty(path[level].bp_sib_bh))
                mark_buffer_dirty(path[level].bp_sib_bh);
 
-       newkey = nilfs_btree_node_get_key(right, 0);
-       newptr = path[level].bp_newreq.bpr_ptr;
-
        if (move) {
                path[level].bp_index -= nilfs_btree_node_get_nchildren(node);
                nilfs_btree_node_insert(right, path[level].bp_index,
                                        *keyp, *ptrp, ncblk);
 
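This hunk, from nilfs_btree_split() in fs/nilfs2/btree.c, removes newkey and newptr: both were loaded (from the right sibling's first key and from the allocation request) but never consumed, so the split needs only move and the rebased bp_index. The sketch below, under illustrative names, shows that rebase with plain arrays; the kernel does the same arithmetic on (key, ptr) pairs held in buffer heads.

#include <stdio.h>
#include <string.h>

#define NODE_MAX 8

/* Toy node: a sorted key array standing in for an on-disk B-tree node. */
struct toy_node {
        unsigned long keys[NODE_MAX];
        int nchildren;
};

/*
 * Move the last n keys into the empty right sibling, then rebase the
 * pending insertion index if it now belongs in the right node, the
 * same adjustment the "if (move)" branch above keeps.
 */
static struct toy_node *split_node(struct toy_node *left,
                                   struct toy_node *right,
                                   int n, int *index)
{
        memcpy(right->keys, &left->keys[left->nchildren - n],
               n * sizeof(left->keys[0]));
        right->nchildren = n;
        left->nchildren -= n;

        if (*index > left->nchildren) {         /* insertion moved right */
                *index -= left->nchildren;
                return right;
        }
        return left;
}

int main(void)
{
        struct toy_node left = { { 10, 20, 30, 40, 50, 60 }, 6 };
        struct toy_node right = { { 0 }, 0 };
        int index = 5;  /* slot the new key would have taken pre-split */

        struct toy_node *target = split_node(&left, &right, 3, &index);

        printf("insert into %s node at slot %d\n",
               target == &right ? "right" : "left", index);
        return 0;
}
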
 int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req)
 {
        struct nilfs_dat_entry *entry;
-       __u64 start;
        sector_t blocknr;
        void *kaddr;
        int ret;

        ret = nilfs_dat_prepare_entry(dat, req, 0);
        if (ret < 0)
                return ret;

        kaddr = kmap_atomic(req->pr_entry_bh->b_page);
        entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
                                             req->pr_entry_bh, kaddr);
-       start = le64_to_cpu(entry->de_start);
        blocknr = le64_to_cpu(entry->de_blocknr);
        kunmap_atomic(kaddr);
 
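In nilfs_dat_prepare_end() (fs/nilfs2/dat.c), the entry's de_start field was decoded into start and then ignored; only de_blocknr feeds the checks that follow, so the extra le64_to_cpu() was pure overhead inside the atomic kmap. For readers outside the kernel: le64_to_cpu() turns the little-endian on-disk representation into host byte order. A self-contained userspace stand-in, with an illustrative byte buffer rather than the real struct nilfs_dat_entry:

#include <stdint.h>
#include <stdio.h>

/*
 * Userspace stand-in for le64_to_cpu(): decode a little-endian 64-bit
 * field byte by byte, so the result is correct on any host endianness.
 */
static uint64_t le64_decode(const unsigned char *p)
{
        uint64_t v = 0;
        int i;

        for (i = 7; i >= 0; i--)
                v = (v << 8) | p[i];
        return v;
}

int main(void)
{
        /* 0x0102030405060708 stored little-endian, as on disk */
        unsigned char raw[8] = { 0x08, 0x07, 0x06, 0x05,
                                 0x04, 0x03, 0x02, 0x01 };

        printf("decoded: 0x%llx\n",
               (unsigned long long)le64_decode(raw));
        return 0;
}
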
 
 static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *sci)
 {
        int mode = 0;
-       int err;
 
        spin_lock(&sci->sc_state_lock);
        mode = (sci->sc_flush_request & FLUSH_DAT_BIT) ?
                SC_FLUSH_DAT : SC_FLUSH_FILE;
        spin_unlock(&sci->sc_state_lock);
 
        if (mode) {
-               err = nilfs_segctor_do_construct(sci, mode);
+               nilfs_segctor_do_construct(sci, mode);
 
                spin_lock(&sci->sc_state_lock);
        sci->sc_flush_request &= (mode == SC_FLUSH_FILE) ?
                ~FLUSH_FILE_BIT : ~FLUSH_DAT_BIT;
 
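Here, in nilfs_segctor_do_immediate_flush() (fs/nilfs2/segment.c), the return value of nilfs_segctor_do_construct() was stored in err but never examined, so the call is now made for its side effects alone. The surrounding shape is the part worth keeping in mind: pick the flush mode under sc_state_lock, run the construction unlocked, then retake the lock and clear only the serviced request bit. A userspace sketch of that snapshot-work-clear pattern, with illustrative names and a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stdio.h>

#define FLUSH_FILE_BIT  (1 << 0)
#define FLUSH_DAT_BIT   (1 << 1)

struct flusher {
        pthread_mutex_t lock;
        unsigned int flush_request;
};

/*
 * Snapshot the request flags under the lock, do the (possibly slow)
 * work unlocked, then drop only the bit that was serviced. DAT
 * requests win, mirroring the ternary in the hunk above.
 */
static void do_immediate_flush(struct flusher *f)
{
        unsigned int mode;

        pthread_mutex_lock(&f->lock);
        mode = (f->flush_request & FLUSH_DAT_BIT) ?
                FLUSH_DAT_BIT : (f->flush_request & FLUSH_FILE_BIT);
        pthread_mutex_unlock(&f->lock);

        if (mode) {
                printf("flushing, mode %#x\n", mode);   /* the real work */

                pthread_mutex_lock(&f->lock);
                f->flush_request &= ~mode;
                pthread_mutex_unlock(&f->lock);
        }
}

int main(void)
{
        struct flusher f = { PTHREAD_MUTEX_INITIALIZER,
                             FLUSH_FILE_BIT | FLUSH_DAT_BIT };

        do_immediate_flush(&f); /* services the DAT request first */
        do_immediate_flush(&f); /* then the file request */
        return 0;
}
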
        size_t susz = NILFS_MDT(sufile)->mi_entry_size;
        __u64 segnum, maxsegnum, last_alloc;
        void *kaddr;
-       unsigned long nsegments, ncleansegs, nsus, cnt;
+       unsigned long nsegments, nsus, cnt;
        int ret, j;
 
        down_write(&NILFS_MDT(sufile)->mi_sem);

        ret = nilfs_sufile_get_header_block(sufile, &header_bh);
        if (ret < 0)
                goto out_sem;

        kaddr = kmap_atomic(header_bh->b_page);
        header = kaddr + bh_offset(header_bh);
-       ncleansegs = le64_to_cpu(header->sh_ncleansegs);
        last_alloc = le64_to_cpu(header->sh_last_alloc);
        kunmap_atomic(kaddr);
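Finally, nilfs_sufile_alloc() in fs/nilfs2/sufile.c read the on-disk clean-segment counter into ncleansegs without ever using it; the allocation scan is seeded by sh_last_alloc alone. A toy version of that round-robin scan, with is_clean() standing in for the real per-segment usage records read from the sufile:

#include <stdio.h>

/* Fake usage data; the kernel reads this from on-disk usage entries. */
static int is_clean(unsigned long segnum)
{
        return segnum % 5 == 0;
}

/*
 * Start just past the last allocated segment and wrap around once,
 * returning the first clean segment found, or -1 if none is left.
 */
static long alloc_segment(unsigned long nsegments, unsigned long last_alloc)
{
        unsigned long i, segnum;

        for (i = 0; i < nsegments; i++) {
                segnum = (last_alloc + 1 + i) % nsegments;
                if (is_clean(segnum))
                        return (long)segnum;
        }
        return -1;
}

int main(void)
{
        /* 32 segments, last allocation hit segment 13 -> picks 15 */
        printf("next segment: %ld\n", alloc_segment(32, 13));
        return 0;
}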