set_memory_ro((unsigned long)header, header->pages);
        fp->bpf_func = (void *)ctx.target;
-       fp->jited = true;
+       fp->jited = 1;
 out:
        kfree(ctx.offsets);
        return;
 
 
        set_memory_ro((unsigned long)header, header->pages);
        prog->bpf_func = (void *)ctx.image;
-       prog->jited = true;
+       prog->jited = 1;
 out:
        kfree(ctx.offset);
 }
 
                bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);
 
        fp->bpf_func = (void *)ctx.target;
-       fp->jited = true;
+       fp->jited = 1;
 
 out:
        kfree(ctx.offsets);
 
                ((u64 *)image)[1] = local_paca->kernel_toc;
 #endif
                fp->bpf_func = (void *)image;
-               fp->jited = true;
+               fp->jited = 1;
        }
 out:
        kfree(addrs);
 
        if (jit.prg_buf) {
                set_memory_ro((unsigned long)header, header->pages);
                fp->bpf_func = (void *) jit.prg_buf;
-               fp->jited = true;
+               fp->jited = 1;
        }
 free_addrs:
        kfree(jit.addrs);
 
        if (image) {
                bpf_flush_icache(image, image + proglen);
                fp->bpf_func = (void *)image;
-               fp->jited = true;
+               fp->jited = 1;
        }
 out:
        kfree(addrs);
 
                bpf_flush_icache(header, image + proglen);
                set_memory_ro((unsigned long)header, header->pages);
                prog->bpf_func = (void *)image;
-               prog->jited = true;
+               prog->jited = 1;
        }
 out:
        kfree(addrs);
 
 
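Background for the filter.h hunk below (illustration only, names hypothetical, not part of the patch): kmemcheck tracks initialization at byte granularity, and a store to a single bit compiles to a read-modify-write of the containing word, so the load of the still-uninitialized neighbouring bits gets reported even though it is harmless. A minimal sketch of the pattern kmemcheck complains about:

	#include <linux/slab.h>

	struct example {
		unsigned int a:1;	/* 'a' and 'b' share one storage unit */
		unsigned int b:1;
	};

	static void sketch(void)
	{
		struct example *e = kmalloc(sizeof(*e), GFP_KERNEL);

		if (!e)
			return;
		/* Storing 'a' loads the whole word first; the bits backing
		 * 'b' are still uninitialized, so kmemcheck flags the read. */
		e->a = 1;
		kfree(e);
	}
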
 struct bpf_prog {
        u16                     pages;          /* Number of allocated pages */
-       bool                    jited;          /* Is our filter JIT'ed? */
-       bool                    gpl_compatible; /* Is our filter GPL compatible? */
+       kmemcheck_bitfield_begin(meta);
+       u16                     jited:1,        /* Is our filter JIT'ed? */
+                               gpl_compatible:1; /* Is filter GPL compatible? */
+       kmemcheck_bitfield_end(meta);
        u32                     len;            /* Number of filter blocks */
        enum bpf_prog_type      type;           /* Type of BPF program */
        struct bpf_prog_aux     *aux;           /* Auxiliary fields */
 
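For reference, the annotation helpers pair up as follows (hypothetical struct, not part of the patch): kmemcheck_bitfield_begin()/kmemcheck_bitfield_end() bracket the bitfield region inside the structure definition, and kmemcheck_annotate_bitfield() marks that region as initialized right after the object is allocated, which is what the two core.c hunks below do for struct bpf_prog:

	#include <linux/kmemcheck.h>
	#include <linux/slab.h>

	struct example {
		kmemcheck_bitfield_begin(flags);
		unsigned int a:1,
			     b:1;
		kmemcheck_bitfield_end(flags);
	};

	static struct example *example_alloc(void)
	{
		struct example *e = kmalloc(sizeof(*e), GFP_KERNEL);

		if (e)
			/* Tell kmemcheck the bracketed region may be read
			 * back before every bit in it has been written. */
			kmemcheck_annotate_bitfield(e, flags);
		return e;
	}
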
        if (fp == NULL)
                return NULL;
 
+       kmemcheck_annotate_bitfield(fp, meta);
+
        aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
        if (aux == NULL) {
                vfree(fp);
 
        fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
        if (fp != NULL) {
+               kmemcheck_annotate_bitfield(fp, meta);
+
                memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
                fp->pages = size / PAGE_SIZE;
 
 
                goto free_prog;
 
        prog->orig_prog = NULL;
-       prog->jited = false;
+       prog->jited = 0;
 
        atomic_set(&prog->aux->refcnt, 1);
-       prog->gpl_compatible = is_gpl;
+       prog->gpl_compatible = is_gpl ? 1 : 0;
 
        /* find program type: socket_filter vs tracing_filter */
        err = find_prog_type(type, prog);
 
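A side note on the syscall.c hunk above: jited and gpl_compatible are now one-bit fields, so an assignment keeps only the low bit of the right-hand side; is_gpl is already a bool, but the explicit "? 1 : 0" keeps the normalization obvious at the call site. A small illustration with a hypothetical struct:

	#include <linux/types.h>

	struct example {
		u16 flag:1;
	};

	static void sketch(int val)
	{
		struct example x;

		x.flag = 2;		/* only bit 0 is stored: x.flag == 0 */
		x.flag = val ? 1 : 0;	/* explicit 0/1 is unambiguous */
	}
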
        int err;
 
        fp->bpf_func = NULL;
-       fp->jited = false;
+       fp->jited = 0;
 
        err = bpf_check_classic(fp->insns, fp->len);
        if (err) {