 /* protects napi_hash addition/deletion and napi_gen_id */
 static DEFINE_SPINLOCK(napi_hash_lock);
 
-static unsigned int napi_gen_id;
+static unsigned int napi_gen_id = NR_CPUS;
 static DEFINE_HASHTABLE(napi_hash, 8);
 
 static seqcount_t devnet_rename_seq;
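
Starting the generator at NR_CPUS means the first id handed out by
napi_hash_add() below is NR_CPUS + 1, leaving 0 for "unset" and
1..NR_CPUS for the (cpu + 1) values that netdev_pick_tx() stores in
skb->sender_cpu. A minimal user-space sketch of the resulting value
space (the NR_CPUS value and the classify() helper are illustrative,
not kernel code):

#include <stdio.h>

#define NR_CPUS 8	/* assumed build-time CPU limit for the sketch */

/* Illustrative decoder for the shared skb field:
 *   0               -> unset
 *   1 .. NR_CPUS    -> sender_cpu, stored as cpu + 1
 *   NR_CPUS + 1 ... -> napi_id
 */
static const char *classify(unsigned int v)
{
	if (v == 0)
		return "unset";
	if (v <= NR_CPUS)
		return "sender_cpu (cpu + 1)";
	return "napi_id";
}

int main(void)
{
	unsigned int samples[] = { 0, 1, NR_CPUS, NR_CPUS + 1, 4242 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%u -> %s\n", samples[i], classify(samples[i]));
	return 0;
}
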
        int queue_index = 0;
 
 #ifdef CONFIG_XPS
-       if (skb->sender_cpu == 0)
+       u32 sender_cpu = skb->sender_cpu - 1;
+
+       if (sender_cpu >= (u32)NR_CPUS)
                skb->sender_cpu = raw_smp_processor_id() + 1;
 #endif
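
With skb->sender_cpu and skb->napi_id sharing the same storage, the
single unsigned comparison above handles three cases at once: an unset
field (0 underflows to UINT_MAX), a leftover napi_id (always greater
than NR_CPUS after this patch), and a genuine cpu + 1 value, which is
the only case left untouched. A hedged stand-alone model of that check
(the NR_CPUS value and the fixup_sender_cpu() wrapper are illustrative):

#include <stdio.h>

#define NR_CPUS 8	/* assumed CPU limit for the sketch */

/* Mimics the test in netdev_pick_tx(): anything that is not a valid
 * (cpu + 1) value is replaced by the current cpu + 1.
 */
static unsigned int fixup_sender_cpu(unsigned int field, unsigned int this_cpu)
{
	unsigned int sender_cpu = field - 1;	/* 0 wraps to UINT_MAX */

	if (sender_cpu >= (unsigned int)NR_CPUS)
		field = this_cpu + 1;
	return field;
}

int main(void)
{
	unsigned int inputs[] = { 0, 4, 1000 };	/* unset, cpu 3, napi_id */
	unsigned int i;

	for (i = 0; i < 3; i++)
		printf("%u -> %u\n", inputs[i], fixup_sender_cpu(inputs[i], 2));
	return 0;
}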
 
 
 void napi_hash_add(struct napi_struct *napi)
 {
-       if (!test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) {
+       if (test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
+               return;
 
-               spin_lock(&napi_hash_lock);
+       spin_lock(&napi_hash_lock);
 
-               /* 0 is not a valid id, we also skip an id that is taken
-                * we expect both events to be extremely rare
-                */
-               napi->napi_id = 0;
-               while (!napi->napi_id) {
-                       napi->napi_id = ++napi_gen_id;
-                       if (napi_by_id(napi->napi_id))
-                               napi->napi_id = 0;
-               }
+       /* 0..NR_CPUS+1 range is reserved for sender_cpu use */
+       do {
+               if (unlikely(++napi_gen_id < NR_CPUS + 1))
+                       napi_gen_id = NR_CPUS + 1;
+       } while (napi_by_id(napi_gen_id));
+       napi->napi_id = napi_gen_id;
 
-               hlist_add_head_rcu(&napi->napi_hash_node,
-                       &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
+       hlist_add_head_rcu(&napi->napi_hash_node,
+                          &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
 
-               spin_unlock(&napi_hash_lock);
-       }
+       spin_unlock(&napi_hash_lock);
 }
 EXPORT_SYMBOL_GPL(napi_hash_add);
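
The allocation loop above now skips the whole reserved low range when
napi_gen_id wraps, instead of only avoiding 0 as before. A small
stand-alone model of that loop (the toy table, the napi_by_id() stub
and the starting value are illustrative, chosen so the wrap is visible
immediately):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS  8	/* assumed CPU limit for the sketch */
#define ID_SPACE 64	/* toy id space standing in for the hash table */

static bool id_in_use[ID_SPACE];

/* Start just below the wrap point so the reserved-range skip shows up. */
static unsigned int napi_gen_id = UINT_MAX - 1;

/* Toy stand-in for napi_by_id(): true if the id is already taken. */
static bool napi_by_id(unsigned int id)
{
	return id_in_use[id % ID_SPACE];
}

/* Mirrors the loop in napi_hash_add(): ids 0..NR_CPUS are never handed
 * out, and ids that are already in use are skipped.
 */
static unsigned int alloc_napi_id(void)
{
	do {
		if (++napi_gen_id < NR_CPUS + 1)
			napi_gen_id = NR_CPUS + 1;
	} while (napi_by_id(napi_gen_id));

	id_in_use[napi_gen_id % ID_SPACE] = true;
	return napi_gen_id;
}

int main(void)
{
	int i;

	/* Prints UINT_MAX, then NR_CPUS + 1 and up: after the counter
	 * wraps, the 0..NR_CPUS range reserved for sender_cpu is skipped.
	 */
	for (i = 0; i < 3; i++)
		printf("allocated napi_id %u\n", alloc_napi_id());
	return 0;
}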