+static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob,
+			  unsigned int flags)
+{
+	WARN_ON_ONCE(!dm_tio_is_normal(&io->tio));
+
+	/* don't poll if the mapped io is done */
+	if (atomic_read(&io->io_count) > 1)
+		bio_poll(&io->tio.clone, iob, flags);
+
+	/* bio_poll holds the last reference */
+	return atomic_read(&io->io_count) == 1;
+}
+
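+/*
+ * Poll every dm_io attached to @bio via ->bi_private while
+ * REQ_DM_POLL_LIST is set; returns 1 only once all of them have
+ * completed.
+ */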
+static int dm_poll_bio(struct bio *bio, struct io_comp_batch *iob,
+		       unsigned int flags)
+{
+	struct hlist_head *head = dm_get_bio_hlist_head(bio);
+	struct hlist_head tmp = HLIST_HEAD_INIT;
+	struct hlist_node *next;
+	struct dm_io *io;
+
+	/* Only poll a normal bio that was marked with REQ_DM_POLL_LIST */
+	if (!(bio->bi_opf & REQ_DM_POLL_LIST))
+		return 0;
+
+	WARN_ON_ONCE(hlist_empty(head));
+
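+	/* Detach the dm_io list from the bio so it can be walked locally */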
+	hlist_move_list(head, &tmp);
+
+	/*
+	 * Restore .bi_private before possibly completing dm_io.
+	 *
+	 * bio_poll() is only possible once @bio has been completely
+	 * submitted via submit_bio_noacct()'s depth-first submission.
+	 * So there is no dm_queue_poll_io() race associated with
+	 * clearing REQ_DM_POLL_LIST here.
+	 */
+	bio->bi_opf &= ~REQ_DM_POLL_LIST;
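+	/* ->data holds the ->bi_private value saved when the dm_io was queued */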
+	bio->bi_private = hlist_entry(tmp.first, struct dm_io, node)->data;
+
+	hlist_for_each_entry_safe(io, next, &tmp, node) {
+		if (dm_poll_dm_io(io, iob, flags)) {
+			hlist_del_init(&io->node);
+			/*
+			 * clone_endio() has already occurred, so passing
+			 * error as 0 here doesn't override io->status
+			 */
+			dm_io_dec_pending(io, 0);
+		}
+	}
+
+	/* Not done? */
+	if (!hlist_empty(&tmp)) {
+		bio->bi_opf |= REQ_DM_POLL_LIST;
+		/* Reset bio->bi_private to dm_io list head */
+		hlist_move_list(&tmp, head);
+		return 0;
+	}
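+	/* All dm_io's attached to this bio have completed */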
+	return 1;
+}
+