Merge tag 'kgdb-5.4-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/danielt/linux
author     Linus Torvalds <torvalds@linux-foundation.org>    Thu, 3 Oct 2019 18:17:57 +0000 (11:17 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>    Thu, 3 Oct 2019 18:17:57 +0000 (11:17 -0700)
Pull kgdb update from Daniel Thompson:
 "This is just a single patch adding a new reviewer for kgdb. New
  reviewers will be a big help, so I decided to consider this to be a
  fix!

  I'm looking forward to working more closely with Doug"

* tag 'kgdb-5.4-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/danielt/linux:
  MAINTAINERS: kgdb: Add myself as a reviewer for kgdb/kdb

MAINTAINERS
drivers/char/random.c
drivers/clocksource/timer-of.c
fs/erofs/data.c
fs/erofs/super.c
fs/erofs/zdata.c
kernel/sched/membarrier.c
kernel/time/tick-broadcast-hrtimer.c

diff --git a/MAINTAINERS b/MAINTAINERS
index acdd996..55199ef 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6112,7 +6112,10 @@ M:       Gao Xiang <gaoxiang25@huawei.com>
 M:     Chao Yu <yuchao0@huawei.com>
 L:     linux-erofs@lists.ozlabs.org
 S:     Maintained
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs.git
+F:     Documentation/filesystems/erofs.txt
 F:     fs/erofs/
+F:     include/trace/events/erofs.h
 
 ERRSEQ ERROR TRACKING INFRASTRUCTURE
 M:     Jeff Layton <jlayton@kernel.org>
diff --git a/drivers/char/random.c b/drivers/char/random.c
index c2f7de9..de434fe 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -2520,4 +2520,4 @@ void add_bootloader_randomness(const void *buf, unsigned int size)
        else
                add_device_randomness(buf, size);
 }
-EXPORT_SYMBOL_GPL(add_bootloader_randomness);
\ No newline at end of file
+EXPORT_SYMBOL_GPL(add_bootloader_randomness);
diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c
index d8c2bd4..11ff701 100644
--- a/drivers/clocksource/timer-of.c
+++ b/drivers/clocksource/timer-of.c
@@ -25,7 +25,9 @@ static __init void timer_of_irq_exit(struct of_timer_irq *of_irq)
 
        struct clock_event_device *clkevt = &to->clkevt;
 
-       of_irq->percpu ? free_percpu_irq(of_irq->irq, clkevt) :
+       if (of_irq->percpu)
+               free_percpu_irq(of_irq->irq, clkevt);
+       else
                free_irq(of_irq->irq, clkevt);
 }
 
diff --git a/fs/erofs/data.c b/fs/erofs/data.c
index 8a9fcbd..fc3a8d8 100644
--- a/fs/erofs/data.c
+++ b/fs/erofs/data.c
@@ -34,11 +34,15 @@ static void erofs_readendio(struct bio *bio)
 
 struct page *erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr)
 {
-       struct inode *const bd_inode = sb->s_bdev->bd_inode;
-       struct address_space *const mapping = bd_inode->i_mapping;
+       struct address_space *const mapping = sb->s_bdev->bd_inode->i_mapping;
+       struct page *page;
 
-       return read_cache_page_gfp(mapping, blkaddr,
+       page = read_cache_page_gfp(mapping, blkaddr,
                                   mapping_gfp_constraint(mapping, ~__GFP_FS));
+       /* should already be PageUptodate */
+       if (!IS_ERR(page))
+               lock_page(page);
+       return page;
 }
 
 static int erofs_map_blocks_flatmode(struct inode *inode,
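
With this change, erofs_get_meta_page() hands back a locked, up-to-date page (or an
ERR_PTR on failure), so callers are expected to unlock and release it themselves. A
minimal caller sketch under that assumption follows; the helper name
erofs_use_meta_block() is illustrative and not part of the patch, and the erofs-internal
declarations (erofs_blk_t, erofs_get_meta_page) are assumed to come from the fs/erofs
headers.

        /* Illustrative caller; not part of this series. */
        #include <linux/err.h>
        #include <linux/mm.h>
        #include <linux/pagemap.h>

        static int erofs_use_meta_block(struct super_block *sb, erofs_blk_t blkaddr)
        {
                struct page *page = erofs_get_meta_page(sb, blkaddr);

                if (IS_ERR(page))
                        return PTR_ERR(page);   /* -EIO, -ENOMEM, ... */

                /* The page is locked and already uptodate here; read metadata. */

                unlock_page(page);              /* drop the lock taken above */
                put_page(page);                 /* drop the page-cache reference */
                return 0;
        }
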
diff --git a/fs/erofs/super.c b/fs/erofs/super.c
index caf9a95..0e36949 100644
--- a/fs/erofs/super.c
+++ b/fs/erofs/super.c
@@ -105,9 +105,9 @@ static int erofs_read_superblock(struct super_block *sb)
        int ret;
 
        page = read_mapping_page(sb->s_bdev->bd_inode->i_mapping, 0, NULL);
-       if (!page) {
+       if (IS_ERR(page)) {
                erofs_err(sb, "cannot read erofs superblock");
-               return -EIO;
+               return PTR_ERR(page);
        }
 
        sbi = EROFS_SB(sb);
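
The underlying bug here was treating read_mapping_page() as if it returned NULL on
failure; like most page-cache reads, it returns an ERR_PTR-encoded error instead, which
the caller should test with IS_ERR() and propagate via PTR_ERR(). A generic sketch of
that convention, with a made-up helper name for illustration:

        #include <linux/err.h>
        #include <linux/pagemap.h>

        /* Illustrative only: read page 0 of a mapping and propagate real errors. */
        static int read_first_page(struct address_space *mapping, struct page **pagep)
        {
                struct page *page = read_mapping_page(mapping, 0, NULL);

                if (IS_ERR(page))               /* never NULL on failure */
                        return PTR_ERR(page);   /* e.g. -EIO or -ENOMEM */

                *pagep = page;                  /* caller must put_page() when done */
                return 0;
        }
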
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 96e34c9..fad80c9 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -575,7 +575,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
        struct erofs_map_blocks *const map = &fe->map;
        struct z_erofs_collector *const clt = &fe->clt;
        const loff_t offset = page_offset(page);
-       bool tight = (clt->mode >= COLLECT_PRIMARY_HOOKED);
+       bool tight = true;
 
        enum z_erofs_cache_alloctype cache_strategy;
        enum z_erofs_page_type page_type;
@@ -628,8 +628,16 @@ restart_now:
        preload_compressed_pages(clt, MNGD_MAPPING(sbi),
                                 cache_strategy, pagepool);
 
-       tight &= (clt->mode >= COLLECT_PRIMARY_HOOKED);
 hitted:
+       /*
+        * Ensure the current partial page belongs to this submit chain rather
+        * than other concurrent submit chains or the noio(bypass) chain since
+        * those chains are handled asynchronously thus the page cannot be used
+        * for inplace I/O or pagevec (should be processed in strict order.)
+        */
+       tight &= (clt->mode >= COLLECT_PRIMARY_HOOKED &&
+                 clt->mode != COLLECT_PRIMARY_FOLLOWED_NOINPLACE);
+
        cur = end - min_t(unsigned int, offset + end - map->m_la, end);
        if (!(map->m_flags & EROFS_MAP_MAPPED)) {
                zero_user_segment(page, cur, end);
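
The new condition can be modelled in isolation to see which collection modes keep a page
"tight" (usable for inplace I/O and pagevec within the current submit chain). The enum
below reuses the two mode names visible in this hunk, but its other members and their
ordering are assumptions made only for this sketch:

        #include <stdbool.h>

        /* Assumed ordering; only the HOOKED and FOLLOWED_NOINPLACE names appear above. */
        enum z_erofs_collectmode {
                COLLECT_SECONDARY,
                COLLECT_PRIMARY,
                COLLECT_PRIMARY_HOOKED,
                COLLECT_PRIMARY_FOLLOWED_NOINPLACE,
                COLLECT_PRIMARY_FOLLOWED,
        };

        static bool keep_tight(bool tight, enum z_erofs_collectmode mode)
        {
                /*
                 * Stay tight only when the page is chained into this submit chain
                 * (HOOKED or better) and is not routed through the no-inplace bypass,
                 * which is handled asynchronously.
                 */
                return tight && mode >= COLLECT_PRIMARY_HOOKED &&
                       mode != COLLECT_PRIMARY_FOLLOWED_NOINPLACE;
        }
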
diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
index a39bed2..168479a 100644
--- a/kernel/sched/membarrier.c
+++ b/kernel/sched/membarrier.c
@@ -174,7 +174,6 @@ static int membarrier_private_expedited(int flags)
                 */
                if (cpu == raw_smp_processor_id())
                        continue;
-               rcu_read_lock();
                p = rcu_dereference(cpu_rq(cpu)->curr);
                if (p && p->mm == mm)
                        __cpumask_set_cpu(cpu, tmpmask);
diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c
index c1f5bb5..b5a65e2 100644
--- a/kernel/time/tick-broadcast-hrtimer.c
+++ b/kernel/time/tick-broadcast-hrtimer.c
@@ -42,39 +42,39 @@ static int bc_shutdown(struct clock_event_device *evt)
  */
 static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
 {
-       int bc_moved;
        /*
-        * We try to cancel the timer first. If the callback is on
-        * flight on some other cpu then we let it handle it. If we
-        * were able to cancel the timer nothing can rearm it as we
-        * own broadcast_lock.
+        * This is called either from enter/exit idle code or from the
+        * broadcast handler. In all cases tick_broadcast_lock is held.
         *
-        * However we can also be called from the event handler of
-        * ce_broadcast_hrtimer itself when it expires. We cannot
-        * restart the timer because we are in the callback, but we
-        * can set the expiry time and let the callback return
-        * HRTIMER_RESTART.
+        * hrtimer_cancel() cannot be called here, neither from the
+        * broadcast handler nor from the enter/exit idle code. The idle
+        * code can run into the problem described in bc_shutdown() and the
+        * broadcast handler cannot wait for itself to complete for obvious
+        * reasons.
         *
-        * Since we are in the idle loop at this point and because
-        * hrtimer_{start/cancel} functions call into tracing,
-        * calls to these functions must be bound within RCU_NONIDLE.
+        * Each caller tries to arm the hrtimer on its own CPU, but if the
+        * hrtimer callback function is currently running, then
+        * hrtimer_start() cannot move it and the timer stays on the CPU on
+        * which it is assigned at the moment.
+        *
+        * As this can be called from idle code, the hrtimer_start()
+        * invocation has to be wrapped with RCU_NONIDLE() as
+        * hrtimer_start() can call into tracing.
         */
-       RCU_NONIDLE(
-               {
-                       bc_moved = hrtimer_try_to_cancel(&bctimer) >= 0;
-                       if (bc_moved) {
-                               hrtimer_start(&bctimer, expires,
-                                             HRTIMER_MODE_ABS_PINNED_HARD);
-                       }
-               }
-       );
-
-       if (bc_moved) {
-               /* Bind the "device" to the cpu */
-               bc->bound_on = smp_processor_id();
-       } else if (bc->bound_on == smp_processor_id()) {
-               hrtimer_set_expires(&bctimer, expires);
-       }
+       RCU_NONIDLE( {
+               hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED_HARD);
+               /*
+                * The core tick broadcast mode expects bc->bound_on to be set
+                * correctly to prevent a CPU which has the broadcast hrtimer
+                * armed from going deep idle.
+                *
+                * As tick_broadcast_lock is held, nothing can change the cpu
+                * base which was just established in hrtimer_start() above. So
+                * the below access is safe even without holding the hrtimer
+                * base lock.
+                */
+               bc->bound_on = bctimer.base->cpu_base->cpu;
+       } );
        return 0;
 }
 
@@ -100,10 +100,6 @@ static enum hrtimer_restart bc_handler(struct hrtimer *t)
 {
        ce_broadcast_hrtimer.event_handler(&ce_broadcast_hrtimer);
 
-       if (clockevent_state_oneshot(&ce_broadcast_hrtimer))
-               if (ce_broadcast_hrtimer.next_event != KTIME_MAX)
-                       return HRTIMER_RESTART;
-
        return HRTIMER_NORESTART;
 }
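
The rewritten bc_set_next() relies on two hrtimer properties spelled out in its new
comment: hrtimer_start() may be called even while the timer's callback is running (it
reprograms the expiry but cannot migrate the running timer off its CPU), whereas
hrtimer_cancel() waits for a running callback and therefore can never be used from the
callback path. A stripped-down sketch of that re-arm pattern, independent of the
broadcast code; demo_timer and demo_rearm() are illustrative names, not kernel APIs:

        #include <linux/hrtimer.h>

        /* Illustrative only: re-arm a pinned hrtimer from code that may itself
         * be running inside the timer's callback. */
        static struct hrtimer demo_timer;

        static void demo_rearm(ktime_t expires)
        {
                /*
                 * Safe even if demo_timer's callback is executing right now:
                 * hrtimer_start() just (re)programs the expiry. hrtimer_cancel()
                 * would be wrong here because it waits for a running callback,
                 * i.e. the callback would end up waiting for itself.
                 */
                hrtimer_start(&demo_timer, expires, HRTIMER_MODE_ABS_PINNED_HARD);
        }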