Merge tag 'net-5.10-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
authorLinus Torvalds <torvalds@linux-foundation.org>
Thu, 12 Nov 2020 22:02:04 +0000 (14:02 -0800)
committerLinus Torvalds <torvalds@linux-foundation.org>
Thu, 12 Nov 2020 22:02:04 +0000 (14:02 -0800)
Pull networking fixes from Jakub Kicinski:
 "Current release - regressions:

   - arm64: dts: fsl-ls1028a-kontron-sl28: specify in-band mode for
     ENETC

  Current release - bugs in new features:

   - mptcp: provide rmem[0] limit offset to fix oops

  Previous release - regressions:

   - IPv6: Set SIT tunnel hard_header_len to zero to fix path MTU
     calculations

   - lan743x: correctly handle chips with internal PHY

   - bpf: Don't rely on GCC __attribute__((optimize)) to disable GCSE

   - mlx5e: Fix VXLAN port table synchronization after function reload

  Previous release - always broken:

   - bpf: Zero-fill re-used per-cpu map element

   - fix out-of-order UDP packets when forwarding with UDP GSO fraglists
     turned on:
       - fix UDP header access on Fast/frag0 UDP GRO
       - fix IP header access and skb lookup on Fast/frag0 UDP GRO

   - ethtool: netlink: add missing netdev_features_change() call

   - net: Update window_clamp if SOCK_RCVBUF is set

   - igc: Fix returning wrong statistics

   - ch_ktls: fix multiple leaks and corner cases in Chelsio TLS offload

   - tunnels: Fix off-by-one in lower MTU bounds for ICMP/ICMPv6 replies

   - r8169: disable hw csum for short packets on all chip versions

   - vrf: Fix fast path output packet handling with async Netfilter
     rules"

* tag 'net-5.10-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (65 commits)
  lan743x: fix use of uninitialized variable
  net: udp: fix IP header access and skb lookup on Fast/frag0 UDP GRO
  net: udp: fix UDP header access on Fast/frag0 UDP GRO
  devlink: Avoid overwriting port attributes of registered port
  vrf: Fix fast path output packet handling with async Netfilter rules
  cosa: Add missing kfree in error path of cosa_write
  net: switch to the kernel.org patchwork instance
  ch_ktls: stop the txq if reaches threshold
  ch_ktls: tcb update fails sometimes
  ch_ktls/cxgb4: handle partial tag alone SKBs
  ch_ktls: don't free skb before sending FIN
  ch_ktls: packet handling prior to start marker
  ch_ktls: Correction in middle record handling
  ch_ktls: missing handling of header alone
  ch_ktls: Correction in trimmed_len calculation
  cxgb4/ch_ktls: creating skbs causes panic
  ch_ktls: Update cheksum information
  ch_ktls: Correction in finding correct length
  cxgb4/ch_ktls: decrypted bit is not enough
  net/x25: Fix null-ptr-deref in x25_connect
  ...

247 files changed:
Documentation/ABI/stable/sysfs-driver-dma-ioatdma
Documentation/ABI/testing/sysfs-class-net
Documentation/devicetree/bindings/interrupt-controller/ti,sci-inta.yaml
Documentation/filesystems/ext4/journal.rst
Documentation/filesystems/ext4/super.rst
Documentation/filesystems/journalling.rst
Documentation/firmware-guide/acpi/acpi-lid.rst
Documentation/firmware-guide/acpi/gpio-properties.rst
Documentation/firmware-guide/acpi/method-tracing.rst
Documentation/leds/index.rst
Documentation/misc-devices/index.rst
Documentation/virt/kvm/api.rst
MAINTAINERS
Makefile
arch/arm64/kvm/mmu.c
arch/arm64/kvm/sys_regs.c
arch/arm64/kvm/sys_regs.h
arch/powerpc/include/asm/nohash/32/kup-8xx.h
arch/powerpc/include/asm/nohash/32/mmu-8xx.h
arch/powerpc/include/asm/nohash/32/pte-8xx.h
arch/powerpc/include/asm/topology.h
arch/powerpc/include/asm/uaccess.h
arch/powerpc/kernel/eeh_cache.c
arch/powerpc/kernel/head_40x.S
arch/powerpc/kernel/head_8xx.S
arch/powerpc/kernel/head_book3s_32.S
arch/powerpc/kernel/smp.c
arch/riscv/include/asm/uaccess.h
arch/riscv/kernel/ftrace.c
arch/riscv/kernel/head.S
arch/riscv/kernel/vdso/.gitignore
arch/riscv/kernel/vdso/Makefile
arch/riscv/kernel/vdso/so2s.sh [new file with mode: 0755]
arch/riscv/mm/fault.c
arch/riscv/mm/init.c
arch/x86/kernel/apic/x2apic_uv_x.c
arch/x86/kernel/cpu/bugs.c
arch/x86/kvm/cpuid.c
arch/x86/kvm/cpuid.h
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h
arch/x86/lib/memcpy_64.S
arch/x86/lib/memmove_64.S
arch/x86/lib/memset_64.S
drivers/acpi/acpi_video.c
drivers/acpi/battery.c
drivers/acpi/button.c
drivers/acpi/dptf/dptf_pch_fivr.c
drivers/acpi/dptf/dptf_power.c
drivers/acpi/dptf/int340x_thermal.c
drivers/acpi/event.c
drivers/acpi/evged.c
drivers/acpi/fan.c
drivers/acpi/internal.h
drivers/acpi/nfit/core.c
drivers/acpi/pci_irq.c
drivers/acpi/pci_link.c
drivers/acpi/pci_mcfg.c
drivers/acpi/power.c
drivers/acpi/processor_perflib.c
drivers/acpi/sbs.c
drivers/acpi/sbshc.c
drivers/acpi/sbshc.h
drivers/acpi/scan.c
drivers/acpi/video_detect.c
drivers/acpi/wakeup.c
drivers/block/null_blk.h
drivers/block/null_blk_zoned.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/cpufreq_governor.h
drivers/cpufreq/cpufreq_performance.c
drivers/cpufreq/cpufreq_powersave.c
drivers/cpufreq/intel_pstate.c
drivers/i2c/busses/Kconfig
drivers/i2c/busses/i2c-designware-slave.c
drivers/i2c/busses/i2c-mlxbf.c
drivers/i2c/busses/i2c-mt65xx.c
drivers/i2c/busses/i2c-sh_mobile.c
drivers/iommu/intel/iommu.c
drivers/irqchip/Kconfig
drivers/irqchip/irq-bcm2836.c
drivers/irqchip/irq-mst-intc.c
drivers/irqchip/irq-renesas-intc-irqpin.c
drivers/irqchip/irq-sifive-plic.c
drivers/irqchip/irq-stm32-exti.c
drivers/irqchip/irq-ti-sci-inta.c
drivers/nvme/host/core.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/host/tcp.c
drivers/powercap/powercap_sys.c
drivers/scsi/device_handler/scsi_dh_alua.c
drivers/scsi/hpsa.c
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/tty/serial/8250/8250_mtk.c
drivers/tty/serial/Kconfig
drivers/tty/serial/serial_txx9.c
drivers/tty/tty_io.c
drivers/tty/vt/vt.c
drivers/usb/core/quirks.c
drivers/usb/dwc2/platform.c
drivers/usb/dwc3/dwc3-pci.c
drivers/usb/dwc3/ep0.c
drivers/usb/gadget/legacy/raw_gadget.c
drivers/usb/gadget/udc/fsl_udc_core.c
drivers/usb/gadget/udc/goku_udc.c
drivers/usb/misc/apple-mfi-fastcharge.c
drivers/usb/mtu3/mtu3_gadget.c
drivers/usb/serial/cyberjack.c
drivers/usb/serial/option.c
drivers/xen/swiotlb-xen.c
fs/btrfs/block-rsv.c
fs/btrfs/dev-replace.c
fs/btrfs/ioctl.c
fs/btrfs/qgroup.c
fs/btrfs/ref-verify.c
fs/btrfs/relocation.c
fs/btrfs/scrub.c
fs/btrfs/volumes.c
fs/ceph/caps.c
fs/ceph/mds_client.c
fs/ceph/mds_client.h
fs/ceph/quota.c
fs/ceph/snap.c
fs/crypto/keysetup.c
fs/erofs/inode.c
fs/erofs/zdata.c
fs/ext4/ext4.h
fs/ext4/extents.c
fs/ext4/fast_commit.c
fs/ext4/fast_commit.h
fs/ext4/file.c
fs/ext4/fsmap.c
fs/ext4/fsync.c
fs/ext4/inline.c
fs/ext4/inode.c
fs/ext4/mballoc.c
fs/ext4/namei.c
fs/ext4/super.c
fs/io-wq.c
fs/io_uring.c
fs/iomap/buffered-io.c
fs/jbd2/checkpoint.c
fs/jbd2/commit.c
fs/jbd2/journal.c
fs/jbd2/recovery.c
fs/jbd2/transaction.c
fs/nfs/dir.c
fs/nfs/nfs42xattr.c
fs/nfs/nfs42xdr.c
fs/nfs/nfsroot.c
fs/nfsd/nfs3proc.c
fs/nfsd/nfs3xdr.c
fs/nfsd/nfs4proc.c
fs/ocfs2/journal.c
fs/proc/cpuinfo.c
fs/proc/generic.c
fs/proc/inode.c
fs/proc/stat.c
fs/seq_file.c
fs/xfs/libxfs/xfs_alloc.c
fs/xfs/libxfs/xfs_bmap.h
fs/xfs/scrub/inode.c
fs/xfs/xfs_aops.c
fs/xfs/xfs_iops.c
fs/xfs/xfs_reflink.c
include/linux/cpufreq.h
include/linux/io_uring.h
include/linux/iomap.h
include/linux/jbd2.h
include/linux/seq_file.h
include/linux/swiotlb.h
include/trace/events/ext4.h
include/trace/events/sunrpc.h
kernel/dma/swiotlb.c
kernel/entry/common.c
kernel/events/core.c
kernel/exit.c
kernel/fork.c
kernel/futex.c
kernel/irq/Kconfig
kernel/sched/cpufreq_schedutil.c
net/sunrpc/sysctl.c
scripts/get_abi.pl
tools/power/x86/turbostat/Makefile
tools/power/x86/turbostat/turbostat.8
tools/power/x86/turbostat/turbostat.c
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
tools/testing/selftests/clone3/clone3_cap_checkpoint_restore.c
tools/testing/selftests/core/close_range_test.c
tools/testing/selftests/filesystems/binderfs/binderfs_test.c
tools/testing/selftests/ftrace/test.d/dynevent/add_remove_kprobe.tc
tools/testing/selftests/ftrace/test.d/dynevent/clear_select_events.tc
tools/testing/selftests/ftrace/test.d/dynevent/generic_clear_event.tc
tools/testing/selftests/ftrace/test.d/ftrace/func-filter-notrace-pid.tc
tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc
tools/testing/selftests/ftrace/test.d/ftrace/func-filter-stacktrace.tc
tools/testing/selftests/ftrace/test.d/functions
tools/testing/selftests/ftrace/test.d/kprobe/add_and_remove.tc
tools/testing/selftests/ftrace/test.d/kprobe/busy_check.tc
tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args.tc
tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_comm.tc
tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc
tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_symbol.tc
tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc
tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_user.tc
tools/testing/selftests/ftrace/test.d/kprobe/kprobe_ftrace.tc
tools/testing/selftests/ftrace/test.d/kprobe/kprobe_multiprobe.tc
tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc
tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_args.tc
tools/testing/selftests/ftrace/test.d/kprobe/profile.tc
tools/testing/selftests/kselftest_harness.h
tools/testing/selftests/kvm/.gitignore
tools/testing/selftests/kvm/Makefile
tools/testing/selftests/kvm/aarch64/get-reg-list-sve.c [new file with mode: 0644]
tools/testing/selftests/kvm/aarch64/get-reg-list.c [new file with mode: 0644]
tools/testing/selftests/kvm/clear_dirty_log_test.c [deleted file]
tools/testing/selftests/kvm/demand_paging_test.c
tools/testing/selftests/kvm/dirty_log_perf_test.c [new file with mode: 0644]
tools/testing/selftests/kvm/dirty_log_test.c
tools/testing/selftests/kvm/include/kvm_util.h
tools/testing/selftests/kvm/include/perf_test_util.h [new file with mode: 0644]
tools/testing/selftests/kvm/include/test_util.h
tools/testing/selftests/kvm/include/x86_64/processor.h
tools/testing/selftests/kvm/lib/aarch64/processor.c
tools/testing/selftests/kvm/lib/aarch64/ucall.c
tools/testing/selftests/kvm/lib/kvm_util.c
tools/testing/selftests/kvm/lib/kvm_util_internal.h
tools/testing/selftests/kvm/lib/s390x/processor.c
tools/testing/selftests/kvm/lib/s390x/ucall.c
tools/testing/selftests/kvm/lib/test_util.c
tools/testing/selftests/kvm/lib/x86_64/handlers.S [new file with mode: 0644]
tools/testing/selftests/kvm/lib/x86_64/processor.c
tools/testing/selftests/kvm/lib/x86_64/ucall.c
tools/testing/selftests/kvm/x86_64/kvm_pv_test.c [new file with mode: 0644]
tools/testing/selftests/lib.mk
tools/testing/selftests/pidfd/config
tools/testing/selftests/pidfd/pidfd_getfd_test.c
tools/testing/selftests/pidfd/pidfd_open_test.c
tools/testing/selftests/pidfd/pidfd_poll_test.c
tools/testing/selftests/pidfd/pidfd_setns_test.c
tools/testing/selftests/pidfd/pidfd_test.c
tools/testing/selftests/proc/proc-loadavg-001.c
tools/testing/selftests/proc/proc-self-syscall.c
tools/testing/selftests/proc/proc-uptime-002.c

index 420c1d0..3a4e2cd 100644 (file)
@@ -1,29 +1,29 @@
-What:           sys/devices/pciXXXX:XX/0000:XX:XX.X/dma/dma<n>chan<n>/quickdata/cap
+What:           /sys/devices/pciXXXX:XX/0000:XX:XX.X/dma/dma<n>chan<n>/quickdata/cap
 Date:           December 3, 2009
 KernelVersion:  2.6.32
 Contact:        dmaengine@vger.kernel.org
 Description:   Capabilities the DMA supports.Currently there are DMA_PQ, DMA_PQ_VAL,
                DMA_XOR,DMA_XOR_VAL,DMA_INTERRUPT.
 
-What:           sys/devices/pciXXXX:XX/0000:XX:XX.X/dma/dma<n>chan<n>/quickdata/ring_active
+What:           /sys/devices/pciXXXX:XX/0000:XX:XX.X/dma/dma<n>chan<n>/quickdata/ring_active
 Date:           December 3, 2009
 KernelVersion:  2.6.32
 Contact:        dmaengine@vger.kernel.org
 Description:   The number of descriptors active in the ring.
 
-What:           sys/devices/pciXXXX:XX/0000:XX:XX.X/dma/dma<n>chan<n>/quickdata/ring_size
+What:           /sys/devices/pciXXXX:XX/0000:XX:XX.X/dma/dma<n>chan<n>/quickdata/ring_size
 Date:           December 3, 2009
 KernelVersion:  2.6.32
 Contact:        dmaengine@vger.kernel.org
 Description:   Descriptor ring size, total number of descriptors available.
 
-What:           sys/devices/pciXXXX:XX/0000:XX:XX.X/dma/dma<n>chan<n>/quickdata/version
+What:           /sys/devices/pciXXXX:XX/0000:XX:XX.X/dma/dma<n>chan<n>/quickdata/version
 Date:           December 3, 2009
 KernelVersion:  2.6.32
 Contact:        dmaengine@vger.kernel.org
 Description:   Version of ioatdma device.
 
-What:           sys/devices/pciXXXX:XX/0000:XX:XX.X/dma/dma<n>chan<n>/quickdata/intr_coalesce
+What:           /sys/devices/pciXXXX:XX/0000:XX:XX.X/dma/dma<n>chan<n>/quickdata/intr_coalesce
 Date:           August 8, 2017
 KernelVersion:  4.14
 Contact:        dmaengine@vger.kernel.org
index 7670012..1f2002d 100644 (file)
@@ -152,7 +152,7 @@ Description:
                When an interface is under test, it cannot be expected
                to pass packets as normal.
 
-What:          /sys/clas/net/<iface>/duplex
+What:          /sys/class/net/<iface>/duplex
 Date:          October 2009
 KernelVersion: 2.6.33
 Contact:       netdev@vger.kernel.org
index f6c3fcc..b5af120 100644 (file)
@@ -32,6 +32,11 @@ description: |
                        | | vint  | bit  |  | 0 |.....|63| vintx  |
                        | +--------------+  +------------+        |
                        |                                         |
+                       |      Unmap                              |
+                       | +--------------+                        |
+  Unmapped events ---->| |   umapidx    |-------------------------> Globalevents
+                       | +--------------+                        |
+                       |                                         |
                        +-----------------------------------------+
 
   Configuration of these Intmap registers that maps global events to vint is
@@ -70,6 +75,11 @@ properties:
         - description: |
             "limit" specifies the limit for translation
 
+  ti,unmapped-event-sources:
+    $ref: /schemas/types.yaml#definitions/phandle-array
+    description:
+      Array of phandles to DMA controllers where the unmapped events originate.
+
 required:
   - compatible
   - reg
index 805a1e9..849d5b1 100644 (file)
@@ -256,6 +256,10 @@ which is 1024 bytes long:
      - s\_padding2
      -
    * - 0x54
+     - \_\_be32
+     - s\_num\_fc\_blocks
+     - Number of fast commit blocks in the journal.
+   * - 0x58
      - \_\_u32
      - s\_padding[42]
      -
@@ -310,6 +314,8 @@ The journal incompat features are any combination of the following:
      - This journal uses v3 of the checksum on-disk format. This is the same as
        v2, but the journal block tag size is fixed regardless of the size of
        block numbers. (JBD2\_FEATURE\_INCOMPAT\_CSUM\_V3)
+   * - 0x20
+     - Journal has fast commit blocks. (JBD2\_FEATURE\_INCOMPAT\_FAST\_COMMIT)
 
 .. _jbd2_checksum_type:
 
index 93e55d7..2eb1ab2 100644 (file)
@@ -596,6 +596,13 @@ following:
      - Sparse Super Block, v2. If this flag is set, the SB field s\_backup\_bgs
        points to the two block groups that contain backup superblocks
        (COMPAT\_SPARSE\_SUPER2).
+   * - 0x400
+     - Fast commits supported. Although fast commits blocks are
+       backward incompatible, fast commit blocks are not always
+       present in the journal. If fast commit blocks are present in
+       the journal, JBD2 incompat feature
+       (JBD2\_FEATURE\_INCOMPAT\_FAST\_COMMIT) gets
+       set (COMPAT\_FAST\_COMMIT).
 
 .. _super_incompat:
 
index 5a5f70b..e18f90f 100644 (file)
@@ -136,10 +136,8 @@ Fast commits
 ~~~~~~~~~~~~
 
 JBD2 to also allows you to perform file-system specific delta commits known as
-fast commits. In order to use fast commits, you first need to call
-:c:func:`jbd2_fc_init` and tell how many blocks at the end of journal
-area should be reserved for fast commits. Along with that, you will also need
-to set following callbacks that perform correspodning work:
+fast commits. In order to use fast commits, you will need to set following
+callbacks that perform correspodning work:
 
 `journal->j_fc_cleanup_cb`: Cleanup function called after every full commit and
 fast commit.
index 874ce0e..71b9af1 100644 (file)
@@ -19,9 +19,9 @@ report the "current" state of the lid as either "opened" or "closed".
 
 For most platforms, both the _LID method and the lid notifications are
 reliable. However, there are exceptions. In order to work with these
-exceptional buggy platforms, special restrictions and expections should be
+exceptional buggy platforms, special restrictions and exceptions should be
 taken into account. This document describes the restrictions and the
-expections of the Linux ACPI lid device driver.
+exceptions of the Linux ACPI lid device driver.
 
 
 Restrictions of the returning value of the _LID control method
@@ -46,7 +46,7 @@ state is changed to "closed". The "closed" notification is normally used to
 trigger some system power saving operations on Windows. Since it is fully
 tested, it is reliable from all AML tables.
 
-Expections for the userspace users of the ACPI lid device driver
+Exceptions for the userspace users of the ACPI lid device driver
 ================================================================
 
 The ACPI button driver exports the lid state to the userspace via the
@@ -100,7 +100,7 @@ use the following kernel parameter:
 C. button.lid_init_state=ignore:
    When this option is specified, the ACPI button driver never reports the
    initial lid state and there is a compensation mechanism implemented to
-   ensure that the reliable "closed" notifications can always be delievered
+   ensure that the reliable "closed" notifications can always be delivered
    to the userspace by always pairing "closed" input events with complement
    "opened" input events. But there is still no guarantee that the "opened"
    notifications can be delivered to the userspace when the lid is actually
index bb6d74f..59aad61 100644 (file)
@@ -20,9 +20,9 @@ index, like the ASL example below shows::
 
       Name (_CRS, ResourceTemplate ()
       {
-          GpioIo (Exclusive, PullUp, 0, 0, IoRestrictionInputOnly,
+          GpioIo (Exclusive, PullUp, 0, 0, IoRestrictionOutputOnly,
                   "\\_SB.GPO0", 0, ResourceConsumer) {15}
-          GpioIo (Exclusive, PullUp, 0, 0, IoRestrictionInputOnly,
+          GpioIo (Exclusive, PullUp, 0, 0, IoRestrictionOutputOnly,
                   "\\_SB.GPO0", 0, ResourceConsumer) {27, 31}
       })
 
@@ -49,15 +49,41 @@ index
 pin
   Pin in the GpioIo()/GpioInt() resource. Typically this is zero.
 active_low
-  If 1 the GPIO is marked as active_low.
+  If 1, the GPIO is marked as active_low.
 
 Since ACPI GpioIo() resource does not have a field saying whether it is
 active low or high, the "active_low" argument can be used here.  Setting
 it to 1 marks the GPIO as active low.
 
+Note, active_low in _DSD does not make sense for GpioInt() resource and
+must be 0. GpioInt() resource has its own means of defining it.
+
 In our Bluetooth example the "reset-gpios" refers to the second GpioIo()
 resource, second pin in that resource with the GPIO number of 31.
 
+The GpioIo() resource unfortunately doesn't explicitly provide an initial
+state of the output pin which driver should use during its initialization.
+
+Linux tries to use common sense here and derives the state from the bias
+and polarity settings. The table below shows the expectations:
+
+=========  =============  ==============
+Pull Bias     Polarity     Requested...
+=========  =============  ==============
+Implicit     x            AS IS (assumed firmware configured for us)
+Explicit     x (no _DSD)  as Pull Bias (Up == High, Down == Low),
+                          assuming non-active (Polarity = !Pull Bias)
+Down         Low          as low, assuming active
+Down         High         as low, assuming non-active
+Up           Low          as high, assuming non-active
+Up           High         as high, assuming active
+=========  =============  ==============
+
+That said, for our above example the both GPIOs, since the bias setting
+is explicit and _DSD is present, will be treated as active with a high
+polarity and Linux will configure the pins in this state until a driver
+reprograms them differently.
+
 It is possible to leave holes in the array of GPIOs. This is useful in
 cases like with SPI host controllers where some chip selects may be
 implemented as GPIOs and some as native signals. For example a SPI host
@@ -112,8 +138,8 @@ Example::
   Package () {
       "gpio-line-names",
       Package () {
-          "SPI0_CS_N", "EXP2_INT", "MUX6_IO", "UART0_RXD", "MUX7_IO",
-          "LVL_C_A1", "MUX0_IO", "SPI1_MISO"
+          "SPI0_CS_N", "EXP2_INT", "MUX6_IO", "UART0_RXD",
+          "MUX7_IO", "LVL_C_A1", "MUX0_IO", "SPI1_MISO",
       }
   }
 
@@ -137,7 +163,7 @@ to the GPIO lines it is going to use and provide the GPIO subsystem with a
 mapping between those names and the ACPI GPIO resources corresponding to them.
 
 To do that, the driver needs to define a mapping table as a NULL-terminated
-array of struct acpi_gpio_mapping objects that each contain a name, a pointer
+array of struct acpi_gpio_mapping objects that each contains a name, a pointer
 to an array of line data (struct acpi_gpio_params) objects and the size of that
 array.  Each struct acpi_gpio_params object consists of three fields,
 crs_entry_index, line_index, active_low, representing the index of the target
@@ -154,13 +180,14 @@ question would look like this::
   static const struct acpi_gpio_mapping bluetooth_acpi_gpios[] = {
     { "reset-gpios", &reset_gpio, 1 },
     { "shutdown-gpios", &shutdown_gpio, 1 },
-    { },
+    { }
   };
 
 Next, the mapping table needs to be passed as the second argument to
-acpi_dev_add_driver_gpios() that will register it with the ACPI device object
-pointed to by its first argument.  That should be done in the driver's .probe()
-routine.  On removal, the driver should unregister its GPIO mapping table by
+acpi_dev_add_driver_gpios() or its managed analogue that will
+register it with the ACPI device object pointed to by its first
+argument. That should be done in the driver's .probe() routine.
+On removal, the driver should unregister its GPIO mapping table by
 calling acpi_dev_remove_driver_gpios() on the ACPI device object where that
 table was previously registered.
 
@@ -191,12 +218,12 @@ The driver might expect to get the right GPIO when it does::
 but since there is no way to know the mapping between "reset" and
 the GpioIo() in _CRS desc will hold ERR_PTR(-ENOENT).
 
-The driver author can solve this by passing the mapping explictly
-(the recommended way and documented in the above chapter).
+The driver author can solve this by passing the mapping explicitly
+(this is the recommended way and it's documented in the above chapter).
 
 The ACPI GPIO mapping tables should not contaminate drivers that are not
 knowing about which exact device they are servicing on. It implies that
-the ACPI GPIO mapping tables are hardly linked to ACPI ID and certain
+the ACPI GPIO mapping tables are hardly linked to an ACPI ID and certain
 objects, as listed in the above chapter, of the device in question.
 
 Getting GPIO descriptor
@@ -229,5 +256,5 @@ Case 2 explicitly tells GPIO core to look for resources in _CRS.
 Be aware that gpiod_get_index() in cases 1 and 2, assuming that there
 are two versions of ACPI device description provided and no mapping is
 present in the driver, will return different resources. That's why a
-certain driver has to handle them carefully as explained in previous
+certain driver has to handle them carefully as explained in the previous
 chapter.
index 0aa7e2c..6ab6c09 100644 (file)
@@ -98,7 +98,7 @@ subject to change::
    [    0.188903]   exdebug-0398 ex_trace_point        : Method End [0xf58394d8:\_SB.PCI0.LPCB.ECOK] execution.
 
 Developers can utilize these special log entries to track the AML
-interpretion, thus can aid issue debugging and performance tuning. Note
+interpretation, thus can aid issue debugging and performance tuning. Note
 that, as the "AML tracer" logs are implemented via ACPI_DEBUG_PRINT()
 macro, CONFIG_ACPI_DEBUG is also required to be enabled for enabling
 "AML tracer" logs.
index 53e6090..e5d63b9 100644 (file)
@@ -25,3 +25,4 @@ LEDs
    leds-lp5562
    leds-lp55xx
    leds-mlxcpld
+   leds-sc27xx
index 46072ce..64420b3 100644 (file)
@@ -24,7 +24,6 @@ fit into other categories.
    isl29003
    lis3lv02d
    max6875
-   mic/index
    pci-endpoint-test
    spear-pcie-gadget
    uacce
index 36d5f1f..e00a66d 100644 (file)
@@ -6367,7 +6367,7 @@ accesses that would usually trigger a #GP by KVM into the guest will
 instead get bounced to user space through the KVM_EXIT_X86_RDMSR and
 KVM_EXIT_X86_WRMSR exit notifications.
 
-8.25 KVM_X86_SET_MSR_FILTER
+8.27 KVM_X86_SET_MSR_FILTER
 ---------------------------
 
 :Architectures: x86
@@ -6381,8 +6381,7 @@ In combination with KVM_CAP_X86_USER_SPACE_MSR, this allows user space to
 trap and emulate MSRs that are outside of the scope of KVM as well as
 limit the attack surface on KVM's MSR emulation code.
 
-
-8.26 KVM_CAP_ENFORCE_PV_CPUID
+8.28 KVM_CAP_ENFORCE_PV_CPUID
 -----------------------------
 
 Architectures: x86
index a301f09..4a34b25 100644 (file)
@@ -6614,6 +6614,7 @@ Q:        http://patchwork.ozlabs.org/project/linux-ext4/list/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4.git
 F:     Documentation/filesystems/ext4/
 F:     fs/ext4/
+F:     include/trace/events/ext4.h
 
 Extended Verification Module (EVM)
 M:     Mimi Zohar <zohar@linux.ibm.com>
@@ -11163,7 +11164,7 @@ F:      Documentation/devicetree/bindings/input/touchscreen/melfas_mip4.txt
 F:     drivers/input/touchscreen/melfas_mip4.c
 
 MELLANOX BLUEFIELD I2C DRIVER
-M:     Khalil Blaiech <kblaiech@mellanox.com>
+M:     Khalil Blaiech <kblaiech@nvidia.com>
 L:     linux-i2c@vger.kernel.org
 S:     Supported
 F:     drivers/i2c/busses/i2c-mlxbf.c
index 9e7fd6a..008aba5 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 10
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
 NAME = Kleptomaniac Octopus
 
 # *DOCUMENTATION*
index 57972bd..1a01da9 100644 (file)
@@ -788,10 +788,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        }
 
        switch (vma_shift) {
+#ifndef __PAGETABLE_PMD_FOLDED
        case PUD_SHIFT:
                if (fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE))
                        break;
                fallthrough;
+#endif
        case CONT_PMD_SHIFT:
                vma_shift = PMD_SHIFT;
                fallthrough;
index fb12d3e..d0868d0 100644 (file)
@@ -1069,7 +1069,7 @@ static bool trap_ptrauth(struct kvm_vcpu *vcpu,
 static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,
                        const struct sys_reg_desc *rd)
 {
-       return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN_USER | REG_HIDDEN_GUEST;
+       return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN;
 }
 
 #define __PTRAUTH_KEY(k)                                               \
@@ -1153,6 +1153,22 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
        return val;
 }
 
+static unsigned int id_visibility(const struct kvm_vcpu *vcpu,
+                                 const struct sys_reg_desc *r)
+{
+       u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
+                        (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
+
+       switch (id) {
+       case SYS_ID_AA64ZFR0_EL1:
+               if (!vcpu_has_sve(vcpu))
+                       return REG_RAZ;
+               break;
+       }
+
+       return 0;
+}
+
 /* cpufeature ID register access trap handlers */
 
 static bool __access_id_reg(struct kvm_vcpu *vcpu,
@@ -1171,7 +1187,9 @@ static bool access_id_reg(struct kvm_vcpu *vcpu,
                          struct sys_reg_params *p,
                          const struct sys_reg_desc *r)
 {
-       return __access_id_reg(vcpu, p, r, false);
+       bool raz = sysreg_visible_as_raz(vcpu, r);
+
+       return __access_id_reg(vcpu, p, r, raz);
 }
 
 static bool access_raz_id_reg(struct kvm_vcpu *vcpu,
@@ -1192,72 +1210,7 @@ static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
        if (vcpu_has_sve(vcpu))
                return 0;
 
-       return REG_HIDDEN_USER | REG_HIDDEN_GUEST;
-}
-
-/* Visibility overrides for SVE-specific ID registers */
-static unsigned int sve_id_visibility(const struct kvm_vcpu *vcpu,
-                                     const struct sys_reg_desc *rd)
-{
-       if (vcpu_has_sve(vcpu))
-               return 0;
-
-       return REG_HIDDEN_USER;
-}
-
-/* Generate the emulated ID_AA64ZFR0_EL1 value exposed to the guest */
-static u64 guest_id_aa64zfr0_el1(const struct kvm_vcpu *vcpu)
-{
-       if (!vcpu_has_sve(vcpu))
-               return 0;
-
-       return read_sanitised_ftr_reg(SYS_ID_AA64ZFR0_EL1);
-}
-
-static bool access_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
-                                  struct sys_reg_params *p,
-                                  const struct sys_reg_desc *rd)
-{
-       if (p->is_write)
-               return write_to_read_only(vcpu, p, rd);
-
-       p->regval = guest_id_aa64zfr0_el1(vcpu);
-       return true;
-}
-
-static int get_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
-               const struct sys_reg_desc *rd,
-               const struct kvm_one_reg *reg, void __user *uaddr)
-{
-       u64 val;
-
-       if (WARN_ON(!vcpu_has_sve(vcpu)))
-               return -ENOENT;
-
-       val = guest_id_aa64zfr0_el1(vcpu);
-       return reg_to_user(uaddr, &val, reg->id);
-}
-
-static int set_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
-               const struct sys_reg_desc *rd,
-               const struct kvm_one_reg *reg, void __user *uaddr)
-{
-       const u64 id = sys_reg_to_index(rd);
-       int err;
-       u64 val;
-
-       if (WARN_ON(!vcpu_has_sve(vcpu)))
-               return -ENOENT;
-
-       err = reg_from_user(&val, uaddr, id);
-       if (err)
-               return err;
-
-       /* This is what we mean by invariant: you can't change it. */
-       if (val != guest_id_aa64zfr0_el1(vcpu))
-               return -EINVAL;
-
-       return 0;
+       return REG_HIDDEN;
 }
 
 /*
@@ -1299,13 +1252,17 @@ static int __set_id_reg(const struct kvm_vcpu *vcpu,
 static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                      const struct kvm_one_reg *reg, void __user *uaddr)
 {
-       return __get_id_reg(vcpu, rd, uaddr, false);
+       bool raz = sysreg_visible_as_raz(vcpu, rd);
+
+       return __get_id_reg(vcpu, rd, uaddr, raz);
 }
 
 static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                      const struct kvm_one_reg *reg, void __user *uaddr)
 {
-       return __set_id_reg(vcpu, rd, uaddr, false);
+       bool raz = sysreg_visible_as_raz(vcpu, rd);
+
+       return __set_id_reg(vcpu, rd, uaddr, raz);
 }
 
 static int get_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
@@ -1397,6 +1354,7 @@ static bool access_mte_regs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
        .access = access_id_reg,                \
        .get_user = get_id_reg,                 \
        .set_user = set_id_reg,                 \
+       .visibility = id_visibility,            \
 }
 
 /*
@@ -1518,7 +1476,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
        ID_SANITISED(ID_AA64PFR1_EL1),
        ID_UNALLOCATED(4,2),
        ID_UNALLOCATED(4,3),
-       { SYS_DESC(SYS_ID_AA64ZFR0_EL1), access_id_aa64zfr0_el1, .get_user = get_id_aa64zfr0_el1, .set_user = set_id_aa64zfr0_el1, .visibility = sve_id_visibility },
+       ID_SANITISED(ID_AA64ZFR0_EL1),
        ID_UNALLOCATED(4,5),
        ID_UNALLOCATED(4,6),
        ID_UNALLOCATED(4,7),
@@ -2185,7 +2143,7 @@ static void perform_access(struct kvm_vcpu *vcpu,
        trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);
 
        /* Check for regs disabled by runtime config */
-       if (sysreg_hidden_from_guest(vcpu, r)) {
+       if (sysreg_hidden(vcpu, r)) {
                kvm_inject_undefined(vcpu);
                return;
        }
@@ -2684,7 +2642,7 @@ int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg
                return get_invariant_sys_reg(reg->id, uaddr);
 
        /* Check for regs disabled by runtime config */
-       if (sysreg_hidden_from_user(vcpu, r))
+       if (sysreg_hidden(vcpu, r))
                return -ENOENT;
 
        if (r->get_user)
@@ -2709,7 +2667,7 @@ int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg
                return set_invariant_sys_reg(reg->id, uaddr);
 
        /* Check for regs disabled by runtime config */
-       if (sysreg_hidden_from_user(vcpu, r))
+       if (sysreg_hidden(vcpu, r))
                return -ENOENT;
 
        if (r->set_user)
@@ -2780,7 +2738,7 @@ static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
        if (!(rd->reg || rd->get_user))
                return 0;
 
-       if (sysreg_hidden_from_user(vcpu, rd))
+       if (sysreg_hidden(vcpu, rd))
                return 0;
 
        if (!copy_reg_to_user(rd, uind))
index 5a6fc30..0f95964 100644 (file)
@@ -59,8 +59,8 @@ struct sys_reg_desc {
                                   const struct sys_reg_desc *rd);
 };
 
-#define REG_HIDDEN_USER                (1 << 0) /* hidden from userspace ioctls */
-#define REG_HIDDEN_GUEST       (1 << 1) /* hidden from guest */
+#define REG_HIDDEN             (1 << 0) /* hidden from userspace and guest */
+#define REG_RAZ                        (1 << 1) /* RAZ from userspace and guest */
 
 static __printf(2, 3)
 inline void print_sys_reg_msg(const struct sys_reg_params *p,
@@ -111,22 +111,22 @@ static inline void reset_val(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r
        __vcpu_sys_reg(vcpu, r->reg) = r->val;
 }
 
-static inline bool sysreg_hidden_from_guest(const struct kvm_vcpu *vcpu,
-                                           const struct sys_reg_desc *r)
+static inline bool sysreg_hidden(const struct kvm_vcpu *vcpu,
+                                const struct sys_reg_desc *r)
 {
        if (likely(!r->visibility))
                return false;
 
-       return r->visibility(vcpu, r) & REG_HIDDEN_GUEST;
+       return r->visibility(vcpu, r) & REG_HIDDEN;
 }
 
-static inline bool sysreg_hidden_from_user(const struct kvm_vcpu *vcpu,
-                                          const struct sys_reg_desc *r)
+static inline bool sysreg_visible_as_raz(const struct kvm_vcpu *vcpu,
+                                        const struct sys_reg_desc *r)
 {
        if (likely(!r->visibility))
                return false;
 
-       return r->visibility(vcpu, r) & REG_HIDDEN_USER;
+       return r->visibility(vcpu, r) & REG_RAZ;
 }
 
 static inline int cmp_sys_reg(const struct sys_reg_desc *i1,
index 85ed239..567cdc5 100644 (file)
@@ -63,7 +63,7 @@ static inline void restore_user_access(unsigned long flags)
 static inline bool
 bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
 {
-       return WARN(!((regs->kuap ^ MD_APG_KUAP) & 0xf0000000),
+       return WARN(!((regs->kuap ^ MD_APG_KUAP) & 0xff000000),
                    "Bug: fault blocked by AP register !");
 }
 
index 1d9ac0f..0bd1b14 100644 (file)
  * respectively NA for All or X for Supervisor and no access for User.
  * Then we use the APG to say whether accesses are according to Page rules or
  * "all Supervisor" rules (Access to all)
- * Therefore, we define 2 APG groups. lsb is _PMD_USER
- * 0 => Kernel => 01 (all accesses performed according to page definition)
- * 1 => User => 00 (all accesses performed as supervisor iaw page definition)
- * 2-15 => Not Used
- */
-#define MI_APG_INIT    0x40000000
-
-/*
- * 0 => Kernel => 01 (all accesses performed according to page definition)
- * 1 => User => 10 (all accesses performed according to swaped page definition)
- * 2-15 => Not Used
- */
-#define MI_APG_KUEP    0x60000000
+ * _PAGE_ACCESSED is also managed via APG. When _PAGE_ACCESSED is not set, say
+ * "all User" rules, that will lead to NA for all.
+ * Therefore, we define 4 APG groups. lsb is _PAGE_ACCESSED
+ * 0 => Kernel => 11 (all accesses performed according as user iaw page definition)
+ * 1 => Kernel+Accessed => 01 (all accesses performed according to page definition)
+ * 2 => User => 11 (all accesses performed according as user iaw page definition)
+ * 3 => User+Accessed => 00 (all accesses performed as supervisor iaw page definition) for INIT
+ *                    => 10 (all accesses performed according to swaped page definition) for KUEP
+ * 4-15 => Not Used
+ */
+#define MI_APG_INIT    0xdc000000
+#define MI_APG_KUEP    0xde000000
 
 /* The effective page number register.  When read, contains the information
  * about the last instruction TLB miss.  When MI_RPN is written, bits in
 #define MD_Ks          0x80000000      /* Should not be set */
 #define MD_Kp          0x40000000      /* Should always be set */
 
-/*
- * All pages' PP data bits are set to either 000 or 011 or 001, which means
- * respectively RW for Supervisor and no access for User, or RO for
- * Supervisor and no access for user and NA for ALL.
- * Then we use the APG to say whether accesses are according to Page rules or
- * "all Supervisor" rules (Access to all)
- * Therefore, we define 2 APG groups. lsb is _PMD_USER
- * 0 => Kernel => 01 (all accesses performed according to page definition)
- * 1 => User => 00 (all accesses performed as supervisor iaw page definition)
- * 2-15 => Not Used
- */
-#define MD_APG_INIT    0x40000000
-
-/*
- * 0 => No user => 01 (all accesses performed according to page definition)
- * 1 => User => 10 (all accesses performed according to swaped page definition)
- * 2-15 => Not Used
- */
-#define MD_APG_KUAP    0x60000000
+/* See explanation above at the definition of MI_APG_INIT */
+#define MD_APG_INIT    0xdc000000
+#define MD_APG_KUAP    0xde000000
 
 /* The effective page number register.  When read, contains the information
  * about the last instruction TLB miss.  When MD_RPN is written, bits in
index 66f403a..1581204 100644 (file)
@@ -39,9 +39,9 @@
  * into the TLB.
  */
 #define _PAGE_GUARDED  0x0010  /* Copied to L1 G entry in DTLB */
-#define _PAGE_SPECIAL  0x0020  /* SW entry */
+#define _PAGE_ACCESSED 0x0020  /* Copied to L1 APG 1 entry in I/DTLB */
 #define _PAGE_EXEC     0x0040  /* Copied to PP (bit 21) in ITLB */
-#define _PAGE_ACCESSED 0x0080  /* software: page referenced */
+#define _PAGE_SPECIAL  0x0080  /* SW entry */
 
 #define _PAGE_NA       0x0200  /* Supervisor NA, User no access */
 #define _PAGE_RO       0x0600  /* Supervisor RO, User no access */
 
 #define _PMD_PRESENT   0x0001
 #define _PMD_PRESENT_MASK      _PMD_PRESENT
-#define _PMD_BAD       0x0fd0
+#define _PMD_BAD       0x0f90
 #define _PMD_PAGE_MASK 0x000c
 #define _PMD_PAGE_8M   0x000c
 #define _PMD_PAGE_512K 0x0004
-#define _PMD_USER      0x0020  /* APG 1 */
+#define _PMD_ACCESSED  0x0020  /* APG 1 */
+#define _PMD_USER      0x0040  /* APG 2 */
 
 #define _PTE_NONE_MASK 0
 
index 8728590..3beeb03 100644 (file)
@@ -6,6 +6,7 @@
 
 struct device;
 struct device_node;
+struct drmem_lmb;
 
 #ifdef CONFIG_NUMA
 
@@ -61,6 +62,9 @@ static inline int early_cpu_to_node(int cpu)
         */
        return (nid < 0) ? 0 : nid;
 }
+
+int of_drconf_to_nid_single(struct drmem_lmb *lmb);
+
 #else
 
 static inline int early_cpu_to_node(int cpu) { return 0; }
@@ -84,10 +88,12 @@ static inline int cpu_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
        return 0;
 }
 
-#endif /* CONFIG_NUMA */
+static inline int of_drconf_to_nid_single(struct drmem_lmb *lmb)
+{
+       return first_online_node;
+}
 
-struct drmem_lmb;
-int of_drconf_to_nid_single(struct drmem_lmb *lmb);
+#endif /* CONFIG_NUMA */
 
 #if defined(CONFIG_NUMA) && defined(CONFIG_PPC_SPLPAR)
 extern int find_and_online_cpu_nid(int cpu);
index ef5bbb7..501c9a7 100644 (file)
@@ -178,7 +178,7 @@ do {                                                                \
  * are no aliasing issues.
  */
 #define __put_user_asm_goto(x, addr, label, op)                        \
-       asm volatile goto(                                      \
+       asm_volatile_goto(                                      \
                "1:     " op "%U1%X1 %0,%1      # put_user\n"   \
                EX_TABLE(1b, %l2)                               \
                :                                               \
@@ -191,7 +191,7 @@ do {                                                                \
        __put_user_asm_goto(x, ptr, label, "std")
 #else /* __powerpc64__ */
 #define __put_user_asm2_goto(x, addr, label)                   \
-       asm volatile goto(                                      \
+       asm_volatile_goto(                                      \
                "1:     stw%X1 %0, %1\n"                        \
                "2:     stw%X1 %L0, %L1\n"                      \
                EX_TABLE(1b, %l2)                               \
index 6b50bf1..bf32704 100644 (file)
@@ -264,8 +264,9 @@ static int eeh_addr_cache_show(struct seq_file *s, void *v)
 {
        struct pci_io_addr_range *piar;
        struct rb_node *n;
+       unsigned long flags;
 
-       spin_lock(&pci_io_addr_cache_root.piar_lock);
+       spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
        for (n = rb_first(&pci_io_addr_cache_root.rb_root); n; n = rb_next(n)) {
                piar = rb_entry(n, struct pci_io_addr_range, rb_node);
 
@@ -273,7 +274,7 @@ static int eeh_addr_cache_show(struct seq_file *s, void *v)
                       (piar->flags & IORESOURCE_IO) ? "i/o" : "mem",
                       &piar->addr_lo, &piar->addr_hi, pci_name(piar->pcidev));
        }
-       spin_unlock(&pci_io_addr_cache_root.piar_lock);
+       spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
 
        return 0;
 }
index 44c9018..a1ae006 100644 (file)
@@ -284,11 +284,7 @@ _ENTRY(saved_ksp_limit)
 
        rlwimi  r11, r10, 22, 20, 29    /* Compute PTE address */
        lwz     r11, 0(r11)             /* Get Linux PTE */
-#ifdef CONFIG_SWAP
        li      r9, _PAGE_PRESENT | _PAGE_ACCESSED
-#else
-       li      r9, _PAGE_PRESENT
-#endif
        andc.   r9, r9, r11             /* Check permission */
        bne     5f
 
@@ -369,11 +365,7 @@ _ENTRY(saved_ksp_limit)
 
        rlwimi  r11, r10, 22, 20, 29    /* Compute PTE address */
        lwz     r11, 0(r11)             /* Get Linux PTE */
-#ifdef CONFIG_SWAP
        li      r9, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
-#else
-       li      r9, _PAGE_PRESENT | _PAGE_EXEC
-#endif
        andc.   r9, r9, r11             /* Check permission */
        bne     5f
 
index 9f359d3..ee0bfeb 100644 (file)
@@ -202,9 +202,7 @@ SystemCall:
 
 InstructionTLBMiss:
        mtspr   SPRN_SPRG_SCRATCH0, r10
-#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_SWAP) || defined(CONFIG_HUGETLBFS)
        mtspr   SPRN_SPRG_SCRATCH1, r11
-#endif
 
        /* If we are faulting a kernel address, we have to use the
         * kernel page tables.
@@ -224,25 +222,13 @@ InstructionTLBMiss:
 3:
        mtcr    r11
 #endif
-#if defined(CONFIG_HUGETLBFS) || !defined(CONFIG_PIN_TLB_TEXT)
        lwz     r11, (swapper_pg_dir-PAGE_OFFSET)@l(r10)        /* Get level 1 entry */
        mtspr   SPRN_MD_TWC, r11
-#else
-       lwz     r10, (swapper_pg_dir-PAGE_OFFSET)@l(r10)        /* Get level 1 entry */
-       mtspr   SPRN_MI_TWC, r10        /* Set segment attributes */
-       mtspr   SPRN_MD_TWC, r10
-#endif
        mfspr   r10, SPRN_MD_TWC
        lwz     r10, 0(r10)     /* Get the pte */
-#if defined(CONFIG_HUGETLBFS) || !defined(CONFIG_PIN_TLB_TEXT)
+       rlwimi  r11, r10, 0, _PAGE_GUARDED | _PAGE_ACCESSED
        rlwimi  r11, r10, 32 - 9, _PMD_PAGE_512K
        mtspr   SPRN_MI_TWC, r11
-#endif
-#ifdef CONFIG_SWAP
-       rlwinm  r11, r10, 32-5, _PAGE_PRESENT
-       and     r11, r11, r10
-       rlwimi  r10, r11, 0, _PAGE_PRESENT
-#endif
        /* The Linux PTE won't go exactly into the MMU TLB.
         * Software indicator bits 20 and 23 must be clear.
         * Software indicator bits 22, 24, 25, 26, and 27 must be
@@ -256,9 +242,7 @@ InstructionTLBMiss:
 
        /* Restore registers */
 0:     mfspr   r10, SPRN_SPRG_SCRATCH0
-#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_SWAP) || defined(CONFIG_HUGETLBFS)
        mfspr   r11, SPRN_SPRG_SCRATCH1
-#endif
        rfi
        patch_site      0b, patch__itlbmiss_exit_1
 
@@ -268,9 +252,7 @@ InstructionTLBMiss:
        addi    r10, r10, 1
        stw     r10, (itlb_miss_counter - PAGE_OFFSET)@l(0)
        mfspr   r10, SPRN_SPRG_SCRATCH0
-#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_SWAP)
        mfspr   r11, SPRN_SPRG_SCRATCH1
-#endif
        rfi
 #endif
 
@@ -297,30 +279,16 @@ DataStoreTLBMiss:
        mfspr   r10, SPRN_MD_TWC
        lwz     r10, 0(r10)     /* Get the pte */
 
-       /* Insert the Guarded flag into the TWC from the Linux PTE.
+       /* Insert Guarded and Accessed flags into the TWC from the Linux PTE.
         * It is bit 27 of both the Linux PTE and the TWC (at least
         * I got that right :-).  It will be better when we can put
         * this into the Linux pgd/pmd and load it in the operation
         * above.
         */
-       rlwimi  r11, r10, 0, _PAGE_GUARDED
+       rlwimi  r11, r10, 0, _PAGE_GUARDED | _PAGE_ACCESSED
        rlwimi  r11, r10, 32 - 9, _PMD_PAGE_512K
        mtspr   SPRN_MD_TWC, r11
 
-       /* Both _PAGE_ACCESSED and _PAGE_PRESENT has to be set.
-        * We also need to know if the insn is a load/store, so:
-        * Clear _PAGE_PRESENT and load that which will
-        * trap into DTLB Error with store bit set accordinly.
-        */
-       /* PRESENT=0x1, ACCESSED=0x20
-        * r11 = ((r10 & PRESENT) & ((r10 & ACCESSED) >> 5));
-        * r10 = (r10 & ~PRESENT) | r11;
-        */
-#ifdef CONFIG_SWAP
-       rlwinm  r11, r10, 32-5, _PAGE_PRESENT
-       and     r11, r11, r10
-       rlwimi  r10, r11, 0, _PAGE_PRESENT
-#endif
        /* The Linux PTE won't go exactly into the MMU TLB.
         * Software indicator bits 24, 25, 26, and 27 must be
         * set.  All other Linux PTE bits control the behavior
@@ -711,7 +679,7 @@ initial_mmu:
        li      r9, 4                           /* up to 4 pages of 8M */
        mtctr   r9
        lis     r9, KERNELBASE@h                /* Create vaddr for TLB */
-       li      r10, MI_PS8MEG | MI_SVALID      /* Set 8M byte page */
+       li      r10, MI_PS8MEG | _PMD_ACCESSED | MI_SVALID
        li      r11, MI_BOOTINIT                /* Create RPN for address 0 */
 1:
        mtspr   SPRN_MI_CTR, r8 /* Set instruction MMU control */
@@ -775,7 +743,7 @@ _GLOBAL(mmu_pin_tlb)
 #ifdef CONFIG_PIN_TLB_TEXT
        LOAD_REG_IMMEDIATE(r5, 28 << 8)
        LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET)
-       LOAD_REG_IMMEDIATE(r7, MI_SVALID | MI_PS8MEG)
+       LOAD_REG_IMMEDIATE(r7, MI_SVALID | MI_PS8MEG | _PMD_ACCESSED)
        LOAD_REG_IMMEDIATE(r8, 0xf0 | _PAGE_RO | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT)
        LOAD_REG_ADDR(r9, _sinittext)
        li      r0, 4
@@ -797,7 +765,7 @@ _GLOBAL(mmu_pin_tlb)
        LOAD_REG_IMMEDIATE(r5, 28 << 8 | MD_TWAM)
 #ifdef CONFIG_PIN_TLB_DATA
        LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET)
-       LOAD_REG_IMMEDIATE(r7, MI_SVALID | MI_PS8MEG)
+       LOAD_REG_IMMEDIATE(r7, MI_SVALID | MI_PS8MEG | _PMD_ACCESSED)
 #ifdef CONFIG_PIN_TLB_IMMR
        li      r0, 3
 #else
@@ -834,7 +802,7 @@ _GLOBAL(mmu_pin_tlb)
 #endif
 #ifdef CONFIG_PIN_TLB_IMMR
        LOAD_REG_IMMEDIATE(r0, VIRT_IMMR_BASE | MD_EVALID)
-       LOAD_REG_IMMEDIATE(r7, MD_SVALID | MD_PS512K | MD_GUARDED)
+       LOAD_REG_IMMEDIATE(r7, MD_SVALID | MD_PS512K | MD_GUARDED | _PMD_ACCESSED)
        mfspr   r8, SPRN_IMMR
        rlwinm  r8, r8, 0, 0xfff80000
        ori     r8, r8, 0xf0 | _PAGE_DIRTY | _PAGE_SPS | _PAGE_SH | \
index 5eb9eed..2aa16d5 100644 (file)
@@ -457,11 +457,7 @@ InstructionTLBMiss:
        cmplw   0,r1,r3
 #endif
        mfspr   r2, SPRN_SPRG_PGDIR
-#ifdef CONFIG_SWAP
        li      r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
-#else
-       li      r1,_PAGE_PRESENT | _PAGE_EXEC
-#endif
 #if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC)
        bgt-    112f
        lis     r2, (swapper_pg_dir - PAGE_OFFSET)@ha   /* if kernel address, use */
@@ -523,11 +519,7 @@ DataLoadTLBMiss:
        lis     r1, TASK_SIZE@h         /* check if kernel address */
        cmplw   0,r1,r3
        mfspr   r2, SPRN_SPRG_PGDIR
-#ifdef CONFIG_SWAP
        li      r1, _PAGE_PRESENT | _PAGE_ACCESSED
-#else
-       li      r1, _PAGE_PRESENT
-#endif
        bgt-    112f
        lis     r2, (swapper_pg_dir - PAGE_OFFSET)@ha   /* if kernel address, use */
        addi    r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l        /* kernel page table */
@@ -603,11 +595,7 @@ DataStoreTLBMiss:
        lis     r1, TASK_SIZE@h         /* check if kernel address */
        cmplw   0,r1,r3
        mfspr   r2, SPRN_SPRG_PGDIR
-#ifdef CONFIG_SWAP
        li      r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
-#else
-       li      r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT
-#endif
        bgt-    112f
        lis     r2, (swapper_pg_dir - PAGE_OFFSET)@ha   /* if kernel address, use */
        addi    r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l        /* kernel page table */
index 3c6b982..8c2857c 100644 (file)
@@ -1393,13 +1393,14 @@ static void add_cpu_to_masks(int cpu)
 /* Activate a secondary processor. */
 void start_secondary(void *unused)
 {
-       unsigned int cpu = smp_processor_id();
+       unsigned int cpu = raw_smp_processor_id();
 
        mmgrab(&init_mm);
        current->active_mm = &init_mm;
 
        smp_store_cpu_info(cpu);
        set_dec(tb_ticks_per_jiffy);
+       rcu_cpu_starting(cpu);
        preempt_disable();
        cpu_callin_map[cpu] = 1;
 
index c47e6b3..824b2c9 100644 (file)
@@ -476,7 +476,7 @@ do {                                                                        \
 do {                                                                   \
        long __kr_err;                                                  \
                                                                        \
-       __put_user_nocheck(*((type *)(dst)), (type *)(src), __kr_err);  \
+       __put_user_nocheck(*((type *)(src)), (type *)(dst), __kr_err);  \
        if (unlikely(__kr_err))                                         \
                goto err_label;                                         \
 } while (0)
index 99e12fa..765b624 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (C) 2013 Linaro Limited
  * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
index 11e2a4f..7e84979 100644 (file)
@@ -35,12 +35,17 @@ ENTRY(_start)
        .word 0
 #endif
        .balign 8
+#ifdef CONFIG_RISCV_M_MODE
+       /* Image load offset (0MB) from start of RAM for M-mode */
+       .dword 0
+#else
 #if __riscv_xlen == 64
        /* Image load offset(2MB) from start of RAM */
        .dword 0x200000
 #else
        /* Image load offset(4MB) from start of RAM */
        .dword 0x400000
+#endif
 #endif
        /* Effective size of kernel image */
        .dword _end - _start
index 11ebee9..3a19def 100644 (file)
@@ -1,3 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0-only
 vdso.lds
 *.tmp
+vdso-syms.S
index 7d6a94d..cb8f9e4 100644 (file)
@@ -43,19 +43,14 @@ $(obj)/vdso.o: $(obj)/vdso.so
 SYSCFLAGS_vdso.so.dbg = $(c_flags)
 $(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
        $(call if_changed,vdsold)
+SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
+       -Wl,--build-id -Wl,--hash-style=both
 
 # We also create a special relocatable object that should mirror the symbol
 # table and layout of the linked DSO. With ld --just-symbols we can then
 # refer to these symbols in the kernel code rather than hand-coded addresses.
-
-SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
-       -Wl,--build-id=sha1 -Wl,--hash-style=both
-$(obj)/vdso-dummy.o: $(src)/vdso.lds $(obj)/rt_sigreturn.o FORCE
-       $(call if_changed,vdsold)
-
-LDFLAGS_vdso-syms.o := -r --just-symbols
-$(obj)/vdso-syms.o: $(obj)/vdso-dummy.o FORCE
-       $(call if_changed,ld)
+$(obj)/vdso-syms.S: $(obj)/vdso.so FORCE
+       $(call if_changed,so2s)
 
 # strip rule for the .so file
 $(obj)/%.so: OBJCOPYFLAGS := -S
@@ -73,6 +68,11 @@ quiet_cmd_vdsold = VDSOLD  $@
                            $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \
                    rm $@.tmp
 
+# Extracts symbol offsets from the VDSO, converting them into an assembly file
+# that contains the same symbols at the same offsets.
+quiet_cmd_so2s = SO2S    $@
+      cmd_so2s = $(NM) -D $< | $(srctree)/$(src)/so2s.sh > $@
+
 # install commands for the unstripped file
 quiet_cmd_vdso_install = INSTALL $@
       cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
diff --git a/arch/riscv/kernel/vdso/so2s.sh b/arch/riscv/kernel/vdso/so2s.sh
new file mode 100755 (executable)
index 0000000..e64cb6d
--- /dev/null
@@ -0,0 +1,6 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2020 Palmer Dabbelt <palmerdabbelt@google.com>
+
+sed 's!\([0-9a-f]*\) T \([a-z0-9_]*\)\(@@LINUX_4.15\)*!.global \2\n.set \2,0x\1!' \
+| grep '^\.'
index 1359e21..3c8b9e4 100644 (file)
@@ -86,6 +86,7 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
        pmd_t *pmd, *pmd_k;
        pte_t *pte_k;
        int index;
+       unsigned long pfn;
 
        /* User mode accesses just cause a SIGSEGV */
        if (user_mode(regs))
@@ -100,7 +101,8 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
         * of a task switch.
         */
        index = pgd_index(addr);
-       pgd = (pgd_t *)pfn_to_virt(csr_read(CSR_SATP)) + index;
+       pfn = csr_read(CSR_SATP) & SATP_PPN;
+       pgd = (pgd_t *)pfn_to_virt(pfn) + index;
        pgd_k = init_mm.pgd + index;
 
        if (!pgd_present(*pgd_k)) {
index ea933b7..8e577f1 100644 (file)
@@ -154,9 +154,8 @@ disable:
 
 void __init setup_bootmem(void)
 {
-       phys_addr_t mem_size = 0;
-       phys_addr_t total_mem = 0;
-       phys_addr_t mem_start, start, end = 0;
+       phys_addr_t mem_start = 0;
+       phys_addr_t start, end = 0;
        phys_addr_t vmlinux_end = __pa_symbol(&_end);
        phys_addr_t vmlinux_start = __pa_symbol(&_start);
        u64 i;
@@ -164,21 +163,18 @@ void __init setup_bootmem(void)
        /* Find the memory region containing the kernel */
        for_each_mem_range(i, &start, &end) {
                phys_addr_t size = end - start;
-               if (!total_mem)
+               if (!mem_start)
                        mem_start = start;
                if (start <= vmlinux_start && vmlinux_end <= end)
                        BUG_ON(size == 0);
-               total_mem = total_mem + size;
        }
 
        /*
-        * Remove memblock from the end of usable area to the
-        * end of region
+        * The maximal physical memory size is -PAGE_OFFSET.
+        * Make sure that any memory beyond mem_start + (-PAGE_OFFSET) is removed
+        * as it is unusable by kernel.
         */
-       mem_size = min(total_mem, (phys_addr_t)-PAGE_OFFSET);
-       if (mem_start + mem_size < end)
-               memblock_remove(mem_start + mem_size,
-                               end - mem_start - mem_size);
+       memblock_enforce_memory_limit(mem_start - PAGE_OFFSET);
 
        /* Reserve from the start of the kernel to the end of the kernel */
        memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
@@ -297,6 +293,7 @@ pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;
 #define NUM_EARLY_PMDS         (1UL + MAX_EARLY_MAPPING_SIZE / PGDIR_SIZE)
 #endif
 pmd_t early_pmd[PTRS_PER_PMD * NUM_EARLY_PMDS] __initdata __aligned(PAGE_SIZE);
+pmd_t early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
 
 static pmd_t *__init get_pmd_virt_early(phys_addr_t pa)
 {
@@ -494,6 +491,18 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
                                   load_pa + (va - PAGE_OFFSET),
                                   map_size, PAGE_KERNEL_EXEC);
 
+#ifndef __PAGETABLE_PMD_FOLDED
+       /* Setup early PMD for DTB */
+       create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
+                          (uintptr_t)early_dtb_pmd, PGDIR_SIZE, PAGE_TABLE);
+       /* Create two consecutive PMD mappings for FDT early scan */
+       pa = dtb_pa & ~(PMD_SIZE - 1);
+       create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA,
+                          pa, PMD_SIZE, PAGE_KERNEL);
+       create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA + PMD_SIZE,
+                          pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL);
+       dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PMD_SIZE - 1));
+#else
        /* Create two consecutive PGD mappings for FDT early scan */
        pa = dtb_pa & ~(PGDIR_SIZE - 1);
        create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
@@ -501,6 +510,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
        create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA + PGDIR_SIZE,
                           pa + PGDIR_SIZE, PGDIR_SIZE, PAGE_KERNEL);
        dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PGDIR_SIZE - 1));
+#endif
        dtb_early_pa = dtb_pa;
 
        /*
index 714233c..3115caa 100644 (file)
@@ -290,6 +290,9 @@ static void __init uv_stringify(int len, char *to, char *from)
 {
        /* Relies on 'to' being NULL chars so result will be NULL terminated */
        strncpy(to, from, len-1);
+
+       /* Trim trailing spaces */
+       (void)strim(to);
 }
 
 /* Find UV arch type entry in UVsystab */
@@ -366,7 +369,7 @@ static int __init early_get_arch_type(void)
        return ret;
 }
 
-static int __init uv_set_system_type(char *_oem_id)
+static int __init uv_set_system_type(char *_oem_id, char *_oem_table_id)
 {
        /* Save OEM_ID passed from ACPI MADT */
        uv_stringify(sizeof(oem_id), oem_id, _oem_id);
@@ -386,13 +389,23 @@ static int __init uv_set_system_type(char *_oem_id)
                        /* (Not hubless), not a UV */
                        return 0;
 
+               /* Is UV hubless system */
+               uv_hubless_system = 0x01;
+
+               /* UV5 Hubless */
+               if (strncmp(uv_archtype, "NSGI5", 5) == 0)
+                       uv_hubless_system |= 0x20;
+
                /* UV4 Hubless: CH */
-               if (strncmp(uv_archtype, "NSGI4", 5) == 0)
-                       uv_hubless_system = 0x11;
+               else if (strncmp(uv_archtype, "NSGI4", 5) == 0)
+                       uv_hubless_system |= 0x10;
 
                /* UV3 Hubless: UV300/MC990X w/o hub */
                else
-                       uv_hubless_system = 0x9;
+                       uv_hubless_system |= 0x8;
+
+               /* Copy APIC type */
+               uv_stringify(sizeof(oem_table_id), oem_table_id, _oem_table_id);
 
                pr_info("UV: OEM IDs %s/%s, SystemType %d, HUBLESS ID %x\n",
                        oem_id, oem_table_id, uv_system_type, uv_hubless_system);
@@ -456,7 +469,7 @@ static int __init uv_acpi_madt_oem_check(char *_oem_id, char *_oem_table_id)
        uv_cpu_info->p_uv_hub_info = &uv_hub_info_node0;
 
        /* If not UV, return. */
-       if (likely(uv_set_system_type(_oem_id) == 0))
+       if (uv_set_system_type(_oem_id, _oem_table_id) == 0)
                return 0;
 
        /* Save and Decode OEM Table ID */
index d3f0db4..581fb72 100644 (file)
@@ -1254,6 +1254,14 @@ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
        return 0;
 }
 
+static bool is_spec_ib_user_controlled(void)
+{
+       return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
+               spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
+               spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
+               spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP;
+}
+
 static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
 {
        switch (ctrl) {
@@ -1261,16 +1269,26 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
                if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
                    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
                        return 0;
+
                /*
-                * Indirect branch speculation is always disabled in strict
-                * mode. It can neither be enabled if it was force-disabled
-                * by a  previous prctl call.
+                * With strict mode for both IBPB and STIBP, the instruction
+                * code paths avoid checking this task flag and instead,
+                * unconditionally run the instruction. However, STIBP and IBPB
+                * are independent and either can be set to conditionally
+                * enabled regardless of the mode of the other.
+                *
+                * If either is set to conditional, allow the task flag to be
+                * updated, unless it was force-disabled by a previous prctl
+                * call. Currently, this is possible on an AMD CPU which has the
+                * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the
+                * kernel is booted with 'spectre_v2_user=seccomp', then
+                * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and
+                * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED.
                 */
-               if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
-                   spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
-                   spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ||
+               if (!is_spec_ib_user_controlled() ||
                    task_spec_ib_force_disable(task))
                        return -EPERM;
+
                task_clear_spec_ib_disable(task);
                task_update_spec_tif(task);
                break;
@@ -1283,10 +1301,10 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
                if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
                    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
                        return -EPERM;
-               if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
-                   spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
-                   spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
+
+               if (!is_spec_ib_user_controlled())
                        return 0;
+
                task_set_spec_ib_disable(task);
                if (ctrl == PR_SPEC_FORCE_DISABLE)
                        task_set_spec_ib_force_disable(task);
@@ -1351,20 +1369,17 @@ static int ib_prctl_get(struct task_struct *task)
        if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
            spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
                return PR_SPEC_ENABLE;
-       else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
-           spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
-           spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
-               return PR_SPEC_DISABLE;
-       else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
-           spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
-           spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
-           spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) {
+       else if (is_spec_ib_user_controlled()) {
                if (task_spec_ib_force_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
                if (task_spec_ib_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
                return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
-       } else
+       } else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
+           spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
+           spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
+               return PR_SPEC_DISABLE;
+       else
                return PR_SPEC_NOT_AFFECTED;
 }
 
index 06a278b..d50041f 100644 (file)
@@ -90,6 +90,20 @@ static int kvm_check_cpuid(struct kvm_cpuid_entry2 *entries, int nent)
        return 0;
 }
 
+void kvm_update_pv_runtime(struct kvm_vcpu *vcpu)
+{
+       struct kvm_cpuid_entry2 *best;
+
+       best = kvm_find_cpuid_entry(vcpu, KVM_CPUID_FEATURES, 0);
+
+       /*
+        * save the feature bitmap to avoid cpuid lookup for every PV
+        * operation
+        */
+       if (best)
+               vcpu->arch.pv_cpuid.features = best->eax;
+}
+
 void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
 {
        struct kvm_cpuid_entry2 *best;
@@ -124,13 +138,6 @@ void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
                (best->eax & (1 << KVM_FEATURE_PV_UNHALT)))
                best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);
 
-       /*
-        * save the feature bitmap to avoid cpuid lookup for every PV
-        * operation
-        */
-       if (best)
-               vcpu->arch.pv_cpuid.features = best->eax;
-
        if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) {
                best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
                if (best)
@@ -162,6 +169,8 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
                vcpu->arch.guest_supported_xcr0 =
                        (best->eax | ((u64)best->edx << 32)) & supported_xcr0;
 
+       kvm_update_pv_runtime(vcpu);
+
        vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
        kvm_mmu_reset_context(vcpu);
 
index bf85779..f7a6e8f 100644 (file)
@@ -11,6 +11,7 @@ extern u32 kvm_cpu_caps[NCAPINTS] __read_mostly;
 void kvm_set_cpu_caps(void);
 
 void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu);
+void kvm_update_pv_runtime(struct kvm_vcpu *vcpu);
 struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
                                              u32 function, u32 index);
 int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
index 1f96adf..5bb1939 100644 (file)
@@ -856,12 +856,14 @@ static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
        } else {
                rmap_printk("pte_list_add: %p %llx many->many\n", spte, *spte);
                desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
-               while (desc->sptes[PTE_LIST_EXT-1] && desc->more) {
-                       desc = desc->more;
+               while (desc->sptes[PTE_LIST_EXT-1]) {
                        count += PTE_LIST_EXT;
-               }
-               if (desc->sptes[PTE_LIST_EXT-1]) {
-                       desc->more = mmu_alloc_pte_list_desc(vcpu);
+
+                       if (!desc->more) {
+                               desc->more = mmu_alloc_pte_list_desc(vcpu);
+                               desc = desc->more;
+                               break;
+                       }
                        desc = desc->more;
                }
                for (i = 0; desc->sptes[i]; ++i)
index f5ede41..447edc0 100644 (file)
@@ -255,11 +255,10 @@ static struct kmem_cache *x86_emulator_cache;
 
 /*
  * When called, it means the previous get/set msr reached an invalid msr.
- * Return 0 if we want to ignore/silent this failed msr access, or 1 if we want
- * to fail the caller.
+ * Return true if we want to ignore/silent this failed msr access.
  */
-static int kvm_msr_ignored_check(struct kvm_vcpu *vcpu, u32 msr,
-                                u64 data, bool write)
+static bool kvm_msr_ignored_check(struct kvm_vcpu *vcpu, u32 msr,
+                                 u64 data, bool write)
 {
        const char *op = write ? "wrmsr" : "rdmsr";
 
@@ -268,11 +267,11 @@ static int kvm_msr_ignored_check(struct kvm_vcpu *vcpu, u32 msr,
                        kvm_pr_unimpl("ignored %s: 0x%x data 0x%llx\n",
                                      op, msr, data);
                /* Mask the error */
-               return 0;
+               return true;
        } else {
                kvm_debug_ratelimited("unhandled %s: 0x%x data 0x%llx\n",
                                      op, msr, data);
-               return -ENOENT;
+               return false;
        }
 }
 
@@ -1416,7 +1415,8 @@ static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
        if (r == KVM_MSR_RET_INVALID) {
                /* Unconditionally clear the output for simplicity */
                *data = 0;
-               r = kvm_msr_ignored_check(vcpu, index, 0, false);
+               if (kvm_msr_ignored_check(vcpu, index, 0, false))
+                       r = 0;
        }
 
        if (r)
@@ -1540,7 +1540,7 @@ static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
        struct msr_data msr;
 
        if (!host_initiated && !kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_WRITE))
-               return -EPERM;
+               return KVM_MSR_RET_FILTERED;
 
        switch (index) {
        case MSR_FS_BASE:
@@ -1581,7 +1581,8 @@ static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu,
        int ret = __kvm_set_msr(vcpu, index, data, host_initiated);
 
        if (ret == KVM_MSR_RET_INVALID)
-               ret = kvm_msr_ignored_check(vcpu, index, data, true);
+               if (kvm_msr_ignored_check(vcpu, index, data, true))
+                       ret = 0;
 
        return ret;
 }
@@ -1599,7 +1600,7 @@ int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
        int ret;
 
        if (!host_initiated && !kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_READ))
-               return -EPERM;
+               return KVM_MSR_RET_FILTERED;
 
        msr.index = index;
        msr.host_initiated = host_initiated;
@@ -1618,7 +1619,8 @@ static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu,
        if (ret == KVM_MSR_RET_INVALID) {
                /* Unconditionally clear *data for simplicity */
                *data = 0;
-               ret = kvm_msr_ignored_check(vcpu, index, 0, false);
+               if (kvm_msr_ignored_check(vcpu, index, 0, false))
+                       ret = 0;
        }
 
        return ret;
@@ -1662,9 +1664,9 @@ static int complete_emulated_wrmsr(struct kvm_vcpu *vcpu)
 static u64 kvm_msr_reason(int r)
 {
        switch (r) {
-       case -ENOENT:
+       case KVM_MSR_RET_INVALID:
                return KVM_MSR_EXIT_REASON_UNKNOWN;
-       case -EPERM:
+       case KVM_MSR_RET_FILTERED:
                return KVM_MSR_EXIT_REASON_FILTER;
        default:
                return KVM_MSR_EXIT_REASON_INVAL;
@@ -1965,7 +1967,7 @@ static void kvm_write_system_time(struct kvm_vcpu *vcpu, gpa_t system_time,
        struct kvm_arch *ka = &vcpu->kvm->arch;
 
        if (vcpu->vcpu_id == 0 && !host_initiated) {
-               if (ka->boot_vcpu_runs_old_kvmclock && old_msr)
+               if (ka->boot_vcpu_runs_old_kvmclock != old_msr)
                        kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
 
                ka->boot_vcpu_runs_old_kvmclock = old_msr;
@@ -3063,9 +3065,9 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                        /* Values other than LBR and BTF are vendor-specific,
                           thus reserved and should throw a #GP */
                        return 1;
-               }
-               vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
-                           __func__, data);
+               } else if (report_ignored_msrs)
+                       vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
+                                   __func__, data);
                break;
        case 0x200 ... 0x2ff:
                return kvm_mtrr_set_msr(vcpu, msr, data);
@@ -3463,29 +3465,63 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                msr_info->data = vcpu->arch.efer;
                break;
        case MSR_KVM_WALL_CLOCK:
+               if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE))
+                       return 1;
+
+               msr_info->data = vcpu->kvm->arch.wall_clock;
+               break;
        case MSR_KVM_WALL_CLOCK_NEW:
+               if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2))
+                       return 1;
+
                msr_info->data = vcpu->kvm->arch.wall_clock;
                break;
        case MSR_KVM_SYSTEM_TIME:
+               if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE))
+                       return 1;
+
+               msr_info->data = vcpu->arch.time;
+               break;
        case MSR_KVM_SYSTEM_TIME_NEW:
+               if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2))
+                       return 1;
+
                msr_info->data = vcpu->arch.time;
                break;
        case MSR_KVM_ASYNC_PF_EN:
+               if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF))
+                       return 1;
+
                msr_info->data = vcpu->arch.apf.msr_en_val;
                break;
        case MSR_KVM_ASYNC_PF_INT:
+               if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
+                       return 1;
+
                msr_info->data = vcpu->arch.apf.msr_int_val;
                break;
        case MSR_KVM_ASYNC_PF_ACK:
+               if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF))
+                       return 1;
+
                msr_info->data = 0;
                break;
        case MSR_KVM_STEAL_TIME:
+               if (!guest_pv_has(vcpu, KVM_FEATURE_STEAL_TIME))
+                       return 1;
+
                msr_info->data = vcpu->arch.st.msr_val;
                break;
        case MSR_KVM_PV_EOI_EN:
+               if (!guest_pv_has(vcpu, KVM_FEATURE_PV_EOI))
+                       return 1;
+
                msr_info->data = vcpu->arch.pv_eoi.msr_val;
                break;
        case MSR_KVM_POLL_CONTROL:
+               if (!guest_pv_has(vcpu, KVM_FEATURE_POLL_CONTROL))
+                       return 1;
+
                msr_info->data = vcpu->arch.msr_kvm_poll_control;
                break;
        case MSR_IA32_P5_MC_ADDR:
@@ -4575,6 +4611,8 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
 
        case KVM_CAP_ENFORCE_PV_FEATURE_CPUID:
                vcpu->arch.pv_cpuid.enforce = cap->args[0];
+               if (vcpu->arch.pv_cpuid.enforce)
+                       kvm_update_pv_runtime(vcpu);
 
                return 0;
 
index 3900ab0..e7ca622 100644 (file)
@@ -376,7 +376,13 @@ int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
 int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);
 bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);
 
-#define  KVM_MSR_RET_INVALID  2
+/*
+ * Internal error codes that are used to indicate that MSR emulation encountered
+ * an error that should result in #GP in the guest, unless userspace
+ * handles it.
+ */
+#define  KVM_MSR_RET_INVALID   2       /* in-kernel MSR emulation #GP condition */
+#define  KVM_MSR_RET_FILTERED  3       /* #GP due to userspace MSR filter */
 
 #define __cr4_reserved_bits(__cpu_has, __c)             \
 ({                                                      \
index 037faac..1e299ac 100644 (file)
@@ -16,8 +16,6 @@
  * to a jmp to memcpy_erms which does the REP; MOVSB mem copy.
  */
 
-.weak memcpy
-
 /*
  * memcpy - Copy a memory block.
  *
@@ -30,7 +28,7 @@
  * rax original destination
  */
 SYM_FUNC_START_ALIAS(__memcpy)
-SYM_FUNC_START_LOCAL(memcpy)
+SYM_FUNC_START_WEAK(memcpy)
        ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
                      "jmp memcpy_erms", X86_FEATURE_ERMS
 
index 7ff00ea..41902fe 100644 (file)
@@ -24,9 +24,7 @@
  * Output:
  * rax: dest
  */
-.weak memmove
-
-SYM_FUNC_START_ALIAS(memmove)
+SYM_FUNC_START_WEAK(memmove)
 SYM_FUNC_START(__memmove)
 
        mov %rdi, %rax
index 9ff15ee..0bfd26e 100644 (file)
@@ -6,8 +6,6 @@
 #include <asm/alternative-asm.h>
 #include <asm/export.h>
 
-.weak memset
-
 /*
  * ISO C memset - set a memory block to a byte value. This function uses fast
  * string to get better performance than the original function. The code is
@@ -19,7 +17,7 @@
  *
  * rax   original destination
  */
-SYM_FUNC_START_ALIAS(memset)
+SYM_FUNC_START_WEAK(memset)
 SYM_FUNC_START(__memset)
        /*
         * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended
index bc96457..a322a7b 100644 (file)
@@ -578,7 +578,7 @@ acpi_video_bqc_value_to_level(struct acpi_video_device *device,
                                ACPI_VIDEO_FIRST_LEVEL - 1 - bqc_value;
 
                level = device->brightness->levels[bqc_value +
-                                                  ACPI_VIDEO_FIRST_LEVEL];
+                                                  ACPI_VIDEO_FIRST_LEVEL];
        } else {
                level = bqc_value;
        }
@@ -990,8 +990,8 @@ set_level:
                goto out_free_levels;
 
        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-                         "found %d brightness levels\n",
-                         br->count - ACPI_VIDEO_FIRST_LEVEL));
+                         "found %d brightness levels\n",
+                         br->count - ACPI_VIDEO_FIRST_LEVEL));
        return 0;
 
 out_free_levels:
index cab4af5..08ee1c7 100644 (file)
@@ -987,7 +987,7 @@ static int acpi_battery_update(struct acpi_battery *battery, bool resume)
         */
        if ((battery->state & ACPI_BATTERY_STATE_CRITICAL) ||
            (test_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags) &&
-            (battery->capacity_now <= battery->alarm)))
+            (battery->capacity_now <= battery->alarm)))
                acpi_pm_wakeup_event(&battery->device->dev);
 
        return result;
index 0761529..0d93a5e 100644 (file)
@@ -89,7 +89,18 @@ static const struct dmi_system_id dmi_lid_quirks[] = {
                 */
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "E2215T MD60198"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "E2215T"),
+               },
+               .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
+       },
+       {
+               /*
+                * Medion Akoya E2228T, notification of the LID device only
+                * happens on close, not on open and _LID always returns closed.
+                */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "E2228T"),
                },
                .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
        },
index 4c1992f..5fca182 100644 (file)
@@ -106,6 +106,7 @@ static int pch_fivr_remove(struct platform_device *pdev)
 
 static const struct acpi_device_id pch_fivr_device_ids[] = {
        {"INTC1045", 0},
+       {"INTC1049", 0},
        {"", 0},
 };
 MODULE_DEVICE_TABLE(acpi, pch_fivr_device_ids);
index 0674130..a24d5d7 100644 (file)
@@ -229,6 +229,8 @@ static const struct acpi_device_id int3407_device_ids[] = {
        {"INT3532", 0},
        {"INTC1047", 0},
        {"INTC1050", 0},
+       {"INTC1060", 0},
+       {"INTC1061", 0},
        {"", 0},
 };
 MODULE_DEVICE_TABLE(acpi, int3407_device_ids);
index 8d420c7..d14025a 100644 (file)
@@ -25,10 +25,16 @@ static const struct acpi_device_id int340x_thermal_device_ids[] = {
        {"INT340A"},
        {"INT340B"},
        {"INTC1040"},
+       {"INTC1041"},
        {"INTC1043"},
        {"INTC1044"},
        {"INTC1045"},
+       {"INTC1046"},
        {"INTC1047"},
+       {"INTC1048"},
+       {"INTC1049"},
+       {"INTC1060"},
+       {"INTC1061"},
        {""},
 };
 
index 1706439..92e59f4 100644 (file)
@@ -31,7 +31,7 @@ int acpi_notifier_call_chain(struct acpi_device *dev, u32 type, u32 data)
        event.type = type;
        event.data = data;
        return (blocking_notifier_call_chain(&acpi_chain_head, 0, (void *)&event)
-                        == NOTIFY_BAD) ? -EINVAL : 0;
+                       == NOTIFY_BAD) ? -EINVAL : 0;
 }
 EXPORT_SYMBOL(acpi_notifier_call_chain);
 
index b1a7f8d..fe6b679 100644 (file)
@@ -101,7 +101,7 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,
 
        switch (gsi) {
        case 0 ... 255:
-               sprintf(ev_name, "_%c%02hhX",
+               sprintf(ev_name, "_%c%02X",
                        trigger == ACPI_EDGE_SENSITIVE ? 'E' : 'L', gsi);
 
                if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle)))
index 6287338..48354f8 100644 (file)
@@ -27,6 +27,7 @@ static const struct acpi_device_id fan_device_ids[] = {
        {"PNP0C0B", 0},
        {"INT3404", 0},
        {"INTC1044", 0},
+       {"INTC1048", 0},
        {"", 0},
 };
 MODULE_DEVICE_TABLE(acpi, fan_device_ids);
index 43411a7..e3638ba 100644 (file)
@@ -134,7 +134,7 @@ int acpi_add_power_resource(acpi_handle handle);
 void acpi_power_add_remove_device(struct acpi_device *adev, bool add);
 int acpi_power_wakeup_list_init(struct list_head *list, int *system_level);
 int acpi_device_sleep_wake(struct acpi_device *dev,
-                           int enable, int sleep_state, int dev_state);
+                          int enable, int sleep_state, int dev_state);
 int acpi_power_get_inferred_state(struct acpi_device *device, int *state);
 int acpi_power_on_resources(struct acpi_device *device, int state);
 int acpi_power_transition(struct acpi_device *device, int state);
index 3a3c209..4426082 100644 (file)
@@ -2175,10 +2175,10 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
  * these commands.
  */
 enum nfit_aux_cmds {
-        NFIT_CMD_TRANSLATE_SPA = 5,
-        NFIT_CMD_ARS_INJECT_SET = 7,
-        NFIT_CMD_ARS_INJECT_CLEAR = 8,
-        NFIT_CMD_ARS_INJECT_GET = 9,
+       NFIT_CMD_TRANSLATE_SPA = 5,
+       NFIT_CMD_ARS_INJECT_SET = 7,
+       NFIT_CMD_ARS_INJECT_CLEAR = 8,
+       NFIT_CMD_ARS_INJECT_GET = 9,
 };
 
 static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
@@ -2632,7 +2632,7 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
        nfit_blk->bdw_offset = nfit_mem->bdw->offset;
        mmio = &nfit_blk->mmio[BDW];
        mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address,
-                        nfit_mem->spa_bdw->length, nd_blk_memremap_flags(ndbr));
+                       nfit_mem->spa_bdw->length, nd_blk_memremap_flags(ndbr));
        if (!mmio->addr.base) {
                dev_dbg(dev, "%s failed to map bdw\n",
                                nvdimm_name(nvdimm));
index dea8a60..14ee631 100644 (file)
@@ -175,7 +175,7 @@ static int acpi_pci_irq_check_entry(acpi_handle handle, struct pci_dev *dev,
         * configure the IRQ assigned to this slot|dev|pin.  The 'source_index'
         * indicates which resource descriptor in the resource template (of
         * the link device) this interrupt is allocated from.
-        * 
+        *
         * NOTE: Don't query the Link Device for IRQ information at this time
         *       because Link Device enumeration may not have occurred yet
         *       (e.g. exists somewhere 'below' this _PRT entry in the ACPI
index 606da5d..fb4c563 100644 (file)
@@ -6,8 +6,8 @@
  *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
  *  Copyright (C) 2002       Dominik Brodowski <devel@brodo.de>
  *
- * TBD: 
- *      1. Support more than one IRQ resource entry per link device (index).
+ * TBD:
+ *     1. Support more than one IRQ resource entry per link device (index).
  *     2. Implement start/stop mechanism and use ACPI Bus Driver facilities
  *        for IRQ management (e.g. start()->_SRS).
  */
@@ -249,8 +249,8 @@ static int acpi_pci_link_get_current(struct acpi_pci_link *link)
                }
        }
 
-       /* 
-        * Query and parse _CRS to get the current IRQ assignment. 
+       /*
+        * Query and parse _CRS to get the current IRQ assignment.
         */
 
        status = acpi_walk_resources(link->device->handle, METHOD_NAME__CRS,
@@ -396,7 +396,7 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq)
 /*
  * "acpi_irq_balance" (default in APIC mode) enables ACPI to use PIC Interrupt
  * Link Devices to move the PIRQs around to minimize sharing.
- * 
+ *
  * "acpi_irq_nobalance" (default in PIC mode) tells ACPI not to move any PIC IRQs
  * that the BIOS has already set to active.  This is necessary because
  * ACPI has no automatic means of knowing what ISA IRQs are used.  Note that
@@ -414,7 +414,7 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq)
  *
  * Note that PCI IRQ routers have a list of possible IRQs,
  * which may not include the IRQs this table says are available.
- * 
+ *
  * Since this heuristic can't tell the difference between a link
  * that no device will attach to, vs. a link which may be shared
  * by multiple active devices -- it is not optimal.
index 7ddd57a..95f23ac 100644 (file)
@@ -173,7 +173,7 @@ static int pci_mcfg_quirk_matches(struct mcfg_fixup *f, u16 segment,
 {
        if (!memcmp(f->oem_id, mcfg_oem_id, ACPI_OEM_ID_SIZE) &&
            !memcmp(f->oem_table_id, mcfg_oem_table_id,
-                   ACPI_OEM_TABLE_ID_SIZE) &&
+                   ACPI_OEM_TABLE_ID_SIZE) &&
            f->oem_revision == mcfg_oem_revision &&
            f->segment == segment &&
            resource_contains(&f->bus_range, bus_range))
index 837b875..8048da8 100644 (file)
@@ -13,7 +13,7 @@
  * 1. via "Device Specific (D-State) Control"
  * 2. via "Power Resource Control".
  * The code below deals with ACPI Power Resources control.
- * 
+ *
  * An ACPI "power resource object" represents a software controllable power
  * plane, clock plane, or other resource depended on by a device.
  *
@@ -645,7 +645,7 @@ int acpi_power_wakeup_list_init(struct list_head *list, int *system_level_p)
  * -ENODEV if the execution of either _DSW or _PSW has failed
  */
 int acpi_device_sleep_wake(struct acpi_device *dev,
-                           int enable, int sleep_state, int dev_state)
+                          int enable, int sleep_state, int dev_state)
 {
        union acpi_object in_arg[3];
        struct acpi_object_list arg_list = { 3, in_arg };
@@ -690,7 +690,7 @@ int acpi_device_sleep_wake(struct acpi_device *dev,
 
 /*
  * Prepare a wakeup device, two steps (Ref ACPI 2.0:P229):
- * 1. Power on the power resources required for the wakeup device 
+ * 1. Power on the power resources required for the wakeup device
  * 2. Execute _DSW (Device Sleep Wake) or (deprecated in ACPI 3.0) _PSW (Power
  *    State Wake) for the device, if present
  */
index 5909e8f..b04a689 100644 (file)
@@ -354,7 +354,7 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr)
                                  (u32) px->control, (u32) px->status));
 
                /*
-                * Check that ACPI's u64 MHz will be valid as u32 KHz in cpufreq
+                * Check that ACPI's u64 MHz will be valid as u32 KHz in cpufreq
                 */
                if (!px->core_frequency ||
                    ((u32)(px->core_frequency * 1000) !=
@@ -627,7 +627,7 @@ int acpi_processor_preregister_performance(
                goto err_ret;
 
        /*
-        * Now that we have _PSD data from all CPUs, lets setup P-state 
+        * Now that we have _PSD data from all CPUs, lets setup P-state
         * domain info.
         */
        for_each_possible_cpu(i) {
@@ -693,7 +693,7 @@ int acpi_processor_preregister_performance(
                        if (match_pdomain->domain != pdomain->domain)
                                continue;
 
-                       match_pr->performance->shared_type = 
+                       match_pr->performance->shared_type =
                                        pr->performance->shared_type;
                        cpumask_copy(match_pr->performance->shared_cpu_map,
                                     pr->performance->shared_cpu_map);
index f158b8c..e6d9f4d 100644 (file)
@@ -366,7 +366,7 @@ static int acpi_battery_get_state(struct acpi_battery *battery)
                                         state_readers[i].mode,
                                         ACPI_SBS_BATTERY,
                                         state_readers[i].command,
-                                        (u8 *)battery +
+                                        (u8 *)battery +
                                                state_readers[i].offset);
                if (result)
                        goto end;
index 87b74e9..53c2862 100644 (file)
@@ -176,7 +176,7 @@ int acpi_smbus_write(struct acpi_smb_hc *hc, u8 protocol, u8 address,
 EXPORT_SYMBOL_GPL(acpi_smbus_write);
 
 int acpi_smbus_register_callback(struct acpi_smb_hc *hc,
-                                smbus_alarm_callback callback, void *context)
+                                smbus_alarm_callback callback, void *context)
 {
        mutex_lock(&hc->lock);
        hc->callback = callback;
index c3522bb..695c390 100644 (file)
@@ -24,9 +24,9 @@ enum acpi_sbs_device_addr {
 typedef void (*smbus_alarm_callback)(void *context);
 
 extern int acpi_smbus_read(struct acpi_smb_hc *hc, u8 protocol, u8 address,
-              u8 command, u8 * data);
+               u8 command, u8 *data);
 extern int acpi_smbus_write(struct acpi_smb_hc *hc, u8 protocol, u8 slave_address,
-               u8 command, u8 * data, u8 length);
+               u8 command, u8 *data, u8 length);
 extern int acpi_smbus_register_callback(struct acpi_smb_hc *hc,
-                                smbus_alarm_callback callback, void *context);
+               smbus_alarm_callback callback, void *context);
 extern int acpi_smbus_unregister_callback(struct acpi_smb_hc *hc);
index a896e5e..bc6a79e 100644 (file)
@@ -1453,7 +1453,7 @@ int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset,
 }
 
 /**
- * acpi_dma_configure - Set-up DMA configuration for the device.
+ * acpi_dma_configure_id - Set-up DMA configuration for the device.
  * @dev: The pointer to the device
  * @attr: device dma attributes
  * @input_id: input device id const value pointer
index 3a032af..4f5463b 100644 (file)
@@ -178,14 +178,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
                DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"),
                },
        },
-        {
-         .callback = video_detect_force_video,
-         .ident = "ThinkPad X201T",
-         .matches = {
-                DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-                DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201T"),
-                },
-        },
+       {
+        .callback = video_detect_force_video,
+        .ident = "ThinkPad X201T",
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+               DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201T"),
+               },
+       },
 
        /* The native backlight controls do not work on some older machines */
        {
index f89dd9a..b02bf77 100644 (file)
@@ -44,7 +44,7 @@ void acpi_enable_wakeup_devices(u8 sleep_state)
                if (!dev->wakeup.flags.valid
                    || sleep_state > (u32) dev->wakeup.sleep_state
                    || !(device_may_wakeup(&dev->dev)
-                       || dev->wakeup.prepare_count))
+                        || dev->wakeup.prepare_count))
                        continue;
 
                if (device_may_wakeup(&dev->dev))
@@ -69,7 +69,7 @@ void acpi_disable_wakeup_devices(u8 sleep_state)
                if (!dev->wakeup.flags.valid
                    || sleep_state > (u32) dev->wakeup.sleep_state
                    || !(device_may_wakeup(&dev->dev)
-                       || dev->wakeup.prepare_count))
+                        || dev->wakeup.prepare_count))
                        continue;
 
                acpi_set_gpe_wake_mask(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
index cfd00ad..c24d9b5 100644 (file)
@@ -47,7 +47,7 @@ struct nullb_device {
        unsigned int nr_zones_closed;
        struct blk_zone *zones;
        sector_t zone_size_sects;
-       spinlock_t zone_dev_lock;
+       spinlock_t zone_lock;
        unsigned long *zone_locks;
 
        unsigned long size; /* device size in MB */
index 8775acb..beb34b4 100644 (file)
@@ -46,11 +46,20 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
        if (!dev->zones)
                return -ENOMEM;
 
-       spin_lock_init(&dev->zone_dev_lock);
-       dev->zone_locks = bitmap_zalloc(dev->nr_zones, GFP_KERNEL);
-       if (!dev->zone_locks) {
-               kvfree(dev->zones);
-               return -ENOMEM;
+       /*
+        * With memory backing, the zone_lock spinlock needs to be temporarily
+        * released to avoid scheduling in atomic context. To guarantee zone
+        * information protection, use a bitmap to lock zones with
+        * wait_on_bit_lock_io(). Sleeping on the lock is OK as memory backing
+        * implies that the queue is marked with BLK_MQ_F_BLOCKING.
+        */
+       spin_lock_init(&dev->zone_lock);
+       if (dev->memory_backed) {
+               dev->zone_locks = bitmap_zalloc(dev->nr_zones, GFP_KERNEL);
+               if (!dev->zone_locks) {
+                       kvfree(dev->zones);
+                       return -ENOMEM;
+               }
        }
 
        if (dev->zone_nr_conv >= dev->nr_zones) {
@@ -137,12 +146,17 @@ void null_free_zoned_dev(struct nullb_device *dev)
 
 static inline void null_lock_zone(struct nullb_device *dev, unsigned int zno)
 {
-       wait_on_bit_lock_io(dev->zone_locks, zno, TASK_UNINTERRUPTIBLE);
+       if (dev->memory_backed)
+               wait_on_bit_lock_io(dev->zone_locks, zno, TASK_UNINTERRUPTIBLE);
+       spin_lock_irq(&dev->zone_lock);
 }
 
 static inline void null_unlock_zone(struct nullb_device *dev, unsigned int zno)
 {
-       clear_and_wake_up_bit(zno, dev->zone_locks);
+       spin_unlock_irq(&dev->zone_lock);
+
+       if (dev->memory_backed)
+               clear_and_wake_up_bit(zno, dev->zone_locks);
 }
 
 int null_report_zones(struct gendisk *disk, sector_t sector,
@@ -322,7 +336,6 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
                return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
 
        null_lock_zone(dev, zno);
-       spin_lock(&dev->zone_dev_lock);
 
        switch (zone->cond) {
        case BLK_ZONE_COND_FULL:
@@ -375,9 +388,17 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
        if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
                zone->cond = BLK_ZONE_COND_IMP_OPEN;
 
-       spin_unlock(&dev->zone_dev_lock);
+       /*
+        * Memory backing allocation may sleep: release the zone_lock spinlock
+        * to avoid scheduling in atomic context. Zone operation atomicity is
+        * still guaranteed through the zone_locks bitmap.
+        */
+       if (dev->memory_backed)
+               spin_unlock_irq(&dev->zone_lock);
        ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
-       spin_lock(&dev->zone_dev_lock);
+       if (dev->memory_backed)
+               spin_lock_irq(&dev->zone_lock);
+
        if (ret != BLK_STS_OK)
                goto unlock;
 
@@ -392,7 +413,6 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
        ret = BLK_STS_OK;
 
 unlock:
-       spin_unlock(&dev->zone_dev_lock);
        null_unlock_zone(dev, zno);
 
        return ret;
@@ -516,9 +536,7 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
                        null_lock_zone(dev, i);
                        zone = &dev->zones[i];
                        if (zone->cond != BLK_ZONE_COND_EMPTY) {
-                               spin_lock(&dev->zone_dev_lock);
                                null_reset_zone(dev, zone);
-                               spin_unlock(&dev->zone_dev_lock);
                                trace_nullb_zone_op(cmd, i, zone->cond);
                        }
                        null_unlock_zone(dev, i);
@@ -530,7 +548,6 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
        zone = &dev->zones[zone_no];
 
        null_lock_zone(dev, zone_no);
-       spin_lock(&dev->zone_dev_lock);
 
        switch (op) {
        case REQ_OP_ZONE_RESET:
@@ -550,8 +567,6 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
                break;
        }
 
-       spin_unlock(&dev->zone_dev_lock);
-
        if (ret == BLK_STS_OK)
                trace_nullb_zone_op(cmd, zone_no, zone->cond);
 
index 336b5e9..1e7e3f2 100644 (file)
@@ -2254,7 +2254,7 @@ static int cpufreq_init_governor(struct cpufreq_policy *policy)
                return -EINVAL;
 
        /* Platform doesn't want dynamic frequency switching ? */
-       if (policy->governor->dynamic_switching &&
+       if (policy->governor->flags & CPUFREQ_GOV_DYNAMIC_SWITCHING &&
            cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) {
                struct cpufreq_governor *gov = cpufreq_fallback_governor();
 
@@ -2280,6 +2280,8 @@ static int cpufreq_init_governor(struct cpufreq_policy *policy)
                }
        }
 
+       policy->strict_target = !!(policy->governor->flags & CPUFREQ_GOV_STRICT_TARGET);
+
        return 0;
 }
 
index c56773c..bab8e61 100644 (file)
@@ -156,7 +156,7 @@ void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy);
 #define CPUFREQ_DBS_GOVERNOR_INITIALIZER(_name_)                       \
        {                                                               \
                .name = _name_,                                         \
-               .dynamic_switching = true,                              \
+               .flags = CPUFREQ_GOV_DYNAMIC_SWITCHING,                 \
                .owner = THIS_MODULE,                                   \
                .init = cpufreq_dbs_governor_init,                      \
                .exit = cpufreq_dbs_governor_exit,                      \
index 71c1d9a..addd93f 100644 (file)
@@ -20,6 +20,7 @@ static void cpufreq_gov_performance_limits(struct cpufreq_policy *policy)
 static struct cpufreq_governor cpufreq_gov_performance = {
        .name           = "performance",
        .owner          = THIS_MODULE,
+       .flags          = CPUFREQ_GOV_STRICT_TARGET,
        .limits         = cpufreq_gov_performance_limits,
 };
 
index 7749522..8d830d8 100644 (file)
@@ -21,6 +21,7 @@ static struct cpufreq_governor cpufreq_gov_powersave = {
        .name           = "powersave",
        .limits         = cpufreq_gov_powersave_limits,
        .owner          = THIS_MODULE,
+       .flags          = CPUFREQ_GOV_STRICT_TARGET,
 };
 
 MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
index b7a9779..36a3ccf 100644 (file)
@@ -2527,7 +2527,7 @@ static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, in
 }
 
 static void intel_cpufreq_adjust_hwp(struct cpudata *cpu, u32 target_pstate,
-                                    bool fast_switch)
+                                    bool strict, bool fast_switch)
 {
        u64 prev = READ_ONCE(cpu->hwp_req_cached), value = prev;
 
@@ -2539,7 +2539,7 @@ static void intel_cpufreq_adjust_hwp(struct cpudata *cpu, u32 target_pstate,
         * field in it, so opportunistically update the max too if needed.
         */
        value &= ~HWP_MAX_PERF(~0L);
-       value |= HWP_MAX_PERF(cpu->max_perf_ratio);
+       value |= HWP_MAX_PERF(strict ? target_pstate : cpu->max_perf_ratio);
 
        if (value == prev)
                return;
@@ -2562,14 +2562,16 @@ static void intel_cpufreq_adjust_perf_ctl(struct cpudata *cpu,
                              pstate_funcs.get_val(cpu, target_pstate));
 }
 
-static int intel_cpufreq_update_pstate(struct cpudata *cpu, int target_pstate,
-                                      bool fast_switch)
+static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy,
+                                      int target_pstate, bool fast_switch)
 {
+       struct cpudata *cpu = all_cpu_data[policy->cpu];
        int old_pstate = cpu->pstate.current_pstate;
 
        target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
        if (hwp_active) {
-               intel_cpufreq_adjust_hwp(cpu, target_pstate, fast_switch);
+               intel_cpufreq_adjust_hwp(cpu, target_pstate,
+                                        policy->strict_target, fast_switch);
                cpu->pstate.current_pstate = target_pstate;
        } else if (target_pstate != old_pstate) {
                intel_cpufreq_adjust_perf_ctl(cpu, target_pstate, fast_switch);
@@ -2609,7 +2611,7 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy,
                break;
        }
 
-       target_pstate = intel_cpufreq_update_pstate(cpu, target_pstate, false);
+       target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, false);
 
        freqs.new = target_pstate * cpu->pstate.scaling;
 
@@ -2628,7 +2630,7 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
 
        target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
 
-       target_pstate = intel_cpufreq_update_pstate(cpu, target_pstate, true);
+       target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true);
 
        return target_pstate * cpu->pstate.scaling;
 }
index a4f473e..a97a9d0 100644 (file)
@@ -733,7 +733,7 @@ config I2C_LPC2K
 
 config I2C_MLXBF
         tristate "Mellanox BlueField I2C controller"
-        depends on ARM64
+        depends on MELLANOX_PLATFORM && ARM64
         help
           Enabling this option will add I2C SMBus support for Mellanox BlueField
           system.
index 44974b5..0d15f4c 100644 (file)
@@ -159,7 +159,6 @@ static int i2c_dw_irq_handler_slave(struct dw_i2c_dev *dev)
        u32 raw_stat, stat, enabled, tmp;
        u8 val = 0, slave_activity;
 
-       regmap_read(dev->map, DW_IC_INTR_STAT, &stat);
        regmap_read(dev->map, DW_IC_ENABLE, &enabled);
        regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &raw_stat);
        regmap_read(dev->map, DW_IC_STATUS, &tmp);
@@ -168,32 +167,30 @@ static int i2c_dw_irq_handler_slave(struct dw_i2c_dev *dev)
        if (!enabled || !(raw_stat & ~DW_IC_INTR_ACTIVITY) || !dev->slave)
                return 0;
 
+       stat = i2c_dw_read_clear_intrbits_slave(dev);
        dev_dbg(dev->dev,
                "%#x STATUS SLAVE_ACTIVITY=%#x : RAW_INTR_STAT=%#x : INTR_STAT=%#x\n",
                enabled, slave_activity, raw_stat, stat);
 
-       if ((stat & DW_IC_INTR_RX_FULL) && (stat & DW_IC_INTR_STOP_DET))
-               i2c_slave_event(dev->slave, I2C_SLAVE_WRITE_REQUESTED, &val);
+       if (stat & DW_IC_INTR_RX_FULL) {
+               if (dev->status != STATUS_WRITE_IN_PROGRESS) {
+                       dev->status = STATUS_WRITE_IN_PROGRESS;
+                       i2c_slave_event(dev->slave, I2C_SLAVE_WRITE_REQUESTED,
+                                       &val);
+               }
+
+               regmap_read(dev->map, DW_IC_DATA_CMD, &tmp);
+               val = tmp;
+               if (!i2c_slave_event(dev->slave, I2C_SLAVE_WRITE_RECEIVED,
+                                    &val))
+                       dev_vdbg(dev->dev, "Byte %X acked!", val);
+       }
 
        if (stat & DW_IC_INTR_RD_REQ) {
                if (slave_activity) {
-                       if (stat & DW_IC_INTR_RX_FULL) {
-                               regmap_read(dev->map, DW_IC_DATA_CMD, &tmp);
-                               val = tmp;
-
-                               if (!i2c_slave_event(dev->slave,
-                                                    I2C_SLAVE_WRITE_RECEIVED,
-                                                    &val)) {
-                                       dev_vdbg(dev->dev, "Byte %X acked!",
-                                                val);
-                               }
-                               regmap_read(dev->map, DW_IC_CLR_RD_REQ, &tmp);
-                               stat = i2c_dw_read_clear_intrbits_slave(dev);
-                       } else {
-                               regmap_read(dev->map, DW_IC_CLR_RD_REQ, &tmp);
-                               regmap_read(dev->map, DW_IC_CLR_RX_UNDER, &tmp);
-                               stat = i2c_dw_read_clear_intrbits_slave(dev);
-                       }
+                       regmap_read(dev->map, DW_IC_CLR_RD_REQ, &tmp);
+
+                       dev->status = STATUS_READ_IN_PROGRESS;
                        if (!i2c_slave_event(dev->slave,
                                             I2C_SLAVE_READ_REQUESTED,
                                             &val))
@@ -205,21 +202,11 @@ static int i2c_dw_irq_handler_slave(struct dw_i2c_dev *dev)
                if (!i2c_slave_event(dev->slave, I2C_SLAVE_READ_PROCESSED,
                                     &val))
                        regmap_read(dev->map, DW_IC_CLR_RX_DONE, &tmp);
-
-               i2c_slave_event(dev->slave, I2C_SLAVE_STOP, &val);
-               stat = i2c_dw_read_clear_intrbits_slave(dev);
-               return 1;
        }
 
-       if (stat & DW_IC_INTR_RX_FULL) {
-               regmap_read(dev->map, DW_IC_DATA_CMD, &tmp);
-               val = tmp;
-               if (!i2c_slave_event(dev->slave, I2C_SLAVE_WRITE_RECEIVED,
-                                    &val))
-                       dev_vdbg(dev->dev, "Byte %X acked!", val);
-       } else {
+       if (stat & DW_IC_INTR_STOP_DET) {
+               dev->status = STATUS_IDLE;
                i2c_slave_event(dev->slave, I2C_SLAVE_STOP, &val);
-               stat = i2c_dw_read_clear_intrbits_slave(dev);
        }
 
        return 1;
@@ -230,7 +217,6 @@ static irqreturn_t i2c_dw_isr_slave(int this_irq, void *dev_id)
        struct dw_i2c_dev *dev = dev_id;
        int ret;
 
-       i2c_dw_read_clear_intrbits_slave(dev);
        ret = i2c_dw_irq_handler_slave(dev);
        if (ret > 0)
                complete(&dev->cmd_complete);
index ee59e0d..33574d4 100644 (file)
  * Master. Default value is set to 400MHz.
  */
 #define MLXBF_I2C_TYU_PLL_OUT_FREQ  (400 * 1000 * 1000)
-/* Reference clock for Bluefield 1 - 156 MHz. */
-#define MLXBF_I2C_TYU_PLL_IN_FREQ   (156 * 1000 * 1000)
-/* Reference clock for BlueField 2 - 200 MHz. */
-#define MLXBF_I2C_YU_PLL_IN_FREQ    (200 * 1000 * 1000)
+/* Reference clock for Bluefield - 156 MHz. */
+#define MLXBF_I2C_PLL_IN_FREQ       (156 * 1000 * 1000)
 
 /* Constant used to determine the PLL frequency. */
 #define MLNXBF_I2C_COREPLL_CONST    16384
@@ -489,44 +487,6 @@ static struct mutex mlxbf_i2c_bus_lock;
 
 #define MLXBF_I2C_FREQUENCY_1GHZ  1000000000
 
-static void mlxbf_i2c_write(void __iomem *io, int reg, u32 val)
-{
-       writel(val, io + reg);
-}
-
-static u32 mlxbf_i2c_read(void __iomem *io, int reg)
-{
-       return readl(io + reg);
-}
-
-/*
- * This function is used to read data from Master GW Data Descriptor.
- * Data bytes in the Master GW Data Descriptor are shifted left so the
- * data starts at the MSB of the descriptor registers as set by the
- * underlying hardware. TYU_READ_DATA enables byte swapping while
- * reading data bytes, and MUST be called by the SMBus read routines
- * to copy data from the 32 * 32-bit HW Data registers a.k.a Master GW
- * Data Descriptor.
- */
-static u32 mlxbf_i2c_read_data(void __iomem *io, int reg)
-{
-       return (u32)be32_to_cpu(mlxbf_i2c_read(io, reg));
-}
-
-/*
- * This function is used to write data to the Master GW Data Descriptor.
- * Data copied to the Master GW Data Descriptor MUST be shifted left so
- * the data starts at the MSB of the descriptor registers as required by
- * the underlying hardware. TYU_WRITE_DATA enables byte swapping when
- * writing data bytes, and MUST be called by the SMBus write routines to
- * copy data to the 32 * 32-bit HW Data registers a.k.a Master GW Data
- * Descriptor.
- */
-static void mlxbf_i2c_write_data(void __iomem *io, int reg, u32 val)
-{
-       mlxbf_i2c_write(io, reg, (u32)cpu_to_be32(val));
-}
-
 /*
  * Function to poll a set of bits at a specific address; it checks whether
  * the bits are equal to zero when eq_zero is set to 'true', and not equal
@@ -541,7 +501,7 @@ static u32 mlxbf_smbus_poll(void __iomem *io, u32 addr, u32 mask,
        timeout = (timeout / MLXBF_I2C_POLL_FREQ_IN_USEC) + 1;
 
        do {
-               bits = mlxbf_i2c_read(io, addr) & mask;
+               bits = readl(io + addr) & mask;
                if (eq_zero ? bits == 0 : bits != 0)
                        return eq_zero ? 1 : bits;
                udelay(MLXBF_I2C_POLL_FREQ_IN_USEC);
@@ -609,16 +569,16 @@ static int mlxbf_i2c_smbus_check_status(struct mlxbf_i2c_priv *priv)
                         MLXBF_I2C_SMBUS_TIMEOUT);
 
        /* Read cause status bits. */
-       cause_status_bits = mlxbf_i2c_read(priv->mst_cause->io,
-                                          MLXBF_I2C_CAUSE_ARBITER);
+       cause_status_bits = readl(priv->mst_cause->io +
+                                       MLXBF_I2C_CAUSE_ARBITER);
        cause_status_bits &= MLXBF_I2C_CAUSE_MASTER_ARBITER_BITS_MASK;
 
        /*
         * Parse both Cause and Master GW bits, then return transaction status.
         */
 
-       master_status_bits = mlxbf_i2c_read(priv->smbus->io,
-                                           MLXBF_I2C_SMBUS_MASTER_STATUS);
+       master_status_bits = readl(priv->smbus->io +
+                                       MLXBF_I2C_SMBUS_MASTER_STATUS);
        master_status_bits &= MLXBF_I2C_SMBUS_MASTER_STATUS_MASK;
 
        if (mlxbf_i2c_smbus_transaction_success(master_status_bits,
@@ -649,10 +609,17 @@ static void mlxbf_i2c_smbus_write_data(struct mlxbf_i2c_priv *priv,
 
        aligned_length = round_up(length, 4);
 
-       /* Copy data bytes from 4-byte aligned source buffer. */
+       /*
+        * Copy data bytes from 4-byte aligned source buffer.
+        * Data copied to the Master GW Data Descriptor MUST be shifted
+        * left so the data starts at the MSB of the descriptor registers
+        * as required by the underlying hardware. Enable byte swapping
+        * when writing data bytes to the 32 * 32-bit HW Data registers
+        * a.k.a Master GW Data Descriptor.
+        */
        for (offset = 0; offset < aligned_length; offset += sizeof(u32)) {
                data32 = *((u32 *)(data + offset));
-               mlxbf_i2c_write_data(priv->smbus->io, addr + offset, data32);
+               iowrite32be(data32, priv->smbus->io + addr + offset);
        }
 }
 
@@ -664,15 +631,23 @@ static void mlxbf_i2c_smbus_read_data(struct mlxbf_i2c_priv *priv,
 
        mask = sizeof(u32) - 1;
 
+       /*
+        * Data bytes in the Master GW Data Descriptor are shifted left
+        * so the data starts at the MSB of the descriptor registers as
+        * set by the underlying hardware. Enable byte swapping while
+        * reading data bytes from the 32 * 32-bit HW Data registers
+        * a.k.a Master GW Data Descriptor.
+        */
+
        for (offset = 0; offset < (length & ~mask); offset += sizeof(u32)) {
-               data32 = mlxbf_i2c_read_data(priv->smbus->io, addr + offset);
+               data32 = ioread32be(priv->smbus->io + addr + offset);
                *((u32 *)(data + offset)) = data32;
        }
 
        if (!(length & mask))
                return;
 
-       data32 = mlxbf_i2c_read_data(priv->smbus->io, addr + offset);
+       data32 = ioread32be(priv->smbus->io + addr + offset);
 
        for (byte = 0; byte < (length & mask); byte++) {
                data[offset + byte] = data32 & GENMASK(7, 0);
@@ -698,16 +673,16 @@ static int mlxbf_i2c_smbus_enable(struct mlxbf_i2c_priv *priv, u8 slave,
        command |= rol32(pec_en, MLXBF_I2C_MASTER_SEND_PEC_SHIFT);
 
        /* Clear status bits. */
-       mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_MASTER_STATUS, 0x0);
+       writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_STATUS);
        /* Set the cause data. */
-       mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_CAUSE_OR_CLEAR, ~0x0);
+       writel(~0x0, priv->smbus->io + MLXBF_I2C_CAUSE_OR_CLEAR);
        /* Zero PEC byte. */
-       mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_MASTER_PEC, 0x0);
+       writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_PEC);
        /* Zero byte count. */
-       mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_RS_BYTES, 0x0);
+       writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_RS_BYTES);
 
        /* GW activation. */
-       mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_MASTER_GW, command);
+       writel(command, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_GW);
 
        /*
         * Poll master status and check status bits. An ACK is sent when
@@ -823,8 +798,8 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv,
                 * needs to be 'manually' reset. This should be removed in
                 * next tag integration.
                 */
-               mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_MASTER_FSM,
-                               MLXBF_I2C_SMBUS_MASTER_FSM_PS_STATE_MASK);
+               writel(MLXBF_I2C_SMBUS_MASTER_FSM_PS_STATE_MASK,
+                       priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_FSM);
        }
 
        return ret;
@@ -1113,8 +1088,8 @@ static void mlxbf_i2c_set_timings(struct mlxbf_i2c_priv *priv,
        timer |= mlxbf_i2c_set_timer(priv, timings->scl_low,
                                     false, MLXBF_I2C_MASK_16,
                                     MLXBF_I2C_SHIFT_16);
-       mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_TIMER_SCL_LOW_SCL_HIGH,
-                         timer);
+       writel(timer, priv->smbus->io +
+               MLXBF_I2C_SMBUS_TIMER_SCL_LOW_SCL_HIGH);
 
        timer = mlxbf_i2c_set_timer(priv, timings->sda_rise, false,
                                    MLXBF_I2C_MASK_8, MLXBF_I2C_SHIFT_0);
@@ -1124,37 +1099,34 @@ static void mlxbf_i2c_set_timings(struct mlxbf_i2c_priv *priv,
                                     MLXBF_I2C_MASK_8, MLXBF_I2C_SHIFT_16);
        timer |= mlxbf_i2c_set_timer(priv, timings->scl_fall, false,
                                     MLXBF_I2C_MASK_8, MLXBF_I2C_SHIFT_24);
-       mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_TIMER_FALL_RISE_SPIKE,
-                         timer);
+       writel(timer, priv->smbus->io +
+               MLXBF_I2C_SMBUS_TIMER_FALL_RISE_SPIKE);
 
        timer = mlxbf_i2c_set_timer(priv, timings->hold_start, true,
                                    MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_0);
        timer |= mlxbf_i2c_set_timer(priv, timings->hold_data, true,
                                     MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_16);
-       mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_TIMER_THOLD, timer);
+       writel(timer, priv->smbus->io + MLXBF_I2C_SMBUS_TIMER_THOLD);
 
        timer = mlxbf_i2c_set_timer(priv, timings->setup_start, true,
                                    MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_0);
        timer |= mlxbf_i2c_set_timer(priv, timings->setup_stop, true,
                                     MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_16);
-       mlxbf_i2c_write(priv->smbus->io,
-                       MLXBF_I2C_SMBUS_TIMER_TSETUP_START_STOP, timer);
+       writel(timer, priv->smbus->io +
+               MLXBF_I2C_SMBUS_TIMER_TSETUP_START_STOP);
 
        timer = mlxbf_i2c_set_timer(priv, timings->setup_data, true,
                                    MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_0);
-       mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_TIMER_TSETUP_DATA,
-                         timer);
+       writel(timer, priv->smbus->io + MLXBF_I2C_SMBUS_TIMER_TSETUP_DATA);
 
        timer = mlxbf_i2c_set_timer(priv, timings->buf, false,
                                    MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_0);
        timer |= mlxbf_i2c_set_timer(priv, timings->thigh_max, false,
                                     MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_16);
-       mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_THIGH_MAX_TBUF,
-                       timer);
+       writel(timer, priv->smbus->io + MLXBF_I2C_SMBUS_THIGH_MAX_TBUF);
 
        timer = timings->timeout;
-       mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_SCL_LOW_TIMEOUT,
-                       timer);
+       writel(timer, priv->smbus->io + MLXBF_I2C_SMBUS_SCL_LOW_TIMEOUT);
 }
 
 enum mlxbf_i2c_timings_config {
@@ -1426,19 +1398,15 @@ static int mlxbf_i2c_init_master(struct platform_device *pdev,
         * platform firmware; disabling the bus might compromise the system
         * functionality.
         */
-       config_reg = mlxbf_i2c_read(gpio_res->io,
-                                   MLXBF_I2C_GPIO_0_FUNC_EN_0);
+       config_reg = readl(gpio_res->io + MLXBF_I2C_GPIO_0_FUNC_EN_0);
        config_reg = MLXBF_I2C_GPIO_SMBUS_GW_ASSERT_PINS(priv->bus,
                                                         config_reg);
-       mlxbf_i2c_write(gpio_res->io, MLXBF_I2C_GPIO_0_FUNC_EN_0,
-                       config_reg);
+       writel(config_reg, gpio_res->io + MLXBF_I2C_GPIO_0_FUNC_EN_0);
 
-       config_reg = mlxbf_i2c_read(gpio_res->io,
-                                   MLXBF_I2C_GPIO_0_FORCE_OE_EN);
+       config_reg = readl(gpio_res->io + MLXBF_I2C_GPIO_0_FORCE_OE_EN);
        config_reg = MLXBF_I2C_GPIO_SMBUS_GW_RESET_PINS(priv->bus,
                                                        config_reg);
-       mlxbf_i2c_write(gpio_res->io, MLXBF_I2C_GPIO_0_FORCE_OE_EN,
-                       config_reg);
+       writel(config_reg, gpio_res->io + MLXBF_I2C_GPIO_0_FORCE_OE_EN);
 
        mutex_unlock(gpio_res->lock);
 
@@ -1452,10 +1420,9 @@ static u64 mlxbf_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res)
        u32 corepll_val;
        u16 core_f;
 
-       pad_frequency = MLXBF_I2C_TYU_PLL_IN_FREQ;
+       pad_frequency = MLXBF_I2C_PLL_IN_FREQ;
 
-       corepll_val = mlxbf_i2c_read(corepll_res->io,
-                                    MLXBF_I2C_CORE_PLL_REG1);
+       corepll_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG1);
 
        /* Get Core PLL configuration bits. */
        core_f = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_F_TYU_SHIFT) &
@@ -1488,12 +1455,10 @@ static u64 mlxbf_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res)
        u8 core_od, core_r;
        u32 core_f;
 
-       pad_frequency = MLXBF_I2C_YU_PLL_IN_FREQ;
+       pad_frequency = MLXBF_I2C_PLL_IN_FREQ;
 
-       corepll_reg1_val = mlxbf_i2c_read(corepll_res->io,
-                                         MLXBF_I2C_CORE_PLL_REG1);
-       corepll_reg2_val = mlxbf_i2c_read(corepll_res->io,
-                                         MLXBF_I2C_CORE_PLL_REG2);
+       corepll_reg1_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG1);
+       corepll_reg2_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG2);
 
        /* Get Core PLL configuration bits */
        core_f = rol32(corepll_reg1_val, MLXBF_I2C_COREPLL_CORE_F_YU_SHIFT) &
@@ -1585,7 +1550,7 @@ static int mlxbf_slave_enable(struct mlxbf_i2c_priv *priv, u8 addr)
         * (7-bit address, 1 status bit (1 if enabled, 0 if not)).
         */
        for (reg = 0; reg < reg_cnt; reg++) {
-               slave_reg = mlxbf_i2c_read(priv->smbus->io,
+               slave_reg = readl(priv->smbus->io +
                                MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG + reg * 0x4);
                /*
                 * Each register holds 4 slave addresses. So, we have to keep
@@ -1643,8 +1608,8 @@ static int mlxbf_slave_enable(struct mlxbf_i2c_priv *priv, u8 addr)
 
        /* Enable the slave address and update the register. */
        slave_reg |= (1 << MLXBF_I2C_SMBUS_SLAVE_ADDR_EN_BIT) << (byte * 8);
-       mlxbf_i2c_write(priv->smbus->io,
-                       MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG + reg * 0x4, slave_reg);
+       writel(slave_reg, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG +
+               reg * 0x4);
 
        return 0;
 }
@@ -1668,7 +1633,7 @@ static int mlxbf_slave_disable(struct mlxbf_i2c_priv *priv)
         * (7-bit address, 1 status bit (1 if enabled, 0 if not)).
         */
        for (reg = 0; reg < reg_cnt; reg++) {
-               slave_reg = mlxbf_i2c_read(priv->smbus->io,
+               slave_reg = readl(priv->smbus->io +
                                MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG + reg * 0x4);
 
                /* Check whether the address slots are empty. */
@@ -1708,8 +1673,8 @@ static int mlxbf_slave_disable(struct mlxbf_i2c_priv *priv)
 
        /* Cleanup the slave address slot. */
        slave_reg &= ~(GENMASK(7, 0) << (slave_byte * 8));
-       mlxbf_i2c_write(priv->smbus->io,
-                       MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG + reg * 0x4, slave_reg);
+       writel(slave_reg, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG +
+               reg * 0x4);
 
        return 0;
 }
@@ -1801,7 +1766,7 @@ static int mlxbf_i2c_init_slave(struct platform_device *pdev,
        int ret;
 
        /* Reset FSM. */
-       mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_SLAVE_FSM, 0);
+       writel(0, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_FSM);
 
        /*
         * Enable slave cause interrupt bits. Drive
@@ -1810,15 +1775,13 @@ static int mlxbf_i2c_init_slave(struct platform_device *pdev,
         * masters issue a Read and Write, respectively. But, clear all
         * interrupts first.
         */
-       mlxbf_i2c_write(priv->slv_cause->io,
-                         MLXBF_I2C_CAUSE_OR_CLEAR, ~0);
+       writel(~0, priv->slv_cause->io + MLXBF_I2C_CAUSE_OR_CLEAR);
        int_reg = MLXBF_I2C_CAUSE_READ_WAIT_FW_RESPONSE;
        int_reg |= MLXBF_I2C_CAUSE_WRITE_SUCCESS;
-       mlxbf_i2c_write(priv->slv_cause->io,
-                         MLXBF_I2C_CAUSE_OR_EVTEN0, int_reg);
+       writel(int_reg, priv->slv_cause->io + MLXBF_I2C_CAUSE_OR_EVTEN0);
 
        /* Finally, set the 'ready' bit to start handling transactions. */
-       mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_SLAVE_READY, 0x1);
+       writel(0x1, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_READY);
 
        /* Initialize the cause coalesce resource. */
        ret = mlxbf_i2c_init_coalesce(pdev, priv);
@@ -1844,23 +1807,21 @@ static bool mlxbf_i2c_has_coalesce(struct mlxbf_i2c_priv *priv, bool *read,
                                MLXBF_I2C_CAUSE_YU_SLAVE_BIT :
                                priv->bus + MLXBF_I2C_CAUSE_TYU_SLAVE_BIT;
 
-       coalesce0_reg = mlxbf_i2c_read(priv->coalesce->io,
-                                      MLXBF_I2C_CAUSE_COALESCE_0);
+       coalesce0_reg = readl(priv->coalesce->io + MLXBF_I2C_CAUSE_COALESCE_0);
        is_set = coalesce0_reg & (1 << slave_shift);
 
        if (!is_set)
                return false;
 
        /* Check the source of the interrupt, i.e. whether a Read or Write. */
-       cause_reg = mlxbf_i2c_read(priv->slv_cause->io,
-                                    MLXBF_I2C_CAUSE_ARBITER);
+       cause_reg = readl(priv->slv_cause->io + MLXBF_I2C_CAUSE_ARBITER);
        if (cause_reg & MLXBF_I2C_CAUSE_READ_WAIT_FW_RESPONSE)
                *read = true;
        else if (cause_reg & MLXBF_I2C_CAUSE_WRITE_SUCCESS)
                *write = true;
 
        /* Clear cause bits. */
-       mlxbf_i2c_write(priv->slv_cause->io, MLXBF_I2C_CAUSE_OR_CLEAR, ~0x0);
+       writel(~0x0, priv->slv_cause->io + MLXBF_I2C_CAUSE_OR_CLEAR);
 
        return true;
 }
@@ -1900,8 +1861,8 @@ static int mlxbf_smbus_irq_send(struct mlxbf_i2c_priv *priv, u8 recv_bytes)
         * address, if supplied.
         */
        if (recv_bytes > 0) {
-               data32 = mlxbf_i2c_read_data(priv->smbus->io,
-                                            MLXBF_I2C_SLAVE_DATA_DESC_ADDR);
+               data32 = ioread32be(priv->smbus->io +
+                                       MLXBF_I2C_SLAVE_DATA_DESC_ADDR);
 
                /* Parse the received bytes. */
                switch (recv_bytes) {
@@ -1966,7 +1927,7 @@ static int mlxbf_smbus_irq_send(struct mlxbf_i2c_priv *priv, u8 recv_bytes)
        control32 |= rol32(write_size, MLXBF_I2C_SLAVE_WRITE_BYTES_SHIFT);
        control32 |= rol32(pec_en, MLXBF_I2C_SLAVE_SEND_PEC_SHIFT);
 
-       mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_SLAVE_GW, control32);
+       writel(control32, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_GW);
 
        /*
         * Wait until the transfer is completed; the driver will wait
@@ -1975,10 +1936,9 @@ static int mlxbf_smbus_irq_send(struct mlxbf_i2c_priv *priv, u8 recv_bytes)
        mlxbf_smbus_slave_wait_for_idle(priv, MLXBF_I2C_SMBUS_TIMEOUT);
 
        /* Release the Slave GW. */
-       mlxbf_i2c_write(priv->smbus->io,
-                       MLXBF_I2C_SMBUS_SLAVE_RS_MASTER_BYTES, 0x0);
-       mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_SLAVE_PEC, 0x0);
-       mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_SLAVE_READY, 0x1);
+       writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_RS_MASTER_BYTES);
+       writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_PEC);
+       writel(0x1, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_READY);
 
        return 0;
 }
@@ -2023,10 +1983,9 @@ static int mlxbf_smbus_irq_recv(struct mlxbf_i2c_priv *priv, u8 recv_bytes)
        i2c_slave_event(slave, I2C_SLAVE_STOP, &value);
 
        /* Release the Slave GW. */
-       mlxbf_i2c_write(priv->smbus->io,
-                       MLXBF_I2C_SMBUS_SLAVE_RS_MASTER_BYTES, 0x0);
-       mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_SLAVE_PEC, 0x0);
-       mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_SLAVE_READY, 0x1);
+       writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_RS_MASTER_BYTES);
+       writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_PEC);
+       writel(0x1, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_READY);
 
        return ret;
 }
@@ -2061,8 +2020,8 @@ static irqreturn_t mlxbf_smbus_irq(int irq, void *ptr)
         * slave, if the higher 8 bits are sent then the slave expect N bytes
         * from the master.
         */
-       rw_bytes_reg = mlxbf_i2c_read(priv->smbus->io,
-                                     MLXBF_I2C_SMBUS_SLAVE_RS_MASTER_BYTES);
+       rw_bytes_reg = readl(priv->smbus->io +
+                               MLXBF_I2C_SMBUS_SLAVE_RS_MASTER_BYTES);
        recv_bytes = (rw_bytes_reg >> 8) & GENMASK(7, 0);
 
        /*
@@ -2264,6 +2223,7 @@ static const struct of_device_id mlxbf_i2c_dt_ids[] = {
 
 MODULE_DEVICE_TABLE(of, mlxbf_i2c_dt_ids);
 
+#ifdef CONFIG_ACPI
 static const struct acpi_device_id mlxbf_i2c_acpi_ids[] = {
        { "MLNXBF03", (kernel_ulong_t)&mlxbf_i2c_chip[MLXBF_I2C_CHIP_TYPE_1] },
        { "MLNXBF23", (kernel_ulong_t)&mlxbf_i2c_chip[MLXBF_I2C_CHIP_TYPE_2] },
@@ -2305,6 +2265,12 @@ static int mlxbf_i2c_acpi_probe(struct device *dev, struct mlxbf_i2c_priv *priv)
 
        return ret;
 }
+#else
+static int mlxbf_i2c_acpi_probe(struct device *dev, struct mlxbf_i2c_priv *priv)
+{
+       return -ENOENT;
+}
+#endif /* CONFIG_ACPI */
 
 static int mlxbf_i2c_of_probe(struct device *dev, struct mlxbf_i2c_priv *priv)
 {
@@ -2473,7 +2439,9 @@ static struct platform_driver mlxbf_i2c_driver = {
        .driver = {
                .name = "i2c-mlxbf",
                .of_match_table = mlxbf_i2c_dt_ids,
+#ifdef CONFIG_ACPI
                .acpi_match_table = ACPI_PTR(mlxbf_i2c_acpi_ids),
+#endif /* CONFIG_ACPI  */
        },
 };
 
@@ -2502,5 +2470,5 @@ static void __exit mlxbf_i2c_exit(void)
 module_exit(mlxbf_i2c_exit);
 
 MODULE_DESCRIPTION("Mellanox BlueField I2C bus driver");
-MODULE_AUTHOR("Khalil Blaiech <kblaiech@mellanox.com>");
+MODULE_AUTHOR("Khalil Blaiech <kblaiech@nvidia.com>");
 MODULE_LICENSE("GPL v2");
index 0cbdfbe..33de99b 100644 (file)
@@ -475,6 +475,10 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
 {
        u16 control_reg;
 
+       writel(I2C_DMA_HARD_RST, i2c->pdmabase + OFFSET_RST);
+       udelay(50);
+       writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
+
        mtk_i2c_writew(i2c, I2C_SOFT_RST, OFFSET_SOFTRESET);
 
        /* Set ioconfig */
@@ -529,10 +533,6 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
 
        mtk_i2c_writew(i2c, control_reg, OFFSET_CONTROL);
        mtk_i2c_writew(i2c, I2C_DELAY_LEN, OFFSET_DELAY_LEN);
-
-       writel(I2C_DMA_HARD_RST, i2c->pdmabase + OFFSET_RST);
-       udelay(50);
-       writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
 }
 
 static const struct i2c_spec_values *mtk_i2c_get_spec(unsigned int speed)
index cab7255..bdd6077 100644 (file)
@@ -129,6 +129,7 @@ struct sh_mobile_i2c_data {
        int sr;
        bool send_stop;
        bool stop_after_dma;
+       bool atomic_xfer;
 
        struct resource *res;
        struct dma_chan *dma_tx;
@@ -330,13 +331,15 @@ static unsigned char i2c_op(struct sh_mobile_i2c_data *pd, enum sh_mobile_i2c_op
                ret = iic_rd(pd, ICDR);
                break;
        case OP_RX_STOP: /* enable DTE interrupt, issue stop */
-               iic_wr(pd, ICIC,
-                      ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE);
+               if (!pd->atomic_xfer)
+                       iic_wr(pd, ICIC,
+                              ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE);
                iic_wr(pd, ICCR, ICCR_ICE | ICCR_RACK);
                break;
        case OP_RX_STOP_DATA: /* enable DTE interrupt, read data, issue stop */
-               iic_wr(pd, ICIC,
-                      ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE);
+               if (!pd->atomic_xfer)
+                       iic_wr(pd, ICIC,
+                              ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE);
                ret = iic_rd(pd, ICDR);
                iic_wr(pd, ICCR, ICCR_ICE | ICCR_RACK);
                break;
@@ -429,7 +432,8 @@ static irqreturn_t sh_mobile_i2c_isr(int irq, void *dev_id)
 
        if (wakeup) {
                pd->sr |= SW_DONE;
-               wake_up(&pd->wait);
+               if (!pd->atomic_xfer)
+                       wake_up(&pd->wait);
        }
 
        /* defeat write posting to avoid spurious WAIT interrupts */
@@ -581,6 +585,9 @@ static void start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg,
        pd->pos = -1;
        pd->sr = 0;
 
+       if (pd->atomic_xfer)
+               return;
+
        pd->dma_buf = i2c_get_dma_safe_msg_buf(pd->msg, 8);
        if (pd->dma_buf)
                sh_mobile_i2c_xfer_dma(pd);
@@ -637,15 +644,13 @@ static int poll_busy(struct sh_mobile_i2c_data *pd)
        return i ? 0 : -ETIMEDOUT;
 }
 
-static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
-                             struct i2c_msg *msgs,
-                             int num)
+static int sh_mobile_xfer(struct sh_mobile_i2c_data *pd,
+                        struct i2c_msg *msgs, int num)
 {
-       struct sh_mobile_i2c_data *pd = i2c_get_adapdata(adapter);
        struct i2c_msg  *msg;
        int err = 0;
        int i;
-       long timeout;
+       long time_left;
 
        /* Wake up device and enable clock */
        pm_runtime_get_sync(pd->dev);
@@ -662,15 +667,35 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
                if (do_start)
                        i2c_op(pd, OP_START);
 
-               /* The interrupt handler takes care of the rest... */
-               timeout = wait_event_timeout(pd->wait,
-                                      pd->sr & (ICSR_TACK | SW_DONE),
-                                      adapter->timeout);
-
-               /* 'stop_after_dma' tells if DMA transfer was complete */
-               i2c_put_dma_safe_msg_buf(pd->dma_buf, pd->msg, pd->stop_after_dma);
+               if (pd->atomic_xfer) {
+                       unsigned long j = jiffies + pd->adap.timeout;
+
+                       time_left = time_before_eq(jiffies, j);
+                       while (time_left &&
+                              !(pd->sr & (ICSR_TACK | SW_DONE))) {
+                               unsigned char sr = iic_rd(pd, ICSR);
+
+                               if (sr & (ICSR_AL   | ICSR_TACK |
+                                         ICSR_WAIT | ICSR_DTE)) {
+                                       sh_mobile_i2c_isr(0, pd);
+                                       udelay(150);
+                               } else {
+                                       cpu_relax();
+                               }
+                               time_left = time_before_eq(jiffies, j);
+                       }
+               } else {
+                       /* The interrupt handler takes care of the rest... */
+                       time_left = wait_event_timeout(pd->wait,
+                                       pd->sr & (ICSR_TACK | SW_DONE),
+                                       pd->adap.timeout);
+
+                       /* 'stop_after_dma' tells if DMA xfer was complete */
+                       i2c_put_dma_safe_msg_buf(pd->dma_buf, pd->msg,
+                                                pd->stop_after_dma);
+               }
 
-               if (!timeout) {
+               if (!time_left) {
                        dev_err(pd->dev, "Transfer request timed out\n");
                        if (pd->dma_direction != DMA_NONE)
                                sh_mobile_i2c_cleanup_dma(pd);
@@ -696,14 +721,35 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
        return err ?: num;
 }
 
+static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
+                             struct i2c_msg *msgs,
+                             int num)
+{
+       struct sh_mobile_i2c_data *pd = i2c_get_adapdata(adapter);
+
+       pd->atomic_xfer = false;
+       return sh_mobile_xfer(pd, msgs, num);
+}
+
+static int sh_mobile_i2c_xfer_atomic(struct i2c_adapter *adapter,
+                                    struct i2c_msg *msgs,
+                                    int num)
+{
+       struct sh_mobile_i2c_data *pd = i2c_get_adapdata(adapter);
+
+       pd->atomic_xfer = true;
+       return sh_mobile_xfer(pd, msgs, num);
+}
+
 static u32 sh_mobile_i2c_func(struct i2c_adapter *adapter)
 {
        return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING;
 }
 
 static const struct i2c_algorithm sh_mobile_i2c_algorithm = {
-       .functionality  = sh_mobile_i2c_func,
-       .master_xfer    = sh_mobile_i2c_xfer,
+       .functionality = sh_mobile_i2c_func,
+       .master_xfer = sh_mobile_i2c_xfer,
+       .master_xfer_atomic = sh_mobile_i2c_xfer_atomic,
 };
 
 static const struct i2c_adapter_quirks sh_mobile_i2c_quirks = {
index 1b1ca63..c662201 100644 (file)
@@ -3818,9 +3818,8 @@ bounce_map_single(struct device *dev, phys_addr_t paddr, size_t size,
         * page aligned, we don't need to use a bounce page.
         */
        if (!IS_ALIGNED(paddr | size, VTD_PAGE_SIZE)) {
-               tlb_addr = swiotlb_tbl_map_single(dev,
-                               phys_to_dma_unencrypted(dev, io_tlb_start),
-                               paddr, size, aligned_size, dir, attrs);
+               tlb_addr = swiotlb_tbl_map_single(dev, paddr, size,
+                               aligned_size, dir, attrs);
                if (tlb_addr == DMA_MAPPING_ERROR) {
                        goto swiotlb_error;
                } else {
index c6098ee..2aa79c3 100644 (file)
@@ -180,7 +180,6 @@ config IRQ_MIPS_CPU
        select GENERIC_IRQ_CHIP
        select GENERIC_IRQ_IPI if SYS_SUPPORTS_MULTITHREADING
        select IRQ_DOMAIN
-       select IRQ_DOMAIN_HIERARCHY if GENERIC_IRQ_IPI
        select GENERIC_IRQ_EFFECTIVE_AFF_MASK
 
 config CLPS711X_IRQCHIP
@@ -315,7 +314,6 @@ config KEYSTONE_IRQ
 config MIPS_GIC
        bool
        select GENERIC_IRQ_IPI
-       select IRQ_DOMAIN_HIERARCHY
        select MIPS_CM
 
 config INGENIC_IRQ
@@ -591,6 +589,7 @@ config LOONGSON_PCH_MSI
 
 config MST_IRQ
        bool "MStar Interrupt Controller"
+       depends on ARCH_MEDIATEK || ARCH_MSTARV7 || COMPILE_TEST
        default ARCH_MEDIATEK
        select IRQ_DOMAIN
        select IRQ_DOMAIN_HIERARCHY
index 97838eb..cbc7c74 100644 (file)
@@ -244,7 +244,7 @@ static int bcm2836_cpu_dying(unsigned int cpu)
 
 #define BITS_PER_MBOX  32
 
-static void bcm2836_arm_irqchip_smp_init(void)
+static void __init bcm2836_arm_irqchip_smp_init(void)
 {
        struct irq_fwspec ipi_fwspec = {
                .fwnode         = intc.domain->fwnode,
index 4be0775..143657b 100644 (file)
@@ -154,8 +154,8 @@ static const struct irq_domain_ops mst_intc_domain_ops = {
        .free           = irq_domain_free_irqs_common,
 };
 
-int __init
-mst_intc_of_init(struct device_node *dn, struct device_node *parent)
+static int __init mst_intc_of_init(struct device_node *dn,
+                                  struct device_node *parent)
 {
        struct irq_domain *domain, *domain_parent;
        struct mst_intc_chip_data *cd;
index 3819185..cb7f60b 100644 (file)
@@ -71,8 +71,7 @@ struct intc_irqpin_priv {
 };
 
 struct intc_irqpin_config {
-       unsigned int irlm_bit;
-       unsigned needs_irlm:1;
+       int irlm_bit;           /* -1 if non-existent */
 };
 
 static unsigned long intc_irqpin_read32(void __iomem *iomem)
@@ -349,11 +348,10 @@ static const struct irq_domain_ops intc_irqpin_irq_domain_ops = {
 
 static const struct intc_irqpin_config intc_irqpin_irlm_r8a777x = {
        .irlm_bit = 23, /* ICR0.IRLM0 */
-       .needs_irlm = 1,
 };
 
 static const struct intc_irqpin_config intc_irqpin_rmobile = {
-       .needs_irlm = 0,
+       .irlm_bit = -1,
 };
 
 static const struct of_device_id intc_irqpin_dt_ids[] = {
@@ -470,7 +468,7 @@ static int intc_irqpin_probe(struct platform_device *pdev)
        }
 
        /* configure "individual IRQ mode" where needed */
-       if (config && config->needs_irlm) {
+       if (config && config->irlm_bit >= 0) {
                if (io[INTC_IRQPIN_REG_IRLM])
                        intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_IRLM,
                                                      config->irlm_bit, 1, 1);
index eaa3e9f..6f432d2 100644 (file)
@@ -99,7 +99,7 @@ static inline void plic_irq_toggle(const struct cpumask *mask,
                                   struct irq_data *d, int enable)
 {
        int cpu;
-       struct plic_priv *priv = irq_get_chip_data(d->irq);
+       struct plic_priv *priv = irq_data_get_irq_chip_data(d);
 
        writel(enable, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
        for_each_cpu(cpu, mask) {
@@ -115,7 +115,7 @@ static void plic_irq_unmask(struct irq_data *d)
 {
        struct cpumask amask;
        unsigned int cpu;
-       struct plic_priv *priv = irq_get_chip_data(d->irq);
+       struct plic_priv *priv = irq_data_get_irq_chip_data(d);
 
        cpumask_and(&amask, &priv->lmask, cpu_online_mask);
        cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
@@ -127,7 +127,7 @@ static void plic_irq_unmask(struct irq_data *d)
 
 static void plic_irq_mask(struct irq_data *d)
 {
-       struct plic_priv *priv = irq_get_chip_data(d->irq);
+       struct plic_priv *priv = irq_data_get_irq_chip_data(d);
 
        plic_irq_toggle(&priv->lmask, d, 0);
 }
@@ -138,7 +138,7 @@ static int plic_set_affinity(struct irq_data *d,
 {
        unsigned int cpu;
        struct cpumask amask;
-       struct plic_priv *priv = irq_get_chip_data(d->irq);
+       struct plic_priv *priv = irq_data_get_irq_chip_data(d);
 
        cpumask_and(&amask, &priv->lmask, mask_val);
 
@@ -151,7 +151,7 @@ static int plic_set_affinity(struct irq_data *d,
                return -EINVAL;
 
        plic_irq_toggle(&priv->lmask, d, 0);
-       plic_irq_toggle(cpumask_of(cpu), d, 1);
+       plic_irq_toggle(cpumask_of(cpu), d, !irqd_irq_masked(d));
 
        irq_data_update_effective_affinity(d, cpumask_of(cpu));
 
index 0c2c61d..8662d7b 100644 (file)
@@ -195,6 +195,10 @@ static const struct stm32_desc_irq stm32mp1_desc_irq[] = {
        { .exti = 25, .irq_parent = 107, .chip = &stm32_exti_h_chip_direct },
        { .exti = 30, .irq_parent = 52, .chip = &stm32_exti_h_chip_direct },
        { .exti = 47, .irq_parent = 93, .chip = &stm32_exti_h_chip_direct },
+       { .exti = 48, .irq_parent = 138, .chip = &stm32_exti_h_chip_direct },
+       { .exti = 50, .irq_parent = 139, .chip = &stm32_exti_h_chip_direct },
+       { .exti = 52, .irq_parent = 140, .chip = &stm32_exti_h_chip_direct },
+       { .exti = 53, .irq_parent = 141, .chip = &stm32_exti_h_chip_direct },
        { .exti = 54, .irq_parent = 135, .chip = &stm32_exti_h_chip_direct },
        { .exti = 61, .irq_parent = 100, .chip = &stm32_exti_h_chip_direct },
        { .exti = 65, .irq_parent = 144, .chip = &stm32_exti_h_chip },
index e0cceb8..b2ab8db 100644 (file)
@@ -85,6 +85,17 @@ struct ti_sci_inta_vint_desc {
  * @base:              Base address of the memory mapped IO registers
  * @pdev:              Pointer to platform device.
  * @ti_sci_id:         TI-SCI device identifier
+ * @unmapped_cnt:      Number of @unmapped_dev_ids entries
+ * @unmapped_dev_ids:  Pointer to an array of TI-SCI device identifiers of
+ *                     unmapped event sources.
+ *                     Unmapped Events are not part of the Global Event Map and
+ *                     they are converted to Global event within INTA to be
+ *                     they are converted to a Global Event within the INTA to
+ *                     be received by the same INTA to generate an interrupt.
+ *                     In case an interrupt request comes for a device which is
+ *                     generating an Unmapped Event, we must use the INTA's TI-SCI
+ *                     identifier to let sysfw know where it has to program the
+ *                     Global Event number.
  */
 struct ti_sci_inta_irq_domain {
        const struct ti_sci_handle *sci;
@@ -96,11 +107,37 @@ struct ti_sci_inta_irq_domain {
        void __iomem *base;
        struct platform_device *pdev;
        u32 ti_sci_id;
+
+       int unmapped_cnt;
+       u16 *unmapped_dev_ids;
 };
 
 #define to_vint_desc(e, i) container_of(e, struct ti_sci_inta_vint_desc, \
                                        events[i])
 
+static u16 ti_sci_inta_get_dev_id(struct ti_sci_inta_irq_domain *inta, u32 hwirq)
+{
+       u16 dev_id = HWIRQ_TO_DEVID(hwirq);
+       int i;
+
+       if (inta->unmapped_cnt == 0)
+               return dev_id;
+
+       /*
+        * For devices sending Unmapped Events we must use the INTA's TI-SCI
+        * device identifier number to be able to convert it to a Global Event
+        * and map it to an interrupt.
+        */
+       for (i = 0; i < inta->unmapped_cnt; i++) {
+               if (dev_id == inta->unmapped_dev_ids[i]) {
+                       dev_id = inta->ti_sci_id;
+                       break;
+               }
+       }
+
+       return dev_id;
+}
+
 /**
  * ti_sci_inta_irq_handler() - Chained IRQ handler for the vint irqs
  * @desc:      Pointer to irq_desc corresponding to the irq
@@ -251,7 +288,7 @@ static struct ti_sci_inta_event_desc *ti_sci_inta_alloc_event(struct ti_sci_inta
        u16 dev_id, dev_index;
        int err;
 
-       dev_id = HWIRQ_TO_DEVID(hwirq);
+       dev_id = ti_sci_inta_get_dev_id(inta, hwirq);
        dev_index = HWIRQ_TO_IRQID(hwirq);
 
        event_desc = &vint_desc->events[free_bit];
@@ -352,14 +389,15 @@ static void ti_sci_inta_free_irq(struct ti_sci_inta_event_desc *event_desc,
 {
        struct ti_sci_inta_vint_desc *vint_desc;
        struct ti_sci_inta_irq_domain *inta;
+       u16 dev_id;
 
        vint_desc = to_vint_desc(event_desc, event_desc->vint_bit);
        inta = vint_desc->domain->host_data;
+       dev_id = ti_sci_inta_get_dev_id(inta, hwirq);
        /* free event irq */
        mutex_lock(&inta->vint_mutex);
        inta->sci->ops.rm_irq_ops.free_event_map(inta->sci,
-                                                HWIRQ_TO_DEVID(hwirq),
-                                                HWIRQ_TO_IRQID(hwirq),
+                                                dev_id, HWIRQ_TO_IRQID(hwirq),
                                                 inta->ti_sci_id,
                                                 vint_desc->vint_id,
                                                 event_desc->global_event,
@@ -574,6 +612,41 @@ static struct msi_domain_info ti_sci_inta_msi_domain_info = {
        .chip   = &ti_sci_inta_msi_irq_chip,
 };
 
+static int ti_sci_inta_get_unmapped_sources(struct ti_sci_inta_irq_domain *inta)
+{
+       struct device *dev = &inta->pdev->dev;
+       struct device_node *node = dev_of_node(dev);
+       struct of_phandle_iterator it;
+       int count, err, ret, i;
+
+       count = of_count_phandle_with_args(node, "ti,unmapped-event-sources", NULL);
+       if (count <= 0)
+               return 0;
+
+       inta->unmapped_dev_ids = devm_kcalloc(dev, count,
+                                             sizeof(*inta->unmapped_dev_ids),
+                                             GFP_KERNEL);
+       if (!inta->unmapped_dev_ids)
+               return -ENOMEM;
+
+       i = 0;
+       of_for_each_phandle(&it, err, node, "ti,unmapped-event-sources", NULL, 0) {
+               u32 dev_id;
+
+               ret = of_property_read_u32(it.node, "ti,sci-dev-id", &dev_id);
+               if (ret) {
+                       dev_err(dev, "ti,sci-dev-id read failure for %pOFf\n", it.node);
+                       of_node_put(it.node);
+                       return ret;
+               }
+               inta->unmapped_dev_ids[i++] = dev_id;
+       }
+
+       inta->unmapped_cnt = count;
+
+       return 0;
+}
+
 static int ti_sci_inta_irq_domain_probe(struct platform_device *pdev)
 {
        struct irq_domain *parent_domain, *domain, *msi_domain;
@@ -629,6 +702,10 @@ static int ti_sci_inta_irq_domain_probe(struct platform_device *pdev)
        if (IS_ERR(inta->base))
                return PTR_ERR(inta->base);
 
+       ret = ti_sci_inta_get_unmapped_sources(inta);
+       if (ret)
+               return ret;
+
        domain = irq_domain_add_linear(dev_of_node(dev),
                                       ti_sci_get_num_resources(inta->vint),
                                       &ti_sci_inta_irq_domain_ops, inta);
index 376096b..40ca71b 100644 (file)
@@ -4582,8 +4582,7 @@ void nvme_start_queues(struct nvme_ctrl *ctrl)
 }
 EXPORT_SYMBOL_GPL(nvme_start_queues);
 
-
-void nvme_sync_queues(struct nvme_ctrl *ctrl)
+void nvme_sync_io_queues(struct nvme_ctrl *ctrl)
 {
        struct nvme_ns *ns;
 
@@ -4591,7 +4590,12 @@ void nvme_sync_queues(struct nvme_ctrl *ctrl)
        list_for_each_entry(ns, &ctrl->namespaces, list)
                blk_sync_queue(ns->queue);
        up_read(&ctrl->namespaces_rwsem);
+}
+EXPORT_SYMBOL_GPL(nvme_sync_io_queues);
 
+void nvme_sync_queues(struct nvme_ctrl *ctrl)
+{
+       nvme_sync_io_queues(ctrl);
        if (ctrl->admin_q)
                blk_sync_queue(ctrl->admin_q);
 }
index cc11113..bc330bf 100644 (file)
@@ -602,6 +602,7 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl);
 void nvme_start_queues(struct nvme_ctrl *ctrl);
 void nvme_kill_queues(struct nvme_ctrl *ctrl);
 void nvme_sync_queues(struct nvme_ctrl *ctrl);
+void nvme_sync_io_queues(struct nvme_ctrl *ctrl);
 void nvme_unfreeze(struct nvme_ctrl *ctrl);
 void nvme_wait_freeze(struct nvme_ctrl *ctrl);
 int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
index df8f361..0578ff2 100644 (file)
@@ -198,6 +198,7 @@ struct nvme_queue {
        u32 q_depth;
        u16 cq_vector;
        u16 sq_tail;
+       u16 last_sq_tail;
        u16 cq_head;
        u16 qid;
        u8 cq_phase;
@@ -455,11 +456,24 @@ static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
        return 0;
 }
 
-static inline void nvme_write_sq_db(struct nvme_queue *nvmeq)
+/*
+ * Write sq tail if we are asked to, or if the next command would wrap.
+ */
+static inline void nvme_write_sq_db(struct nvme_queue *nvmeq, bool write_sq)
 {
+       if (!write_sq) {
+               u16 next_tail = nvmeq->sq_tail + 1;
+
+               if (next_tail == nvmeq->q_depth)
+                       next_tail = 0;
+               if (next_tail != nvmeq->last_sq_tail)
+                       return;
+       }
+
        if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail,
                        nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei))
                writel(nvmeq->sq_tail, nvmeq->q_db);
+       nvmeq->last_sq_tail = nvmeq->sq_tail;
 }
 
 /**
@@ -476,8 +490,7 @@ static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
               cmd, sizeof(*cmd));
        if (++nvmeq->sq_tail == nvmeq->q_depth)
                nvmeq->sq_tail = 0;
-       if (write_sq)
-               nvme_write_sq_db(nvmeq);
+       nvme_write_sq_db(nvmeq, write_sq);
        spin_unlock(&nvmeq->sq_lock);
 }
 
@@ -486,7 +499,8 @@ static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
        struct nvme_queue *nvmeq = hctx->driver_data;
 
        spin_lock(&nvmeq->sq_lock);
-       nvme_write_sq_db(nvmeq);
+       if (nvmeq->sq_tail != nvmeq->last_sq_tail)
+               nvme_write_sq_db(nvmeq, true);
        spin_unlock(&nvmeq->sq_lock);
 }
 
@@ -1496,6 +1510,7 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
        struct nvme_dev *dev = nvmeq->dev;
 
        nvmeq->sq_tail = 0;
+       nvmeq->last_sq_tail = 0;
        nvmeq->cq_head = 0;
        nvmeq->cq_phase = 1;
        nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
index 541b0cb..65e3d0e 100644 (file)
@@ -122,7 +122,6 @@ struct nvme_rdma_ctrl {
        struct sockaddr_storage src_addr;
 
        struct nvme_ctrl        ctrl;
-       struct mutex            teardown_lock;
        bool                    use_inline_data;
        u32                     io_queues[HCTX_MAX_TYPES];
 };
@@ -1010,8 +1009,8 @@ out_free_io_queues:
 static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
                bool remove)
 {
-       mutex_lock(&ctrl->teardown_lock);
        blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+       blk_sync_queue(ctrl->ctrl.admin_q);
        nvme_rdma_stop_queue(&ctrl->queues[0]);
        if (ctrl->ctrl.admin_tagset) {
                blk_mq_tagset_busy_iter(ctrl->ctrl.admin_tagset,
@@ -1021,16 +1020,15 @@ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
        if (remove)
                blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
        nvme_rdma_destroy_admin_queue(ctrl, remove);
-       mutex_unlock(&ctrl->teardown_lock);
 }
 
 static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
                bool remove)
 {
-       mutex_lock(&ctrl->teardown_lock);
        if (ctrl->ctrl.queue_count > 1) {
                nvme_start_freeze(&ctrl->ctrl);
                nvme_stop_queues(&ctrl->ctrl);
+               nvme_sync_io_queues(&ctrl->ctrl);
                nvme_rdma_stop_io_queues(ctrl);
                if (ctrl->ctrl.tagset) {
                        blk_mq_tagset_busy_iter(ctrl->ctrl.tagset,
@@ -1041,7 +1039,6 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
                        nvme_start_queues(&ctrl->ctrl);
                nvme_rdma_destroy_io_queues(ctrl, remove);
        }
-       mutex_unlock(&ctrl->teardown_lock);
 }
 
 static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
@@ -1976,16 +1973,12 @@ static void nvme_rdma_complete_timed_out(struct request *rq)
 {
        struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
        struct nvme_rdma_queue *queue = req->queue;
-       struct nvme_rdma_ctrl *ctrl = queue->ctrl;
 
-       /* fence other contexts that may complete the command */
-       mutex_lock(&ctrl->teardown_lock);
        nvme_rdma_stop_queue(queue);
-       if (!blk_mq_request_completed(rq)) {
+       if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) {
                nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
                blk_mq_complete_request(rq);
        }
-       mutex_unlock(&ctrl->teardown_lock);
 }
 
 static enum blk_eh_timer_return
@@ -2320,7 +2313,6 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
                return ERR_PTR(-ENOMEM);
        ctrl->ctrl.opts = opts;
        INIT_LIST_HEAD(&ctrl->list);
-       mutex_init(&ctrl->teardown_lock);
 
        if (!(opts->mask & NVMF_OPT_TRSVCID)) {
                opts->trsvcid =
index d6a3e14..c0c3332 100644 (file)
@@ -124,7 +124,6 @@ struct nvme_tcp_ctrl {
        struct sockaddr_storage src_addr;
        struct nvme_ctrl        ctrl;
 
-       struct mutex            teardown_lock;
        struct work_struct      err_work;
        struct delayed_work     connect_work;
        struct nvme_tcp_request async_req;
@@ -1886,8 +1885,8 @@ out_free_queue:
 static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
                bool remove)
 {
-       mutex_lock(&to_tcp_ctrl(ctrl)->teardown_lock);
        blk_mq_quiesce_queue(ctrl->admin_q);
+       blk_sync_queue(ctrl->admin_q);
        nvme_tcp_stop_queue(ctrl, 0);
        if (ctrl->admin_tagset) {
                blk_mq_tagset_busy_iter(ctrl->admin_tagset,
@@ -1897,18 +1896,17 @@ static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
        if (remove)
                blk_mq_unquiesce_queue(ctrl->admin_q);
        nvme_tcp_destroy_admin_queue(ctrl, remove);
-       mutex_unlock(&to_tcp_ctrl(ctrl)->teardown_lock);
 }
 
 static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
                bool remove)
 {
-       mutex_lock(&to_tcp_ctrl(ctrl)->teardown_lock);
        if (ctrl->queue_count <= 1)
-               goto out;
+               return;
        blk_mq_quiesce_queue(ctrl->admin_q);
        nvme_start_freeze(ctrl);
        nvme_stop_queues(ctrl);
+       nvme_sync_io_queues(ctrl);
        nvme_tcp_stop_io_queues(ctrl);
        if (ctrl->tagset) {
                blk_mq_tagset_busy_iter(ctrl->tagset,
@@ -1918,8 +1916,6 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
        if (remove)
                nvme_start_queues(ctrl);
        nvme_tcp_destroy_io_queues(ctrl, remove);
-out:
-       mutex_unlock(&to_tcp_ctrl(ctrl)->teardown_lock);
 }
 
 static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
@@ -2171,14 +2167,11 @@ static void nvme_tcp_complete_timed_out(struct request *rq)
        struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
        struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
 
-       /* fence other contexts that may complete the command */
-       mutex_lock(&to_tcp_ctrl(ctrl)->teardown_lock);
        nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));
-       if (!blk_mq_request_completed(rq)) {
+       if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) {
                nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
                blk_mq_complete_request(rq);
        }
-       mutex_unlock(&to_tcp_ctrl(ctrl)->teardown_lock);
 }
 
 static enum blk_eh_timer_return
@@ -2455,7 +2448,6 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
                        nvme_tcp_reconnect_ctrl_work);
        INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
        INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);
-       mutex_init(&ctrl->teardown_lock);
 
        if (!(opts->mask & NVMF_OPT_TRSVCID)) {
                opts->trsvcid =
index f808c5f..3f0b8e2 100644 (file)
@@ -367,9 +367,9 @@ static void create_power_zone_common_attributes(
                                        &dev_attr_max_energy_range_uj.attr;
        if (power_zone->ops->get_energy_uj) {
                if (power_zone->ops->reset_energy_uj)
-                       dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
+                       dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUSR;
                else
-                       dev_attr_energy_uj.attr.mode = S_IRUGO;
+                       dev_attr_energy_uj.attr.mode = S_IRUSR;
                power_zone->zone_dev_attrs[count++] =
                                        &dev_attr_energy_uj.attr;
        }
index f32da0c..308bda2 100644 (file)
@@ -658,8 +658,8 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
                                        rcu_read_lock();
                                        list_for_each_entry_rcu(h,
                                                &tmp_pg->dh_list, node) {
-                                               /* h->sdev should always be valid */
-                                               BUG_ON(!h->sdev);
+                                               if (!h->sdev)
+                                                       continue;
                                                h->sdev->access_state = desc[0];
                                        }
                                        rcu_read_unlock();
@@ -705,7 +705,8 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
                        pg->expiry = 0;
                        rcu_read_lock();
                        list_for_each_entry_rcu(h, &pg->dh_list, node) {
-                               BUG_ON(!h->sdev);
+                               if (!h->sdev)
+                                       continue;
                                h->sdev->access_state =
                                        (pg->state & SCSI_ACCESS_STATE_MASK);
                                if (pg->pref)
@@ -1147,7 +1148,6 @@ static void alua_bus_detach(struct scsi_device *sdev)
        spin_lock(&h->pg_lock);
        pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock));
        rcu_assign_pointer(h->pg, NULL);
-       h->sdev = NULL;
        spin_unlock(&h->pg_lock);
        if (pg) {
                spin_lock_irq(&pg->lock);
@@ -1156,6 +1156,7 @@ static void alua_bus_detach(struct scsi_device *sdev)
                kref_put(&pg->kref, release_port_group);
        }
        sdev->handler_data = NULL;
+       synchronize_rcu();
        kfree(h);
 }
 
index 83ce4f1..8df70c9 100644 (file)
@@ -8855,7 +8855,7 @@ reinit_after_soft_reset:
        /* hook into SCSI subsystem */
        rc = hpsa_scsi_add_host(h);
        if (rc)
-               goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
+               goto clean8; /* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */
 
        /* Monitor the controller for firmware lockups */
        h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
@@ -8870,6 +8870,8 @@ reinit_after_soft_reset:
                                HPSA_EVENT_MONITOR_INTERVAL);
        return 0;
 
+clean8: /* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */
+       kfree(h->lastlogicals);
 clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
        hpsa_free_performant_mode(h);
        h->access.set_intr_mask(h, HPSA_INTR_OFF);
index 93230cd..e4cc92b 100644 (file)
@@ -1740,6 +1740,13 @@ _base_irqpoll(struct irq_poll *irqpoll, int budget)
                reply_q->irq_poll_scheduled = false;
                reply_q->irq_line_enable = true;
                enable_irq(reply_q->os_irq);
+               /*
+                * Go for one more round of processing the
+                * reply descriptor post queue in case the HBA
+                * firmware has posted some reply descriptors
+                * while re-enabling the IRQ.
+                */
+               _base_process_reply_queue(reply_q);
        }
 
        return num_entries;
index 41f4120..fa876e2 100644 (file)
@@ -317,7 +317,7 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios,
         */
        baud = tty_termios_baud_rate(termios);
 
-       serial8250_do_set_termios(port, termios, old);
+       serial8250_do_set_termios(port, termios, NULL);
 
        tty_termios_encode_baud_rate(termios, baud, baud);
 
index 1044fc3..28f22e5 100644 (file)
@@ -522,6 +522,7 @@ config SERIAL_IMX_EARLYCON
        depends on OF
        select SERIAL_EARLYCON
        select SERIAL_CORE_CONSOLE
+       default y if SERIAL_IMX_CONSOLE
        help
          If you have enabled the earlycon on the Freescale IMX
          CPU you can make it the earlycon by answering Y to this option.
index b4d89e3..7a07e72 100644 (file)
@@ -1280,6 +1280,9 @@ static int __init serial_txx9_init(void)
 
 #ifdef ENABLE_SERIAL_TXX9_PCI
        ret = pci_register_driver(&serial_txx9_pci_driver);
+       if (ret) {
+               platform_driver_unregister(&serial_txx9_plat_driver);
+       }
 #endif
        if (ret == 0)
                goto out;
index 7a4c025..9f8b9a5 100644 (file)
@@ -1515,10 +1515,12 @@ static void release_tty(struct tty_struct *tty, int idx)
                tty->ops->shutdown(tty);
        tty_save_termios(tty);
        tty_driver_remove_tty(tty->driver, tty);
-       tty->port->itty = NULL;
+       if (tty->port)
+               tty->port->itty = NULL;
        if (tty->link)
                tty->link->port->itty = NULL;
-       tty_buffer_cancel_work(tty->port);
+       if (tty->port)
+               tty_buffer_cancel_work(tty->port);
        if (tty->link)
                tty_buffer_cancel_work(tty->link->port);
 
index 9506a76..d04a162 100644 (file)
@@ -4704,27 +4704,6 @@ static int con_font_default(struct vc_data *vc, struct console_font_op *op)
        return rc;
 }
 
-static int con_font_copy(struct vc_data *vc, struct console_font_op *op)
-{
-       int con = op->height;
-       int rc;
-
-
-       console_lock();
-       if (vc->vc_mode != KD_TEXT)
-               rc = -EINVAL;
-       else if (!vc->vc_sw->con_font_copy)
-               rc = -ENOSYS;
-       else if (con < 0 || !vc_cons_allocated(con))
-               rc = -ENOTTY;
-       else if (con == vc->vc_num)     /* nothing to do */
-               rc = 0;
-       else
-               rc = vc->vc_sw->con_font_copy(vc, con);
-       console_unlock();
-       return rc;
-}
-
 int con_font_op(struct vc_data *vc, struct console_font_op *op)
 {
        switch (op->op) {
@@ -4735,7 +4714,8 @@ int con_font_op(struct vc_data *vc, struct console_font_op *op)
        case KD_FONT_OP_SET_DEFAULT:
                return con_font_default(vc, op);
        case KD_FONT_OP_COPY:
-               return con_font_copy(vc, op);
+               /* was buggy and never really used */
+               return -EINVAL;
        }
        return -ENOSYS;
 }
index 10574fa..a1e3a03 100644 (file)
@@ -378,6 +378,9 @@ static const struct usb_device_id usb_quirk_list[] = {
        { USB_DEVICE(0x0926, 0x3333), .driver_info =
                        USB_QUIRK_CONFIG_INTF_STRINGS },
 
+       /* Kingston DataTraveler 3.0 */
+       { USB_DEVICE(0x0951, 0x1666), .driver_info = USB_QUIRK_NO_LPM },
+
        /* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */
        { USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF },
 
index e282067..5f18aca 100644 (file)
@@ -608,10 +608,13 @@ static int dwc2_driver_probe(struct platform_device *dev)
 #endif /* CONFIG_USB_DWC2_PERIPHERAL || CONFIG_USB_DWC2_DUAL_ROLE */
        return 0;
 
+#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
+       IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
 error_debugfs:
        dwc2_debugfs_exit(hsotg);
        if (hsotg->hcd_enabled)
                dwc2_hcd_remove(hsotg);
+#endif
 error_drd:
        dwc2_drd_exit(hsotg);
 
index 242b621..bae6a70 100644 (file)
@@ -40,6 +40,7 @@
 #define PCI_DEVICE_ID_INTEL_TGPLP              0xa0ee
 #define PCI_DEVICE_ID_INTEL_TGPH               0x43ee
 #define PCI_DEVICE_ID_INTEL_JSP                        0x4dee
+#define PCI_DEVICE_ID_INTEL_ADLS               0x7ae1
 
 #define PCI_INTEL_BXT_DSM_GUID         "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511"
 #define PCI_INTEL_BXT_FUNC_PMU_PWR     4
@@ -367,6 +368,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_JSP),
          (kernel_ulong_t) &dwc3_pci_intel_properties, },
 
+       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLS),
+         (kernel_ulong_t) &dwc3_pci_intel_properties, },
+
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_NL_USB),
          (kernel_ulong_t) &dwc3_pci_amd_properties, },
        {  }    /* Terminating Entry */
index 7be3903..8b668ef 100644 (file)
@@ -1058,10 +1058,11 @@ void dwc3_ep0_send_delayed_status(struct dwc3 *dwc)
 {
        unsigned int direction = !dwc->ep0_expect_in;
 
+       dwc->delayed_status = false;
+
        if (dwc->ep0state != EP0_STATUS_PHASE)
                return;
 
-       dwc->delayed_status = false;
        __dwc3_ep0_do_control_status(dwc, dwc->eps[direction]);
 }
 
index e01e366..062dfac 100644 (file)
@@ -564,9 +564,12 @@ static int raw_ioctl_event_fetch(struct raw_dev *dev, unsigned long value)
                return -ENODEV;
        }
        length = min(arg.length, event->length);
-       if (copy_to_user((void __user *)value, event, sizeof(*event) + length))
+       if (copy_to_user((void __user *)value, event, sizeof(*event) + length)) {
+               kfree(event);
                return -EFAULT;
+       }
 
+       kfree(event);
        return 0;
 }
 
index de528e3..ad6ff9c 100644 (file)
@@ -1051,7 +1051,7 @@ static int fsl_ep_fifo_status(struct usb_ep *_ep)
        u32 bitmask;
        struct ep_queue_head *qh;
 
-       if (!_ep || _ep->desc || !(_ep->desc->bEndpointAddress&0xF))
+       if (!_ep || !_ep->desc || !(_ep->desc->bEndpointAddress&0xF))
                return -ENODEV;
 
        ep = container_of(_ep, struct fsl_ep, ep);
index 25c1d6a..3e1267d 100644 (file)
@@ -1760,6 +1760,7 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                goto err;
        }
 
+       pci_set_drvdata(pdev, dev);
        spin_lock_init(&dev->lock);
        dev->pdev = pdev;
        dev->gadget.ops = &goku_ops;
@@ -1793,7 +1794,6 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        }
        dev->regs = (struct goku_udc_regs __iomem *) base;
 
-       pci_set_drvdata(pdev, dev);
        INFO(dev, "%s\n", driver_desc);
        INFO(dev, "version: " DRIVER_VERSION " %s\n", dmastr());
        INFO(dev, "irq %d, pci mem %p\n", pdev->irq, base);
index 579d8c8..9de0171 100644 (file)
@@ -120,8 +120,10 @@ static int apple_mfi_fc_set_property(struct power_supply *psy,
        dev_dbg(&mfi->udev->dev, "prop: %d\n", psp);
 
        ret = pm_runtime_get_sync(&mfi->udev->dev);
-       if (ret < 0)
+       if (ret < 0) {
+               pm_runtime_put_noidle(&mfi->udev->dev);
                return ret;
+       }
 
        switch (psp) {
        case POWER_SUPPLY_PROP_CHARGE_TYPE:
index 1de5c9a..38f17d6 100644 (file)
@@ -564,6 +564,7 @@ static int mtu3_gadget_stop(struct usb_gadget *g)
 
        spin_unlock_irqrestore(&mtu->lock, flags);
 
+       synchronize_irq(mtu->irq);
        return 0;
 }
 
index 8219706..2e40908 100644 (file)
@@ -357,11 +357,12 @@ static void cyberjack_write_bulk_callback(struct urb *urb)
        struct device *dev = &port->dev;
        int status = urb->status;
        unsigned long flags;
+       bool resubmitted = false;
 
-       set_bit(0, &port->write_urbs_free);
        if (status) {
                dev_dbg(dev, "%s - nonzero write bulk status received: %d\n",
                        __func__, status);
+               set_bit(0, &port->write_urbs_free);
                return;
        }
 
@@ -394,6 +395,8 @@ static void cyberjack_write_bulk_callback(struct urb *urb)
                        goto exit;
                }
 
+               resubmitted = true;
+
                dev_dbg(dev, "%s - priv->wrsent=%d\n", __func__, priv->wrsent);
                dev_dbg(dev, "%s - priv->wrfilled=%d\n", __func__, priv->wrfilled);
 
@@ -410,6 +413,8 @@ static void cyberjack_write_bulk_callback(struct urb *urb)
 
 exit:
        spin_unlock_irqrestore(&priv->lock, flags);
+       if (!resubmitted)
+               set_bit(0, &port->write_urbs_free);
        usb_serial_port_softint(port);
 }
 
index 2a3bfd6..54ca85c 100644 (file)
@@ -250,6 +250,7 @@ static void option_instat_callback(struct urb *urb);
 #define QUECTEL_PRODUCT_EP06                   0x0306
 #define QUECTEL_PRODUCT_EM12                   0x0512
 #define QUECTEL_PRODUCT_RM500Q                 0x0800
+#define QUECTEL_PRODUCT_EC200T                 0x6026
 
 #define CMOTECH_VENDOR_ID                      0x16d8
 #define CMOTECH_PRODUCT_6001                   0x6001
@@ -1117,6 +1118,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
          .driver_info = ZLP },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
 
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
@@ -1189,6 +1191,8 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = NCTRL(0) | RSVD(1) },
        { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1054, 0xff),    /* Telit FT980-KS */
          .driver_info = NCTRL(2) | RSVD(3) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1055, 0xff),    /* Telit FN980 (PCIe) */
+         .driver_info = NCTRL(0) | RSVD(1) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
          .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
@@ -1201,6 +1205,8 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = NCTRL(0) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
          .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1203, 0xff),    /* Telit LE910Cx (RNDIS) */
+         .driver_info = NCTRL(2) | RSVD(3) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
          .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
@@ -1215,6 +1221,10 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
          .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1230, 0xff),    /* Telit LE910Cx (rmnet) */
+         .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1231, 0xff),    /* Telit LE910Cx (RNDIS) */
+         .driver_info = NCTRL(2) | RSVD(3) },
        { USB_DEVICE(TELIT_VENDOR_ID, 0x1260),
          .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
        { USB_DEVICE(TELIT_VENDOR_ID, 0x1261),
index 71ce1b7..2b385c1 100644 (file)
@@ -395,8 +395,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
         */
        trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
 
-       map = swiotlb_tbl_map_single(dev, virt_to_phys(xen_io_tlb_start),
-                                    phys, size, size, dir, attrs);
+       map = swiotlb_tbl_map_single(dev, phys, size, size, dir, attrs);
        if (map == (phys_addr_t)DMA_MAPPING_ERROR)
                return DMA_MAPPING_ERROR;
 
index 7e1549a..bc920af 100644 (file)
@@ -511,7 +511,8 @@ again:
                                /*DEFAULT_RATELIMIT_BURST*/ 1);
                if (__ratelimit(&_rs))
                        WARN(1, KERN_DEBUG
-                               "BTRFS: block rsv returned %d\n", ret);
+                               "BTRFS: block rsv %d returned %d\n",
+                               block_rsv->type, ret);
        }
 try_reserve:
        ret = btrfs_reserve_metadata_bytes(root, block_rsv, blocksize,
index 5b9e3f3..1063853 100644 (file)
@@ -91,6 +91,17 @@ int btrfs_init_dev_replace(struct btrfs_fs_info *fs_info)
        ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
        if (ret) {
 no_valid_dev_replace_entry_found:
+               /*
+                * We don't have a replace item or it's corrupted.  If there is
+                * a replace target, fail the mount.
+                */
+               if (btrfs_find_device(fs_info->fs_devices,
+                                     BTRFS_DEV_REPLACE_DEVID, NULL, NULL, false)) {
+                       btrfs_err(fs_info,
+                       "found replace target device without a valid replace item");
+                       ret = -EUCLEAN;
+                       goto out;
+               }
                ret = 0;
                dev_replace->replace_state =
                        BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED;
@@ -143,8 +154,19 @@ no_valid_dev_replace_entry_found:
        case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
        case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
        case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
-               dev_replace->srcdev = NULL;
-               dev_replace->tgtdev = NULL;
+               /*
+                * We don't have an active replace item but if there is a
+                * replace target, fail the mount.
+                */
+               if (btrfs_find_device(fs_info->fs_devices,
+                                     BTRFS_DEV_REPLACE_DEVID, NULL, NULL, false)) {
+                       btrfs_err(fs_info,
+                       "replace devid present without an active replace item");
+                       ret = -EUCLEAN;
+               } else {
+                       dev_replace->srcdev = NULL;
+                       dev_replace->tgtdev = NULL;
+               }
                break;
        case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
        case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
index ab408a2..69a3841 100644 (file)
@@ -1274,6 +1274,7 @@ static int cluster_pages_for_defrag(struct inode *inode,
        u64 page_start;
        u64 page_end;
        u64 page_cnt;
+       u64 start = (u64)start_index << PAGE_SHIFT;
        int ret;
        int i;
        int i_done;
@@ -1290,8 +1291,7 @@ static int cluster_pages_for_defrag(struct inode *inode,
        page_cnt = min_t(u64, (u64)num_pages, (u64)file_end - start_index + 1);
 
        ret = btrfs_delalloc_reserve_space(BTRFS_I(inode), &data_reserved,
-                       start_index << PAGE_SHIFT,
-                       page_cnt << PAGE_SHIFT);
+                       start, page_cnt << PAGE_SHIFT);
        if (ret)
                return ret;
        i_done = 0;
@@ -1380,8 +1380,7 @@ again:
                btrfs_mod_outstanding_extents(BTRFS_I(inode), 1);
                spin_unlock(&BTRFS_I(inode)->lock);
                btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved,
-                               start_index << PAGE_SHIFT,
-                               (page_cnt - i_done) << PAGE_SHIFT, true);
+                               start, (page_cnt - i_done) << PAGE_SHIFT, true);
        }
 
 
@@ -1408,8 +1407,7 @@ out:
                put_page(pages[i]);
        }
        btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved,
-                       start_index << PAGE_SHIFT,
-                       page_cnt << PAGE_SHIFT, true);
+                       start, page_cnt << PAGE_SHIFT, true);
        btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT);
        extent_changeset_free(data_reserved);
        return ret;
index c54ea65..77c5474 100644 (file)
@@ -3435,24 +3435,20 @@ static int qgroup_unreserve_range(struct btrfs_inode *inode,
 {
        struct rb_node *node;
        struct rb_node *next;
-       struct ulist_node *entry = NULL;
+       struct ulist_node *entry;
        int ret = 0;
 
        node = reserved->range_changed.root.rb_node;
+       if (!node)
+               return 0;
        while (node) {
                entry = rb_entry(node, struct ulist_node, rb_node);
                if (entry->val < start)
                        node = node->rb_right;
-               else if (entry)
-                       node = node->rb_left;
                else
-                       break;
+                       node = node->rb_left;
        }
 
-       /* Empty changeset */
-       if (!entry)
-               return 0;
-
        if (entry->val > start && rb_prev(&entry->rb_node))
                entry = rb_entry(rb_prev(&entry->rb_node), struct ulist_node,
                                 rb_node);
index 7f03dbe..78693d3 100644 (file)
@@ -860,6 +860,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
 "dropping a ref for a root that doesn't have a ref on the block");
                        dump_block_entry(fs_info, be);
                        dump_ref_action(fs_info, ra);
+                       kfree(ref);
                        kfree(ra);
                        goto out_unlock;
                }
index 3602806..9ba92d8 100644 (file)
@@ -1648,6 +1648,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
        struct btrfs_root_item *root_item;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
+       int reserve_level;
        int level;
        int max_level;
        int replaced = 0;
@@ -1696,7 +1697,8 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
         * Thus the needed metadata size is at most root_level * nodesize,
         * and * 2 since we have two trees to COW.
         */
-       min_reserved = fs_info->nodesize * btrfs_root_level(root_item) * 2;
+       reserve_level = max_t(int, 1, btrfs_root_level(root_item));
+       min_reserved = fs_info->nodesize * reserve_level * 2;
        memset(&next_key, 0, sizeof(next_key));
 
        while (1) {
index cf63f1e..e71e758 100644 (file)
@@ -3866,8 +3866,9 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
        if (!is_dev_replace && !readonly &&
            !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
                mutex_unlock(&fs_info->fs_devices->device_list_mutex);
-               btrfs_err_in_rcu(fs_info, "scrub: device %s is not writable",
-                               rcu_str_deref(dev->name));
+               btrfs_err_in_rcu(fs_info,
+                       "scrub on devid %llu: filesystem on %s is not writable",
+                                devid, rcu_str_deref(dev->name));
                ret = -EROFS;
                goto out;
        }
index b1e4807..a6406b3 100644 (file)
@@ -1056,22 +1056,13 @@ static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
                        continue;
                }
 
-               if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
-                       /*
-                        * In the first step, keep the device which has
-                        * the correct fsid and the devid that is used
-                        * for the dev_replace procedure.
-                        * In the second step, the dev_replace state is
-                        * read from the device tree and it is known
-                        * whether the procedure is really active or
-                        * not, which means whether this device is
-                        * used or whether it should be removed.
-                        */
-                       if (step == 0 || test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
-                                                 &device->dev_state)) {
-                               continue;
-                       }
-               }
+               /*
+                * We have already validated the presence of BTRFS_DEV_REPLACE_DEVID,
+                * in btrfs_init_dev_replace() so just continue.
+                */
+               if (device->devid == BTRFS_DEV_REPLACE_DEVID)
+                       continue;
+
                if (device->bdev) {
                        blkdev_put(device->bdev, device->mode);
                        device->bdev = NULL;
@@ -1080,9 +1071,6 @@ static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
                if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
                        list_del_init(&device->dev_alloc_list);
                        clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
-                       if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
-                                     &device->dev_state))
-                               fs_devices->rw_devices--;
                }
                list_del_init(&device->dev_list);
                fs_devices->num_devices--;
index 5027bbd..ded4229 100644 (file)
@@ -4074,7 +4074,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
             vino.snap, inode);
 
        mutex_lock(&session->s_mutex);
-       session->s_seq++;
+       inc_session_sequence(session);
        dout(" mds%d seq %lld cap seq %u\n", session->s_mds, session->s_seq,
             (unsigned)seq);
 
index 08f1c0c..8f1d750 100644 (file)
@@ -4231,7 +4231,7 @@ static void handle_lease(struct ceph_mds_client *mdsc,
             dname.len, dname.name);
 
        mutex_lock(&session->s_mutex);
-       session->s_seq++;
+       inc_session_sequence(session);
 
        if (!inode) {
                dout("handle_lease no inode %llx\n", vino.ino);
@@ -4385,28 +4385,48 @@ static void maybe_recover_session(struct ceph_mds_client *mdsc)
 
 bool check_session_state(struct ceph_mds_session *s)
 {
-       if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
-               dout("resending session close request for mds%d\n",
-                               s->s_mds);
-               request_close_session(s);
-               return false;
-       }
-       if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
-               if (s->s_state == CEPH_MDS_SESSION_OPEN) {
+       switch (s->s_state) {
+       case CEPH_MDS_SESSION_OPEN:
+               if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
                        s->s_state = CEPH_MDS_SESSION_HUNG;
                        pr_info("mds%d hung\n", s->s_mds);
                }
-       }
-       if (s->s_state == CEPH_MDS_SESSION_NEW ||
-           s->s_state == CEPH_MDS_SESSION_RESTARTING ||
-           s->s_state == CEPH_MDS_SESSION_CLOSED ||
-           s->s_state == CEPH_MDS_SESSION_REJECTED)
-               /* this mds is failed or recovering, just wait */
+               break;
+       case CEPH_MDS_SESSION_CLOSING:
+               /* Should never reach this when we're unmounting */
+               WARN_ON_ONCE(true);
+               fallthrough;
+       case CEPH_MDS_SESSION_NEW:
+       case CEPH_MDS_SESSION_RESTARTING:
+       case CEPH_MDS_SESSION_CLOSED:
+       case CEPH_MDS_SESSION_REJECTED:
                return false;
+       }
 
        return true;
 }
 
+/*
+ * If the sequence is incremented while we're waiting on a REQUEST_CLOSE reply,
+ * then we need to retransmit that request.
+ */
+void inc_session_sequence(struct ceph_mds_session *s)
+{
+       lockdep_assert_held(&s->s_mutex);
+
+       s->s_seq++;
+
+       if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
+               int ret;
+
+               dout("resending session close request for mds%d\n", s->s_mds);
+               ret = request_close_session(s);
+               if (ret < 0)
+                       pr_err("unable to close session to mds%d: %d\n",
+                              s->s_mds, ret);
+       }
+}
+
 /*
  * delayed work -- periodically trim expired leases, renew caps with mds
  */
index cbf8af4..f5adbeb 100644 (file)
@@ -480,6 +480,7 @@ struct ceph_mds_client {
 extern const char *ceph_mds_op_name(int op);
 
 extern bool check_session_state(struct ceph_mds_session *s);
+void inc_session_sequence(struct ceph_mds_session *s);
 
 extern struct ceph_mds_session *
 __ceph_lookup_mds_session(struct ceph_mds_client *, int mds);
index 83cb4f2..9b785f1 100644 (file)
@@ -53,7 +53,7 @@ void ceph_handle_quota(struct ceph_mds_client *mdsc,
 
        /* increment msg sequence number */
        mutex_lock(&session->s_mutex);
-       session->s_seq++;
+       inc_session_sequence(session);
        mutex_unlock(&session->s_mutex);
 
        /* lookup inode */
index 0da39c1..b611f82 100644 (file)
@@ -873,7 +873,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
             ceph_snap_op_name(op), split, trace_len);
 
        mutex_lock(&session->s_mutex);
-       session->s_seq++;
+       inc_session_sequence(session);
        mutex_unlock(&session->s_mutex);
 
        down_write(&mdsc->snap_rwsem);
index d3c3e5d..d595abb 100644 (file)
@@ -269,9 +269,7 @@ unlock:
         * New inodes may not have an inode number assigned yet.
         * Hashing their inode number is delayed until later.
         */
-       if (ci->ci_inode->i_ino == 0)
-               WARN_ON(!(ci->ci_inode->i_state & I_CREATING));
-       else
+       if (ci->ci_inode->i_ino)
                fscrypt_hash_inode_number(ci, mk);
        return 0;
 }
index 139d0be..3e21c0e 100644 (file)
@@ -107,11 +107,9 @@ static struct page *erofs_read_inode(struct inode *inode,
                i_gid_write(inode, le32_to_cpu(die->i_gid));
                set_nlink(inode, le32_to_cpu(die->i_nlink));
 
-               /* ns timestamp */
-               inode->i_mtime.tv_sec = inode->i_ctime.tv_sec =
-                       le64_to_cpu(die->i_ctime);
-               inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec =
-                       le32_to_cpu(die->i_ctime_nsec);
+               /* extended inode has its own timestamp */
+               inode->i_ctime.tv_sec = le64_to_cpu(die->i_ctime);
+               inode->i_ctime.tv_nsec = le32_to_cpu(die->i_ctime_nsec);
 
                inode->i_size = le64_to_cpu(die->i_size);
 
@@ -149,11 +147,9 @@ static struct page *erofs_read_inode(struct inode *inode,
                i_gid_write(inode, le16_to_cpu(dic->i_gid));
                set_nlink(inode, le16_to_cpu(dic->i_nlink));
 
-               /* use build time to derive all file time */
-               inode->i_mtime.tv_sec = inode->i_ctime.tv_sec =
-                       sbi->build_time;
-               inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec =
-                       sbi->build_time_nsec;
+               /* use build time for compact inodes */
+               inode->i_ctime.tv_sec = sbi->build_time;
+               inode->i_ctime.tv_nsec = sbi->build_time_nsec;
 
                inode->i_size = le32_to_cpu(dic->i_size);
                if (erofs_inode_is_data_compressed(vi->datalayout))
@@ -167,6 +163,11 @@ static struct page *erofs_read_inode(struct inode *inode,
                goto err_out;
        }
 
+       inode->i_mtime.tv_sec = inode->i_ctime.tv_sec;
+       inode->i_atime.tv_sec = inode->i_ctime.tv_sec;
+       inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec;
+       inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec;
+
        if (!nblks)
                /* measure inode.i_blocks as generic filesystems */
                inode->i_blocks = roundup(inode->i_size, EROFS_BLKSIZ) >> 9;
index 50912a5..86fd3bf 100644 (file)
@@ -1078,8 +1078,11 @@ out_allocpage:
                cond_resched();
                goto repeat;
        }
-       set_page_private(page, (unsigned long)pcl);
-       SetPagePrivate(page);
+
+       if (tocache) {
+               set_page_private(page, (unsigned long)pcl);
+               SetPagePrivate(page);
+       }
 out:   /* the only exit (for tracing and debugging) */
        return page;
 }
index 45fcdbf..1b399ca 100644 (file)
@@ -1028,9 +1028,6 @@ struct ext4_inode_info {
                                         * protected by sbi->s_fc_lock.
                                         */
 
-       /* Fast commit subtid when this inode was committed */
-       unsigned int i_fc_committed_subtid;
-
        /* Start of lblk range that needs to be committed in this fast commit */
        ext4_lblk_t i_fc_lblk_start;
 
@@ -1422,16 +1419,6 @@ struct ext4_super_block {
 
 #ifdef __KERNEL__
 
-/*
- * run-time mount flags
- */
-#define EXT4_MF_MNTDIR_SAMPLED         0x0001
-#define EXT4_MF_FS_ABORTED             0x0002  /* Fatal error detected */
-#define EXT4_MF_FC_INELIGIBLE          0x0004  /* Fast commit ineligible */
-#define EXT4_MF_FC_COMMITTING          0x0008  /* File system underoing a fast
-                                                * commit.
-                                                */
-
 #ifdef CONFIG_FS_ENCRYPTION
 #define DUMMY_ENCRYPTION_ENABLED(sbi) ((sbi)->s_dummy_enc_policy.policy != NULL)
 #else
@@ -1466,7 +1453,7 @@ struct ext4_sb_info {
        struct buffer_head * __rcu *s_group_desc;
        unsigned int s_mount_opt;
        unsigned int s_mount_opt2;
-       unsigned int s_mount_flags;
+       unsigned long s_mount_flags;
        unsigned int s_def_mount_opt;
        ext4_fsblk_t s_sb_block;
        atomic64_t s_resv_clusters;
@@ -1694,6 +1681,34 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
        _v;                                                                \
 })
 
+/*
+ * run-time mount flags
+ */
+enum {
+       EXT4_MF_MNTDIR_SAMPLED,
+       EXT4_MF_FS_ABORTED,     /* Fatal error detected */
+       EXT4_MF_FC_INELIGIBLE,  /* Fast commit ineligible */
+       EXT4_MF_FC_COMMITTING   /* File system underoing a fast
+                                * commit.
+                                */
+};
+
+static inline void ext4_set_mount_flag(struct super_block *sb, int bit)
+{
+       set_bit(bit, &EXT4_SB(sb)->s_mount_flags);
+}
+
+static inline void ext4_clear_mount_flag(struct super_block *sb, int bit)
+{
+       clear_bit(bit, &EXT4_SB(sb)->s_mount_flags);
+}
+
+static inline int ext4_test_mount_flag(struct super_block *sb, int bit)
+{
+       return test_bit(bit, &EXT4_SB(sb)->s_mount_flags);
+}
+
+
 /*
  * Simulate_fail codes
  */
@@ -1863,6 +1878,13 @@ static inline bool ext4_verity_in_progress(struct inode *inode)
 #define EXT4_FEATURE_COMPAT_RESIZE_INODE       0x0010
 #define EXT4_FEATURE_COMPAT_DIR_INDEX          0x0020
 #define EXT4_FEATURE_COMPAT_SPARSE_SUPER2      0x0200
+/*
+ * The reason why "FAST_COMMIT" is a compat feature is that, FS becomes
+ * incompatible only if fast commit blocks are present in the FS. Since we
+ * clear the journal (and thus the fast commit blocks), we don't mark FS as
+ * incompatible. We also have a JBD2 incompat feature, which gets set when
+ * there are fast commit blocks present in the journal.
+ */
 #define EXT4_FEATURE_COMPAT_FAST_COMMIT                0x0400
 #define EXT4_FEATURE_COMPAT_STABLE_INODES      0x0800
 
@@ -2731,12 +2753,16 @@ extern void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate);
 int ext4_fc_info_show(struct seq_file *seq, void *v);
 void ext4_fc_init(struct super_block *sb, journal_t *journal);
 void ext4_fc_init_inode(struct inode *inode);
-void ext4_fc_track_range(struct inode *inode, ext4_lblk_t start,
+void ext4_fc_track_range(handle_t *handle, struct inode *inode, ext4_lblk_t start,
                         ext4_lblk_t end);
-void ext4_fc_track_unlink(struct inode *inode, struct dentry *dentry);
-void ext4_fc_track_link(struct inode *inode, struct dentry *dentry);
-void ext4_fc_track_create(struct inode *inode, struct dentry *dentry);
-void ext4_fc_track_inode(struct inode *inode);
+void __ext4_fc_track_unlink(handle_t *handle, struct inode *inode,
+       struct dentry *dentry);
+void __ext4_fc_track_link(handle_t *handle, struct inode *inode,
+       struct dentry *dentry);
+void ext4_fc_track_unlink(handle_t *handle, struct dentry *dentry);
+void ext4_fc_track_link(handle_t *handle, struct dentry *dentry);
+void ext4_fc_track_create(handle_t *handle, struct dentry *dentry);
+void ext4_fc_track_inode(handle_t *handle, struct inode *inode);
 void ext4_fc_mark_ineligible(struct super_block *sb, int reason);
 void ext4_fc_start_ineligible(struct super_block *sb, int reason);
 void ext4_fc_stop_ineligible(struct super_block *sb);
@@ -3452,7 +3478,7 @@ extern int ext4_handle_dirty_dirblock(handle_t *handle, struct inode *inode,
 extern int ext4_ci_compare(const struct inode *parent,
                           const struct qstr *fname,
                           const struct qstr *entry, bool quick);
-extern int __ext4_unlink(struct inode *dir, const struct qstr *d_name,
+extern int __ext4_unlink(handle_t *handle, struct inode *dir, const struct qstr *d_name,
                         struct inode *inode);
 extern int __ext4_link(struct inode *dir, struct inode *inode,
                       struct dentry *dentry);
index 57cfa28..17d7096 100644 (file)
@@ -3724,7 +3724,6 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle,
        err = ext4_ext_dirty(handle, inode, path + path->p_depth);
 out:
        ext4_ext_show_leaf(inode, path);
-       ext4_fc_track_range(inode, ee_block, ee_block + ee_len - 1);
        return err;
 }
 
@@ -3796,7 +3795,6 @@ convert_initialized_extent(handle_t *handle, struct inode *inode,
        if (*allocated > map->m_len)
                *allocated = map->m_len;
        map->m_len = *allocated;
-       ext4_fc_track_range(inode, ee_block, ee_block + ee_len - 1);
        return 0;
 }
 
@@ -4329,7 +4327,6 @@ got_allocated_blocks:
        map->m_len = ar.len;
        allocated = map->m_len;
        ext4_ext_show_leaf(inode, path);
-       ext4_fc_track_range(inode, map->m_lblk, map->m_lblk + map->m_len - 1);
 out:
        ext4_ext_drop_refs(path);
        kfree(path);
@@ -4602,7 +4599,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
        ret = ext4_mark_inode_dirty(handle, inode);
        if (unlikely(ret))
                goto out_handle;
-       ext4_fc_track_range(inode, offset >> inode->i_sb->s_blocksize_bits,
+       ext4_fc_track_range(handle, inode, offset >> inode->i_sb->s_blocksize_bits,
                        (offset + len - 1) >> inode->i_sb->s_blocksize_bits);
        /* Zero out partial block at the edges of the range */
        ret = ext4_zero_partial_blocks(handle, inode, offset, len);
@@ -4651,8 +4648,6 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
                     FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
                     FALLOC_FL_INSERT_RANGE))
                return -EOPNOTSUPP;
-       ext4_fc_track_range(inode, offset >> blkbits,
-                       (offset + len - 1) >> blkbits);
 
        ext4_fc_start_update(inode);
 
index 8d43058..f2033e1 100644 (file)
@@ -83,7 +83,7 @@
  *
  * Atomicity of commits
  * --------------------
- * In order to gaurantee atomicity during the commit operation, fast commit
+ * In order to guarantee atomicity during the commit operation, fast commit
  * uses "EXT4_FC_TAG_TAIL" tag that marks a fast commit as complete. Tail
  * tag contains CRC of the contents and TID of the transaction after which
  * this fast commit should be applied. Recovery code replays fast commit
@@ -152,7 +152,31 @@ void ext4_fc_init_inode(struct inode *inode)
        INIT_LIST_HEAD(&ei->i_fc_list);
        init_waitqueue_head(&ei->i_fc_wait);
        atomic_set(&ei->i_fc_updates, 0);
-       ei->i_fc_committed_subtid = 0;
+}
+
+/* This function must be called with sbi->s_fc_lock held. */
+static void ext4_fc_wait_committing_inode(struct inode *inode)
+__releases(&EXT4_SB(inode->i_sb)->s_fc_lock)
+{
+       wait_queue_head_t *wq;
+       struct ext4_inode_info *ei = EXT4_I(inode);
+
+#if (BITS_PER_LONG < 64)
+       DEFINE_WAIT_BIT(wait, &ei->i_state_flags,
+                       EXT4_STATE_FC_COMMITTING);
+       wq = bit_waitqueue(&ei->i_state_flags,
+                               EXT4_STATE_FC_COMMITTING);
+#else
+       DEFINE_WAIT_BIT(wait, &ei->i_flags,
+                       EXT4_STATE_FC_COMMITTING);
+       wq = bit_waitqueue(&ei->i_flags,
+                               EXT4_STATE_FC_COMMITTING);
+#endif
+       lockdep_assert_held(&EXT4_SB(inode->i_sb)->s_fc_lock);
+       prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
+       spin_unlock(&EXT4_SB(inode->i_sb)->s_fc_lock);
+       schedule();
+       finish_wait(wq, &wait.wq_entry);
 }
 
 /*
@@ -176,22 +200,7 @@ restart:
                goto out;
 
        if (ext4_test_inode_state(inode, EXT4_STATE_FC_COMMITTING)) {
-               wait_queue_head_t *wq;
-#if (BITS_PER_LONG < 64)
-               DEFINE_WAIT_BIT(wait, &ei->i_state_flags,
-                               EXT4_STATE_FC_COMMITTING);
-               wq = bit_waitqueue(&ei->i_state_flags,
-                                  EXT4_STATE_FC_COMMITTING);
-#else
-               DEFINE_WAIT_BIT(wait, &ei->i_flags,
-                               EXT4_STATE_FC_COMMITTING);
-               wq = bit_waitqueue(&ei->i_flags,
-                                  EXT4_STATE_FC_COMMITTING);
-#endif
-               prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
-               spin_unlock(&EXT4_SB(inode->i_sb)->s_fc_lock);
-               schedule();
-               finish_wait(wq, &wait.wq_entry);
+               ext4_fc_wait_committing_inode(inode);
                goto restart;
        }
 out:
@@ -234,26 +243,10 @@ restart:
        }
 
        if (ext4_test_inode_state(inode, EXT4_STATE_FC_COMMITTING)) {
-               wait_queue_head_t *wq;
-#if (BITS_PER_LONG < 64)
-               DEFINE_WAIT_BIT(wait, &ei->i_state_flags,
-                               EXT4_STATE_FC_COMMITTING);
-               wq = bit_waitqueue(&ei->i_state_flags,
-                                  EXT4_STATE_FC_COMMITTING);
-#else
-               DEFINE_WAIT_BIT(wait, &ei->i_flags,
-                               EXT4_STATE_FC_COMMITTING);
-               wq = bit_waitqueue(&ei->i_flags,
-                                  EXT4_STATE_FC_COMMITTING);
-#endif
-               prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
-               spin_unlock(&EXT4_SB(inode->i_sb)->s_fc_lock);
-               schedule();
-               finish_wait(wq, &wait.wq_entry);
+               ext4_fc_wait_committing_inode(inode);
                goto restart;
        }
-       if (!list_empty(&ei->i_fc_list))
-               list_del_init(&ei->i_fc_list);
+       list_del_init(&ei->i_fc_list);
        spin_unlock(&EXT4_SB(inode->i_sb)->s_fc_lock);
 }
 
@@ -269,7 +262,7 @@ void ext4_fc_mark_ineligible(struct super_block *sb, int reason)
            (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY))
                return;
 
-       sbi->s_mount_flags |= EXT4_MF_FC_INELIGIBLE;
+       ext4_set_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
        WARN_ON(reason >= EXT4_FC_REASON_MAX);
        sbi->s_fc_stats.fc_ineligible_reason_count[reason]++;
 }
@@ -302,14 +295,14 @@ void ext4_fc_stop_ineligible(struct super_block *sb)
            (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY))
                return;
 
-       EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FC_INELIGIBLE;
+       ext4_set_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
        atomic_dec(&EXT4_SB(sb)->s_fc_ineligible_updates);
 }
 
 static inline int ext4_fc_is_ineligible(struct super_block *sb)
 {
-       return (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FC_INELIGIBLE) ||
-               atomic_read(&EXT4_SB(sb)->s_fc_ineligible_updates);
+       return (ext4_test_mount_flag(sb, EXT4_MF_FC_INELIGIBLE) ||
+               atomic_read(&EXT4_SB(sb)->s_fc_ineligible_updates));
 }
 
 /*
@@ -323,13 +316,14 @@ static inline int ext4_fc_is_ineligible(struct super_block *sb)
  * If enqueue is set, this function enqueues the inode in fast commit list.
  */
 static int ext4_fc_track_template(
-       struct inode *inode, int (*__fc_track_fn)(struct inode *, void *, bool),
+       handle_t *handle, struct inode *inode,
+       int (*__fc_track_fn)(struct inode *, void *, bool),
        void *args, int enqueue)
 {
-       tid_t running_txn_tid;
        bool update = false;
        struct ext4_inode_info *ei = EXT4_I(inode);
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+       tid_t tid = 0;
        int ret;
 
        if (!test_opt2(inode->i_sb, JOURNAL_FAST_COMMIT) ||
@@ -339,15 +333,13 @@ static int ext4_fc_track_template(
        if (ext4_fc_is_ineligible(inode->i_sb))
                return -EINVAL;
 
-       running_txn_tid = sbi->s_journal ?
-               sbi->s_journal->j_commit_sequence + 1 : 0;
-
+       tid = handle->h_transaction->t_tid;
        mutex_lock(&ei->i_fc_lock);
-       if (running_txn_tid == ei->i_sync_tid) {
+       if (tid == ei->i_sync_tid) {
                update = true;
        } else {
                ext4_fc_reset_inode(inode);
-               ei->i_sync_tid = running_txn_tid;
+               ei->i_sync_tid = tid;
        }
        ret = __fc_track_fn(inode, args, update);
        mutex_unlock(&ei->i_fc_lock);
@@ -358,7 +350,7 @@ static int ext4_fc_track_template(
        spin_lock(&sbi->s_fc_lock);
        if (list_empty(&EXT4_I(inode)->i_fc_list))
                list_add_tail(&EXT4_I(inode)->i_fc_list,
-                               (sbi->s_mount_flags & EXT4_MF_FC_COMMITTING) ?
+                               (ext4_test_mount_flag(inode->i_sb, EXT4_MF_FC_COMMITTING)) ?
                                &sbi->s_fc_q[FC_Q_STAGING] :
                                &sbi->s_fc_q[FC_Q_MAIN]);
        spin_unlock(&sbi->s_fc_lock);
@@ -384,7 +376,7 @@ static int __track_dentry_update(struct inode *inode, void *arg, bool update)
        mutex_unlock(&ei->i_fc_lock);
        node = kmem_cache_alloc(ext4_fc_dentry_cachep, GFP_NOFS);
        if (!node) {
-               ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_MEM);
+               ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_NOMEM);
                mutex_lock(&ei->i_fc_lock);
                return -ENOMEM;
        }
@@ -397,7 +389,7 @@ static int __track_dentry_update(struct inode *inode, void *arg, bool update)
                if (!node->fcd_name.name) {
                        kmem_cache_free(ext4_fc_dentry_cachep, node);
                        ext4_fc_mark_ineligible(inode->i_sb,
-                               EXT4_FC_REASON_MEM);
+                               EXT4_FC_REASON_NOMEM);
                        mutex_lock(&ei->i_fc_lock);
                        return -ENOMEM;
                }
@@ -411,7 +403,7 @@ static int __track_dentry_update(struct inode *inode, void *arg, bool update)
        node->fcd_name.len = dentry->d_name.len;
 
        spin_lock(&sbi->s_fc_lock);
-       if (sbi->s_mount_flags & EXT4_MF_FC_COMMITTING)
+       if (ext4_test_mount_flag(inode->i_sb, EXT4_MF_FC_COMMITTING))
                list_add_tail(&node->fcd_list,
                                &sbi->s_fc_dentry_q[FC_Q_STAGING]);
        else
@@ -422,7 +414,8 @@ static int __track_dentry_update(struct inode *inode, void *arg, bool update)
        return 0;
 }
 
-void ext4_fc_track_unlink(struct inode *inode, struct dentry *dentry)
+void __ext4_fc_track_unlink(handle_t *handle,
+               struct inode *inode, struct dentry *dentry)
 {
        struct __track_dentry_update_args args;
        int ret;
@@ -430,12 +423,18 @@ void ext4_fc_track_unlink(struct inode *inode, struct dentry *dentry)
        args.dentry = dentry;
        args.op = EXT4_FC_TAG_UNLINK;
 
-       ret = ext4_fc_track_template(inode, __track_dentry_update,
+       ret = ext4_fc_track_template(handle, inode, __track_dentry_update,
                                        (void *)&args, 0);
        trace_ext4_fc_track_unlink(inode, dentry, ret);
 }
 
-void ext4_fc_track_link(struct inode *inode, struct dentry *dentry)
+void ext4_fc_track_unlink(handle_t *handle, struct dentry *dentry)
+{
+       __ext4_fc_track_unlink(handle, d_inode(dentry), dentry);
+}
+
+void __ext4_fc_track_link(handle_t *handle,
+       struct inode *inode, struct dentry *dentry)
 {
        struct __track_dentry_update_args args;
        int ret;
@@ -443,20 +442,26 @@ void ext4_fc_track_link(struct inode *inode, struct dentry *dentry)
        args.dentry = dentry;
        args.op = EXT4_FC_TAG_LINK;
 
-       ret = ext4_fc_track_template(inode, __track_dentry_update,
+       ret = ext4_fc_track_template(handle, inode, __track_dentry_update,
                                        (void *)&args, 0);
        trace_ext4_fc_track_link(inode, dentry, ret);
 }
 
-void ext4_fc_track_create(struct inode *inode, struct dentry *dentry)
+void ext4_fc_track_link(handle_t *handle, struct dentry *dentry)
+{
+       __ext4_fc_track_link(handle, d_inode(dentry), dentry);
+}
+
+void ext4_fc_track_create(handle_t *handle, struct dentry *dentry)
 {
        struct __track_dentry_update_args args;
+       struct inode *inode = d_inode(dentry);
        int ret;
 
        args.dentry = dentry;
        args.op = EXT4_FC_TAG_CREAT;
 
-       ret = ext4_fc_track_template(inode, __track_dentry_update,
+       ret = ext4_fc_track_template(handle, inode, __track_dentry_update,
                                        (void *)&args, 0);
        trace_ext4_fc_track_create(inode, dentry, ret);
 }
@@ -472,14 +477,20 @@ static int __track_inode(struct inode *inode, void *arg, bool update)
        return 0;
 }
 
-void ext4_fc_track_inode(struct inode *inode)
+void ext4_fc_track_inode(handle_t *handle, struct inode *inode)
 {
        int ret;
 
        if (S_ISDIR(inode->i_mode))
                return;
 
-       ret = ext4_fc_track_template(inode, __track_inode, NULL, 1);
+       if (ext4_should_journal_data(inode)) {
+               ext4_fc_mark_ineligible(inode->i_sb,
+                                       EXT4_FC_REASON_INODE_JOURNAL_DATA);
+               return;
+       }
+
+       ret = ext4_fc_track_template(handle, inode, __track_inode, NULL, 1);
        trace_ext4_fc_track_inode(inode, ret);
 }
 
@@ -515,7 +526,7 @@ static int __track_range(struct inode *inode, void *arg, bool update)
        return 0;
 }
 
-void ext4_fc_track_range(struct inode *inode, ext4_lblk_t start,
+void ext4_fc_track_range(handle_t *handle, struct inode *inode, ext4_lblk_t start,
                         ext4_lblk_t end)
 {
        struct __track_range_args args;
@@ -527,7 +538,7 @@ void ext4_fc_track_range(struct inode *inode, ext4_lblk_t start,
        args.start = start;
        args.end = end;
 
-       ret = ext4_fc_track_template(inode,  __track_range, &args, 1);
+       ret = ext4_fc_track_template(handle, inode,  __track_range, &args, 1);
 
        trace_ext4_fc_track_range(inode, start, end, ret);
 }
@@ -537,10 +548,11 @@ static void ext4_fc_submit_bh(struct super_block *sb)
        int write_flags = REQ_SYNC;
        struct buffer_head *bh = EXT4_SB(sb)->s_fc_bh;
 
+       /* TODO: REQ_FUA | REQ_PREFLUSH is unnecessarily expensive. */
        if (test_opt(sb, BARRIER))
                write_flags |= REQ_FUA | REQ_PREFLUSH;
        lock_buffer(bh);
-       clear_buffer_dirty(bh);
+       set_buffer_dirty(bh);
        set_buffer_uptodate(bh);
        bh->b_end_io = ext4_end_buffer_io_sync;
        submit_bh(REQ_OP_WRITE, write_flags, bh);
@@ -846,7 +858,7 @@ static int ext4_fc_submit_inode_data_all(journal_t *journal)
        int ret = 0;
 
        spin_lock(&sbi->s_fc_lock);
-       sbi->s_mount_flags |= EXT4_MF_FC_COMMITTING;
+       ext4_set_mount_flag(sb, EXT4_MF_FC_COMMITTING);
        list_for_each(pos, &sbi->s_fc_q[FC_Q_MAIN]) {
                ei = list_entry(pos, struct ext4_inode_info, i_fc_list);
                ext4_set_inode_state(&ei->vfs_inode, EXT4_STATE_FC_COMMITTING);
@@ -900,6 +912,8 @@ static int ext4_fc_wait_inode_data_all(journal_t *journal)
 
 /* Commit all the directory entry updates */
 static int ext4_fc_commit_dentry_updates(journal_t *journal, u32 *crc)
+__acquires(&sbi->s_fc_lock)
+__releases(&sbi->s_fc_lock)
 {
        struct super_block *sb = (struct super_block *)(journal->j_private);
        struct ext4_sb_info *sbi = EXT4_SB(sb);
@@ -996,6 +1010,13 @@ static int ext4_fc_perform_commit(journal_t *journal)
        if (ret)
                return ret;
 
+       /*
+        * If file system device is different from journal device, issue a cache
+        * flush before we start writing fast commit blocks.
+        */
+       if (journal->j_fs_dev != journal->j_dev)
+               blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS);
+
        blk_start_plug(&plug);
        if (sbi->s_fc_bytes == 0) {
                /*
@@ -1031,8 +1052,6 @@ static int ext4_fc_perform_commit(journal_t *journal)
                if (ret)
                        goto out;
                spin_lock(&sbi->s_fc_lock);
-               EXT4_I(inode)->i_fc_committed_subtid =
-                       atomic_read(&sbi->s_fc_subtid);
        }
        spin_unlock(&sbi->s_fc_lock);
 
@@ -1131,7 +1150,7 @@ out:
                "Fast commit ended with blks = %d, reason = %d, subtid - %d",
                nblks, reason, subtid);
        if (reason == EXT4_FC_REASON_FC_FAILED)
-               return jbd2_fc_end_commit_fallback(journal, commit_tid);
+               return jbd2_fc_end_commit_fallback(journal);
        if (reason == EXT4_FC_REASON_FC_START_FAILED ||
                reason == EXT4_FC_REASON_INELIGIBLE)
                return jbd2_complete_transaction(journal, commit_tid);
@@ -1190,8 +1209,8 @@ static void ext4_fc_cleanup(journal_t *journal, int full)
        list_splice_init(&sbi->s_fc_q[FC_Q_STAGING],
                                &sbi->s_fc_q[FC_Q_STAGING]);
 
-       sbi->s_mount_flags &= ~EXT4_MF_FC_COMMITTING;
-       sbi->s_mount_flags &= ~EXT4_MF_FC_INELIGIBLE;
+       ext4_clear_mount_flag(sb, EXT4_MF_FC_COMMITTING);
+       ext4_clear_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
 
        if (full)
                sbi->s_fc_bytes = 0;
@@ -1263,7 +1282,7 @@ static int ext4_fc_replay_unlink(struct super_block *sb, struct ext4_fc_tl *tl)
                return 0;
        }
 
-       ret = __ext4_unlink(old_parent, &entry, inode);
+       ret = __ext4_unlink(NULL, old_parent, &entry, inode);
        /* -ENOENT ok coz it might not exist anymore. */
        if (ret == -ENOENT)
                ret = 0;
@@ -2079,8 +2098,6 @@ static int ext4_fc_replay(journal_t *journal, struct buffer_head *bh,
 
 void ext4_fc_init(struct super_block *sb, journal_t *journal)
 {
-       int num_fc_blocks;
-
        /*
         * We set replay callback even if fast commit disabled because we may
         * could still have fast commit blocks that need to be replayed even if
@@ -2090,21 +2107,9 @@ void ext4_fc_init(struct super_block *sb, journal_t *journal)
        if (!test_opt2(sb, JOURNAL_FAST_COMMIT))
                return;
        journal->j_fc_cleanup_callback = ext4_fc_cleanup;
-       if (!buffer_uptodate(journal->j_sb_buffer)
-               && ext4_read_bh_lock(journal->j_sb_buffer, REQ_META | REQ_PRIO,
-                                       true)) {
-               ext4_msg(sb, KERN_ERR, "I/O error on journal");
-               return;
-       }
-       num_fc_blocks = be32_to_cpu(journal->j_superblock->s_num_fc_blks);
-       if (jbd2_fc_init(journal, num_fc_blocks ? num_fc_blocks :
-                                       EXT4_NUM_FC_BLKS)) {
-               pr_warn("Error while enabling fast commits, turning off.");
-               ext4_clear_feature_fast_commit(sb);
-       }
 }
 
-const char *fc_ineligible_reasons[] = {
+static const char *fc_ineligible_reasons[] = {
        "Extended attributes changed",
        "Cross rename",
        "Journal flag changed",
@@ -2113,6 +2118,7 @@ const char *fc_ineligible_reasons[] = {
        "Resize",
        "Dir renamed",
        "Falloc range op",
+       "Data journalling",
        "FC Commit Failed"
 };
 
index 06907d4..3a6e5a1 100644 (file)
@@ -3,9 +3,6 @@
 #ifndef __FAST_COMMIT_H__
 #define __FAST_COMMIT_H__
 
-/* Number of blocks in journal area to allocate for fast commits */
-#define EXT4_NUM_FC_BLKS               256
-
 /* Fast commit tags */
 #define EXT4_FC_TAG_ADD_RANGE          0x0001
 #define EXT4_FC_TAG_DEL_RANGE          0x0002
@@ -100,11 +97,12 @@ enum {
        EXT4_FC_REASON_XATTR = 0,
        EXT4_FC_REASON_CROSS_RENAME,
        EXT4_FC_REASON_JOURNAL_FLAG_CHANGE,
-       EXT4_FC_REASON_MEM,
+       EXT4_FC_REASON_NOMEM,
        EXT4_FC_REASON_SWAP_BOOT,
        EXT4_FC_REASON_RESIZE,
        EXT4_FC_REASON_RENAME_DIR,
        EXT4_FC_REASON_FALLOC_RANGE,
+       EXT4_FC_REASON_INODE_JOURNAL_DATA,
        EXT4_FC_COMMIT_FAILED,
        EXT4_FC_REASON_MAX
 };
index d85412d..3ed8c04 100644 (file)
@@ -761,7 +761,6 @@ static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
        if (!daxdev_mapping_supported(vma, dax_dev))
                return -EOPNOTSUPP;
 
-       ext4_fc_start_update(inode);
        file_accessed(file);
        if (IS_DAX(file_inode(file))) {
                vma->vm_ops = &ext4_dax_vm_ops;
@@ -769,7 +768,6 @@ static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
        } else {
                vma->vm_ops = &ext4_file_vm_ops;
        }
-       ext4_fc_stop_update(inode);
        return 0;
 }
 
@@ -782,13 +780,13 @@ static int ext4_sample_last_mounted(struct super_block *sb,
        handle_t *handle;
        int err;
 
-       if (likely(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED))
+       if (likely(ext4_test_mount_flag(sb, EXT4_MF_MNTDIR_SAMPLED)))
                return 0;
 
        if (sb_rdonly(sb) || !sb_start_intwrite_trylock(sb))
                return 0;
 
-       sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
+       ext4_set_mount_flag(sb, EXT4_MF_MNTDIR_SAMPLED);
        /*
         * Sample where the filesystem has been mounted and
         * store it in the superblock for sysadmin convenience
index b232c27..4c2a9fe 100644 (file)
@@ -280,7 +280,7 @@ static int ext4_getfsmap_logdev(struct super_block *sb, struct ext4_fsmap *keys,
 
        /* Fabricate an rmap entry for the external log device. */
        irec.fmr_physical = journal->j_blk_offset;
-       irec.fmr_length = journal->j_maxlen;
+       irec.fmr_length = journal->j_total_len;
        irec.fmr_owner = EXT4_FMR_OWN_LOG;
        irec.fmr_flags = 0;
 
index 81a545f..a42ca95 100644 (file)
@@ -143,7 +143,7 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
        if (sb_rdonly(inode->i_sb)) {
                /* Make sure that we read updated s_mount_flags value */
                smp_rmb();
-               if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
+               if (ext4_test_mount_flag(inode->i_sb, EXT4_MF_FS_ABORTED))
                        ret = -EROFS;
                goto out;
        }
index caa5147..b41512d 100644 (file)
@@ -1880,6 +1880,7 @@ int ext4_inline_data_truncate(struct inode *inode, int *has_inline)
 
        ext4_write_lock_xattr(inode, &no_expand);
        if (!ext4_has_inline_data(inode)) {
+               ext4_write_unlock_xattr(inode, &no_expand);
                *has_inline = 0;
                ext4_journal_stop(handle);
                return 0;
index b96a186..0d8385a 100644 (file)
@@ -327,6 +327,8 @@ stop_handle:
        ext4_xattr_inode_array_free(ea_inode_array);
        return;
 no_delete:
+       if (!list_empty(&EXT4_I(inode)->i_fc_list))
+               ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_NOMEM);
        ext4_clear_inode(inode);        /* We must guarantee clearing of inode... */
 }
 
@@ -730,7 +732,7 @@ out_sem:
                        if (ret)
                                return ret;
                }
-               ext4_fc_track_range(inode, map->m_lblk,
+               ext4_fc_track_range(handle, inode, map->m_lblk,
                            map->m_lblk + map->m_len - 1);
        }
 
@@ -2440,7 +2442,7 @@ static int mpage_map_and_submit_extent(handle_t *handle,
                        struct super_block *sb = inode->i_sb;
 
                        if (ext4_forced_shutdown(EXT4_SB(sb)) ||
-                           EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)
+                           ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
                                goto invalidate_dirty_pages;
                        /*
                         * Let the uper layers retry transient errors.
@@ -2674,7 +2676,7 @@ static int ext4_writepages(struct address_space *mapping,
         * the stack trace.
         */
        if (unlikely(ext4_forced_shutdown(EXT4_SB(mapping->host->i_sb)) ||
-                    sbi->s_mount_flags & EXT4_MF_FS_ABORTED)) {
+                    ext4_test_mount_flag(inode->i_sb, EXT4_MF_FS_ABORTED))) {
                ret = -EROFS;
                goto out_writepages;
        }
@@ -3310,8 +3312,7 @@ static bool ext4_inode_datasync_dirty(struct inode *inode)
                        EXT4_I(inode)->i_datasync_tid))
                        return false;
                if (test_opt2(inode->i_sb, JOURNAL_FAST_COMMIT))
-                       return atomic_read(&EXT4_SB(inode->i_sb)->s_fc_subtid) <
-                               EXT4_I(inode)->i_fc_committed_subtid;
+                       return !list_empty(&EXT4_I(inode)->i_fc_list);
                return true;
        }
 
@@ -4109,7 +4110,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
 
                up_write(&EXT4_I(inode)->i_data_sem);
        }
-       ext4_fc_track_range(inode, first_block, stop_block);
+       ext4_fc_track_range(handle, inode, first_block, stop_block);
        if (IS_SYNC(inode))
                ext4_handle_sync(handle);
 
@@ -5442,14 +5443,14 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
                        }
 
                        if (shrink)
-                               ext4_fc_track_range(inode,
+                               ext4_fc_track_range(handle, inode,
                                        (attr->ia_size > 0 ? attr->ia_size - 1 : 0) >>
                                        inode->i_sb->s_blocksize_bits,
                                        (oldsize > 0 ? oldsize - 1 : 0) >>
                                        inode->i_sb->s_blocksize_bits);
                        else
                                ext4_fc_track_range(
-                                       inode,
+                                       handle, inode,
                                        (oldsize > 0 ? oldsize - 1 : oldsize) >>
                                        inode->i_sb->s_blocksize_bits,
                                        (attr->ia_size > 0 ? attr->ia_size - 1 : 0) >>
@@ -5699,7 +5700,7 @@ int ext4_mark_iloc_dirty(handle_t *handle,
                put_bh(iloc->bh);
                return -EIO;
        }
-       ext4_fc_track_inode(inode);
+       ext4_fc_track_inode(handle, inode);
 
        if (IS_I_VERSION(inode))
                inode_inc_iversion(inode);
index 85abbfb..24af9ed 100644 (file)
@@ -4477,7 +4477,7 @@ static inline void ext4_mb_show_pa(struct super_block *sb)
 {
        ext4_group_t i, ngroups;
 
-       if (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)
+       if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
                return;
 
        ngroups = ext4_get_groups_count(sb);
@@ -4508,7 +4508,7 @@ static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
 {
        struct super_block *sb = ac->ac_sb;
 
-       if (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)
+       if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
                return;
 
        mb_debug(sb, "Can't allocate:"
@@ -5167,7 +5167,7 @@ static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle,
        struct super_block *sb = ar->inode->i_sb;
        ext4_group_t group;
        ext4_grpblk_t blkoff;
-       int  i;
+       int i = sb->s_blocksize;
        ext4_fsblk_t goal, block;
        struct ext4_super_block *es = EXT4_SB(sb)->s_es;
 
index f458d1d..3350926 100644 (file)
@@ -2606,7 +2606,7 @@ static int ext4_create(struct inode *dir, struct dentry *dentry, umode_t mode,
                       bool excl)
 {
        handle_t *handle;
-       struct inode *inode, *inode_save;
+       struct inode *inode;
        int err, credits, retries = 0;
 
        err = dquot_initialize(dir);
@@ -2624,11 +2624,9 @@ retry:
                inode->i_op = &ext4_file_inode_operations;
                inode->i_fop = &ext4_file_operations;
                ext4_set_aops(inode);
-               inode_save = inode;
-               ihold(inode_save);
                err = ext4_add_nondir(handle, dentry, &inode);
-               ext4_fc_track_create(inode_save, dentry);
-               iput(inode_save);
+               if (!err)
+                       ext4_fc_track_create(handle, dentry);
        }
        if (handle)
                ext4_journal_stop(handle);
@@ -2643,7 +2641,7 @@ static int ext4_mknod(struct inode *dir, struct dentry *dentry,
                      umode_t mode, dev_t rdev)
 {
        handle_t *handle;
-       struct inode *inode, *inode_save;
+       struct inode *inode;
        int err, credits, retries = 0;
 
        err = dquot_initialize(dir);
@@ -2660,12 +2658,9 @@ retry:
        if (!IS_ERR(inode)) {
                init_special_inode(inode, inode->i_mode, rdev);
                inode->i_op = &ext4_special_inode_operations;
-               inode_save = inode;
-               ihold(inode_save);
                err = ext4_add_nondir(handle, dentry, &inode);
                if (!err)
-                       ext4_fc_track_create(inode_save, dentry);
-               iput(inode_save);
+                       ext4_fc_track_create(handle, dentry);
        }
        if (handle)
                ext4_journal_stop(handle);
@@ -2829,7 +2824,6 @@ out_clear_inode:
                iput(inode);
                goto out_retry;
        }
-       ext4_fc_track_create(inode, dentry);
        ext4_inc_count(dir);
 
        ext4_update_dx_flag(dir);
@@ -2837,6 +2831,7 @@ out_clear_inode:
        if (err)
                goto out_clear_inode;
        d_instantiate_new(dentry, inode);
+       ext4_fc_track_create(handle, dentry);
        if (IS_DIRSYNC(dir))
                ext4_handle_sync(handle);
 
@@ -3171,7 +3166,7 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
                goto end_rmdir;
        ext4_dec_count(dir);
        ext4_update_dx_flag(dir);
-       ext4_fc_track_unlink(inode, dentry);
+       ext4_fc_track_unlink(handle, dentry);
        retval = ext4_mark_inode_dirty(handle, dir);
 
 #ifdef CONFIG_UNICODE
@@ -3192,13 +3187,12 @@ end_rmdir:
        return retval;
 }
 
-int __ext4_unlink(struct inode *dir, const struct qstr *d_name,
+int __ext4_unlink(handle_t *handle, struct inode *dir, const struct qstr *d_name,
                  struct inode *inode)
 {
        int retval = -ENOENT;
        struct buffer_head *bh;
        struct ext4_dir_entry_2 *de;
-       handle_t *handle = NULL;
        int skip_remove_dentry = 0;
 
        bh = ext4_find_entry(dir, d_name, &de, NULL);
@@ -3217,14 +3211,7 @@ int __ext4_unlink(struct inode *dir, const struct qstr *d_name,
                if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
                        skip_remove_dentry = 1;
                else
-                       goto out_bh;
-       }
-
-       handle = ext4_journal_start(dir, EXT4_HT_DIR,
-                                   EXT4_DATA_TRANS_BLOCKS(dir->i_sb));
-       if (IS_ERR(handle)) {
-               retval = PTR_ERR(handle);
-               goto out_bh;
+                       goto out;
        }
 
        if (IS_DIRSYNC(dir))
@@ -3233,12 +3220,12 @@ int __ext4_unlink(struct inode *dir, const struct qstr *d_name,
        if (!skip_remove_dentry) {
                retval = ext4_delete_entry(handle, dir, de, bh);
                if (retval)
-                       goto out_handle;
+                       goto out;
                dir->i_ctime = dir->i_mtime = current_time(dir);
                ext4_update_dx_flag(dir);
                retval = ext4_mark_inode_dirty(handle, dir);
                if (retval)
-                       goto out_handle;
+                       goto out;
        } else {
                retval = 0;
        }
@@ -3252,15 +3239,14 @@ int __ext4_unlink(struct inode *dir, const struct qstr *d_name,
        inode->i_ctime = current_time(inode);
        retval = ext4_mark_inode_dirty(handle, inode);
 
-out_handle:
-       ext4_journal_stop(handle);
-out_bh:
+out:
        brelse(bh);
        return retval;
 }
 
 static int ext4_unlink(struct inode *dir, struct dentry *dentry)
 {
+       handle_t *handle;
        int retval;
 
        if (unlikely(ext4_forced_shutdown(EXT4_SB(dir->i_sb))))
@@ -3278,9 +3264,16 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
        if (retval)
                goto out_trace;
 
-       retval = __ext4_unlink(dir, &dentry->d_name, d_inode(dentry));
+       handle = ext4_journal_start(dir, EXT4_HT_DIR,
+                                   EXT4_DATA_TRANS_BLOCKS(dir->i_sb));
+       if (IS_ERR(handle)) {
+               retval = PTR_ERR(handle);
+               goto out_trace;
+       }
+
+       retval = __ext4_unlink(handle, dir, &dentry->d_name, d_inode(dentry));
        if (!retval)
-               ext4_fc_track_unlink(d_inode(dentry), dentry);
+               ext4_fc_track_unlink(handle, dentry);
 #ifdef CONFIG_UNICODE
        /* VFS negative dentries are incompatible with Encoding and
         * Case-insensitiveness. Eventually we'll want avoid
@@ -3291,6 +3284,8 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
        if (IS_CASEFOLDED(dir))
                d_invalidate(dentry);
 #endif
+       if (handle)
+               ext4_journal_stop(handle);
 
 out_trace:
        trace_ext4_unlink_exit(dentry, retval);
@@ -3447,7 +3442,6 @@ retry:
 
        err = ext4_add_entry(handle, dentry, inode);
        if (!err) {
-               ext4_fc_track_link(inode, dentry);
                err = ext4_mark_inode_dirty(handle, inode);
                /* this can happen only for tmpfile being
                 * linked the first time
@@ -3455,6 +3449,7 @@ retry:
                if (inode->i_nlink == 1)
                        ext4_orphan_del(handle, inode);
                d_instantiate(dentry, inode);
+               ext4_fc_track_link(handle, dentry);
        } else {
                drop_nlink(inode);
                iput(inode);
@@ -3915,9 +3910,9 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
                        EXT4_FC_REASON_RENAME_DIR);
        } else {
                if (new.inode)
-                       ext4_fc_track_unlink(new.inode, new.dentry);
-               ext4_fc_track_link(old.inode, new.dentry);
-               ext4_fc_track_unlink(old.inode, old.dentry);
+                       ext4_fc_track_unlink(handle, new.dentry);
+               __ext4_fc_track_link(handle, old.inode, new.dentry);
+               __ext4_fc_track_unlink(handle, old.inode, old.dentry);
        }
 
        if (new.inode) {
index ef4734b..c3b8645 100644 (file)
@@ -686,7 +686,7 @@ static void ext4_handle_error(struct super_block *sb)
        if (!test_opt(sb, ERRORS_CONT)) {
                journal_t *journal = EXT4_SB(sb)->s_journal;
 
-               EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
+               ext4_set_mount_flag(sb, EXT4_MF_FS_ABORTED);
                if (journal)
                        jbd2_journal_abort(journal, -EIO);
        }
@@ -904,7 +904,7 @@ void __ext4_abort(struct super_block *sb, const char *function,
        va_end(args);
 
        if (sb_rdonly(sb) == 0) {
-               EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
+               ext4_set_mount_flag(sb, EXT4_MF_FS_ABORTED);
                if (EXT4_SB(sb)->s_journal)
                        jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
 
@@ -1716,11 +1716,10 @@ enum {
        Opt_dioread_nolock, Opt_dioread_lock,
        Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
        Opt_max_dir_size_kb, Opt_nojournal_checksum, Opt_nombcache,
-       Opt_prefetch_block_bitmaps, Opt_no_fc,
+       Opt_prefetch_block_bitmaps,
 #ifdef CONFIG_EXT4_DEBUG
-       Opt_fc_debug_max_replay,
+       Opt_fc_debug_max_replay, Opt_fc_debug_force
 #endif
-       Opt_fc_debug_force
 };
 
 static const match_table_t tokens = {
@@ -1807,9 +1806,8 @@ static const match_table_t tokens = {
        {Opt_init_itable, "init_itable=%u"},
        {Opt_init_itable, "init_itable"},
        {Opt_noinit_itable, "noinit_itable"},
-       {Opt_no_fc, "no_fc"},
-       {Opt_fc_debug_force, "fc_debug_force"},
 #ifdef CONFIG_EXT4_DEBUG
+       {Opt_fc_debug_force, "fc_debug_force"},
        {Opt_fc_debug_max_replay, "fc_debug_max_replay=%u"},
 #endif
        {Opt_max_dir_size_kb, "max_dir_size_kb=%u"},
@@ -2027,8 +2025,8 @@ static const struct mount_opts {
        {Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA |
                       EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA),
                                                        MOPT_CLEAR | MOPT_Q},
-       {Opt_usrjquota, 0, MOPT_Q},
-       {Opt_grpjquota, 0, MOPT_Q},
+       {Opt_usrjquota, 0, MOPT_Q | MOPT_STRING},
+       {Opt_grpjquota, 0, MOPT_Q | MOPT_STRING},
        {Opt_offusrjquota, 0, MOPT_Q},
        {Opt_offgrpjquota, 0, MOPT_Q},
        {Opt_jqfmt_vfsold, QFMT_VFS_OLD, MOPT_QFMT},
@@ -2039,11 +2037,9 @@ static const struct mount_opts {
        {Opt_nombcache, EXT4_MOUNT_NO_MBCACHE, MOPT_SET},
        {Opt_prefetch_block_bitmaps, EXT4_MOUNT_PREFETCH_BLOCK_BITMAPS,
         MOPT_SET},
-       {Opt_no_fc, EXT4_MOUNT2_JOURNAL_FAST_COMMIT,
-        MOPT_CLEAR | MOPT_2 | MOPT_EXT4_ONLY},
+#ifdef CONFIG_EXT4_DEBUG
        {Opt_fc_debug_force, EXT4_MOUNT2_JOURNAL_FAST_COMMIT,
         MOPT_SET | MOPT_2 | MOPT_EXT4_ONLY},
-#ifdef CONFIG_EXT4_DEBUG
        {Opt_fc_debug_max_replay, 0, MOPT_GTE0},
 #endif
        {Opt_err, 0, 0}
@@ -2153,7 +2149,7 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
                ext4_msg(sb, KERN_WARNING, "Ignoring removed %s option", opt);
                return 1;
        case Opt_abort:
-               sbi->s_mount_flags |= EXT4_MF_FS_ABORTED;
+               ext4_set_mount_flag(sb, EXT4_MF_FS_ABORTED);
                return 1;
        case Opt_i_version:
                sb->s_flags |= SB_I_VERSION;
@@ -3976,7 +3972,7 @@ int ext4_calculate_overhead(struct super_block *sb)
         * loaded or not
         */
        if (sbi->s_journal && !sbi->s_journal_bdev)
-               overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_maxlen);
+               overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_total_len);
        else if (ext4_has_feature_journal(sb) && !sbi->s_journal && j_inum) {
                /* j_inum for internal journal is non-zero */
                j_inode = ext4_get_journal_inode(sb, j_inum);
@@ -4340,9 +4336,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 #endif
 
        if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
-               printk_once(KERN_WARNING "EXT4-fs: Warning: mounting with data=journal disables delayed allocation, dioread_nolock, and O_DIRECT support!\n");
+               printk_once(KERN_WARNING "EXT4-fs: Warning: mounting with data=journal disables delayed allocation, dioread_nolock, O_DIRECT and fast_commit support!\n");
                /* can't mount with both data=journal and dioread_nolock. */
                clear_opt(sb, DIOREAD_NOLOCK);
+               clear_opt2(sb, JOURNAL_FAST_COMMIT);
                if (test_opt2(sb, EXPLICIT_DELALLOC)) {
                        ext4_msg(sb, KERN_ERR, "can't mount with "
                                 "both data=journal and delalloc");
@@ -4777,8 +4774,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        INIT_LIST_HEAD(&sbi->s_fc_dentry_q[FC_Q_MAIN]);
        INIT_LIST_HEAD(&sbi->s_fc_dentry_q[FC_Q_STAGING]);
        sbi->s_fc_bytes = 0;
-       sbi->s_mount_flags &= ~EXT4_MF_FC_INELIGIBLE;
-       sbi->s_mount_flags &= ~EXT4_MF_FC_COMMITTING;
+       ext4_clear_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
+       ext4_clear_mount_flag(sb, EXT4_MF_FC_COMMITTING);
        spin_lock_init(&sbi->s_fc_lock);
        memset(&sbi->s_fc_stats, 0, sizeof(sbi->s_fc_stats));
        sbi->s_fc_replay_state.fc_regions = NULL;
@@ -4857,6 +4854,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                goto failed_mount_wq;
        }
 
+       if (test_opt2(sb, JOURNAL_FAST_COMMIT) &&
+               !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
+                                         JBD2_FEATURE_INCOMPAT_FAST_COMMIT)) {
+               ext4_msg(sb, KERN_ERR,
+                       "Failed to set fast commit journal feature");
+               goto failed_mount_wq;
+       }
+
        /* We have now updated the journal if required, so we can
         * validate the data journaling mode. */
        switch (test_opt(sb, DATA_FLAGS)) {
@@ -5872,7 +5877,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
                goto restore_opts;
        }
 
-       if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
+       if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
                ext4_abort(sb, EXT4_ERR_ESHUTDOWN, "Abort forced by user");
 
        sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
@@ -5886,7 +5891,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
        }
 
        if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) {
-               if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) {
+               if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED)) {
                        err = -EROFS;
                        goto restore_opts;
                }
@@ -6560,10 +6565,6 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
        brelse(bh);
 out:
        if (inode->i_size < off + len) {
-               ext4_fc_track_range(inode,
-                       (inode->i_size > 0 ? inode->i_size - 1 : 0)
-                               >> inode->i_sb->s_blocksize_bits,
-                       (off + len) >> inode->i_sb->s_blocksize_bits);
                i_size_write(inode, off + len);
                EXT4_I(inode)->i_disksize = inode->i_size;
                err2 = ext4_mark_inode_dirty(handle, inode);
index 02894df..b53c055 100644 (file)
@@ -482,6 +482,10 @@ static void io_impersonate_work(struct io_worker *worker,
                current->files = work->identity->files;
                current->nsproxy = work->identity->nsproxy;
                task_unlock(current);
+               if (!work->identity->files) {
+                       /* failed grabbing files, ensure work gets cancelled */
+                       work->flags |= IO_WQ_WORK_CANCEL;
+               }
        }
        if ((work->flags & IO_WQ_WORK_FS) && current->fs != work->identity->fs)
                current->fs = work->identity->fs;
index a7429c9..8018c70 100644 (file)
@@ -995,20 +995,33 @@ static void io_sq_thread_drop_mm(void)
        if (mm) {
                kthread_unuse_mm(mm);
                mmput(mm);
+               current->mm = NULL;
        }
 }
 
 static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx)
 {
-       if (!current->mm) {
-               if (unlikely(!(ctx->flags & IORING_SETUP_SQPOLL) ||
-                            !ctx->sqo_task->mm ||
-                            !mmget_not_zero(ctx->sqo_task->mm)))
-                       return -EFAULT;
-               kthread_use_mm(ctx->sqo_task->mm);
+       struct mm_struct *mm;
+
+       if (current->mm)
+               return 0;
+
+       /* Should never happen */
+       if (unlikely(!(ctx->flags & IORING_SETUP_SQPOLL)))
+               return -EFAULT;
+
+       task_lock(ctx->sqo_task);
+       mm = ctx->sqo_task->mm;
+       if (unlikely(!mm || !mmget_not_zero(mm)))
+               mm = NULL;
+       task_unlock(ctx->sqo_task);
+
+       if (mm) {
+               kthread_use_mm(mm);
+               return 0;
        }
 
-       return 0;
+       return -EFAULT;
 }
 
 static int io_sq_thread_acquire_mm(struct io_ring_ctx *ctx,
@@ -1274,9 +1287,12 @@ static bool io_identity_cow(struct io_kiocb *req)
        /* add one for this request */
        refcount_inc(&id->count);
 
-       /* drop old identity, assign new one. one ref for req, one for tctx */
-       if (req->work.identity != tctx->identity &&
-           refcount_sub_and_test(2, &req->work.identity->count))
+       /* drop tctx and req identity references, if needed */
+       if (tctx->identity != &tctx->__identity &&
+           refcount_dec_and_test(&tctx->identity->count))
+               kfree(tctx->identity);
+       if (req->work.identity != &tctx->__identity &&
+           refcount_dec_and_test(&req->work.identity->count))
                kfree(req->work.identity);
 
        req->work.identity = id;
@@ -1577,14 +1593,29 @@ static void io_cqring_mark_overflow(struct io_ring_ctx *ctx)
        }
 }
 
-static inline bool io_match_files(struct io_kiocb *req,
-                                      struct files_struct *files)
+static inline bool __io_match_files(struct io_kiocb *req,
+                                   struct files_struct *files)
 {
+       return ((req->flags & REQ_F_WORK_INITIALIZED) &&
+               (req->work.flags & IO_WQ_WORK_FILES)) &&
+               req->work.identity->files == files;
+}
+
+static bool io_match_files(struct io_kiocb *req,
+                          struct files_struct *files)
+{
+       struct io_kiocb *link;
+
        if (!files)
                return true;
-       if ((req->flags & REQ_F_WORK_INITIALIZED) &&
-           (req->work.flags & IO_WQ_WORK_FILES))
-               return req->work.identity->files == files;
+       if (__io_match_files(req, files))
+               return true;
+       if (req->flags & REQ_F_LINK_HEAD) {
+               list_for_each_entry(link, &req->link_list, link_list) {
+                       if (__io_match_files(link, files))
+                               return true;
+               }
+       }
        return false;
 }
 
@@ -1668,7 +1699,8 @@ static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
                WRITE_ONCE(cqe->user_data, req->user_data);
                WRITE_ONCE(cqe->res, res);
                WRITE_ONCE(cqe->flags, cflags);
-       } else if (ctx->cq_overflow_flushed || req->task->io_uring->in_idle) {
+       } else if (ctx->cq_overflow_flushed ||
+                  atomic_read(&req->task->io_uring->in_idle)) {
                /*
                 * If we're in ring overflow flush mode, or in task cancel mode,
                 * then we cannot store the request for later flushing, we need
@@ -1838,7 +1870,7 @@ static void __io_free_req(struct io_kiocb *req)
        io_dismantle_req(req);
 
        percpu_counter_dec(&tctx->inflight);
-       if (tctx->in_idle)
+       if (atomic_read(&tctx->in_idle))
                wake_up(&tctx->wait);
        put_task_struct(req->task);
 
@@ -7695,7 +7727,8 @@ static int io_uring_alloc_task_context(struct task_struct *task)
        xa_init(&tctx->xa);
        init_waitqueue_head(&tctx->wait);
        tctx->last = NULL;
-       tctx->in_idle = 0;
+       atomic_set(&tctx->in_idle, 0);
+       tctx->sqpoll = false;
        io_init_identity(&tctx->__identity);
        tctx->identity = &tctx->__identity;
        task->io_uring = tctx;
@@ -8388,22 +8421,6 @@ static bool io_match_link(struct io_kiocb *preq, struct io_kiocb *req)
        return false;
 }
 
-static bool io_match_link_files(struct io_kiocb *req,
-                               struct files_struct *files)
-{
-       struct io_kiocb *link;
-
-       if (io_match_files(req, files))
-               return true;
-       if (req->flags & REQ_F_LINK_HEAD) {
-               list_for_each_entry(link, &req->link_list, link_list) {
-                       if (io_match_files(link, files))
-                               return true;
-               }
-       }
-       return false;
-}
-
 /*
  * We're looking to cancel 'req' because it's holding on to our files, but
  * 'req' could be a link to another request. See if it is, and cancel that
@@ -8453,7 +8470,21 @@ static bool io_timeout_remove_link(struct io_ring_ctx *ctx,
 
 static bool io_cancel_link_cb(struct io_wq_work *work, void *data)
 {
-       return io_match_link(container_of(work, struct io_kiocb, work), data);
+       struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+       bool ret;
+
+       if (req->flags & REQ_F_LINK_TIMEOUT) {
+               unsigned long flags;
+               struct io_ring_ctx *ctx = req->ctx;
+
+               /* protect against races with linked timeouts */
+               spin_lock_irqsave(&ctx->completion_lock, flags);
+               ret = io_match_link(req, data);
+               spin_unlock_irqrestore(&ctx->completion_lock, flags);
+       } else {
+               ret = io_match_link(req, data);
+       }
+       return ret;
 }
 
 static void io_attempt_cancel(struct io_ring_ctx *ctx, struct io_kiocb *req)
@@ -8479,6 +8510,7 @@ static void io_attempt_cancel(struct io_ring_ctx *ctx, struct io_kiocb *req)
 }
 
 static void io_cancel_defer_files(struct io_ring_ctx *ctx,
+                                 struct task_struct *task,
                                  struct files_struct *files)
 {
        struct io_defer_entry *de = NULL;
@@ -8486,7 +8518,8 @@ static void io_cancel_defer_files(struct io_ring_ctx *ctx,
 
        spin_lock_irq(&ctx->completion_lock);
        list_for_each_entry_reverse(de, &ctx->defer_list, list) {
-               if (io_match_link_files(de->req, files)) {
+               if (io_task_match(de->req, task) &&
+                   io_match_files(de->req, files)) {
                        list_cut_position(&list, &ctx->defer_list, &de->list);
                        break;
                }
@@ -8512,7 +8545,6 @@ static bool io_uring_cancel_files(struct io_ring_ctx *ctx,
        if (list_empty_careful(&ctx->inflight_list))
                return false;
 
-       io_cancel_defer_files(ctx, files);
        /* cancel all at once, should be faster than doing it one by one*/
        io_wq_cancel_cb(ctx->io_wq, io_wq_files_match, files, true);
 
@@ -8598,8 +8630,16 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
 {
        struct task_struct *task = current;
 
-       if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data)
+       if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
                task = ctx->sq_data->thread;
+               atomic_inc(&task->io_uring->in_idle);
+               io_sq_thread_park(ctx->sq_data);
+       }
+
+       if (files)
+               io_cancel_defer_files(ctx, NULL, files);
+       else
+               io_cancel_defer_files(ctx, task, NULL);
 
        io_cqring_overflow_flush(ctx, true, task, files);
 
@@ -8607,12 +8647,23 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
                io_run_task_work();
                cond_resched();
        }
+
+       if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
+               atomic_dec(&task->io_uring->in_idle);
+               /*
+                * If the files that are going away are the ones in the thread
+                * identity, clear them out.
+                */
+               if (task->io_uring->identity->files == files)
+                       task->io_uring->identity->files = NULL;
+               io_sq_thread_unpark(ctx->sq_data);
+       }
 }
 
 /*
  * Note that this task has used io_uring. We use it for cancelation purposes.
  */
-static int io_uring_add_task_file(struct file *file)
+static int io_uring_add_task_file(struct io_ring_ctx *ctx, struct file *file)
 {
        struct io_uring_task *tctx = current->io_uring;
 
@@ -8634,6 +8685,14 @@ static int io_uring_add_task_file(struct file *file)
                tctx->last = file;
        }
 
+       /*
+        * This is race safe in that the task itself is doing this, hence it
+        * cannot be going through the exit/cancel paths at the same time.
+        * This cannot be modified while exit/cancel is running.
+        */
+       if (!tctx->sqpoll && (ctx->flags & IORING_SETUP_SQPOLL))
+               tctx->sqpoll = true;
+
        return 0;
 }
 
@@ -8675,7 +8734,7 @@ void __io_uring_files_cancel(struct files_struct *files)
        unsigned long index;
 
        /* make sure overflow events are dropped */
-       tctx->in_idle = true;
+       atomic_inc(&tctx->in_idle);
 
        xa_for_each(&tctx->xa, index, file) {
                struct io_ring_ctx *ctx = file->private_data;
@@ -8684,6 +8743,35 @@ void __io_uring_files_cancel(struct files_struct *files)
                if (files)
                        io_uring_del_task_file(file);
        }
+
+       atomic_dec(&tctx->in_idle);
+}
+
+static s64 tctx_inflight(struct io_uring_task *tctx)
+{
+       unsigned long index;
+       struct file *file;
+       s64 inflight;
+
+       inflight = percpu_counter_sum(&tctx->inflight);
+       if (!tctx->sqpoll)
+               return inflight;
+
+       /*
+        * If we have SQPOLL rings, then we need to iterate and find them, and
+        * add the pending count for those.
+        */
+       xa_for_each(&tctx->xa, index, file) {
+               struct io_ring_ctx *ctx = file->private_data;
+
+               if (ctx->flags & IORING_SETUP_SQPOLL) {
+                       struct io_uring_task *__tctx = ctx->sqo_task->io_uring;
+
+                       inflight += percpu_counter_sum(&__tctx->inflight);
+               }
+       }
+
+       return inflight;
 }
 
 /*
@@ -8697,11 +8785,11 @@ void __io_uring_task_cancel(void)
        s64 inflight;
 
        /* make sure overflow events are dropped */
-       tctx->in_idle = true;
+       atomic_inc(&tctx->in_idle);
 
        do {
                /* read completions before cancelations */
-               inflight = percpu_counter_sum(&tctx->inflight);
+               inflight = tctx_inflight(tctx);
                if (!inflight)
                        break;
                __io_uring_files_cancel(NULL);
@@ -8712,13 +8800,13 @@ void __io_uring_task_cancel(void)
                 * If we've seen completions, retry. This avoids a race where
                 * a completion comes in before we did prepare_to_wait().
                 */
-               if (inflight != percpu_counter_sum(&tctx->inflight))
+               if (inflight != tctx_inflight(tctx))
                        continue;
                schedule();
        } while (1);
 
        finish_wait(&tctx->wait, &wait);
-       tctx->in_idle = false;
+       atomic_dec(&tctx->in_idle);
 }
 
 static int io_uring_flush(struct file *file, void *data)
@@ -8863,7 +8951,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
                        io_sqpoll_wait_sq(ctx);
                submitted = to_submit;
        } else if (to_submit) {
-               ret = io_uring_add_task_file(f.file);
+               ret = io_uring_add_task_file(ctx, f.file);
                if (unlikely(ret))
                        goto out;
                mutex_lock(&ctx->uring_lock);
@@ -8900,7 +8988,8 @@ out_fput:
 #ifdef CONFIG_PROC_FS
 static int io_uring_show_cred(int id, void *p, void *data)
 {
-       const struct cred *cred = p;
+       struct io_identity *iod = p;
+       const struct cred *cred = iod->creds;
        struct seq_file *m = data;
        struct user_namespace *uns = seq_user_ns(m);
        struct group_info *gi;
@@ -9092,7 +9181,7 @@ err_fd:
 #if defined(CONFIG_UNIX)
        ctx->ring_sock->file = file;
 #endif
-       if (unlikely(io_uring_add_task_file(file))) {
+       if (unlikely(io_uring_add_task_file(ctx, file))) {
                file = ERR_PTR(-ENOMEM);
                goto err_fd;
        }
index 8180061..10cc797 100644 (file)
@@ -1374,6 +1374,7 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
        WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list));
        WARN_ON_ONCE(!PageLocked(page));
        WARN_ON_ONCE(PageWriteback(page));
+       WARN_ON_ONCE(PageDirty(page));
 
        /*
         * We cannot cancel the ioend directly here on error.  We may have
@@ -1382,33 +1383,22 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
         * appropriately.
         */
        if (unlikely(error)) {
+               /*
+                * Let the filesystem know what portion of the current page
+                * failed to map. If the page wasn't added to ioend, it
+                * won't be affected by I/O completion and we must unlock it
+                * now.
+                */
+               if (wpc->ops->discard_page)
+                       wpc->ops->discard_page(page, file_offset);
                if (!count) {
-                       /*
-                        * If the current page hasn't been added to ioend, it
-                        * won't be affected by I/O completions and we must
-                        * discard and unlock it right here.
-                        */
-                       if (wpc->ops->discard_page)
-                               wpc->ops->discard_page(page);
                        ClearPageUptodate(page);
                        unlock_page(page);
                        goto done;
                }
-
-               /*
-                * If the page was not fully cleaned, we need to ensure that the
-                * higher layers come back to it correctly.  That means we need
-                * to keep the page dirty, and for WB_SYNC_ALL writeback we need
-                * to ensure the PAGECACHE_TAG_TOWRITE index mark is not removed
-                * so another attempt to write this page in this writeback sweep
-                * will be made.
-                */
-               set_page_writeback_keepwrite(page);
-       } else {
-               clear_page_dirty_for_io(page);
-               set_page_writeback(page);
        }
 
+       set_page_writeback(page);
        unlock_page(page);
 
        /*
index 263f02a..472932b 100644 (file)
@@ -106,6 +106,8 @@ static int __try_to_free_cp_buf(struct journal_head *jh)
  * for a checkpoint to free up some space in the log.
  */
 void __jbd2_log_wait_for_space(journal_t *journal)
+__acquires(&journal->j_state_lock)
+__releases(&journal->j_state_lock)
 {
        int nblocks, space_left;
        /* assert_spin_locked(&journal->j_state_lock); */
index fa688e1..b121d7d 100644 (file)
@@ -450,6 +450,15 @@ void jbd2_journal_commit_transaction(journal_t *journal)
                schedule();
                write_lock(&journal->j_state_lock);
                finish_wait(&journal->j_fc_wait, &wait);
+               /*
+                * TODO: by blocking fast commits here, we are increasing
+                * fsync() latency slightly. Strictly speaking, we don't need
+                * to block fast commits until the transaction enters T_FLUSH
+                * state. So an optimization is possible where we block new fast
+                * commits here and wait for existing ones to complete
+                * just before we enter T_FLUSH. That way, the existing fast
+                * commits and this full commit can proceed in parallel.
+                */
        }
        write_unlock(&journal->j_state_lock);
 
@@ -801,7 +810,7 @@ start_journal_io:
                if (first_block < journal->j_tail)
                        freed += journal->j_last - journal->j_first;
                /* Update tail only if we free significant amount of space */
-               if (freed < journal->j_maxlen / 4)
+               if (freed < jbd2_journal_get_max_txn_bufs(journal))
                        update_tail = 0;
        }
        J_ASSERT(commit_transaction->t_state == T_COMMIT);
index 0c7c42b..0c3d5e3 100644 (file)
@@ -727,6 +727,8 @@ int jbd2_log_wait_commit(journal_t *journal, tid_t tid)
  */
 int jbd2_fc_begin_commit(journal_t *journal, tid_t tid)
 {
+       if (unlikely(is_journal_aborted(journal)))
+               return -EIO;
        /*
         * Fast commits only allowed if at least one full commit has
         * been processed.
@@ -734,10 +736,12 @@ int jbd2_fc_begin_commit(journal_t *journal, tid_t tid)
        if (!journal->j_stats.ts_tid)
                return -EINVAL;
 
-       if (tid <= journal->j_commit_sequence)
+       write_lock(&journal->j_state_lock);
+       if (tid <= journal->j_commit_sequence) {
+               write_unlock(&journal->j_state_lock);
                return -EALREADY;
+       }
 
-       write_lock(&journal->j_state_lock);
        if (journal->j_flags & JBD2_FULL_COMMIT_ONGOING ||
            (journal->j_flags & JBD2_FAST_COMMIT_ONGOING)) {
                DEFINE_WAIT(wait);
@@ -777,13 +781,19 @@ static int __jbd2_fc_end_commit(journal_t *journal, tid_t tid, bool fallback)
 
 int jbd2_fc_end_commit(journal_t *journal)
 {
-       return __jbd2_fc_end_commit(journal, 0, 0);
+       return __jbd2_fc_end_commit(journal, 0, false);
 }
 EXPORT_SYMBOL(jbd2_fc_end_commit);
 
-int jbd2_fc_end_commit_fallback(journal_t *journal, tid_t tid)
+int jbd2_fc_end_commit_fallback(journal_t *journal)
 {
-       return __jbd2_fc_end_commit(journal, tid, 1);
+       tid_t tid;
+
+       read_lock(&journal->j_state_lock);
+       tid = journal->j_running_transaction ?
+               journal->j_running_transaction->t_tid : 0;
+       read_unlock(&journal->j_state_lock);
+       return __jbd2_fc_end_commit(journal, tid, true);
 }
 EXPORT_SYMBOL(jbd2_fc_end_commit_fallback);
 
@@ -865,7 +875,6 @@ int jbd2_fc_get_buf(journal_t *journal, struct buffer_head **bh_out)
        int fc_off;
 
        *bh_out = NULL;
-       write_lock(&journal->j_state_lock);
 
        if (journal->j_fc_off + journal->j_fc_first < journal->j_fc_last) {
                fc_off = journal->j_fc_off;
@@ -874,7 +883,6 @@ int jbd2_fc_get_buf(journal_t *journal, struct buffer_head **bh_out)
        } else {
                ret = -EINVAL;
        }
-       write_unlock(&journal->j_state_lock);
 
        if (ret)
                return ret;
@@ -887,11 +895,7 @@ int jbd2_fc_get_buf(journal_t *journal, struct buffer_head **bh_out)
        if (!bh)
                return -ENOMEM;
 
-       lock_buffer(bh);
 
-       clear_buffer_uptodate(bh);
-       set_buffer_dirty(bh);
-       unlock_buffer(bh);
        journal->j_fc_wbuf[fc_off] = bh;
 
        *bh_out = bh;
@@ -909,9 +913,7 @@ int jbd2_fc_wait_bufs(journal_t *journal, int num_blks)
        struct buffer_head *bh;
        int i, j_fc_off;
 
-       read_lock(&journal->j_state_lock);
        j_fc_off = journal->j_fc_off;
-       read_unlock(&journal->j_state_lock);
 
        /*
         * Wait in reverse order to minimize chances of us being woken up before
@@ -939,9 +941,7 @@ int jbd2_fc_release_bufs(journal_t *journal)
        struct buffer_head *bh;
        int i, j_fc_off;
 
-       read_lock(&journal->j_state_lock);
        j_fc_off = journal->j_fc_off;
-       read_unlock(&journal->j_state_lock);
 
        /*
         * Wait in reverse order to minimize chances of us being woken up before
@@ -1348,23 +1348,16 @@ static journal_t *journal_init_common(struct block_device *bdev,
        journal->j_dev = bdev;
        journal->j_fs_dev = fs_dev;
        journal->j_blk_offset = start;
-       journal->j_maxlen = len;
+       journal->j_total_len = len;
        /* We need enough buffers to write out full descriptor block. */
        n = journal->j_blocksize / jbd2_min_tag_size();
        journal->j_wbufsize = n;
+       journal->j_fc_wbuf = NULL;
        journal->j_wbuf = kmalloc_array(n, sizeof(struct buffer_head *),
                                        GFP_KERNEL);
        if (!journal->j_wbuf)
                goto err_cleanup;
 
-       if (journal->j_fc_wbufsize > 0) {
-               journal->j_fc_wbuf = kmalloc_array(journal->j_fc_wbufsize,
-                                       sizeof(struct buffer_head *),
-                                       GFP_KERNEL);
-               if (!journal->j_fc_wbuf)
-                       goto err_cleanup;
-       }
-
        bh = getblk_unmovable(journal->j_dev, start, journal->j_blocksize);
        if (!bh) {
                pr_err("%s: Cannot get buffer for journal superblock\n",
@@ -1378,23 +1371,11 @@ static journal_t *journal_init_common(struct block_device *bdev,
 
 err_cleanup:
        kfree(journal->j_wbuf);
-       kfree(journal->j_fc_wbuf);
        jbd2_journal_destroy_revoke(journal);
        kfree(journal);
        return NULL;
 }
 
-int jbd2_fc_init(journal_t *journal, int num_fc_blks)
-{
-       journal->j_fc_wbufsize = num_fc_blks;
-       journal->j_fc_wbuf = kmalloc_array(journal->j_fc_wbufsize,
-                               sizeof(struct buffer_head *), GFP_KERNEL);
-       if (!journal->j_fc_wbuf)
-               return -ENOMEM;
-       return 0;
-}
-EXPORT_SYMBOL(jbd2_fc_init);
-
 /* jbd2_journal_init_dev and jbd2_journal_init_inode:
  *
  * Create a journal structure assigned some fixed set of disk blocks to
@@ -1512,16 +1493,7 @@ static int journal_reset(journal_t *journal)
        }
 
        journal->j_first = first;
-
-       if (jbd2_has_feature_fast_commit(journal) &&
-           journal->j_fc_wbufsize > 0) {
-               journal->j_fc_last = last;
-               journal->j_last = last - journal->j_fc_wbufsize;
-               journal->j_fc_first = journal->j_last + 1;
-               journal->j_fc_off = 0;
-       } else {
-               journal->j_last = last;
-       }
+       journal->j_last = last;
 
        journal->j_head = journal->j_first;
        journal->j_tail = journal->j_first;
@@ -1531,7 +1503,14 @@ static int journal_reset(journal_t *journal)
        journal->j_commit_sequence = journal->j_transaction_sequence - 1;
        journal->j_commit_request = journal->j_commit_sequence;
 
-       journal->j_max_transaction_buffers = journal->j_maxlen / 4;
+       journal->j_max_transaction_buffers = jbd2_journal_get_max_txn_bufs(journal);
+
+       /*
+        * Now that journal recovery is done, turn fast commits off here. This
+        * way, if fast commit was enabled before the crash but if now FS has
+        * disabled it, we don't enable fast commits.
+        */
+       jbd2_clear_feature_fast_commit(journal);
 
        /*
         * As a special case, if the on-disk copy is already marked as needing
@@ -1792,15 +1771,15 @@ static int journal_get_superblock(journal_t *journal)
                goto out;
        }
 
-       if (be32_to_cpu(sb->s_maxlen) < journal->j_maxlen)
-               journal->j_maxlen = be32_to_cpu(sb->s_maxlen);
-       else if (be32_to_cpu(sb->s_maxlen) > journal->j_maxlen) {
+       if (be32_to_cpu(sb->s_maxlen) < journal->j_total_len)
+               journal->j_total_len = be32_to_cpu(sb->s_maxlen);
+       else if (be32_to_cpu(sb->s_maxlen) > journal->j_total_len) {
                printk(KERN_WARNING "JBD2: journal file too short\n");
                goto out;
        }
 
        if (be32_to_cpu(sb->s_first) == 0 ||
-           be32_to_cpu(sb->s_first) >= journal->j_maxlen) {
+           be32_to_cpu(sb->s_first) >= journal->j_total_len) {
                printk(KERN_WARNING
                        "JBD2: Invalid start block of journal: %u\n",
                        be32_to_cpu(sb->s_first));
@@ -1872,6 +1851,7 @@ static int load_superblock(journal_t *journal)
 {
        int err;
        journal_superblock_t *sb;
+       int num_fc_blocks;
 
        err = journal_get_superblock(journal);
        if (err)
@@ -1883,15 +1863,17 @@ static int load_superblock(journal_t *journal)
        journal->j_tail = be32_to_cpu(sb->s_start);
        journal->j_first = be32_to_cpu(sb->s_first);
        journal->j_errno = be32_to_cpu(sb->s_errno);
+       journal->j_last = be32_to_cpu(sb->s_maxlen);
 
-       if (jbd2_has_feature_fast_commit(journal) &&
-           journal->j_fc_wbufsize > 0) {
+       if (jbd2_has_feature_fast_commit(journal)) {
                journal->j_fc_last = be32_to_cpu(sb->s_maxlen);
-               journal->j_last = journal->j_fc_last - journal->j_fc_wbufsize;
+               num_fc_blocks = be32_to_cpu(sb->s_num_fc_blks);
+               if (!num_fc_blocks)
+                       num_fc_blocks = JBD2_MIN_FC_BLOCKS;
+               if (journal->j_last - num_fc_blocks >= JBD2_MIN_JOURNAL_BLOCKS)
+                       journal->j_last = journal->j_fc_last - num_fc_blocks;
                journal->j_fc_first = journal->j_last + 1;
                journal->j_fc_off = 0;
-       } else {
-               journal->j_last = be32_to_cpu(sb->s_maxlen);
        }
 
        return 0;
@@ -1954,9 +1936,6 @@ int jbd2_journal_load(journal_t *journal)
         */
        journal->j_flags &= ~JBD2_ABORT;
 
-       if (journal->j_fc_wbufsize > 0)
-               jbd2_journal_set_features(journal, 0, 0,
-                                         JBD2_FEATURE_INCOMPAT_FAST_COMMIT);
        /* OK, we've finished with the dynamic journal bits:
         * reinitialise the dynamic contents of the superblock in memory
         * and reset them on disk. */
@@ -2040,8 +2019,7 @@ int jbd2_journal_destroy(journal_t *journal)
                jbd2_journal_destroy_revoke(journal);
        if (journal->j_chksum_driver)
                crypto_free_shash(journal->j_chksum_driver);
-       if (journal->j_fc_wbufsize > 0)
-               kfree(journal->j_fc_wbuf);
+       kfree(journal->j_fc_wbuf);
        kfree(journal->j_wbuf);
        kfree(journal);
 
@@ -2116,6 +2094,37 @@ int jbd2_journal_check_available_features(journal_t *journal, unsigned long comp
        return 0;
 }
 
+static int
+jbd2_journal_initialize_fast_commit(journal_t *journal)
+{
+       journal_superblock_t *sb = journal->j_superblock;
+       unsigned long long num_fc_blks;
+
+       num_fc_blks = be32_to_cpu(sb->s_num_fc_blks);
+       if (num_fc_blks == 0)
+               num_fc_blks = JBD2_MIN_FC_BLOCKS;
+       if (journal->j_last - num_fc_blks < JBD2_MIN_JOURNAL_BLOCKS)
+               return -ENOSPC;
+
+       /* Are we called twice? */
+       WARN_ON(journal->j_fc_wbuf != NULL);
+       journal->j_fc_wbuf = kmalloc_array(num_fc_blks,
+                               sizeof(struct buffer_head *), GFP_KERNEL);
+       if (!journal->j_fc_wbuf)
+               return -ENOMEM;
+
+       journal->j_fc_wbufsize = num_fc_blks;
+       journal->j_fc_last = journal->j_last;
+       journal->j_last = journal->j_fc_last - num_fc_blks;
+       journal->j_fc_first = journal->j_last + 1;
+       journal->j_fc_off = 0;
+       journal->j_free = journal->j_last - journal->j_first;
+       journal->j_max_transaction_buffers =
+               jbd2_journal_get_max_txn_bufs(journal);
+
+       return 0;
+}
+
 /**
  * int jbd2_journal_set_features() - Mark a given journal feature in the superblock
  * @journal: Journal to act on.
@@ -2159,6 +2168,13 @@ int jbd2_journal_set_features(journal_t *journal, unsigned long compat,
 
        sb = journal->j_superblock;
 
+       if (incompat & JBD2_FEATURE_INCOMPAT_FAST_COMMIT) {
+               if (jbd2_journal_initialize_fast_commit(journal)) {
+                       pr_err("JBD2: Cannot enable fast commits.\n");
+                       return 0;
+               }
+       }
+
        /* Load the checksum driver if necessary */
        if ((journal->j_chksum_driver == NULL) &&
            INCOMPAT_FEATURE_ON(JBD2_FEATURE_INCOMPAT_CSUM_V3)) {
index eb26061..dc0694f 100644 (file)
@@ -74,8 +74,8 @@ static int do_readahead(journal_t *journal, unsigned int start)
 
        /* Do up to 128K of readahead */
        max = start + (128 * 1024 / journal->j_blocksize);
-       if (max > journal->j_maxlen)
-               max = journal->j_maxlen;
+       if (max > journal->j_total_len)
+               max = journal->j_total_len;
 
        /* Do the readahead itself.  We'll submit MAXBUF buffer_heads at
         * a time to the block device IO layer. */
@@ -134,7 +134,7 @@ static int jread(struct buffer_head **bhp, journal_t *journal,
 
        *bhp = NULL;
 
-       if (offset >= journal->j_maxlen) {
+       if (offset >= journal->j_total_len) {
                printk(KERN_ERR "JBD2: corrupted journal superblock\n");
                return -EFSCORRUPTED;
        }
index 4398573..d54f046 100644 (file)
@@ -195,8 +195,10 @@ static void wait_transaction_switching(journal_t *journal)
        DEFINE_WAIT(wait);
 
        if (WARN_ON(!journal->j_running_transaction ||
-                   journal->j_running_transaction->t_state != T_SWITCH))
+                   journal->j_running_transaction->t_state != T_SWITCH)) {
+               read_unlock(&journal->j_state_lock);
                return;
+       }
        prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
                        TASK_UNINTERRUPTIBLE);
        read_unlock(&journal->j_state_lock);
index cb52db9..4e011ad 100644 (file)
@@ -955,7 +955,6 @@ out:
 
 static loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int whence)
 {
-       struct inode *inode = file_inode(filp);
        struct nfs_open_dir_context *dir_ctx = filp->private_data;
 
        dfprintk(FILE, "NFS: llseek dir(%pD2, %lld, %d)\n",
@@ -967,15 +966,15 @@ static loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int whence)
        case SEEK_SET:
                if (offset < 0)
                        return -EINVAL;
-               inode_lock(inode);
+               spin_lock(&filp->f_lock);
                break;
        case SEEK_CUR:
                if (offset == 0)
                        return filp->f_pos;
-               inode_lock(inode);
+               spin_lock(&filp->f_lock);
                offset += filp->f_pos;
                if (offset < 0) {
-                       inode_unlock(inode);
+                       spin_unlock(&filp->f_lock);
                        return -EINVAL;
                }
        }
@@ -987,7 +986,7 @@ static loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int whence)
                        dir_ctx->dir_cookie = 0;
                dir_ctx->duped = 0;
        }
-       inode_unlock(inode);
+       spin_unlock(&filp->f_lock);
        return offset;
 }
 
@@ -998,13 +997,9 @@ static loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int whence)
 static int nfs_fsync_dir(struct file *filp, loff_t start, loff_t end,
                         int datasync)
 {
-       struct inode *inode = file_inode(filp);
-
        dfprintk(FILE, "NFS: fsync dir(%pD2) datasync %d\n", filp, datasync);
 
-       inode_lock(inode);
-       nfs_inc_stats(inode, NFSIOS_VFSFSYNC);
-       inode_unlock(inode);
+       nfs_inc_stats(file_inode(filp), NFSIOS_VFSFSYNC);
        return 0;
 }
 
index b51424f..6c2ce79 100644 (file)
@@ -1047,8 +1047,10 @@ out4:
 
 void nfs4_xattr_cache_exit(void)
 {
+       unregister_shrinker(&nfs4_xattr_large_entry_shrinker);
        unregister_shrinker(&nfs4_xattr_entry_shrinker);
        unregister_shrinker(&nfs4_xattr_cache_shrinker);
+       list_lru_destroy(&nfs4_xattr_large_entry_lru);
        list_lru_destroy(&nfs4_xattr_entry_lru);
        list_lru_destroy(&nfs4_xattr_cache_lru);
        kmem_cache_destroy(nfs4_xattr_cache_cachep);
index 0dc31ad..6e060a8 100644 (file)
                                 1 + nfs4_xattr_name_maxsz + 1)
 #define decode_setxattr_maxsz   (op_decode_hdr_maxsz + decode_change_info_maxsz)
 #define encode_listxattrs_maxsz  (op_encode_hdr_maxsz + 2 + 1)
-#define decode_listxattrs_maxsz  (op_decode_hdr_maxsz + 2 + 1 + 1)
+#define decode_listxattrs_maxsz  (op_decode_hdr_maxsz + 2 + 1 + 1 + 1)
 #define encode_removexattr_maxsz (op_encode_hdr_maxsz + 1 + \
                                  nfs4_xattr_name_maxsz)
 #define decode_removexattr_maxsz (op_decode_hdr_maxsz + \
@@ -531,7 +531,7 @@ static void encode_listxattrs(struct xdr_stream *xdr,
 {
        __be32 *p;
 
-       encode_op_hdr(xdr, OP_LISTXATTRS, decode_listxattrs_maxsz + 1, hdr);
+       encode_op_hdr(xdr, OP_LISTXATTRS, decode_listxattrs_maxsz, hdr);
 
        p = reserve_space(xdr, 12);
        if (unlikely(!p))
index 8d32788..fa14830 100644 (file)
 #define NFS_ROOT               "/tftpboot/%s"
 
 /* Default NFSROOT mount options. */
+#if defined(CONFIG_NFS_V2)
 #define NFS_DEF_OPTIONS                "vers=2,tcp,rsize=4096,wsize=4096"
+#elif defined(CONFIG_NFS_V3)
+#define NFS_DEF_OPTIONS                "vers=3,tcp,rsize=4096,wsize=4096"
+#else
+#define NFS_DEF_OPTIONS                "vers=4,tcp,rsize=4096,wsize=4096"
+#endif
 
 /* Parameters passed from the kernel command line */
 static char nfs_root_parms[NFS_MAXPATHLEN + 1] __initdata = "";
index 1446861..a633044 100644 (file)
@@ -316,10 +316,6 @@ nfsd3_proc_mknod(struct svc_rqst *rqstp)
        fh_copy(&resp->dirfh, &argp->fh);
        fh_init(&resp->fh, NFS3_FHSIZE);
 
-       if (argp->ftype == 0 || argp->ftype >= NF3BAD) {
-               resp->status = nfserr_inval;
-               goto out;
-       }
        if (argp->ftype == NF3CHR || argp->ftype == NF3BLK) {
                rdev = MKDEV(argp->major, argp->minor);
                if (MAJOR(rdev) != argp->major ||
@@ -328,7 +324,7 @@ nfsd3_proc_mknod(struct svc_rqst *rqstp)
                        goto out;
                }
        } else if (argp->ftype != NF3SOCK && argp->ftype != NF3FIFO) {
-               resp->status = nfserr_inval;
+               resp->status = nfserr_badtype;
                goto out;
        }
 
index 9c23b6a..2277f83 100644 (file)
@@ -1114,6 +1114,7 @@ nfs3svc_encode_pathconfres(struct svc_rqst *rqstp, __be32 *p)
 {
        struct nfsd3_pathconfres *resp = rqstp->rq_resp;
 
+       *p++ = resp->status;
        *p++ = xdr_zero;        /* no post_op_attr */
 
        if (resp->status == 0) {
index ad2fa1a..e83b217 100644 (file)
@@ -1299,7 +1299,7 @@ nfsd4_cleanup_inter_ssc(struct vfsmount *ss_mnt, struct nfsd_file *src,
                        struct nfsd_file *dst)
 {
        nfs42_ssc_close(src->nf_file);
-       nfsd_file_put(src);
+       /* 'src' is freed by nfsd4_do_async_copy */
        nfsd_file_put(dst);
        mntput(ss_mnt);
 }
@@ -1486,6 +1486,7 @@ do_callback:
        cb_copy = kzalloc(sizeof(struct nfsd4_copy), GFP_KERNEL);
        if (!cb_copy)
                goto out;
+       refcount_set(&cb_copy->refcount, 1);
        memcpy(&cb_copy->cp_res, &copy->cp_res, sizeof(copy->cp_res));
        cb_copy->cp_clp = copy->cp_clp;
        cb_copy->nfserr = copy->nfserr;
index b9a9d69..db52e84 100644 (file)
@@ -877,7 +877,7 @@ int ocfs2_journal_init(struct ocfs2_journal *journal, int *dirty)
                goto done;
        }
 
-       trace_ocfs2_journal_init_maxlen(j_journal->j_maxlen);
+       trace_ocfs2_journal_init_maxlen(j_journal->j_total_len);
 
        *dirty = (le32_to_cpu(di->id1.journal1.ij_flags) &
                  OCFS2_JOURNAL_DIRTY_FL);
index d0989a4..419760f 100644 (file)
@@ -19,7 +19,7 @@ static int cpuinfo_open(struct inode *inode, struct file *file)
 static const struct proc_ops cpuinfo_proc_ops = {
        .proc_flags     = PROC_ENTRY_PERMANENT,
        .proc_open      = cpuinfo_open,
-       .proc_read      = seq_read,
+       .proc_read_iter = seq_read_iter,
        .proc_lseek     = seq_lseek,
        .proc_release   = seq_release,
 };
index 2f9fa17..b846632 100644 (file)
@@ -590,7 +590,7 @@ static int proc_seq_release(struct inode *inode, struct file *file)
 static const struct proc_ops proc_seq_ops = {
        /* not permanent -- can call into arbitrary seq_operations */
        .proc_open      = proc_seq_open,
-       .proc_read      = seq_read,
+       .proc_read_iter = seq_read_iter,
        .proc_lseek     = seq_lseek,
        .proc_release   = proc_seq_release,
 };
@@ -621,7 +621,7 @@ static int proc_single_open(struct inode *inode, struct file *file)
 static const struct proc_ops proc_single_ops = {
        /* not permanent -- can call into arbitrary ->single_show */
        .proc_open      = proc_single_open,
-       .proc_read      = seq_read,
+       .proc_read_iter = seq_read_iter,
        .proc_lseek     = seq_lseek,
        .proc_release   = single_release,
 };
index 58c075e..bde6b6f 100644 (file)
@@ -597,6 +597,7 @@ static const struct file_operations proc_iter_file_ops = {
        .llseek         = proc_reg_llseek,
        .read_iter      = proc_reg_read_iter,
        .write          = proc_reg_write,
+       .splice_read    = generic_file_splice_read,
        .poll           = proc_reg_poll,
        .unlocked_ioctl = proc_reg_unlocked_ioctl,
        .mmap           = proc_reg_mmap,
@@ -622,6 +623,7 @@ static const struct file_operations proc_reg_file_ops_compat = {
 static const struct file_operations proc_iter_file_ops_compat = {
        .llseek         = proc_reg_llseek,
        .read_iter      = proc_reg_read_iter,
+       .splice_read    = generic_file_splice_read,
        .write          = proc_reg_write,
        .poll           = proc_reg_poll,
        .unlocked_ioctl = proc_reg_unlocked_ioctl,
index 46b3293..4695b6d 100644 (file)
@@ -226,7 +226,7 @@ static int stat_open(struct inode *inode, struct file *file)
 static const struct proc_ops stat_proc_ops = {
        .proc_flags     = PROC_ENTRY_PERMANENT,
        .proc_open      = stat_open,
-       .proc_read      = seq_read,
+       .proc_read_iter = seq_read_iter,
        .proc_lseek     = seq_lseek,
        .proc_release   = single_release,
 };
index 31219c1..3b20e21 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/mm.h>
 #include <linux/printk.h>
 #include <linux/string_helpers.h>
+#include <linux/uio.h>
 
 #include <linux/uaccess.h>
 #include <asm/page.h>
@@ -146,7 +147,28 @@ Eoverflow:
  */
 ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
 {
-       struct seq_file *m = file->private_data;
+       struct iovec iov = { .iov_base = buf, .iov_len = size};
+       struct kiocb kiocb;
+       struct iov_iter iter;
+       ssize_t ret;
+
+       init_sync_kiocb(&kiocb, file);
+       iov_iter_init(&iter, READ, &iov, 1, size);
+
+       kiocb.ki_pos = *ppos;
+       ret = seq_read_iter(&kiocb, &iter);
+       *ppos = kiocb.ki_pos;
+       return ret;
+}
+EXPORT_SYMBOL(seq_read);
+
+/*
+ * Ready-made ->f_op->read_iter()
+ */
+ssize_t seq_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+       struct seq_file *m = iocb->ki_filp->private_data;
+       size_t size = iov_iter_count(iter);
        size_t copied = 0;
        size_t n;
        void *p;
@@ -158,14 +180,14 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
         * if request is to read from zero offset, reset iterator to first
         * record as it might have been already advanced by previous requests
         */
-       if (*ppos == 0) {
+       if (iocb->ki_pos == 0) {
                m->index = 0;
                m->count = 0;
        }
 
-       /* Don't assume *ppos is where we left it */
-       if (unlikely(*ppos != m->read_pos)) {
-               while ((err = traverse(m, *ppos)) == -EAGAIN)
+       /* Don't assume ki_pos is where we left it */
+       if (unlikely(iocb->ki_pos != m->read_pos)) {
+               while ((err = traverse(m, iocb->ki_pos)) == -EAGAIN)
                        ;
                if (err) {
                        /* With prejudice... */
@@ -174,7 +196,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
                        m->count = 0;
                        goto Done;
                } else {
-                       m->read_pos = *ppos;
+                       m->read_pos = iocb->ki_pos;
                }
        }
 
@@ -187,13 +209,11 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
        /* if not empty - flush it first */
        if (m->count) {
                n = min(m->count, size);
-               err = copy_to_user(buf, m->buf + m->from, n);
-               if (err)
+               if (copy_to_iter(m->buf + m->from, n, iter) != n)
                        goto Efault;
                m->count -= n;
                m->from += n;
                size -= n;
-               buf += n;
                copied += n;
                if (!size)
                        goto Done;
@@ -254,8 +274,7 @@ Fill:
        }
        m->op->stop(m, p);
        n = min(m->count, size);
-       err = copy_to_user(buf, m->buf, n);
-       if (err)
+       if (copy_to_iter(m->buf, n, iter) != n)
                goto Efault;
        copied += n;
        m->count -= n;
@@ -264,7 +283,7 @@ Done:
        if (!copied)
                copied = err;
        else {
-               *ppos += copied;
+               iocb->ki_pos += copied;
                m->read_pos += copied;
        }
        mutex_unlock(&m->lock);
@@ -276,7 +295,7 @@ Efault:
        err = -EFAULT;
        goto Done;
 }
-EXPORT_SYMBOL(seq_read);
+EXPORT_SYMBOL(seq_read_iter);
 
 /**
  *     seq_lseek -     ->llseek() method for sequential files.
index 852b536..1564001 100644 (file)
@@ -2467,6 +2467,7 @@ xfs_defer_agfl_block(
        new->xefi_startblock = XFS_AGB_TO_FSB(mp, agno, agbno);
        new->xefi_blockcount = 1;
        new->xefi_oinfo = *oinfo;
+       new->xefi_skip_discard = false;
 
        trace_xfs_agfl_free_defer(mp, agno, 0, agbno, 1);
 
index e1bd484..6747e97 100644 (file)
@@ -52,9 +52,9 @@ struct xfs_extent_free_item
 {
        xfs_fsblock_t           xefi_startblock;/* starting fs block number */
        xfs_extlen_t            xefi_blockcount;/* number of blocks in extent */
+       bool                    xefi_skip_discard;
        struct list_head        xefi_list;
        struct xfs_owner_info   xefi_oinfo;     /* extent owner */
-       bool                    xefi_skip_discard;
 };
 
 #define        XFS_BMAP_MAX_NMAP       4
index 3aa85b6..bb25ff1 100644 (file)
@@ -121,8 +121,7 @@ xchk_inode_flags(
                goto bad;
 
        /* rt flags require rt device */
-       if ((flags & (XFS_DIFLAG_REALTIME | XFS_DIFLAG_RTINHERIT)) &&
-           !mp->m_rtdev_targp)
+       if ((flags & XFS_DIFLAG_REALTIME) && !mp->m_rtdev_targp)
                goto bad;
 
        /* new rt bitmap flag only valid for rbmino */
index 55d126d..4304c64 100644 (file)
@@ -346,8 +346,8 @@ xfs_map_blocks(
        ssize_t                 count = i_blocksize(inode);
        xfs_fileoff_t           offset_fsb = XFS_B_TO_FSBT(mp, offset);
        xfs_fileoff_t           end_fsb = XFS_B_TO_FSB(mp, offset + count);
-       xfs_fileoff_t           cow_fsb = NULLFILEOFF;
-       int                     whichfork = XFS_DATA_FORK;
+       xfs_fileoff_t           cow_fsb;
+       int                     whichfork;
        struct xfs_bmbt_irec    imap;
        struct xfs_iext_cursor  icur;
        int                     retries = 0;
@@ -381,6 +381,8 @@ xfs_map_blocks(
         * landed in a hole and we skip the block.
         */
 retry:
+       cow_fsb = NULLFILEOFF;
+       whichfork = XFS_DATA_FORK;
        xfs_ilock(ip, XFS_ILOCK_SHARED);
        ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE ||
               (ip->i_df.if_flags & XFS_IFEXTENTS));
@@ -527,13 +529,15 @@ xfs_prepare_ioend(
  */
 static void
 xfs_discard_page(
-       struct page             *page)
+       struct page             *page,
+       loff_t                  fileoff)
 {
        struct inode            *inode = page->mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
-       loff_t                  offset = page_offset(page);
-       xfs_fileoff_t           start_fsb = XFS_B_TO_FSBT(mp, offset);
+       unsigned int            pageoff = offset_in_page(fileoff);
+       xfs_fileoff_t           start_fsb = XFS_B_TO_FSBT(mp, fileoff);
+       xfs_fileoff_t           pageoff_fsb = XFS_B_TO_FSBT(mp, pageoff);
        int                     error;
 
        if (XFS_FORCED_SHUTDOWN(mp))
@@ -541,14 +545,14 @@ xfs_discard_page(
 
        xfs_alert_ratelimited(mp,
                "page discard on page "PTR_FMT", inode 0x%llx, offset %llu.",
-                       page, ip->i_ino, offset);
+                       page, ip->i_ino, fileoff);
 
        error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
-                       i_blocks_per_page(inode, page));
+                       i_blocks_per_page(inode, page) - pageoff_fsb);
        if (error && !XFS_FORCED_SHUTDOWN(mp))
                xfs_alert(mp, "page discard unable to remove delalloc mapping.");
 out_invalidate:
-       iomap_invalidatepage(page, 0, PAGE_SIZE);
+       iomap_invalidatepage(page, pageoff, PAGE_SIZE - pageoff);
 }
 
 static const struct iomap_writeback_ops xfs_writeback_ops = {
index 5e16545..1414ab7 100644 (file)
@@ -911,6 +911,16 @@ xfs_setattr_size(
                error = iomap_zero_range(inode, oldsize, newsize - oldsize,
                                &did_zeroing, &xfs_buffered_write_iomap_ops);
        } else {
+               /*
+                * iomap won't detect a dirty page over an unwritten block (or a
+                * cow block over a hole) and subsequently skips zeroing the
+                * newly post-EOF portion of the page. Flush the new EOF to
+                * convert the block before the pagecache truncate.
+                */
+               error = filemap_write_and_wait_range(inode->i_mapping, newsize,
+                                                    newsize);
+               if (error)
+                       return error;
                error = iomap_truncate_page(inode, newsize, &did_zeroing,
                                &xfs_buffered_write_iomap_ops);
        }
index 16098dc..6fa05fb 100644 (file)
@@ -1502,7 +1502,8 @@ xfs_reflink_unshare(
                        &xfs_buffered_write_iomap_ops);
        if (error)
                goto out;
-       error = filemap_write_and_wait(inode->i_mapping);
+
+       error = filemap_write_and_wait_range(inode->i_mapping, offset, len);
        if (error)
                goto out;
 
index 1eaa04f..acbad3b 100644 (file)
@@ -109,6 +109,12 @@ struct cpufreq_policy {
        bool                    fast_switch_possible;
        bool                    fast_switch_enabled;
 
+       /*
+        * Set if the CPUFREQ_GOV_STRICT_TARGET flag is set for the current
+        * governor.
+        */
+       bool                    strict_target;
+
        /*
         * Preferred average time interval between consecutive invocations of
         * the driver to set the frequency for this policy.  To be set by the
@@ -570,12 +576,20 @@ struct cpufreq_governor {
                                         char *buf);
        int     (*store_setspeed)       (struct cpufreq_policy *policy,
                                         unsigned int freq);
-       /* For governors which change frequency dynamically by themselves */
-       bool                    dynamic_switching;
        struct list_head        governor_list;
        struct module           *owner;
+       u8                      flags;
 };
 
+/* Governor flags */
+
+/* For governors which change frequency dynamically by themselves */
+#define CPUFREQ_GOV_DYNAMIC_SWITCHING  BIT(0)
+
+/* For governors wanting the target frequency to be set exactly */
+#define CPUFREQ_GOV_STRICT_TARGET      BIT(1)
+
+
 /* Pass a target to the cpufreq driver */
 unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
                                        unsigned int target_freq);
index 868364c..35b2d84 100644 (file)
@@ -30,7 +30,8 @@ struct io_uring_task {
        struct percpu_counter   inflight;
        struct io_identity      __identity;
        struct io_identity      *identity;
-       bool                    in_idle;
+       atomic_t                in_idle;
+       bool                    sqpoll;
 };
 
 #if defined(CONFIG_IO_URING)
index 172b339..5bd3cac 100644 (file)
@@ -221,7 +221,7 @@ struct iomap_writeback_ops {
         * Optional, allows the file system to discard state on a page where
         * we failed to submit any I/O.
         */
-       void (*discard_page)(struct page *page);
+       void (*discard_page)(struct page *page, loff_t fileoff);
 };
 
 struct iomap_writepage_ctx {
index 1d5566a..1c49fd6 100644 (file)
@@ -68,6 +68,7 @@ extern void *jbd2_alloc(size_t size, gfp_t flags);
 extern void jbd2_free(void *ptr, size_t size);
 
 #define JBD2_MIN_JOURNAL_BLOCKS 1024
+#define JBD2_MIN_FC_BLOCKS     256
 
 #ifdef __KERNEL__
 
@@ -944,8 +945,9 @@ struct journal_s
        /**
         * @j_fc_off:
         *
-        * Number of fast commit blocks currently allocated.
-        * [j_state_lock].
+        * Number of fast commit blocks currently allocated. Accessed only
+        * during fast commit. Currently only process can do fast commit, so
+        * this field is not protected by any lock.
         */
        unsigned long           j_fc_off;
 
@@ -988,9 +990,9 @@ struct journal_s
        struct block_device     *j_fs_dev;
 
        /**
-        * @j_maxlen: Total maximum capacity of the journal region on disk.
+        * @j_total_len: Total maximum capacity of the journal region on disk.
         */
-       unsigned int            j_maxlen;
+       unsigned int            j_total_len;
 
        /**
         * @j_reserved_credits:
@@ -1108,8 +1110,9 @@ struct journal_s
        struct buffer_head      **j_wbuf;
 
        /**
-        * @j_fc_wbuf: Array of fast commit bhs for
-        * jbd2_journal_commit_transaction.
+        * @j_fc_wbuf: Array of fast commit bhs for fast commit. Accessed only
+        * during a fast commit. Currently only process can do fast commit, so
+        * this field is not protected by any lock.
         */
        struct buffer_head      **j_fc_wbuf;
 
@@ -1614,16 +1617,20 @@ extern void __jbd2_journal_drop_transaction(journal_t *, transaction_t *);
 extern int jbd2_cleanup_journal_tail(journal_t *);
 
 /* Fast commit related APIs */
-int jbd2_fc_init(journal_t *journal, int num_fc_blks);
 int jbd2_fc_begin_commit(journal_t *journal, tid_t tid);
 int jbd2_fc_end_commit(journal_t *journal);
-int jbd2_fc_end_commit_fallback(journal_t *journal, tid_t tid);
+int jbd2_fc_end_commit_fallback(journal_t *journal);
 int jbd2_fc_get_buf(journal_t *journal, struct buffer_head **bh_out);
 int jbd2_submit_inode_data(struct jbd2_inode *jinode);
 int jbd2_wait_inode_data(journal_t *journal, struct jbd2_inode *jinode);
 int jbd2_fc_wait_bufs(journal_t *journal, int num_blks);
 int jbd2_fc_release_bufs(journal_t *journal);
 
+static inline int jbd2_journal_get_max_txn_bufs(journal_t *journal)
+{
+       return (journal->j_total_len - journal->j_fc_wbufsize) / 4;
+}
+
 /*
  * is_journal_abort
  *
index 813614d..b83b3ae 100644 (file)
@@ -107,6 +107,7 @@ void seq_pad(struct seq_file *m, char c);
 char *mangle_path(char *s, const char *p, const char *esc);
 int seq_open(struct file *, const struct seq_operations *);
 ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
+ssize_t seq_read_iter(struct kiocb *iocb, struct iov_iter *iter);
 loff_t seq_lseek(struct file *, loff_t, int);
 int seq_release(struct inode *, struct file *);
 int seq_write(struct seq_file *seq, const void *data, size_t len);
index 513913f..3bb7226 100644 (file)
@@ -45,13 +45,9 @@ enum dma_sync_target {
        SYNC_FOR_DEVICE = 1,
 };
 
-extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
-                                         dma_addr_t tbl_dma_addr,
-                                         phys_addr_t phys,
-                                         size_t mapping_size,
-                                         size_t alloc_size,
-                                         enum dma_data_direction dir,
-                                         unsigned long attrs);
+phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,
+               size_t mapping_size, size_t alloc_size,
+               enum dma_data_direction dir, unsigned long attrs);
 
 extern void swiotlb_tbl_unmap_single(struct device *hwdev,
                                     phys_addr_t tlb_addr,
index b14314f..70ae549 100644 (file)
@@ -100,11 +100,12 @@ TRACE_DEFINE_ENUM(ES_REFERENCED_B);
                { EXT4_FC_REASON_XATTR,         "XATTR"},               \
                { EXT4_FC_REASON_CROSS_RENAME,  "CROSS_RENAME"},        \
                { EXT4_FC_REASON_JOURNAL_FLAG_CHANGE, "JOURNAL_FLAG_CHANGE"}, \
-               { EXT4_FC_REASON_MEM,   "NO_MEM"},                      \
+               { EXT4_FC_REASON_NOMEM, "NO_MEM"},                      \
                { EXT4_FC_REASON_SWAP_BOOT,     "SWAP_BOOT"},           \
                { EXT4_FC_REASON_RESIZE,        "RESIZE"},              \
                { EXT4_FC_REASON_RENAME_DIR,    "RENAME_DIR"},          \
-               { EXT4_FC_REASON_FALLOC_RANGE,  "FALLOC_RANGE"})
+               { EXT4_FC_REASON_FALLOC_RANGE,  "FALLOC_RANGE"},        \
+               { EXT4_FC_REASON_INODE_JOURNAL_DATA,    "INODE_JOURNAL_DATA"})
 
 TRACE_EVENT(ext4_other_inode_update_time,
        TP_PROTO(struct inode *inode, ino_t orig_ino),
@@ -2917,17 +2918,18 @@ TRACE_EVENT(ext4_fc_stats,
                    ),
 
            TP_printk("dev %d:%d fc ineligible reasons:\n"
-                     "%s:%d, %s:%d, %s:%d, %s:%d, %s:%d, %s:%d, %s:%d, %s,%d; "
+                     "%s:%d, %s:%d, %s:%d, %s:%d, %s:%d, %s:%d, %s:%d, %s:%d, %s:%d; "
                      "num_commits:%ld, ineligible: %ld, numblks: %ld",
                      MAJOR(__entry->dev), MINOR(__entry->dev),
                      FC_REASON_NAME_STAT(EXT4_FC_REASON_XATTR),
                      FC_REASON_NAME_STAT(EXT4_FC_REASON_CROSS_RENAME),
                      FC_REASON_NAME_STAT(EXT4_FC_REASON_JOURNAL_FLAG_CHANGE),
-                     FC_REASON_NAME_STAT(EXT4_FC_REASON_MEM),
+                     FC_REASON_NAME_STAT(EXT4_FC_REASON_NOMEM),
                      FC_REASON_NAME_STAT(EXT4_FC_REASON_SWAP_BOOT),
                      FC_REASON_NAME_STAT(EXT4_FC_REASON_RESIZE),
                      FC_REASON_NAME_STAT(EXT4_FC_REASON_RENAME_DIR),
                      FC_REASON_NAME_STAT(EXT4_FC_REASON_FALLOC_RANGE),
+                     FC_REASON_NAME_STAT(EXT4_FC_REASON_INODE_JOURNAL_DATA),
                      __entry->sbi->s_fc_stats.fc_num_commits,
                      __entry->sbi->s_fc_stats.fc_ineligible_commits,
                      __entry->sbi->s_fc_stats.fc_numblks)
index f45b3c0..2477014 100644 (file)
@@ -655,10 +655,10 @@ TRACE_EVENT(rpc_xdr_overflow,
                __field(size_t, tail_len)
                __field(unsigned int, page_len)
                __field(unsigned int, len)
-               __string(progname,
-                        xdr->rqst->rq_task->tk_client->cl_program->name)
-               __string(procedure,
-                        xdr->rqst->rq_task->tk_msg.rpc_proc->p_name)
+               __string(progname, xdr->rqst ?
+                        xdr->rqst->rq_task->tk_client->cl_program->name : "unknown")
+               __string(procedure, xdr->rqst ?
+                        xdr->rqst->rq_task->tk_msg.rpc_proc->p_name : "unknown")
        ),
 
        TP_fast_assign(
index b4eea0a..781b9dc 100644 (file)
@@ -229,6 +229,7 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
                io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
        }
        io_tlb_index = 0;
+       no_iotlb_memory = false;
 
        if (verbose)
                swiotlb_print_info();
@@ -260,9 +261,11 @@ swiotlb_init(int verbose)
        if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
                return;
 
-       if (io_tlb_start)
+       if (io_tlb_start) {
                memblock_free_early(io_tlb_start,
                                    PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
+               io_tlb_start = 0;
+       }
        pr_warn("Cannot allocate buffer");
        no_iotlb_memory = true;
 }
@@ -360,6 +363,7 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
                io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
        }
        io_tlb_index = 0;
+       no_iotlb_memory = false;
 
        swiotlb_print_info();
 
@@ -441,14 +445,11 @@ static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
        }
 }
 
-phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
-                                  dma_addr_t tbl_dma_addr,
-                                  phys_addr_t orig_addr,
-                                  size_t mapping_size,
-                                  size_t alloc_size,
-                                  enum dma_data_direction dir,
-                                  unsigned long attrs)
+phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t orig_addr,
+               size_t mapping_size, size_t alloc_size,
+               enum dma_data_direction dir, unsigned long attrs)
 {
+       dma_addr_t tbl_dma_addr = phys_to_dma_unencrypted(hwdev, io_tlb_start);
        unsigned long flags;
        phys_addr_t tlb_addr;
        unsigned int nslots, stride, index, wrap;
@@ -667,9 +668,8 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
        trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size,
                              swiotlb_force);
 
-       swiotlb_addr = swiotlb_tbl_map_single(dev,
-                       phys_to_dma_unencrypted(dev, io_tlb_start),
-                       paddr, size, size, dir, attrs);
+       swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, size, dir,
+                       attrs);
        if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
                return DMA_MAPPING_ERROR;
 
index 2b83666..e9e2df3 100644 (file)
@@ -337,10 +337,10 @@ noinstr irqentry_state_t irqentry_enter(struct pt_regs *regs)
         * already contains a warning when RCU is not watching, so no point
         * in having another one here.
         */
+       lockdep_hardirqs_off(CALLER_ADDR0);
        instrumentation_begin();
        rcu_irq_enter_check_tick();
-       /* Use the combo lockdep/tracing function */
-       trace_hardirqs_off();
+       trace_hardirqs_off_finish();
        instrumentation_end();
 
        return ret;
index da467e1..5a29ab0 100644 (file)
@@ -10085,6 +10085,7 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
                        if (token == IF_SRC_FILE || token == IF_SRC_FILEADDR) {
                                int fpos = token == IF_SRC_FILE ? 2 : 1;
 
+                               kfree(filename);
                                filename = match_strdup(&args[fpos]);
                                if (!filename) {
                                        ret = -ENOMEM;
@@ -10131,16 +10132,13 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
                                 */
                                ret = -EOPNOTSUPP;
                                if (!event->ctx->task)
-                                       goto fail_free_name;
+                                       goto fail;
 
                                /* look up the path and grab its inode */
                                ret = kern_path(filename, LOOKUP_FOLLOW,
                                                &filter->path);
                                if (ret)
-                                       goto fail_free_name;
-
-                               kfree(filename);
-                               filename = NULL;
+                                       goto fail;
 
                                ret = -EINVAL;
                                if (!filter->path.dentry ||
@@ -10160,13 +10158,13 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
        if (state != IF_STATE_ACTION)
                goto fail;
 
+       kfree(filename);
        kfree(orig);
 
        return 0;
 
-fail_free_name:
-       kfree(filename);
 fail:
+       kfree(filename);
        free_filters_list(filters);
        kfree(orig);
 
index 87a2d51..1f236ed 100644 (file)
@@ -454,7 +454,10 @@ static void exit_mm(void)
                mmap_read_unlock(mm);
 
                self.task = current;
-               self.next = xchg(&core_state->dumper.next, &self);
+               if (self.task->flags & PF_SIGNALED)
+                       self.next = xchg(&core_state->dumper.next, &self);
+               else
+                       self.task = NULL;
                /*
                 * Implies mb(), the result of xchg() must be visible
                 * to core_state->dumper.
index 32083db..6d26638 100644 (file)
@@ -2167,14 +2167,9 @@ static __latent_entropy struct task_struct *copy_process(
        /* ok, now we should be set up.. */
        p->pid = pid_nr(pid);
        if (clone_flags & CLONE_THREAD) {
-               p->exit_signal = -1;
                p->group_leader = current->group_leader;
                p->tgid = current->tgid;
        } else {
-               if (clone_flags & CLONE_PARENT)
-                       p->exit_signal = current->group_leader->exit_signal;
-               else
-                       p->exit_signal = args->exit_signal;
                p->group_leader = p;
                p->tgid = p->pid;
        }
@@ -2218,9 +2213,14 @@ static __latent_entropy struct task_struct *copy_process(
        if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
                p->real_parent = current->real_parent;
                p->parent_exec_id = current->parent_exec_id;
+               if (clone_flags & CLONE_THREAD)
+                       p->exit_signal = -1;
+               else
+                       p->exit_signal = current->group_leader->exit_signal;
        } else {
                p->real_parent = current;
                p->parent_exec_id = current->self_exec_id;
+               p->exit_signal = args->exit_signal;
        }
 
        klp_copy_process(p);
index f8614ef..ac32887 100644 (file)
@@ -2380,10 +2380,22 @@ retry:
                }
 
                /*
-                * Since we just failed the trylock; there must be an owner.
+                * The trylock just failed, so either there is an owner or
+                * there is a higher priority waiter than this one.
                 */
                newowner = rt_mutex_owner(&pi_state->pi_mutex);
-               BUG_ON(!newowner);
+               /*
+                * If the higher priority waiter has not yet taken over the
+                * rtmutex then newowner is NULL. We can't return here with
+                * that state because it's inconsistent vs. the user space
+                * state. So drop the locks and try again. It's a valid
+                * situation and not any different from the other retry
+                * conditions.
+                */
+               if (unlikely(!newowner)) {
+                       err = -EAGAIN;
+                       goto handle_err;
+               }
        } else {
                WARN_ON_ONCE(argowner != current);
                if (oldowner == current) {
index 10a5aff..164a031 100644 (file)
@@ -82,6 +82,7 @@ config IRQ_FASTEOI_HIERARCHY_HANDLERS
 # Generic IRQ IPI support
 config GENERIC_IRQ_IPI
        bool
+       select IRQ_DOMAIN_HIERARCHY
 
 # Generic MSI interrupt support
 config GENERIC_MSI_IRQ
index d73bccd..97d318b 100644 (file)
@@ -881,7 +881,7 @@ static void sugov_limits(struct cpufreq_policy *policy)
 struct cpufreq_governor schedutil_gov = {
        .name                   = "schedutil",
        .owner                  = THIS_MODULE,
-       .dynamic_switching      = true,
+       .flags                  = CPUFREQ_GOV_DYNAMIC_SWITCHING,
        .init                   = sugov_init,
        .exit                   = sugov_exit,
        .start                  = sugov_start,
index a18b36b..3aad6ef 100644 (file)
@@ -63,19 +63,20 @@ static int proc_do_xprt(struct ctl_table *table, int write,
                        void *buffer, size_t *lenp, loff_t *ppos)
 {
        char tmpbuf[256];
-       size_t len;
+       ssize_t len;
 
-       if ((*ppos && !write) || !*lenp) {
+       if (write || *ppos) {
                *lenp = 0;
                return 0;
        }
        len = svc_print_xprts(tmpbuf, sizeof(tmpbuf));
-       *lenp = memory_read_from_buffer(buffer, *lenp, ppos, tmpbuf, len);
+       len = memory_read_from_buffer(buffer, *lenp, ppos, tmpbuf, len);
 
-       if (*lenp < 0) {
+       if (len < 0) {
                *lenp = 0;
                return -EINVAL;
        }
+       *lenp = len;
        return 0;
 }
 
index 2cb592f..68dab82 100755 (executable)
@@ -287,6 +287,8 @@ my $bondary = qr { (?<![\w\/\`\{])(?=[\w\/\`\{])|(?<=[\w\/\`\{])(?![\w\/\`\{]) }
 sub output_rest {
        create_labels();
 
+       my $part = "";
+
        foreach my $what (sort {
                                ($data{$a}->{type} eq "File") cmp ($data{$b}->{type} eq "File") ||
                                $a cmp $b
@@ -306,6 +308,21 @@ sub output_rest {
                $w =~ s/([\(\)\_\-\*\=\^\~\\])/\\$1/g;
 
                if ($type ne "File") {
+                       my $cur_part = $what;
+                       if ($what =~ '/') {
+                               if ($what =~ m#^(\/?(?:[\w\-]+\/?){1,2})#) {
+                                       $cur_part = "Symbols under $1";
+                                       $cur_part =~ s,/$,,;
+                               }
+                       }
+
+                       if ($cur_part ne "" && $part ne $cur_part) {
+                           $part = $cur_part;
+                           my $bar = $part;
+                           $bar =~ s/./-/g;
+                           print "$part\n$bar\n\n";
+                       }
+
                        printf ".. _%s:\n\n", $data{$what}->{label};
 
                        my @names = split /, /,$w;
@@ -352,6 +369,12 @@ sub output_rest {
 
                if (!($desc =~ /^\s*$/)) {
                        if ($description_is_rst) {
+                               # Remove title markups from the description
+                               # Having titles inside ABI files will only work if extra
+                               # care would be taken in order to strictly follow the same
+                               # level order for each markup.
+                               $desc =~ s/\n[\-\*\=\^\~]+\n/\n\n/g;
+
                                # Enrich text by creating cross-references
 
                                $desc =~ s,Documentation/(?!devicetree)(\S+)\.rst,:doc:`/$1`,g;
index 2b65512..f3e3c94 100644 (file)
@@ -12,11 +12,12 @@ turbostat : turbostat.c
 override CFLAGS +=     -O2 -Wall -I../../../include
 override CFLAGS +=     -DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"'
 override CFLAGS +=     -DINTEL_FAMILY_HEADER='"../../../../arch/x86/include/asm/intel-family.h"'
+override CFLAGS +=     -D_FILE_OFFSET_BITS=64
 override CFLAGS +=     -D_FORTIFY_SOURCE=2
 
 %: %.c
        @mkdir -p $(BUILD_OUTPUT)
-       $(CC) $(CFLAGS) $< -o $(BUILD_OUTPUT)/$@ $(LDFLAGS) -lcap
+       $(CC) $(CFLAGS) $< -o $(BUILD_OUTPUT)/$@ $(LDFLAGS) -lcap -lrt
 
 .PHONY : clean
 clean :
index a6db83a..f6b7e85 100644 (file)
@@ -335,7 +335,7 @@ that they count at TSC rate, which is true on all processors tested to date.
 
 .SH REFERENCES
 Volume 3B: System Programming Guide"
-http://www.intel.com/products/processor/manuals/
+https://www.intel.com/products/processor/manuals/
 
 .SH FILES
 .ta
index 33b3708..f3a1746 100644 (file)
@@ -79,6 +79,7 @@ unsigned long long  gfx_cur_rc6_ms;
 unsigned long long cpuidle_cur_cpu_lpi_us;
 unsigned long long cpuidle_cur_sys_lpi_us;
 unsigned int gfx_cur_mhz;
+unsigned int gfx_act_mhz;
 unsigned int tcc_activation_temp;
 unsigned int tcc_activation_temp_override;
 double rapl_power_units, rapl_time_units;
@@ -210,13 +211,14 @@ struct pkg_data {
        unsigned long long pkg_both_core_gfxe_c0;
        long long gfx_rc6_ms;
        unsigned int gfx_mhz;
+       unsigned int gfx_act_mhz;
        unsigned int package_id;
-       unsigned int energy_pkg;        /* MSR_PKG_ENERGY_STATUS */
-       unsigned int energy_dram;       /* MSR_DRAM_ENERGY_STATUS */
-       unsigned int energy_cores;      /* MSR_PP0_ENERGY_STATUS */
-       unsigned int energy_gfx;        /* MSR_PP1_ENERGY_STATUS */
-       unsigned int rapl_pkg_perf_status;      /* MSR_PKG_PERF_STATUS */
-       unsigned int rapl_dram_perf_status;     /* MSR_DRAM_PERF_STATUS */
+       unsigned long long energy_pkg;  /* MSR_PKG_ENERGY_STATUS */
+       unsigned long long energy_dram; /* MSR_DRAM_ENERGY_STATUS */
+       unsigned long long energy_cores;        /* MSR_PP0_ENERGY_STATUS */
+       unsigned long long energy_gfx;  /* MSR_PP1_ENERGY_STATUS */
+       unsigned long long rapl_pkg_perf_status;        /* MSR_PKG_PERF_STATUS */
+       unsigned long long rapl_dram_perf_status;       /* MSR_DRAM_PERF_STATUS */
        unsigned int pkg_temp_c;
        unsigned long long counter[MAX_ADDED_COUNTERS];
 } *package_even, *package_odd;
@@ -259,6 +261,113 @@ struct msr_counter {
 #define        SYSFS_PERCPU    (1 << 1)
 };
 
+/*
+ * The accumulated sum of MSR is defined as a monotonic
+ * increasing MSR, it will be accumulated periodically,
+ * despite its register's bit width.
+ */
+enum {
+       IDX_PKG_ENERGY,
+       IDX_DRAM_ENERGY,
+       IDX_PP0_ENERGY,
+       IDX_PP1_ENERGY,
+       IDX_PKG_PERF,
+       IDX_DRAM_PERF,
+       IDX_COUNT,
+};
+
+int get_msr_sum(int cpu, off_t offset, unsigned long long *msr);
+
+struct msr_sum_array {
+       /* get_msr_sum() = sum + (get_msr() - last) */
+       struct {
+               /*The accumulated MSR value is updated by the timer*/
+               unsigned long long sum;
+               /*The MSR footprint recorded in last timer*/
+               unsigned long long last;
+       } entries[IDX_COUNT];
+};
+
+/* The percpu MSR sum array.*/
+struct msr_sum_array *per_cpu_msr_sum;
+
+int idx_to_offset(int idx)
+{
+       int offset;
+
+       switch (idx) {
+       case IDX_PKG_ENERGY:
+               offset = MSR_PKG_ENERGY_STATUS;
+               break;
+       case IDX_DRAM_ENERGY:
+               offset = MSR_DRAM_ENERGY_STATUS;
+               break;
+       case IDX_PP0_ENERGY:
+               offset = MSR_PP0_ENERGY_STATUS;
+               break;
+       case IDX_PP1_ENERGY:
+               offset = MSR_PP1_ENERGY_STATUS;
+               break;
+       case IDX_PKG_PERF:
+               offset = MSR_PKG_PERF_STATUS;
+               break;
+       case IDX_DRAM_PERF:
+               offset = MSR_DRAM_PERF_STATUS;
+               break;
+       default:
+               offset = -1;
+       }
+       return offset;
+}
+
+int offset_to_idx(int offset)
+{
+       int idx;
+
+       switch (offset) {
+       case MSR_PKG_ENERGY_STATUS:
+               idx = IDX_PKG_ENERGY;
+               break;
+       case MSR_DRAM_ENERGY_STATUS:
+               idx = IDX_DRAM_ENERGY;
+               break;
+       case MSR_PP0_ENERGY_STATUS:
+               idx = IDX_PP0_ENERGY;
+               break;
+       case MSR_PP1_ENERGY_STATUS:
+               idx = IDX_PP1_ENERGY;
+               break;
+       case MSR_PKG_PERF_STATUS:
+               idx = IDX_PKG_PERF;
+               break;
+       case MSR_DRAM_PERF_STATUS:
+               idx = IDX_DRAM_PERF;
+               break;
+       default:
+               idx = -1;
+       }
+       return idx;
+}
+
+int idx_valid(int idx)
+{
+       switch (idx) {
+       case IDX_PKG_ENERGY:
+               return do_rapl & RAPL_PKG;
+       case IDX_DRAM_ENERGY:
+               return do_rapl & RAPL_DRAM;
+       case IDX_PP0_ENERGY:
+               return do_rapl & RAPL_CORES_ENERGY_STATUS;
+       case IDX_PP1_ENERGY:
+               return do_rapl & RAPL_GFX;
+       case IDX_PKG_PERF:
+               return do_rapl & RAPL_PKG_PERF_STATUS;
+       case IDX_DRAM_PERF:
+               return do_rapl & RAPL_DRAM_PERF_STATUS;
+       default:
+               return 0;
+       }
+}
 struct sys_counters {
        unsigned int added_thread_counters;
        unsigned int added_core_counters;
@@ -451,6 +560,7 @@ struct msr_counter bic[] = {
        { 0x0, "APIC" },
        { 0x0, "X2APIC" },
        { 0x0, "Die" },
+       { 0x0, "GFXAMHz" },
 };
 
 #define MAX_BIC (sizeof(bic) / sizeof(struct msr_counter))
@@ -505,6 +615,7 @@ struct msr_counter bic[] = {
 #define        BIC_APIC        (1ULL << 48)
 #define        BIC_X2APIC      (1ULL << 49)
 #define        BIC_Die         (1ULL << 50)
+#define        BIC_GFXACTMHz   (1ULL << 51)
 
 #define BIC_DISABLED_BY_DEFAULT        (BIC_USEC | BIC_TOD | BIC_APIC | BIC_X2APIC)
 
@@ -724,6 +835,9 @@ void print_header(char *delim)
        if (DO_BIC(BIC_GFXMHz))
                outp += sprintf(outp, "%sGFXMHz", (printed++ ? delim : ""));
 
+       if (DO_BIC(BIC_GFXACTMHz))
+               outp += sprintf(outp, "%sGFXAMHz", (printed++ ? delim : ""));
+
        if (DO_BIC(BIC_Totl_c0))
                outp += sprintf(outp, "%sTotl%%C0", (printed++ ? delim : ""));
        if (DO_BIC(BIC_Any_c0))
@@ -858,13 +972,13 @@ int dump_counters(struct thread_data *t, struct core_data *c,
                outp += sprintf(outp, "pc10: %016llX\n", p->pc10);
                outp += sprintf(outp, "cpu_lpi: %016llX\n", p->cpu_lpi);
                outp += sprintf(outp, "sys_lpi: %016llX\n", p->sys_lpi);
-               outp += sprintf(outp, "Joules PKG: %0X\n", p->energy_pkg);
-               outp += sprintf(outp, "Joules COR: %0X\n", p->energy_cores);
-               outp += sprintf(outp, "Joules GFX: %0X\n", p->energy_gfx);
-               outp += sprintf(outp, "Joules RAM: %0X\n", p->energy_dram);
-               outp += sprintf(outp, "Throttle PKG: %0X\n",
+               outp += sprintf(outp, "Joules PKG: %0llX\n", p->energy_pkg);
+               outp += sprintf(outp, "Joules COR: %0llX\n", p->energy_cores);
+               outp += sprintf(outp, "Joules GFX: %0llX\n", p->energy_gfx);
+               outp += sprintf(outp, "Joules RAM: %0llX\n", p->energy_dram);
+               outp += sprintf(outp, "Throttle PKG: %0llX\n",
                        p->rapl_pkg_perf_status);
-               outp += sprintf(outp, "Throttle RAM: %0X\n",
+               outp += sprintf(outp, "Throttle RAM: %0llX\n",
                        p->rapl_dram_perf_status);
                outp += sprintf(outp, "PTM: %dC\n", p->pkg_temp_c);
 
@@ -1062,14 +1176,7 @@ int format_counters(struct thread_data *t, struct core_data *c,
                }
        }
 
-       /*
-        * If measurement interval exceeds minimum RAPL Joule Counter range,
-        * indicate that results are suspect by printing "**" in fraction place.
-        */
-       if (interval_float < rapl_joule_counter_range)
-               fmt8 = "%s%.2f";
-       else
-               fmt8 = "%6.0f**";
+       fmt8 = "%s%.2f";
 
        if (DO_BIC(BIC_CorWatt) && (do_rapl & RAPL_PER_CORE_ENERGY))
                outp += sprintf(outp, fmt8, (printed++ ? delim : ""), c->core_energy * rapl_energy_units / interval_float);
@@ -1098,6 +1205,10 @@ int format_counters(struct thread_data *t, struct core_data *c,
        if (DO_BIC(BIC_GFXMHz))
                outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), p->gfx_mhz);
 
+       /* GFXACTMHz */
+       if (DO_BIC(BIC_GFXACTMHz))
+               outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), p->gfx_act_mhz);
+
        /* Totl%C0, Any%C0 GFX%C0 CPUGFX% */
        if (DO_BIC(BIC_Totl_c0))
                outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_wtd_core_c0/tsc);
@@ -1210,11 +1321,7 @@ void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_
 }
 
 #define DELTA_WRAP32(new, old)                 \
-       if (new > old) {                        \
-               old = new - old;                \
-       } else {                                \
-               old = 0x100000000 + new - old;  \
-       }
+       old = ((((unsigned long long)new << 32) - ((unsigned long long)old << 32)) >> 32);
 
 int
 delta_package(struct pkg_data *new, struct pkg_data *old)
@@ -1253,13 +1360,14 @@ delta_package(struct pkg_data *new, struct pkg_data *old)
                old->gfx_rc6_ms = new->gfx_rc6_ms - old->gfx_rc6_ms;
 
        old->gfx_mhz = new->gfx_mhz;
+       old->gfx_act_mhz = new->gfx_act_mhz;
 
-       DELTA_WRAP32(new->energy_pkg, old->energy_pkg);
-       DELTA_WRAP32(new->energy_cores, old->energy_cores);
-       DELTA_WRAP32(new->energy_gfx, old->energy_gfx);
-       DELTA_WRAP32(new->energy_dram, old->energy_dram);
-       DELTA_WRAP32(new->rapl_pkg_perf_status, old->rapl_pkg_perf_status);
-       DELTA_WRAP32(new->rapl_dram_perf_status, old->rapl_dram_perf_status);
+       old->energy_pkg = new->energy_pkg - old->energy_pkg;
+       old->energy_cores = new->energy_cores - old->energy_cores;
+       old->energy_gfx = new->energy_gfx - old->energy_gfx;
+       old->energy_dram = new->energy_dram - old->energy_dram;
+       old->rapl_pkg_perf_status = new->rapl_pkg_perf_status - old->rapl_pkg_perf_status;
+       old->rapl_dram_perf_status = new->rapl_dram_perf_status - old->rapl_dram_perf_status;
 
        for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) {
                if (mp->format == FORMAT_RAW)
@@ -1469,6 +1577,7 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data
 
        p->gfx_rc6_ms = 0;
        p->gfx_mhz = 0;
+       p->gfx_act_mhz = 0;
        for (i = 0, mp = sys.tp; mp; i++, mp = mp->next)
                t->counter[i] = 0;
 
@@ -1564,6 +1673,7 @@ int sum_counters(struct thread_data *t, struct core_data *c,
 
        average.packages.gfx_rc6_ms = p->gfx_rc6_ms;
        average.packages.gfx_mhz = p->gfx_mhz;
+       average.packages.gfx_act_mhz = p->gfx_act_mhz;
 
        average.packages.pkg_temp_c = MAX(average.packages.pkg_temp_c, p->pkg_temp_c);
 
@@ -1784,7 +1894,7 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
        int i;
 
        if (cpu_migrate(cpu)) {
-               fprintf(outf, "Could not migrate to CPU %d\n", cpu);
+               fprintf(outf, "get_counters: Could not migrate to CPU %d\n", cpu);
                return -1;
        }
 
@@ -1966,39 +2076,39 @@ retry:
                p->sys_lpi = cpuidle_cur_sys_lpi_us;
 
        if (do_rapl & RAPL_PKG) {
-               if (get_msr(cpu, MSR_PKG_ENERGY_STATUS, &msr))
+               if (get_msr_sum(cpu, MSR_PKG_ENERGY_STATUS, &msr))
                        return -13;
-               p->energy_pkg = msr & 0xFFFFFFFF;
+               p->energy_pkg = msr;
        }
        if (do_rapl & RAPL_CORES_ENERGY_STATUS) {
-               if (get_msr(cpu, MSR_PP0_ENERGY_STATUS, &msr))
+               if (get_msr_sum(cpu, MSR_PP0_ENERGY_STATUS, &msr))
                        return -14;
-               p->energy_cores = msr & 0xFFFFFFFF;
+               p->energy_cores = msr;
        }
        if (do_rapl & RAPL_DRAM) {
-               if (get_msr(cpu, MSR_DRAM_ENERGY_STATUS, &msr))
+               if (get_msr_sum(cpu, MSR_DRAM_ENERGY_STATUS, &msr))
                        return -15;
-               p->energy_dram = msr & 0xFFFFFFFF;
+               p->energy_dram = msr;
        }
        if (do_rapl & RAPL_GFX) {
-               if (get_msr(cpu, MSR_PP1_ENERGY_STATUS, &msr))
+               if (get_msr_sum(cpu, MSR_PP1_ENERGY_STATUS, &msr))
                        return -16;
-               p->energy_gfx = msr & 0xFFFFFFFF;
+               p->energy_gfx = msr;
        }
        if (do_rapl & RAPL_PKG_PERF_STATUS) {
-               if (get_msr(cpu, MSR_PKG_PERF_STATUS, &msr))
+               if (get_msr_sum(cpu, MSR_PKG_PERF_STATUS, &msr))
                        return -16;
-               p->rapl_pkg_perf_status = msr & 0xFFFFFFFF;
+               p->rapl_pkg_perf_status = msr;
        }
        if (do_rapl & RAPL_DRAM_PERF_STATUS) {
-               if (get_msr(cpu, MSR_DRAM_PERF_STATUS, &msr))
+               if (get_msr_sum(cpu, MSR_DRAM_PERF_STATUS, &msr))
                        return -16;
-               p->rapl_dram_perf_status = msr & 0xFFFFFFFF;
+               p->rapl_dram_perf_status = msr;
        }
        if (do_rapl & RAPL_AMD_F17H) {
-               if (get_msr(cpu, MSR_PKG_ENERGY_STAT, &msr))
+               if (get_msr_sum(cpu, MSR_PKG_ENERGY_STAT, &msr))
                        return -13;
-               p->energy_pkg = msr & 0xFFFFFFFF;
+               p->energy_pkg = msr;
        }
        if (DO_BIC(BIC_PkgTmp)) {
                if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr))
@@ -2012,6 +2122,9 @@ retry:
        if (DO_BIC(BIC_GFXMHz))
                p->gfx_mhz = gfx_cur_mhz;
 
+       if (DO_BIC(BIC_GFXACTMHz))
+               p->gfx_act_mhz = gfx_act_mhz;
+
        for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) {
                if (get_mp(cpu, mp, &p->counter[i]))
                        return -10;
@@ -2173,6 +2286,7 @@ int has_turbo_ratio_group_limits(int family, int model)
        case INTEL_FAM6_ATOM_GOLDMONT:
        case INTEL_FAM6_SKYLAKE_X:
        case INTEL_FAM6_ATOM_GOLDMONT_D:
+       case INTEL_FAM6_ATOM_TREMONT_D:
                return 1;
        }
        return 0;
@@ -2650,7 +2764,12 @@ int get_thread_siblings(struct cpu_topology *thiscpu)
 
        sprintf(path,
                "/sys/devices/system/cpu/cpu%d/topology/thread_siblings", cpu);
-       filep = fopen_or_die(path, "r");
+       filep = fopen(path, "r");
+
+       if (!filep) {
+               warnx("%s: open failed", path);
+               return -1;
+       }
        do {
                offset -= BITMASK_SIZE;
                if (fscanf(filep, "%lx%c", &map, &character) != 2)
@@ -2763,18 +2882,25 @@ void re_initialize(void)
 {
        free_all_buffers();
        setup_all_buffers();
-       printf("turbostat: re-initialized with num_cpus %d\n", topo.num_cpus);
+       fprintf(outf, "turbostat: re-initialized with num_cpus %d\n", topo.num_cpus);
 }
 
 void set_max_cpu_num(void)
 {
        FILE *filep;
+       int base_cpu;
        unsigned long dummy;
+       char pathname[64];
+
+       base_cpu = sched_getcpu();
+       if (base_cpu < 0)
+               err(1, "cannot find calling cpu ID");
+       sprintf(pathname,
+               "/sys/devices/system/cpu/cpu%d/topology/thread_siblings",
+               base_cpu);
 
+       filep = fopen_or_die(pathname, "r");
        topo.max_cpu_num = 0;
-       filep = fopen_or_die(
-                       "/sys/devices/system/cpu/cpu0/topology/thread_siblings",
-                       "r");
        while (fscanf(filep, "%lx,", &dummy) == 1)
                topo.max_cpu_num += BITMASK_SIZE;
        fclose(filep);
@@ -2915,6 +3041,33 @@ int snapshot_gfx_mhz(void)
        return 0;
 }
 
+/*
+ * snapshot_gfx_cur_mhz()
+ *
+ * record snapshot of
+ * /sys/class/graphics/fb0/device/drm/card0/gt_act_freq_mhz
+ *
+ * return 1 if config change requires a restart, else return 0
+ */
+int snapshot_gfx_act_mhz(void)
+{
+       static FILE *fp;
+       int retval;
+
+       if (fp == NULL)
+               fp = fopen_or_die("/sys/class/graphics/fb0/device/drm/card0/gt_act_freq_mhz", "r");
+       else {
+               rewind(fp);
+               fflush(fp);
+       }
+
+       retval = fscanf(fp, "%d", &gfx_act_mhz);
+       if (retval != 1)
+               err(1, "GFX ACT MHz");
+
+       return 0;
+}
+
 /*
  * snapshot_cpu_lpi()
  *
@@ -2980,6 +3133,9 @@ int snapshot_proc_sysfs_files(void)
        if (DO_BIC(BIC_GFXMHz))
                snapshot_gfx_mhz();
 
+       if (DO_BIC(BIC_GFXACTMHz))
+               snapshot_gfx_act_mhz();
+
        if (DO_BIC(BIC_CPU_LPI))
                snapshot_cpu_lpi_us();
 
@@ -3057,6 +3213,111 @@ void do_sleep(void)
        }
 }
 
+int get_msr_sum(int cpu, off_t offset, unsigned long long *msr)
+{
+       int ret, idx;
+       unsigned long long msr_cur, msr_last;
+
+       if (!per_cpu_msr_sum)   /* periodic MSR accumulation was never started */
+               return 1;
+
+       idx = offset_to_idx(offset);
+       if (idx < 0)
+               return idx;     /* unknown MSR offset: propagate the error code */
+       /* get_msr_sum() = sum + (get_msr() - last) */
+       ret = get_msr(cpu, offset, &msr_cur);
+       if (ret)
+               return ret;
+       msr_last = per_cpu_msr_sum[cpu].entries[idx].last;
+       DELTA_WRAP32(msr_cur, msr_last);        /* delta since last sample, wrap-aware */
+       *msr = msr_last + per_cpu_msr_sum[cpu].entries[idx].sum;
+
+       return 0;
+}
+
+timer_t timerid;
+
+/* Timer callback, update the sum of MSRs periodically. */
+static int update_msr_sum(struct thread_data *t, struct core_data *c, struct pkg_data *p)
+{
+       int i, ret;
+       int cpu = t->cpu_id;
+
+       for (i = IDX_PKG_ENERGY; i < IDX_COUNT; i++) {
+               unsigned long long msr_cur, msr_last;
+               int offset;
+
+               if (!idx_valid(i))
+                       continue;
+               offset = idx_to_offset(i);
+               if (offset < 0)
+                       continue;
+               ret = get_msr(cpu, offset, &msr_cur);
+               if (ret) {
+                       fprintf(outf, "Can not update msr(0x%x)\n", offset);
+                       continue;       /* best effort: skip this MSR, keep updating the rest */
+               }
+
+               msr_last = per_cpu_msr_sum[cpu].entries[i].last;
+               per_cpu_msr_sum[cpu].entries[i].last = msr_cur & 0xffffffff;
+
+               DELTA_WRAP32(msr_cur, msr_last);
+               per_cpu_msr_sum[cpu].entries[i].sum += msr_last;        /* accumulate wrap-aware delta */
+       }
+       return 0;
+}
+
+static void
+msr_record_handler(union sigval v)
+{
+       for_all_cpus(update_msr_sum, EVEN_COUNTERS);    /* v unused: one timer covers all CPUs */
+}
+
+void msr_sum_record(void)
+{
+       struct itimerspec its;
+       struct sigevent sev;
+
+       per_cpu_msr_sum = calloc(topo.max_cpu_num + 1, sizeof(struct msr_sum_array));
+       if (!per_cpu_msr_sum) {
+               fprintf(outf, "Can not allocate memory for long time MSR.\n");
+               return;
+       }
+       /*
+        * Signal handler might be restricted, so use thread notifier instead.
+        */
+       memset(&sev, 0, sizeof(struct sigevent));
+       sev.sigev_notify = SIGEV_THREAD;
+       sev.sigev_notify_function = msr_record_handler;
+
+       sev.sigev_value.sival_ptr = &timerid;
+       if (timer_create(CLOCK_REALTIME, &sev, &timerid) == -1) {
+               fprintf(outf, "Can not create timer.\n");
+               goto release_msr;
+       }
+
+       its.it_value.tv_sec = 0;
+       its.it_value.tv_nsec = 1;       /* fire the first tick immediately */
+       /*
+        * A wraparound time has been calculated early.
+        * Some sources state that the peak power for a
+        * microprocessor is usually 1.5 times the TDP rating,
+        * use 2 * TDP for safety.
+        */
+       its.it_interval.tv_sec = rapl_joule_counter_range / 2;
+       its.it_interval.tv_nsec = 0;
+
+       if (timer_settime(timerid, 0, &its, NULL) == -1) {
+               fprintf(outf, "Can not set timer.\n");
+               goto release_timer;
+       }
+       return;
+
+ release_timer:
+       timer_delete(timerid);
+ release_msr:
+       free(per_cpu_msr_sum); per_cpu_msr_sum = NULL;  /* get_msr_sum() checks this; avoid use-after-free */
+}
 
 void turbostat_loop()
 {
@@ -3075,7 +3336,7 @@ restart:
        if (retval < -1) {
                exit(retval);
        } else if (retval == -1) {
-               if (restarted > 1) {
+               if (restarted > 10) {
                        exit(retval);
                }
                re_initialize();
@@ -3279,6 +3540,7 @@ int probe_nhm_msrs(unsigned int family, unsigned int model)
        case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
        case INTEL_FAM6_ATOM_GOLDMONT_D:        /* DNV */
        case INTEL_FAM6_ATOM_TREMONT:   /* EHL */
+       case INTEL_FAM6_ATOM_TREMONT_D: /* JVL */
                pkg_cstate_limits = glm_pkg_cstate_limits;
                break;
        default:
@@ -3361,6 +3623,17 @@ int is_ehl(unsigned int family, unsigned int model)
        }
        return 0;
 }
+int is_jvl(unsigned int family, unsigned int model)
+{
+       if (!genuine_intel)
+               return 0;
+
+       switch (model) {
+       case INTEL_FAM6_ATOM_TREMONT_D: /* JVL */
+               return 1;
+       }
+       return 0;
+}
 
 int has_turbo_ratio_limit(unsigned int family, unsigned int model)
 {
@@ -3474,6 +3747,20 @@ int has_config_tdp(unsigned int family, unsigned int model)
        }
 }
 
+static void
+remove_underbar(char *s)
+{
+       char *to = s;   /* in-place compaction: copy each char over, skipping '_' */
+
+       while (*s) {
+               if (*s != '_')
+                       *to++ = *s;
+               s++;
+       }
+
+       *to = 0;        /* re-terminate the (possibly shortened) string */
+}
+
 static void
 dump_cstate_pstate_config_info(unsigned int family, unsigned int model)
 {
@@ -3530,9 +3817,6 @@ dump_sysfs_cstate_config(void)
        int state;
        char *sp;
 
-       if (!DO_BIC(BIC_sysfs))
-               return;
-
        if (access("/sys/devices/system/cpu/cpuidle", R_OK)) {
                fprintf(outf, "cpuidle not loaded\n");
                return;
@@ -3559,6 +3843,8 @@ dump_sysfs_cstate_config(void)
                *sp = '\0';
                fclose(input);
 
+               remove_underbar(name_buf);
+
                sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/desc",
                        base_cpu, state);
                input = fopen(path, "r");
@@ -3645,7 +3931,7 @@ int print_epb(struct thread_data *t, struct core_data *c, struct pkg_data *p)
                return 0;
 
        if (cpu_migrate(cpu)) {
-               fprintf(outf, "Could not migrate to CPU %d\n", cpu);
+               fprintf(outf, "print_epb: Could not migrate to CPU %d\n", cpu);
                return -1;
        }
 
@@ -3689,7 +3975,7 @@ int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p)
                return 0;
 
        if (cpu_migrate(cpu)) {
-               fprintf(outf, "Could not migrate to CPU %d\n", cpu);
+               fprintf(outf, "print_hwp: Could not migrate to CPU %d\n", cpu);
                return -1;
        }
 
@@ -3777,7 +4063,7 @@ int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data
                return 0;
 
        if (cpu_migrate(cpu)) {
-               fprintf(outf, "Could not migrate to CPU %d\n", cpu);
+               fprintf(outf, "print_perf_limit: Could not migrate to CPU %d\n", cpu);
                return -1;
        }
 
@@ -3881,13 +4167,8 @@ double get_tdp_intel(unsigned int model)
 
 double get_tdp_amd(unsigned int family)
 {
-       switch (family) {
-       case 0x17:
-       case 0x18:
-       default:
-               /* This is the max stock TDP of HEDT/Server Fam17h chips */
-               return 250.0;
-       }
+       /* This is the max stock TDP of HEDT/Server Fam17h+ chips */
+       return 280.0;
 }
 
 /*
@@ -3959,6 +4240,14 @@ void rapl_probe_intel(unsigned int family, unsigned int model)
                        BIC_PRESENT(BIC_GFXWatt);
                }
                break;
+       case INTEL_FAM6_ATOM_TREMONT_D: /* JVL */
+               do_rapl = RAPL_PKG | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO;
+               BIC_PRESENT(BIC_PKG__);
+               if (rapl_joules)
+                       BIC_PRESENT(BIC_Pkg_J);
+               else
+                       BIC_PRESENT(BIC_PkgWatt);
+               break;
        case INTEL_FAM6_SKYLAKE_L:      /* SKL */
        case INTEL_FAM6_CANNONLAKE_L:   /* CNL */
                do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_GFX | RAPL_PKG_POWER_INFO;
@@ -4069,27 +4358,20 @@ void rapl_probe_amd(unsigned int family, unsigned int model)
 
        if (max_extended_level >= 0x80000007) {
                __cpuid(0x80000007, eax, ebx, ecx, edx);
-               /* RAPL (Fam 17h) */
+               /* RAPL (Fam 17h+) */
                has_rapl = edx & (1 << 14);
        }
 
-       if (!has_rapl)
+       if (!has_rapl || family < 0x17)
                return;
 
-       switch (family) {
-       case 0x17: /* Zen, Zen+ */
-       case 0x18: /* Hygon Dhyana */
-               do_rapl = RAPL_AMD_F17H | RAPL_PER_CORE_ENERGY;
-               if (rapl_joules) {
-                       BIC_PRESENT(BIC_Pkg_J);
-                       BIC_PRESENT(BIC_Cor_J);
-               } else {
-                       BIC_PRESENT(BIC_PkgWatt);
-                       BIC_PRESENT(BIC_CorWatt);
-               }
-               break;
-       default:
-               return;
+       do_rapl = RAPL_AMD_F17H | RAPL_PER_CORE_ENERGY;
+       if (rapl_joules) {
+               BIC_PRESENT(BIC_Pkg_J);
+               BIC_PRESENT(BIC_Cor_J);
+       } else {
+               BIC_PRESENT(BIC_PkgWatt);
+               BIC_PRESENT(BIC_CorWatt);
        }
 
        if (get_msr(base_cpu, MSR_RAPL_PWR_UNIT, &msr))
@@ -4162,7 +4444,7 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p
                return 0;
 
        if (cpu_migrate(cpu)) {
-               fprintf(outf, "Could not migrate to CPU %d\n", cpu);
+               fprintf(outf, "print_thermal: Could not migrate to CPU %d\n", cpu);
                return -1;
        }
 
@@ -4234,7 +4516,7 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
 
        cpu = t->cpu_id;
        if (cpu_migrate(cpu)) {
-               fprintf(outf, "Could not migrate to CPU %d\n", cpu);
+               fprintf(outf, "print_rapl: Could not migrate to CPU %d\n", cpu);
                return -1;
        }
 
@@ -4361,6 +4643,7 @@ int has_snb_msrs(unsigned int family, unsigned int model)
        case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
        case INTEL_FAM6_ATOM_GOLDMONT_D:        /* DNV */
        case INTEL_FAM6_ATOM_TREMONT:           /* EHL */
+       case INTEL_FAM6_ATOM_TREMONT_D:         /* JVL */
                return 1;
        }
        return 0;
@@ -4507,12 +4790,33 @@ double discover_bclk(unsigned int family, unsigned int model)
  * below this value, including the Digital Thermal Sensor (DTS),
  * Package Thermal Management Sensor (PTM), and thermal event thresholds.
  */
-int set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p)
+int read_tcc_activation_temp()
 {
        unsigned long long msr;
-       unsigned int target_c_local;
-       int cpu;
+       unsigned int tcc, target_c, offset_c;
 
+       /* Temperature Target MSR is Nehalem and newer only */
+       if (!do_nhm_platform_info)
+               return 0;
+
+       if (get_msr(base_cpu, MSR_IA32_TEMPERATURE_TARGET, &msr))
+               return 0;
+
+       target_c = (msr >> 16) & 0xFF;
+
+       offset_c = (msr >> 24) & 0xF;
+
+       tcc = target_c - offset_c;
+
+       if (!quiet)
+               fprintf(outf, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C) (%d default - %d offset)\n",
+                       base_cpu, msr, tcc, target_c, offset_c);
+
+       return tcc;
+}
+
+int set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p)
+{
        /* tcc_activation_temp is used only for dts or ptm */
        if (!(do_dts || do_ptm))
                return 0;
@@ -4521,43 +4825,18 @@ int set_temperature_target(struct thread_data *t, struct core_data *c, struct pk
        if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
                return 0;
 
-       cpu = t->cpu_id;
-       if (cpu_migrate(cpu)) {
-               fprintf(outf, "Could not migrate to CPU %d\n", cpu);
-               return -1;
-       }
-
        if (tcc_activation_temp_override != 0) {
                tcc_activation_temp = tcc_activation_temp_override;
-               fprintf(outf, "cpu%d: Using cmdline TCC Target (%d C)\n",
-                       cpu, tcc_activation_temp);
+               fprintf(outf, "Using cmdline TCC Target (%d C)\n", tcc_activation_temp);
                return 0;
        }
 
-       /* Temperature Target MSR is Nehalem and newer only */
-       if (!do_nhm_platform_info)
-               goto guess;
-
-       if (get_msr(base_cpu, MSR_IA32_TEMPERATURE_TARGET, &msr))
-               goto guess;
-
-       target_c_local = (msr >> 16) & 0xFF;
-
-       if (!quiet)
-               fprintf(outf, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C)\n",
-                       cpu, msr, target_c_local);
-
-       if (!target_c_local)
-               goto guess;
-
-       tcc_activation_temp = target_c_local;
-
-       return 0;
+       tcc_activation_temp = read_tcc_activation_temp();
+       if (tcc_activation_temp)
+               return 0;
 
-guess:
        tcc_activation_temp = TJMAX_DEFAULT;
-       fprintf(outf, "cpu%d: Guessing tjMax %d C, Please use -T to specify\n",
-               cpu, tcc_activation_temp);
+       fprintf(outf, "Guessing tjMax %d C, Please use -T to specify\n", tcc_activation_temp);
 
        return 0;
 }
@@ -4685,19 +4964,46 @@ unsigned int intel_model_duplicates(unsigned int model)
        case INTEL_FAM6_ICELAKE_NNPI:
        case INTEL_FAM6_TIGERLAKE_L:
        case INTEL_FAM6_TIGERLAKE:
+       case INTEL_FAM6_ROCKETLAKE:
+       case INTEL_FAM6_LAKEFIELD:
+       case INTEL_FAM6_ALDERLAKE:
                return INTEL_FAM6_CANNONLAKE_L;
 
-       case INTEL_FAM6_ATOM_TREMONT_D:
-               return INTEL_FAM6_ATOM_GOLDMONT_D;
-
        case INTEL_FAM6_ATOM_TREMONT_L:
                return INTEL_FAM6_ATOM_TREMONT;
 
        case INTEL_FAM6_ICELAKE_X:
+       case INTEL_FAM6_SAPPHIRERAPIDS_X:
                return INTEL_FAM6_SKYLAKE_X;
        }
        return model;
 }
+
+void print_dev_latency(void)
+{
+       char *path = "/dev/cpu_dma_latency";
+       int fd;
+       int value;
+       int retval;
+
+       fd = open(path, O_RDONLY);
+       if (fd < 0) {
+               warn("open %s", path);  /* was "fopen\n": this uses open(2), and warn() supplies the newline */
+               return;
+       }
+
+       retval = read(fd, (void *)&value, sizeof(int));
+       if (retval != sizeof(int)) {
+               warn("read %s", path);
+               close(fd);
+               return;
+       }
+       fprintf(outf, "/dev/cpu_dma_latency: %d usec (%s)\n",
+               value, value == 2000000000 ? "default" : "constrained");
+
+       close(fd);
+}
+
 void process_cpuid()
 {
        unsigned int eax, ebx, ecx, edx;
@@ -4916,6 +5222,14 @@ void process_cpuid()
                BIC_PRESENT(BIC_Mod_c6);
                use_c1_residency_msr = 1;
        }
+       if (is_jvl(family, model)) {
+               BIC_NOT_PRESENT(BIC_CPU_c3);
+               BIC_NOT_PRESENT(BIC_CPU_c7);
+               BIC_NOT_PRESENT(BIC_Pkgpc2);
+               BIC_NOT_PRESENT(BIC_Pkgpc3);
+               BIC_NOT_PRESENT(BIC_Pkgpc6);
+               BIC_NOT_PRESENT(BIC_Pkgpc7);
+       }
        if (is_dnv(family, model)) {
                BIC_PRESENT(BIC_CPU_c1);
                BIC_NOT_PRESENT(BIC_CPU_c3);
@@ -4935,9 +5249,12 @@ void process_cpuid()
                BIC_NOT_PRESENT(BIC_Pkgpc7);
        }
        if (has_c8910_msrs(family, model)) {
-               BIC_PRESENT(BIC_Pkgpc8);
-               BIC_PRESENT(BIC_Pkgpc9);
-               BIC_PRESENT(BIC_Pkgpc10);
+               if (pkg_cstate_limit >= PCL__8)
+                       BIC_PRESENT(BIC_Pkgpc8);
+               if (pkg_cstate_limit >= PCL__9)
+                       BIC_PRESENT(BIC_Pkgpc9);
+               if (pkg_cstate_limit >= PCL_10)
+                       BIC_PRESENT(BIC_Pkgpc10);
        }
        do_irtl_hsw = has_c8910_msrs(family, model);
        if (has_skl_msrs(family, model)) {
@@ -4966,6 +5283,8 @@ void process_cpuid()
        if (!quiet)
                dump_cstate_pstate_config_info(family, model);
 
+       if (!quiet)
+               print_dev_latency();
        if (!quiet)
                dump_sysfs_cstate_config();
        if (!quiet)
@@ -4980,6 +5299,9 @@ void process_cpuid()
        if (!access("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", R_OK))
                BIC_PRESENT(BIC_GFXMHz);
 
+       if (!access("/sys/class/graphics/fb0/device/drm/card0/gt_act_freq_mhz", R_OK))
+               BIC_PRESENT(BIC_GFXACTMHz);
+
        if (!access("/sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us", R_OK))
                BIC_PRESENT(BIC_CPU_LPI);
        else
@@ -5390,7 +5712,7 @@ int get_and_dump_counters(void)
 }
 
 void print_version() {
-       fprintf(outf, "turbostat version 20.03.20"
+       fprintf(outf, "turbostat version 20.09.30"
                " - Len Brown <lenb@kernel.org>\n");
 }
 
@@ -5597,6 +5919,8 @@ void probe_sysfs(void)
                *sp = '%';
                *(sp + 1) = '\0';
 
+               remove_underbar(name_buf);
+
                fclose(input);
 
                sprintf(path, "cpuidle/state%d/time", state);
@@ -5624,6 +5948,8 @@ void probe_sysfs(void)
                *sp = '\0';
                fclose(input);
 
+               remove_underbar(name_buf);
+
                sprintf(path, "cpuidle/state%d/usage", state);
 
                if (is_deferred_skip(name_buf))
@@ -5868,6 +6194,7 @@ int main(int argc, char **argv)
                return 0;
        }
 
+       msr_sum_record();
        /*
         * if any params left, it must be a command to fork
         */
index 3fe1eed..ff6c666 100644 (file)
@@ -622,6 +622,57 @@ void cmdline(int argc, char **argv)
        }
 }
 
+/*
+ * Open a file, and exit on failure
+ */
+FILE *fopen_or_die(const char *path, const char *mode)
+{
+       FILE *filep = fopen(path, mode);        /* honor caller's mode; was hard-coded "r" */
+
+       if (!filep)
+               err(1, "%s: open failed", path);
+       return filep;
+}
+
+void err_on_hypervisor(void)
+{
+       FILE *cpuinfo;
+       char *flags, *hypervisor;
+       char *buffer;
+
+       /* On VMs /proc/cpuinfo contains a "flags" entry for hypervisor */
+       cpuinfo = fopen_or_die("/proc/cpuinfo", "r");
+
+       buffer = calloc(1, 4096);       /* zero-fill: fread() below does not NUL-terminate */
+       if (!buffer) {
+               fclose(cpuinfo);
+               err(-ENOMEM, "buffer malloc fail");
+       }
+
+       if (!fread(buffer, 1024, 1, cpuinfo)) {
+               fclose(cpuinfo);
+               free(buffer);
+               err(1, "Reading /proc/cpuinfo failed");
+       }
+
+       flags = strstr(buffer, "flags");
+       rewind(cpuinfo);
+       fseek(cpuinfo, flags ? flags - buffer : 0, SEEK_SET);   /* strstr() may return NULL */
+       if (!fgets(buffer, 4096, cpuinfo)) {
+               fclose(cpuinfo);
+               free(buffer);
+               err(1, "Reading /proc/cpuinfo failed");
+       }
+       fclose(cpuinfo);
+
+       hypervisor = strstr(buffer, "hypervisor");
+
+       free(buffer);
+
+       if (hypervisor)
+               err(-1,
+                   "not supported on this virtual machine");
+}
 
 int get_msr(int cpu, int offset, unsigned long long *msr)
 {
@@ -635,8 +686,10 @@ int get_msr(int cpu, int offset, unsigned long long *msr)
                err(-1, "%s open failed, try chown or chmod +r /dev/cpu/*/msr, or run as root", pathname);
 
        retval = pread(fd, msr, sizeof(*msr), offset);
-       if (retval != sizeof(*msr))
+       if (retval != sizeof(*msr)) {
+               err_on_hypervisor();
                err(-1, "%s offset 0x%llx read failed", pathname, (unsigned long long)offset);
+       }
 
        if (debug > 1)
                fprintf(stderr, "get_msr(cpu%d, 0x%X, 0x%llX)\n", cpu, offset, *msr);
@@ -1086,18 +1139,6 @@ int update_cpu_msrs(int cpu)
        return 0;
 }
 
-/*
- * Open a file, and exit on failure
- */
-FILE *fopen_or_die(const char *path, const char *mode)
-{
-       FILE *filep = fopen(path, "r");
-
-       if (!filep)
-               err(1, "%s: open failed", path);
-       return filep;
-}
-
 unsigned int get_pkg_num(int cpu)
 {
        FILE *fp;
index 55bd387..52d3f03 100644 (file)
@@ -145,7 +145,7 @@ TEST(clone3_cap_checkpoint_restore)
        test_clone3_supported();
 
        EXPECT_EQ(getuid(), 0)
-               XFAIL(return, "Skipping all tests as non-root\n");
+               SKIP(return, "Skipping all tests as non-root");
 
        memset(&set_tid, 0, sizeof(set_tid));
 
index c99b98b..575b391 100644 (file)
@@ -44,7 +44,7 @@ TEST(close_range)
                fd = open("/dev/null", O_RDONLY | O_CLOEXEC);
                ASSERT_GE(fd, 0) {
                        if (errno == ENOENT)
-                               XFAIL(return, "Skipping test since /dev/null does not exist");
+                               SKIP(return, "Skipping test since /dev/null does not exist");
                }
 
                open_fds[i] = fd;
@@ -52,7 +52,7 @@ TEST(close_range)
 
        EXPECT_EQ(-1, sys_close_range(open_fds[0], open_fds[100], -1)) {
                if (errno == ENOSYS)
-                       XFAIL(return, "close_range() syscall not supported");
+                       SKIP(return, "close_range() syscall not supported");
        }
 
        EXPECT_EQ(0, sys_close_range(open_fds[0], open_fds[50], 0));
@@ -108,7 +108,7 @@ TEST(close_range_unshare)
                fd = open("/dev/null", O_RDONLY | O_CLOEXEC);
                ASSERT_GE(fd, 0) {
                        if (errno == ENOENT)
-                               XFAIL(return, "Skipping test since /dev/null does not exist");
+                               SKIP(return, "Skipping test since /dev/null does not exist");
                }
 
                open_fds[i] = fd;
@@ -197,7 +197,7 @@ TEST(close_range_unshare_capped)
                fd = open("/dev/null", O_RDONLY | O_CLOEXEC);
                ASSERT_GE(fd, 0) {
                        if (errno == ENOENT)
-                               XFAIL(return, "Skipping test since /dev/null does not exist");
+                               SKIP(return, "Skipping test since /dev/null does not exist");
                }
 
                open_fds[i] = fd;
index 1d27f52..477cbb0 100644 (file)
@@ -74,7 +74,7 @@ static int __do_binderfs_test(struct __test_metadata *_metadata)
        ret = mount(NULL, binderfs_mntpt, "binder", 0, 0);
        EXPECT_EQ(ret, 0) {
                if (errno == ENODEV)
-                       XFAIL(goto out, "binderfs missing");
+                       SKIP(goto out, "binderfs missing");
                TH_LOG("%s - Failed to mount binderfs", strerror(errno));
                goto rmdir;
        }
@@ -475,10 +475,10 @@ TEST(binderfs_stress)
 TEST(binderfs_test_privileged)
 {
        if (geteuid() != 0)
-               XFAIL(return, "Tests are not run as root. Skipping privileged tests");
+               SKIP(return, "Tests are not run as root. Skipping privileged tests");
 
        if (__do_binderfs_test(_metadata))
-               XFAIL(return, "The Android binderfs filesystem is not available");
+               SKIP(return, "The Android binderfs filesystem is not available");
 }
 
 TEST(binderfs_test_unprivileged)
@@ -511,7 +511,7 @@ TEST(binderfs_test_unprivileged)
        ret = wait_for_pid(pid);
        if (ret) {
                if (ret == 2)
-                       XFAIL(return, "The Android binderfs filesystem is not available");
+                       SKIP(return, "The Android binderfs filesystem is not available");
                ASSERT_EQ(ret, 0) {
                        TH_LOG("wait_for_pid() failed");
                }
index 3bcd4c3..b4da41d 100644 (file)
@@ -6,7 +6,7 @@
 echo 0 > events/enable
 echo > dynamic_events
 
-PLACE=kernel_clone
+PLACE=$FUNCTION_FORK
 
 echo "p:myevent1 $PLACE" >> dynamic_events
 echo "r:myevent2 $PLACE" >> dynamic_events
index 4389619..3a0e288 100644 (file)
@@ -6,7 +6,7 @@
 echo 0 > events/enable
 echo > dynamic_events
 
-PLACE=kernel_clone
+PLACE=$FUNCTION_FORK
 
 setup_events() {
 echo "p:myevent1 $PLACE" >> dynamic_events
index a8603bd..d3e138e 100644 (file)
@@ -6,7 +6,7 @@
 echo 0 > events/enable
 echo > dynamic_events
 
-PLACE=kernel_clone
+PLACE=$FUNCTION_FORK
 
 setup_events() {
 echo "p:myevent1 $PLACE" >> dynamic_events
index acb17ce..8054196 100644 (file)
@@ -39,7 +39,7 @@ do_test() {
     disable_tracing
 
     echo do_execve* > set_ftrace_filter
-    echo *do_fork >> set_ftrace_filter
+    echo $FUNCTION_FORK >> set_ftrace_filter
 
     echo $PID > set_ftrace_notrace_pid
     echo function > current_tracer
index 9f0a968..2f72112 100644 (file)
@@ -39,7 +39,7 @@ do_test() {
     disable_tracing
 
     echo do_execve* > set_ftrace_filter
-    echo *do_fork >> set_ftrace_filter
+    echo $FUNCTION_FORK >> set_ftrace_filter
 
     echo $PID > set_ftrace_pid
     echo function > current_tracer
index 98305d7..191d116 100644 (file)
@@ -4,9 +4,9 @@
 # requires: set_ftrace_filter
 # flags: instance
 
-echo kernel_clone:stacktrace >> set_ftrace_filter
+echo $FUNCTION_FORK:stacktrace >> set_ftrace_filter
 
-grep -q "kernel_clone:stacktrace:unlimited" set_ftrace_filter
+grep -q "$FUNCTION_FORK:stacktrace:unlimited" set_ftrace_filter
 
 (echo "forked"; sleep 1)
 
index c5dec55..a6fac92 100644 (file)
@@ -133,6 +133,13 @@ yield() {
     ping $LOCALHOST -c 1 || sleep .001 || usleep 1 || sleep 1
 }
 
+# The fork function in the kernel was renamed from "_do_fork" to
+# "kernel_clone". As older tests should still work with older kernels
+# as well as newer kernels, check which version of fork is used on this
+# kernel so that the tests can use the fork function for the running kernel.
+FUNCTION_FORK=`(if grep '\bkernel_clone\b' /proc/kallsyms > /dev/null; then
+                echo kernel_clone; else echo '_do_fork'; fi)`
+
 # Since probe event command may include backslash, explicitly use printf "%s"
 # to NOT interpret it.
 ftrace_errlog_check() { # err-prefix command-with-error-pos-by-^ command-file
index 9737cd0..2428a3e 100644 (file)
@@ -3,7 +3,7 @@
 # description: Kprobe dynamic event - adding and removing
 # requires: kprobe_events
 
-echo p:myevent kernel_clone > kprobe_events
+echo p:myevent $FUNCTION_FORK > kprobe_events
 grep myevent kprobe_events
 test -d events/kprobes/myevent
 echo > kprobe_events
index f9a40af..010a8b1 100644 (file)
@@ -3,7 +3,7 @@
 # description: Kprobe dynamic event - busy event check
 # requires: kprobe_events
 
-echo p:myevent kernel_clone > kprobe_events
+echo p:myevent $FUNCTION_FORK > kprobe_events
 test -d events/kprobes/myevent
 echo 1 > events/kprobes/myevent/enable
 echo > kprobe_events && exit_fail # this must fail
index eb543d3..a96a1dc 100644 (file)
@@ -3,13 +3,13 @@
 # description: Kprobe dynamic event with arguments
 # requires: kprobe_events
 
-echo 'p:testprobe kernel_clone $stack $stack0 +0($stack)' > kprobe_events
+echo "p:testprobe $FUNCTION_FORK \$stack \$stack0 +0(\$stack)" > kprobe_events
 grep testprobe kprobe_events | grep -q 'arg1=\$stack arg2=\$stack0 arg3=+0(\$stack)'
 test -d events/kprobes/testprobe
 
 echo 1 > events/kprobes/testprobe/enable
 ( echo "forked")
-grep testprobe trace | grep 'kernel_clone' | \
+grep testprobe trace | grep "$FUNCTION_FORK" | \
   grep -q 'arg1=0x[[:xdigit:]]* arg2=0x[[:xdigit:]]* arg3=0x[[:xdigit:]]*$'
 
 echo 0 > events/kprobes/testprobe/enable
index 4e5b63b..a053ee2 100644 (file)
@@ -5,7 +5,7 @@
 
 grep -A1 "fetcharg:" README | grep -q "\$comm" || exit_unsupported # this is too old
 
-echo 'p:testprobe kernel_clone comm=$comm ' > kprobe_events
+echo "p:testprobe $FUNCTION_FORK comm=\$comm " > kprobe_events
 grep testprobe kprobe_events | grep -q 'comm=$comm'
 test -d events/kprobes/testprobe
 
index a1d7058..84285a6 100644 (file)
@@ -30,13 +30,13 @@ esac
 : "Test get argument (1)"
 echo "p:testprobe tracefs_create_dir arg1=+0(${ARG1}):string" > kprobe_events
 echo 1 > events/kprobes/testprobe/enable
-echo "p:test kernel_clone" >> kprobe_events
+echo "p:test $FUNCTION_FORK" >> kprobe_events
 grep -qe "testprobe.* arg1=\"test\"" trace
 
 echo 0 > events/kprobes/testprobe/enable
 : "Test get argument (2)"
 echo "p:testprobe tracefs_create_dir arg1=+0(${ARG1}):string arg2=+0(${ARG1}):string" > kprobe_events
 echo 1 > events/kprobes/testprobe/enable
-echo "p:test kernel_clone" >> kprobe_events
+echo "p:test $FUNCTION_FORK" >> kprobe_events
 grep -qe "testprobe.* arg1=\"test\" arg2=\"test\"" trace
 
index bd25dd0..717130e 100644 (file)
@@ -14,12 +14,12 @@ elif ! grep "$SYMBOL\$" /proc/kallsyms; then
 fi
 
 : "Test get basic types symbol argument"
-echo "p:testprobe_u kernel_clone arg1=@linux_proc_banner:u64 arg2=@linux_proc_banner:u32 arg3=@linux_proc_banner:u16 arg4=@linux_proc_banner:u8" > kprobe_events
-echo "p:testprobe_s kernel_clone arg1=@linux_proc_banner:s64 arg2=@linux_proc_banner:s32 arg3=@linux_proc_banner:s16 arg4=@linux_proc_banner:s8" >> kprobe_events
+echo "p:testprobe_u $FUNCTION_FORK arg1=@linux_proc_banner:u64 arg2=@linux_proc_banner:u32 arg3=@linux_proc_banner:u16 arg4=@linux_proc_banner:u8" > kprobe_events
+echo "p:testprobe_s $FUNCTION_FORK arg1=@linux_proc_banner:s64 arg2=@linux_proc_banner:s32 arg3=@linux_proc_banner:s16 arg4=@linux_proc_banner:s8" >> kprobe_events
 if grep -q "x8/16/32/64" README; then
-  echo "p:testprobe_x kernel_clone arg1=@linux_proc_banner:x64 arg2=@linux_proc_banner:x32 arg3=@linux_proc_banner:x16 arg4=@linux_proc_banner:x8" >> kprobe_events
+  echo "p:testprobe_x $FUNCTION_FORK arg1=@linux_proc_banner:x64 arg2=@linux_proc_banner:x32 arg3=@linux_proc_banner:x16 arg4=@linux_proc_banner:x8" >> kprobe_events
 fi
-echo "p:testprobe_bf kernel_clone arg1=@linux_proc_banner:b8@4/32" >> kprobe_events
+echo "p:testprobe_bf $FUNCTION_FORK arg1=@linux_proc_banner:b8@4/32" >> kprobe_events
 echo 1 > events/kprobes/enable
 (echo "forked")
 echo 0 > events/kprobes/enable
@@ -27,7 +27,7 @@ grep "testprobe_[usx]:.* arg1=.* arg2=.* arg3=.* arg4=.*" trace
 grep "testprobe_bf:.* arg1=.*" trace
 
 : "Test get string symbol argument"
-echo "p:testprobe_str kernel_clone arg1=@linux_proc_banner:string" > kprobe_events
+echo "p:testprobe_str $FUNCTION_FORK arg1=@linux_proc_banner:string" > kprobe_events
 echo 1 > events/kprobes/enable
 (echo "forked")
 echo 0 > events/kprobes/enable
index 91fcce1..25b7708 100644 (file)
@@ -4,7 +4,7 @@
 # requires: kprobe_events "x8/16/32/64":README
 
 gen_event() { # Bitsize
-  echo "p:testprobe kernel_clone \$stack0:s$1 \$stack0:u$1 \$stack0:x$1 \$stack0:b4@4/$1"
+  echo "p:testprobe $FUNCTION_FORK \$stack0:s$1 \$stack0:u$1 \$stack0:x$1 \$stack0:b4@4/$1"
 }
 
 check_types() { # s-type u-type x-type bf-type width
index a30a9c0..d25d01a 100644 (file)
@@ -9,12 +9,16 @@ grep -A10 "fetcharg:" README | grep -q '\[u\]<offset>' || exit_unsupported
 :;: "user-memory access syntax and ustring working on user memory";:
 echo 'p:myevent do_sys_open path=+0($arg2):ustring path2=+u0($arg2):string' \
        > kprobe_events
+echo 'p:myevent2 do_sys_openat2 path=+0($arg2):ustring path2=+u0($arg2):string' \
+       >> kprobe_events
 
 grep myevent kprobe_events | \
        grep -q 'path=+0($arg2):ustring path2=+u0($arg2):string'
 echo 1 > events/kprobes/myevent/enable
+echo 1 > events/kprobes/myevent2/enable
 echo > /dev/null
 echo 0 > events/kprobes/myevent/enable
+echo 0 > events/kprobes/myevent2/enable
 
 grep myevent trace | grep -q 'path="/dev/null" path2="/dev/null"'
 
index 0d17909..5556292 100644 (file)
@@ -5,29 +5,29 @@
 
 # prepare
 echo nop > current_tracer
-echo kernel_clone > set_ftrace_filter
-echo 'p:testprobe kernel_clone' > kprobe_events
+echo $FUNCTION_FORK > set_ftrace_filter
+echo "p:testprobe $FUNCTION_FORK" > kprobe_events
 
 # kprobe on / ftrace off
 echo 1 > events/kprobes/testprobe/enable
 echo > trace
 ( echo "forked")
 grep testprobe trace
-! grep 'kernel_clone <-' trace
+! grep "$FUNCTION_FORK <-" trace
 
 # kprobe on / ftrace on
 echo function > current_tracer
 echo > trace
 ( echo "forked")
 grep testprobe trace
-grep 'kernel_clone <-' trace
+grep "$FUNCTION_FORK <-" trace
 
 # kprobe off / ftrace on
 echo 0 > events/kprobes/testprobe/enable
 echo > trace
 ( echo "forked")
 ! grep testprobe trace
-grep 'kernel_clone <-' trace
+grep "$FUNCTION_FORK <-" trace
 
 # kprobe on / ftrace on
 echo 1 > events/kprobes/testprobe/enable
@@ -35,11 +35,11 @@ echo function > current_tracer
 echo > trace
 ( echo "forked")
 grep testprobe trace
-grep 'kernel_clone <-' trace
+grep "$FUNCTION_FORK <-" trace
 
 # kprobe on / ftrace off
 echo nop > current_tracer
 echo > trace
 ( echo "forked")
 grep testprobe trace
-! grep 'kernel_clone <-' trace
+! grep "$FUNCTION_FORK <-" trace
index 45d90b6..f0d5b77 100644 (file)
@@ -4,7 +4,7 @@
 # requires: kprobe_events "Create/append/":README
 
 # Choose 2 symbols for target
-SYM1=kernel_clone
+SYM1=$FUNCTION_FORK
 SYM2=do_exit
 EVENT_NAME=kprobes/testevent
 
index 1b5550e..fa928b4 100644 (file)
@@ -86,15 +86,15 @@ esac
 
 # multiprobe errors
 if grep -q "Create/append/" README && grep -q "imm-value" README; then
-echo 'p:kprobes/testevent kernel_clone' > kprobe_events
+echo "p:kprobes/testevent $FUNCTION_FORK" > kprobe_events
 check_error '^r:kprobes/testevent do_exit'     # DIFF_PROBE_TYPE
 
 # Explicitly use printf "%s" to not interpret \1
-printf "%s" 'p:kprobes/testevent kernel_clone abcd=\1' > kprobe_events
-check_error 'p:kprobes/testevent kernel_clone ^bcd=\1' # DIFF_ARG_TYPE
-check_error 'p:kprobes/testevent kernel_clone ^abcd=\1:u8'     # DIFF_ARG_TYPE
-check_error 'p:kprobes/testevent kernel_clone ^abcd=\"foo"'    # DIFF_ARG_TYPE
-check_error '^p:kprobes/testevent kernel_clone abcd=\1'        # SAME_PROBE
+printf "%s" "p:kprobes/testevent $FUNCTION_FORK abcd=\\1" > kprobe_events
+check_error "p:kprobes/testevent $FUNCTION_FORK ^bcd=\\1"      # DIFF_ARG_TYPE
+check_error "p:kprobes/testevent $FUNCTION_FORK ^abcd=\\1:u8"  # DIFF_ARG_TYPE
+check_error "p:kprobes/testevent $FUNCTION_FORK ^abcd=\\\"foo\"" # DIFF_ARG_TYPE
+check_error "^p:kprobes/testevent $FUNCTION_FORK abcd=\\1"     # SAME_PROBE
 fi
 
 # %return suffix errors
index 7ae492c..197cc2a 100644 (file)
@@ -4,14 +4,14 @@
 # requires: kprobe_events
 
 # Add new kretprobe event
-echo 'r:testprobe2 kernel_clone $retval' > kprobe_events
+echo "r:testprobe2 $FUNCTION_FORK \$retval" > kprobe_events
 grep testprobe2 kprobe_events | grep -q 'arg1=\$retval'
 test -d events/kprobes/testprobe2
 
 echo 1 > events/kprobes/testprobe2/enable
 ( echo "forked")
 
-cat trace | grep testprobe2 | grep -q '<- kernel_clone'
+cat trace | grep testprobe2 | grep -q "<- $FUNCTION_FORK"
 
 echo 0 > events/kprobes/testprobe2/enable
 echo '-:testprobe2' >> kprobe_events
index c4093fc..98166fa 100644 (file)
@@ -4,7 +4,7 @@
 # requires: kprobe_events
 
 ! grep -q 'myevent' kprobe_profile
-echo p:myevent kernel_clone > kprobe_events
+echo "p:myevent $FUNCTION_FORK" > kprobe_events
 grep -q 'myevent[[:space:]]*0[[:space:]]*0$' kprobe_profile
 echo 1 > events/kprobes/myevent/enable
 ( echo "forked" )
index d747d6b..edce854 100644 (file)
        snprintf(_metadata->results->reason, \
                 sizeof(_metadata->results->reason), fmt, ##__VA_ARGS__); \
        if (TH_LOG_ENABLED) { \
-               fprintf(TH_LOG_STREAM, "#      SKIP     %s\n", \
+               fprintf(TH_LOG_STREAM, "#      SKIP      %s\n", \
                        _metadata->results->reason); \
        } \
        _metadata->passed = 1; \
index d2c2d62..7a2c242 100644 (file)
@@ -1,10 +1,13 @@
 # SPDX-License-Identifier: GPL-2.0-only
+/aarch64/get-reg-list
+/aarch64/get-reg-list-sve
 /s390x/memop
 /s390x/resets
 /s390x/sync_regs_test
 /x86_64/cr4_cpuid_sync_test
 /x86_64/debug_regs
 /x86_64/evmcs_test
+/x86_64/kvm_pv_test
 /x86_64/hyperv_cpuid
 /x86_64/mmio_warning_test
 /x86_64/platform_info_test
@@ -24,6 +27,7 @@
 /clear_dirty_log_test
 /demand_paging_test
 /dirty_log_test
+/dirty_log_perf_test
 /kvm_create_max_vcpus
 /set_memory_region_test
 /steal_time
index 30afbad..3d14ef7 100644 (file)
@@ -34,13 +34,14 @@ ifeq ($(ARCH),s390)
 endif
 
 LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c lib/test_util.c
-LIBKVM_x86_64 = lib/x86_64/processor.c lib/x86_64/vmx.c lib/x86_64/svm.c lib/x86_64/ucall.c
+LIBKVM_x86_64 = lib/x86_64/processor.c lib/x86_64/vmx.c lib/x86_64/svm.c lib/x86_64/ucall.c lib/x86_64/handlers.S
 LIBKVM_aarch64 = lib/aarch64/processor.c lib/aarch64/ucall.c
 LIBKVM_s390x = lib/s390x/processor.c lib/s390x/ucall.c
 
 TEST_GEN_PROGS_x86_64 = x86_64/cr4_cpuid_sync_test
 TEST_GEN_PROGS_x86_64 += x86_64/evmcs_test
 TEST_GEN_PROGS_x86_64 += x86_64/hyperv_cpuid
+TEST_GEN_PROGS_x86_64 += x86_64/kvm_pv_test
 TEST_GEN_PROGS_x86_64 += x86_64/mmio_warning_test
 TEST_GEN_PROGS_x86_64 += x86_64/platform_info_test
 TEST_GEN_PROGS_x86_64 += x86_64/set_sregs_test
@@ -58,14 +59,15 @@ TEST_GEN_PROGS_x86_64 += x86_64/xss_msr_test
 TEST_GEN_PROGS_x86_64 += x86_64/debug_regs
 TEST_GEN_PROGS_x86_64 += x86_64/tsc_msrs_test
 TEST_GEN_PROGS_x86_64 += x86_64/user_msr_test
-TEST_GEN_PROGS_x86_64 += clear_dirty_log_test
 TEST_GEN_PROGS_x86_64 += demand_paging_test
 TEST_GEN_PROGS_x86_64 += dirty_log_test
+TEST_GEN_PROGS_x86_64 += dirty_log_perf_test
 TEST_GEN_PROGS_x86_64 += kvm_create_max_vcpus
 TEST_GEN_PROGS_x86_64 += set_memory_region_test
 TEST_GEN_PROGS_x86_64 += steal_time
 
-TEST_GEN_PROGS_aarch64 += clear_dirty_log_test
+TEST_GEN_PROGS_aarch64 += aarch64/get-reg-list
+TEST_GEN_PROGS_aarch64 += aarch64/get-reg-list-sve
 TEST_GEN_PROGS_aarch64 += demand_paging_test
 TEST_GEN_PROGS_aarch64 += dirty_log_test
 TEST_GEN_PROGS_aarch64 += kvm_create_max_vcpus
@@ -111,14 +113,21 @@ LDFLAGS += -pthread $(no-pie-option) $(pgste-option)
 include ../lib.mk
 
 STATIC_LIBS := $(OUTPUT)/libkvm.a
-LIBKVM_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM))
-EXTRA_CLEAN += $(LIBKVM_OBJ) $(STATIC_LIBS) cscope.*
+LIBKVM_C := $(filter %.c,$(LIBKVM))
+LIBKVM_S := $(filter %.S,$(LIBKVM))
+LIBKVM_C_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM_C))
+LIBKVM_S_OBJ := $(patsubst %.S, $(OUTPUT)/%.o, $(LIBKVM_S))
+EXTRA_CLEAN += $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ) $(STATIC_LIBS) cscope.*
+
+x := $(shell mkdir -p $(sort $(dir $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ))))
+$(LIBKVM_C_OBJ): $(OUTPUT)/%.o: %.c
+       $(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@
 
-x := $(shell mkdir -p $(sort $(dir $(LIBKVM_OBJ))))
-$(LIBKVM_OBJ): $(OUTPUT)/%.o: %.c
+$(LIBKVM_S_OBJ): $(OUTPUT)/%.o: %.S
        $(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@
 
-$(OUTPUT)/libkvm.a: $(LIBKVM_OBJ)
+LIBKVM_OBJS = $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ)
+$(OUTPUT)/libkvm.a: $(LIBKVM_OBJS)
        $(AR) crs $@ $^
 
 x := $(shell mkdir -p $(sort $(dir $(TEST_GEN_PROGS))))
diff --git a/tools/testing/selftests/kvm/aarch64/get-reg-list-sve.c b/tools/testing/selftests/kvm/aarch64/get-reg-list-sve.c
new file mode 100644 (file)
index 0000000..efba766
--- /dev/null
@@ -0,0 +1,3 @@
+// SPDX-License-Identifier: GPL-2.0
+#define REG_LIST_SVE
+#include "get-reg-list.c"
diff --git a/tools/testing/selftests/kvm/aarch64/get-reg-list.c b/tools/testing/selftests/kvm/aarch64/get-reg-list.c
new file mode 100644 (file)
index 0000000..33218a3
--- /dev/null
@@ -0,0 +1,841 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Check for KVM_GET_REG_LIST regressions.
+ *
+ * Copyright (C) 2020, Red Hat, Inc.
+ *
+ * When attempting to migrate from a host with an older kernel to a host
+ * with a newer kernel we allow the newer kernel on the destination to
+ * list new registers with get-reg-list. We assume they'll be unused, at
+ * least until the guest reboots, and so they're relatively harmless.
+ * However, if the destination host with the newer kernel is missing
+ * registers which the source host with the older kernel has, then that's
+ * a regression in get-reg-list. This test checks for that regression by
+ * checking the current list against a blessed list. We should never have
+ * missing registers, but if new ones appear then they can probably be
+ * added to the blessed list. A completely new blessed list can be created
+ * by running the test with the --list command line argument.
+ *
+ * Note, the blessed list should be created from the oldest possible
+ * kernel. We can't go older than v4.15, though, because that's the first
+ * release to expose the ID system registers in KVM_GET_REG_LIST, see
+ * commit 93390c0a1b20 ("arm64: KVM: Hide unsupported AArch64 CPU features
+ * from guests"). Also, one must use the --core-reg-fixup command line
+ * option when running on an older kernel that doesn't include df205b5c6328
+ * ("KVM: arm64: Filter out invalid core register IDs in KVM_GET_REG_LIST")
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "kvm_util.h"
+#include "test_util.h"
+#include "processor.h"
+
+#ifdef REG_LIST_SVE
+#define reg_list_sve() (true)
+#else
+#define reg_list_sve() (false)
+#endif
+
+#define REG_MASK (KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_COPROC_MASK)
+
+#define for_each_reg(i)                                                                \
+       for ((i) = 0; (i) < reg_list->n; ++(i))
+
+#define for_each_missing_reg(i)                                                        \
+       for ((i) = 0; (i) < blessed_n; ++(i))                                   \
+               if (!find_reg(reg_list->reg, reg_list->n, blessed_reg[i]))
+
+#define for_each_new_reg(i)                                                    \
+       for ((i) = 0; (i) < reg_list->n; ++(i))                                 \
+               if (!find_reg(blessed_reg, blessed_n, reg_list->reg[i]))
+
+
+static struct kvm_reg_list *reg_list;
+
+static __u64 base_regs[], vregs[], sve_regs[], rejects_set[];
+static __u64 base_regs_n, vregs_n, sve_regs_n, rejects_set_n;
+static __u64 *blessed_reg, blessed_n;
+
+static bool find_reg(__u64 regs[], __u64 nr_regs, __u64 reg)
+{
+       int i;
+
+       for (i = 0; i < nr_regs; ++i)
+               if (reg == regs[i])
+                       return true;
+       return false;
+}
+
+static const char *str_with_index(const char *template, __u64 index)
+{
+       char *str, *p;
+       int n;
+
+       str = strdup(template);
+       p = strstr(str, "##");
+       n = sprintf(p, "%lld", index);
+       strcat(p + n, strstr(template, "##") + 2);
+
+       return (const char *)str;
+}
+
+#define CORE_REGS_XX_NR_WORDS  2
+#define CORE_SPSR_XX_NR_WORDS  2
+#define CORE_FPREGS_XX_NR_WORDS        4
+
+static const char *core_id_to_str(__u64 id)
+{
+       __u64 core_off = id & ~REG_MASK, idx;
+
+       /*
+        * core_off is the offset into struct kvm_regs
+        */
+       switch (core_off) {
+       case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
+            KVM_REG_ARM_CORE_REG(regs.regs[30]):
+               idx = (core_off - KVM_REG_ARM_CORE_REG(regs.regs[0])) / CORE_REGS_XX_NR_WORDS;
+               TEST_ASSERT(idx < 31, "Unexpected regs.regs index: %lld", idx);
+               return str_with_index("KVM_REG_ARM_CORE_REG(regs.regs[##])", idx);
+       case KVM_REG_ARM_CORE_REG(regs.sp):
+               return "KVM_REG_ARM_CORE_REG(regs.sp)";
+       case KVM_REG_ARM_CORE_REG(regs.pc):
+               return "KVM_REG_ARM_CORE_REG(regs.pc)";
+       case KVM_REG_ARM_CORE_REG(regs.pstate):
+               return "KVM_REG_ARM_CORE_REG(regs.pstate)";
+       case KVM_REG_ARM_CORE_REG(sp_el1):
+               return "KVM_REG_ARM_CORE_REG(sp_el1)";
+       case KVM_REG_ARM_CORE_REG(elr_el1):
+               return "KVM_REG_ARM_CORE_REG(elr_el1)";
+       case KVM_REG_ARM_CORE_REG(spsr[0]) ...
+            KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
+               idx = (core_off - KVM_REG_ARM_CORE_REG(spsr[0])) / CORE_SPSR_XX_NR_WORDS;
+               TEST_ASSERT(idx < KVM_NR_SPSR, "Unexpected spsr index: %lld", idx);
+               return str_with_index("KVM_REG_ARM_CORE_REG(spsr[##])", idx);
+       case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
+            KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
+               idx = (core_off - KVM_REG_ARM_CORE_REG(fp_regs.vregs[0])) / CORE_FPREGS_XX_NR_WORDS;
+               TEST_ASSERT(idx < 32, "Unexpected fp_regs.vregs index: %lld", idx);
+               return str_with_index("KVM_REG_ARM_CORE_REG(fp_regs.vregs[##])", idx);
+       case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
+               return "KVM_REG_ARM_CORE_REG(fp_regs.fpsr)";
+       case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
+               return "KVM_REG_ARM_CORE_REG(fp_regs.fpcr)";
+       }
+
+       TEST_FAIL("Unknown core reg id: 0x%llx", id);
+       return NULL;
+}
+
+static const char *sve_id_to_str(__u64 id)
+{
+       __u64 sve_off, n, i;
+
+       if (id == KVM_REG_ARM64_SVE_VLS)
+               return "KVM_REG_ARM64_SVE_VLS";
+
+       sve_off = id & ~(REG_MASK | ((1ULL << 5) - 1));
+       i = id & (KVM_ARM64_SVE_MAX_SLICES - 1);
+
+       TEST_ASSERT(i == 0, "Currently we don't expect slice > 0, reg id 0x%llx", id);
+
+       switch (sve_off) {
+       case KVM_REG_ARM64_SVE_ZREG_BASE ...
+            KVM_REG_ARM64_SVE_ZREG_BASE + (1ULL << 5) * KVM_ARM64_SVE_NUM_ZREGS - 1:
+               n = (id >> 5) & (KVM_ARM64_SVE_NUM_ZREGS - 1);
+               TEST_ASSERT(id == KVM_REG_ARM64_SVE_ZREG(n, 0),
+                           "Unexpected bits set in SVE ZREG id: 0x%llx", id);
+               return str_with_index("KVM_REG_ARM64_SVE_ZREG(##, 0)", n);
+       case KVM_REG_ARM64_SVE_PREG_BASE ...
+            KVM_REG_ARM64_SVE_PREG_BASE + (1ULL << 5) * KVM_ARM64_SVE_NUM_PREGS - 1:
+               n = (id >> 5) & (KVM_ARM64_SVE_NUM_PREGS - 1);
+               TEST_ASSERT(id == KVM_REG_ARM64_SVE_PREG(n, 0),
+                           "Unexpected bits set in SVE PREG id: 0x%llx", id);
+               return str_with_index("KVM_REG_ARM64_SVE_PREG(##, 0)", n);
+       case KVM_REG_ARM64_SVE_FFR_BASE:
+               TEST_ASSERT(id == KVM_REG_ARM64_SVE_FFR(0),
+                           "Unexpected bits set in SVE FFR id: 0x%llx", id);
+               return "KVM_REG_ARM64_SVE_FFR(0)";
+       }
+
+       return NULL;
+}
+
+static void print_reg(__u64 id)
+{
+       unsigned op0, op1, crn, crm, op2;
+       const char *reg_size = NULL;
+
+       TEST_ASSERT((id & KVM_REG_ARCH_MASK) == KVM_REG_ARM64,
+                   "KVM_REG_ARM64 missing in reg id: 0x%llx", id);
+
+       switch (id & KVM_REG_SIZE_MASK) {
+       case KVM_REG_SIZE_U8:
+               reg_size = "KVM_REG_SIZE_U8";
+               break;
+       case KVM_REG_SIZE_U16:
+               reg_size = "KVM_REG_SIZE_U16";
+               break;
+       case KVM_REG_SIZE_U32:
+               reg_size = "KVM_REG_SIZE_U32";
+               break;
+       case KVM_REG_SIZE_U64:
+               reg_size = "KVM_REG_SIZE_U64";
+               break;
+       case KVM_REG_SIZE_U128:
+               reg_size = "KVM_REG_SIZE_U128";
+               break;
+       case KVM_REG_SIZE_U256:
+               reg_size = "KVM_REG_SIZE_U256";
+               break;
+       case KVM_REG_SIZE_U512:
+               reg_size = "KVM_REG_SIZE_U512";
+               break;
+       case KVM_REG_SIZE_U1024:
+               reg_size = "KVM_REG_SIZE_U1024";
+               break;
+       case KVM_REG_SIZE_U2048:
+               reg_size = "KVM_REG_SIZE_U2048";
+               break;
+       default:
+               TEST_FAIL("Unexpected reg size: 0x%llx in reg id: 0x%llx",
+                         (id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT, id);
+       }
+
+       switch (id & KVM_REG_ARM_COPROC_MASK) {
+       case KVM_REG_ARM_CORE:
+               printf("\tKVM_REG_ARM64 | %s | KVM_REG_ARM_CORE | %s,\n", reg_size, core_id_to_str(id));
+               break;
+       case KVM_REG_ARM_DEMUX:
+               TEST_ASSERT(!(id & ~(REG_MASK | KVM_REG_ARM_DEMUX_ID_MASK | KVM_REG_ARM_DEMUX_VAL_MASK)),
+                           "Unexpected bits set in DEMUX reg id: 0x%llx", id);
+               printf("\tKVM_REG_ARM64 | %s | KVM_REG_ARM_DEMUX | KVM_REG_ARM_DEMUX_ID_CCSIDR | %lld,\n",
+                      reg_size, id & KVM_REG_ARM_DEMUX_VAL_MASK);
+               break;
+       case KVM_REG_ARM64_SYSREG:
+               op0 = (id & KVM_REG_ARM64_SYSREG_OP0_MASK) >> KVM_REG_ARM64_SYSREG_OP0_SHIFT;
+               op1 = (id & KVM_REG_ARM64_SYSREG_OP1_MASK) >> KVM_REG_ARM64_SYSREG_OP1_SHIFT;
+               crn = (id & KVM_REG_ARM64_SYSREG_CRN_MASK) >> KVM_REG_ARM64_SYSREG_CRN_SHIFT;
+               crm = (id & KVM_REG_ARM64_SYSREG_CRM_MASK) >> KVM_REG_ARM64_SYSREG_CRM_SHIFT;
+               op2 = (id & KVM_REG_ARM64_SYSREG_OP2_MASK) >> KVM_REG_ARM64_SYSREG_OP2_SHIFT;
+               TEST_ASSERT(id == ARM64_SYS_REG(op0, op1, crn, crm, op2),
+                           "Unexpected bits set in SYSREG reg id: 0x%llx", id);
+               printf("\tARM64_SYS_REG(%d, %d, %d, %d, %d),\n", op0, op1, crn, crm, op2);
+               break;
+       case KVM_REG_ARM_FW:
+               TEST_ASSERT(id == KVM_REG_ARM_FW_REG(id & 0xffff),
+                           "Unexpected bits set in FW reg id: 0x%llx", id);
+               printf("\tKVM_REG_ARM_FW_REG(%lld),\n", id & 0xffff);
+               break;
+       case KVM_REG_ARM64_SVE:
+               if (reg_list_sve())
+                       printf("\t%s,\n", sve_id_to_str(id));
+               else
+                       TEST_FAIL("KVM_REG_ARM64_SVE is an unexpected coproc type in reg id: 0x%llx", id);
+               break;
+       default:
+               TEST_FAIL("Unexpected coproc type: 0x%llx in reg id: 0x%llx",
+                         (id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT, id);
+       }
+}
+
+/*
+ * Older kernels listed each 32-bit word of CORE registers separately.
+ * For 64 and 128-bit registers we need to ignore the extra words. We
+ * also need to fixup the sizes, because the older kernels stated all
+ * registers were 64-bit, even when they weren't.
+ */
+static void core_reg_fixup(void)
+{
+       struct kvm_reg_list *tmp;
+       __u64 id, core_off;
+       int i;
+
+       tmp = calloc(1, sizeof(*tmp) + reg_list->n * sizeof(__u64));
+
+       for (i = 0; i < reg_list->n; ++i) {
+               id = reg_list->reg[i];
+
+               if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM_CORE) {
+                       tmp->reg[tmp->n++] = id;
+                       continue;
+               }
+
+               core_off = id & ~REG_MASK;
+
+               switch (core_off) {
+               case 0x52: case 0xd2: case 0xd6:
+                       /*
+                        * These offsets are pointing at padding.
+                        * We need to ignore them too.
+                        */
+                       continue;
+               case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
+                    KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
+                       if (core_off & 3)
+                               continue;
+                       id &= ~KVM_REG_SIZE_MASK;
+                       id |= KVM_REG_SIZE_U128;
+                       tmp->reg[tmp->n++] = id;
+                       continue;
+               case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
+               case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
+                       id &= ~KVM_REG_SIZE_MASK;
+                       id |= KVM_REG_SIZE_U32;
+                       tmp->reg[tmp->n++] = id;
+                       continue;
+               default:
+                       if (core_off & 1)
+                               continue;
+                       tmp->reg[tmp->n++] = id;
+                       break;
+               }
+       }
+
+       free(reg_list);
+       reg_list = tmp;
+}
+
+static void prepare_vcpu_init(struct kvm_vcpu_init *init)
+{
+       if (reg_list_sve())
+               init->features[0] |= 1 << KVM_ARM_VCPU_SVE;
+}
+
+static void finalize_vcpu(struct kvm_vm *vm, uint32_t vcpuid)
+{
+       int feature;
+
+       if (reg_list_sve()) {
+               feature = KVM_ARM_VCPU_SVE;
+               vcpu_ioctl(vm, vcpuid, KVM_ARM_VCPU_FINALIZE, &feature);
+       }
+}
+
+static void check_supported(void)
+{
+       if (reg_list_sve() && !kvm_check_cap(KVM_CAP_ARM_SVE)) {
+               fprintf(stderr, "SVE not available, skipping tests\n");
+               exit(KSFT_SKIP);
+       }
+}
+
+int main(int ac, char **av)
+{
+       struct kvm_vcpu_init init = { .target = -1, };
+       int new_regs = 0, missing_regs = 0, i;
+       int failed_get = 0, failed_set = 0, failed_reject = 0;
+       bool print_list = false, fixup_core_regs = false;
+       struct kvm_vm *vm;
+       __u64 *vec_regs;
+
+       check_supported();
+
+       for (i = 1; i < ac; ++i) {
+               if (strcmp(av[i], "--core-reg-fixup") == 0)
+                       fixup_core_regs = true;
+               else if (strcmp(av[i], "--list") == 0)
+                       print_list = true;
+               else
+                       fprintf(stderr, "Ignoring unknown option: %s\n", av[i]);
+       }
+
+       vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
+       prepare_vcpu_init(&init);
+       aarch64_vcpu_add_default(vm, 0, &init, NULL);
+       finalize_vcpu(vm, 0);
+
+       reg_list = vcpu_get_reg_list(vm, 0);
+
+       if (fixup_core_regs)
+               core_reg_fixup();
+
+       if (print_list) {
+               putchar('\n');
+               for_each_reg(i)
+                       print_reg(reg_list->reg[i]);
+               putchar('\n');
+               return 0;
+       }
+
+       /*
+        * We only test that we can get the register and then write back the
+        * same value. Some registers may allow other values to be written
+        * back, but others only allow some bits to be changed, and at least
+        * for ID registers set will fail if the value does not exactly match
+        * what was returned by get. If registers that allow other values to
+        * be written need to have the other values tested, then we should
+        * create a new set of tests for those in a new independent test
+        * executable.
+        */
+       for_each_reg(i) {
+               uint8_t addr[2048 / 8];
+               struct kvm_one_reg reg = {
+                       .id = reg_list->reg[i],
+                       .addr = (__u64)&addr,
+               };
+               int ret;
+
+               ret = _vcpu_ioctl(vm, 0, KVM_GET_ONE_REG, &reg);
+               if (ret) {
+                       puts("Failed to get ");
+                       print_reg(reg.id);
+                       putchar('\n');
+                       ++failed_get;
+               }
+
+               /* rejects_set registers are rejected after KVM_ARM_VCPU_FINALIZE */
+               if (find_reg(rejects_set, rejects_set_n, reg.id)) {
+                       ret = _vcpu_ioctl(vm, 0, KVM_SET_ONE_REG, &reg);
+                       if (ret != -1 || errno != EPERM) {
+                               printf("Failed to reject (ret=%d, errno=%d) ", ret, errno);
+                               print_reg(reg.id);
+                               putchar('\n');
+                               ++failed_reject;
+                       }
+                       continue;
+               }
+
+               ret = _vcpu_ioctl(vm, 0, KVM_SET_ONE_REG, &reg);
+               if (ret) {
+                       puts("Failed to set ");
+                       print_reg(reg.id);
+                       putchar('\n');
+                       ++failed_set;
+               }
+       }
+
+       if (reg_list_sve()) {
+               blessed_n = base_regs_n + sve_regs_n;
+               vec_regs = sve_regs;
+       } else {
+               blessed_n = base_regs_n + vregs_n;
+               vec_regs = vregs;
+       }
+
+       blessed_reg = calloc(blessed_n, sizeof(__u64));
+       for (i = 0; i < base_regs_n; ++i)
+               blessed_reg[i] = base_regs[i];
+       for (i = 0; i < blessed_n - base_regs_n; ++i)
+               blessed_reg[base_regs_n + i] = vec_regs[i];
+
+       for_each_new_reg(i)
+               ++new_regs;
+
+       for_each_missing_reg(i)
+               ++missing_regs;
+
+       if (new_regs || missing_regs) {
+               printf("Number blessed registers: %5lld\n", blessed_n);
+               printf("Number registers:         %5lld\n", reg_list->n);
+       }
+
+       if (new_regs) {
+               printf("\nThere are %d new registers.\n"
+                      "Consider adding them to the blessed reg "
+                      "list with the following lines:\n\n", new_regs);
+               for_each_new_reg(i)
+                       print_reg(reg_list->reg[i]);
+               putchar('\n');
+       }
+
+       if (missing_regs) {
+               printf("\nThere are %d missing registers.\n"
+                      "The following lines are missing registers:\n\n", missing_regs);
+               for_each_missing_reg(i)
+                       print_reg(blessed_reg[i]);
+               putchar('\n');
+       }
+
+       TEST_ASSERT(!missing_regs && !failed_get && !failed_set && !failed_reject,
+                   "There are %d missing registers; "
+                   "%d registers failed get; %d registers failed set; %d registers failed reject",
+                   missing_regs, failed_get, failed_set, failed_reject);
+
+       return 0;
+}
+
+/*
+ * The current blessed list was primed with the output of kernel version
+ * v4.15 with --core-reg-fixup and then later updated with new registers.
+ */
+static __u64 base_regs[] = {
+       KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[0]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[1]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[2]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[3]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[4]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[5]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[6]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[7]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[8]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[9]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[10]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[11]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[12]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[13]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[14]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[15]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[16]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[17]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[18]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[19]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[20]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[21]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[22]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[23]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[24]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[25]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[26]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[27]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[28]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[29]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[30]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.sp),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.pc),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.pstate),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(sp_el1),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(elr_el1),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[0]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[1]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[2]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[3]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[4]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.fpsr),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.fpcr),
+       KVM_REG_ARM_FW_REG(0),
+       KVM_REG_ARM_FW_REG(1),
+       KVM_REG_ARM_FW_REG(2),
+       ARM64_SYS_REG(3, 3, 14, 3, 1),  /* CNTV_CTL_EL0 */
+       ARM64_SYS_REG(3, 3, 14, 3, 2),  /* CNTV_CVAL_EL0 */
+       ARM64_SYS_REG(3, 3, 14, 0, 2),
+       ARM64_SYS_REG(3, 0, 0, 0, 0),   /* MIDR_EL1 */
+       ARM64_SYS_REG(3, 0, 0, 0, 6),   /* REVIDR_EL1 */
+       ARM64_SYS_REG(3, 1, 0, 0, 1),   /* CLIDR_EL1 */
+       ARM64_SYS_REG(3, 1, 0, 0, 7),   /* AIDR_EL1 */
+       ARM64_SYS_REG(3, 3, 0, 0, 1),   /* CTR_EL0 */
+       ARM64_SYS_REG(2, 0, 0, 0, 4),
+       ARM64_SYS_REG(2, 0, 0, 0, 5),
+       ARM64_SYS_REG(2, 0, 0, 0, 6),
+       ARM64_SYS_REG(2, 0, 0, 0, 7),
+       ARM64_SYS_REG(2, 0, 0, 1, 4),
+       ARM64_SYS_REG(2, 0, 0, 1, 5),
+       ARM64_SYS_REG(2, 0, 0, 1, 6),
+       ARM64_SYS_REG(2, 0, 0, 1, 7),
+       ARM64_SYS_REG(2, 0, 0, 2, 0),   /* MDCCINT_EL1 */
+       ARM64_SYS_REG(2, 0, 0, 2, 2),   /* MDSCR_EL1 */
+       ARM64_SYS_REG(2, 0, 0, 2, 4),
+       ARM64_SYS_REG(2, 0, 0, 2, 5),
+       ARM64_SYS_REG(2, 0, 0, 2, 6),
+       ARM64_SYS_REG(2, 0, 0, 2, 7),
+       ARM64_SYS_REG(2, 0, 0, 3, 4),
+       ARM64_SYS_REG(2, 0, 0, 3, 5),
+       ARM64_SYS_REG(2, 0, 0, 3, 6),
+       ARM64_SYS_REG(2, 0, 0, 3, 7),
+       ARM64_SYS_REG(2, 0, 0, 4, 4),
+       ARM64_SYS_REG(2, 0, 0, 4, 5),
+       ARM64_SYS_REG(2, 0, 0, 4, 6),
+       ARM64_SYS_REG(2, 0, 0, 4, 7),
+       ARM64_SYS_REG(2, 0, 0, 5, 4),
+       ARM64_SYS_REG(2, 0, 0, 5, 5),
+       ARM64_SYS_REG(2, 0, 0, 5, 6),
+       ARM64_SYS_REG(2, 0, 0, 5, 7),
+       ARM64_SYS_REG(2, 0, 0, 6, 4),
+       ARM64_SYS_REG(2, 0, 0, 6, 5),
+       ARM64_SYS_REG(2, 0, 0, 6, 6),
+       ARM64_SYS_REG(2, 0, 0, 6, 7),
+       ARM64_SYS_REG(2, 0, 0, 7, 4),
+       ARM64_SYS_REG(2, 0, 0, 7, 5),
+       ARM64_SYS_REG(2, 0, 0, 7, 6),
+       ARM64_SYS_REG(2, 0, 0, 7, 7),
+       ARM64_SYS_REG(2, 0, 0, 8, 4),
+       ARM64_SYS_REG(2, 0, 0, 8, 5),
+       ARM64_SYS_REG(2, 0, 0, 8, 6),
+       ARM64_SYS_REG(2, 0, 0, 8, 7),
+       ARM64_SYS_REG(2, 0, 0, 9, 4),
+       ARM64_SYS_REG(2, 0, 0, 9, 5),
+       ARM64_SYS_REG(2, 0, 0, 9, 6),
+       ARM64_SYS_REG(2, 0, 0, 9, 7),
+       ARM64_SYS_REG(2, 0, 0, 10, 4),
+       ARM64_SYS_REG(2, 0, 0, 10, 5),
+       ARM64_SYS_REG(2, 0, 0, 10, 6),
+       ARM64_SYS_REG(2, 0, 0, 10, 7),
+       ARM64_SYS_REG(2, 0, 0, 11, 4),
+       ARM64_SYS_REG(2, 0, 0, 11, 5),
+       ARM64_SYS_REG(2, 0, 0, 11, 6),
+       ARM64_SYS_REG(2, 0, 0, 11, 7),
+       ARM64_SYS_REG(2, 0, 0, 12, 4),
+       ARM64_SYS_REG(2, 0, 0, 12, 5),
+       ARM64_SYS_REG(2, 0, 0, 12, 6),
+       ARM64_SYS_REG(2, 0, 0, 12, 7),
+       ARM64_SYS_REG(2, 0, 0, 13, 4),
+       ARM64_SYS_REG(2, 0, 0, 13, 5),
+       ARM64_SYS_REG(2, 0, 0, 13, 6),
+       ARM64_SYS_REG(2, 0, 0, 13, 7),
+       ARM64_SYS_REG(2, 0, 0, 14, 4),
+       ARM64_SYS_REG(2, 0, 0, 14, 5),
+       ARM64_SYS_REG(2, 0, 0, 14, 6),
+       ARM64_SYS_REG(2, 0, 0, 14, 7),
+       ARM64_SYS_REG(2, 0, 0, 15, 4),
+       ARM64_SYS_REG(2, 0, 0, 15, 5),
+       ARM64_SYS_REG(2, 0, 0, 15, 6),
+       ARM64_SYS_REG(2, 0, 0, 15, 7),
+       ARM64_SYS_REG(2, 4, 0, 7, 0),   /* DBGVCR32_EL2 */
+       ARM64_SYS_REG(3, 0, 0, 0, 5),   /* MPIDR_EL1 */
+       ARM64_SYS_REG(3, 0, 0, 1, 0),   /* ID_PFR0_EL1 */
+       ARM64_SYS_REG(3, 0, 0, 1, 1),   /* ID_PFR1_EL1 */
+       ARM64_SYS_REG(3, 0, 0, 1, 2),   /* ID_DFR0_EL1 */
+       ARM64_SYS_REG(3, 0, 0, 1, 3),   /* ID_AFR0_EL1 */
+       ARM64_SYS_REG(3, 0, 0, 1, 4),   /* ID_MMFR0_EL1 */
+       ARM64_SYS_REG(3, 0, 0, 1, 5),   /* ID_MMFR1_EL1 */
+       ARM64_SYS_REG(3, 0, 0, 1, 6),   /* ID_MMFR2_EL1 */
+       ARM64_SYS_REG(3, 0, 0, 1, 7),   /* ID_MMFR3_EL1 */
+       ARM64_SYS_REG(3, 0, 0, 2, 0),   /* ID_ISAR0_EL1 */
+       ARM64_SYS_REG(3, 0, 0, 2, 1),   /* ID_ISAR1_EL1 */
+       ARM64_SYS_REG(3, 0, 0, 2, 2),   /* ID_ISAR2_EL1 */
+       ARM64_SYS_REG(3, 0, 0, 2, 3),   /* ID_ISAR3_EL1 */
+       ARM64_SYS_REG(3, 0, 0, 2, 4),   /* ID_ISAR4_EL1 */
+       ARM64_SYS_REG(3, 0, 0, 2, 5),   /* ID_ISAR5_EL1 */
+       ARM64_SYS_REG(3, 0, 0, 2, 6),   /* ID_MMFR4_EL1 */
+       ARM64_SYS_REG(3, 0, 0, 2, 7),   /* ID_ISAR6_EL1 */
+       ARM64_SYS_REG(3, 0, 0, 3, 0),   /* MVFR0_EL1 */
+       ARM64_SYS_REG(3, 0, 0, 3, 1),   /* MVFR1_EL1 */
+       ARM64_SYS_REG(3, 0, 0, 3, 2),   /* MVFR2_EL1 */
+       ARM64_SYS_REG(3, 0, 0, 3, 3),
+       ARM64_SYS_REG(3, 0, 0, 3, 4),   /* ID_PFR2_EL1 */
+       ARM64_SYS_REG(3, 0, 0, 3, 5),   /* ID_DFR1_EL1 */
+       ARM64_SYS_REG(3, 0, 0, 3, 6),   /* ID_MMFR5_EL1 */
+       ARM64_SYS_REG(3, 0, 0, 3, 7),
+       ARM64_SYS_REG(3, 0, 0, 4, 0),   /* ID_AA64PFR0_EL1 */
+       ARM64_SYS_REG(3, 0, 0, 4, 1),   /* ID_AA64PFR1_EL1 */
+       ARM64_SYS_REG(3, 0, 0, 4, 2),
+       ARM64_SYS_REG(3, 0, 0, 4, 3),
+       ARM64_SYS_REG(3, 0, 0, 4, 4),   /* ID_AA64ZFR0_EL1 */
+       ARM64_SYS_REG(3, 0, 0, 4, 5),
+       ARM64_SYS_REG(3, 0, 0, 4, 6),
+       ARM64_SYS_REG(3, 0, 0, 4, 7),
+       ARM64_SYS_REG(3, 0, 0, 5, 0),   /* ID_AA64DFR0_EL1 */
+       ARM64_SYS_REG(3, 0, 0, 5, 1),   /* ID_AA64DFR1_EL1 */
+       ARM64_SYS_REG(3, 0, 0, 5, 2),
+       ARM64_SYS_REG(3, 0, 0, 5, 3),
+       ARM64_SYS_REG(3, 0, 0, 5, 4),   /* ID_AA64AFR0_EL1 */
+       ARM64_SYS_REG(3, 0, 0, 5, 5),   /* ID_AA64AFR1_EL1 */
+       ARM64_SYS_REG(3, 0, 0, 5, 6),
+       ARM64_SYS_REG(3, 0, 0, 5, 7),
+       ARM64_SYS_REG(3, 0, 0, 6, 0),   /* ID_AA64ISAR0_EL1 */
+       ARM64_SYS_REG(3, 0, 0, 6, 1),   /* ID_AA64ISAR1_EL1 */
+       ARM64_SYS_REG(3, 0, 0, 6, 2),
+       ARM64_SYS_REG(3, 0, 0, 6, 3),
+       ARM64_SYS_REG(3, 0, 0, 6, 4),
+       ARM64_SYS_REG(3, 0, 0, 6, 5),
+       ARM64_SYS_REG(3, 0, 0, 6, 6),
+       ARM64_SYS_REG(3, 0, 0, 6, 7),
+       ARM64_SYS_REG(3, 0, 0, 7, 0),   /* ID_AA64MMFR0_EL1 */
+       ARM64_SYS_REG(3, 0, 0, 7, 1),   /* ID_AA64MMFR1_EL1 */
+       ARM64_SYS_REG(3, 0, 0, 7, 2),   /* ID_AA64MMFR2_EL1 */
+       ARM64_SYS_REG(3, 0, 0, 7, 3),
+       ARM64_SYS_REG(3, 0, 0, 7, 4),
+       ARM64_SYS_REG(3, 0, 0, 7, 5),
+       ARM64_SYS_REG(3, 0, 0, 7, 6),
+       ARM64_SYS_REG(3, 0, 0, 7, 7),
+       ARM64_SYS_REG(3, 0, 1, 0, 0),   /* SCTLR_EL1 */
+       ARM64_SYS_REG(3, 0, 1, 0, 1),   /* ACTLR_EL1 */
+       ARM64_SYS_REG(3, 0, 1, 0, 2),   /* CPACR_EL1 */
+       ARM64_SYS_REG(3, 0, 2, 0, 0),   /* TTBR0_EL1 */
+       ARM64_SYS_REG(3, 0, 2, 0, 1),   /* TTBR1_EL1 */
+       ARM64_SYS_REG(3, 0, 2, 0, 2),   /* TCR_EL1 */
+       ARM64_SYS_REG(3, 0, 5, 1, 0),   /* AFSR0_EL1 */
+       ARM64_SYS_REG(3, 0, 5, 1, 1),   /* AFSR1_EL1 */
+       ARM64_SYS_REG(3, 0, 5, 2, 0),   /* ESR_EL1 */
+       ARM64_SYS_REG(3, 0, 6, 0, 0),   /* FAR_EL1 */
+       ARM64_SYS_REG(3, 0, 7, 4, 0),   /* PAR_EL1 */
+       ARM64_SYS_REG(3, 0, 9, 14, 1),  /* PMINTENSET_EL1 */
+       ARM64_SYS_REG(3, 0, 9, 14, 2),  /* PMINTENCLR_EL1 */
+       ARM64_SYS_REG(3, 0, 10, 2, 0),  /* MAIR_EL1 */
+       ARM64_SYS_REG(3, 0, 10, 3, 0),  /* AMAIR_EL1 */
+       ARM64_SYS_REG(3, 0, 12, 0, 0),  /* VBAR_EL1 */
+       ARM64_SYS_REG(3, 0, 12, 1, 1),  /* DISR_EL1 */
+       ARM64_SYS_REG(3, 0, 13, 0, 1),  /* CONTEXTIDR_EL1 */
+       ARM64_SYS_REG(3, 0, 13, 0, 4),  /* TPIDR_EL1 */
+       ARM64_SYS_REG(3, 0, 14, 1, 0),  /* CNTKCTL_EL1 */
+       ARM64_SYS_REG(3, 2, 0, 0, 0),   /* CSSELR_EL1 */
+       ARM64_SYS_REG(3, 3, 9, 12, 0),  /* PMCR_EL0 */
+       ARM64_SYS_REG(3, 3, 9, 12, 1),  /* PMCNTENSET_EL0 */
+       ARM64_SYS_REG(3, 3, 9, 12, 2),  /* PMCNTENCLR_EL0 */
+       ARM64_SYS_REG(3, 3, 9, 12, 3),  /* PMOVSCLR_EL0 */
+       ARM64_SYS_REG(3, 3, 9, 12, 4),  /* PMSWINC_EL0 */
+       ARM64_SYS_REG(3, 3, 9, 12, 5),  /* PMSELR_EL0 */
+       ARM64_SYS_REG(3, 3, 9, 13, 0),  /* PMCCNTR_EL0 */
+       ARM64_SYS_REG(3, 3, 9, 14, 0),  /* PMUSERENR_EL0 */
+       ARM64_SYS_REG(3, 3, 9, 14, 3),  /* PMOVSSET_EL0 */
+       ARM64_SYS_REG(3, 3, 13, 0, 2),  /* TPIDR_EL0 */
+       ARM64_SYS_REG(3, 3, 13, 0, 3),  /* TPIDRRO_EL0 */
+       ARM64_SYS_REG(3, 3, 14, 8, 0),
+       ARM64_SYS_REG(3, 3, 14, 8, 1),
+       ARM64_SYS_REG(3, 3, 14, 8, 2),
+       ARM64_SYS_REG(3, 3, 14, 8, 3),
+       ARM64_SYS_REG(3, 3, 14, 8, 4),
+       ARM64_SYS_REG(3, 3, 14, 8, 5),
+       ARM64_SYS_REG(3, 3, 14, 8, 6),
+       ARM64_SYS_REG(3, 3, 14, 8, 7),
+       ARM64_SYS_REG(3, 3, 14, 9, 0),
+       ARM64_SYS_REG(3, 3, 14, 9, 1),
+       ARM64_SYS_REG(3, 3, 14, 9, 2),
+       ARM64_SYS_REG(3, 3, 14, 9, 3),
+       ARM64_SYS_REG(3, 3, 14, 9, 4),
+       ARM64_SYS_REG(3, 3, 14, 9, 5),
+       ARM64_SYS_REG(3, 3, 14, 9, 6),
+       ARM64_SYS_REG(3, 3, 14, 9, 7),
+       ARM64_SYS_REG(3, 3, 14, 10, 0),
+       ARM64_SYS_REG(3, 3, 14, 10, 1),
+       ARM64_SYS_REG(3, 3, 14, 10, 2),
+       ARM64_SYS_REG(3, 3, 14, 10, 3),
+       ARM64_SYS_REG(3, 3, 14, 10, 4),
+       ARM64_SYS_REG(3, 3, 14, 10, 5),
+       ARM64_SYS_REG(3, 3, 14, 10, 6),
+       ARM64_SYS_REG(3, 3, 14, 10, 7),
+       ARM64_SYS_REG(3, 3, 14, 11, 0),
+       ARM64_SYS_REG(3, 3, 14, 11, 1),
+       ARM64_SYS_REG(3, 3, 14, 11, 2),
+       ARM64_SYS_REG(3, 3, 14, 11, 3),
+       ARM64_SYS_REG(3, 3, 14, 11, 4),
+       ARM64_SYS_REG(3, 3, 14, 11, 5),
+       ARM64_SYS_REG(3, 3, 14, 11, 6),
+       ARM64_SYS_REG(3, 3, 14, 12, 0),
+       ARM64_SYS_REG(3, 3, 14, 12, 1),
+       ARM64_SYS_REG(3, 3, 14, 12, 2),
+       ARM64_SYS_REG(3, 3, 14, 12, 3),
+       ARM64_SYS_REG(3, 3, 14, 12, 4),
+       ARM64_SYS_REG(3, 3, 14, 12, 5),
+       ARM64_SYS_REG(3, 3, 14, 12, 6),
+       ARM64_SYS_REG(3, 3, 14, 12, 7),
+       ARM64_SYS_REG(3, 3, 14, 13, 0),
+       ARM64_SYS_REG(3, 3, 14, 13, 1),
+       ARM64_SYS_REG(3, 3, 14, 13, 2),
+       ARM64_SYS_REG(3, 3, 14, 13, 3),
+       ARM64_SYS_REG(3, 3, 14, 13, 4),
+       ARM64_SYS_REG(3, 3, 14, 13, 5),
+       ARM64_SYS_REG(3, 3, 14, 13, 6),
+       ARM64_SYS_REG(3, 3, 14, 13, 7),
+       ARM64_SYS_REG(3, 3, 14, 14, 0),
+       ARM64_SYS_REG(3, 3, 14, 14, 1),
+       ARM64_SYS_REG(3, 3, 14, 14, 2),
+       ARM64_SYS_REG(3, 3, 14, 14, 3),
+       ARM64_SYS_REG(3, 3, 14, 14, 4),
+       ARM64_SYS_REG(3, 3, 14, 14, 5),
+       ARM64_SYS_REG(3, 3, 14, 14, 6),
+       ARM64_SYS_REG(3, 3, 14, 14, 7),
+       ARM64_SYS_REG(3, 3, 14, 15, 0),
+       ARM64_SYS_REG(3, 3, 14, 15, 1),
+       ARM64_SYS_REG(3, 3, 14, 15, 2),
+       ARM64_SYS_REG(3, 3, 14, 15, 3),
+       ARM64_SYS_REG(3, 3, 14, 15, 4),
+       ARM64_SYS_REG(3, 3, 14, 15, 5),
+       ARM64_SYS_REG(3, 3, 14, 15, 6),
+       ARM64_SYS_REG(3, 3, 14, 15, 7), /* PMCCFILTR_EL0 */
+       ARM64_SYS_REG(3, 4, 3, 0, 0),   /* DACR32_EL2 */
+       ARM64_SYS_REG(3, 4, 5, 0, 1),   /* IFSR32_EL2 */
+       ARM64_SYS_REG(3, 4, 5, 3, 0),   /* FPEXC32_EL2 */
+       KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX | KVM_REG_ARM_DEMUX_ID_CCSIDR | 0,
+       KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX | KVM_REG_ARM_DEMUX_ID_CCSIDR | 1,
+       KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX | KVM_REG_ARM_DEMUX_ID_CCSIDR | 2,
+};
+static __u64 base_regs_n = ARRAY_SIZE(base_regs);
+
+static __u64 vregs[] = {
+       KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[1]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[2]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[3]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[4]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[5]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[6]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[7]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[8]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[9]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[10]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[11]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[12]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[13]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[14]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[15]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[16]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[17]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[18]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[19]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[20]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[21]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[22]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[23]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[24]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[25]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[26]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[27]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[28]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[29]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[30]),
+       KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]),
+};
+static __u64 vregs_n = ARRAY_SIZE(vregs);
+
+static __u64 sve_regs[] = {
+       KVM_REG_ARM64_SVE_VLS,
+       KVM_REG_ARM64_SVE_ZREG(0, 0),
+       KVM_REG_ARM64_SVE_ZREG(1, 0),
+       KVM_REG_ARM64_SVE_ZREG(2, 0),
+       KVM_REG_ARM64_SVE_ZREG(3, 0),
+       KVM_REG_ARM64_SVE_ZREG(4, 0),
+       KVM_REG_ARM64_SVE_ZREG(5, 0),
+       KVM_REG_ARM64_SVE_ZREG(6, 0),
+       KVM_REG_ARM64_SVE_ZREG(7, 0),
+       KVM_REG_ARM64_SVE_ZREG(8, 0),
+       KVM_REG_ARM64_SVE_ZREG(9, 0),
+       KVM_REG_ARM64_SVE_ZREG(10, 0),
+       KVM_REG_ARM64_SVE_ZREG(11, 0),
+       KVM_REG_ARM64_SVE_ZREG(12, 0),
+       KVM_REG_ARM64_SVE_ZREG(13, 0),
+       KVM_REG_ARM64_SVE_ZREG(14, 0),
+       KVM_REG_ARM64_SVE_ZREG(15, 0),
+       KVM_REG_ARM64_SVE_ZREG(16, 0),
+       KVM_REG_ARM64_SVE_ZREG(17, 0),
+       KVM_REG_ARM64_SVE_ZREG(18, 0),
+       KVM_REG_ARM64_SVE_ZREG(19, 0),
+       KVM_REG_ARM64_SVE_ZREG(20, 0),
+       KVM_REG_ARM64_SVE_ZREG(21, 0),
+       KVM_REG_ARM64_SVE_ZREG(22, 0),
+       KVM_REG_ARM64_SVE_ZREG(23, 0),
+       KVM_REG_ARM64_SVE_ZREG(24, 0),
+       KVM_REG_ARM64_SVE_ZREG(25, 0),
+       KVM_REG_ARM64_SVE_ZREG(26, 0),
+       KVM_REG_ARM64_SVE_ZREG(27, 0),
+       KVM_REG_ARM64_SVE_ZREG(28, 0),
+       KVM_REG_ARM64_SVE_ZREG(29, 0),
+       KVM_REG_ARM64_SVE_ZREG(30, 0),
+       KVM_REG_ARM64_SVE_ZREG(31, 0),
+       KVM_REG_ARM64_SVE_PREG(0, 0),
+       KVM_REG_ARM64_SVE_PREG(1, 0),
+       KVM_REG_ARM64_SVE_PREG(2, 0),
+       KVM_REG_ARM64_SVE_PREG(3, 0),
+       KVM_REG_ARM64_SVE_PREG(4, 0),
+       KVM_REG_ARM64_SVE_PREG(5, 0),
+       KVM_REG_ARM64_SVE_PREG(6, 0),
+       KVM_REG_ARM64_SVE_PREG(7, 0),
+       KVM_REG_ARM64_SVE_PREG(8, 0),
+       KVM_REG_ARM64_SVE_PREG(9, 0),
+       KVM_REG_ARM64_SVE_PREG(10, 0),
+       KVM_REG_ARM64_SVE_PREG(11, 0),
+       KVM_REG_ARM64_SVE_PREG(12, 0),
+       KVM_REG_ARM64_SVE_PREG(13, 0),
+       KVM_REG_ARM64_SVE_PREG(14, 0),
+       KVM_REG_ARM64_SVE_PREG(15, 0),
+       KVM_REG_ARM64_SVE_FFR(0),
+       ARM64_SYS_REG(3, 0, 1, 2, 0),   /* ZCR_EL1 */
+};
+static __u64 sve_regs_n = ARRAY_SIZE(sve_regs);
+
+static __u64 rejects_set[] = {
+#ifdef REG_LIST_SVE
+       KVM_REG_ARM64_SVE_VLS,
+#endif
+};
+static __u64 rejects_set_n = ARRAY_SIZE(rejects_set);
diff --git a/tools/testing/selftests/kvm/clear_dirty_log_test.c b/tools/testing/selftests/kvm/clear_dirty_log_test.c
deleted file mode 100644 (file)
index 11672ec..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#define USE_CLEAR_DIRTY_LOG
-#define KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE (1 << 0)
-#define KVM_DIRTY_LOG_INITIALLY_SET         (1 << 1)
-#define KVM_DIRTY_LOG_MANUAL_CAPS   (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
-               KVM_DIRTY_LOG_INITIALLY_SET)
-#include "dirty_log_test.c"
index 360cd3e..3d96a7b 100644 (file)
 #include <linux/bitops.h>
 #include <linux/userfaultfd.h>
 
-#include "test_util.h"
-#include "kvm_util.h"
+#include "perf_test_util.h"
 #include "processor.h"
+#include "test_util.h"
 
 #ifdef __NR_userfaultfd
 
-/* The memory slot index demand page */
-#define TEST_MEM_SLOT_INDEX            1
-
-/* Default guest test virtual memory offset */
-#define DEFAULT_GUEST_TEST_MEM         0xc0000000
-
-#define DEFAULT_GUEST_TEST_MEM_SIZE (1 << 30) /* 1G */
-
 #ifdef PRINT_PER_PAGE_UPDATES
 #define PER_PAGE_DEBUG(...) printf(__VA_ARGS__)
 #else
 #define PER_VCPU_DEBUG(...) _no_printf(__VA_ARGS__)
 #endif
 
-#define MAX_VCPUS 512
-
-/*
- * Guest/Host shared variables. Ensure addr_gva2hva() and/or
- * sync_global_to/from_guest() are used when accessing from
- * the host. READ/WRITE_ONCE() should also be used with anything
- * that may change.
- */
-static uint64_t host_page_size;
-static uint64_t guest_page_size;
-
 static char *guest_data_prototype;
 
-/*
- * Guest physical memory offset of the testing memory slot.
- * This will be set to the topmost valid physical address minus
- * the test memory size.
- */
-static uint64_t guest_test_phys_mem;
-
-/*
- * Guest virtual memory offset of the testing memory slot.
- * Must not conflict with identity mapped test code.
- */
-static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
-
-struct vcpu_args {
-       uint64_t gva;
-       uint64_t pages;
-
-       /* Only used by the host userspace part of the vCPU thread */
-       int vcpu_id;
-       struct kvm_vm *vm;
-};
-
-static struct vcpu_args vcpu_args[MAX_VCPUS];
-
-/*
- * Continuously write to the first 8 bytes of each page in the demand paging
- * memory region.
- */
-static void guest_code(uint32_t vcpu_id)
-{
-       uint64_t gva;
-       uint64_t pages;
-       int i;
-
-       /* Make sure vCPU args data structure is not corrupt. */
-       GUEST_ASSERT(vcpu_args[vcpu_id].vcpu_id == vcpu_id);
-
-       gva = vcpu_args[vcpu_id].gva;
-       pages = vcpu_args[vcpu_id].pages;
-
-       for (i = 0; i < pages; i++) {
-               uint64_t addr = gva + (i * guest_page_size);
-
-               addr &= ~(host_page_size - 1);
-               *(uint64_t *)addr = 0x0123456789ABCDEF;
-       }
-
-       GUEST_SYNC(1);
-}
-
 static void *vcpu_worker(void *data)
 {
        int ret;
-       struct vcpu_args *args = (struct vcpu_args *)data;
-       struct kvm_vm *vm = args->vm;
-       int vcpu_id = args->vcpu_id;
+       struct vcpu_args *vcpu_args = (struct vcpu_args *)data;
+       int vcpu_id = vcpu_args->vcpu_id;
+       struct kvm_vm *vm = perf_test_args.vm;
        struct kvm_run *run;
-       struct timespec start, end, ts_diff;
+       struct timespec start;
+       struct timespec ts_diff;
 
        vcpu_args_set(vm, vcpu_id, 1, vcpu_id);
        run = vcpu_state(vm, vcpu_id);
@@ -133,52 +65,18 @@ static void *vcpu_worker(void *data)
                            exit_reason_str(run->exit_reason));
        }
 
-       clock_gettime(CLOCK_MONOTONIC, &end);
-       ts_diff = timespec_sub(end, start);
+       ts_diff = timespec_diff_now(start);
        PER_VCPU_DEBUG("vCPU %d execution time: %ld.%.9lds\n", vcpu_id,
                       ts_diff.tv_sec, ts_diff.tv_nsec);
 
        return NULL;
 }
 
-#define PAGE_SHIFT_4K  12
-#define PTES_PER_4K_PT 512
-
-static struct kvm_vm *create_vm(enum vm_guest_mode mode, int vcpus,
-                               uint64_t vcpu_memory_bytes)
-{
-       struct kvm_vm *vm;
-       uint64_t pages = DEFAULT_GUEST_PHY_PAGES;
-
-       /* Account for a few pages per-vCPU for stacks */
-       pages += DEFAULT_STACK_PGS * vcpus;
-
-       /*
-        * Reserve twice the ammount of memory needed to map the test region and
-        * the page table / stacks region, at 4k, for page tables. Do the
-        * calculation with 4K page size: the smallest of all archs. (e.g., 64K
-        * page size guest will need even less memory for page tables).
-        */
-       pages += (2 * pages) / PTES_PER_4K_PT;
-       pages += ((2 * vcpus * vcpu_memory_bytes) >> PAGE_SHIFT_4K) /
-                PTES_PER_4K_PT;
-       pages = vm_adjust_num_guest_pages(mode, pages);
-
-       pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
-
-       vm = _vm_create(mode, pages, O_RDWR);
-       kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
-#ifdef __x86_64__
-       vm_create_irqchip(vm);
-#endif
-       return vm;
-}
-
 static int handle_uffd_page_request(int uffd, uint64_t addr)
 {
        pid_t tid;
        struct timespec start;
-       struct timespec end;
+       struct timespec ts_diff;
        struct uffdio_copy copy;
        int r;
 
@@ -186,7 +84,7 @@ static int handle_uffd_page_request(int uffd, uint64_t addr)
 
        copy.src = (uint64_t)guest_data_prototype;
        copy.dst = addr;
-       copy.len = host_page_size;
+       copy.len = perf_test_args.host_page_size;
        copy.mode = 0;
 
        clock_gettime(CLOCK_MONOTONIC, &start);
@@ -198,12 +96,12 @@ static int handle_uffd_page_request(int uffd, uint64_t addr)
                return r;
        }
 
-       clock_gettime(CLOCK_MONOTONIC, &end);
+       ts_diff = timespec_diff_now(start);
 
        PER_PAGE_DEBUG("UFFDIO_COPY %d \t%ld ns\n", tid,
-                      timespec_to_ns(timespec_sub(end, start)));
+                      timespec_to_ns(ts_diff));
        PER_PAGE_DEBUG("Paged in %ld bytes at 0x%lx from thread %d\n",
-                      host_page_size, addr, tid);
+                      perf_test_args.host_page_size, addr, tid);
 
        return 0;
 }
@@ -223,7 +121,8 @@ static void *uffd_handler_thread_fn(void *arg)
        int pipefd = uffd_args->pipefd;
        useconds_t delay = uffd_args->delay;
        int64_t pages = 0;
-       struct timespec start, end, ts_diff;
+       struct timespec start;
+       struct timespec ts_diff;
 
        clock_gettime(CLOCK_MONOTONIC, &start);
        while (!quit_uffd_thread) {
@@ -292,8 +191,7 @@ static void *uffd_handler_thread_fn(void *arg)
                pages++;
        }
 
-       clock_gettime(CLOCK_MONOTONIC, &end);
-       ts_diff = timespec_sub(end, start);
+       ts_diff = timespec_diff_now(start);
        PER_VCPU_DEBUG("userfaulted %ld pages over %ld.%.9lds. (%f/sec)\n",
                       pages, ts_diff.tv_sec, ts_diff.tv_nsec,
                       pages / ((double)ts_diff.tv_sec + (double)ts_diff.tv_nsec / 100000000.0));
@@ -351,99 +249,54 @@ static int setup_demand_paging(struct kvm_vm *vm,
 }
 
 static void run_test(enum vm_guest_mode mode, bool use_uffd,
-                    useconds_t uffd_delay, int vcpus,
-                    uint64_t vcpu_memory_bytes)
+                    useconds_t uffd_delay)
 {
        pthread_t *vcpu_threads;
        pthread_t *uffd_handler_threads = NULL;
        struct uffd_handler_args *uffd_args = NULL;
-       struct timespec start, end, ts_diff;
+       struct timespec start;
+       struct timespec ts_diff;
        int *pipefds = NULL;
        struct kvm_vm *vm;
-       uint64_t guest_num_pages;
        int vcpu_id;
        int r;
 
-       vm = create_vm(mode, vcpus, vcpu_memory_bytes);
-
-       guest_page_size = vm_get_page_size(vm);
-
-       TEST_ASSERT(vcpu_memory_bytes % guest_page_size == 0,
-                   "Guest memory size is not guest page size aligned.");
-
-       guest_num_pages = (vcpus * vcpu_memory_bytes) / guest_page_size;
-       guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
-
-       /*
-        * If there should be more memory in the guest test region than there
-        * can be pages in the guest, it will definitely cause problems.
-        */
-       TEST_ASSERT(guest_num_pages < vm_get_max_gfn(vm),
-                   "Requested more guest memory than address space allows.\n"
-                   "    guest pages: %lx max gfn: %x vcpus: %d wss: %lx]\n",
-                   guest_num_pages, vm_get_max_gfn(vm), vcpus,
-                   vcpu_memory_bytes);
-
-       host_page_size = getpagesize();
-       TEST_ASSERT(vcpu_memory_bytes % host_page_size == 0,
-                   "Guest memory size is not host page size aligned.");
+       vm = create_vm(mode, nr_vcpus, guest_percpu_mem_size);
 
-       guest_test_phys_mem = (vm_get_max_gfn(vm) - guest_num_pages) *
-                             guest_page_size;
-       guest_test_phys_mem &= ~(host_page_size - 1);
+       perf_test_args.wr_fract = 1;
 
-#ifdef __s390x__
-       /* Align to 1M (segment size) */
-       guest_test_phys_mem &= ~((1 << 20) - 1);
-#endif
-
-       pr_info("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem);
-
-       /* Add an extra memory slot for testing demand paging */
-       vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
-                                   guest_test_phys_mem,
-                                   TEST_MEM_SLOT_INDEX,
-                                   guest_num_pages, 0);
-
-       /* Do mapping for the demand paging memory slot */
-       virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages, 0);
-
-       ucall_init(vm, NULL);
-
-       guest_data_prototype = malloc(host_page_size);
+       guest_data_prototype = malloc(perf_test_args.host_page_size);
        TEST_ASSERT(guest_data_prototype,
                    "Failed to allocate buffer for guest data pattern");
-       memset(guest_data_prototype, 0xAB, host_page_size);
+       memset(guest_data_prototype, 0xAB, perf_test_args.host_page_size);
 
-       vcpu_threads = malloc(vcpus * sizeof(*vcpu_threads));
+       vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads));
        TEST_ASSERT(vcpu_threads, "Memory allocation failed");
 
+       add_vcpus(vm, nr_vcpus, guest_percpu_mem_size);
+
        if (use_uffd) {
                uffd_handler_threads =
-                       malloc(vcpus * sizeof(*uffd_handler_threads));
+                       malloc(nr_vcpus * sizeof(*uffd_handler_threads));
                TEST_ASSERT(uffd_handler_threads, "Memory allocation failed");
 
-               uffd_args = malloc(vcpus * sizeof(*uffd_args));
+               uffd_args = malloc(nr_vcpus * sizeof(*uffd_args));
                TEST_ASSERT(uffd_args, "Memory allocation failed");
 
-               pipefds = malloc(sizeof(int) * vcpus * 2);
+               pipefds = malloc(sizeof(int) * nr_vcpus * 2);
                TEST_ASSERT(pipefds, "Unable to allocate memory for pipefd");
-       }
-
-       for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) {
-               vm_paddr_t vcpu_gpa;
-               void *vcpu_hva;
 
-               vm_vcpu_add_default(vm, vcpu_id, guest_code);
+               for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
+                       vm_paddr_t vcpu_gpa;
+                       void *vcpu_hva;
 
-               vcpu_gpa = guest_test_phys_mem + (vcpu_id * vcpu_memory_bytes);
-               PER_VCPU_DEBUG("Added VCPU %d with test mem gpa [%lx, %lx)\n",
-                              vcpu_id, vcpu_gpa, vcpu_gpa + vcpu_memory_bytes);
+                       vcpu_gpa = guest_test_phys_mem + (vcpu_id * guest_percpu_mem_size);
+                       PER_VCPU_DEBUG("Added VCPU %d with test mem gpa [%lx, %lx)\n",
+                                      vcpu_id, vcpu_gpa, vcpu_gpa + guest_percpu_mem_size);
 
-               /* Cache the HVA pointer of the region */
-               vcpu_hva = addr_gpa2hva(vm, vcpu_gpa);
+                       /* Cache the HVA pointer of the region */
+                       vcpu_hva = addr_gpa2hva(vm, vcpu_gpa);
 
-               if (use_uffd) {
                        /*
                         * Set up user fault fd to handle demand paging
                         * requests.
@@ -456,53 +309,41 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd,
                                                &uffd_handler_threads[vcpu_id],
                                                pipefds[vcpu_id * 2],
                                                uffd_delay, &uffd_args[vcpu_id],
-                                               vcpu_hva, vcpu_memory_bytes);
+                                               vcpu_hva, guest_percpu_mem_size);
                        if (r < 0)
                                exit(-r);
                }
-
-#ifdef __x86_64__
-               vcpu_set_cpuid(vm, vcpu_id, kvm_get_supported_cpuid());
-#endif
-
-               vcpu_args[vcpu_id].vm = vm;
-               vcpu_args[vcpu_id].vcpu_id = vcpu_id;
-               vcpu_args[vcpu_id].gva = guest_test_virt_mem +
-                                        (vcpu_id * vcpu_memory_bytes);
-               vcpu_args[vcpu_id].pages = vcpu_memory_bytes / guest_page_size;
        }
 
        /* Export the shared variables to the guest */
-       sync_global_to_guest(vm, host_page_size);
-       sync_global_to_guest(vm, guest_page_size);
-       sync_global_to_guest(vm, vcpu_args);
+       sync_global_to_guest(vm, perf_test_args);
 
        pr_info("Finished creating vCPUs and starting uffd threads\n");
 
        clock_gettime(CLOCK_MONOTONIC, &start);
 
-       for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) {
+       for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
                pthread_create(&vcpu_threads[vcpu_id], NULL, vcpu_worker,
-                              &vcpu_args[vcpu_id]);
+                              &perf_test_args.vcpu_args[vcpu_id]);
        }
 
        pr_info("Started all vCPUs\n");
 
        /* Wait for the vcpu threads to quit */
-       for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) {
+       for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
                pthread_join(vcpu_threads[vcpu_id], NULL);
                PER_VCPU_DEBUG("Joined thread for vCPU %d\n", vcpu_id);
        }
 
-       pr_info("All vCPU threads joined\n");
+       ts_diff = timespec_diff_now(start);
 
-       clock_gettime(CLOCK_MONOTONIC, &end);
+       pr_info("All vCPU threads joined\n");
 
        if (use_uffd) {
                char c;
 
                /* Tell the user fault fd handler threads to quit */
-               for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) {
+               for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
                        r = write(pipefds[vcpu_id * 2 + 1], &c, 1);
                        TEST_ASSERT(r == 1, "Unable to write to pipefd");
 
@@ -510,11 +351,11 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd,
                }
        }
 
-       ts_diff = timespec_sub(end, start);
        pr_info("Total guest execution time: %ld.%.9lds\n",
                ts_diff.tv_sec, ts_diff.tv_nsec);
        pr_info("Overall demand paging rate: %f pgs/sec\n",
-               guest_num_pages / ((double)ts_diff.tv_sec + (double)ts_diff.tv_nsec / 100000000.0));
+               perf_test_args.vcpu_args[0].pages * nr_vcpus /
+               ((double)ts_diff.tv_sec + (double)ts_diff.tv_nsec / 100000000.0));
 
        ucall_uninit(vm);
        kvm_vm_free(vm);
@@ -568,9 +409,8 @@ static void help(char *name)
 
 int main(int argc, char *argv[])
 {
+       int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
        bool mode_selected = false;
-       uint64_t vcpu_memory_bytes = DEFAULT_GUEST_TEST_MEM_SIZE;
-       int vcpus = 1;
        unsigned int mode;
        int opt, i;
        bool use_uffd = false;
@@ -619,15 +459,12 @@ int main(int argc, char *argv[])
                                    "A negative UFFD delay is not supported.");
                        break;
                case 'b':
-                       vcpu_memory_bytes = parse_size(optarg);
+                       guest_percpu_mem_size = parse_size(optarg);
                        break;
                case 'v':
-                       vcpus = atoi(optarg);
-                       TEST_ASSERT(vcpus > 0,
-                                   "Must have a positive number of vCPUs");
-                       TEST_ASSERT(vcpus <= MAX_VCPUS,
-                                   "This test does not currently support\n"
-                                   "more than %d vCPUs.", MAX_VCPUS);
+                       nr_vcpus = atoi(optarg);
+                       TEST_ASSERT(nr_vcpus > 0 && nr_vcpus <= max_vcpus,
+                                   "Invalid number of vcpus, must be between 1 and %d", max_vcpus);
                        break;
                case 'h':
                default:
@@ -642,7 +479,7 @@ int main(int argc, char *argv[])
                TEST_ASSERT(guest_modes[i].supported,
                            "Guest mode ID %d (%s) not supported.",
                            i, vm_guest_mode_string(i));
-               run_test(i, use_uffd, uffd_delay, vcpus, vcpu_memory_bytes);
+               run_test(i, use_uffd, uffd_delay);
        }
 
        return 0;
diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c
new file mode 100644 (file)
index 0000000..85c9b8f
--- /dev/null
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KVM dirty page logging performance test
+ *
+ * Based on dirty_log_test.c
+ *
+ * Copyright (C) 2018, Red Hat, Inc.
+ * Copyright (C) 2020, Google, Inc.
+ */
+
+#define _GNU_SOURCE /* for program_invocation_name */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <time.h>
+#include <pthread.h>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+
+#include "kvm_util.h"
+#include "perf_test_util.h"
+#include "processor.h"
+#include "test_util.h"
+
+/* How many host loops to run by default (one KVM_GET_DIRTY_LOG for each loop) */
+#define TEST_HOST_LOOP_N               2UL
+
+/* Host variables */
+static bool host_quit;
+static uint64_t iteration;
+static uint64_t vcpu_last_completed_iteration[MAX_VCPUS];
+
+static void *vcpu_worker(void *data)
+{
+       int ret;
+       struct kvm_vm *vm = perf_test_args.vm;
+       uint64_t pages_count = 0;
+       struct kvm_run *run;
+       struct timespec start;
+       struct timespec ts_diff;
+       struct timespec total = (struct timespec){0};
+       struct timespec avg;
+       struct vcpu_args *vcpu_args = (struct vcpu_args *)data;
+       int vcpu_id = vcpu_args->vcpu_id;
+
+       vcpu_args_set(vm, vcpu_id, 1, vcpu_id);
+       run = vcpu_state(vm, vcpu_id);
+
+       while (!READ_ONCE(host_quit)) {
+               uint64_t current_iteration = READ_ONCE(iteration);
+
+               clock_gettime(CLOCK_MONOTONIC, &start);
+               ret = _vcpu_run(vm, vcpu_id);
+               ts_diff = timespec_diff_now(start);
+
+               TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
+               TEST_ASSERT(get_ucall(vm, vcpu_id, NULL) == UCALL_SYNC,
+                           "Invalid guest sync status: exit_reason=%s\n",
+                           exit_reason_str(run->exit_reason));
+
+               pr_debug("Got sync event from vCPU %d\n", vcpu_id);
+               vcpu_last_completed_iteration[vcpu_id] = current_iteration;
+               pr_debug("vCPU %d updated last completed iteration to %lu\n",
+                        vcpu_id, vcpu_last_completed_iteration[vcpu_id]);
+
+               if (current_iteration) {
+                       pages_count += vcpu_args->pages;
+                       total = timespec_add(total, ts_diff);
+                       pr_debug("vCPU %d iteration %lu dirty memory time: %ld.%.9lds\n",
+                               vcpu_id, current_iteration, ts_diff.tv_sec,
+                               ts_diff.tv_nsec);
+               } else {
+                       pr_debug("vCPU %d iteration %lu populate memory time: %ld.%.9lds\n",
+                               vcpu_id, current_iteration, ts_diff.tv_sec,
+                               ts_diff.tv_nsec);
+               }
+
+               while (current_iteration == READ_ONCE(iteration) &&
+                      !READ_ONCE(host_quit)) {}
+       }
+
+       avg = timespec_div(total, vcpu_last_completed_iteration[vcpu_id]);
+       pr_debug("\nvCPU %d dirtied 0x%lx pages over %lu iterations in %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
+               vcpu_id, pages_count, vcpu_last_completed_iteration[vcpu_id],
+               total.tv_sec, total.tv_nsec, avg.tv_sec, avg.tv_nsec);
+
+       return NULL;
+}
+
+#ifdef USE_CLEAR_DIRTY_LOG
+static u64 dirty_log_manual_caps;
+#endif
+
+static void run_test(enum vm_guest_mode mode, unsigned long iterations,
+                    uint64_t phys_offset, int wr_fract)
+{
+       pthread_t *vcpu_threads;
+       struct kvm_vm *vm;
+       unsigned long *bmap;
+       uint64_t guest_num_pages;
+       uint64_t host_num_pages;
+       int vcpu_id;
+       struct timespec start;
+       struct timespec ts_diff;
+       struct timespec get_dirty_log_total = (struct timespec){0};
+       struct timespec vcpu_dirty_total = (struct timespec){0};
+       struct timespec avg;
+#ifdef USE_CLEAR_DIRTY_LOG
+       struct kvm_enable_cap cap = {};
+       struct timespec clear_dirty_log_total = (struct timespec){0};
+#endif
+
+       vm = create_vm(mode, nr_vcpus, guest_percpu_mem_size);
+
+       perf_test_args.wr_fract = wr_fract;
+
+       guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm_get_page_shift(vm);
+       guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
+       host_num_pages = vm_num_host_pages(mode, guest_num_pages);
+       bmap = bitmap_alloc(host_num_pages);
+
+#ifdef USE_CLEAR_DIRTY_LOG
+       cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2;
+       cap.args[0] = dirty_log_manual_caps;
+       vm_enable_cap(vm, &cap);
+#endif
+
+       vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads));
+       TEST_ASSERT(vcpu_threads, "Memory allocation failed");
+
+       add_vcpus(vm, nr_vcpus, guest_percpu_mem_size);
+
+       sync_global_to_guest(vm, perf_test_args);
+
+       /* Start the iterations */
+       iteration = 0;
+       host_quit = false;
+
+       clock_gettime(CLOCK_MONOTONIC, &start);
+       for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
+               pthread_create(&vcpu_threads[vcpu_id], NULL, vcpu_worker,
+                              &perf_test_args.vcpu_args[vcpu_id]);
+       }
+
+       /* Allow the vCPU to populate memory */
+       pr_debug("Starting iteration %lu - Populating\n", iteration);
+       while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id]) != iteration)
+               pr_debug("Waiting for vcpu_last_completed_iteration == %lu\n",
+                       iteration);
+
+       ts_diff = timespec_diff_now(start);
+       pr_info("Populate memory time: %ld.%.9lds\n",
+               ts_diff.tv_sec, ts_diff.tv_nsec);
+
+       /* Enable dirty logging */
+       clock_gettime(CLOCK_MONOTONIC, &start);
+       vm_mem_region_set_flags(vm, TEST_MEM_SLOT_INDEX,
+                               KVM_MEM_LOG_DIRTY_PAGES);
+       ts_diff = timespec_diff_now(start);
+       pr_info("Enabling dirty logging time: %ld.%.9lds\n\n",
+               ts_diff.tv_sec, ts_diff.tv_nsec);
+
+       while (iteration < iterations) {
+               /*
+                * Incrementing the iteration number will start the vCPUs
+                * dirtying memory again.
+                */
+               clock_gettime(CLOCK_MONOTONIC, &start);
+               iteration++;
+
+               pr_debug("Starting iteration %lu\n", iteration);
+               for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
+                       while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id]) != iteration)
+                               pr_debug("Waiting for vCPU %d vcpu_last_completed_iteration == %lu\n",
+                                        vcpu_id, iteration);
+               }
+
+               ts_diff = timespec_diff_now(start);
+               vcpu_dirty_total = timespec_add(vcpu_dirty_total, ts_diff);
+               pr_info("Iteration %lu dirty memory time: %ld.%.9lds\n",
+                       iteration, ts_diff.tv_sec, ts_diff.tv_nsec);
+
+               clock_gettime(CLOCK_MONOTONIC, &start);
+               kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap);
+
+               ts_diff = timespec_diff_now(start);
+               get_dirty_log_total = timespec_add(get_dirty_log_total,
+                                                  ts_diff);
+               pr_info("Iteration %lu get dirty log time: %ld.%.9lds\n",
+                       iteration, ts_diff.tv_sec, ts_diff.tv_nsec);
+
+#ifdef USE_CLEAR_DIRTY_LOG
+               clock_gettime(CLOCK_MONOTONIC, &start);
+               kvm_vm_clear_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap, 0,
+                                      host_num_pages);
+
+               ts_diff = timespec_diff_now(start);
+               clear_dirty_log_total = timespec_add(clear_dirty_log_total,
+                                                    ts_diff);
+               pr_info("Iteration %lu clear dirty log time: %ld.%.9lds\n",
+                       iteration, ts_diff.tv_sec, ts_diff.tv_nsec);
+#endif
+       }
+
+       /* Tell the vcpu thread to quit */
+       host_quit = true;
+       for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++)
+               pthread_join(vcpu_threads[vcpu_id], NULL);
+
+       /* Disable dirty logging */
+       clock_gettime(CLOCK_MONOTONIC, &start);
+       vm_mem_region_set_flags(vm, TEST_MEM_SLOT_INDEX, 0);
+       ts_diff = timespec_diff_now(start);
+       pr_info("Disabling dirty logging time: %ld.%.9lds\n",
+               ts_diff.tv_sec, ts_diff.tv_nsec);
+
+       avg = timespec_div(get_dirty_log_total, iterations);
+       pr_info("Get dirty log over %lu iterations took %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
+               iterations, get_dirty_log_total.tv_sec,
+               get_dirty_log_total.tv_nsec, avg.tv_sec, avg.tv_nsec);
+
+#ifdef USE_CLEAR_DIRTY_LOG
+       avg = timespec_div(clear_dirty_log_total, iterations);
+       pr_info("Clear dirty log over %lu iterations took %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
+               iterations, clear_dirty_log_total.tv_sec,
+               clear_dirty_log_total.tv_nsec, avg.tv_sec, avg.tv_nsec);
+#endif
+
+       free(bmap);
+       free(vcpu_threads);
+       ucall_uninit(vm);
+       kvm_vm_free(vm);
+}
+
+struct guest_mode {
+       bool supported;
+       bool enabled;
+};
+static struct guest_mode guest_modes[NUM_VM_MODES];
+
+#define guest_mode_init(mode, supported, enabled) ({ \
+       guest_modes[mode] = (struct guest_mode){ supported, enabled }; \
+})
+
+static void help(char *name)
+{
+       int i;
+
+       puts("");
+       printf("usage: %s [-h] [-i iterations] [-p offset] "
+              "[-m mode] [-b vcpu bytes] [-v vcpus]\n", name);
+       puts("");
+       printf(" -i: specify iteration counts (default: %"PRIu64")\n",
+              TEST_HOST_LOOP_N);
+       printf(" -p: specify guest physical test memory offset\n"
+              "     Warning: a low offset can conflict with the loaded test code.\n");
+       printf(" -m: specify the guest mode ID to test "
+              "(default: test all supported modes)\n"
+              "     This option may be used multiple times.\n"
+              "     Guest mode IDs:\n");
+       for (i = 0; i < NUM_VM_MODES; ++i) {
+               printf("         %d:    %s%s\n", i, vm_guest_mode_string(i),
+                      guest_modes[i].supported ? " (supported)" : "");
+       }
+       printf(" -b: specify the size of the memory region which should be\n"
+              "     dirtied by each vCPU. e.g. 10M or 3G.\n"
+              "     (default: 1G)\n");
+       printf(" -f: specify the fraction of pages which should be written to\n"
+              "     as opposed to simply read, in the form\n"
+              "     1/<fraction of pages to write>.\n"
+              "     (default: 1 i.e. all pages are written to.)\n");
+       printf(" -v: specify the number of vCPUs to run.\n");
+       puts("");
+       exit(0);
+}
+
+int main(int argc, char *argv[])
+{
+       unsigned long iterations = TEST_HOST_LOOP_N;
+       bool mode_selected = false;
+       uint64_t phys_offset = 0;
+       unsigned int mode;
+       int opt, i;
+       int wr_fract = 1;
+
+#ifdef USE_CLEAR_DIRTY_LOG
+       dirty_log_manual_caps =
+               kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
+       if (!dirty_log_manual_caps) {
+               print_skip("KVM_CLEAR_DIRTY_LOG not available");
+               exit(KSFT_SKIP);
+       }
+       dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
+                                 KVM_DIRTY_LOG_INITIALLY_SET);
+#endif
+
+#ifdef __x86_64__
+       guest_mode_init(VM_MODE_PXXV48_4K, true, true);
+#endif
+#ifdef __aarch64__
+       guest_mode_init(VM_MODE_P40V48_4K, true, true);
+       guest_mode_init(VM_MODE_P40V48_64K, true, true);
+
+       {
+               unsigned int limit = kvm_check_cap(KVM_CAP_ARM_VM_IPA_SIZE);
+
+               if (limit >= 52)
+                       guest_mode_init(VM_MODE_P52V48_64K, true, true);
+               if (limit >= 48) {
+                       guest_mode_init(VM_MODE_P48V48_4K, true, true);
+                       guest_mode_init(VM_MODE_P48V48_64K, true, true);
+               }
+       }
+#endif
+#ifdef __s390x__
+       guest_mode_init(VM_MODE_P40V48_4K, true, true);
+#endif
+
+       while ((opt = getopt(argc, argv, "hi:p:m:b:f:v:")) != -1) {
+               switch (opt) {
+               case 'i':
+                       iterations = strtol(optarg, NULL, 10);
+                       break;
+               case 'p':
+                       phys_offset = strtoull(optarg, NULL, 0);
+                       break;
+               case 'm':
+                       if (!mode_selected) {
+                               for (i = 0; i < NUM_VM_MODES; ++i)
+                                       guest_modes[i].enabled = false;
+                               mode_selected = true;
+                       }
+                       mode = strtoul(optarg, NULL, 10);
+                       TEST_ASSERT(mode < NUM_VM_MODES,
+                                   "Guest mode ID %d too big", mode);
+                       guest_modes[mode].enabled = true;
+                       break;
+               case 'b':
+                       guest_percpu_mem_size = parse_size(optarg);
+                       break;
+               case 'f':
+                       wr_fract = atoi(optarg);
+                       TEST_ASSERT(wr_fract >= 1,
+                                   "Write fraction cannot be less than one");
+                       break;
+               case 'v':
+                       nr_vcpus = atoi(optarg);
+                       TEST_ASSERT(nr_vcpus > 0,
+                                   "Must have a positive number of vCPUs");
+                       TEST_ASSERT(nr_vcpus <= MAX_VCPUS,
+                                   "This test does not currently support\n"
+                                   "more than %d vCPUs.", MAX_VCPUS);
+                       break;
+               case 'h':
+               default:
+                       help(argv[0]);
+                       break;
+               }
+       }
+
+       TEST_ASSERT(iterations >= 2, "The test should have at least two iterations");
+
+       pr_info("Test iterations: %"PRIu64"\n", iterations);
+
+       for (i = 0; i < NUM_VM_MODES; ++i) {
+               if (!guest_modes[i].enabled)
+                       continue;
+               TEST_ASSERT(guest_modes[i].supported,
+                           "Guest mode ID %d (%s) not supported.",
+                           i, vm_guest_mode_string(i));
+               run_test(i, iterations, phys_offset, wr_fract);
+       }
+
+       return 0;
+}
index 752ec15..54da9cc 100644 (file)
@@ -128,6 +128,78 @@ static uint64_t host_dirty_count;
 static uint64_t host_clear_count;
 static uint64_t host_track_next_count;
 
+enum log_mode_t {
+       /* Only use KVM_GET_DIRTY_LOG for logging */
+       LOG_MODE_DIRTY_LOG = 0,
+
+       /* Use both KVM_[GET|CLEAR]_DIRTY_LOG for logging */
+       LOG_MODE_CLEAR_LOG = 1,
+
+       LOG_MODE_NUM,
+
+       /* Run all supported modes */
+       LOG_MODE_ALL = LOG_MODE_NUM,
+};
+
+/* Mode of logging to test.  Default is to run all supported modes */
+static enum log_mode_t host_log_mode_option = LOG_MODE_ALL;
+/* Logging mode for current run */
+static enum log_mode_t host_log_mode;
+
+static bool clear_log_supported(void)
+{
+       return kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
+}
+
+static void clear_log_create_vm_done(struct kvm_vm *vm)
+{
+       struct kvm_enable_cap cap = {};
+       u64 manual_caps;
+
+       manual_caps = kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
+       TEST_ASSERT(manual_caps, "MANUAL_CAPS is zero!");
+       manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
+                       KVM_DIRTY_LOG_INITIALLY_SET);
+       cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2;
+       cap.args[0] = manual_caps;
+       vm_enable_cap(vm, &cap);
+}
+
+static void dirty_log_collect_dirty_pages(struct kvm_vm *vm, int slot,
+                                         void *bitmap, uint32_t num_pages)
+{
+       kvm_vm_get_dirty_log(vm, slot, bitmap);
+}
+
+static void clear_log_collect_dirty_pages(struct kvm_vm *vm, int slot,
+                                         void *bitmap, uint32_t num_pages)
+{
+       kvm_vm_get_dirty_log(vm, slot, bitmap);
+       kvm_vm_clear_dirty_log(vm, slot, bitmap, 0, num_pages);
+}
+
+struct log_mode {
+       const char *name;
+       /* Return true if this mode is supported, otherwise false */
+       bool (*supported)(void);
+       /* Hook when the vm creation is done (before vcpu creation) */
+       void (*create_vm_done)(struct kvm_vm *vm);
+       /* Hook to collect the dirty pages into the bitmap provided */
+       void (*collect_dirty_pages) (struct kvm_vm *vm, int slot,
+                                    void *bitmap, uint32_t num_pages);
+} log_modes[LOG_MODE_NUM] = {
+       {
+               .name = "dirty-log",
+               .collect_dirty_pages = dirty_log_collect_dirty_pages,
+       },
+       {
+               .name = "clear-log",
+               .supported = clear_log_supported,
+               .create_vm_done = clear_log_create_vm_done,
+               .collect_dirty_pages = clear_log_collect_dirty_pages,
+       },
+};
+
 /*
  * We use this bitmap to track some pages that should have its dirty
  * bit set in the _next_ iteration.  For example, if we detected the
@@ -137,6 +209,44 @@ static uint64_t host_track_next_count;
  */
 static unsigned long *host_bmap_track;
 
+static void log_modes_dump(void)
+{
+       int i;
+
+       printf("all");
+       for (i = 0; i < LOG_MODE_NUM; i++)
+               printf(", %s", log_modes[i].name);
+       printf("\n");
+}
+
+static bool log_mode_supported(void)
+{
+       struct log_mode *mode = &log_modes[host_log_mode];
+
+       if (mode->supported)
+               return mode->supported();
+
+       return true;
+}
+
+static void log_mode_create_vm_done(struct kvm_vm *vm)
+{
+       struct log_mode *mode = &log_modes[host_log_mode];
+
+       if (mode->create_vm_done)
+               mode->create_vm_done(vm);
+}
+
+static void log_mode_collect_dirty_pages(struct kvm_vm *vm, int slot,
+                                        void *bitmap, uint32_t num_pages)
+{
+       struct log_mode *mode = &log_modes[host_log_mode];
+
+       TEST_ASSERT(mode->collect_dirty_pages != NULL,
+                   "collect_dirty_pages() is required for any log mode!");
+       mode->collect_dirty_pages(vm, slot, bitmap, num_pages);
+}
+
 static void generate_random_array(uint64_t *guest_array, uint64_t size)
 {
        uint64_t i;
@@ -195,7 +305,7 @@ static void vm_dirty_log_verify(enum vm_guest_mode mode, unsigned long *bmap)
                                    page);
                }
 
-               if (test_bit_le(page, bmap)) {
+               if (test_and_clear_bit_le(page, bmap)) {
                        host_dirty_count++;
                        /*
                         * If the bit is set, the value written onto
@@ -252,11 +362,12 @@ static struct kvm_vm *create_vm(enum vm_guest_mode mode, uint32_t vcpuid,
 
        pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
 
-       vm = _vm_create(mode, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR);
+       vm = vm_create(mode, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR);
        kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
 #ifdef __x86_64__
        vm_create_irqchip(vm);
 #endif
+       log_mode_create_vm_done(vm);
        vm_vcpu_add_default(vm, vcpuid, guest_code);
        return vm;
 }
@@ -264,10 +375,6 @@ static struct kvm_vm *create_vm(enum vm_guest_mode mode, uint32_t vcpuid,
 #define DIRTY_MEM_BITS 30 /* 1G */
 #define PAGE_SHIFT_4K  12
 
-#ifdef USE_CLEAR_DIRTY_LOG
-static u64 dirty_log_manual_caps;
-#endif
-
 static void run_test(enum vm_guest_mode mode, unsigned long iterations,
                     unsigned long interval, uint64_t phys_offset)
 {
@@ -275,6 +382,12 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
        struct kvm_vm *vm;
        unsigned long *bmap;
 
+       if (!log_mode_supported()) {
+               print_skip("Log mode '%s' not supported",
+                          log_modes[host_log_mode].name);
+               return;
+       }
+
        /*
         * We reserve page table for 2 times of extra dirty mem which
         * will definitely cover the original (1G+) test range.  Here
@@ -317,14 +430,6 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
        bmap = bitmap_alloc(host_num_pages);
        host_bmap_track = bitmap_alloc(host_num_pages);
 
-#ifdef USE_CLEAR_DIRTY_LOG
-       struct kvm_enable_cap cap = {};
-
-       cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2;
-       cap.args[0] = dirty_log_manual_caps;
-       vm_enable_cap(vm, &cap);
-#endif
-
        /* Add an extra memory slot for testing dirty logging */
        vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
                                    guest_test_phys_mem,
@@ -362,11 +467,8 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
        while (iteration < iterations) {
                /* Give the vcpu thread some time to dirty some pages */
                usleep(interval * 1000);
-               kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap);
-#ifdef USE_CLEAR_DIRTY_LOG
-               kvm_vm_clear_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap, 0,
-                                      host_num_pages);
-#endif
+               log_mode_collect_dirty_pages(vm, TEST_MEM_SLOT_INDEX,
+                                            bmap, host_num_pages);
                vm_dirty_log_verify(mode, bmap);
                iteration++;
                sync_global_to_guest(vm, iteration);
@@ -410,6 +512,9 @@ static void help(char *name)
               TEST_HOST_LOOP_INTERVAL);
        printf(" -p: specify guest physical test memory offset\n"
               "     Warning: a low offset can conflict with the loaded test code.\n");
+       printf(" -M: specify the host logging mode "
+              "(default: run all log modes).  Supported modes: \n\t");
+       log_modes_dump();
        printf(" -m: specify the guest mode ID to test "
               "(default: test all supported modes)\n"
               "     This option may be used multiple times.\n"
@@ -429,18 +534,7 @@ int main(int argc, char *argv[])
        bool mode_selected = false;
        uint64_t phys_offset = 0;
        unsigned int mode;
-       int opt, i;
-
-#ifdef USE_CLEAR_DIRTY_LOG
-       dirty_log_manual_caps =
-               kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
-       if (!dirty_log_manual_caps) {
-               print_skip("KVM_CLEAR_DIRTY_LOG not available");
-               exit(KSFT_SKIP);
-       }
-       dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
-                                 KVM_DIRTY_LOG_INITIALLY_SET);
-#endif
+       int opt, i, j;
 
 #ifdef __x86_64__
        guest_mode_init(VM_MODE_PXXV48_4K, true, true);
@@ -464,7 +558,7 @@ int main(int argc, char *argv[])
        guest_mode_init(VM_MODE_P40V48_4K, true, true);
 #endif
 
-       while ((opt = getopt(argc, argv, "hi:I:p:m:")) != -1) {
+       while ((opt = getopt(argc, argv, "hi:I:p:m:M:")) != -1) {
                switch (opt) {
                case 'i':
                        iterations = strtol(optarg, NULL, 10);
@@ -486,6 +580,26 @@ int main(int argc, char *argv[])
                                    "Guest mode ID %d too big", mode);
                        guest_modes[mode].enabled = true;
                        break;
+               case 'M':
+                       if (!strcmp(optarg, "all")) {
+                               host_log_mode_option = LOG_MODE_ALL;
+                               break;
+                       }
+                       for (i = 0; i < LOG_MODE_NUM; i++) {
+                               if (!strcmp(optarg, log_modes[i].name)) {
+                                       pr_info("Setting log mode to: '%s'\n",
+                                               optarg);
+                                       host_log_mode_option = i;
+                                       break;
+                               }
+                       }
+                       if (i == LOG_MODE_NUM) {
+                               printf("Log mode '%s' invalid. Please choose "
+                                      "from: ", optarg);
+                               log_modes_dump();
+                               exit(1);
+                       }
+                       break;
                case 'h':
                default:
                        help(argv[0]);
@@ -507,7 +621,18 @@ int main(int argc, char *argv[])
                TEST_ASSERT(guest_modes[i].supported,
                            "Guest mode ID %d (%s) not supported.",
                            i, vm_guest_mode_string(i));
-               run_test(i, iterations, interval, phys_offset);
+               if (host_log_mode_option == LOG_MODE_ALL) {
+                       /* Run each log mode */
+                       for (j = 0; j < LOG_MODE_NUM; j++) {
+                               pr_info("Testing Log Mode '%s'\n",
+                                       log_modes[j].name);
+                               host_log_mode = j;
+                               run_test(i, iterations, interval, phys_offset);
+                       }
+               } else {
+                       host_log_mode = host_log_mode_option;
+                       run_test(i, iterations, interval, phys_offset);
+               }
        }
 
        return 0;
index 919e161..7d29aa7 100644 (file)
@@ -63,9 +63,11 @@ enum vm_mem_backing_src_type {
 
 int kvm_check_cap(long cap);
 int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap);
+int vcpu_enable_cap(struct kvm_vm *vm, uint32_t vcpu_id,
+                   struct kvm_enable_cap *cap);
+void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size);
 
 struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm);
-struct kvm_vm *_vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm);
 void kvm_vm_free(struct kvm_vm *vmp);
 void kvm_vm_restart(struct kvm_vm *vmp, int perm);
 void kvm_vm_release(struct kvm_vm *vmp);
@@ -149,6 +151,7 @@ void vcpu_set_guest_debug(struct kvm_vm *vm, uint32_t vcpuid,
                          struct kvm_guest_debug *debug);
 void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid,
                       struct kvm_mp_state *mp_state);
+struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vm *vm, uint32_t vcpuid);
 void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs);
 void vcpu_regs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs);
 
@@ -294,6 +297,8 @@ int vm_create_device(struct kvm_vm *vm, struct kvm_create_device *cd);
        memcpy(&(g), _p, sizeof(g));                            \
 })
 
+void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid);
+
 /* Common ucalls */
 enum {
        UCALL_NONE,
diff --git a/tools/testing/selftests/kvm/include/perf_test_util.h b/tools/testing/selftests/kvm/include/perf_test_util.h
new file mode 100644 (file)
index 0000000..2618052
--- /dev/null
@@ -0,0 +1,198 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * tools/testing/selftests/kvm/include/perf_test_util.h
+ *
+ * Copyright (C) 2020, Google LLC.
+ */
+
+#ifndef SELFTEST_KVM_PERF_TEST_UTIL_H
+#define SELFTEST_KVM_PERF_TEST_UTIL_H
+
+#include "kvm_util.h"
+#include "processor.h"
+
+#define MAX_VCPUS 512
+
+#define PAGE_SHIFT_4K  12
+#define PTES_PER_4K_PT 512
+
+#define TEST_MEM_SLOT_INDEX            1
+
+/* Default guest test virtual memory offset */
+#define DEFAULT_GUEST_TEST_MEM         0xc0000000
+
+#define DEFAULT_PER_VCPU_MEM_SIZE      (1 << 30) /* 1G */
+
+/*
+ * Guest physical memory offset of the testing memory slot.
+ * This will be set to the topmost valid physical address minus
+ * the test memory size.
+ */
+static uint64_t guest_test_phys_mem;
+
+/*
+ * Guest virtual memory offset of the testing memory slot.
+ * Must not conflict with identity mapped test code.
+ */
+static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
+static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
+
+/* Number of VCPUs for the test */
+static int nr_vcpus = 1;
+
+struct vcpu_args {
+       uint64_t gva;
+       uint64_t pages;
+
+       /* Only used by the host userspace part of the vCPU thread */
+       int vcpu_id;
+};
+
+struct perf_test_args {
+       struct kvm_vm *vm;
+       uint64_t host_page_size;
+       uint64_t guest_page_size;
+       int wr_fract;
+
+       struct vcpu_args vcpu_args[MAX_VCPUS];
+};
+
+static struct perf_test_args perf_test_args;
+
+/*
+ * Continuously write to the first 8 bytes of each page in the
+ * specified region.
+ */
+static void guest_code(uint32_t vcpu_id)
+{
+       struct vcpu_args *vcpu_args = &perf_test_args.vcpu_args[vcpu_id];
+       uint64_t gva;
+       uint64_t pages;
+       int i;
+
+       /* Make sure vCPU args data structure is not corrupt. */
+       GUEST_ASSERT(vcpu_args->vcpu_id == vcpu_id);
+
+       gva = vcpu_args->gva;
+       pages = vcpu_args->pages;
+
+       while (true) {
+               for (i = 0; i < pages; i++) {
+                       uint64_t addr = gva + (i * perf_test_args.guest_page_size);
+
+                       if (i % perf_test_args.wr_fract == 0)
+                               *(uint64_t *)addr = 0x0123456789ABCDEF;
+                       else
+                               READ_ONCE(*(uint64_t *)addr);
+               }
+
+               GUEST_SYNC(1);
+       }
+}
+
+static struct kvm_vm *create_vm(enum vm_guest_mode mode, int vcpus,
+                               uint64_t vcpu_memory_bytes)
+{
+       struct kvm_vm *vm;
+       uint64_t pages = DEFAULT_GUEST_PHY_PAGES;
+       uint64_t guest_num_pages;
+
+       /* Account for a few pages per-vCPU for stacks */
+       pages += DEFAULT_STACK_PGS * vcpus;
+
+       /*
+        * Reserve twice the amount of memory needed to map the test region and
+        * the page table / stacks region, at 4k, for page tables. Do the
+        * calculation with 4K page size: the smallest of all archs. (e.g., 64K
+        * page size guest will need even less memory for page tables).
+        */
+       pages += (2 * pages) / PTES_PER_4K_PT;
+       pages += ((2 * vcpus * vcpu_memory_bytes) >> PAGE_SHIFT_4K) /
+                PTES_PER_4K_PT;
+       pages = vm_adjust_num_guest_pages(mode, pages);
+
+       pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
+
+       vm = vm_create(mode, pages, O_RDWR);
+       kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
+#ifdef __x86_64__
+       vm_create_irqchip(vm);
+#endif
+
+       perf_test_args.vm = vm;
+       perf_test_args.guest_page_size = vm_get_page_size(vm);
+       perf_test_args.host_page_size = getpagesize();
+
+       TEST_ASSERT(vcpu_memory_bytes % perf_test_args.guest_page_size == 0,
+                   "Guest memory size is not guest page size aligned.");
+
+       guest_num_pages = (vcpus * vcpu_memory_bytes) /
+                         perf_test_args.guest_page_size;
+       guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
+
+       /*
+        * If there should be more memory in the guest test region than there
+        * can be pages in the guest, it will definitely cause problems.
+        */
+       TEST_ASSERT(guest_num_pages < vm_get_max_gfn(vm),
+                   "Requested more guest memory than address space allows.\n"
+                   "    guest pages: %lx max gfn: %x vcpus: %d wss: %lx]\n",
+                   guest_num_pages, vm_get_max_gfn(vm), vcpus,
+                   vcpu_memory_bytes);
+
+       TEST_ASSERT(vcpu_memory_bytes % perf_test_args.host_page_size == 0,
+                   "Guest memory size is not host page size aligned.");
+
+       guest_test_phys_mem = (vm_get_max_gfn(vm) - guest_num_pages) *
+                             perf_test_args.guest_page_size;
+       guest_test_phys_mem &= ~(perf_test_args.host_page_size - 1);
+
+#ifdef __s390x__
+       /* Align to 1M (segment size) */
+       guest_test_phys_mem &= ~((1 << 20) - 1);
+#endif
+
+       pr_info("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem);
+
+       /* Add an extra memory slot for testing */
+       vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
+                                   guest_test_phys_mem,
+                                   TEST_MEM_SLOT_INDEX,
+                                   guest_num_pages, 0);
+
+       /* Do mapping for the demand paging memory slot */
+       virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages, 0);
+
+       ucall_init(vm, NULL);
+
+       return vm;
+}
+
+static void add_vcpus(struct kvm_vm *vm, int vcpus, uint64_t vcpu_memory_bytes)
+{
+       vm_paddr_t vcpu_gpa;
+       struct vcpu_args *vcpu_args;
+       int vcpu_id;
+
+       for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) {
+               vcpu_args = &perf_test_args.vcpu_args[vcpu_id];
+
+               vm_vcpu_add_default(vm, vcpu_id, guest_code);
+
+#ifdef __x86_64__
+               vcpu_set_cpuid(vm, vcpu_id, kvm_get_supported_cpuid());
+#endif
+
+               vcpu_args->vcpu_id = vcpu_id;
+               vcpu_args->gva = guest_test_virt_mem +
+                                (vcpu_id * vcpu_memory_bytes);
+               vcpu_args->pages = vcpu_memory_bytes /
+                                  perf_test_args.guest_page_size;
+
+               vcpu_gpa = guest_test_phys_mem + (vcpu_id * vcpu_memory_bytes);
+               pr_debug("Added VCPU %d with test mem gpa [%lx, %lx)\n",
+                        vcpu_id, vcpu_gpa, vcpu_gpa + vcpu_memory_bytes);
+       }
+}
+
+#endif /* SELFTEST_KVM_PERF_TEST_UTIL_H */
index 5eb01bf..ffffa56 100644 (file)
@@ -64,5 +64,7 @@ int64_t timespec_to_ns(struct timespec ts);
 struct timespec timespec_add_ns(struct timespec ts, int64_t ns);
 struct timespec timespec_add(struct timespec ts1, struct timespec ts2);
 struct timespec timespec_sub(struct timespec ts1, struct timespec ts2);
+struct timespec timespec_diff_now(struct timespec start);
+struct timespec timespec_div(struct timespec ts, int divisor);
 
 #endif /* SELFTEST_KVM_TEST_UTIL_H */
index 82b7fe1..8e61340 100644 (file)
@@ -36,6 +36,8 @@
 #define X86_CR4_SMAP           (1ul << 21)
 #define X86_CR4_PKE            (1ul << 22)
 
+#define UNEXPECTED_VECTOR_PORT 0xfff0u
+
 /* General Registers in 64-Bit Mode */
 struct gpr64_regs {
        u64 rax;
@@ -59,7 +61,7 @@ struct gpr64_regs {
 struct desc64 {
        uint16_t limit0;
        uint16_t base0;
-       unsigned base1:8, s:1, type:4, dpl:2, p:1;
+       unsigned base1:8, type:4, s:1, dpl:2, p:1;
        unsigned limit1:4, avl:1, l:1, db:1, g:1, base2:8;
        uint32_t base3;
        uint32_t zero1;
@@ -239,6 +241,11 @@ static inline struct desc_ptr get_idt(void)
        return idt;
 }
 
+static inline void outl(uint16_t port, uint32_t value)
+{
+       __asm__ __volatile__("outl %%eax, %%dx" : : "d"(port), "a"(value));
+}
+
 #define SET_XMM(__var, __xmm) \
        asm volatile("movq %0, %%"#__xmm : : "r"(__var) : #__xmm)
 
@@ -338,6 +345,35 @@ uint32_t kvm_get_cpuid_max_basic(void);
 uint32_t kvm_get_cpuid_max_extended(void);
 void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits);
 
+struct ex_regs {
+       uint64_t rax, rcx, rdx, rbx;
+       uint64_t rbp, rsi, rdi;
+       uint64_t r8, r9, r10, r11;
+       uint64_t r12, r13, r14, r15;
+       uint64_t vector;
+       uint64_t error_code;
+       uint64_t rip;
+       uint64_t cs;
+       uint64_t rflags;
+};
+
+void vm_init_descriptor_tables(struct kvm_vm *vm);
+void vcpu_init_descriptor_tables(struct kvm_vm *vm, uint32_t vcpuid);
+void vm_handle_exception(struct kvm_vm *vm, int vector,
+                       void (*handler)(struct ex_regs *));
+
+/*
+ * set_cpuid() - overwrites a matching cpuid entry with the provided value.
+ *              matches based on ent->function and ent->index. returns true
+ *              if a match was found and successfully overwritten.
+ * @cpuid: the kvm cpuid list to modify.
+ * @ent: cpuid entry whose values overwrite the matching entry
+ */
+bool set_cpuid(struct kvm_cpuid2 *cpuid, struct kvm_cpuid_entry2 *ent);
+
+uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
+                      uint64_t a3);
+
 /*
  * Basic CPU control in CR0
  */
index 2afa661..d6c32c3 100644 (file)
@@ -350,3 +350,7 @@ void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
 
        va_end(ap);
 }
+
+void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid)
+{
+}
index c8e0ec2..2f37b90 100644 (file)
@@ -94,6 +94,9 @@ uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
        struct kvm_run *run = vcpu_state(vm, vcpu_id);
        struct ucall ucall = {};
 
+       if (uc)
+               memset(uc, 0, sizeof(*uc));
+
        if (run->exit_reason == KVM_EXIT_MMIO &&
            run->mmio.phys_addr == (uint64_t)ucall_exit_mmio_addr) {
                vm_vaddr_t gva;
index 3327ceb..126c672 100644 (file)
@@ -86,6 +86,34 @@ int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap)
        return ret;
 }
 
+/* VCPU Enable Capability
+ *
+ * Input Args:
+ *   vm - Virtual Machine
+ *   vcpu_id - VCPU
+ *   cap - Capability
+ *
+ * Output Args: None
+ *
+ * Return: On success, 0. On failure a TEST_ASSERT failure is produced.
+ *
+ * Enables a capability (KVM_CAP_*) on the VCPU.
+ */
+int vcpu_enable_cap(struct kvm_vm *vm, uint32_t vcpu_id,
+                   struct kvm_enable_cap *cap)
+{
+       struct vcpu *vcpu = vcpu_find(vm, vcpu_id);
+       int r;
+
+       TEST_ASSERT(vcpu, "cannot find vcpu %d", vcpu_id);
+
+       r = ioctl(vcpu->fd, KVM_ENABLE_CAP, cap);
+       TEST_ASSERT(!r, "KVM_ENABLE_CAP vCPU ioctl failed,\n"
+                       "  rc: %i, errno: %i", r, errno);
+
+       return r;
+}
+
 static void vm_open(struct kvm_vm *vm, int perm)
 {
        vm->kvm_fd = open(KVM_DEV_PATH, perm);
@@ -152,7 +180,7 @@ _Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params)
  * descriptor to control the created VM is created with the permissions
  * given by perm (e.g. O_RDWR).
  */
-struct kvm_vm *_vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
+struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
 {
        struct kvm_vm *vm;
 
@@ -243,11 +271,6 @@ struct kvm_vm *_vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
        return vm;
 }
 
-struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
-{
-       return _vm_create(mode, phy_pages, perm);
-}
-
 /*
  * VM Restart
  *
@@ -1204,6 +1227,9 @@ int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
        do {
                rc = ioctl(vcpu->fd, KVM_RUN, NULL);
        } while (rc == -1 && errno == EINTR);
+
+       assert_on_unhandled_exception(vm, vcpuid);
+
        return rc;
 }
 
@@ -1260,6 +1286,35 @@ void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid,
                "rc: %i errno: %i", ret, errno);
 }
 
+/*
+ * VM VCPU Get Reg List
+ *
+ * Input Args:
+ *   vm - Virtual Machine
+ *   vcpuid - VCPU ID
+ *
+ * Output Args:
+ *   None
+ *
+ * Return:
+ *   A pointer to an allocated struct kvm_reg_list
+ *
+ * Get the list of guest registers which are supported for
+ * KVM_GET_ONE_REG/KVM_SET_ONE_REG calls
+ */
+struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vm *vm, uint32_t vcpuid)
+{
+       struct kvm_reg_list reg_list_n = { .n = 0 }, *reg_list;
+       int ret;
+
+       ret = _vcpu_ioctl(vm, vcpuid, KVM_GET_REG_LIST, &reg_list_n);
+       TEST_ASSERT(ret == -1 && errno == E2BIG, "KVM_GET_REG_LIST n=0");
+       reg_list = calloc(1, sizeof(*reg_list) + reg_list_n.n * sizeof(__u64));
+       reg_list->n = reg_list_n.n;
+       vcpu_ioctl(vm, vcpuid, KVM_GET_REG_LIST, reg_list);
+       return reg_list;
+}
+
 /*
  * VM VCPU Regs Get
  *
index 2ef4465..f07d383 100644 (file)
@@ -50,6 +50,8 @@ struct kvm_vm {
        vm_paddr_t pgd;
        vm_vaddr_t gdt;
        vm_vaddr_t tss;
+       vm_vaddr_t idt;
+       vm_vaddr_t handlers;
 };
 
 struct vcpu *vcpu_find(struct kvm_vm *vm, uint32_t vcpuid);
index a88c5d6..7349bb2 100644 (file)
@@ -241,3 +241,7 @@ void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
        fprintf(stream, "%*spstate: psw: 0x%.16llx:0x%.16llx\n",
                indent, "", vcpu->state->psw_mask, vcpu->state->psw_addr);
 }
+
+void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid)
+{
+}
index fd589dc..9d3b0f1 100644 (file)
@@ -38,6 +38,9 @@ uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
        struct kvm_run *run = vcpu_state(vm, vcpu_id);
        struct ucall ucall = {};
 
+       if (uc)
+               memset(uc, 0, sizeof(*uc));
+
        if (run->exit_reason == KVM_EXIT_S390_SIEIC &&
            run->s390_sieic.icptcode == 4 &&
            (run->s390_sieic.ipa >> 8) == 0x83 &&    /* 0x83 means DIAGNOSE */
index 689e97c..8e04c0b 100644 (file)
@@ -4,10 +4,13 @@
  *
  * Copyright (C) 2020, Google LLC.
  */
-#include <stdlib.h>
+
+#include <assert.h>
 #include <ctype.h>
 #include <limits.h>
-#include <assert.h>
+#include <stdlib.h>
+#include <time.h>
+
 #include "test_util.h"
 
 /*
@@ -81,6 +84,21 @@ struct timespec timespec_sub(struct timespec ts1, struct timespec ts2)
        return timespec_add_ns((struct timespec){0}, ns1 - ns2);
 }
 
+struct timespec timespec_diff_now(struct timespec start)
+{
+       struct timespec end;
+
+       clock_gettime(CLOCK_MONOTONIC, &end);
+       return timespec_sub(end, start);
+}
+
+struct timespec timespec_div(struct timespec ts, int divisor)
+{
+       int64_t ns = timespec_to_ns(ts) / divisor;
+
+       return timespec_add_ns((struct timespec){0}, ns);
+}
+
 void print_skip(const char *fmt, ...)
 {
        va_list ap;
diff --git a/tools/testing/selftests/kvm/lib/x86_64/handlers.S b/tools/testing/selftests/kvm/lib/x86_64/handlers.S
new file mode 100644 (file)
index 0000000..aaf7bc7
--- /dev/null
@@ -0,0 +1,81 @@
+handle_exception:
+       push %r15
+       push %r14
+       push %r13
+       push %r12
+       push %r11
+       push %r10
+       push %r9
+       push %r8
+
+       push %rdi
+       push %rsi
+       push %rbp
+       push %rbx
+       push %rdx
+       push %rcx
+       push %rax
+       mov %rsp, %rdi
+
+       call route_exception
+
+       pop %rax
+       pop %rcx
+       pop %rdx
+       pop %rbx
+       pop %rbp
+       pop %rsi
+       pop %rdi
+       pop %r8
+       pop %r9
+       pop %r10
+       pop %r11
+       pop %r12
+       pop %r13
+       pop %r14
+       pop %r15
+
+       /* Discard vector and error code. */
+       add $16, %rsp
+       iretq
+
+/*
+ * Build the handle_exception wrappers which push the vector/error code on the
+ * stack and an array of pointers to those wrappers.
+ */
+.pushsection .rodata
+.globl idt_handlers
+idt_handlers:
+.popsection
+
+.macro HANDLERS has_error from to
+       vector = \from
+       .rept \to - \from + 1
+       .align 8
+
+       /* Fetch current address and append it to idt_handlers. */
+       current_handler = .
+.pushsection .rodata
+.quad current_handler
+.popsection
+
+       .if ! \has_error
+       pushq $0
+       .endif
+       pushq $vector
+       jmp handle_exception
+       vector = vector + 1
+       .endr
+.endm
+
+.global idt_handler_code
+idt_handler_code:
+       HANDLERS has_error=0 from=0  to=7
+       HANDLERS has_error=1 from=8  to=8
+       HANDLERS has_error=0 from=9  to=9
+       HANDLERS has_error=1 from=10 to=14
+       HANDLERS has_error=0 from=15 to=16
+       HANDLERS has_error=1 from=17 to=17
+       HANDLERS has_error=0 from=18 to=255
+
+.section        .note.GNU-stack, "", %progbits
index f6eb34e..d10c5c0 100644 (file)
 #include "../kvm_util_internal.h"
 #include "processor.h"
 
+#ifndef NUM_INTERRUPTS
+#define NUM_INTERRUPTS 256
+#endif
+
+#define DEFAULT_CODE_SELECTOR 0x8
+#define DEFAULT_DATA_SELECTOR 0x10
+
 /* Minimum physical address used for virtual translation tables. */
 #define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000
 
+vm_vaddr_t exception_handlers;
+
 /* Virtual translation table structure declarations */
 struct pageMapL4Entry {
        uint64_t present:1;
@@ -392,11 +401,12 @@ static void kvm_seg_fill_gdt_64bit(struct kvm_vm *vm, struct kvm_segment *segp)
        desc->limit0 = segp->limit & 0xFFFF;
        desc->base0 = segp->base & 0xFFFF;
        desc->base1 = segp->base >> 16;
-       desc->s = segp->s;
        desc->type = segp->type;
+       desc->s = segp->s;
        desc->dpl = segp->dpl;
        desc->p = segp->present;
        desc->limit1 = segp->limit >> 16;
+       desc->avl = segp->avl;
        desc->l = segp->l;
        desc->db = segp->db;
        desc->g = segp->g;
@@ -556,9 +566,9 @@ static void vcpu_setup(struct kvm_vm *vm, int vcpuid, int pgd_memslot, int gdt_m
                sregs.efer |= (EFER_LME | EFER_LMA | EFER_NX);
 
                kvm_seg_set_unusable(&sregs.ldt);
-               kvm_seg_set_kernel_code_64bit(vm, 0x8, &sregs.cs);
-               kvm_seg_set_kernel_data_64bit(vm, 0x10, &sregs.ds);
-               kvm_seg_set_kernel_data_64bit(vm, 0x10, &sregs.es);
+               kvm_seg_set_kernel_code_64bit(vm, DEFAULT_CODE_SELECTOR, &sregs.cs);
+               kvm_seg_set_kernel_data_64bit(vm, DEFAULT_DATA_SELECTOR, &sregs.ds);
+               kvm_seg_set_kernel_data_64bit(vm, DEFAULT_DATA_SELECTOR, &sregs.es);
                kvm_setup_tss_64bit(vm, &sregs.tr, 0x18, gdt_memslot, pgd_memslot);
                break;
 
@@ -1118,3 +1128,131 @@ void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits)
                *va_bits = (entry->eax >> 8) & 0xff;
        }
 }
+
+struct idt_entry {
+       uint16_t offset0;
+       uint16_t selector;
+       uint16_t ist : 3;
+       uint16_t : 5;
+       uint16_t type : 4;
+       uint16_t : 1;
+       uint16_t dpl : 2;
+       uint16_t p : 1;
+       uint16_t offset1;
+       uint32_t offset2; uint32_t reserved;
+};
+
+static void set_idt_entry(struct kvm_vm *vm, int vector, unsigned long addr,
+                         int dpl, unsigned short selector)
+{
+       struct idt_entry *base =
+               (struct idt_entry *)addr_gva2hva(vm, vm->idt);
+       struct idt_entry *e = &base[vector];
+
+       memset(e, 0, sizeof(*e));
+       e->offset0 = addr;
+       e->selector = selector;
+       e->ist = 0;
+       e->type = 14;
+       e->dpl = dpl;
+       e->p = 1;
+       e->offset1 = addr >> 16;
+       e->offset2 = addr >> 32;
+}
+
+void kvm_exit_unexpected_vector(uint32_t value)
+{
+       outl(UNEXPECTED_VECTOR_PORT, value);
+}
+
+void route_exception(struct ex_regs *regs)
+{
+       typedef void(*handler)(struct ex_regs *);
+       handler *handlers = (handler *)exception_handlers;
+
+       if (handlers && handlers[regs->vector]) {
+               handlers[regs->vector](regs);
+               return;
+       }
+
+       kvm_exit_unexpected_vector(regs->vector);
+}
+
+void vm_init_descriptor_tables(struct kvm_vm *vm)
+{
+       extern void *idt_handlers;
+       int i;
+
+       vm->idt = vm_vaddr_alloc(vm, getpagesize(), 0x2000, 0, 0);
+       vm->handlers = vm_vaddr_alloc(vm, 256 * sizeof(void *), 0x2000, 0, 0);
+       /* Handlers have the same address in both address spaces. */
+       for (i = 0; i < NUM_INTERRUPTS; i++)
+               set_idt_entry(vm, i, (unsigned long)(&idt_handlers)[i], 0,
+                       DEFAULT_CODE_SELECTOR);
+}
+
+void vcpu_init_descriptor_tables(struct kvm_vm *vm, uint32_t vcpuid)
+{
+       struct kvm_sregs sregs;
+
+       vcpu_sregs_get(vm, vcpuid, &sregs);
+       sregs.idt.base = vm->idt;
+       sregs.idt.limit = NUM_INTERRUPTS * sizeof(struct idt_entry) - 1;
+       sregs.gdt.base = vm->gdt;
+       sregs.gdt.limit = getpagesize() - 1;
+       kvm_seg_set_kernel_data_64bit(NULL, DEFAULT_DATA_SELECTOR, &sregs.gs);
+       vcpu_sregs_set(vm, vcpuid, &sregs);
+       *(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
+}
+
+void vm_handle_exception(struct kvm_vm *vm, int vector,
+                        void (*handler)(struct ex_regs *))
+{
+       vm_vaddr_t *handlers = (vm_vaddr_t *)addr_gva2hva(vm, vm->handlers);
+
+       handlers[vector] = (vm_vaddr_t)handler;
+}
+
+void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid)
+{
+       if (vcpu_state(vm, vcpuid)->exit_reason == KVM_EXIT_IO
+               && vcpu_state(vm, vcpuid)->io.port == UNEXPECTED_VECTOR_PORT
+               && vcpu_state(vm, vcpuid)->io.size == 4) {
+               /* Grab pointer to io data */
+               uint32_t *data = (void *)vcpu_state(vm, vcpuid)
+                       + vcpu_state(vm, vcpuid)->io.data_offset;
+
+               TEST_ASSERT(false,
+                           "Unexpected vectored event in guest (vector:0x%x)",
+                           *data);
+       }
+}
+
+bool set_cpuid(struct kvm_cpuid2 *cpuid,
+              struct kvm_cpuid_entry2 *ent)
+{
+       int i;
+
+       for (i = 0; i < cpuid->nent; i++) {
+               struct kvm_cpuid_entry2 *cur = &cpuid->entries[i];
+
+               if (cur->function != ent->function || cur->index != ent->index)
+                       continue;
+
+               memcpy(cur, ent, sizeof(struct kvm_cpuid_entry2));
+               return true;
+       }
+
+       return false;
+}
+
+uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
+                      uint64_t a3)
+{
+       uint64_t r;
+
+       asm volatile("vmcall"
+                    : "=a"(r)
+                    : "b"(a0), "c"(a1), "d"(a2), "S"(a3));
+       return r;
+}
index da4d89a..a348997 100644 (file)
@@ -40,6 +40,9 @@ uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
        struct kvm_run *run = vcpu_state(vm, vcpu_id);
        struct ucall ucall = {};
 
+       if (uc)
+               memset(uc, 0, sizeof(*uc));
+
        if (run->exit_reason == KVM_EXIT_IO && run->io.port == UCALL_PIO_PORT) {
                struct kvm_regs regs;
 
diff --git a/tools/testing/selftests/kvm/x86_64/kvm_pv_test.c b/tools/testing/selftests/kvm/x86_64/kvm_pv_test.c
new file mode 100644 (file)
index 0000000..b10a274
--- /dev/null
@@ -0,0 +1,234 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020, Google LLC.
+ *
+ * Tests for KVM paravirtual feature disablement
+ */
+#include <asm/kvm_para.h>
+#include <linux/kvm_para.h>
+#include <stdint.h>
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+
+extern unsigned char rdmsr_start;
+extern unsigned char rdmsr_end;
+
+static u64 do_rdmsr(u32 idx)
+{
+       u32 lo, hi;
+
+       asm volatile("rdmsr_start: rdmsr;"
+                    "rdmsr_end:"
+                    : "=a"(lo), "=c"(hi)
+                    : "c"(idx));
+
+       return (((u64) hi) << 32) | lo;
+}
+
+extern unsigned char wrmsr_start;
+extern unsigned char wrmsr_end;
+
+static void do_wrmsr(u32 idx, u64 val)
+{
+       u32 lo, hi;
+
+       lo = val;
+       hi = val >> 32;
+
+       asm volatile("wrmsr_start: wrmsr;"
+                    "wrmsr_end:"
+                    : : "a"(lo), "c"(idx), "d"(hi));
+}
+
+static int nr_gp;
+
+static void guest_gp_handler(struct ex_regs *regs)
+{
+       unsigned char *rip = (unsigned char *)regs->rip;
+       bool r, w;
+
+       r = rip == &rdmsr_start;
+       w = rip == &wrmsr_start;
+       GUEST_ASSERT(r || w);
+
+       nr_gp++;
+
+       if (r)
+               regs->rip = (uint64_t)&rdmsr_end;
+       else
+               regs->rip = (uint64_t)&wrmsr_end;
+}
+
+struct msr_data {
+       uint32_t idx;
+       const char *name;
+};
+
+#define TEST_MSR(msr) { .idx = msr, .name = #msr }
+#define UCALL_PR_MSR 0xdeadbeef
+#define PR_MSR(msr) ucall(UCALL_PR_MSR, 1, msr)
+
+/*
+ * KVM paravirtual msrs to test. Expect a #GP if any of these msrs are read or
+ * written, as the KVM_CPUID_FEATURES leaf is cleared.
+ */
+static struct msr_data msrs_to_test[] = {
+       TEST_MSR(MSR_KVM_SYSTEM_TIME),
+       TEST_MSR(MSR_KVM_SYSTEM_TIME_NEW),
+       TEST_MSR(MSR_KVM_WALL_CLOCK),
+       TEST_MSR(MSR_KVM_WALL_CLOCK_NEW),
+       TEST_MSR(MSR_KVM_ASYNC_PF_EN),
+       TEST_MSR(MSR_KVM_STEAL_TIME),
+       TEST_MSR(MSR_KVM_PV_EOI_EN),
+       TEST_MSR(MSR_KVM_POLL_CONTROL),
+       TEST_MSR(MSR_KVM_ASYNC_PF_INT),
+       TEST_MSR(MSR_KVM_ASYNC_PF_ACK),
+};
+
+static void test_msr(struct msr_data *msr)
+{
+       PR_MSR(msr);
+       do_rdmsr(msr->idx);
+       GUEST_ASSERT(READ_ONCE(nr_gp) == 1);
+
+       nr_gp = 0;
+       do_wrmsr(msr->idx, 0);
+       GUEST_ASSERT(READ_ONCE(nr_gp) == 1);
+       nr_gp = 0;
+}
+
+struct hcall_data {
+       uint64_t nr;
+       const char *name;
+};
+
+#define TEST_HCALL(hc) { .nr = hc, .name = #hc }
+#define UCALL_PR_HCALL 0xdeadc0de
+#define PR_HCALL(hc) ucall(UCALL_PR_HCALL, 1, hc)
+
+/*
+ * KVM hypercalls to test. Expect -KVM_ENOSYS when called, as the corresponding
+ * features have been cleared in KVM_CPUID_FEATURES.
+ */
+static struct hcall_data hcalls_to_test[] = {
+       TEST_HCALL(KVM_HC_KICK_CPU),
+       TEST_HCALL(KVM_HC_SEND_IPI),
+       TEST_HCALL(KVM_HC_SCHED_YIELD),
+};
+
+static void test_hcall(struct hcall_data *hc)
+{
+       uint64_t r;
+
+       PR_HCALL(hc);
+       r = kvm_hypercall(hc->nr, 0, 0, 0, 0);
+       GUEST_ASSERT(r == -KVM_ENOSYS);
+}
+
+static void guest_main(void)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(msrs_to_test); i++) {
+               test_msr(&msrs_to_test[i]);
+       }
+
+       for (i = 0; i < ARRAY_SIZE(hcalls_to_test); i++) {
+               test_hcall(&hcalls_to_test[i]);
+       }
+
+       GUEST_DONE();
+}
+
+static void clear_kvm_cpuid_features(struct kvm_cpuid2 *cpuid)
+{
+       struct kvm_cpuid_entry2 ent = {0};
+
+       ent.function = KVM_CPUID_FEATURES;
+       TEST_ASSERT(set_cpuid(cpuid, &ent),
+                   "failed to clear KVM_CPUID_FEATURES leaf");
+}
+
+static void pr_msr(struct ucall *uc)
+{
+       struct msr_data *msr = (struct msr_data *)uc->args[0];
+
+       pr_info("testing msr: %s (%#x)\n", msr->name, msr->idx);
+}
+
+static void pr_hcall(struct ucall *uc)
+{
+       struct hcall_data *hc = (struct hcall_data *)uc->args[0];
+
+       pr_info("testing hcall: %s (%lu)\n", hc->name, hc->nr);
+}
+
+static void handle_abort(struct ucall *uc)
+{
+       TEST_FAIL("%s at %s:%ld", (const char *)uc->args[0],
+                 __FILE__, uc->args[1]);
+}
+
+#define VCPU_ID 0
+
+static void enter_guest(struct kvm_vm *vm)
+{
+       struct kvm_run *run;
+       struct ucall uc;
+       int r;
+
+       run = vcpu_state(vm, VCPU_ID);
+
+       while (true) {
+               r = _vcpu_run(vm, VCPU_ID);
+               TEST_ASSERT(!r, "vcpu_run failed: %d\n", r);
+               TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+                           "unexpected exit reason: %u (%s)",
+                           run->exit_reason, exit_reason_str(run->exit_reason));
+
+               switch (get_ucall(vm, VCPU_ID, &uc)) {
+               case UCALL_PR_MSR:
+                       pr_msr(&uc);
+                       break;
+               case UCALL_PR_HCALL:
+                       pr_hcall(&uc);
+                       break;
+               case UCALL_ABORT:
+                       handle_abort(&uc);
+                       return;
+               case UCALL_DONE:
+                       return;
+               }
+       }
+}
+
+int main(void)
+{
+       struct kvm_enable_cap cap = {0};
+       struct kvm_cpuid2 *best;
+       struct kvm_vm *vm;
+
+       if (!kvm_check_cap(KVM_CAP_ENFORCE_PV_FEATURE_CPUID)) {
+               pr_info("will skip kvm paravirt restriction tests.\n");
+               return 0;
+       }
+
+       vm = vm_create_default(VCPU_ID, 0, guest_main);
+
+       cap.cap = KVM_CAP_ENFORCE_PV_FEATURE_CPUID;
+       cap.args[0] = 1;
+       vcpu_enable_cap(vm, VCPU_ID, &cap);
+
+       best = kvm_get_supported_cpuid();
+       clear_kvm_cpuid_features(best);
+       vcpu_set_cpuid(vm, VCPU_ID, best);
+
+       vm_init_descriptor_tables(vm);
+       vcpu_init_descriptor_tables(vm, VCPU_ID);
+       vm_handle_exception(vm, GP_VECTOR, guest_gp_handler);
+
+       enter_guest(vm);
+       kvm_vm_free(vm);
+}
index 30848ca..a5ce26d 100644 (file)
@@ -136,7 +136,7 @@ endif
 ifeq ($(OVERRIDE_TARGETS),)
 LOCAL_HDRS := $(selfdir)/kselftest_harness.h $(selfdir)/kselftest.h
 $(OUTPUT)/%:%.c $(LOCAL_HDRS)
-       $(LINK.c) $^ $(LDLIBS) -o $@
+       $(LINK.c) $(filter-out $(LOCAL_HDRS),$^) $(LDLIBS) -o $@
 
 $(OUTPUT)/%.o:%.S
        $(COMPILE.S) $^ -o $@
index bb11de9..f6f2965 100644 (file)
@@ -4,3 +4,4 @@ CONFIG_USER_NS=y
 CONFIG_PID_NS=y
 CONFIG_NET_NS=y
 CONFIG_CGROUPS=y
+CONFIG_CHECKPOINT_RESTORE=y
index 7758c98..0930e24 100644 (file)
@@ -204,7 +204,10 @@ TEST_F(child, fetch_fd)
        fd = sys_pidfd_getfd(self->pidfd, self->remote_fd, 0);
        ASSERT_GE(fd, 0);
 
-       EXPECT_EQ(0, sys_kcmp(getpid(), self->pid, KCMP_FILE, fd, self->remote_fd));
+       ret = sys_kcmp(getpid(), self->pid, KCMP_FILE, fd, self->remote_fd);
+       if (ret < 0 && errno == ENOSYS)
+               SKIP(return, "kcmp() syscall not supported");
+       EXPECT_EQ(ret, 0);
 
        ret = fcntl(fd, F_GETFD);
        ASSERT_GE(ret, 0);
index b9fe75f..8a59438 100644 (file)
@@ -6,7 +6,6 @@
 #include <inttypes.h>
 #include <limits.h>
 #include <linux/types.h>
-#include <linux/wait.h>
 #include <sched.h>
 #include <signal.h>
 #include <stdbool.h>
index 4b11544..6108112 100644 (file)
@@ -3,7 +3,6 @@
 #define _GNU_SOURCE
 #include <errno.h>
 #include <linux/types.h>
-#include <linux/wait.h>
 #include <poll.h>
 #include <signal.h>
 #include <stdbool.h>
index 1f085b9..6e2f2cd 100644 (file)
@@ -16,7 +16,6 @@
 #include <unistd.h>
 #include <sys/socket.h>
 #include <sys/stat.h>
-#include <linux/kcmp.h>
 
 #include "pidfd.h"
 #include "../clone3/clone3_selftests.h"
index c585aaa..529eb70 100644 (file)
@@ -330,7 +330,7 @@ static int test_pidfd_send_signal_recycled_pid_fail(void)
                ksft_exit_fail_msg("%s test: Failed to recycle pid %d\n",
                                   test_name, PID_RECYCLE);
        case PIDFD_SKIP:
-               ksft_print_msg("%s test: Skipping test\n", test_name);
+               ksft_test_result_skip("%s test: Skipping test\n", test_name);
                ret = 0;
                break;
        case PIDFD_XFAIL:
index 471e2aa..fb4fe91 100644 (file)
@@ -14,7 +14,6 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 /* Test that /proc/loadavg correctly reports last pid in pid namespace. */
-#define _GNU_SOURCE
 #include <errno.h>
 #include <sched.h>
 #include <sys/types.h>
index 9f6d000..8511dcf 100644 (file)
@@ -13,7 +13,6 @@
  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
-#define _GNU_SOURCE
 #include <unistd.h>
 #include <sys/syscall.h>
 #include <sys/types.h>
index 30e2b78..e7ceabe 100644 (file)
@@ -15,7 +15,6 @@
  */
 // Test that values in /proc/uptime increment monotonically
 // while shifting across CPUs.
-#define _GNU_SOURCE
 #undef NDEBUG
 #include <assert.h>
 #include <unistd.h>