Merge tag 'kbuild-v5.2-2' of git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy...
author     Linus Torvalds <torvalds@linux-foundation.org>
           Sun, 19 May 2019 18:53:58 +0000 (11:53 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Sun, 19 May 2019 18:53:58 +0000 (11:53 -0700)
Pull more Kbuild updates from Masahiro Yamada:

 - remove unneeded use of cc-option, cc-disable-warning, cc-ldoption (see
   the sketch after this list)

 - exclude tracked files from .gitignore

 - re-enable -Wint-in-bool-context warning

 - refactor samples/Makefile

 - stop building immediately if syncconfig fails

 - do not sprinkle error messages when $(CC) does not exist

 - move arch/alpha/defconfig to the configs subdirectory

 - remove crappy header search path manipulation

 - add comment lines to .config to clarify the end of menu blocks

 - check uniqueness of module names (adding new warnings intentionally)
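
A minimal Makefile sketch of the cc-option cleanup named in the first
item above, using -Wvla from the "add -Wvla flag unconditionally" commit
in the list below; the "before" line is a hypothetical reconstruction,
not a quote from the tree:

    # before: the flag is probed with cc-option at build time
    KBUILD_CFLAGS += $(call cc-option,-Wvla)

    # after: every supported compiler accepts -Wvla, so the probe is dropped
    KBUILD_CFLAGS += -Wvla

Dropping cc-option for flags that all supported compilers understand
avoids redundant compiler invocations during the build.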

* tag 'kbuild-v5.2-2' of git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-kbuild: (24 commits)
  kconfig: use 'else ifneq' for Makefile to improve readability
  kbuild: check uniqueness of module names
  kconfig: Terminate menu blocks with a comment in the generated config
  kbuild: add LICENSES to KBUILD_ALLDIRS
  kbuild: remove 'addtree' and 'flags' magic for header search paths
  treewide: prefix header search paths with $(srctree)/
  media: prefix header search paths with $(srctree)/
  media: remove unneeded header search paths
  alpha: move arch/alpha/defconfig to arch/alpha/configs/defconfig
  kbuild: terminate Kconfig when $(CC) or $(LD) is missing
  kbuild: turn auto.conf.cmd into a mandatory include file
  .gitignore: exclude .get_maintainer.ignore and .gitattributes
  kbuild: add all Clang-specific flags unconditionally
  kbuild: Don't try to add '-fcatch-undefined-behavior' flag
  kbuild: add some extra warning flags unconditionally
  kbuild: add -Wvla flag unconditionally
  arch: remove dangling asm-generic wrappers
  samples: guard sub-directories with CONFIG options
  kbuild: re-enable int-in-bool-context warning
  MAINTAINERS: kbuild: Add pattern for scripts/*vmlinux*
  ...

261 files changed:
.get_maintainer.ignore
Documentation/devicetree/bindings/arm/atmel-at91.txt
Documentation/devicetree/bindings/arm/keystone/ti,sci.txt
Documentation/devicetree/bindings/interrupt-controller/ti,sci-inta.txt [new file with mode: 0644]
Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.txt [new file with mode: 0644]
Documentation/devicetree/bindings/riscv/sifive-l2-cache.txt [new file with mode: 0644]
Documentation/devicetree/bindings/timer/allwinner,sun4i-timer.txt
Documentation/driver-model/devres.txt
MAINTAINERS
arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi
arch/arm/configs/sama5_defconfig
arch/arm/configs/socfpga_defconfig
arch/arm/mach-at91/Kconfig
arch/arm/mach-at91/at91sam9.c
arch/arm/mach-at91/generic.h
arch/arm/mach-at91/pm.c
arch/arm/mach-at91/pm_suspend.S
arch/arm/mach-ixp4xx/common.c
arch/arm/mach-mvebu/board-v7.c
arch/arm/mach-mvebu/coherency_ll.S
arch/arm/mach-mvebu/kirkwood.c
arch/arm/mach-mvebu/pm-board.c
arch/arm/mach-mvebu/pmsu_ll.S
arch/arm/mach-omap1/board-ams-delta.c
arch/arm64/Kconfig.platforms
arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts
arch/arm64/boot/dts/nvidia/tegra186.dtsi
arch/arm64/boot/dts/sprd/whale2.dtsi
arch/mips/Kconfig
arch/mips/alchemy/common/platform.c
arch/mips/generic/init.c
arch/mips/include/asm/mach-ip27/topology.h
arch/mips/include/asm/pci/bridge.h
arch/mips/include/asm/sn/irq_alloc.h [new file with mode: 0644]
arch/mips/include/asm/xtalk/xtalk.h
arch/mips/kernel/cpu-probe.c
arch/mips/kernel/perf_event_mipsxx.c
arch/mips/pci/Makefile
arch/mips/pci/ops-bridge.c [deleted file]
arch/mips/pci/pci-ip27.c
arch/mips/pci/pci-xtalk-bridge.c [new file with mode: 0644]
arch/mips/sgi-ip22/ip22-platform.c
arch/mips/sgi-ip27/ip27-init.c
arch/mips/sgi-ip27/ip27-irq.c
arch/mips/sgi-ip27/ip27-xtalk.c
arch/powerpc/include/asm/book3s/64/hash.h
arch/powerpc/include/asm/mmu_context.h
arch/powerpc/kernel/cacheinfo.c
arch/powerpc/mm/book3s32/hash_low.S
arch/powerpc/mm/hugetlbpage.c
arch/riscv/Kconfig
arch/riscv/Makefile
arch/riscv/include/asm/Kbuild
arch/riscv/include/asm/bug.h
arch/riscv/include/asm/cacheflush.h
arch/riscv/include/asm/csr.h
arch/riscv/include/asm/elf.h
arch/riscv/include/asm/futex.h
arch/riscv/include/asm/irqflags.h
arch/riscv/include/asm/mmu_context.h
arch/riscv/include/asm/ptrace.h
arch/riscv/include/asm/sbi.h
arch/riscv/include/asm/sifive_l2_cache.h [new file with mode: 0644]
arch/riscv/include/asm/thread_info.h
arch/riscv/include/asm/uaccess.h
arch/riscv/kernel/asm-offsets.c
arch/riscv/kernel/cpu.c
arch/riscv/kernel/entry.S
arch/riscv/kernel/head.S
arch/riscv/kernel/irq.c
arch/riscv/kernel/perf_event.c
arch/riscv/kernel/reset.c
arch/riscv/kernel/setup.c
arch/riscv/kernel/signal.c
arch/riscv/kernel/smp.c
arch/riscv/kernel/smpboot.c
arch/riscv/kernel/stacktrace.c
arch/riscv/kernel/traps.c
arch/riscv/kernel/vdso/Makefile
arch/riscv/mm/Makefile
arch/riscv/mm/cacheflush.c
arch/riscv/mm/context.c [new file with mode: 0644]
arch/riscv/mm/fault.c
arch/riscv/mm/sifive_l2_cache.c [new file with mode: 0644]
arch/um/include/asm/mmu_context.h
arch/unicore32/include/asm/mmu_context.h
arch/x86/include/asm/mmu_context.h
arch/x86/include/asm/mpx.h
arch/x86/mm/mpx.c
drivers/amba/tegra-ahb.c
drivers/clocksource/Kconfig
drivers/clocksource/Makefile
drivers/clocksource/tcb_clksrc.c [deleted file]
drivers/clocksource/timer-atmel-tcb.c [new file with mode: 0644]
drivers/clocksource/timer-milbeaut.c
drivers/clocksource/timer-sun4i.c
drivers/clocksource/timer-tegra20.c
drivers/firmware/ti_sci.c
drivers/firmware/ti_sci.h
drivers/gpio/gpio-thunderx.c
drivers/i2c/i2c-core-base.c
drivers/iommu/Kconfig
drivers/iommu/dma-iommu.c
drivers/irqchip/Kconfig
drivers/irqchip/Makefile
drivers/irqchip/irq-bcm7038-l1.c
drivers/irqchip/irq-bcm7120-l2.c
drivers/irqchip/irq-brcmstb-l2.c
drivers/irqchip/irq-gic-pm.c
drivers/irqchip/irq-gic-v2m.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-gic-v3-mbi.c
drivers/irqchip/irq-imx-irqsteer.c
drivers/irqchip/irq-ls-scfg-msi.c
drivers/irqchip/irq-renesas-intc-irqpin.c
drivers/irqchip/irq-stm32-exti.c
drivers/irqchip/irq-ti-sci-inta.c [new file with mode: 0644]
drivers/irqchip/irq-ti-sci-intr.c [new file with mode: 0644]
drivers/misc/Kconfig
drivers/misc/atmel_tclib.c
drivers/pwm/pwm-atmel-tcb.c
drivers/soc/fsl/qe/gpio.c
drivers/soc/ixp4xx/ixp4xx-qmgr.c
drivers/soc/ti/Kconfig
drivers/soc/ti/Makefile
drivers/soc/ti/ti_sci_inta_msi.c [new file with mode: 0644]
drivers/tty/hvc/hvc_riscv_sbi.c
drivers/video/fbdev/efifb.c
fs/cifs/cifs_debug.c
fs/cifs/cifsfs.c
fs/cifs/cifsglob.h
fs/cifs/connect.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/cifs/smbdirect.c
fs/cifs/transport.c
fs/ext4/block_validity.c
fs/ext4/extents.c
fs/ext4/file.c
fs/ext4/fsmap.c
fs/ext4/ioctl.c
fs/ext4/namei.c
fs/ext4/super.c
fs/jbd2/journal.c
fs/jbd2/revoke.c
fs/jbd2/transaction.c
fs/unicode/README.utf8data
fs/unicode/utf8-norm.c
include/asm-generic/mm_hooks.h
include/linux/atmel_tc.h [deleted file]
include/linux/clk/at91_pmc.h
include/linux/dma-iommu.h
include/linux/i2c.h
include/linux/irq.h
include/linux/irqchip/arm-gic-v3.h
include/linux/irqdomain.h
include/linux/jbd2.h
include/linux/msi.h
include/linux/platform_data/xtalk-bridge.h [new file with mode: 0644]
include/linux/soc/ti/ti_sci_inta_msi.h [new file with mode: 0644]
include/linux/soc/ti/ti_sci_protocol.h
include/soc/at91/atmel_tcb.h [new file with mode: 0644]
kernel/irq/Kconfig
kernel/irq/chip.c
kernel/irq/irqdomain.c
mm/mmap.c
tools/arch/x86/include/uapi/asm/kvm.h
tools/arch/x86/include/uapi/asm/perf_regs.h
tools/arch/x86/lib/memcpy_64.S
tools/lib/traceevent/Documentation/Makefile [new file with mode: 0644]
tools/lib/traceevent/Documentation/asciidoc.conf [new file with mode: 0644]
tools/lib/traceevent/Documentation/libtraceevent-commands.txt [new file with mode: 0644]
tools/lib/traceevent/Documentation/libtraceevent-cpus.txt [new file with mode: 0644]
tools/lib/traceevent/Documentation/libtraceevent-endian_read.txt [new file with mode: 0644]
tools/lib/traceevent/Documentation/libtraceevent-event_find.txt [new file with mode: 0644]
tools/lib/traceevent/Documentation/libtraceevent-event_get.txt [new file with mode: 0644]
tools/lib/traceevent/Documentation/libtraceevent-event_list.txt [new file with mode: 0644]
tools/lib/traceevent/Documentation/libtraceevent-field_find.txt [new file with mode: 0644]
tools/lib/traceevent/Documentation/libtraceevent-field_get_val.txt [new file with mode: 0644]
tools/lib/traceevent/Documentation/libtraceevent-field_print.txt [new file with mode: 0644]
tools/lib/traceevent/Documentation/libtraceevent-field_read.txt [new file with mode: 0644]
tools/lib/traceevent/Documentation/libtraceevent-fields.txt [new file with mode: 0644]
tools/lib/traceevent/Documentation/libtraceevent-file_endian.txt [new file with mode: 0644]
tools/lib/traceevent/Documentation/libtraceevent-filter.txt [new file with mode: 0644]
tools/lib/traceevent/Documentation/libtraceevent-func_apis.txt [new file with mode: 0644]
tools/lib/traceevent/Documentation/libtraceevent-func_find.txt [new file with mode: 0644]
tools/lib/traceevent/Documentation/libtraceevent-handle.txt [new file with mode: 0644]
tools/lib/traceevent/Documentation/libtraceevent-header_page.txt [new file with mode: 0644]
tools/lib/traceevent/Documentation/libtraceevent-host_endian.txt [new file with mode: 0644]
tools/lib/traceevent/Documentation/libtraceevent-long_size.txt [new file with mode: 0644]
tools/lib/traceevent/Documentation/libtraceevent-page_size.txt [new file with mode: 0644]
tools/lib/traceevent/Documentation/libtraceevent-parse_event.txt [new file with mode: 0644]
tools/lib/traceevent/Documentation/libtraceevent-parse_head.txt [new file with mode: 0644]
tools/lib/traceevent/Documentation/libtraceevent-record_parse.txt [new file with mode: 0644]
tools/lib/traceevent/Documentation/libtraceevent-reg_event_handler.txt [new file with mode: 0644]
tools/lib/traceevent/Documentation/libtraceevent-reg_print_func.txt [new file with mode: 0644]
tools/lib/traceevent/Documentation/libtraceevent-set_flag.txt [new file with mode: 0644]
tools/lib/traceevent/Documentation/libtraceevent-strerror.txt [new file with mode: 0644]
tools/lib/traceevent/Documentation/libtraceevent-tseq.txt [new file with mode: 0644]
tools/lib/traceevent/Documentation/libtraceevent.txt [new file with mode: 0644]
tools/lib/traceevent/Documentation/manpage-1.72.xsl [new file with mode: 0644]
tools/lib/traceevent/Documentation/manpage-base.xsl [new file with mode: 0644]
tools/lib/traceevent/Documentation/manpage-bold-literal.xsl [new file with mode: 0644]
tools/lib/traceevent/Documentation/manpage-normal.xsl [new file with mode: 0644]
tools/lib/traceevent/Documentation/manpage-suppress-sp.xsl [new file with mode: 0644]
tools/lib/traceevent/Makefile
tools/lib/traceevent/libtraceevent.pc.template
tools/objtool/Makefile
tools/pci/Makefile
tools/perf/Documentation/perf-list.txt
tools/perf/Documentation/perf-record.txt
tools/perf/Documentation/perf-stat.txt
tools/perf/Documentation/perf.data-file-format.txt
tools/perf/Documentation/perf.txt
tools/perf/arch/x86/include/perf_regs.h
tools/perf/arch/x86/util/perf_regs.c
tools/perf/builtin-annotate.c
tools/perf/builtin-inject.c
tools/perf/builtin-record.c
tools/perf/builtin-report.c
tools/perf/builtin-stat.c
tools/perf/perf.h
tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/core-imp-def.json [new file with mode: 0644]
tools/perf/pmu-events/arch/arm64/mapfile.csv
tools/perf/pmu-events/jevents.c
tools/perf/scripts/python/exported-sql-viewer.py
tools/perf/tests/dso-data.c
tools/perf/tests/make
tools/perf/tests/shell/record+zstd_comp_decomp.sh [new file with mode: 0755]
tools/perf/util/Build
tools/perf/util/annotate.c
tools/perf/util/compress.h
tools/perf/util/env.h
tools/perf/util/event.c
tools/perf/util/event.h
tools/perf/util/evlist.c
tools/perf/util/evlist.h
tools/perf/util/evsel.c
tools/perf/util/evsel.h
tools/perf/util/header.c
tools/perf/util/header.h
tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
tools/perf/util/machine.c
tools/perf/util/mmap.c
tools/perf/util/mmap.h
tools/perf/util/parse-events.c
tools/perf/util/parse-events.h
tools/perf/util/parse-events.l
tools/perf/util/parse-regs-options.c
tools/perf/util/parse-regs-options.h
tools/perf/util/perf_regs.c
tools/perf/util/perf_regs.h
tools/perf/util/session.c
tools/perf/util/session.h
tools/perf/util/stat-display.c
tools/perf/util/stat.c
tools/perf/util/thread.c
tools/perf/util/tool.h
tools/perf/util/unwind-libunwind-local.c
tools/perf/util/unwind-libunwind.c
tools/perf/util/zstd.c [new file with mode: 0644]

index cca6d87..a64d219 100644 (file)
@@ -1 +1,2 @@
 Christoph Hellwig <hch@lst.de>
+Marc Gonzalez <marc.w.gonzalez@free.fr>
index 4bf1b4d..99dee23 100644 (file)
@@ -25,6 +25,7 @@ compatible: must be one of:
     o "atmel,at91sam9n12"
     o "atmel,at91sam9rl"
     o "atmel,at91sam9xe"
+    o "microchip,sam9x60"
  * "atmel,sama5" for SoCs using a Cortex-A5, shall be extended with the specific
    SoC family:
     o "atmel,sama5d2" shall be extended with the specific SoC compatible:
index b56a02c..6f0cd31 100644 (file)
@@ -24,7 +24,8 @@ relationship between the TI-SCI parent node to the child node.
 
 Required properties:
 -------------------
-- compatible: should be "ti,k2g-sci"
+- compatible:  should be "ti,k2g-sci" for TI 66AK2G SoC
+               should be "ti,am654-sci" for TI AM654 SoC
 - mbox-names:
        "rx" - Mailbox corresponding to receive path
        "tx" - Mailbox corresponding to transmit path
diff --git a/Documentation/devicetree/bindings/interrupt-controller/ti,sci-inta.txt b/Documentation/devicetree/bindings/interrupt-controller/ti,sci-inta.txt
new file mode 100644 (file)
index 0000000..7841cb0
--- /dev/null
@@ -0,0 +1,66 @@
+Texas Instruments K3 Interrupt Aggregator
+=========================================
+
+The Interrupt Aggregator (INTA) provides a centralized machine
+which handles the termination of system events so that they can
+be coherently processed by the host(s) in the system. A maximum
+of 64 events can be mapped to a single interrupt.
+
+
+                              Interrupt Aggregator
+                     +-----------------------------------------+
+                     |      Intmap            VINT             |
+                     | +--------------+  +------------+        |
+            m ------>| | vint  | bit  |  | 0 |.....|63| vint0  |
+               .     | +--------------+  +------------+        |       +------+
+               .     |         .               .               |       | HOST |
+Globalevents  ------>|         .               .               |------>| IRQ  |
+               .     |         .               .               |       | CTRL |
+               .     |         .               .               |       +------+
+            n ------>| +--------------+  +------------+        |
+                     | | vint  | bit  |  | 0 |.....|63| vintx  |
+                     | +--------------+  +------------+        |
+                     |                                         |
+                     +-----------------------------------------+
+
+Configuration of these Intmap registers that map global events to vints is done
+by a system controller (like the Device Memory and Security Controller on K3
+AM654 SoC). The driver should request the system controller to get the range
+of global events and vints assigned to the requesting host. Management
+of these requested resources should be handled by the driver, which requests
+the system controller to map a specific global event to a (vint, bit) pair.
+
+Communication between the host processor running an OS and the system
+controller happens through a protocol called TI System Control Interface
+(TISCI protocol). For more details refer:
+Documentation/devicetree/bindings/arm/keystone/ti,sci.txt
+
+TISCI Interrupt Aggregator Node:
+-------------------------------
+- compatible:          Must be "ti,sci-inta".
+- reg:                 Should contain registers location and length.
+- interrupt-controller:        Identifies the node as an interrupt controller
+- msi-controller:      Identifies the node as an MSI controller.
+- interrupt-parent:    phandle of irq parent.
+- ti,sci:              Phandle to TI-SCI compatible System controller node.
+- ti,sci-dev-id:       TISCI device ID of the Interrupt Aggregator.
+- ti,sci-rm-range-vint:        Array of TISCI subtype ids representing vints(inta
+                       outputs) range within this INTA, assigned to the
+                       requesting host context.
+- ti,sci-rm-range-global-event:        Array of TISCI subtype ids representing the
+                       global events range reaching this IA and are assigned
+                       to the requesting host context.
+
+Example:
+--------
+main_udmass_inta: interrupt-controller@33d00000 {
+       compatible = "ti,sci-inta";
+       reg = <0x0 0x33d00000 0x0 0x100000>;
+       interrupt-controller;
+       msi-controller;
+       interrupt-parent = <&main_navss_intr>;
+       ti,sci = <&dmsc>;
+       ti,sci-dev-id = <179>;
+       ti,sci-rm-range-vint = <0x0>;
+       ti,sci-rm-range-global-event = <0x1>;
+};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.txt b/Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.txt
new file mode 100644 (file)
index 0000000..1a8718f
--- /dev/null
@@ -0,0 +1,82 @@
+Texas Instruments K3 Interrupt Router
+=====================================
+
+The Interrupt Router (INTR) module provides a mechanism to mux M
+interrupt inputs to N interrupt outputs, where any of the M inputs can be
+routed to each of the N outputs. An Interrupt Router handles either edge
+triggered or level triggered interrupts; which of the two is fixed in hardware.
+
+                                 Interrupt Router
+                             +----------------------+
+                             |  Inputs     Outputs  |
+        +-------+            | +------+    +-----+  |
+        | GPIO  |----------->| | irq0 |    |  0  |  |       Host IRQ
+        +-------+            | +------+    +-----+  |      controller
+                             |    .           .     |      +-------+
+        +-------+            |    .           .     |----->|  IRQ  |
+        | INTA  |----------->|    .           .     |      +-------+
+        +-------+            |    .        +-----+  |
+                             | +------+    |  N  |  |
+                             | | irqM |    +-----+  |
+                             | +------+             |
+                             |                      |
+                             +----------------------+
+
+There is one register per output (MUXCNTL_N) that controls the selection.
+Configuration of these MUXCNTL_N registers is done by a system controller
+(like the Device Memory and Security Controller on K3 AM654 SoC). The system
+controller keeps track of the used and unused registers within the Router.
+The driver should request the system controller to get the range of GIC IRQs
+assigned to the requesting hosts. It is the driver's responsibility to keep
+track of Host IRQs.
+
+Communication between the host processor running an OS and the system
+controller happens through a protocol called TI System Control Interface
+(TISCI protocol). For more details refer:
+Documentation/devicetree/bindings/arm/keystone/ti,sci.txt
+
+TISCI Interrupt Router Node:
+----------------------------
+Required Properties:
+- compatible:          Must be "ti,sci-intr".
+- ti,intr-trigger-type:        Should be one of the following:
+                       1: If intr supports edge triggered interrupts.
+                       4: If intr supports level triggered interrupts.
+- interrupt-controller:        Identifies the node as an interrupt controller
+- #interrupt-cells:    Specifies the number of cells needed to encode an
+                       interrupt source. The value should be 2.
+                       First cell should contain the TISCI device ID of source
+                       Second cell should contain the interrupt source offset
+                       within the device.
+- ti,sci:              Phandle to TI-SCI compatible System controller node.
+- ti,sci-dst-id:       TISCI device ID of the destination IRQ controller.
+- ti,sci-rm-range-girq:        Array of TISCI subtype ids representing the host irqs
+                       assigned to this interrupt router. Each subtype id
+                       corresponds to a range of host irqs.
+
+For more details on TISCI IRQ resource management refer:
+http://downloads.ti.com/tisci/esd/latest/2_tisci_msgs/rm/rm_irq.html
+
+Example:
+--------
+The following example demonstrates both interrupt router node and the consumer
+node(main gpio) on the AM654 SoC:
+
+main_intr: interrupt-controller0 {
+       compatible = "ti,sci-intr";
+       ti,intr-trigger-type = <1>;
+       interrupt-controller;
+       interrupt-parent = <&gic500>;
+       #interrupt-cells = <2>;
+       ti,sci = <&dmsc>;
+       ti,sci-dst-id = <56>;
+       ti,sci-rm-range-girq = <0x1>;
+};
+
+main_gpio0: gpio@600000 {
+       ...
+       interrupt-parent = <&main_intr>;
+       interrupts = <57 256>, <57 257>, <57 258>,
+                    <57 259>, <57 260>, <57 261>;
+       ...
+};
diff --git a/Documentation/devicetree/bindings/riscv/sifive-l2-cache.txt b/Documentation/devicetree/bindings/riscv/sifive-l2-cache.txt
new file mode 100644 (file)
index 0000000..73d8f19
--- /dev/null
@@ -0,0 +1,51 @@
+SiFive L2 Cache Controller
+--------------------------
+The SiFive Level 2 Cache Controller is used to provide access to fast copies
+of memory for masters in a Core Complex. The Level 2 Cache Controller also
+acts as a directory-based coherency manager.
+All the properties in the ePAPR/DeviceTree specification apply to this platform.
+
+Required Properties:
+--------------------
+- compatible: Should be "sifive,fu540-c000-ccache" and "cache"
+
+- cache-block-size: Specifies the block size in bytes of the cache.
+  Should be 64
+
+- cache-level: Should be set to 2 for a level 2 cache
+
+- cache-sets: Specifies the number of associativity sets of the cache.
+  Should be 1024
+
+- cache-size: Specifies the size in bytes of the cache. Should be 2097152
+
+- cache-unified: Specifies the cache is a unified cache
+
+- interrupts: Must contain 3 entries (DirError, DataError and DataFail signals)
+
+- reg: Physical base address and size of L2 cache controller registers map
+
+Optional Properties:
+--------------------
+- next-level-cache: phandle to the next level cache if present.
+
+- memory-region: reference to the reserved-memory for the L2 Loosely Integrated
+  Memory region. The reserved memory node should be defined as per the bindings
+  in reserved-memory.txt
+
+
+Example:
+
+       cache-controller@2010000 {
+               compatible = "sifive,fu540-c000-ccache", "cache";
+               cache-block-size = <64>;
+               cache-level = <2>;
+               cache-sets = <1024>;
+               cache-size = <2097152>;
+               cache-unified;
+               interrupt-parent = <&plic0>;
+               interrupts = <1 2 3>;
+               reg = <0x0 0x2010000 0x0 0x1000>;
+               next-level-cache = <&L25 &L40 &L36>;
+               memory-region = <&l2_lim>;
+       };
index 5c2e235..3da9d51 100644 (file)
@@ -2,7 +2,9 @@ Allwinner A1X SoCs Timer Controller
 
 Required properties:
 
-- compatible : should be "allwinner,sun4i-a10-timer"
+- compatible : should be one of the following:
+              "allwinner,sun4i-a10-timer"
+              "allwinner,suniv-f1c100s-timer"
 - reg : Specifies base physical address and size of the registers.
 - interrupts : The interrupt of the first timer
 - clocks: phandle to the source clock (usually a 24 MHz fixed clock)
index 99994a4..69c7fa7 100644 (file)
@@ -271,6 +271,9 @@ GPIO
   devm_gpio_request_one()
   devm_gpio_free()
 
+I2C
+  devm_i2c_new_dummy_device()
+
 IIO
   devm_iio_device_alloc()
   devm_iio_device_free()
index 1aa19b9..5cfbea4 100644 (file)
@@ -15548,6 +15548,12 @@ F:     Documentation/devicetree/bindings/reset/ti,sci-reset.txt
 F:     Documentation/devicetree/bindings/clock/ti,sci-clk.txt
 F:     drivers/clk/keystone/sci-clk.c
 F:     drivers/reset/reset-ti-sci.c
+F:     Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.txt
+F:     Documentation/devicetree/bindings/interrupt-controller/ti,sci-inta.txt
+F:     drivers/irqchip/irq-ti-sci-intr.c
+F:     drivers/irqchip/irq-ti-sci-inta.c
+F:     include/linux/soc/ti/ti_sci_inta_msi.h
+F:     drivers/soc/ti/ti_sci_inta_msi.c
 
 Texas Instruments ASoC drivers
 M:     Peter Ujfalusi <peter.ujfalusi@ti.com>
index 4990ed9..3e39b9a 100644 (file)
        pinctrl-names = "default";
        pinctrl-0 = <&mmc1_pins>;
        wp-gpios = <&gpio4 30 GPIO_ACTIVE_HIGH>;                /* gpio_126 */
-       cd-gpios = <&gpio4 14 IRQ_TYPE_LEVEL_LOW>;              /* gpio_110 */
+       cd-gpios = <&gpio4 14 GPIO_ACTIVE_LOW>;                 /* gpio_110 */
        vmmc-supply = <&vmmc1>;
        bus-width = <4>;
        cap-power-off-card;
index 515cb37..d5341b0 100644 (file)
@@ -150,7 +150,7 @@ CONFIG_MEDIA_CAMERA_SUPPORT=y
 CONFIG_V4L_PLATFORM_DRIVERS=y
 CONFIG_SOC_CAMERA=y
 CONFIG_VIDEO_ATMEL_ISI=y
-CONFIG_SOC_CAMERA_OV2640=y
+CONFIG_SOC_CAMERA_OV2640=m
 CONFIG_DRM=y
 CONFIG_DRM_ATMEL_HLCDC=y
 CONFIG_DRM_PANEL_SIMPLE=y
index 9d42cfe..6701a97 100644 (file)
@@ -21,7 +21,6 @@ CONFIG_NEON=y
 CONFIG_OPROFILE=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
-# CONFIG_LBDAF is not set
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_NET=y
 CONFIG_PACKET=y
@@ -128,6 +127,8 @@ CONFIG_RTC_DRV_DS1307=y
 CONFIG_DMADEVICES=y
 CONFIG_PL330_DMA=y
 CONFIG_DMATEST=m
+CONFIG_IIO=y
+CONFIG_LTC2497=y
 CONFIG_FPGA=y
 CONFIG_FPGA_MGR_SOCFPGA=y
 CONFIG_FPGA_MGR_SOCFPGA_A10=y
index 903f23c..a2220e5 100644 (file)
@@ -21,7 +21,6 @@ config SOC_SAMA5D2
        depends on ARCH_MULTI_V7
        select SOC_SAMA5
        select CACHE_L2X0
-       select HAVE_FB_ATMEL
        select HAVE_AT91_UTMI
        select HAVE_AT91_USB_CLK
        select HAVE_AT91_H32MX
@@ -36,7 +35,6 @@ config SOC_SAMA5D3
        bool "SAMA5D3 family"
        depends on ARCH_MULTI_V7
        select SOC_SAMA5
-       select HAVE_FB_ATMEL
        select HAVE_AT91_UTMI
        select HAVE_AT91_SMD
        select HAVE_AT91_USB_CLK
@@ -50,7 +48,6 @@ config SOC_SAMA5D4
        depends on ARCH_MULTI_V7
        select SOC_SAMA5
        select CACHE_L2X0
-       select HAVE_FB_ATMEL
        select HAVE_AT91_UTMI
        select HAVE_AT91_SMD
        select HAVE_AT91_USB_CLK
@@ -107,6 +104,29 @@ config SOC_AT91SAM9
            AT91SAM9X35
            AT91SAM9XE
 
+comment "Clocksource driver selection"
+
+config ATMEL_CLOCKSOURCE_PIT
+       bool "Periodic Interval Timer (PIT) support"
+       depends on SOC_AT91SAM9 || SOC_SAMA5
+       default SOC_AT91SAM9 || SOC_SAMA5
+       select ATMEL_PIT
+       help
+         Select this to get a clocksource based on the Atmel Periodic Interval
+         Timer. It has a relatively low resolution and the TC Block clocksource
+         should be preferred.
+
+config ATMEL_CLOCKSOURCE_TCB
+       bool "Timer Counter Blocks (TCB) support"
+       default SOC_AT91RM9200 || SOC_AT91SAM9 || SOC_SAMA5
+       select ATMEL_TCB_CLKSRC
+       help
+         Select this to get a high precision clocksource based on a
+         TC block with a 5+ MHz base clock rate.
+         On platforms with 16-bit counters, two timer channels are combined
+         to make a single 32-bit timer.
+         It can also be used as a clock event device supporting oneshot mode.
+
 config HAVE_AT91_UTMI
        bool
 
index 3dbdef4..c12563b 100644 (file)
@@ -32,3 +32,21 @@ DT_MACHINE_START(at91sam_dt, "Atmel AT91SAM9")
        .init_machine   = at91sam9_init,
        .dt_compat      = at91_dt_board_compat,
 MACHINE_END
+
+static void __init sam9x60_init(void)
+{
+       of_platform_default_populate(NULL, NULL, NULL);
+
+       sam9x60_pm_init();
+}
+
+static const char *const sam9x60_dt_board_compat[] __initconst = {
+       "microchip,sam9x60",
+       NULL
+};
+
+DT_MACHINE_START(sam9x60_dt, "Microchip SAM9X60")
+       /* Maintainer: Microchip */
+       .init_machine   = sam9x60_init,
+       .dt_compat      = sam9x60_dt_board_compat,
+MACHINE_END
index e2bd172..72b45ac 100644 (file)
 #ifdef CONFIG_PM
 extern void __init at91rm9200_pm_init(void);
 extern void __init at91sam9_pm_init(void);
+extern void __init sam9x60_pm_init(void);
 extern void __init sama5_pm_init(void);
 extern void __init sama5d2_pm_init(void);
 #else
 static inline void __init at91rm9200_pm_init(void) { }
 static inline void __init at91sam9_pm_init(void) { }
+static inline void __init sam9x60_pm_init(void) { }
 static inline void __init sama5_pm_init(void) { }
 static inline void __init sama5d2_pm_init(void) { }
 #endif
index 2a757dc..6c81475 100644 (file)
@@ -39,6 +39,20 @@ extern void at91_pinctrl_gpio_suspend(void);
 extern void at91_pinctrl_gpio_resume(void);
 #endif
 
+struct at91_soc_pm {
+       int (*config_shdwc_ws)(void __iomem *shdwc, u32 *mode, u32 *polarity);
+       int (*config_pmc_ws)(void __iomem *pmc, u32 mode, u32 polarity);
+       const struct of_device_id *ws_ids;
+       struct at91_pm_data data;
+};
+
+static struct at91_soc_pm soc_pm = {
+       .data = {
+               .standby_mode = AT91_PM_STANDBY,
+               .suspend_mode = AT91_PM_ULP0,
+       },
+};
+
 static const match_table_t pm_modes __initconst = {
        { AT91_PM_STANDBY, "standby" },
        { AT91_PM_ULP0, "ulp0" },
@@ -47,16 +61,11 @@ static const match_table_t pm_modes __initconst = {
        { -1, NULL },
 };
 
-static struct at91_pm_data pm_data = {
-       .standby_mode = AT91_PM_STANDBY,
-       .suspend_mode = AT91_PM_ULP0,
-};
-
 #define at91_ramc_read(id, field) \
-       __raw_readl(pm_data.ramc[id] + field)
+       __raw_readl(soc_pm.data.ramc[id] + field)
 
 #define at91_ramc_write(id, field, value) \
-       __raw_writel(value, pm_data.ramc[id] + field)
+       __raw_writel(value, soc_pm.data.ramc[id] + field)
 
 static int at91_pm_valid_state(suspend_state_t state)
 {
@@ -91,6 +100,8 @@ static const struct wakeup_source_info ws_info[] = {
        { .pmc_fsmr_bit = AT91_PMC_RTCAL,       .shdwc_mr_bit = BIT(17) },
        { .pmc_fsmr_bit = AT91_PMC_USBAL },
        { .pmc_fsmr_bit = AT91_PMC_SDMMC_CD },
+       { .pmc_fsmr_bit = AT91_PMC_RTTAL },
+       { .pmc_fsmr_bit = AT91_PMC_RXLP_MCE },
 };
 
 static const struct of_device_id sama5d2_ws_ids[] = {
@@ -105,6 +116,17 @@ static const struct of_device_id sama5d2_ws_ids[] = {
        { /* sentinel */ }
 };
 
+static const struct of_device_id sam9x60_ws_ids[] = {
+       { .compatible = "atmel,at91sam9x5-rtc",         .data = &ws_info[1] },
+       { .compatible = "atmel,at91rm9200-ohci",        .data = &ws_info[2] },
+       { .compatible = "usb-ohci",                     .data = &ws_info[2] },
+       { .compatible = "atmel,at91sam9g45-ehci",       .data = &ws_info[2] },
+       { .compatible = "usb-ehci",                     .data = &ws_info[2] },
+       { .compatible = "atmel,at91sam9260-rtt",        .data = &ws_info[4] },
+       { .compatible = "cdns,sam9x60-macb",            .data = &ws_info[5] },
+       { /* sentinel */ }
+};
+
 static int at91_pm_config_ws(unsigned int pm_mode, bool set)
 {
        const struct wakeup_source_info *wsi;
@@ -116,24 +138,22 @@ static int at91_pm_config_ws(unsigned int pm_mode, bool set)
        if (pm_mode != AT91_PM_ULP1)
                return 0;
 
-       if (!pm_data.pmc || !pm_data.shdwc)
+       if (!soc_pm.data.pmc || !soc_pm.data.shdwc || !soc_pm.ws_ids)
                return -EPERM;
 
        if (!set) {
-               writel(mode, pm_data.pmc + AT91_PMC_FSMR);
+               writel(mode, soc_pm.data.pmc + AT91_PMC_FSMR);
                return 0;
        }
 
-       /* SHDWC.WUIR */
-       val = readl(pm_data.shdwc + 0x0c);
-       mode |= (val & 0x3ff);
-       polarity |= ((val >> 16) & 0x3ff);
+       if (soc_pm.config_shdwc_ws)
+               soc_pm.config_shdwc_ws(soc_pm.data.shdwc, &mode, &polarity);
 
        /* SHDWC.MR */
-       val = readl(pm_data.shdwc + 0x04);
+       val = readl(soc_pm.data.shdwc + 0x04);
 
        /* Loop through defined wakeup sources. */
-       for_each_matching_node_and_match(np, sama5d2_ws_ids, &match) {
+       for_each_matching_node_and_match(np, soc_pm.ws_ids, &match) {
                pdev = of_find_device_by_node(np);
                if (!pdev)
                        continue;
@@ -155,8 +175,8 @@ put_device:
        }
 
        if (mode) {
-               writel(mode, pm_data.pmc + AT91_PMC_FSMR);
-               writel(polarity, pm_data.pmc + AT91_PMC_FSPR);
+               if (soc_pm.config_pmc_ws)
+                       soc_pm.config_pmc_ws(soc_pm.data.pmc, mode, polarity);
        } else {
                pr_err("AT91: PM: no ULP1 wakeup sources found!");
        }
@@ -164,6 +184,34 @@ put_device:
        return mode ? 0 : -EPERM;
 }
 
+static int at91_sama5d2_config_shdwc_ws(void __iomem *shdwc, u32 *mode,
+                                       u32 *polarity)
+{
+       u32 val;
+
+       /* SHDWC.WUIR */
+       val = readl(shdwc + 0x0c);
+       *mode |= (val & 0x3ff);
+       *polarity |= ((val >> 16) & 0x3ff);
+
+       return 0;
+}
+
+static int at91_sama5d2_config_pmc_ws(void __iomem *pmc, u32 mode, u32 polarity)
+{
+       writel(mode, pmc + AT91_PMC_FSMR);
+       writel(polarity, pmc + AT91_PMC_FSPR);
+
+       return 0;
+}
+
+static int at91_sam9x60_config_pmc_ws(void __iomem *pmc, u32 mode, u32 polarity)
+{
+       writel(mode, pmc + AT91_PMC_FSMR);
+
+       return 0;
+}
+
 /*
  * Called after processes are frozen, but before we shutdown devices.
  */
@@ -171,18 +219,18 @@ static int at91_pm_begin(suspend_state_t state)
 {
        switch (state) {
        case PM_SUSPEND_MEM:
-               pm_data.mode = pm_data.suspend_mode;
+               soc_pm.data.mode = soc_pm.data.suspend_mode;
                break;
 
        case PM_SUSPEND_STANDBY:
-               pm_data.mode = pm_data.standby_mode;
+               soc_pm.data.mode = soc_pm.data.standby_mode;
                break;
 
        default:
-               pm_data.mode = -1;
+               soc_pm.data.mode = -1;
        }
 
-       return at91_pm_config_ws(pm_data.mode, true);
+       return at91_pm_config_ws(soc_pm.data.mode, true);
 }
 
 /*
@@ -194,10 +242,10 @@ static int at91_pm_verify_clocks(void)
        unsigned long scsr;
        int i;
 
-       scsr = readl(pm_data.pmc + AT91_PMC_SCSR);
+       scsr = readl(soc_pm.data.pmc + AT91_PMC_SCSR);
 
        /* USB must not be using PLLB */
-       if ((scsr & pm_data.uhp_udp_mask) != 0) {
+       if ((scsr & soc_pm.data.uhp_udp_mask) != 0) {
                pr_err("AT91: PM - Suspend-to-RAM with USB still active\n");
                return 0;
        }
@@ -208,7 +256,7 @@ static int at91_pm_verify_clocks(void)
 
                if ((scsr & (AT91_PMC_PCK0 << i)) == 0)
                        continue;
-               css = readl(pm_data.pmc + AT91_PMC_PCKR(i)) & AT91_PMC_CSS;
+               css = readl(soc_pm.data.pmc + AT91_PMC_PCKR(i)) & AT91_PMC_CSS;
                if (css != AT91_PMC_CSS_SLOW) {
                        pr_err("AT91: PM - Suspend-to-RAM with PCK%d src %d\n", i, css);
                        return 0;
@@ -230,7 +278,7 @@ static int at91_pm_verify_clocks(void)
  */
 int at91_suspend_entering_slow_clock(void)
 {
-       return (pm_data.mode >= AT91_PM_ULP0);
+       return (soc_pm.data.mode >= AT91_PM_ULP0);
 }
 EXPORT_SYMBOL(at91_suspend_entering_slow_clock);
 
@@ -243,14 +291,14 @@ static int at91_suspend_finish(unsigned long val)
        flush_cache_all();
        outer_disable();
 
-       at91_suspend_sram_fn(&pm_data);
+       at91_suspend_sram_fn(&soc_pm.data);
 
        return 0;
 }
 
 static void at91_pm_suspend(suspend_state_t state)
 {
-       if (pm_data.mode == AT91_PM_BACKUP) {
+       if (soc_pm.data.mode == AT91_PM_BACKUP) {
                pm_bu->suspended = 1;
 
                cpu_suspend(0, at91_suspend_finish);
@@ -289,7 +337,7 @@ static int at91_pm_enter(suspend_state_t state)
                /*
                 * Ensure that clocks are in a valid state.
                 */
-               if (pm_data.mode >= AT91_PM_ULP0 &&
+               if (soc_pm.data.mode >= AT91_PM_ULP0 &&
                    !at91_pm_verify_clocks())
                        goto error;
 
@@ -318,7 +366,7 @@ error:
  */
 static void at91_pm_end(void)
 {
-       at91_pm_config_ws(pm_data.mode, false);
+       at91_pm_config_ws(soc_pm.data.mode, false);
 }
 
 
@@ -351,7 +399,7 @@ static void at91rm9200_standby(void)
                "    str    %2, [%1, %3]\n\t"
                "    mcr    p15, 0, %0, c7, c0, 4\n\t"
                :
-               : "r" (0), "r" (pm_data.ramc[0]),
+               : "r" (0), "r" (soc_pm.data.ramc[0]),
                  "r" (1), "r" (AT91_MC_SDRAMC_SRR));
 }
 
@@ -374,7 +422,7 @@ static void at91_ddr_standby(void)
                at91_ramc_write(0, AT91_DDRSDRC_MDR, mdr);
        }
 
-       if (pm_data.ramc[1]) {
+       if (soc_pm.data.ramc[1]) {
                saved_lpr1 = at91_ramc_read(1, AT91_DDRSDRC_LPR);
                lpr1 = saved_lpr1 & ~AT91_DDRSDRC_LPCB;
                lpr1 |= AT91_DDRSDRC_LPCB_SELF_REFRESH;
@@ -392,14 +440,14 @@ static void at91_ddr_standby(void)
 
        /* self-refresh mode now */
        at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);
-       if (pm_data.ramc[1])
+       if (soc_pm.data.ramc[1])
                at91_ramc_write(1, AT91_DDRSDRC_LPR, lpr1);
 
        cpu_do_idle();
 
        at91_ramc_write(0, AT91_DDRSDRC_MDR, saved_mdr0);
        at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
-       if (pm_data.ramc[1]) {
+       if (soc_pm.data.ramc[1]) {
                at91_ramc_write(0, AT91_DDRSDRC_MDR, saved_mdr1);
                at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1);
        }
@@ -429,7 +477,7 @@ static void at91sam9_sdram_standby(void)
        u32 lpr0, lpr1 = 0;
        u32 saved_lpr0, saved_lpr1 = 0;
 
-       if (pm_data.ramc[1]) {
+       if (soc_pm.data.ramc[1]) {
                saved_lpr1 = at91_ramc_read(1, AT91_SDRAMC_LPR);
                lpr1 = saved_lpr1 & ~AT91_SDRAMC_LPCB;
                lpr1 |= AT91_SDRAMC_LPCB_SELF_REFRESH;
@@ -441,13 +489,13 @@ static void at91sam9_sdram_standby(void)
 
        /* self-refresh mode now */
        at91_ramc_write(0, AT91_SDRAMC_LPR, lpr0);
-       if (pm_data.ramc[1])
+       if (soc_pm.data.ramc[1])
                at91_ramc_write(1, AT91_SDRAMC_LPR, lpr1);
 
        cpu_do_idle();
 
        at91_ramc_write(0, AT91_SDRAMC_LPR, saved_lpr0);
-       if (pm_data.ramc[1])
+       if (soc_pm.data.ramc[1])
                at91_ramc_write(1, AT91_SDRAMC_LPR, saved_lpr1);
 }
 
@@ -480,14 +528,14 @@ static __init void at91_dt_ramc(void)
        const struct ramc_info *ramc;
 
        for_each_matching_node_and_match(np, ramc_ids, &of_id) {
-               pm_data.ramc[idx] = of_iomap(np, 0);
-               if (!pm_data.ramc[idx])
+               soc_pm.data.ramc[idx] = of_iomap(np, 0);
+               if (!soc_pm.data.ramc[idx])
                        panic(pr_fmt("unable to map ramc[%d] cpu registers\n"), idx);
 
                ramc = of_id->data;
                if (!standby)
                        standby = ramc->idle;
-               pm_data.memctrl = ramc->memctrl;
+               soc_pm.data.memctrl = ramc->memctrl;
 
                idx++;
        }
@@ -509,12 +557,17 @@ static void at91rm9200_idle(void)
         * Disable the processor clock.  The processor will be automatically
         * re-enabled by an interrupt or by a reset.
         */
-       writel(AT91_PMC_PCK, pm_data.pmc + AT91_PMC_SCDR);
+       writel(AT91_PMC_PCK, soc_pm.data.pmc + AT91_PMC_SCDR);
+}
+
+static void at91sam9x60_idle(void)
+{
+       cpu_do_idle();
 }
 
 static void at91sam9_idle(void)
 {
-       writel(AT91_PMC_PCK, pm_data.pmc + AT91_PMC_SCDR);
+       writel(AT91_PMC_PCK, soc_pm.data.pmc + AT91_PMC_SCDR);
        cpu_do_idle();
 }
 
@@ -566,8 +619,8 @@ static void __init at91_pm_sram_init(void)
 
 static bool __init at91_is_pm_mode_active(int pm_mode)
 {
-       return (pm_data.standby_mode == pm_mode ||
-               pm_data.suspend_mode == pm_mode);
+       return (soc_pm.data.standby_mode == pm_mode ||
+               soc_pm.data.suspend_mode == pm_mode);
 }
 
 static int __init at91_pm_backup_init(void)
@@ -577,6 +630,9 @@ static int __init at91_pm_backup_init(void)
        struct platform_device *pdev = NULL;
        int ret = -ENODEV;
 
+       if (!IS_ENABLED(CONFIG_SOC_SAMA5D2))
+               return -EPERM;
+
        if (!at91_is_pm_mode_active(AT91_PM_BACKUP))
                return 0;
 
@@ -586,7 +642,7 @@ static int __init at91_pm_backup_init(void)
                return ret;
        }
 
-       pm_data.sfrbu = of_iomap(np, 0);
+       soc_pm.data.sfrbu = of_iomap(np, 0);
        of_node_put(np);
 
        np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-securam");
@@ -622,8 +678,8 @@ static int __init at91_pm_backup_init(void)
 securam_fail:
        put_device(&pdev->dev);
 securam_fail_no_ref_dev:
-       iounmap(pm_data.sfrbu);
-       pm_data.sfrbu = NULL;
+       iounmap(soc_pm.data.sfrbu);
+       soc_pm.data.sfrbu = NULL;
        return ret;
 }
 
@@ -632,10 +688,10 @@ static void __init at91_pm_use_default_mode(int pm_mode)
        if (pm_mode != AT91_PM_ULP1 && pm_mode != AT91_PM_BACKUP)
                return;
 
-       if (pm_data.standby_mode == pm_mode)
-               pm_data.standby_mode = AT91_PM_ULP0;
-       if (pm_data.suspend_mode == pm_mode)
-               pm_data.suspend_mode = AT91_PM_ULP0;
+       if (soc_pm.data.standby_mode == pm_mode)
+               soc_pm.data.standby_mode = AT91_PM_ULP0;
+       if (soc_pm.data.suspend_mode == pm_mode)
+               soc_pm.data.suspend_mode = AT91_PM_ULP0;
 }
 
 static void __init at91_pm_modes_init(void)
@@ -653,7 +709,7 @@ static void __init at91_pm_modes_init(void)
                goto ulp1_default;
        }
 
-       pm_data.shdwc = of_iomap(np, 0);
+       soc_pm.data.shdwc = of_iomap(np, 0);
        of_node_put(np);
 
        ret = at91_pm_backup_init();
@@ -667,8 +723,8 @@ static void __init at91_pm_modes_init(void)
        return;
 
 unmap:
-       iounmap(pm_data.shdwc);
-       pm_data.shdwc = NULL;
+       iounmap(soc_pm.data.shdwc);
+       soc_pm.data.shdwc = NULL;
 ulp1_default:
        at91_pm_use_default_mode(AT91_PM_ULP1);
 backup_default:
@@ -711,14 +767,14 @@ static void __init at91_pm_init(void (*pm_idle)(void))
                platform_device_register(&at91_cpuidle_device);
 
        pmc_np = of_find_matching_node_and_match(NULL, atmel_pmc_ids, &of_id);
-       pm_data.pmc = of_iomap(pmc_np, 0);
-       if (!pm_data.pmc) {
+       soc_pm.data.pmc = of_iomap(pmc_np, 0);
+       if (!soc_pm.data.pmc) {
                pr_err("AT91: PM not supported, PMC not found\n");
                return;
        }
 
        pmc = of_id->data;
-       pm_data.uhp_udp_mask = pmc->uhp_udp_mask;
+       soc_pm.data.uhp_udp_mask = pmc->uhp_udp_mask;
 
        if (pm_idle)
                arm_pm_idle = pm_idle;
@@ -728,8 +784,8 @@ static void __init at91_pm_init(void (*pm_idle)(void))
        if (at91_suspend_sram_fn) {
                suspend_set_ops(&at91_pm_ops);
                pr_info("AT91: PM: standby: %s, suspend: %s\n",
-                       pm_modes[pm_data.standby_mode].pattern,
-                       pm_modes[pm_data.suspend_mode].pattern);
+                       pm_modes[soc_pm.data.standby_mode].pattern,
+                       pm_modes[soc_pm.data.suspend_mode].pattern);
        } else {
                pr_info("AT91: PM not supported, due to no SRAM allocated\n");
        }
@@ -750,6 +806,19 @@ void __init at91rm9200_pm_init(void)
        at91_pm_init(at91rm9200_idle);
 }
 
+void __init sam9x60_pm_init(void)
+{
+       if (!IS_ENABLED(CONFIG_SOC_AT91SAM9))
+               return;
+
+       at91_pm_modes_init();
+       at91_dt_ramc();
+       at91_pm_init(at91sam9x60_idle);
+
+       soc_pm.ws_ids = sam9x60_ws_ids;
+       soc_pm.config_pmc_ws = at91_sam9x60_config_pmc_ws;
+}
+
 void __init at91sam9_pm_init(void)
 {
        if (!IS_ENABLED(CONFIG_SOC_AT91SAM9))
@@ -775,6 +844,10 @@ void __init sama5d2_pm_init(void)
 
        at91_pm_modes_init();
        sama5_pm_init();
+
+       soc_pm.ws_ids = sama5d2_ws_ids;
+       soc_pm.config_shdwc_ws = at91_sama5d2_config_shdwc_ws;
+       soc_pm.config_pmc_ws = at91_sama5d2_config_pmc_ws;
 }
 
 static int __init at91_pm_modes_select(char *str)
@@ -795,8 +868,8 @@ static int __init at91_pm_modes_select(char *str)
        if (suspend < 0)
                return 0;
 
-       pm_data.standby_mode = standby;
-       pm_data.suspend_mode = suspend;
+       soc_pm.data.standby_mode = standby;
+       soc_pm.data.suspend_mode = suspend;
 
        return 0;
 }
index bfe1c4d..77e2930 100644 (file)
@@ -50,15 +50,6 @@ tmp2 .req    r5
        beq     1b
        .endm
 
-/*
- * Wait until PLLA has locked.
- */
-       .macro wait_pllalock
-1:     ldr     tmp1, [pmc, #AT91_PMC_SR]
-       tst     tmp1, #AT91_PMC_LOCKA
-       beq     1b
-       .endm
-
 /*
  * Put the processor to enter the idle state
  */
@@ -178,11 +169,46 @@ ENDPROC(at91_backup_mode)
        orr     tmp1, tmp1, #AT91_PMC_KEY
        str     tmp1, [pmc, #AT91_CKGR_MOR]
 
+       /* Save RC oscillator state */
+       ldr     tmp1, [pmc, #AT91_PMC_SR]
+       str     tmp1, .saved_osc_status
+       tst     tmp1, #AT91_PMC_MOSCRCS
+       bne     1f
+
+       /* Turn off RC oscillator */
+       ldr     tmp1, [pmc, #AT91_CKGR_MOR]
+       bic     tmp1, tmp1, #AT91_PMC_MOSCRCEN
+       bic     tmp1, tmp1, #AT91_PMC_KEY_MASK
+       orr     tmp1, tmp1, #AT91_PMC_KEY
+       str     tmp1, [pmc, #AT91_CKGR_MOR]
+
+       /* Wait main RC disabled done */
+2:     ldr     tmp1, [pmc, #AT91_PMC_SR]
+       tst     tmp1, #AT91_PMC_MOSCRCS
+       bne     2b
+
        /* Wait for interrupt */
-       at91_cpu_idle
+1:     at91_cpu_idle
 
-       /* Turn on the crystal oscillator */
+       /* Restore RC oscillator state */
+       ldr     tmp1, .saved_osc_status
+       tst     tmp1, #AT91_PMC_MOSCRCS
+       beq     4f
+
+       /* Turn on RC oscillator */
        ldr     tmp1, [pmc, #AT91_CKGR_MOR]
+       orr     tmp1, tmp1, #AT91_PMC_MOSCRCEN
+       bic     tmp1, tmp1, #AT91_PMC_KEY_MASK
+       orr     tmp1, tmp1, #AT91_PMC_KEY
+       str     tmp1, [pmc, #AT91_CKGR_MOR]
+
+       /* Wait main RC stabilization */
+3:     ldr     tmp1, [pmc, #AT91_PMC_SR]
+       tst     tmp1, #AT91_PMC_MOSCRCS
+       beq     3b
+
+       /* Turn on the crystal oscillator */
+4:     ldr     tmp1, [pmc, #AT91_CKGR_MOR]
        orr     tmp1, tmp1, #AT91_PMC_MOSCEN
        orr     tmp1, tmp1, #AT91_PMC_KEY
        str     tmp1, [pmc, #AT91_CKGR_MOR]
@@ -197,8 +223,26 @@ ENDPROC(at91_backup_mode)
 .macro at91_pm_ulp1_mode
        ldr     pmc, .pmc_base
 
-       /* Switch the main clock source to 12-MHz RC oscillator */
+       /* Save RC oscillator state and check if it is enabled. */
+       ldr     tmp1, [pmc, #AT91_PMC_SR]
+       str     tmp1, .saved_osc_status
+       tst     tmp1, #AT91_PMC_MOSCRCS
+       bne     2f
+
+       /* Enable RC oscillator */
        ldr     tmp1, [pmc, #AT91_CKGR_MOR]
+       orr     tmp1, tmp1, #AT91_PMC_MOSCRCEN
+       bic     tmp1, tmp1, #AT91_PMC_KEY_MASK
+       orr     tmp1, tmp1, #AT91_PMC_KEY
+       str     tmp1, [pmc, #AT91_CKGR_MOR]
+
+       /* Wait main RC stabilization */
+1:     ldr     tmp1, [pmc, #AT91_PMC_SR]
+       tst     tmp1, #AT91_PMC_MOSCRCS
+       beq     1b
+
+       /* Switch the main clock source to 12-MHz RC oscillator */
+2:     ldr     tmp1, [pmc, #AT91_CKGR_MOR]
        bic     tmp1, tmp1, #AT91_PMC_MOSCSEL
        bic     tmp1, tmp1, #AT91_PMC_KEY_MASK
        orr     tmp1, tmp1, #AT91_PMC_KEY
@@ -262,6 +306,25 @@ ENDPROC(at91_backup_mode)
        str     tmp1, [pmc, #AT91_PMC_MCKR]
 
        wait_mckrdy
+
+       /* Restore RC oscillator state */
+       ldr     tmp1, .saved_osc_status
+       tst     tmp1, #AT91_PMC_MOSCRCS
+       bne     3f
+
+       /* Disable RC oscillator */
+       ldr     tmp1, [pmc, #AT91_CKGR_MOR]
+       bic     tmp1, tmp1, #AT91_PMC_MOSCRCEN
+       bic     tmp1, tmp1, #AT91_PMC_KEY_MASK
+       orr     tmp1, tmp1, #AT91_PMC_KEY
+       str     tmp1, [pmc, #AT91_CKGR_MOR]
+
+       /* Wait RC oscillator disable done */
+4:     ldr     tmp1, [pmc, #AT91_PMC_SR]
+       tst     tmp1, #AT91_PMC_MOSCRCS
+       bne     4b
+
+3:
 .endm
 
 ENTRY(at91_ulp_mode)
@@ -279,14 +342,6 @@ ENTRY(at91_ulp_mode)
 
        wait_mckrdy
 
-       /* Save PLLA setting and disable it */
-       ldr     tmp1, [pmc, #AT91_CKGR_PLLAR]
-       str     tmp1, .saved_pllar
-
-       mov     tmp1, #AT91_PMC_PLLCOUNT
-       orr     tmp1, tmp1, #(1 << 29)          /* bit 29 always set */
-       str     tmp1, [pmc, #AT91_CKGR_PLLAR]
-
        ldr     r0, .pm_mode
        cmp     r0, #AT91_PM_ULP1
        beq     ulp1_mode
@@ -301,18 +356,6 @@ ulp1_mode:
 ulp_exit:
        ldr     pmc, .pmc_base
 
-       /* Restore PLLA setting */
-       ldr     tmp1, .saved_pllar
-       str     tmp1, [pmc, #AT91_CKGR_PLLAR]
-
-       tst     tmp1, #(AT91_PMC_MUL &  0xff0000)
-       bne     3f
-       tst     tmp1, #(AT91_PMC_MUL & ~0xff0000)
-       beq     4f
-3:
-       wait_pllalock
-4:
-
        /*
         * Restore master clock setting
         */
@@ -465,8 +508,6 @@ ENDPROC(at91_sramc_self_refresh)
        .word 0
 .saved_mckr:
        .word 0
-.saved_pllar:
-       .word 0
 .saved_sam9_lpr:
        .word 0
 .saved_sam9_lpr1:
@@ -475,6 +516,8 @@ ENDPROC(at91_sramc_self_refresh)
        .word 0
 .saved_sam9_mdr1:
        .word 0
+.saved_osc_status:
+       .word 0
 
 ENTRY(at91_pm_suspend_in_sram_sz)
        .word .-at91_pm_suspend_in_sram
index cc5f156..381f452 100644 (file)
@@ -27,7 +27,6 @@
 #include <linux/cpu.h>
 #include <linux/pci.h>
 #include <linux/sched_clock.h>
-#include <linux/bitops.h>
 #include <linux/irqchip/irq-ixp4xx.h>
 #include <linux/platform_data/timer-ixp4xx.h>
 #include <mach/udc.h>
index 0b10acd..d2df5ef 100644 (file)
@@ -136,7 +136,6 @@ static void __init i2c_quirk(void)
 
                of_update_property(np, new_compat);
        }
-       return;
 }
 
 static void __init mvebu_dt_init(void)
index 8b2fbc8..2d962fe 100644 (file)
@@ -66,7 +66,7 @@ ENDPROC(ll_get_coherency_base)
  * fabric registers
  */
 ENTRY(ll_get_coherency_cpumask)
-       mrc     15, 0, r3, cr0, cr0, 5
+       mrc     p15, 0, r3, cr0, cr0, 5
        and     r3, r3, #15
        mov     r2, #(1 << 24)
        lsl     r3, r2, r3
index 9b5f4d6..ceaad6d 100644 (file)
@@ -108,8 +108,6 @@ static void __init kirkwood_dt_eth_fixup(void)
                clk_prepare_enable(clk);
 
                /* store MAC address register contents in local-mac-address */
-               pr_err(FW_INFO "%pOF: local-mac-address is not set\n", np);
-
                pmac = kzalloc(sizeof(*pmac) + 6, GFP_KERNEL);
                if (!pmac)
                        goto eth_fixup_no_mem;
index db17121..0705525 100644 (file)
@@ -79,7 +79,7 @@ static void mvebu_armada_pm_enter(void __iomem *sdram_reg, u32 srcmd)
 static int __init mvebu_armada_pm_init(void)
 {
        struct device_node *np;
-       struct device_node *gpio_ctrl_np;
+       struct device_node *gpio_ctrl_np = NULL;
        int ret = 0, i;
 
        if (!of_machine_is_compatible("marvell,axp-gp"))
@@ -126,18 +126,23 @@ static int __init mvebu_armada_pm_init(void)
                        goto out;
                }
 
+               if (gpio_ctrl_np)
+                       of_node_put(gpio_ctrl_np);
                gpio_ctrl_np = args.np;
                pic_raw_gpios[i] = args.args[0];
        }
 
        gpio_ctrl = of_iomap(gpio_ctrl_np, 0);
-       if (!gpio_ctrl)
-               return -ENOMEM;
+       if (!gpio_ctrl) {
+               ret = -ENOMEM;
+               goto out;
+       }
 
        mvebu_pm_suspend_init(mvebu_armada_pm_enter);
 
 out:
        of_node_put(np);
+       of_node_put(gpio_ctrl_np);
        return ret;
 }
 
index 8865122..7aae9a2 100644 (file)
@@ -16,7 +16,7 @@
 ENTRY(armada_38x_scu_power_up)
        mrc     p15, 4, r1, c15, c0     @ get SCU base address
        orr     r1, r1, #0x8            @ SCU CPU Power Status Register
-       mrc     15, 0, r0, cr0, cr0, 5  @ get the CPU ID
+       mrc     p15, 0, r0, cr0, cr0, 5 @ get the CPU ID
        and     r0, r0, #15
        add     r1, r1, r0
        mov     r0, #0x0
@@ -56,7 +56,6 @@ ENDPROC(armada_38x_cpu_resume)
 
 /* The following code will be executed from SRAM */
 ENTRY(mvebu_boot_wa_start)
-mvebu_boot_wa_start:
 ARM_BE8(setend be)
        adr     r0, 1f
        ldr     r0, [r0]                @ load the address of the
index 1b15d59..b6e8141 100644 (file)
@@ -749,7 +749,7 @@ static void __init ams_delta_init(void)
                                ARRAY_SIZE(ams_delta_gpio_tables));
 
        leds_pdev = gpio_led_register_device(PLATFORM_DEVID_NONE, &leds_pdata);
-       if (!IS_ERR(leds_pdev)) {
+       if (!IS_ERR_OR_NULL(leds_pdev)) {
                leds_gpio_table.dev_id = dev_name(&leds_pdev->dev);
                gpiod_add_lookup_table(&leds_gpio_table);
        }
index 0f4d918..42eca65 100644 (file)
@@ -87,6 +87,11 @@ config ARCH_EXYNOS
 config ARCH_K3
        bool "Texas Instruments Inc. K3 multicore SoC architecture"
        select PM_GENERIC_DOMAINS if PM
+       select MAILBOX
+       select TI_MESSAGE_MANAGER
+       select TI_SCI_PROTOCOL
+       select TI_SCI_INTR_IRQCHIP
+       select TI_SCI_INTA_IRQCHIP
        help
          This enables support for Texas Instruments' K3 multicore SoC
          architecture.
@@ -215,6 +220,7 @@ config ARCH_SYNQUACER
 config ARCH_TEGRA
        bool "NVIDIA Tegra SoC Family"
        select ARCH_HAS_RESET_CONTROLLER
+       select ARM_GIC_PM
        select CLKDEV_LOOKUP
        select CLKSRC_MMIO
        select TIMER_OF
index 75ee6cf..14d7fea 100644 (file)
@@ -59,7 +59,7 @@
        };
 
        padctl@3520000 {
-               status = "okay";
+               status = "disabled";
 
                avdd-pll-erefeut-supply = <&vdd_1v8_pll>;
                avdd-usb-supply = <&vdd_3v3_sys>;
        };
 
        usb@3530000 {
-               status = "okay";
+               status = "disabled";
 
                phys = <&{/padctl@3520000/pads/usb2/lanes/usb2-0}>,
                       <&{/padctl@3520000/pads/usb2/lanes/usb2-1}>,
index f0bb6ce..426ac0b 100644 (file)
@@ -60,6 +60,7 @@
                clock-names = "master_bus", "slave_bus", "rx", "tx", "ptp_ref";
                resets = <&bpmp TEGRA186_RESET_EQOS>;
                reset-names = "eqos";
+               iommus = <&smmu TEGRA186_SID_EQOS>;
                status = "disabled";
 
                snps,write-requests = <1>;
                         <&bpmp TEGRA186_RESET_HDA2CODEC_2X>;
                reset-names = "hda", "hda2hdmi", "hda2codec_2x";
                power-domains = <&bpmp TEGRA186_POWER_DOMAIN_DISP>;
+               iommus = <&smmu TEGRA186_SID_HDA>;
                status = "disabled";
        };
 
                         <&bpmp TEGRA186_RESET_PCIEXCLK>;
                reset-names = "afi", "pex", "pcie_x";
 
+               iommus = <&smmu TEGRA186_SID_AFI>;
+               iommu-map = <0x0 &smmu TEGRA186_SID_AFI 0x1000>;
+               iommu-map-mask = <0x0>;
+
                status = "disabled";
 
                pci@1,0 {
 
        bpmp: bpmp {
                compatible = "nvidia,tegra186-bpmp";
+               iommus = <&smmu TEGRA186_SID_BPMP>;
                mboxes = <&hsp_top0 TEGRA_HSP_MBOX_TYPE_DB
                                    TEGRA_HSP_DB_MASTER_BPMP>;
                shmem = <&cpu_bpmp_tx &cpu_bpmp_rx>;
index eb6be56..4bb862c 100644 (file)
@@ -75,7 +75,9 @@
                                             "sprd,sc9836-uart";
                                reg = <0x0 0x100>;
                                interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
-                               clocks = <&ext_26m>;
+                               clock-names = "enable", "uart", "source";
+                               clocks = <&apapb_gate CLK_UART0_EB>,
+                                      <&ap_clk CLK_UART0>, <&ext_26m>;
                                status = "disabled";
                        };
 
@@ -84,7 +86,9 @@
                                             "sprd,sc9836-uart";
                                reg = <0x100000 0x100>;
                                interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
-                               clocks = <&ext_26m>;
+                               clock-names = "enable", "uart", "source";
+                               clocks = <&apapb_gate CLK_UART1_EB>,
+                                      <&ap_clk CLK_UART1>, <&ext_26m>;
                                status = "disabled";
                        };
 
@@ -93,7 +97,9 @@
                                             "sprd,sc9836-uart";
                                reg = <0x200000 0x100>;
                                interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
-                               clocks = <&ext_26m>;
+                               clock-names = "enable", "uart", "source";
+                               clocks = <&apapb_gate CLK_UART2_EB>,
+                                      <&ap_clk CLK_UART2>, <&ext_26m>;
                                status = "disabled";
                        };
 
                                             "sprd,sc9836-uart";
                                reg = <0x300000 0x100>;
                                interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
-                               clocks = <&ext_26m>;
+                               clock-names = "enable", "uart", "source";
+                               clocks = <&apapb_gate CLK_UART3_EB>,
+                                      <&ap_clk CLK_UART3>, <&ext_26m>;
                                status = "disabled";
                        };
                };
index 677e5bf..70d3200 100644 (file)
@@ -674,7 +674,10 @@ config SGI_IP27
        select SYS_HAS_EARLY_PRINTK
        select HAVE_PCI
        select IRQ_MIPS_CPU
+       select IRQ_DOMAIN_HIERARCHY
        select NR_CPUS_DEFAULT_64
+       select PCI_DRIVERS_GENERIC
+       select PCI_XTALK_BRIDGE
        select SYS_HAS_CPU_R10000
        select SYS_SUPPORTS_64BIT_KERNEL
        select SYS_SUPPORTS_BIG_ENDIAN
@@ -1241,6 +1244,9 @@ config IRQ_GT641XX
 config PCI_GT64XXX_PCI0
        bool
 
+config PCI_XTALK_BRIDGE
+       bool
+
 config NO_EXCEPT_FILL
        bool
 
index 1454d9f..b8f3397 100644 (file)
@@ -131,9 +131,7 @@ static void __init alchemy_setup_uarts(int ctype)
 }
 
 
-/* The dmamask must be set for OHCI/EHCI to work */
-static u64 alchemy_ohci_dmamask = DMA_BIT_MASK(32);
-static u64 __maybe_unused alchemy_ehci_dmamask = DMA_BIT_MASK(32);
+static u64 alchemy_all_dmamask = DMA_BIT_MASK(32);
 
 /* Power on callback for the ehci platform driver */
 static int alchemy_ehci_power_on(struct platform_device *pdev)
@@ -231,7 +229,7 @@ static void __init alchemy_setup_usb(int ctype)
        res[1].flags = IORESOURCE_IRQ;
        pdev->name = "ohci-platform";
        pdev->id = 0;
-       pdev->dev.dma_mask = &alchemy_ohci_dmamask;
+       pdev->dev.dma_mask = &alchemy_all_dmamask;
        pdev->dev.platform_data = &alchemy_ohci_pdata;
 
        if (platform_device_register(pdev))
@@ -251,7 +249,7 @@ static void __init alchemy_setup_usb(int ctype)
                res[1].flags = IORESOURCE_IRQ;
                pdev->name = "ehci-platform";
                pdev->id = 0;
-               pdev->dev.dma_mask = &alchemy_ehci_dmamask;
+               pdev->dev.dma_mask = &alchemy_all_dmamask;
                pdev->dev.platform_data = &alchemy_ehci_pdata;
 
                if (platform_device_register(pdev))
@@ -271,7 +269,7 @@ static void __init alchemy_setup_usb(int ctype)
                res[1].flags = IORESOURCE_IRQ;
                pdev->name = "ohci-platform";
                pdev->id = 1;
-               pdev->dev.dma_mask = &alchemy_ohci_dmamask;
+               pdev->dev.dma_mask = &alchemy_all_dmamask;
                pdev->dev.platform_data = &alchemy_ohci_pdata;
 
                if (platform_device_register(pdev))
@@ -338,7 +336,11 @@ static struct platform_device au1xxx_eth0_device = {
        .name           = "au1000-eth",
        .id             = 0,
        .num_resources  = MAC_RES_COUNT,
-       .dev.platform_data = &au1xxx_eth0_platform_data,
+       .dev = {
+               .dma_mask               = &alchemy_all_dmamask,
+               .coherent_dma_mask      = DMA_BIT_MASK(32),
+               .platform_data          = &au1xxx_eth0_platform_data,
+       },
 };
 
 static struct resource au1xxx_eth1_resources[][MAC_RES_COUNT] __initdata = {
@@ -370,7 +372,11 @@ static struct platform_device au1xxx_eth1_device = {
        .name           = "au1000-eth",
        .id             = 1,
        .num_resources  = MAC_RES_COUNT,
-       .dev.platform_data = &au1xxx_eth1_platform_data,
+       .dev = {
+               .dma_mask               = &alchemy_all_dmamask,
+               .coherent_dma_mask      = DMA_BIT_MASK(32),
+               .platform_data          = &au1xxx_eth1_platform_data,
+       },
 };
 
 void __init au1xxx_override_eth_cfg(unsigned int port,
index a106f81..a84475f 100644 (file)
@@ -43,14 +43,14 @@ void __init *plat_get_fdt(void)
                /* Already set up */
                return (void *)fdt;
 
-       if ((fw_arg0 == -2) && !fdt_check_header((void *)fw_arg1)) {
+       if ((fw_arg0 == -2) && !fdt_check_header((void *)fw_passed_dtb)) {
                /*
                 * We booted using the UHI boot protocol, so we have been
                 * provided with the appropriate device tree for the board.
                 * Make use of it & search for any machine struct based upon
                 * the root compatible string.
                 */
-               fdt = (void *)fw_arg1;
+               fdt = (void *)fw_passed_dtb;
 
                for_each_mips_machine(check_mach) {
                        match = mips_machine_is_compatible(check_mach, fdt);
index 42ea131..965f079 100644 (file)
@@ -7,18 +7,9 @@
 #include <asm/mmzone.h>
 
 struct cpuinfo_ip27 {
-//     cpuid_t         p_cpuid;        /* PROM assigned cpuid */
        cnodeid_t       p_nodeid;       /* my node ID in compact-id-space */
        nasid_t         p_nasid;        /* my node ID in numa-as-id-space */
        unsigned char   p_slice;        /* Physical position on node board */
-#if 0
-       unsigned long           loops_per_sec;
-       unsigned long           ipi_count;
-       unsigned long           irq_attempt[NR_IRQS];
-       unsigned long           smp_local_irq_count;
-       unsigned long           prof_multiplier;
-       unsigned long           prof_counter;
-#endif
 };
 
 extern struct cpuinfo_ip27 sn_cpu_info[NR_CPUS];
@@ -30,7 +21,7 @@ extern struct cpuinfo_ip27 sn_cpu_info[NR_CPUS];
 struct pci_bus;
 extern int pcibus_to_node(struct pci_bus *);
 
-#define cpumask_of_pcibus(bus) (cpu_online_mask)
+#define cpumask_of_pcibus(bus) (cpumask_of_node(pcibus_to_node(bus)))
 
 extern unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES];
 
index 23574c2..a92cd30 100644 (file)
@@ -801,15 +801,13 @@ struct bridge_err_cmdword {
 #define PCI64_ATTR_RMF_SHFT    48
 
 struct bridge_controller {
-       struct pci_controller   pc;
-       struct resource         mem;
-       struct resource         io;
        struct resource         busn;
        struct bridge_regs      *base;
-       nasid_t                 nasid;
-       unsigned int            widget_id;
-       u64                     baddr;
+       unsigned long           baddr;
+       unsigned long           intr_addr;
+       struct irq_domain       *domain;
        unsigned int            pci_int[8];
+       nasid_t                 nasid;
 };
 
 #define BRIDGE_CONTROLLER(bus) \
@@ -822,8 +820,4 @@ struct bridge_controller {
 #define bridge_clr(bc, reg, val)       \
        __raw_writel(__raw_readl(&bc->base->reg) & ~(val), &bc->base->reg)
 
-extern int request_bridge_irq(struct bridge_controller *bc, int pin);
-
-extern struct pci_ops bridge_pci_ops;
-
 #endif /* _ASM_PCI_BRIDGE_H */
diff --git a/arch/mips/include/asm/sn/irq_alloc.h b/arch/mips/include/asm/sn/irq_alloc.h
new file mode 100644 (file)
index 0000000..09b89ce
--- /dev/null
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_SN_IRQ_ALLOC_H
+#define __ASM_SN_IRQ_ALLOC_H
+
+struct irq_alloc_info {
+       void *ctrl;
+       nasid_t nasid;
+       int pin;
+};
+
+#endif /* __ASM_SN_IRQ_ALLOC_H */
index 26d2ed1..680e7ef 100644 (file)
@@ -47,15 +47,6 @@ typedef struct xtalk_piomap_s *xtalk_piomap_t;
 #define XIO_PORT(x)    ((xwidgetnum_t)(((x)&XIO_PORT_BITS) >> XIO_PORT_SHIFT))
 #define XIO_PACK(p, o) ((((uint64_t)(p))<<XIO_PORT_SHIFT) | ((o)&XIO_ADDR_BITS))
 
-#ifdef CONFIG_PCI
-extern int bridge_probe(nasid_t nasid, int widget, int masterwid);
-#else
-static inline int bridge_probe(nasid_t nasid, int widget, int masterwid)
-{
-       return 0;
-}
-#endif
-
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_XTALK_XTALK_H */
index d5e335e..6126b77 100644 (file)
@@ -1973,6 +1973,14 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
                panic("Unknown Ingenic Processor ID!");
                break;
        }
+
+       /*
+        * The config0 register in Xburst CPUs with a processor ID of
+        * PRID_COMP_INGENIC_D0 reports the CPU as MIPS32r2 compatible,
+        * but these CPUs don't actually support this ISA.
+        */
+       if ((c->processor_id & PRID_COMP_MASK) == PRID_COMP_INGENIC_D0)
+               c->isa_level &= ~MIPS_CPU_ISA_M32R2;
 }
 
 static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu)
index 4138635..d67fb64 100644 (file)
@@ -64,17 +64,11 @@ struct mips_perf_event {
        #define CNTR_EVEN       0x55555555
        #define CNTR_ODD        0xaaaaaaaa
        #define CNTR_ALL        0xffffffff
-#ifdef CONFIG_MIPS_MT_SMP
        enum {
                T  = 0,
                V  = 1,
                P  = 2,
        } range;
-#else
-       #define T
-       #define V
-       #define P
-#endif
 };
 
 static struct mips_perf_event raw_event;
@@ -325,9 +319,7 @@ static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
 {
        struct perf_event *event = container_of(evt, struct perf_event, hw);
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
-#ifdef CONFIG_MIPS_MT_SMP
        unsigned int range = evt->event_base >> 24;
-#endif /* CONFIG_MIPS_MT_SMP */
 
        WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
 
@@ -336,21 +328,15 @@ static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
                /* Make sure interrupt enabled. */
                MIPS_PERFCTRL_IE;
 
-#ifdef CONFIG_CPU_BMIPS5000
-       {
+       if (IS_ENABLED(CONFIG_CPU_BMIPS5000)) {
                /* enable the counter for the calling thread */
                cpuc->saved_ctrl[idx] |=
                        (1 << (12 + vpe_id())) | BRCM_PERFCTRL_TC;
-       }
-#else
-#ifdef CONFIG_MIPS_MT_SMP
-       if (range > V) {
+       } else if (IS_ENABLED(CONFIG_MIPS_MT_SMP) && range > V) {
                /* The counter is processor wide. Set it up to count all TCs. */
                pr_debug("Enabling perf counter for all TCs\n");
                cpuc->saved_ctrl[idx] |= M_TC_EN_ALL;
-       } else
-#endif /* CONFIG_MIPS_MT_SMP */
-       {
+       } else {
                unsigned int cpu, ctrl;
 
                /*
@@ -365,7 +351,6 @@ static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
                cpuc->saved_ctrl[idx] |= ctrl;
                pr_debug("Enabling perf counter for CPU%d\n", cpu);
        }
-#endif /* CONFIG_CPU_BMIPS5000 */
        /*
         * We do not actually let the counter run. Leave it until start().
         */
index c4f9765..d6de4cb 100644 (file)
@@ -26,6 +26,7 @@ obj-$(CONFIG_PCI_AR2315)      += pci-ar2315.o
 obj-$(CONFIG_SOC_AR71XX)       += pci-ar71xx.o
 obj-$(CONFIG_PCI_AR724X)       += pci-ar724x.o
 obj-$(CONFIG_MIPS_PCI_VIRTIO)  += pci-virtio-guest.o
+obj-$(CONFIG_PCI_XTALK_BRIDGE) += pci-xtalk-bridge.o
 #
 # These are still pretty much in the old state, watch, go blind.
 #
@@ -39,7 +40,7 @@ obj-$(CONFIG_MIPS_MALTA)      += fixup-malta.o pci-malta.o
 obj-$(CONFIG_PMC_MSP7120_GW)   += fixup-pmcmsp.o ops-pmcmsp.o
 obj-$(CONFIG_PMC_MSP7120_EVAL) += fixup-pmcmsp.o ops-pmcmsp.o
 obj-$(CONFIG_PMC_MSP7120_FPGA) += fixup-pmcmsp.o ops-pmcmsp.o
-obj-$(CONFIG_SGI_IP27)         += ops-bridge.o pci-ip27.o
+obj-$(CONFIG_SGI_IP27)         += pci-ip27.o
 obj-$(CONFIG_SGI_IP32)         += fixup-ip32.o ops-mace.o pci-ip32.o
 obj-$(CONFIG_SIBYTE_SB1250)    += fixup-sb1250.o pci-sb1250.o
 obj-$(CONFIG_SIBYTE_BCM112X)   += fixup-sb1250.o pci-sb1250.o
diff --git a/arch/mips/pci/ops-bridge.c b/arch/mips/pci/ops-bridge.c
deleted file mode 100644 (file)
index df95b0d..0000000
+++ /dev/null
@@ -1,302 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1999, 2000, 04, 06 Ralf Baechle (ralf@linux-mips.org)
- * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
- */
-#include <linux/pci.h>
-#include <asm/paccess.h>
-#include <asm/pci/bridge.h>
-#include <asm/sn/arch.h>
-#include <asm/sn/intr.h>
-#include <asm/sn/sn0/hub.h>
-
-/*
- * Most of the IOC3 PCI config register aren't present
- * we emulate what is needed for a normal PCI enumeration
- */
-static u32 emulate_ioc3_cfg(int where, int size)
-{
-       if (size == 1 && where == 0x3d)
-               return 0x01;
-       else if (size == 2 && where == 0x3c)
-               return 0x0100;
-       else if (size == 4 && where == 0x3c)
-               return 0x00000100;
-
-       return 0;
-}
-
-/*
- * The Bridge ASIC supports both type 0 and type 1 access.  Type 1 is
- * not really documented, so right now I can't write code which uses it.
- * Therefore we use type 0 accesses for now even though they won't work
- * correctly for PCI-to-PCI bridges.
- *
- * The function is complicated by the ultimate brokenness of the IOC3 chip
- * which is used in SGI systems.  The IOC3 can only handle 32-bit PCI
- * accesses and does only decode parts of it's address space.
- */
-
-static int pci_conf0_read_config(struct pci_bus *bus, unsigned int devfn,
-                                int where, int size, u32 * value)
-{
-       struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
-       struct bridge_regs *bridge = bc->base;
-       int slot = PCI_SLOT(devfn);
-       int fn = PCI_FUNC(devfn);
-       volatile void *addr;
-       u32 cf, shift, mask;
-       int res;
-
-       addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[PCI_VENDOR_ID];
-       if (get_dbe(cf, (u32 *) addr))
-               return PCIBIOS_DEVICE_NOT_FOUND;
-
-       /*
-        * IOC3 is broken beyond belief ...  Don't even give the
-        * generic PCI code a chance to look at it for real ...
-        */
-       if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16)))
-               goto is_ioc3;
-
-       addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[where ^ (4 - size)];
-
-       if (size == 1)
-               res = get_dbe(*value, (u8 *) addr);
-       else if (size == 2)
-               res = get_dbe(*value, (u16 *) addr);
-       else
-               res = get_dbe(*value, (u32 *) addr);
-
-       return res ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
-
-is_ioc3:
-
-       /*
-        * IOC3 special handling
-        */
-       if ((where >= 0x14 && where < 0x40) || (where >= 0x48)) {
-               *value = emulate_ioc3_cfg(where, size);
-               return PCIBIOS_SUCCESSFUL;
-       }
-
-       addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2];
-
-       if (get_dbe(cf, (u32 *) addr))
-               return PCIBIOS_DEVICE_NOT_FOUND;
-
-       shift = ((where & 3) << 3);
-       mask = (0xffffffffU >> ((4 - size) << 3));
-       *value = (cf >> shift) & mask;
-
-       return PCIBIOS_SUCCESSFUL;
-}
-
-static int pci_conf1_read_config(struct pci_bus *bus, unsigned int devfn,
-                                int where, int size, u32 * value)
-{
-       struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
-       struct bridge_regs *bridge = bc->base;
-       int busno = bus->number;
-       int slot = PCI_SLOT(devfn);
-       int fn = PCI_FUNC(devfn);
-       volatile void *addr;
-       u32 cf, shift, mask;
-       int res;
-
-       bridge_write(bc, b_pci_cfg, (busno << 16) | (slot << 11));
-       addr = &bridge->b_type1_cfg.c[(fn << 8) | PCI_VENDOR_ID];
-       if (get_dbe(cf, (u32 *) addr))
-               return PCIBIOS_DEVICE_NOT_FOUND;
-
-       /*
-        * IOC3 is broken beyond belief ...  Don't even give the
-        * generic PCI code a chance to look at it for real ...
-        */
-       if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16)))
-               goto is_ioc3;
-
-       bridge_write(bc, b_pci_cfg, (busno << 16) | (slot << 11));
-       addr = &bridge->b_type1_cfg.c[(fn << 8) | (where ^ (4 - size))];
-
-       if (size == 1)
-               res = get_dbe(*value, (u8 *) addr);
-       else if (size == 2)
-               res = get_dbe(*value, (u16 *) addr);
-       else
-               res = get_dbe(*value, (u32 *) addr);
-
-       return res ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
-
-is_ioc3:
-
-       /*
-        * IOC3 special handling
-        */
-       if ((where >= 0x14 && where < 0x40) || (where >= 0x48)) {
-               *value = emulate_ioc3_cfg(where, size);
-               return PCIBIOS_SUCCESSFUL;
-       }
-
-       bridge_write(bc, b_pci_cfg, (busno << 16) | (slot << 11));
-       addr = &bridge->b_type1_cfg.c[(fn << 8) | where];
-
-       if (get_dbe(cf, (u32 *) addr))
-               return PCIBIOS_DEVICE_NOT_FOUND;
-
-       shift = ((where & 3) << 3);
-       mask = (0xffffffffU >> ((4 - size) << 3));
-       *value = (cf >> shift) & mask;
-
-       return PCIBIOS_SUCCESSFUL;
-}
-
-static int pci_read_config(struct pci_bus *bus, unsigned int devfn,
-                          int where, int size, u32 * value)
-{
-       if (!pci_is_root_bus(bus))
-               return pci_conf1_read_config(bus, devfn, where, size, value);
-
-       return pci_conf0_read_config(bus, devfn, where, size, value);
-}
-
-static int pci_conf0_write_config(struct pci_bus *bus, unsigned int devfn,
-                                 int where, int size, u32 value)
-{
-       struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
-       struct bridge_regs *bridge = bc->base;
-       int slot = PCI_SLOT(devfn);
-       int fn = PCI_FUNC(devfn);
-       volatile void *addr;
-       u32 cf, shift, mask, smask;
-       int res;
-
-       addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[PCI_VENDOR_ID];
-       if (get_dbe(cf, (u32 *) addr))
-               return PCIBIOS_DEVICE_NOT_FOUND;
-
-       /*
-        * IOC3 is broken beyond belief ...  Don't even give the
-        * generic PCI code a chance to look at it for real ...
-        */
-       if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16)))
-               goto is_ioc3;
-
-       addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[where ^ (4 - size)];
-
-       if (size == 1) {
-               res = put_dbe(value, (u8 *) addr);
-       } else if (size == 2) {
-               res = put_dbe(value, (u16 *) addr);
-       } else {
-               res = put_dbe(value, (u32 *) addr);
-       }
-
-       if (res)
-               return PCIBIOS_DEVICE_NOT_FOUND;
-
-       return PCIBIOS_SUCCESSFUL;
-
-is_ioc3:
-
-       /*
-        * IOC3 special handling
-        */
-       if ((where >= 0x14 && where < 0x40) || (where >= 0x48))
-               return PCIBIOS_SUCCESSFUL;
-
-       addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2];
-
-       if (get_dbe(cf, (u32 *) addr))
-               return PCIBIOS_DEVICE_NOT_FOUND;
-
-       shift = ((where & 3) << 3);
-       mask = (0xffffffffU >> ((4 - size) << 3));
-       smask = mask << shift;
-
-       cf = (cf & ~smask) | ((value & mask) << shift);
-       if (put_dbe(cf, (u32 *) addr))
-               return PCIBIOS_DEVICE_NOT_FOUND;
-
-       return PCIBIOS_SUCCESSFUL;
-}
-
-static int pci_conf1_write_config(struct pci_bus *bus, unsigned int devfn,
-                                 int where, int size, u32 value)
-{
-       struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
-       struct bridge_regs *bridge = bc->base;
-       int slot = PCI_SLOT(devfn);
-       int fn = PCI_FUNC(devfn);
-       int busno = bus->number;
-       volatile void *addr;
-       u32 cf, shift, mask, smask;
-       int res;
-
-       bridge_write(bc, b_pci_cfg, (busno << 16) | (slot << 11));
-       addr = &bridge->b_type1_cfg.c[(fn << 8) | PCI_VENDOR_ID];
-       if (get_dbe(cf, (u32 *) addr))
-               return PCIBIOS_DEVICE_NOT_FOUND;
-
-       /*
-        * IOC3 is broken beyond belief ...  Don't even give the
-        * generic PCI code a chance to look at it for real ...
-        */
-       if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16)))
-               goto is_ioc3;
-
-       addr = &bridge->b_type1_cfg.c[(fn << 8) | (where ^ (4 - size))];
-
-       if (size == 1) {
-               res = put_dbe(value, (u8 *) addr);
-       } else if (size == 2) {
-               res = put_dbe(value, (u16 *) addr);
-       } else {
-               res = put_dbe(value, (u32 *) addr);
-       }
-
-       if (res)
-               return PCIBIOS_DEVICE_NOT_FOUND;
-
-       return PCIBIOS_SUCCESSFUL;
-
-is_ioc3:
-
-       /*
-        * IOC3 special handling
-        */
-       if ((where >= 0x14 && where < 0x40) || (where >= 0x48))
-               return PCIBIOS_SUCCESSFUL;
-
-       addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2];
-
-       if (get_dbe(cf, (u32 *) addr))
-               return PCIBIOS_DEVICE_NOT_FOUND;
-
-       shift = ((where & 3) << 3);
-       mask = (0xffffffffU >> ((4 - size) << 3));
-       smask = mask << shift;
-
-       cf = (cf & ~smask) | ((value & mask) << shift);
-       if (put_dbe(cf, (u32 *) addr))
-               return PCIBIOS_DEVICE_NOT_FOUND;
-
-       return PCIBIOS_SUCCESSFUL;
-}
-
-static int pci_write_config(struct pci_bus *bus, unsigned int devfn,
-       int where, int size, u32 value)
-{
-       if (!pci_is_root_bus(bus))
-               return pci_conf1_write_config(bus, devfn, where, size, value);
-
-       return pci_conf0_write_config(bus, devfn, where, size, value);
-}
-
-struct pci_ops bridge_pci_ops = {
-       .read   = pci_read_config,
-       .write  = pci_write_config,
-};
index 3c177b4..441eb93 100644 (file)
@@ -7,162 +7,7 @@
  * Copyright (C) 1999, 2000, 04 Ralf Baechle (ralf@linux-mips.org)
  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
  */
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/pci.h>
-#include <linux/smp.h>
-#include <linux/dma-direct.h>
-#include <asm/sn/arch.h>
 #include <asm/pci/bridge.h>
-#include <asm/paccess.h>
-#include <asm/sn/intr.h>
-#include <asm/sn/sn0/hub.h>
-
-/*
- * Max #PCI busses we can handle; ie, max #PCI bridges.
- */
-#define MAX_PCI_BUSSES         40
-
-/*
- * XXX: No kmalloc available when we do our crosstalk scan,
- *     we should try to move it later in the boot process.
- */
-static struct bridge_controller bridges[MAX_PCI_BUSSES];
-
-extern struct pci_ops bridge_pci_ops;
-
-int bridge_probe(nasid_t nasid, int widget_id, int masterwid)
-{
-       unsigned long offset = NODE_OFFSET(nasid);
-       struct bridge_controller *bc;
-       static int num_bridges = 0;
-       int slot;
-
-       pci_set_flags(PCI_PROBE_ONLY);
-
-       printk("a bridge\n");
-
-       /* XXX: kludge alert.. */
-       if (!num_bridges)
-               ioport_resource.end = ~0UL;
-
-       bc = &bridges[num_bridges];
-
-       bc->pc.pci_ops          = &bridge_pci_ops;
-       bc->pc.mem_resource     = &bc->mem;
-       bc->pc.io_resource      = &bc->io;
-
-       bc->pc.index            = num_bridges;
-
-       bc->mem.name            = "Bridge PCI MEM";
-       bc->pc.mem_offset       = offset;
-       bc->mem.start           = 0;
-       bc->mem.end             = ~0UL;
-       bc->mem.flags           = IORESOURCE_MEM;
-
-       bc->io.name             = "Bridge IO MEM";
-       bc->pc.io_offset        = offset;
-       bc->io.start            = 0UL;
-       bc->io.end              = ~0UL;
-       bc->io.flags            = IORESOURCE_IO;
-
-       bc->widget_id = widget_id;
-       bc->nasid = nasid;
-
-       bc->baddr = (u64)masterwid << 60 | PCI64_ATTR_BAR;
-
-       /*
-        * point to this bridge
-        */
-       bc->base = (struct bridge_regs *)RAW_NODE_SWIN_BASE(nasid, widget_id);
-
-       /*
-        * Clear all pending interrupts.
-        */
-       bridge_write(bc, b_int_rst_stat, BRIDGE_IRR_ALL_CLR);
-
-       /*
-        * Until otherwise set up, assume all interrupts are from slot 0
-        */
-       bridge_write(bc, b_int_device, 0x0);
-
-       /*
-        * swap pio's to pci mem and io space (big windows)
-        */
-       bridge_set(bc, b_wid_control, BRIDGE_CTRL_IO_SWAP |
-                                     BRIDGE_CTRL_MEM_SWAP);
-#ifdef CONFIG_PAGE_SIZE_4KB
-       bridge_clr(bc, b_wid_control, BRIDGE_CTRL_PAGE_SIZE);
-#else /* 16kB or larger */
-       bridge_set(bc, b_wid_control, BRIDGE_CTRL_PAGE_SIZE);
-#endif
-
-       /*
-        * Hmm...  IRIX sets additional bits in the address which
-        * are documented as reserved in the bridge docs.
-        */
-       bridge_write(bc, b_wid_int_upper, 0x8000 | (masterwid << 16));
-       bridge_write(bc, b_wid_int_lower, 0x01800090); /* PI_INT_PEND_MOD off*/
-       bridge_write(bc, b_dir_map, (masterwid << 20)); /* DMA */
-       bridge_write(bc, b_int_enable, 0);
-
-       for (slot = 0; slot < 8; slot ++) {
-               bridge_set(bc, b_device[slot].reg, BRIDGE_DEV_SWAP_DIR);
-               bc->pci_int[slot] = -1;
-       }
-       bridge_read(bc, b_wid_tflush);    /* wait until Bridge PIO complete */
-
-       register_pci_controller(&bc->pc);
-
-       num_bridges++;
-
-       return 0;
-}
-
-/*
- * All observed requests have pin == 1. We could have a global here, that
- * gets incremented and returned every time - unfortunately, pci_map_irq
- * may be called on the same device over and over, and need to return the
- * same value. On O2000, pin can be 0 or 1, and PCI slots can be [0..7].
- *
- * A given PCI device, in general, should be able to intr any of the cpus
- * on any one of the hubs connected to its xbow.
- */
-int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
-{
-       return 0;
-}
-
-static inline struct pci_dev *bridge_root_dev(struct pci_dev *dev)
-{
-       while (dev->bus->parent) {
-               /* Move up the chain of bridges. */
-               dev = dev->bus->self;
-       }
-
-       return dev;
-}
-
-/* Do platform specific device initialization at pci_enable_device() time */
-int pcibios_plat_dev_init(struct pci_dev *dev)
-{
-       struct bridge_controller *bc = BRIDGE_CONTROLLER(dev->bus);
-       struct pci_dev *rdev = bridge_root_dev(dev);
-       int slot = PCI_SLOT(rdev->devfn);
-       int irq;
-
-       irq = bc->pci_int[slot];
-       if (irq == -1) {
-               irq = request_bridge_irq(bc, slot);
-               if (irq < 0)
-                       return irq;
-
-               bc->pci_int[slot] = irq;
-       }
-       dev->irq = irq;
-
-       return 0;
-}
 
 dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
@@ -177,29 +22,6 @@ phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
        return dma_addr & ~(0xffUL << 56);
 }
 
-/*
- * Device might live on a subordinate PCI bus. XXX Walk up the chain of buses
- * to find the slot number in sense of the bridge device register.
- * XXX This also means multiple devices might rely on conflicting bridge
- * settings.
- */
-
-static inline void pci_disable_swapping(struct pci_dev *dev)
-{
-       struct bridge_controller *bc = BRIDGE_CONTROLLER(dev->bus);
-       struct bridge_regs *bridge = bc->base;
-       int slot = PCI_SLOT(dev->devfn);
-
-       /* Turn off byte swapping */
-       bridge->b_device[slot].reg &= ~BRIDGE_DEV_SWAP_DIR;
-       bridge->b_widget.w_tflush;      /* Flush */
-}
-
-static void pci_fixup_ioc3(struct pci_dev *d)
-{
-       pci_disable_swapping(d);
-}
-
 #ifdef CONFIG_NUMA
 int pcibus_to_node(struct pci_bus *bus)
 {
@@ -209,6 +31,3 @@ int pcibus_to_node(struct pci_bus *bus)
 }
 EXPORT_SYMBOL(pcibus_to_node);
 #endif /* CONFIG_NUMA */
-
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3,
-       pci_fixup_ioc3);
diff --git a/arch/mips/pci/pci-xtalk-bridge.c b/arch/mips/pci/pci-xtalk-bridge.c
new file mode 100644 (file)
index 0000000..bcf7f55
--- /dev/null
@@ -0,0 +1,610 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2003 Christoph Hellwig (hch@lst.de)
+ * Copyright (C) 1999, 2000, 04 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ */
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/pci.h>
+#include <linux/smp.h>
+#include <linux/dma-direct.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/xtalk-bridge.h>
+
+#include <asm/pci/bridge.h>
+#include <asm/paccess.h>
+#include <asm/sn/irq_alloc.h>
+
+/*
+ * Most of the IOC3 PCI config registers aren't present;
+ * we emulate what is needed for a normal PCI enumeration.
+ */
+static u32 emulate_ioc3_cfg(int where, int size)
+{
+       if (size == 1 && where == 0x3d)
+               return 0x01;
+       else if (size == 2 && where == 0x3c)
+               return 0x0100;
+       else if (size == 4 && where == 0x3c)
+               return 0x00000100;
+
+       return 0;
+}
+
+static void bridge_disable_swapping(struct pci_dev *dev)
+{
+       struct bridge_controller *bc = BRIDGE_CONTROLLER(dev->bus);
+       int slot = PCI_SLOT(dev->devfn);
+
+       /* Turn off byte swapping */
+       bridge_clr(bc, b_device[slot].reg, BRIDGE_DEV_SWAP_DIR);
+       bridge_read(bc, b_widget.w_tflush);     /* Flush */
+}
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3,
+       bridge_disable_swapping);
+
+
+/*
+ * The Bridge ASIC supports both type 0 and type 1 access.  Type 1 is
+ * not really documented, so right now I can't write code which uses it.
+ * Therefore we use type 0 accesses for now even though they won't work
+ * correctly for PCI-to-PCI bridges.
+ *
+ * The function is complicated by the ultimate brokenness of the IOC3 chip
+ * which is used in SGI systems.  The IOC3 can only handle 32-bit PCI
+ * accesses and only decodes parts of its address space.
+ */
+static int pci_conf0_read_config(struct pci_bus *bus, unsigned int devfn,
+                                int where, int size, u32 *value)
+{
+       struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
+       struct bridge_regs *bridge = bc->base;
+       int slot = PCI_SLOT(devfn);
+       int fn = PCI_FUNC(devfn);
+       void *addr;
+       u32 cf, shift, mask;
+       int res;
+
+       addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[PCI_VENDOR_ID];
+       if (get_dbe(cf, (u32 *)addr))
+               return PCIBIOS_DEVICE_NOT_FOUND;
+
+       /*
+        * IOC3 is broken beyond belief ...  Don't even give the
+        * generic PCI code a chance to look at it for real ...
+        */
+       if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16)))
+               goto is_ioc3;
+
+       addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[where ^ (4 - size)];
+
+       if (size == 1)
+               res = get_dbe(*value, (u8 *)addr);
+       else if (size == 2)
+               res = get_dbe(*value, (u16 *)addr);
+       else
+               res = get_dbe(*value, (u32 *)addr);
+
+       return res ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
+
+is_ioc3:
+
+       /*
+        * IOC3 special handling
+        */
+       if ((where >= 0x14 && where < 0x40) || (where >= 0x48)) {
+               *value = emulate_ioc3_cfg(where, size);
+               return PCIBIOS_SUCCESSFUL;
+       }
+
+       addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2];
+       if (get_dbe(cf, (u32 *)addr))
+               return PCIBIOS_DEVICE_NOT_FOUND;
+
+       shift = ((where & 3) << 3);
+       mask = (0xffffffffU >> ((4 - size) << 3));
+       *value = (cf >> shift) & mask;
+
+       return PCIBIOS_SUCCESSFUL;
+}
+
+static int pci_conf1_read_config(struct pci_bus *bus, unsigned int devfn,
+                                int where, int size, u32 *value)
+{
+       struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
+       struct bridge_regs *bridge = bc->base;
+       int busno = bus->number;
+       int slot = PCI_SLOT(devfn);
+       int fn = PCI_FUNC(devfn);
+       void *addr;
+       u32 cf, shift, mask;
+       int res;
+
+       bridge_write(bc, b_pci_cfg, (busno << 16) | (slot << 11));
+       addr = &bridge->b_type1_cfg.c[(fn << 8) | PCI_VENDOR_ID];
+       if (get_dbe(cf, (u32 *)addr))
+               return PCIBIOS_DEVICE_NOT_FOUND;
+
+       /*
+        * IOC3 is broken beyond belief ...  Don't even give the
+        * generic PCI code a chance to look at it for real ...
+        */
+       if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16)))
+               goto is_ioc3;
+
+       addr = &bridge->b_type1_cfg.c[(fn << 8) | (where ^ (4 - size))];
+
+       if (size == 1)
+               res = get_dbe(*value, (u8 *)addr);
+       else if (size == 2)
+               res = get_dbe(*value, (u16 *)addr);
+       else
+               res = get_dbe(*value, (u32 *)addr);
+
+       return res ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
+
+is_ioc3:
+
+       /*
+        * IOC3 special handling
+        */
+       if ((where >= 0x14 && where < 0x40) || (where >= 0x48)) {
+               *value = emulate_ioc3_cfg(where, size);
+               return PCIBIOS_SUCCESSFUL;
+       }
+
+       addr = &bridge->b_type1_cfg.c[(fn << 8) | where];
+       if (get_dbe(cf, (u32 *)addr))
+               return PCIBIOS_DEVICE_NOT_FOUND;
+
+       shift = ((where & 3) << 3);
+       mask = (0xffffffffU >> ((4 - size) << 3));
+       *value = (cf >> shift) & mask;
+
+       return PCIBIOS_SUCCESSFUL;
+}
+
+static int pci_read_config(struct pci_bus *bus, unsigned int devfn,
+                          int where, int size, u32 *value)
+{
+       if (!pci_is_root_bus(bus))
+               return pci_conf1_read_config(bus, devfn, where, size, value);
+
+       return pci_conf0_read_config(bus, devfn, where, size, value);
+}
+
+static int pci_conf0_write_config(struct pci_bus *bus, unsigned int devfn,
+                                 int where, int size, u32 value)
+{
+       struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
+       struct bridge_regs *bridge = bc->base;
+       int slot = PCI_SLOT(devfn);
+       int fn = PCI_FUNC(devfn);
+       void *addr;
+       u32 cf, shift, mask, smask;
+       int res;
+
+       addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[PCI_VENDOR_ID];
+       if (get_dbe(cf, (u32 *)addr))
+               return PCIBIOS_DEVICE_NOT_FOUND;
+
+       /*
+        * IOC3 is broken beyond belief ...  Don't even give the
+        * generic PCI code a chance to look at it for real ...
+        */
+       if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16)))
+               goto is_ioc3;
+
+       addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[where ^ (4 - size)];
+
+       if (size == 1)
+               res = put_dbe(value, (u8 *)addr);
+       else if (size == 2)
+               res = put_dbe(value, (u16 *)addr);
+       else
+               res = put_dbe(value, (u32 *)addr);
+
+       if (res)
+               return PCIBIOS_DEVICE_NOT_FOUND;
+
+       return PCIBIOS_SUCCESSFUL;
+
+is_ioc3:
+
+       /*
+        * IOC3 special handling
+        */
+       if ((where >= 0x14 && where < 0x40) || (where >= 0x48))
+               return PCIBIOS_SUCCESSFUL;
+
+       addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2];
+
+       if (get_dbe(cf, (u32 *)addr))
+               return PCIBIOS_DEVICE_NOT_FOUND;
+
+       shift = ((where & 3) << 3);
+       mask = (0xffffffffU >> ((4 - size) << 3));
+       smask = mask << shift;
+
+       cf = (cf & ~smask) | ((value & mask) << shift);
+       if (put_dbe(cf, (u32 *)addr))
+               return PCIBIOS_DEVICE_NOT_FOUND;
+
+       return PCIBIOS_SUCCESSFUL;
+}
+
+static int pci_conf1_write_config(struct pci_bus *bus, unsigned int devfn,
+                                 int where, int size, u32 value)
+{
+       struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
+       struct bridge_regs *bridge = bc->base;
+       int slot = PCI_SLOT(devfn);
+       int fn = PCI_FUNC(devfn);
+       int busno = bus->number;
+       void *addr;
+       u32 cf, shift, mask, smask;
+       int res;
+
+       bridge_write(bc, b_pci_cfg, (busno << 16) | (slot << 11));
+       addr = &bridge->b_type1_cfg.c[(fn << 8) | PCI_VENDOR_ID];
+       if (get_dbe(cf, (u32 *)addr))
+               return PCIBIOS_DEVICE_NOT_FOUND;
+
+       /*
+        * IOC3 is broken beyond belief ...  Don't even give the
+        * generic PCI code a chance to look at it for real ...
+        */
+       if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16)))
+               goto is_ioc3;
+
+       addr = &bridge->b_type1_cfg.c[(fn << 8) | (where ^ (4 - size))];
+
+       if (size == 1)
+               res = put_dbe(value, (u8 *)addr);
+       else if (size == 2)
+               res = put_dbe(value, (u16 *)addr);
+       else
+               res = put_dbe(value, (u32 *)addr);
+
+       if (res)
+               return PCIBIOS_DEVICE_NOT_FOUND;
+
+       return PCIBIOS_SUCCESSFUL;
+
+is_ioc3:
+
+       /*
+        * IOC3 special handling
+        */
+       if ((where >= 0x14 && where < 0x40) || (where >= 0x48))
+               return PCIBIOS_SUCCESSFUL;
+
+       addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2];
+       if (get_dbe(cf, (u32 *)addr))
+               return PCIBIOS_DEVICE_NOT_FOUND;
+
+       shift = ((where & 3) << 3);
+       mask = (0xffffffffU >> ((4 - size) << 3));
+       smask = mask << shift;
+
+       cf = (cf & ~smask) | ((value & mask) << shift);
+       if (put_dbe(cf, (u32 *)addr))
+               return PCIBIOS_DEVICE_NOT_FOUND;
+
+       return PCIBIOS_SUCCESSFUL;
+}
+
+static int pci_write_config(struct pci_bus *bus, unsigned int devfn,
+       int where, int size, u32 value)
+{
+       if (!pci_is_root_bus(bus))
+               return pci_conf1_write_config(bus, devfn, where, size, value);
+
+       return pci_conf0_write_config(bus, devfn, where, size, value);
+}
+
+static struct pci_ops bridge_pci_ops = {
+       .read    = pci_read_config,
+       .write   = pci_write_config,
+};
+
+struct bridge_irq_chip_data {
+       struct bridge_controller *bc;
+       nasid_t nasid;
+};
+
+static int bridge_set_affinity(struct irq_data *d, const struct cpumask *mask,
+                              bool force)
+{
+#ifdef CONFIG_NUMA
+       struct bridge_irq_chip_data *data = d->chip_data;
+       int bit = d->parent_data->hwirq;
+       int pin = d->hwirq;
+       nasid_t nasid;
+       int ret, cpu;
+
+       ret = irq_chip_set_affinity_parent(d, mask, force);
+       if (ret >= 0) {
+               cpu = cpumask_first_and(mask, cpu_online_mask);
+               nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
+               bridge_write(data->bc, b_int_addr[pin].addr,
+                            (((data->bc->intr_addr >> 30) & 0x30000) |
+                             bit | (nasid << 8)));
+               bridge_read(data->bc, b_wid_tflush);
+       }
+       return ret;
+#else
+       return irq_chip_set_affinity_parent(d, mask, force);
+#endif
+}
+
+struct irq_chip bridge_irq_chip = {
+       .name             = "BRIDGE",
+       .irq_mask         = irq_chip_mask_parent,
+       .irq_unmask       = irq_chip_unmask_parent,
+       .irq_set_affinity = bridge_set_affinity
+};
+
+static int bridge_domain_alloc(struct irq_domain *domain, unsigned int virq,
+                              unsigned int nr_irqs, void *arg)
+{
+       struct bridge_irq_chip_data *data;
+       struct irq_alloc_info *info = arg;
+       int ret;
+
+       if (nr_irqs > 1 || !info)
+               return -EINVAL;
+
+       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
+       if (ret >= 0) {
+               data->bc = info->ctrl;
+               data->nasid = info->nasid;
+               irq_domain_set_info(domain, virq, info->pin, &bridge_irq_chip,
+                                   data, handle_level_irq, NULL, NULL);
+       } else {
+               kfree(data);
+       }
+
+       return ret;
+}
+
+static void bridge_domain_free(struct irq_domain *domain, unsigned int virq,
+                              unsigned int nr_irqs)
+{
+       struct irq_data *irqd = irq_domain_get_irq_data(domain, virq);
+
+       if (nr_irqs)
+               return;
+
+       kfree(irqd->chip_data);
+       irq_domain_free_irqs_top(domain, virq, nr_irqs);
+}
+
+static int bridge_domain_activate(struct irq_domain *domain,
+                                 struct irq_data *irqd, bool reserve)
+{
+       struct bridge_irq_chip_data *data = irqd->chip_data;
+       struct bridge_controller *bc = data->bc;
+       int bit = irqd->parent_data->hwirq;
+       int pin = irqd->hwirq;
+       u32 device;
+
+       bridge_write(bc, b_int_addr[pin].addr,
+                    (((bc->intr_addr >> 30) & 0x30000) |
+                     bit | (data->nasid << 8)));
+       bridge_set(bc, b_int_enable, (1 << pin));
+       bridge_set(bc, b_int_enable, 0x7ffffe00); /* more stuff in int_enable */
+
+       /*
+        * Enable sending of an interrupt clear packet to the hub on a high to
+        * low transition of the interrupt pin.
+        *
+        * IRIX sets additional bits in the address which are documented as
+        * reserved in the bridge docs.
+        */
+       bridge_set(bc, b_int_mode, (1UL << pin));
+
+       /*
+        * We assume the bridge has a 1:1 mapping between devices
+        * (slots) and interrupt pins.
+        */
+       device = bridge_read(bc, b_int_device);
+       device &= ~(7 << (pin*3));
+       device |= (pin << (pin*3));
+       bridge_write(bc, b_int_device, device);
+
+       bridge_read(bc, b_wid_tflush);
+       return 0;
+}
+
+static void bridge_domain_deactivate(struct irq_domain *domain,
+                                    struct irq_data *irqd)
+{
+       struct bridge_irq_chip_data *data = irqd->chip_data;
+
+       bridge_clr(data->bc, b_int_enable, (1 << irqd->hwirq));
+       bridge_read(data->bc, b_wid_tflush);
+}
+
+static const struct irq_domain_ops bridge_domain_ops = {
+       .alloc      = bridge_domain_alloc,
+       .free       = bridge_domain_free,
+       .activate   = bridge_domain_activate,
+       .deactivate = bridge_domain_deactivate
+};
+
+/*
+ * All observed requests have pin == 1. We could have a global here, that
+ * gets incremented and returned every time - unfortunately, pci_map_irq
+ * may be called on the same device over and over, and needs to return the
+ * same value. On O2000, pin can be 0 or 1, and PCI slots can be [0..7].
+ *
+ * A given PCI device, in general, should be able to intr any of the cpus
+ * on any one of the hubs connected to its xbow.
+ */
+static int bridge_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+       struct bridge_controller *bc = BRIDGE_CONTROLLER(dev->bus);
+       struct irq_alloc_info info;
+       int irq;
+
+       irq = bc->pci_int[slot];
+       if (irq == -1) {
+               info.ctrl = bc;
+               info.nasid = bc->nasid;
+               info.pin = slot;
+
+               irq = irq_domain_alloc_irqs(bc->domain, 1, bc->nasid, &info);
+               if (irq < 0)
+                       return irq;
+
+               bc->pci_int[slot] = irq;
+       }
+       return irq;
+}
+
+static int bridge_probe(struct platform_device *pdev)
+{
+       struct xtalk_bridge_platform_data *bd = dev_get_platdata(&pdev->dev);
+       struct device *dev = &pdev->dev;
+       struct bridge_controller *bc;
+       struct pci_host_bridge *host;
+       struct irq_domain *domain, *parent;
+       struct fwnode_handle *fn;
+       int slot;
+       int err;
+
+       parent = irq_get_default_host();
+       if (!parent)
+               return -ENODEV;
+       fn = irq_domain_alloc_named_fwnode("BRIDGE");
+       if (!fn)
+               return -ENOMEM;
+       domain = irq_domain_create_hierarchy(parent, 0, 8, fn,
+                                            &bridge_domain_ops, NULL);
+       irq_domain_free_fwnode(fn);
+       if (!domain)
+               return -ENOMEM;
+
+       pci_set_flags(PCI_PROBE_ONLY);
+
+       host = devm_pci_alloc_host_bridge(dev, sizeof(*bc));
+       if (!host) {
+               err = -ENOMEM;
+               goto err_remove_domain;
+       }
+
+       bc = pci_host_bridge_priv(host);
+
+       bc->busn.name           = "Bridge PCI busn";
+       bc->busn.start          = 0;
+       bc->busn.end            = 0xff;
+       bc->busn.flags          = IORESOURCE_BUS;
+
+       bc->domain              = domain;
+
+       pci_add_resource_offset(&host->windows, &bd->mem, bd->mem_offset);
+       pci_add_resource_offset(&host->windows, &bd->io, bd->io_offset);
+       pci_add_resource(&host->windows, &bc->busn);
+
+       err = devm_request_pci_bus_resources(dev, &host->windows);
+       if (err < 0)
+               goto err_free_resource;
+
+       bc->nasid = bd->nasid;
+
+       bc->baddr = (u64)bd->masterwid << 60 | PCI64_ATTR_BAR;
+       bc->base = (struct bridge_regs *)bd->bridge_addr;
+       bc->intr_addr = bd->intr_addr;
+
+       /*
+        * Clear all pending interrupts.
+        */
+       bridge_write(bc, b_int_rst_stat, BRIDGE_IRR_ALL_CLR);
+
+       /*
+        * Until otherwise set up, assume all interrupts are from slot 0
+        */
+       bridge_write(bc, b_int_device, 0x0);
+
+       /*
+        * disable swapping for big windows
+        */
+       bridge_clr(bc, b_wid_control,
+                  BRIDGE_CTRL_IO_SWAP | BRIDGE_CTRL_MEM_SWAP);
+#ifdef CONFIG_PAGE_SIZE_4KB
+       bridge_clr(bc, b_wid_control, BRIDGE_CTRL_PAGE_SIZE);
+#else /* 16kB or larger */
+       bridge_set(bc, b_wid_control, BRIDGE_CTRL_PAGE_SIZE);
+#endif
+
+       /*
+        * Hmm...  IRIX sets additional bits in the address which
+        * are documented as reserved in the bridge docs.
+        */
+       bridge_write(bc, b_wid_int_upper,
+                    ((bc->intr_addr >> 32) & 0xffff) | (bd->masterwid << 16));
+       bridge_write(bc, b_wid_int_lower, bc->intr_addr & 0xffffffff);
+       bridge_write(bc, b_dir_map, (bd->masterwid << 20));     /* DMA */
+       bridge_write(bc, b_int_enable, 0);
+
+       for (slot = 0; slot < 8; slot++) {
+               bridge_set(bc, b_device[slot].reg, BRIDGE_DEV_SWAP_DIR);
+               bc->pci_int[slot] = -1;
+       }
+       bridge_read(bc, b_wid_tflush);    /* wait until Bridge PIO complete */
+
+       host->dev.parent = dev;
+       host->sysdata = bc;
+       host->busnr = 0;
+       host->ops = &bridge_pci_ops;
+       host->map_irq = bridge_map_irq;
+       host->swizzle_irq = pci_common_swizzle;
+
+       err = pci_scan_root_bus_bridge(host);
+       if (err < 0)
+               goto err_free_resource;
+
+       pci_bus_claim_resources(host->bus);
+       pci_bus_add_devices(host->bus);
+
+       platform_set_drvdata(pdev, host->bus);
+
+       return 0;
+
+err_free_resource:
+       pci_free_resource_list(&host->windows);
+err_remove_domain:
+       irq_domain_remove(domain);
+       return err;
+}
+
+static int bridge_remove(struct platform_device *pdev)
+{
+       struct pci_bus *bus = platform_get_drvdata(pdev);
+       struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
+
+       irq_domain_remove(bc->domain);
+       pci_lock_rescan_remove();
+       pci_stop_root_bus(bus);
+       pci_remove_root_bus(bus);
+       pci_unlock_rescan_remove();
+
+       return 0;
+}
+
+static struct platform_driver bridge_driver = {
+       .probe  = bridge_probe,
+       .remove = bridge_remove,
+       .driver = {
+               .name = "xtalk-bridge",
+       }
+};
+
+builtin_platform_driver(bridge_driver);
index 37ad267..0b2002e 100644 (file)
@@ -3,6 +3,7 @@
 #include <linux/if_ether.h>
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
 
 #include <asm/paccess.h>
 #include <asm/sgi/ip22.h>
@@ -25,6 +26,8 @@ static struct sgiwd93_platform_data sgiwd93_0_pd = {
        .irq    = SGI_WD93_0_IRQ,
 };
 
+static u64 sgiwd93_0_dma_mask = DMA_BIT_MASK(32);
+
 static struct platform_device sgiwd93_0_device = {
        .name           = "sgiwd93",
        .id             = 0,
@@ -32,6 +35,8 @@ static struct platform_device sgiwd93_0_device = {
        .resource       = sgiwd93_0_resources,
        .dev = {
                .platform_data = &sgiwd93_0_pd,
+               .dma_mask = &sgiwd93_0_dma_mask,
+               .coherent_dma_mask = DMA_BIT_MASK(32),
        },
 };
 
@@ -49,6 +54,8 @@ static struct sgiwd93_platform_data sgiwd93_1_pd = {
        .irq    = SGI_WD93_1_IRQ,
 };
 
+static u64 sgiwd93_1_dma_mask = DMA_BIT_MASK(32);
+
 static struct platform_device sgiwd93_1_device = {
        .name           = "sgiwd93",
        .id             = 1,
@@ -56,6 +63,8 @@ static struct platform_device sgiwd93_1_device = {
        .resource       = sgiwd93_1_resources,
        .dev = {
                .platform_data = &sgiwd93_1_pd,
+               .dma_mask = &sgiwd93_1_dma_mask,
+               .coherent_dma_mask = DMA_BIT_MASK(32),
        },
 };
 
@@ -96,6 +105,8 @@ static struct resource sgiseeq_0_resources[] = {
 
 static struct sgiseeq_platform_data eth0_pd;
 
+static u64 sgiseeq_dma_mask = DMA_BIT_MASK(32);
+
 static struct platform_device eth0_device = {
        .name           = "sgiseeq",
        .id             = 0,
@@ -103,6 +114,8 @@ static struct platform_device eth0_device = {
        .resource       = sgiseeq_0_resources,
        .dev = {
                .platform_data = &eth0_pd,
+               .dma_mask = &sgiseeq_dma_mask,
+               .coherent_dma_mask = DMA_BIT_MASK(32),
        },
 };
 
index 6074efe..066b33f 100644 (file)
@@ -184,5 +184,7 @@ void __init plat_mem_setup(void)
 
        ioc3_eth_init();
 
+       ioport_resource.start = 0;
+       ioport_resource.end = ~0UL;
        set_io_port_base(IO_BASE);
 }
index a32f843..37be049 100644 (file)
 #include <linux/ioport.h>
 #include <linux/kernel.h>
 #include <linux/bitops.h>
+#include <linux/sched.h>
 
 #include <asm/io.h>
 #include <asm/irq_cpu.h>
-#include <asm/pci/bridge.h>
 #include <asm/sn/addrs.h>
 #include <asm/sn/agent.h>
 #include <asm/sn/arch.h>
 #include <asm/sn/hub.h>
 #include <asm/sn/intr.h>
+#include <asm/sn/irq_alloc.h>
 
 struct hub_irq_data {
-       struct bridge_controller *bc;
        u64     *irq_mask[2];
        cpuid_t cpu;
-       int     bit;
-       int     pin;
 };
 
 static DECLARE_BITMAP(hub_irq_map, IP27_HUB_IRQ_COUNT);
@@ -54,7 +52,7 @@ static void enable_hub_irq(struct irq_data *d)
        struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
        unsigned long *mask = per_cpu(irq_enable_mask, hd->cpu);
 
-       set_bit(hd->bit, mask);
+       set_bit(d->hwirq, mask);
        __raw_writeq(mask[0], hd->irq_mask[0]);
        __raw_writeq(mask[1], hd->irq_mask[1]);
 }
@@ -64,71 +62,11 @@ static void disable_hub_irq(struct irq_data *d)
        struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
        unsigned long *mask = per_cpu(irq_enable_mask, hd->cpu);
 
-       clear_bit(hd->bit, mask);
+       clear_bit(d->hwirq, mask);
        __raw_writeq(mask[0], hd->irq_mask[0]);
        __raw_writeq(mask[1], hd->irq_mask[1]);
 }
 
-static unsigned int startup_bridge_irq(struct irq_data *d)
-{
-       struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
-       struct bridge_controller *bc;
-       nasid_t nasid;
-       u32 device;
-       int pin;
-
-       if (!hd)
-               return -EINVAL;
-
-       pin = hd->pin;
-       bc = hd->bc;
-
-       nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(hd->cpu));
-       bridge_write(bc, b_int_addr[pin].addr,
-                    (0x20000 | hd->bit | (nasid << 8)));
-       bridge_set(bc, b_int_enable, (1 << pin));
-       bridge_set(bc, b_int_enable, 0x7ffffe00); /* more stuff in int_enable */
-
-       /*
-        * Enable sending of an interrupt clear packt to the hub on a high to
-        * low transition of the interrupt pin.
-        *
-        * IRIX sets additional bits in the address which are documented as
-        * reserved in the bridge docs.
-        */
-       bridge_set(bc, b_int_mode, (1UL << pin));
-
-       /*
-        * We assume the bridge to have a 1:1 mapping between devices
-        * (slots) and intr pins.
-        */
-       device = bridge_read(bc, b_int_device);
-       device &= ~(7 << (pin*3));
-       device |= (pin << (pin*3));
-       bridge_write(bc, b_int_device, device);
-
-       bridge_read(bc, b_wid_tflush);
-
-       enable_hub_irq(d);
-
-       return 0;       /* Never anything pending.  */
-}
-
-static void shutdown_bridge_irq(struct irq_data *d)
-{
-       struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
-       struct bridge_controller *bc;
-
-       if (!hd)
-               return;
-
-       disable_hub_irq(d);
-
-       bc = hd->bc;
-       bridge_clr(bc, b_int_enable, (1 << hd->pin));
-       bridge_read(bc, b_wid_tflush);
-}
-
 static void setup_hub_mask(struct hub_irq_data *hd, const struct cpumask *mask)
 {
        nasid_t nasid;
@@ -144,9 +82,6 @@ static void setup_hub_mask(struct hub_irq_data *hd, const struct cpumask *mask)
                hd->irq_mask[0] = REMOTE_HUB_PTR(nasid, PI_INT_MASK0_B);
                hd->irq_mask[1] = REMOTE_HUB_PTR(nasid, PI_INT_MASK1_B);
        }
-
-       /* Make sure it's not already pending when we connect it. */
-       REMOTE_HUB_CLR_INTR(nasid, hd->bit);
 }
 
 static int set_affinity_hub_irq(struct irq_data *d, const struct cpumask *mask,
@@ -163,7 +98,7 @@ static int set_affinity_hub_irq(struct irq_data *d, const struct cpumask *mask,
        setup_hub_mask(hd, mask);
 
        if (irqd_is_started(d))
-               startup_bridge_irq(d);
+               enable_hub_irq(d);
 
        irq_data_update_effective_affinity(d, cpumask_of(hd->cpu));
 
@@ -172,20 +107,22 @@ static int set_affinity_hub_irq(struct irq_data *d, const struct cpumask *mask,
 
 static struct irq_chip hub_irq_type = {
        .name             = "HUB",
-       .irq_startup      = startup_bridge_irq,
-       .irq_shutdown     = shutdown_bridge_irq,
        .irq_mask         = disable_hub_irq,
        .irq_unmask       = enable_hub_irq,
        .irq_set_affinity = set_affinity_hub_irq,
 };
 
-int request_bridge_irq(struct bridge_controller *bc, int pin)
+static int hub_domain_alloc(struct irq_domain *domain, unsigned int virq,
+                           unsigned int nr_irqs, void *arg)
 {
+       struct irq_alloc_info *info = arg;
        struct hub_irq_data *hd;
        struct hub_data *hub;
        struct irq_desc *desc;
        int swlevel;
-       int irq;
+
+       if (nr_irqs > 1 || !info)
+               return -EINVAL;
 
        hd = kzalloc(sizeof(*hd), GFP_KERNEL);
        if (!hd)
@@ -196,46 +133,41 @@ int request_bridge_irq(struct bridge_controller *bc, int pin)
                kfree(hd);
                return -EAGAIN;
        }
-       irq = swlevel + IP27_HUB_IRQ_BASE;
-
-       hd->bc = bc;
-       hd->bit = swlevel;
-       hd->pin = pin;
-       irq_set_chip_data(irq, hd);
+       irq_domain_set_info(domain, virq, swlevel, &hub_irq_type, hd,
+                           handle_level_irq, NULL, NULL);
 
        /* use CPU connected to nearest hub */
-       hub = hub_data(NASID_TO_COMPACT_NODEID(bc->nasid));
+       hub = hub_data(NASID_TO_COMPACT_NODEID(info->nasid));
        setup_hub_mask(hd, &hub->h_cpus);
 
-       desc = irq_to_desc(irq);
-       desc->irq_common_data.node = bc->nasid;
+       /* Make sure it's not already pending when we connect it. */
+       REMOTE_HUB_CLR_INTR(info->nasid, swlevel);
+
+       desc = irq_to_desc(virq);
+       desc->irq_common_data.node = info->nasid;
        cpumask_copy(desc->irq_common_data.affinity, &hub->h_cpus);
 
-       return irq;
+       return 0;
 }
 
-void ip27_hub_irq_init(void)
+static void hub_domain_free(struct irq_domain *domain,
+                           unsigned int virq, unsigned int nr_irqs)
 {
-       int i;
+       struct irq_data *irqd;
 
-       for (i = IP27_HUB_IRQ_BASE;
-            i < (IP27_HUB_IRQ_BASE + IP27_HUB_IRQ_COUNT); i++)
-               irq_set_chip_and_handler(i, &hub_irq_type, handle_level_irq);
-
-       /*
-        * Some interrupts are reserved by hardware or by software convention.
-        * Mark these as reserved right away so they won't be used accidentally
-        * later.
-        */
-       for (i = 0; i <= BASE_PCI_IRQ; i++)
-               set_bit(i, hub_irq_map);
-
-       set_bit(IP_PEND0_6_63, hub_irq_map);
+       if (nr_irqs > 1)
+               return;
 
-       for (i = NI_BRDCAST_ERR_A; i <= MSC_PANIC_INTR; i++)
-               set_bit(i, hub_irq_map);
+       irqd = irq_domain_get_irq_data(domain, virq);
+       if (irqd && irqd->chip_data)
+               kfree(irqd->chip_data);
 }
 
+static const struct irq_domain_ops hub_domain_ops = {
+       .alloc = hub_domain_alloc,
+       .free  = hub_domain_free,
+};
+
 /*
  * This code is unnecessarily complex, because we do
  * intr enabling. Basically, once we grab the set of intrs we need
@@ -252,7 +184,9 @@ static void ip27_do_irq_mask0(struct irq_desc *desc)
 {
        cpuid_t cpu = smp_processor_id();
        unsigned long *mask = per_cpu(irq_enable_mask, cpu);
+       struct irq_domain *domain;
        u64 pend0;
+       int irq;
 
        /* copied from Irix intpend0() */
        pend0 = LOCAL_HUB_L(PI_INT_PEND0);
@@ -276,7 +210,14 @@ static void ip27_do_irq_mask0(struct irq_desc *desc)
                generic_smp_call_function_interrupt();
        } else
 #endif
-               generic_handle_irq(__ffs(pend0) + IP27_HUB_IRQ_BASE);
+       {
+               domain = irq_desc_get_handler_data(desc);
+               irq = irq_linear_revmap(domain, __ffs(pend0));
+               if (irq)
+                       generic_handle_irq(irq);
+               else
+                       spurious_interrupt();
+       }
 
        LOCAL_HUB_L(PI_INT_PEND0);
 }
@@ -285,7 +226,9 @@ static void ip27_do_irq_mask1(struct irq_desc *desc)
 {
        cpuid_t cpu = smp_processor_id();
        unsigned long *mask = per_cpu(irq_enable_mask, cpu);
+       struct irq_domain *domain;
        u64 pend1;
+       int irq;
 
        /* copied from Irix intpend0() */
        pend1 = LOCAL_HUB_L(PI_INT_PEND1);
@@ -294,7 +237,12 @@ static void ip27_do_irq_mask1(struct irq_desc *desc)
        if (!pend1)
                return;
 
-       generic_handle_irq(__ffs(pend1) + IP27_HUB_IRQ_BASE + 64);
+       domain = irq_desc_get_handler_data(desc);
+       irq = irq_linear_revmap(domain, __ffs(pend1) + 64);
+       if (irq)
+               generic_handle_irq(irq);
+       else
+               spurious_interrupt();
 
        LOCAL_HUB_L(PI_INT_PEND1);
 }
@@ -325,11 +273,41 @@ void install_ipi(void)
 
 void __init arch_init_irq(void)
 {
+       struct irq_domain *domain;
+       struct fwnode_handle *fn;
+       int i;
+
        mips_cpu_irq_init();
-       ip27_hub_irq_init();
+
+       /*
+        * Some interrupts are reserved by hardware or by software convention.
+        * Mark these as reserved right away so they won't be used accidentally
+        * later.
+        */
+       for (i = 0; i <= BASE_PCI_IRQ; i++)
+               set_bit(i, hub_irq_map);
+
+       set_bit(IP_PEND0_6_63, hub_irq_map);
+
+       for (i = NI_BRDCAST_ERR_A; i <= MSC_PANIC_INTR; i++)
+               set_bit(i, hub_irq_map);
+
+       fn = irq_domain_alloc_named_fwnode("HUB");
+       WARN_ON(fn == NULL);
+       if (!fn)
+               return;
+       domain = irq_domain_create_linear(fn, IP27_HUB_IRQ_COUNT,
+                                         &hub_domain_ops, NULL);
+       WARN_ON(domain == NULL);
+       if (!domain)
+               return;
+
+       irq_set_default_host(domain);
 
        irq_set_percpu_devid(IP27_HUB_PEND0_IRQ);
-       irq_set_chained_handler(IP27_HUB_PEND0_IRQ, ip27_do_irq_mask0);
+       irq_set_chained_handler_and_data(IP27_HUB_PEND0_IRQ, ip27_do_irq_mask0,
+                                        domain);
        irq_set_percpu_devid(IP27_HUB_PEND1_IRQ);
-       irq_set_chained_handler(IP27_HUB_PEND1_IRQ, ip27_do_irq_mask1);
+       irq_set_chained_handler_and_data(IP27_HUB_PEND1_IRQ, ip27_do_irq_mask1,
+                                        domain);
 }
index ce06aaa..bd5cb85 100644 (file)
@@ -9,6 +9,9 @@
 
 #include <linux/kernel.h>
 #include <linux/smp.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/xtalk-bridge.h>
+#include <asm/sn/addrs.h>
 #include <asm/sn/types.h>
 #include <asm/sn/klconfig.h>
 #include <asm/sn/hub.h>
 #define XXBOW_WIDGET_PART_NUM  0xd000  /* Xbow in Xbridge */
 #define BASE_XBOW_PORT         8     /* Lowest external port */
 
-extern int bridge_probe(nasid_t nasid, int widget, int masterwid);
+static void bridge_platform_create(nasid_t nasid, int widget, int masterwid)
+{
+       struct xtalk_bridge_platform_data *bd;
+       struct platform_device *pdev;
+       unsigned long offset;
+
+       bd = kzalloc(sizeof(*bd), GFP_KERNEL);
+       if (!bd)
+               goto no_mem;
+       pdev = platform_device_alloc("xtalk-bridge", PLATFORM_DEVID_AUTO);
+       if (!pdev) {
+               kfree(bd);
+               goto no_mem;
+       }
+
+       offset = NODE_OFFSET(nasid);
+
+       bd->bridge_addr = RAW_NODE_SWIN_BASE(nasid, widget);
+       bd->intr_addr   = BIT_ULL(47) + 0x01800000 + PI_INT_PEND_MOD;
+       bd->nasid       = nasid;
+       bd->masterwid   = masterwid;
+
+       bd->mem.name    = "Bridge PCI MEM";
+       bd->mem.start   = offset + (widget << SWIN_SIZE_BITS);
+       bd->mem.end     = bd->mem.start + SWIN_SIZE - 1;
+       bd->mem.flags   = IORESOURCE_MEM;
+       bd->mem_offset  = offset;
+
+       bd->io.name     = "Bridge PCI IO";
+       bd->io.start    = offset + (widget << SWIN_SIZE_BITS);
+       bd->io.end      = bd->io.start + SWIN_SIZE - 1;
+       bd->io.flags    = IORESOURCE_IO;
+       bd->io_offset   = offset;
+
+       platform_device_add_data(pdev, bd, sizeof(*bd));
+       platform_device_add(pdev);
+       pr_info("xtalk:n%d/%x bridge widget\n", nasid, widget);
+       return;
+
+no_mem:
+       pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget);
+}
 
 static int probe_one_port(nasid_t nasid, int widget, int masterwid)
 {
@@ -31,13 +75,10 @@ static int probe_one_port(nasid_t nasid, int widget, int masterwid)
                (RAW_NODE_SWIN_BASE(nasid, widget) + WIDGET_ID);
        partnum = XWIDGET_PART_NUM(widget_id);
 
-       printk(KERN_INFO "Cpu %d, Nasid 0x%x, widget 0x%x (partnum 0x%x) is ",
-                       smp_processor_id(), nasid, widget, partnum);
-
        switch (partnum) {
        case BRIDGE_WIDGET_PART_NUM:
        case XBRIDGE_WIDGET_PART_NUM:
-               bridge_probe(nasid, widget, masterwid);
+               bridge_platform_create(nasid, widget, masterwid);
                break;
        default:
                break;
@@ -52,8 +93,6 @@ static int xbow_probe(nasid_t nasid)
        klxbow_t *xbow_p;
        unsigned masterwid, i;
 
-       printk("is xbow\n");
-
        /*
         * found xbow, so may have multiple bridges
         * need to probe xbow
@@ -117,19 +156,17 @@ static void xtalk_probe_node(cnodeid_t nid)
                       (RAW_NODE_SWIN_BASE(nasid, 0x0) + WIDGET_ID);
        partnum = XWIDGET_PART_NUM(widget_id);
 
-       printk(KERN_INFO "Cpu %d, Nasid 0x%x: partnum 0x%x is ",
-                       smp_processor_id(), nasid, partnum);
-
        switch (partnum) {
        case BRIDGE_WIDGET_PART_NUM:
-               bridge_probe(nasid, 0x8, 0xa);
+               bridge_platform_create(nasid, 0x8, 0xa);
                break;
        case XBOW_WIDGET_PART_NUM:
        case XXBOW_WIDGET_PART_NUM:
+               pr_info("xtalk:n%d/0 xbow widget\n", nasid);
                xbow_probe(nasid);
                break;
        default:
-               printk(" unknown widget??\n");
+               pr_info("xtalk:n%d/0 unknown widget (0x%x)\n", nasid, partnum);
                break;
        }
 }
index 1d11830..2781ebf 100644 (file)
@@ -93,6 +93,7 @@
 #define VMALLOC_REGION_ID      NON_LINEAR_REGION_ID(H_VMALLOC_START)
 #define IO_REGION_ID           NON_LINEAR_REGION_ID(H_KERN_IO_START)
 #define VMEMMAP_REGION_ID      NON_LINEAR_REGION_ID(H_VMEMMAP_START)
+#define INVALID_REGION_ID      (VMEMMAP_REGION_ID + 1)
 
 /*
  * Defines the address of the vmemap area, in its own region on
@@ -119,14 +120,15 @@ static inline int get_region_id(unsigned long ea)
        if (id == 0)
                return USER_REGION_ID;
 
+       if (id != (PAGE_OFFSET >> 60))
+               return INVALID_REGION_ID;
+
        if (ea < H_KERN_VIRT_START)
                return LINEAR_MAP_REGION_ID;
 
-       VM_BUG_ON(id != 0xc);
        BUILD_BUG_ON(NON_LINEAR_REGION_ID(H_VMALLOC_START) != 2);
 
        region_id = NON_LINEAR_REGION_ID(ea);
-       VM_BUG_ON(region_id > VMEMMAP_REGION_ID);
        return region_id;
 }
 
index 611204e..58efca9 100644 (file)
@@ -232,7 +232,6 @@ static inline void enter_lazy_tlb(struct mm_struct *mm,
 extern void arch_exit_mmap(struct mm_struct *mm);
 
 static inline void arch_unmap(struct mm_struct *mm,
-                             struct vm_area_struct *vma,
                              unsigned long start, unsigned long end)
 {
        if (start <= mm->context.vdso_base && mm->context.vdso_base < end)
index f2ed3ef..862e289 100644 (file)
@@ -767,7 +767,6 @@ static void cacheinfo_create_index_dir(struct cache *cache, int index,
                                  cache_dir->kobj, "index%d", index);
        if (rc) {
                kobject_put(&index_dir->kobj);
-               kfree(index_dir);
                return;
        }
 
index e27792d..8366c2a 100644 (file)
@@ -539,7 +539,8 @@ _GLOBAL(flush_hash_pages)
 #ifdef CONFIG_SMP
        lis     r9, (mmu_hash_lock - PAGE_OFFSET)@ha
        addi    r9, r9, (mmu_hash_lock - PAGE_OFFSET)@l
-       lwz     r8,TASK_CPU(r2)
+       tophys  (r8, r2)
+       lwz     r8, TASK_CPU(r8)
        oris    r8,r8,9
 10:    lwarx   r0,0,r9
        cmpi    0,r0,0
index c5c9ff2..b5d92dc 100644 (file)
@@ -556,7 +556,7 @@ static int __init add_huge_page_size(unsigned long long size)
        if (size <= PAGE_SIZE || !is_power_of_2(size))
                return -EINVAL;
 
-       mmu_psize = check_and_get_huge_psize(size);
+       mmu_psize = check_and_get_huge_psize(shift);
        if (mmu_psize < 0)
                return -EINVAL;
 
index e66745d..ee32c66 100644 (file)
@@ -27,7 +27,7 @@ config RISCV
        select GENERIC_STRNCPY_FROM_USER
        select GENERIC_STRNLEN_USER
        select GENERIC_SMP_IDLE_THREAD
-       select GENERIC_ATOMIC64 if !64BIT || !RISCV_ISA_A
+       select GENERIC_ATOMIC64 if !64BIT
        select HAVE_ARCH_AUDITSYSCALL
        select HAVE_MEMBLOCK_NODE_MAP
        select HAVE_DMA_CONTIGUOUS
@@ -35,7 +35,6 @@ config RISCV
        select HAVE_PERF_EVENTS
        select HAVE_SYSCALL_TRACEPOINTS
        select IRQ_DOMAIN
-       select RISCV_ISA_A if SMP
        select SPARSE_IRQ
        select SYSCTL_EXCEPTION_TRACE
        select HAVE_ARCH_TRACEHOOK
@@ -195,9 +194,6 @@ config RISCV_ISA_C
 
           If you don't know what to do here, say Y.
 
-config RISCV_ISA_A
-       def_bool y
-
 menu "supported PMU type"
        depends on PERF_EVENTS
 
index c6342e6..6b0741c 100644 (file)
@@ -39,9 +39,8 @@ endif
 KBUILD_CFLAGS += -Wall
 
 # ISA string setting
-riscv-march-$(CONFIG_ARCH_RV32I)       := rv32im
-riscv-march-$(CONFIG_ARCH_RV64I)       := rv64im
-riscv-march-$(CONFIG_RISCV_ISA_A)      := $(riscv-march-y)a
+riscv-march-$(CONFIG_ARCH_RV32I)       := rv32ima
+riscv-march-$(CONFIG_ARCH_RV64I)       := rv64ima
 riscv-march-$(CONFIG_FPU)              := $(riscv-march-y)fd
 riscv-march-$(CONFIG_RISCV_ISA_C)      := $(riscv-march-y)c
 KBUILD_CFLAGS += -march=$(subst fd,,$(riscv-march-y))
index f86d68d..3d019e0 100644 (file)
@@ -3,6 +3,7 @@ generic-y += checksum.h
 generic-y += compat.h
 generic-y += device.h
 generic-y += div64.h
+generic-y += extable.h
 generic-y += dma.h
 generic-y += dma-contiguous.h
 generic-y += dma-mapping.h
index bfc7f09..52a1fbd 100644 (file)
 #include <asm/asm.h>
 
 #ifdef CONFIG_GENERIC_BUG
-#define __BUG_INSN     _AC(0x00100073, UL) /* ebreak */
+#define __INSN_LENGTH_MASK  _UL(0x3)
+#define __INSN_LENGTH_32    _UL(0x3)
+#define __COMPRESSED_INSN_MASK _UL(0xffff)
+
+#define __BUG_INSN_32  _UL(0x00100073) /* ebreak */
+#define __BUG_INSN_16  _UL(0x9002) /* c.ebreak */
 
 #ifndef __ASSEMBLY__
 typedef u32 bug_insn_t;
@@ -38,38 +43,46 @@ typedef u32 bug_insn_t;
 #define __BUG_ENTRY                    \
        __BUG_ENTRY_ADDR "\n\t"         \
        __BUG_ENTRY_FILE "\n\t"         \
-       RISCV_SHORT " %1"
+       RISCV_SHORT " %1\n\t"           \
+       RISCV_SHORT " %2"
 #else
 #define __BUG_ENTRY                    \
-       __BUG_ENTRY_ADDR
+       __BUG_ENTRY_ADDR "\n\t"         \
+       RISCV_SHORT " %2"
 #endif
 
-#define BUG()                                                  \
+#define __BUG_FLAGS(flags)                                     \
 do {                                                           \
        __asm__ __volatile__ (                                  \
                "1:\n\t"                                        \
                        "ebreak\n"                              \
-                       ".pushsection __bug_table,\"a\"\n\t"    \
+                       ".pushsection __bug_table,\"aw\"\n\t"   \
                "2:\n\t"                                        \
                        __BUG_ENTRY "\n\t"                      \
-                       ".org 2b + %2\n\t"                      \
+                       ".org 2b + %3\n\t"                      \
                        ".popsection"                           \
                :                                               \
                : "i" (__FILE__), "i" (__LINE__),               \
-                 "i" (sizeof(struct bug_entry)));              \
-       unreachable();                                          \
+                 "i" (flags),                                  \
+                 "i" (sizeof(struct bug_entry)));              \
 } while (0)
+
 #endif /* !__ASSEMBLY__ */
 #else /* CONFIG_GENERIC_BUG */
 #ifndef __ASSEMBLY__
-#define BUG()                                                  \
-do {                                                           \
+#define __BUG_FLAGS(flags) do {                                        \
        __asm__ __volatile__ ("ebreak\n");                      \
-       unreachable();                                          \
 } while (0)
 #endif /* !__ASSEMBLY__ */
 #endif /* CONFIG_GENERIC_BUG */
 
+#define BUG() do {                                             \
+       __BUG_FLAGS(0);                                         \
+       unreachable();                                          \
+} while (0)
+
+#define __WARN_FLAGS(flags) __BUG_FLAGS(BUGFLAG_WARNING|(flags))
+
 #define HAVE_ARCH_BUG
 
 #include <asm-generic/bug.h>
index 8f13074..1f4ba68 100644 (file)
@@ -47,7 +47,7 @@ static inline void flush_dcache_page(struct page *page)
 
 #else /* CONFIG_SMP */
 
-#define flush_icache_all() sbi_remote_fence_i(NULL)
+void flush_icache_all(void);
 void flush_icache_mm(struct mm_struct *mm, bool local);
 
 #endif /* CONFIG_SMP */
index 28a0d1c..3c3c26c 100644 (file)
 #ifndef _ASM_RISCV_CSR_H
 #define _ASM_RISCV_CSR_H
 
+#include <asm/asm.h>
 #include <linux/const.h>
 
 /* Status register flags */
-#define SR_SIE _AC(0x00000002, UL) /* Supervisor Interrupt Enable */
-#define SR_SPIE        _AC(0x00000020, UL) /* Previous Supervisor IE */
-#define SR_SPP _AC(0x00000100, UL) /* Previously Supervisor */
-#define SR_SUM _AC(0x00040000, UL) /* Supervisor may access User Memory */
-
-#define SR_FS           _AC(0x00006000, UL) /* Floating-point Status */
-#define SR_FS_OFF       _AC(0x00000000, UL)
-#define SR_FS_INITIAL   _AC(0x00002000, UL)
-#define SR_FS_CLEAN     _AC(0x00004000, UL)
-#define SR_FS_DIRTY     _AC(0x00006000, UL)
-
-#define SR_XS           _AC(0x00018000, UL) /* Extension Status */
-#define SR_XS_OFF       _AC(0x00000000, UL)
-#define SR_XS_INITIAL   _AC(0x00008000, UL)
-#define SR_XS_CLEAN     _AC(0x00010000, UL)
-#define SR_XS_DIRTY     _AC(0x00018000, UL)
+#define SR_SIE         _AC(0x00000002, UL) /* Supervisor Interrupt Enable */
+#define SR_SPIE                _AC(0x00000020, UL) /* Previous Supervisor IE */
+#define SR_SPP         _AC(0x00000100, UL) /* Previously Supervisor */
+#define SR_SUM         _AC(0x00040000, UL) /* Supervisor User Memory Access */
+
+#define SR_FS          _AC(0x00006000, UL) /* Floating-point Status */
+#define SR_FS_OFF      _AC(0x00000000, UL)
+#define SR_FS_INITIAL  _AC(0x00002000, UL)
+#define SR_FS_CLEAN    _AC(0x00004000, UL)
+#define SR_FS_DIRTY    _AC(0x00006000, UL)
+
+#define SR_XS          _AC(0x00018000, UL) /* Extension Status */
+#define SR_XS_OFF      _AC(0x00000000, UL)
+#define SR_XS_INITIAL  _AC(0x00008000, UL)
+#define SR_XS_CLEAN    _AC(0x00010000, UL)
+#define SR_XS_DIRTY    _AC(0x00018000, UL)
 
 #ifndef CONFIG_64BIT
-#define SR_SD   _AC(0x80000000, UL) /* FS/XS dirty */
+#define SR_SD          _AC(0x80000000, UL) /* FS/XS dirty */
 #else
-#define SR_SD   _AC(0x8000000000000000, UL) /* FS/XS dirty */
+#define SR_SD          _AC(0x8000000000000000, UL) /* FS/XS dirty */
 #endif
 
 /* SATP flags */
-#if __riscv_xlen == 32
-#define SATP_PPN     _AC(0x003FFFFF, UL)
-#define SATP_MODE_32 _AC(0x80000000, UL)
-#define SATP_MODE    SATP_MODE_32
+#ifndef CONFIG_64BIT
+#define SATP_PPN       _AC(0x003FFFFF, UL)
+#define SATP_MODE_32   _AC(0x80000000, UL)
+#define SATP_MODE      SATP_MODE_32
 #else
-#define SATP_PPN     _AC(0x00000FFFFFFFFFFF, UL)
-#define SATP_MODE_39 _AC(0x8000000000000000, UL)
-#define SATP_MODE    SATP_MODE_39
+#define SATP_PPN       _AC(0x00000FFFFFFFFFFF, UL)
+#define SATP_MODE_39   _AC(0x8000000000000000, UL)
+#define SATP_MODE      SATP_MODE_39
 #endif
 
-/* Interrupt Enable and Interrupt Pending flags */
-#define SIE_SSIE _AC(0x00000002, UL) /* Software Interrupt Enable */
-#define SIE_STIE _AC(0x00000020, UL) /* Timer Interrupt Enable */
-#define SIE_SEIE _AC(0x00000200, UL) /* External Interrupt Enable */
-
-#define EXC_INST_MISALIGNED     0
-#define EXC_INST_ACCESS         1
-#define EXC_BREAKPOINT          3
-#define EXC_LOAD_ACCESS         5
-#define EXC_STORE_ACCESS        7
-#define EXC_SYSCALL             8
-#define EXC_INST_PAGE_FAULT     12
-#define EXC_LOAD_PAGE_FAULT     13
-#define EXC_STORE_PAGE_FAULT    15
+/* SCAUSE */
+#define SCAUSE_IRQ_FLAG                (_AC(1, UL) << (__riscv_xlen - 1))
+
+#define IRQ_U_SOFT             0
+#define IRQ_S_SOFT             1
+#define IRQ_M_SOFT             3
+#define IRQ_U_TIMER            4
+#define IRQ_S_TIMER            5
+#define IRQ_M_TIMER            7
+#define IRQ_U_EXT              8
+#define IRQ_S_EXT              9
+#define IRQ_M_EXT              11
+
+#define EXC_INST_MISALIGNED    0
+#define EXC_INST_ACCESS                1
+#define EXC_BREAKPOINT         3
+#define EXC_LOAD_ACCESS                5
+#define EXC_STORE_ACCESS       7
+#define EXC_SYSCALL            8
+#define EXC_INST_PAGE_FAULT    12
+#define EXC_LOAD_PAGE_FAULT    13
+#define EXC_STORE_PAGE_FAULT   15
+
+/* SIE (Interrupt Enable) and SIP (Interrupt Pending) flags */
+#define SIE_SSIE               (_AC(0x1, UL) << IRQ_S_SOFT)
+#define SIE_STIE               (_AC(0x1, UL) << IRQ_S_TIMER)
+#define SIE_SEIE               (_AC(0x1, UL) << IRQ_S_EXT)
+
+#define CSR_CYCLE              0xc00
+#define CSR_TIME               0xc01
+#define CSR_INSTRET            0xc02
+#define CSR_SSTATUS            0x100
+#define CSR_SIE                        0x104
+#define CSR_STVEC              0x105
+#define CSR_SCOUNTEREN         0x106
+#define CSR_SSCRATCH           0x140
+#define CSR_SEPC               0x141
+#define CSR_SCAUSE             0x142
+#define CSR_STVAL              0x143
+#define CSR_SIP                        0x144
+#define CSR_SATP               0x180
+#define CSR_CYCLEH             0xc80
+#define CSR_TIMEH              0xc81
+#define CSR_INSTRETH           0xc82
 
 #ifndef __ASSEMBLY__
 
 #define csr_swap(csr, val)                                     \
 ({                                                             \
        unsigned long __v = (unsigned long)(val);               \
-       __asm__ __volatile__ ("csrrw %0, " #csr ", %1"          \
+       __asm__ __volatile__ ("csrrw %0, " __ASM_STR(csr) ", %1"\
                              : "=r" (__v) : "rK" (__v)         \
                              : "memory");                      \
        __v;                                                    \
 #define csr_read(csr)                                          \
 ({                                                             \
        register unsigned long __v;                             \
-       __asm__ __volatile__ ("csrr %0, " #csr                  \
+       __asm__ __volatile__ ("csrr %0, " __ASM_STR(csr)        \
                              : "=r" (__v) :                    \
                              : "memory");                      \
        __v;                                                    \
 #define csr_write(csr, val)                                    \
 ({                                                             \
        unsigned long __v = (unsigned long)(val);               \
-       __asm__ __volatile__ ("csrw " #csr ", %0"               \
+       __asm__ __volatile__ ("csrw " __ASM_STR(csr) ", %0"     \
                              : : "rK" (__v)                    \
                              : "memory");                      \
 })
 #define csr_read_set(csr, val)                                 \
 ({                                                             \
        unsigned long __v = (unsigned long)(val);               \
-       __asm__ __volatile__ ("csrrs %0, " #csr ", %1"          \
+       __asm__ __volatile__ ("csrrs %0, " __ASM_STR(csr) ", %1"\
                              : "=r" (__v) : "rK" (__v)         \
                              : "memory");                      \
        __v;                                                    \
 #define csr_set(csr, val)                                      \
 ({                                                             \
        unsigned long __v = (unsigned long)(val);               \
-       __asm__ __volatile__ ("csrs " #csr ", %0"               \
+       __asm__ __volatile__ ("csrs " __ASM_STR(csr) ", %0"     \
                              : : "rK" (__v)                    \
                              : "memory");                      \
 })
 #define csr_read_clear(csr, val)                               \
 ({                                                             \
        unsigned long __v = (unsigned long)(val);               \
-       __asm__ __volatile__ ("csrrc %0, " #csr ", %1"          \
+       __asm__ __volatile__ ("csrrc %0, " __ASM_STR(csr) ", %1"\
                              : "=r" (__v) : "rK" (__v)         \
                              : "memory");                      \
        __v;                                                    \
 #define csr_clear(csr, val)                                    \
 ({                                                             \
        unsigned long __v = (unsigned long)(val);               \
-       __asm__ __volatile__ ("csrc " #csr ", %0"               \
+       __asm__ __volatile__ ("csrc " __ASM_STR(csr) ", %0"     \
                              : : "rK" (__v)                    \
                              : "memory");                      \
 })
index 697fc23..ce0cd7d 100644 (file)
 #define ELF_CLASS      ELFCLASS32
 #endif
 
-#if defined(__LITTLE_ENDIAN)
 #define ELF_DATA       ELFDATA2LSB
-#elif defined(__BIG_ENDIAN)
-#define ELF_DATA       ELFDATA2MSB
-#else
-#error "Unknown endianness"
-#endif
 
 /*
  * This is used to ensure we don't load something for the wrong architecture.
index 6664162..4ad6409 100644 (file)
@@ -7,18 +7,6 @@
 #ifndef _ASM_FUTEX_H
 #define _ASM_FUTEX_H
 
-#ifndef CONFIG_RISCV_ISA_A
-/*
- * Use the generic interrupt disabling versions if the A extension
- * is not supported.
- */
-#ifdef CONFIG_SMP
-#error "Can't support generic futex calls without A extension on SMP"
-#endif
-#include <asm-generic/futex.h>
-
-#else /* CONFIG_RISCV_ISA_A */
-
 #include <linux/futex.h>
 #include <linux/uaccess.h>
 #include <linux/errno.h>
@@ -124,5 +112,4 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
        return ret;
 }
 
-#endif /* CONFIG_RISCV_ISA_A */
 #endif /* _ASM_FUTEX_H */
index 07a3c6d..1a69b3b 100644 (file)
 /* read interrupt enabled status */
 static inline unsigned long arch_local_save_flags(void)
 {
-       return csr_read(sstatus);
+       return csr_read(CSR_SSTATUS);
 }
 
 /* unconditionally enable interrupts */
 static inline void arch_local_irq_enable(void)
 {
-       csr_set(sstatus, SR_SIE);
+       csr_set(CSR_SSTATUS, SR_SIE);
 }
 
 /* unconditionally disable interrupts */
 static inline void arch_local_irq_disable(void)
 {
-       csr_clear(sstatus, SR_SIE);
+       csr_clear(CSR_SSTATUS, SR_SIE);
 }
 
 /* get status and disable interrupts */
 static inline unsigned long arch_local_irq_save(void)
 {
-       return csr_read_clear(sstatus, SR_SIE);
+       return csr_read_clear(CSR_SSTATUS, SR_SIE);
 }
 
 /* test flags */
@@ -57,7 +57,7 @@ static inline int arch_irqs_disabled(void)
 /* set interrupt enabled status */
 static inline void arch_local_irq_restore(unsigned long flags)
 {
-       csr_set(sstatus, flags & SR_SIE);
+       csr_set(CSR_SSTATUS, flags & SR_SIE);
 }
 
 #endif /* _ASM_RISCV_IRQFLAGS_H */
index 336d60e..bf4f097 100644 (file)
@@ -20,8 +20,6 @@
 
 #include <linux/mm.h>
 #include <linux/sched.h>
-#include <asm/tlbflush.h>
-#include <asm/cacheflush.h>
 
 static inline void enter_lazy_tlb(struct mm_struct *mm,
        struct task_struct *task)
@@ -39,61 +37,8 @@ static inline void destroy_context(struct mm_struct *mm)
 {
 }
 
-/*
- * When necessary, performs a deferred icache flush for the given MM context,
- * on the local CPU.  RISC-V has no direct mechanism for instruction cache
- * shoot downs, so instead we send an IPI that informs the remote harts they
- * need to flush their local instruction caches.  To avoid pathologically slow
- * behavior in a common case (a bunch of single-hart processes on a many-hart
- * machine, ie 'make -j') we avoid the IPIs for harts that are not currently
- * executing a MM context and instead schedule a deferred local instruction
- * cache flush to be performed before execution resumes on each hart.  This
- * actually performs that local instruction cache flush, which implicitly only
- * refers to the current hart.
- */
-static inline void flush_icache_deferred(struct mm_struct *mm)
-{
-#ifdef CONFIG_SMP
-       unsigned int cpu = smp_processor_id();
-       cpumask_t *mask = &mm->context.icache_stale_mask;
-
-       if (cpumask_test_cpu(cpu, mask)) {
-               cpumask_clear_cpu(cpu, mask);
-               /*
-                * Ensure the remote hart's writes are visible to this hart.
-                * This pairs with a barrier in flush_icache_mm.
-                */
-               smp_mb();
-               local_flush_icache_all();
-       }
-#endif
-}
-
-static inline void switch_mm(struct mm_struct *prev,
-       struct mm_struct *next, struct task_struct *task)
-{
-       if (likely(prev != next)) {
-               /*
-                * Mark the current MM context as inactive, and the next as
-                * active.  This is at least used by the icache flushing
-                * routines in order to determine who should
-                */
-               unsigned int cpu = smp_processor_id();
-
-               cpumask_clear_cpu(cpu, mm_cpumask(prev));
-               cpumask_set_cpu(cpu, mm_cpumask(next));
-
-               /*
-                * Use the old spbtr name instead of using the current satp
-                * name to support binutils 2.29 which doesn't know about the
-                * privileged ISA 1.10 yet.
-                */
-               csr_write(sptbr, virt_to_pfn(next->pgd) | SATP_MODE);
-               local_flush_tlb_all();
-
-               flush_icache_deferred(next);
-       }
-}
+void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+       struct task_struct *task);
 
 static inline void activate_mm(struct mm_struct *prev,
                               struct mm_struct *next)
index d35ec2f..9c867a4 100644 (file)
@@ -70,47 +70,38 @@ struct pt_regs {
 
 
 /* Helpers for working with the instruction pointer */
-#define GET_IP(regs) ((regs)->sepc)
-#define SET_IP(regs, val) (GET_IP(regs) = (val))
-
 static inline unsigned long instruction_pointer(struct pt_regs *regs)
 {
-       return GET_IP(regs);
+       return regs->sepc;
 }
 static inline void instruction_pointer_set(struct pt_regs *regs,
                                           unsigned long val)
 {
-       SET_IP(regs, val);
+       regs->sepc = val;
 }
 
 #define profile_pc(regs) instruction_pointer(regs)
 
 /* Helpers for working with the user stack pointer */
-#define GET_USP(regs) ((regs)->sp)
-#define SET_USP(regs, val) (GET_USP(regs) = (val))
-
 static inline unsigned long user_stack_pointer(struct pt_regs *regs)
 {
-       return GET_USP(regs);
+       return regs->sp;
 }
 static inline void user_stack_pointer_set(struct pt_regs *regs,
                                          unsigned long val)
 {
-       SET_USP(regs, val);
+       regs->sp = val;
 }
 
 /* Helpers for working with the frame pointer */
-#define GET_FP(regs) ((regs)->s0)
-#define SET_FP(regs, val) (GET_FP(regs) = (val))
-
 static inline unsigned long frame_pointer(struct pt_regs *regs)
 {
-       return GET_FP(regs);
+       return regs->s0;
 }
 static inline void frame_pointer_set(struct pt_regs *regs,
                                     unsigned long val)
 {
-       SET_FP(regs, val);
+       regs->s0 = val;
 }
 
 static inline unsigned long regs_return_value(struct pt_regs *regs)
index b6bb10b..19f2316 100644 (file)
 #define SBI_REMOTE_SFENCE_VMA_ASID 7
 #define SBI_SHUTDOWN 8
 
-#define SBI_CALL(which, arg0, arg1, arg2) ({                   \
+#define SBI_CALL(which, arg0, arg1, arg2, arg3) ({             \
        register uintptr_t a0 asm ("a0") = (uintptr_t)(arg0);   \
        register uintptr_t a1 asm ("a1") = (uintptr_t)(arg1);   \
        register uintptr_t a2 asm ("a2") = (uintptr_t)(arg2);   \
+       register uintptr_t a3 asm ("a3") = (uintptr_t)(arg3);   \
        register uintptr_t a7 asm ("a7") = (uintptr_t)(which);  \
        asm volatile ("ecall"                                   \
                      : "+r" (a0)                               \
-                     : "r" (a1), "r" (a2), "r" (a7)            \
+                     : "r" (a1), "r" (a2), "r" (a3), "r" (a7)  \
                      : "memory");                              \
        a0;                                                     \
 })
 
 /* Lazy implementations until SBI is finalized */
-#define SBI_CALL_0(which) SBI_CALL(which, 0, 0, 0)
-#define SBI_CALL_1(which, arg0) SBI_CALL(which, arg0, 0, 0)
-#define SBI_CALL_2(which, arg0, arg1) SBI_CALL(which, arg0, arg1, 0)
+#define SBI_CALL_0(which) SBI_CALL(which, 0, 0, 0, 0)
+#define SBI_CALL_1(which, arg0) SBI_CALL(which, arg0, 0, 0, 0)
+#define SBI_CALL_2(which, arg0, arg1) SBI_CALL(which, arg0, arg1, 0, 0)
+#define SBI_CALL_3(which, arg0, arg1, arg2) \
+               SBI_CALL(which, arg0, arg1, arg2, 0)
+#define SBI_CALL_4(which, arg0, arg1, arg2, arg3) \
+               SBI_CALL(which, arg0, arg1, arg2, arg3)
 
 static inline void sbi_console_putchar(int ch)
 {
@@ -86,7 +91,7 @@ static inline void sbi_remote_sfence_vma(const unsigned long *hart_mask,
                                         unsigned long start,
                                         unsigned long size)
 {
-       SBI_CALL_1(SBI_REMOTE_SFENCE_VMA, hart_mask);
+       SBI_CALL_3(SBI_REMOTE_SFENCE_VMA, hart_mask, start, size);
 }
 
 static inline void sbi_remote_sfence_vma_asid(const unsigned long *hart_mask,
@@ -94,7 +99,7 @@ static inline void sbi_remote_sfence_vma_asid(const unsigned long *hart_mask,
                                              unsigned long size,
                                              unsigned long asid)
 {
-       SBI_CALL_1(SBI_REMOTE_SFENCE_VMA_ASID, hart_mask);
+       SBI_CALL_4(SBI_REMOTE_SFENCE_VMA_ASID, hart_mask, start, size, asid);
 }
 
 #endif
diff --git a/arch/riscv/include/asm/sifive_l2_cache.h b/arch/riscv/include/asm/sifive_l2_cache.h
new file mode 100644 (file)
index 0000000..04f6748
--- /dev/null
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * SiFive L2 Cache Controller header file
+ *
+ */
+
+#ifndef _ASM_RISCV_SIFIVE_L2_CACHE_H
+#define _ASM_RISCV_SIFIVE_L2_CACHE_H
+
+extern int register_sifive_l2_error_notifier(struct notifier_block *nb);
+extern int unregister_sifive_l2_error_notifier(struct notifier_block *nb);
+
+#define SIFIVE_L2_ERR_TYPE_CE 0
+#define SIFIVE_L2_ERR_TYPE_UE 1
+
+#endif /* _ASM_RISCV_SIFIVE_L2_CACHE_H */
index 1c9cc83..9c03987 100644 (file)
@@ -28,7 +28,9 @@
 #include <asm/processor.h>
 #include <asm/csr.h>
 
-typedef unsigned long mm_segment_t;
+typedef struct {
+       unsigned long seg;
+} mm_segment_t;
 
 /*
  * low level task data that entry.S needs immediate access to
index fb53a80..b26f407 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/compiler.h>
 #include <linux/thread_info.h>
 #include <asm/byteorder.h>
+#include <asm/extable.h>
 #include <asm/asm.h>
 
 #define __enable_user_access()                                                 \
  * For historical reasons, these macros are grossly misnamed.
  */
 
-#define KERNEL_DS      (~0UL)
-#define USER_DS                (TASK_SIZE)
+#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
+
+#define KERNEL_DS      MAKE_MM_SEG(~0UL)
+#define USER_DS                MAKE_MM_SEG(TASK_SIZE)
 
 #define get_fs()       (current_thread_info()->addr_limit)
 
@@ -48,9 +51,9 @@ static inline void set_fs(mm_segment_t fs)
        current_thread_info()->addr_limit = fs;
 }
 
-#define segment_eq(a, b) ((a) == (b))
+#define segment_eq(a, b) ((a).seg == (b).seg)
 
-#define user_addr_max()        (get_fs())
+#define user_addr_max()        (get_fs().seg)
 
 
 /**
@@ -82,7 +85,7 @@ static inline int __access_ok(unsigned long addr, unsigned long size)
 {
        const mm_segment_t fs = get_fs();
 
-       return (size <= fs) && (addr <= (fs - size));
+       return size <= fs.seg && addr <= fs.seg - size;
 }
 
 /*
@@ -98,21 +101,8 @@ static inline int __access_ok(unsigned long addr, unsigned long size)
  * on our cache or tlb entries.
  */
 
-struct exception_table_entry {
-       unsigned long insn, fixup;
-};
-
-extern int fixup_exception(struct pt_regs *state);
-
-#if defined(__LITTLE_ENDIAN)
-#define __MSW  1
 #define __LSW  0
-#elif defined(__BIG_ENDIAN)
-#define __MSW  0
-#define        __LSW   1
-#else
-#error "Unknown endianness"
-#endif
+#define __MSW  1
 
 /*
  * The "__xxx" versions of the user access functions do not verify the address
index dac9834..578bb5e 100644 (file)
@@ -312,9 +312,6 @@ void asm_offsets(void)
                - offsetof(struct task_struct, thread.fstate.f[0])
        );
 
-       /* The assembler needs access to THREAD_SIZE as well. */
-       DEFINE(ASM_THREAD_SIZE, THREAD_SIZE);
-
        /*
         * We allocate a pt_regs on the stack when entering the kernel.  This
         * ensures the alignment is sane.
index cf2fca1..c8d2a32 100644 (file)
@@ -136,8 +136,7 @@ static void c_stop(struct seq_file *m, void *v)
 static int c_show(struct seq_file *m, void *v)
 {
        unsigned long cpu_id = (unsigned long)v - 1;
-       struct device_node *node = of_get_cpu_node(cpuid_to_hartid_map(cpu_id),
-                                                  NULL);
+       struct device_node *node = of_get_cpu_node(cpu_id, NULL);
        const char *compat, *isa, *mmu;
 
        seq_printf(m, "processor\t: %lu\n", cpu_id);
index fd9b57c..1c1ecc2 100644 (file)
         * the kernel thread pointer.  If we came from the kernel, sscratch
         * will contain 0, and we should continue on the current TP.
         */
-       csrrw tp, sscratch, tp
+       csrrw tp, CSR_SSCRATCH, tp
        bnez tp, _save_context
 
 _restore_kernel_tpsp:
-       csrr tp, sscratch
+       csrr tp, CSR_SSCRATCH
        REG_S sp, TASK_TI_KERNEL_SP(tp)
 _save_context:
        REG_S sp, TASK_TI_USER_SP(tp)
@@ -87,11 +87,11 @@ _save_context:
        li t0, SR_SUM | SR_FS
 
        REG_L s0, TASK_TI_USER_SP(tp)
-       csrrc s1, sstatus, t0
-       csrr s2, sepc
-       csrr s3, sbadaddr
-       csrr s4, scause
-       csrr s5, sscratch
+       csrrc s1, CSR_SSTATUS, t0
+       csrr s2, CSR_SEPC
+       csrr s3, CSR_STVAL
+       csrr s4, CSR_SCAUSE
+       csrr s5, CSR_SSCRATCH
        REG_S s0, PT_SP(sp)
        REG_S s1, PT_SSTATUS(sp)
        REG_S s2, PT_SEPC(sp)
@@ -107,8 +107,8 @@ _save_context:
        .macro RESTORE_ALL
        REG_L a0, PT_SSTATUS(sp)
        REG_L a2, PT_SEPC(sp)
-       csrw sstatus, a0
-       csrw sepc, a2
+       csrw CSR_SSTATUS, a0
+       csrw CSR_SEPC, a2
 
        REG_L x1,  PT_RA(sp)
        REG_L x3,  PT_GP(sp)
@@ -155,7 +155,7 @@ ENTRY(handle_exception)
         * Set sscratch register to 0, so that if a recursive exception
         * occurs, the exception vector knows it came from the kernel
         */
-       csrw sscratch, x0
+       csrw CSR_SSCRATCH, x0
 
        /* Load the global pointer */
 .option push
@@ -248,7 +248,7 @@ resume_userspace:
         * Save TP into sscratch, so we can find the kernel data structures
         * again.
         */
-       csrw sscratch, tp
+       csrw CSR_SSCRATCH, tp
 
 restore_all:
        RESTORE_ALL
index fe884cd..370c66c 100644 (file)
@@ -23,7 +23,8 @@
 __INIT
 ENTRY(_start)
        /* Mask all interrupts */
-       csrw sie, zero
+       csrw CSR_SIE, zero
+       csrw CSR_SIP, zero
 
        /* Load the global pointer */
 .option push
@@ -68,14 +69,10 @@ clear_bss_done:
        /* Restore C environment */
        la tp, init_task
        sw zero, TASK_TI_CPU(tp)
-
-       la sp, init_thread_union
-       li a0, ASM_THREAD_SIZE
-       add sp, sp, a0
+       la sp, init_thread_union + THREAD_SIZE
 
        /* Start the kernel */
-       mv a0, s0
-       mv a1, s1
+       mv a0, s1
        call parse_dtb
        tail start_kernel
 
@@ -89,7 +86,7 @@ relocate:
        /* Point stvec to virtual address of instruction after satp write */
        la a0, 1f
        add a0, a0, a1
-       csrw stvec, a0
+       csrw CSR_STVEC, a0
 
        /* Compute satp for kernel page tables, but don't load it yet */
        la a2, swapper_pg_dir
@@ -99,18 +96,20 @@ relocate:
 
        /*
         * Load trampoline page directory, which will cause us to trap to
-        * stvec if VA != PA, or simply fall through if VA == PA
+        * stvec if VA != PA, or simply fall through if VA == PA.  We need a
+        * full fence here because setup_vm() just wrote these PTEs and we need
+        * to ensure the new translations are in use.
         */
        la a0, trampoline_pg_dir
        srl a0, a0, PAGE_SHIFT
        or a0, a0, a1
        sfence.vma
-       csrw sptbr, a0
+       csrw CSR_SATP, a0
 .align 2
 1:
        /* Set trap vector to spin forever to help debug */
        la a0, .Lsecondary_park
-       csrw stvec, a0
+       csrw CSR_STVEC, a0
 
        /* Reload the global pointer */
 .option push
@@ -118,8 +117,14 @@ relocate:
        la gp, __global_pointer$
 .option pop
 
-       /* Switch to kernel page tables */
-       csrw sptbr, a2
+       /*
+        * Switch to kernel page tables.  A full fence is necessary in order to
+        * avoid using the trampoline translations, which are only correct for
+        * the first superpage.  Fetching the fence is guaranteed to work
+        * because that first superpage is translated the same way.
+        */
+       csrw CSR_SATP, a2
+       sfence.vma
 
        ret
 
@@ -130,7 +135,7 @@ relocate:
 
        /* Set trap vector to spin forever to help debug */
        la a3, .Lsecondary_park
-       csrw stvec, a3
+       csrw CSR_STVEC, a3
 
        slli a3, a0, LGREG
        la a1, __cpu_up_stack_pointer
index 48e6b7d..6d86593 100644 (file)
 /*
  * Possible interrupt causes:
  */
-#define INTERRUPT_CAUSE_SOFTWARE    1
-#define INTERRUPT_CAUSE_TIMER       5
-#define INTERRUPT_CAUSE_EXTERNAL    9
-
-/*
- * The high order bit of the trap cause register is always set for
- * interrupts, which allows us to differentiate them from exceptions
- * quickly.  The INTERRUPT_CAUSE_* macros don't contain that bit, so we
- * need to mask it off.
- */
-#define INTERRUPT_CAUSE_FLAG   (1UL << (__riscv_xlen - 1))
+#define INTERRUPT_CAUSE_SOFTWARE       IRQ_S_SOFT
+#define INTERRUPT_CAUSE_TIMER          IRQ_S_TIMER
+#define INTERRUPT_CAUSE_EXTERNAL       IRQ_S_EXT
 
 int arch_show_interrupts(struct seq_file *p, int prec)
 {
@@ -37,7 +29,7 @@ asmlinkage void __irq_entry do_IRQ(struct pt_regs *regs)
        struct pt_regs *old_regs = set_irq_regs(regs);
 
        irq_enter();
-       switch (regs->scause & ~INTERRUPT_CAUSE_FLAG) {
+       switch (regs->scause & ~SCAUSE_IRQ_FLAG) {
        case INTERRUPT_CAUSE_TIMER:
                riscv_timer_interrupt();
                break;
@@ -54,7 +46,8 @@ asmlinkage void __irq_entry do_IRQ(struct pt_regs *regs)
                handle_arch_irq(regs);
                break;
        default:
-               panic("unexpected interrupt cause");
+               pr_alert("unexpected interrupt cause 0x%lx", regs->scause);
+               BUG();
        }
        irq_exit();
 
index 667ee70..91626d9 100644 (file)
@@ -185,10 +185,10 @@ static inline u64 read_counter(int idx)
 
        switch (idx) {
        case RISCV_PMU_CYCLE:
-               val = csr_read(cycle);
+               val = csr_read(CSR_CYCLE);
                break;
        case RISCV_PMU_INSTRET:
-               val = csr_read(instret);
+               val = csr_read(CSR_INSTRET);
                break;
        default:
                WARN_ON_ONCE(idx < 0 || idx > RISCV_MAX_COUNTERS);
index 2a53d26..ed637ae 100644 (file)
  */
 
 #include <linux/reboot.h>
-#include <linux/export.h>
 #include <asm/sbi.h>
 
-void (*pm_power_off)(void) = machine_power_off;
-EXPORT_SYMBOL(pm_power_off);
+static void default_power_off(void)
+{
+       sbi_shutdown();
+       while (1);
+}
+
+void (*pm_power_off)(void) = default_power_off;
 
 void machine_restart(char *cmd)
 {
@@ -26,11 +30,10 @@ void machine_restart(char *cmd)
 
 void machine_halt(void)
 {
-       machine_power_off();
+       pm_power_off();
 }
 
 void machine_power_off(void)
 {
-       sbi_shutdown();
-       while (1);
+       pm_power_off();
 }
index 540a331..d93bcce 100644 (file)
@@ -52,9 +52,11 @@ struct screen_info screen_info = {
 atomic_t hart_lottery;
 unsigned long boot_cpu_hartid;
 
-void __init parse_dtb(unsigned int hartid, void *dtb)
+void __init parse_dtb(phys_addr_t dtb_phys)
 {
-       if (early_init_dt_scan(__va(dtb)))
+       void *dtb = __va(dtb_phys);
+
+       if (early_init_dt_scan(dtb))
                return;
 
        pr_err("No DTB passed to the kernel\n");
index 837e164..804d6ee 100644 (file)
@@ -234,6 +234,9 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
 
        /* Are we from a system call? */
        if (regs->scause == EXC_SYSCALL) {
+               /* Avoid additional syscall restarting via ret_from_exception */
+               regs->scause = -1UL;
+
                /* If so, check system call restarting.. */
                switch (regs->a0) {
                case -ERESTART_RESTARTBLOCK:
@@ -272,6 +275,9 @@ static void do_signal(struct pt_regs *regs)
 
        /* Did we come from a system call? */
        if (regs->scause == EXC_SYSCALL) {
+               /* Avoid additional syscall restarting via ret_from_exception */
+               regs->scause = -1UL;
+
                /* Restart the system call - no handlers present */
                switch (regs->a0) {
                case -ERESTARTNOHAND:
index 0c41d07..b2537ff 100644 (file)
@@ -42,7 +42,7 @@ unsigned long __cpuid_to_hartid_map[NR_CPUS] = {
 
 void __init smp_setup_processor_id(void)
 {
-       cpuid_to_hartid_map(0) = boot_cpu_hartid;
+       cpuid_to_hartid_map(0) = boot_cpu_hartid;
 }
 
 /* A collection of single bit ipi messages.  */
@@ -53,7 +53,7 @@ static struct {
 
 int riscv_hartid_to_cpuid(int hartid)
 {
-       int i = -1;
+       int i;
 
        for (i = 0; i < NR_CPUS; i++)
                if (cpuid_to_hartid_map(i) == hartid)
@@ -70,6 +70,12 @@ void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out)
        for_each_cpu(cpu, in)
                cpumask_set_cpu(cpuid_to_hartid_map(cpu), out);
 }
+
+bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
+{
+       return phys_id == cpuid_to_hartid_map(cpu);
+}
+
 /* Unsupported */
 int setup_profiling_timer(unsigned int multiplier)
 {
@@ -89,7 +95,7 @@ void riscv_software_interrupt(void)
        unsigned long *stats = ipi_data[smp_processor_id()].stats;
 
        /* Clear pending IPI */
-       csr_clear(sip, SIE_SSIE);
+       csr_clear(CSR_SIP, SIE_SSIE);
 
        while (true) {
                unsigned long ops;
@@ -199,52 +205,3 @@ void smp_send_reschedule(int cpu)
        send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
-/*
- * Performs an icache flush for the given MM context.  RISC-V has no direct
- * mechanism for instruction cache shoot downs, so instead we send an IPI that
- * informs the remote harts they need to flush their local instruction caches.
- * To avoid pathologically slow behavior in a common case (a bunch of
- * single-hart processes on a many-hart machine, ie 'make -j') we avoid the
- * IPIs for harts that are not currently executing a MM context and instead
- * schedule a deferred local instruction cache flush to be performed before
- * execution resumes on each hart.
- */
-void flush_icache_mm(struct mm_struct *mm, bool local)
-{
-       unsigned int cpu;
-       cpumask_t others, hmask, *mask;
-
-       preempt_disable();
-
-       /* Mark every hart's icache as needing a flush for this MM. */
-       mask = &mm->context.icache_stale_mask;
-       cpumask_setall(mask);
-       /* Flush this hart's I$ now, and mark it as flushed. */
-       cpu = smp_processor_id();
-       cpumask_clear_cpu(cpu, mask);
-       local_flush_icache_all();
-
-       /*
-        * Flush the I$ of other harts concurrently executing, and mark them as
-        * flushed.
-        */
-       cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
-       local |= cpumask_empty(&others);
-       if (mm != current->active_mm || !local) {
-               cpumask_clear(&hmask);
-               riscv_cpuid_to_hartid_mask(&others, &hmask);
-               sbi_remote_fence_i(hmask.bits);
-       } else {
-               /*
-                * It's assumed that at least one strongly ordered operation is
-                * performed on this hart between setting a hart's cpumask bit
-                * and scheduling this MM context on that hart.  Sending an SBI
-                * remote message will do this, but in the case where no
-                * messages are sent we still need to order this hart's writes
-                * with flush_icache_deferred().
-                */
-               smp_mb();
-       }
-
-       preempt_enable();
-}
index eb533b5..7a0b622 100644 (file)
@@ -47,6 +47,17 @@ void __init smp_prepare_boot_cpu(void)
 
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
+       int cpuid;
+
+       /* This covers non-smp usecase mandated by "nosmp" option */
+       if (max_cpus == 0)
+               return;
+
+       for_each_possible_cpu(cpuid) {
+               if (cpuid == smp_processor_id())
+                       continue;
+               set_cpu_present(cpuid, true);
+       }
 }
 
 void __init setup_smp(void)
@@ -73,12 +84,19 @@ void __init setup_smp(void)
                }
 
                cpuid_to_hartid_map(cpuid) = hart;
-               set_cpu_possible(cpuid, true);
-               set_cpu_present(cpuid, true);
                cpuid++;
        }
 
        BUG_ON(!found_boot_cpu);
+
+       if (cpuid > nr_cpu_ids)
+               pr_warn("Total number of cpus [%d] is greater than nr_cpus option value [%d]\n",
+                       cpuid, nr_cpu_ids);
+
+       for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++) {
+               if (cpuid_to_hartid_map(cpuid) != INVALID_HARTID)
+                       set_cpu_possible(cpuid, true);
+       }
 }
 
 int __cpu_up(unsigned int cpu, struct task_struct *tidle)
index 4d40327..e80a5e8 100644 (file)
@@ -33,9 +33,9 @@ static void notrace walk_stackframe(struct task_struct *task,
        unsigned long fp, sp, pc;
 
        if (regs) {
-               fp = GET_FP(regs);
-               sp = GET_USP(regs);
-               pc = GET_IP(regs);
+               fp = frame_pointer(regs);
+               sp = user_stack_pointer(regs);
+               pc = instruction_pointer(regs);
        } else if (task == NULL || task == current) {
                const register unsigned long current_sp __asm__ ("sp");
                fp = (unsigned long)__builtin_frame_address(0);
@@ -64,12 +64,8 @@ static void notrace walk_stackframe(struct task_struct *task,
                frame = (struct stackframe *)fp - 1;
                sp = fp;
                fp = frame->fp;
-#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
                pc = ftrace_graph_ret_addr(current, NULL, frame->ra,
                                           (unsigned long *)(fp - 8));
-#else
-               pc = frame->ra - 0x4;
-#endif
        }
 }
 
@@ -82,8 +78,8 @@ static void notrace walk_stackframe(struct task_struct *task,
        unsigned long *ksp;
 
        if (regs) {
-               sp = GET_USP(regs);
-               pc = GET_IP(regs);
+               sp = user_stack_pointer(regs);
+               pc = instruction_pointer(regs);
        } else if (task == NULL || task == current) {
                const register unsigned long current_sp __asm__ ("sp");
                sp = current_sp;
index 24a9333..3d1a651 100644 (file)
@@ -70,7 +70,7 @@ void do_trap(struct pt_regs *regs, int signo, int code,
            && printk_ratelimit()) {
                pr_info("%s[%d]: unhandled signal %d code 0x%x at 0x" REG_FMT,
                        tsk->comm, task_pid_nr(tsk), signo, code, addr);
-               print_vma_addr(KERN_CONT " in ", GET_IP(regs));
+               print_vma_addr(KERN_CONT " in ", instruction_pointer(regs));
                pr_cont("\n");
                show_regs(regs);
        }
@@ -118,6 +118,17 @@ DO_ERROR_INFO(do_trap_ecall_s,
 DO_ERROR_INFO(do_trap_ecall_m,
        SIGILL, ILL_ILLTRP, "environment call from M-mode");
 
+#ifdef CONFIG_GENERIC_BUG
+static inline unsigned long get_break_insn_length(unsigned long pc)
+{
+       bug_insn_t insn;
+
+       if (probe_kernel_address((bug_insn_t *)pc, insn))
+               return 0;
+       return (((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32) ? 4UL : 2UL);
+}
+#endif /* CONFIG_GENERIC_BUG */
+
 asmlinkage void do_trap_break(struct pt_regs *regs)
 {
 #ifdef CONFIG_GENERIC_BUG
@@ -129,8 +140,8 @@ asmlinkage void do_trap_break(struct pt_regs *regs)
                case BUG_TRAP_TYPE_NONE:
                        break;
                case BUG_TRAP_TYPE_WARN:
-                       regs->sepc += sizeof(bug_insn_t);
-                       return;
+                       regs->sepc += get_break_insn_length(regs->sepc);
+                       break;
                case BUG_TRAP_TYPE_BUG:
                        die(regs, "Kernel BUG");
                }
@@ -145,11 +156,14 @@ int is_valid_bugaddr(unsigned long pc)
 {
        bug_insn_t insn;
 
-       if (pc < PAGE_OFFSET)
+       if (pc < VMALLOC_START)
                return 0;
        if (probe_kernel_address((bug_insn_t *)pc, insn))
                return 0;
-       return (insn == __BUG_INSN);
+       if ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32)
+               return (insn == __BUG_INSN_32);
+       else
+               return ((insn & __COMPRESSED_INSN_MASK) == __BUG_INSN_16);
 }
 #endif /* CONFIG_GENERIC_BUG */
 
@@ -159,9 +173,9 @@ void __init trap_init(void)
         * Set sup0 scratch register to 0, indicating to exception vector
         * that we are presently executing in the kernel
         */
-       csr_write(sscratch, 0);
+       csr_write(CSR_SSCRATCH, 0);
        /* Set the exception vector address */
-       csr_write(stvec, &handle_exception);
+       csr_write(CSR_STVEC, &handle_exception);
        /* Enable all interrupts */
-       csr_write(sie, -1);
+       csr_write(CSR_SIE, -1);
 }
index fec62b2..b07b765 100644 (file)
@@ -36,7 +36,7 @@ $(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
 # these symbols in the kernel code rather than hand-coded addresses.
 
 SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
-                            $(call cc-ldoption, -Wl$(comma)--hash-style=both)
+       -Wl,--hash-style=both
 $(obj)/vdso-dummy.o: $(src)/vdso.lds $(obj)/rt_sigreturn.o FORCE
        $(call if_changed,vdsold)
 
index b68aac7..8db5691 100644 (file)
@@ -9,3 +9,5 @@ obj-y += fault.o
 obj-y += extable.o
 obj-y += ioremap.o
 obj-y += cacheflush.o
+obj-y += context.o
+obj-y += sifive_l2_cache.o
index 498c0a0..497b7d0 100644 (file)
 #include <asm/pgtable.h>
 #include <asm/cacheflush.h>
 
+#ifdef CONFIG_SMP
+
+#include <asm/sbi.h>
+
+void flush_icache_all(void)
+{
+       sbi_remote_fence_i(NULL);
+}
+
+/*
+ * Performs an icache flush for the given MM context.  RISC-V has no direct
+ * mechanism for instruction cache shoot downs, so instead we send an IPI that
+ * informs the remote harts they need to flush their local instruction caches.
+ * To avoid pathologically slow behavior in a common case (a bunch of
+ * single-hart processes on a many-hart machine, ie 'make -j') we avoid the
+ * IPIs for harts that are not currently executing a MM context and instead
+ * schedule a deferred local instruction cache flush to be performed before
+ * execution resumes on each hart.
+ */
+void flush_icache_mm(struct mm_struct *mm, bool local)
+{
+       unsigned int cpu;
+       cpumask_t others, hmask, *mask;
+
+       preempt_disable();
+
+       /* Mark every hart's icache as needing a flush for this MM. */
+       mask = &mm->context.icache_stale_mask;
+       cpumask_setall(mask);
+       /* Flush this hart's I$ now, and mark it as flushed. */
+       cpu = smp_processor_id();
+       cpumask_clear_cpu(cpu, mask);
+       local_flush_icache_all();
+
+       /*
+        * Flush the I$ of other harts concurrently executing, and mark them as
+        * flushed.
+        */
+       cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
+       local |= cpumask_empty(&others);
+       if (mm != current->active_mm || !local) {
+               cpumask_clear(&hmask);
+               riscv_cpuid_to_hartid_mask(&others, &hmask);
+               sbi_remote_fence_i(hmask.bits);
+       } else {
+               /*
+                * It's assumed that at least one strongly ordered operation is
+                * performed on this hart between setting a hart's cpumask bit
+                * and scheduling this MM context on that hart.  Sending an SBI
+                * remote message will do this, but in the case where no
+                * messages are sent we still need to order this hart's writes
+                * with flush_icache_deferred().
+                */
+               smp_mb();
+       }
+
+       preempt_enable();
+}
+
+#endif /* CONFIG_SMP */
+
 void flush_icache_pte(pte_t pte)
 {
        struct page *page = pte_page(pte);
diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
new file mode 100644 (file)
index 0000000..89ceb3c
--- /dev/null
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#include <linux/mm.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+
+/*
+ * When necessary, performs a deferred icache flush for the given MM context,
+ * on the local CPU.  RISC-V has no direct mechanism for instruction cache
+ * shoot downs, so instead we send an IPI that informs the remote harts they
+ * need to flush their local instruction caches.  To avoid pathologically slow
+ * behavior in a common case (a bunch of single-hart processes on a many-hart
+ * machine, ie 'make -j') we avoid the IPIs for harts that are not currently
+ * executing a MM context and instead schedule a deferred local instruction
+ * cache flush to be performed before execution resumes on each hart.  This
+ * actually performs that local instruction cache flush, which implicitly only
+ * refers to the current hart.
+ */
+static inline void flush_icache_deferred(struct mm_struct *mm)
+{
+#ifdef CONFIG_SMP
+       unsigned int cpu = smp_processor_id();
+       cpumask_t *mask = &mm->context.icache_stale_mask;
+
+       if (cpumask_test_cpu(cpu, mask)) {
+               cpumask_clear_cpu(cpu, mask);
+               /*
+                * Ensure the remote hart's writes are visible to this hart.
+                * This pairs with a barrier in flush_icache_mm.
+                */
+               smp_mb();
+               local_flush_icache_all();
+       }
+
+#endif
+}
+
+void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+       struct task_struct *task)
+{
+       unsigned int cpu;
+
+       if (unlikely(prev == next))
+               return;
+
+       /*
+        * Mark the current MM context as inactive, and the next as
+        * active.  This is at least used by the icache flushing
+        * routines in order to determine who should be flushed.
+        */
+       cpu = smp_processor_id();
+
+       cpumask_clear_cpu(cpu, mm_cpumask(prev));
+       cpumask_set_cpu(cpu, mm_cpumask(next));
+
+       /*
+        * Use the old sptbr name instead of using the current satp
+        * name to support binutils 2.29 which doesn't know about the
+        * privileged ISA 1.10 yet.
+        */
+       csr_write(sptbr, virt_to_pfn(next->pgd) | SATP_MODE);
+       local_flush_tlb_all();
+
+       flush_icache_deferred(next);
+}
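The net effect of the two files above is that a hart which skipped the IPI in flush_icache_mm() still sees a coherent instruction stream: flush_icache_deferred() flushes its I$ the next time this MM is scheduled there. A minimal user-space sketch of the kind of code that exercises this path (the mmap flags and the use of __builtin___clear_cache are assumptions about a JIT-style caller, not something taken from this patch; on RISC-V Linux the builtin is expected to end up in the icache-flush syscall/vDSO path that reaches flush_icache_mm()):

    #include <string.h>
    #include <sys/mman.h>

    /* Hypothetical JIT-style writer: copy instructions into an executable
     * buffer, then request an icache flush before jumping to them. */
    static void *emit_code(const void *insns, size_t len)
    {
            void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE | PROT_EXEC,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (buf == MAP_FAILED)
                    return NULL;
            memcpy(buf, insns, len);
            /* Expected to reach flush_icache_mm() on RISC-V Linux (assumption). */
            __builtin___clear_cache((char *)buf, (char *)buf + len);
            return buf;
    }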
index 88401d5..cec8be9 100644 (file)
@@ -229,8 +229,9 @@ vmalloc_fault:
                pte_t *pte_k;
                int index;
 
+               /* User mode accesses just cause a SIGSEGV */
                if (user_mode(regs))
-                       goto bad_area;
+                       return do_trap(regs, SIGSEGV, code, addr, tsk);
 
                /*
                 * Synchronize this task's top level page-table
@@ -239,13 +240,9 @@ vmalloc_fault:
                 * Do _not_ use "tsk->active_mm->pgd" here.
                 * We might be inside an interrupt in the middle
                 * of a task switch.
-                *
-                * Note: Use the old spbtr name instead of using the current
-                * satp name to support binutils 2.29 which doesn't know about
-                * the privileged ISA 1.10 yet.
                 */
                index = pgd_index(addr);
-               pgd = (pgd_t *)pfn_to_virt(csr_read(sptbr)) + index;
+               pgd = (pgd_t *)pfn_to_virt(csr_read(CSR_SATP)) + index;
                pgd_k = init_mm.pgd + index;
 
                if (!pgd_present(*pgd_k))
diff --git a/arch/riscv/mm/sifive_l2_cache.c b/arch/riscv/mm/sifive_l2_cache.c
new file mode 100644 (file)
index 0000000..4eb6461
--- /dev/null
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SiFive L2 cache controller driver
+ *
+ * Copyright (C) 2018-2019 SiFive, Inc.
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/interrupt.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <asm/sifive_l2_cache.h>
+
+#define SIFIVE_L2_DIRECCFIX_LOW 0x100
+#define SIFIVE_L2_DIRECCFIX_HIGH 0x104
+#define SIFIVE_L2_DIRECCFIX_COUNT 0x108
+
+#define SIFIVE_L2_DATECCFIX_LOW 0x140
+#define SIFIVE_L2_DATECCFIX_HIGH 0x144
+#define SIFIVE_L2_DATECCFIX_COUNT 0x148
+
+#define SIFIVE_L2_DATECCFAIL_LOW 0x160
+#define SIFIVE_L2_DATECCFAIL_HIGH 0x164
+#define SIFIVE_L2_DATECCFAIL_COUNT 0x168
+
+#define SIFIVE_L2_CONFIG 0x00
+#define SIFIVE_L2_WAYENABLE 0x08
+#define SIFIVE_L2_ECCINJECTERR 0x40
+
+#define SIFIVE_L2_MAX_ECCINTR 3
+
+static void __iomem *l2_base;
+static int g_irq[SIFIVE_L2_MAX_ECCINTR];
+
+enum {
+       DIR_CORR = 0,
+       DATA_CORR,
+       DATA_UNCORR,
+};
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *sifive_test;
+
+static ssize_t l2_write(struct file *file, const char __user *data,
+                       size_t count, loff_t *ppos)
+{
+       unsigned int val;
+
+       if (kstrtouint_from_user(data, count, 0, &val))
+               return -EINVAL;
+       if ((val >= 0 && val < 0xFF) || (val >= 0x10000 && val < 0x100FF))
+               writel(val, l2_base + SIFIVE_L2_ECCINJECTERR);
+       else
+               return -EINVAL;
+       return count;
+}
+
+static const struct file_operations l2_fops = {
+       .owner = THIS_MODULE,
+       .open = simple_open,
+       .write = l2_write
+};
+
+static void setup_sifive_debug(void)
+{
+       sifive_test = debugfs_create_dir("sifive_l2_cache", NULL);
+
+       debugfs_create_file("sifive_debug_inject_error", 0200,
+                           sifive_test, NULL, &l2_fops);
+}
+#endif
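The write handler above takes the raw value destined for the ECCINJECTERR register, and only the two ranges checked in l2_write() are accepted. A small user-space sketch of driving it (the /sys/kernel/debug mount point and the chosen value 0x10000 are assumptions; the directory and file names come from setup_sifive_debug()):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            /* Assumes debugfs is mounted at /sys/kernel/debug. */
            const char *path =
                    "/sys/kernel/debug/sifive_l2_cache/sifive_debug_inject_error";
            const char *val = "0x10000";  /* must satisfy the range check in l2_write() */
            int fd = open(path, O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (write(fd, val, strlen(val)) < 0)
                    perror("write");
            close(fd);
            return 0;
    }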
+
+static void l2_config_read(void)
+{
+       u32 regval, val;
+
+       regval = readl(l2_base + SIFIVE_L2_CONFIG);
+       val = regval & 0xFF;
+       pr_info("L2CACHE: No. of Banks in the cache: %d\n", val);
+       val = (regval & 0xFF00) >> 8;
+       pr_info("L2CACHE: No. of ways per bank: %d\n", val);
+       val = (regval & 0xFF0000) >> 16;
+       pr_info("L2CACHE: Sets per bank: %llu\n", (uint64_t)1 << val);
+       val = (regval & 0xFF000000) >> 24;
+       pr_info("L2CACHE: Bytes per cache block: %llu\n", (uint64_t)1 << val);
+
+       regval = readl(l2_base + SIFIVE_L2_WAYENABLE);
+       pr_info("L2CACHE: Index of the largest way enabled: %d\n", regval);
+}
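As a worked example of the decode above (the register value is hypothetical): a CONFIG value of 0x060E1001 would be reported as 1 bank, 16 ways per bank, 2^14 = 16384 sets per bank and 2^6 = 64 bytes per cache block.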
+
+static const struct of_device_id sifive_l2_ids[] = {
+       { .compatible = "sifive,fu540-c000-ccache" },
+       { /* end of table */ },
+};
+
+static ATOMIC_NOTIFIER_HEAD(l2_err_chain);
+
+int register_sifive_l2_error_notifier(struct notifier_block *nb)
+{
+       return atomic_notifier_chain_register(&l2_err_chain, nb);
+}
+EXPORT_SYMBOL_GPL(register_sifive_l2_error_notifier);
+
+int unregister_sifive_l2_error_notifier(struct notifier_block *nb)
+{
+       return atomic_notifier_chain_unregister(&l2_err_chain, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_sifive_l2_error_notifier);
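A sketch of how a client might hook these notifications; the callback uses the standard kernel notifier signature, the event codes are the ones passed by l2_int_handler() below, and everything named my_* is a placeholder:

    #include <linux/notifier.h>
    #include <linux/printk.h>
    #include <asm/sifive_l2_cache.h>

    /* Hypothetical consumer: log correctable vs. uncorrectable L2 events. */
    static int my_l2_ecc_event(struct notifier_block *nb, unsigned long event,
                               void *msg)
    {
            if (event == SIFIVE_L2_ERR_TYPE_CE)
                    pr_info("L2 correctable error: %s\n", (char *)msg);
            else if (event == SIFIVE_L2_ERR_TYPE_UE)
                    pr_warn("L2 uncorrectable error: %s\n", (char *)msg);

            return NOTIFY_OK;
    }

    static struct notifier_block my_l2_ecc_nb = {
            .notifier_call = my_l2_ecc_event,
    };

    /* From the consumer's init path: */
    /*     register_sifive_l2_error_notifier(&my_l2_ecc_nb); */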
+
+static irqreturn_t l2_int_handler(int irq, void *device)
+{
+       unsigned int regval, add_h, add_l;
+
+       if (irq == g_irq[DIR_CORR]) {
+               add_h = readl(l2_base + SIFIVE_L2_DIRECCFIX_HIGH);
+               add_l = readl(l2_base + SIFIVE_L2_DIRECCFIX_LOW);
+               pr_err("L2CACHE: DirError @ 0x%08X.%08X\n", add_h, add_l);
+               regval = readl(l2_base + SIFIVE_L2_DIRECCFIX_COUNT);
+               atomic_notifier_call_chain(&l2_err_chain, SIFIVE_L2_ERR_TYPE_CE,
+                                          "DirECCFix");
+       }
+       if (irq == g_irq[DATA_CORR]) {
+               add_h = readl(l2_base + SIFIVE_L2_DATECCFIX_HIGH);
+               add_l = readl(l2_base + SIFIVE_L2_DATECCFIX_LOW);
+               pr_err("L2CACHE: DataError @ 0x%08X.%08X\n", add_h, add_l);
+               regval = readl(l2_base + SIFIVE_L2_DATECCFIX_COUNT);
+               atomic_notifier_call_chain(&l2_err_chain, SIFIVE_L2_ERR_TYPE_CE,
+                                          "DatECCFix");
+       }
+       if (irq == g_irq[DATA_UNCORR]) {
+               add_h = readl(l2_base + SIFIVE_L2_DATECCFAIL_HIGH);
+               add_l = readl(l2_base + SIFIVE_L2_DATECCFAIL_LOW);
+               pr_err("L2CACHE: DataFail @ 0x%08X.%08X\n", add_h, add_l);
+               regval = readl(l2_base + SIFIVE_L2_DATECCFAIL_COUNT);
+               atomic_notifier_call_chain(&l2_err_chain, SIFIVE_L2_ERR_TYPE_UE,
+                                          "DatECCFail");
+       }
+
+       return IRQ_HANDLED;
+}
+
+int __init sifive_l2_init(void)
+{
+       struct device_node *np;
+       struct resource res;
+       int i, rc;
+
+       np = of_find_matching_node(NULL, sifive_l2_ids);
+       if (!np)
+               return -ENODEV;
+
+       if (of_address_to_resource(np, 0, &res))
+               return -ENODEV;
+
+       l2_base = ioremap(res.start, resource_size(&res));
+       if (!l2_base)
+               return -ENOMEM;
+
+       for (i = 0; i < SIFIVE_L2_MAX_ECCINTR; i++) {
+               g_irq[i] = irq_of_parse_and_map(np, i);
+               rc = request_irq(g_irq[i], l2_int_handler, 0, "l2_ecc", NULL);
+               if (rc) {
+                       pr_err("L2CACHE: Could not request IRQ %d\n", g_irq[i]);
+                       return rc;
+               }
+       }
+
+       l2_config_read();
+
+#ifdef CONFIG_DEBUG_FS
+       setup_sifive_debug();
+#endif
+       return 0;
+}
+device_initcall(sifive_l2_init);
index fca34b2..9f4b4bb 100644 (file)
@@ -22,7 +22,6 @@ static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
 }
 extern void arch_exit_mmap(struct mm_struct *mm);
 static inline void arch_unmap(struct mm_struct *mm,
-                       struct vm_area_struct *vma,
                        unsigned long start, unsigned long end)
 {
 }
index 5c205a9..9f06ea5 100644 (file)
@@ -88,7 +88,6 @@ static inline int arch_dup_mmap(struct mm_struct *oldmm,
 }
 
 static inline void arch_unmap(struct mm_struct *mm,
-                       struct vm_area_struct *vma,
                        unsigned long start, unsigned long end)
 {
 }
index 93dff19..9024236 100644 (file)
@@ -278,8 +278,8 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm,
        mpx_mm_init(mm);
 }
 
-static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
-                             unsigned long start, unsigned long end)
+static inline void arch_unmap(struct mm_struct *mm, unsigned long start,
+                             unsigned long end)
 {
        /*
         * mpx_notify_unmap() goes and reads a rarely-hot
@@ -299,7 +299,7 @@ static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
         * consistently wrong.
         */
        if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
-               mpx_notify_unmap(mm, vma, start, end);
+               mpx_notify_unmap(mm, start, end);
 }
 
 /*
index d0b1434..143a5c1 100644 (file)
@@ -64,12 +64,15 @@ struct mpx_fault_info {
 };
 
 #ifdef CONFIG_X86_INTEL_MPX
-int mpx_fault_info(struct mpx_fault_info *info, struct pt_regs *regs);
-int mpx_handle_bd_fault(void);
+
+extern int mpx_fault_info(struct mpx_fault_info *info, struct pt_regs *regs);
+extern int mpx_handle_bd_fault(void);
+
 static inline int kernel_managing_mpx_tables(struct mm_struct *mm)
 {
        return (mm->context.bd_addr != MPX_INVALID_BOUNDS_DIR);
 }
+
 static inline void mpx_mm_init(struct mm_struct *mm)
 {
        /*
@@ -78,11 +81,10 @@ static inline void mpx_mm_init(struct mm_struct *mm)
         */
        mm->context.bd_addr = MPX_INVALID_BOUNDS_DIR;
 }
-void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
-                     unsigned long start, unsigned long end);
 
-unsigned long mpx_unmapped_area_check(unsigned long addr, unsigned long len,
-               unsigned long flags);
+extern void mpx_notify_unmap(struct mm_struct *mm, unsigned long start, unsigned long end);
+extern unsigned long mpx_unmapped_area_check(unsigned long addr, unsigned long len, unsigned long flags);
+
 #else
 static inline int mpx_fault_info(struct mpx_fault_info *info, struct pt_regs *regs)
 {
@@ -100,7 +102,6 @@ static inline void mpx_mm_init(struct mm_struct *mm)
 {
 }
 static inline void mpx_notify_unmap(struct mm_struct *mm,
-                                   struct vm_area_struct *vma,
                                    unsigned long start, unsigned long end)
 {
 }
index 59726aa..0d1c47c 100644 (file)
@@ -881,9 +881,10 @@ static int mpx_unmap_tables(struct mm_struct *mm,
  * the virtual address region start...end have already been split if
  * necessary, and the 'vma' is the first vma in this range (start -> end).
  */
-void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
-               unsigned long start, unsigned long end)
+void mpx_notify_unmap(struct mm_struct *mm, unsigned long start,
+                     unsigned long end)
 {
+       struct vm_area_struct *vma;
        int ret;
 
        /*
@@ -902,11 +903,12 @@ void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
         * which should not occur normally. Being strict about it here
         * helps ensure that we do not have an exploitable stack overflow.
         */
-       do {
+       vma = find_vma(mm, start);
+       while (vma && vma->vm_start < end) {
                if (vma->vm_flags & VM_MPX)
                        return;
                vma = vma->vm_next;
-       } while (vma && vma->vm_start < end);
+       }
 
        ret = mpx_unmap_tables(mm, start, end);
        if (ret)
index b0b688c..3751d81 100644 (file)
@@ -170,8 +170,7 @@ int tegra_ahb_enable_smmu(struct device_node *dn)
 EXPORT_SYMBOL(tegra_ahb_enable_smmu);
 #endif
 
-#ifdef CONFIG_PM
-static int tegra_ahb_suspend(struct device *dev)
+static int __maybe_unused tegra_ahb_suspend(struct device *dev)
 {
        int i;
        struct tegra_ahb *ahb = dev_get_drvdata(dev);
@@ -181,7 +180,7 @@ static int tegra_ahb_suspend(struct device *dev)
        return 0;
 }
 
-static int tegra_ahb_resume(struct device *dev)
+static int __maybe_unused tegra_ahb_resume(struct device *dev)
 {
        int i;
        struct tegra_ahb *ahb = dev_get_drvdata(dev);
@@ -190,7 +189,6 @@ static int tegra_ahb_resume(struct device *dev)
                gizmo_writel(ahb, ahb->ctx[i], tegra_ahb_gizmo[i]);
        return 0;
 }
-#endif
 
 static UNIVERSAL_DEV_PM_OPS(tegra_ahb_pm,
                            tegra_ahb_suspend,
index 4832148..6bcaa4e 100644 (file)
@@ -387,7 +387,7 @@ config ARM_GLOBAL_TIMER
          This options enables support for the ARM global timer unit
 
 config ARM_TIMER_SP804
-       bool "Support for Dual Timer SP804 module"
+       bool "Support for Dual Timer SP804 module" if COMPILE_TEST
        depends on GENERIC_SCHED_CLOCK && CLKDEV_LOOKUP
        select CLKSRC_MMIO
        select TIMER_OF if OF
@@ -407,8 +407,11 @@ config ARMV7M_SYSTICK
          This options enables support for the ARMv7M system timer unit
 
 config ATMEL_PIT
+       bool "Atmel PIT support" if COMPILE_TEST
+       depends on HAS_IOMEM
        select TIMER_OF if OF
-       def_bool SOC_AT91SAM9 || SOC_SAMA5
+       help
+         Support for the Periodic Interval Timer found on Atmel SoCs.
 
 config ATMEL_ST
        bool "Atmel ST timer support" if COMPILE_TEST
@@ -418,6 +421,13 @@ config ATMEL_ST
        help
          Support for the Atmel ST timer.
 
+config ATMEL_TCB_CLKSRC
+       bool "Atmel TC Block timer driver" if COMPILE_TEST
+       depends on HAS_IOMEM
+       select TIMER_OF if OF
+       help
+         Support for Timer Counter Blocks on Atmel SoCs.
+
 config CLKSRC_EXYNOS_MCT
        bool "Exynos multi core timer driver" if COMPILE_TEST
        depends on ARM || ARM64
index dba4eff..236858f 100644 (file)
@@ -3,7 +3,7 @@ obj-$(CONFIG_TIMER_OF)          += timer-of.o
 obj-$(CONFIG_TIMER_PROBE)      += timer-probe.o
 obj-$(CONFIG_ATMEL_PIT)                += timer-atmel-pit.o
 obj-$(CONFIG_ATMEL_ST)         += timer-atmel-st.o
-obj-$(CONFIG_ATMEL_TCB_CLKSRC) += tcb_clksrc.o
+obj-$(CONFIG_ATMEL_TCB_CLKSRC) += timer-atmel-tcb.o
 obj-$(CONFIG_X86_PM_TIMER)     += acpi_pm.o
 obj-$(CONFIG_SCx200HR_TIMER)   += scx200_hrt.o
 obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC)   += timer-cs5535.o
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
deleted file mode 100644 (file)
index f987027..0000000
+++ /dev/null
@@ -1,433 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/init.h>
-#include <linux/clocksource.h>
-#include <linux/clockchips.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/ioport.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/syscore_ops.h>
-#include <linux/atmel_tc.h>
-
-
-/*
- * We're configured to use a specific TC block, one that's not hooked
- * up to external hardware, to provide a time solution:
- *
- *   - Two channels combine to create a free-running 32 bit counter
- *     with a base rate of 5+ MHz, packaged as a clocksource (with
- *     resolution better than 200 nsec).
- *   - Some chips support 32 bit counter. A single channel is used for
- *     this 32 bit free-running counter. the second channel is not used.
- *
- *   - The third channel may be used to provide a 16-bit clockevent
- *     source, used in either periodic or oneshot mode.  This runs
- *     at 32 KiHZ, and can handle delays of up to two seconds.
- *
- * A boot clocksource and clockevent source are also currently needed,
- * unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so
- * this code can be used when init_timers() is called, well before most
- * devices are set up.  (Some low end AT91 parts, which can run uClinux,
- * have only the timers in one TC block... they currently don't support
- * the tclib code, because of that initialization issue.)
- *
- * REVISIT behavior during system suspend states... we should disable
- * all clocks and save the power.  Easily done for clockevent devices,
- * but clocksources won't necessarily get the needed notifications.
- * For deeper system sleep states, this will be mandatory...
- */
-
-static void __iomem *tcaddr;
-static struct
-{
-       u32 cmr;
-       u32 imr;
-       u32 rc;
-       bool clken;
-} tcb_cache[3];
-static u32 bmr_cache;
-
-static u64 tc_get_cycles(struct clocksource *cs)
-{
-       unsigned long   flags;
-       u32             lower, upper;
-
-       raw_local_irq_save(flags);
-       do {
-               upper = readl_relaxed(tcaddr + ATMEL_TC_REG(1, CV));
-               lower = readl_relaxed(tcaddr + ATMEL_TC_REG(0, CV));
-       } while (upper != readl_relaxed(tcaddr + ATMEL_TC_REG(1, CV)));
-
-       raw_local_irq_restore(flags);
-       return (upper << 16) | lower;
-}
-
-static u64 tc_get_cycles32(struct clocksource *cs)
-{
-       return readl_relaxed(tcaddr + ATMEL_TC_REG(0, CV));
-}
-
-static void tc_clksrc_suspend(struct clocksource *cs)
-{
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(tcb_cache); i++) {
-               tcb_cache[i].cmr = readl(tcaddr + ATMEL_TC_REG(i, CMR));
-               tcb_cache[i].imr = readl(tcaddr + ATMEL_TC_REG(i, IMR));
-               tcb_cache[i].rc = readl(tcaddr + ATMEL_TC_REG(i, RC));
-               tcb_cache[i].clken = !!(readl(tcaddr + ATMEL_TC_REG(i, SR)) &
-                                       ATMEL_TC_CLKSTA);
-       }
-
-       bmr_cache = readl(tcaddr + ATMEL_TC_BMR);
-}
-
-static void tc_clksrc_resume(struct clocksource *cs)
-{
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(tcb_cache); i++) {
-               /* Restore registers for the channel, RA and RB are not used  */
-               writel(tcb_cache[i].cmr, tcaddr + ATMEL_TC_REG(i, CMR));
-               writel(tcb_cache[i].rc, tcaddr + ATMEL_TC_REG(i, RC));
-               writel(0, tcaddr + ATMEL_TC_REG(i, RA));
-               writel(0, tcaddr + ATMEL_TC_REG(i, RB));
-               /* Disable all the interrupts */
-               writel(0xff, tcaddr + ATMEL_TC_REG(i, IDR));
-               /* Reenable interrupts that were enabled before suspending */
-               writel(tcb_cache[i].imr, tcaddr + ATMEL_TC_REG(i, IER));
-               /* Start the clock if it was used */
-               if (tcb_cache[i].clken)
-                       writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(i, CCR));
-       }
-
-       /* Dual channel, chain channels */
-       writel(bmr_cache, tcaddr + ATMEL_TC_BMR);
-       /* Finally, trigger all the channels*/
-       writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
-}
-
-static struct clocksource clksrc = {
-       .name           = "tcb_clksrc",
-       .rating         = 200,
-       .read           = tc_get_cycles,
-       .mask           = CLOCKSOURCE_MASK(32),
-       .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
-       .suspend        = tc_clksrc_suspend,
-       .resume         = tc_clksrc_resume,
-};
-
-#ifdef CONFIG_GENERIC_CLOCKEVENTS
-
-struct tc_clkevt_device {
-       struct clock_event_device       clkevt;
-       struct clk                      *clk;
-       void __iomem                    *regs;
-};
-
-static struct tc_clkevt_device *to_tc_clkevt(struct clock_event_device *clkevt)
-{
-       return container_of(clkevt, struct tc_clkevt_device, clkevt);
-}
-
-/* For now, we always use the 32K clock ... this optimizes for NO_HZ,
- * because using one of the divided clocks would usually mean the
- * tick rate can never be less than several dozen Hz (vs 0.5 Hz).
- *
- * A divided clock could be good for high resolution timers, since
- * 30.5 usec resolution can seem "low".
- */
-static u32 timer_clock;
-
-static int tc_shutdown(struct clock_event_device *d)
-{
-       struct tc_clkevt_device *tcd = to_tc_clkevt(d);
-       void __iomem            *regs = tcd->regs;
-
-       writel(0xff, regs + ATMEL_TC_REG(2, IDR));
-       writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
-       if (!clockevent_state_detached(d))
-               clk_disable(tcd->clk);
-
-       return 0;
-}
-
-static int tc_set_oneshot(struct clock_event_device *d)
-{
-       struct tc_clkevt_device *tcd = to_tc_clkevt(d);
-       void __iomem            *regs = tcd->regs;
-
-       if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
-               tc_shutdown(d);
-
-       clk_enable(tcd->clk);
-
-       /* slow clock, count up to RC, then irq and stop */
-       writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE |
-                    ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR));
-       writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
-
-       /* set_next_event() configures and starts the timer */
-       return 0;
-}
-
-static int tc_set_periodic(struct clock_event_device *d)
-{
-       struct tc_clkevt_device *tcd = to_tc_clkevt(d);
-       void __iomem            *regs = tcd->regs;
-
-       if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
-               tc_shutdown(d);
-
-       /* By not making the gentime core emulate periodic mode on top
-        * of oneshot, we get lower overhead and improved accuracy.
-        */
-       clk_enable(tcd->clk);
-
-       /* slow clock, count up to RC, then irq and restart */
-       writel(timer_clock | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
-                    regs + ATMEL_TC_REG(2, CMR));
-       writel((32768 + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
-
-       /* Enable clock and interrupts on RC compare */
-       writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
-
-       /* go go gadget! */
-       writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG, regs +
-                    ATMEL_TC_REG(2, CCR));
-       return 0;
-}
-
-static int tc_next_event(unsigned long delta, struct clock_event_device *d)
-{
-       writel_relaxed(delta, tcaddr + ATMEL_TC_REG(2, RC));
-
-       /* go go gadget! */
-       writel_relaxed(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
-                       tcaddr + ATMEL_TC_REG(2, CCR));
-       return 0;
-}
-
-static struct tc_clkevt_device clkevt = {
-       .clkevt = {
-               .name                   = "tc_clkevt",
-               .features               = CLOCK_EVT_FEAT_PERIODIC |
-                                         CLOCK_EVT_FEAT_ONESHOT,
-               /* Should be lower than at91rm9200's system timer */
-               .rating                 = 125,
-               .set_next_event         = tc_next_event,
-               .set_state_shutdown     = tc_shutdown,
-               .set_state_periodic     = tc_set_periodic,
-               .set_state_oneshot      = tc_set_oneshot,
-       },
-};
-
-static irqreturn_t ch2_irq(int irq, void *handle)
-{
-       struct tc_clkevt_device *dev = handle;
-       unsigned int            sr;
-
-       sr = readl_relaxed(dev->regs + ATMEL_TC_REG(2, SR));
-       if (sr & ATMEL_TC_CPCS) {
-               dev->clkevt.event_handler(&dev->clkevt);
-               return IRQ_HANDLED;
-       }
-
-       return IRQ_NONE;
-}
-
-static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
-{
-       int ret;
-       struct clk *t2_clk = tc->clk[2];
-       int irq = tc->irq[2];
-
-       ret = clk_prepare_enable(tc->slow_clk);
-       if (ret)
-               return ret;
-
-       /* try to enable t2 clk to avoid future errors in mode change */
-       ret = clk_prepare_enable(t2_clk);
-       if (ret) {
-               clk_disable_unprepare(tc->slow_clk);
-               return ret;
-       }
-
-       clk_disable(t2_clk);
-
-       clkevt.regs = tc->regs;
-       clkevt.clk = t2_clk;
-
-       timer_clock = clk32k_divisor_idx;
-
-       clkevt.clkevt.cpumask = cpumask_of(0);
-
-       ret = request_irq(irq, ch2_irq, IRQF_TIMER, "tc_clkevt", &clkevt);
-       if (ret) {
-               clk_unprepare(t2_clk);
-               clk_disable_unprepare(tc->slow_clk);
-               return ret;
-       }
-
-       clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff);
-
-       return ret;
-}
-
-#else /* !CONFIG_GENERIC_CLOCKEVENTS */
-
-static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
-{
-       /* NOTHING */
-       return 0;
-}
-
-#endif
-
-static void __init tcb_setup_dual_chan(struct atmel_tc *tc, int mck_divisor_idx)
-{
-       /* channel 0:  waveform mode, input mclk/8, clock TIOA0 on overflow */
-       writel(mck_divisor_idx                  /* likely divide-by-8 */
-                       | ATMEL_TC_WAVE
-                       | ATMEL_TC_WAVESEL_UP           /* free-run */
-                       | ATMEL_TC_ACPA_SET             /* TIOA0 rises at 0 */
-                       | ATMEL_TC_ACPC_CLEAR,          /* (duty cycle 50%) */
-                       tcaddr + ATMEL_TC_REG(0, CMR));
-       writel(0x0000, tcaddr + ATMEL_TC_REG(0, RA));
-       writel(0x8000, tcaddr + ATMEL_TC_REG(0, RC));
-       writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR));    /* no irqs */
-       writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR));
-
-       /* channel 1:  waveform mode, input TIOA0 */
-       writel(ATMEL_TC_XC1                     /* input: TIOA0 */
-                       | ATMEL_TC_WAVE
-                       | ATMEL_TC_WAVESEL_UP,          /* free-run */
-                       tcaddr + ATMEL_TC_REG(1, CMR));
-       writel(0xff, tcaddr + ATMEL_TC_REG(1, IDR));    /* no irqs */
-       writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(1, CCR));
-
-       /* chain channel 0 to channel 1*/
-       writel(ATMEL_TC_TC1XC1S_TIOA0, tcaddr + ATMEL_TC_BMR);
-       /* then reset all the timers */
-       writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
-}
-
-static void __init tcb_setup_single_chan(struct atmel_tc *tc, int mck_divisor_idx)
-{
-       /* channel 0:  waveform mode, input mclk/8 */
-       writel(mck_divisor_idx                  /* likely divide-by-8 */
-                       | ATMEL_TC_WAVE
-                       | ATMEL_TC_WAVESEL_UP,          /* free-run */
-                       tcaddr + ATMEL_TC_REG(0, CMR));
-       writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR));    /* no irqs */
-       writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR));
-
-       /* then reset all the timers */
-       writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
-}
-
-static int __init tcb_clksrc_init(void)
-{
-       static char bootinfo[] __initdata
-               = KERN_DEBUG "%s: tc%d at %d.%03d MHz\n";
-
-       struct platform_device *pdev;
-       struct atmel_tc *tc;
-       struct clk *t0_clk;
-       u32 rate, divided_rate = 0;
-       int best_divisor_idx = -1;
-       int clk32k_divisor_idx = -1;
-       int i;
-       int ret;
-
-       tc = atmel_tc_alloc(CONFIG_ATMEL_TCB_CLKSRC_BLOCK);
-       if (!tc) {
-               pr_debug("can't alloc TC for clocksource\n");
-               return -ENODEV;
-       }
-       tcaddr = tc->regs;
-       pdev = tc->pdev;
-
-       t0_clk = tc->clk[0];
-       ret = clk_prepare_enable(t0_clk);
-       if (ret) {
-               pr_debug("can't enable T0 clk\n");
-               goto err_free_tc;
-       }
-
-       /* How fast will we be counting?  Pick something over 5 MHz.  */
-       rate = (u32) clk_get_rate(t0_clk);
-       for (i = 0; i < 5; i++) {
-               unsigned divisor = atmel_tc_divisors[i];
-               unsigned tmp;
-
-               /* remember 32 KiHz clock for later */
-               if (!divisor) {
-                       clk32k_divisor_idx = i;
-                       continue;
-               }
-
-               tmp = rate / divisor;
-               pr_debug("TC: %u / %-3u [%d] --> %u\n", rate, divisor, i, tmp);
-               if (best_divisor_idx > 0) {
-                       if (tmp < 5 * 1000 * 1000)
-                               continue;
-               }
-               divided_rate = tmp;
-               best_divisor_idx = i;
-       }
-
-
-       printk(bootinfo, clksrc.name, CONFIG_ATMEL_TCB_CLKSRC_BLOCK,
-                       divided_rate / 1000000,
-                       ((divided_rate % 1000000) + 500) / 1000);
-
-       if (tc->tcb_config && tc->tcb_config->counter_width == 32) {
-               /* use apropriate function to read 32 bit counter */
-               clksrc.read = tc_get_cycles32;
-               /* setup ony channel 0 */
-               tcb_setup_single_chan(tc, best_divisor_idx);
-       } else {
-               /* tclib will give us three clocks no matter what the
-                * underlying platform supports.
-                */
-               ret = clk_prepare_enable(tc->clk[1]);
-               if (ret) {
-                       pr_debug("can't enable T1 clk\n");
-                       goto err_disable_t0;
-               }
-               /* setup both channel 0 & 1 */
-               tcb_setup_dual_chan(tc, best_divisor_idx);
-       }
-
-       /* and away we go! */
-       ret = clocksource_register_hz(&clksrc, divided_rate);
-       if (ret)
-               goto err_disable_t1;
-
-       /* channel 2:  periodic and oneshot timer support */
-       ret = setup_clkevents(tc, clk32k_divisor_idx);
-       if (ret)
-               goto err_unregister_clksrc;
-
-       return 0;
-
-err_unregister_clksrc:
-       clocksource_unregister(&clksrc);
-
-err_disable_t1:
-       if (!tc->tcb_config || tc->tcb_config->counter_width != 32)
-               clk_disable_unprepare(tc->clk[1]);
-
-err_disable_t0:
-       clk_disable_unprepare(t0_clk);
-
-err_free_tc:
-       atmel_tc_free(tc);
-       return ret;
-}
-arch_initcall(tcb_clksrc_init);
diff --git a/drivers/clocksource/timer-atmel-tcb.c b/drivers/clocksource/timer-atmel-tcb.c
new file mode 100644 (file)
index 0000000..6ed31f9
--- /dev/null
@@ -0,0 +1,477 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/init.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/sched_clock.h>
+#include <linux/syscore_ops.h>
+#include <soc/at91/atmel_tcb.h>
+
+
+/*
+ * We're configured to use a specific TC block, one that's not hooked
+ * up to external hardware, to provide a time solution:
+ *
+ *   - Two channels combine to create a free-running 32 bit counter
+ *     with a base rate of 5+ MHz, packaged as a clocksource (with
+ *     resolution better than 200 nsec).
+ *   - Some chips support a 32-bit counter. A single channel is used for
+ *     this 32-bit free-running counter; the second channel is not used.
+ *
+ *   - The third channel may be used to provide a 16-bit clockevent
+ *     source, used in either periodic or oneshot mode.  This runs
+ *     at 32 KiHz, and can handle delays of up to two seconds.
+ *
+ * REVISIT behavior during system suspend states... we should disable
+ * all clocks and save the power.  Easily done for clockevent devices,
+ * but clocksources won't necessarily get the needed notifications.
+ * For deeper system sleep states, this will be mandatory...
+ */
+
+static void __iomem *tcaddr;
+static struct
+{
+       u32 cmr;
+       u32 imr;
+       u32 rc;
+       bool clken;
+} tcb_cache[3];
+static u32 bmr_cache;
+
+static u64 tc_get_cycles(struct clocksource *cs)
+{
+       unsigned long   flags;
+       u32             lower, upper;
+
+       raw_local_irq_save(flags);
+       do {
+               upper = readl_relaxed(tcaddr + ATMEL_TC_REG(1, CV));
+               lower = readl_relaxed(tcaddr + ATMEL_TC_REG(0, CV));
+       } while (upper != readl_relaxed(tcaddr + ATMEL_TC_REG(1, CV)));
+
+       raw_local_irq_restore(flags);
+       return (upper << 16) | lower;
+}
+
+static u64 tc_get_cycles32(struct clocksource *cs)
+{
+       return readl_relaxed(tcaddr + ATMEL_TC_REG(0, CV));
+}
+
+static void tc_clksrc_suspend(struct clocksource *cs)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(tcb_cache); i++) {
+               tcb_cache[i].cmr = readl(tcaddr + ATMEL_TC_REG(i, CMR));
+               tcb_cache[i].imr = readl(tcaddr + ATMEL_TC_REG(i, IMR));
+               tcb_cache[i].rc = readl(tcaddr + ATMEL_TC_REG(i, RC));
+               tcb_cache[i].clken = !!(readl(tcaddr + ATMEL_TC_REG(i, SR)) &
+                                       ATMEL_TC_CLKSTA);
+       }
+
+       bmr_cache = readl(tcaddr + ATMEL_TC_BMR);
+}
+
+static void tc_clksrc_resume(struct clocksource *cs)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(tcb_cache); i++) {
+               /* Restore registers for the channel, RA and RB are not used  */
+               writel(tcb_cache[i].cmr, tcaddr + ATMEL_TC_REG(i, CMR));
+               writel(tcb_cache[i].rc, tcaddr + ATMEL_TC_REG(i, RC));
+               writel(0, tcaddr + ATMEL_TC_REG(i, RA));
+               writel(0, tcaddr + ATMEL_TC_REG(i, RB));
+               /* Disable all the interrupts */
+               writel(0xff, tcaddr + ATMEL_TC_REG(i, IDR));
+               /* Reenable interrupts that were enabled before suspending */
+               writel(tcb_cache[i].imr, tcaddr + ATMEL_TC_REG(i, IER));
+               /* Start the clock if it was used */
+               if (tcb_cache[i].clken)
+                       writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(i, CCR));
+       }
+
+       /* Dual channel, chain channels */
+       writel(bmr_cache, tcaddr + ATMEL_TC_BMR);
+       /* Finally, trigger all the channels */
+       writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
+}
+
+static struct clocksource clksrc = {
+       .rating         = 200,
+       .read           = tc_get_cycles,
+       .mask           = CLOCKSOURCE_MASK(32),
+       .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
+       .suspend        = tc_clksrc_suspend,
+       .resume         = tc_clksrc_resume,
+};
+
+static u64 notrace tc_sched_clock_read(void)
+{
+       return tc_get_cycles(&clksrc);
+}
+
+static u64 notrace tc_sched_clock_read32(void)
+{
+       return tc_get_cycles32(&clksrc);
+}
+
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+
+struct tc_clkevt_device {
+       struct clock_event_device       clkevt;
+       struct clk                      *clk;
+       void __iomem                    *regs;
+};
+
+static struct tc_clkevt_device *to_tc_clkevt(struct clock_event_device *clkevt)
+{
+       return container_of(clkevt, struct tc_clkevt_device, clkevt);
+}
+
+/* For now, we always use the 32K clock ... this optimizes for NO_HZ,
+ * because using one of the divided clocks would usually mean the
+ * tick rate can never be less than several dozen Hz (vs 0.5 Hz).
+ *
+ * A divided clock could be good for high resolution timers, since
+ * 30.5 usec resolution can seem "low".
+ */
+static u32 timer_clock;
+
+static int tc_shutdown(struct clock_event_device *d)
+{
+       struct tc_clkevt_device *tcd = to_tc_clkevt(d);
+       void __iomem            *regs = tcd->regs;
+
+       writel(0xff, regs + ATMEL_TC_REG(2, IDR));
+       writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
+       if (!clockevent_state_detached(d))
+               clk_disable(tcd->clk);
+
+       return 0;
+}
+
+static int tc_set_oneshot(struct clock_event_device *d)
+{
+       struct tc_clkevt_device *tcd = to_tc_clkevt(d);
+       void __iomem            *regs = tcd->regs;
+
+       if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
+               tc_shutdown(d);
+
+       clk_enable(tcd->clk);
+
+       /* slow clock, count up to RC, then irq and stop */
+       writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE |
+                    ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR));
+       writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
+
+       /* set_next_event() configures and starts the timer */
+       return 0;
+}
+
+static int tc_set_periodic(struct clock_event_device *d)
+{
+       struct tc_clkevt_device *tcd = to_tc_clkevt(d);
+       void __iomem            *regs = tcd->regs;
+
+       if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
+               tc_shutdown(d);
+
+       /* By not making the gentime core emulate periodic mode on top
+        * of oneshot, we get lower overhead and improved accuracy.
+        */
+       clk_enable(tcd->clk);
+
+       /* slow clock, count up to RC, then irq and restart */
+       writel(timer_clock | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
+                    regs + ATMEL_TC_REG(2, CMR));
+       writel((32768 + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
+
+       /* Enable clock and interrupts on RC compare */
+       writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
+
+       /* go go gadget! */
+       writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG, regs +
+                    ATMEL_TC_REG(2, CCR));
+       return 0;
+}
+
+static int tc_next_event(unsigned long delta, struct clock_event_device *d)
+{
+       writel_relaxed(delta, tcaddr + ATMEL_TC_REG(2, RC));
+
+       /* go go gadget! */
+       writel_relaxed(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
+                       tcaddr + ATMEL_TC_REG(2, CCR));
+       return 0;
+}
+
+static struct tc_clkevt_device clkevt = {
+       .clkevt = {
+               .features               = CLOCK_EVT_FEAT_PERIODIC |
+                                         CLOCK_EVT_FEAT_ONESHOT,
+               /* Should be lower than at91rm9200's system timer */
+               .rating                 = 125,
+               .set_next_event         = tc_next_event,
+               .set_state_shutdown     = tc_shutdown,
+               .set_state_periodic     = tc_set_periodic,
+               .set_state_oneshot      = tc_set_oneshot,
+       },
+};
+
+static irqreturn_t ch2_irq(int irq, void *handle)
+{
+       struct tc_clkevt_device *dev = handle;
+       unsigned int            sr;
+
+       sr = readl_relaxed(dev->regs + ATMEL_TC_REG(2, SR));
+       if (sr & ATMEL_TC_CPCS) {
+               dev->clkevt.event_handler(&dev->clkevt);
+               return IRQ_HANDLED;
+       }
+
+       return IRQ_NONE;
+}
+
+static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
+{
+       int ret;
+       struct clk *t2_clk = tc->clk[2];
+       int irq = tc->irq[2];
+
+       ret = clk_prepare_enable(tc->slow_clk);
+       if (ret)
+               return ret;
+
+       /* try to enable t2 clk to avoid future errors in mode change */
+       ret = clk_prepare_enable(t2_clk);
+       if (ret) {
+               clk_disable_unprepare(tc->slow_clk);
+               return ret;
+       }
+
+       clk_disable(t2_clk);
+
+       clkevt.regs = tc->regs;
+       clkevt.clk = t2_clk;
+
+       timer_clock = clk32k_divisor_idx;
+
+       clkevt.clkevt.cpumask = cpumask_of(0);
+
+       ret = request_irq(irq, ch2_irq, IRQF_TIMER, "tc_clkevt", &clkevt);
+       if (ret) {
+               clk_unprepare(t2_clk);
+               clk_disable_unprepare(tc->slow_clk);
+               return ret;
+       }
+
+       clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff);
+
+       return ret;
+}
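To put numbers on the clockevent setup: the timer counts the 32768 Hz slow clock, so the periodic reload (32768 + HZ / 2) / HZ is simply the clock rate divided by HZ, rounded to the nearest count (with HZ=250, for example, RC becomes 131, i.e. a tick of roughly 4 ms), and the 16-bit limit passed to clockevents_config_and_register() (max_delta 0xffff) caps one-shot delays at 0xffff / 32768 Hz, about two seconds, which is where the "delays of up to two seconds" figure in the header comment comes from. The HZ=250 value is only an illustration.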
+
+#else /* !CONFIG_GENERIC_CLOCKEVENTS */
+
+static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
+{
+       /* NOTHING */
+       return 0;
+}
+
+#endif
+
+static void __init tcb_setup_dual_chan(struct atmel_tc *tc, int mck_divisor_idx)
+{
+       /* channel 0:  waveform mode, input mclk/8, clock TIOA0 on overflow */
+       writel(mck_divisor_idx                  /* likely divide-by-8 */
+                       | ATMEL_TC_WAVE
+                       | ATMEL_TC_WAVESEL_UP           /* free-run */
+                       | ATMEL_TC_ACPA_SET             /* TIOA0 rises at 0 */
+                       | ATMEL_TC_ACPC_CLEAR,          /* (duty cycle 50%) */
+                       tcaddr + ATMEL_TC_REG(0, CMR));
+       writel(0x0000, tcaddr + ATMEL_TC_REG(0, RA));
+       writel(0x8000, tcaddr + ATMEL_TC_REG(0, RC));
+       writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR));    /* no irqs */
+       writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR));
+
+       /* channel 1:  waveform mode, input TIOA0 */
+       writel(ATMEL_TC_XC1                     /* input: TIOA0 */
+                       | ATMEL_TC_WAVE
+                       | ATMEL_TC_WAVESEL_UP,          /* free-run */
+                       tcaddr + ATMEL_TC_REG(1, CMR));
+       writel(0xff, tcaddr + ATMEL_TC_REG(1, IDR));    /* no irqs */
+       writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(1, CCR));
+
+       /* chain channel 0 to channel 1 */
+       writel(ATMEL_TC_TC1XC1S_TIOA0, tcaddr + ATMEL_TC_BMR);
+       /* then reset all the timers */
+       writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
+}
+
+static void __init tcb_setup_single_chan(struct atmel_tc *tc, int mck_divisor_idx)
+{
+       /* channel 0:  waveform mode, input mclk/8 */
+       writel(mck_divisor_idx                  /* likely divide-by-8 */
+                       | ATMEL_TC_WAVE
+                       | ATMEL_TC_WAVESEL_UP,          /* free-run */
+                       tcaddr + ATMEL_TC_REG(0, CMR));
+       writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR));    /* no irqs */
+       writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR));
+
+       /* then reset all the timers */
+       writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
+}
+
+static const u8 atmel_tcb_divisors[5] = { 2, 8, 32, 128, 0, };
+
+static const struct of_device_id atmel_tcb_of_match[] = {
+       { .compatible = "atmel,at91rm9200-tcb", .data = (void *)16, },
+       { .compatible = "atmel,at91sam9x5-tcb", .data = (void *)32, },
+       { /* sentinel */ }
+};
+
+static int __init tcb_clksrc_init(struct device_node *node)
+{
+       struct atmel_tc tc;
+       struct clk *t0_clk;
+       const struct of_device_id *match;
+       u64 (*tc_sched_clock)(void);
+       u32 rate, divided_rate = 0;
+       int best_divisor_idx = -1;
+       int clk32k_divisor_idx = -1;
+       int bits;
+       int i;
+       int ret;
+
+       /* Protect against multiple calls */
+       if (tcaddr)
+               return 0;
+
+       tc.regs = of_iomap(node->parent, 0);
+       if (!tc.regs)
+               return -ENXIO;
+
+       t0_clk = of_clk_get_by_name(node->parent, "t0_clk");
+       if (IS_ERR(t0_clk))
+               return PTR_ERR(t0_clk);
+
+       tc.slow_clk = of_clk_get_by_name(node->parent, "slow_clk");
+       if (IS_ERR(tc.slow_clk))
+               return PTR_ERR(tc.slow_clk);
+
+       tc.clk[0] = t0_clk;
+       tc.clk[1] = of_clk_get_by_name(node->parent, "t1_clk");
+       if (IS_ERR(tc.clk[1]))
+               tc.clk[1] = t0_clk;
+       tc.clk[2] = of_clk_get_by_name(node->parent, "t2_clk");
+       if (IS_ERR(tc.clk[2]))
+               tc.clk[2] = t0_clk;
+
+       tc.irq[2] = of_irq_get(node->parent, 2);
+       if (tc.irq[2] <= 0) {
+               tc.irq[2] = of_irq_get(node->parent, 0);
+               if (tc.irq[2] <= 0)
+                       return -EINVAL;
+       }
+
+       match = of_match_node(atmel_tcb_of_match, node->parent);
+       bits = (uintptr_t)match->data;
+
+       for (i = 0; i < ARRAY_SIZE(tc.irq); i++)
+               writel(ATMEL_TC_ALL_IRQ, tc.regs + ATMEL_TC_REG(i, IDR));
+
+       ret = clk_prepare_enable(t0_clk);
+       if (ret) {
+               pr_debug("can't enable T0 clk\n");
+               return ret;
+       }
+
+       /* How fast will we be counting?  Pick something over 5 MHz.  */
+       rate = (u32) clk_get_rate(t0_clk);
+       for (i = 0; i < ARRAY_SIZE(atmel_tcb_divisors); i++) {
+               unsigned divisor = atmel_tcb_divisors[i];
+               unsigned tmp;
+
+               /* remember 32 KiHz clock for later */
+               if (!divisor) {
+                       clk32k_divisor_idx = i;
+                       continue;
+               }
+
+               tmp = rate / divisor;
+               pr_debug("TC: %u / %-3u [%d] --> %u\n", rate, divisor, i, tmp);
+               if (best_divisor_idx > 0) {
+                       if (tmp < 5 * 1000 * 1000)
+                               continue;
+               }
+               divided_rate = tmp;
+               best_divisor_idx = i;
+       }
+
+       clksrc.name = kbasename(node->parent->full_name);
+       clkevt.clkevt.name = kbasename(node->parent->full_name);
+       pr_debug("%s at %d.%03d MHz\n", clksrc.name, divided_rate / 1000000,
+                       ((divided_rate % 1000000) + 500) / 1000);
+
+       tcaddr = tc.regs;
+
+       if (bits == 32) {
+               /* use appropriate function to read 32 bit counter */
+               clksrc.read = tc_get_cycles32;
+               /* setup only channel 0 */
+               tcb_setup_single_chan(&tc, best_divisor_idx);
+               tc_sched_clock = tc_sched_clock_read32;
+       } else {
+               /* we have three clocks no matter what the
+                * underlying platform supports.
+                */
+               ret = clk_prepare_enable(tc.clk[1]);
+               if (ret) {
+                       pr_debug("can't enable T1 clk\n");
+                       goto err_disable_t0;
+               }
+               /* setup both channel 0 & 1 */
+               tcb_setup_dual_chan(&tc, best_divisor_idx);
+               tc_sched_clock = tc_sched_clock_read;
+       }
+
+       /* and away we go! */
+       ret = clocksource_register_hz(&clksrc, divided_rate);
+       if (ret)
+               goto err_disable_t1;
+
+       /* channel 2:  periodic and oneshot timer support */
+       ret = setup_clkevents(&tc, clk32k_divisor_idx);
+       if (ret)
+               goto err_unregister_clksrc;
+
+       sched_clock_register(tc_sched_clock, 32, divided_rate);
+
+       return 0;
+
+err_unregister_clksrc:
+       clocksource_unregister(&clksrc);
+
+err_disable_t1:
+       if (bits != 32)
+               clk_disable_unprepare(tc.clk[1]);
+
+err_disable_t0:
+       clk_disable_unprepare(t0_clk);
+
+       tcaddr = NULL;
+
+       return ret;
+}
+TIMER_OF_DECLARE(atmel_tcb_clksrc, "atmel,tcb-timer", tcb_clksrc_init);
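As a worked example of the divisor selection in tcb_clksrc_init(), assuming a hypothetical 133 MHz peripheral clock: MCK/2 = 66.5 MHz is taken first, MCK/8 = 16.6 MHz then replaces it (the 5 MHz cut-off is only applied once best_divisor_idx > 0), MCK/32 = 4.2 MHz and MCK/128 = 1.0 MHz are skipped for falling below 5 MHz, and the final table entry (divisor 0) is only remembered as clk32k_divisor_idx for the clockevent. The clocksource therefore ends up running at MCK/8.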
index f2019a8..fa9fb4e 100644 (file)
@@ -26,8 +26,8 @@
 #define MLB_TMR_TMCSR_CSL_DIV2 0
 #define MLB_TMR_DIV_CNT                2
 
-#define MLB_TMR_SRC_CH  (1)
-#define MLB_TMR_EVT_CH  (0)
+#define MLB_TMR_SRC_CH         1
+#define MLB_TMR_EVT_CH         0
 
 #define MLB_TMR_SRC_CH_OFS     (MLB_TMR_REGSZPCH * MLB_TMR_SRC_CH)
 #define MLB_TMR_EVT_CH_OFS     (MLB_TMR_REGSZPCH * MLB_TMR_EVT_CH)
@@ -43,6 +43,8 @@
 #define MLB_TMR_EVT_TMRLR2_OFS (MLB_TMR_EVT_CH_OFS + MLB_TMR_TMRLR2_OFS)
 
 #define MLB_TIMER_RATING       500
+#define MLB_TIMER_ONESHOT      0
+#define MLB_TIMER_PERIODIC     1
 
 static irqreturn_t mlb_timer_interrupt(int irq, void *dev_id)
 {
@@ -59,27 +61,53 @@ static irqreturn_t mlb_timer_interrupt(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-static int mlb_set_state_periodic(struct clock_event_device *clk)
+static void mlb_evt_timer_start(struct timer_of *to, bool periodic)
 {
-       struct timer_of *to = to_timer_of(clk);
        u32 val = MLB_TMR_TMCSR_CSL_DIV2;
 
+       val |= MLB_TMR_TMCSR_CNTE | MLB_TMR_TMCSR_TRG | MLB_TMR_TMCSR_INTE;
+       if (periodic)
+               val |= MLB_TMR_TMCSR_RELD;
        writel_relaxed(val, timer_of_base(to) + MLB_TMR_EVT_TMCSR_OFS);
+}
 
-       writel_relaxed(to->of_clk.period, timer_of_base(to) +
-                               MLB_TMR_EVT_TMRLR1_OFS);
-       val |= MLB_TMR_TMCSR_RELD | MLB_TMR_TMCSR_CNTE |
-               MLB_TMR_TMCSR_TRG | MLB_TMR_TMCSR_INTE;
+static void mlb_evt_timer_stop(struct timer_of *to)
+{
+       u32 val = readl_relaxed(timer_of_base(to) + MLB_TMR_EVT_TMCSR_OFS);
+
+       val &= ~MLB_TMR_TMCSR_CNTE;
        writel_relaxed(val, timer_of_base(to) + MLB_TMR_EVT_TMCSR_OFS);
+}
+
+static void mlb_evt_timer_register_count(struct timer_of *to, unsigned long cnt)
+{
+       writel_relaxed(cnt, timer_of_base(to) + MLB_TMR_EVT_TMRLR1_OFS);
+}
+
+static int mlb_set_state_periodic(struct clock_event_device *clk)
+{
+       struct timer_of *to = to_timer_of(clk);
+
+       mlb_evt_timer_stop(to);
+       mlb_evt_timer_register_count(to, to->of_clk.period);
+       mlb_evt_timer_start(to, MLB_TIMER_PERIODIC);
        return 0;
 }
 
 static int mlb_set_state_oneshot(struct clock_event_device *clk)
 {
        struct timer_of *to = to_timer_of(clk);
-       u32 val = MLB_TMR_TMCSR_CSL_DIV2;
 
-       writel_relaxed(val, timer_of_base(to) + MLB_TMR_EVT_TMCSR_OFS);
+       mlb_evt_timer_stop(to);
+       mlb_evt_timer_start(to, MLB_TIMER_ONESHOT);
+       return 0;
+}
+
+static int mlb_set_state_shutdown(struct clock_event_device *clk)
+{
+       struct timer_of *to = to_timer_of(clk);
+
+       mlb_evt_timer_stop(to);
        return 0;
 }
 
@@ -88,22 +116,21 @@ static int mlb_clkevt_next_event(unsigned long event,
 {
        struct timer_of *to = to_timer_of(clk);
 
-       writel_relaxed(event, timer_of_base(to) + MLB_TMR_EVT_TMRLR1_OFS);
-       writel_relaxed(MLB_TMR_TMCSR_CSL_DIV2 |
-                       MLB_TMR_TMCSR_CNTE | MLB_TMR_TMCSR_INTE |
-                       MLB_TMR_TMCSR_TRG, timer_of_base(to) +
-                       MLB_TMR_EVT_TMCSR_OFS);
+       mlb_evt_timer_stop(to);
+       mlb_evt_timer_register_count(to, event);
+       mlb_evt_timer_start(to, MLB_TIMER_ONESHOT);
        return 0;
 }
 
 static int mlb_config_clock_source(struct timer_of *to)
 {
-       writel_relaxed(0, timer_of_base(to) + MLB_TMR_SRC_TMCSR_OFS);
-       writel_relaxed(~0, timer_of_base(to) + MLB_TMR_SRC_TMR_OFS);
+       u32 val = MLB_TMR_TMCSR_CSL_DIV2;
+
+       writel_relaxed(val, timer_of_base(to) + MLB_TMR_SRC_TMCSR_OFS);
        writel_relaxed(~0, timer_of_base(to) + MLB_TMR_SRC_TMRLR1_OFS);
        writel_relaxed(~0, timer_of_base(to) + MLB_TMR_SRC_TMRLR2_OFS);
-       writel_relaxed(BIT(4) | BIT(1) | BIT(0), timer_of_base(to) +
-               MLB_TMR_SRC_TMCSR_OFS);
+       val |= MLB_TMR_TMCSR_RELD | MLB_TMR_TMCSR_CNTE | MLB_TMR_TMCSR_TRG;
+       writel_relaxed(val, timer_of_base(to) + MLB_TMR_SRC_TMCSR_OFS);
        return 0;
 }
 
@@ -123,6 +150,7 @@ static struct timer_of to = {
                .features = CLOCK_EVT_FEAT_DYNIRQ | CLOCK_EVT_FEAT_ONESHOT,
                .set_state_oneshot = mlb_set_state_oneshot,
                .set_state_periodic = mlb_set_state_periodic,
+               .set_state_shutdown = mlb_set_state_shutdown,
                .set_next_event = mlb_clkevt_next_event,
        },
 
index 6e0180a..65f38f6 100644 (file)
@@ -186,7 +186,8 @@ static int __init sun4i_timer_init(struct device_node *node)
         */
        if (of_machine_is_compatible("allwinner,sun4i-a10") ||
            of_machine_is_compatible("allwinner,sun5i-a13") ||
-           of_machine_is_compatible("allwinner,sun5i-a10s"))
+           of_machine_is_compatible("allwinner,sun5i-a10s") ||
+           of_machine_is_compatible("allwinner,suniv-f1c100s"))
                sched_clock_register(sun4i_timer_sched_read, 32,
                                     timer_of_rate(&to));
 
@@ -218,3 +219,5 @@ static int __init sun4i_timer_init(struct device_node *node)
 }
 TIMER_OF_DECLARE(sun4i, "allwinner,sun4i-a10-timer",
                       sun4i_timer_init);
+TIMER_OF_DECLARE(suniv, "allwinner,suniv-f1c100s-timer",
+                      sun4i_timer_init);
index fdb3d79..919b356 100644 (file)
@@ -60,9 +60,6 @@
 static u32 usec_config;
 static void __iomem *timer_reg_base;
 #ifdef CONFIG_ARM
-static void __iomem *rtc_base;
-static struct timespec64 persistent_ts;
-static u64 persistent_ms, last_persistent_ms;
 static struct delay_timer tegra_delay_timer;
 #endif
 
@@ -199,40 +196,30 @@ static unsigned long tegra_delay_timer_read_counter_long(void)
        return readl(timer_reg_base + TIMERUS_CNTR_1US);
 }
 
+static struct timer_of suspend_rtc_to = {
+       .flags = TIMER_OF_BASE | TIMER_OF_CLOCK,
+};
+
 /*
  * tegra_rtc_read - Reads the Tegra RTC registers
  * Care must be taken that this function is not called while the
  * tegra_rtc driver could be executing to avoid race conditions
  * on the RTC shadow register
  */
-static u64 tegra_rtc_read_ms(void)
+static u64 tegra_rtc_read_ms(struct clocksource *cs)
 {
-       u32 ms = readl(rtc_base + RTC_MILLISECONDS);
-       u32 s = readl(rtc_base + RTC_SHADOW_SECONDS);
+       u32 ms = readl(timer_of_base(&suspend_rtc_to) + RTC_MILLISECONDS);
+       u32 s = readl(timer_of_base(&suspend_rtc_to) + RTC_SHADOW_SECONDS);
        return (u64)s * MSEC_PER_SEC + ms;
 }
 
-/*
- * tegra_read_persistent_clock64 -  Return time from a persistent clock.
- *
- * Reads the time from a source which isn't disabled during PM, the
- * 32k sync timer.  Convert the cycles elapsed since last read into
- * nsecs and adds to a monotonically increasing timespec64.
- * Care must be taken that this funciton is not called while the
- * tegra_rtc driver could be executing to avoid race conditions
- * on the RTC shadow register
- */
-static void tegra_read_persistent_clock64(struct timespec64 *ts)
-{
-       u64 delta;
-
-       last_persistent_ms = persistent_ms;
-       persistent_ms = tegra_rtc_read_ms();
-       delta = persistent_ms - last_persistent_ms;
-
-       timespec64_add_ns(&persistent_ts, delta * NSEC_PER_MSEC);
-       *ts = persistent_ts;
-}
+static struct clocksource suspend_rtc_clocksource = {
+       .name   = "tegra_suspend_timer",
+       .rating = 200,
+       .read   = tegra_rtc_read_ms,
+       .mask   = CLOCKSOURCE_MASK(32),
+       .flags  = CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP,
+};
 #endif
 
 static int tegra_timer_common_init(struct device_node *np, struct timer_of *to)
@@ -385,25 +372,15 @@ out:
 
 static int __init tegra20_init_rtc(struct device_node *np)
 {
-       struct clk *clk;
+       int ret;
 
-       rtc_base = of_iomap(np, 0);
-       if (!rtc_base) {
-               pr_err("Can't map RTC registers\n");
-               return -ENXIO;
-       }
+       ret = timer_of_init(np, &suspend_rtc_to);
+       if (ret)
+               return ret;
 
-       /*
-        * rtc registers are used by read_persistent_clock, keep the rtc clock
-        * enabled
-        */
-       clk = of_clk_get(np, 0);
-       if (IS_ERR(clk))
-               pr_warn("Unable to get rtc-tegra clock\n");
-       else
-               clk_prepare_enable(clk);
+       clocksource_register_hz(&suspend_rtc_clocksource, 1000);
 
-       return register_persistent_clock(tegra_read_persistent_clock64);
+       return 0;
 }
 TIMER_OF_DECLARE(tegra20_rtc, "nvidia,tegra20-rtc", tegra20_init_rtc);
 #endif
index 3fbbb61..ef93406 100644 (file)
@@ -64,6 +64,22 @@ struct ti_sci_xfers_info {
        spinlock_t xfer_lock;
 };
 
+/**
+ * struct ti_sci_rm_type_map - Structure mapping a TISCI device ID to its
+ *                             TISCI Resource Management (RM) type.
+ * @dev_id:    TISCI device ID
+ * @type:      Corresponding id as identified by TISCI RM.
+ *
+ * Note: This is used only as a workaround for using RM range APIs
+ *     on the AM654 SoC. For future SoCs, dev_id will be used as the type
+ *     for RM range APIs. In order to maintain ABI backward compatibility,
+ *     the type is not being changed for the AM654 SoC.
+ */
+struct ti_sci_rm_type_map {
+       u32 dev_id;
+       u16 type;
+};
+
 /**
  * struct ti_sci_desc - Description of SoC integration
  * @default_host_id:   Host identifier representing the compute entity
@@ -71,12 +87,14 @@ struct ti_sci_xfers_info {
  * @max_msgs: Maximum number of messages that can be pending
  *               simultaneously in the system
  * @max_msg_size: Maximum size of data per message that can be handled.
+ * @rm_type_map: RM resource type mapping structure.
  */
 struct ti_sci_desc {
        u8 default_host_id;
        int max_rx_timeout_ms;
        int max_msgs;
        int max_msg_size;
+       struct ti_sci_rm_type_map *rm_type_map;
 };
 
 /**
@@ -1600,6 +1618,392 @@ fail:
        return ret;
 }
 
+static int ti_sci_get_resource_type(struct ti_sci_info *info, u16 dev_id,
+                                   u16 *type)
+{
+       struct ti_sci_rm_type_map *rm_type_map = info->desc->rm_type_map;
+       bool found = false;
+       int i;
+
+       /* If map is not provided then assume dev_id is used as type */
+       if (!rm_type_map) {
+               *type = dev_id;
+               return 0;
+       }
+
+       for (i = 0; rm_type_map[i].dev_id; i++) {
+               if (rm_type_map[i].dev_id == dev_id) {
+                       *type = rm_type_map[i].type;
+                       found = true;
+                       break;
+               }
+       }
+
+       if (!found)
+               return -EINVAL;
+
+       return 0;
+}
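
For illustration, a minimal worked example of this lookup, assuming the AM654 table that is added further down in this patch:

/*
 * Illustration only: with ti_sci_am654_rm_type_map (defined below),
 *   ti_sci_get_resource_type(info, 187, &type) returns 0 with type = 0x009;
 *   ti_sci_get_resource_type(info, 42, &type) returns -EINVAL (not in table).
 * With rm_type_map == NULL (the K2G descriptor), dev_id itself is used
 * as the type.
 */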
+
+/**
+ * ti_sci_get_resource_range - Helper to get a range of resources assigned
+ *                            to a host. Resource is uniquely identified by
+ *                            type and subtype.
+ * @handle:            Pointer to TISCI handle.
+ * @dev_id:            TISCI device ID.
+ * @subtype:           Resource assignment subtype that is being requested
+ *                     from the given device.
+ * @s_host:            Host processor ID to which the resources are allocated
+ * @range_start:       Start index of the resource range
+ * @range_num:         Number of resources in the range
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
+                                    u32 dev_id, u8 subtype, u8 s_host,
+                                    u16 *range_start, u16 *range_num)
+{
+       struct ti_sci_msg_resp_get_resource_range *resp;
+       struct ti_sci_msg_req_get_resource_range *req;
+       struct ti_sci_xfer *xfer;
+       struct ti_sci_info *info;
+       struct device *dev;
+       u16 type;
+       int ret = 0;
+
+       if (IS_ERR(handle))
+               return PTR_ERR(handle);
+       if (!handle)
+               return -EINVAL;
+
+       info = handle_to_ti_sci_info(handle);
+       dev = info->dev;
+
+       xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_RESOURCE_RANGE,
+                                  TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+                                  sizeof(*req), sizeof(*resp));
+       if (IS_ERR(xfer)) {
+               ret = PTR_ERR(xfer);
+               dev_err(dev, "Message alloc failed(%d)\n", ret);
+               return ret;
+       }
+
+       ret = ti_sci_get_resource_type(info, dev_id, &type);
+       if (ret) {
+               dev_err(dev, "rm type lookup failed for %u\n", dev_id);
+               goto fail;
+       }
+
+       req = (struct ti_sci_msg_req_get_resource_range *)xfer->xfer_buf;
+       req->secondary_host = s_host;
+       req->type = type & MSG_RM_RESOURCE_TYPE_MASK;
+       req->subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK;
+
+       ret = ti_sci_do_xfer(info, xfer);
+       if (ret) {
+               dev_err(dev, "Mbox send fail %d\n", ret);
+               goto fail;
+       }
+
+       resp = (struct ti_sci_msg_resp_get_resource_range *)xfer->xfer_buf;
+
+       if (!ti_sci_is_response_ack(resp)) {
+               ret = -ENODEV;
+       } else if (!resp->range_start && !resp->range_num) {
+               ret = -ENODEV;
+       } else {
+               *range_start = resp->range_start;
+               *range_num = resp->range_num;
+       }
+
+fail:
+       ti_sci_put_one_xfer(&info->minfo, xfer);
+
+       return ret;
+}
+
+/**
+ * ti_sci_cmd_get_resource_range - Get a range of resources assigned to the
+ *                                same host as the TI SCI interface host.
+ * @handle:            Pointer to TISCI handle.
+ * @dev_id:            TISCI device ID.
+ * @subtype:           Resource assignment subtype that is being requested
+ *                     from the given device.
+ * @range_start:       Start index of the resource range
+ * @range_num:         Number of resources in the range
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
+                                        u32 dev_id, u8 subtype,
+                                        u16 *range_start, u16 *range_num)
+{
+       return ti_sci_get_resource_range(handle, dev_id, subtype,
+                                        TI_SCI_IRQ_SECONDARY_HOST_INVALID,
+                                        range_start, range_num);
+}
+
+/**
+ * ti_sci_cmd_get_resource_range_from_shost - Get a range of resources
+ *                                           assigned to a specified host.
+ * @handle:            Pointer to TISCI handle.
+ * @dev_id:            TISCI device ID.
+ * @subtype:           Resource assignment subtype that is being requested
+ *                     from the given device.
+ * @s_host:            Host processor ID to which the resources are allocated
+ * @range_start:       Start index of the resource range
+ * @range_num:         Number of resources in the range
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static
+int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle,
+                                            u32 dev_id, u8 subtype, u8 s_host,
+                                            u16 *range_start, u16 *range_num)
+{
+       return ti_sci_get_resource_range(handle, dev_id, subtype, s_host,
+                                        range_start, range_num);
+}
+
+/**
+ * ti_sci_manage_irq() - Helper API to configure/release the irq route between
+ *                      the requested source and destination
+ * @handle:            Pointer to TISCI handle.
+ * @valid_params:      Bit fields defining the validity of certain params
+ * @src_id:            Device ID of the IRQ source
+ * @src_index:         IRQ source index within the source device
+ * @dst_id:            Device ID of the IRQ destination
+ * @dst_host_irq:      IRQ number of the destination device
+ * @ia_id:             Device ID of the IA, if the IRQ flows through this IA
+ * @vint:              Virtual interrupt to be used within the IA
+ * @global_event:      Global event number to be used for the requesting event
+ * @vint_status_bit:   Virtual interrupt status bit to be used for the event
+ * @s_host:            Secondary host ID for which the irq/event is being
+ *                     requested.
+ * @type:              Request type irq set or release.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_manage_irq(const struct ti_sci_handle *handle,
+                            u32 valid_params, u16 src_id, u16 src_index,
+                            u16 dst_id, u16 dst_host_irq, u16 ia_id, u16 vint,
+                            u16 global_event, u8 vint_status_bit, u8 s_host,
+                            u16 type)
+{
+       struct ti_sci_msg_req_manage_irq *req;
+       struct ti_sci_msg_hdr *resp;
+       struct ti_sci_xfer *xfer;
+       struct ti_sci_info *info;
+       struct device *dev;
+       int ret = 0;
+
+       if (IS_ERR(handle))
+               return PTR_ERR(handle);
+       if (!handle)
+               return -EINVAL;
+
+       info = handle_to_ti_sci_info(handle);
+       dev = info->dev;
+
+       xfer = ti_sci_get_one_xfer(info, type, TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+                                  sizeof(*req), sizeof(*resp));
+       if (IS_ERR(xfer)) {
+               ret = PTR_ERR(xfer);
+               dev_err(dev, "Message alloc failed(%d)\n", ret);
+               return ret;
+       }
+       req = (struct ti_sci_msg_req_manage_irq *)xfer->xfer_buf;
+       req->valid_params = valid_params;
+       req->src_id = src_id;
+       req->src_index = src_index;
+       req->dst_id = dst_id;
+       req->dst_host_irq = dst_host_irq;
+       req->ia_id = ia_id;
+       req->vint = vint;
+       req->global_event = global_event;
+       req->vint_status_bit = vint_status_bit;
+       req->secondary_host = s_host;
+
+       ret = ti_sci_do_xfer(info, xfer);
+       if (ret) {
+               dev_err(dev, "Mbox send fail %d\n", ret);
+               goto fail;
+       }
+
+       resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+       ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+       ti_sci_put_one_xfer(&info->minfo, xfer);
+
+       return ret;
+}
+
+/**
+ * ti_sci_set_irq() - Helper API to configure the irq route between the
+ *                   requested source and destination
+ * @handle:            Pointer to TISCI handle.
+ * @valid_params:      Bit fields defining the validity of certain params
+ * @src_id:            Device ID of the IRQ source
+ * @src_index:         IRQ source index within the source device
+ * @dst_id:            Device ID of the IRQ destination
+ * @dst_host_irq:      IRQ number of the destination device
+ * @ia_id:             Device ID of the IA, if the IRQ flows through this IA
+ * @vint:              Virtual interrupt to be used within the IA
+ * @global_event:      Global event number to be used for the requesting event
+ * @vint_status_bit:   Virtual interrupt status bit to be used for the event
+ * @s_host:            Secondary host ID for which the irq/event is being
+ *                     requested.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_set_irq(const struct ti_sci_handle *handle, u32 valid_params,
+                         u16 src_id, u16 src_index, u16 dst_id,
+                         u16 dst_host_irq, u16 ia_id, u16 vint,
+                         u16 global_event, u8 vint_status_bit, u8 s_host)
+{
+       pr_debug("%s: IRQ set with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d, via ia_id = %d, vint = %d, global event = %d, status_bit = %d\n",
+                __func__, valid_params, src_id, src_index,
+                dst_id, dst_host_irq, ia_id, vint, global_event,
+                vint_status_bit);
+
+       return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
+                                dst_id, dst_host_irq, ia_id, vint,
+                                global_event, vint_status_bit, s_host,
+                                TI_SCI_MSG_SET_IRQ);
+}
+
+/**
+ * ti_sci_free_irq() - Helper API to free the irq route between the
+ *                        requested source and destination
+ * @handle:            Pointer to TISCI handle.
+ * @valid_params:      Bit fields defining the validity of certain params
+ * @src_id:            Device ID of the IRQ source
+ * @src_index:         IRQ source index within the source device
+ * @dst_id:            Device ID of the IRQ destination
+ * @dst_host_irq:      IRQ number of the destination device
+ * @ia_id:             Device ID of the IA, if the IRQ flows through this IA
+ * @vint:              Virtual interrupt to be used within the IA
+ * @global_event:      Global event number to be used for the requesting event
+ * @vint_status_bit:   Virtual interrupt status bit to be used for the event
+ * @s_host:            Secondary host ID for which the irq/event is being
+ *                     requested.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_free_irq(const struct ti_sci_handle *handle, u32 valid_params,
+                          u16 src_id, u16 src_index, u16 dst_id,
+                          u16 dst_host_irq, u16 ia_id, u16 vint,
+                          u16 global_event, u8 vint_status_bit, u8 s_host)
+{
+       pr_debug("%s: IRQ release with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d, via ia_id = %d, vint = %d, global event = %d, status_bit = %d\n",
+                __func__, valid_params, src_id, src_index,
+                dst_id, dst_host_irq, ia_id, vint, global_event,
+                vint_status_bit);
+
+       return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
+                                dst_id, dst_host_irq, ia_id, vint,
+                                global_event, vint_status_bit, s_host,
+                                TI_SCI_MSG_FREE_IRQ);
+}
+
+/**
+ * ti_sci_cmd_set_irq() - Configure a host irq route between the requested
+ *                       source and destination.
+ * @handle:            Pointer to TISCI handle.
+ * @src_id:            Device ID of the IRQ source
+ * @src_index:         IRQ source index within the source device
+ * @dst_id:            Device ID of the IRQ destination
+ * @dst_host_irq:      IRQ number of the destination device
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_set_irq(const struct ti_sci_handle *handle, u16 src_id,
+                             u16 src_index, u16 dst_id, u16 dst_host_irq)
+{
+       u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;
+
+       return ti_sci_set_irq(handle, valid_params, src_id, src_index, dst_id,
+                             dst_host_irq, 0, 0, 0, 0, 0);
+}
+
+/**
+ * ti_sci_cmd_set_event_map() - Configure an event based irq route between the
+ *                             requested source and Interrupt Aggregator.
+ * @handle:            Pointer to TISCI handle.
+ * @src_id:            Device ID of the IRQ source
+ * @src_index:         IRQ source index within the source device
+ * @ia_id:             Device ID of the IA, if the IRQ flows through this IA
+ * @vint:              Virtual interrupt to be used within the IA
+ * @global_event:      Global event number to be used for the requesting event
+ * @vint_status_bit:   Virtual interrupt status bit to be used for the event
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_set_event_map(const struct ti_sci_handle *handle,
+                                   u16 src_id, u16 src_index, u16 ia_id,
+                                   u16 vint, u16 global_event,
+                                   u8 vint_status_bit)
+{
+       u32 valid_params = MSG_FLAG_IA_ID_VALID | MSG_FLAG_VINT_VALID |
+                          MSG_FLAG_GLB_EVNT_VALID |
+                          MSG_FLAG_VINT_STS_BIT_VALID;
+
+       return ti_sci_set_irq(handle, valid_params, src_id, src_index, 0, 0,
+                             ia_id, vint, global_event, vint_status_bit, 0);
+}
+
+/**
+ * ti_sci_cmd_free_irq() - Free a host irq route between the requested
+ *                        source and destination.
+ * @handle:            Pointer to TISCI handle.
+ * @src_id:            Device ID of the IRQ source
+ * @src_index:         IRQ source index within the source device
+ * @dst_id:            Device ID of the IRQ destination
+ * @dst_host_irq:      IRQ number of the destination device
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_free_irq(const struct ti_sci_handle *handle, u16 src_id,
+                              u16 src_index, u16 dst_id, u16 dst_host_irq)
+{
+       u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;
+
+       return ti_sci_free_irq(handle, valid_params, src_id, src_index, dst_id,
+                              dst_host_irq, 0, 0, 0, 0, 0);
+}
+
+/**
+ * ti_sci_cmd_free_event_map() - Free an event map between the requested source
+ *                              and Interrupt Aggregator.
+ * @handle:            Pointer to TISCI handle.
+ * @src_id:            Device ID of the IRQ source
+ * @src_index:         IRQ source index within the source device
+ * @ia_id:             Device ID of the IA, if the IRQ flows through this IA
+ * @vint:              Virtual interrupt to be used within the IA
+ * @global_event:      Global event number to be used for the requesting event
+ * @vint_status_bit:   Virtual interrupt status bit to be used for the event
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_free_event_map(const struct ti_sci_handle *handle,
+                                    u16 src_id, u16 src_index, u16 ia_id,
+                                    u16 vint, u16 global_event,
+                                    u8 vint_status_bit)
+{
+       u32 valid_params = MSG_FLAG_IA_ID_VALID |
+                          MSG_FLAG_VINT_VALID | MSG_FLAG_GLB_EVNT_VALID |
+                          MSG_FLAG_VINT_STS_BIT_VALID;
+
+       return ti_sci_free_irq(handle, valid_params, src_id, src_index, 0, 0,
+                              ia_id, vint, global_event, vint_status_bit, 0);
+}
+
 /*
  * ti_sci_setup_ops() - Setup the operations structures
  * @info:      pointer to TISCI pointer
@@ -1610,6 +2014,8 @@ static void ti_sci_setup_ops(struct ti_sci_info *info)
        struct ti_sci_core_ops *core_ops = &ops->core_ops;
        struct ti_sci_dev_ops *dops = &ops->dev_ops;
        struct ti_sci_clk_ops *cops = &ops->clk_ops;
+       struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops;
+       struct ti_sci_rm_irq_ops *iops = &ops->rm_irq_ops;
 
        core_ops->reboot_device = ti_sci_cmd_core_reboot;
 
@@ -1640,6 +2046,15 @@ static void ti_sci_setup_ops(struct ti_sci_info *info)
        cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq;
        cops->set_freq = ti_sci_cmd_clk_set_freq;
        cops->get_freq = ti_sci_cmd_clk_get_freq;
+
+       rm_core_ops->get_range = ti_sci_cmd_get_resource_range;
+       rm_core_ops->get_range_from_shost =
+                               ti_sci_cmd_get_resource_range_from_shost;
+
+       iops->set_irq = ti_sci_cmd_set_irq;
+       iops->set_event_map = ti_sci_cmd_set_event_map;
+       iops->free_irq = ti_sci_cmd_free_irq;
+       iops->free_event_map = ti_sci_cmd_free_event_map;
 }
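
The new resource-management ops are reached through the existing ti_sci_handle. A minimal consumer sketch, not part of this patch, with made-up device/index/IRQ numbers:

/*
 * Hypothetical consumer: route source index 4 of device 56 to host IRQ
 * 320 of destination device 168 via the newly wired rm_irq_ops, then
 * release the route again.  All numeric IDs are illustration values.
 */
static int example_route_irq(const struct ti_sci_handle *handle)
{
	const struct ti_sci_rm_irq_ops *iops = &handle->ops.rm_irq_ops;
	int ret;

	ret = iops->set_irq(handle, 56, 4, 168, 320);
	if (ret)
		return ret;

	/* ... use the interrupt ... */

	return iops->free_irq(handle, 56, 4, 168, 320);
}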
 
 /**
@@ -1764,6 +2179,219 @@ const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(devm_ti_sci_get_handle);
 
+/**
+ * ti_sci_get_by_phandle() - Get the TI SCI handle using DT phandle
+ * @np:                device node
+ * @property:  property name containing phandle on TISCI node
+ *
+ * NOTE: The function does not track individual clients of the framework
+ * and is expected to be maintained by the caller of the TI SCI protocol library.
+ * ti_sci_put_handle must be balanced with a successful ti_sci_get_by_phandle.
+ *
+ * Return: pointer to handle if successful, else:
+ * -EPROBE_DEFER if the instance is not ready
+ * -ENODEV if the required node handler is missing
+ * -EINVAL if invalid conditions are encountered.
+ */
+const struct ti_sci_handle *ti_sci_get_by_phandle(struct device_node *np,
+                                                 const char *property)
+{
+       struct ti_sci_handle *handle = NULL;
+       struct device_node *ti_sci_np;
+       struct ti_sci_info *info;
+       struct list_head *p;
+
+       if (!np) {
+               pr_err("I need a device pointer\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       ti_sci_np = of_parse_phandle(np, property, 0);
+       if (!ti_sci_np)
+               return ERR_PTR(-ENODEV);
+
+       mutex_lock(&ti_sci_list_mutex);
+       list_for_each(p, &ti_sci_list) {
+               info = list_entry(p, struct ti_sci_info, node);
+               if (ti_sci_np == info->dev->of_node) {
+                       handle = &info->handle;
+                       info->users++;
+                       break;
+               }
+       }
+       mutex_unlock(&ti_sci_list_mutex);
+       of_node_put(ti_sci_np);
+
+       if (!handle)
+               return ERR_PTR(-EPROBE_DEFER);
+
+       return handle;
+}
+EXPORT_SYMBOL_GPL(ti_sci_get_by_phandle);
+
+/**
+ * devm_ti_sci_get_by_phandle() - Managed get handle using phandle
+ * @dev:       Device pointer requesting TISCI handle
+ * @property:  property name containing phandle on TISCI node
+ *
+ * NOTE: This releases the handle once the device resources are
+ * no longer needed. MUST NOT BE released with ti_sci_put_handle.
+ * The function does not track individual clients of the framework
+ * and is expected to be maintained by the caller of the TI SCI protocol library.
+ *
+ * Return: pointer to handle if successful, else corresponding error pointer.
+ */
+const struct ti_sci_handle *devm_ti_sci_get_by_phandle(struct device *dev,
+                                                      const char *property)
+{
+       const struct ti_sci_handle *handle;
+       const struct ti_sci_handle **ptr;
+
+       ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
+       if (!ptr)
+               return ERR_PTR(-ENOMEM);
+       handle = ti_sci_get_by_phandle(dev_of_node(dev), property);
+
+       if (!IS_ERR(handle)) {
+               *ptr = handle;
+               devres_add(dev, ptr);
+       } else {
+               devres_free(ptr);
+       }
+
+       return handle;
+}
+EXPORT_SYMBOL_GPL(devm_ti_sci_get_by_phandle);
+
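
A minimal sketch of how a client driver might pick up the handle through a DT phandle; the "ti,sci" property name and the probe wrapper are illustrative assumptions, not mandated by this patch:

static int example_client_probe(struct device *dev)
{
	const struct ti_sci_handle *handle;

	/* "ti,sci" is a hypothetical property name for illustration */
	handle = devm_ti_sci_get_by_phandle(dev, "ti,sci");
	if (IS_ERR(handle))
		return PTR_ERR(handle);	/* includes -EPROBE_DEFER until TISCI is up */

	/* the handle reference is dropped automatically with dev's devres */
	return 0;
}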
+/**
+ * ti_sci_get_free_resource() - Get a free resource from TISCI resource.
+ * @res:       Pointer to the TISCI resource
+ *
+ * Return: resource number if allocation succeeded, else TI_SCI_RESOURCE_NULL.
+ */
+u16 ti_sci_get_free_resource(struct ti_sci_resource *res)
+{
+       unsigned long flags;
+       u16 set, free_bit;
+
+       raw_spin_lock_irqsave(&res->lock, flags);
+       for (set = 0; set < res->sets; set++) {
+               free_bit = find_first_zero_bit(res->desc[set].res_map,
+                                              res->desc[set].num);
+               if (free_bit != res->desc[set].num) {
+                       set_bit(free_bit, res->desc[set].res_map);
+                       raw_spin_unlock_irqrestore(&res->lock, flags);
+                       return res->desc[set].start + free_bit;
+               }
+       }
+       raw_spin_unlock_irqrestore(&res->lock, flags);
+
+       return TI_SCI_RESOURCE_NULL;
+}
+EXPORT_SYMBOL_GPL(ti_sci_get_free_resource);
+
+/**
+ * ti_sci_release_resource() - Release a resource from TISCI resource.
+ * @res:       Pointer to the TISCI resource
+ * @id:                Resource id to be released.
+ */
+void ti_sci_release_resource(struct ti_sci_resource *res, u16 id)
+{
+       unsigned long flags;
+       u16 set;
+
+       raw_spin_lock_irqsave(&res->lock, flags);
+       for (set = 0; set < res->sets; set++) {
+               if (res->desc[set].start <= id &&
+                   (res->desc[set].num + res->desc[set].start) > id)
+                       clear_bit(id - res->desc[set].start,
+                                 res->desc[set].res_map);
+       }
+       raw_spin_unlock_irqrestore(&res->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ti_sci_release_resource);
+
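
A short sketch of the allocate/release pair, assuming res was set up beforehand (for example by devm_ti_sci_get_of_resource() below):

static int example_use_resource(struct ti_sci_resource *res)
{
	u16 idx;

	idx = ti_sci_get_free_resource(res);
	if (idx == TI_SCI_RESOURCE_NULL)
		return -ENOSPC;

	/* ... program the hardware with idx ... */

	ti_sci_release_resource(res, idx);
	return 0;
}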
+/**
+ * ti_sci_get_num_resources() - Get the number of resources in TISCI resource
+ * @res:       Pointer to the TISCI resource
+ *
+ * Return: Total number of available resources.
+ */
+u32 ti_sci_get_num_resources(struct ti_sci_resource *res)
+{
+       u32 set, count = 0;
+
+       for (set = 0; set < res->sets; set++)
+               count += res->desc[set].num;
+
+       return count;
+}
+EXPORT_SYMBOL_GPL(ti_sci_get_num_resources);
+
+/**
+ * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device
+ * @handle:    TISCI handle
+ * @dev:       Device pointer to which the resource is assigned
+ * @dev_id:    TISCI device id to which the resource is assigned
+ * @of_prop:   property name by which the resources are represented
+ *
+ * Return: Pointer to ti_sci_resource if all went well else appropriate
+ *        error pointer.
+ */
+struct ti_sci_resource *
+devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
+                           struct device *dev, u32 dev_id, char *of_prop)
+{
+       struct ti_sci_resource *res;
+       u32 resource_subtype;
+       int i, ret;
+
+       res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
+       if (!res)
+               return ERR_PTR(-ENOMEM);
+
+       res->sets = of_property_count_elems_of_size(dev_of_node(dev), of_prop,
+                                                   sizeof(u32));
+       if (res->sets < 0) {
+               dev_err(dev, "%s resource type ids not available\n", of_prop);
+               return ERR_PTR(res->sets);
+       }
+
+       res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc),
+                                GFP_KERNEL);
+       if (!res->desc)
+               return ERR_PTR(-ENOMEM);
+
+       for (i = 0; i < res->sets; i++) {
+               ret = of_property_read_u32_index(dev_of_node(dev), of_prop, i,
+                                                &resource_subtype);
+               if (ret)
+                       return ERR_PTR(-EINVAL);
+
+               ret = handle->ops.rm_core_ops.get_range(handle, dev_id,
+                                                       resource_subtype,
+                                                       &res->desc[i].start,
+                                                       &res->desc[i].num);
+               if (ret) {
+                       dev_err(dev, "dev = %d subtype %d not allocated for this host\n",
+                               dev_id, resource_subtype);
+                       return ERR_PTR(ret);
+               }
+
+               dev_dbg(dev, "dev = %d, subtype = %d, start = %d, num = %d\n",
+                       dev_id, resource_subtype, res->desc[i].start,
+                       res->desc[i].num);
+
+               res->desc[i].res_map =
+                       devm_kzalloc(dev, BITS_TO_LONGS(res->desc[i].num) *
+                                    sizeof(*res->desc[i].res_map), GFP_KERNEL);
+               if (!res->desc[i].res_map)
+                       return ERR_PTR(-ENOMEM);
+       }
+       raw_spin_lock_init(&res->lock);
+
+       return res;
+}
+
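
A probe-time sketch of pulling a resource range out of DT; the dev_id value and the property name are illustration-only assumptions:

static struct ti_sci_resource *
example_get_vint_resource(const struct ti_sci_handle *handle,
			  struct device *dev)
{
	/* 179 and "ti,sci-rm-range-vint" are hypothetical example values */
	return devm_ti_sci_get_of_resource(handle, dev, 179,
					   "ti,sci-rm-range-vint");
}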
 static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode,
                                void *cmd)
 {
@@ -1784,10 +2412,33 @@ static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
        /* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
        .max_msgs = 20,
        .max_msg_size = 64,
+       .rm_type_map = NULL,
+};
+
+static struct ti_sci_rm_type_map ti_sci_am654_rm_type_map[] = {
+       {.dev_id = 56, .type = 0x00b}, /* GIC_IRQ */
+       {.dev_id = 179, .type = 0x000}, /* MAIN_NAV_UDMASS_IA0 */
+       {.dev_id = 187, .type = 0x009}, /* MAIN_NAV_RA */
+       {.dev_id = 188, .type = 0x006}, /* MAIN_NAV_UDMAP */
+       {.dev_id = 194, .type = 0x007}, /* MCU_NAV_UDMAP */
+       {.dev_id = 195, .type = 0x00a}, /* MCU_NAV_RA */
+       {.dev_id = 0, .type = 0x000}, /* end of table */
+};
+
+/* Description for AM654 */
+static const struct ti_sci_desc ti_sci_pmmc_am654_desc = {
+       .default_host_id = 12,
+       /* Conservative duration */
+       .max_rx_timeout_ms = 10000,
+       /* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
+       .max_msgs = 20,
+       .max_msg_size = 60,
+       .rm_type_map = ti_sci_am654_rm_type_map,
 };
 
 static const struct of_device_id ti_sci_of_match[] = {
        {.compatible = "ti,k2g-sci", .data = &ti_sci_pmmc_k2g_desc},
+       {.compatible = "ti,am654-sci", .data = &ti_sci_pmmc_am654_desc},
        { /* Sentinel */ },
 };
 MODULE_DEVICE_TABLE(of, ti_sci_of_match);
index 12bf316..4983827 100644 (file)
 #define TI_SCI_MSG_QUERY_CLOCK_FREQ    0x010d
 #define TI_SCI_MSG_GET_CLOCK_FREQ      0x010e
 
+/* Resource Management Requests */
+#define TI_SCI_MSG_GET_RESOURCE_RANGE  0x1500
+
+/* IRQ requests */
+#define TI_SCI_MSG_SET_IRQ             0x1000
+#define TI_SCI_MSG_FREE_IRQ            0x1001
+
 /**
  * struct ti_sci_msg_hdr - Generic Message Header for All messages and responses
  * @type:      Type of messages: One of TI_SCI_MSG* values
@@ -461,4 +468,99 @@ struct ti_sci_msg_resp_get_clock_freq {
        u64 freq_hz;
 } __packed;
 
+#define TI_SCI_IRQ_SECONDARY_HOST_INVALID      0xff
+
+/**
+ * struct ti_sci_msg_req_get_resource_range - Request to get a host's assigned
+ *                                           range of resources.
+ * @hdr:               Generic Header
+ * @type:              Unique resource assignment type
+ * @subtype:           Resource assignment subtype within the resource type.
+ * @secondary_host:    Host processing entity to which the resources are
+ *                     allocated. This is required only when the destination
+ *                     host id id different from ti sci interface host id,
+ *                     host ID is different from the TI SCI interface host ID,
+ *
+ * Request type is TI_SCI_MSG_GET_RESOURCE_RANGE. Responded with requested
+ * resource range which is of type TI_SCI_MSG_GET_RESOURCE_RANGE.
+ */
+struct ti_sci_msg_req_get_resource_range {
+       struct ti_sci_msg_hdr hdr;
+#define MSG_RM_RESOURCE_TYPE_MASK      GENMASK(9, 0)
+#define MSG_RM_RESOURCE_SUBTYPE_MASK   GENMASK(5, 0)
+       u16 type;
+       u8 subtype;
+       u8 secondary_host;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_get_resource_range - Response to resource get range.
+ * @hdr:               Generic Header
+ * @range_start:       Start index of the resource range.
+ * @range_num:         Number of resources in the range.
+ *
+ * Response to request TI_SCI_MSG_GET_RESOURCE_RANGE.
+ */
+struct ti_sci_msg_resp_get_resource_range {
+       struct ti_sci_msg_hdr hdr;
+       u16 range_start;
+       u16 range_num;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_manage_irq - Request to configure/release the route
+ *                                     between the dev and the host.
+ * @hdr:               Generic Header
+ * @valid_params:      Bit fields defining the validity of interrupt source
+ *                     parameters. If a bit is not set, then corresponding
+ *                     field is not valid and will not be used for route set.
+ *                     Bit field definitions:
+ *                     0 - Valid bit for @dst_id
+ *                     1 - Valid bit for @dst_host_irq
+ *                     2 - Valid bit for @ia_id
+ *                     3 - Valid bit for @vint
+ *                     4 - Valid bit for @global_event
+ *                     5 - Valid bit for @vint_status_bit_index
+ *                     31 - Valid bit for @secondary_host
+ * @src_id:            IRQ source peripheral ID.
+ * @src_index:         IRQ source index within the peripheral
+ * @dst_id:            IRQ Destination ID. Based on the architecture it can be
+ *                     IRQ controller or host processor ID.
+ * @dst_host_irq:      IRQ number of the destination host IRQ controller
+ * @ia_id:             Device ID of the interrupt aggregator in which the
+ *                     vint resides.
+ * @vint:              Virtual interrupt number if the interrupt route
+ *                     is through an interrupt aggregator.
+ * @global_event:      Global event that is to be mapped to interrupt
+ *                     aggregator virtual interrupt status bit.
+ * @vint_status_bit:   Virtual interrupt status bit if the interrupt route
+ *                     utilizes an interrupt aggregator status bit.
+ * @secondary_host:    Host ID of the IRQ destination computing entity. This is
+ *                     required only when the destination host ID is different
+ *                     from the TI SCI interface host ID.
+ *
+ * Request type is TI_SCI_MSG_SET_IRQ or TI_SCI_MSG_FREE_IRQ.
+ * Response is generic ACK / NACK message.
+ */
+struct ti_sci_msg_req_manage_irq {
+       struct ti_sci_msg_hdr hdr;
+#define MSG_FLAG_DST_ID_VALID                  TI_SCI_MSG_FLAG(0)
+#define MSG_FLAG_DST_HOST_IRQ_VALID            TI_SCI_MSG_FLAG(1)
+#define MSG_FLAG_IA_ID_VALID                   TI_SCI_MSG_FLAG(2)
+#define MSG_FLAG_VINT_VALID                    TI_SCI_MSG_FLAG(3)
+#define MSG_FLAG_GLB_EVNT_VALID                        TI_SCI_MSG_FLAG(4)
+#define MSG_FLAG_VINT_STS_BIT_VALID            TI_SCI_MSG_FLAG(5)
+#define MSG_FLAG_SHOST_VALID                   TI_SCI_MSG_FLAG(31)
+       u32 valid_params;
+       u16 src_id;
+       u16 src_index;
+       u16 dst_id;
+       u16 dst_host_irq;
+       u16 ia_id;
+       u16 vint;
+       u16 global_event;
+       u8 vint_status_bit;
+       u8 secondary_host;
+} __packed;
+
 #endif /* __TI_SCI_H */
index 1306722..715371b 100644 (file)
@@ -363,22 +363,16 @@ static int thunderx_gpio_irq_request_resources(struct irq_data *data)
 {
        struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
        struct thunderx_gpio *txgpio = txline->txgpio;
-       struct irq_data *parent_data = data->parent_data;
        int r;
 
        r = gpiochip_lock_as_irq(&txgpio->chip, txline->line);
        if (r)
                return r;
 
-       if (parent_data && parent_data->chip->irq_request_resources) {
-               r = parent_data->chip->irq_request_resources(parent_data);
-               if (r)
-                       goto error;
-       }
+       r = irq_chip_request_resources_parent(data);
+       if (r)
+               gpiochip_unlock_as_irq(&txgpio->chip, txline->line);
 
-       return 0;
-error:
-       gpiochip_unlock_as_irq(&txgpio->chip, txline->line);
        return r;
 }
 
@@ -386,10 +380,8 @@ static void thunderx_gpio_irq_release_resources(struct irq_data *data)
 {
        struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
        struct thunderx_gpio *txgpio = txline->txgpio;
-       struct irq_data *parent_data = data->parent_data;
 
-       if (parent_data && parent_data->chip->irq_release_resources)
-               parent_data->chip->irq_release_resources(parent_data);
+       irq_chip_release_resources_parent(data);
 
        gpiochip_unlock_as_irq(&txgpio->chip, txline->line);
 }
index 9732a81..d389d4f 100644 (file)
@@ -714,7 +714,7 @@ static int i2c_dev_irq_from_resources(const struct resource *resources,
 }
 
 /**
- * i2c_new_device - instantiate an i2c device
+ * i2c_new_client_device - instantiate an i2c device
  * @adap: the adapter managing the device
  * @info: describes one I2C device; bus_num is ignored
  * Context: can sleep
@@ -727,17 +727,17 @@ static int i2c_dev_irq_from_resources(const struct resource *resources,
  * before any i2c_adapter could exist.
  *
  * This returns the new i2c client, which may be saved for later use with
- * i2c_unregister_device(); or NULL to indicate an error.
+ * i2c_unregister_device(); or an ERR_PTR to describe the error.
  */
-struct i2c_client *
-i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
+static struct i2c_client *
+i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
 {
        struct i2c_client       *client;
        int                     status;
 
        client = kzalloc(sizeof *client, GFP_KERNEL);
        if (!client)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        client->adapter = adap;
 
@@ -803,7 +803,31 @@ out_err:
                client->name, client->addr, status);
 out_err_silent:
        kfree(client);
-       return NULL;
+       return ERR_PTR(status);
+}
+EXPORT_SYMBOL_GPL(i2c_new_client_device);
+
+/**
+ * i2c_new_device - instantiate an i2c device
+ * @adap: the adapter managing the device
+ * @info: describes one I2C device; bus_num is ignored
+ * Context: can sleep
+ *
+ * This deprecated function has the same functionality as
+ * @i2c_new_client_device; it just returns NULL instead of an ERR_PTR in case of
+ * an error, for compatibility with the current I2C API. It will be removed once all
+ * users are converted.
+ *
+ * This returns the new i2c client, which may be saved for later use with
+ * i2c_unregister_device(); or NULL to indicate an error.
+ */
+struct i2c_client *
+i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
+{
+       struct i2c_client *ret;
+
+       ret = i2c_new_client_device(adap, info);
+       return IS_ERR(ret) ? NULL : ret;
 }
 EXPORT_SYMBOL_GPL(i2c_new_device);
 
@@ -854,7 +878,7 @@ static struct i2c_driver dummy_driver = {
 };
 
 /**
- * i2c_new_dummy - return a new i2c device bound to a dummy driver
+ * i2c_new_dummy_device - return a new i2c device bound to a dummy driver
  * @adapter: the adapter managing the device
  * @address: seven bit address to be used
  * Context: can sleep
@@ -869,18 +893,86 @@ static struct i2c_driver dummy_driver = {
  * different driver.
  *
  * This returns the new i2c client, which should be saved for later use with
- * i2c_unregister_device(); or NULL to indicate an error.
+ * i2c_unregister_device(); or an ERR_PTR to describe the error.
  */
-struct i2c_client *i2c_new_dummy(struct i2c_adapter *adapter, u16 address)
+static struct i2c_client *
+i2c_new_dummy_device(struct i2c_adapter *adapter, u16 address)
 {
        struct i2c_board_info info = {
                I2C_BOARD_INFO("dummy", address),
        };
 
-       return i2c_new_device(adapter, &info);
+       return i2c_new_client_device(adapter, &info);
+}
+EXPORT_SYMBOL_GPL(i2c_new_dummy_device);
+
+/**
+ * i2c_new_dummy - return a new i2c device bound to a dummy driver
+ * @adapter: the adapter managing the device
+ * @address: seven bit address to be used
+ * Context: can sleep
+ *
+ * This deprecated function has the same functionality as @i2c_new_dummy_device;
+ * it just returns NULL instead of an ERR_PTR in case of an error, for
+ * compatibility with the current I2C API. It will be removed once all users are
+ * converted.
+ *
+ * This returns the new i2c client, which should be saved for later use with
+ * i2c_unregister_device(); or NULL to indicate an error.
+ */
+struct i2c_client *i2c_new_dummy(struct i2c_adapter *adapter, u16 address)
+{
+       struct i2c_client *ret;
+
+       ret = i2c_new_dummy_device(adapter, address);
+       return IS_ERR(ret) ? NULL : ret;
 }
 EXPORT_SYMBOL_GPL(i2c_new_dummy);
 
+struct i2c_dummy_devres {
+       struct i2c_client *client;
+};
+
+static void devm_i2c_release_dummy(struct device *dev, void *res)
+{
+       struct i2c_dummy_devres *this = res;
+
+       i2c_unregister_device(this->client);
+}
+
+/**
+ * devm_i2c_new_dummy_device - return a new i2c device bound to a dummy driver
+ * @dev: device the managed resource is bound to
+ * @adapter: the adapter managing the device
+ * @address: seven bit address to be used
+ * Context: can sleep
+ *
+ * This is the device-managed version of @i2c_new_dummy_device. It returns the
+ * new i2c client or an ERR_PTR in case of an error.
+ */
+struct i2c_client *devm_i2c_new_dummy_device(struct device *dev,
+                                            struct i2c_adapter *adapter,
+                                            u16 address)
+{
+       struct i2c_dummy_devres *dr;
+       struct i2c_client *client;
+
+       dr = devres_alloc(devm_i2c_release_dummy, sizeof(*dr), GFP_KERNEL);
+       if (!dr)
+               return ERR_PTR(-ENOMEM);
+
+       client = i2c_new_dummy_device(adapter, address);
+       if (IS_ERR(client)) {
+               devres_free(dr);
+       } else {
+               dr->client = client;
+               devres_add(dev, dr);
+       }
+
+       return client;
+}
+EXPORT_SYMBOL_GPL(devm_i2c_new_dummy_device);
+
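
A minimal client sketch of the devres-managed variant, with a hypothetical 0x50 address; errors are now propagated instead of being collapsed to NULL:

static int example_bind_dummy(struct device *dev, struct i2c_adapter *adap)
{
	struct i2c_client *client;

	client = devm_i2c_new_dummy_device(dev, adap, 0x50);
	if (IS_ERR(client))
		return PTR_ERR(client);	/* real error code, not a bare NULL */

	/* the dummy client is unregistered automatically with dev's devres */
	return 0;
}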
 /**
  * i2c_new_secondary_device - Helper to get the instantiated secondary address
  * and create the associated device
@@ -1000,9 +1092,9 @@ i2c_sysfs_new_device(struct device *dev, struct device_attribute *attr,
                info.flags |= I2C_CLIENT_SLAVE;
        }
 
-       client = i2c_new_device(adap, &info);
-       if (!client)
-               return -EINVAL;
+       client = i2c_new_client_device(adap, &info);
+       if (IS_ERR(client))
+               return PTR_ERR(client);
 
        /* Keep track of the added device */
        mutex_lock(&adap->userspace_clients_lock);
index 15b8311..e559e43 100644 (file)
@@ -94,6 +94,7 @@ config IOMMU_DMA
        bool
        select IOMMU_API
        select IOMMU_IOVA
+       select IRQ_MSI_IOMMU
        select NEED_SG_DMA_LENGTH
 
 config FSL_PAMU
index 5e89804..129c4ba 100644 (file)
@@ -907,17 +907,18 @@ out_free_page:
        return NULL;
 }
 
-void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
+int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
 {
-       struct device *dev = msi_desc_to_dev(irq_get_msi_desc(irq));
+       struct device *dev = msi_desc_to_dev(desc);
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
        struct iommu_dma_cookie *cookie;
        struct iommu_dma_msi_page *msi_page;
-       phys_addr_t msi_addr = (u64)msg->address_hi << 32 | msg->address_lo;
        unsigned long flags;
 
-       if (!domain || !domain->iova_cookie)
-               return;
+       if (!domain || !domain->iova_cookie) {
+               desc->iommu_cookie = NULL;
+               return 0;
+       }
 
        cookie = domain->iova_cookie;
 
@@ -930,19 +931,26 @@ void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
        msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
        spin_unlock_irqrestore(&cookie->msi_lock, flags);
 
-       if (WARN_ON(!msi_page)) {
-               /*
-                * We're called from a void callback, so the best we can do is
-                * 'fail' by filling the message with obviously bogus values.
-                * Since we got this far due to an IOMMU being present, it's
-                * not like the existing address would have worked anyway...
-                */
-               msg->address_hi = ~0U;
-               msg->address_lo = ~0U;
-               msg->data = ~0U;
-       } else {
-               msg->address_hi = upper_32_bits(msi_page->iova);
-               msg->address_lo &= cookie_msi_granule(cookie) - 1;
-               msg->address_lo += lower_32_bits(msi_page->iova);
-       }
+       msi_desc_set_iommu_cookie(desc, msi_page);
+
+       if (!msi_page)
+               return -ENOMEM;
+       return 0;
+}
+
+void iommu_dma_compose_msi_msg(struct msi_desc *desc,
+                              struct msi_msg *msg)
+{
+       struct device *dev = msi_desc_to_dev(desc);
+       const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+       const struct iommu_dma_msi_page *msi_page;
+
+       msi_page = msi_desc_get_iommu_cookie(desc);
+
+       if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
+               return;
+
+       msg->address_hi = upper_32_bits(msi_page->iova);
+       msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
+       msg->address_lo += lower_32_bits(msi_page->iova);
 }
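
A minimal sketch of the resulting two-step flow for an MSI irqchip; the doorbell address is a placeholder and real drivers also allocate their parent interrupts in .alloc, as the GICv2m/ITS/MBI hunks below show:

static int example_msi_domain_alloc(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs,
				    void *args)
{
	msi_alloc_info_t *info = args;

	/* sleepable context: map the doorbell page up front */
	return iommu_dma_prepare_msi(info->desc, 0x10000000);
}

static void example_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	/* ... fill msg->address_hi/lo and msg->data as before, then ... */
	iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), msg);
}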
index cf79849..1c1f3f6 100644 (file)
@@ -6,7 +6,6 @@ config IRQCHIP
 
 config ARM_GIC
        bool
-       select IRQ_DOMAIN
        select IRQ_DOMAIN_HIERARCHY
        select GENERIC_IRQ_MULTI_HANDLER
        select GENERIC_IRQ_EFFECTIVE_AFF_MASK
@@ -33,7 +32,6 @@ config GIC_NON_BANKED
 
 config ARM_GIC_V3
        bool
-       select IRQ_DOMAIN
        select GENERIC_IRQ_MULTI_HANDLER
        select IRQ_DOMAIN_HIERARCHY
        select PARTITION_PERCPU
@@ -59,7 +57,6 @@ config ARM_GIC_V3_ITS_FSL_MC
 
 config ARM_NVIC
        bool
-       select IRQ_DOMAIN
        select IRQ_DOMAIN_HIERARCHY
        select GENERIC_IRQ_CHIP
 
@@ -358,7 +355,6 @@ config STM32_EXTI
 config QCOM_IRQ_COMBINER
        bool "QCOM IRQ combiner support"
        depends on ARCH_QCOM && ACPI
-       select IRQ_DOMAIN
        select IRQ_DOMAIN_HIERARCHY
        help
          Say yes here to add support for the IRQ combiner devices embedded
@@ -375,7 +371,6 @@ config IRQ_UNIPHIER_AIDET
 config MESON_IRQ_GPIO
        bool "Meson GPIO Interrupt Multiplexer"
        depends on ARCH_MESON
-       select IRQ_DOMAIN
        select IRQ_DOMAIN_HIERARCHY
        help
          Support Meson SoC Family GPIO Interrupt Multiplexer
@@ -391,7 +386,6 @@ config GOLDFISH_PIC
 config QCOM_PDC
        bool "QCOM PDC"
        depends on ARCH_QCOM
-       select IRQ_DOMAIN
        select IRQ_DOMAIN_HIERARCHY
        help
          Power Domain Controller driver to manage and configure wakeup
@@ -431,6 +425,27 @@ config LS1X_IRQ
        help
          Support for the Loongson-1 platform Interrupt Controller.
 
+config TI_SCI_INTR_IRQCHIP
+       bool
+       depends on TI_SCI_PROTOCOL
+       select IRQ_DOMAIN_HIERARCHY
+       help
+         This enables the irqchip driver support for K3 Interrupt router
+         over the TI System Control Interface, available on some of TI's newer SoCs.
+         If you wish to use interrupt router irq resources managed by the
+         TI System Controller, say Y here. Otherwise, say N.
+
+config TI_SCI_INTA_IRQCHIP
+       bool
+       depends on TI_SCI_PROTOCOL
+       select IRQ_DOMAIN_HIERARCHY
+       select TI_SCI_INTA_MSI_DOMAIN
+       help
+         This enables the irqchip driver support for K3 Interrupt aggregator
+         over the TI System Control Interface, available on some of TI's newer SoCs.
+         If you wish to use interrupt aggregator irq resources managed by the
+         TI System Controller, say Y here. Otherwise, say N.
+
 endmenu
 
 config SIFIVE_PLIC
index f8c66e9..606a003 100644 (file)
@@ -98,3 +98,5 @@ obj-$(CONFIG_SIFIVE_PLIC)             += irq-sifive-plic.o
 obj-$(CONFIG_IMX_IRQSTEER)             += irq-imx-irqsteer.o
 obj-$(CONFIG_MADERA_IRQ)               += irq-madera.o
 obj-$(CONFIG_LS1X_IRQ)                 += irq-ls1x.o
+obj-$(CONFIG_TI_SCI_INTR_IRQCHIP)      += irq-ti-sci-intr.o
+obj-$(CONFIG_TI_SCI_INTA_IRQCHIP)      += irq-ti-sci-inta.o
index 0f6e30e..0acebac 100644 (file)
@@ -343,6 +343,9 @@ int __init bcm7038_l1_of_init(struct device_node *dn,
                goto out_unmap;
        }
 
+       pr_info("registered BCM7038 L1 intc (%pOF, IRQs: %d)\n",
+               dn, IRQS_PER_WORD * intc->n_words);
+
        return 0;
 
 out_unmap:
index 8968e5e..541bdca 100644 (file)
@@ -318,6 +318,9 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn,
                }
        }
 
+       pr_info("registered %s intc (%pOF, parent IRQ(s): %d)\n",
+               intc_name, dn, data->num_parent_irqs);
+
        return 0;
 
 out_free_domain:
index 5e4ca13..a0642b5 100644 (file)
@@ -264,6 +264,8 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np,
                ct->chip.irq_set_wake = irq_gc_set_wake;
        }
 
+       pr_info("registered L2 intc (%pOF, parent irq: %d)\n", np, parent_irq);
+
        return 0;
 
 out_free_domain:
index ecafd29..c4aac09 100644 (file)
@@ -19,7 +19,6 @@
 #include <linux/of_irq.h>
 #include <linux/irqchip/arm-gic.h>
 #include <linux/platform_device.h>
-#include <linux/pm_clock.h>
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
 
@@ -28,17 +27,27 @@ struct gic_clk_data {
        const char *const *clocks;
 };
 
+struct gic_chip_pm {
+       struct gic_chip_data *chip_data;
+       const struct gic_clk_data *clk_data;
+       struct clk_bulk_data *clks;
+};
+
 static int gic_runtime_resume(struct device *dev)
 {
-       struct gic_chip_data *gic = dev_get_drvdata(dev);
+       struct gic_chip_pm *chip_pm = dev_get_drvdata(dev);
+       struct gic_chip_data *gic = chip_pm->chip_data;
+       const struct gic_clk_data *data = chip_pm->clk_data;
        int ret;
 
-       ret = pm_clk_resume(dev);
-       if (ret)
+       ret = clk_bulk_prepare_enable(data->num_clocks, chip_pm->clks);
+       if (ret) {
+               dev_err(dev, "clk_enable failed: %d\n", ret);
                return ret;
+       }
 
        /*
-        * On the very first resume, the pointer to the driver data
+        * On the very first resume, the pointer to chip_pm->chip_data
         * will be NULL and this is intentional, because we do not
         * want to restore the GIC on the very first resume. So if
         * the pointer is not valid just return.
@@ -54,35 +63,14 @@ static int gic_runtime_resume(struct device *dev)
 
 static int gic_runtime_suspend(struct device *dev)
 {
-       struct gic_chip_data *gic = dev_get_drvdata(dev);
+       struct gic_chip_pm *chip_pm = dev_get_drvdata(dev);
+       struct gic_chip_data *gic = chip_pm->chip_data;
+       const struct gic_clk_data *data = chip_pm->clk_data;
 
        gic_dist_save(gic);
        gic_cpu_save(gic);
 
-       return pm_clk_suspend(dev);
-}
-
-static int gic_get_clocks(struct device *dev, const struct gic_clk_data *data)
-{
-       unsigned int i;
-       int ret;
-
-       if (!dev || !data)
-               return -EINVAL;
-
-       ret = pm_clk_create(dev);
-       if (ret)
-               return ret;
-
-       for (i = 0; i < data->num_clocks; i++) {
-               ret = of_pm_clk_add_clk(dev, data->clocks[i]);
-               if (ret) {
-                       dev_err(dev, "failed to add clock %s\n",
-                               data->clocks[i]);
-                       pm_clk_destroy(dev);
-                       return ret;
-               }
-       }
+       clk_bulk_disable_unprepare(data->num_clocks, chip_pm->clks);
 
        return 0;
 }
@@ -91,8 +79,8 @@ static int gic_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        const struct gic_clk_data *data;
-       struct gic_chip_data *gic;
-       int ret, irq;
+       struct gic_chip_pm *chip_pm;
+       int ret, irq, i;
 
        data = of_device_get_match_data(&pdev->dev);
        if (!data) {
@@ -100,28 +88,41 @@ static int gic_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
+       chip_pm = devm_kzalloc(dev, sizeof(*chip_pm), GFP_KERNEL);
+       if (!chip_pm)
+               return -ENOMEM;
+
        irq = irq_of_parse_and_map(dev->of_node, 0);
        if (!irq) {
                dev_err(dev, "no parent interrupt found!\n");
                return -EINVAL;
        }
 
-       ret = gic_get_clocks(dev, data);
+       chip_pm->clks = devm_kcalloc(dev, data->num_clocks,
+                                    sizeof(*chip_pm->clks), GFP_KERNEL);
+       if (!chip_pm->clks)
+               return -ENOMEM;
+
+       for (i = 0; i < data->num_clocks; i++)
+               chip_pm->clks[i].id = data->clocks[i];
+
+       ret = devm_clk_bulk_get(dev, data->num_clocks, chip_pm->clks);
        if (ret)
                goto irq_dispose;
 
+       chip_pm->clk_data = data;
+       dev_set_drvdata(dev, chip_pm);
+
        pm_runtime_enable(dev);
 
        ret = pm_runtime_get_sync(dev);
        if (ret < 0)
                goto rpm_disable;
 
-       ret = gic_of_init_child(dev, &gic, irq);
+       ret = gic_of_init_child(dev, &chip_pm->chip_data, irq);
        if (ret)
                goto rpm_put;
 
-       platform_set_drvdata(pdev, gic);
-
        pm_runtime_put(dev);
 
        dev_info(dev, "GIC IRQ controller registered\n");
@@ -132,7 +133,6 @@ rpm_put:
        pm_runtime_put_sync(dev);
 rpm_disable:
        pm_runtime_disable(dev);
-       pm_clk_destroy(dev);
 irq_dispose:
        irq_dispose_mapping(irq);
 
@@ -142,6 +142,8 @@ irq_dispose:
 static const struct dev_pm_ops gic_pm_ops = {
        SET_RUNTIME_PM_OPS(gic_runtime_suspend,
                           gic_runtime_resume, NULL)
+       SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                                    pm_runtime_force_resume)
 };
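
For reference, a minimal sketch of the clk_bulk pattern this conversion relies on, with hypothetical clock names; devm_clk_bulk_get() replaces the old pm_clk_create()/of_pm_clk_add_clk() bookkeeping:

static struct clk_bulk_data example_clks[] = {
	{ .id = "clk" },	/* hypothetical clock names */
	{ .id = "pclk" },
};

static int example_enable_clocks(struct device *dev)
{
	int ret;

	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(example_clks), example_clks);
	if (ret)
		return ret;

	return clk_bulk_prepare_enable(ARRAY_SIZE(example_clks), example_clks);
}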
 
 static const char * const gic400_clocks[] = {
index de14e06..3c77ab6 100644 (file)
@@ -110,7 +110,7 @@ static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
        if (v2m->flags & GICV2M_NEEDS_SPI_OFFSET)
                msg->data -= v2m->spi_offset;
 
-       iommu_dma_map_msi_msg(data->irq, msg);
+       iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), msg);
 }
 
 static struct irq_chip gicv2m_irq_chip = {
@@ -167,6 +167,7 @@ static void gicv2m_unalloc_msi(struct v2m_data *v2m, unsigned int hwirq,
 static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
                                   unsigned int nr_irqs, void *args)
 {
+       msi_alloc_info_t *info = args;
        struct v2m_data *v2m = NULL, *tmp;
        int hwirq, offset, i, err = 0;
 
@@ -186,6 +187,11 @@ static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
 
        hwirq = v2m->spi_start + offset;
 
+       err = iommu_dma_prepare_msi(info->desc,
+                                   v2m->res.start + V2M_MSI_SETSPI_NS);
+       if (err)
+               return err;
+
        for (i = 0; i < nr_irqs; i++) {
                err = gicv2m_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
                if (err)
index 128ac89..cfb9b4e 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/interrupt.h>
 #include <linux/irqdomain.h>
 #include <linux/list.h>
-#include <linux/list_sort.h>
 #include <linux/log2.h>
 #include <linux/memblock.h>
 #include <linux/mm.h>
@@ -1179,7 +1178,7 @@ static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
        msg->address_hi         = upper_32_bits(addr);
        msg->data               = its_get_event_id(d);
 
-       iommu_dma_map_msi_msg(d->irq, msg);
+       iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg);
 }
 
 static int its_irq_set_irqchip_state(struct irq_data *d,
@@ -1465,9 +1464,8 @@ static struct lpi_range *mk_lpi_range(u32 base, u32 span)
 {
        struct lpi_range *range;
 
-       range = kzalloc(sizeof(*range), GFP_KERNEL);
+       range = kmalloc(sizeof(*range), GFP_KERNEL);
        if (range) {
-               INIT_LIST_HEAD(&range->entry);
                range->base_id = base;
                range->span = span;
        }
@@ -1475,31 +1473,6 @@ static struct lpi_range *mk_lpi_range(u32 base, u32 span)
        return range;
 }
 
-static int lpi_range_cmp(void *priv, struct list_head *a, struct list_head *b)
-{
-       struct lpi_range *ra, *rb;
-
-       ra = container_of(a, struct lpi_range, entry);
-       rb = container_of(b, struct lpi_range, entry);
-
-       return ra->base_id - rb->base_id;
-}
-
-static void merge_lpi_ranges(void)
-{
-       struct lpi_range *range, *tmp;
-
-       list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
-               if (!list_is_last(&range->entry, &lpi_range_list) &&
-                   (tmp->base_id == (range->base_id + range->span))) {
-                       tmp->base_id = range->base_id;
-                       tmp->span += range->span;
-                       list_del(&range->entry);
-                       kfree(range);
-               }
-       }
-}
-
 static int alloc_lpi_range(u32 nr_lpis, u32 *base)
 {
        struct lpi_range *range, *tmp;
@@ -1529,25 +1502,49 @@ static int alloc_lpi_range(u32 nr_lpis, u32 *base)
        return err;
 }
 
+static void merge_lpi_ranges(struct lpi_range *a, struct lpi_range *b)
+{
+       if (&a->entry == &lpi_range_list || &b->entry == &lpi_range_list)
+               return;
+       if (a->base_id + a->span != b->base_id)
+               return;
+       b->base_id = a->base_id;
+       b->span += a->span;
+       list_del(&a->entry);
+       kfree(a);
+}
+
 static int free_lpi_range(u32 base, u32 nr_lpis)
 {
-       struct lpi_range *new;
-       int err = 0;
+       struct lpi_range *new, *old;
+
+       new = mk_lpi_range(base, nr_lpis);
+       if (!new)
+               return -ENOMEM;
 
        mutex_lock(&lpi_range_lock);
 
-       new = mk_lpi_range(base, nr_lpis);
-       if (!new) {
-               err = -ENOMEM;
-               goto out;
+       list_for_each_entry_reverse(old, &lpi_range_list, entry) {
+               if (old->base_id < base)
+                       break;
        }
+       /*
+        * old is the last element with ->base_id smaller than base,
+        * so new goes right after it. If there are no elements with
+        * ->base_id smaller than base, &old->entry ends up pointing
+        * at the head of the list, and inserting new at the start of
+        * the list is the right thing to do in that case as well.
+        */
+       list_add(&new->entry, &old->entry);
+       /*
+        * Now check if we can merge with the preceding and/or
+        * following ranges.
+        */
+       merge_lpi_ranges(old, new);
+       merge_lpi_ranges(new, list_next_entry(new, entry));
 
-       list_add(&new->entry, &lpi_range_list);
-       list_sort(NULL, &lpi_range_list, lpi_range_cmp);
-       merge_lpi_ranges();
-out:
        mutex_unlock(&lpi_range_lock);
-       return err;
+       return 0;
 }
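
A worked example of the new insertion path, with illustrative values only:

/*
 * With lpi_range_list holding [8192, +1024) and [16384, +1024), a call
 * to free_lpi_range(9216, 1024) inserts the new range right after
 * [8192, +1024), merges the two into [8192, +2048), and leaves
 * [16384, +1024) untouched because it is not adjacent.  No list_sort()
 * pass over the whole list is needed any more.
 */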
 
 static int __init its_lpi_init(u32 id_bits)
@@ -2487,7 +2484,7 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
        int err = 0;
 
        /*
-        * We ignore "dev" entierely, and rely on the dev_id that has
+        * We ignore "dev" entirely, and rely on the dev_id that has
         * been passed via the scratchpad. This limits this domain's
         * usefulness to upper layers that definitely know that they
         * are built on top of the ITS.
@@ -2566,6 +2563,7 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
 {
        msi_alloc_info_t *info = args;
        struct its_device *its_dev = info->scratchpad[0].ptr;
+       struct its_node *its = its_dev->its;
        irq_hw_number_t hwirq;
        int err;
        int i;
@@ -2574,6 +2572,10 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
        if (err)
                return err;
 
+       err = iommu_dma_prepare_msi(info->desc, its->get_msi_base(its_dev));
+       if (err)
+               return err;
+
        for (i = 0; i < nr_irqs; i++) {
                err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
                if (err)
index fbfa7ff..563a9b3 100644 (file)
@@ -84,6 +84,7 @@ static void mbi_free_msi(struct mbi_range *mbi, unsigned int hwirq,
 static int mbi_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
                                   unsigned int nr_irqs, void *args)
 {
+       msi_alloc_info_t *info = args;
        struct mbi_range *mbi = NULL;
        int hwirq, offset, i, err = 0;
 
@@ -104,6 +105,11 @@ static int mbi_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
 
        hwirq = mbi->spi_start + offset;
 
+       err = iommu_dma_prepare_msi(info->desc,
+                                   mbi_phys_base + GICD_SETSPI_NSR);
+       if (err)
+               return err;
+
        for (i = 0; i < nr_irqs; i++) {
                err = mbi_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
                if (err)
@@ -142,7 +148,7 @@ static void mbi_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
        msg[0].address_lo = lower_32_bits(mbi_phys_base + GICD_SETSPI_NSR);
        msg[0].data = data->parent_data->hwirq;
 
-       iommu_dma_map_msi_msg(data->irq, msg);
+       iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), msg);
 }
 
 #ifdef CONFIG_PCI_MSI
@@ -202,7 +208,7 @@ static void mbi_compose_mbi_msg(struct irq_data *data, struct msi_msg *msg)
        msg[1].address_lo = lower_32_bits(mbi_phys_base + GICD_CLRSPI_NSR);
        msg[1].data = data->parent_data->hwirq;
 
-       iommu_dma_map_msi_msg(data->irq, &msg[1]);
+       iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), &msg[1]);
 }
 
 /* Platform-MSI specific irqchip */
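
The MBI hunks above follow the same conversion as the ITS one: the sleepable iommu_dma_prepare_msi() call moves into the allocation path, and the message-compose path only consumes what was prepared. Here is a small userspace sketch of that two-phase split; msi_cookie, prepare_msi() and compose_msg() are invented stand-ins, not the kernel API.

/* Sketch of the prepare-once / compose-later pattern (illustrative only). */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct msi_cookie {
        uint64_t doorbell;              /* physical doorbell address */
        uint64_t iova;                  /* address the device must use */
};

/* May allocate; call from a context that is allowed to sleep. */
static int prepare_msi(struct msi_cookie **out, uint64_t doorbell)
{
        struct msi_cookie *c = malloc(sizeof(*c));

        if (!c)
                return -1;
        c->doorbell = doorbell;
        c->iova = doorbell ^ 0xffff000000ULL;   /* stand-in for an IOMMU mapping */
        *out = c;
        return 0;
}

/* Must not allocate or sleep; only reads what prepare_msi() cached. */
static void compose_msg(const struct msi_cookie *c,
                        uint32_t *addr_hi, uint32_t *addr_lo)
{
        *addr_hi = (uint32_t)(c->iova >> 32);
        *addr_lo = (uint32_t)c->iova;
}

int main(void)
{
        struct msi_cookie *c;
        uint32_t hi, lo;

        if (prepare_msi(&c, 0x8010040ULL))
                return 1;
        compose_msg(c, &hi, &lo);
        printf("addr_hi=0x%x addr_lo=0x%x\n", (unsigned)hi, (unsigned)lo);
        free(c);
        return 0;
}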
index 88df3d0..290531e 100644 (file)
@@ -144,7 +144,6 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
 {
        struct device_node *np = pdev->dev.of_node;
        struct irqsteer_data *data;
-       struct resource *res;
        u32 irqs_num;
        int i, ret;
 
@@ -152,8 +151,7 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
        if (!data)
                return -ENOMEM;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       data->regs = devm_ioremap_resource(&pdev->dev, res);
+       data->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(data->regs)) {
                dev_err(&pdev->dev, "failed to initialize reg\n");
                return PTR_ERR(data->regs);
index c671b32..669d291 100644 (file)
@@ -100,7 +100,7 @@ static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
                msg->data |= cpumask_first(mask);
        }
 
-       iommu_dma_map_msi_msg(data->irq, msg);
+       iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), msg);
 }
 
 static int ls_scfg_msi_set_affinity(struct irq_data *irq_data,
@@ -141,6 +141,7 @@ static int ls_scfg_msi_domain_irq_alloc(struct irq_domain *domain,
                                        unsigned int nr_irqs,
                                        void *args)
 {
+       msi_alloc_info_t *info = args;
        struct ls_scfg_msi *msi_data = domain->host_data;
        int pos, err = 0;
 
@@ -154,6 +155,10 @@ static int ls_scfg_msi_domain_irq_alloc(struct irq_domain *domain,
                err = -ENOSPC;
        spin_unlock(&msi_data->lock);
 
+       if (err)
+               return err;
+
+       err = iommu_dma_prepare_msi(info->desc, msi_data->msiir_addr);
        if (err)
                return err;
 
index 8c03952..04c05a1 100644 (file)
@@ -389,10 +389,8 @@ static int intc_irqpin_probe(struct platform_device *pdev)
        int k;
 
        p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL);
-       if (!p) {
-               dev_err(dev, "failed to allocate driver data\n");
+       if (!p)
                return -ENOMEM;
-       }
 
        /* deal with driver instance configuration */
        of_property_read_u32(dev->of_node, "sense-bitfield-width",
index 7bd1d4c..e00f2fa 100644 (file)
 #include <linux/irqchip.h>
 #include <linux/irqchip/chained_irq.h>
 #include <linux/irqdomain.h>
+#include <linux/module.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
+#include <linux/of_platform.h>
 #include <linux/syscore_ops.h>
 
 #include <dt-bindings/interrupt-controller/arm-gic.h>
@@ -37,12 +39,6 @@ struct stm32_exti_bank {
 
 #define UNDEF_REG ~0
 
-enum stm32_exti_hwspinlock {
-       HWSPINLOCK_UNKNOWN,
-       HWSPINLOCK_NONE,
-       HWSPINLOCK_READY,
-};
-
 struct stm32_desc_irq {
        u32 exti;
        u32 irq_parent;
@@ -69,8 +65,6 @@ struct stm32_exti_host_data {
        void __iomem *base;
        struct stm32_exti_chip_data *chips_data;
        const struct stm32_exti_drv_data *drv_data;
-       struct device_node *node;
-       enum stm32_exti_hwspinlock hwlock_state;
        struct hwspinlock *hwlock;
 };
 
@@ -285,49 +279,27 @@ static int stm32_exti_set_type(struct irq_data *d,
 
 static int stm32_exti_hwspin_lock(struct stm32_exti_chip_data *chip_data)
 {
-       struct stm32_exti_host_data *host_data = chip_data->host_data;
-       struct hwspinlock *hwlock;
-       int id, ret = 0, timeout = 0;
-
-       /* first time, check for hwspinlock availability */
-       if (unlikely(host_data->hwlock_state == HWSPINLOCK_UNKNOWN)) {
-               id = of_hwspin_lock_get_id(host_data->node, 0);
-               if (id >= 0) {
-                       hwlock = hwspin_lock_request_specific(id);
-                       if (hwlock) {
-                               /* found valid hwspinlock */
-                               host_data->hwlock_state = HWSPINLOCK_READY;
-                               host_data->hwlock = hwlock;
-                               pr_debug("%s hwspinlock = %d\n", __func__, id);
-                       } else {
-                               host_data->hwlock_state = HWSPINLOCK_NONE;
-                       }
-               } else if (id != -EPROBE_DEFER) {
-                       host_data->hwlock_state = HWSPINLOCK_NONE;
-               } else {
-                       /* hwspinlock driver shall be ready at that stage */
-                       ret = -EPROBE_DEFER;
-               }
-       }
+       int ret, timeout = 0;
 
-       if (likely(host_data->hwlock_state == HWSPINLOCK_READY)) {
-               /*
-                * Use the x_raw API since we are under spin_lock protection.
-                * Do not use the x_timeout API because we are under irq_disable
-                * mode (see __setup_irq())
-                */
-               do {
-                       ret = hwspin_trylock_raw(host_data->hwlock);
-                       if (!ret)
-                               return 0;
-
-                       udelay(HWSPNLCK_RETRY_DELAY);
-                       timeout += HWSPNLCK_RETRY_DELAY;
-               } while (timeout < HWSPNLCK_TIMEOUT);
-
-               if (ret == -EBUSY)
-                       ret = -ETIMEDOUT;
-       }
+       if (!chip_data->host_data->hwlock)
+               return 0;
+
+       /*
+        * Use the x_raw API since we are under spin_lock protection.
+        * Do not use the x_timeout API because we are under irq_disable
+        * mode (see __setup_irq())
+        */
+       do {
+               ret = hwspin_trylock_raw(chip_data->host_data->hwlock);
+               if (!ret)
+                       return 0;
+
+               udelay(HWSPNLCK_RETRY_DELAY);
+               timeout += HWSPNLCK_RETRY_DELAY;
+       } while (timeout < HWSPNLCK_TIMEOUT);
+
+       if (ret == -EBUSY)
+               ret = -ETIMEDOUT;
 
        if (ret)
                pr_err("%s can't get hwspinlock (%d)\n", __func__, ret);
@@ -337,7 +309,7 @@ static int stm32_exti_hwspin_lock(struct stm32_exti_chip_data *chip_data)
 
 static void stm32_exti_hwspin_unlock(struct stm32_exti_chip_data *chip_data)
 {
-       if (likely(chip_data->host_data->hwlock_state == HWSPINLOCK_READY))
+       if (chip_data->host_data->hwlock)
                hwspin_unlock_raw(chip_data->host_data->hwlock);
 }
 
@@ -586,8 +558,7 @@ static int stm32_exti_h_set_affinity(struct irq_data *d,
        return -EINVAL;
 }
 
-#ifdef CONFIG_PM
-static int stm32_exti_h_suspend(void)
+static int __maybe_unused stm32_exti_h_suspend(void)
 {
        struct stm32_exti_chip_data *chip_data;
        int i;
@@ -602,7 +573,7 @@ static int stm32_exti_h_suspend(void)
        return 0;
 }
 
-static void stm32_exti_h_resume(void)
+static void __maybe_unused stm32_exti_h_resume(void)
 {
        struct stm32_exti_chip_data *chip_data;
        int i;
@@ -616,17 +587,22 @@ static void stm32_exti_h_resume(void)
 }
 
 static struct syscore_ops stm32_exti_h_syscore_ops = {
+#ifdef CONFIG_PM_SLEEP
        .suspend        = stm32_exti_h_suspend,
        .resume         = stm32_exti_h_resume,
+#endif
 };
 
-static void stm32_exti_h_syscore_init(void)
+static void stm32_exti_h_syscore_init(struct stm32_exti_host_data *host_data)
 {
+       stm32_host_data = host_data;
        register_syscore_ops(&stm32_exti_h_syscore_ops);
 }
-#else
-static inline void stm32_exti_h_syscore_init(void) {}
-#endif
+
+static void stm32_exti_h_syscore_deinit(void)
+{
+       unregister_syscore_ops(&stm32_exti_h_syscore_ops);
+}
 
 static struct irq_chip stm32_exti_h_chip = {
        .name                   = "stm32-exti-h",
@@ -683,8 +659,6 @@ stm32_exti_host_data *stm32_exti_host_init(const struct stm32_exti_drv_data *dd,
                return NULL;
 
        host_data->drv_data = dd;
-       host_data->node = node;
-       host_data->hwlock_state = HWSPINLOCK_UNKNOWN;
        host_data->chips_data = kcalloc(dd->bank_nr,
                                        sizeof(struct stm32_exti_chip_data),
                                        GFP_KERNEL);
@@ -711,7 +685,8 @@ free_host_data:
 
 static struct
 stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data,
-                                          u32 bank_idx)
+                                          u32 bank_idx,
+                                          struct device_node *node)
 {
        const struct stm32_exti_bank *stm32_bank;
        struct stm32_exti_chip_data *chip_data;
@@ -731,7 +706,7 @@ stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data,
        writel_relaxed(0, base + stm32_bank->imr_ofst);
        writel_relaxed(0, base + stm32_bank->emr_ofst);
 
-       pr_info("%pOF: bank%d\n", h_data->node, bank_idx);
+       pr_info("%pOF: bank%d\n", node, bank_idx);
 
        return chip_data;
 }
@@ -771,7 +746,7 @@ static int __init stm32_exti_init(const struct stm32_exti_drv_data *drv_data,
                struct stm32_exti_chip_data *chip_data;
 
                stm32_bank = drv_data->exti_banks[i];
-               chip_data = stm32_exti_chip_init(host_data, i);
+               chip_data = stm32_exti_chip_init(host_data, i, node);
 
                gc = irq_get_domain_generic_chip(domain, i * IRQS_PER_BANK);
 
@@ -815,50 +790,130 @@ static const struct irq_domain_ops stm32_exti_h_domain_ops = {
        .xlate = irq_domain_xlate_twocell,
 };
 
-static int
-__init stm32_exti_hierarchy_init(const struct stm32_exti_drv_data *drv_data,
-                                struct device_node *node,
-                                struct device_node *parent)
+static void stm32_exti_remove_irq(void *data)
+{
+       struct irq_domain *domain = data;
+
+       irq_domain_remove(domain);
+}
+
+static int stm32_exti_remove(struct platform_device *pdev)
+{
+       stm32_exti_h_syscore_deinit();
+       return 0;
+}
+
+static int stm32_exti_probe(struct platform_device *pdev)
 {
+       int ret, i;
+       struct device *dev = &pdev->dev;
+       struct device_node *np = dev->of_node;
        struct irq_domain *parent_domain, *domain;
        struct stm32_exti_host_data *host_data;
-       int ret, i;
+       const struct stm32_exti_drv_data *drv_data;
+       struct resource *res;
 
-       parent_domain = irq_find_host(parent);
-       if (!parent_domain) {
-               pr_err("interrupt-parent not found\n");
-               return -EINVAL;
+       host_data = devm_kzalloc(dev, sizeof(*host_data), GFP_KERNEL);
+       if (!host_data)
+               return -ENOMEM;
+
+       /* check for optional hwspinlock which may not be available yet */
+       ret = of_hwspin_lock_get_id(np, 0);
+       if (ret == -EPROBE_DEFER)
+               /* hwspinlock framework not yet ready */
+               return ret;
+
+       if (ret >= 0) {
+               host_data->hwlock = devm_hwspin_lock_request_specific(dev, ret);
+               if (!host_data->hwlock) {
+                       dev_err(dev, "Failed to request hwspinlock\n");
+                       return -EINVAL;
+               }
+       } else if (ret != -ENOENT) {
+               /* note: ENOENT is a valid case (means 'no hwspinlock') */
+               dev_err(dev, "Failed to get hwspinlock\n");
+               return ret;
        }
 
-       host_data = stm32_exti_host_init(drv_data, node);
-       if (!host_data)
+       /* initialize host_data */
+       drv_data = of_device_get_match_data(dev);
+       if (!drv_data) {
+               dev_err(dev, "no of match data\n");
+               return -ENODEV;
+       }
+       host_data->drv_data = drv_data;
+
+       host_data->chips_data = devm_kcalloc(dev, drv_data->bank_nr,
+                                            sizeof(*host_data->chips_data),
+                                            GFP_KERNEL);
+       if (!host_data->chips_data)
                return -ENOMEM;
 
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       host_data->base = devm_ioremap_resource(dev, res);
+       if (IS_ERR(host_data->base)) {
+               dev_err(dev, "Unable to map registers\n");
+               return PTR_ERR(host_data->base);
+       }
+
        for (i = 0; i < drv_data->bank_nr; i++)
-               stm32_exti_chip_init(host_data, i);
+               stm32_exti_chip_init(host_data, i, np);
+
+       parent_domain = irq_find_host(of_irq_find_parent(np));
+       if (!parent_domain) {
+               dev_err(dev, "GIC interrupt-parent not found\n");
+               return -EINVAL;
+       }
 
        domain = irq_domain_add_hierarchy(parent_domain, 0,
                                          drv_data->bank_nr * IRQS_PER_BANK,
-                                         node, &stm32_exti_h_domain_ops,
+                                         np, &stm32_exti_h_domain_ops,
                                          host_data);
 
        if (!domain) {
-               pr_err("%pOFn: Could not register exti domain.\n", node);
-               ret = -ENOMEM;
-               goto out_unmap;
+               dev_err(dev, "Could not register exti domain\n");
+               return -ENOMEM;
        }
 
-       stm32_exti_h_syscore_init();
+       ret = devm_add_action_or_reset(dev, stm32_exti_remove_irq, domain);
+       if (ret)
+               return ret;
+
+       stm32_exti_h_syscore_init(host_data);
 
        return 0;
+}
 
-out_unmap:
-       iounmap(host_data->base);
-       kfree(host_data->chips_data);
-       kfree(host_data);
-       return ret;
+/* platform driver only for MP1 */
+static const struct of_device_id stm32_exti_ids[] = {
+       { .compatible = "st,stm32mp1-exti", .data = &stm32mp1_drv_data},
+       {},
+};
+MODULE_DEVICE_TABLE(of, stm32_exti_ids);
+
+static struct platform_driver stm32_exti_driver = {
+       .probe          = stm32_exti_probe,
+       .remove         = stm32_exti_remove,
+       .driver         = {
+               .name   = "stm32_exti",
+               .of_match_table = stm32_exti_ids,
+       },
+};
+
+static int __init stm32_exti_arch_init(void)
+{
+       return platform_driver_register(&stm32_exti_driver);
 }
 
+static void __exit stm32_exti_arch_exit(void)
+{
+       return platform_driver_unregister(&stm32_exti_driver);
+}
+
+arch_initcall(stm32_exti_arch_init);
+module_exit(stm32_exti_arch_exit);
+
+/* no platform driver for F4 and H7 */
 static int __init stm32f4_exti_of_init(struct device_node *np,
                                       struct device_node *parent)
 {
@@ -874,11 +929,3 @@ static int __init stm32h7_exti_of_init(struct device_node *np,
 }
 
 IRQCHIP_DECLARE(stm32h7_exti, "st,stm32h7-exti", stm32h7_exti_of_init);
-
-static int __init stm32mp1_exti_of_init(struct device_node *np,
-                                       struct device_node *parent)
-{
-       return stm32_exti_hierarchy_init(&stm32mp1_drv_data, np, parent);
-}
-
-IRQCHIP_DECLARE(stm32mp1_exti, "st,stm32mp1-exti", stm32mp1_exti_of_init);
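
The stm32-exti conversion above turns the MP1 variant into a platform driver so that the optional hwspinlock can be requested with devm_* helpers at probe time: -EPROBE_DEFER is propagated, -ENOENT is treated as "no hwspinlock", and any other error is fatal. A hedged, standalone sketch of that optional-resource decision follows; EPROBE_DEFER is defined locally here and get_optional_lock_id() is invented for illustration.

/* Standalone sketch of the optional-hwspinlock probe logic (not the
 * hwspinlock API). */
#include <errno.h>
#include <stdio.h>

#define EPROBE_DEFER 517        /* kernel-internal errno, defined for the sketch */

static int get_optional_lock_id(int have_it)
{
        if (have_it)
                return 7;               /* a valid lock id */
        return -ENOENT;                 /* property simply absent */
}

static int probe(int have_lock)
{
        int ret = get_optional_lock_id(have_lock);

        if (ret == -EPROBE_DEFER)
                return ret;             /* provider not ready yet, retry later */
        if (ret >= 0)
                printf("using hwspinlock %d\n", ret);
        else if (ret != -ENOENT)
                return ret;             /* any other error is fatal */
        else
                printf("no hwspinlock, running without it\n");
        return 0;
}

int main(void)
{
        probe(1);
        probe(0);
        return 0;
}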
diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c
new file mode 100644 (file)
index 0000000..011b60a
--- /dev/null
@@ -0,0 +1,615 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments' K3 Interrupt Aggregator irqchip driver
+ *
+ * Copyright (C) 2018-2019 Texas Instruments Incorporated - http://www.ti.com/
+ *     Lokesh Vutla <lokeshvutla@ti.com>
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/interrupt.h>
+#include <linux/msi.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/soc/ti/ti_sci_inta_msi.h>
+#include <linux/soc/ti/ti_sci_protocol.h>
+#include <asm-generic/msi.h>
+
+#define TI_SCI_DEV_ID_MASK     0xffff
+#define TI_SCI_DEV_ID_SHIFT    16
+#define TI_SCI_IRQ_ID_MASK     0xffff
+#define TI_SCI_IRQ_ID_SHIFT    0
+#define HWIRQ_TO_DEVID(hwirq)  (((hwirq) >> (TI_SCI_DEV_ID_SHIFT)) & \
+                                (TI_SCI_DEV_ID_MASK))
+#define HWIRQ_TO_IRQID(hwirq)  ((hwirq) & (TI_SCI_IRQ_ID_MASK))
+#define TO_HWIRQ(dev, index)   ((((dev) & TI_SCI_DEV_ID_MASK) << \
+                                TI_SCI_DEV_ID_SHIFT) | \
+                               ((index) & TI_SCI_IRQ_ID_MASK))
+
+#define MAX_EVENTS_PER_VINT    64
+#define VINT_ENABLE_SET_OFFSET 0x0
+#define VINT_ENABLE_CLR_OFFSET 0x8
+#define VINT_STATUS_OFFSET     0x18
+
+/**
+ * struct ti_sci_inta_event_desc - Description of an event coming to
+ *                                Interrupt Aggregator. This serves
+ *                                as a mapping table for global event,
+ *                                hwirq and vint bit.
+ * @global_event:      Global event number corresponding to this event
+ * @hwirq:             Hwirq of the incoming interrupt
+ * @vint_bit:          Corresponding vint bit to which this event is attached.
+ */
+struct ti_sci_inta_event_desc {
+       u16 global_event;
+       u32 hwirq;
+       u8 vint_bit;
+};
+
+/**
+ * struct ti_sci_inta_vint_desc - Description of a virtual interrupt coming out
+ *                               of Interrupt Aggregator.
+ * @domain:            Pointer to IRQ domain to which this vint belongs.
+ * @list:              List entry for the vint list
+ * @event_map:         Bitmap to manage the allocation of events to vint.
+ * @events:            Array of event descriptors assigned to this vint.
+ * @parent_virq:       Linux IRQ number that gets attached to parent
+ * @vint_id:           TISCI vint ID
+ */
+struct ti_sci_inta_vint_desc {
+       struct irq_domain *domain;
+       struct list_head list;
+       DECLARE_BITMAP(event_map, MAX_EVENTS_PER_VINT);
+       struct ti_sci_inta_event_desc events[MAX_EVENTS_PER_VINT];
+       unsigned int parent_virq;
+       u16 vint_id;
+};
+
+/**
+ * struct ti_sci_inta_irq_domain - Structure representing a TISCI based
+ *                                Interrupt Aggregator IRQ domain.
+ * @sci:               Pointer to TISCI handle
+ * @vint:              TISCI resource pointer representing IA interrupts.
+ * @global_event:      TISCI resource pointer representing global events.
+ * @vint_list:         List of the vints active in the system
+ * @vint_mutex:                Mutex to protect vint_list
+ * @base:              Base address of the memory mapped IO registers
+ * @pdev:              Pointer to platform device.
+ */
+struct ti_sci_inta_irq_domain {
+       const struct ti_sci_handle *sci;
+       struct ti_sci_resource *vint;
+       struct ti_sci_resource *global_event;
+       struct list_head vint_list;
+       /* Mutex to protect vint list */
+       struct mutex vint_mutex;
+       void __iomem *base;
+       struct platform_device *pdev;
+};
+
+#define to_vint_desc(e, i) container_of(e, struct ti_sci_inta_vint_desc, \
+                                       events[i])
+
+/**
+ * ti_sci_inta_irq_handler() - Chained IRQ handler for the vint irqs
+ * @desc:      Pointer to irq_desc corresponding to the irq
+ */
+static void ti_sci_inta_irq_handler(struct irq_desc *desc)
+{
+       struct ti_sci_inta_vint_desc *vint_desc;
+       struct ti_sci_inta_irq_domain *inta;
+       struct irq_domain *domain;
+       unsigned int virq, bit;
+       unsigned long val;
+
+       vint_desc = irq_desc_get_handler_data(desc);
+       domain = vint_desc->domain;
+       inta = domain->host_data;
+
+       chained_irq_enter(irq_desc_get_chip(desc), desc);
+
+       val = readq_relaxed(inta->base + vint_desc->vint_id * 0x1000 +
+                           VINT_STATUS_OFFSET);
+
+       for_each_set_bit(bit, &val, MAX_EVENTS_PER_VINT) {
+               virq = irq_find_mapping(domain, vint_desc->events[bit].hwirq);
+               if (virq)
+                       generic_handle_irq(virq);
+       }
+
+       chained_irq_exit(irq_desc_get_chip(desc), desc);
+}
+
+/**
+ * ti_sci_inta_alloc_parent_irq() - Allocate parent irq to Interrupt aggregator
+ * @domain:    IRQ domain corresponding to Interrupt Aggregator
+ *
+ * Return pointer to vint_desc if all went well else corresponding error pointer.
+ */
+static struct ti_sci_inta_vint_desc *ti_sci_inta_alloc_parent_irq(struct irq_domain *domain)
+{
+       struct ti_sci_inta_irq_domain *inta = domain->host_data;
+       struct ti_sci_inta_vint_desc *vint_desc;
+       struct irq_fwspec parent_fwspec;
+       unsigned int parent_virq;
+       u16 vint_id;
+
+       vint_id = ti_sci_get_free_resource(inta->vint);
+       if (vint_id == TI_SCI_RESOURCE_NULL)
+               return ERR_PTR(-EINVAL);
+
+       vint_desc = kzalloc(sizeof(*vint_desc), GFP_KERNEL);
+       if (!vint_desc)
+               return ERR_PTR(-ENOMEM);
+
+       vint_desc->domain = domain;
+       vint_desc->vint_id = vint_id;
+       INIT_LIST_HEAD(&vint_desc->list);
+
+       parent_fwspec.fwnode = of_node_to_fwnode(of_irq_find_parent(dev_of_node(&inta->pdev->dev)));
+       parent_fwspec.param_count = 2;
+       parent_fwspec.param[0] = inta->pdev->id;
+       parent_fwspec.param[1] = vint_desc->vint_id;
+
+       parent_virq = irq_create_fwspec_mapping(&parent_fwspec);
+       if (parent_virq <= 0) {
+               kfree(vint_desc);
+               return ERR_PTR(parent_virq);
+       }
+       vint_desc->parent_virq = parent_virq;
+
+       list_add_tail(&vint_desc->list, &inta->vint_list);
+       irq_set_chained_handler_and_data(vint_desc->parent_virq,
+                                        ti_sci_inta_irq_handler, vint_desc);
+
+       return vint_desc;
+}
+
+/**
+ * ti_sci_inta_alloc_event() - Attach an event to an IA vint.
+ * @vint_desc: Pointer to vint_desc to which the event gets attached
+ * @free_bit:  Bit inside vint to which event gets attached
+ * @hwirq:     hwirq of the input event
+ *
+ * Return event_desc pointer if all went ok else appropriate error value.
+ */
+static struct ti_sci_inta_event_desc *ti_sci_inta_alloc_event(struct ti_sci_inta_vint_desc *vint_desc,
+                                                             u16 free_bit,
+                                                             u32 hwirq)
+{
+       struct ti_sci_inta_irq_domain *inta = vint_desc->domain->host_data;
+       struct ti_sci_inta_event_desc *event_desc;
+       u16 dev_id, dev_index;
+       int err;
+
+       dev_id = HWIRQ_TO_DEVID(hwirq);
+       dev_index = HWIRQ_TO_IRQID(hwirq);
+
+       event_desc = &vint_desc->events[free_bit];
+       event_desc->hwirq = hwirq;
+       event_desc->vint_bit = free_bit;
+       event_desc->global_event = ti_sci_get_free_resource(inta->global_event);
+       if (event_desc->global_event == TI_SCI_RESOURCE_NULL)
+               return ERR_PTR(-EINVAL);
+
+       err = inta->sci->ops.rm_irq_ops.set_event_map(inta->sci,
+                                                     dev_id, dev_index,
+                                                     inta->pdev->id,
+                                                     vint_desc->vint_id,
+                                                     event_desc->global_event,
+                                                     free_bit);
+       if (err)
+               goto free_global_event;
+
+       return event_desc;
+free_global_event:
+       ti_sci_release_resource(inta->global_event, event_desc->global_event);
+       return ERR_PTR(err);
+}
+
+/**
+ * ti_sci_inta_alloc_irq() -  Allocate an irq within INTA domain
+ * @domain:    irq_domain pointer corresponding to INTA
+ * @hwirq:     hwirq of the input event
+ *
+ * Note: Allocation happens in the following manner:
+ *     - Find a free bit available in any of the vints available in the list.
+ *     - If not found, allocate a vint from the vint pool
+ *     - Attach the free bit to input hwirq.
+ * Return event_desc if all went ok else appropriate error value.
+ */
+static struct ti_sci_inta_event_desc *ti_sci_inta_alloc_irq(struct irq_domain *domain,
+                                                           u32 hwirq)
+{
+       struct ti_sci_inta_irq_domain *inta = domain->host_data;
+       struct ti_sci_inta_vint_desc *vint_desc = NULL;
+       struct ti_sci_inta_event_desc *event_desc;
+       u16 free_bit;
+
+       mutex_lock(&inta->vint_mutex);
+       list_for_each_entry(vint_desc, &inta->vint_list, list) {
+               free_bit = find_first_zero_bit(vint_desc->event_map,
+                                              MAX_EVENTS_PER_VINT);
+               if (free_bit != MAX_EVENTS_PER_VINT) {
+                       set_bit(free_bit, vint_desc->event_map);
+                       goto alloc_event;
+               }
+       }
+
+       /* No free bits available. Allocate a new vint */
+       vint_desc = ti_sci_inta_alloc_parent_irq(domain);
+       if (IS_ERR(vint_desc)) {
+               mutex_unlock(&inta->vint_mutex);
+               return ERR_PTR(PTR_ERR(vint_desc));
+       }
+
+       free_bit = find_first_zero_bit(vint_desc->event_map,
+                                      MAX_EVENTS_PER_VINT);
+       set_bit(free_bit, vint_desc->event_map);
+
+alloc_event:
+       event_desc = ti_sci_inta_alloc_event(vint_desc, free_bit, hwirq);
+       if (IS_ERR(event_desc))
+               clear_bit(free_bit, vint_desc->event_map);
+
+       mutex_unlock(&inta->vint_mutex);
+       return event_desc;
+}
+
+/**
+ * ti_sci_inta_free_parent_irq() - Free a parent irq to INTA
+ * @inta:      Pointer to inta domain.
+ * @vint_desc: Pointer to vint_desc that needs to be freed.
+ */
+static void ti_sci_inta_free_parent_irq(struct ti_sci_inta_irq_domain *inta,
+                                       struct ti_sci_inta_vint_desc *vint_desc)
+{
+       if (find_first_bit(vint_desc->event_map, MAX_EVENTS_PER_VINT) == MAX_EVENTS_PER_VINT) {
+               list_del(&vint_desc->list);
+               ti_sci_release_resource(inta->vint, vint_desc->vint_id);
+               irq_dispose_mapping(vint_desc->parent_virq);
+               kfree(vint_desc);
+       }
+}
+
+/**
+ * ti_sci_inta_free_irq() - Free an IRQ within INTA domain
+ * @event_desc:        Pointer to event_desc that needs to be freed.
+ * @hwirq:     Hwirq number within INTA domain that needs to be freed
+ */
+static void ti_sci_inta_free_irq(struct ti_sci_inta_event_desc *event_desc,
+                                u32 hwirq)
+{
+       struct ti_sci_inta_vint_desc *vint_desc;
+       struct ti_sci_inta_irq_domain *inta;
+
+       vint_desc = to_vint_desc(event_desc, event_desc->vint_bit);
+       inta = vint_desc->domain->host_data;
+       /* free event irq */
+       mutex_lock(&inta->vint_mutex);
+       inta->sci->ops.rm_irq_ops.free_event_map(inta->sci,
+                                                HWIRQ_TO_DEVID(hwirq),
+                                                HWIRQ_TO_IRQID(hwirq),
+                                                inta->pdev->id,
+                                                vint_desc->vint_id,
+                                                event_desc->global_event,
+                                                event_desc->vint_bit);
+
+       clear_bit(event_desc->vint_bit, vint_desc->event_map);
+       ti_sci_release_resource(inta->global_event, event_desc->global_event);
+       event_desc->global_event = TI_SCI_RESOURCE_NULL;
+       event_desc->hwirq = 0;
+
+       ti_sci_inta_free_parent_irq(inta, vint_desc);
+       mutex_unlock(&inta->vint_mutex);
+}
+
+/**
+ * ti_sci_inta_request_resources() - Allocate resources for input irq
+ * @data: Pointer to corresponding irq_data
+ *
+ * Note: This is the core api where the actual allocation happens for input
+ *      hwirq. This allocation involves creating a parent irq for vint.
+ *      Doing this in irq_domain_ops.alloc() would deadlock, so the
+ *      allocation is done in request_resources() instead.
+ *
+ * Return: 0 if all went well else corresponding error.
+ */
+static int ti_sci_inta_request_resources(struct irq_data *data)
+{
+       struct ti_sci_inta_event_desc *event_desc;
+
+       event_desc = ti_sci_inta_alloc_irq(data->domain, data->hwirq);
+       if (IS_ERR(event_desc))
+               return PTR_ERR(event_desc);
+
+       data->chip_data = event_desc;
+
+       return 0;
+}
+
+/**
+ * ti_sci_inta_release_resources() - Release resources for input irq
+ * @data: Pointer to corresponding irq_data
+ *
+ * Note: Corresponding to request_resources(), all the unmapping and deletion
+ *      of parent vint irqs happens in this api.
+ */
+static void ti_sci_inta_release_resources(struct irq_data *data)
+{
+       struct ti_sci_inta_event_desc *event_desc;
+
+       event_desc = irq_data_get_irq_chip_data(data);
+       ti_sci_inta_free_irq(event_desc, data->hwirq);
+}
+
+/**
+ * ti_sci_inta_manage_event() - Control the event based on the offset
+ * @data:      Pointer to corresponding irq_data
+ * @offset:    register offset using which event is controlled.
+ */
+static void ti_sci_inta_manage_event(struct irq_data *data, u32 offset)
+{
+       struct ti_sci_inta_event_desc *event_desc;
+       struct ti_sci_inta_vint_desc *vint_desc;
+       struct ti_sci_inta_irq_domain *inta;
+
+       event_desc = irq_data_get_irq_chip_data(data);
+       vint_desc = to_vint_desc(event_desc, event_desc->vint_bit);
+       inta = data->domain->host_data;
+
+       writeq_relaxed(BIT(event_desc->vint_bit),
+                      inta->base + vint_desc->vint_id * 0x1000 + offset);
+}
+
+/**
+ * ti_sci_inta_mask_irq() - Mask an event
+ * @data:      Pointer to corresponding irq_data
+ */
+static void ti_sci_inta_mask_irq(struct irq_data *data)
+{
+       ti_sci_inta_manage_event(data, VINT_ENABLE_CLR_OFFSET);
+}
+
+/**
+ * ti_sci_inta_unmask_irq() - Unmask an event
+ * @data:      Pointer to corresponding irq_data
+ */
+static void ti_sci_inta_unmask_irq(struct irq_data *data)
+{
+       ti_sci_inta_manage_event(data, VINT_ENABLE_SET_OFFSET);
+}
+
+/**
+ * ti_sci_inta_ack_irq() - Ack an event
+ * @data:      Pointer to corresponding irq_data
+ */
+static void ti_sci_inta_ack_irq(struct irq_data *data)
+{
+       /*
+        * Do not clear the event if hardware is capable of sending
+        * a down event.
+        */
+       if (irqd_get_trigger_type(data) != IRQF_TRIGGER_HIGH)
+               ti_sci_inta_manage_event(data, VINT_STATUS_OFFSET);
+}
+
+static int ti_sci_inta_set_affinity(struct irq_data *d,
+                                   const struct cpumask *mask_val, bool force)
+{
+       return -EINVAL;
+}
+
+/**
+ * ti_sci_inta_set_type() - Update the trigger type of the irq.
+ * @data:      Pointer to corresponding irq_data
+ * @type:      Trigger type as specified by user
+ *
+ * Note: This updates the handle_irq callback for level msi.
+ *
+ * Return 0 if all went well else appropriate error.
+ */
+static int ti_sci_inta_set_type(struct irq_data *data, unsigned int type)
+{
+       /*
+        * .alloc default sets handle_edge_irq. But if the user specifies
+        * that IRQ is level MSI, then update the handle to handle_level_irq
+        */
+       switch (type & IRQ_TYPE_SENSE_MASK) {
+       case IRQF_TRIGGER_HIGH:
+               irq_set_handler_locked(data, handle_level_irq);
+               return 0;
+       case IRQF_TRIGGER_RISING:
+               return 0;
+       default:
+               return -EINVAL;
+       }
+
+       return -EINVAL;
+}
+
+static struct irq_chip ti_sci_inta_irq_chip = {
+       .name                   = "INTA",
+       .irq_ack                = ti_sci_inta_ack_irq,
+       .irq_mask               = ti_sci_inta_mask_irq,
+       .irq_set_type           = ti_sci_inta_set_type,
+       .irq_unmask             = ti_sci_inta_unmask_irq,
+       .irq_set_affinity       = ti_sci_inta_set_affinity,
+       .irq_request_resources  = ti_sci_inta_request_resources,
+       .irq_release_resources  = ti_sci_inta_release_resources,
+};
+
+/**
+ * ti_sci_inta_irq_domain_free() - Free an IRQ from the IRQ domain
+ * @domain:    Domain to which the irqs belong
+ * @virq:      base linux virtual IRQ to be freed.
+ * @nr_irqs:   Number of continuous irqs to be freed
+ */
+static void ti_sci_inta_irq_domain_free(struct irq_domain *domain,
+                                       unsigned int virq, unsigned int nr_irqs)
+{
+       struct irq_data *data = irq_domain_get_irq_data(domain, virq);
+
+       irq_domain_reset_irq_data(data);
+}
+
+/**
+ * ti_sci_inta_irq_domain_alloc() - Allocate Interrupt aggregator IRQs
+ * @domain:    Pointer to the interrupt aggregator IRQ domain
+ * @virq:      Corresponding Linux virtual IRQ number
+ * @nr_irqs:   Continuous irqs to be allocated
+ * @data:      Pointer to firmware specifier
+ *
+ * No actual allocation happens here.
+ *
+ * Return 0 if all went well else appropriate error value.
+ */
+static int ti_sci_inta_irq_domain_alloc(struct irq_domain *domain,
+                                       unsigned int virq, unsigned int nr_irqs,
+                                       void *data)
+{
+       msi_alloc_info_t *arg = data;
+
+       irq_domain_set_info(domain, virq, arg->hwirq, &ti_sci_inta_irq_chip,
+                           NULL, handle_edge_irq, NULL, NULL);
+
+       return 0;
+}
+
+static const struct irq_domain_ops ti_sci_inta_irq_domain_ops = {
+       .free           = ti_sci_inta_irq_domain_free,
+       .alloc          = ti_sci_inta_irq_domain_alloc,
+};
+
+static struct irq_chip ti_sci_inta_msi_irq_chip = {
+       .name                   = "MSI-INTA",
+       .flags                  = IRQCHIP_SUPPORTS_LEVEL_MSI,
+};
+
+static void ti_sci_inta_msi_set_desc(msi_alloc_info_t *arg,
+                                    struct msi_desc *desc)
+{
+       struct platform_device *pdev = to_platform_device(desc->dev);
+
+       arg->desc = desc;
+       arg->hwirq = TO_HWIRQ(pdev->id, desc->inta.dev_index);
+}
+
+static struct msi_domain_ops ti_sci_inta_msi_ops = {
+       .set_desc       = ti_sci_inta_msi_set_desc,
+};
+
+static struct msi_domain_info ti_sci_inta_msi_domain_info = {
+       .flags  = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+                  MSI_FLAG_LEVEL_CAPABLE),
+       .ops    = &ti_sci_inta_msi_ops,
+       .chip   = &ti_sci_inta_msi_irq_chip,
+};
+
+static int ti_sci_inta_irq_domain_probe(struct platform_device *pdev)
+{
+       struct irq_domain *parent_domain, *domain, *msi_domain;
+       struct device_node *parent_node, *node;
+       struct ti_sci_inta_irq_domain *inta;
+       struct device *dev = &pdev->dev;
+       struct resource *res;
+       int ret;
+
+       node = dev_of_node(dev);
+       parent_node = of_irq_find_parent(node);
+       if (!parent_node) {
+               dev_err(dev, "Failed to get IRQ parent node\n");
+               return -ENODEV;
+       }
+
+       parent_domain = irq_find_host(parent_node);
+       if (!parent_domain)
+               return -EPROBE_DEFER;
+
+       inta = devm_kzalloc(dev, sizeof(*inta), GFP_KERNEL);
+       if (!inta)
+               return -ENOMEM;
+
+       inta->pdev = pdev;
+       inta->sci = devm_ti_sci_get_by_phandle(dev, "ti,sci");
+       if (IS_ERR(inta->sci)) {
+               ret = PTR_ERR(inta->sci);
+               if (ret != -EPROBE_DEFER)
+                       dev_err(dev, "ti,sci read fail %d\n", ret);
+               inta->sci = NULL;
+               return ret;
+       }
+
+       ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id", &pdev->id);
+       if (ret) {
+               dev_err(dev, "missing 'ti,sci-dev-id' property\n");
+               return -EINVAL;
+       }
+
+       inta->vint = devm_ti_sci_get_of_resource(inta->sci, dev, pdev->id,
+                                                "ti,sci-rm-range-vint");
+       if (IS_ERR(inta->vint)) {
+               dev_err(dev, "VINT resource allocation failed\n");
+               return PTR_ERR(inta->vint);
+       }
+
+       inta->global_event = devm_ti_sci_get_of_resource(inta->sci, dev, pdev->id,
+                                               "ti,sci-rm-range-global-event");
+       if (IS_ERR(inta->global_event)) {
+               dev_err(dev, "Global event resource allocation failed\n");
+               return PTR_ERR(inta->global_event);
+       }
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       inta->base = devm_ioremap_resource(dev, res);
+       if (IS_ERR(inta->base))
+               return -ENODEV;
+
+       domain = irq_domain_add_linear(dev_of_node(dev),
+                                      ti_sci_get_num_resources(inta->vint),
+                                      &ti_sci_inta_irq_domain_ops, inta);
+       if (!domain) {
+               dev_err(dev, "Failed to allocate IRQ domain\n");
+               return -ENOMEM;
+       }
+
+       msi_domain = ti_sci_inta_msi_create_irq_domain(of_node_to_fwnode(node),
+                                               &ti_sci_inta_msi_domain_info,
+                                               domain);
+       if (!msi_domain) {
+               irq_domain_remove(domain);
+               dev_err(dev, "Failed to allocate msi domain\n");
+               return -ENOMEM;
+       }
+
+       INIT_LIST_HEAD(&inta->vint_list);
+       mutex_init(&inta->vint_mutex);
+
+       return 0;
+}
+
+static const struct of_device_id ti_sci_inta_irq_domain_of_match[] = {
+       { .compatible = "ti,sci-inta", },
+       { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, ti_sci_inta_irq_domain_of_match);
+
+static struct platform_driver ti_sci_inta_irq_domain_driver = {
+       .probe = ti_sci_inta_irq_domain_probe,
+       .driver = {
+               .name = "ti-sci-inta",
+               .of_match_table = ti_sci_inta_irq_domain_of_match,
+       },
+};
+module_platform_driver(ti_sci_inta_irq_domain_driver);
+
+MODULE_AUTHOR("Lokesh Vutla <lokeshvutla@ticom>");
+MODULE_DESCRIPTION("K3 Interrupt Aggregator driver over TI SCI protocol");
+MODULE_LICENSE("GPL v2");
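
The allocation policy in ti_sci_inta_alloc_irq() above is: reuse a free bit in an existing vint's 64-entry event bitmap if one exists, otherwise pull a fresh vint from the pool and take its first bit. The following simplified userspace sketch shows that policy with made-up limits and without the locking or TISCI calls.

/* Sketch of the "reuse a free event slot, else open a new vint" policy. */
#include <stdint.h>
#include <stdio.h>

#define MAX_VINTS               4
#define EVENTS_PER_VINT         64

static uint64_t vint_map[MAX_VINTS];    /* bit set => event slot in use */
static int nr_vints;                    /* vints opened so far */

static int alloc_event(int *vint, int *bit)
{
        int v, b;

        /* first try to find a free bit in an already-open vint */
        for (v = 0; v < nr_vints; v++) {
                for (b = 0; b < EVENTS_PER_VINT; b++) {
                        if (!(vint_map[v] & (1ULL << b))) {
                                vint_map[v] |= 1ULL << b;
                                *vint = v;
                                *bit = b;
                                return 0;
                        }
                }
        }
        if (nr_vints == MAX_VINTS)
                return -1;              /* vint pool exhausted */
        v = nr_vints++;                 /* open a new vint, take bit 0 */
        vint_map[v] = 1;
        *vint = v;
        *bit = 0;
        return 0;
}

int main(void)
{
        int v, b, i;

        for (i = 0; i < 66; i++) {
                if (alloc_event(&v, &b))
                        return 1;
        }
        printf("last event landed in vint %d, bit %d\n", v, b);  /* vint 1, bit 1 */
        return 0;
}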
diff --git a/drivers/irqchip/irq-ti-sci-intr.c b/drivers/irqchip/irq-ti-sci-intr.c
new file mode 100644 (file)
index 0000000..59d51a2
--- /dev/null
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments' K3 Interrupt Router irqchip driver
+ *
+ * Copyright (C) 2018-2019 Texas Instruments Incorporated - http://www.ti.com/
+ *     Lokesh Vutla <lokeshvutla@ti.com>
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/io.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/soc/ti/ti_sci_protocol.h>
+
+#define TI_SCI_DEV_ID_MASK     0xffff
+#define TI_SCI_DEV_ID_SHIFT    16
+#define TI_SCI_IRQ_ID_MASK     0xffff
+#define TI_SCI_IRQ_ID_SHIFT    0
+#define HWIRQ_TO_DEVID(hwirq)  (((hwirq) >> (TI_SCI_DEV_ID_SHIFT)) & \
+                                (TI_SCI_DEV_ID_MASK))
+#define HWIRQ_TO_IRQID(hwirq)  ((hwirq) & (TI_SCI_IRQ_ID_MASK))
+#define TO_HWIRQ(dev, index)   ((((dev) & TI_SCI_DEV_ID_MASK) << \
+                                TI_SCI_DEV_ID_SHIFT) | \
+                               ((index) & TI_SCI_IRQ_ID_MASK))
+
+/**
+ * struct ti_sci_intr_irq_domain - Structure representing a TISCI based
+ *                                Interrupt Router IRQ domain.
+ * @sci:       Pointer to TISCI handle
+ * @dst_irq:   TISCI resource pointer representing GIC irq controller.
+ * @dst_id:    TISCI device ID of the GIC irq controller.
+ * @type:      Specifies the trigger type supported by this Interrupt Router
+ */
+struct ti_sci_intr_irq_domain {
+       const struct ti_sci_handle *sci;
+       struct ti_sci_resource *dst_irq;
+       u32 dst_id;
+       u32 type;
+};
+
+static struct irq_chip ti_sci_intr_irq_chip = {
+       .name                   = "INTR",
+       .irq_eoi                = irq_chip_eoi_parent,
+       .irq_mask               = irq_chip_mask_parent,
+       .irq_unmask             = irq_chip_unmask_parent,
+       .irq_set_type           = irq_chip_set_type_parent,
+       .irq_retrigger          = irq_chip_retrigger_hierarchy,
+       .irq_set_affinity       = irq_chip_set_affinity_parent,
+};
+
+/**
+ * ti_sci_intr_irq_domain_translate() - Retrieve hwirq and type from
+ *                                     IRQ firmware specific handler.
+ * @domain:    Pointer to IRQ domain
+ * @fwspec:    Pointer to IRQ specific firmware structure
+ * @hwirq:     IRQ number identified by hardware
+ * @type:      IRQ type
+ *
+ * Return 0 if all went ok else appropriate error.
+ */
+static int ti_sci_intr_irq_domain_translate(struct irq_domain *domain,
+                                           struct irq_fwspec *fwspec,
+                                           unsigned long *hwirq,
+                                           unsigned int *type)
+{
+       struct ti_sci_intr_irq_domain *intr = domain->host_data;
+
+       if (fwspec->param_count != 2)
+               return -EINVAL;
+
+       *hwirq = TO_HWIRQ(fwspec->param[0], fwspec->param[1]);
+       *type = intr->type;
+
+       return 0;
+}
+
+/**
+ * ti_sci_intr_irq_domain_free() - Free the specified IRQs from the domain.
+ * @domain:    Domain to which the irqs belong
+ * @virq:      Linux virtual IRQ to be freed.
+ * @nr_irqs:   Number of continuous irqs to be freed
+ */
+static void ti_sci_intr_irq_domain_free(struct irq_domain *domain,
+                                       unsigned int virq, unsigned int nr_irqs)
+{
+       struct ti_sci_intr_irq_domain *intr = domain->host_data;
+       struct irq_data *data, *parent_data;
+       u16 dev_id, irq_index;
+
+       parent_data = irq_domain_get_irq_data(domain->parent, virq);
+       data = irq_domain_get_irq_data(domain, virq);
+       irq_index = HWIRQ_TO_IRQID(data->hwirq);
+       dev_id = HWIRQ_TO_DEVID(data->hwirq);
+
+       intr->sci->ops.rm_irq_ops.free_irq(intr->sci, dev_id, irq_index,
+                                          intr->dst_id, parent_data->hwirq);
+       ti_sci_release_resource(intr->dst_irq, parent_data->hwirq);
+       irq_domain_free_irqs_parent(domain, virq, 1);
+       irq_domain_reset_irq_data(data);
+}
+
+/**
+ * ti_sci_intr_alloc_gic_irq() - Allocate GIC specific IRQ
+ * @domain:    Pointer to the interrupt router IRQ domain
+ * @virq:      Corresponding Linux virtual IRQ number
+ * @hwirq:     Corresponding hwirq for the IRQ within this IRQ domain
+ *
+ * Returns 0 if all went well else appropriate error value.
+ */
+static int ti_sci_intr_alloc_gic_irq(struct irq_domain *domain,
+                                    unsigned int virq, u32 hwirq)
+{
+       struct ti_sci_intr_irq_domain *intr = domain->host_data;
+       struct irq_fwspec fwspec;
+       u16 dev_id, irq_index;
+       u16 dst_irq;
+       int err;
+
+       dev_id = HWIRQ_TO_DEVID(hwirq);
+       irq_index = HWIRQ_TO_IRQID(hwirq);
+
+       dst_irq = ti_sci_get_free_resource(intr->dst_irq);
+       if (dst_irq == TI_SCI_RESOURCE_NULL)
+               return -EINVAL;
+
+       fwspec.fwnode = domain->parent->fwnode;
+       fwspec.param_count = 3;
+       fwspec.param[0] = 0;    /* SPI */
+       fwspec.param[1] = dst_irq - 32; /* SPI offset */
+       fwspec.param[2] = intr->type;
+
+       err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+       if (err)
+               goto err_irqs;
+
+       err = intr->sci->ops.rm_irq_ops.set_irq(intr->sci, dev_id, irq_index,
+                                               intr->dst_id, dst_irq);
+       if (err)
+               goto err_msg;
+
+       return 0;
+
+err_msg:
+       irq_domain_free_irqs_parent(domain, virq, 1);
+err_irqs:
+       ti_sci_release_resource(intr->dst_irq, dst_irq);
+       return err;
+}
+
+/**
+ * ti_sci_intr_irq_domain_alloc() - Allocate Interrupt router IRQs
+ * @domain:    Pointer to the interrupt router IRQ domain
+ * @virq:      Corresponding Linux virtual IRQ number
+ * @nr_irqs:   Continuous irqs to be allocated
+ * @data:      Pointer to firmware specifier
+ *
+ * Return 0 if all went well else appropriate error value.
+ */
+static int ti_sci_intr_irq_domain_alloc(struct irq_domain *domain,
+                                       unsigned int virq, unsigned int nr_irqs,
+                                       void *data)
+{
+       struct irq_fwspec *fwspec = data;
+       unsigned long hwirq;
+       unsigned int flags;
+       int err;
+
+       err = ti_sci_intr_irq_domain_translate(domain, fwspec, &hwirq, &flags);
+       if (err)
+               return err;
+
+       err = ti_sci_intr_alloc_gic_irq(domain, virq, hwirq);
+       if (err)
+               return err;
+
+       irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+                                     &ti_sci_intr_irq_chip, NULL);
+
+       return 0;
+}
+
+static const struct irq_domain_ops ti_sci_intr_irq_domain_ops = {
+       .free           = ti_sci_intr_irq_domain_free,
+       .alloc          = ti_sci_intr_irq_domain_alloc,
+       .translate      = ti_sci_intr_irq_domain_translate,
+};
+
+static int ti_sci_intr_irq_domain_probe(struct platform_device *pdev)
+{
+       struct irq_domain *parent_domain, *domain;
+       struct ti_sci_intr_irq_domain *intr;
+       struct device_node *parent_node;
+       struct device *dev = &pdev->dev;
+       int ret;
+
+       parent_node = of_irq_find_parent(dev_of_node(dev));
+       if (!parent_node) {
+               dev_err(dev, "Failed to get IRQ parent node\n");
+               return -ENODEV;
+       }
+
+       parent_domain = irq_find_host(parent_node);
+       if (!parent_domain) {
+               dev_err(dev, "Failed to find IRQ parent domain\n");
+               return -ENODEV;
+       }
+
+       intr = devm_kzalloc(dev, sizeof(*intr), GFP_KERNEL);
+       if (!intr)
+               return -ENOMEM;
+
+       ret = of_property_read_u32(dev_of_node(dev), "ti,intr-trigger-type",
+                                  &intr->type);
+       if (ret) {
+               dev_err(dev, "missing ti,intr-trigger-type property\n");
+               return -EINVAL;
+       }
+
+       intr->sci = devm_ti_sci_get_by_phandle(dev, "ti,sci");
+       if (IS_ERR(intr->sci)) {
+               ret = PTR_ERR(intr->sci);
+               if (ret != -EPROBE_DEFER)
+                       dev_err(dev, "ti,sci read fail %d\n", ret);
+               intr->sci = NULL;
+               return ret;
+       }
+
+       ret = of_property_read_u32(dev_of_node(dev), "ti,sci-dst-id",
+                                  &intr->dst_id);
+       if (ret) {
+               dev_err(dev, "missing 'ti,sci-dst-id' property\n");
+               return -EINVAL;
+       }
+
+       intr->dst_irq = devm_ti_sci_get_of_resource(intr->sci, dev,
+                                                   intr->dst_id,
+                                                   "ti,sci-rm-range-girq");
+       if (IS_ERR(intr->dst_irq)) {
+               dev_err(dev, "Destination irq resource allocation failed\n");
+               return PTR_ERR(intr->dst_irq);
+       }
+
+       domain = irq_domain_add_hierarchy(parent_domain, 0, 0, dev_of_node(dev),
+                                         &ti_sci_intr_irq_domain_ops, intr);
+       if (!domain) {
+               dev_err(dev, "Failed to allocate IRQ domain\n");
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static const struct of_device_id ti_sci_intr_irq_domain_of_match[] = {
+       { .compatible = "ti,sci-intr", },
+       { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, ti_sci_intr_irq_domain_of_match);
+
+static struct platform_driver ti_sci_intr_irq_domain_driver = {
+       .probe = ti_sci_intr_irq_domain_probe,
+       .driver = {
+               .name = "ti-sci-intr",
+               .of_match_table = ti_sci_intr_irq_domain_of_match,
+       },
+};
+module_platform_driver(ti_sci_intr_irq_domain_driver);
+
+MODULE_AUTHOR("Lokesh Vutla <lokeshvutla@ticom>");
+MODULE_DESCRIPTION("K3 Interrupt Router driver over TI SCI protocol");
+MODULE_LICENSE("GPL v2");
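
Both TI SCI drivers above pack a 16-bit device id and a 16-bit per-device index into a single 32-bit hwirq via TO_HWIRQ()/HWIRQ_TO_DEVID()/HWIRQ_TO_IRQID(). The self-contained sketch below demonstrates the same encoding; the macro names are shortened for the example.

/* Pack/unpack a (device id, index) pair into one 32-bit hwirq. */
#include <stdint.h>
#include <stdio.h>

#define DEV_ID_MASK     0xffff
#define DEV_ID_SHIFT    16
#define IRQ_ID_MASK     0xffff

#define TO_HWIRQ(dev, index)    ((((dev) & DEV_ID_MASK) << DEV_ID_SHIFT) | \
                                 ((index) & IRQ_ID_MASK))
#define HWIRQ_TO_DEVID(hwirq)   (((hwirq) >> DEV_ID_SHIFT) & DEV_ID_MASK)
#define HWIRQ_TO_IRQID(hwirq)   ((hwirq) & IRQ_ID_MASK)

int main(void)
{
        uint32_t hwirq = TO_HWIRQ(0x001e, 5);   /* device 30, event index 5 */

        printf("hwirq=0x%08x dev=%u index=%u\n", (unsigned)hwirq,
               (unsigned)HWIRQ_TO_DEVID(hwirq),
               (unsigned)HWIRQ_TO_IRQID(hwirq));
        return 0;
}

With the values above this prints hwirq=0x001e0005 dev=30 index=5, matching how the firmware-facing calls in the patch split the hwirq back into its two halves.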
index b80cb6a..6a0365b 100644 (file)
@@ -59,30 +59,6 @@ config ATMEL_TCLIB
          blocks found on many Atmel processors.  This facilitates using
          these blocks by different drivers despite processor differences.
 
-config ATMEL_TCB_CLKSRC
-       bool "TC Block Clocksource"
-       depends on ATMEL_TCLIB
-       default y
-       help
-         Select this to get a high precision clocksource based on a
-         TC block with a 5+ MHz base clock rate.  Two timer channels
-         are combined to make a single 32-bit timer.
-
-         When GENERIC_CLOCKEVENTS is defined, the third timer channel
-         may be used as a clock event device supporting oneshot mode
-         (delays of up to two seconds) based on the 32 KiHz clock.
-
-config ATMEL_TCB_CLKSRC_BLOCK
-       int
-       depends on ATMEL_TCB_CLKSRC
-       default 0
-       range 0 1
-       help
-         Some chips provide more than one TC block, so you have the
-         choice of which one to use for the clock framework.  The other
-         TC can be used for other purposes, such as PWM generation and
-         interval timing.
-
 config DUMMY_IRQ
        tristate "Dummy IRQ handler"
        default n
index ac24a4b..2c6850e 100644 (file)
@@ -1,4 +1,3 @@
-#include <linux/atmel_tc.h>
 #include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/init.h>
@@ -10,6 +9,7 @@
 #include <linux/slab.h>
 #include <linux/export.h>
 #include <linux/of.h>
+#include <soc/at91/atmel_tcb.h>
 
 /*
  * This is a thin library to solve the problem of how to portably allocate
@@ -111,6 +111,9 @@ static int __init tc_probe(struct platform_device *pdev)
        struct resource *r;
        unsigned int    i;
 
+       if (of_get_child_count(pdev->dev.of_node))
+               return -EBUSY;
+
        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return -EINVAL;
index 0d0f837..7da1fdb 100644 (file)
 #include <linux/ioport.h>
 #include <linux/io.h>
 #include <linux/platform_device.h>
-#include <linux/atmel_tc.h>
 #include <linux/pwm.h>
 #include <linux/of_device.h>
 #include <linux/slab.h>
+#include <soc/at91/atmel_tcb.h>
 
 #define NPWM   6
 
index 819bed0..51b3a47 100644 (file)
@@ -179,8 +179,10 @@ struct qe_pin *qe_pin_request(struct device_node *np, int index)
        if (err < 0)
                goto err0;
        gc = gpio_to_chip(err);
-       if (WARN_ON(!gc))
+       if (WARN_ON(!gc)) {
+               err = -ENODEV;
                goto err0;
+       }
 
        if (!of_device_is_compatible(gc->of_node, "fsl,mpc8323-qe-pario-bank")) {
                pr_debug("%s: tried to get a non-qe pin\n", __func__);
index 13a8a13..bb90670 100644 (file)
@@ -385,8 +385,8 @@ static int ixp4xx_qmgr_probe(struct platform_device *pdev)
        if (!res)
                return -ENODEV;
        qmgr_regs = devm_ioremap_resource(dev, res);
-       if (!qmgr_regs)
-               return -ENOMEM;
+       if (IS_ERR(qmgr_regs))
+               return PTR_ERR(qmgr_regs);
 
        irq1 = platform_get_irq(pdev, 0);
        if (irq1 <= 0)
index 57960e9..dbd6c60 100644 (file)
@@ -74,4 +74,10 @@ config TI_SCI_PM_DOMAINS
          called ti_sci_pm_domains. Note this is needed early in boot before
          rootfs may be available.
 
+config TI_SCI_INTA_MSI_DOMAIN
+       bool
+       select GENERIC_MSI_IRQ_DOMAIN
+       help
+         Driver to enable Interrupt Aggregator specific MSI Domain.
+
 endif # SOC_TI
index a22edc0..b3868d3 100644 (file)
@@ -8,3 +8,4 @@ obj-$(CONFIG_KEYSTONE_NAVIGATOR_DMA)    += knav_dma.o
 obj-$(CONFIG_AMX3_PM)                  += pm33xx.o
 obj-$(CONFIG_WKUP_M3_IPC)              += wkup_m3_ipc.o
 obj-$(CONFIG_TI_SCI_PM_DOMAINS)                += ti_sci_pm_domains.o
+obj-$(CONFIG_TI_SCI_INTA_MSI_DOMAIN)   += ti_sci_inta_msi.o
diff --git a/drivers/soc/ti/ti_sci_inta_msi.c b/drivers/soc/ti/ti_sci_inta_msi.c
new file mode 100644 (file)
index 0000000..0eb9462
--- /dev/null
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments' K3 Interrupt Aggregator MSI bus
+ *
+ * Copyright (C) 2018-2019 Texas Instruments Incorporated - http://www.ti.com/
+ *     Lokesh Vutla <lokeshvutla@ti.com>
+ */
+
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/soc/ti/ti_sci_inta_msi.h>
+#include <linux/soc/ti/ti_sci_protocol.h>
+
+static void ti_sci_inta_msi_write_msg(struct irq_data *data,
+                                     struct msi_msg *msg)
+{
+       /* Nothing to do */
+}
+
+static void ti_sci_inta_msi_compose_msi_msg(struct irq_data *data,
+                                           struct msi_msg *msg)
+{
+       /* Nothing to do */
+}
+
+static void ti_sci_inta_msi_update_chip_ops(struct msi_domain_info *info)
+{
+       struct irq_chip *chip = info->chip;
+
+       if (WARN_ON(!chip))
+               return;
+
+       chip->irq_request_resources = irq_chip_request_resources_parent;
+       chip->irq_release_resources = irq_chip_release_resources_parent;
+       chip->irq_compose_msi_msg = ti_sci_inta_msi_compose_msi_msg;
+       chip->irq_write_msi_msg = ti_sci_inta_msi_write_msg;
+       chip->irq_set_type = irq_chip_set_type_parent;
+       chip->irq_unmask = irq_chip_unmask_parent;
+       chip->irq_mask = irq_chip_mask_parent;
+       chip->irq_ack = irq_chip_ack_parent;
+}
+
+struct irq_domain *ti_sci_inta_msi_create_irq_domain(struct fwnode_handle *fwnode,
+                                                    struct msi_domain_info *info,
+                                                    struct irq_domain *parent)
+{
+       struct irq_domain *domain;
+
+       ti_sci_inta_msi_update_chip_ops(info);
+
+       domain = msi_create_irq_domain(fwnode, info, parent);
+       if (domain)
+               irq_domain_update_bus_token(domain, DOMAIN_BUS_TI_SCI_INTA_MSI);
+
+       return domain;
+}
+EXPORT_SYMBOL_GPL(ti_sci_inta_msi_create_irq_domain);
+
+static void ti_sci_inta_msi_free_descs(struct device *dev)
+{
+       struct msi_desc *desc, *tmp;
+
+       list_for_each_entry_safe(desc, tmp, dev_to_msi_list(dev), list) {
+               list_del(&desc->list);
+               free_msi_entry(desc);
+       }
+}
+
+static int ti_sci_inta_msi_alloc_descs(struct device *dev,
+                                      struct ti_sci_resource *res)
+{
+       struct msi_desc *msi_desc;
+       int set, i, count = 0;
+
+       for (set = 0; set < res->sets; set++) {
+               for (i = 0; i < res->desc[set].num; i++) {
+                       msi_desc = alloc_msi_entry(dev, 1, NULL);
+                       if (!msi_desc) {
+                               ti_sci_inta_msi_free_descs(dev);
+                               return -ENOMEM;
+                       }
+
+                       msi_desc->inta.dev_index = res->desc[set].start + i;
+                       INIT_LIST_HEAD(&msi_desc->list);
+                       list_add_tail(&msi_desc->list, dev_to_msi_list(dev));
+                       count++;
+               }
+       }
+
+       return count;
+}
+
+int ti_sci_inta_msi_domain_alloc_irqs(struct device *dev,
+                                     struct ti_sci_resource *res)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct irq_domain *msi_domain;
+       int ret, nvec;
+
+       msi_domain = dev_get_msi_domain(dev);
+       if (!msi_domain)
+               return -EINVAL;
+
+       if (pdev->id < 0)
+               return -ENODEV;
+
+       nvec = ti_sci_inta_msi_alloc_descs(dev, res);
+       if (nvec <= 0)
+               return nvec;
+
+       ret = msi_domain_alloc_irqs(msi_domain, dev, nvec);
+       if (ret) {
+               dev_err(dev, "Failed to allocate IRQs %d\n", ret);
+               goto cleanup;
+       }
+
+       return 0;
+
+cleanup:
+       ti_sci_inta_msi_free_descs(&pdev->dev);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(ti_sci_inta_msi_domain_alloc_irqs);
+
+void ti_sci_inta_msi_domain_free_irqs(struct device *dev)
+{
+       msi_domain_free_irqs(dev->msi_domain, dev);
+       ti_sci_inta_msi_free_descs(dev);
+}
+EXPORT_SYMBOL_GPL(ti_sci_inta_msi_domain_free_irqs);
+
+unsigned int ti_sci_inta_msi_get_virq(struct device *dev, u32 dev_index)
+{
+       struct msi_desc *desc;
+
+       for_each_msi_entry(desc, dev)
+               if (desc->inta.dev_index == dev_index)
+                       return desc->irq;
+
+       return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(ti_sci_inta_msi_get_virq);
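
A minimal sketch of how a client driver might sit on top of the helpers exported above; it assumes the device is a platform device whose MSI domain has already been set, and the resource and index values are purely illustrative:

        #include <linux/device.h>
        #include <linux/soc/ti/ti_sci_inta_msi.h>
        #include <linux/soc/ti/ti_sci_protocol.h>

        static int example_setup_events(struct device *dev,
                                        struct ti_sci_resource *res)
        {
                int virq, ret;

                /* One Linux IRQ is allocated per event described in @res. */
                ret = ti_sci_inta_msi_domain_alloc_irqs(dev, res);
                if (ret)
                        return ret;

                /*
                 * Map a TISCI device index back to its Linux virq.  The helper
                 * returns -ENODEV (as an unsigned int) when the index is not
                 * found, so keep the result in a signed variable for the check.
                 */
                virq = ti_sci_inta_msi_get_virq(dev, 0);
                if (virq < 0) {
                        ti_sci_inta_msi_domain_free_irqs(dev);
                        return virq;
                }

                /* request_irq(virq, ...) would follow in a real driver. */
                return 0;
        }
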
index 75155bd..31f53fa 100644 (file)
@@ -53,7 +53,6 @@ device_initcall(hvc_sbi_init);
 static int __init hvc_sbi_console_init(void)
 {
        hvc_instantiate(0, 0, &hvc_sbi_ops);
-       add_preferred_console("hvc", 0, NULL);
 
        return 0;
 }
index 9e529cc..9f39f0c 100644 (file)
@@ -477,8 +477,12 @@ static int efifb_probe(struct platform_device *dev)
                 * If the UEFI memory map covers the efifb region, we may only
                 * remap it using the attributes the memory map prescribes.
                 */
-               mem_flags |= EFI_MEMORY_WT | EFI_MEMORY_WB;
-               mem_flags &= md.attribute;
+               md.attribute &= EFI_MEMORY_UC | EFI_MEMORY_WC |
+                               EFI_MEMORY_WT | EFI_MEMORY_WB;
+               if (md.attribute) {
+                       mem_flags |= EFI_MEMORY_WT | EFI_MEMORY_WB;
+                       mem_flags &= md.attribute;
+               }
        }
        if (mem_flags & EFI_MEMORY_WC)
                info->screen_base = ioremap_wc(efifb_fix.smem_start,
index 6a69f11..45e74da 100644 (file)
@@ -380,6 +380,8 @@ skip_rdma:
                                atomic_read(&server->in_send),
                                atomic_read(&server->num_waiters));
 #endif
+                       /* dump session id helpful for use with network trace */
+                       seq_printf(m, " SessionId: 0x%llx", ses->Suid);
                        if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA)
                                seq_puts(m, " encrypted");
                        if (ses->sign)
index b1a5fcf..f5fcd63 100644 (file)
@@ -878,6 +878,9 @@ out:
 
 static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
 {
+       struct cifsFileInfo *cfile = file->private_data;
+       struct cifs_tcon *tcon;
+
        /*
         * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
         * the cached file length
@@ -909,6 +912,12 @@ static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
                if (rc < 0)
                        return (loff_t)rc;
        }
+       if (cfile && cfile->tlink) {
+               tcon = tlink_tcon(cfile->tlink);
+               if (tcon->ses->server->ops->llseek)
+                       return tcon->ses->server->ops->llseek(file, tcon,
+                                                             offset, whence);
+       }
        return generic_file_llseek(file, offset, whence);
 }
 
@@ -1070,11 +1079,6 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
 
        cifs_dbg(FYI, "copychunk range\n");
 
-       if (src_inode == target_inode) {
-               rc = -EINVAL;
-               goto out;
-       }
-
        if (!src_file->private_data || !dst_file->private_data) {
                rc = -EBADF;
                cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
index 33c251b..334ff5f 100644 (file)
@@ -497,6 +497,8 @@ struct smb_version_operations {
        /* version specific fiemap implementation */
        int (*fiemap)(struct cifs_tcon *tcon, struct cifsFileInfo *,
                      struct fiemap_extent_info *, u64, u64);
+       /* version specific llseek implementation */
+       loff_t (*llseek)(struct file *, struct cifs_tcon *, loff_t, int);
 };
 
 struct smb_version_values {
index 084756c..8c4121d 100644 (file)
@@ -528,6 +528,21 @@ cifs_reconnect(struct TCP_Server_Info *server)
        /* do not want to be sending data on a socket we are freeing */
        cifs_dbg(FYI, "%s: tearing down socket\n", __func__);
        mutex_lock(&server->srv_mutex);
+       if (server->ssocket) {
+               cifs_dbg(FYI, "State: 0x%x Flags: 0x%lx\n",
+                        server->ssocket->state, server->ssocket->flags);
+               kernel_sock_shutdown(server->ssocket, SHUT_WR);
+               cifs_dbg(FYI, "Post shutdown state: 0x%x Flags: 0x%lx\n",
+                        server->ssocket->state, server->ssocket->flags);
+               sock_release(server->ssocket);
+               server->ssocket = NULL;
+       }
+       server->sequence_number = 0;
+       server->session_estab = false;
+       kfree(server->session_key.response);
+       server->session_key.response = NULL;
+       server->session_key.len = 0;
+       server->lstrp = jiffies;
 
        /* mark submitted MIDs for retry and issue callback */
        INIT_LIST_HEAD(&retry_list);
@@ -540,6 +555,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
                list_move(&mid_entry->qhead, &retry_list);
        }
        spin_unlock(&GlobalMid_Lock);
+       mutex_unlock(&server->srv_mutex);
 
        cifs_dbg(FYI, "%s: issuing mid callbacks\n", __func__);
        list_for_each_safe(tmp, tmp2, &retry_list) {
@@ -548,24 +564,11 @@ cifs_reconnect(struct TCP_Server_Info *server)
                mid_entry->callback(mid_entry);
        }
 
-       if (server->ssocket) {
-               cifs_dbg(FYI, "State: 0x%x Flags: 0x%lx\n",
-                        server->ssocket->state, server->ssocket->flags);
-               kernel_sock_shutdown(server->ssocket, SHUT_WR);
-               cifs_dbg(FYI, "Post shutdown state: 0x%x Flags: 0x%lx\n",
-                        server->ssocket->state, server->ssocket->flags);
-               sock_release(server->ssocket);
-               server->ssocket = NULL;
-       } else if (cifs_rdma_enabled(server))
+       if (cifs_rdma_enabled(server)) {
+               mutex_lock(&server->srv_mutex);
                smbd_destroy(server);
-       server->sequence_number = 0;
-       server->session_estab = false;
-       kfree(server->session_key.response);
-       server->session_key.response = NULL;
-       server->session_key.len = 0;
-       server->lstrp = jiffies;
-
-       mutex_unlock(&server->srv_mutex);
+               mutex_unlock(&server->srv_mutex);
+       }
 
        do {
                try_to_freeze();
@@ -2443,6 +2446,10 @@ match_port(struct TCP_Server_Info *server, struct sockaddr *addr)
 {
        __be16 port, *sport;
 
+       /* SMBDirect manages its own ports, don't match it here */
+       if (server->rdma)
+               return true;
+
        switch (addr->sa_family) {
        case AF_INET:
                sport = &((struct sockaddr_in *) &server->dstaddr)->sin_port;
index a930c89..e921e65 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *  SMB2 version specific operations
  *
@@ -282,7 +283,7 @@ smb2_find_mid(struct TCP_Server_Info *server, char *buf)
        __u64 wire_mid = le64_to_cpu(shdr->MessageId);
 
        if (shdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) {
-               cifs_dbg(VFS, "encrypted frame parsing not supported yet");
+               cifs_dbg(VFS, "Encrypted frame parsing not supported yet\n");
                return NULL;
        }
 
@@ -324,6 +325,7 @@ static int
 smb2_negotiate(const unsigned int xid, struct cifs_ses *ses)
 {
        int rc;
+
        ses->server->CurrentMid = 0;
        rc = SMB2_negotiate(xid, ses);
        /* BB we probably don't need to retry with modern servers */
@@ -789,8 +791,6 @@ smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
                SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
        else
                close_shroot(&tcon->crfid);
-
-       return;
 }
 
 static void
@@ -818,7 +818,6 @@ smb2_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
        SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
                        FS_DEVICE_INFORMATION);
        SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
-       return;
 }
 
 static int
@@ -906,9 +905,8 @@ move_smb2_ea_to_cifs(char *dst, size_t dst_size,
                value = &src->ea_data[src->ea_name_length + 1];
                value_len = (size_t)le16_to_cpu(src->ea_value_length);
 
-               if (name_len == 0) {
+               if (name_len == 0)
                        break;
-               }
 
                if (src_size < 8 + name_len + 1 + value_len) {
                        cifs_dbg(FYI, "EA entry goes beyond length of list\n");
@@ -1161,6 +1159,7 @@ static void
 smb2_clear_stats(struct cifs_tcon *tcon)
 {
        int i;
+
        for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
                atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
                atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
@@ -1529,7 +1528,7 @@ smb2_copychunk_range(const unsigned int xid,
        if (pcchunk == NULL)
                return -ENOMEM;
 
-       cifs_dbg(FYI, "in smb2_copychunk_range - about to call request res key\n");
+       cifs_dbg(FYI, "%s: about to call request res key\n", __func__);
        /* Request a key from the server to identify the source of the copy */
        rc = SMB2_request_res_key(xid, tlink_tcon(srcfile->tlink),
                                srcfile->fid.persistent_fid,
@@ -1649,6 +1648,7 @@ static unsigned int
 smb2_read_data_offset(char *buf)
 {
        struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;
+
        return rsp->DataOffset;
 }
 
@@ -1777,7 +1777,7 @@ smb2_duplicate_extents(const unsigned int xid,
        dup_ext_buf.SourceFileOffset = cpu_to_le64(src_off);
        dup_ext_buf.TargetFileOffset = cpu_to_le64(dest_off);
        dup_ext_buf.ByteCount = cpu_to_le64(len);
-       cifs_dbg(FYI, "duplicate extents: src off %lld dst off %lld len %lld",
+       cifs_dbg(FYI, "Duplicate extents: src off %lld dst off %lld len %lld\n",
                src_off, dest_off, len);
 
        rc = smb2_set_file_size(xid, tcon, trgtfile, dest_off + len, false);
@@ -1794,7 +1794,7 @@ smb2_duplicate_extents(const unsigned int xid,
                        &ret_data_len);
 
        if (ret_data_len > 0)
-               cifs_dbg(FYI, "non-zero response length in duplicate extents");
+               cifs_dbg(FYI, "Non-zero response length in duplicate extents\n");
 
 duplicate_extents_out:
        return rc;
@@ -1983,9 +1983,9 @@ smb2_close_dir(const unsigned int xid, struct cifs_tcon *tcon,
 }
 
 /*
-* If we negotiate SMB2 protocol and get STATUS_PENDING - update
-* the number of credits and return true. Otherwise - return false.
-*/
+ * If we negotiate SMB2 protocol and get STATUS_PENDING - update
+ * the number of credits and return true. Otherwise - return false.
+ */
 static bool
 smb2_is_status_pending(char *buf, struct TCP_Server_Info *server)
 {
@@ -2306,7 +2306,7 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
        struct get_dfs_referral_rsp *dfs_rsp = NULL;
        u32 dfs_req_size = 0, dfs_rsp_size = 0;
 
-       cifs_dbg(FYI, "smb2_get_dfs_refer path <%s>\n", search_name);
+       cifs_dbg(FYI, "%s: path: %s\n", __func__, search_name);
 
        /*
         * Try to use the IPC tcon, otherwise just use any
@@ -2360,7 +2360,7 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
 
        if (rc) {
                if ((rc != -ENOENT) && (rc != -EOPNOTSUPP))
-                       cifs_dbg(VFS, "ioctl error in smb2_get_dfs_refer rc=%d\n", rc);
+                       cifs_dbg(VFS, "ioctl error in %s rc=%d\n", __func__, rc);
                goto out;
        }
 
@@ -2369,7 +2369,7 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
                                 nls_codepage, remap, search_name,
                                 true /* is_unicode */);
        if (rc) {
-               cifs_dbg(VFS, "parse error in smb2_get_dfs_refer rc=%d\n", rc);
+               cifs_dbg(VFS, "parse error in %s rc=%d\n", __func__, rc);
                goto out;
        }
 
@@ -2745,7 +2745,7 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
        inode = d_inode(cfile->dentry);
        cifsi = CIFS_I(inode);
 
-        trace_smb3_zero_enter(xid, cfile->fid.persistent_fid, tcon->tid,
+       trace_smb3_zero_enter(xid, cfile->fid.persistent_fid, tcon->tid,
                              ses->Suid, offset, len);
 
 
@@ -2759,7 +2759,7 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
                        return rc;
                }
 
-       cifs_dbg(FYI, "offset %lld len %lld", offset, len);
+       cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);
 
        fsctl_buf.FileOffset = cpu_to_le64(offset);
        fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);
@@ -2816,7 +2816,7 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
                return rc;
        }
 
-       cifs_dbg(FYI, "offset %lld len %lld", offset, len);
+       cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);
 
        fsctl_buf.FileOffset = cpu_to_le64(offset);
        fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);
@@ -2922,6 +2922,90 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
        return rc;
 }
 
+static loff_t smb3_llseek(struct file *file, struct cifs_tcon *tcon, loff_t offset, int whence)
+{
+       struct cifsFileInfo *wrcfile, *cfile = file->private_data;
+       struct cifsInodeInfo *cifsi;
+       struct inode *inode;
+       int rc = 0;
+       struct file_allocated_range_buffer in_data, *out_data = NULL;
+       u32 out_data_len;
+       unsigned int xid;
+
+       if (whence != SEEK_HOLE && whence != SEEK_DATA)
+               return generic_file_llseek(file, offset, whence);
+
+       inode = d_inode(cfile->dentry);
+       cifsi = CIFS_I(inode);
+
+       if (offset < 0 || offset >= i_size_read(inode))
+               return -ENXIO;
+
+       xid = get_xid();
+       /*
+        * We need to be sure that all dirty pages are written as they
+        * might fill holes on the server.
+        * Note that we also MUST flush any written pages since at least
+        * some servers (Windows2016) will not reflect recent writes in
+        * QUERY_ALLOCATED_RANGES until SMB2_flush is called.
+        */
+       wrcfile = find_writable_file(cifsi, false);
+       if (wrcfile) {
+               filemap_write_and_wait(inode->i_mapping);
+               smb2_flush_file(xid, tcon, &wrcfile->fid);
+               cifsFileInfo_put(wrcfile);
+       }
+
+       if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE)) {
+               if (whence == SEEK_HOLE)
+                       offset = i_size_read(inode);
+               goto lseek_exit;
+       }
+
+       in_data.file_offset = cpu_to_le64(offset);
+       in_data.length = cpu_to_le64(i_size_read(inode));
+
+       rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
+                       cfile->fid.volatile_fid,
+                       FSCTL_QUERY_ALLOCATED_RANGES, true,
+                       (char *)&in_data, sizeof(in_data),
+                       sizeof(struct file_allocated_range_buffer),
+                       (char **)&out_data, &out_data_len);
+       if (rc == -E2BIG)
+               rc = 0;
+       if (rc)
+               goto lseek_exit;
+
+       if (whence == SEEK_HOLE && out_data_len == 0)
+               goto lseek_exit;
+
+       if (whence == SEEK_DATA && out_data_len == 0) {
+               rc = -ENXIO;
+               goto lseek_exit;
+       }
+
+       if (out_data_len < sizeof(struct file_allocated_range_buffer)) {
+               rc = -EINVAL;
+               goto lseek_exit;
+       }
+       if (whence == SEEK_DATA) {
+               offset = le64_to_cpu(out_data->file_offset);
+               goto lseek_exit;
+       }
+       if (offset < le64_to_cpu(out_data->file_offset))
+               goto lseek_exit;
+
+       offset = le64_to_cpu(out_data->file_offset) + le64_to_cpu(out_data->length);
+
+ lseek_exit:
+       free_xid(xid);
+       kfree(out_data);
+       if (!rc)
+               return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
+       else
+               return rc;
+}
+
 static int smb3_fiemap(struct cifs_tcon *tcon,
                       struct cifsFileInfo *cfile,
                       struct fiemap_extent_info *fei, u64 start, u64 len)
@@ -3384,7 +3468,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
 
        req = aead_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
-               cifs_dbg(VFS, "%s: Failed to alloc aead request", __func__);
+               cifs_dbg(VFS, "%s: Failed to alloc aead request\n", __func__);
                return -ENOMEM;
        }
 
@@ -3395,7 +3479,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
 
        sg = init_sg(num_rqst, rqst, sign);
        if (!sg) {
-               cifs_dbg(VFS, "%s: Failed to init sg", __func__);
+               cifs_dbg(VFS, "%s: Failed to init sg\n", __func__);
                rc = -ENOMEM;
                goto free_req;
        }
@@ -3403,7 +3487,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
        iv_len = crypto_aead_ivsize(tfm);
        iv = kzalloc(iv_len, GFP_KERNEL);
        if (!iv) {
-               cifs_dbg(VFS, "%s: Failed to alloc IV", __func__);
+               cifs_dbg(VFS, "%s: Failed to alloc iv\n", __func__);
                rc = -ENOMEM;
                goto free_sg;
        }
@@ -3511,7 +3595,7 @@ smb3_init_transform_rq(struct TCP_Server_Info *server, int num_rqst,
        fill_transform_hdr(tr_hdr, orig_len, old_rq);
 
        rc = crypt_message(server, num_rqst, new_rq, 1);
-       cifs_dbg(FYI, "encrypt message returned %d", rc);
+       cifs_dbg(FYI, "Encrypt message returned %d\n", rc);
        if (rc)
                goto err_free;
 
@@ -3552,7 +3636,7 @@ decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
        rqst.rq_tailsz = (page_data_size % PAGE_SIZE) ? : PAGE_SIZE;
 
        rc = crypt_message(server, 1, &rqst, 0);
-       cifs_dbg(FYI, "decrypt message returned %d\n", rc);
+       cifs_dbg(FYI, "Decrypt message returned %d\n", rc);
 
        if (rc)
                return rc;
@@ -4166,6 +4250,7 @@ struct smb_version_operations smb20_operations = {
        .ioctl_query_info = smb2_ioctl_query_info,
        .make_node = smb2_make_node,
        .fiemap = smb3_fiemap,
+       .llseek = smb3_llseek,
 };
 
 struct smb_version_operations smb21_operations = {
@@ -4266,6 +4351,7 @@ struct smb_version_operations smb21_operations = {
        .ioctl_query_info = smb2_ioctl_query_info,
        .make_node = smb2_make_node,
        .fiemap = smb3_fiemap,
+       .llseek = smb3_llseek,
 };
 
 struct smb_version_operations smb30_operations = {
@@ -4375,6 +4461,7 @@ struct smb_version_operations smb30_operations = {
        .ioctl_query_info = smb2_ioctl_query_info,
        .make_node = smb2_make_node,
        .fiemap = smb3_fiemap,
+       .llseek = smb3_llseek,
 };
 
 struct smb_version_operations smb311_operations = {
@@ -4485,6 +4572,7 @@ struct smb_version_operations smb311_operations = {
        .ioctl_query_info = smb2_ioctl_query_info,
        .make_node = smb2_make_node,
        .fiemap = smb3_fiemap,
+       .llseek = smb3_llseek,
 };
 
 struct smb_version_values smb20_values = {
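
The new ->llseek hook only handles SEEK_DATA and SEEK_HOLE; everything else still goes through generic_file_llseek(). A small userspace sketch of what that enables on a sparse file over an SMB3 mount (the path is made up; SEEK_DATA/SEEK_HOLE need _GNU_SOURCE with glibc):

        #define _GNU_SOURCE
        #include <fcntl.h>
        #include <stdio.h>
        #include <unistd.h>

        int main(void)
        {
                int fd = open("/mnt/cifs/sparse.img", O_RDONLY);
                off_t data, hole;

                if (fd < 0)
                        return 1;
                /* First byte of data at or after offset 0. */
                data = lseek(fd, 0, SEEK_DATA);
                /* First hole at or after that data. */
                hole = lseek(fd, data, SEEK_HOLE);
                printf("data at %lld, next hole at %lld\n",
                       (long long)data, (long long)hole);
                close(fd);
                return 0;
        }

For files without the server-side sparse attribute, smb3_llseek() above reports the whole file as a single data extent, so SEEK_HOLE lands at the end of file.
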
index 29f011d..710ceb8 100644 (file)
@@ -2538,11 +2538,25 @@ SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
        struct kvec *iov = rqst->rq_iov;
        unsigned int total_len;
        int rc;
+       char *in_data_buf;
 
        rc = smb2_plain_req_init(SMB2_IOCTL, tcon, (void **) &req, &total_len);
        if (rc)
                return rc;
 
+       if (indatalen) {
+               /*
+                * indatalen is usually small at a couple of bytes max, so
+                * just allocate through generic pool
+                */
+               in_data_buf = kmalloc(indatalen, GFP_NOFS);
+               if (!in_data_buf) {
+                       cifs_small_buf_release(req);
+                       return -ENOMEM;
+               }
+               memcpy(in_data_buf, in_data, indatalen);
+       }
+
        req->CtlCode = cpu_to_le32(opcode);
        req->PersistentFileId = persistent_fid;
        req->VolatileFileId = volatile_fid;
@@ -2563,7 +2577,7 @@ SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
                       cpu_to_le32(offsetof(struct smb2_ioctl_req, Buffer));
                rqst->rq_nvec = 2;
                iov[0].iov_len = total_len - 1;
-               iov[1].iov_base = in_data;
+               iov[1].iov_base = in_data_buf;
                iov[1].iov_len = indatalen;
        } else {
                rqst->rq_nvec = 1;
@@ -2605,8 +2619,11 @@ SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
 void
 SMB2_ioctl_free(struct smb_rqst *rqst)
 {
-       if (rqst && rqst->rq_iov)
+       if (rqst && rqst->rq_iov) {
                cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
+               if (rqst->rq_iov[1].iov_len)
+                       kfree(rqst->rq_iov[1].iov_base);
+       }
 }
 
 
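The kmalloc()+memcpy() pair above gives the ioctl request its own copy of the caller's input buffer, which SMB2_ioctl_free() then releases together with the rest of the request. The same ownership pattern is often spelled with kmemdup(); a generic sketch of that idiom, not CIFS code, with illustrative names:

        #include <linux/slab.h>
        #include <linux/string.h>

        struct example_req {
                void    *payload;       /* owned by the request */
                size_t  payload_len;
        };

        static int example_req_init(struct example_req *req,
                                    const void *data, size_t len)
        {
                if (!len)
                        return 0;
                /* Duplicate caller data so the request owns its own buffer. */
                req->payload = kmemdup(data, len, GFP_NOFS);
                if (!req->payload)
                        return -ENOMEM;
                req->payload_len = len;
                return 0;
        }

        static void example_req_free(struct example_req *req)
        {
                kfree(req->payload);    /* kfree(NULL) is a no-op */
                req->payload = NULL;
        }
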
index 251ef12..caac37b 100644 (file)
@@ -903,7 +903,7 @@ static int smbd_create_header(struct smbd_connection *info,
        request->sge[0].addr = ib_dma_map_single(info->id->device,
                                                 (void *)packet,
                                                 header_length,
-                                                DMA_BIDIRECTIONAL);
+                                                DMA_TO_DEVICE);
        if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
                mempool_free(request, info->request_mempool);
                rc = -EIO;
@@ -1005,7 +1005,7 @@ static int smbd_post_send_sgl(struct smbd_connection *info,
        for_each_sg(sgl, sg, num_sgs, i) {
                request->sge[i+1].addr =
                        ib_dma_map_page(info->id->device, sg_page(sg),
-                              sg->offset, sg->length, DMA_BIDIRECTIONAL);
+                              sg->offset, sg->length, DMA_TO_DEVICE);
                if (ib_dma_mapping_error(
                                info->id->device, request->sge[i+1].addr)) {
                        rc = -EIO;
@@ -2110,8 +2110,10 @@ int smbd_send(struct TCP_Server_Info *server,
                goto done;
        }
 
-       rqst_idx = 0;
+       log_write(INFO, "num_rqst=%d total length=%u\n",
+                       num_rqst, remaining_data_length);
 
+       rqst_idx = 0;
 next_rqst:
        rqst = &rqst_array[rqst_idx];
        iov = rqst->rq_iov;
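
Switching these mappings from DMA_BIDIRECTIONAL to DMA_TO_DEVICE matches how the buffers are actually used: the adapter only reads them on the send path, and on non-coherent platforms the narrower direction avoids needless cache maintenance. A generic sketch of the map/unmap pairing with a directional flag (illustrative names, not smbdirect code):

        #include <linux/dma-mapping.h>
        #include <rdma/ib_verbs.h>

        /* Map a send-only buffer: the device reads it, never writes it. */
        static u64 example_map_send_buf(struct ib_device *dev, void *buf,
                                        size_t len)
        {
                u64 addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);

                if (ib_dma_mapping_error(dev, addr))
                        return 0;       /* caller treats 0 as failure here */
                return addr;
        }

        static void example_unmap_send_buf(struct ib_device *dev, u64 addr,
                                           size_t len)
        {
                /* The direction must match the one used at map time. */
                ib_dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
        }
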
index 9a16ff4..60661b3 100644 (file)
@@ -33,7 +33,7 @@
 #include <linux/uaccess.h>
 #include <asm/processor.h>
 #include <linux/mempool.h>
-#include <linux/signal.h>
+#include <linux/sched/signal.h>
 #include "cifspdu.h"
 #include "cifsglob.h"
 #include "cifsproto.h"
index 968f163..8e83741 100644 (file)
@@ -142,7 +142,8 @@ static int ext4_protect_reserved_inode(struct super_block *sb, u32 ino)
        struct inode *inode;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_map_blocks map;
-       u32 i = 0, err = 0, num, n;
+       u32 i = 0, num;
+       int err = 0, n;
 
        if ((ino < EXT4_ROOT_INO) ||
            (ino > le32_to_cpu(sbi->s_es->s_inodes_count)))
@@ -276,6 +277,11 @@ int ext4_check_blockref(const char *function, unsigned int line,
        __le32 *bref = p;
        unsigned int blk;
 
+       if (ext4_has_feature_journal(inode->i_sb) &&
+           (inode->i_ino ==
+            le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
+               return 0;
+
        while (bref < p+max) {
                blk = le32_to_cpu(*bref++);
                if (blk &&
index 0f89f51..f2c62e2 100644 (file)
@@ -1035,6 +1035,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
        __le32 border;
        ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
        int err = 0;
+       size_t ext_size = 0;
 
        /* make decision: where to split? */
        /* FIXME: now decision is simplest: at current extent */
@@ -1126,6 +1127,10 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
                le16_add_cpu(&neh->eh_entries, m);
        }
 
+       /* zero out unused area in the extent block */
+       ext_size = sizeof(struct ext4_extent_header) +
+               sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries);
+       memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
        ext4_extent_block_csum_set(inode, neh);
        set_buffer_uptodate(bh);
        unlock_buffer(bh);
@@ -1205,6 +1210,11 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
                                sizeof(struct ext4_extent_idx) * m);
                        le16_add_cpu(&neh->eh_entries, m);
                }
+               /* zero out unused area in the extent block */
+               ext_size = sizeof(struct ext4_extent_header) +
+                  (sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries));
+               memset(bh->b_data + ext_size, 0,
+                       inode->i_sb->s_blocksize - ext_size);
                ext4_extent_block_csum_set(inode, neh);
                set_buffer_uptodate(bh);
                unlock_buffer(bh);
@@ -1270,6 +1280,7 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
        ext4_fsblk_t newblock, goal = 0;
        struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
        int err = 0;
+       size_t ext_size = 0;
 
        /* Try to prepend new index to old one */
        if (ext_depth(inode))
@@ -1295,9 +1306,11 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
                goto out;
        }
 
+       ext_size = sizeof(EXT4_I(inode)->i_data);
        /* move top-level index/leaf into new block */
-       memmove(bh->b_data, EXT4_I(inode)->i_data,
-               sizeof(EXT4_I(inode)->i_data));
+       memmove(bh->b_data, EXT4_I(inode)->i_data, ext_size);
+       /* zero out unused area in the extent block */
+       memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
 
        /* set size of new block */
        neh = ext_block_hdr(bh);
index 98ec11f..2c5baa5 100644 (file)
@@ -264,6 +264,13 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
        }
 
        ret = __generic_file_write_iter(iocb, from);
+       /*
+        * Unaligned direct AIO must be the only IO in flight. Otherwise
+        * overlapping aligned IO after unaligned might result in data
+        * corruption.
+        */
+       if (ret == -EIOCBQUEUED && unaligned_aio)
+               ext4_unwritten_wait(inode);
        inode_unlock(inode);
 
        if (ret > 0)
index 4b99e2d..dbccf46 100644 (file)
@@ -626,7 +626,7 @@ int ext4_getfsmap(struct super_block *sb, struct ext4_fsmap_head *head,
 {
        struct ext4_fsmap dkeys[2];     /* per-dev keys */
        struct ext4_getfsmap_dev handlers[EXT4_GETFSMAP_DEVS];
-       struct ext4_getfsmap_info info = {0};
+       struct ext4_getfsmap_info info = { NULL };
        int i;
        int error = 0;
 
index 7e85ecf..e486e49 100644 (file)
@@ -608,7 +608,7 @@ static int ext4_getfsmap_format(struct ext4_fsmap *xfm, void *priv)
 static int ext4_ioc_getfsmap(struct super_block *sb,
                             struct fsmap_head __user *arg)
 {
-       struct getfsmap_info info = {0};
+       struct getfsmap_info info = { NULL };
        struct ext4_fsmap_head xhead = {0};
        struct fsmap_head head;
        bool aborted = false;
index 6d50f53..cd01c4a 100644 (file)
@@ -872,12 +872,15 @@ static void dx_release(struct dx_frame *frames)
 {
        struct dx_root_info *info;
        int i;
+       unsigned int indirect_levels;
 
        if (frames[0].bh == NULL)
                return;
 
        info = &((struct dx_root *)frames[0].bh->b_data)->info;
-       for (i = 0; i <= info->indirect_levels; i++) {
+       /* save local copy, "info" may be freed after brelse() */
+       indirect_levels = info->indirect_levels;
+       for (i = 0; i <= indirect_levels; i++) {
                if (frames[i].bh == NULL)
                        break;
                brelse(frames[i].bh);
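
The dx_release() fix is the classic copy-the-bound-before-release pattern: info points into frames[0].bh->b_data, so once that buffer head is put back the loop condition must not re-read it. A stripped-down generic illustration (not ext4 code):

        #include <linux/slab.h>

        struct frame_hdr { unsigned int levels; };

        /* frames[0] holds the header that says how many frames exist. */
        static void example_release_frames(void **frames)
        {
                struct frame_hdr *hdr = frames[0];
                /* Copy the bound first: once frames[0] is freed, reading
                 * hdr->levels in the loop condition is a use-after-free. */
                unsigned int levels = hdr->levels;
                unsigned int i;

                for (i = 0; i <= levels; i++)
                        kfree(frames[i]);
        }
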
index f71b525..4079605 100644 (file)
@@ -699,7 +699,7 @@ void __ext4_abort(struct super_block *sb, const char *function,
                        jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
                save_error_info(sb, function, line);
        }
-       if (test_opt(sb, ERRORS_PANIC)) {
+       if (test_opt(sb, ERRORS_PANIC) && !system_going_down()) {
                if (EXT4_SB(sb)->s_journal &&
                  !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
                        return;
@@ -4661,7 +4661,7 @@ failed_mount:
 
 #ifdef CONFIG_QUOTA
        for (i = 0; i < EXT4_MAXQUOTAS; i++)
-               kfree(sbi->s_qf_names[i]);
+               kfree(get_qf_name(sb, sbi, i));
 #endif
        ext4_blkdev_remove(sbi);
        brelse(bh);
index 37e16d9..43df0c9 100644 (file)
@@ -2375,22 +2375,19 @@ static struct kmem_cache *jbd2_journal_head_cache;
 static atomic_t nr_journal_heads = ATOMIC_INIT(0);
 #endif
 
-static int jbd2_journal_init_journal_head_cache(void)
+static int __init jbd2_journal_init_journal_head_cache(void)
 {
-       int retval;
-
-       J_ASSERT(jbd2_journal_head_cache == NULL);
+       J_ASSERT(!jbd2_journal_head_cache);
        jbd2_journal_head_cache = kmem_cache_create("jbd2_journal_head",
                                sizeof(struct journal_head),
                                0,              /* offset */
                                SLAB_TEMPORARY | SLAB_TYPESAFE_BY_RCU,
                                NULL);          /* ctor */
-       retval = 0;
        if (!jbd2_journal_head_cache) {
-               retval = -ENOMEM;
                printk(KERN_EMERG "JBD2: no memory for journal_head cache\n");
+               return -ENOMEM;
        }
-       return retval;
+       return 0;
 }
 
 static void jbd2_journal_destroy_journal_head_cache(void)
@@ -2636,28 +2633,38 @@ static void __exit jbd2_remove_jbd_stats_proc_entry(void)
 
 struct kmem_cache *jbd2_handle_cache, *jbd2_inode_cache;
 
+static int __init jbd2_journal_init_inode_cache(void)
+{
+       J_ASSERT(!jbd2_inode_cache);
+       jbd2_inode_cache = KMEM_CACHE(jbd2_inode, 0);
+       if (!jbd2_inode_cache) {
+               pr_emerg("JBD2: failed to create inode cache\n");
+               return -ENOMEM;
+       }
+       return 0;
+}
+
 static int __init jbd2_journal_init_handle_cache(void)
 {
+       J_ASSERT(!jbd2_handle_cache);
        jbd2_handle_cache = KMEM_CACHE(jbd2_journal_handle, SLAB_TEMPORARY);
-       if (jbd2_handle_cache == NULL) {
+       if (!jbd2_handle_cache) {
                printk(KERN_EMERG "JBD2: failed to create handle cache\n");
                return -ENOMEM;
        }
-       jbd2_inode_cache = KMEM_CACHE(jbd2_inode, 0);
-       if (jbd2_inode_cache == NULL) {
-               printk(KERN_EMERG "JBD2: failed to create inode cache\n");
-               kmem_cache_destroy(jbd2_handle_cache);
-               return -ENOMEM;
-       }
        return 0;
 }
 
+static void jbd2_journal_destroy_inode_cache(void)
+{
+       kmem_cache_destroy(jbd2_inode_cache);
+       jbd2_inode_cache = NULL;
+}
+
 static void jbd2_journal_destroy_handle_cache(void)
 {
        kmem_cache_destroy(jbd2_handle_cache);
        jbd2_handle_cache = NULL;
-       kmem_cache_destroy(jbd2_inode_cache);
-       jbd2_inode_cache = NULL;
 }
 
 /*
@@ -2668,11 +2675,15 @@ static int __init journal_init_caches(void)
 {
        int ret;
 
-       ret = jbd2_journal_init_revoke_caches();
+       ret = jbd2_journal_init_revoke_record_cache();
+       if (ret == 0)
+               ret = jbd2_journal_init_revoke_table_cache();
        if (ret == 0)
                ret = jbd2_journal_init_journal_head_cache();
        if (ret == 0)
                ret = jbd2_journal_init_handle_cache();
+       if (ret == 0)
+               ret = jbd2_journal_init_inode_cache();
        if (ret == 0)
                ret = jbd2_journal_init_transaction_cache();
        return ret;
@@ -2680,9 +2691,11 @@ static int __init journal_init_caches(void)
 
 static void jbd2_journal_destroy_caches(void)
 {
-       jbd2_journal_destroy_revoke_caches();
+       jbd2_journal_destroy_revoke_record_cache();
+       jbd2_journal_destroy_revoke_table_cache();
        jbd2_journal_destroy_journal_head_cache();
        jbd2_journal_destroy_handle_cache();
+       jbd2_journal_destroy_inode_cache();
        jbd2_journal_destroy_transaction_cache();
        jbd2_journal_destroy_slabs();
 }
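
One detail that keeps the per-cache init/destroy split above simple: kmem_cache_destroy() accepts NULL, so every destroy helper can be called unconditionally when unwinding a partial initialisation. A self-contained sketch of one such pair (names are illustrative):

        #include <linux/init.h>
        #include <linux/printk.h>
        #include <linux/slab.h>

        struct example_rec { int x; };

        static struct kmem_cache *example_cache;

        static int __init example_init_cache(void)
        {
                example_cache = KMEM_CACHE(example_rec, SLAB_TEMPORARY);
                if (!example_cache) {
                        pr_emerg("example: failed to create record cache\n");
                        return -ENOMEM;
                }
                return 0;
        }

        static void example_destroy_cache(void)
        {
                /* Safe even if the cache was never created:
                 * kmem_cache_destroy(NULL) is a no-op. */
                kmem_cache_destroy(example_cache);
                example_cache = NULL;
        }
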
index a1143e5..69b9bc3 100644 (file)
@@ -178,33 +178,41 @@ static struct jbd2_revoke_record_s *find_revoke_record(journal_t *journal,
        return NULL;
 }
 
-void jbd2_journal_destroy_revoke_caches(void)
+void jbd2_journal_destroy_revoke_record_cache(void)
 {
        kmem_cache_destroy(jbd2_revoke_record_cache);
        jbd2_revoke_record_cache = NULL;
+}
+
+void jbd2_journal_destroy_revoke_table_cache(void)
+{
        kmem_cache_destroy(jbd2_revoke_table_cache);
        jbd2_revoke_table_cache = NULL;
 }
 
-int __init jbd2_journal_init_revoke_caches(void)
+int __init jbd2_journal_init_revoke_record_cache(void)
 {
        J_ASSERT(!jbd2_revoke_record_cache);
-       J_ASSERT(!jbd2_revoke_table_cache);
-
        jbd2_revoke_record_cache = KMEM_CACHE(jbd2_revoke_record_s,
                                        SLAB_HWCACHE_ALIGN|SLAB_TEMPORARY);
-       if (!jbd2_revoke_record_cache)
-               goto record_cache_failure;
 
+       if (!jbd2_revoke_record_cache) {
+               pr_emerg("JBD2: failed to create revoke_record cache\n");
+               return -ENOMEM;
+       }
+       return 0;
+}
+
+int __init jbd2_journal_init_revoke_table_cache(void)
+{
+       J_ASSERT(!jbd2_revoke_table_cache);
        jbd2_revoke_table_cache = KMEM_CACHE(jbd2_revoke_table_s,
                                             SLAB_TEMPORARY);
-       if (!jbd2_revoke_table_cache)
-               goto table_cache_failure;
-       return 0;
-table_cache_failure:
-       jbd2_journal_destroy_revoke_caches();
-record_cache_failure:
+       if (!jbd2_revoke_table_cache) {
+               pr_emerg("JBD2: failed to create revoke_table cache\n");
                return -ENOMEM;
+       }
+       return 0;
 }
 
 static struct jbd2_revoke_table_s *jbd2_journal_init_revoke_table(int hash_size)
index f940d31..8ca4fdd 100644 (file)
@@ -42,9 +42,11 @@ int __init jbd2_journal_init_transaction_cache(void)
                                        0,
                                        SLAB_HWCACHE_ALIGN|SLAB_TEMPORARY,
                                        NULL);
-       if (transaction_cache)
-               return 0;
-       return -ENOMEM;
+       if (!transaction_cache) {
+               pr_emerg("JBD2: failed to create transaction cache\n");
+               return -ENOMEM;
+       }
+       return 0;
 }
 
 void jbd2_journal_destroy_transaction_cache(void)
index 9307cf0..c737868 100644 (file)
@@ -5,29 +5,15 @@ The full set of files can be found here:
 
   http://www.unicode.org/Public/12.1.0/ucd/
 
-Note!
-
-The URL's listed below are not stable.  That's because Unicode 12.1.0
-has not been officially released yet; it is scheduled to be released
-on May 8, 2019.  We taking Unicode 12.1.0 a few weeks early because it
-contains a new Japanese character which is required in order to
-specify Japenese dates after May 1, 2019, when Crown Prince Naruhito
-ascends to the Chrysanthemum Throne.  (Isn't internationalization fun?
-The abdication of Emperor Akihito of Japan is requiring dozens of
-software packages to be updated with only a month's notice.  :-)
-
-We will update the URL's (and any needed changes to the checksums)
-after the final Unicode 12.1.0 is released.
-
 Individual source links:
 
-  https://www.unicode.org/Public/12.1.0/ucd/CaseFolding-12.1.0d2.txt
-  https://www.unicode.org/Public/12.1.0/ucd/DerivedAge-12.1.0d3.txt
-  https://www.unicode.org/Public/12.1.0/ucd/extracted/DerivedCombiningClass-12.1.0d2.txt
-  https://www.unicode.org/Public/12.1.0/ucd/DerivedCoreProperties-12.1.0d2.txt
-  https://www.unicode.org/Public/12.1.0/ucd/NormalizationCorrections-12.1.0d1.txt
-  https://www.unicode.org/Public/12.1.0/ucd/NormalizationTest-12.1.0d3.txt
-  https://www.unicode.org/Public/12.1.0/ucd/UnicodeData-12.1.0d2.txt
+  https://www.unicode.org/Public/12.1.0/ucd/CaseFolding.txt
+  https://www.unicode.org/Public/12.1.0/ucd/DerivedAge.txt
+  https://www.unicode.org/Public/12.1.0/ucd/extracted/DerivedCombiningClass.txt
+  https://www.unicode.org/Public/12.1.0/ucd/DerivedCoreProperties.txt
+  https://www.unicode.org/Public/12.1.0/ucd/NormalizationCorrections.txt
+  https://www.unicode.org/Public/12.1.0/ucd/NormalizationTest.txt
+  https://www.unicode.org/Public/12.1.0/ucd/UnicodeData.txt
 
 md5sums (verify by running "md5sum -c README.utf8data"):
 
index 20d440c..801ed6d 100644 (file)
@@ -714,6 +714,8 @@ int utf8byte(struct utf8cursor *u8c)
                        }
 
                        leaf = utf8lookup(u8c->data, u8c->hangul, u8c->s);
+                       if (!leaf)
+                               return -1;
                        ccc = LEAF_CCC(leaf);
                }
 
index 8ac4e68..6736ed2 100644 (file)
@@ -18,7 +18,6 @@ static inline void arch_exit_mmap(struct mm_struct *mm)
 }
 
 static inline void arch_unmap(struct mm_struct *mm,
-                       struct vm_area_struct *vma,
                        unsigned long start, unsigned long end)
 {
 }
diff --git a/include/linux/atmel_tc.h b/include/linux/atmel_tc.h
deleted file mode 100644 (file)
index 468fdfa..0000000
+++ /dev/null
@@ -1,270 +0,0 @@
-/*
- * Timer/Counter Unit (TC) registers.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef ATMEL_TC_H
-#define ATMEL_TC_H
-
-#include <linux/compiler.h>
-#include <linux/list.h>
-
-/*
- * Many 32-bit Atmel SOCs include one or more TC blocks, each of which holds
- * three general-purpose 16-bit timers.  These timers share one register bank.
- * Depending on the SOC, each timer may have its own clock and IRQ, or those
- * may be shared by the whole TC block.
- *
- * These TC blocks may have up to nine external pins:  TCLK0..2 signals for
- * clocks or clock gates, and per-timer TIOA and TIOB signals used for PWM
- * or triggering.  Those pins need to be set up for use with the TC block,
- * else they will be used as GPIOs or for a different controller.
- *
- * Although we expect each TC block to have a platform_device node, those
- * nodes are not what drivers bind to.  Instead, they ask for a specific
- * TC block, by number ... which is a common approach on systems with many
- * timers.  Then they use clk_get() and platform_get_irq() to get clock and
- * IRQ resources.
- */
-
-struct clk;
-
-/**
- * struct atmel_tcb_config - SoC data for a Timer/Counter Block
- * @counter_width: size in bits of a timer counter register
- */
-struct atmel_tcb_config {
-       size_t  counter_width;
-};
-
-/**
- * struct atmel_tc - information about a Timer/Counter Block
- * @pdev: physical device
- * @regs: mapping through which the I/O registers can be accessed
- * @id: block id
- * @tcb_config: configuration data from SoC
- * @irq: irq for each of the three channels
- * @clk: internal clock source for each of the three channels
- * @node: list node, for tclib internal use
- * @allocated: if already used, for tclib internal use
- *
- * On some platforms, each TC channel has its own clocks and IRQs,
- * while on others, all TC channels share the same clock and IRQ.
- * Drivers should clk_enable() all the clocks they need even though
- * all the entries in @clk may point to the same physical clock.
- * Likewise, drivers should request irqs independently for each
- * channel, but they must use IRQF_SHARED in case some of the entries
- * in @irq are actually the same IRQ.
- */
-struct atmel_tc {
-       struct platform_device  *pdev;
-       void __iomem            *regs;
-       int                     id;
-       const struct atmel_tcb_config *tcb_config;
-       int                     irq[3];
-       struct clk              *clk[3];
-       struct clk              *slow_clk;
-       struct list_head        node;
-       bool                    allocated;
-};
-
-extern struct atmel_tc *atmel_tc_alloc(unsigned block);
-extern void atmel_tc_free(struct atmel_tc *tc);
-
-/* platform-specific ATMEL_TC_TIMER_CLOCKx divisors (0 means 32KiHz) */
-extern const u8 atmel_tc_divisors[5];
-
-
-/*
- * Two registers have block-wide controls.  These are: configuring the three
- * "external" clocks (or event sources) used by the timer channels; and
- * synchronizing the timers by resetting them all at once.
- *
- * "External" can mean "external to chip" using the TCLK0, TCLK1, or TCLK2
- * signals.  Or, it can mean "external to timer", using the TIOA output from
- * one of the other two timers that's being run in waveform mode.
- */
-
-#define ATMEL_TC_BCR   0xc0            /* TC Block Control Register */
-#define     ATMEL_TC_SYNC      (1 << 0)        /* synchronize timers */
-
-#define ATMEL_TC_BMR   0xc4            /* TC Block Mode Register */
-#define     ATMEL_TC_TC0XC0S   (3 << 0)        /* external clock 0 source */
-#define        ATMEL_TC_TC0XC0S_TCLK0  (0 << 0)
-#define        ATMEL_TC_TC0XC0S_NONE   (1 << 0)
-#define        ATMEL_TC_TC0XC0S_TIOA1  (2 << 0)
-#define        ATMEL_TC_TC0XC0S_TIOA2  (3 << 0)
-#define     ATMEL_TC_TC1XC1S   (3 << 2)        /* external clock 1 source */
-#define        ATMEL_TC_TC1XC1S_TCLK1  (0 << 2)
-#define        ATMEL_TC_TC1XC1S_NONE   (1 << 2)
-#define        ATMEL_TC_TC1XC1S_TIOA0  (2 << 2)
-#define        ATMEL_TC_TC1XC1S_TIOA2  (3 << 2)
-#define     ATMEL_TC_TC2XC2S   (3 << 4)        /* external clock 2 source */
-#define        ATMEL_TC_TC2XC2S_TCLK2  (0 << 4)
-#define        ATMEL_TC_TC2XC2S_NONE   (1 << 4)
-#define        ATMEL_TC_TC2XC2S_TIOA0  (2 << 4)
-#define        ATMEL_TC_TC2XC2S_TIOA1  (3 << 4)
-
-
-/*
- * Each TC block has three "channels", each with one counter and controls.
- *
- * Note that the semantics of ATMEL_TC_TIMER_CLOCKx (input clock selection
- * when it's not "external") is silicon-specific.  AT91 platforms use one
- * set of definitions; AVR32 platforms use a different set.  Don't hard-wire
- * such knowledge into your code, use the global "atmel_tc_divisors" ...
- * where index N is the divisor for clock N+1, else zero to indicate it uses
- * the 32 KiHz clock.
- *
- * The timers can be chained in various ways, and operated in "waveform"
- * generation mode (including PWM) or "capture" mode (to time events).  In
- * both modes, behavior can be configured in many ways.
- *
- * Each timer has two I/O pins, TIOA and TIOB.  Waveform mode uses TIOA as a
- * PWM output, and TIOB as either another PWM or as a trigger.  Capture mode
- * uses them only as inputs.
- */
-#define ATMEL_TC_CHAN(idx)     ((idx)*0x40)
-#define ATMEL_TC_REG(idx, reg) (ATMEL_TC_CHAN(idx) + ATMEL_TC_ ## reg)
-
-#define ATMEL_TC_CCR   0x00            /* Channel Control Register */
-#define     ATMEL_TC_CLKEN     (1 << 0)        /* clock enable */
-#define     ATMEL_TC_CLKDIS    (1 << 1)        /* clock disable */
-#define     ATMEL_TC_SWTRG     (1 << 2)        /* software trigger */
-
-#define ATMEL_TC_CMR   0x04            /* Channel Mode Register */
-
-/* Both modes share some CMR bits */
-#define     ATMEL_TC_TCCLKS    (7 << 0)        /* clock source */
-#define        ATMEL_TC_TIMER_CLOCK1   (0 << 0)
-#define        ATMEL_TC_TIMER_CLOCK2   (1 << 0)
-#define        ATMEL_TC_TIMER_CLOCK3   (2 << 0)
-#define        ATMEL_TC_TIMER_CLOCK4   (3 << 0)
-#define        ATMEL_TC_TIMER_CLOCK5   (4 << 0)
-#define        ATMEL_TC_XC0            (5 << 0)
-#define        ATMEL_TC_XC1            (6 << 0)
-#define        ATMEL_TC_XC2            (7 << 0)
-#define     ATMEL_TC_CLKI      (1 << 3)        /* clock invert */
-#define     ATMEL_TC_BURST     (3 << 4)        /* clock gating */
-#define        ATMEL_TC_GATE_NONE      (0 << 4)
-#define        ATMEL_TC_GATE_XC0       (1 << 4)
-#define        ATMEL_TC_GATE_XC1       (2 << 4)
-#define        ATMEL_TC_GATE_XC2       (3 << 4)
-#define     ATMEL_TC_WAVE      (1 << 15)       /* true = Waveform mode */
-
-/* CAPTURE mode CMR bits */
-#define     ATMEL_TC_LDBSTOP   (1 << 6)        /* counter stops on RB load */
-#define     ATMEL_TC_LDBDIS    (1 << 7)        /* counter disable on RB load */
-#define     ATMEL_TC_ETRGEDG   (3 << 8)        /* external trigger edge */
-#define        ATMEL_TC_ETRGEDG_NONE   (0 << 8)
-#define        ATMEL_TC_ETRGEDG_RISING (1 << 8)
-#define        ATMEL_TC_ETRGEDG_FALLING        (2 << 8)
-#define        ATMEL_TC_ETRGEDG_BOTH   (3 << 8)
-#define     ATMEL_TC_ABETRG    (1 << 10)       /* external trigger is TIOA? */
-#define     ATMEL_TC_CPCTRG    (1 << 14)       /* RC compare trigger enable */
-#define     ATMEL_TC_LDRA      (3 << 16)       /* RA loading edge (of TIOA) */
-#define        ATMEL_TC_LDRA_NONE      (0 << 16)
-#define        ATMEL_TC_LDRA_RISING    (1 << 16)
-#define        ATMEL_TC_LDRA_FALLING   (2 << 16)
-#define        ATMEL_TC_LDRA_BOTH      (3 << 16)
-#define     ATMEL_TC_LDRB      (3 << 18)       /* RB loading edge (of TIOA) */
-#define        ATMEL_TC_LDRB_NONE      (0 << 18)
-#define        ATMEL_TC_LDRB_RISING    (1 << 18)
-#define        ATMEL_TC_LDRB_FALLING   (2 << 18)
-#define        ATMEL_TC_LDRB_BOTH      (3 << 18)
-
-/* WAVEFORM mode CMR bits */
-#define     ATMEL_TC_CPCSTOP   (1 <<  6)       /* RC compare stops counter */
-#define     ATMEL_TC_CPCDIS    (1 <<  7)       /* RC compare disables counter */
-#define     ATMEL_TC_EEVTEDG   (3 <<  8)       /* external event edge */
-#define        ATMEL_TC_EEVTEDG_NONE   (0 << 8)
-#define        ATMEL_TC_EEVTEDG_RISING (1 << 8)
-#define        ATMEL_TC_EEVTEDG_FALLING        (2 << 8)
-#define        ATMEL_TC_EEVTEDG_BOTH   (3 << 8)
-#define     ATMEL_TC_EEVT      (3 << 10)       /* external event source */
-#define        ATMEL_TC_EEVT_TIOB      (0 << 10)
-#define        ATMEL_TC_EEVT_XC0       (1 << 10)
-#define        ATMEL_TC_EEVT_XC1       (2 << 10)
-#define        ATMEL_TC_EEVT_XC2       (3 << 10)
-#define     ATMEL_TC_ENETRG    (1 << 12)       /* external event is trigger */
-#define     ATMEL_TC_WAVESEL   (3 << 13)       /* waveform type */
-#define        ATMEL_TC_WAVESEL_UP     (0 << 13)
-#define        ATMEL_TC_WAVESEL_UPDOWN (1 << 13)
-#define        ATMEL_TC_WAVESEL_UP_AUTO        (2 << 13)
-#define        ATMEL_TC_WAVESEL_UPDOWN_AUTO (3 << 13)
-#define     ATMEL_TC_ACPA      (3 << 16)       /* RA compare changes TIOA */
-#define        ATMEL_TC_ACPA_NONE      (0 << 16)
-#define        ATMEL_TC_ACPA_SET       (1 << 16)
-#define        ATMEL_TC_ACPA_CLEAR     (2 << 16)
-#define        ATMEL_TC_ACPA_TOGGLE    (3 << 16)
-#define     ATMEL_TC_ACPC      (3 << 18)       /* RC compare changes TIOA */
-#define        ATMEL_TC_ACPC_NONE      (0 << 18)
-#define        ATMEL_TC_ACPC_SET       (1 << 18)
-#define        ATMEL_TC_ACPC_CLEAR     (2 << 18)
-#define        ATMEL_TC_ACPC_TOGGLE    (3 << 18)
-#define     ATMEL_TC_AEEVT     (3 << 20)       /* external event changes TIOA */
-#define        ATMEL_TC_AEEVT_NONE     (0 << 20)
-#define        ATMEL_TC_AEEVT_SET      (1 << 20)
-#define        ATMEL_TC_AEEVT_CLEAR    (2 << 20)
-#define        ATMEL_TC_AEEVT_TOGGLE   (3 << 20)
-#define     ATMEL_TC_ASWTRG    (3 << 22)       /* software trigger changes TIOA */
-#define        ATMEL_TC_ASWTRG_NONE    (0 << 22)
-#define        ATMEL_TC_ASWTRG_SET     (1 << 22)
-#define        ATMEL_TC_ASWTRG_CLEAR   (2 << 22)
-#define        ATMEL_TC_ASWTRG_TOGGLE  (3 << 22)
-#define     ATMEL_TC_BCPB      (3 << 24)       /* RB compare changes TIOB */
-#define        ATMEL_TC_BCPB_NONE      (0 << 24)
-#define        ATMEL_TC_BCPB_SET       (1 << 24)
-#define        ATMEL_TC_BCPB_CLEAR     (2 << 24)
-#define        ATMEL_TC_BCPB_TOGGLE    (3 << 24)
-#define     ATMEL_TC_BCPC      (3 << 26)       /* RC compare changes TIOB */
-#define        ATMEL_TC_BCPC_NONE      (0 << 26)
-#define        ATMEL_TC_BCPC_SET       (1 << 26)
-#define        ATMEL_TC_BCPC_CLEAR     (2 << 26)
-#define        ATMEL_TC_BCPC_TOGGLE    (3 << 26)
-#define     ATMEL_TC_BEEVT     (3 << 28)       /* external event changes TIOB */
-#define        ATMEL_TC_BEEVT_NONE     (0 << 28)
-#define        ATMEL_TC_BEEVT_SET      (1 << 28)
-#define        ATMEL_TC_BEEVT_CLEAR    (2 << 28)
-#define        ATMEL_TC_BEEVT_TOGGLE   (3 << 28)
-#define     ATMEL_TC_BSWTRG    (3 << 30)       /* software trigger changes TIOB */
-#define        ATMEL_TC_BSWTRG_NONE    (0 << 30)
-#define        ATMEL_TC_BSWTRG_SET     (1 << 30)
-#define        ATMEL_TC_BSWTRG_CLEAR   (2 << 30)
-#define        ATMEL_TC_BSWTRG_TOGGLE  (3 << 30)
-
-#define ATMEL_TC_CV    0x10            /* counter Value */
-#define ATMEL_TC_RA    0x14            /* register A */
-#define ATMEL_TC_RB    0x18            /* register B */
-#define ATMEL_TC_RC    0x1c            /* register C */
-
-#define ATMEL_TC_SR    0x20            /* status (read-only) */
-/* Status-only flags */
-#define     ATMEL_TC_CLKSTA    (1 << 16)       /* clock enabled */
-#define     ATMEL_TC_MTIOA     (1 << 17)       /* TIOA mirror */
-#define     ATMEL_TC_MTIOB     (1 << 18)       /* TIOB mirror */
-
-#define ATMEL_TC_IER   0x24            /* interrupt enable (write-only) */
-#define ATMEL_TC_IDR   0x28            /* interrupt disable (write-only) */
-#define ATMEL_TC_IMR   0x2c            /* interrupt mask (read-only) */
-
-/* Status and IRQ flags */
-#define     ATMEL_TC_COVFS     (1 <<  0)       /* counter overflow */
-#define     ATMEL_TC_LOVRS     (1 <<  1)       /* load overrun */
-#define     ATMEL_TC_CPAS      (1 <<  2)       /* RA compare */
-#define     ATMEL_TC_CPBS      (1 <<  3)       /* RB compare */
-#define     ATMEL_TC_CPCS      (1 <<  4)       /* RC compare */
-#define     ATMEL_TC_LDRAS     (1 <<  5)       /* RA loading */
-#define     ATMEL_TC_LDRBS     (1 <<  6)       /* RB loading */
-#define     ATMEL_TC_ETRGS     (1 <<  7)       /* external trigger */
-#define     ATMEL_TC_ALL_IRQ   (ATMEL_TC_COVFS | ATMEL_TC_LOVRS | \
-                                ATMEL_TC_CPAS | ATMEL_TC_CPBS | \
-                                ATMEL_TC_CPCS | ATMEL_TC_LDRAS | \
-                                ATMEL_TC_LDRBS | ATMEL_TC_ETRGS) \
-                                /* all IRQs */
-
-#endif
index 0c53f26..44e8fc3 100644 (file)
 
 #define AT91_PMC_FSMR          0x70            /* Fast Startup Mode Register */
 #define AT91_PMC_FSTT(n)       BIT(n)
+#define AT91_PMC_RTTAL         BIT(16)
 #define AT91_PMC_RTCAL         BIT(17)         /* RTC Alarm Enable */
 #define AT91_PMC_USBAL         BIT(18)         /* USB Resume Enable */
 #define AT91_PMC_SDMMC_CD      BIT(19)         /* SDMMC Card Detect Enable */
index e760dc5..476e0c5 100644 (file)
@@ -71,12 +71,25 @@ void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir, unsigned long attrs);
 
 /* The DMA API isn't _quite_ the whole story, though... */
-void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg);
+/*
+ * iommu_dma_prepare_msi() - Map the MSI page in the IOMMU device
+ *
+ * The MSI page will be stored in @desc.
+ *
+ * Return: 0 on success otherwise an error describing the failure.
+ */
+int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr);
+
+/* Update the MSI message if required. */
+void iommu_dma_compose_msi_msg(struct msi_desc *desc,
+                              struct msi_msg *msg);
+
 void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
 
 #else
 
 struct iommu_domain;
+struct msi_desc;
 struct msi_msg;
 struct device;
 
@@ -99,7 +112,14 @@ static inline void iommu_put_dma_cookie(struct iommu_domain *domain)
 {
 }
 
-static inline void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
+static inline int iommu_dma_prepare_msi(struct msi_desc *desc,
+                                       phys_addr_t msi_addr)
+{
+       return 0;
+}
+
+static inline void iommu_dma_compose_msi_msg(struct msi_desc *desc,
+                                            struct msi_msg *msg)
 {
 }
 
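The single iommu_dma_map_msi_msg() call is replaced by a two-phase interface: a prepare step that can do the actual IOMMU mapping in a sleepable context, and a compose step that only rewrites the message. A rough sketch of how an MSI irqchip might use the pair, based only on the prototypes above; the doorbell address and function names are hypothetical:

        #include <linux/dma-iommu.h>
        #include <linux/kernel.h>
        #include <linux/msi.h>

        /* Hypothetical doorbell address; a real irqchip reads it from hardware. */
        #define EXAMPLE_DOORBELL_PA     0x10000000UL

        /* Setup path (interrupt allocation time): map the doorbell through
         * the IOMMU once and let the DMA layer cache it in the descriptor. */
        static int example_prepare(struct msi_desc *desc)
        {
                return iommu_dma_prepare_msi(desc, EXAMPLE_DOORBELL_PA);
        }

        /* irq_compose_msi_msg() path: build the message, then let the DMA
         * layer swap the physical address for the mapped IOVA if needed. */
        static void example_compose(struct msi_desc *desc, struct msi_msg *msg,
                                    u32 hwirq)
        {
                msg->address_hi = upper_32_bits(EXAMPLE_DOORBELL_PA);
                msg->address_lo = lower_32_bits(EXAMPLE_DOORBELL_PA);
                msg->data = hwirq;
                iommu_dma_compose_msi_msg(desc, msg);
        }
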
index be27062..6c4db54 100644 (file)
@@ -469,6 +469,9 @@ extern int i2c_probe_func_quick_read(struct i2c_adapter *, unsigned short addr);
 extern struct i2c_client *
 i2c_new_dummy(struct i2c_adapter *adap, u16 address);
 
+extern struct i2c_client *
+devm_i2c_new_dummy_device(struct device *dev, struct i2c_adapter *adap, u16 address);
+
 extern struct i2c_client *
 i2c_new_secondary_device(struct i2c_client *client,
                                const char *name,
index 7ae8de5..fb301cf 100644 (file)
@@ -625,6 +625,8 @@ extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on);
 extern int irq_chip_set_vcpu_affinity_parent(struct irq_data *data,
                                             void *vcpu_info);
 extern int irq_chip_set_type_parent(struct irq_data *data, unsigned int type);
+extern int irq_chip_request_resources_parent(struct irq_data *data);
+extern void irq_chip_release_resources_parent(struct irq_data *data);
 #endif
 
 /* Handling of unhandled and spurious interrupts: */
index c848a7c..c7e3e39 100644 (file)
 #define GICR_PROPBASER_nCnB    GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, nCnB)
 #define GICR_PROPBASER_nC      GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, nC)
 #define GICR_PROPBASER_RaWt    GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWt)
-#define GICR_PROPBASER_RaWb    GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWt)
+#define GICR_PROPBASER_RaWb    GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWb)
 #define GICR_PROPBASER_WaWt    GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, WaWt)
 #define GICR_PROPBASER_WaWb    GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, WaWb)
 #define GICR_PROPBASER_RaWaWt  GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWaWt)
 #define GICR_PENDBASER_nCnB    GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, nCnB)
 #define GICR_PENDBASER_nC      GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, nC)
 #define GICR_PENDBASER_RaWt    GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWt)
-#define GICR_PENDBASER_RaWb    GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWt)
+#define GICR_PENDBASER_RaWb    GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWb)
 #define GICR_PENDBASER_WaWt    GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, WaWt)
 #define GICR_PENDBASER_WaWb    GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, WaWb)
 #define GICR_PENDBASER_RaWaWt  GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWaWt)
 #define GICR_VPROPBASER_nCnB   GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nCnB)
 #define GICR_VPROPBASER_nC     GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nC)
 #define GICR_VPROPBASER_RaWt   GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWt)
-#define GICR_VPROPBASER_RaWb   GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWt)
+#define GICR_VPROPBASER_RaWb   GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWb)
 #define GICR_VPROPBASER_WaWt   GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWt)
 #define GICR_VPROPBASER_WaWb   GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWb)
 #define GICR_VPROPBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWt)
 #define GICR_VPENDBASER_nCnB   GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nCnB)
 #define GICR_VPENDBASER_nC     GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nC)
 #define GICR_VPENDBASER_RaWt   GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWt)
-#define GICR_VPENDBASER_RaWb   GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWt)
+#define GICR_VPENDBASER_RaWb   GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWb)
 #define GICR_VPENDBASER_WaWt   GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWt)
 #define GICR_VPENDBASER_WaWb   GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWb)
 #define GICR_VPENDBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWaWt)
 #define GITS_CBASER_nCnB       GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, nCnB)
 #define GITS_CBASER_nC         GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, nC)
 #define GITS_CBASER_RaWt       GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWt)
-#define GITS_CBASER_RaWb       GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWt)
+#define GITS_CBASER_RaWb       GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWb)
 #define GITS_CBASER_WaWt       GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, WaWt)
 #define GITS_CBASER_WaWb       GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, WaWb)
 #define GITS_CBASER_RaWaWt     GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWt)
 #define GITS_BASER_nCnB                GIC_BASER_CACHEABILITY(GITS_BASER, INNER, nCnB)
 #define GITS_BASER_nC          GIC_BASER_CACHEABILITY(GITS_BASER, INNER, nC)
 #define GITS_BASER_RaWt                GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWt)
-#define GITS_BASER_RaWb                GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWt)
+#define GITS_BASER_RaWb                GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb)
 #define GITS_BASER_WaWt                GIC_BASER_CACHEABILITY(GITS_BASER, INNER, WaWt)
 #define GITS_BASER_WaWb                GIC_BASER_CACHEABILITY(GITS_BASER, INNER, WaWb)
 #define GITS_BASER_RaWaWt      GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWaWt)
index 61706b4..07ec8b3 100644 (file)
@@ -82,6 +82,7 @@ enum irq_domain_bus_token {
        DOMAIN_BUS_NEXUS,
        DOMAIN_BUS_IPI,
        DOMAIN_BUS_FSL_MC_MSI,
+       DOMAIN_BUS_TI_SCI_INTA_MSI,
 };
 
 /**
index c2ffff5..6c9870e 100644 (file)
@@ -1318,7 +1318,7 @@ extern void               __wait_on_journal (journal_t *);
 
 /* Transaction cache support */
 extern void jbd2_journal_destroy_transaction_cache(void);
-extern int  jbd2_journal_init_transaction_cache(void);
+extern int __init jbd2_journal_init_transaction_cache(void);
 extern void jbd2_journal_free_transaction(transaction_t *);
 
 /*
@@ -1446,8 +1446,10 @@ static inline void jbd2_free_inode(struct jbd2_inode *jinode)
 /* Primary revoke support */
 #define JOURNAL_REVOKE_DEFAULT_HASH 256
 extern int        jbd2_journal_init_revoke(journal_t *, int);
-extern void       jbd2_journal_destroy_revoke_caches(void);
-extern int        jbd2_journal_init_revoke_caches(void);
+extern void       jbd2_journal_destroy_revoke_record_cache(void);
+extern void       jbd2_journal_destroy_revoke_table_cache(void);
+extern int __init jbd2_journal_init_revoke_record_cache(void);
+extern int __init jbd2_journal_init_revoke_table_cache(void);
 
 extern void       jbd2_journal_destroy_revoke(journal_t *);
 extern int        jbd2_journal_revoke (handle_t *, unsigned long long, struct buffer_head *);
index 052f04f..d48e919 100644 (file)
@@ -47,6 +47,14 @@ struct fsl_mc_msi_desc {
        u16                             msi_index;
 };
 
+/**
+ * struct ti_sci_inta_msi_desc - TISCI based INTA specific msi descriptor data
+ * @dev_index: TISCI device index
+ */
+struct ti_sci_inta_msi_desc {
+       u16     dev_index;
+};
+
 /**
  * struct msi_desc - Descriptor structure for MSI based interrupts
  * @list:      List head for management
@@ -68,6 +76,7 @@ struct fsl_mc_msi_desc {
  * @mask_base: [PCI MSI-X] Mask register base address
  * @platform:  [platform]  Platform device specific msi descriptor data
  * @fsl_mc:    [fsl-mc]    FSL MC device specific msi descriptor data
+ * @inta:      [INTA]      TISCI based INTA specific msi descriptor data
  */
 struct msi_desc {
        /* Shared device/bus type independent data */
@@ -77,6 +86,9 @@ struct msi_desc {
        struct device                   *dev;
        struct msi_msg                  msg;
        struct irq_affinity_desc        *affinity;
+#ifdef CONFIG_IRQ_MSI_IOMMU
+       const void                      *iommu_cookie;
+#endif
 
        union {
                /* PCI MSI/X specific data */
@@ -106,6 +118,7 @@ struct msi_desc {
                 */
                struct platform_msi_desc platform;
                struct fsl_mc_msi_desc fsl_mc;
+               struct ti_sci_inta_msi_desc inta;
        };
 };
 
@@ -119,6 +132,29 @@ struct msi_desc {
 #define for_each_msi_entry_safe(desc, tmp, dev)        \
        list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list)
 
+#ifdef CONFIG_IRQ_MSI_IOMMU
+static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
+{
+       return desc->iommu_cookie;
+}
+
+static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc,
+                                            const void *iommu_cookie)
+{
+       desc->iommu_cookie = iommu_cookie;
+}
+#else
+static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
+{
+       return NULL;
+}
+
+static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc,
+                                            const void *iommu_cookie)
+{
+}
+#endif
+
 #ifdef CONFIG_PCI_MSI
 #define first_pci_msi_entry(pdev)      first_msi_entry(&(pdev)->dev)
 #define for_each_pci_msi_entry(desc, pdev)     \
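The msi_desc_{get,set}_iommu_cookie() accessors added above compile to no-ops unless CONFIG_IRQ_MSI_IOMMU is selected. A small sketch, not taken from any in-tree driver, of how an IOMMU layer could stash a per-descriptor cookie on every MSI entry of a device:

#include <linux/msi.h>

/* Sketch: attach an opaque cookie (e.g. a mapped doorbell page) to each
 * MSI descriptor of @dev; with CONFIG_IRQ_MSI_IOMMU disabled this does
 * nothing because the setter is an empty stub. */
static void stash_msi_iommu_cookie(struct device *dev, const void *cookie)
{
	struct msi_desc *desc;

	for_each_msi_entry(desc, dev)
		msi_desc_set_iommu_cookie(desc, cookie);
}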
diff --git a/include/linux/platform_data/xtalk-bridge.h b/include/linux/platform_data/xtalk-bridge.h
new file mode 100644 (file)
index 0000000..51e5001
--- /dev/null
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * SGI PCI Xtalk Bridge
+ */
+
+#ifndef PLATFORM_DATA_XTALK_BRIDGE_H
+#define PLATFORM_DATA_XTALK_BRIDGE_H
+
+#include <asm/sn/types.h>
+
+struct xtalk_bridge_platform_data {
+       struct resource mem;
+       struct resource io;
+       unsigned long bridge_addr;
+       unsigned long intr_addr;
+       unsigned long mem_offset;
+       unsigned long io_offset;
+       nasid_t nasid;
+       int     masterwid;
+};
+
+#endif /* PLATFORM_DATA_XTALK_BRIDGE_H */
diff --git a/include/linux/soc/ti/ti_sci_inta_msi.h b/include/linux/soc/ti/ti_sci_inta_msi.h
new file mode 100644 (file)
index 0000000..11fb504
--- /dev/null
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Texas Instruments' K3 TI SCI INTA MSI helper
+ *
+ * Copyright (C) 2018-2019 Texas Instruments Incorporated - http://www.ti.com/
+ *     Lokesh Vutla <lokeshvutla@ti.com>
+ */
+
+#ifndef __INCLUDE_LINUX_TI_SCI_INTA_MSI_H
+#define __INCLUDE_LINUX_TI_SCI_INTA_MSI_H
+
+#include <linux/msi.h>
+#include <linux/soc/ti/ti_sci_protocol.h>
+
+struct irq_domain
+*ti_sci_inta_msi_create_irq_domain(struct fwnode_handle *fwnode,
+                                  struct msi_domain_info *info,
+                                  struct irq_domain *parent);
+int ti_sci_inta_msi_domain_alloc_irqs(struct device *dev,
+                                     struct ti_sci_resource *res);
+unsigned int ti_sci_inta_msi_get_virq(struct device *dev, u32 index);
+void ti_sci_inta_msi_domain_free_irqs(struct device *dev);
+#endif /* __INCLUDE_LINUX_TI_SCI_INTA_MSI_H */
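A hedged sketch of how a consumer might drive these helpers, using only the prototypes declared above; the resource pointer comes from the TISCI resource API, the index is illustrative, and the zero-means-failure check on the returned virq is an assumption.

#include <linux/errno.h>
#include <linux/soc/ti/ti_sci_inta_msi.h>

/* Hypothetical consumer: allocate event interrupts from a TISCI-managed
 * range, look up the Linux virq of the first one, and clean up on failure. */
static int inta_msi_example(struct device *dev, struct ti_sci_resource *res)
{
	unsigned int virq;
	int ret;

	ret = ti_sci_inta_msi_domain_alloc_irqs(dev, res);
	if (ret)
		return ret;

	virq = ti_sci_inta_msi_get_virq(dev, 0);
	if (!virq) {			/* failure convention assumed */
		ti_sci_inta_msi_domain_free_irqs(dev);
		return -ENODEV;
	}

	/* request_irq(virq, ...) would follow in a real driver */
	return 0;
}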
index 18435e5..568722a 100644 (file)
@@ -192,15 +192,68 @@ struct ti_sci_clk_ops {
                        u64 *current_freq);
 };
 
+/**
+ * struct ti_sci_rm_core_ops - Resource management core operations
+ * @get_range:         Get a range of resources belonging to ti sci host.
+ * @get_range_from_shost:      Get a range of resources belonging to
+ *                             specified host id.
+ *                     - s_host: Host processing entity to which the
+ *                               resources are allocated
+ *
+ * NOTE: for these functions, all the parameters are consolidated and defined
+ * as below:
+ * - handle:   Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * - dev_id:   TISCI device ID.
+ * - subtype:  Resource assignment subtype that is being requested
+ *             from the given device.
+ * - range_start:      Start index of the resource range
+ * - range_num:                Number of resources in the range
+ */
+struct ti_sci_rm_core_ops {
+       int (*get_range)(const struct ti_sci_handle *handle, u32 dev_id,
+                        u8 subtype, u16 *range_start, u16 *range_num);
+       int (*get_range_from_shost)(const struct ti_sci_handle *handle,
+                                   u32 dev_id, u8 subtype, u8 s_host,
+                                   u16 *range_start, u16 *range_num);
+};
+
+/**
+ * struct ti_sci_rm_irq_ops - IRQ management operations
+ * @set_irq:           Set an IRQ route between the requested source
+ *                     and destination
+ * @set_event_map:     Set an Event based peripheral irq to Interrupt
+ *                     Aggregator.
+ * @free_irq:          Free an IRQ route between the requested source
+ *                     and destination.
+ * @free_event_map:    Free an event based peripheral irq to Interrupt
+ *                     Aggregator.
+ */
+struct ti_sci_rm_irq_ops {
+       int (*set_irq)(const struct ti_sci_handle *handle, u16 src_id,
+                      u16 src_index, u16 dst_id, u16 dst_host_irq);
+       int (*set_event_map)(const struct ti_sci_handle *handle, u16 src_id,
+                            u16 src_index, u16 ia_id, u16 vint,
+                            u16 global_event, u8 vint_status_bit);
+       int (*free_irq)(const struct ti_sci_handle *handle, u16 src_id,
+                       u16 src_index, u16 dst_id, u16 dst_host_irq);
+       int (*free_event_map)(const struct ti_sci_handle *handle, u16 src_id,
+                             u16 src_index, u16 ia_id, u16 vint,
+                             u16 global_event, u8 vint_status_bit);
+};
+
 /**
  * struct ti_sci_ops - Function support for TI SCI
  * @dev_ops:   Device specific operations
  * @clk_ops:   Clock specific operations
+ * @rm_core_ops:       Resource management core operations.
+ * @rm_irq_ops:                IRQ management specific operations
  */
 struct ti_sci_ops {
        struct ti_sci_core_ops core_ops;
        struct ti_sci_dev_ops dev_ops;
        struct ti_sci_clk_ops clk_ops;
+       struct ti_sci_rm_core_ops rm_core_ops;
+       struct ti_sci_rm_irq_ops rm_irq_ops;
 };
 
 /**
@@ -213,10 +266,47 @@ struct ti_sci_handle {
        struct ti_sci_ops ops;
 };
 
+#define TI_SCI_RESOURCE_NULL   0xffff
+
+/**
+ * struct ti_sci_resource_desc - Description of TI SCI resource instance range.
+ * @start:     Start index of the resource.
+ * @num:       Number of resources.
+ * @res_map:   Bitmap to manage the allocation of these resources.
+ */
+struct ti_sci_resource_desc {
+       u16 start;
+       u16 num;
+       unsigned long *res_map;
+};
+
+/**
+ * struct ti_sci_resource - Structure representing a resource assigned
+ *                         to a device.
+ * @sets:      Number of sets available from this resource type
+ * @lock:      Lock to guard the res map in each set.
+ * @desc:      Array of resource descriptors.
+ */
+struct ti_sci_resource {
+       u16 sets;
+       raw_spinlock_t lock;
+       struct ti_sci_resource_desc *desc;
+};
+
 #if IS_ENABLED(CONFIG_TI_SCI_PROTOCOL)
 const struct ti_sci_handle *ti_sci_get_handle(struct device *dev);
 int ti_sci_put_handle(const struct ti_sci_handle *handle);
 const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev);
+const struct ti_sci_handle *ti_sci_get_by_phandle(struct device_node *np,
+                                                 const char *property);
+const struct ti_sci_handle *devm_ti_sci_get_by_phandle(struct device *dev,
+                                                      const char *property);
+u16 ti_sci_get_free_resource(struct ti_sci_resource *res);
+void ti_sci_release_resource(struct ti_sci_resource *res, u16 id);
+u32 ti_sci_get_num_resources(struct ti_sci_resource *res);
+struct ti_sci_resource *
+devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
+                           struct device *dev, u32 dev_id, char *of_prop);
 
 #else  /* CONFIG_TI_SCI_PROTOCOL */
 
@@ -236,6 +326,40 @@ const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev)
        return ERR_PTR(-EINVAL);
 }
 
+static inline
+const struct ti_sci_handle *ti_sci_get_by_phandle(struct device_node *np,
+                                                 const char *property)
+{
+       return ERR_PTR(-EINVAL);
+}
+
+static inline
+const struct ti_sci_handle *devm_ti_sci_get_by_phandle(struct device *dev,
+                                                      const char *property)
+{
+       return ERR_PTR(-EINVAL);
+}
+
+static inline u16 ti_sci_get_free_resource(struct ti_sci_resource *res)
+{
+       return TI_SCI_RESOURCE_NULL;
+}
+
+static inline void ti_sci_release_resource(struct ti_sci_resource *res, u16 id)
+{
+}
+
+static inline u32 ti_sci_get_num_resources(struct ti_sci_resource *res)
+{
+       return 0;
+}
+
+static inline struct ti_sci_resource *
+devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
+                           struct device *dev, u32 dev_id, char *of_prop)
+{
+       return ERR_PTR(-EINVAL);
+}
 #endif /* CONFIG_TI_SCI_PROTOCOL */
 
 #endif /* __TISCI_PROTOCOL_H */
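To make the new resource-management surface concrete, here is a hedged probe-time sketch built only from the declarations above; the phandle property name, device id and range property are placeholders, not values from any binding.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/soc/ti/ti_sci_protocol.h>

/* Hypothetical probe path: look up the TISCI handle through a DT phandle,
 * fetch a resource range for this device, then allocate and release an index. */
static int ti_sci_resource_example(struct device *dev)
{
	const struct ti_sci_handle *handle;
	struct ti_sci_resource *res;
	u16 idx;

	handle = devm_ti_sci_get_by_phandle(dev, "ti,sci");	/* property name assumed */
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	res = devm_ti_sci_get_of_resource(handle, dev, 0 /* dev_id placeholder */,
					  "ti,sci-rm-range-example");
	if (IS_ERR(res))
		return PTR_ERR(res);

	idx = ti_sci_get_free_resource(res);
	if (idx == TI_SCI_RESOURCE_NULL)
		return -ENOSPC;

	/* ... program hardware with idx ... */
	ti_sci_release_resource(res, idx);
	return 0;
}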
diff --git a/include/soc/at91/atmel_tcb.h b/include/soc/at91/atmel_tcb.h
new file mode 100644 (file)
index 0000000..c3c7200
--- /dev/null
@@ -0,0 +1,270 @@
+/*
+ * Timer/Counter Unit (TC) registers.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __SOC_ATMEL_TCB_H
+#define __SOC_ATMEL_TCB_H
+
+#include <linux/compiler.h>
+#include <linux/list.h>
+
+/*
+ * Many 32-bit Atmel SOCs include one or more TC blocks, each of which holds
+ * three general-purpose 16-bit timers.  These timers share one register bank.
+ * Depending on the SOC, each timer may have its own clock and IRQ, or those
+ * may be shared by the whole TC block.
+ *
+ * These TC blocks may have up to nine external pins:  TCLK0..2 signals for
+ * clocks or clock gates, and per-timer TIOA and TIOB signals used for PWM
+ * or triggering.  Those pins need to be set up for use with the TC block,
+ * else they will be used as GPIOs or for a different controller.
+ *
+ * Although we expect each TC block to have a platform_device node, those
+ * nodes are not what drivers bind to.  Instead, they ask for a specific
+ * TC block, by number ... which is a common approach on systems with many
+ * timers.  Then they use clk_get() and platform_get_irq() to get clock and
+ * IRQ resources.
+ */
+
+struct clk;
+
+/**
+ * struct atmel_tcb_config - SoC data for a Timer/Counter Block
+ * @counter_width: size in bits of a timer counter register
+ */
+struct atmel_tcb_config {
+       size_t  counter_width;
+};
+
+/**
+ * struct atmel_tc - information about a Timer/Counter Block
+ * @pdev: physical device
+ * @regs: mapping through which the I/O registers can be accessed
+ * @id: block id
+ * @tcb_config: configuration data from SoC
+ * @irq: irq for each of the three channels
+ * @clk: internal clock source for each of the three channels
+ * @node: list node, for tclib internal use
+ * @allocated: if already used, for tclib internal use
+ *
+ * On some platforms, each TC channel has its own clocks and IRQs,
+ * while on others, all TC channels share the same clock and IRQ.
+ * Drivers should clk_enable() all the clocks they need even though
+ * all the entries in @clk may point to the same physical clock.
+ * Likewise, drivers should request irqs independently for each
+ * channel, but they must use IRQF_SHARED in case some of the entries
+ * in @irq are actually the same IRQ.
+ */
+struct atmel_tc {
+       struct platform_device  *pdev;
+       void __iomem            *regs;
+       int                     id;
+       const struct atmel_tcb_config *tcb_config;
+       int                     irq[3];
+       struct clk              *clk[3];
+       struct clk              *slow_clk;
+       struct list_head        node;
+       bool                    allocated;
+};
+
+extern struct atmel_tc *atmel_tc_alloc(unsigned block);
+extern void atmel_tc_free(struct atmel_tc *tc);
+
+/* platform-specific ATMEL_TC_TIMER_CLOCKx divisors (0 means 32KiHz) */
+extern const u8 atmel_tc_divisors[5];
+
+
+/*
+ * Two registers have block-wide controls.  These are: configuring the three
+ * "external" clocks (or event sources) used by the timer channels; and
+ * synchronizing the timers by resetting them all at once.
+ *
+ * "External" can mean "external to chip" using the TCLK0, TCLK1, or TCLK2
+ * signals.  Or, it can mean "external to timer", using the TIOA output from
+ * one of the other two timers that's being run in waveform mode.
+ */
+
+#define ATMEL_TC_BCR   0xc0            /* TC Block Control Register */
+#define     ATMEL_TC_SYNC      (1 << 0)        /* synchronize timers */
+
+#define ATMEL_TC_BMR   0xc4            /* TC Block Mode Register */
+#define     ATMEL_TC_TC0XC0S   (3 << 0)        /* external clock 0 source */
+#define        ATMEL_TC_TC0XC0S_TCLK0  (0 << 0)
+#define        ATMEL_TC_TC0XC0S_NONE   (1 << 0)
+#define        ATMEL_TC_TC0XC0S_TIOA1  (2 << 0)
+#define        ATMEL_TC_TC0XC0S_TIOA2  (3 << 0)
+#define     ATMEL_TC_TC1XC1S   (3 << 2)        /* external clock 1 source */
+#define        ATMEL_TC_TC1XC1S_TCLK1  (0 << 2)
+#define        ATMEL_TC_TC1XC1S_NONE   (1 << 2)
+#define        ATMEL_TC_TC1XC1S_TIOA0  (2 << 2)
+#define        ATMEL_TC_TC1XC1S_TIOA2  (3 << 2)
+#define     ATMEL_TC_TC2XC2S   (3 << 4)        /* external clock 2 source */
+#define        ATMEL_TC_TC2XC2S_TCLK2  (0 << 4)
+#define        ATMEL_TC_TC2XC2S_NONE   (1 << 4)
+#define        ATMEL_TC_TC2XC2S_TIOA0  (2 << 4)
+#define        ATMEL_TC_TC2XC2S_TIOA1  (3 << 4)
+
+
+/*
+ * Each TC block has three "channels", each with one counter and controls.
+ *
+ * Note that the semantics of ATMEL_TC_TIMER_CLOCKx (input clock selection
+ * when it's not "external") is silicon-specific.  AT91 platforms use one
+ * set of definitions; AVR32 platforms use a different set.  Don't hard-wire
+ * such knowledge into your code, use the global "atmel_tc_divisors" ...
+ * where index N is the divisor for clock N+1, else zero to indicate it uses
+ * the 32 KiHz clock.
+ *
+ * The timers can be chained in various ways, and operated in "waveform"
+ * generation mode (including PWM) or "capture" mode (to time events).  In
+ * both modes, behavior can be configured in many ways.
+ *
+ * Each timer has two I/O pins, TIOA and TIOB.  Waveform mode uses TIOA as a
+ * PWM output, and TIOB as either another PWM or as a trigger.  Capture mode
+ * uses them only as inputs.
+ */
+#define ATMEL_TC_CHAN(idx)     ((idx)*0x40)
+#define ATMEL_TC_REG(idx, reg) (ATMEL_TC_CHAN(idx) + ATMEL_TC_ ## reg)
+
+#define ATMEL_TC_CCR   0x00            /* Channel Control Register */
+#define     ATMEL_TC_CLKEN     (1 << 0)        /* clock enable */
+#define     ATMEL_TC_CLKDIS    (1 << 1)        /* clock disable */
+#define     ATMEL_TC_SWTRG     (1 << 2)        /* software trigger */
+
+#define ATMEL_TC_CMR   0x04            /* Channel Mode Register */
+
+/* Both modes share some CMR bits */
+#define     ATMEL_TC_TCCLKS    (7 << 0)        /* clock source */
+#define        ATMEL_TC_TIMER_CLOCK1   (0 << 0)
+#define        ATMEL_TC_TIMER_CLOCK2   (1 << 0)
+#define        ATMEL_TC_TIMER_CLOCK3   (2 << 0)
+#define        ATMEL_TC_TIMER_CLOCK4   (3 << 0)
+#define        ATMEL_TC_TIMER_CLOCK5   (4 << 0)
+#define        ATMEL_TC_XC0            (5 << 0)
+#define        ATMEL_TC_XC1            (6 << 0)
+#define        ATMEL_TC_XC2            (7 << 0)
+#define     ATMEL_TC_CLKI      (1 << 3)        /* clock invert */
+#define     ATMEL_TC_BURST     (3 << 4)        /* clock gating */
+#define        ATMEL_TC_GATE_NONE      (0 << 4)
+#define        ATMEL_TC_GATE_XC0       (1 << 4)
+#define        ATMEL_TC_GATE_XC1       (2 << 4)
+#define        ATMEL_TC_GATE_XC2       (3 << 4)
+#define     ATMEL_TC_WAVE      (1 << 15)       /* true = Waveform mode */
+
+/* CAPTURE mode CMR bits */
+#define     ATMEL_TC_LDBSTOP   (1 << 6)        /* counter stops on RB load */
+#define     ATMEL_TC_LDBDIS    (1 << 7)        /* counter disable on RB load */
+#define     ATMEL_TC_ETRGEDG   (3 << 8)        /* external trigger edge */
+#define        ATMEL_TC_ETRGEDG_NONE   (0 << 8)
+#define        ATMEL_TC_ETRGEDG_RISING (1 << 8)
+#define        ATMEL_TC_ETRGEDG_FALLING        (2 << 8)
+#define        ATMEL_TC_ETRGEDG_BOTH   (3 << 8)
+#define     ATMEL_TC_ABETRG    (1 << 10)       /* external trigger is TIOA? */
+#define     ATMEL_TC_CPCTRG    (1 << 14)       /* RC compare trigger enable */
+#define     ATMEL_TC_LDRA      (3 << 16)       /* RA loading edge (of TIOA) */
+#define        ATMEL_TC_LDRA_NONE      (0 << 16)
+#define        ATMEL_TC_LDRA_RISING    (1 << 16)
+#define        ATMEL_TC_LDRA_FALLING   (2 << 16)
+#define        ATMEL_TC_LDRA_BOTH      (3 << 16)
+#define     ATMEL_TC_LDRB      (3 << 18)       /* RB loading edge (of TIOA) */
+#define        ATMEL_TC_LDRB_NONE      (0 << 18)
+#define        ATMEL_TC_LDRB_RISING    (1 << 18)
+#define        ATMEL_TC_LDRB_FALLING   (2 << 18)
+#define        ATMEL_TC_LDRB_BOTH      (3 << 18)
+
+/* WAVEFORM mode CMR bits */
+#define     ATMEL_TC_CPCSTOP   (1 <<  6)       /* RC compare stops counter */
+#define     ATMEL_TC_CPCDIS    (1 <<  7)       /* RC compare disables counter */
+#define     ATMEL_TC_EEVTEDG   (3 <<  8)       /* external event edge */
+#define        ATMEL_TC_EEVTEDG_NONE   (0 << 8)
+#define        ATMEL_TC_EEVTEDG_RISING (1 << 8)
+#define        ATMEL_TC_EEVTEDG_FALLING        (2 << 8)
+#define        ATMEL_TC_EEVTEDG_BOTH   (3 << 8)
+#define     ATMEL_TC_EEVT      (3 << 10)       /* external event source */
+#define        ATMEL_TC_EEVT_TIOB      (0 << 10)
+#define        ATMEL_TC_EEVT_XC0       (1 << 10)
+#define        ATMEL_TC_EEVT_XC1       (2 << 10)
+#define        ATMEL_TC_EEVT_XC2       (3 << 10)
+#define     ATMEL_TC_ENETRG    (1 << 12)       /* external event is trigger */
+#define     ATMEL_TC_WAVESEL   (3 << 13)       /* waveform type */
+#define        ATMEL_TC_WAVESEL_UP     (0 << 13)
+#define        ATMEL_TC_WAVESEL_UPDOWN (1 << 13)
+#define        ATMEL_TC_WAVESEL_UP_AUTO        (2 << 13)
+#define        ATMEL_TC_WAVESEL_UPDOWN_AUTO (3 << 13)
+#define     ATMEL_TC_ACPA      (3 << 16)       /* RA compare changes TIOA */
+#define        ATMEL_TC_ACPA_NONE      (0 << 16)
+#define        ATMEL_TC_ACPA_SET       (1 << 16)
+#define        ATMEL_TC_ACPA_CLEAR     (2 << 16)
+#define        ATMEL_TC_ACPA_TOGGLE    (3 << 16)
+#define     ATMEL_TC_ACPC      (3 << 18)       /* RC compare changes TIOA */
+#define        ATMEL_TC_ACPC_NONE      (0 << 18)
+#define        ATMEL_TC_ACPC_SET       (1 << 18)
+#define        ATMEL_TC_ACPC_CLEAR     (2 << 18)
+#define        ATMEL_TC_ACPC_TOGGLE    (3 << 18)
+#define     ATMEL_TC_AEEVT     (3 << 20)       /* external event changes TIOA */
+#define        ATMEL_TC_AEEVT_NONE     (0 << 20)
+#define        ATMEL_TC_AEEVT_SET      (1 << 20)
+#define        ATMEL_TC_AEEVT_CLEAR    (2 << 20)
+#define        ATMEL_TC_AEEVT_TOGGLE   (3 << 20)
+#define     ATMEL_TC_ASWTRG    (3 << 22)       /* software trigger changes TIOA */
+#define        ATMEL_TC_ASWTRG_NONE    (0 << 22)
+#define        ATMEL_TC_ASWTRG_SET     (1 << 22)
+#define        ATMEL_TC_ASWTRG_CLEAR   (2 << 22)
+#define        ATMEL_TC_ASWTRG_TOGGLE  (3 << 22)
+#define     ATMEL_TC_BCPB      (3 << 24)       /* RB compare changes TIOB */
+#define        ATMEL_TC_BCPB_NONE      (0 << 24)
+#define        ATMEL_TC_BCPB_SET       (1 << 24)
+#define        ATMEL_TC_BCPB_CLEAR     (2 << 24)
+#define        ATMEL_TC_BCPB_TOGGLE    (3 << 24)
+#define     ATMEL_TC_BCPC      (3 << 26)       /* RC compare changes TIOB */
+#define        ATMEL_TC_BCPC_NONE      (0 << 26)
+#define        ATMEL_TC_BCPC_SET       (1 << 26)
+#define        ATMEL_TC_BCPC_CLEAR     (2 << 26)
+#define        ATMEL_TC_BCPC_TOGGLE    (3 << 26)
+#define     ATMEL_TC_BEEVT     (3 << 28)       /* external event changes TIOB */
+#define        ATMEL_TC_BEEVT_NONE     (0 << 28)
+#define        ATMEL_TC_BEEVT_SET      (1 << 28)
+#define        ATMEL_TC_BEEVT_CLEAR    (2 << 28)
+#define        ATMEL_TC_BEEVT_TOGGLE   (3 << 28)
+#define     ATMEL_TC_BSWTRG    (3 << 30)       /* software trigger changes TIOB */
+#define        ATMEL_TC_BSWTRG_NONE    (0 << 30)
+#define        ATMEL_TC_BSWTRG_SET     (1 << 30)
+#define        ATMEL_TC_BSWTRG_CLEAR   (2 << 30)
+#define        ATMEL_TC_BSWTRG_TOGGLE  (3 << 30)
+
+#define ATMEL_TC_CV    0x10            /* counter Value */
+#define ATMEL_TC_RA    0x14            /* register A */
+#define ATMEL_TC_RB    0x18            /* register B */
+#define ATMEL_TC_RC    0x1c            /* register C */
+
+#define ATMEL_TC_SR    0x20            /* status (read-only) */
+/* Status-only flags */
+#define     ATMEL_TC_CLKSTA    (1 << 16)       /* clock enabled */
+#define     ATMEL_TC_MTIOA     (1 << 17)       /* TIOA mirror */
+#define     ATMEL_TC_MTIOB     (1 << 18)       /* TIOB mirror */
+
+#define ATMEL_TC_IER   0x24            /* interrupt enable (write-only) */
+#define ATMEL_TC_IDR   0x28            /* interrupt disable (write-only) */
+#define ATMEL_TC_IMR   0x2c            /* interrupt mask (read-only) */
+
+/* Status and IRQ flags */
+#define     ATMEL_TC_COVFS     (1 <<  0)       /* counter overflow */
+#define     ATMEL_TC_LOVRS     (1 <<  1)       /* load overrun */
+#define     ATMEL_TC_CPAS      (1 <<  2)       /* RA compare */
+#define     ATMEL_TC_CPBS      (1 <<  3)       /* RB compare */
+#define     ATMEL_TC_CPCS      (1 <<  4)       /* RC compare */
+#define     ATMEL_TC_LDRAS     (1 <<  5)       /* RA loading */
+#define     ATMEL_TC_LDRBS     (1 <<  6)       /* RB loading */
+#define     ATMEL_TC_ETRGS     (1 <<  7)       /* external trigger */
+#define     ATMEL_TC_ALL_IRQ   (ATMEL_TC_COVFS | ATMEL_TC_LOVRS | \
+                                ATMEL_TC_CPAS | ATMEL_TC_CPBS | \
+                                ATMEL_TC_CPCS | ATMEL_TC_LDRAS | \
+                                ATMEL_TC_LDRBS | ATMEL_TC_ETRGS) \
+                                /* all IRQs */
+
+#endif
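Since the header comment above describes how a driver claims a TC block and then handles clocks itself, a short sketch may help; it is not lifted from a real driver, uses only identifiers defined in this header plus the usual clk/io helpers, and assumes atmel_tc_alloc() returns NULL on failure.

#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <soc/at91/atmel_tcb.h>

/* Sketch: claim TC block 0, enable the channel 0 clock and start channel 0
 * as a free-running up-counter in waveform mode. */
static int tcb_upcount_example(void)
{
	struct atmel_tc *tc = atmel_tc_alloc(0);
	int ret;

	if (!tc)
		return -ENODEV;		/* NULL-on-failure assumed */

	ret = clk_prepare_enable(tc->clk[0]);
	if (ret) {
		atmel_tc_free(tc);
		return ret;
	}

	writel(ATMEL_TC_TIMER_CLOCK1 | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP,
	       tc->regs + ATMEL_TC_REG(0, CMR));
	writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
	       tc->regs + ATMEL_TC_REG(0, CCR));
	return 0;
}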
index 5f3e2ba..8fee066 100644 (file)
@@ -91,6 +91,9 @@ config GENERIC_MSI_IRQ_DOMAIN
        select IRQ_DOMAIN_HIERARCHY
        select GENERIC_MSI_IRQ
 
+config IRQ_MSI_IOMMU
+       bool
+
 config HANDLE_DOMAIN_IRQ
        bool
 
index 51128be..29d6c7d 100644 (file)
@@ -1459,6 +1459,33 @@ int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
        return -ENOSYS;
 }
 EXPORT_SYMBOL_GPL(irq_chip_set_wake_parent);
+
+/**
+ * irq_chip_request_resources_parent - Request resources on the parent interrupt
+ * @data:      Pointer to interrupt specific data
+ */
+int irq_chip_request_resources_parent(struct irq_data *data)
+{
+       data = data->parent_data;
+
+       if (data->chip->irq_request_resources)
+               return data->chip->irq_request_resources(data);
+
+       return -ENOSYS;
+}
+EXPORT_SYMBOL_GPL(irq_chip_request_resources_parent);
+
+/**
+ * irq_chip_release_resources_parent - Release resources on the parent interrupt
+ * @data:      Pointer to interrupt specific data
+ */
+void irq_chip_release_resources_parent(struct irq_data *data)
+{
+       data = data->parent_data;
+       if (data->chip->irq_release_resources)
+               data->chip->irq_release_resources(data);
+}
+EXPORT_SYMBOL_GPL(irq_chip_release_resources_parent);
 #endif
 
 /**
index 9ed29e4..a453e22 100644 (file)
@@ -1297,7 +1297,7 @@ int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
 /**
  * __irq_domain_alloc_irqs - Allocate IRQs from domain
  * @domain:    domain to allocate from
- * @irq_base:  allocate specified IRQ nubmer if irq_base >= 0
+ * @irq_base:  allocate specified IRQ number if irq_base >= 0
  * @nr_irqs:   number of IRQs to allocate
  * @node:      NUMA node id for memory allocation
  * @arg:       domain specific argument
index bd7b9f2..2d6a666 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2735,9 +2735,17 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
                return -EINVAL;
 
        len = PAGE_ALIGN(len);
+       end = start + len;
        if (len == 0)
                return -EINVAL;
 
+       /*
+        * arch_unmap() might do unmaps itself.  It must be called
+        * and finish any rbtree manipulation before this code
+        * runs and also starts to manipulate the rbtree.
+        */
+       arch_unmap(mm, start, end);
+
        /* Find the first overlapping VMA */
        vma = find_vma(mm, start);
        if (!vma)
@@ -2746,7 +2754,6 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
        /* we have  start < vma->vm_end  */
 
        /* if it doesn't overlap, we have nothing.. */
-       end = start + len;
        if (vma->vm_start >= end)
                return 0;
 
@@ -2816,12 +2823,6 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
        /* Detach vmas from rbtree */
        detach_vmas_to_be_unmapped(mm, vma, prev, end);
 
-       /*
-        * mpx unmap needs to be called with mmap_sem held for write.
-        * It is safe to call it before unmap_region().
-        */
-       arch_unmap(mm, vma, start, end);
-
        if (downgrade)
                downgrade_write(&mm->mmap_sem);
 
index dabfcf7..7a0e64c 100644 (file)
@@ -381,6 +381,7 @@ struct kvm_sync_regs {
 #define KVM_X86_QUIRK_LINT0_REENABLED  (1 << 0)
 #define KVM_X86_QUIRK_CD_NW_CLEARED    (1 << 1)
 #define KVM_X86_QUIRK_LAPIC_MMIO_HOLE  (1 << 2)
+#define KVM_X86_QUIRK_OUT_7E_INC_RIP   (1 << 3)
 
 #define KVM_STATE_NESTED_GUEST_MODE    0x00000001
 #define KVM_STATE_NESTED_RUN_PENDING   0x00000002
index f3329ca..ac67bbe 100644 (file)
@@ -27,8 +27,29 @@ enum perf_event_x86_regs {
        PERF_REG_X86_R13,
        PERF_REG_X86_R14,
        PERF_REG_X86_R15,
-
+       /* These are the limits for the GPRs. */
        PERF_REG_X86_32_MAX = PERF_REG_X86_GS + 1,
        PERF_REG_X86_64_MAX = PERF_REG_X86_R15 + 1,
+
+       /* These all need two bits set because they are 128bit */
+       PERF_REG_X86_XMM0  = 32,
+       PERF_REG_X86_XMM1  = 34,
+       PERF_REG_X86_XMM2  = 36,
+       PERF_REG_X86_XMM3  = 38,
+       PERF_REG_X86_XMM4  = 40,
+       PERF_REG_X86_XMM5  = 42,
+       PERF_REG_X86_XMM6  = 44,
+       PERF_REG_X86_XMM7  = 46,
+       PERF_REG_X86_XMM8  = 48,
+       PERF_REG_X86_XMM9  = 50,
+       PERF_REG_X86_XMM10 = 52,
+       PERF_REG_X86_XMM11 = 54,
+       PERF_REG_X86_XMM12 = 56,
+       PERF_REG_X86_XMM13 = 58,
+       PERF_REG_X86_XMM14 = 60,
+       PERF_REG_X86_XMM15 = 62,
+
+       /* These include both GPRs and XMM registers */
+       PERF_REG_X86_XMM_MAX = PERF_REG_X86_XMM15 + 2,
 };
 #endif /* _ASM_X86_PERF_REGS_H */
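Following the comment that each 128-bit XMM register occupies two consecutive bit positions starting at its enum value, a small userspace sketch of building a sample_regs mask for one XMM register; the helper name is made up for illustration.

#include <stdint.h>

/* Two adjacent bits per 128-bit register, starting at its enum value. */
static inline uint64_t perf_xmm_regs_mask(unsigned int xmm_reg)
{
	return 3ULL << xmm_reg;
}

/* e.g.: attr.sample_regs_intr |= perf_xmm_regs_mask(PERF_REG_X86_XMM0); */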
index 3b24dc0..9d05572 100644 (file)
@@ -257,6 +257,7 @@ ENTRY(__memcpy_mcsafe)
        /* Copy successful. Return zero */
 .L_done_memcpy_trap:
        xorl %eax, %eax
+.L_done:
        ret
 ENDPROC(__memcpy_mcsafe)
 EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
@@ -273,7 +274,7 @@ EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
        addl    %edx, %ecx
 .E_trailing_bytes:
        mov     %ecx, %eax
-       ret
+       jmp     .L_done
 
        /*
         * For write fault handling, given the destination is unaligned,
diff --git a/tools/lib/traceevent/Documentation/Makefile b/tools/lib/traceevent/Documentation/Makefile
new file mode 100644 (file)
index 0000000..aa72ab9
--- /dev/null
@@ -0,0 +1,207 @@
+include ../../../scripts/Makefile.include
+include ../../../scripts/utilities.mak
+
+# This Makefile and manpage XSL files were taken from tools/perf/Documentation
+# and modified for libtraceevent.
+
+MAN3_TXT= \
+       $(wildcard libtraceevent-*.txt) \
+       libtraceevent.txt
+
+MAN_TXT = $(MAN3_TXT)
+_MAN_XML=$(patsubst %.txt,%.xml,$(MAN_TXT))
+_MAN_HTML=$(patsubst %.txt,%.html,$(MAN_TXT))
+_DOC_MAN3=$(patsubst %.txt,%.3,$(MAN3_TXT))
+
+MAN_XML=$(addprefix $(OUTPUT),$(_MAN_XML))
+MAN_HTML=$(addprefix $(OUTPUT),$(_MAN_HTML))
+DOC_MAN3=$(addprefix $(OUTPUT),$(_DOC_MAN3))
+
+# Make the path relative to DESTDIR, not prefix
+ifndef DESTDIR
+prefix?=$(HOME)
+endif
+bindir?=$(prefix)/bin
+htmldir?=$(prefix)/share/doc/libtraceevent-doc
+pdfdir?=$(prefix)/share/doc/libtraceevent-doc
+mandir?=$(prefix)/share/man
+man3dir=$(mandir)/man3
+
+ASCIIDOC=asciidoc
+ASCIIDOC_EXTRA = --unsafe -f asciidoc.conf
+ASCIIDOC_HTML = xhtml11
+MANPAGE_XSL = manpage-normal.xsl
+XMLTO_EXTRA =
+INSTALL?=install
+RM ?= rm -f
+
+ifdef USE_ASCIIDOCTOR
+ASCIIDOC = asciidoctor
+ASCIIDOC_EXTRA = -a compat-mode
+ASCIIDOC_EXTRA += -I. -rasciidoctor-extensions
+ASCIIDOC_EXTRA += -a mansource="libtraceevent" -a manmanual="libtraceevent Manual"
+ASCIIDOC_HTML = xhtml5
+endif
+
+XMLTO=xmlto
+
+_tmp_tool_path := $(call get-executable,$(ASCIIDOC))
+ifeq ($(_tmp_tool_path),)
+       missing_tools = $(ASCIIDOC)
+endif
+
+ifndef USE_ASCIIDOCTOR
+_tmp_tool_path := $(call get-executable,$(XMLTO))
+ifeq ($(_tmp_tool_path),)
+       missing_tools += $(XMLTO)
+endif
+endif
+
+#
+# For asciidoc ...
+#      -7.1.2, no extra settings are needed.
+#      8.0-,   set ASCIIDOC8.
+#
+
+#
+# For docbook-xsl ...
+#      -1.68.1,        set ASCIIDOC_NO_ROFF? (based on changelog from 1.73.0)
+#      1.69.0,         no extra settings are needed?
+#      1.69.1-1.71.0,  set DOCBOOK_SUPPRESS_SP?
+#      1.71.1,         no extra settings are needed?
+#      1.72.0,         set DOCBOOK_XSL_172.
+#      1.73.0-,        set ASCIIDOC_NO_ROFF
+#
+
+#
+# If you had been using DOCBOOK_XSL_172 in an attempt to get rid
+# of 'the ".ft C" problem' in your generated manpages, and you
+# instead ended up with weird characters around callouts, try
+# using ASCIIDOC_NO_ROFF instead (it works fine with ASCIIDOC8).
+#
+
+ifdef ASCIIDOC8
+ASCIIDOC_EXTRA += -a asciidoc7compatible
+endif
+ifdef DOCBOOK_XSL_172
+ASCIIDOC_EXTRA += -a libtraceevent-asciidoc-no-roff
+MANPAGE_XSL = manpage-1.72.xsl
+else
+       ifdef ASCIIDOC_NO_ROFF
+       # docbook-xsl after 1.72 needs the regular XSL, but will not
+       # pass-thru raw roff codes from asciidoc.conf, so turn them off.
+       ASCIIDOC_EXTRA += -a libtraceevent-asciidoc-no-roff
+       endif
+endif
+ifdef MAN_BOLD_LITERAL
+XMLTO_EXTRA += -m manpage-bold-literal.xsl
+endif
+ifdef DOCBOOK_SUPPRESS_SP
+XMLTO_EXTRA += -m manpage-suppress-sp.xsl
+endif
+
+SHELL_PATH ?= $(SHELL)
+# Shell quote;
+SHELL_PATH_SQ = $(subst ','\'',$(SHELL_PATH))
+
+DESTDIR ?=
+DESTDIR_SQ = '$(subst ','\'',$(DESTDIR))'
+
+export DESTDIR DESTDIR_SQ
+
+#
+# Please note that there is a minor bug in asciidoc.
+# The version after 6.0.3 _will_ include the patch found here:
+#   http://marc.theaimsgroup.com/?l=libtraceevent&m=111558757202243&w=2
+#
+# Until that version is released you may have to apply the patch
+# yourself - yes, all 6 characters of it!
+#
+QUIET_SUBDIR0  = +$(MAKE) -C # space to separate -C and subdir
+QUIET_SUBDIR1  =
+
+ifneq ($(findstring $(MAKEFLAGS),w),w)
+PRINT_DIR = --no-print-directory
+else # "make -w"
+NO_SUBDIR = :
+endif
+
+ifneq ($(findstring $(MAKEFLAGS),s),s)
+ifneq ($(V),1)
+       QUIET_ASCIIDOC  = @echo '  ASCIIDOC '$@;
+       QUIET_XMLTO     = @echo '  XMLTO    '$@;
+       QUIET_SUBDIR0   = +@subdir=
+       QUIET_SUBDIR1   = ;$(NO_SUBDIR) \
+                          echo '  SUBDIR   ' $$subdir; \
+                         $(MAKE) $(PRINT_DIR) -C $$subdir
+       export V
+endif
+endif
+
+all: html man
+
+man: man3
+man3: $(DOC_MAN3)
+
+html: $(MAN_HTML)
+
+$(MAN_HTML) $(DOC_MAN3): asciidoc.conf
+
+install: install-man
+
+check-man-tools:
+ifdef missing_tools
+       $(error "You need to install $(missing_tools) for man pages")
+endif
+
+do-install-man: man
+       $(call QUIET_INSTALL, Documentation-man) \
+               $(INSTALL) -d -m 755 $(DESTDIR)$(man3dir); \
+               $(INSTALL) -m 644 $(DOC_MAN3) $(DESTDIR)$(man3dir);
+
+install-man: check-man-tools man do-install-man
+
+uninstall: uninstall-man
+
+uninstall-man:
+       $(call QUIET_UNINST, Documentation-man) \
+               $(Q)$(RM) $(addprefix $(DESTDIR)$(man3dir)/,$(DOC_MAN3))
+
+
+ifdef missing_tools
+  DO_INSTALL_MAN = $(warning Please install $(missing_tools) to have the man pages installed)
+else
+  DO_INSTALL_MAN = do-install-man
+endif
+
+CLEAN_FILES =                                  \
+       $(MAN_XML) $(addsuffix +,$(MAN_XML))    \
+       $(MAN_HTML) $(addsuffix +,$(MAN_HTML))  \
+       $(DOC_MAN3) *.3
+
+clean:
+       $(call QUIET_CLEAN, Documentation) $(RM) $(CLEAN_FILES)
+
+ifdef USE_ASCIIDOCTOR
+$(OUTPUT)%.3 : $(OUTPUT)%.txt
+       $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \
+       $(ASCIIDOC) -b manpage -d manpage \
+               $(ASCIIDOC_EXTRA) -alibtraceevent_version=$(EVENT_PARSE_VERSION) -o $@+ $< && \
+       mv $@+ $@
+endif
+
+$(OUTPUT)%.3 : $(OUTPUT)%.xml
+       $(QUIET_XMLTO)$(RM) $@ && \
+       $(XMLTO) -o $(OUTPUT). -m $(MANPAGE_XSL) $(XMLTO_EXTRA) man $<
+
+$(OUTPUT)%.xml : %.txt
+       $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \
+       $(ASCIIDOC) -b docbook -d manpage \
+               $(ASCIIDOC_EXTRA) -alibtraceevent_version=$(EVENT_PARSE_VERSION) -o $@+ $< && \
+       mv $@+ $@
+
+$(MAN_HTML): $(OUTPUT)%.html : %.txt
+       $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \
+       $(ASCIIDOC) -b $(ASCIIDOC_HTML) -d manpage \
+               $(ASCIIDOC_EXTRA) -aperf_version=$(EVENT_PARSE_VERSION) -o $@+ $< && \
+       mv $@+ $@
diff --git a/tools/lib/traceevent/Documentation/asciidoc.conf b/tools/lib/traceevent/Documentation/asciidoc.conf
new file mode 100644 (file)
index 0000000..0759571
--- /dev/null
@@ -0,0 +1,120 @@
+## linktep: macro
+#
+# Usage: linktep:command[manpage-section]
+#
+# Note, {0} is the manpage section, while {target} is the command.
+#
+# Show TEP link as: <command>(<section>); if section is defined, else just show
+# the command.
+
+[macros]
+(?su)[\\]?(?P<name>linktep):(?P<target>\S*?)\[(?P<attrlist>.*?)\]=
+
+[attributes]
+asterisk=&#42;
+plus=&#43;
+caret=&#94;
+startsb=&#91;
+endsb=&#93;
+tilde=&#126;
+
+ifdef::backend-docbook[]
+[linktep-inlinemacro]
+{0%{target}}
+{0#<citerefentry>}
+{0#<refentrytitle>{target}</refentrytitle><manvolnum>{0}</manvolnum>}
+{0#</citerefentry>}
+endif::backend-docbook[]
+
+ifdef::backend-docbook[]
+ifndef::tep-asciidoc-no-roff[]
+# "unbreak" docbook-xsl v1.68 for manpages. v1.69 works with or without this.
+# v1.72 breaks with this because it replaces dots not in roff requests.
+[listingblock]
+<example><title>{title}</title>
+<literallayout>
+ifdef::doctype-manpage[]
+&#10;.ft C&#10;
+endif::doctype-manpage[]
+|
+ifdef::doctype-manpage[]
+&#10;.ft&#10;
+endif::doctype-manpage[]
+</literallayout>
+{title#}</example>
+endif::tep-asciidoc-no-roff[]
+
+ifdef::tep-asciidoc-no-roff[]
+ifdef::doctype-manpage[]
+# The following two small workarounds insert a simple paragraph after screen
+[listingblock]
+<example><title>{title}</title>
+<literallayout>
+|
+</literallayout><simpara></simpara>
+{title#}</example>
+
+[verseblock]
+<formalpara{id? id="{id}"}><title>{title}</title><para>
+{title%}<literallayout{id? id="{id}"}>
+{title#}<literallayout>
+|
+</literallayout>
+{title#}</para></formalpara>
+{title%}<simpara></simpara>
+endif::doctype-manpage[]
+endif::tep-asciidoc-no-roff[]
+endif::backend-docbook[]
+
+ifdef::doctype-manpage[]
+ifdef::backend-docbook[]
+[header]
+template::[header-declarations]
+<refentry>
+<refmeta>
+<refentrytitle>{mantitle}</refentrytitle>
+<manvolnum>{manvolnum}</manvolnum>
+<refmiscinfo class="source">libtraceevent</refmiscinfo>
+<refmiscinfo class="version">{libtraceevent_version}</refmiscinfo>
+<refmiscinfo class="manual">libtraceevent Manual</refmiscinfo>
+</refmeta>
+<refnamediv>
+  <refname>{manname1}</refname>
+  <refname>{manname2}</refname>
+  <refname>{manname3}</refname>
+  <refname>{manname4}</refname>
+  <refname>{manname5}</refname>
+  <refname>{manname6}</refname>
+  <refname>{manname7}</refname>
+  <refname>{manname8}</refname>
+  <refname>{manname9}</refname>
+  <refname>{manname10}</refname>
+  <refname>{manname11}</refname>
+  <refname>{manname12}</refname>
+  <refname>{manname13}</refname>
+  <refname>{manname14}</refname>
+  <refname>{manname15}</refname>
+  <refname>{manname16}</refname>
+  <refname>{manname17}</refname>
+  <refname>{manname18}</refname>
+  <refname>{manname19}</refname>
+  <refname>{manname20}</refname>
+  <refname>{manname21}</refname>
+  <refname>{manname22}</refname>
+  <refname>{manname23}</refname>
+  <refname>{manname24}</refname>
+  <refname>{manname25}</refname>
+  <refname>{manname26}</refname>
+  <refname>{manname27}</refname>
+  <refname>{manname28}</refname>
+  <refname>{manname29}</refname>
+  <refname>{manname30}</refname>
+  <refpurpose>{manpurpose}</refpurpose>
+</refnamediv>
+endif::backend-docbook[]
+endif::doctype-manpage[]
+
+ifdef::backend-xhtml11[]
+[linktep-inlinemacro]
+<a href="{target}.html">{target}{0?({0})}</a>
+endif::backend-xhtml11[]
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-commands.txt b/tools/lib/traceevent/Documentation/libtraceevent-commands.txt
new file mode 100644 (file)
index 0000000..bec5520
--- /dev/null
@@ -0,0 +1,153 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_register_comm, tep_override_comm, tep_is_pid_registered,
+tep_data_comm_from_pid, tep_data_pid_from_comm, tep_cmdline_pid -
+Manage pid to process name mappings.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+int *tep_register_comm*(struct tep_handle pass:[*]_tep_, const char pass:[*]_comm_, int _pid_);
+int *tep_override_comm*(struct tep_handle pass:[*]_tep_, const char pass:[*]_comm_, int _pid_);
+bool *tep_is_pid_registered*(struct tep_handle pass:[*]_tep_, int _pid_);
+const char pass:[*]*tep_data_comm_from_pid*(struct tep_handle pass:[*]_pevent_, int _pid_);
+struct cmdline pass:[*]*tep_data_pid_from_comm*(struct tep_handle pass:[*]_pevent_, const char pass:[*]_comm_, struct cmdline pass:[*]_next_);
+int *tep_cmdline_pid*(struct tep_handle pass:[*]_pevent_, struct cmdline pass:[*]_cmdline_);
+--
+
+DESCRIPTION
+-----------
+These functions can be used to handle the mapping between pid and process name.
+The library builds a cache of these mappings, which is used to display the name
+of the process instead of its pid. This information can be retrieved from
+the tracefs/saved_cmdlines file.
+
+The _tep_register_comm()_ function registers a _pid_ / process name mapping.
+If a command with the same _pid_ is already registered, an error is returned.
+The _pid_ argument is the process ID, the _comm_ argument is the process name,
+_tep_ is the event context. The _comm_ is duplicated internally.
+
+The _tep_override_comm()_ function registers a _pid_ / process name mapping.
+If a process with the same pid is already registered, the process name string is
+updated with the new one. The _pid_ argument is the process ID, the _comm_
+argument is the process name, _tep_ is the event context. The _comm_ is
+duplicated internally.
+
+The _tep_is_pid_registered()_ function checks if a pid has a process name
+mapping registered. The _pid_ argument is the process ID, _tep_ is the event
+context.
+
+The _tep_data_comm_from_pid()_ function returns the process name for a given
+pid. The _pid_ argument is the process ID, _tep_ is the event context.
+The returned string should not be freed, but will be freed when the _tep_
+handler is closed.
+
+The _tep_data_pid_from_comm()_ function returns a pid for a given process name.
+The _comm_ argument is the process name, _tep_ is the event context.
+The argument _next_ is the cmdline structure to search for the next pid.
+As there may be more than one pid for a given process, the result of this call
+can be passed back into a recurring call in the _next_ parameter, to search for
+the next pid. If _next_ is NULL, it will return the first pid associated with
+the _comm_. The function performs a linear search, so it may be slow.
+
+The _tep_cmdline_pid()_ function returns the pid associated with a given
+_cmdline_. The _tep_ argument is the event context.
+
+RETURN VALUE
+------------
+_tep_register_comm()_ function returns 0 on success. In case of an error -1 is
+returned and errno is set to indicate the cause of the problem: ENOMEM, if there
+is not enough memory to duplicate the _comm_ or EEXIST if a mapping for this
+_pid_ is already registered.
+
+_tep_override_comm()_ function returns 0 on success. In case of an error -1 is
+returned and errno is set to indicate the cause of the problem: ENOMEM, if there
+is not enough memory to duplicate the _comm_.
+
+_tep_is_pid_registered()_ function returns true if the _pid_ has a process name
+mapped to it, false otherwise.
+
+_tep_data_comm_from_pid()_ function returns the process name as string, or the
+string "<...>" if there is no mapping for the given pid.
+
+_tep_data_pid_from_comm()_ function returns a pointer to a struct cmdline, that
+holds a pid for a given process, or NULL if none is found. This result can be
+passed back into a recurring call as the _next_ parameter of the function.
+
+_tep_cmdline_pid()_ function returns the pid for the given _cmdline_. If _cmdline_
+is NULL, then -1 is returned.
+
+EXAMPLE
+-------
+The following example registers pid for command "ls", in context of event _tep_
+and performs various searches for pid / process name mappings:
+[source,c]
+--
+#include <event-parse.h>
+...
+int ret;
+int ls_pid = 1021;
+struct tep_handle *tep = tep_alloc();
+...
+       ret = tep_register_comm(tep, "ls", ls_pid);
+       if (ret != 0 && errno == EEXIST)
+               ret = tep_override_comm(tep, "ls", ls_pid);
+       if (ret != 0) {
+               /* Failed to register pid / command mapping */
+       }
+...
+       if (tep_is_pid_registered(tep, ls_pid) == 0) {
+               /* Command mapping for ls_pid is not registered */
+       }
+...
+       const char *comm = tep_data_comm_from_pid(tep, ls_pid);
+       if (comm) {
+               /* Found process name for ls_pid */
+       }
+...
+       int pid;
+       struct cmdline *cmd = tep_data_pid_from_comm(tep, "ls", NULL);
+       while (cmd) {
+               pid = tep_cmdline_pid(tep, cmd);
+               /* Found pid for process "ls" */
+               cmd = tep_data_pid_from_comm(tep, "ls", cmd);
+       }
+--
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+       Header file to include in order to have access to the library APIs.
+*-ltraceevent*
+       Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to  <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-cpus.txt b/tools/lib/traceevent/Documentation/libtraceevent-cpus.txt
new file mode 100644 (file)
index 0000000..5ad70e4
--- /dev/null
@@ -0,0 +1,77 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_get_cpus, tep_set_cpus - Get / set the number of CPUs that have a tracing
+buffer representing them. Note, the buffer may be empty.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+int *tep_get_cpus*(struct tep_handle pass:[*]_tep_);
+void *tep_set_cpus*(struct tep_handle pass:[*]_tep_, int _cpus_);
+--
+
+DESCRIPTION
+-----------
+The _tep_get_cpus()_ function gets the number of CPUs that have a tracing
+buffer representing them. The _tep_ argument is trace event parser context.
+
+The _tep_set_cpus()_ function sets the number of CPUs that have a tracing
+buffer representing them. The _tep_ argument is trace event parser context.
+The _cpus_ argument is the number of CPUs with tracing data.
+
+RETURN VALUE
+------------
+The _tep_get_cpus()_ function returns the number of CPUs that have tracing
+data recorded.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
+       tep_set_cpus(tep, 5);
+...
+       printf("We have tracing data for %d CPUs", tep_get_cpus(tep));
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+       Header file to include in order to have access to the library APIs.
+*-ltraceevent*
+       Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to  <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-endian_read.txt b/tools/lib/traceevent/Documentation/libtraceevent-endian_read.txt
new file mode 100644 (file)
index 0000000..e64851b
--- /dev/null
@@ -0,0 +1,78 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_read_number - Reads a number from raw data.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+unsigned long long *tep_read_number*(struct tep_handle pass:[*]_tep_, const void pass:[*]_ptr_, int _size_);
+--
+
+DESCRIPTION
+-----------
+The _tep_read_number()_ function reads an integer from raw data, taking into
+account the endianness of the raw data and the current host. The _tep_ argument
+is the trace event parser context. The _ptr_ is a pointer to the raw data, where
+the integer is, and the _size_ is the size of the integer.
+
+RETURN VALUE
+------------
+The _tep_read_number()_ function returns the integer in the byte order of
+the current host. In case of an error, 0 is returned.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
+void process_record(struct tep_record *record)
+{
+       int offset = 24;
+       int data = tep_read_number(tep, record->data + offset, 4);
+
+       /* Read the 4 bytes at the offset 24 of data as an integer */
+}
+...
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+       Header file to include in order to have access to the library APIs.
+*-ltraceevent*
+       Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to  <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-event_find.txt b/tools/lib/traceevent/Documentation/libtraceevent-event_find.txt
new file mode 100644 (file)
index 0000000..7bc062c
--- /dev/null
@@ -0,0 +1,103 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_find_event,tep_find_event_by_name,tep_find_event_by_record -
+Find events by given key.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+struct tep_event pass:[*]*tep_find_event*(struct tep_handle pass:[*]_tep_, int _id_);
+struct tep_event pass:[*]*tep_find_event_by_name*(struct tep_handle pass:[*]_tep_, const char pass:[*]_sys_, const char pass:[*]_name_);
+struct tep_event pass:[*]*tep_find_event_by_record*(struct tep_handle pass:[*]_tep_, struct tep_record pass:[*]_record_);
+--
+
+DESCRIPTION
+-----------
+This set of functions can be used to search for an event based on a given
+criterion. All functions require a pointer to a _tep_, trace event parser
+context.
+
+The _tep_find_event()_ function searches for an event by the given event _id_.
+The event ID is assigned dynamically and can be viewed in the event's format
+file, in the "ID" field.
+
+The _tep_find_event_by_name()_ function searches for an event by the given
+event _name_, under the system _sys_. If _sys_ is NULL (not specified),
+the first event with _name_ is returned.
+
+The _tep_find_event_by_record()_ function searches for an event from a given
+_record_.
+
+RETURN VALUE
+------------
+All these functions return a pointer to the found event, or NULL if there is no
+such event.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
+struct tep_event *event;
+
+event = tep_find_event(tep, 1857);
+if (event == NULL) {
+       /* There is no event with ID 1857 */
+}
+
+event = tep_find_event_by_name(tep, "kvm", "kvm_exit");
+if (event == NULL) {
+       /* There is no kvm_exit event, from kvm system */
+}
+
+void event_from_record(struct tep_record *record)
+{
+       struct tep_event *event = tep_find_event_by_record(tep, record);
+       if (event == NULL) {
+               /* There is no event from given record */
+       }
+}
+...
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+       Header file to include in order to have access to the library APIs.
+*-ltraceevent*
+       Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to  <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-event_get.txt b/tools/lib/traceevent/Documentation/libtraceevent-event_get.txt
new file mode 100644 (file)
index 0000000..6525092
--- /dev/null
@@ -0,0 +1,99 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_get_event, tep_get_first_event, tep_get_events_count - Access events.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+struct tep_event pass:[*]*tep_get_event*(struct tep_handle pass:[*]_tep_, int _index_);
+struct tep_event pass:[*]*tep_get_first_event*(struct tep_handle pass:[*]_tep_);
+int *tep_get_events_count*(struct tep_handle pass:[*]_tep_);
+--
+
+DESCRIPTION
+-----------
+The _tep_get_event()_ function returns a pointer to the event at the given
+_index_. The _tep_ argument is trace event parser context, and the _index_ is
+the index of the requested event.
+
+The _tep_get_first_event()_ function returns a pointer to the first event.
+As events are stored in an array, this function returns the pointer to the
+beginning of the array. The _tep_ argument is trace event parser context.
+
+The _tep_get_events_count()_ function returns the number of events
+in the array. The _tep_ argument is trace event parser context.
+
+RETURN VALUE
+------------
+The _tep_get_event()_ returns a pointer to the event located at _index_.
+NULL is returned in case of an error, if there are no events, or if _index_
+is out of range.
+
+The _tep_get_first_event()_ returns a pointer to the first event. NULL is
+returned in case of error, or in case there are no events.
+
+The _tep_get_events_count()_ returns the number of the events. 0 is
+returned in case of error, or in case there are no events.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
+int i,count = tep_get_events_count(tep);
+struct tep_event *event, *events = tep_get_first_event(tep);
+
+if (events == NULL) {
+       /* There are no events */
+} else {
+       for (i = 0; i < count; i++) {
+               event = (events+i);
+               /* process events[i] */
+       }
+
+       /* Get the last event */
+       event = tep_get_event(tep, count-1);
+}
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+       Header file to include in order to have access to the library APIs.
+*-ltraceevent*
+       Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to  <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-event_list.txt b/tools/lib/traceevent/Documentation/libtraceevent-event_list.txt
new file mode 100644 (file)
index 0000000..fba350e
--- /dev/null
@@ -0,0 +1,122 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_list_events, tep_list_events_copy -
+Get list of events, sorted by given criteria.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+enum *tep_event_sort_type* {
+       _TEP_EVENT_SORT_ID_,
+       _TEP_EVENT_SORT_NAME_,
+       _TEP_EVENT_SORT_SYSTEM_,
+};
+
+struct tep_event pass:[*]pass:[*]*tep_list_events*(struct tep_handle pass:[*]_tep_, enum tep_event_sort_type _sort_type_);
+struct tep_event pass:[*]pass:[*]*tep_list_events_copy*(struct tep_handle pass:[*]_tep_, enum tep_event_sort_type _sort_type_);
+--
+
+DESCRIPTION
+-----------
+The _tep_list_events()_ function returns an array of pointers to the events,
+sorted by the _sort_type_ criteria. The last element of the array is NULL.
+The returned memory must not be freed; it is managed by the library.
+The function is not thread safe. The _tep_ argument is trace event parser
+context. The _sort_type_ argument is the required sort criteria:
+[verse]
+--
+       _TEP_EVENT_SORT_ID_     - sort by the event ID.
+       _TEP_EVENT_SORT_NAME_   - sort by the event (name, system, id) triplet.
+       _TEP_EVENT_SORT_SYSTEM_ - sort by the event (system, name, id) triplet.
+--
+
+The _tep_list_events_copy()_ function is a thread safe version of _tep_list_events()_.
+It has the same behavior, but the returned array is allocated internally and
+must be freed by the caller. Note that the content of the array must not be
+freed (see the EXAMPLE below).
+
+RETURN VALUE
+------------
+The _tep_list_events()_ function returns an array of pointers to events.
+In case of an error, NULL is returned. The returned array must not be freed;
+it is managed by the library.
+
+The _tep_list_events_copy()_ function returns an array of pointers to events.
+In case of an error, NULL is returned. The returned array must be freed by
+the caller.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
+int i;
+struct tep_event **events;
+
+i=0;
+events = tep_list_events(tep, TEP_EVENT_SORT_ID);
+if (events == NULL) {
+       /* Failed to get the events, sorted by ID */
+} else {
+       while(events[i]) {
+               /* walk through the list of the events, sorted by ID */
+               i++;
+       }
+}
+
+i=0;
+events = tep_list_events_copy(tep, TEP_EVENT_SORT_NAME);
+if (events == NULL) {
+       /* Failed to get the events, sorted by name */
+} else {
+       while(events[i]) {
+               /* walk through the list of the events, sorted by name */
+               i++;
+       }
+       free(events);
+}
+
+...
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+       Header file to include in order to have access to the library APIs.
+*-ltraceevent*
+       Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to  <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-field_find.txt b/tools/lib/traceevent/Documentation/libtraceevent-field_find.txt
new file mode 100644 (file)
index 0000000..0896af5
--- /dev/null
@@ -0,0 +1,118 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_find_common_field, tep_find_field, tep_find_any_field -
+Search for a field in an event.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+struct tep_format_field pass:[*]*tep_find_common_field*(struct tep_event pass:[*]_event_, const char pass:[*]_name_);
+struct tep_format_field pass:[*]*tep_find_field*(struct tep_event pass:[*]_event_, const char pass:[*]_name_);
+struct tep_format_field pass:[*]*tep_find_any_field*(struct tep_event pass:[*]_event_, const char pass:[*]_name_);
+--
+
+DESCRIPTION
+-----------
+These functions search for a field with a given name in an event. The field
+returned can be used to find the field content from within a data record.
+
+The _tep_find_common_field()_ function searches for a common field with _name_
+in the _event_.
+
+The _tep_find_field()_ function searches for an event specific field with
+_name_ in the _event_.
+
+The _tep_find_any_field()_ function searches for any field with _name_ in the
+_event_.
+
+RETURN VALUE
+------------
+The _tep_find_common_field()_, _tep_find_field()_ and _tep_find_any_field()_
+functions return a pointer to the found field, or NULL in case there is no field
+with the requested name.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+...
+void get_hrtimer_info(struct tep_handle *tep, struct tep_record *record)
+{
+       struct tep_format_field *field;
+       struct tep_event *event;
+       long long softexpires;
+       int mode;
+       int pid;
+
+       event = tep_find_event_by_name(tep, "timer", "hrtimer_start");
+
+       field = tep_find_common_field(event, "common_pid");
+       if (field == NULL) {
+               /* Cannot find "common_pid" field in the event */
+       } else {
+               /* Get pid from the data record */
+               pid = tep_read_number(tep, record->data + field->offset,
+                                     field->size);
+       }
+
+       field = tep_find_field(event, "softexpires");
+       if (field == NULL) {
+               /* Cannot find "softexpires" event specific field in the event */
+       } else {
+               /* Get softexpires parameter from the data record */
+               softexpires = tep_read_number(tep, record->data + field->offset,
+                                             field->size);
+       }
+
+       field = tep_find_any_field(event, "mode");
+       if (field == NULL) {
+               /* Cannot find "mode" field in the event */
+       } else {
+               /* Get mode parameter from the data record */
+               mode = tep_read_number(tep, record->data + field->offset,
+                                      field->size);
+       }
+}
+...
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+       Header file to include in order to have access to the library APIs.
+*-ltraceevent*
+       Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to  <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-field_get_val.txt b/tools/lib/traceevent/Documentation/libtraceevent-field_get_val.txt
new file mode 100644 (file)
index 0000000..6324f0d
--- /dev/null
@@ -0,0 +1,122 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_get_any_field_val, tep_get_common_field_val, tep_get_field_val,
+tep_get_field_raw - Get value of a field.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+*#include <trace-seq.h>*
+
+int *tep_get_any_field_val*(struct trace_seq pass:[*]_s_, struct tep_event pass:[*]_event_, const char pass:[*]_name_, struct tep_record pass:[*]_record_, unsigned long long pass:[*]_val_, int _err_);
+int *tep_get_common_field_val*(struct trace_seq pass:[*]_s_, struct tep_event pass:[*]_event_, const char pass:[*]_name_, struct tep_record pass:[*]_record_, unsigned long long pass:[*]_val_, int _err_);
+int *tep_get_field_val*(struct trace_seq pass:[*]_s_, struct tep_event pass:[*]_event_, const char pass:[*]_name_, struct tep_record pass:[*]_record_, unsigned long long pass:[*]_val_, int _err_);
+void pass:[*]*tep_get_field_raw*(struct trace_seq pass:[*]_s_, struct tep_event pass:[*]_event_, const char pass:[*]_name_, struct tep_record pass:[*]_record_, int pass:[*]_len_, int _err_);
+--
+
+DESCRIPTION
+-----------
+These functions can be used to find a field and retrieve its value.
+
+The _tep_get_any_field_val()_ function searches in the _record_ for a field
+with _name_, part of the _event_. If the field is found, its value is stored in
+_val_. If there is an error and _err_ is not zero, then an error string is
+written into _s_.
+
+The _tep_get_common_field_val()_ function does the same as
+_tep_get_any_field_val()_, but searches only in the common fields. This works
+for any event as all events include the common fields.
+
+The _tep_get_field_val()_ function does the same as _tep_get_any_field_val()_,
+but searches only in the event specific fields.
+
+The _tep_get_field_raw()_ function searches in the _record_ for a field with
+_name_, part of the _event_. If the field is found, a pointer to where the field
+exists in the record's raw data is returned. The size of the data is stored in
+_len_. If there is an error and _err_ is not zero, then an error string is
+written into _s_.
+
+RETURN VALUE
+------------
+The _tep_get_any_field_val()_, _tep_get_common_field_val()_ and
+_tep_get_field_val()_ functions return 0 on success, or -1 in case of an error.
+
+The _tep_get_field_raw()_ function returns a pointer to the field's raw data, and
+places the length of this data in _len_. In case of an error NULL is returned.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+#include <trace-seq.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
+struct tep_event *event = tep_find_event_by_name(tep, "kvm", "kvm_exit");
+...
+void process_record(struct tep_record *record)
+{
+       int len;
+       char *comm;
+       struct tep_event *event;
+       unsigned long long val;
+
+       event = tep_find_event_by_record(tep, record);
+       if (event != NULL) {
+               if (tep_get_common_field_val(NULL, event, "common_type",
+                                            record, &val, 0) == 0) {
+                       /* Got the value of common type field */
+               }
+               if (tep_get_field_val(NULL, event, "pid", record, &val, 0) == 0) {
+                       /* Got the value of pid specific field */
+               }
+               comm = tep_get_field_raw(NULL, event, "comm", record, &len, 0);
+               if (comm != NULL) {
+                       /* Got a pointer to the comm event specific field */
+               }
+       }
+}
+--
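+
+In a similar way, _tep_get_any_field_val()_ could be used when it is not known
+whether the field is a common or an event specific one. A minimal sketch,
+reusing the _event_ and _record_ from the example above:
+[source,c]
+--
+       unsigned long long val;
+
+       /* Searches both the common and the event specific fields */
+       if (tep_get_any_field_val(NULL, event, "common_pid", record, &val, 0) == 0) {
+               /* Got the value of the common_pid field */
+       }
+--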
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+       Header file to include in order to have access to the library APIs.
+*trace-seq.h*
+       Header file to include in order to have access to trace sequences
+       related APIs. Trace sequences are used to allow a function to call
+       several other functions to create a string of data to use.
+*-ltraceevent*
+       Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to  <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-field_print.txt b/tools/lib/traceevent/Documentation/libtraceevent-field_print.txt
new file mode 100644 (file)
index 0000000..9a9df98
--- /dev/null
@@ -0,0 +1,126 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_print_field, tep_print_fields, tep_print_num_field, tep_print_func_field -
+Print the field content.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+*#include <trace-seq.h>*
+
+void *tep_print_field*(struct trace_seq pass:[*]_s_, void pass:[*]_data_, struct tep_format_field pass:[*]_field_);
+void *tep_print_fields*(struct trace_seq pass:[*]_s_, void pass:[*]_data_, int _size_, struct tep_event pass:[*]_event_);
+int *tep_print_num_field*(struct trace_seq pass:[*]_s_, const char pass:[*]_fmt_, struct tep_event pass:[*]_event_, const char pass:[*]_name_, struct tep_record pass:[*]_record_, int _err_);
+int *tep_print_func_field*(struct trace_seq pass:[*]_s_, const char pass:[*]_fmt_, struct tep_event pass:[*]_event_, const char pass:[*]_name_, struct tep_record pass:[*]_record_, int _err_);
+--
+
+DESCRIPTION
+-----------
+These functions print recorded field's data, according to the field's type.
+
+The _tep_print_field()_ function extracts the value of the _field_ from the
+recorded raw _data_ and prints it into _s_, according to the field type.
+
+The _tep_print_fields()_ prints each field name followed by the record's field
+value according to the field's type:
+[verse]
+--
+"field1_name=field1_value field2_name=field2_value ..."
+--
+It iterates all fields of the _event_, and calls _tep_print_field()_ for each of
+them.
+
+The _tep_print_num_field()_ function prints a numeric field with a given format
+string. A search is performed in the _event_ for a field with _name_. If such
+a field is found, its value is extracted from the _record_ and is printed into
+_s_, according to the given format string _fmt_. If the argument _err_ is
+non-zero and an error occurs, the error is printed into _s_.
+
+The _tep_print_func_field()_ function prints a function field with a given
+format string. A search is performed in the _event_ for a field with _name_.
+If such a field is found, its value is extracted from the _record_. The value
+is assumed to be a function address, and a search is performed to find the name
+of this function. The function name (if found) and its address are printed
+into _s_, according to the given format string _fmt_. If the argument _err_ is
+non-zero and an error occurs, the error is printed into _s_.
+
+RETURN VALUE
+------------
+The _tep_print_num_field()_ and _tep_print_func_field()_ functions return 1
+on success, -1 in case of an error or 0 if the print buffer _s_ is full.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+#include <trace-seq.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
+struct trace_seq seq;
+trace_seq_init(&seq);
+struct tep_event *event = tep_find_event_by_name(tep, "timer", "hrtimer_start");
+...
+void process_record(struct tep_record *record)
+{
+       struct tep_format_field *field_pid = tep_find_common_field(event, "common_pid");
+
+       trace_seq_reset(&seq);
+
+       /* Print the value of "common_pid" */
+       tep_print_field(&seq, record->data, field_pid);
+
+       /* Print all fields of the "hrtimer_start" event */
+       tep_print_fields(&seq, record->data, record->size, event);
+
+       /* Print the value of "expires" field with custom format string */
+       tep_print_num_field(&seq, " timer expires in %llu ", event, "expires", record, 0);
+
+       /* Print the address and the name of "function" field with custom format string */
+       tep_print_func_field(&seq, " timer function is %s ", event, "function", record, 0);
+}
+...
+--
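+
+When the trace sequence is no longer needed, its internal buffer would
+typically be released with _trace_seq_destroy()_, declared in *trace-seq.h*.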
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+       Header file to include in order to have access to the library APIs.
+*trace-seq.h*
+       Header file to include in order to have access to trace sequences related APIs.
+       Trace sequences are used to allow a function to call several other functions
+       to create a string of data to use.
+*-ltraceevent*
+       Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to  <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-field_read.txt b/tools/lib/traceevent/Documentation/libtraceevent-field_read.txt
new file mode 100644 (file)
index 0000000..64e9e25
--- /dev/null
@@ -0,0 +1,81 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_read_number_field - Reads a number from raw data.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+int *tep_read_number_field*(struct tep_format_field pass:[*]_field_, const void pass:[*]_data_, unsigned long long pass:[*]_value_);
+--
+
+DESCRIPTION
+-----------
+The _tep_read_number_field()_ function reads the value of the _field_ from the
+raw _data_ and stores it in _value_. The value is converted according to the
+endianness of the raw data and of the current machine before it is stored.
+
+RETURN VALUE
+------------
+The _tep_read_number_field()_ function returns 0 in case of success, or -1 in
+case of an error.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
+struct tep_event *event = tep_find_event_by_name(tep, "timer", "hrtimer_start");
+...
+void process_record(struct tep_record *record)
+{
+       unsigned long long pid;
+       struct tep_format_field *field_pid = tep_find_common_field(event, "common_pid");
+
+       if (tep_read_number_field(field_pid, record->data, &pid) != 0) {
+               /* Failed to get "common_pid" value */
+       }
+}
+...
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+       Header file to include in order to have access to the library APIs.
+*-ltraceevent*
+       Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to  <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-fields.txt b/tools/lib/traceevent/Documentation/libtraceevent-fields.txt
new file mode 100644 (file)
index 0000000..1ccb531
--- /dev/null
@@ -0,0 +1,105 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_event_common_fields, tep_event_fields - Get a list of fields for an event.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+struct tep_format_field pass:[*]pass:[*]*tep_event_common_fields*(struct tep_event pass:[*]_event_);
+struct tep_format_field pass:[*]pass:[*]*tep_event_fields*(struct tep_event pass:[*]_event_);
+--
+
+DESCRIPTION
+-----------
+The _tep_event_common_fields()_ function returns an array of pointers to common
+fields for the _event_. The array is allocated in the function and must be freed
+by free(). The last element of the array is NULL.
+
+The _tep_event_fields()_ function returns an array of pointers to event specific
+fields for the _event_. The array is allocated in the function and must be freed
+by free(). The last element of the array is NULL.
+
+RETURN VALUE
+------------
+Both _tep_event_common_fields()_ and _tep_event_fields()_ functions return
+an array of pointers to tep_format_field structures in case of success, or
+NULL in case of an error.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
+int i;
+struct tep_format_field **fields;
+struct tep_event *event = tep_find_event_by_name(tep, "kvm", "kvm_exit");
+if (event != NULL) {
+       fields = tep_event_common_fields(event);
+       if (fields != NULL) {
+               i = 0;
+               while (fields[i]) {
+                       /*
+                         walk through the list of the common fields
+                         of the kvm_exit event
+                       */
+                       i++;
+               }
+               free(fields);
+       }
+       fields = tep_event_fields(event);
+       if (fields != NULL) {
+               i = 0;
+               while (fields[i]) {
+                       /*
+                         walk through the list of the event specific
+                         fields of the kvm_exit event
+                       */
+                       i++;
+               }
+               free(fields);
+       }
+}
+...
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+       Header file to include in order to have access to the library APIs.
+*-ltraceevent*
+       Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to  <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-file_endian.txt b/tools/lib/traceevent/Documentation/libtraceevent-file_endian.txt
new file mode 100644 (file)
index 0000000..f401ad3
--- /dev/null
@@ -0,0 +1,91 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_is_file_bigendian, tep_set_file_bigendian - Get / set the endianness of the
+raw data being accessed by the tep handler.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+enum *tep_endian* {
+       TEP_LITTLE_ENDIAN = 0,
+       TEP_BIG_ENDIAN
+};
+
+bool *tep_is_file_bigendian*(struct tep_handle pass:[*]_tep_);
+void *tep_set_file_bigendian*(struct tep_handle pass:[*]_tep_, enum tep_endian _endian_);
+--
+
+DESCRIPTION
+-----------
+The _tep_is_file_bigendian()_ function gets the endianness of the raw data,
+being accessed by the tep handler. The _tep_ argument is trace event parser
+context.
+
+The _tep_set_file_bigendian()_ function sets the endianness of raw data being
+accessed by the tep handler. The _tep_ argument is trace event parser context.
+The _endian_ argument is the endianness:
+[verse]
+--
+       _TEP_LITTLE_ENDIAN_ - the raw data is in little endian format,
+       _TEP_BIG_ENDIAN_ - the raw data is in big endian format.
+--
+
+RETURN VALUE
+------------
+The _tep_is_file_bigendian()_ function returns true if the data is in big endian
+format, false otherwise.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
+       tep_set_file_bigendian(tep, TEP_LITTLE_ENDIAN);
+...
+       if (tep_is_file_bigendian(tep)) {
+               /* The raw data is in big endian */
+       } else {
+               /* The raw data is in little endian */
+       }
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+       Header file to include in order to have access to the library APIs.
+*-ltraceevent*
+       Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to  <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-filter.txt b/tools/lib/traceevent/Documentation/libtraceevent-filter.txt
new file mode 100644 (file)
index 0000000..4a9962d
--- /dev/null
@@ -0,0 +1,209 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_filter_alloc, tep_filter_free, tep_filter_reset, tep_filter_make_string,
+tep_filter_copy, tep_filter_compare, tep_filter_match, tep_event_filtered,
+tep_filter_remove_event, tep_filter_strerror, tep_filter_add_filter_str -
+Event filter related APIs.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+struct tep_event_filter pass:[*]*tep_filter_alloc*(struct tep_handle pass:[*]_tep_);
+void *tep_filter_free*(struct tep_event_filter pass:[*]_filter_);
+void *tep_filter_reset*(struct tep_event_filter pass:[*]_filter_);
+enum tep_errno *tep_filter_add_filter_str*(struct tep_event_filter pass:[*]_filter_, const char pass:[*]_filter_str_);
+int *tep_event_filtered*(struct tep_event_filter pass:[*]_filter_, int _event_id_);
+int *tep_filter_remove_event*(struct tep_event_filter pass:[*]_filter_, int _event_id_);
+enum tep_errno *tep_filter_match*(struct tep_event_filter pass:[*]_filter_, struct tep_record pass:[*]_record_);
+int *tep_filter_copy*(struct tep_event_filter pass:[*]_dest_, struct tep_event_filter pass:[*]_source_);
+int *tep_filter_compare*(struct tep_event_filter pass:[*]_filter1_, struct tep_event_filter pass:[*]_filter2_);
+char pass:[*]*tep_filter_make_string*(struct tep_event_filter pass:[*]_filter_, int _event_id_);
+int *tep_filter_strerror*(struct tep_event_filter pass:[*]_filter_, enum tep_errno _err_, char pass:[*]buf, size_t _buflen_);
+--
+
+DESCRIPTION
+-----------
+Filters can be attached to traced events. They can be used to filter out various
+events when outputting them. Each event can be filtered based on its parameters,
+described in the event's format file. This set of functions can be used to
+create, delete, modify and attach event filters.
+
+The _tep_filter_alloc()_ function creates a new event filter. The _tep_ argument
+is the trace event parser context.
+
+The _tep_filter_free()_ function frees an event filter and all resources that it
+had used.
+
+The _tep_filter_reset()_ function removes all rules from an event filter and
+resets it.
+
+The _tep_filter_add_filter_str()_ function adds a new rule to the _filter_. The
+_filter_str_ argument is the filter string, that contains the rule.
+
+The _tep_event_filtered()_ function checks if the event with _event_id_ has
+_filter_.
+
+The _tep_filter_remove_event()_ function removes a _filter_ for an event with
+_event_id_.
+
+The _tep_filter_match()_ function tests if a _record_ matches given _filter_.
+
+The _tep_filter_copy()_ function copies a _source_ filter into a _dest_ filter.
+
+The _tep_filter_compare()_ function compares two filters: _filter1_ and _filter2_.
+
+The _tep_filter_make_string()_ function constructs a string, displaying
+the _filter_ contents for given _event_id_.
+
+The _tep_filter_strerror()_ function copies the _filter_ error buffer into the
+given _buf_ with the size _buflen_. If the error buffer is empty, a string
+describing the error _err_ is copied into _buf_ instead.
+
+RETURN VALUE
+------------
+The _tep_filter_alloc()_ function returns a pointer to the newly created event
+filter, or NULL in case of an error.
+
+The _tep_filter_add_filter_str()_ function returns 0 if the rule was
+successfully added, or a negative error code otherwise. Use
+_tep_filter_strerror()_ to see the actual error message in case of an error.
+
+The _tep_event_filtered()_ function returns 1 if the filter is found for given
+event, or 0 otherwise.
+
+The _tep_filter_remove_event()_ function returns 1 if the event was removed, or
+0 if the event was not found.
+
+The _tep_filter_match()_ function returns _tep_errno_, according to the result:
+[verse]
+--
+_pass:[TEP_ERRNO__FILTER_MATCH]_       - filter found for event, the record matches.
+_pass:[TEP_ERRNO__FILTER_MISS]_                - filter found for event, the record does not match.
+_pass:[TEP_ERRNO__FILTER_NOT_FOUND]_   - no filter found for record's event.
+_pass:[TEP_ERRNO__NO_FILTER]_          - no rules in the filter.
+--
+or any other _tep_errno_, if an error occurred during the test.
+
+The _tep_filter_copy()_ function returns 0 on success or -1 if not all rules
+were copied.
+
+The _tep_filter_compare()_ function returns 1 if the two filters hold the same
+content, or 0 if they do not.
+
+The _tep_filter_make_string()_ function returns a string, which must be freed
+with free(), or NULL in case of an error.
+
+The _tep_filter_strerror()_ function returns 0 if message was filled
+successfully, or -1 in case of an error.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
+char errstr[200];
+int ret;
+
+struct tep_event_filter *filter = tep_filter_alloc(tep);
+struct tep_event_filter *filter1 = tep_filter_alloc(tep);
+ret = tep_filter_add_filter_str(filter, "sched/sched_wakeup:target_cpu==1");
+if (ret < 0) {
+       tep_filter_strerror(filter, ret, errstr, sizeof(errstr));
+       /* Failed to add a new rule to the filter, the error string is in errstr */
+}
+if (tep_filter_copy(filter1, filter) != 0) {
+       /* Failed to copy filter in filter1 */
+}
+...
+if (tep_filter_compare(filter, filter1) != 1) {
+       /* Both filters are different */
+}
+...
+void process_record(struct tep_handle *tep, struct tep_record *record)
+{
+       struct tep_event *event;
+       char *fstring;
+
+       event = tep_find_event_by_record(tep, record);
+
+       if (tep_event_filtered(filter, event->id) == 1) {
+               /* The event has filter */
+               fstring = tep_filter_make_string(filter, event->id);
+               if (fstring != NULL) {
+                       /* The filter for the event is in fstring */
+                       free(fstring);
+               }
+       }
+
+       switch (tep_filter_match(filter, record)) {
+       case TEP_ERRNO__FILTER_MATCH:
+               /* The filter matches the record */
+               break;
+       case TEP_ERRNO__FILTER_MISS:
+               /* The filter does not match the record */
+               break;
+       case TEP_ERRNO__FILTER_NOT_FOUND:
+               /* No filter found for record's event */
+               break;
+       case TEP_ERRNO__NO_FILTER:
+               /* There are no rules in the filter */
+               break;
+       default:
+               /* An error occurred during the test */
+               break;
+       }
+
+       if (tep_filter_remove_event(filter, event->id) == 1) {
+               /* The event was removed from the filter */
+       }
+}
+
+...
+tep_filter_reset(filter);
+...
+tep_filter_free(filter);
+tep_filter_free(filter1);
+...
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+       Header file to include in order to have access to the library APIs.
+*-ltraceevent*
+       Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to  <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-func_apis.txt b/tools/lib/traceevent/Documentation/libtraceevent-func_apis.txt
new file mode 100644 (file)
index 0000000..38bfea3
--- /dev/null
@@ -0,0 +1,183 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_find_function, tep_find_function_address, tep_set_function_resolver,
+tep_reset_function_resolver, tep_register_function, tep_register_print_string -
+function related tep APIs
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+typedef char pass:[*](*tep_func_resolver_t*)(void pass:[*]_priv_, unsigned long long pass:[*]_addrp_, char pass:[**]_modp_);
+int *tep_set_function_resolver*(struct tep_handle pass:[*]_tep_, tep_func_resolver_t pass:[*]_func_, void pass:[*]_priv_);
+void *tep_reset_function_resolver*(struct tep_handle pass:[*]_tep_);
+const char pass:[*]*tep_find_function*(struct tep_handle pass:[*]_tep_, unsigned long long _addr_);
+unsigned long long *tep_find_function_address*(struct tep_handle pass:[*]_tep_, unsigned long long _addr_);
+int *tep_register_function*(struct tep_handle pass:[*]_tep_, char pass:[*]_name_, unsigned long long _addr_, char pass:[*]_mod_);
+int *tep_register_print_string*(struct tep_handle pass:[*]_tep_, const char pass:[*]_fmt_, unsigned long long _addr_);
+--
+
+DESCRIPTION
+-----------
+Some tools may already have a way to resolve the kernel functions. These APIs
+allow them to keep using it instead of duplicating all the entries inside the
+parser context.
+
+The _tep_func_resolver_t_ type is the prototype of the alternative kernel
+functions resolver. This function receives a pointer to its custom context
+(set with the _tep_set_function_resolver()_ call) and the address of a kernel
+function that has to be resolved. In case of success, it should return
+the name of the function and set its module (if any) in _modp_.
+
+The _tep_set_function_resolver()_ function registers _func_ as an alternative
+kernel functions resolver. The _tep_ argument is trace event parser context.
+The _priv_ argument is a custom context of the _func_ function. The function
+resolver is used by the APIs _tep_find_function()_,
+_tep_find_function_address()_, and _tep_print_func_field()_ to resolve
+a function address to a function name.
+
+The _tep_reset_function_resolver()_ function resets the kernel functions
+resolver to the default function.  The _tep_ argument is trace event parser
+context.
+
+
+These APIs can be used to find a function's name and start address from a
+given address. The given address does not have to be exact; the function that
+contains it will be selected.
+
+The _tep_find_function()_ function returns the name of the function that
+contains the given address _addr_. The _tep_ argument is the trace event
+parser context.
+
+The _tep_find_function_address()_ function returns the start address of the
+function that contains the given address _addr_. The _addr_ does not have to
+be exact. The _tep_ argument is the trace event parser context.
+The _tep_register_function()_ function registers a function name mapped to an
+address and an (optional) module. This mapping is used when the function tracer
+or events have a "%pF" or "%pS" parameter in their format string. It is common
+to pass in the kallsyms function names with their corresponding addresses with
+this function. The _tep_ argument is the trace event parser context. The _name_
+is the name of the function; the string is copied internally. The _addr_ is
+the start address of the function. The _mod_ is the kernel module
+the function may be in (NULL for none).
+
+The _tep_register_print_string()_ function registers a string by the address
+at which it is stored in the kernel. Some strings internal to the kernel, with
+static addresses, are passed to certain events. A "%s" argument in an event's
+format field that holds such an address needs to know what string is at that
+address. The _tep_register_print_string()_ function supplies the parser with
+the mapping between kernel addresses and those strings. The _tep_ argument is
+the trace event parser context. The _fmt_ is the string to register; it is
+copied internally. The _addr_ is the address the string was located at.
+
+
+RETURN VALUE
+------------
+The _tep_set_function_resolver()_ function returns 0 in case of success, or -1
+in case of an error.
+
+The _tep_find_function()_ function returns the function name, or NULL in case
+it cannot be found.
+
+The _tep_find_function_address()_ function returns the function start address,
+or 0 in case it cannot be found.
+
+The _tep_register_function()_ function returns 0 in case of success. In case of
+an error -1 is returned, and errno is set to the appropriate error number.
+
+The _tep_register_print_string()_ function returns 0 in case of success. In case
+of an error -1 is returned, and errno is set to the appropriate error number.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
+char *my_resolve_kernel_addr(void *context,
+                            unsigned long long *addrp, char **modp)
+{
+       struct db *function_database = context;
+       struct symbol *sym = sql_lookup(function_database, *addrp);
+
+       if (!sym)
+               return NULL;
+
+       *modp = sym->module_name;
+       return sym->name;
+}
+
+void show_function(unsigned long long addr)
+{
+       unsigned long long fstart;
+       const char *fname;
+
+       if (tep_set_function_resolver(tep, my_resolve_kernel_addr,
+                                     function_database) != 0) {
+               /* failed to register my_resolve_kernel_addr */
+       }
+
+       /* These APIs use my_resolve_kernel_addr() to resolve the addr */
+       fname = tep_find_function(tep, addr);
+       fstart = tep_find_function_address(tep, addr);
+
+       /*
+          addr is in function named fname, starting at fstart address,
+          at offset (addr - fstart)
+       */
+
+       tep_reset_function_resolver(tep);
+
+}
+...
+       if (tep_register_function(tep, "kvm_exit",
+                               (unsigned long long) 0x12345678, "kvm") != 0) {
+               /* Failed to register kvm_exit address mapping */
+       }
+...
+       if (tep_register_print_string(tep, "print string",
+                               (unsigned long long) 0x87654321, NULL) != 0) {
+               /* Failed to register "print string" address mapping */
+       }
+...
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+       Header file to include in order to have access to the library APIs.
+*-ltraceevent*
+       Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to  <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-func_find.txt b/tools/lib/traceevent/Documentation/libtraceevent-func_find.txt
new file mode 100644 (file)
index 0000000..04840e2
--- /dev/null
@@ -0,0 +1,88 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_find_function, tep_find_function_address - Find function name / start address.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+const char pass:[*]*tep_find_function*(struct tep_handle pass:[*]_tep_, unsigned long long _addr_);
+unsigned long long *tep_find_function_address*(struct tep_handle pass:[*]_tep_, unsigned long long _addr_);
+--
+
+DESCRIPTION
+-----------
+These functions can be used to find a function's name and start address from a
+given address. The given address does not have to be exact; the function that
+contains it will be selected.
+
+The _tep_find_function()_ function returns the name of the function that
+contains the given address _addr_. The _tep_ argument is the trace event parser
+context.
+
+The _tep_find_function_address()_ function returns the start address of the
+function that contains the given address _addr_. The _addr_ does not have to be
+exact. The _tep_ argument is the trace event parser context.
+
+RETURN VALUE
+------------
+The _tep_find_function()_ function returns the function name, or NULL in case
+it cannot be found.
+
+The _tep_find_function_address()_ function returns the function start address,
+or 0 in case it cannot be found.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
+void show_function(unsigned long long addr)
+{
+       const char *fname = tep_find_function(tep, addr);
+       unsigned long long fstart = tep_find_function_address(tep, addr);
+
+       /* addr is in function named fname, starting at fstart address, at offset (addr - fstart) */
+}
+...
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+       Header file to include in order to have access to the library APIs.
+*-ltraceevent*
+       Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to  <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-handle.txt b/tools/lib/traceevent/Documentation/libtraceevent-handle.txt
new file mode 100644 (file)
index 0000000..8d56831
--- /dev/null
@@ -0,0 +1,101 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_alloc, tep_free, tep_ref, tep_unref, tep_ref_get - Create, destroy, manage
+references of trace event parser context.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+struct tep_handle pass:[*]*tep_alloc*(void);
+void *tep_free*(struct tep_handle pass:[*]_tep_);
+void *tep_ref*(struct tep_handle pass:[*]_tep_);
+void *tep_unref*(struct tep_handle pass:[*]_tep_);
+int *tep_ref_get*(struct tep_handle pass:[*]_tep_);
+--
+
+DESCRIPTION
+-----------
+These are the main functions to create and destroy tep_handle - the main
+structure, representing the trace event parser context. This context is used as
+the input parameter of most library APIs.
+
+The _tep_alloc()_ function allocates and initializes the tep context.
+
+The _tep_free()_ function decrements the reference count of the _tep_ handler.
+When there are no more references, the handler is freed, together with all
+the resources that it had used. The argument _tep_ is the pointer to the
+trace event parser context.
+
+The _tep_ref()_ function adds a reference to the _tep_ handler.
+
+The _tep_unref()_ function removes a reference from the _tep_ handler. When
+the last reference is removed, the _tep_ is destroyed, and all resources that
+it had used are cleaned up.
+
+The _tep_ref_get()_ function gets the current reference count of the _tep_ handler.
+
+RETURN VALUE
+------------
+_tep_alloc()_ returns a pointer to a newly created tep_handle structure.
+NULL is returned in case there is not enough free memory to allocate it.
+
+_tep_ref_get()_ returns the current reference count of _tep_.
+If _tep_ is NULL, 0 is returned.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+
+...
+struct tep_handle *tep = tep_alloc();
+...
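+/* Take an extra reference and check that the reference count increases by one */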
+int ref = tep_ref_get(tep);
+tep_ref(tep);
+if ((ref + 1) != tep_ref_get(tep)) {
+       /* Something wrong happened, the counter is not incremented by 1 */
+}
+tep_unref(tep);
+...
+tep_free(tep);
+...
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+       Header file to include in order to have access to the library APIs.
+*-ltraceevent*
+       Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to  <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-header_page.txt b/tools/lib/traceevent/Documentation/libtraceevent-header_page.txt
new file mode 100644 (file)
index 0000000..615d117
--- /dev/null
@@ -0,0 +1,102 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_get_header_page_size, tep_get_header_timestamp_size, tep_is_old_format -
+Get the data stored in the header page, in kernel context.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+int *tep_get_header_page_size*(struct tep_handle pass:[*]_tep_);
+int *tep_get_header_timestamp_size*(struct tep_handle pass:[*]_tep_);
+bool *tep_is_old_format*(struct tep_handle pass:[*]_tep_);
+--
+
+DESCRIPTION
+-----------
+These functions retrieve information from kernel context, stored in tracefs
+events/header_page. Old kernels do not have header page info, so default values
+from user space context are used.
+
+The _tep_get_header_page_size()_ function returns the size of a long integer,
+in kernel context. The _tep_ argument is trace event parser context.
+This information is retrieved from tracefs events/header_page, "commit" field.
+
+The _tep_get_header_timestamp_size()_ function returns the size of timestamps,
+in kernel context. The _tep_ argument is trace event parser context. This
+information is retrieved from tracefs events/header_page, "timestamp" field.
+
+The _tep_is_old_format()_ function returns true if the kernel predates
+the addition of events/header_page, otherwise it returns false.
+
+RETURN VALUE
+------------
+The _tep_get_header_page_size()_ function returns the size of a long integer,
+in bytes.
+
+The _tep_get_header_timestamp_size()_ function returns the size of timestamps,
+in bytes.
+
+The _tep_is_old_format()_ function returns true if an old kernel, which has no
+events/header_page, is used to generate the tracing data. If the kernel is new,
+or _tep_ is NULL, false is returned.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
+       int longsize;
+       int timesize;
+       bool old;
+
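+       /* Read the sizes parsed from the traced kernel's events/header_page file */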
+       longsize = tep_get_header_page_size(tep);
+       timesize = tep_get_header_timestamp_size(tep);
+       old = tep_is_old_format(tep);
+
+       printf("%s kernel is used to generate the tracing data.\n",
+              old ? "Old" : "New");
+       printf("The size of a long integer is %d bytes.\n", longsize);
+       printf("The timestamps size is %d bytes.\n", timesize);
+...
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+       Header file to include in order to have access to the library APIs.
+*-ltraceevent*
+       Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to  <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-host_endian.txt b/tools/lib/traceevent/Documentation/libtraceevent-host_endian.txt
new file mode 100644 (file)
index 0000000..d5d375e
--- /dev/null
@@ -0,0 +1,104 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_is_bigendian, tep_is_local_bigendian, tep_set_local_bigendian - Get / set
+the endianness of the local machine.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+enum *tep_endian* {
+       TEP_LITTLE_ENDIAN = 0,
+       TEP_BIG_ENDIAN
+};
+
+int *tep_is_bigendian*(void);
+bool *tep_is_local_bigendian*(struct tep_handle pass:[*]_tep_);
+void *tep_set_local_bigendian*(struct tep_handle pass:[*]_tep_, enum tep_endian _endian_);
+--
+
+DESCRIPTION
+-----------
+
+The _tep_is_bigendian()_ function gets the endianness of the machine executing
+the function.
+
+The _tep_is_local_bigendian()_ function gets the endianness of the local
+machine, saved in the _tep_ handler. The _tep_ argument is the trace event
+parser context. This API is a bit faster than _tep_is_bigendian()_, as it
+returns the cached endianness of the local machine instead of checking it each time.
+
+The _tep_set_local_bigendian()_ function sets the endianness of the local
+machine in the _tep_ handler. The _tep_ argument is trace event parser context.
+The _endian_ argument is the endianness:
+[verse]
+--
+       _TEP_LITTLE_ENDIAN_ - the machine is little endian,
+       _TEP_BIG_ENDIAN_ - the machine is big endian.
+--
+
+RETURN VALUE
+------------
+The _tep_is_bigendian()_ function returns non-zero if the machine executing
+the code is big endian, and zero otherwise.
+
+The _tep_is_local_bigendian()_ function returns true if the endianness of the
+local machine, saved in the _tep_ handler, is big endian, and false otherwise.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
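+       /* Store the endianness of the local machine in the tep handler */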
+       if (tep_is_bigendian())
+               tep_set_local_bigendian(tep, TEP_BIG_ENDIAN);
+       else
+               tep_set_local_bigendian(tep, TEP_LITTLE_ENDIAN);
+...
+       if (tep_is_local_bigendian(tep))
+               printf("This machine you are running on is bigendian\n");
+       else
+               printf("This machine you are running on is little endian\n");
+
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+       Header file to include in order to have access to the library APIs.
+*-ltraceevent*
+       Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to  <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-long_size.txt b/tools/lib/traceevent/Documentation/libtraceevent-long_size.txt
new file mode 100644 (file)
index 0000000..01d78ea
--- /dev/null
@@ -0,0 +1,78 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_get_long_size, tep_set_long_size - Get / set the size of a long integer on
+the machine, where the trace is generated, in bytes
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+int *tep_get_long_size*(struct tep_handle pass:[*]_tep_);
+void *tep_set_long_size*(struct tep_handle pass:[*]_tep_, int _long_size_);
+--
+
+DESCRIPTION
+-----------
+The _tep_get_long_size()_ function returns the size of a long integer on the machine,
+where the trace is generated. The _tep_ argument is trace event parser context.
+
+The _tep_set_long_size()_ function sets the size of a long integer on the machine,
+where the trace is generated. The _tep_ argument is trace event parser context.
+The _long_size_ is the size of a long integer, in bytes.
+
+RETURN VALUE
+------------
+The _tep_get_long_size()_ function returns the size of a long integer on the machine,
+where the trace is generated, in bytes.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
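+/* Store a long size of 4 bytes in the tep handler (assuming a 32-bit traced machine) */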
+tep_set_long_size(tep, 4);
+...
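+/* Read the long size back from the tep handler */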
+int long_size = tep_get_long_size(tep);
+...
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+       Header file to include in order to have access to the library APIs.
+*-ltraceevent*
+       Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to  <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-page_size.txt b/tools/lib/traceevent/Documentation/libtraceevent-page_size.txt
new file mode 100644 (file)
index 0000000..452c0cf
--- /dev/null
@@ -0,0 +1,82 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_get_page_size, tep_set_page_size - Get / set the size of a memory page on
+the machine, where the trace is generated
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+int *tep_get_page_size*(struct tep_handle pass:[*]_tep_);
+void *tep_set_page_size*(struct tep_handle pass:[*]_tep_, int _page_size_);
+--
+
+DESCRIPTION
+-----------
+The _tep_get_page_size()_ function returns the size of a memory page on
+the machine, where the trace is generated. The _tep_ argument is trace
+event parser context.
+
+The _tep_set_page_size()_ function stores in the _tep_ context the size of a
+memory page on the machine, where the trace is generated.
+The _tep_ argument is trace event parser context.
+The _page_size_ argument is the size of a memory page, in bytes.
+
+RETURN VALUE
+------------
+The _tep_get_page_size()_ function returns size of the memory page, in bytes.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <unistd.h>
+#include <event-parse.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
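+       /* Assume the trace is generated on the local machine and use its page size */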
+       int page_size = getpagesize();
+
+       tep_set_page_size(tep, page_size);
+
+       printf("The page size for this machine is %d\n", tep_get_page_size(tep));
+
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+       Header file to include in order to have access to the library APIs.
+*-ltraceevent*
+       Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to  <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-parse_event.txt b/tools/lib/traceevent/Documentation/libtraceevent-parse_event.txt
new file mode 100644 (file)
index 0000000..f248114
--- /dev/null
@@ -0,0 +1,90 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_parse_event, tep_parse_format - Parse the event format information
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+enum tep_errno *tep_parse_event*(struct tep_handle pass:[*]_tep_, const char pass:[*]_buf_, unsigned long _size_, const char pass:[*]_sys_);
+enum tep_errno *tep_parse_format*(struct tep_handle pass:[*]_tep_, struct tep_event pass:[*]pass:[*]_eventp_, const char pass:[*]_buf_, unsigned long _size_, const char pass:[*]_sys_);
+--
+
+DESCRIPTION
+-----------
+The _tep_parse_event()_ function parses the event format and creates an event
+structure to quickly parse raw data for a given event. The _tep_ argument is
+the trace event parser context. The created event structure is stored in the
+_tep_ context. The _buf_ argument is a buffer of _size_ bytes, containing the
+event format data. The event format data can be taken from
+tracefs/events/.../.../format files. The _sys_ argument is the system of
+the event.
+
+The _tep_parse_format()_ function does the same as _tep_parse_event()_. The only
+difference is in the extra _eventp_ argument, where the newly created event
+structure is returned.
+
+RETURN VALUE
+------------
+Both _tep_parse_event()_ and _tep_parse_format()_ functions return 0 on success,
+or TEP_ERRNO__... in case of an error.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
+char *buf;
+int size;
+struct tep_event *event = NULL;
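+/* read_file() is assumed to read the whole file into a buffer and store its size in "size" */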
+buf = read_file("/sys/kernel/tracing/events/ftrace/print/format", &size);
+if (tep_parse_event(tep, buf, size, "ftrace") != 0) {
+       /* Failed to parse the ftrace print format */
+}
+
+if (tep_parse_format(tep, &event, buf, size, "ftrace") != 0) {
+       /* Failed to parse the ftrace print format */
+}
+...
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+       Header file to include in order to have access to the library APIs.
+*-ltraceevent*
+       Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to  <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-parse_head.txt b/tools/lib/traceevent/Documentation/libtraceevent-parse_head.txt
new file mode 100644 (file)
index 0000000..c90f16c
--- /dev/null
@@ -0,0 +1,82 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_parse_header_page - Parses the data stored in the header page.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+int *tep_parse_header_page*(struct tep_handle pass:[*]_tep_, char pass:[*]_buf_, unsigned long _size_, int _long_size_);
+--
+
+DESCRIPTION
+-----------
+The _tep_parse_header_page()_ function parses the header page data from _buf_
+and initializes the _tep_ trace event parser context with it. The buffer
+_buf_ is _size_ bytes long and is expected to be a copy of
+tracefs/events/header_page.
+
+Some old kernels do not have header page info. In this case the
+_tep_parse_header_page()_ function can be called with _size_ equal to 0, and the
+_tep_ context is initialized with default values. The _long_size_ argument can
+be used in this case to set the size of a long integer.
+
+RETURN VALUE
+------------
+The _tep_parse_header_page()_ function returns 0 in case of success, or -1
+in case of an error.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
+char *buf;
+int size;
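+/* read_file() is an assumed helper, not a library API: it reads the file and returns its size in *size */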
+buf = read_file("/sys/kernel/tracing/events/header_page", &size);
+if (tep_parse_header_page(tep, buf, size, sizeof(unsigned long)) != 0) {
+       /* Failed to parse the header page */
+}
+...
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+       Header file to include in order to have access to the library APIs.
+*-ltraceevent*
+       Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to  <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-record_parse.txt b/tools/lib/traceevent/Documentation/libtraceevent-record_parse.txt
new file mode 100644 (file)
index 0000000..e9a6911
--- /dev/null
@@ -0,0 +1,137 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_data_type, tep_data_pid, tep_data_preempt_count, tep_data_flags -
+Extract common fields from a record.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+enum *trace_flag_type* {
+       _TRACE_FLAG_IRQS_OFF_,
+       _TRACE_FLAG_IRQS_NOSUPPORT_,
+       _TRACE_FLAG_NEED_RESCHED_,
+       _TRACE_FLAG_HARDIRQ_,
+       _TRACE_FLAG_SOFTIRQ_,
+};
+
+int *tep_data_type*(struct tep_handle pass:[*]_tep_, struct tep_record pass:[*]_rec_);
+int *tep_data_pid*(struct tep_handle pass:[*]_tep_, struct tep_record pass:[*]_rec_);
+int *tep_data_preempt_count*(struct tep_handle pass:[*]_tep_, struct tep_record pass:[*]_rec_);
+int *tep_data_flags*(struct tep_handle pass:[*]_tep_, struct tep_record pass:[*]_rec_);
+--
+
+DESCRIPTION
+-----------
+This set of functions can be used to extract common fields from a record.
+
+The _tep_data_type()_ function gets the event id from the record _rec_.
+It reads the "common_type" field. The _tep_ argument is the trace event parser
+context.
+
+The _tep_data_pid()_ function gets the process id from the record _rec_.
+It reads the "common_pid" field. The _tep_ argument is the trace event parser
+context.
+
+The _tep_data_preempt_count()_ function gets the preemption count from the
+record _rec_. It reads the "common_preempt_count" field. The _tep_ argument is
+the trace event parser context.
+
+The _tep_data_flags()_ function gets the latency flags from the record _rec_.
+It reads the "common_flags" field. The _tep_ argument is the trace event parser
+context. Supported latency flags are:
+[verse]
+--
+       _TRACE_FLAG_IRQS_OFF_,          Interrupts are disabled.
+       _TRACE_FLAG_IRQS_NOSUPPORT_,    Reading IRQ flag is not supported by the architecture.
+       _TRACE_FLAG_NEED_RESCHED_,      Task needs rescheduling.
+       _TRACE_FLAG_HARDIRQ_,           Hard IRQ is running.
+       _TRACE_FLAG_SOFTIRQ_,           Soft IRQ is running.
+--
+
+RETURN VALUE
+------------
+The _tep_data_type()_ function returns an integer, representing the event id.
+
+The _tep_data_pid()_ function returns an integer, representing the process id.
+
+The _tep_data_preempt_count()_ function returns an integer, representing the
+preemption count.
+
+The _tep_data_flags()_ function returns an integer, representing the latency
+flags. Look at the _trace_flag_type_ enum for supported flags.
+
+In case of an error, all these functions return a negative integer.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
+void process_record(struct tep_record *record)
+{
+       int data;
+
+       data = tep_data_type(tep, record);
+       if (data >= 0) {
+               /* Got the ID of the event */
+       }
+
+       data = tep_data_pid(tep, record);
+       if (data >= 0) {
+               /* Got the process ID */
+       }
+
+       data = tep_data_preempt_count(tep, record);
+       if (data >= 0) {
+               /* Got the preemption count */
+       }
+
+       data = tep_data_flags(tep, record);
+       if (data >= 0) {
+               /* Got the latency flags */
+       }
+}
+...
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+       Header file to include in order to have access to the library APIs.
+*-ltraceevent*
+       Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to  <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-reg_event_handler.txt b/tools/lib/traceevent/Documentation/libtraceevent-reg_event_handler.txt
new file mode 100644 (file)
index 0000000..53d37d7
--- /dev/null
@@ -0,0 +1,156 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_register_event_handler, tep_unregister_event_handler - Register /
+unregister a callback function to parse event information.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+enum *tep_reg_handler* {
+       _TEP_REGISTER_SUCCESS_,
+       _TEP_REGISTER_SUCCESS_OVERWRITE_,
+};
+
+int *tep_register_event_handler*(struct tep_handle pass:[*]_tep_, int _id_, const char pass:[*]_sys_name_, const char pass:[*]_event_name_, tep_event_handler_func _func_, void pass:[*]_context_);
+int *tep_unregister_event_handler*(struct tep_handle pass:[*]tep, int id, const char pass:[*]sys_name, const char pass:[*]event_name, tep_event_handler_func func, void pass:[*]_context_);
+
+typedef int (*pass:[*]tep_event_handler_func*)(struct trace_seq pass:[*]s, struct tep_record pass:[*]record, struct tep_event pass:[*]event, void pass:[*]context);
+--
+
+DESCRIPTION
+-----------
+The _tep_register_event_handler()_ function registers a handler function,
+which is going to be called to parse the information for a given event.
+The _tep_ argument is the trace event parser context. The _id_ argument is
+the id of the event. The _sys_name_ argument is the name of the system
+the event belongs to. The _event_name_ argument is the name of the event.
+If _id_ is >= 0, it is used to find the event; otherwise _sys_name_ and
+_event_name_ are used. The _func_ argument is a pointer to the function that is going
+to be called to parse the event information. The _context_ argument is a pointer
+to the context data, which will be passed to the _func_. If a handler function
+for the same event is already registered, it will be overridden with the new
+one. This mechanism allows a developer to override the parsing of a given event.
+If for some reason the default print format is not sufficient, the developer
+can register a function for an event to be used to parse the data instead.
+
+The _tep_unregister_event_handler()_ function unregisters the handler function,
+previously registered with _tep_register_event_handler()_. The _tep_ argument
+is the trace event parser context. The _id_, _sys_name_, _event_name_, _func_,
+and _context_ are the same arguments, as when the callback function _func_ was
+registered.
+
+The _tep_event_handler_func_ is the type of the custom event handler
+function. The _s_ argument is the trace sequence; it can be used to create a
+custom string describing the event. The _record_ argument is the record to get
+the event from, and _event_ is the handle to the record's event. The
+_context_ is the custom context, set when the custom event handler was registered.
+
+RETURN VALUE
+------------
+The _tep_register_event_handler()_ function returns _TEP_REGISTER_SUCCESS_
+if the new handler is registered successfully or
+_TEP_REGISTER_SUCCESS_OVERWRITE_ if an existing handler is overwritten.
+If there is not enough memory to complete the registration,
+TEP_ERRNO__MEM_ALLOC_FAILED is returned.
+
+The _tep_unregister_event_handler()_ function returns 0 if _func_ was removed
+successfully, or -1 if the event was not found.
+
+The _tep_event_handler_func_ should return -1 in case of an error,
+or 0 otherwise.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+#include <trace-seq.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
+int timer_expire_handler(struct trace_seq *s, struct tep_record *record,
+                        struct tep_event *event, void *context)
+{
+       trace_seq_printf(s, "hrtimer=");
+
+       if (tep_print_num_field(s, "0x%llx", event, "timer", record, 0) == -1)
+               tep_print_num_field(s, "0x%llx", event, "hrtimer", record, 1);
+
+       trace_seq_printf(s, " now=");
+
+       tep_print_num_field(s, "%llu", event, "now", record, 1);
+
+       tep_print_func_field(s, " function=%s", event, "function", record, 0);
+
+       return 0;
+}
+...
+       int ret;
+
+       ret = tep_register_event_handler(tep, -1, "timer", "hrtimer_expire_entry",
+                                        timer_expire_handler, NULL);
+       if (ret < 0) {
+               char buf[32];
+
+               tep_strerror(tep, ret, buf, 32);
+               printf("Failed to register handler for hrtimer_expire_entry: %s\n", buf);
+       } else {
+               switch (ret) {
+               case TEP_REGISTER_SUCCESS:
+                       printf ("Registered handler for hrtimer_expire_entry\n");
+                       break;
+               case TEP_REGISTER_SUCCESS_OVERWRITE:
+                       printf ("Overwrote handler for hrtimer_expire_entry\n");
+                       break;
+               }
+       }
+...
+       ret = tep_unregister_event_handler(tep, -1, "timer", "hrtimer_expire_entry",
+                                          timer_expire_handler, NULL);
+       if (ret)
+               printf ("Failed to unregister handler for hrtimer_expire_entry\n");
+
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+       Header file to include in order to have access to the library APIs.
+*trace-seq.h*
+       Header file to include in order to have access to trace sequences
+       related APIs. Trace sequences are used to allow a function to call
+       several other functions to create a string of data to use.
+*-ltraceevent*
+       Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to  <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-reg_print_func.txt b/tools/lib/traceevent/Documentation/libtraceevent-reg_print_func.txt
new file mode 100644 (file)
index 0000000..708dce9
--- /dev/null
@@ -0,0 +1,155 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_register_print_function, tep_unregister_print_function -
+Register / unregister a helper function.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+enum *tep_func_arg_type* {
+       TEP_FUNC_ARG_VOID,
+       TEP_FUNC_ARG_INT,
+       TEP_FUNC_ARG_LONG,
+       TEP_FUNC_ARG_STRING,
+       TEP_FUNC_ARG_PTR,
+       TEP_FUNC_ARG_MAX_TYPES
+};
+
+typedef unsigned long long (*pass:[*]tep_func_handler*)(struct trace_seq pass:[*]s, unsigned long long pass:[*]args);
+
+int *tep_register_print_function*(struct tep_handle pass:[*]_tep_, tep_func_handler _func_, enum tep_func_arg_type _ret_type_, char pass:[*]_name_, _..._);
+int *tep_unregister_print_function*(struct tep_handle pass:[*]_tep_, tep_func_handler _func_, char pass:[*]_name_);
+--
+
+DESCRIPTION
+-----------
+Some events may have helper functions in the print format arguments.
+This allows a plugin to dynamically create a way to process one of
+these functions.
+
+The _tep_register_print_function()_ registers such a helper function. The _tep_
+argument is the trace event parser context. The _func_ argument is a pointer
+to the helper function. The _ret_type_ argument is the return type of the
+helper function, a value from the _tep_func_arg_type_ enum. The _name_ is the name
+of the helper function, as seen in the print format arguments. The _..._ is a
+variable list of _tep_func_arg_type_ enums, the _func_ function arguments.
+This list must end with _TEP_FUNC_ARG_VOID_. See the 'EXAMPLE' section.
+
+The _tep_unregister_print_function()_ unregisters a helper function, previously
+registered with _tep_register_print_function()_. The _tep_ argument is the
+trace event parser context. The _func_ and _name_ arguments are the same, used
+when the helper function was registered.
+
+The _tep_func_handler_ is the type of the helper function. The _s_ argument is
+the trace sequence; it can be used to create a custom string.
+The _args_ argument is the list of arguments, defined when the helper function
+was registered.
+
+RETURN VALUE
+------------
+The _tep_register_print_function()_ function returns 0 in case of success.
+In case of an error, a TEP_ERRNO_... code is returned.
+
+The _tep_unregister_print_function()_ returns 0 in case of success, or -1 in
+case of an error.
+
+EXAMPLE
+-------
+Some events have internal function calls that appear in the print format
+output. For example "tracefs/events/i915/g4x_wm/format" has:
+[source,c]
+--
+print fmt: "pipe %c, frame=%u, scanline=%u, wm %d/%d/%d, sr %s/%d/%d/%d, hpll %s/%d/%d/%d, fbc %s",
+           ((REC->pipe) + 'A'), REC->frame, REC->scanline, REC->primary,
+           REC->sprite, REC->cursor, yesno(REC->cxsr), REC->sr_plane,
+           REC->sr_cursor, REC->sr_fbc, yesno(REC->hpll), REC->hpll_plane,
+           REC->hpll_cursor, REC->hpll_fbc, yesno(REC->fbc)
+--
+Notice the call to function _yesno()_ in the print arguments. In the kernel
+context, this function has the following implementation:
+[source,c]
+--
+static const char *yesno(int x)
+{
+       static const char *yes = "yes";
+       static const char *no = "no";
+
+       return x ? yes : no;
+}
+--
+The user space event parser has no idea how to handle this _yesno()_ function.
+The _tep_register_print_function()_ API can be used to register a user space
+helper function, mapped to the kernel's _yesno()_:
+[source,c]
+--
+#include <event-parse.h>
+#include <trace-seq.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
+static const char *yes_no_helper(int x)
+{
+       return x ? "yes" : "no";
+}
+...
+       if (tep_register_print_function(tep,
+                                   yes_no_helper,
+                                   TEP_FUNC_ARG_STRING,
+                                   "yesno",
+                                   TEP_FUNC_ARG_INT,
+                                   TEP_FUNC_ARG_VOID) != 0) {
+               /* Failed to register yes_no_helper function */
+       }
+
+/*
+   Now, when the event parser encounters this yesno() function, it will know
+   how to handle it.
+*/
+...
+       if (tep_unregister_print_function(tep, yes_no_helper, "yesno") != 0) {
+               /* Failed to unregister yes_no_helper function */
+       }
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+       Header file to include in order to have access to the library APIs.
+*trace-seq.h*
+       Header file to include in order to have access to trace sequences
+       related APIs. Trace sequences are used to allow a function to call
+       several other functions to create a string of data to use.
+*-ltraceevent*
+       Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to  <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-set_flag.txt b/tools/lib/traceevent/Documentation/libtraceevent-set_flag.txt
new file mode 100644 (file)
index 0000000..b059978
--- /dev/null
@@ -0,0 +1,104 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_set_flag, tep_clear_flag, tep_test_flag -
+Manage flags of trace event parser context.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+enum *tep_flag* {
+       _TEP_NSEC_OUTPUT_,
+       _TEP_DISABLE_SYS_PLUGINS_,
+       _TEP_DISABLE_PLUGINS_
+};
+void *tep_set_flag*(struct tep_handle pass:[*]_tep_, enum tep_flag _flag_);
+void *tep_clear_flag*(struct tep_handle pass:[*]_tep_, enum tep_flag _flag_);
+bool *tep_test_flag*(struct tep_handle pass:[*]_tep_, enum tep_flag _flag_);
+--
+
+DESCRIPTION
+-----------
+Trace event parser context flags are defined in *enum tep_flag*:
+[verse]
+--
+_TEP_NSEC_OUTPUT_ - print the event's timestamp in nanoseconds, instead of microseconds.
+_TEP_DISABLE_SYS_PLUGINS_ - disable plugins located in the system's plugin
+                       directory. This directory is defined at library compile
+                       time, and usually depends on the library installation
+                       prefix: (install_prefix)/lib/traceevent/plugins
+_TEP_DISABLE_PLUGINS_ - disable all library plugins:
+                       - in the system's plugin directory
+                       - in the directory defined by the environment variable _TRACEEVENT_PLUGIN_DIR_
+                       - in the user's home directory, _~/.traceevent/plugins_
+--
+Note: plugin related flags must be set before calling the _tep_load_plugins()_ API.
+
+The _tep_set_flag()_ function sets _flag_ in the _tep_ context.
+
+The _tep_clear_flag()_ function clears _flag_ from the _tep_ context.
+
+The _tep_test_flag()_ function tests if _flag_ is set in the _tep_ context.
+
+RETURN VALUE
+------------
+The _tep_test_flag()_ function returns true if _flag_ is set, false otherwise.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
+/* Print timestamps in nanoseconds */
+tep_set_flag(tep, TEP_NSEC_OUTPUT);
+...
+if (tep_test_flag(tep, TEP_NSEC_OUTPUT)) {
+       /* print timestamps in nanoseconds */
+} else {
+       /* print timestamps in microseconds */
+}
+...
+/* Print timestamps in microseconds */
+tep_clear_flag(tep, TEP_NSEC_OUTPUT);
+...
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+       Header file to include in order to have access to the library APIs.
+*-ltraceevent*
+       Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to  <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-strerror.txt b/tools/lib/traceevent/Documentation/libtraceevent-strerror.txt
new file mode 100644 (file)
index 0000000..ee4062a
--- /dev/null
@@ -0,0 +1,85 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_strerror - Returns a string describing a regular errno or a tep error number.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+int *tep_strerror*(struct tep_handle pass:[*]_tep_, enum tep_errno _errnum_, char pass:[*]_buf_, size_t _buflen_);
+--
+
+DESCRIPTION
+-----------
+The _tep_strerror()_ function converts a tep error number into a human
+readable string.
+The _tep_ argument is the trace event parser context. The _errnum_ is a regular
+errno, defined in errno.h, or a tep error number. The string describing this
+error number is copied into the _buf_ argument. The _buflen_ argument is
+the size of _buf_.
+
+It is a thread safe wrapper around strerror_r(). The library function has two
+different behaviors - POSIX and GNU specific. The _tep_strerror()_ API always
+behaves as the POSIX version - the error string is copied into the user supplied
+buffer.
+
+RETURN VALUE
+------------
+The _tep_strerror()_ function returns 0, if a valid _errnum_ is passed and the
+string is copied into _buf_. If _errnum_ is not a valid error number,
+-1 is returned and _buf_ is not modified.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
+char buf[32];
+char *pool = calloc(1, 128);
+if (pool == NULL) {
+       tep_strerror(tep, TEP_ERRNO__MEM_ALLOC_FAILED, buf, 32);
+       printf("The pool is not initialized, %s", buf);
+}
+...
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+       Header file to include in order to have access to the library APIs.
+*-ltraceevent*
+       Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to  <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-tseq.txt b/tools/lib/traceevent/Documentation/libtraceevent-tseq.txt
new file mode 100644 (file)
index 0000000..8ac6aa1
--- /dev/null
@@ -0,0 +1,158 @@
+libtraceevent(3)
+================
+
+NAME
+----
+trace_seq_init, trace_seq_destroy, trace_seq_reset, trace_seq_terminate,
+trace_seq_putc, trace_seq_puts, trace_seq_printf, trace_seq_vprintf,
+trace_seq_do_fprintf, trace_seq_do_printf -
+Initialize / destroy a trace sequence.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+*#include <trace-seq.h>*
+
+void *trace_seq_init*(struct trace_seq pass:[*]_s_);
+void *trace_seq_destroy*(struct trace_seq pass:[*]_s_);
+void *trace_seq_reset*(struct trace_seq pass:[*]_s_);
+void *trace_seq_terminate*(struct trace_seq pass:[*]_s_);
+int *trace_seq_putc*(struct trace_seq pass:[*]_s_, unsigned char _c_);
+int *trace_seq_puts*(struct trace_seq pass:[*]_s_, const char pass:[*]_str_);
+int *trace_seq_printf*(struct trace_seq pass:[*]_s_, const char pass:[*]_fmt_, _..._);
+int *trace_seq_vprintf*(struct trace_seq pass:[*]_s_, const char pass:[*]_fmt_, va_list _args_);
+int *trace_seq_do_printf*(struct trace_seq pass:[*]_s_);
+int *trace_seq_do_fprintf*(struct trace_seq pass:[*]_s_, FILE pass:[*]_fp_);
+--
+
+DESCRIPTION
+-----------
+Trace sequences are used to allow a function to call several other functions
+to create a string of data to use.
+
+The _trace_seq_init()_ function initializes the trace sequence _s_.
+
+The _trace_seq_destroy()_ function destroys the trace sequence _s_ and frees
+all the resources it used.
+
+The _trace_seq_reset()_ function re-initializes the trace sequence _s_. All
+characters already written in _s_ will be deleted.
+
+The _trace_seq_terminate()_ function terminates the trace sequence _s_. It puts
+the null character pass:['\0'] at the end of the buffer.
+
+The _trace_seq_putc()_ function puts a single character _c_ in the trace
+sequence _s_.
+
+The _trace_seq_puts()_ function puts a null-terminated string _str_ in the
+trace sequence _s_.
+
+The _trace_seq_printf()_ function puts a formatted string _fmt_ with
+variable arguments _..._ in the trace sequence _s_.
+
+The _trace_seq_vprintf()_ function puts a formatted string _fmt_ with
+a list of arguments _args_ in the trace sequence _s_.
+
+The _trace_seq_do_printf()_ function prints the buffer of trace sequence _s_ to
+the standard output stdout.
+
+The _trace_seq_do_fprintf()_ function prints the buffer of trace sequence _s_
+to the given file _fp_.
+
+RETURN VALUE
+------------
+Both _trace_seq_putc()_ and _trace_seq_puts()_ functions return the number of
+characters put in the trace sequence, or 0 in case of an error.
+
+Both _trace_seq_printf()_ and _trace_seq_vprintf()_ functions return the number
+of characters printed, 0 if the string does not fit in the buffer's free space,
+or a negative value in case of an error.
+
+Both _trace_seq_do_printf()_ and _trace_seq_do_fprintf()_ functions return the
+number of printed characters, or -1 in case of an error.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+#include <trace-seq.h>
+...
+struct trace_seq seq;
+trace_seq_init(&seq);
+...
+void foo_seq_print(struct trace_seq *tseq, char *format, ...)
+{
+       va_list ap;
+       va_start(ap, format);
+       if (trace_seq_vprintf(tseq, format, ap) <= 0) {
+               /* Failed to print in the trace sequence */
+       }
+       va_end(ap);
+}
+
+trace_seq_reset(&seq);
+
+char *str = " MAN page example";
+if (trace_seq_puts(&seq, str) != strlen(str)) {
+       /* Failed to put str in the trace sequence */
+}
+if (trace_seq_putc(&seq, ':') != 1) {
+       /* Failed to put ':' in the trace sequence */
+}
+if (trace_seq_printf(&seq, " trace sequence: %d", 1) <= 0) {
+       /* Failed to print in the trace sequence */
+}
+foo_seq_print(&seq, "  %d\n", 2);
+
+trace_seq_terminate(&seq);
+...
+
+if (trace_seq_do_printf(&seq) < 0) {
+       /* Failed to print the sequence buffer to the standard output */
+}
+FILE *fp = fopen("trace.txt", "w");
+if (trace_seq_do_fprintf(&seq, fp) < 0) {
+       /* Failed to print the sequence buffer to the trace.txt file */
+}
+
+trace_seq_destroy(&seq);
+...
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+       Header file to include in order to have access to the library APIs.
+*trace-seq.h*
+       Header file to include in order to have access to trace sequences related APIs.
+*-ltraceevent*
+       Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to  <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent.txt b/tools/lib/traceevent/Documentation/libtraceevent.txt
new file mode 100644 (file)
index 0000000..fbd977b
--- /dev/null
@@ -0,0 +1,203 @@
+libtraceevent(3)
+================
+
+NAME
+----
+libtraceevent - Linux kernel trace event library
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+Management of the tep handle data structure and access to its members:
+       struct tep_handle pass:[*]*tep_alloc*(void);
+       void *tep_free*(struct tep_handle pass:[*]_tep_);
+       void *tep_ref*(struct tep_handle pass:[*]_tep_);
+       void *tep_unref*(struct tep_handle pass:[*]_tep_);
+       int *tep_ref_get*(struct tep_handle pass:[*]_tep_);
+       void *tep_set_flag*(struct tep_handle pass:[*]_tep_, enum tep_flag _flag_);
+       void *tep_clear_flag*(struct tep_handle pass:[*]_tep_, enum tep_flag _flag_);
+       bool *tep_test_flag*(struct tep_handle pass:[*]_tep_, enum tep_flag _flags_);
+       int *tep_get_cpus*(struct tep_handle pass:[*]_tep_);
+       void *tep_set_cpus*(struct tep_handle pass:[*]_tep_, int _cpus_);
+       int *tep_get_long_size*(struct tep_handle pass:[*]_tep_);
+       void *tep_set_long_size*(struct tep_handle pass:[*]_tep_, int _long_size_);
+       int *tep_get_page_size*(struct tep_handle pass:[*]_tep_);
+       void *tep_set_page_size*(struct tep_handle pass:[*]_tep_, int _page_size_);
+       bool *tep_is_latency_format*(struct tep_handle pass:[*]_tep_);
+       void *tep_set_latency_format*(struct tep_handle pass:[*]_tep_, int _lat_);
+       int *tep_get_header_page_size*(struct tep_handle pass:[*]_tep_);
+       int *tep_get_header_timestamp_size*(struct tep_handle pass:[*]_tep_);
+       bool *tep_is_old_format*(struct tep_handle pass:[*]_tep_);
+       int *tep_strerror*(struct tep_handle pass:[*]_tep_, enum tep_errno _errnum_, char pass:[*]_buf_, size_t _buflen_);
+
+Register / unregister APIs:
+       int *tep_register_trace_clock*(struct tep_handle pass:[*]_tep_, const char pass:[*]_trace_clock_);
+       int *tep_register_function*(struct tep_handle pass:[*]_tep_, char pass:[*]_name_, unsigned long long _addr_, char pass:[*]_mod_);
+       int *tep_register_event_handler*(struct tep_handle pass:[*]_tep_, int _id_, const char pass:[*]_sys_name_, const char pass:[*]_event_name_, tep_event_handler_func _func_, void pass:[*]_context_);
+       int *tep_unregister_event_handler*(struct tep_handle pass:[*]tep, int id, const char pass:[*]sys_name, const char pass:[*]event_name, tep_event_handler_func func, void pass:[*]_context_);
+       int *tep_register_print_string*(struct tep_handle pass:[*]_tep_, const char pass:[*]_fmt_, unsigned long long _addr_);
+       int *tep_register_print_function*(struct tep_handle pass:[*]_tep_, tep_func_handler _func_, enum tep_func_arg_type _ret_type_, char pass:[*]_name_, _..._);
+       int *tep_unregister_print_function*(struct tep_handle pass:[*]_tep_, tep_func_handler _func_, char pass:[*]_name_);
+
+Plugins management:
+       struct tep_plugin_list pass:[*]*tep_load_plugins*(struct tep_handle pass:[*]_tep_);
+       void *tep_unload_plugins*(struct tep_plugin_list pass:[*]_plugin_list_, struct tep_handle pass:[*]_tep_);
+       char pass:[*]pass:[*]*tep_plugin_list_options*(void);
+       void *tep_plugin_free_options_list*(char pass:[*]pass:[*]_list_);
+       int *tep_plugin_add_options*(const char pass:[*]_name_, struct tep_plugin_option pass:[*]_options_);
+       void *tep_plugin_remove_options*(struct tep_plugin_option pass:[*]_options_);
+       void *tep_print_plugins*(struct trace_seq pass:[*]_s_, const char pass:[*]_prefix_, const char pass:[*]_suffix_, const struct tep_plugin_list pass:[*]_list_);
+
+Event related APIs:
+       struct tep_event pass:[*]*tep_get_event*(struct tep_handle pass:[*]_tep_, int _index_);
+       struct tep_event pass:[*]*tep_get_first_event*(struct tep_handle pass:[*]_tep_);
+       int *tep_get_events_count*(struct tep_handle pass:[*]_tep_);
+       struct tep_event pass:[*]pass:[*]*tep_list_events*(struct tep_handle pass:[*]_tep_, enum tep_event_sort_type _sort_type_);
+       struct tep_event pass:[*]pass:[*]*tep_list_events_copy*(struct tep_handle pass:[*]_tep_, enum tep_event_sort_type _sort_type_);
+
+Event printing:
+       void *tep_print_event*(struct tep_handle pass:[*]_tep_, struct trace_seq pass:[*]_s_, struct tep_record pass:[*]_record_, bool _use_trace_clock_);
+       void *tep_print_event_data*(struct tep_handle pass:[*]_tep_, struct trace_seq pass:[*]_s_, struct tep_event pass:[*]_event_, struct tep_record pass:[*]_record_);
+       void *tep_event_info*(struct trace_seq pass:[*]_s_, struct tep_event pass:[*]_event_, struct tep_record pass:[*]_record_);
+       void *tep_print_event_task*(struct tep_handle pass:[*]_tep_, struct trace_seq pass:[*]_s_, struct tep_event pass:[*]_event_, struct tep_record pass:[*]_record_);
+       void *tep_print_event_time*(struct tep_handle pass:[*]_tep_, struct trace_seq pass:[*]_s_, struct tep_event pass:[*]_event_, struct tep_record pass:[*]record, bool _use_trace_clock_);
+       void *tep_set_print_raw*(struct tep_handle pass:[*]_tep_, int _print_raw_);
+
+Event finding:
+       struct tep_event pass:[*]*tep_find_event*(struct tep_handle pass:[*]_tep_, int _id_);
+       struct tep_event pass:[*]*tep_find_event_by_name*(struct tep_handle pass:[*]_tep_, const char pass:[*]_sys_, const char pass:[*]_name_);
+       struct tep_event pass:[*]*tep_find_event_by_record*(struct tep_handle pass:[*]_tep_, struct tep_record pass:[*]_record_);
+
+Parsing of event files:
+       int *tep_parse_header_page*(struct tep_handle pass:[*]_tep_, char pass:[*]_buf_, unsigned long _size_, int _long_size_);
+       enum tep_errno *tep_parse_event*(struct tep_handle pass:[*]_tep_, const char pass:[*]_buf_, unsigned long _size_, const char pass:[*]_sys_);
+       enum tep_errno *tep_parse_format*(struct tep_handle pass:[*]_tep_, struct tep_event pass:[*]pass:[*]_eventp_, const char pass:[*]_buf_, unsigned long _size_, const char pass:[*]_sys_);
+
+APIs related to fields from event's format files:
+       struct tep_format_field pass:[*]pass:[*]*tep_event_common_fields*(struct tep_event pass:[*]_event_);
+       struct tep_format_field pass:[*]pass:[*]*tep_event_fields*(struct tep_event pass:[*]_event_);
+       void pass:[*]*tep_get_field_raw*(struct trace_seq pass:[*]_s_, struct tep_event pass:[*]_event_, const char pass:[*]_name_, struct tep_record pass:[*]_record_, int pass:[*]_len_, int _err_);
+       int *tep_get_field_val*(struct trace_seq pass:[*]_s_, struct tep_event pass:[*]_event_, const char pass:[*]_name_, struct tep_record pass:[*]_record_, unsigned long long pass:[*]_val_, int _err_);
+       int *tep_get_common_field_val*(struct trace_seq pass:[*]_s_, struct tep_event pass:[*]_event_, const char pass:[*]_name_, struct tep_record pass:[*]_record_, unsigned long long pass:[*]_val_, int _err_);
+       int *tep_get_any_field_val*(struct trace_seq pass:[*]_s_, struct tep_event pass:[*]_event_, const char pass:[*]_name_, struct tep_record pass:[*]_record_, unsigned long long pass:[*]_val_, int _err_);
+       int *tep_read_number_field*(struct tep_format_field pass:[*]_field_, const void pass:[*]_data_, unsigned long long pass:[*]_value_);
+
+Event fields printing:
+       void *tep_print_field*(struct trace_seq pass:[*]_s_, void pass:[*]_data_, struct tep_format_field pass:[*]_field_);
+       void *tep_print_fields*(struct trace_seq pass:[*]_s_, void pass:[*]_data_, int _size_, struct tep_event pass:[*]_event_);
+       int *tep_print_num_field*(struct trace_seq pass:[*]_s_, const char pass:[*]_fmt_, struct tep_event pass:[*]_event_, const char pass:[*]_name_, struct tep_record pass:[*]_record_, int _err_);
+       int *tep_print_func_field*(struct trace_seq pass:[*]_s_, const char pass:[*]_fmt_, struct tep_event pass:[*]_event_, const char pass:[*]_name_, struct tep_record pass:[*]_record_, int _err_);
+
+Event fields finding:
+       struct tep_format_field pass:[*]*tep_find_common_field*(struct tep_event pass:[*]_event_, const char pass:[*]_name_);
+       struct tep_format_field pass:[*]*tep_find_field*(struct tep_event pass:[*]_event_, const char pass:[*]_name_);
+       struct tep_format_field pass:[*]*tep_find_any_field*(struct tep_event pass:[*]_event_, const char pass:[*]_name_);
+
+Functions resolver:
+       int *tep_set_function_resolver*(struct tep_handle pass:[*]_tep_, tep_func_resolver_t pass:[*]_func_, void pass:[*]_priv_);
+       void *tep_reset_function_resolver*(struct tep_handle pass:[*]_tep_);
+       const char pass:[*]*tep_find_function*(struct tep_handle pass:[*]_tep_, unsigned long long _addr_);
+       unsigned long long *tep_find_function_address*(struct tep_handle pass:[*]_tep_, unsigned long long _addr_);
+
+Filter management:
+       struct tep_event_filter pass:[*]*tep_filter_alloc*(struct tep_handle pass:[*]_tep_);
+       enum tep_errno *tep_filter_add_filter_str*(struct tep_event_filter pass:[*]_filter_, const char pass:[*]_filter_str_);
+       enum tep_errno *tep_filter_match*(struct tep_event_filter pass:[*]_filter_, struct tep_record pass:[*]_record_);
+       int *tep_filter_strerror*(struct tep_event_filter pass:[*]_filter_, enum tep_errno _err_, char pass:[*]buf, size_t _buflen_);
+       int *tep_event_filtered*(struct tep_event_filter pass:[*]_filter_, int _event_id_);
+       void *tep_filter_reset*(struct tep_event_filter pass:[*]_filter_);
+       void *tep_filter_free*(struct tep_event_filter pass:[*]_filter_);
+       char pass:[*]*tep_filter_make_string*(struct tep_event_filter pass:[*]_filter_, int _event_id_);
+       int *tep_filter_remove_event*(struct tep_event_filter pass:[*]_filter_, int _event_id_);
+       int *tep_filter_copy*(struct tep_event_filter pass:[*]_dest_, struct tep_event_filter pass:[*]_source_);
+       int *tep_filter_compare*(struct tep_event_filter pass:[*]_filter1_, struct tep_event_filter pass:[*]_filter2_);
+
+Parsing various data from the records:
+       void *tep_data_latency_format*(struct tep_handle pass:[*]_tep_, struct trace_seq pass:[*]_s_, struct tep_record pass:[*]_record_);
+       int *tep_data_type*(struct tep_handle pass:[*]_tep_, struct tep_record pass:[*]_rec_);
+       int *tep_data_pid*(struct tep_handle pass:[*]_tep_, struct tep_record pass:[*]_rec_);
+       int *tep_data_preempt_count*(struct tep_handle pass:[*]_tep_, struct tep_record pass:[*]_rec_);
+       int *tep_data_flags*(struct tep_handle pass:[*]_tep_, struct tep_record pass:[*]_rec_);
+
+Command and task related APIs:
+       const char pass:[*]*tep_data_comm_from_pid*(struct tep_handle pass:[*]_tep_, int _pid_);
+       struct cmdline pass:[*]*tep_data_pid_from_comm*(struct tep_handle pass:[*]_tep_, const char pass:[*]_comm_, struct cmdline pass:[*]_next_);
+       int *tep_register_comm*(struct tep_handle pass:[*]_tep_, const char pass:[*]_comm_, int _pid_);
+       int *tep_override_comm*(struct tep_handle pass:[*]_tep_, const char pass:[*]_comm_, int _pid_);
+       bool *tep_is_pid_registered*(struct tep_handle pass:[*]_tep_, int _pid_);
+       int *tep_cmdline_pid*(struct tep_handle pass:[*]_tep_, struct cmdline pass:[*]_cmdline_);
+
+Endian related APIs:
+       int *tep_is_bigendian*(void);
+       unsigned long long *tep_read_number*(struct tep_handle pass:[*]_tep_, const void pass:[*]_ptr_, int _size_);
+       bool *tep_is_file_bigendian*(struct tep_handle pass:[*]_tep_);
+       void *tep_set_file_bigendian*(struct tep_handle pass:[*]_tep_, enum tep_endian _endian_);
+       bool *tep_is_local_bigendian*(struct tep_handle pass:[*]_tep_);
+       void *tep_set_local_bigendian*(struct tep_handle pass:[*]_tep_, enum tep_endian _endian_);
+
+Trace sequences:
+*#include <trace-seq.h>*
+       void *trace_seq_init*(struct trace_seq pass:[*]_s_);
+       void *trace_seq_reset*(struct trace_seq pass:[*]_s_);
+       void *trace_seq_destroy*(struct trace_seq pass:[*]_s_);
+       int *trace_seq_printf*(struct trace_seq pass:[*]_s_, const char pass:[*]_fmt_, ...);
+       int *trace_seq_vprintf*(struct trace_seq pass:[*]_s_, const char pass:[*]_fmt_, va_list _args_);
+       int *trace_seq_puts*(struct trace_seq pass:[*]_s_, const char pass:[*]_str_);
+       int *trace_seq_putc*(struct trace_seq pass:[*]_s_, unsigned char _c_);
+       void *trace_seq_terminate*(struct trace_seq pass:[*]_s_);
+       int *trace_seq_do_fprintf*(struct trace_seq pass:[*]_s_, FILE pass:[*]_fp_);
+       int *trace_seq_do_printf*(struct trace_seq pass:[*]_s_);
+--
+
+DESCRIPTION
+-----------
+The libtraceevent(3) library provides APIs to access kernel tracepoint events
+located in the tracefs file system under the events directory.
+
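+EXAMPLE
+-------
+The example below is a minimal sketch of the typical workflow: allocate a
+parser context, feed it an event format taken from tracefs, and print a
+record. The _read_file()_ helper and the way the _record_ is obtained are
+assumptions used for illustration; they are not part of the library API.
+[source,c]
+--
+#include <event-parse.h>
+#include <trace-seq.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
+char *buf;
+int size;
+struct tep_record *record;
+struct trace_seq seq;
+
+/* read_file() is an assumed helper returning the file content and its size */
+buf = read_file("/sys/kernel/tracing/events/sched/sched_switch/format", &size);
+if (tep_parse_event(tep, buf, size, "sched") != 0) {
+       /* Failed to parse the sched_switch format */
+}
+...
+/* record is assumed to come from a tracing buffer, e.g. via the kbuffer APIs */
+trace_seq_init(&seq);
+tep_print_event(tep, &seq, record, false);
+trace_seq_do_printf(&seq);
+trace_seq_destroy(&seq);
+...
+tep_free(tep);
+--
+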
+ENVIRONMENT
+-----------
+[verse]
+--
+TRACEEVENT_PLUGIN_DIR
+       Additional plugin directory. All shared object files located in this directory will be loaded as traceevent plugins.
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+       Header file to include in order to have access to the library APIs.
+*trace-seq.h*
+       Header file to include in order to have access to trace sequences related APIs.
+       Trace sequences are used to allow a function to call several other functions
+       to create a string of data to use.
+*-ltraceevent*
+       Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to  <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/manpage-1.72.xsl b/tools/lib/traceevent/Documentation/manpage-1.72.xsl
new file mode 100644 (file)
index 0000000..b4d315c
--- /dev/null
@@ -0,0 +1,14 @@
+<!-- manpage-1.72.xsl:
+     special settings for manpages rendered from asciidoc+docbook
+     handles peculiarities in docbook-xsl 1.72.0 -->
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+               version="1.0">
+
+<xsl:import href="manpage-base.xsl"/>
+
+<!-- these are the special values for the roff control characters
+     needed for docbook-xsl 1.72.0 -->
+<xsl:param name="git.docbook.backslash">&#x2593;</xsl:param>
+<xsl:param name="git.docbook.dot"      >&#x2302;</xsl:param>
+
+</xsl:stylesheet>
diff --git a/tools/lib/traceevent/Documentation/manpage-base.xsl b/tools/lib/traceevent/Documentation/manpage-base.xsl
new file mode 100644 (file)
index 0000000..a264fa6
--- /dev/null
@@ -0,0 +1,35 @@
+<!-- manpage-base.xsl:
+     special formatting for manpages rendered from asciidoc+docbook -->
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+               version="1.0">
+
+<!-- these params silence some output from xmlto -->
+<xsl:param name="man.output.quietly" select="1"/>
+<xsl:param name="refentry.meta.get.quietly" select="1"/>
+
+<!-- convert asciidoc callouts to man page format;
+     git.docbook.backslash and git.docbook.dot params
+     must be supplied by another XSL file or other means -->
+<xsl:template match="co">
+       <xsl:value-of select="concat(
+                             $git.docbook.backslash,'fB(',
+                             substring-after(@id,'-'),')',
+                             $git.docbook.backslash,'fR')"/>
+</xsl:template>
+<xsl:template match="calloutlist">
+       <xsl:value-of select="$git.docbook.dot"/>
+       <xsl:text>sp&#10;</xsl:text>
+       <xsl:apply-templates/>
+       <xsl:text>&#10;</xsl:text>
+</xsl:template>
+<xsl:template match="callout">
+       <xsl:value-of select="concat(
+                             $git.docbook.backslash,'fB',
+                             substring-after(@arearefs,'-'),
+                             '. ',$git.docbook.backslash,'fR')"/>
+       <xsl:apply-templates/>
+       <xsl:value-of select="$git.docbook.dot"/>
+       <xsl:text>br&#10;</xsl:text>
+</xsl:template>
+
+</xsl:stylesheet>
diff --git a/tools/lib/traceevent/Documentation/manpage-bold-literal.xsl b/tools/lib/traceevent/Documentation/manpage-bold-literal.xsl
new file mode 100644 (file)
index 0000000..608eb5d
--- /dev/null
@@ -0,0 +1,17 @@
+<!-- manpage-bold-literal.xsl:
+     special formatting for manpages rendered from asciidoc+docbook -->
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+               version="1.0">
+
+<!-- render literal text as bold (instead of plain or monospace);
+     this makes literal text easier to distinguish in manpages
+     viewed on a tty -->
+<xsl:template match="literal">
+       <xsl:value-of select="$git.docbook.backslash"/>
+       <xsl:text>fB</xsl:text>
+       <xsl:apply-templates/>
+       <xsl:value-of select="$git.docbook.backslash"/>
+       <xsl:text>fR</xsl:text>
+</xsl:template>
+
+</xsl:stylesheet>
diff --git a/tools/lib/traceevent/Documentation/manpage-normal.xsl b/tools/lib/traceevent/Documentation/manpage-normal.xsl
new file mode 100644 (file)
index 0000000..a48f5b1
--- /dev/null
@@ -0,0 +1,13 @@
+<!-- manpage-normal.xsl:
+     special settings for manpages rendered from asciidoc+docbook
+     handles anything we want to keep away from docbook-xsl 1.72.0 -->
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+               version="1.0">
+
+<xsl:import href="manpage-base.xsl"/>
+
+<!-- these are the normal values for the roff control characters -->
+<xsl:param name="git.docbook.backslash">\</xsl:param>
+<xsl:param name="git.docbook.dot"      >.</xsl:param>
+
+</xsl:stylesheet>
diff --git a/tools/lib/traceevent/Documentation/manpage-suppress-sp.xsl b/tools/lib/traceevent/Documentation/manpage-suppress-sp.xsl
new file mode 100644 (file)
index 0000000..a63c763
--- /dev/null
@@ -0,0 +1,21 @@
+<!-- manpage-suppress-sp.xsl:
+     special settings for manpages rendered from asciidoc+docbook
+     handles erroneous, inline .sp in manpage output of some
+     versions of docbook-xsl -->
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+               version="1.0">
+
+<!-- attempt to work around spurious .sp at the tail of the line
+     that some versions of docbook stylesheets seem to add -->
+<xsl:template match="simpara">
+  <xsl:variable name="content">
+    <xsl:apply-templates/>
+  </xsl:variable>
+  <xsl:value-of select="normalize-space($content)"/>
+  <xsl:if test="not(ancestor::authorblurb) and
+                not(ancestor::personblurb)">
+    <xsl:text>&#10;&#10;</xsl:text>
+  </xsl:if>
+</xsl:template>
+
+</xsl:stylesheet>
index 941761d..3292c29 100644 (file)
@@ -50,9 +50,13 @@ man_dir = $(prefix)/share/man
 man_dir_SQ = '$(subst ','\'',$(man_dir))'
 pkgconfig_dir ?= $(word 1,$(shell $(PKG_CONFIG)                \
                        --variable pc_path pkg-config | tr ":" " "))
+includedir_relative = traceevent
+includedir = $(prefix)/include/$(includedir_relative)
+includedir_SQ = '$(subst ','\'',$(includedir))'
 
 export man_dir man_dir_SQ INSTALL
 export DESTDIR DESTDIR_SQ
+export EVENT_PARSE_VERSION
 
 set_plugin_dir := 1
 
@@ -279,6 +283,8 @@ define do_install_pkgconfig_file
                cp -f ${PKG_CONFIG_FILE}.template ${PKG_CONFIG_FILE};           \
                sed -i "s|INSTALL_PREFIX|${1}|g" ${PKG_CONFIG_FILE};            \
                sed -i "s|LIB_VERSION|${EVENT_PARSE_VERSION}|g" ${PKG_CONFIG_FILE}; \
+               sed -i "s|LIB_DIR|${libdir}|g" ${PKG_CONFIG_FILE}; \
+               sed -i "s|HEADER_DIR|$(includedir)|g" ${PKG_CONFIG_FILE}; \
                $(call do_install,$(PKG_CONFIG_FILE),$(pkgconfig_dir),644);     \
        else                                                                    \
                (echo Failed to locate pkg-config directory) 1>&2;              \
@@ -300,10 +306,10 @@ install_pkgconfig:
 
 install_headers:
        $(call QUIET_INSTALL, headers) \
-               $(call do_install,event-parse.h,$(prefix)/include/traceevent,644); \
-               $(call do_install,event-utils.h,$(prefix)/include/traceevent,644); \
-               $(call do_install,trace-seq.h,$(prefix)/include/traceevent,644); \
-               $(call do_install,kbuffer.h,$(prefix)/include/traceevent,644)
+               $(call do_install,event-parse.h,$(DESTDIR)$(includedir_SQ),644); \
+               $(call do_install,event-utils.h,$(DESTDIR)$(includedir_SQ),644); \
+               $(call do_install,trace-seq.h,$(DESTDIR)$(includedir_SQ),644); \
+               $(call do_install,kbuffer.h,$(DESTDIR)$(includedir_SQ),644)
 
 install: install_lib
 
@@ -313,6 +319,38 @@ clean:
                $(RM) TRACEEVENT-CFLAGS tags TAGS; \
                $(RM) $(PKG_CONFIG_FILE)
 
+PHONY += doc
+doc:
+       $(call descend,Documentation)
+
+PHONY += doc-clean
+doc-clean:
+       $(call descend,Documentation,clean)
+
+PHONY += doc-install
+doc-install:
+       $(call descend,Documentation,install)
+
+PHONY += doc-uninstall
+doc-uninstall:
+       $(call descend,Documentation,uninstall)
+
+PHONY += help
+help:
+       @echo 'Possible targets:'
+       @echo''
+       @echo '  all                 - default, compile the library and the'\
+                                     'plugins'
+       @echo '  plugins             - compile the plugins'
+       @echo '  install             - install the library, the plugins,'\
+                                       'the header and pkgconfig files'
+       @echo '  clean               - clean the library and the plugins object files'
+       @echo '  doc                 - compile the documentation files - man'\
+                                       'and html pages, in the Documentation directory'
+       @echo '  doc-clean           - clean the documentation files'
+       @echo '  doc-install         - install the man pages'
+       @echo '  doc-uninstall       - uninstall the man pages'
+       @echo''
 PHONY += force plugins
 force:
 
index 42e4d6c..86384fc 100644 (file)
@@ -1,6 +1,6 @@
 prefix=INSTALL_PREFIX
-libdir=${prefix}/lib64
-includedir=${prefix}/include/traceevent
+libdir=LIB_DIR
+includedir=HEADER_DIR
 
 Name: libtraceevent
 URL: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
index 53f8be0..8815823 100644 (file)
@@ -7,11 +7,12 @@ ARCH := x86
 endif
 
 # always use the host compiler
+HOSTAR ?= ar
 HOSTCC ?= gcc
 HOSTLD ?= ld
+AR      = $(HOSTAR)
 CC      = $(HOSTCC)
 LD      = $(HOSTLD)
-AR      = ar
 
 ifeq ($(srctree),)
 srctree := $(patsubst %/,%,$(dir $(CURDIR)))
index 9b75344..6876ee4 100644 (file)
@@ -47,7 +47,7 @@ clean:
 
 install: $(ALL_PROGRAMS)
        install -d -m 755 $(DESTDIR)$(bindir);          \
-       for program in $(ALL_PROGRAMS); do              \
+       for program in $(ALL_PROGRAMS) pcitest.sh; do   \
                install $$program $(DESTDIR)$(bindir);  \
        done;                                           \
        for script in $(ALL_SCRIPTS); do                \
index 138fb6e..18ed1b0 100644 (file)
@@ -199,6 +199,18 @@ also be supplied. For example:
 
   perf stat -C 0 -e 'hv_gpci/dtbp_ptitc,phys_processor_idx=0x2/' ...
 
+EVENT QUALIFIERS:
+
+It is also possible to add extra qualifiers to an event:
+
+percore:
+
+Sums up the event counts for all hardware threads in a core, e.g.:
+
+
+  perf stat -e cpu/event=0,umask=0x3,percore=1/
+
+
 EVENT GROUPS
 ------------
 
index 58986f4..de26943 100644 (file)
@@ -406,7 +406,8 @@ symbolic names, e.g. on x86, ax, si. To list the available registers use
 --intr-regs=ax,bx. The list of register is architecture dependent.
 
 --user-regs::
-Capture user registers at sample time. Same arguments as -I.
+Similar to -I, but capture user registers at sample time. To list the available
+user registers use --user-regs=\?.
 
 --running-time::
 Record running and enabled time for read events (:S)
@@ -478,6 +479,11 @@ Also at some cases executing less output write syscalls with bigger data size
 can take less time than executing more output write syscalls with smaller data
 size thus lowering runtime profiling overhead.
 
+-z::
+--compression-level[=n]::
+Produce a compressed trace using the specified level n (default: 1 - fastest compression,
+22 - smallest trace).
+
 --all-kernel::
 Configure all used events to run in kernel space.
 
index 39c05f8..1e312c2 100644 (file)
@@ -43,6 +43,10 @@ report::
          param1 and param2 are defined as formats for the PMU in
          /sys/bus/event_source/devices/<pmu>/format/*
 
+         'percore' is an event qualifier that sums up the event counts for both
+         hardware threads in a core. For example:
+         perf stat -A -a -e cpu/event,percore=1/,otherevent ...
+
        - a symbolically formed event like 'pmu/config=M,config1=N,config2=K/'
          where M, N, K are numbers (in decimal, hex, octal format).
          Acceptable values for each of 'config', 'config1' and 'config2'
index 593ef49..6967e9b 100644 (file)
@@ -272,6 +272,19 @@ struct {
 
 Two uint64_t for the time of first sample and the time of last sample.
 
+        HEADER_COMPRESSED = 27,
+
+struct {
+       u32     version;
+       u32     type;
+       u32     level;
+       u32     ratio;
+       u32     mmap_len;
+};
+
+Indicates that the trace contains records of the PERF_RECORD_COMPRESSED type
+that hold perf_events records in compressed form.
+
        other bits are reserved and should ignored for now
        HEADER_FEAT_BITS        = 256,
 
@@ -437,6 +450,17 @@ struct auxtrace_error_event {
 Describes a header feature. These are records used in pipe-mode that
 contain information that otherwise would be in perf.data file's header.
 
+       PERF_RECORD_COMPRESSED                  = 81,
+
+struct compressed_event {
+       struct perf_event_header        header;
+       char                            data[];
+};
+
+The header is followed by a compressed data frame that can be decompressed
+into an array of perf trace records. The size of the entire compressed event
+record including the header is limited by the max value of header.size.
+
 Event types
 
 Define the event attributes with their IDs.
index 864e375..401f0ed 100644 (file)
@@ -22,6 +22,8 @@ OPTIONS
          verbose          - general debug messages
          ordered-events   - ordered events object debug messages
          data-convert     - data convert command debug messages
+         stderr           - write debug output (option -v) to stderr
+                            in browser mode
 
 --buildid-dir::
        Setup buildid cache directory. It has higher priority than
index 7f6d538..b7cd91a 100644 (file)
@@ -8,9 +8,10 @@
 
 void perf_regs_load(u64 *regs);
 
+#define PERF_REGS_MAX PERF_REG_X86_XMM_MAX
+#define PERF_XMM_REGS_MASK     (~((1ULL << PERF_REG_X86_XMM0) - 1))
 #ifndef HAVE_ARCH_X86_64_SUPPORT
 #define PERF_REGS_MASK ((1ULL << PERF_REG_X86_32_MAX) - 1)
-#define PERF_REGS_MAX PERF_REG_X86_32_MAX
 #define PERF_SAMPLE_REGS_ABI PERF_SAMPLE_REGS_ABI_32
 #else
 #define REG_NOSUPPORT ((1ULL << PERF_REG_X86_DS) | \
@@ -18,7 +19,6 @@ void perf_regs_load(u64 *regs);
                       (1ULL << PERF_REG_X86_FS) | \
                       (1ULL << PERF_REG_X86_GS))
 #define PERF_REGS_MASK (((1ULL << PERF_REG_X86_64_MAX) - 1) & ~REG_NOSUPPORT)
-#define PERF_REGS_MAX PERF_REG_X86_64_MAX
 #define PERF_SAMPLE_REGS_ABI PERF_SAMPLE_REGS_ABI_64
 #endif
 #define PERF_REG_IP PERF_REG_X86_IP
@@ -77,6 +77,28 @@ static inline const char *perf_reg_name(int id)
        case PERF_REG_X86_R15:
                return "R15";
 #endif /* HAVE_ARCH_X86_64_SUPPORT */
+
+#define XMM(x) \
+       case PERF_REG_X86_XMM ## x:     \
+       case PERF_REG_X86_XMM ## x + 1: \
+               return "XMM" #x;
+       XMM(0)
+       XMM(1)
+       XMM(2)
+       XMM(3)
+       XMM(4)
+       XMM(5)
+       XMM(6)
+       XMM(7)
+       XMM(8)
+       XMM(9)
+       XMM(10)
+       XMM(11)
+       XMM(12)
+       XMM(13)
+       XMM(14)
+       XMM(15)
+#undef XMM
        default:
                return NULL;
        }
index fead6b3..7886ca5 100644 (file)
@@ -31,6 +31,22 @@ const struct sample_reg sample_reg_masks[] = {
        SMPL_REG(R14, PERF_REG_X86_R14),
        SMPL_REG(R15, PERF_REG_X86_R15),
 #endif
+       SMPL_REG2(XMM0, PERF_REG_X86_XMM0),
+       SMPL_REG2(XMM1, PERF_REG_X86_XMM1),
+       SMPL_REG2(XMM2, PERF_REG_X86_XMM2),
+       SMPL_REG2(XMM3, PERF_REG_X86_XMM3),
+       SMPL_REG2(XMM4, PERF_REG_X86_XMM4),
+       SMPL_REG2(XMM5, PERF_REG_X86_XMM5),
+       SMPL_REG2(XMM6, PERF_REG_X86_XMM6),
+       SMPL_REG2(XMM7, PERF_REG_X86_XMM7),
+       SMPL_REG2(XMM8, PERF_REG_X86_XMM8),
+       SMPL_REG2(XMM9, PERF_REG_X86_XMM9),
+       SMPL_REG2(XMM10, PERF_REG_X86_XMM10),
+       SMPL_REG2(XMM11, PERF_REG_X86_XMM11),
+       SMPL_REG2(XMM12, PERF_REG_X86_XMM12),
+       SMPL_REG2(XMM13, PERF_REG_X86_XMM13),
+       SMPL_REG2(XMM14, PERF_REG_X86_XMM14),
+       SMPL_REG2(XMM15, PERF_REG_X86_XMM15),
        SMPL_REG_END
 };
 
@@ -254,3 +270,31 @@ int arch_sdt_arg_parse_op(char *old_op, char **new_op)
 
        return SDT_ARG_VALID;
 }
+
+uint64_t arch__intr_reg_mask(void)
+{
+       struct perf_event_attr attr = {
+               .type                   = PERF_TYPE_HARDWARE,
+               .config                 = PERF_COUNT_HW_CPU_CYCLES,
+               .sample_type            = PERF_SAMPLE_REGS_INTR,
+               .sample_regs_intr       = PERF_XMM_REGS_MASK,
+               .precise_ip             = 1,
+               .disabled               = 1,
+               .exclude_kernel         = 1,
+       };
+       int fd;
+       /*
+        * sample_period is in an unnamed union; initialize it here so this builds on older gcc versions
+        */
+       attr.sample_period = 1;
+
+       event_attr_init(&attr);
+
+       fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
+       if (fd != -1) {
+               close(fd);
+               return (PERF_XMM_REGS_MASK | PERF_REGS_MASK);
+       }
+
+       return PERF_REGS_MASK;
+}
index 67f9d9f..77deb3a 100644 (file)
@@ -159,8 +159,6 @@ static int hist_iter__branch_callback(struct hist_entry_iter *iter,
        struct perf_evsel *evsel = iter->evsel;
        int err;
 
-       hist__account_cycles(sample->branch_stack, al, sample, false);
-
        bi = he->branch_info;
        err = addr_map_symbol__inc_samples(&bi->from, sample, evsel);
 
@@ -199,6 +197,8 @@ static int process_branch_callback(struct perf_evsel *evsel,
        if (a.map != NULL)
                a.map->dso->hit = 1;
 
+       hist__account_cycles(sample->branch_stack, al, sample, false);
+
        ret = hist_entry_iter__add(&iter, &a, PERF_MAX_STACK_DEPTH, ann);
        return ret;
 }
index 24086b7..8e0e06d 100644 (file)
@@ -837,6 +837,9 @@ int cmd_inject(int argc, const char **argv)
        if (inject.session == NULL)
                return -1;
 
+       if (zstd_init(&(inject.session->zstd_data), 0) < 0)
+               pr_warning("Decompression initialization failed.\n");
+
        if (inject.build_ids) {
                /*
                 * to make sure the mmap records are ordered correctly
@@ -867,6 +870,7 @@ int cmd_inject(int argc, const char **argv)
        ret = __cmd_inject(&inject);
 
 out_delete:
+       zstd_fini(&(inject.session->zstd_data));
        perf_session__delete(inject.session);
        return ret;
 }
index c5e1055..e2c3a58 100644 (file)
@@ -133,6 +133,11 @@ static int record__write(struct record *rec, struct perf_mmap *map __maybe_unuse
        return 0;
 }
 
+static int record__aio_enabled(struct record *rec);
+static int record__comp_enabled(struct record *rec);
+static size_t zstd_compress(struct perf_session *session, void *dst, size_t dst_size,
+                           void *src, size_t src_size);
+
 #ifdef HAVE_AIO_SUPPORT
 static int record__aio_write(struct aiocb *cblock, int trace_fd,
                void *buf, size_t size, off_t off)
@@ -183,9 +188,9 @@ static int record__aio_complete(struct perf_mmap *md, struct aiocb *cblock)
        if (rem_size == 0) {
                cblock->aio_fildes = -1;
                /*
-                * md->refcount is incremented in perf_mmap__push() for
-                * every enqueued aio write request so decrement it because
-                * the request is now complete.
+                * md->refcount is incremented in record__aio_pushfn() for
+                * every aio write request started in record__aio_push() so
+                * decrement it because the request is now complete.
                 */
                perf_mmap__put(md);
                rc = 1;
@@ -240,18 +245,89 @@ static int record__aio_sync(struct perf_mmap *md, bool sync_all)
        } while (1);
 }
 
-static int record__aio_pushfn(void *to, struct aiocb *cblock, void *bf, size_t size, off_t off)
+struct record_aio {
+       struct record   *rec;
+       void            *data;
+       size_t          size;
+};
+
+static int record__aio_pushfn(struct perf_mmap *map, void *to, void *buf, size_t size)
 {
-       struct record *rec = to;
-       int ret, trace_fd = rec->session->data->file.fd;
+       struct record_aio *aio = to;
 
-       rec->samples++;
+       /*
+        * The map->base data pointed to by buf is copied into a free map->aio.data[]
+        * buffer to release space in the kernel buffer as fast as possible, by
+        * calling perf_mmap__consume() from perf_mmap__push().
+        *
+        * That lets the kernel proceed with storing more profiling data into
+        * the kernel buffer sooner than the other per-cpu kernel buffers are handled.
+        *
+        * Copying can be done in two steps in case the chunk of profiling data
+        * crosses the upper bound of the kernel buffer. In this case we first move
+        * the part of the data from map->start up to the upper bound and then the
+        * remainder from the beginning of the kernel buffer up to the end of the
+        * data chunk.
+        */
+
+       if (record__comp_enabled(aio->rec)) {
+               size = zstd_compress(aio->rec->session, aio->data + aio->size,
+                                    perf_mmap__mmap_len(map) - aio->size,
+                                    buf, size);
+       } else {
+               memcpy(aio->data + aio->size, buf, size);
+       }
+
+       if (!aio->size) {
+               /*
+                * Increment map->refcount to guard the map->aio.data[] buffer
+                * from premature deallocation, because the map object can be
+                * released before the aio write request started on the
+                * map->aio.data[] buffer completes.
+                *
+                * perf_mmap__put() is called from record__aio_complete() once
+                * the started aio request completes, or from record__aio_push()
+                * if the request failed to start.
+                */
+               perf_mmap__get(map);
+       }
+
+       aio->size += size;
+
+       return size;
+}
 
-       ret = record__aio_write(cblock, trace_fd, bf, size, off);
+static int record__aio_push(struct record *rec, struct perf_mmap *map, off_t *off)
+{
+       int ret, idx;
+       int trace_fd = rec->session->data->file.fd;
+       struct record_aio aio = { .rec = rec, .size = 0 };
+
+       /*
+        * Call record__aio_sync() to wait until the map->aio.data[] buffer
+        * becomes available after the previous aio write operation.
+        */
+
+       idx = record__aio_sync(map, false);
+       aio.data = map->aio.data[idx];
+       ret = perf_mmap__push(map, &aio, record__aio_pushfn);
+       if (ret != 0) /* ret > 0 - no data, ret < 0 - error */
+               return ret;
+
+       rec->samples++;
+       ret = record__aio_write(&(map->aio.cblocks[idx]), trace_fd, aio.data, aio.size, *off);
        if (!ret) {
-               rec->bytes_written += size;
+               *off += aio.size;
+               rec->bytes_written += aio.size;
                if (switch_output_size(rec))
                        trigger_hit(&switch_output_trigger);
+       } else {
+               /*
+                * Decrement the map->refcount taken in record__aio_pushfn()
+                * if the record__aio_write() operation failed to start;
+                * otherwise map->refcount is dropped in record__aio_complete()
+                * after the aio write operation finishes successfully.
+                */
+               perf_mmap__put(map);
        }
 
        return ret;
@@ -273,7 +349,7 @@ static void record__aio_mmap_read_sync(struct record *rec)
        struct perf_evlist *evlist = rec->evlist;
        struct perf_mmap *maps = evlist->mmap;
 
-       if (!rec->opts.nr_cblocks)
+       if (!record__aio_enabled(rec))
                return;
 
        for (i = 0; i < evlist->nr_mmaps; i++) {
@@ -307,13 +383,8 @@ static int record__aio_parse(const struct option *opt,
 #else /* HAVE_AIO_SUPPORT */
 static int nr_cblocks_max = 0;
 
-static int record__aio_sync(struct perf_mmap *md __maybe_unused, bool sync_all __maybe_unused)
-{
-       return -1;
-}
-
-static int record__aio_pushfn(void *to __maybe_unused, struct aiocb *cblock __maybe_unused,
-               void *bf __maybe_unused, size_t size __maybe_unused, off_t off __maybe_unused)
+static int record__aio_push(struct record *rec __maybe_unused, struct perf_mmap *map __maybe_unused,
+                           off_t *off __maybe_unused)
 {
        return -1;
 }
@@ -372,6 +443,32 @@ static int record__mmap_flush_parse(const struct option *opt,
        return 0;
 }
 
+#ifdef HAVE_ZSTD_SUPPORT
+static unsigned int comp_level_default = 1;
+
+static int record__parse_comp_level(const struct option *opt, const char *str, int unset)
+{
+       struct record_opts *opts = opt->value;
+
+       if (unset) {
+               opts->comp_level = 0;
+       } else {
+               if (str)
+                       opts->comp_level = strtol(str, NULL, 0);
+               if (!opts->comp_level)
+                       opts->comp_level = comp_level_default;
+       }
+
+       return 0;
+}
+#endif
+static unsigned int comp_level_max = 22;
+
+static int record__comp_enabled(struct record *rec)
+{
+       return rec->opts.comp_level > 0;
+}
+
 static int process_synthesized_event(struct perf_tool *tool,
                                     union perf_event *event,
                                     struct perf_sample *sample __maybe_unused,
@@ -385,6 +482,11 @@ static int record__pushfn(struct perf_mmap *map, void *to, void *bf, size_t size
 {
        struct record *rec = to;
 
+       if (record__comp_enabled(rec)) {
+               size = zstd_compress(rec->session, map->data, perf_mmap__mmap_len(map), bf, size);
+               bf   = map->data;
+       }
+
        rec->samples++;
        return record__write(rec, map, bf, size);
 }
@@ -582,7 +684,7 @@ static int record__mmap_evlist(struct record *rec,
                                 opts->auxtrace_mmap_pages,
                                 opts->auxtrace_snapshot_mode,
                                 opts->nr_cblocks, opts->affinity,
-                                opts->mmap_flush) < 0) {
+                                opts->mmap_flush, opts->comp_level) < 0) {
                if (errno == EPERM) {
                        pr_err("Permission error mapping pages.\n"
                               "Consider increasing "
@@ -771,6 +873,37 @@ static void record__adjust_affinity(struct record *rec, struct perf_mmap *map)
        }
 }
 
+static size_t process_comp_header(void *record, size_t increment)
+{
+       struct compressed_event *event = record;
+       size_t size = sizeof(*event);
+
+       if (increment) {
+               event->header.size += increment;
+               return increment;
+       }
+
+       event->header.type = PERF_RECORD_COMPRESSED;
+       event->header.size = size;
+
+       return size;
+}
+
+static size_t zstd_compress(struct perf_session *session, void *dst, size_t dst_size,
+                           void *src, size_t src_size)
+{
+       size_t compressed;
+       size_t max_record_size = PERF_SAMPLE_MAX_SIZE - sizeof(struct compressed_event) - 1;
+
+       compressed = zstd_compress_stream_to_records(&session->zstd_data, dst, dst_size, src, src_size,
+                                                    max_record_size, process_comp_header);
+
+       session->bytes_transferred += src_size;
+       session->bytes_compressed  += compressed;
+
+       return compressed;
+}
+
 static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evlist,
                                    bool overwrite, bool synch)
 {
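
Judging from process_comp_header() above, the process_header() callback passed to zstd_compress_stream_to_records() follows a two-phase contract: the first call, with increment == 0, stamps a PERF_RECORD_COMPRESSED header at the start of the output record and returns its size; each later call adds the size of the chunk just produced to header.size. A compression-free stand-in for one such round trip (memcpy() in place of the real zstd streaming step, names hypothetical):

#include <stddef.h>
#include <string.h>

/* One output record: reserve the header, append one chunk, account for it. */
size_t fill_one_record(void *record, const void *chunk, size_t chunk_size,
		       size_t (*process_header)(void *record, size_t increment))
{
	size_t used = process_header(record, 0);	/* init header, returns header size */

	/* Stand-in for one zstd streaming step writing right after the header. */
	memcpy((char *)record + used, chunk, chunk_size);

	process_header(record, chunk_size);		/* grow header.size by the chunk */

	return used + chunk_size;
}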
@@ -779,7 +912,7 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli
        int rc = 0;
        struct perf_mmap *maps;
        int trace_fd = rec->data.file.fd;
-       off_t off;
+       off_t off = 0;
 
        if (!evlist)
                return 0;
@@ -805,20 +938,14 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli
                                map->flush = 1;
                        }
                        if (!record__aio_enabled(rec)) {
-                               if (perf_mmap__push(map, rec, record__pushfn) != 0) {
+                               if (perf_mmap__push(map, rec, record__pushfn) < 0) {
                                        if (synch)
                                                map->flush = flush;
                                        rc = -1;
                                        goto out;
                                }
                        } else {
-                               int idx;
-                               /*
-                                * Call record__aio_sync() to wait till map->data buffer
-                                * becomes available after previous aio write request.
-                                */
-                               idx = record__aio_sync(map, false);
-                               if (perf_mmap__aio_push(map, rec, idx, record__aio_pushfn, &off) != 0) {
+                               if (record__aio_push(rec, map, &off) < 0) {
                                        record__aio_set_pos(trace_fd, off);
                                        if (synch)
                                                map->flush = flush;
@@ -888,6 +1015,8 @@ static void record__init_features(struct record *rec)
                perf_header__clear_feat(&session->header, HEADER_CLOCKID);
 
        perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
+       if (!record__comp_enabled(rec))
+               perf_header__clear_feat(&session->header, HEADER_COMPRESSED);
 
        perf_header__clear_feat(&session->header, HEADER_STAT);
 }
@@ -1186,6 +1315,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
        bool disabled = false, draining = false;
        struct perf_evlist *sb_evlist = NULL;
        int fd;
+       float ratio = 0;
 
        atexit(record__sig_exit);
        signal(SIGCHLD, sig_handler);
@@ -1215,6 +1345,14 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
        fd = perf_data__fd(data);
        rec->session = session;
 
+       if (zstd_init(&session->zstd_data, rec->opts.comp_level) < 0) {
+               pr_err("Compression initialization failed.\n");
+               return -1;
+       }
+
+       session->header.env.comp_type  = PERF_COMP_ZSTD;
+       session->header.env.comp_level = rec->opts.comp_level;
+
        record__init_features(rec);
 
        if (rec->opts.use_clockid && rec->opts.clockid_res_ns)
@@ -1244,6 +1382,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
                err = -1;
                goto out_child;
        }
+       session->header.env.comp_mmap_len = session->evlist->mmap_len;
 
        err = bpf__apply_obj_config();
        if (err) {
@@ -1491,6 +1630,11 @@ out_child:
        record__mmap_read_all(rec, true);
        record__aio_mmap_read_sync(rec);
 
+       if (rec->session->bytes_transferred && rec->session->bytes_compressed) {
+               ratio = (float)rec->session->bytes_transferred/(float)rec->session->bytes_compressed;
+               session->header.env.comp_ratio = ratio + 0.5;
+       }
+
        if (forks) {
                int exit_status;
 
@@ -1537,12 +1681,19 @@ out_child:
                else
                        samples[0] = '\0';
 
-               fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s ]\n",
+               fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s",
                        perf_data__size(data) / 1024.0 / 1024.0,
                        data->path, postfix, samples);
+               if (ratio) {
+                       fprintf(stderr, ", compressed (original %.3f MB, ratio is %.3f)",
+                                       rec->session->bytes_transferred / 1024.0 / 1024.0,
+                                       ratio);
+               }
+               fprintf(stderr, " ]\n");
        }
 
 out_delete_session:
+       zstd_fini(&session->zstd_data);
        perf_session__delete(session);
 
        if (!opts->no_bpf_event)
@@ -2017,10 +2168,10 @@ static struct option __record_options[] = {
                    "use per-thread mmaps"),
        OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
                    "sample selected machine registers on interrupt,"
-                   " use -I ? to list register names", parse_regs),
+                   " use '-I?' to list register names", parse_intr_regs),
        OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
                    "sample selected machine registers on interrupt,"
-                   " use -I ? to list register names", parse_regs),
+                   " use '--user-regs=?' to list register names", parse_user_regs),
        OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
                    "Record running/enabled time of read (:S) events"),
        OPT_CALLBACK('k', "clockid", &record.opts,
@@ -2068,6 +2219,11 @@ static struct option __record_options[] = {
        OPT_CALLBACK(0, "affinity", &record.opts, "node|cpu",
                     "Set affinity mask of trace reading thread to NUMA node cpu mask or cpu of processed mmap buffer",
                     record__parse_affinity),
+#ifdef HAVE_ZSTD_SUPPORT
+       OPT_CALLBACK_OPTARG('z', "compression-level", &record.opts, &comp_level_default,
+                           "n", "Compress records using the specified level (default: 1 - fastest compression, 22 - greatest compression)",
+                           record__parse_comp_level),
+#endif
        OPT_END()
 };
 
@@ -2127,6 +2283,12 @@ int cmd_record(int argc, const char **argv)
                        "cgroup monitoring only available in system-wide mode");
 
        }
+
+       if (rec->opts.comp_level != 0) {
+               pr_debug("Compression enabled, disabling build id collection at the end of the session.\n");
+               rec->no_buildid = true;
+       }
+
        if (rec->opts.record_switch_events &&
            !perf_can_record_switch_events()) {
                ui__error("kernel does not support recording context switch events\n");
@@ -2272,12 +2434,15 @@ int cmd_record(int argc, const char **argv)
 
        if (rec->opts.nr_cblocks > nr_cblocks_max)
                rec->opts.nr_cblocks = nr_cblocks_max;
-       if (verbose > 0)
-               pr_info("nr_cblocks: %d\n", rec->opts.nr_cblocks);
+       pr_debug("nr_cblocks: %d\n", rec->opts.nr_cblocks);
 
        pr_debug("affinity: %s\n", affinity_tags[rec->opts.affinity]);
        pr_debug("mmap flush: %d\n", rec->opts.mmap_flush);
 
+       if (rec->opts.comp_level > comp_level_max)
+               rec->opts.comp_level = comp_level_max;
+       pr_debug("comp level: %d\n", rec->opts.comp_level);
+
        err = __cmd_record(&record, argc, argv);
 out:
        perf_evlist__delete(rec->evlist);
index 4054eb1..1ca533f 100644 (file)
@@ -136,9 +136,6 @@ static int hist_iter__report_callback(struct hist_entry_iter *iter,
        if (!ui__has_annotation() && !rep->symbol_ipc)
                return 0;
 
-       hist__account_cycles(sample->branch_stack, al, sample,
-                            rep->nonany_branch_mode);
-
        if (sort__mode == SORT_MODE__BRANCH) {
                bi = he->branch_info;
                err = addr_map_symbol__inc_samples(&bi->from, sample, evsel);
@@ -181,9 +178,6 @@ static int hist_iter__branch_callback(struct hist_entry_iter *iter,
        if (!ui__has_annotation() && !rep->symbol_ipc)
                return 0;
 
-       hist__account_cycles(sample->branch_stack, al, sample,
-                            rep->nonany_branch_mode);
-
        bi = he->branch_info;
        err = addr_map_symbol__inc_samples(&bi->from, sample, evsel);
        if (err)
@@ -282,6 +276,11 @@ static int process_sample_event(struct perf_tool *tool,
        if (al.map != NULL)
                al.map->dso->hit = 1;
 
+       if (ui__has_annotation() || rep->symbol_ipc) {
+               hist__account_cycles(sample->branch_stack, &al, sample,
+                                    rep->nonany_branch_mode);
+       }
+
        ret = hist_entry_iter__add(&iter, &al, rep->max_stack, rep);
        if (ret < 0)
                pr_debug("problem adding hist entry, skipping event\n");
@@ -1259,6 +1258,9 @@ repeat:
        if (session == NULL)
                return -1;
 
+       if (zstd_init(&(session->zstd_data), 0) < 0)
+               pr_warning("Decompression initialization failed. Reported data may be incomplete.\n");
+
        if (report.queue_size) {
                ordered_events__set_alloc_size(&session->ordered_events,
                                               report.queue_size);
@@ -1449,7 +1451,7 @@ repeat:
 error:
        if (report.ptime_range)
                zfree(&report.ptime_range);
-
+       zstd_fini(&(session->zstd_data));
        perf_session__delete(session);
        return ret;
 }
index a3c0608..24b8e69 100644 (file)
@@ -847,6 +847,18 @@ static int perf_stat__get_core_cached(struct perf_stat_config *config,
        return perf_stat__get_aggr(config, perf_stat__get_core, map, idx);
 }
 
+static bool term_percore_set(void)
+{
+       struct perf_evsel *counter;
+
+       evlist__for_each_entry(evsel_list, counter) {
+               if (counter->percore)
+                       return true;
+       }
+
+       return false;
+}
+
 static int perf_stat_init_aggr_mode(void)
 {
        int nr;
@@ -867,6 +879,15 @@ static int perf_stat_init_aggr_mode(void)
                stat_config.aggr_get_id = perf_stat__get_core_cached;
                break;
        case AGGR_NONE:
+               if (term_percore_set()) {
+                       if (cpu_map__build_core_map(evsel_list->cpus,
+                                                   &stat_config.aggr_map)) {
+                               perror("cannot build core map");
+                               return -1;
+                       }
+                       stat_config.aggr_get_id = perf_stat__get_core_cached;
+               }
+               break;
        case AGGR_GLOBAL:
        case AGGR_THREAD:
        case AGGR_UNSET:
index 369eae6..d59dee6 100644 (file)
@@ -86,6 +86,7 @@ struct record_opts {
        int          nr_cblocks;
        int          affinity;
        int          mmap_flush;
+       unsigned int comp_level;
 };
 
 enum perf_affinity {
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/core-imp-def.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/core-imp-def.json
new file mode 100644 (file)
index 0000000..0ac9b79
--- /dev/null
@@ -0,0 +1,179 @@
+[
+    {
+        "ArchStdEvent": "L1D_CACHE_RD",
+    },
+    {
+        "ArchStdEvent": "L1D_CACHE_WR",
+    },
+    {
+        "ArchStdEvent": "L1D_CACHE_REFILL_RD",
+    },
+    {
+        "ArchStdEvent": "L1D_CACHE_REFILL_WR",
+    },
+    {
+        "ArchStdEvent": "L1D_CACHE_WB_VICTIM",
+    },
+    {
+        "ArchStdEvent": "L1D_CACHE_WB_CLEAN",
+    },
+    {
+        "ArchStdEvent": "L1D_CACHE_INVAL",
+    },
+    {
+        "ArchStdEvent": "L1D_TLB_REFILL_RD",
+    },
+    {
+        "ArchStdEvent": "L1D_TLB_REFILL_WR",
+    },
+    {
+        "ArchStdEvent": "L2D_CACHE_RD",
+    },
+    {
+        "ArchStdEvent": "L2D_CACHE_WR",
+    },
+    {
+        "ArchStdEvent": "L2D_CACHE_REFILL_RD",
+    },
+    {
+        "ArchStdEvent": "L2D_CACHE_REFILL_WR",
+    },
+    {
+        "ArchStdEvent": "L2D_CACHE_WB_VICTIM",
+    },
+    {
+        "ArchStdEvent": "L2D_CACHE_WB_CLEAN",
+    },
+    {
+        "ArchStdEvent": "L2D_CACHE_INVAL",
+    },
+    {
+        "ArchStdEvent": "BUS_ACCESS_RD",
+    },
+    {
+        "ArchStdEvent": "BUS_ACCESS_WR",
+    },
+    {
+        "ArchStdEvent": "BUS_ACCESS_SHARED",
+    },
+    {
+        "ArchStdEvent": "BUS_ACCESS_NOT_SHARED",
+    },
+    {
+        "ArchStdEvent": "BUS_ACCESS_NORMAL",
+    },
+    {
+        "ArchStdEvent": "BUS_ACCESS_PERIPH",
+    },
+    {
+        "ArchStdEvent": "MEM_ACCESS_RD",
+    },
+    {
+        "ArchStdEvent": "MEM_ACCESS_WR",
+    },
+    {
+        "ArchStdEvent": "UNALIGNED_LD_SPEC",
+    },
+    {
+        "ArchStdEvent": "UNALIGNED_ST_SPEC",
+    },
+    {
+        "ArchStdEvent": "UNALIGNED_LDST_SPEC",
+    },
+    {
+        "ArchStdEvent": "LDREX_SPEC",
+    },
+    {
+        "ArchStdEvent": "STREX_PASS_SPEC",
+    },
+    {
+        "ArchStdEvent": "STREX_FAIL_SPEC",
+    },
+    {
+        "ArchStdEvent": "LD_SPEC",
+    },
+    {
+        "ArchStdEvent": "ST_SPEC",
+    },
+    {
+        "ArchStdEvent": "LDST_SPEC",
+    },
+    {
+        "ArchStdEvent": "DP_SPEC",
+    },
+    {
+        "ArchStdEvent": "ASE_SPEC",
+    },
+    {
+        "ArchStdEvent": "VFP_SPEC",
+    },
+    {
+        "ArchStdEvent": "PC_WRITE_SPEC",
+    },
+    {
+        "ArchStdEvent": "CRYPTO_SPEC",
+    },
+    {
+        "ArchStdEvent": "BR_IMMED_SPEC",
+    },
+    {
+        "ArchStdEvent": "BR_RETURN_SPEC",
+    },
+    {
+        "ArchStdEvent": "BR_INDIRECT_SPEC",
+    },
+    {
+        "ArchStdEvent": "ISB_SPEC",
+    },
+    {
+        "ArchStdEvent": "DSB_SPEC",
+    },
+    {
+        "ArchStdEvent": "DMB_SPEC",
+    },
+    {
+        "ArchStdEvent": "EXC_UNDEF",
+    },
+    {
+        "ArchStdEvent": "EXC_SVC",
+    },
+    {
+        "ArchStdEvent": "EXC_PABORT",
+    },
+    {
+        "ArchStdEvent": "EXC_DABORT",
+    },
+    {
+        "ArchStdEvent": "EXC_IRQ",
+    },
+    {
+        "ArchStdEvent": "EXC_FIQ",
+    },
+    {
+        "ArchStdEvent": "EXC_SMC",
+    },
+    {
+        "ArchStdEvent": "EXC_HVC",
+    },
+    {
+        "ArchStdEvent": "EXC_TRAP_PABORT",
+    },
+    {
+        "ArchStdEvent": "EXC_TRAP_DABORT",
+    },
+    {
+        "ArchStdEvent": "EXC_TRAP_OTHER",
+    },
+    {
+        "ArchStdEvent": "EXC_TRAP_IRQ",
+    },
+    {
+        "ArchStdEvent": "EXC_TRAP_FIQ",
+    },
+    {
+        "ArchStdEvent": "RC_LD_SPEC",
+    },
+    {
+        "ArchStdEvent": "RC_ST_SPEC",
+    },
+]
index 59cd860..927fcdd 100644 (file)
 #
 #
 #Family-model,Version,Filename,EventType
-0x00000000410fd03[[:xdigit:]],v1,arm/cortex-a53,core
+0x00000000410fd030,v1,arm/cortex-a53,core
+0x00000000420f1000,v1,arm/cortex-a53,core
+0x00000000410fd070,v1,arm/cortex-a57-a72,core
+0x00000000410fd080,v1,arm/cortex-a57-a72,core
 0x00000000420f5160,v1,cavium/thunderx2,core
 0x00000000430f0af0,v1,cavium/thunderx2,core
 0x00000000480fd010,v1,hisilicon/hip08,core
index 68c92bb..58f77fd 100644 (file)
@@ -235,6 +235,7 @@ static struct map {
        { "iMPH-U", "uncore_arb" },
        { "CPU-M-CF", "cpum_cf" },
        { "CPU-M-SF", "cpum_sf" },
+       { "UPI LL", "uncore_upi" },
        {}
 };
 
@@ -414,7 +415,6 @@ static int save_arch_std_events(void *data, char *name, char *event,
                                char *metric_name, char *metric_group)
 {
        struct event_struct *es;
-       struct stat *sb = data;
 
        es = malloc(sizeof(*es));
        if (!es)
index 74ef92f..affed7d 100755 (executable)
@@ -456,6 +456,10 @@ class CallGraphLevelItemBase(object):
                self.query_done = False;
                self.child_count = 0
                self.child_items = []
+               if parent_item:
+                       self.level = parent_item.level + 1
+               else:
+                       self.level = 0
 
        def getChildItem(self, row):
                return self.child_items[row]
@@ -877,9 +881,14 @@ class TreeWindowBase(QMdiSubWindow):
                super(TreeWindowBase, self).__init__(parent)
 
                self.model = None
-               self.view = None
                self.find_bar = None
 
+               self.view = QTreeView()
+               self.view.setSelectionMode(QAbstractItemView.ContiguousSelection)
+               self.view.CopyCellsToClipboard = CopyTreeCellsToClipboard
+
+               self.context_menu = TreeContextMenu(self.view)
+
        def DisplayFound(self, ids):
                if not len(ids):
                        return False
@@ -921,7 +930,6 @@ class CallGraphWindow(TreeWindowBase):
 
                self.model = LookupCreateModel("Context-Sensitive Call Graph", lambda x=glb: CallGraphModel(x))
 
-               self.view = QTreeView()
                self.view.setModel(self.model)
 
                for c, w in ((0, 250), (1, 100), (2, 60), (3, 70), (4, 70), (5, 100)):
@@ -944,7 +952,6 @@ class CallTreeWindow(TreeWindowBase):
 
                self.model = LookupCreateModel("Call Tree", lambda x=glb: CallTreeModel(x))
 
-               self.view = QTreeView()
                self.view.setModel(self.model)
 
                for c, w in ((0, 230), (1, 100), (2, 100), (3, 70), (4, 70), (5, 100)):
@@ -1649,10 +1656,14 @@ class BranchWindow(QMdiSubWindow):
 
                self.view = QTreeView()
                self.view.setUniformRowHeights(True)
+               self.view.setSelectionMode(QAbstractItemView.ContiguousSelection)
+               self.view.CopyCellsToClipboard = CopyTreeCellsToClipboard
                self.view.setModel(self.model)
 
                self.ResizeColumnsToContents()
 
+               self.context_menu = TreeContextMenu(self.view)
+
                self.find_bar = FindBar(self, self, True)
 
                self.finder = ChildDataItemFinder(self.model.root)
@@ -2261,6 +2272,240 @@ class ResizeColumnsToContentsBase(QObject):
                self.data_model.rowsInserted.disconnect(self.UpdateColumnWidths)
                self.ResizeColumnsToContents()
 
+# Convert value to CSV
+
+def ToCSValue(val):
+       if '"' in val:
+               val = val.replace('"', '""')
+       if "," in val or '"' in val:
+               val = '"' + val + '"'
+       return val
+
+# Key to sort table model indexes by row / column, assuming fewer than 1000 columns
+
+glb_max_cols = 1000
+
+def RowColumnKey(a):
+       return a.row() * glb_max_cols + a.column()
+
+# Copy selected table cells to clipboard
+
+def CopyTableCellsToClipboard(view, as_csv=False, with_hdr=False):
+       indexes = sorted(view.selectedIndexes(), key=RowColumnKey)
+       idx_cnt = len(indexes)
+       if not idx_cnt:
+               return
+       if idx_cnt == 1:
+               with_hdr=False
+       min_row = indexes[0].row()
+       max_row = indexes[0].row()
+       min_col = indexes[0].column()
+       max_col = indexes[0].column()
+       for i in indexes:
+               min_row = min(min_row, i.row())
+               max_row = max(max_row, i.row())
+               min_col = min(min_col, i.column())
+               max_col = max(max_col, i.column())
+       if max_col > glb_max_cols:
+               raise RuntimeError("glb_max_cols is too low")
+       max_width = [0] * (1 + max_col - min_col)
+       for i in indexes:
+               c = i.column() - min_col
+               max_width[c] = max(max_width[c], len(str(i.data())))
+       text = ""
+       pad = ""
+       sep = ""
+       if with_hdr:
+               model = indexes[0].model()
+               for col in range(min_col, max_col + 1):
+                       val = model.headerData(col, Qt.Horizontal)
+                       if as_csv:
+                               text += sep + ToCSValue(val)
+                               sep = ","
+                       else:
+                               c = col - min_col
+                               max_width[c] = max(max_width[c], len(val))
+                               width = max_width[c]
+                               align = model.headerData(col, Qt.Horizontal, Qt.TextAlignmentRole)
+                               if align & Qt.AlignRight:
+                                       val = val.rjust(width)
+                               text += pad + sep + val
+                               pad = " " * (width - len(val))
+                               sep = "  "
+               text += "\n"
+               pad = ""
+               sep = ""
+       last_row = min_row
+       for i in indexes:
+               if i.row() > last_row:
+                       last_row = i.row()
+                       text += "\n"
+                       pad = ""
+                       sep = ""
+               if as_csv:
+                       text += sep + ToCSValue(str(i.data()))
+                       sep = ","
+               else:
+                       width = max_width[i.column() - min_col]
+                       if i.data(Qt.TextAlignmentRole) & Qt.AlignRight:
+                               val = str(i.data()).rjust(width)
+                       else:
+                               val = str(i.data())
+                       text += pad + sep + val
+                       pad = " " * (width - len(val))
+                       sep = "  "
+       QApplication.clipboard().setText(text)
+
+def CopyTreeCellsToClipboard(view, as_csv=False, with_hdr=False):
+       indexes = view.selectedIndexes()
+       if not len(indexes):
+               return
+
+       selection = view.selectionModel()
+
+       first = None
+       for i in indexes:
+               above = view.indexAbove(i)
+               if not selection.isSelected(above):
+                       first = i
+                       break
+
+       if first is None:
+               raise RuntimeError("CopyTreeCellsToClipboard internal error")
+
+       model = first.model()
+       row_cnt = 0
+       col_cnt = model.columnCount(first)
+       max_width = [0] * col_cnt
+
+       indent_sz = 2
+       indent_str = " " * indent_sz
+
+       expanded_mark_sz = 2
+       if sys.version_info[0] == 3:
+               expanded_mark = "\u25BC "
+               not_expanded_mark = "\u25B6 "
+       else:
+               expanded_mark = unicode(chr(0xE2) + chr(0x96) + chr(0xBC) + " ", "utf-8")
+               not_expanded_mark =  unicode(chr(0xE2) + chr(0x96) + chr(0xB6) + " ", "utf-8")
+       leaf_mark = "  "
+
+       if not as_csv:
+               pos = first
+               while True:
+                       row_cnt += 1
+                       row = pos.row()
+                       for c in range(col_cnt):
+                               i = pos.sibling(row, c)
+                               if c:
+                                       n = len(str(i.data()))
+                               else:
+                                       n = len(str(i.data()).strip())
+                                       n += (i.internalPointer().level - 1) * indent_sz
+                                       n += expanded_mark_sz
+                               max_width[c] = max(max_width[c], n)
+                       pos = view.indexBelow(pos)
+                       if not selection.isSelected(pos):
+                               break
+
+       text = ""
+       pad = ""
+       sep = ""
+       if with_hdr:
+               for c in range(col_cnt):
+                       val = model.headerData(c, Qt.Horizontal, Qt.DisplayRole).strip()
+                       if as_csv:
+                               text += sep + ToCSValue(val)
+                               sep = ","
+                       else:
+                               max_width[c] = max(max_width[c], len(val))
+                               width = max_width[c]
+                               align = model.headerData(c, Qt.Horizontal, Qt.TextAlignmentRole)
+                               if align & Qt.AlignRight:
+                                       val = val.rjust(width)
+                               text += pad + sep + val
+                               pad = " " * (width - len(val))
+                               sep = "   "
+               text += "\n"
+               pad = ""
+               sep = ""
+
+       pos = first
+       while True:
+               row = pos.row()
+               for c in range(col_cnt):
+                       i = pos.sibling(row, c)
+                       val = str(i.data())
+                       if not c:
+                               if model.hasChildren(i):
+                                       if view.isExpanded(i):
+                                               mark = expanded_mark
+                                       else:
+                                               mark = not_expanded_mark
+                               else:
+                                       mark = leaf_mark
+                               val = indent_str * (i.internalPointer().level - 1) + mark + val.strip()
+                       if as_csv:
+                               text += sep + ToCSValue(val)
+                               sep = ","
+                       else:
+                               width = max_width[c]
+                               if c and i.data(Qt.TextAlignmentRole) & Qt.AlignRight:
+                                       val = val.rjust(width)
+                               text += pad + sep + val
+                               pad = " " * (width - len(val))
+                               sep = "   "
+               pos = view.indexBelow(pos)
+               if not selection.isSelected(pos):
+                       break
+               text = text.rstrip() + "\n"
+               pad = ""
+               sep = ""
+
+       QApplication.clipboard().setText(text)
+
+def CopyCellsToClipboard(view, as_csv=False, with_hdr=False):
+       view.CopyCellsToClipboard(view, as_csv, with_hdr)
+
+def CopyCellsToClipboardHdr(view):
+       CopyCellsToClipboard(view, False, True)
+
+def CopyCellsToClipboardCSV(view):
+       CopyCellsToClipboard(view, True, True)
+
+# Context menu
+
+class ContextMenu(object):
+
+       def __init__(self, view):
+               self.view = view
+               self.view.setContextMenuPolicy(Qt.CustomContextMenu)
+               self.view.customContextMenuRequested.connect(self.ShowContextMenu)
+
+       def ShowContextMenu(self, pos):
+               menu = QMenu(self.view)
+               self.AddActions(menu)
+               menu.exec_(self.view.mapToGlobal(pos))
+
+       def AddCopy(self, menu):
+               menu.addAction(CreateAction("&Copy selection", "Copy to clipboard", lambda: CopyCellsToClipboardHdr(self.view), self.view))
+               menu.addAction(CreateAction("Copy selection as CS&V", "Copy to clipboard as CSV", lambda: CopyCellsToClipboardCSV(self.view), self.view))
+
+       def AddActions(self, menu):
+               self.AddCopy(menu)
+
+class TreeContextMenu(ContextMenu):
+
+       def __init__(self, view):
+               super(TreeContextMenu, self).__init__(view)
+
+       def AddActions(self, menu):
+               i = self.view.currentIndex()
+               text = str(i.data()).strip()
+               if len(text):
+                       menu.addAction(CreateAction('Copy "' + text + '"', "Copy to clipboard", lambda: QApplication.clipboard().setText(text), self.view))
+               self.AddCopy(menu)
+
 # Table window
 
 class TableWindow(QMdiSubWindow, ResizeColumnsToContentsBase):
@@ -2279,9 +2524,13 @@ class TableWindow(QMdiSubWindow, ResizeColumnsToContentsBase):
                self.view.verticalHeader().setVisible(False)
                self.view.sortByColumn(-1, Qt.AscendingOrder)
                self.view.setSortingEnabled(True)
+               self.view.setSelectionMode(QAbstractItemView.ContiguousSelection)
+               self.view.CopyCellsToClipboard = CopyTableCellsToClipboard
 
                self.ResizeColumnsToContents()
 
+               self.context_menu = ContextMenu(self.view)
+
                self.find_bar = FindBar(self, self, True)
 
                self.finder = ChildDataItemFinder(self.data_model)
@@ -2395,6 +2644,10 @@ class TopCallsWindow(QMdiSubWindow, ResizeColumnsToContentsBase):
                self.view.setModel(self.model)
                self.view.setEditTriggers(QAbstractItemView.NoEditTriggers)
                self.view.verticalHeader().setVisible(False)
+               self.view.setSelectionMode(QAbstractItemView.ContiguousSelection)
+               self.view.CopyCellsToClipboard = CopyTableCellsToClipboard
+
+               self.context_menu = ContextMenu(self.view)
 
                self.ResizeColumnsToContents()
 
@@ -2660,6 +2913,60 @@ class HelpOnlyWindow(QMainWindow):
 
                self.setCentralWidget(self.text)
 
+# PostgreSQL server version
+
+def PostgreSQLServerVersion(db):
+       query = QSqlQuery(db)
+       QueryExec(query, "SELECT VERSION()")
+       if query.next():
+               v_str = query.value(0)
+               v_list = v_str.strip().split(" ")
+               if v_list[0] == "PostgreSQL" and v_list[2] == "on":
+                       return v_list[1]
+               return v_str
+       return "Unknown"
+
+# SQLite version
+
+def SQLiteVersion(db):
+       query = QSqlQuery(db)
+       QueryExec(query, "SELECT sqlite_version()")
+       if query.next():
+               return query.value(0)
+       return "Unknown"
+
+# About dialog
+
+class AboutDialog(QDialog):
+
+       def __init__(self, glb, parent=None):
+               super(AboutDialog, self).__init__(parent)
+
+               self.setWindowTitle("About Exported SQL Viewer")
+               self.setMinimumWidth(300)
+
+               pyside_version = "1" if pyside_version_1 else "2"
+
+               text = "<pre>"
+               text += "Python version:     " + sys.version.split(" ")[0] + "\n"
+               text += "PySide version:     " + pyside_version + "\n"
+               text += "Qt version:         " + qVersion() + "\n"
+               if glb.dbref.is_sqlite3:
+                       text += "SQLite version:     " + SQLiteVersion(glb.db) + "\n"
+               else:
+                       text += "PostgreSQL version: " + PostgreSQLServerVersion(glb.db) + "\n"
+               text += "</pre>"
+
+               self.text = QTextBrowser()
+               self.text.setHtml(text)
+               self.text.setReadOnly(True)
+               self.text.setOpenExternalLinks(True)
+
+               self.vbox = QVBoxLayout()
+               self.vbox.addWidget(self.text)
+
+               self.setLayout(self.vbox)
+
 # Font resize
 
 def ResizeFont(widget, diff):
@@ -2732,6 +3039,8 @@ class MainWindow(QMainWindow):
                file_menu.addAction(CreateExitAction(glb.app, self))
 
                edit_menu = menu.addMenu("&Edit")
+               edit_menu.addAction(CreateAction("&Copy", "Copy to clipboard", self.CopyToClipboard, self, QKeySequence.Copy))
+               edit_menu.addAction(CreateAction("Copy as CS&V", "Copy to clipboard as CSV", self.CopyToClipboardCSV, self))
                edit_menu.addAction(CreateAction("&Find...", "Find items", self.Find, self, QKeySequence.Find))
                edit_menu.addAction(CreateAction("Fetch &more records...", "Fetch more records", self.FetchMoreRecords, self, [QKeySequence(Qt.Key_F8)]))
                edit_menu.addAction(CreateAction("&Shrink Font", "Make text smaller", self.ShrinkFont, self, [QKeySequence("Ctrl+-")]))
@@ -2755,6 +3064,21 @@ class MainWindow(QMainWindow):
 
                help_menu = menu.addMenu("&Help")
                help_menu.addAction(CreateAction("&Exported SQL Viewer Help", "Helpful information", self.Help, self, QKeySequence.HelpContents))
+               help_menu.addAction(CreateAction("&About Exported SQL Viewer", "About this application", self.About, self))
+
+       def Try(self, fn):
+               win = self.mdi_area.activeSubWindow()
+               if win:
+                       try:
+                               fn(win.view)
+                       except:
+                               pass
+
+       def CopyToClipboard(self):
+               self.Try(CopyCellsToClipboardHdr)
+
+       def CopyToClipboardCSV(self):
+               self.Try(CopyCellsToClipboardCSV)
 
        def Find(self):
                win = self.mdi_area.activeSubWindow()
@@ -2773,12 +3097,10 @@ class MainWindow(QMainWindow):
                                pass
 
        def ShrinkFont(self):
-               win = self.mdi_area.activeSubWindow()
-               ShrinkFont(win.view)
+               self.Try(ShrinkFont)
 
        def EnlargeFont(self):
-               win = self.mdi_area.activeSubWindow()
-               EnlargeFont(win.view)
+               self.Try(EnlargeFont)
 
        def EventMenu(self, events, reports_menu):
                branches_events = 0
@@ -2828,6 +3150,10 @@ class MainWindow(QMainWindow):
        def Help(self):
                HelpWindow(self.glb, self)
 
+       def About(self):
+               dialog = AboutDialog(self.glb, self)
+               dialog.exec_()
+
 # XED Disassembler
 
 class xed_state_t(Structure):
index 7f6c520..946ab4b 100644 (file)
@@ -304,7 +304,7 @@ int test__dso_data_cache(struct test *test __maybe_unused, int subtest __maybe_u
        /* Make sure we did not leak any file descriptor. */
        nr_end = open_files_cnt();
        pr_debug("nr start %ld, nr stop %ld\n", nr, nr_end);
-       TEST_ASSERT_VAL("failed leadking files", nr == nr_end);
+       TEST_ASSERT_VAL("failed leaking files", nr == nr_end);
        return 0;
 }
 
@@ -380,6 +380,6 @@ int test__dso_data_reopen(struct test *test __maybe_unused, int subtest __maybe_
        /* Make sure we did not leak any file descriptor. */
        nr_end = open_files_cnt();
        pr_debug("nr start %ld, nr stop %ld\n", nr, nr_end);
-       TEST_ASSERT_VAL("failed leadking files", nr == nr_end);
+       TEST_ASSERT_VAL("failed leaking files", nr == nr_end);
        return 0;
 }
index e467235..5363a12 100644 (file)
@@ -107,7 +107,7 @@ make_minimal        := NO_LIBPERL=1 NO_LIBPYTHON=1 NO_NEWT=1 NO_GTK2=1
 make_minimal        += NO_DEMANGLE=1 NO_LIBELF=1 NO_LIBUNWIND=1 NO_BACKTRACE=1
 make_minimal        += NO_LIBNUMA=1 NO_LIBAUDIT=1 NO_LIBBIONIC=1
 make_minimal        += NO_LIBDW_DWARF_UNWIND=1 NO_AUXTRACE=1 NO_LIBBPF=1
-make_minimal        += NO_LIBCRYPTO=1 NO_SDT=1 NO_JVMTI=1
+make_minimal        += NO_LIBCRYPTO=1 NO_SDT=1 NO_JVMTI=1 NO_LIBZSTD=1
 
 # $(run) contains all available tests
 run := make_pure
diff --git a/tools/perf/tests/shell/record+zstd_comp_decomp.sh b/tools/perf/tests/shell/record+zstd_comp_decomp.sh
new file mode 100755 (executable)
index 0000000..5dcba80
--- /dev/null
@@ -0,0 +1,34 @@
+#!/bin/sh
+# Zstd perf.data compression/decompression
+
+trace_file=$(mktemp /tmp/perf.data.XXX)
+perf_tool=perf
+
+skip_if_no_z_record() {
+       $perf_tool record -h 2>&1 | grep -q '\-z, \-\-compression\-level'
+}
+
+collect_z_record() {
+       echo "Collecting compressed record file:"
+       $perf_tool record -o $trace_file -g -z -F 5000 -- \
+               dd count=500 if=/dev/random of=/dev/null
+}
+
+check_compressed_stats() {
+       echo "Checking compressed events stats:"
+       $perf_tool report -i $trace_file --header --stats | \
+               grep -E "(# compressed : Zstd,)|(COMPRESSED events:)"
+}
+
+check_compressed_output() {
+       $perf_tool inject -i $trace_file -o $trace_file.decomp &&
+       $perf_tool report -i $trace_file --stdio | head -n -3 > $trace_file.comp.output &&
+       $perf_tool report -i $trace_file.decomp --stdio | head -n -3 > $trace_file.decomp.output &&
+       diff $trace_file.comp.output $trace_file.decomp.output
+}
+
+skip_if_no_z_record || exit 2
+collect_z_record && check_compressed_stats && check_compressed_output
+err=$?
+rm -f $trace_file*
+exit $err
index 8dd3102..6d5bbc8 100644 (file)
@@ -145,6 +145,8 @@ perf-y += scripting-engines/
 
 perf-$(CONFIG_ZLIB) += zlib.o
 perf-$(CONFIG_LZMA) += lzma.o
+perf-$(CONFIG_ZSTD) += zstd.o
+
 perf-y += demangle-java.o
 perf-y += demangle-rust.o
 
index 0976298..0b8573f 100644 (file)
@@ -1021,7 +1021,7 @@ static void annotation__count_and_fill(struct annotation *notes, u64 start, u64
                float ipc = n_insn / ((double)ch->cycles / (double)ch->num);
 
                /* Hide data when there are too many overlaps. */
-               if (ch->reset >= 0x7fff || ch->reset >= ch->num / 2)
+               if (ch->reset >= 0x7fff)
                        return;
 
                for (offset = start; offset <= end; offset++) {
index 892e92e..0cd3369 100644 (file)
@@ -2,6 +2,11 @@
 #ifndef PERF_COMPRESS_H
 #define PERF_COMPRESS_H
 
+#include <stdbool.h>
+#ifdef HAVE_ZSTD_SUPPORT
+#include <zstd.h>
+#endif
+
 #ifdef HAVE_ZLIB_SUPPORT
 int gzip_decompress_to_file(const char *input, int output_fd);
 bool gzip_is_compressed(const char *input);
@@ -12,4 +17,52 @@ int lzma_decompress_to_file(const char *input, int output_fd);
 bool lzma_is_compressed(const char *input);
 #endif
 
+struct zstd_data {
+#ifdef HAVE_ZSTD_SUPPORT
+       ZSTD_CStream    *cstream;
+       ZSTD_DStream    *dstream;
+#endif
+};
+
+#ifdef HAVE_ZSTD_SUPPORT
+
+int zstd_init(struct zstd_data *data, int level);
+int zstd_fini(struct zstd_data *data);
+
+size_t zstd_compress_stream_to_records(struct zstd_data *data, void *dst, size_t dst_size,
+                                      void *src, size_t src_size, size_t max_record_size,
+                                      size_t process_header(void *record, size_t increment));
+
+size_t zstd_decompress_stream(struct zstd_data *data, void *src, size_t src_size,
+                             void *dst, size_t dst_size);
+#else /* !HAVE_ZSTD_SUPPORT */
+
+static inline int zstd_init(struct zstd_data *data __maybe_unused, int level __maybe_unused)
+{
+       return 0;
+}
+
+static inline int zstd_fini(struct zstd_data *data __maybe_unused)
+{
+       return 0;
+}
+
+static inline
+size_t zstd_compress_stream_to_records(struct zstd_data *data __maybe_unused,
+                                      void *dst __maybe_unused, size_t dst_size __maybe_unused,
+                                      void *src __maybe_unused, size_t src_size __maybe_unused,
+                                      size_t max_record_size __maybe_unused,
+                                      size_t process_header(void *record, size_t increment) __maybe_unused)
+{
+       return 0;
+}
+
+static inline size_t zstd_decompress_stream(struct zstd_data *data __maybe_unused, void *src __maybe_unused,
+                                           size_t src_size __maybe_unused, void *dst __maybe_unused,
+                                           size_t dst_size __maybe_unused)
+{
+       return 0;
+}
+#endif
+
 #endif /* PERF_COMPRESS_H */
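
A hedged usage sketch for the API declared above, assuming it is compiled inside tools/perf with libzstd available (HAVE_ZSTD_SUPPORT). The header callback below is a stub that only reserves space; the real one is process_comp_header() in builtin-record.c, and the buffer sizes and max_record_size here are illustrative, not the PERF_SAMPLE_MAX_SIZE-derived limit perf uses.

#include <stdio.h>
#include "util/compress.h"	/* path assumes building within tools/perf */

/* Stub header callback: reserve 8 bytes (sizeof(struct perf_event_header)) on
 * the first call, then just account each produced chunk.  The real callback
 * also fills in the record type and size.
 */
static size_t stub_header(void *record, size_t increment)
{
	(void)record;
	return increment ? increment : 8;
}

int main(void)
{
	struct zstd_data zd;
	static char src[4096], dst[8192];
	size_t out;

	if (zstd_init(&zd, 1))			/* level 1: fastest */
		return 1;

	out = zstd_compress_stream_to_records(&zd, dst, sizeof(dst), src, sizeof(src),
					      sizeof(dst), stub_header);
	printf("produced %zu bytes of PERF_RECORD_COMPRESSED payload\n", out);

	zstd_fini(&zd);
	return 0;
}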
index 4f8e2b4..271a90b 100644 (file)
@@ -62,6 +62,11 @@ struct perf_env {
        struct cpu_topology_map *cpu;
        struct cpu_cache_level  *caches;
        int                      caches_cnt;
+       u32                     comp_ratio;
+       u32                     comp_ver;
+       u32                     comp_type;
+       u32                     comp_level;
+       u32                     comp_mmap_len;
        struct numa_node        *numa_nodes;
        struct memory_node      *memory_nodes;
        unsigned long long       memory_bsize;
@@ -80,6 +85,12 @@ struct perf_env {
        } bpf_progs;
 };
 
+enum perf_compress_type {
+       PERF_COMP_NONE = 0,
+       PERF_COMP_ZSTD,
+       PERF_COMP_MAX
+};
+
 struct bpf_prog_info_node;
 struct btf_node;
 
index ba7be74..d1ad6c4 100644 (file)
@@ -68,6 +68,7 @@ static const char *perf_event__names[] = {
        [PERF_RECORD_EVENT_UPDATE]              = "EVENT_UPDATE",
        [PERF_RECORD_TIME_CONV]                 = "TIME_CONV",
        [PERF_RECORD_HEADER_FEATURE]            = "FEATURE",
+       [PERF_RECORD_COMPRESSED]                = "COMPRESSED",
 };
 
 static const char *perf_ns__names[] = {
index 4e908ec..9e99955 100644 (file)
@@ -255,6 +255,7 @@ enum perf_user_event_type { /* above any possible kernel type */
        PERF_RECORD_EVENT_UPDATE                = 78,
        PERF_RECORD_TIME_CONV                   = 79,
        PERF_RECORD_HEADER_FEATURE              = 80,
+       PERF_RECORD_COMPRESSED                  = 81,
        PERF_RECORD_HEADER_MAX
 };
 
@@ -627,6 +628,11 @@ struct feature_event {
        char                            data[];
 };
 
+struct compressed_event {
+       struct perf_event_header        header;
+       char                            data[];
+};
+
 union perf_event {
        struct perf_event_header        header;
        struct mmap_event               mmap;
@@ -660,6 +666,7 @@ union perf_event {
        struct feature_event            feat;
        struct ksymbol_event            ksymbol_event;
        struct bpf_event                bpf_event;
+       struct compressed_event         pack;
 };
 
 void perf_event__print_totals(void);
index 4b6783f..69d0fa8 100644 (file)
@@ -1009,7 +1009,8 @@ int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
  */
 int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
                         unsigned int auxtrace_pages,
-                        bool auxtrace_overwrite, int nr_cblocks, int affinity, int flush)
+                        bool auxtrace_overwrite, int nr_cblocks, int affinity, int flush,
+                        int comp_level)
 {
        struct perf_evsel *evsel;
        const struct cpu_map *cpus = evlist->cpus;
@@ -1019,7 +1020,8 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
         * Its value is decided by evsel's write_backward.
         * So &mp should not be passed through const pointer.
         */
-       struct mmap_params mp = { .nr_cblocks = nr_cblocks, .affinity = affinity, .flush = flush };
+       struct mmap_params mp = { .nr_cblocks = nr_cblocks, .affinity = affinity, .flush = flush,
+                                 .comp_level = comp_level };
 
        if (!evlist->mmap)
                evlist->mmap = perf_evlist__alloc_mmap(evlist, false);
@@ -1051,7 +1053,7 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
 
 int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages)
 {
-       return perf_evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS, 1);
+       return perf_evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS, 1, 0);
 }
 
 int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
index c9a0f72..49354fe 100644 (file)
@@ -178,7 +178,7 @@ unsigned long perf_event_mlock_kb_in_pages(void);
 int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
                         unsigned int auxtrace_pages,
                         bool auxtrace_overwrite, int nr_cblocks,
-                        int affinity, int flush);
+                        int affinity, int flush, int comp_level);
 int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages);
 void perf_evlist__munmap(struct perf_evlist *evlist);
 
index a10cf4c..a6f572a 100644 (file)
@@ -813,6 +813,8 @@ static void apply_config_terms(struct perf_evsel *evsel,
                        break;
                case PERF_EVSEL__CONFIG_TERM_DRV_CFG:
                        break;
+               case PERF_EVSEL__CONFIG_TERM_PERCORE:
+                       break;
                default:
                        break;
                }
index 6d190cb..cad54e8 100644 (file)
@@ -50,6 +50,7 @@ enum term_type {
        PERF_EVSEL__CONFIG_TERM_OVERWRITE,
        PERF_EVSEL__CONFIG_TERM_DRV_CFG,
        PERF_EVSEL__CONFIG_TERM_BRANCH,
+       PERF_EVSEL__CONFIG_TERM_PERCORE,
 };
 
 struct perf_evsel_config_term {
@@ -67,6 +68,7 @@ struct perf_evsel_config_term {
                bool    overwrite;
                char    *branch;
                unsigned long max_events;
+               bool    percore;
        } val;
        bool weak;
 };
@@ -158,6 +160,7 @@ struct perf_evsel {
        struct perf_evsel       **metric_events;
        bool                    collect_stat;
        bool                    weak_group;
+       bool                    percore;
        const char              *pmu_name;
        struct {
                perf_evsel__sb_cb_t     *cb;
index 2d2af2a..847ae51 100644 (file)
@@ -1344,6 +1344,30 @@ out:
        return ret;
 }
 
+static int write_compressed(struct feat_fd *ff __maybe_unused,
+                           struct perf_evlist *evlist __maybe_unused)
+{
+       int ret;
+
+       ret = do_write(ff, &(ff->ph->env.comp_ver), sizeof(ff->ph->env.comp_ver));
+       if (ret)
+               return ret;
+
+       ret = do_write(ff, &(ff->ph->env.comp_type), sizeof(ff->ph->env.comp_type));
+       if (ret)
+               return ret;
+
+       ret = do_write(ff, &(ff->ph->env.comp_level), sizeof(ff->ph->env.comp_level));
+       if (ret)
+               return ret;
+
+       ret = do_write(ff, &(ff->ph->env.comp_ratio), sizeof(ff->ph->env.comp_ratio));
+       if (ret)
+               return ret;
+
+       return do_write(ff, &(ff->ph->env.comp_mmap_len), sizeof(ff->ph->env.comp_mmap_len));
+}
+
 static void print_hostname(struct feat_fd *ff, FILE *fp)
 {
        fprintf(fp, "# hostname : %s\n", ff->ph->env.hostname);
@@ -1688,6 +1712,13 @@ static void print_cache(struct feat_fd *ff, FILE *fp __maybe_unused)
        }
 }
 
+static void print_compressed(struct feat_fd *ff, FILE *fp)
+{
+       fprintf(fp, "# compressed : %s, level = %d, ratio = %d\n",
+               ff->ph->env.comp_type == PERF_COMP_ZSTD ? "Zstd" : "Unknown",
+               ff->ph->env.comp_level, ff->ph->env.comp_ratio);
+}
+
 static void print_pmu_mappings(struct feat_fd *ff, FILE *fp)
 {
        const char *delimiter = "# pmu mappings: ";
@@ -2667,6 +2698,27 @@ out:
        return err;
 }
 
+static int process_compressed(struct feat_fd *ff,
+                             void *data __maybe_unused)
+{
+       if (do_read_u32(ff, &(ff->ph->env.comp_ver)))
+               return -1;
+
+       if (do_read_u32(ff, &(ff->ph->env.comp_type)))
+               return -1;
+
+       if (do_read_u32(ff, &(ff->ph->env.comp_level)))
+               return -1;
+
+       if (do_read_u32(ff, &(ff->ph->env.comp_ratio)))
+               return -1;
+
+       if (do_read_u32(ff, &(ff->ph->env.comp_mmap_len)))
+               return -1;
+
+       return 0;
+}
+
 struct feature_ops {
        int (*write)(struct feat_fd *ff, struct perf_evlist *evlist);
        void (*print)(struct feat_fd *ff, FILE *fp);
@@ -2730,6 +2782,7 @@ static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
        FEAT_OPN(DIR_FORMAT,    dir_format,     false),
        FEAT_OPR(BPF_PROG_INFO, bpf_prog_info,  false),
        FEAT_OPR(BPF_BTF,       bpf_btf,        false),
+       FEAT_OPR(COMPRESSED,    compressed,     false),
 };
 
 struct header_print_data {
index 386da49..5b3abe4 100644 (file)
@@ -42,6 +42,7 @@ enum {
        HEADER_DIR_FORMAT,
        HEADER_BPF_PROG_INFO,
        HEADER_BPF_BTF,
+       HEADER_COMPRESSED,
        HEADER_LAST_FEATURE,
        HEADER_FEAT_BITS        = 256,
 };
index 872fab1..f4c3c84 100644 (file)
@@ -58,6 +58,7 @@ enum intel_pt_pkt_state {
        INTEL_PT_STATE_NO_IP,
        INTEL_PT_STATE_ERR_RESYNC,
        INTEL_PT_STATE_IN_SYNC,
+       INTEL_PT_STATE_TNT_CONT,
        INTEL_PT_STATE_TNT,
        INTEL_PT_STATE_TIP,
        INTEL_PT_STATE_TIP_PGD,
@@ -72,8 +73,9 @@ static inline bool intel_pt_sample_time(enum intel_pt_pkt_state pkt_state)
        case INTEL_PT_STATE_NO_IP:
        case INTEL_PT_STATE_ERR_RESYNC:
        case INTEL_PT_STATE_IN_SYNC:
-       case INTEL_PT_STATE_TNT:
+       case INTEL_PT_STATE_TNT_CONT:
                return true;
+       case INTEL_PT_STATE_TNT:
        case INTEL_PT_STATE_TIP:
        case INTEL_PT_STATE_TIP_PGD:
        case INTEL_PT_STATE_FUP:
@@ -888,16 +890,20 @@ static uint64_t intel_pt_next_period(struct intel_pt_decoder *decoder)
        timestamp = decoder->timestamp + decoder->timestamp_insn_cnt;
        masked_timestamp = timestamp & decoder->period_mask;
        if (decoder->continuous_period) {
-               if (masked_timestamp != decoder->last_masked_timestamp)
+               if (masked_timestamp > decoder->last_masked_timestamp)
                        return 1;
        } else {
                timestamp += 1;
                masked_timestamp = timestamp & decoder->period_mask;
-               if (masked_timestamp != decoder->last_masked_timestamp) {
+               if (masked_timestamp > decoder->last_masked_timestamp) {
                        decoder->last_masked_timestamp = masked_timestamp;
                        decoder->continuous_period = true;
                }
        }
+
+       if (masked_timestamp < decoder->last_masked_timestamp)
+               return decoder->period_ticks;
+
        return decoder->period_ticks - (timestamp - masked_timestamp);
 }
 
@@ -926,7 +932,10 @@ static void intel_pt_sample_insn(struct intel_pt_decoder *decoder)
        case INTEL_PT_PERIOD_TICKS:
                timestamp = decoder->timestamp + decoder->timestamp_insn_cnt;
                masked_timestamp = timestamp & decoder->period_mask;
-               decoder->last_masked_timestamp = masked_timestamp;
+               if (masked_timestamp > decoder->last_masked_timestamp)
+                       decoder->last_masked_timestamp = masked_timestamp;
+               else
+                       decoder->last_masked_timestamp += decoder->period_ticks;
                break;
        case INTEL_PT_PERIOD_NONE:
        case INTEL_PT_PERIOD_MTC:
@@ -1254,7 +1263,9 @@ static int intel_pt_walk_tnt(struct intel_pt_decoder *decoder)
                                return -ENOENT;
                        }
                        decoder->tnt.count -= 1;
-                       if (!decoder->tnt.count)
+                       if (decoder->tnt.count)
+                               decoder->pkt_state = INTEL_PT_STATE_TNT_CONT;
+                       else
                                decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
                        decoder->tnt.payload <<= 1;
                        decoder->state.from_ip = decoder->ip;
@@ -1285,7 +1296,9 @@ static int intel_pt_walk_tnt(struct intel_pt_decoder *decoder)
 
                if (intel_pt_insn.branch == INTEL_PT_BR_CONDITIONAL) {
                        decoder->tnt.count -= 1;
-                       if (!decoder->tnt.count)
+                       if (decoder->tnt.count)
+                               decoder->pkt_state = INTEL_PT_STATE_TNT_CONT;
+                       else
                                decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
                        if (decoder->tnt.payload & BIT63) {
                                decoder->tnt.payload <<= 1;
@@ -1305,8 +1318,11 @@ static int intel_pt_walk_tnt(struct intel_pt_decoder *decoder)
                                return 0;
                        }
                        decoder->ip += intel_pt_insn.length;
-                       if (!decoder->tnt.count)
+                       if (!decoder->tnt.count) {
+                               decoder->sample_timestamp = decoder->timestamp;
+                               decoder->sample_insn_cnt = decoder->timestamp_insn_cnt;
                                return -EAGAIN;
+                       }
                        decoder->tnt.payload <<= 1;
                        continue;
                }
@@ -2365,6 +2381,7 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
                        err = intel_pt_walk_trace(decoder);
                        break;
                case INTEL_PT_STATE_TNT:
+               case INTEL_PT_STATE_TNT_CONT:
                        err = intel_pt_walk_tnt(decoder);
                        if (err == -EAGAIN)
                                err = intel_pt_walk_trace(decoder);
index 3c520ba..28a9541 100644 (file)
@@ -1234,8 +1234,9 @@ static char *get_kernel_version(const char *root_dir)
        if (!file)
                return NULL;
 
-       version[0] = '\0';
        tmp = fgets(version, sizeof(version), file);
+       if (!tmp)
+               *version = '\0';
        fclose(file);
 
        name = strstr(version, prefix);
index ef3d79b..868c0b0 100644 (file)
@@ -157,6 +157,10 @@ void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __mayb
 }
 
 #ifdef HAVE_AIO_SUPPORT
+static int perf_mmap__aio_enabled(struct perf_mmap *map)
+{
+       return map->aio.nr_cblocks > 0;
+}
 
 #ifdef HAVE_LIBNUMA_SUPPORT
 static int perf_mmap__aio_alloc(struct perf_mmap *map, int idx)
@@ -198,7 +202,7 @@ static int perf_mmap__aio_bind(struct perf_mmap *map, int idx, int cpu, int affi
 
        return 0;
 }
-#else
+#else /* !HAVE_LIBNUMA_SUPPORT */
 static int perf_mmap__aio_alloc(struct perf_mmap *map, int idx)
 {
        map->aio.data[idx] = malloc(perf_mmap__mmap_len(map));
@@ -285,81 +289,12 @@ static void perf_mmap__aio_munmap(struct perf_mmap *map)
        zfree(&map->aio.cblocks);
        zfree(&map->aio.aiocb);
 }
-
-int perf_mmap__aio_push(struct perf_mmap *md, void *to, int idx,
-                       int push(void *to, struct aiocb *cblock, void *buf, size_t size, off_t off),
-                       off_t *off)
+#else /* !HAVE_AIO_SUPPORT */
+static int perf_mmap__aio_enabled(struct perf_mmap *map __maybe_unused)
 {
-       u64 head = perf_mmap__read_head(md);
-       unsigned char *data = md->base + page_size;
-       unsigned long size, size0 = 0;
-       void *buf;
-       int rc = 0;
-
-       rc = perf_mmap__read_init(md);
-       if (rc < 0)
-               return (rc == -EAGAIN) ? 0 : -1;
-
-       /*
-        * md->base data is copied into md->data[idx] buffer to
-        * release space in the kernel buffer as fast as possible,
-        * thru perf_mmap__consume() below.
-        *
-        * That lets the kernel to proceed with storing more
-        * profiling data into the kernel buffer earlier than other
-        * per-cpu kernel buffers are handled.
-        *
-        * Coping can be done in two steps in case the chunk of
-        * profiling data crosses the upper bound of the kernel buffer.
-        * In this case we first move part of data from md->start
-        * till the upper bound and then the reminder from the
-        * beginning of the kernel buffer till the end of
-        * the data chunk.
-        */
-
-       size = md->end - md->start;
-
-       if ((md->start & md->mask) + size != (md->end & md->mask)) {
-               buf = &data[md->start & md->mask];
-               size = md->mask + 1 - (md->start & md->mask);
-               md->start += size;
-               memcpy(md->aio.data[idx], buf, size);
-               size0 = size;
-       }
-
-       buf = &data[md->start & md->mask];
-       size = md->end - md->start;
-       md->start += size;
-       memcpy(md->aio.data[idx] + size0, buf, size);
-
-       /*
-        * Increment md->refcount to guard md->data[idx] buffer
-        * from premature deallocation because md object can be
-        * released earlier than aio write request started
-        * on mmap->data[idx] is complete.
-        *
-        * perf_mmap__put() is done at record__aio_complete()
-        * after started request completion.
-        */
-       perf_mmap__get(md);
-
-       md->prev = head;
-       perf_mmap__consume(md);
-
-       rc = push(to, &md->aio.cblocks[idx], md->aio.data[idx], size0 + size, *off);
-       if (!rc) {
-               *off += size0 + size;
-       } else {
-               /*
-                * Decrement md->refcount back if aio write
-                * operation failed to start.
-                */
-               perf_mmap__put(md);
-       }
-
-       return rc;
+       return 0;
 }
-#else
+
 static int perf_mmap__aio_mmap(struct perf_mmap *map __maybe_unused,
                               struct mmap_params *mp __maybe_unused)
 {
@@ -374,6 +309,10 @@ static void perf_mmap__aio_munmap(struct perf_mmap *map __maybe_unused)
 void perf_mmap__munmap(struct perf_mmap *map)
 {
        perf_mmap__aio_munmap(map);
+       if (map->data != NULL) {
+               munmap(map->data, perf_mmap__mmap_len(map));
+               map->data = NULL;
+       }
        if (map->base != NULL) {
                munmap(map->base, perf_mmap__mmap_len(map));
                map->base = NULL;
@@ -442,6 +381,19 @@ int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int c
 
        map->flush = mp->flush;
 
+       map->comp_level = mp->comp_level;
+
+       if (map->comp_level && !perf_mmap__aio_enabled(map)) {
+               map->data = mmap(NULL, perf_mmap__mmap_len(map), PROT_READ|PROT_WRITE,
+                                MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
+               if (map->data == MAP_FAILED) {
+                       pr_debug2("failed to mmap data buffer, error %d\n",
+                                       errno);
+                       map->data = NULL;
+                       return -1;
+               }
+       }
+
        if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
                                &mp->auxtrace_mp, map->base, fd))
                return -1;
@@ -540,7 +492,7 @@ int perf_mmap__push(struct perf_mmap *md, void *to,
 
        rc = perf_mmap__read_init(md);
        if (rc < 0)
-               return (rc == -EAGAIN) ? 0 : -1;
+               return (rc == -EAGAIN) ? 1 : -1;
 
        size = md->end - md->start;
 
index b82f8c2..274ce38 100644 (file)
@@ -40,6 +40,8 @@ struct perf_mmap {
 #endif
        cpu_set_t       affinity_mask;
        u64             flush;
+       void            *data;
+       int             comp_level;
 };
 
 /*
@@ -71,7 +73,7 @@ enum bkw_mmap_state {
 };
 
 struct mmap_params {
-       int                         prot, mask, nr_cblocks, affinity, flush;
+       int prot, mask, nr_cblocks, affinity, flush, comp_level;
        struct auxtrace_mmap_params auxtrace_mp;
 };
 
@@ -99,18 +101,6 @@ union perf_event *perf_mmap__read_event(struct perf_mmap *map);
 
 int perf_mmap__push(struct perf_mmap *md, void *to,
                    int push(struct perf_mmap *map, void *to, void *buf, size_t size));
-#ifdef HAVE_AIO_SUPPORT
-int perf_mmap__aio_push(struct perf_mmap *md, void *to, int idx,
-                       int push(void *to, struct aiocb *cblock, void *buf, size_t size, off_t off),
-                       off_t *off);
-#else
-static inline int perf_mmap__aio_push(struct perf_mmap *md __maybe_unused, void *to __maybe_unused, int idx __maybe_unused,
-       int push(void *to, struct aiocb *cblock, void *buf, size_t size, off_t off) __maybe_unused,
-       off_t *off __maybe_unused)
-{
-       return 0;
-}
-#endif
 
 size_t perf_mmap__mmap_len(struct perf_mmap *map);
 
index 4432bfe..cf0b9b8 100644 (file)
@@ -950,6 +950,7 @@ static const char *config_term_names[__PARSE_EVENTS__TERM_TYPE_NR] = {
        [PARSE_EVENTS__TERM_TYPE_OVERWRITE]             = "overwrite",
        [PARSE_EVENTS__TERM_TYPE_NOOVERWRITE]           = "no-overwrite",
        [PARSE_EVENTS__TERM_TYPE_DRV_CFG]               = "driver-config",
+       [PARSE_EVENTS__TERM_TYPE_PERCORE]               = "percore",
 };
 
 static bool config_term_shrinked;
@@ -970,6 +971,7 @@ config_term_avail(int term_type, struct parse_events_error *err)
        case PARSE_EVENTS__TERM_TYPE_CONFIG2:
        case PARSE_EVENTS__TERM_TYPE_NAME:
        case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
+       case PARSE_EVENTS__TERM_TYPE_PERCORE:
                return true;
        default:
                if (!err)
@@ -1061,6 +1063,14 @@ do {                                                                        \
        case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
                CHECK_TYPE_VAL(NUM);
                break;
+       case PARSE_EVENTS__TERM_TYPE_PERCORE:
+               CHECK_TYPE_VAL(NUM);
+               if ((unsigned int)term->val.num > 1) {
+                       err->str = strdup("expected 0 or 1");
+                       err->idx = term->err_val;
+                       return -EINVAL;
+               }
+               break;
        default:
                err->str = strdup("unknown term");
                err->idx = term->err_term;
@@ -1199,6 +1209,10 @@ do {                                                             \
                case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
                        ADD_CONFIG_TERM(DRV_CFG, drv_cfg, term->val.str);
                        break;
+               case PARSE_EVENTS__TERM_TYPE_PERCORE:
+                       ADD_CONFIG_TERM(PERCORE, percore,
+                                       term->val.num ? true : false);
+                       break;
                default:
                        break;
                }
@@ -1260,6 +1274,18 @@ int parse_events_add_tool(struct parse_events_state *parse_state,
        return add_event_tool(list, &parse_state->idx, tool_event);
 }
 
+static bool config_term_percore(struct list_head *config_terms)
+{
+       struct perf_evsel_config_term *term;
+
+       list_for_each_entry(term, config_terms, list) {
+               if (term->type == PERF_EVSEL__CONFIG_TERM_PERCORE)
+                       return term->val.percore;
+       }
+
+       return false;
+}
+
 int parse_events_add_pmu(struct parse_events_state *parse_state,
                         struct list_head *list, char *name,
                         struct list_head *head_config,
@@ -1333,6 +1359,7 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
                evsel->metric_name = info.metric_name;
                evsel->pmu_name = name;
                evsel->use_uncore_alias = use_uncore_alias;
+               evsel->percore = config_term_percore(&evsel->config_terms);
        }
 
        return evsel ? 0 : -ENOMEM;
index a052cd6..f7139e1 100644 (file)
@@ -75,6 +75,7 @@ enum {
        PARSE_EVENTS__TERM_TYPE_NOOVERWRITE,
        PARSE_EVENTS__TERM_TYPE_OVERWRITE,
        PARSE_EVENTS__TERM_TYPE_DRV_CFG,
+       PARSE_EVENTS__TERM_TYPE_PERCORE,
        __PARSE_EVENTS__TERM_TYPE_NR,
 };
 
index c54bfe8..ca60988 100644 (file)
@@ -283,6 +283,7 @@ inherit                     { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_INHERIT); }
 no-inherit             { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_NOINHERIT); }
 overwrite              { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_OVERWRITE); }
 no-overwrite           { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_NOOVERWRITE); }
+percore                        { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_PERCORE); }
 ,                      { return ','; }
 "/"                    { BEGIN(INITIAL); return '/'; }
 {name_minus}           { return str(yyscanner, PE_NAME); }
index e6599e2..08581e2 100644 (file)
@@ -5,13 +5,14 @@
 #include <subcmd/parse-options.h>
 #include "util/parse-regs-options.h"
 
-int
-parse_regs(const struct option *opt, const char *str, int unset)
+static int
+__parse_regs(const struct option *opt, const char *str, int unset, bool intr)
 {
        uint64_t *mode = (uint64_t *)opt->value;
        const struct sample_reg *r;
        char *s, *os = NULL, *p;
        int ret = -1;
+       uint64_t mask;
 
        if (unset)
                return 0;
@@ -22,6 +23,11 @@ parse_regs(const struct option *opt, const char *str, int unset)
        if (*mode)
                return -1;
 
+       if (intr)
+               mask = arch__intr_reg_mask();
+       else
+               mask = arch__user_reg_mask();
+
        /* str may be NULL in case no arg is passed to -I */
        if (str) {
                /* because str is read-only */
@@ -37,19 +43,20 @@ parse_regs(const struct option *opt, const char *str, int unset)
                        if (!strcmp(s, "?")) {
                                fprintf(stderr, "available registers: ");
                                for (r = sample_reg_masks; r->name; r++) {
-                                       fprintf(stderr, "%s ", r->name);
+                                       if (r->mask & mask)
+                                               fprintf(stderr, "%s ", r->name);
                                }
                                fputc('\n', stderr);
                                /* just printing available regs */
                                return -1;
                        }
                        for (r = sample_reg_masks; r->name; r++) {
-                               if (!strcasecmp(s, r->name))
+                               if ((r->mask & mask) && !strcasecmp(s, r->name))
                                        break;
                        }
                        if (!r->name) {
-                               ui__warning("unknown register %s,"
-                                           " check man page\n", s);
+                               ui__warning("Unknown register \"%s\", check man page or run \"perf record %s?\"\n",
+                                           s, intr ? "-I" : "--user-regs=");
                                goto error;
                        }
 
@@ -65,8 +72,20 @@ parse_regs(const struct option *opt, const char *str, int unset)
 
        /* default to all possible regs */
        if (*mode == 0)
-               *mode = PERF_REGS_MASK;
+               *mode = mask;
 error:
        free(os);
        return ret;
 }
+
+int
+parse_user_regs(const struct option *opt, const char *str, int unset)
+{
+       return __parse_regs(opt, str, unset, false);
+}
+
+int
+parse_intr_regs(const struct option *opt, const char *str, int unset)
+{
+       return __parse_regs(opt, str, unset, true);
+}
index cdefb1a..2b23d25 100644 (file)
@@ -2,5 +2,6 @@
 #ifndef _PERF_PARSE_REGS_OPTIONS_H
 #define _PERF_PARSE_REGS_OPTIONS_H 1
 struct option;
-int parse_regs(const struct option *opt, const char *str, int unset);
+int parse_user_regs(const struct option *opt, const char *str, int unset);
+int parse_intr_regs(const struct option *opt, const char *str, int unset);
 #endif /* _PERF_PARSE_REGS_OPTIONS_H */
index 2acfcc5..2774cec 100644 (file)
@@ -13,6 +13,16 @@ int __weak arch_sdt_arg_parse_op(char *old_op __maybe_unused,
        return SDT_ARG_SKIP;
 }
 
+uint64_t __weak arch__intr_reg_mask(void)
+{
+       return PERF_REGS_MASK;
+}
+
+uint64_t __weak arch__user_reg_mask(void)
+{
+       return PERF_REGS_MASK;
+}
+
 #ifdef HAVE_PERF_REGS_SUPPORT
 int perf_reg_value(u64 *valp, struct regs_dump *regs, int id)
 {
index c9319f8..cb9c246 100644 (file)
@@ -12,6 +12,7 @@ struct sample_reg {
        uint64_t mask;
 };
 #define SMPL_REG(n, b) { .name = #n, .mask = 1ULL << (b) }
+#define SMPL_REG2(n, b) { .name = #n, .mask = 3ULL << (b) }
 #define SMPL_REG_END { .name = NULL }
 
 extern const struct sample_reg sample_reg_masks[];
@@ -22,6 +23,8 @@ enum {
 };
 
 int arch_sdt_arg_parse_op(char *old_op, char **new_op);
+uint64_t arch__intr_reg_mask(void);
+uint64_t arch__user_reg_mask(void);
 
 #ifdef HAVE_PERF_REGS_SUPPORT
 #include <perf_regs.h>
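
arch__intr_reg_mask() and arch__user_reg_mask() are deliberately __weak: the generic versions above advertise every register in PERF_REGS_MASK, and an architecture can supply strong definitions that narrow the set offered for -I (interrupt) versus --user-regs (user) sampling. A minimal sketch of such an override follows; the register names and bit positions are invented for illustration and do not correspond to any real arch header.

/* Sketch of an arch-side override of the weak register-mask helpers.
 * Bit positions below are illustrative only. */
#include <stdint.h>

enum {
	SKETCH_REG_IP,
	SKETCH_REG_SP,
	SKETCH_REG_FP,
	SKETCH_REG_R0,
	SKETCH_REG_MAX,
};

#define SKETCH_REGS_MASK ((1ULL << SKETCH_REG_MAX) - 1)

uint64_t arch__intr_reg_mask(void)
{
	/* everything the PMU can snapshot at interrupt time */
	return SKETCH_REGS_MASK;
}

uint64_t arch__user_reg_mask(void)
{
	/* user-space dumps expose a narrower set in this example */
	return SKETCH_REGS_MASK & ~(1ULL << SKETCH_REG_FP);
}
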
index bad5f87..2310a17 100644 (file)
 #include "stat.h"
 #include "arch/common.h"
 
+#ifdef HAVE_ZSTD_SUPPORT
+static int perf_session__process_compressed_event(struct perf_session *session,
+                                                 union perf_event *event, u64 file_offset)
+{
+       void *src;
+       size_t decomp_size, src_size;
+       u64 decomp_last_rem = 0;
+       size_t decomp_len = session->header.env.comp_mmap_len;
+       struct decomp *decomp, *decomp_last = session->decomp_last;
+
+       decomp = mmap(NULL, sizeof(struct decomp) + decomp_len, PROT_READ|PROT_WRITE,
+                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+       if (decomp == MAP_FAILED) {
+               pr_err("Couldn't allocate memory for decompression\n");
+               return -1;
+       }
+
+       decomp->file_pos = file_offset;
+       decomp->head = 0;
+
+       if (decomp_last) {
+               decomp_last_rem = decomp_last->size - decomp_last->head;
+               memcpy(decomp->data, &(decomp_last->data[decomp_last->head]), decomp_last_rem);
+               decomp->size = decomp_last_rem;
+       }
+
+       src = (void *)event + sizeof(struct compressed_event);
+       src_size = event->pack.header.size - sizeof(struct compressed_event);
+
+       decomp_size = zstd_decompress_stream(&(session->zstd_data), src, src_size,
+                               &(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem);
+       if (!decomp_size) {
+               munmap(decomp, sizeof(struct decomp) + decomp_len);
+               pr_err("Couldn't decompress data\n");
+               return -1;
+       }
+
+       decomp->size += decomp_size;
+
+       if (session->decomp == NULL) {
+               session->decomp = decomp;
+               session->decomp_last = decomp;
+       } else {
+               session->decomp_last->next = decomp;
+               session->decomp_last = decomp;
+       }
+
+       pr_debug("decomp (B): %ld to %ld\n", src_size, decomp_size);
+
+       return 0;
+}
+#else /* !HAVE_ZSTD_SUPPORT */
+#define perf_session__process_compressed_event perf_session__process_compressed_event_stub
+#endif
+
 static int perf_session__deliver_event(struct perf_session *session,
                                       union perf_event *event,
                                       struct perf_tool *tool,
@@ -197,6 +252,21 @@ static void perf_session__delete_threads(struct perf_session *session)
        machine__delete_threads(&session->machines.host);
 }
 
+static void perf_session__release_decomp_events(struct perf_session *session)
+{
+       struct decomp *next, *decomp;
+       size_t decomp_len;
+       next = session->decomp;
+       decomp_len = session->header.env.comp_mmap_len;
+       do {
+               decomp = next;
+               if (decomp == NULL)
+                       break;
+               next = decomp->next;
+               munmap(decomp, decomp_len + sizeof(struct decomp));
+       } while (1);
+}
+
 void perf_session__delete(struct perf_session *session)
 {
        if (session == NULL)
@@ -205,6 +275,7 @@ void perf_session__delete(struct perf_session *session)
        auxtrace_index__free(&session->auxtrace_index);
        perf_session__destroy_kernel_maps(session);
        perf_session__delete_threads(session);
+       perf_session__release_decomp_events(session);
        perf_env__exit(&session->header.env);
        machines__exit(&session->machines);
        if (session->data)
@@ -358,6 +429,14 @@ static int process_stat_round_stub(struct perf_session *perf_session __maybe_unu
        return 0;
 }
 
+static int perf_session__process_compressed_event_stub(struct perf_session *session __maybe_unused,
+                                                      union perf_event *event __maybe_unused,
+                                                      u64 file_offset __maybe_unused)
+{
+       dump_printf(": unhandled!\n");
+       return 0;
+}
+
 void perf_tool__fill_defaults(struct perf_tool *tool)
 {
        if (tool->sample == NULL)
@@ -430,6 +509,8 @@ void perf_tool__fill_defaults(struct perf_tool *tool)
                tool->time_conv = process_event_op2_stub;
        if (tool->feature == NULL)
                tool->feature = process_event_op2_stub;
+       if (tool->compressed == NULL)
+               tool->compressed = perf_session__process_compressed_event;
 }
 
 static void swap_sample_id_all(union perf_event *event, void *data)
@@ -1373,7 +1454,9 @@ static s64 perf_session__process_user_event(struct perf_session *session,
        int fd = perf_data__fd(session->data);
        int err;
 
-       dump_event(session->evlist, event, file_offset, &sample);
+       if (event->header.type != PERF_RECORD_COMPRESSED ||
+           tool->compressed == perf_session__process_compressed_event_stub)
+               dump_event(session->evlist, event, file_offset, &sample);
 
        /* These events are processed right away */
        switch (event->header.type) {
@@ -1426,6 +1509,11 @@ static s64 perf_session__process_user_event(struct perf_session *session,
                return tool->time_conv(session, event);
        case PERF_RECORD_HEADER_FEATURE:
                return tool->feature(session, event);
+       case PERF_RECORD_COMPRESSED:
+               err = tool->compressed(session, event, file_offset);
+               if (err)
+                       dump_event(session->evlist, event, file_offset, &sample);
+               return err;
        default:
                return -EINVAL;
        }
@@ -1708,6 +1796,8 @@ static int perf_session__flush_thread_stacks(struct perf_session *session)
 
 volatile int session_done;
 
+static int __perf_session__process_decomp_events(struct perf_session *session);
+
 static int __perf_session__process_pipe_events(struct perf_session *session)
 {
        struct ordered_events *oe = &session->ordered_events;
@@ -1788,6 +1878,10 @@ more:
        if (skip > 0)
                head += skip;
 
+       err = __perf_session__process_decomp_events(session);
+       if (err)
+               goto out_err;
+
        if (!session_done())
                goto more;
 done:
@@ -1836,6 +1930,39 @@ fetch_mmaped_event(struct perf_session *session,
        return event;
 }
 
+static int __perf_session__process_decomp_events(struct perf_session *session)
+{
+       s64 skip;
+       u64 size, file_pos = 0;
+       struct decomp *decomp = session->decomp_last;
+
+       if (!decomp)
+               return 0;
+
+       while (decomp->head < decomp->size && !session_done()) {
+               union perf_event *event = fetch_mmaped_event(session, decomp->head, decomp->size, decomp->data);
+
+               if (!event)
+                       break;
+
+               size = event->header.size;
+
+               if (size < sizeof(struct perf_event_header) ||
+                   (skip = perf_session__process_event(session, event, file_pos)) < 0) {
+                       pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
+                               decomp->file_pos + decomp->head, event->header.size, event->header.type);
+                       return -EINVAL;
+               }
+
+               if (skip)
+                       size += skip;
+
+               decomp->head += size;
+       }
+
+       return 0;
+}
+
 /*
  * On 64bit we can mmap the data file in one go. No need for tiny mmap
  * slices. On 32bit we use 32MB.
@@ -1945,6 +2072,10 @@ more:
        head += size;
        file_pos += size;
 
+       err = __perf_session__process_decomp_events(session);
+       if (err)
+               goto out;
+
        ui_progress__update(prog, size);
 
        if (session_done())
index d96eccd..dd8920b 100644 (file)
@@ -8,6 +8,7 @@
 #include "machine.h"
 #include "data.h"
 #include "ordered-events.h"
+#include "util/compress.h"
 #include <linux/kernel.h>
 #include <linux/rbtree.h>
 #include <linux/perf_event.h>
@@ -35,6 +36,19 @@ struct perf_session {
        struct ordered_events   ordered_events;
        struct perf_data        *data;
        struct perf_tool        *tool;
+       u64                     bytes_transferred;
+       u64                     bytes_compressed;
+       struct zstd_data        zstd_data;
+       struct decomp           *decomp;
+       struct decomp           *decomp_last;
+};
+
+struct decomp {
+       struct decomp *next;
+       u64 file_pos;
+       u64 head;
+       size_t size;
+       char data[];
 };
 
 struct perf_tool;
index 3324f23..4c53bae 100644 (file)
@@ -88,9 +88,17 @@ static void aggr_printout(struct perf_stat_config *config,
                        config->csv_sep);
                        break;
        case AGGR_NONE:
-               fprintf(config->output, "CPU%*d%s",
-                       config->csv_output ? 0 : -4,
-                       perf_evsel__cpus(evsel)->map[id], config->csv_sep);
+               if (evsel->percore) {
+                       fprintf(config->output, "S%d-C%*d%s",
+                               cpu_map__id_to_socket(id),
+                               config->csv_output ? 0 : -5,
+                               cpu_map__id_to_cpu(id), config->csv_sep);
+               } else {
+                       fprintf(config->output, "CPU%*d%s ",
+                               config->csv_output ? 0 : -5,
+                               perf_evsel__cpus(evsel)->map[id],
+                               config->csv_sep);
+               }
                break;
        case AGGR_THREAD:
                fprintf(config->output, "%*s-%*d%s",
@@ -594,6 +602,41 @@ static void aggr_cb(struct perf_stat_config *config,
        }
 }
 
+static void print_counter_aggrdata(struct perf_stat_config *config,
+                                  struct perf_evsel *counter, int s,
+                                  char *prefix, bool metric_only,
+                                  bool *first)
+{
+       struct aggr_data ad;
+       FILE *output = config->output;
+       u64 ena, run, val;
+       int id, nr;
+       double uval;
+
+       ad.id = id = config->aggr_map->map[s];
+       ad.val = ad.ena = ad.run = 0;
+       ad.nr = 0;
+       if (!collect_data(config, counter, aggr_cb, &ad))
+               return;
+
+       nr = ad.nr;
+       ena = ad.ena;
+       run = ad.run;
+       val = ad.val;
+       if (*first && metric_only) {
+               *first = false;
+               aggr_printout(config, counter, id, nr);
+       }
+       if (prefix && !metric_only)
+               fprintf(output, "%s", prefix);
+
+       uval = val * counter->scale;
+       printout(config, id, nr, counter, uval, prefix,
+                run, ena, 1.0, &rt_stat);
+       if (!metric_only)
+               fputc('\n', output);
+}
+
 static void print_aggr(struct perf_stat_config *config,
                       struct perf_evlist *evlist,
                       char *prefix)
@@ -601,9 +644,7 @@ static void print_aggr(struct perf_stat_config *config,
        bool metric_only = config->metric_only;
        FILE *output = config->output;
        struct perf_evsel *counter;
-       int s, id, nr;
-       double uval;
-       u64 ena, run, val;
+       int s;
        bool first;
 
        if (!(config->aggr_map || config->aggr_get_id))
@@ -616,33 +657,14 @@ static void print_aggr(struct perf_stat_config *config,
         * Without each counter has its own line.
         */
        for (s = 0; s < config->aggr_map->nr; s++) {
-               struct aggr_data ad;
                if (prefix && metric_only)
                        fprintf(output, "%s", prefix);
 
-               ad.id = id = config->aggr_map->map[s];
                first = true;
                evlist__for_each_entry(evlist, counter) {
-                       ad.val = ad.ena = ad.run = 0;
-                       ad.nr = 0;
-                       if (!collect_data(config, counter, aggr_cb, &ad))
-                               continue;
-                       nr = ad.nr;
-                       ena = ad.ena;
-                       run = ad.run;
-                       val = ad.val;
-                       if (first && metric_only) {
-                               first = false;
-                               aggr_printout(config, counter, id, nr);
-                       }
-                       if (prefix && !metric_only)
-                               fprintf(output, "%s", prefix);
-
-                       uval = val * counter->scale;
-                       printout(config, id, nr, counter, uval, prefix,
-                                run, ena, 1.0, &rt_stat);
-                       if (!metric_only)
-                               fputc('\n', output);
+                       print_counter_aggrdata(config, counter, s,
+                                              prefix, metric_only,
+                                              &first);
                }
                if (metric_only)
                        fputc('\n', output);
@@ -1089,6 +1111,30 @@ static void print_footer(struct perf_stat_config *config)
                        "the same PMU. Try reorganizing the group.\n");
 }
 
+static void print_percore(struct perf_stat_config *config,
+                         struct perf_evsel *counter, char *prefix)
+{
+       bool metric_only = config->metric_only;
+       FILE *output = config->output;
+       int s;
+       bool first = true;
+
+       if (!(config->aggr_map || config->aggr_get_id))
+               return;
+
+       for (s = 0; s < config->aggr_map->nr; s++) {
+               if (prefix && metric_only)
+                       fprintf(output, "%s", prefix);
+
+               print_counter_aggrdata(config, counter, s,
+                                      prefix, metric_only,
+                                      &first);
+       }
+
+       if (metric_only)
+               fputc('\n', output);
+}
+
 void
 perf_evlist__print_counters(struct perf_evlist *evlist,
                            struct perf_stat_config *config,
@@ -1139,7 +1185,10 @@ perf_evlist__print_counters(struct perf_evlist *evlist,
                        print_no_aggr_metric(config, evlist, prefix);
                else {
                        evlist__for_each_entry(evlist, counter) {
-                               print_counter(config, counter, prefix);
+                               if (counter->percore)
+                                       print_percore(config, counter, prefix);
+                               else
+                                       print_counter(config, counter, prefix);
                        }
                }
                break;
index 2856cc9..c3115d9 100644 (file)
@@ -277,9 +277,11 @@ process_counter_values(struct perf_stat_config *config, struct perf_evsel *evsel
                if (!evsel->snapshot)
                        perf_evsel__compute_deltas(evsel, cpu, thread, count);
                perf_counts_values__scale(count, config->scale, NULL);
-               if (config->aggr_mode == AGGR_NONE)
-                       perf_stat__update_shadow_stats(evsel, count->val, cpu,
-                                                      &rt_stat);
+               if ((config->aggr_mode == AGGR_NONE) && (!evsel->percore)) {
+                       perf_stat__update_shadow_stats(evsel, count->val,
+                                                      cpu, &rt_stat);
+               }
+
                if (config->aggr_mode == AGGR_THREAD) {
                        if (config->stats)
                                perf_stat__update_shadow_stats(evsel,
index 50678d3..403045a 100644 (file)
@@ -15,6 +15,7 @@
 #include "map.h"
 #include "symbol.h"
 #include "unwind.h"
+#include "callchain.h"
 
 #include <api/fs/fs.h>
 
@@ -327,7 +328,7 @@ static int thread__prepare_access(struct thread *thread)
 {
        int err = 0;
 
-       if (symbol_conf.use_callchain)
+       if (dwarf_callchain_users)
                err = __thread__prepare_access(thread);
 
        return err;
index 2503916..9096a6e 100644 (file)
@@ -28,6 +28,7 @@ typedef int (*event_attr_op)(struct perf_tool *tool,
 
 typedef int (*event_op2)(struct perf_session *session, union perf_event *event);
 typedef s64 (*event_op3)(struct perf_session *session, union perf_event *event);
+typedef int (*event_op4)(struct perf_session *session, union perf_event *event, u64 data);
 
 typedef int (*event_oe)(struct perf_tool *tool, union perf_event *event,
                        struct ordered_events *oe);
@@ -72,6 +73,7 @@ struct perf_tool {
                        stat,
                        stat_round,
                        feature;
+       event_op4       compressed;
        event_op3       auxtrace;
        bool            ordered_events;
        bool            ordering_requires_timestamps;
index f3c666a..25e1406 100644 (file)
@@ -617,8 +617,6 @@ static unw_accessors_t accessors = {
 
 static int _unwind__prepare_access(struct thread *thread)
 {
-       if (!dwarf_callchain_users)
-               return 0;
        thread->addr_space = unw_create_addr_space(&accessors, 0);
        if (!thread->addr_space) {
                pr_err("unwind: Can't create unwind address space.\n");
@@ -631,15 +629,11 @@ static int _unwind__prepare_access(struct thread *thread)
 
 static void _unwind__flush_access(struct thread *thread)
 {
-       if (!dwarf_callchain_users)
-               return;
        unw_flush_cache(thread->addr_space, 0, 0);
 }
 
 static void _unwind__finish_access(struct thread *thread)
 {
-       if (!dwarf_callchain_users)
-               return;
        unw_destroy_addr_space(thread->addr_space);
 }
 
index 9778b31..c081197 100644 (file)
@@ -5,6 +5,7 @@
 #include "session.h"
 #include "debug.h"
 #include "env.h"
+#include "callchain.h"
 
 struct unwind_libunwind_ops __weak *local_unwind_libunwind_ops;
 struct unwind_libunwind_ops __weak *x86_32_unwind_libunwind_ops;
@@ -24,6 +25,9 @@ int unwind__prepare_access(struct thread *thread, struct map *map,
        struct unwind_libunwind_ops *ops = local_unwind_libunwind_ops;
        int err;
 
+       if (!dwarf_callchain_users)
+               return 0;
+
        if (thread->addr_space) {
                pr_debug("unwind: thread map already set, dso=%s\n",
                         map->dso->name);
@@ -65,12 +69,18 @@ out_register:
 
 void unwind__flush_access(struct thread *thread)
 {
+       if (!dwarf_callchain_users)
+               return;
+
        if (thread->unwind_libunwind_ops)
                thread->unwind_libunwind_ops->flush_access(thread);
 }
 
 void unwind__finish_access(struct thread *thread)
 {
+       if (!dwarf_callchain_users)
+               return;
+
        if (thread->unwind_libunwind_ops)
                thread->unwind_libunwind_ops->finish_access(thread);
 }
diff --git a/tools/perf/util/zstd.c b/tools/perf/util/zstd.c
new file mode 100644 (file)
index 0000000..23bdb98
--- /dev/null
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <string.h>
+
+#include "util/compress.h"
+#include "util/debug.h"
+
+int zstd_init(struct zstd_data *data, int level)
+{
+       size_t ret;
+
+       data->dstream = ZSTD_createDStream();
+       if (data->dstream == NULL) {
+               pr_err("Couldn't create decompression stream.\n");
+               return -1;
+       }
+
+       ret = ZSTD_initDStream(data->dstream);
+       if (ZSTD_isError(ret)) {
+               pr_err("Failed to initialize decompression stream: %s\n", ZSTD_getErrorName(ret));
+               return -1;
+       }
+
+       if (!level)
+               return 0;
+
+       data->cstream = ZSTD_createCStream();
+       if (data->cstream == NULL) {
+               pr_err("Couldn't create compression stream.\n");
+               return -1;
+       }
+
+       ret = ZSTD_initCStream(data->cstream, level);
+       if (ZSTD_isError(ret)) {
+               pr_err("Failed to initialize compression stream: %s\n", ZSTD_getErrorName(ret));
+               return -1;
+       }
+
+       return 0;
+}
+
+int zstd_fini(struct zstd_data *data)
+{
+       if (data->dstream) {
+               ZSTD_freeDStream(data->dstream);
+               data->dstream = NULL;
+       }
+
+       if (data->cstream) {
+               ZSTD_freeCStream(data->cstream);
+               data->cstream = NULL;
+       }
+
+       return 0;
+}
+
+size_t zstd_compress_stream_to_records(struct zstd_data *data, void *dst, size_t dst_size,
+                                      void *src, size_t src_size, size_t max_record_size,
+                                      size_t process_header(void *record, size_t increment))
+{
+       size_t ret, size, compressed = 0;
+       ZSTD_inBuffer input = { src, src_size, 0 };
+       ZSTD_outBuffer output;
+       void *record;
+
+       while (input.pos < input.size) {
+               record = dst;
+               size = process_header(record, 0);
+               compressed += size;
+               dst += size;
+               dst_size -= size;
+               output = (ZSTD_outBuffer){ dst, (dst_size > max_record_size) ?
+                                               max_record_size : dst_size, 0 };
+               ret = ZSTD_compressStream(data->cstream, &output, &input);
+               ZSTD_flushStream(data->cstream, &output);
+               if (ZSTD_isError(ret)) {
+                       pr_err("failed to compress %ld bytes: %s\n",
+                               (long)src_size, ZSTD_getErrorName(ret));
+                       memcpy(dst, src, src_size);
+                       return src_size;
+               }
+               size = output.pos;
+               size = process_header(record, size);
+               compressed += size;
+               dst += size;
+               dst_size -= size;
+       }
+
+       return compressed;
+}
+
+size_t zstd_decompress_stream(struct zstd_data *data, void *src, size_t src_size,
+                             void *dst, size_t dst_size)
+{
+       size_t ret;
+       ZSTD_inBuffer input = { src, src_size, 0 };
+       ZSTD_outBuffer output = { dst, dst_size, 0 };
+
+       while (input.pos < input.size) {
+               ret = ZSTD_decompressStream(data->dstream, &output, &input);
+               if (ZSTD_isError(ret)) {
+                       pr_err("failed to decompress (B): %ld -> %ld : %s\n",
+                              src_size, output.size, ZSTD_getErrorName(ret));
+                       break;
+               }
+               output.dst  = dst + output.pos;
+               output.size = dst_size - output.pos;
+       }
+
+       return output.pos;
+}
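
zstd_compress_stream_to_records() leaves record framing to its caller: process_header() is invoked twice per record, first with increment == 0 so the callback can lay down a placeholder header and report how many bytes to reserve for it, then with the number of compressed bytes just produced so the header's size field can be finalized. One way such a callback could look is sketched below; it assumes it is built inside tools/perf where struct compressed_event and PERF_RECORD_COMPRESSED are available, and it is illustrative rather than the exact callback perf record installs.

/* Illustrative process_header() callback for zstd_compress_stream_to_records().
 * Pass 1 (increment == 0): write a placeholder header, return its size.
 * Pass 2 (increment == compressed bytes): grow header.size, return increment. */
static size_t sketch_process_comp_header(void *record, size_t increment)
{
	struct compressed_event *event = record;
	size_t header_size = sizeof(struct compressed_event);

	if (!increment) {
		event->header.type = PERF_RECORD_COMPRESSED;
		event->header.misc = 0;
		event->header.size = header_size;
		return header_size;
	}

	event->header.size += increment;
	return increment;
}
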