Domen Puncer <domen@coderock.org>
Douglas Gilbert <dougg@torque.net>
Ed L. Cashin <ecashin@coraid.com>
+Erik Kaneda <erik.kaneda@intel.com> <erik.schmauss@intel.com>
Evgeniy Polyakov <johnpol@2ka.mipt.ru>
Felipe W Damasio <felipewd@terra.com.br>
Felix Kuhling <fxkuehl@gmx.de>
Jaegeuk Kim <jaegeuk@kernel.org> <jaegeuk@google.com>
Jaegeuk Kim <jaegeuk@kernel.org> <jaegeuk@motorola.com>
Jaegeuk Kim <jaegeuk@kernel.org> <jaegeuk.kim@samsung.com>
+Jakub Kicinski <kuba@kernel.org> <jakub.kicinski@netronome.com>
James Bottomley <jejb@mulgrave.(none)>
James Bottomley <jejb@titanic.il.steeleye.com>
James E Wilson <wilson@specifix.com>
Punit Agrawal <punitagrawal@gmail.com> <punit.agrawal@arm.com>
Qais Yousef <qsyousef@gmail.com> <qais.yousef@imgtec.com>
Quentin Perret <qperret@qperret.net> <quentin.perret@arm.com>
+Rafael J. Wysocki <rjw@rjwysocki.net> <rjw@sisk.pl>
Rajesh Shah <rajesh.shah@intel.com>
Ralf Baechle <ralf@linux-mips.org>
Ralf Wildenhues <Ralf.Wildenhues@gmx.de>
What: /sys/class/tpm/tpmX/device/
Date: April 2005
KernelVersion: 2.6.12
-Contact: tpmdd-devel@lists.sf.net
+Contact: linux-integrity@vger.kernel.org
Description: The device/ directory under a specific TPM instance exposes
the properties of that TPM chip
What: /sys/class/tpm/tpmX/device/active
Date: April 2006
KernelVersion: 2.6.17
-Contact: tpmdd-devel@lists.sf.net
+Contact: linux-integrity@vger.kernel.org
Description: The "active" property prints a '1' if the TPM chip is accepting
commands. An inactive TPM chip still contains all the state of
an active chip (Storage Root Key, NVRAM, etc), and can be
What: /sys/class/tpm/tpmX/device/cancel
Date: June 2005
KernelVersion: 2.6.13
-Contact: tpmdd-devel@lists.sf.net
+Contact: linux-integrity@vger.kernel.org
Description: The "cancel" property allows you to cancel the currently
pending TPM command. Writing any value to cancel will call the
TPM vendor specific cancel operation.
What: /sys/class/tpm/tpmX/device/caps
Date: April 2005
KernelVersion: 2.6.12
-Contact: tpmdd-devel@lists.sf.net
+Contact: linux-integrity@vger.kernel.org
Description: The "caps" property contains TPM manufacturer and version info.
Example output:
What: /sys/class/tpm/tpmX/device/durations
Date: March 2011
KernelVersion: 3.1
-Contact: tpmdd-devel@lists.sf.net
+Contact: linux-integrity@vger.kernel.org
Description: The "durations" property shows the 3 vendor-specific values
used to wait for a short, medium and long TPM command. All
TPM commands are categorized as short, medium or long in
What: /sys/class/tpm/tpmX/device/enabled
Date: April 2006
KernelVersion: 2.6.17
-Contact: tpmdd-devel@lists.sf.net
+Contact: linux-integrity@vger.kernel.org
Description: The "enabled" property prints a '1' if the TPM chip is enabled,
meaning that it should be visible to the OS. This property
may be visible but produce a '0' after some operation that
What: /sys/class/tpm/tpmX/device/owned
Date: April 2006
KernelVersion: 2.6.17
-Contact: tpmdd-devel@lists.sf.net
+Contact: linux-integrity@vger.kernel.org
Description: The "owned" property produces a '1' if the TPM_TakeOwnership
ordinal has been executed successfully in the chip. A '0'
indicates that ownership hasn't been taken.
What: /sys/class/tpm/tpmX/device/pcrs
Date: April 2005
KernelVersion: 2.6.12
-Contact: tpmdd-devel@lists.sf.net
+Contact: linux-integrity@vger.kernel.org
Description: The "pcrs" property will dump the current value of all Platform
Configuration Registers in the TPM. Note that since these
values may be constantly changing, the output is only valid
What: /sys/class/tpm/tpmX/device/pubek
Date: April 2005
KernelVersion: 2.6.12
-Contact: tpmdd-devel@lists.sf.net
+Contact: linux-integrity@vger.kernel.org
Description: The "pubek" property will return the TPM's public endorsement
key if possible. If the TPM has had ownership established and
is version 1.2, the pubek will not be available without the
What: /sys/class/tpm/tpmX/device/temp_deactivated
Date: April 2006
KernelVersion: 2.6.17
-Contact: tpmdd-devel@lists.sf.net
+Contact: linux-integrity@vger.kernel.org
Description: The "temp_deactivated" property returns a '1' if the chip has
been temporarily deactivated, usually until the next power
cycle. Whether a warm boot (reboot) will clear a TPM chip
What: /sys/class/tpm/tpmX/device/timeouts
Date: March 2011
KernelVersion: 3.1
-Contact: tpmdd-devel@lists.sf.net
+Contact: linux-integrity@vger.kernel.org
Description: The "timeouts" property shows the 4 vendor-specific values
for the TPM's interface spec timeouts. The use of these
timeouts is defined by the TPM interface spec that the chip
The four timeout values are shown in usecs, with a trailing
"[original]" or "[adjusted]" depending on whether the values
were scaled by the driver to be reported in usec from msecs.
+
+What: /sys/class/tpm/tpmX/tpm_version_major
+Date: October 2019
+KernelVersion: 5.5
+Contact: linux-integrity@vger.kernel.org
+Description: The "tpm_version_major" property shows the TCG spec major version
+ implemented by the TPM device.
+
+ Example output:
+
+ 2
--- /dev/null
+What:		/sys/bus/dsa/devices/dsa<m>/cdev_major
+Date: Oct 25, 2019
+KernelVersion: 5.6.0
+Contact: dmaengine@vger.kernel.org
+Description: The major number that the character device driver assigned to
+ this device.
+
+What:		/sys/bus/dsa/devices/dsa<m>/errors
+Date: Oct 25, 2019
+KernelVersion: 5.6.0
+Contact: dmaengine@vger.kernel.org
+Description: The error information for this device.
+
+What:		/sys/bus/dsa/devices/dsa<m>/max_batch_size
+Date: Oct 25, 2019
+KernelVersion: 5.6.0
+Contact: dmaengine@vger.kernel.org
+Description: The largest number of work descriptors in a batch.
+
+What:		/sys/bus/dsa/devices/dsa<m>/max_work_queues_size
+Date: Oct 25, 2019
+KernelVersion: 5.6.0
+Contact: dmaengine@vger.kernel.org
+Description: The maximum work queue size supported by this device.
+
+What:		/sys/bus/dsa/devices/dsa<m>/max_engines
+Date: Oct 25, 2019
+KernelVersion: 5.6.0
+Contact: dmaengine@vger.kernel.org
+Description: The maximum number of engines supported by this device.
+
+What:		/sys/bus/dsa/devices/dsa<m>/max_groups
+Date:		Oct 25, 2019
+KernelVersion:	5.6.0
+Contact:	dmaengine@vger.kernel.org
+Description:	The maximum number of groups that can be created under this device.
+
+What:		/sys/bus/dsa/devices/dsa<m>/max_tokens
+Date: Oct 25, 2019
+KernelVersion: 5.6.0
+Contact: dmaengine@vger.kernel.org
+Description: The total number of bandwidth tokens supported by this device.
+ The bandwidth tokens represent resources within the DSA
+ implementation, and these resources are allocated by engines to
+ support operations.
+
+What:		/sys/bus/dsa/devices/dsa<m>/max_transfer_size
+Date: Oct 25, 2019
+KernelVersion: 5.6.0
+Contact: dmaengine@vger.kernel.org
+Description: The number of bytes to be read from the source address to
+ perform the operation. The maximum transfer size is dependent on
+ the workqueue the descriptor was submitted to.
+
+What:		/sys/bus/dsa/devices/dsa<m>/max_work_queues
+Date:		Oct 25, 2019
+KernelVersion:	5.6.0
+Contact:	dmaengine@vger.kernel.org
+Description:	The maximum number of work queues supported by this device.
+
+What:		/sys/bus/dsa/devices/dsa<m>/numa_node
+Date: Oct 25, 2019
+KernelVersion: 5.6.0
+Contact: dmaengine@vger.kernel.org
+Description: The numa node number for this device.
+
+What:		/sys/bus/dsa/devices/dsa<m>/op_cap
+Date:		Oct 25, 2019
+KernelVersion:	5.6.0
+Contact:	dmaengine@vger.kernel.org
+Description:	The operation capability bit mask specifies the operation
+		types supported by this device.
+
+What:		/sys/bus/dsa/devices/dsa<m>/state
+Date: Oct 25, 2019
+KernelVersion: 5.6.0
+Contact: dmaengine@vger.kernel.org
+Description: The state information of this device. It can be either enabled
+ or disabled.
+
+What:		/sys/bus/dsa/devices/dsa<m>/group<m>.<n>
+Date: Oct 25, 2019
+KernelVersion: 5.6.0
+Contact: dmaengine@vger.kernel.org
+Description: The assigned group under this device.
+
+What:		/sys/bus/dsa/devices/dsa<m>/engine<m>.<n>
+Date: Oct 25, 2019
+KernelVersion: 5.6.0
+Contact: dmaengine@vger.kernel.org
+Description: The assigned engine under this device.
+
+What:		/sys/bus/dsa/devices/dsa<m>/wq<m>.<n>
+Date: Oct 25, 2019
+KernelVersion: 5.6.0
+Contact: dmaengine@vger.kernel.org
+Description: The assigned work queue under this device.
+
+What:		/sys/bus/dsa/devices/dsa<m>/configurable
+Date:		Oct 25, 2019
+KernelVersion:	5.6.0
+Contact:	dmaengine@vger.kernel.org
+Description:	Indicates whether this device is configurable or not.
+
+What:		/sys/bus/dsa/devices/dsa<m>/token_limit
+Date: Oct 25, 2019
+KernelVersion: 5.6.0
+Contact: dmaengine@vger.kernel.org
+Description: The maximum number of bandwidth tokens that may be in use at
+ one time by operations that access low bandwidth memory in the
+ device.
+
+What:		/sys/bus/dsa/devices/wq<m>.<n>/group_id
+Date: Oct 25, 2019
+KernelVersion: 5.6.0
+Contact: dmaengine@vger.kernel.org
+Description: The group id that this work queue belongs to.
+
+What:		/sys/bus/dsa/devices/wq<m>.<n>/size
+Date: Oct 25, 2019
+KernelVersion: 5.6.0
+Contact: dmaengine@vger.kernel.org
+Description: The work queue size for this work queue.
+
+What:		/sys/bus/dsa/devices/wq<m>.<n>/type
+Date:		Oct 25, 2019
+KernelVersion:	5.6.0
+Contact:	dmaengine@vger.kernel.org
+Description:	The type of this work queue. It can be "kernel" type for work
+		queue usage in kernel space, or "user" type for work queue
+		usage by applications in user space.
+
+What:		/sys/bus/dsa/devices/wq<m>.<n>/cdev_minor
+Date: Oct 25, 2019
+KernelVersion: 5.6.0
+Contact: dmaengine@vger.kernel.org
+Description: The minor number assigned to this work queue by the character
+ device driver.
+
+What:		/sys/bus/dsa/devices/wq<m>.<n>/mode
+Date: Oct 25, 2019
+KernelVersion: 5.6.0
+Contact: dmaengine@vger.kernel.org
+Description: The work queue mode type for this work queue.
+
+What:		/sys/bus/dsa/devices/wq<m>.<n>/priority
+Date:		Oct 25, 2019
+KernelVersion:	5.6.0
+Contact:	dmaengine@vger.kernel.org
+Description:	The priority value of this work queue. It is a value relative
+		to other work queues in the same group, used to control quality
+		of service when dispatching work from multiple work queues in
+		the same group.
+
+What:		/sys/bus/dsa/devices/wq<m>.<n>/state
+Date: Oct 25, 2019
+KernelVersion: 5.6.0
+Contact: dmaengine@vger.kernel.org
+Description: The current state of the work queue.
+
+What:		/sys/bus/dsa/devices/wq<m>.<n>/threshold
+Date: Oct 25, 2019
+KernelVersion: 5.6.0
+Contact: dmaengine@vger.kernel.org
+Description: The number of entries in this work queue that may be filled
+ via a limited portal.
+
+What:		/sys/bus/dsa/devices/engine<m>.<n>/group_id
+Date: Oct 25, 2019
+KernelVersion: 5.6.0
+Contact: dmaengine@vger.kernel.org
+Description: The group that this engine belongs to.
What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/asic_health
-
Date: June 2018
KernelVersion: 4.19
Contact:	Vadim Pasternak <vadimp@mellanox.com>
The files are read only.
What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/fan_dir
-
Date: December 2018
KernelVersion: 5.0
Contact:	Vadim Pasternak <vadimp@mellanox.com>
The files are read only.
-What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/jtag_enable
-
+What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/cpld3_version
Date: November 2018
KernelVersion: 5.0
Contact:	Vadim Pasternak <vadimp@mellanox.com>
Description:	These files show which CPLD versions have been burned
-		on LED board.
+		on the LED or Gearbox board.
The files are read only.
What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/jtag_enable
-
Date: November 2018
KernelVersion: 5.0
Contact:	Vadim Pasternak <vadimp@mellanox.com>
What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_from_comex
What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_system
What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_voltmon_upgrade_fail
-
Date: November 2018
KernelVersion: 5.0
Contact:	Vadim Pasternak <vadimp@mellanox.com>
The files are read only.
+What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/cpld4_version
+Date: November 2018
+KernelVersion: 5.0
+Contact:	Vadim Pasternak <vadimp@mellanox.com>
+Description:	These files show which CPLD versions have been burned
+		on the LED board.
+
+ The files are read only.
+
+What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_comex_thermal
+What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_comex_wd
+What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_from_asic
+What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_reload_bios
+What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_sff_wd
+What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_swb_wd
Date: June 2019
KernelVersion: 5.3
Contact:	Vadim Pasternak <vadimp@mellanox.com>
The files are read only.
-What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_comex_thermal
-What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_comex_wd
-What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_from_asic
-What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_reload_bios
-What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_sff_wd
-What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_swb_wd
+What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/config1
+What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/config2
+Date: January 2020
+KernelVersion: 5.6
+Contact:	Vadim Pasternak <vadimp@mellanox.com>
+Description:	These files show static system topology identification,
+		such as the system's static I2C topology and the number and
+		type of FPGA devices within the system.
+
+ The files are read only.
+
+What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_ac_pwr_fail
+What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_platform
+What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_soc
+What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_sw_pwr_off
+Date: January 2020
+KernelVersion: 5.6
+Contact:	Vadim Pasternak <vadimp@mellanox.com>
+Description:	These files show the system reset causes, as follows: reset
+		due to AC power failure, reset invoked from software by
+		asserting the reset signal through CPLD, reset caused by a
+		signal asserted by the SOC through an ACPI register, and reset
+		invoked from software by asserting the power off signal
+		through CPLD.
+
+ The files are read only.
+
+What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/pcie_asic_reset_dis
+Date: January 2020
+KernelVersion: 5.6
+Contact:	Vadim Pasternak <vadimp@mellanox.com>
+Description:	This file allows the ASIC to be kept up during a PCIe root
+		complex reset, when the attribute is set to 1.
+
+ The file is read/write.
+
+What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/vpd_wp
+Date: January 2020
+KernelVersion: 5.6
+Contact:	Vadim Pasternak <vadimp@mellanox.com>
+Description:	This file allows overriding the system VPD hardware write
+		protection when the attribute is set to 1.
+
+ The file is read/write.
+
+What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/voltreg_update_status
+Date: January 2020
+KernelVersion: 5.6
+Contact:	Vadim Pasternak <vadimp@mellanox.com>
+Description:	This file exposes the configuration update status of burnable
+		voltage regulator devices. The status values are as follows:
+		0 - OK; 1 - CRC failure; 2 - I2C failure; 3 - in progress.
+
+ The file is read only.
+
+What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/ufm_version
+Date: January 2020
+KernelVersion: 5.6
Contact:	Vadim Pasternak <vadimp@mellanox.com>
+Description: This file exposes the firmware version of burnable voltage
+ regulator devices.
+
+ The file is read only.
		The name of the devfreq object denoted as ... is the same as
		the name of the device using devfreq.
+What: /sys/class/devfreq/.../name
+Date: November 2019
+Contact: Chanwoo Choi <cw00.choi@samsung.com>
+Description:
+		The /sys/class/devfreq/.../name shows the device name of
+		the corresponding devfreq object.
+
What: /sys/class/devfreq/.../governor
Date: September 2011
Contact: MyungJoo Ham <myungjoo.ham@samsung.com>
Date: October 2012
Contact: MyungJoo Ham <myungjoo.ham@samsung.com>
Description:
- This ABI shows the statistics of devfreq behavior on a
- specific device. It shows the time spent in each state and
- the number of transitions between states.
+ This ABI shows or clears the statistics of devfreq behavior
+ on a specific device. It shows the time spent in each state
+ and the number of transitions between states.
In order to activate this ABI, the devfreq target device
driver should provide the list of available frequencies
- with its profile.
+		with its profile. To reset the statistics of devfreq
+		behavior on a specific device, write 0 (zero) to
+		'trans_stat' as follows:
+		echo 0 > /sys/class/devfreq/.../trans_stat
What: /sys/class/devfreq/.../userspace/set_freq
Date: September 2011
does not reflect it. Likewise, if one enables a deep state but a
lighter state still is disabled, then this has no effect.
+What: /sys/devices/system/cpu/cpuX/cpuidle/stateN/default_status
+Date: December 2019
+KernelVersion: v5.6
+Contact: Linux power management list <linux-pm@vger.kernel.org>
+Description:
+ (RO) The default status of this state, "enabled" or "disabled".
What: /sys/devices/system/cpu/cpuX/cpuidle/stateN/residency
Date: March 2014
* 0 - normal,
* 1 - overboost,
* 2 - silent
+
+What: /sys/devices/platform/<platform>/throttle_thermal_policy
+Date: Dec 2019
+KernelVersion: 5.6
+Contact: "Leonid Maksymchuk" <leonmaxx@gmail.com>
+Description:
+ Throttle thermal policy mode:
+ * 0 - default,
+ * 1 - overboost,
+ * 2 - silent
Description:
The /sys/power/suspend_stats/last_failed_step file contains
the last failed step in the suspend/resume path.
+
+What: /sys/power/sync_on_suspend
+Date: October 2019
+Contact: Jonas Meurer <jonas@freesources.org>
+Description:
+ This file controls whether or not the kernel will sync()
+ filesystems during system suspend (after freezing user space
+ and before suspending devices).
+
+ Writing a "1" to this file enables the sync() and writing a "0"
+ disables it. Reads from the file return the current value.
+ The default is "1" if the build-time "SUSPEND_SKIP_SYNC" config
+ flag is unset, or "0" otherwise.
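+
+		For example, the sync() can be disabled at run time as
+		follows:
+		echo 0 > /sys/power/sync_on_suspend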
--- /dev/null
+.. SPDX-License-Identifier: GPL-2.0
+
+===========================
+ACPI Fan Performance States
+===========================
+
+When the optional _FPS object is present under an ACPI device representing a
+fan (for example, PNP0C0B or INT3404), the ACPI fan driver creates additional
+"state*" attributes in the sysfs directory of the ACPI device in question.
+These attributes list properties of fan performance states.
+
+For more information on _FPS refer to the ACPI specification at:
+
+http://uefi.org/specifications
+
+For instance, the contents of the INT3404 ACPI device sysfs directory
+may look as follows::
+
+ $ ls -l /sys/bus/acpi/devices/INT3404:00/
+ total 0
+ ...
+ -r--r--r-- 1 root root 4096 Dec 13 20:38 state0
+ -r--r--r-- 1 root root 4096 Dec 13 20:38 state1
+ -r--r--r-- 1 root root 4096 Dec 13 20:38 state10
+ -r--r--r-- 1 root root 4096 Dec 13 20:38 state11
+ -r--r--r-- 1 root root 4096 Dec 13 20:38 state2
+ -r--r--r-- 1 root root 4096 Dec 13 20:38 state3
+ -r--r--r-- 1 root root 4096 Dec 13 20:38 state4
+ -r--r--r-- 1 root root 4096 Dec 13 20:38 state5
+ -r--r--r-- 1 root root 4096 Dec 13 20:38 state6
+ -r--r--r-- 1 root root 4096 Dec 13 20:38 state7
+ -r--r--r-- 1 root root 4096 Dec 13 20:38 state8
+ -r--r--r-- 1 root root 4096 Dec 13 20:38 state9
+ -r--r--r-- 1 root root 4096 Dec 13 01:00 status
+ ...
+
+where each of the "state*" files represents one performance state of the fan
+and contains a colon-separated list of 5 integer numbers (fields) with the
+following interpretation::
+
+   control_percent:trip_point_index:speed_rpm:noise_level_mdb:power_mw
+
+* ``control_percent``: The percent value to be used to set the fan speed to a
+ specific level using the _FSL object (0-100).
+
+* ``trip_point_index``: The active cooling trip point number that corresponds
+ to this performance state (0-9).
+
+* ``speed_rpm``: Speed of the fan in rotations per minute.
+
+* ``noise_level_mdb``: Audible noise emitted by the fan in this state in
+ millidecibels.
+
+* ``power_mw``: Power draw of the fan in this state in milliwatts.
+
+For example::
+
+   $ cat /sys/bus/acpi/devices/INT3404:00/state1
+ 25:0:3200:12500:1250
+
+When a given field is not populated or its value provided by the platform
+firmware is invalid, the "not-defined" string is shown instead of the value.
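+
+As a minimal userspace sketch (not part of the kernel; the device path matches
+the example above), the fields of a "state*" file can be parsed along these
+lines, bearing in mind that any field may read "not-defined"::
+
+   #include <stdio.h>
+
+   int main(void)
+   {
+           unsigned int percent, trip, rpm, noise_mdb, power_mw;
+           FILE *f = fopen("/sys/bus/acpi/devices/INT3404:00/state1", "r");
+
+           /* All five fields parse only if none of them is "not-defined". */
+           if (f && fscanf(f, "%u:%u:%u:%u:%u", &percent, &trip, &rpm,
+                           &noise_mdb, &power_mw) == 5)
+                   printf("%u%%: %u rpm, %u mW\n", percent, rpm, power_mw);
+           if (f)
+                   fclose(f);
+           return 0;
+   }
+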
dsdt-override
ssdt-overlays
cppc_sysfs
+ fan_performance_states
182 = /dev/perfctr Performance-monitoring counters
183 = /dev/hwrng Generic random number generator
184 = /dev/cpu/microcode CPU microcode update interface
- 186 = /dev/atomicps Atomic shapshot of process state data
+ 186 = /dev/atomicps Atomic snapshot of process state data
187 = /dev/irnet IrNET device
188 = /dev/smbusbios SMBus BIOS
189 = /dev/ussp_ctl User space serial port control
``disable``
Whether or not this idle state is disabled.
+``default_status``
+ The default status of this state, "enabled" or "disabled".
+
``latency``
Exit latency of the idle state in microseconds.
--- /dev/null
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
+==============================================
+``intel_idle`` CPU Idle Time Management Driver
+==============================================
+
+:Copyright: |copy| 2020 Intel Corporation
+
+:Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+
+General Information
+===================
+
+``intel_idle`` is a part of the
+:doc:`CPU idle time management subsystem <cpuidle>` in the Linux kernel
+(``CPUIdle``). It is the default CPU idle time management driver for the
+Nehalem and later generations of Intel processors, but the level of support for
+a particular processor model in it depends on whether or not it recognizes that
+processor model and may also depend on information coming from the platform
+firmware. [To understand ``intel_idle`` it is necessary to know how ``CPUIdle``
+works in general, so this is the time to get familiar with :doc:`cpuidle` if you
+have not done that yet.]
+
+``intel_idle`` uses the ``MWAIT`` instruction to inform the processor that the
+logical CPU executing it is idle and so it may be possible to put some of the
+processor's functional blocks into low-power states. That instruction takes two
+arguments (passed in the ``EAX`` and ``ECX`` registers of the target CPU), the
+first of which, referred to as a *hint*, can be used by the processor to
+determine what can be done (for details refer to Intel Software Developer’s
+Manual [1]_). Accordingly, ``intel_idle`` refuses to work with processors in
+which the support for the ``MWAIT`` instruction has been disabled (for example,
+via the platform firmware configuration menu) or which do not support that
+instruction at all.
+
+``intel_idle`` is not modular, so it cannot be unloaded, which means that the
+only way to pass early-configuration-time parameters to it is via the kernel
+command line.
+
+
+.. _intel-idle-enumeration-of-states:
+
+Enumeration of Idle States
+==========================
+
+Each ``MWAIT`` hint value is interpreted by the processor as a license to
+reconfigure itself in a certain way in order to save energy. The processor
+configurations (with reduced power draw) resulting from that are referred to
+as C-states (in the ACPI terminology) or idle states. The list of meaningful
+``MWAIT`` hint values and idle states (i.e. low-power configurations of the
+processor) corresponding to them depends on the processor model and it may also
+depend on the configuration of the platform.
+
+In order to create a list of available idle states required by the ``CPUIdle``
+subsystem (see :ref:`idle-states-representation` in :doc:`cpuidle`),
+``intel_idle`` can use two sources of information: static tables of idle states
+for different processor models included in the driver itself and the ACPI tables
+of the system. The former are always used if the processor model at hand is
+recognized by ``intel_idle`` and the latter are used if that is required for
+the given processor model (which is the case for all server processor models
+recognized by ``intel_idle``) or if the processor model is not recognized.
+
+If the ACPI tables are going to be used for building the list of available idle
+states, ``intel_idle`` first looks for a ``_CST`` object under one of the ACPI
+objects corresponding to the CPUs in the system (refer to the ACPI specification
+[2]_ for the description of ``_CST`` and its output package). Because the
+``CPUIdle`` subsystem expects that the list of idle states supplied by the
+driver will be suitable for all of the CPUs handled by it and ``intel_idle`` is
+registered as the ``CPUIdle`` driver for all of the CPUs in the system, the
+driver looks for the first ``_CST`` object returning at least one valid idle
+state description and such that all of the idle states included in its return
+package are of the FFH (Functional Fixed Hardware) type, which means that the
+``MWAIT`` instruction is expected to be used to tell the processor that it can
+enter one of them. The return package of that ``_CST`` is then assumed to be
+applicable to all of the other CPUs in the system and the idle state
+descriptions extracted from it are stored in a preliminary list of idle states
+coming from the ACPI tables. [This step is skipped if ``intel_idle`` is
+configured to ignore the ACPI tables; see `below <intel-idle-parameters_>`_.]
+
+Next, the first (index 0) entry in the list of available idle states is
+initialized to represent a "polling idle state" (a pseudo-idle state in which
+the target CPU continuously fetches and executes instructions), and the
+subsequent (real) idle state entries are populated as follows.
+
+If the processor model at hand is recognized by ``intel_idle``, there is a
+(static) table of idle state descriptions for it in the driver. In that case,
+the "internal" table is the primary source of information on idle states and the
+information from it is copied to the final list of available idle states. If
+using the ACPI tables for the enumeration of idle states is not required
+(depending on the processor model), all of the listed idle states are enabled by
+default (so all of them will be taken into consideration by ``CPUIdle``
+governors during CPU idle state selection). Otherwise, some of the listed idle
+states may not be enabled by default if there are no matching entries in the
+preliminary list of idle states coming from the ACPI tables. In that case user
+space still can enable them later (on a per-CPU basis) with the help of
+the ``disable`` idle state attribute in ``sysfs`` (see
+:ref:`idle-states-representation` in :doc:`cpuidle`). This basically means that
+the idle states "known" to the driver may not be enabled by default if they have
+not been exposed by the platform firmware (through the ACPI tables).
+
+If the given processor model is not recognized by ``intel_idle``, but it
+supports ``MWAIT``, the preliminary list of idle states coming from the ACPI
+tables is used for building the final list that will be supplied to the
+``CPUIdle`` core during driver registration. For each idle state in that list,
+the description, ``MWAIT`` hint and exit latency are copied to the corresponding
+entry in the final list of idle states. The name of the idle state represented
+by it (to be returned by the ``name`` idle state attribute in ``sysfs``) is
+"CX_ACPI", where X is the index of that idle state in the final list (note that
+the minimum value of X is 1, because 0 is reserved for the "polling" state), and
+its target residency is based on the exit latency value. Specifically, for
+C1-type idle states the exit latency value is also used as the target residency
+(for compatibility with the majority of the "internal" tables of idle states for
+various processor models recognized by ``intel_idle``) and for the other idle
+state types (C2 and C3) the target residency value is 3 times the exit latency
+(again, that is because it reflects the target residency to exit latency ratio
+in the majority of cases for the processor models recognized by ``intel_idle``).
+All of the idle states in the final list are enabled by default in this case.
+
+
+.. _intel-idle-initialization:
+
+Initialization
+==============
+
+The initialization of ``intel_idle`` starts with checking if the kernel command
+line options forbid the use of the ``MWAIT`` instruction. If that is the case,
+an error code is returned right away.
+
+The next step is to check whether or not the processor model is known to the
+driver, which determines the idle states enumeration method (see
+`above <intel-idle-enumeration-of-states_>`_), and whether or not the processor
+supports ``MWAIT`` (the initialization fails if that is not the case). Then,
+the ``MWAIT`` support in the processor is enumerated through ``CPUID`` and the
+driver initialization fails if the level of support is not as expected (for
+example, if the total number of ``MWAIT`` substates returned is 0).
+
+Next, if the driver is not configured to ignore the ACPI tables (see
+`below <intel-idle-parameters_>`_), the idle states information provided by the
+platform firmware is extracted from them.
+
+Then, ``CPUIdle`` device objects are allocated for all CPUs and the list of
+available idle states is created as explained
+`above <intel-idle-enumeration-of-states_>`_.
+
+Finally, ``intel_idle`` is registered with the help of cpuidle_register_driver()
+as the ``CPUIdle`` driver for all CPUs in the system and a CPU online callback
+for configuring individual CPUs is registered via cpuhp_setup_state(), which
+(among other things) causes the callback routine to be invoked for all of the
+CPUs present in the system at that time (each CPU executes its own instance of
+the callback routine). That routine registers a ``CPUIdle`` device for the CPU
+running it (which enables the ``CPUIdle`` subsystem to operate that CPU) and
+optionally performs some CPU-specific initialization actions that may be
+required for the given processor model.
+
+
+.. _intel-idle-parameters:
+
+Kernel Command Line Options and Module Parameters
+=================================================
+
+The *x86* architecture support code recognizes three kernel command line
+options related to CPU idle time management: ``idle=poll``, ``idle=halt``,
+and ``idle=nomwait``. If any of them is present in the kernel command line, the
+``MWAIT`` instruction is not allowed to be used, so the initialization of
+``intel_idle`` will fail.
+
+Apart from that there are two module parameters recognized by ``intel_idle``
+itself that can be set via the kernel command line (they cannot be updated via
+sysfs, so that is the only way to change their values).
+
+The ``max_cstate`` parameter value is the maximum idle state index in the list
+of idle states supplied to the ``CPUIdle`` core during the registration of the
+driver. It is also the maximum number of regular (non-polling) idle states that
+can be used by ``intel_idle``, so the enumeration of idle states is terminated
+after finding that number of usable idle states (the other idle states that
+potentially might have been used if ``max_cstate`` had been greater are not
+taken into consideration at all). Setting ``max_cstate`` can prevent
+``intel_idle`` from exposing idle states that are regarded as "too deep" for
+some reason to the ``CPUIdle`` core, but it does so by making them effectively
+invisible until the system is shut down and started again which may not always
+be desirable. In practice, it is only really necessary to do that if the idle
+states in question cannot be enabled during system startup, because in the
+working state of the system the CPU power management quality of service (PM
+QoS) feature can be used to prevent ``CPUIdle`` from touching those idle states
+even if they have been enumerated (see :ref:`cpu-pm-qos` in :doc:`cpuidle`).
+Setting ``max_cstate`` to 0 causes the ``intel_idle`` initialization to fail.
+
+The ``noacpi`` module parameter (which is recognized by ``intel_idle`` if the
+kernel has been configured with ACPI support), can be set to make the driver
+ignore the system's ACPI tables entirely (it is unset by default).
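+
+For example, to prevent the driver from using idle states deeper than index 2
+in the list described above, one might boot the kernel with::
+
+	intel_idle.max_cstate=2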
+
+
+.. _intel-idle-core-and-package-idle-states:
+
+Core and Package Levels of Idle States
+======================================
+
+Typically, in a processor supporting the ``MWAIT`` instruction there are (at
+least) two levels of idle states (or C-states). One level, referred to as
+"core C-states", covers individual cores in the processor, whereas the other
+level, referred to as "package C-states", covers the entire processor package
+and it may also involve other components of the system (GPUs, memory
+controllers, I/O hubs etc.).
+
+Some of the ``MWAIT`` hint values allow the processor to use core C-states only
+(most importantly, that is the case for the ``MWAIT`` hint value corresponding
+to the ``C1`` idle state), but the majority of them give it a license to put
+the target core (i.e. the core containing the logical CPU executing ``MWAIT``
+with the given hint value) into a specific core C-state and then (if possible)
+to enter a specific package C-state at the deeper level. For example, the
+``MWAIT`` hint value representing the ``C3`` idle state allows the processor to
+put the target core into the low-power state referred to as "core ``C3``" (or
+``CC3``), which happens if all of the logical CPUs (SMT siblings) in that core
+have executed ``MWAIT`` with the ``C3`` hint value (or with a hint value
+representing a deeper idle state), and in addition to that (in the majority of
+cases) it gives the processor a license to put the entire package (possibly
+including some non-CPU components such as a GPU or a memory controller) into the
+low-power state referred to as "package ``C3``" (or ``PC3``), which happens if
+all of the cores have gone into the ``CC3`` state and (possibly) some additional
+conditions are satisfied (for instance, if the GPU is covered by ``PC3``, it may
+be required to be in a certain GPU-specific low-power state for ``PC3`` to be
+reachable).
+
+As a rule, there is no simple way to make the processor use core C-states only
+if the conditions for entering the corresponding package C-states are met, so
+the logical CPU executing ``MWAIT`` with a hint value that is not core-level
+only (like for ``C1``) must always assume that this may cause the processor to
+enter a package C-state. [That is why the exit latency and target residency
+values corresponding to the majority of ``MWAIT`` hint values in the "internal"
+tables of idle states in ``intel_idle`` reflect the properties of package
+C-states.] If using package C-states is not desirable at all, either
+:ref:`PM QoS <cpu-pm-qos>` or the ``max_cstate`` module parameter of
+``intel_idle`` described `above <intel-idle-parameters_>`_ must be used to
+restrict the range of permissible idle states to the ones with core-level only
+``MWAIT`` hint values (like ``C1``).
+
+
+References
+==========
+
+.. [1] *Intel® 64 and IA-32 Architectures Software Developer’s Manual Volume 2B*,
+ https://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-software-developer-vol-2b-manual.html
+
+.. [2] *Advanced Configuration and Power Interface (ACPI) Specification*,
+ https://uefi.org/specifications
:maxdepth: 2
cpuidle
+ intel_idle
cpufreq
intel_pstate
intel_epb
+------------------------------+---------+---------+
| Name | bits | visible |
+------------------------------+---------+---------+
+ | RNDR | [63-60] | y |
+ +------------------------------+---------+---------+
| TS | [55-52] | y |
+------------------------------+---------+---------+
| FHM | [51-48] | y |
+------------------------------+---------+---------+
| Name | bits | visible |
+------------------------------+---------+---------+
+ | I8MM | [55-52] | y |
+ +------------------------------+---------+---------+
+ | DGH | [51-48] | y |
+ +------------------------------+---------+---------+
+ | BF16 | [47-44] | y |
+ +------------------------------+---------+---------+
| SB | [39-36] | y |
+------------------------------+---------+---------+
| FRINTTS | [35-32] | y |
+------------------------------+---------+---------+
| Name | bits | visible |
+------------------------------+---------+---------+
+ | F64MM | [59-56] | y |
+ +------------------------------+---------+---------+
+ | F32MM | [55-52] | y |
+ +------------------------------+---------+---------+
+ | I8MM | [47-44] | y |
+ +------------------------------+---------+---------+
| SM4 | [43-40] | y |
+------------------------------+---------+---------+
| SHA3 | [35-32] | y |
+------------------------------+---------+---------+
+ | BF16 | [23-20] | y |
+ +------------------------------+---------+---------+
| BitPerm | [19-16] | y |
+------------------------------+---------+---------+
| AES | [7-4] | y |
Functionality implied by ID_AA64ISAR1_EL1.FRINTTS == 0b0001.
+HWCAP2_SVEI8MM
+
+ Functionality implied by ID_AA64ZFR0_EL1.I8MM == 0b0001.
+
+HWCAP2_SVEF32MM
+
+ Functionality implied by ID_AA64ZFR0_EL1.F32MM == 0b0001.
+
+HWCAP2_SVEF64MM
+
+ Functionality implied by ID_AA64ZFR0_EL1.F64MM == 0b0001.
+
+HWCAP2_SVEBF16
+
+ Functionality implied by ID_AA64ZFR0_EL1.BF16 == 0b0001.
+
+HWCAP2_I8MM
+
+ Functionality implied by ID_AA64ISAR1_EL1.I8MM == 0b0001.
+
+HWCAP2_BF16
+
+ Functionality implied by ID_AA64ISAR1_EL1.BF16 == 0b0001.
+
+HWCAP2_DGH
+
+ Functionality implied by ID_AA64ISAR1_EL1.DGH == 0b0001.
+
+HWCAP2_RNG
+
+ Functionality implied by ID_AA64ISAR0_EL1.RNDR == 0b0001.
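+
+As a brief userspace sketch (assuming the toolchain's uapi <asm/hwcap.h>
+header provides the HWCAP2_* constants), the presence of these features can
+be tested with getauxval()::
+
+	#include <stdio.h>
+	#include <sys/auxv.h>
+	#include <asm/hwcap.h>
+
+	int main(void)
+	{
+		unsigned long hwcap2 = getauxval(AT_HWCAP2);
+
+		if (hwcap2 & HWCAP2_I8MM)
+			printf("I8MM supported\n");
+		if (hwcap2 & HWCAP2_RNG)
+			printf("RNDR/RNDRRS supported\n");
+		return 0;
+	}
+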
4. Unused AT_HWCAP bits
-----------------------
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A76 | #1463225 | ARM64_ERRATUM_1463225 |
+----------------+-----------------+-----------------+-----------------------------+
+| ARM | Cortex-A55 | #1530923 | ARM64_ERRATUM_1530923 |
++----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-N1 | #1188873,1418040| ARM64_ERRATUM_1418040 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-N1 | #1349291 | N/A |
``ULONG_MAX`` then the XArray is not the data type for you. The most
important user of the XArray is the page cache.
-Each non-``NULL`` entry in the array has three bits associated with
-it called marks. Each mark may be set or cleared independently of
-the others. You can iterate over entries which are marked.
-
Normal pointers may be stored in the XArray directly. They must be 4-byte
aligned, which is true for any pointer returned from kmalloc() and
alloc_page(). It isn't true for arbitrary user-space pointers,
a value entry by calling xa_is_value(), and convert it back to
an integer by calling xa_to_value().
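
As a minimal kernel-side sketch of the value-entry helpers (assuming an
XArray called ``counters``; error handling elided)::

	#include <linux/xarray.h>

	static DEFINE_XARRAY(counters);

	static void value_entry_example(void)
	{
		void *entry;

		/* Encode the integer 42 as a value entry and store it. */
		xa_store(&counters, 5, xa_mk_value(42), GFP_KERNEL);

		/* Check the entry type before decoding it. */
		entry = xa_load(&counters, 5);
		if (xa_is_value(entry))
			pr_info("stored integer: %lu\n", xa_to_value(entry));
	}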
-Some users want to store tagged pointers instead of using the marks
-described above. They can call xa_tag_pointer() to create an
-entry with a tag, xa_untag_pointer() to turn a tagged entry
-back into an untagged pointer and xa_pointer_tag() to retrieve
-the tag of an entry. Tagged pointers use the same bits that are used
-to distinguish value entries from normal pointers, so each user must
+Some users want to tag the pointers they store in the XArray. You can
+call xa_tag_pointer() to create an entry with a tag, xa_untag_pointer()
+to turn a tagged entry back into an untagged pointer and xa_pointer_tag()
+to retrieve the tag of an entry. Tagged pointers use the same bits that
+are used to distinguish value entries from normal pointers, so you must
decide whether you want to store value entries or tagged pointers in
any particular XArray.
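
For instance, a minimal sketch of the tagged-pointer helpers (``struct foo``
is a hypothetical stand-in for the user's own type)::

	#include <linux/xarray.h>

	struct foo;

	static DEFINE_XARRAY(objects);

	static void tagged_pointer_example(struct foo *obj)
	{
		void *entry;

		/* Store obj at index 0 with tag 1; the tag must fit in
		 * the low bits, see xa_tag_pointer(). */
		xa_store(&objects, 0, xa_tag_pointer(obj, 1), GFP_KERNEL);

		entry = xa_load(&objects, 0);
		pr_info("tag: %lu\n", xa_pointer_tag(entry));
		obj = xa_untag_pointer(entry);	/* plain pointer again */
	}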
An unusual feature of the XArray is the ability to create entries which
occupy a range of indices. Once stored to, looking up any index in
the range will return the same entry as looking up any other index in
-the range. Setting a mark on one index will set it on all of them.
-Storing to any index will store to all of them. Multi-index entries can
-be explicitly split into smaller entries, or storing ``NULL`` into any
-entry will cause the XArray to forget about the range.
+the range. Storing to any index will store to all of them. Multi-index
+entries can be explicitly split into smaller entries, or storing ``NULL``
+into any entry will cause the XArray to forget about the range.
Normal API
==========
at that index is ``NULL``, you can use xa_insert() which
returns ``-EBUSY`` if the entry is not empty.
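
As a minimal sketch of xa_insert() (assuming an XArray called ``slots``)::

	#include <linux/xarray.h>

	static DEFINE_XARRAY(slots);

	static int reserve_slot(unsigned long index, void *item)
	{
		/* Fails with -EBUSY if an entry is already present. */
		return xa_insert(&slots, index, item, GFP_KERNEL);
	}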
-You can enquire whether a mark is set on an entry by using
-xa_get_mark(). If the entry is not ``NULL``, you can set a mark
-on it by using xa_set_mark() and remove the mark from an entry by
-calling xa_clear_mark(). You can ask whether any entry in the
-XArray has a particular mark set by calling xa_marked().
-
You can copy entries out of the XArray into a plain array by calling
-xa_extract(). Or you can iterate over the present entries in
-the XArray by calling xa_for_each(). You may prefer to use
-xa_find() or xa_find_after() to move to the next present
-entry in the XArray.
+xa_extract(). Or you can iterate over the present entries in the XArray
+by calling xa_for_each(), xa_for_each_start() or xa_for_each_range().
+You may prefer to use xa_find() or xa_find_after() to move to the next
+present entry in the XArray.
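+
+For example, a minimal sketch of walking every present entry (assuming an
+XArray called ``array``)::
+
+	unsigned long index;
+	void *entry;
+
+	xa_for_each(&array, index, entry)
+		pr_info("index %lu: entry %p\n", index, entry);
+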
Calling xa_store_range() stores the same entry in a range
of indices. If you do this, some of the other operations will behave
to free the entries first. You can do this by iterating over all present
entries in the XArray using the xa_for_each() iterator.
+Search Marks
+------------
+
+Each entry in the array has three bits associated with it called marks.
+Each mark may be set or cleared independently of the others. You can
+iterate over marked entries by using the xa_for_each_marked() iterator.
+
+You can enquire whether a mark is set on an entry by using
+xa_get_mark(). If the entry is not ``NULL``, you can set a mark on it
+by using xa_set_mark() and remove the mark from an entry by calling
+xa_clear_mark(). You can ask whether any entry in the XArray has a
+particular mark set by calling xa_marked(). Erasing an entry from the
+XArray causes all marks associated with that entry to be cleared.
+
+Setting or clearing a mark on any index of a multi-index entry will
+affect all indices covered by that entry. Querying the mark on any
+index will return the same result.
+
+There is no way to iterate over entries which are not marked; the data
+structure does not allow this to be implemented efficiently. There are
+not currently iterators to search for logical combinations of bits (eg
+iterate over all entries which have both ``XA_MARK_1`` and ``XA_MARK_2``
+set, or iterate over all entries which have ``XA_MARK_0`` or ``XA_MARK_2``
+set). It would be possible to add these if a user arises.
+
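+For instance, a minimal sketch of marking an entry and then walking the
+marked entries (assuming an XArray called ``array`` with a non-NULL entry
+at index 7)::
+
+	unsigned long index;
+	void *entry;
+
+	xa_set_mark(&array, 7, XA_MARK_0);
+
+	xa_for_each_marked(&array, index, entry, XA_MARK_0)
+		pr_info("index %lu is marked\n", index);
+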
Allocating XArrays
------------------
Takes RCU read lock:
* xa_load()
* xa_for_each()
+ * xa_for_each_start()
+ * xa_for_each_range()
* xa_find()
* xa_find_after()
* xa_extract()
then it is good manners to pause the iteration and reenable interrupts
every ``XA_CHECK_SCHED`` entries.
-The xas_get_mark(), xas_set_mark() and
-xas_clear_mark() functions require the xa_state cursor to have
-been moved to the appropriate location in the xarray; they will do
-nothing if you have called xas_pause() or xas_set()
+The xas_get_mark(), xas_set_mark() and xas_clear_mark() functions require
+the xa_state cursor to have been moved to the appropriate location in the
+XArray; they will do nothing if you have called xas_pause() or xas_set()
immediately before.
You can call xas_set_update() to have a callback function
Required properties:
- compatible : should be one or more of
+ "brcm,bcm7216-ahci"
"brcm,bcm7425-ahci"
"brcm,bcm7445-ahci"
"brcm,bcm-nsp-ahci"
- reg-names : "ahci" and "top-ctrl"
- interrupts : interrupt mapping for SATA IRQ
+Optional properties:
+
+- reset: for "brcm,bcm7216-ahci" must be a valid reset phandle
+ pointing to the RESCAL reset controller provider node.
+- reset-names: for "brcm,bcm7216-ahci", must be "rescal".
+
Also see ahci-platform.txt.
Example:
- compatible :
- "fsl,vf610-edma" for eDMA used similar to that on Vybrid vf610 SoC
- "fsl,imx7ulp-edma" for eDMA2 used similar to that on i.mx7ulp
+	- "fsl,ls1028a-edma" for eDMA used similar to that on Vybrid vf610 SoC
- reg : Specifies base physical address(s) and size of the eDMA registers.
The 1st region is eDMA control register's address and size.
The 2nd and the 3rd regions are programmable channel multiplexing
"fsl,imx6q-sdma"
"fsl,imx7d-sdma"
"fsl,imx8mq-sdma"
+ "fsl,imx8mm-sdma"
+ "fsl,imx8mn-sdma"
+ "fsl,imx8mp-sdma"
The -to variants should be preferred since they allow determining the
correct ROM script addresses needed for the driver to work without additional
firmware.
-* Ingenic JZ4780 DMA Controller
+* Ingenic XBurst DMA Controller
Required properties:
* ingenic,jz4770-dma
* ingenic,jz4780-dma
* ingenic,x1000-dma
+ * ingenic,x1830-dma
- reg: Should contain the DMA channel registers location and length, followed
by the DMA controller registers location and length.
- interrupts: Should contain the interrupt specifier of the DMA controller.
-- clocks: Should contain a clock specifier for the JZ4780/X1000 PDMA clock.
+- clocks: Should contain a clock specifier for the JZ4780/X1000/X1830 PDMA
+ clock.
- #dma-cells: Must be <2>. Number of integer cells in the dmas property of
DMA clients (see below).
- "renesas,dmac-r8a7794" (R-Car E2)
- "renesas,dmac-r8a7795" (R-Car H3)
- "renesas,dmac-r8a7796" (R-Car M3-W)
+ - "renesas,dmac-r8a77961" (R-Car M3-W+)
- "renesas,dmac-r8a77965" (R-Car M3-N)
- "renesas,dmac-r8a77970" (R-Car V3M)
- "renesas,dmac-r8a77980" (R-Car V3H)
--- /dev/null
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/ti/k3-udma.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Texas Instruments K3 NAVSS Unified DMA Device Tree Bindings
+
+maintainers:
+ - Peter Ujfalusi <peter.ujfalusi@ti.com>
+
+description: |
+ The UDMA-P is intended to perform similar (but significantly upgraded)
+ functions as the packet-oriented DMA used on previous SoC devices. The UDMA-P
+ module supports the transmission and reception of various packet types.
+ The UDMA-P architecture facilitates the segmentation and reassembly of SoC DMA
+ data structure compliant packets to/from smaller data blocks that are natively
+ compatible with the specific requirements of each connected peripheral.
+ Multiple Tx and Rx channels are provided within the DMA which allow multiple
+ segmentation or reassembly operations to be ongoing. The DMA controller
+ maintains state information for each of the channels which allows packet
+ segmentation and reassembly operations to be time division multiplexed between
+ channels in order to share the underlying DMA hardware. An external DMA
+ scheduler is used to control the ordering and rate at which this multiplexing
+ occurs for Transmit operations. The ordering and rate of Receive operations
+ is indirectly controlled by the order in which blocks are pushed into the DMA
+ on the Rx PSI-L interface.
+
+ The UDMA-P also supports acting as both a UTC and UDMA-C for its internal
+ channels. Channels in the UDMA-P can be configured to be either Packet-Based
+ or Third-Party channels on a channel by channel basis.
+
+  All transfers within NAVSS are done between PSI-L source and destination
+ threads.
+ The peripherals serviced by UDMA can be PSI-L native (sa2ul, cpsw, etc) or
+  legacy, non PSI-L native peripherals. In the latter case a special, small PDMA
+ is tasked to act as a bridge between the PSI-L fabric and the legacy
+ peripheral.
+
+ PDMAs can be configured via UDMAP peer registers to match with the
+ configuration of the legacy peripheral.
+
+allOf:
+ - $ref: "../dma-controller.yaml#"
+
+properties:
+ "#dma-cells":
+ const: 1
+ description: |
+ The cell is the PSI-L thread ID of the remote (to UDMAP) end.
+      Valid ranges for thread ID depend on the data movement direction:
+ for source thread IDs (rx): 0 - 0x7fff
+ for destination thread IDs (tx): 0x8000 - 0xffff
+
+ Please refer to the device documentation for the PSI-L thread map and also
+ the PSI-L peripheral chapter for the correct thread ID.
+
+ compatible:
+ enum:
+ - ti,am654-navss-main-udmap
+ - ti,am654-navss-mcu-udmap
+ - ti,j721e-navss-main-udmap
+ - ti,j721e-navss-mcu-udmap
+
+ reg:
+ maxItems: 3
+
+ reg-names:
+ items:
+ - const: gcfg
+ - const: rchanrt
+ - const: tchanrt
+
+ msi-parent: true
+
+ ti,sci:
+ description: phandle to TI-SCI compatible System controller node
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/phandle
+
+ ti,sci-dev-id:
+ description: TI-SCI device id of UDMAP
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+
+ ti,ringacc:
+ description: phandle to the ring accelerator node
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/phandle
+
+ ti,sci-rm-range-tchan:
+ description: |
+ Array of UDMA tchan resource subtypes for resource allocation for this
+ host
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ # Should be enough
+ maxItems: 255
+
+ ti,sci-rm-range-rchan:
+ description: |
+ Array of UDMA rchan resource subtypes for resource allocation for this
+ host
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ # Should be enough
+ maxItems: 255
+
+ ti,sci-rm-range-rflow:
+ description: |
+ Array of UDMA rflow resource subtypes for resource allocation for this
+ host
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ # Should be enough
+ maxItems: 255
+
+required:
+ - compatible
+ - "#dma-cells"
+ - reg
+ - reg-names
+ - msi-parent
+ - ti,sci
+ - ti,sci-dev-id
+ - ti,ringacc
+ - ti,sci-rm-range-tchan
+ - ti,sci-rm-range-rchan
+ - ti,sci-rm-range-rflow
+
+examples:
+ - |+
+ cbass_main {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ cbass_main_navss: navss@30800000 {
+ compatible = "simple-mfd";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ dma-coherent;
+ dma-ranges;
+ ranges;
+
+ ti,sci-dev-id = <118>;
+
+ main_udmap: dma-controller@31150000 {
+ compatible = "ti,am654-navss-main-udmap";
+ reg = <0x0 0x31150000 0x0 0x100>,
+ <0x0 0x34000000 0x0 0x100000>,
+ <0x0 0x35000000 0x0 0x100000>;
+ reg-names = "gcfg", "rchanrt", "tchanrt";
+ #dma-cells = <1>;
+
+ ti,ringacc = <&ringacc>;
+
+ msi-parent = <&inta_main_udmass>;
+
+ ti,sci = <&dmsc>;
+ ti,sci-dev-id = <188>;
+
+ ti,sci-rm-range-tchan = <0x1>, /* TX_HCHAN */
+ <0x2>; /* TX_CHAN */
+ ti,sci-rm-range-rchan = <0x4>, /* RX_HCHAN */
+ <0x5>; /* RX_CHAN */
+ ti,sci-rm-range-rflow = <0x6>; /* GP RFLOW */
+ };
+ };
+
+ mcasp0: mcasp@02B00000 {
+ dmas = <&main_udmap 0xc400>, <&main_udmap 0x4400>;
+ dma-names = "tx", "rx";
+ };
+
+ crypto: crypto@4E00000 {
+ compatible = "ti,sa2ul-crypto";
+
+ dmas = <&main_udmap 0xc000>, <&main_udmap 0x4000>, <&main_udmap 0x4001>;
+ dma-names = "tx", "rx1", "rx2";
+ };
+ };
--- /dev/null
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/hwmon/adi,adm1177.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analog Devices ADM1177 Hot Swap Controller and Digital Power Monitor
+
+maintainers:
+ - Michael Hennerich <michael.hennerich@analog.com>
+ - Beniamin Bia <beniamin.bia@analog.com>
+
+description: |
+ Analog Devices ADM1177 Hot Swap Controller and Digital Power Monitor
+ https://www.analog.com/media/en/technical-documentation/data-sheets/ADM1177.pdf
+
+properties:
+ compatible:
+ enum:
+ - adi,adm1177
+
+ reg:
+ maxItems: 1
+
+ avcc-supply:
+ description:
+ Phandle to the Avcc power supply
+
+ shunt-resistor-micro-ohms:
+ description:
+      The value of the current sense resistor in microohms. If not provided,
+      the current reading and overcurrent alert are disabled.
+
+ adi,shutdown-threshold-microamp:
+ description:
+      Specifies the current level at which an overcurrent alert occurs.
+ If not provided, the overcurrent alert is configured to max ADC range
+ based on shunt-resistor-micro-ohms.
+
+ adi,vrange-high-enable:
+ description:
+      Specifies which internal voltage divider is to be used. A 1 selects
+ a 7:2 voltage divider while a 0 selects a 14:1 voltage divider.
+ type: boolean
+
+required:
+ - compatible
+ - reg
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pwmon@5a {
+ compatible = "adi,adm1177";
+ reg = <0x5a>;
+ shunt-resistor-micro-ohms = <50000>; /* 50 mOhm */
+ adi,shutdown-threshold-microamp = <1059000>; /* 1.059 A */
+ adi,vrange-high-enable;
+ };
+ };
+...
--- /dev/null
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+
+$id: http://devicetree.org/schemas/hwmon/pmbus/ti,ucd90320.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: UCD90320 power sequencer
+
+maintainers:
+ - Jim Wright <wrightj@linux.vnet.ibm.com>
+
+description: |
+ The UCD90320 is a 32-rail PMBus/I2C addressable power-supply sequencer and
+ monitor. The 24 integrated ADC channels (AMONx) monitor the power supply
+ voltage, current, and temperature. Of the 84 GPIO pins, 8 can be used as
+ digital monitors (DMONx), 32 to enable the power supply (ENx), 24 for
+  margining (MARx), 16 for logical GPO, and 32 as GPIs for cascading and
+  system function.
+
+ http://focus.ti.com/lit/ds/symlink/ucd90320.pdf
+
+properties:
+ compatible:
+ enum:
+ - ti,ucd90320
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ucd90320@11 {
+ compatible = "ti,ucd90320";
+ reg = <0x11>;
+ };
+ };
- dma-names: should contain "tx" and "rx".
- atmel,fifo-size: maximum number of data the RX and TX FIFOs can store for FIFO
capable I2C controllers.
-- i2c-sda-hold-time-ns: TWD hold time, only available for "atmel,sama5d4-i2c"
- and "atmel,sama5d2-i2c".
+- i2c-sda-hold-time-ns: TWD hold time, only available for:
+ "atmel,sama5d4-i2c",
+ "atmel,sama5d2-i2c",
+ "microchip,sam9x60-i2c".
- Child nodes conforming to i2c bus binding
Examples :
--- /dev/null
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/memory-controllers/fsl/imx8m-ddrc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: i.MX8M DDR Controller
+
+maintainers:
+ - Leonard Crestez <leonard.crestez@nxp.com>
+
+description:
+ The DDRC block is integrated in i.MX8M for interfacing with DDR based
+ memories.
+
+  It supports switching between different frequencies at runtime, but during
+  this process RAM itself becomes briefly inaccessible, so the actual
+  frequency switching is implemented by TF-A code which runs from an SRAM area.
+
+  The Linux driver for the DDRC doesn't even map registers (they're included
+  for the sake of "describing hardware"); it mostly just exposes firmware
+  capabilities through standard Linux mechanisms like devfreq and OPP tables.
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - fsl,imx8mn-ddrc
+ - fsl,imx8mm-ddrc
+ - fsl,imx8mq-ddrc
+ - const: fsl,imx8m-ddrc
+
+ reg:
+ maxItems: 1
+ description:
+ Base address and size of DDRC CTL area.
+ This is not currently mapped by the imx8m-ddrc driver.
+
+ clocks:
+ maxItems: 4
+
+ clock-names:
+ items:
+ - const: core
+ - const: pll
+ - const: alt
+ - const: apb
+
+ operating-points-v2: true
+ opp-table: true
+
+required:
+ - reg
+ - compatible
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/imx8mm-clock.h>
+ ddrc: memory-controller@3d400000 {
+ compatible = "fsl,imx8mm-ddrc", "fsl,imx8m-ddrc";
+ reg = <0x3d400000 0x400000>;
+ clock-names = "core", "pll", "alt", "apb";
+ clocks = <&clk IMX8MM_CLK_DRAM_CORE>,
+ <&clk IMX8MM_DRAM_PLL>,
+ <&clk IMX8MM_CLK_DRAM_ALT>,
+ <&clk IMX8MM_CLK_DRAM_APB>;
+ operating-points-v2 = <&ddrc_opp_table>;
+ };
- compatible: should be one of the following
- "brcm,bcm7425-sdhci"
- "brcm,bcm7445-sdhci"
+ - "brcm,bcm7216-sdhci"
Refer to clocks/clock-bindings.txt for generic clock consumer properties.
Example:
- sdhci@f03e0100 {
- compatible = "brcm,bcm7425-sdhci";
- reg = <0xf03e0000 0x100>;
- interrupts = <0x0 0x26 0x0>;
- sdhci,auto-cmd12;
- clocks = <&sw_sdio>;
+ sdhci@84b0000 {
sd-uhs-sdr50;
sd-uhs-ddr50;
+ sd-uhs-sdr104;
+ sdhci,auto-cmd12;
+ compatible = "brcm,bcm7216-sdhci",
+ "brcm,bcm7445-sdhci",
+ "brcm,sdhci-brcmstb";
+ reg = <0x84b0000 0x260 0x84b0300 0x200>;
+ reg-names = "host", "cfg";
+ interrupts = <0x0 0x26 0x4>;
+ interrupt-names = "sdio0_0";
+ clocks = <&scmi_clk 245>;
+ clock-names = "sw_sdio";
};
- sdhci@f03e0300 {
+ sdhci@84b1000 {
+ mmc-ddr-1_8v;
+ mmc-hs200-1_8v;
+ mmc-hs400-1_8v;
+ mmc-hs400-enhanced-strobe;
+ supports-cqe;
non-removable;
bus-width = <0x8>;
- compatible = "brcm,bcm7425-sdhci";
- reg = <0xf03e0200 0x100>;
- interrupts = <0x0 0x27 0x0>;
- sdhci,auto-cmd12;
- clocks = <sw_sdio>;
- mmc-hs200-1_8v;
+ compatible = "brcm,bcm7216-sdhci",
+ "brcm,bcm7445-sdhci",
+ "brcm,sdhci-brcmstb";
+ reg = <0x84b1000 0x260 0x84b1300 0x200>;
+ reg-names = "host", "cfg";
+ interrupts = <0x0 0x27 0x4>;
+ interrupt-names = "sdio1_0";
+ clocks = <&scmi_clk 245>;
+ clock-names = "sw_sdio";
};
"fsl,imx8mq-usdhc"
"fsl,imx8mm-usdhc"
"fsl,imx8mn-usdhc"
+ "fsl,imx8mp-usdhc"
"fsl,imx8qxp-usdhc"
Optional properties:
"renesas,sdhi-r8a7793" - SDHI IP on R8A7793 SoC
"renesas,sdhi-r8a7794" - SDHI IP on R8A7794 SoC
"renesas,sdhi-r8a7795" - SDHI IP on R8A7795 SoC
- "renesas,sdhi-r8a7796" - SDHI IP on R8A7796 SoC
+ "renesas,sdhi-r8a7796" - SDHI IP on R8A77960 SoC
+ "renesas,sdhi-r8a77961" - SDHI IP on R8A77961 SoC
"renesas,sdhi-r8a77965" - SDHI IP on R8A77965 SoC
"renesas,sdhi-r8a77970" - SDHI IP on R8A77970 SoC
"renesas,sdhi-r8a77980" - SDHI IP on R8A77980 SoC
+++ /dev/null
-* Rockchip specific extensions to the Synopsys Designware Mobile
- Storage Host Controller
-
-The Synopsys designware mobile storage host controller is used to interface
-a SoC with storage medium such as eMMC or SD/MMC cards. This file documents
-differences between the core Synopsys dw mshc controller properties described
-by synopsys-dw-mshc.txt and the properties used by the Rockchip specific
-extensions to the Synopsys Designware Mobile Storage Host Controller.
-
-Required Properties:
-
-* compatible: should be
- - "rockchip,rk2928-dw-mshc": for Rockchip RK2928 and following,
- before RK3288
- - "rockchip,rk3288-dw-mshc": for Rockchip RK3288
- - "rockchip,rv1108-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RV1108
- - "rockchip,px30-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip PX30
- - "rockchip,rk3036-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3036
- - "rockchip,rk3228-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK322x
- - "rockchip,rk3328-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3328
- - "rockchip,rk3368-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3368
- - "rockchip,rk3399-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3399
-
-Optional Properties:
-* clocks: from common clock binding: if ciu-drive and ciu-sample are
- specified in clock-names, should contain handles to these clocks.
-
-* clock-names: Apart from the clock-names described in synopsys-dw-mshc.txt
- two more clocks "ciu-drive" and "ciu-sample" are supported. They are used
- to control the clock phases, "ciu-sample" is required for tuning high-
- speed modes.
-
-* rockchip,default-sample-phase: The default phase to set ciu-sample at
- probing, low speeds or in case where all phases work at tuning time.
- If not specified 0 deg will be used.
-
-* rockchip,desired-num-phases: The desired number of times that the host
- execute tuning when needed. If not specified, the host will do tuning
- for 360 times, namely tuning for each degree.
-
-Example:
-
- rkdwmmc0@12200000 {
- compatible = "rockchip,rk3288-dw-mshc";
- reg = <0x12200000 0x1000>;
- interrupts = <0 75 0>;
- #address-cells = <1>;
- #size-cells = <0>;
- };
--- /dev/null
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mmc/rockchip-dw-mshc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip designware mobile storage host controller device tree bindings
+
+description:
+ Rockchip uses the Synopsys designware mobile storage host controller
+ to interface a SoC with storage medium such as eMMC or SD/MMC cards.
+ This file documents the properties of the core Synopsys dw mshc controller
+ that are not already covered by the synopsys-dw-mshc-common.yaml file,
+ together with the Rockchip-specific extensions.
+
+allOf:
+ - $ref: "synopsys-dw-mshc-common.yaml#"
+
+maintainers:
+ - Heiko Stuebner <heiko@sntech.de>
+
+# Everything else is described in the common file
+properties:
+ compatible:
+ oneOf:
+ # for Rockchip RK2928 and before RK3288
+ - const: rockchip,rk2928-dw-mshc
+ # for Rockchip RK3288
+ - const: rockchip,rk3288-dw-mshc
+ - items:
+ - enum:
+ # for Rockchip PX30
+ - rockchip,px30-dw-mshc
+ # for Rockchip RK3036
+ - rockchip,rk3036-dw-mshc
+ # for Rockchip RK322x
+ - rockchip,rk3228-dw-mshc
+ # for Rockchip RK3308
+ - rockchip,rk3308-dw-mshc
+ # for Rockchip RK3328
+ - rockchip,rk3328-dw-mshc
+ # for Rockchip RK3368
+ - rockchip,rk3368-dw-mshc
+ # for Rockchip RK3399
+ - rockchip,rk3399-dw-mshc
+ # for Rockchip RV1108
+ - rockchip,rv1108-dw-mshc
+ - const: rockchip,rk3288-dw-mshc
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ minItems: 2
+ maxItems: 4
+ description:
+ Handle to "biu" and "ciu" clocks for the bus interface unit clock and
+ the card interface unit clock. If "ciu-drive" and "ciu-sample" are
+ specified in clock-names, it should also contain
+ handles to these clocks.
+
+ clock-names:
+ minItems: 2
+ items:
+ - const: biu
+ - const: ciu
+ - const: ciu-drive
+ - const: ciu-sample
+ description:
+ Apart from the clock-names "biu" and "ciu", two more clocks,
+ "ciu-drive" and "ciu-sample", are supported. They are used
+ to control the clock phases; "ciu-sample" is required for tuning
+ high-speed modes.
+
+ rockchip,default-sample-phase:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 360
+ default: 0
+ description:
+ The default phase to set "ciu-sample" to at probe time, at
+ low speeds, or in cases where all phases work at tuning time.
+ If not specified, 0 deg will be used.
+
+ rockchip,desired-num-phases:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 360
+ default: 360
+ description:
+ The desired number of times that the host executes tuning when needed.
+ If not specified, the host will tune 360 times,
+ i.e. once for each degree.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+
+examples:
+ - |
+ #include <dt-bindings/clock/rk3288-cru.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ sdmmc: mmc@ff0c0000 {
+ compatible = "rockchip,rk3288-dw-mshc";
+ reg = <0x0 0xff0c0000 0x0 0x4000>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>,
+ <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>;
+ clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
+ resets = <&cru SRST_MMC0>;
+ reset-names = "reset";
+ fifo-depth = <0x100>;
+ max-frequency = <150000000>;
+ };
+
+...
sdhci-of-at91 driver.
Required properties:
-- compatible: Must be "atmel,sama5d2-sdhci".
+- compatible: Must be "atmel,sama5d2-sdhci" or "microchip,sam9x60-sdhci".
- clocks: Phandlers to the clocks.
-- clock-names: Must be "hclock", "multclk", "baseclk";
+- clock-names: Must be "hclock", "multclk", "baseclk" for
+ "atmel,sama5d2-sdhci".
+ Must be "hclock", "multclk" for "microchip,sam9x60-sdhci".
Optional properties:
+- assigned-clocks: The same as "multclk".
+- assigned-clock-rates: The rate of "multclk", in order not to rely on the
+ gck configuration set by previous components.
- microchip,sdcal-inverted: when present, polarity on the SDCAL SoC pin is
inverted. The default polarity for this signal is described in the datasheet.
For instance on SAMA5D2, the pin is usually tied to the GND with a resistor
Example:
-sdmmc0: sdio-host@a0000000 {
+mmc0: sdio-host@a0000000 {
compatible = "atmel,sama5d2-sdhci";
reg = <0xa0000000 0x300>;
interrupts = <31 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&sdmmc0_hclk>, <&sdmmc0_gclk>, <&main>;
clock-names = "hclock", "multclk", "baseclk";
+ assigned-clocks = <&sdmmc0_gclk>;
+ assigned-clock-rates = <480000000>;
};
"qcom,msm8996-sdhci", "qcom,sdhci-msm-v4"
"qcom,sdm845-sdhci", "qcom,sdhci-msm-v5"
"qcom,qcs404-sdhci", "qcom,sdhci-msm-v5"
+ "qcom,sc7180-sdhci", "qcom,sdhci-msm-v5";
NOTE that some old device tree files may be floating around that only
have the string "qcom,sdhci-msm-v4" without the SoC compatible string
but doing that should be considered a deprecated practice.
Required properties:
- compatible: Should be "ti,dra7-sdhci" for DRA7 and DRA72 controllers
Should be "ti,k2g-sdhci" for K2G
+ Should be "ti,am335-sdhci" for am335x controllers
+ Should be "ti,am437-sdhci" for am437x controllers
- ti,hwmods: Must be "mmc<n>", <n> is controller instance starting 1
(Not required for K2G).
- pinctrl-names: Should be subset of "default", "hs", "sdr12", "sdr25", "sdr50",
"hs200_1_8v",
- pinctrl-<n> : Pinctrl states as described in bindings/pinctrl/pinctrl-bindings.txt
+Optional properties:
+- dmas: List of DMA specifiers with the controller-specific format as described
+ in the generic DMA client binding. Both a tx and an rx specifier are required.
+- dma-names: List of DMA request names. These strings correspond 1:1 with the
+ DMA specifiers listed in dmas. The names must be "tx"
+ and "rx" for the TX and RX DMA requests, respectively.
+
Example:
mmc1: mmc@4809c000 {
compatible = "ti,dra7-sdhci";
ti,hwmods = "mmc1";
bus-width = <4>;
vmmc-supply = <&vmmc>; /* phandle to regulator node */
+ dmas = <&sdma 61 &sdma 62>;
+ dma-names = "tx", "rx";
};
--- /dev/null
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mmc/synopsys-dw-mshc-common.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Synopsys Designware Mobile Storage Host Controller Common Properties
+
+allOf:
+ - $ref: "mmc-controller.yaml#"
+
+maintainers:
+ - Ulf Hansson <ulf.hansson@linaro.org>
+
+# Everything else is described in the common file
+properties:
+ resets:
+ maxItems: 1
+
+ reset-names:
+ const: reset
+
+ clock-frequency:
+ description:
+ Should be the frequency (in Hz) of the ciu clock. If this
+ is specified and the ciu clock is specified then we'll try to set the ciu
+ clock to this at probe time.
+
+ fifo-depth:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ The maximum size of the tx/rx fifo's. If this property is not
+ specified, the default value of the fifo size is determined from the
+ controller registers.
+
+ card-detect-delay:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - default: 0
+ description:
+ Delay in milli-seconds before detecting card after card
+ insert event. The default value is 0.
+
+ data-addr:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Override the fifo address with the value provided by DT. The default FIFO reg
+ offset is assumed to be 0x100 (version < 0x240A) and 0x200 (version >= 0x240A)
+ by the driver. If the controller does not follow this rule, please use
+ this property to set the fifo address in the device tree.
+
+ fifo-watermark-aligned:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ A data done irq is expected if the data length is less than the
+ watermark in PIO mode, but on some SoCs the fifo watermark must be
+ aligned with the data length so that the TX/RX irq can be generated
+ with the data done irq. Add this watermark quirk to mark this
+ requirement and force the fifo watermark setting accordingly.
+
+ dmas:
+ maxItems: 1
+
+ dma-names:
+ const: rx-tx
+++ /dev/null
-* Synopsys Designware Mobile Storage Host Controller
-
-The Synopsys designware mobile storage host controller is used to interface
-a SoC with storage medium such as eMMC or SD/MMC cards. This file documents
-differences between the core mmc properties described by mmc.txt and the
-properties used by the Synopsys Designware Mobile Storage Host Controller.
-
-Required Properties:
-
-* compatible: should be
- - snps,dw-mshc: for controllers compliant with synopsys dw-mshc.
-* #address-cells: should be 1.
-* #size-cells: should be 0.
-
-# Slots (DEPRECATED): The slot specific information are contained within
- child-nodes with each child-node representing a supported slot. There should
- be atleast one child node representing a card slot. The name of the child node
- representing the slot is recommended to be slot@n where n is the unique number
- of the slot connected to the controller. The following are optional properties
- which can be included in the slot child node.
-
- * reg: specifies the physical slot number. The valid values of this
- property is 0 to (num-slots -1), where num-slots is the value
- specified by the num-slots property.
-
- * bus-width: as documented in mmc core bindings.
-
- * wp-gpios: specifies the write protect gpio line. The format of the
- gpio specifier depends on the gpio controller. If a GPIO is not used
- for write-protect, this property is optional.
-
- * disable-wp: If the wp-gpios property isn't present then (by default)
- we'd assume that the write protect is hooked up directly to the
- controller's special purpose write protect line (accessible via
- the WRTPRT register). However, it's possible that we simply don't
- want write protect. In that case specify 'disable-wp'.
- NOTE: This property is not required for slots known to always
- connect to eMMC or SDIO cards.
-
-Optional properties:
-
-* resets: phandle + reset specifier pair, intended to represent hardware
- reset signal present internally in some host controller IC designs.
- See Documentation/devicetree/bindings/reset/reset.txt for details.
-
-* reset-names: request name for using "resets" property. Must be "reset".
- (It will be used together with "resets" property.)
-
-* clocks: from common clock binding: handle to biu and ciu clocks for the
- bus interface unit clock and the card interface unit clock.
-
-* clock-names: from common clock binding: Shall be "biu" and "ciu".
- If the biu clock is missing we'll simply skip enabling it. If the
- ciu clock is missing we'll just assume that the clock is running at
- clock-frequency. It is an error to omit both the ciu clock and the
- clock-frequency.
-
-* clock-frequency: should be the frequency (in Hz) of the ciu clock. If this
- is specified and the ciu clock is specified then we'll try to set the ciu
- clock to this at probe time.
-
-* fifo-depth: The maximum size of the tx/rx fifo's. If this property is not
- specified, the default value of the fifo size is determined from the
- controller registers.
-
-* card-detect-delay: Delay in milli-seconds before detecting card after card
- insert event. The default value is 0.
-
-* data-addr: Override fifo address with value provided by DT. The default FIFO reg
- offset is assumed as 0x100 (version < 0x240A) and 0x200(version >= 0x240A) by
- driver. If the controller does not follow this rule, please use this property
- to set fifo address in device tree.
-
-* fifo-watermark-aligned: Data done irq is expected if data length is less than
- watermark in PIO mode. But fifo watermark is requested to be aligned with data
- length in some SoC so that TX/RX irq can be generated with data done irq. Add this
- watermark quirk to mark this requirement and force fifo watermark setting
- accordingly.
-
-* vmmc-supply: The phandle to the regulator to use for vmmc. If this is
- specified we'll defer probe until we can find this regulator.
-
-* dmas: List of DMA specifiers with the controller specific format as described
- in the generic DMA client binding. Refer to dma.txt for details.
-
-* dma-names: request names for generic DMA client binding. Must be "rx-tx".
- Refer to dma.txt for details.
-
-Aliases:
-
-- All the MSHC controller nodes should be represented in the aliases node using
- the following format 'mshc{n}' where n is a unique number for the alias.
-
-Example:
-
-The MSHC controller node can be split into two portions, SoC specific and
-board specific portions as listed below.
-
- dwmmc0@12200000 {
- compatible = "snps,dw-mshc";
- clocks = <&clock 351>, <&clock 132>;
- clock-names = "biu", "ciu";
- reg = <0x12200000 0x1000>;
- interrupts = <0 75 0>;
- #address-cells = <1>;
- #size-cells = <0>;
- data-addr = <0x200>;
- fifo-watermark-aligned;
- resets = <&rst 20>;
- reset-names = "reset";
- };
-
-[board specific internal DMA resources]
-
- dwmmc0@12200000 {
- clock-frequency = <400000000>;
- clock-freq-min-max = <400000 200000000>;
- broken-cd;
- fifo-depth = <0x80>;
- card-detect-delay = <200>;
- vmmc-supply = <&buck8>;
- bus-width = <8>;
- cap-mmc-highspeed;
- cap-sd-highspeed;
- };
-
-[board specific generic DMA request binding]
-
- dwmmc0@12200000 {
- clock-frequency = <400000000>;
- clock-freq-min-max = <400000 200000000>;
- broken-cd;
- fifo-depth = <0x80>;
- card-detect-delay = <200>;
- vmmc-supply = <&buck8>;
- bus-width = <8>;
- cap-mmc-highspeed;
- cap-sd-highspeed;
- dmas = <&pdma 12>;
- dma-names = "rx-tx";
- };
--- /dev/null
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mmc/synopsys-dw-mshc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Synopsys Designware Mobile Storage Host Controller Binding
+
+allOf:
+ - $ref: "synopsys-dw-mshc-common.yaml#"
+
+maintainers:
+ - Ulf Hansson <ulf.hansson@linaro.org>
+
+# Everything else is described in the common file
+properties:
+ compatible:
+ const: snps,dw-mshc
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ minItems: 2
+ maxItems: 2
+ description:
+ Handle to "biu" and "ciu" clocks for the
+ bus interface unit clock and the card interface unit clock.
+
+ clock-names:
+ items:
+ - const: biu
+ - const: ciu
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+
+examples:
+ - |
+ mmc@12200000 {
+ compatible = "snps,dw-mshc";
+ reg = <0x12200000 0x1000>;
+ interrupts = <0 75 0>;
+ clocks = <&clock 351>, <&clock 132>;
+ clock-names = "biu", "ciu";
+ dmas = <&pdma 12>;
+ dma-names = "rx-tx";
+ resets = <&rst 20>;
+ reset-names = "reset";
+ vmmc-supply = <&buck8>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ broken-cd;
+ bus-width = <8>;
+ cap-mmc-highspeed;
+ cap-sd-highspeed;
+ card-detect-delay = <200>;
+ clock-freq-min-max = <400000 200000000>;
+ clock-frequency = <400000000>;
+ data-addr = <0x200>;
+ fifo-depth = <0x80>;
+ fifo-watermark-aligned;
+ };
The settings and programming routines for internal/external
MDIO are different. Must be included for internal MDIO.
+- fsl,erratum-a011043
+ Usage: optional
+ Value type: <boolean>
+ Definition: Indicates the presence of the A011043 erratum
+ describing that the MDIO_CFG[MDIO_RD_ER] bit may be falsely
+ set when reading internal PCS registers. MDIO reads to
+ internal PCS registers may result in having the
+ MDIO_CFG[MDIO_RD_ER] bit set, even when there is no error and
+ read data (MDIO_DATA[MDIO_DATA]) is correct.
+ Software may get a false read error when reading internal
+ PCS registers through MDIO. As a workaround, all internal
+ MDIO accesses should ignore the MDIO_CFG[MDIO_RD_ER] bit.
+
For internal PHY device on internal mdio bus, a PHY node should be created.
See the definition of the PHY node in booting-without-of.txt for an
example of how to define a PHY (Internal PHY has no interrupt line).
--- /dev/null
+QCOM CPR (Core Power Reduction)
+
+CPR (Core Power Reduction) is a technology to reduce core power on a CPU
+or other device. Each OPP of a device corresponds to a "corner" that has
+a range of valid voltages for a particular frequency. While the device is
+running at a particular frequency, CPR monitors dynamic factors such as
+ temperature and suggests adjustments to the voltage to save power
+and meet silicon characteristic requirements.
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: should be "qcom,qcs404-cpr", "qcom,cpr" for qcs404
+
+- reg:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: base address and size of the rbcpr register region
+
+- interrupts:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: should specify the CPR interrupt
+
+- clocks:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: phandle to the reference clock
+
+- clock-names:
+ Usage: required
+ Value type: <stringlist>
+ Definition: must be "ref"
+
+- vdd-apc-supply:
+ Usage: required
+ Value type: <phandle>
+ Definition: phandle to the vdd-apc-supply regulator
+
+- #power-domain-cells:
+ Usage: required
+ Value type: <u32>
+ Definition: should be 0
+
+- operating-points-v2:
+ Usage: required
+ Value type: <phandle>
+ Definition: A phandle to the OPP table containing the
+ performance states supported by the CPR
+ power domain
+
+- acc-syscon:
+ Usage: optional
+ Value type: <phandle>
+ Definition: phandle to syscon for writing ACC settings
+
+- nvmem-cells:
+ Usage: required
+ Value type: <phandle>
+ Definition: phandles to the nvmem cells containing the data
+ that makes up each fuse corner, as well as the
+ CPR fuse revision.
+
+- nvmem-cell-names:
+ Usage: required
+ Value type: <stringlist>
+ Definition: should be "cpr_quotient_offset1", "cpr_quotient_offset2",
+ "cpr_quotient_offset3", "cpr_init_voltage1",
+ "cpr_init_voltage2", "cpr_init_voltage3", "cpr_quotient1",
+ "cpr_quotient2", "cpr_quotient3", "cpr_ring_osc1",
+ "cpr_ring_osc2", "cpr_ring_osc3", "cpr_fuse_revision"
+ for qcs404.
+
+Example:
+
+ cpr_opp_table: cpr-opp-table {
+ compatible = "operating-points-v2-qcom-level";
+
+ cpr_opp1: opp1 {
+ opp-level = <1>;
+ qcom,opp-fuse-level = <1>;
+ };
+ cpr_opp2: opp2 {
+ opp-level = <2>;
+ qcom,opp-fuse-level = <2>;
+ };
+ cpr_opp3: opp3 {
+ opp-level = <3>;
+ qcom,opp-fuse-level = <3>;
+ };
+ };
+
+ power-controller@b018000 {
+ compatible = "qcom,qcs404-cpr", "qcom,cpr";
+ reg = <0x0b018000 0x1000>;
+ interrupts = <0 15 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&xo_board>;
+ clock-names = "ref";
+ vdd-apc-supply = <&pms405_s3>;
+ #power-domain-cells = <0>;
+ operating-points-v2 = <&cpr_opp_table>;
+ acc-syscon = <&tcsr>;
+
+ nvmem-cells = <&cpr_efuse_quot_offset1>,
+ <&cpr_efuse_quot_offset2>,
+ <&cpr_efuse_quot_offset3>,
+ <&cpr_efuse_init_voltage1>,
+ <&cpr_efuse_init_voltage2>,
+ <&cpr_efuse_init_voltage3>,
+ <&cpr_efuse_quot1>,
+ <&cpr_efuse_quot2>,
+ <&cpr_efuse_quot3>,
+ <&cpr_efuse_ring1>,
+ <&cpr_efuse_ring2>,
+ <&cpr_efuse_ring3>,
+ <&cpr_efuse_revision>;
+ nvmem-cell-names = "cpr_quotient_offset1",
+ "cpr_quotient_offset2",
+ "cpr_quotient_offset3",
+ "cpr_init_voltage1",
+ "cpr_init_voltage2",
+ "cpr_init_voltage3",
+ "cpr_quotient1",
+ "cpr_quotient2",
+ "cpr_quotient3",
+ "cpr_ring_osc1",
+ "cpr_ring_osc2",
+ "cpr_ring_osc3",
+ "cpr_fuse_revision";
+ };
--- /dev/null
+Monolithic Power Systems MP8859 voltage regulator
+
+Required properties:
+- compatible: "mps,mp8859";
+- reg: I2C slave address.
+
+Optional subnode for regulator: "mp8859_dcdc", using common regulator
+bindings given in <Documentation/devicetree/bindings/regulator/regulator.txt>.
+
+Example:
+
+ mp8859: regulator@66 {
+ compatible = "mps,mp8859";
+ reg = <0x66>;
+ dc_12v: mp8859_dcdc {
+ regulator-name = "dc_12v";
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ };
--- /dev/null
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/mps,mpq7920.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Monolithic Power System MPQ7920 PMIC
+
+maintainers:
+ - Saravanan Sekar <sravanhome@gmail.com>
+
+properties:
+ $nodename:
+ pattern: "pmic@[0-9a-f]{1,2}"
+ compatible:
+ enum:
+ - mps,mpq7920
+
+ reg:
+ maxItems: 1
+
+ regulators:
+ type: object
+ allOf:
+ - $ref: regulator.yaml#
+ description: |
+ list of regulators provided by this controller, must be named
+ after their hardware counterparts BUCK[1-4], one LDORTC, and LDO[2-5]
+
+ properties:
+ mps,switch-freq:
+ allOf:
+ - $ref: "/schemas/types.yaml#/definitions/uint8"
+ enum: [ 0, 1, 2, 3 ]
+ default: 2
+ description: |
+ switching frequency, must be one of the following corresponding values:
+ 1.1MHz, 1.65MHz, 2.2MHz, 2.75MHz
+
+ patternProperties:
+ "^ldo[1-4]$":
+ type: object
+ allOf:
+ - $ref: regulator.yaml#
+
+ "^ldortc$":
+ type: object
+ allOf:
+ - $ref: regulator.yaml#
+
+ "^buck[1-4]$":
+ type: object
+ allOf:
+ - $ref: regulator.yaml#
+
+ properties:
+ mps,buck-softstart:
+ allOf:
+ - $ref: "/schemas/types.yaml#/definitions/uint8"
+ enum: [ 0, 1, 2, 3 ]
+ description: |
+ defines the soft start time of this buck, must be one of the following
+ corresponding values: 150us, 300us, 610us, 920us
+
+ mps,buck-phase-delay:
+ allOf:
+ - $ref: "/schemas/types.yaml#/definitions/uint8"
+ enum: [ 0, 1, 2, 3 ]
+ description: |
+ defines the phase delay of this buck, must be one of the following
+ corresponding values: 0deg, 90deg, 180deg, 270deg
+
+ mps,buck-ovp-disable:
+ type: boolean
+ description: |
+ disables over voltage protection of this buck
+
+ additionalProperties: false
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - regulators
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pmic@69 {
+ compatible = "mps,mpq7920";
+ reg = <0x69>;
+
+ regulators {
+ mps,switch-freq = /bits/ 8 <1>;
+
+ buck1 {
+ regulator-name = "buck1";
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <3587500>;
+ regulator-min-microamp = <460000>;
+ regulator-max-microamp = <7600000>;
+ regulator-boot-on;
+ mps,buck-ovp-disable;
+ mps,buck-phase-delay = /bits/ 8 <2>;
+ mps,buck-softstart = /bits/ 8 <1>;
+ };
+
+ ldo2 {
+ regulator-name = "ldo2";
+ regulator-min-microvolt = <650000>;
+ regulator-max-microvolt = <3587500>;
+ };
+ };
+ };
+ };
+...
--- /dev/null
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/rohm,bd71828-regulator.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ROHM BD71828 Power Management Integrated Circuit regulators
+
+maintainers:
+ - Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
+
+description: |
+ This module is part of the ROHM BD71828 MFD device. For more details
+ see Documentation/devicetree/bindings/mfd/rohm,bd71828-pmic.yaml.
+
+ The regulator controller is represented as a sub-node of the PMIC node
+ on the device tree.
+
+ Regulator nodes should be named BUCK<number> and LDO<number>.
+ The valid names for BD71828 regulator nodes are
+ BUCK1, BUCK2, BUCK3, BUCK4, BUCK5, BUCK6, BUCK7
+ LDO1, LDO2, LDO3, LDO4, LDO5, LDO6, LDO7
+
+patternProperties:
+ "^LDO[1-7]$":
+ type: object
+ allOf:
+ - $ref: regulator.yaml#
+ description:
+ Properties for single LDO regulator.
+
+ properties:
+ regulator-name:
+ pattern: "^ldo[1-7]$"
+ description:
+ should be "ldo1", ..., "ldo7"
+
+ "^BUCK[1-7]$":
+ type: object
+ allOf:
+ - $ref: regulator.yaml#
+ description:
+ Properties for single BUCK regulator.
+
+ properties:
+ regulator-name:
+ pattern: "^buck[1-7]$"
+ description:
+ should be "buck1", ..., "buck7"
+
+ rohm,dvs-run-voltage:
+ allOf:
+ - $ref: "/schemas/types.yaml#/definitions/uint32"
+ - minimum: 0
+ maximum: 3300000
+ description:
+ PMIC default "RUN" state voltage in uV. See below table for
+ bucks which support this. 0 means disabled.
+
+ rohm,dvs-idle-voltage:
+ allOf:
+ - $ref: "/schemas/types.yaml#/definitions/uint32"
+ - minimum: 0
+ maximum: 3300000
+ description:
+ PMIC default "IDLE" state voltage in uV. See below table for
+ bucks which support this. 0 means disabled.
+
+ rohm,dvs-suspend-voltage:
+ allOf:
+ - $ref: "/schemas/types.yaml#/definitions/uint32"
+ - minimum: 0
+ maximum: 3300000
+ description:
+ PMIC default "SUSPEND" state voltage in uV. See below table for
+ bucks which support this. 0 means disabled.
+
+ rohm,dvs-lpsr-voltage:
+ allOf:
+ - $ref: "/schemas/types.yaml#/definitions/uint32"
+ - minimum: 0
+ maximum: 3300000
+ description:
+ PMIC default "LPSR" state voltage in uV. See below table for
+ bucks which support this. 0 means disabled.
+
+ # Supported default DVS states:
+ #
+ # buck          | run       | idle      | suspend / lpsr
+ #------------------------------------------------------------
+ # 1, 2, 6 and 7 | supported | supported | supported (*)
+ #------------------------------------------------------------
+ # 3, 4 and 5    | supported (**)
+ #------------------------------------------------------------
+ #
+ # (*) LPSR and SUSPEND states use the same voltage, but both states
+ #     have their own enable / disable settings. Voltage 0 can be
+ #     specified for a state to keep the regulator disabled in that
+ #     state.
+ #
+ # (**) All states use the same voltage but have their own enable /
+ #      disable settings. Voltage 0 can be specified for a state to
+ #      keep the regulator disabled in that state.
+
+ required:
+ - regulator-name
+ additionalProperties: false
+additionalProperties: false
+++ /dev/null
-STM32 BOOSTER - Booster for ADC analog input switches
-
-Some STM32 devices embed a 3.3V booster supplied by Vdda, that can be used
-to supply ADC analog input switches.
-
-Required properties:
-- compatible: Should be one of:
- "st,stm32h7-booster"
- "st,stm32mp1-booster"
-- st,syscfg: Phandle to system configuration controller.
-- vdda-supply: Phandle to the vdda input analog voltage.
-
-Example:
- booster: regulator-booster {
- compatible = "st,stm32mp1-booster";
- st,syscfg = <&syscfg>;
- vdda-supply = <&vdda>;
- };
--- /dev/null
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/st,stm32-booster.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics STM32 booster for ADC analog input switches bindings
+
+maintainers:
+ - Fabrice Gasnier <fabrice.gasnier@st.com>
+
+description: |
+ Some STM32 devices embed a 3.3V booster supplied by Vdda, that can be used
+ to supply ADC analog input switches.
+
+allOf:
+ - $ref: "regulator.yaml#"
+
+properties:
+ compatible:
+ enum:
+ - st,stm32h7-booster
+ - st,stm32mp1-booster
+
+ st,syscfg:
+ allOf:
+ - $ref: "/schemas/types.yaml#/definitions/phandle-array"
+ description: phandle to system configuration controller.
+
+ vdda-supply:
+ description: phandle to the vdda input analog voltage.
+
+required:
+ - compatible
+ - st,syscfg
+ - vdda-supply
+
+examples:
+ - |
+ regulator-booster {
+ compatible = "st,stm32mp1-booster";
+ st,syscfg = <&syscfg>;
+ vdda-supply = <&vdda>;
+ };
+
+...
+++ /dev/null
-STM32 VREFBUF - Voltage reference buffer
-
-Some STM32 devices embed a voltage reference buffer which can be used as
-voltage reference for ADCs, DACs and also as voltage reference for external
-components through the dedicated VREF+ pin.
-
-Required properties:
-- compatible: Must be "st,stm32-vrefbuf".
-- reg: Offset and length of VREFBUF register set.
-- clocks: Must contain an entry for peripheral clock.
-
-Example:
- vrefbuf: regulator@58003c00 {
- compatible = "st,stm32-vrefbuf";
- reg = <0x58003C00 0x8>;
- clocks = <&rcc VREF_CK>;
- regulator-min-microvolt = <1500000>;
- regulator-max-microvolt = <2500000>;
- vdda-supply = <&vdda>;
- };
--- /dev/null
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/st,stm32-vrefbuf.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics STM32 Voltage reference buffer bindings
+
+description: |
+ Some STM32 devices embed a voltage reference buffer which can be used as
+ voltage reference for ADCs, DACs and also as voltage reference for external
+ components through the dedicated VREF+ pin.
+
+maintainers:
+ - Fabrice Gasnier <fabrice.gasnier@st.com>
+
+allOf:
+ - $ref: "regulator.yaml#"
+
+properties:
+ compatible:
+ const: st,stm32-vrefbuf
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ vdda-supply:
+ description: phandle to the vdda input analog voltage.
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - vdda-supply
+
+examples:
+ - |
+ #include <dt-bindings/clock/stm32mp1-clks.h>
+ vrefbuf@50025000 {
+ compatible = "st,stm32-vrefbuf";
+ reg = <0x50025000 0x8>;
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <2500000>;
+ clocks = <&rcc VREF>;
+ vdda-supply = <&vdda>;
+ };
+
+...
+
+++ /dev/null
-STM32MP1 PWR Regulators
------------------------
-
-Available Regulators in STM32MP1 PWR block are:
- - reg11 for regulator 1V1
- - reg18 for regulator 1V8
- - usb33 for the swtich USB3V3
-
-Required properties:
-- compatible: Must be "st,stm32mp1,pwr-reg"
-- list of child nodes that specify the regulator reg11, reg18 or usb33
- initialization data for defined regulators. The definition for each of
- these nodes is defined using the standard binding for regulators found at
- Documentation/devicetree/bindings/regulator/regulator.txt.
-- vdd-supply: phandle to the parent supply/regulator node for vdd input
-- vdd_3v3_usbfs-supply: phandle to the parent supply/regulator node for usb33
-
-Example:
-
-pwr_regulators: pwr@50001000 {
- compatible = "st,stm32mp1,pwr-reg";
- reg = <0x50001000 0x10>;
- vdd-supply = <&vdd>;
- vdd_3v3_usbfs-supply = <&vdd_usb>;
-
- reg11: reg11 {
- regulator-name = "reg11";
- regulator-min-microvolt = <1100000>;
- regulator-max-microvolt = <1100000>;
- };
-
- reg18: reg18 {
- regulator-name = "reg18";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- };
-
- usb33: usb33 {
- regulator-name = "usb33";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- };
-};
--- /dev/null
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/st,stm32mp1-pwr-reg.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STM32MP1 PWR voltage regulators
+
+maintainers:
+ - Pascal Paillet <p.paillet@st.com>
+
+properties:
+ compatible:
+ const: st,stm32mp1,pwr-reg
+
+ reg:
+ maxItems: 1
+
+ vdd-supply:
+ description: Input supply phandle(s) for vdd input
+
+ vdd_3v3_usbfs-supply:
+ description: Input supply phandle(s) for vdd_3v3_usbfs input
+
+patternProperties:
+ "^(reg11|reg18|usb33)$":
+ type: object
+
+ allOf:
+ - $ref: "regulator.yaml#"
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ pwr@50001000 {
+ compatible = "st,stm32mp1,pwr-reg";
+ reg = <0x50001000 0x10>;
+ vdd-supply = <&vdd>;
+ vdd_3v3_usbfs-supply = <&vdd_usb>;
+
+ reg11 {
+ regulator-name = "reg11";
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ };
+
+ reg18 {
+ regulator-name = "reg18";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ usb33 {
+ regulator-name = "usb33";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+ };
+...
--- /dev/null
+* Texas Instruments K3 NavigatorSS Ring Accelerator
+
+The Ring Accelerator (RA) is a machine which converts read/write accesses
+from/to a constant address into corresponding read/write accesses from/to a
+circular data structure in memory. The RA eliminates the need for each DMA
+controller which needs to access ring elements from having to know the current
+state of the ring (base address, current offset). The DMA controller
+performs a read or write access to a specific address range (which maps to the
+source interface on the RA) and the RA replaces the address for the transaction
+with a new address which corresponds to the head or tail element of the ring
+(head for reads, tail for writes).
+
+The Ring Accelerator is a hardware module that is responsible for accelerating
+management of the packet queues. The K3 SoCs can have more than one RA instance.
+
+Required properties:
+- compatible : Must be "ti,am654-navss-ringacc";
+- reg : Should contain register location and length of the following
+ named register regions.
+- reg-names : should be
+ "rt" - The RA Ring Real-time Control/Status Registers
+ "fifos" - The RA Queues Registers
+ "proxy_gcfg" - The RA Proxy Global Config Registers
+ "proxy_target" - The RA Proxy Datapath Registers
+- ti,num-rings : Number of rings supported by RA
+- ti,sci-rm-range-gp-rings : TI-SCI RM subtype for GP ring range
+- ti,sci : phandle on TI-SCI compatible System controller node
+- ti,sci-dev-id : TI-SCI device id of the ring accelerator
+- msi-parent : phandle for "ti,sci-inta" interrupt controller
+
+Optional properties:
+ - ti,dma-ring-reset-quirk : enable the software workaround for the
+ ringacc / udma ring state interoperability issue
+
+Example:
+
+ringacc: ringacc@3c000000 {
+ compatible = "ti,am654-navss-ringacc";
+ reg = <0x0 0x3c000000 0x0 0x400000>,
+ <0x0 0x38000000 0x0 0x400000>,
+ <0x0 0x31120000 0x0 0x100>,
+ <0x0 0x33000000 0x0 0x40000>;
+ reg-names = "rt", "fifos",
+ "proxy_gcfg", "proxy_target";
+ ti,num-rings = <818>;
+ ti,sci-rm-range-gp-rings = <0x2>; /* GP ring range */
+ ti,dma-ring-reset-quirk;
+ ti,sci = <&dmsc>;
+ ti,sci-dev-id = <187>;
+ msi-parent = <&inta_main_udmass>;
+};
+
+client:
+
+dma_ipx: dma_ipx@<addr> {
+ ...
+ ti,ringacc = <&ringacc>;
+ ...
+}
- clock-names: Should be "clk_apb5".
- pinctrl-names : a pinctrl state named "default" must be defined.
- pinctrl-0 : phandle referencing pin configuration of the device.
+ - resets : phandle to the reset control for this device.
- cs-gpios: Specifies the gpio pins to be used for chipselects.
See: Documentation/devicetree/bindings/spi/spi-bus.txt
- clock-frequency : Input clock frequency to the PSPI block in Hz.
Default is 25000000 Hz.
-Aliases:
-- All the SPI controller nodes should be represented in the aliases node using
- the following format 'spi{n}' withe the correct numbered in "aliases" node.
-
-Example:
-
-aliases {
- spi0 = &spi0;
-};
-
spi0: spi@f0200000 {
compatible = "nuvoton,npcm750-pspi";
reg = <0xf0200000 0x1000>;
interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk NPCM7XX_CLK_APB5>;
clock-names = "clk_apb5";
+ resets = <&rstc NPCM7XX_RESET_IPSRST2 NPCM7XX_RESET_PSPI1>;
cs-gpios = <&gpio6 11 GPIO_ACTIVE_LOW>;
};
spi-rx-bus-width:
allOf:
- $ref: /schemas/types.yaml#/definitions/uint32
- - enum: [ 1, 2, 4 ]
+ - enum: [ 1, 2, 4, 8 ]
- default: 1
description:
Bus width to the SPI bus used for MISO.
spi-tx-bus-width:
allOf:
- $ref: /schemas/types.yaml#/definitions/uint32
- - enum: [ 1, 2, 4 ]
+ - enum: [ 1, 2, 4, 8 ]
- default: 1
description:
Bus width to the SPI bus used for MOSI.
+++ /dev/null
-STMicroelectronics STM32 SPI Controller
-
-The STM32 SPI controller is used to communicate with external devices using
-the Serial Peripheral Interface. It supports full-duplex, half-duplex and
-simplex synchronous serial communication with external devices. It supports
-from 4 to 32-bit data size. Although it can be configured as master or slave,
-only master is supported by the driver.
-
-Required properties:
-- compatible: Should be one of:
- "st,stm32h7-spi"
- "st,stm32f4-spi"
-- reg: Offset and length of the device's register set.
-- interrupts: Must contain the interrupt id.
-- clocks: Must contain an entry for spiclk (which feeds the internal clock
- generator).
-- #address-cells: Number of cells required to define a chip select address.
-- #size-cells: Should be zero.
-
-Optional properties:
-- resets: Must contain the phandle to the reset controller.
-- A pinctrl state named "default" may be defined to set pins in mode of
- operation for SPI transfer.
-- dmas: DMA specifiers for tx and rx dma. DMA fifo mode must be used. See the
- STM32 DMA bindings, Documentation/devicetree/bindings/dma/stm32-dma.txt.
-- dma-names: DMA request names should include "tx" and "rx" if present.
-- cs-gpios: list of GPIO chip selects. See the SPI bus bindings,
- Documentation/devicetree/bindings/spi/spi-bus.txt
-
-
-Child nodes represent devices on the SPI bus
- See ../spi/spi-bus.txt
-
-Optional properties:
-- st,spi-midi-ns: Only for STM32H7, (Master Inter-Data Idleness) minimum time
- delay in nanoseconds inserted between two consecutive data
- frames.
-
-
-Example:
- spi2: spi@40003800 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "st,stm32h7-spi";
- reg = <0x40003800 0x400>;
- interrupts = <36>;
- clocks = <&rcc SPI2_CK>;
- resets = <&rcc 1166>;
- dmas = <&dmamux1 0 39 0x400 0x01>,
- <&dmamux1 1 40 0x400 0x01>;
- dma-names = "rx", "tx";
- pinctrl-0 = <&spi2_pins_b>;
- pinctrl-names = "default";
- cs-gpios = <&gpioa 11 0>;
-
- aardvark@0 {
- compatible = "totalphase,aardvark";
- reg = <0>;
- spi-max-frequency = <4000000>;
- st,spi-midi-ns = <4000>;
- };
- };
Atmel SPI device
Required properties:
-- compatible : should be "atmel,at91rm9200-spi".
+- compatible : should be "atmel,at91rm9200-spi" or "microchip,sam9x60-spi".
- reg: Address and length of the register set for the device
- interrupts: Should contain spi interrupt
- cs-gpios: chipselects (optional for SPI controller version >= 2 with the
--- /dev/null
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spi/st,stm32-spi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics STM32 SPI Controller bindings
+
+description: |
+ The STM32 SPI controller is used to communicate with external devices using
+ the Serial Peripheral Interface. It supports full-duplex, half-duplex and
+ simplex synchronous serial communication with external devices. It supports
+ from 4 to 32-bit data size.
+
+maintainers:
+ - Erwan Leray <erwan.leray@st.com>
+ - Fabrice Gasnier <fabrice.gasnier@st.com>
+
+allOf:
+ - $ref: "spi-controller.yaml#"
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: st,stm32f4-spi
+
+ then:
+ properties:
+ st,spi-midi-ns: false
+
+properties:
+ compatible:
+ enum:
+ - st,stm32f4-spi
+ - st,stm32h7-spi
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ dmas:
+ description: |
+ DMA specifiers for tx and rx dma. DMA fifo mode must be used. See
+ the STM32 DMA bindings, Documentation/devicetree/bindings/dma/stm32-dma.txt.
+ items:
+ - description: rx DMA channel
+ - description: tx DMA channel
+
+ dma-names:
+ items:
+ - const: rx
+ - const: tx
+
+patternProperties:
+ "^[a-zA-Z][a-zA-Z0-9,+\\-._]{0,63}@[0-9a-f]+$":
+ type: object
+ # SPI slave nodes must be children of the SPI master node and can
+ # contain the following properties.
+ properties:
+ st,spi-midi-ns:
+ description: |
+ Only for STM32H7: Master Inter-Data Idleness, the minimum time
+ delay in nanoseconds inserted between two consecutive data frames.
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - interrupts
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/stm32mp1-clks.h>
+ #include <dt-bindings/reset/stm32mp1-resets.h>
+ spi@4000b000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32h7-spi";
+ reg = <0x4000b000 0x400>;
+ interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc SPI2_K>;
+ resets = <&rcc SPI2_R>;
+ dmas = <&dmamux1 0 39 0x400 0x05>,
+ <&dmamux1 1 40 0x400 0x05>;
+ dma-names = "rx", "tx";
+ cs-gpios = <&gpioa 11 0>;
+
+ aardvark@0 {
+ compatible = "totalphase,aardvark";
+ reg = <0>;
+ spi-max-frequency = <4000000>;
+ st,spi-midi-ns = <4000>;
+ };
+ };
+
+...
Note that callbacks will always be invoked from the DMA
engines tasklet, never from interrupt context.
+ Optional: per descriptor metadata
+ ---------------------------------
+ DMAengine provides two ways for metadata support.
+
+ DESC_METADATA_CLIENT
+
+ The metadata buffer is allocated/provided by the client driver and it is
+ attached to the descriptor.
+
+ .. code-block:: c
+
+ int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc,
+ void *data, size_t len);
+
+ DESC_METADATA_ENGINE
+
+ The metadata buffer is allocated/managed by the DMA driver. The client
+ driver can ask for the pointer, maximum size and the currently used size of
+ the metadata and can directly update or read it.
+
+ Because the DMA driver manages the memory area containing the metadata,
+ clients must make sure that they do not try to access or get the pointer
+ after their transfer completion callback has run for the descriptor.
+ If no completion callback has been defined for the transfer, then the
+ metadata must not be accessed after issue_pending.
+ In other words: if the aim is to read back metadata after the transfer is
+ completed, then the client must use the completion callback.
+
+ .. code-block:: c
+
+ void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
+ size_t *payload_len, size_t *max_len);
+
+ int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
+ size_t payload_len);
+
+ Client drivers can query if a given mode is supported with:
+
+ .. code-block:: c
+
+ bool dmaengine_is_metadata_mode_supported(struct dma_chan *chan,
+ enum dma_desc_metadata_mode mode);
+
+ Depending on the mode used, client drivers must follow a different flow,
+ as sketched below.
+
+ DESC_METADATA_CLIENT
+
+ - DMA_MEM_TO_DEV / DEV_MEM_TO_MEM:
+ 1. prepare the descriptor (dmaengine_prep_*)
+ construct the metadata in the client's buffer
+ 2. use dmaengine_desc_attach_metadata() to attach the buffer to the
+ descriptor
+ 3. submit the transfer
+ - DMA_DEV_TO_MEM:
+ 1. prepare the descriptor (dmaengine_prep_*)
+ 2. use dmaengine_desc_attach_metadata() to attach the buffer to the
+ descriptor
+ 3. submit the transfer
+ 4. when the transfer is completed, the metadata should be available in the
+ attached buffer
+
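+ As an illustration only (chan, buf_dma, buf_len and the client-owned
+ md_buf / md_len are assumed to have been set up earlier by the client
+ driver), a DMA_DEV_TO_MEM transfer in this mode could look like:
+
+ .. code-block:: c
+
+    struct dma_async_tx_descriptor *desc;
+    int ret;
+
+    /* 1. prepare the descriptor */
+    desc = dmaengine_prep_slave_single(chan, buf_dma, buf_len,
+                                       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
+    if (!desc)
+            return -EINVAL;
+
+    /* 2. attach the client-allocated metadata buffer */
+    ret = dmaengine_desc_attach_metadata(desc, md_buf, md_len);
+    if (ret)
+            return ret;
+
+    /* 3. submit the transfer */
+    dmaengine_submit(desc);
+    dma_async_issue_pending(chan);
+
+    /* 4. after completion, the metadata is available in md_buf */
+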
+ DESC_METADATA_ENGINE
+
+ - DMA_MEM_TO_DEV / DEV_MEM_TO_MEM:
+ 1. prepare the descriptor (dmaengine_prep_*)
+ 2. use dmaengine_desc_get_metadata_ptr() to get the pointer to the
+ engine's metadata area
+ 3. update the metadata at the pointer
+ 4. use dmaengine_desc_set_metadata_len() to tell the DMA engine the
+ amount of data the client has placed into the metadata buffer
+ 5. submit the transfer
+ - DMA_DEV_TO_MEM:
+ 1. prepare the descriptor (dmaengine_prep_*)
+ 2. submit the transfer
+ 3. on transfer completion, use dmaengine_desc_get_metadata_ptr() to get
+ the pointer to the engine's metadata area
+ 4. read out the metadata from the pointer
+
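+ As a sketch of the DMA_MEM_TO_DEV case (chan, buf_dma and buf_len are
+ again assumed to exist, and my_metadata is a hypothetical 12 byte blob
+ that the peripheral expects):
+
+ .. code-block:: c
+
+    struct dma_async_tx_descriptor *desc;
+    size_t payload_len, max_len;
+    void *mdptr;
+    int ret;
+
+    desc = dmaengine_prep_slave_single(chan, buf_dma, buf_len,
+                                       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
+    if (!desc)
+            return -EINVAL;
+
+    /* get the engine-managed metadata area */
+    mdptr = dmaengine_desc_get_metadata_ptr(desc, &payload_len, &max_len);
+    if (IS_ERR(mdptr))
+            return PTR_ERR(mdptr);
+
+    /* update the metadata, then tell the engine how much was written */
+    memcpy(mdptr, my_metadata, 12);
+    ret = dmaengine_desc_set_metadata_len(desc, 12);
+    if (ret)
+            return ret;
+
+    dmaengine_submit(desc);
+    dma_async_issue_pending(chan);
+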
+ .. note::
+
+ When DESC_METADATA_ENGINE mode is used, the metadata area for the descriptor
+ is no longer valid after the transfer has been completed (valid up to the
+ point when the completion callback returns if used).
+
+ Mixed use of DESC_METADATA_CLIENT / DESC_METADATA_ENGINE is not allowed;
+ client drivers must use one of the two modes per descriptor.
+
4. Submit the transaction
Once the descriptor has been prepared and the callback information
(DMA_CYCLIC). Addresses pointing to a device's register (e.g. a FIFO)
are typically fixed.
+Per descriptor metadata support
+-------------------------------
+Some data movement architectures (DMA controller and peripherals) use metadata
+associated with a transaction. The DMA controller's role is to transfer the
+payload and the metadata alongside it.
+The metadata itself is not used by the DMA engine, but it contains
+parameters, keys, vectors, etc. for the peripheral, or from the peripheral.
+
+The DMAengine framework provides generic ways to facilitate metadata for
+descriptors. Depending on the architecture, the DMA driver can implement either
+or both of the methods, and it is up to the client driver to choose which one
+to use.
+
+- DESC_METADATA_CLIENT
+
+ The metadata buffer is allocated/provided by the client driver and it is
+ attached (via the dmaengine_desc_attach_metadata() helper) to the descriptor.
+
+ From the DMA driver the following is expected for this mode:
+ - DMA_MEM_TO_DEV / DEV_MEM_TO_MEM
+ The data from the provided metadata buffer should be prepared for the DMA
+ controller to be sent alongside the payload data, either by copying it to a
+ hardware descriptor or into a closely coupled packet.
+ - DMA_DEV_TO_MEM
+ On transfer completion the DMA driver must copy the metadata to the client
+ provided metadata buffer before notifying the client about the completion.
+ After the transfer completion, DMA drivers must not touch the metadata
+ buffer provided by the client.
+
+- DESC_METADATA_ENGINE
+
+ The metadata buffer is allocated/managed by the DMA driver. The client driver
+ can ask for the pointer, maximum size and the currently used size of the
+ metadata and can directly update or read it. dmaengine_desc_get_metadata_ptr()
+ and dmaengine_desc_set_metadata_len() are provided as helper functions.
+
+ From the DMA driver the following is expected for this mode:
+ - get_metadata_ptr
+ Should return a pointer for the metadata buffer, the maximum size of the
+ metadata buffer and the currently used / valid (if any) bytes in the buffer.
+ - set_metadata_len
+ It is called by the client after it has placed the metadata in the buffer
+ to let the DMA driver know the number of valid bytes provided (see the
+ sketch below).
+
+ Note: since the client will ask for the metadata pointer in the completion
+ callback (in the DMA_DEV_TO_MEM case), the DMA driver must ensure that the
+ descriptor is not freed up before the callback is called.
+
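+As a rough sketch only (the callback names follow the description above,
+but the exact structure and field names must be checked against the
+dmaengine headers of the kernel in question), a DMA driver offering
+DESC_METADATA_ENGINE support could wire up its callbacks along these
+lines::
+
+  static void *xx_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
+                                   size_t *payload_len, size_t *max_len);
+  static int xx_set_metadata_len(struct dma_async_tx_descriptor *desc,
+                                 size_t payload_len);
+
+  static struct dma_descriptor_metadata_ops xx_metadata_ops = {
+          .get_ptr = xx_get_metadata_ptr,
+          .set_len = xx_set_metadata_len,
+  };
+
+  /* in the prep_* callback, before returning the descriptor: */
+  desc->metadata_ops = &xx_metadata_ops;
+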
Device operations
-----------------
DMA controllers enumerated via ACPI should be registered in the system to
provide generic access to their resources. For example, a driver that would
like to be accessible to slave devices via generic API call
-dma_request_slave_channel() must register itself at the end of the probe
-function like this::
+dma_request_chan() must register itself at the end of the probe function like
+this::
err = devm_acpi_dma_controller_register(dev, xlate_func, dw);
/* Handle the error if it's not a case of !CONFIG_ACPI */
}
#endif
-dma_request_slave_channel() will call xlate_func() for each registered DMA
-controller. In the xlate function the proper channel must be chosen based on
+dma_request_chan() will call xlate_func() for each registered DMA controller.
+In the xlate function the proper channel must be chosen based on
information in struct acpi_dma_spec and the properties of the controller
provided by struct acpi_dma.
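+For the client side discussed below, a minimal request sketch (the dev
+pointer and the surrounding error handling are only illustrative) could
+be::
+
+  struct dma_chan *chan;
+
+  /* "tx" maps to the first FixedDMA entry by default */
+  chan = dma_request_chan(dev, "tx");
+  if (IS_ERR(chan))
+          return PTR_ERR(chan);
+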
-Clients must call dma_request_slave_channel() with the string parameter that
-corresponds to a specific FixedDMA resource. By default "tx" means the first
-entry of the FixedDMA resource array, "rx" means the second entry. The table
-below shows a layout::
+Clients must call dma_request_chan() with the string parameter that corresponds
+to a specific FixedDMA resource. By default "tx" means the first entry of the
+FixedDMA resource array, "rx" means the second entry. The table below shows a
+layout::
Device (I2C0)
{
--- /dev/null
+Kernel driver adm1177
+=====================
+
+Supported chips:
+ * Analog Devices ADM1177
+ Prefix: 'adm1177'
+ Datasheet: https://www.analog.com/media/en/technical-documentation/data-sheets/ADM1177.pdf
+
+Author: Beniamin Bia <beniamin.bia@analog.com>
+
+
+Description
+-----------
+
+This driver supports hardware monitoring for Analog Devices ADM1177
+Hot-Swap Controller and Digital Power Monitors with Soft Start Pin.
+
+
+Usage Notes
+-----------
+
+This driver does not auto-detect devices. You will have to instantiate the
+devices explicitly. Please see Documentation/i2c/instantiating-devices for
+details.
+
+
+Sysfs entries
+-------------
+
+The following attributes are supported. The current maximum attribute
+is read-write; all other attributes are read-only.
+
+in0_input Measured voltage in microvolts.
+
+curr1_input Measured current in microamperes.
+curr1_max_alarm Overcurrent alarm in microamperes.
--- /dev/null
+.. SPDX-License-Identifier: GPL-2.0
+
+Kernel driver drivetemp
+=======================
+
+
+References
+----------
+
+ANS T13/1699-D
+Information technology - AT Attachment 8 - ATA/ATAPI Command Set (ATA8-ACS)
+
+ANS Project T10/BSR INCITS 513
+Information technology - SCSI Primary Commands - 4 (SPC-4)
+
+ANS Project INCITS 557
+Information technology - SCSI / ATA Translation - 5 (SAT-5)
+
+
+Description
+-----------
+
+This driver supports reporting the temperature of disk and solid state
+drives with temperature sensors.
+
+If supported, it uses the ATA SCT Command Transport feature to read
+the current drive temperature and, if available, temperature limits
+as well as historic minimum and maximum temperatures. If SCT Command
+Transport is not supported, the driver uses SMART attributes to read
+the drive temperature.
+
+
+Sysfs entries
+-------------
+
+Only the temp1_input attribute is always available. Other attributes are
+available only if reported by the drive. All temperatures are reported in
+milli-degrees Celsius.
+
+======================= =====================================================
+temp1_input Current drive temperature
+temp1_lcrit Minimum temperature limit. Operating the device below
+ this temperature may cause physical damage to the
+ device.
+temp1_min Minimum recommended continuous operating limit
+temp1_max Maximum recommended continuous operating temperature
+temp1_crit Maximum temperature limit. Operating the device above
+ this temperature may cause physical damage to the
+ device.
+temp1_lowest Minimum temperature seen this power cycle
+temp1_highest Maximum temperature seen this power cycle
+======================= =====================================================
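+
+A hypothetical userspace sketch that reads and scales the current drive
+temperature (the hwmon index in the path varies per system and is only
+an assumption here)::
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+          FILE *f = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r");
+          long mdeg;
+
+          if (!f || fscanf(f, "%ld", &mdeg) != 1)
+                  return 1;
+          fclose(f);
+          /* values are reported in milli-degrees Celsius */
+          printf("drive temperature: %.1f degC\n", mdeg / 1000.0);
+          return 0;
+  }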
adm1025
adm1026
adm1031
+ adm1177
adm1275
adm9240
ads7828
da9055
dell-smm-hwmon
dme1737
+ drivetemp
ds1621
ds620
emc1403
max1619
max1668
max197
+ max20730
max20751
max31722
+ max31730
max31785
max31790
max34440
wm831x
wm8350
xgene-hwmon
+ xdpe12284
zl6100
.. only:: subproject and html
--- /dev/null
+.. SPDX-License-Identifier: GPL-2.0-or-later
+
+Kernel driver max20730
+======================
+
+Supported chips:
+
+ * Maxim MAX20730
+
+ Prefix: 'max20730'
+
+ Addresses scanned: -
+
+ Datasheet: https://datasheets.maximintegrated.com/en/ds/MAX20730.pdf
+
+ * Maxim MAX20734
+
+ Prefix: 'max20734'
+
+ Addresses scanned: -
+
+ Datasheet: https://datasheets.maximintegrated.com/en/ds/MAX20734.pdf
+
+ * Maxim MAX20743
+
+ Prefix: 'max20743'
+
+ Addresses scanned: -
+
+ Datasheet: https://datasheets.maximintegrated.com/en/ds/MAX20743.pdf
+
+Author: Guenter Roeck <linux@roeck-us.net>
+
+
+Description
+-----------
+
+This driver implements support for Maxim MAX20730, MAX20734, and MAX20743
+Integrated, Step-Down Switching Regulators with PMBus support.
+
+The driver is a client driver to the core PMBus driver.
+Please see Documentation/hwmon/pmbus.rst for details on PMBus client drivers.
+
+
+Usage Notes
+-----------
+
+This driver does not auto-detect devices. You will have to instantiate the
+devices explicitly. Please see Documentation/i2c/instantiating-devices.rst for
+details.
+
+
+Sysfs entries
+-------------
+
+=================== ===== =======================================================
+curr1_crit RW/RO Critical output current. Please see datasheet for
+ supported limits. Read-only if the chip is
+ write protected; read-write otherwise.
+curr1_crit_alarm RO Output current critical alarm
+curr1_input RO Output current
+curr1_label RO 'iout1'
+in1_alarm RO Input voltage alarm
+in1_input RO Input voltage
+in1_label RO 'vin'
+in2_alarm RO Output voltage alarm
+in2_input RO Output voltage
+in2_label RO 'vout1'
+temp1_crit RW/RO Critical temperature. Supported values are 130 or 150
+ degrees C. Read-only if the chip is write protected;
+ read-write otherwise.
+temp1_crit_alarm RO Temperature critical alarm
+temp1_input RO Chip temperature
+=================== ===== =======================================================
--- /dev/null
+Kernel driver max31730
+======================
+
+Supported chips:
+
+ * Maxim MAX31730
+
+ Prefix: 'max31730'
+
+ Addresses scanned: 0x1c, 0x1d, 0x1e, 0x1f, 0x4c, 0x4d, 0x4e, 0x4f
+
+ Datasheet: https://datasheets.maximintegrated.com/en/ds/MAX31730.pdf
+
+Author: Guenter Roeck <linux@roeck-us.net>
+
+
+Description
+-----------
+
+This driver implements support for Maxim MAX31730.
+
+The MAX31730 temperature sensor monitors its own temperature and the
+temperatures of three external diode-connected transistors. The operating
+supply voltage is from 3.0V to 3.6V. Resistance cancellation compensates
+for high series resistance in circuit-board traces and the external thermal
+diode, while beta compensation corrects for temperature-measurement
+errors due to low-beta sensing transistors.
+
+
+Sysfs entries
+-------------
+
+=================== == =======================================================
+temp[1-4]_enable RW Temperature enable/disable
+ Set to 1 to enable channel, 0 to disable
+temp[1-4]_input RO Temperature input
+temp[2-4]_fault RO Fault indicator for remote channels
+temp[1-4]_max RW Maximum temperature
+temp[1-4]_max_alarm RW Maximum temperature alarm
+temp[1-4]_min RW Minimum temperature. Common for all channels.
+ Only temp1_min is writeable.
+temp[1-4]_min_alarm RO Minimum temperature alarm
+temp[2-4]_offset RW Temperature offset for remote channels
+=================== == =======================================================
http://www.ti.com/lit/gpn/tps544c25
+ * Maxim MAX20796
+
+ Prefix: 'max20796'
+
+ Addresses scanned: -
+
+ Datasheet:
+
+ Not published
+
* Generic PMBus devices
Prefix: 'pmbus'
Supported chips:
- * TI UCD90120, UCD90124, UCD90160, UCD9090, and UCD90910
+ * TI UCD90120, UCD90124, UCD90160, UCD90320, UCD9090, and UCD90910
- Prefixes: 'ucd90120', 'ucd90124', 'ucd90160', 'ucd9090', 'ucd90910'
+ Prefixes: 'ucd90120', 'ucd90124', 'ucd90160', 'ucd90320', 'ucd9090',
+ 'ucd90910'
Addresses scanned: -
- http://focus.ti.com/lit/ds/symlink/ucd90120.pdf
- http://focus.ti.com/lit/ds/symlink/ucd90124.pdf
- http://focus.ti.com/lit/ds/symlink/ucd90160.pdf
+ - http://focus.ti.com/lit/ds/symlink/ucd90320.pdf
- http://focus.ti.com/lit/ds/symlink/ucd9090.pdf
- http://focus.ti.com/lit/ds/symlink/ucd90910.pdf
functions. Twelve of these pins offer PWM functionality. Using these pins, the
UCD90160 offers support for margining, and general-purpose PWM functions.
+The UCD90320 is a 32-rail PMBus/I2C addressable power-supply sequencer and
+monitor. The 24 integrated ADC channels (AMONx) monitor the power supply
+voltage, current, and temperature. Of the 84 GPIO pins, 8 can be used as
+digital monitors (DMONx), 32 to enable the power supply (ENx), 24 for margining
+(MARx), 16 for logical GPO, and 32 as GPIs for cascading and system functions.
+
The UCD9090 is a 10-rail PMBus/I2C addressable power-supply sequencer and
monitor. The device integrates a 12-bit ADC for monitoring up to 10 power-supply
voltage inputs. Twenty-three GPIO pins can be used for power supply enables,
--- /dev/null
+.. SPDX-License-Identifier: GPL-2.0
+
+Kernel driver xdpe122
+=====================
+
+Supported chips:
+
+ * Infineon XDPE12254
+
+ Prefix: 'xdpe12254'
+
+ * Infineon XDPE12284
+
+ Prefix: 'xdpe12284'
+
+Authors:
+
+ Vadim Pasternak <vadimp@mellanox.com>
+
+Description
+-----------
+
+This driver implements support for the Infineon multi-phase XDPE122 family
+of dual-loop voltage regulators.
+The family includes the XDPE12284 and XDPE12254 devices.
+The devices from this family are compliant with:
+- Intel VR13 and VR13HC rev 1.3, IMVP8 rev 1.2 and IMVP9 rev 1.3 DC-DC
+  converter specification.
+- Intel SVID rev 1.9 protocol.
+- PMBus rev 1.3 interface.
+
+The devices support the linear data format for reading input voltage, input
+and output current, input and output power, and temperature.
+Output voltage is read in VID format. The following modes are
+supported:
+- VR12.0 mode, 5-mV DAC - 0x01.
+- VR12.5 mode, 10-mV DAC - 0x02.
+- IMVP9 mode, 5-mV DAC - 0x03.
+- AMD mode 6.25mV - 0x10.
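+
+As an illustration of the VID convention, the VR12.0 5-mV mode follows the
+usual Intel mapping of a 250 mV base plus 5 mV per step; the sketch below
+shows the idea and is not the driver's exact code::
+
+	/* VR12.0, 5-mV DAC: code 0x00 means "off", 0x01 maps to 250 mV */
+	static long vr12_vid_to_mv(unsigned int code)
+	{
+		if (!code)
+			return 0;
+		return 250 + (code - 1) * 5;	/* result in millivolts */
+	}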
+
+The devices support two pages for telemetry.
+
+For current, the driver provides input values, maximum and critical
+thresholds, and maximum and critical alarms. Critical thresholds and the
+critical alarm are supported only for output current.
+The driver exports the following attributes via the sysfs files, where
+indexes 1, 2 are for "iin" and 3, 4 for "iout":
+
+**curr[3-4]_crit**
+
+**curr[3-4]_crit_alarm**
+
+**curr[1-4]_input**
+
+**curr[1-4]_label**
+
+**curr[1-4]_max**
+
+**curr[1-4]_max_alarm**
+
+For voltage, the driver provides input values, critical and low critical
+thresholds, and critical and low critical alarms.
+The driver exports the following attributes via the sysfs files, where
+indexes 1, 2 are for "vin" and 3, 4 for "vout":
+
+**in[1-4]_crit**
+
+**in[1-4]_crit_alarm**
+
+**in[1-4]_input**
+
+**in[1-4]_label**
+
+**in[1-4]_lcrit**
+
+**in[1-4]_lcrit_alarm**
+
+For power, the driver provides input values and alarms. The power alarm is
+supported only for input power.
+The driver exports the following attributes via the sysfs files, where
+indexes 1, 2 are for "pin" and 3, 4 for "pout":
+
+**power[1-2]_alarm**
+
+**power[1-4]_input**
+
+**power[1-4]_label**
+
+For temperature, the driver provides input values, maximum and critical
+thresholds, and maximum and critical alarms.
+The driver exports the following attributes via the sysfs files:
+
+**temp[1-2]_crit**
+
+**temp[1-2]_crit_alarm**
+
+**temp[1-2]_input**
+
+**temp[1-2]_max**
+
+**temp[1-2]_max_alarm**
Besides the video4linux interface, the driver has a private interface
for accessing the Motion Eye extended parameters (camera sharpness,
-agc, video framerate), the shapshot and the MJPEG capture facilities.
+agc, video framerate), the snapshot and the MJPEG capture facilities.
This interface consists of several ioctls (prototypes and structures
can be found in include/linux/meye.h):
with the current initial RTO of 1 second. With this the final timeout
for a passive TCP connection will happen after 63 seconds.
-tcp_syncookies - BOOLEAN
+tcp_syncookies - INTEGER
Only valid when the kernel was compiled with CONFIG_SYN_COOKIES
Send out syncookies when the syn backlog queue of a socket
overflows. This is to prevent against the common 'SYN flood attack'
mainline tree from Linus, and ``net-next`` is where the new code goes
for the future release. You can find the trees here:
-- https://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
-- https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
+- https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
+- https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
Q: How often do changes from these trees make it to the mainline Linus tree?
----------------------------------------------------------------------------
Red Hat Josh Poimboeuf <jpoimboe@redhat.com>
SUSE Jiri Kosina <jkosina@suse.cz>
- Amazon
+ Amazon Peter Bowen <pzb@amzn.com>
Google Kees Cook <keescook@chromium.org>
============= ========================================================
ACPI COMPONENT ARCHITECTURE (ACPICA)
M: Robert Moore <robert.moore@intel.com>
-M: Erik Schmauss <erik.schmauss@intel.com>
+M: Erik Kaneda <erik.kaneda@intel.com>
M: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
L: linux-acpi@vger.kernel.org
L: devel@acpica.org
F: drivers/i2c/busses/i2c-altera.c
ALTERA MAILBOX DRIVER
-M: Ley Foon Tan <lftan@altera.com>
+M: Ley Foon Tan <ley.foon.tan@intel.com>
L: nios2-dev@lists.rocketboards.org (moderated for non-subscribers)
S: Maintained
F: drivers/mailbox/mailbox-altera.c
F: drivers/iio/imu/adis16460.c
F: Documentation/devicetree/bindings/iio/imu/adi,adis16460.yaml
+ANALOG DEVICES INC ADM1177 DRIVER
+M: Beniamin Bia <beniamin.bia@analog.com>
+M: Michael Hennerich <Michael.Hennerich@analog.com>
+L: linux-hwmon@vger.kernel.org
+W: http://ez.analog.com/community/linux-device-drivers
+S: Supported
+F: drivers/hwmon/adm1177.c
+F: Documentation/devicetree/bindings/hwmon/adi,adm1177.yaml
+
ANALOG DEVICES INC ADP5061 DRIVER
M: Stefan Popa <stefan.popa@analog.com>
L: linux-pm@vger.kernel.org
ARM/ACTIONS SEMI ARCHITECTURE
M: Andreas Färber <afaerber@suse.de>
-R: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
N: owl
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmind/linux-rockchip.git
S: Maintained
F: Documentation/devicetree/bindings/i2c/i2c-rk3x.txt
+F: Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.yaml
F: arch/arm/boot/dts/rk3*
F: arch/arm/boot/dts/rv1108*
F: arch/arm/mach-rockchip/
F: arch/mips/net/
BPF JIT for NFP NICs
-M: Jakub Kicinski <jakub.kicinski@netronome.com>
+M: Jakub Kicinski <kuba@kernel.org>
L: netdev@vger.kernel.org
L: bpf@vger.kernel.org
S: Supported
M: Andrew Lunn <andrew@lunn.ch>
M: Florian Fainelli <f.fainelli@gmail.com>
M: Heiner Kallweit <hkallweit1@gmail.com>
+R: Russell King <linux@armlinux.org.uk>
L: netdev@vger.kernel.org
S: Maintained
F: Documentation/ABI/testing/sysfs-class-net-phydev
F: drivers/scsi/hisi_sas/
F: Documentation/devicetree/bindings/scsi/hisilicon-sas.txt
+HISILICON V3XX SPI NOR FLASH Controller Driver
+M: John Garry <john.garry@huawei.com>
+W: http://www.hisilicon.com
+S: Maintained
+F: drivers/spi/spi-hisi-sfc-v3xx.c
+
HISILICON QM AND ZIP Controller DRIVER
M: Zhou Wang <wangzhou1@hisilicon.com>
L: linux-crypto@vger.kernel.org
F: drivers/i3c/master/dw*
I3C DRIVER FOR CADENCE I3C MASTER IP
-M: Przemysław Gaj <pgaj@cadence.com>
-S: Maintained
-F: Documentation/devicetree/bindings/i3c/cdns,i3c-master.txt
-F: drivers/i3c/master/i3c-master-cdns.c
+M: Przemysław Gaj <pgaj@cadence.com>
+S: Maintained
+F: Documentation/devicetree/bindings/i3c/cdns,i3c-master.txt
+F: drivers/i3c/master/i3c-master-cdns.c
IA64 (Itanium) PLATFORM
M: Tony Luck <tony.luck@intel.com>
S: Supported
F: drivers/dma/ioat*
+INTEL IDXD DRIVER
+M: Dave Jiang <dave.jiang@intel.com>
+L: dmaengine@vger.kernel.org
+S: Supported
+F: drivers/dma/idxd/*
+F: include/uapi/linux/idxd.h
+F: include/linux/idxd.h
+
INTEL IDLE DRIVER
M: Jacob Pan <jacob.jun.pan@linux.intel.com>
M: Len Brown <lenb@kernel.org>
F: arch/x86/include/asm/intel_telemetry.h
F: drivers/platform/x86/intel_telemetry*
+INTEL UNCORE FREQUENCY CONTROL
+M: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+L: platform-driver-x86@vger.kernel.org
+S: Maintained
+F: drivers/platform/x86/intel-uncore-frequency.c
+
INTEL VIRTUAL BUTTON DRIVER
M: AceLan Kao <acelan.kao@canonical.com>
L: platform-driver-x86@vger.kernel.org
F: drivers/platform/x86/intel-vbtn.c
INTEL WIRELESS 3945ABG/BG, 4965AGN (iwlegacy)
-M: Stanislaw Gruszka <sgruszka@redhat.com>
+M: Stanislaw Gruszka <stf_xl@wp.pl>
L: linux-wireless@vger.kernel.org
S: Supported
F: drivers/net/wireless/intel/iwlegacy/
F: Documentation/driver-api/serial/moxa-smartio.rst
F: drivers/tty/mxser.*
+MONOLITHIC POWER SYSTEM PMIC DRIVER
+M: Saravanan Sekar <sravanhome@gmail.com>
+S: Maintained
+F: Documentation/devicetree/bindings/regulator/mpq7920.yaml
+F: drivers/regulator/mpq7920.c
+F: drivers/regulator/mpq7920.h
+
MR800 AVERMEDIA USB FM RADIO DRIVER
M: Alexey Klimov <klimov.linux@gmail.com>
L: linux-media@vger.kernel.org
F: net/netrom/
NETRONOME ETHERNET DRIVERS
-M: Jakub Kicinski <jakub.kicinski@netronome.com>
+M: Jakub Kicinski <kuba@kernel.org>
L: oss-drivers@netronome.com
S: Maintained
F: drivers/net/ethernet/netronome/
L: netdev@vger.kernel.org
W: http://www.linuxfoundation.org/en/Net
Q: http://patchwork.ozlabs.org/project/netdev/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
S: Odd Fixes
F: Documentation/devicetree/bindings/net/
F: drivers/net/
NETWORKING [GENERAL]
M: "David S. Miller" <davem@davemloft.net>
+M: Jakub Kicinski <kuba@kernel.org>
L: netdev@vger.kernel.org
W: http://www.linuxfoundation.org/en/Net
Q: http://patchwork.ozlabs.org/project/netdev/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
B: mailto:netdev@vger.kernel.org
S: Maintained
F: net/
M: Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
M: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
L: netdev@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
S: Maintained
F: net/ipv4/
F: net/ipv6/
M: Aviad Yehezkel <aviadye@mellanox.com>
M: John Fastabend <john.fastabend@gmail.com>
M: Daniel Borkmann <daniel@iogearbox.net>
-M: Jakub Kicinski <jakub.kicinski@netronome.com>
+M: Jakub Kicinski <kuba@kernel.org>
L: netdev@vger.kernel.org
S: Maintained
F: net/tls/*
Q: http://patchwork.kernel.org/project/linux-wireless/list/
NETDEVSIM
-M: Jakub Kicinski <jakub.kicinski@netronome.com>
+M: Jakub Kicinski <kuba@kernel.org>
S: Maintained
F: drivers/net/netdevsim/*
F: drivers/scsi/nsp32*
NIOS2 ARCHITECTURE
-M: Ley Foon Tan <lftan@altera.com>
+M: Ley Foon Tan <ley.foon.tan@intel.com>
L: nios2-dev@lists.rocketboards.org (moderated for non-subscribers)
T: git git://git.kernel.org/pub/scm/linux/kernel/git/lftan/nios2.git
S: Maintained
F: drivers/pci/controller/pci-aardvark.c
PCI DRIVER FOR ALTERA PCIE IP
-M: Ley Foon Tan <lftan@altera.com>
+M: Ley Foon Tan <ley.foon.tan@intel.com>
L: rfi@lists.rocketboards.org (moderated for non-subscribers)
L: linux-pci@vger.kernel.org
S: Supported
F: Documentation/PCI/pci-error-recovery.rst
PCI MSI DRIVER FOR ALTERA MSI IP
-M: Ley Foon Tan <lftan@altera.com>
+M: Ley Foon Tan <ley.foon.tan@intel.com>
L: rfi@lists.rocketboards.org (moderated for non-subscribers)
L: linux-pci@vger.kernel.org
S: Supported
F: drivers/iio/chemical/pms7003.c
F: Documentation/devicetree/bindings/iio/chemical/plantower,pms7003.yaml
+PLX DMA DRIVER
+M: Logan Gunthorpe <logang@deltatee.com>
+S: Maintained
+F: drivers/dma/plx_dma.c
+
PMBUS HARDWARE MONITORING DRIVERS
M: Guenter Roeck <linux@roeck-us.net>
L: linux-hwmon@vger.kernel.org
F: Documentation/devicetree/bindings/opp/qcom-nvmem-cpufreq.txt
F: drivers/cpufreq/qcom-cpufreq-nvmem.c
+QUALCOMM CORE POWER REDUCTION (CPR) AVS DRIVER
+M: Niklas Cassel <nks@flawful.org>
+L: linux-pm@vger.kernel.org
+L: linux-arm-msm@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/power/avs/qcom,cpr.txt
+F: drivers/power/avs/qcom-cpr.c
+
QUALCOMM EMAC GIGABIT ETHERNET DRIVER
M: Timur Tabi <timur@kernel.org>
L: netdev@vger.kernel.org
QUALCOMM ETHQOS ETHERNET DRIVER
M: Vinod Koul <vkoul@kernel.org>
-M: Niklas Cassel <niklas.cassel@linaro.org>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
F: arch/mips/ralink
RALINK RT2X00 WIRELESS LAN DRIVER
-M: Stanislaw Gruszka <sgruszka@redhat.com>
+M: Stanislaw Gruszka <stf_xl@wp.pl>
M: Helmut Schaa <helmut.schaa@googlemail.com>
L: linux-wireless@vger.kernel.org
S: Maintained
SAMSUNG SXGBE DRIVERS
M: Byungho An <bh74.an@samsung.com>
-M: Girish K S <ks.giri@samsung.com>
-M: Vipul Pandya <vipul.pandya@samsung.com>
S: Supported
L: netdev@vger.kernel.org
F: drivers/net/ethernet/samsung/sxgbe/
F: tools/testing/selftests/timers/
TIPC NETWORK LAYER
-M: Jon Maloy <jon.maloy@ericsson.com>
+M: Jon Maloy <jmaloy@redhat.com>
M: Ying Xue <ying.xue@windriver.com>
L: netdev@vger.kernel.org (core kernel code)
L: tipc-discussion@lists.sourceforge.net (user apps, general discussion)
M: Alexei Starovoitov <ast@kernel.org>
M: Daniel Borkmann <daniel@iogearbox.net>
M: David S. Miller <davem@davemloft.net>
-M: Jakub Kicinski <jakub.kicinski@netronome.com>
+M: Jakub Kicinski <kuba@kernel.org>
M: Jesper Dangaard Brouer <hawk@kernel.org>
M: John Fastabend <john.fastabend@gmail.com>
L: netdev@vger.kernel.org
VERSION = 5
PATCHLEVEL = 5
SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION =
NAME = Kleptomaniac Octopus
# *DOCUMENTATION*
#endif
#ifdef CONFIG_ARC_HAS_ACCL_REGS
- ST2 r58, r59, PT_sp + 12
+ ST2 r58, r59, PT_r58
#endif
.endm
LD2 gp, fp, PT_r26 ; gp (r26), fp (r27)
- ld r12, [sp, PT_sp + 4]
- ld r30, [sp, PT_sp + 8]
+ ld r12, [sp, PT_r12]
+ ld r30, [sp, PT_r30]
; Restore SP (into AUX_USER_SP) only if returning to U mode
; - for K mode, it will be implicitly restored as stack is unwound
#endif
#ifdef CONFIG_ARC_HAS_ACCL_REGS
- LD2 r58, r59, PT_sp + 12
+ LD2 r58, r59, PT_r58
#endif
.endm
#define _ASM_ARC_HUGEPAGE_H
#include <linux/types.h>
-#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
static inline pte_t pmd_pte(pmd_t pmd)
DEFINE(SZ_CALLEE_REGS, sizeof(struct callee_regs));
DEFINE(SZ_PT_REGS, sizeof(struct pt_regs));
- DEFINE(PT_user_r25, offsetof(struct pt_regs, user_r25));
+
+#ifdef CONFIG_ISA_ARCV2
+ OFFSET(PT_r12, pt_regs, r12);
+ OFFSET(PT_r30, pt_regs, r30);
+#endif
+#ifdef CONFIG_ARC_HAS_ACCL_REGS
+ OFFSET(PT_r58, pt_regs, r58);
+ OFFSET(PT_r59, pt_regs, r59);
+#endif
return 0;
}
menuconfig ARC_PLAT_EZNPS
bool "\"EZchip\" ARC dev platform"
select CPU_BIG_ENDIAN
- select CLKSRC_NPS
+ select CLKSRC_NPS if !PHYS_ADDR_T_64BIT
select EZNPS_GIC
select EZCHIP_NPS_MANAGEMENT_ENET if ETHERNET
help
select HAVE_ARM_SMCCC if CPU_V7
select HAVE_EBPF_JIT if !CPU_ENDIAN_BE32
select HAVE_CONTEXT_TRACKING
+ select HAVE_COPY_THREAD_TLS
select HAVE_C_RECORDMCOUNT
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_CONTIGUOUS if MMU
};
/ {
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x80000000 0x20000000>; /* 512 MB */
+ };
+
clk_mcasp0_fixed: clk_mcasp0_fixed {
#clock-cells = <0>;
compatible = "fixed-clock";
pinctrl-names = "default", "sleep";
pinctrl-0 = <&spi0_pins_default>;
pinctrl-1 = <&spi0_pins_sleep>;
+ ti,pindir-d0-out-d1-in = <1>;
};
&spi1 {
pinctrl-names = "default", "sleep";
pinctrl-0 = <&spi1_pins_default>;
pinctrl-1 = <&spi1_pins_sleep>;
+ ti,pindir-d0-out-d1-in = <1>;
};
&usb2_phy1 {
&pcie1_rc {
status = "okay";
- gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>;
-};
-
-&pcie1_ep {
- gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>;
+ gpios = <&gpio5 18 GPIO_ACTIVE_HIGH>;
};
&mmc1 {
gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>;
};
-&pcie1_ep {
- gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>;
-};
-
&mailbox5 {
status = "okay";
mbox_ipu1_ipc3x: mbox_ipu1_ipc3x {
reg = <0x0 0x80000000 0x0 0x80000000>;
};
+ main_12v0: fixedregulator-main_12v0 {
+ /* main supply */
+ compatible = "regulator-fixed";
+ regulator-name = "main_12v0";
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ evm_5v0: fixedregulator-evm_5v0 {
+ /* Output of TPS54531D */
+ compatible = "regulator-fixed";
+ regulator-name = "evm_5v0";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&main_12v0>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
vdd_3v3: fixedregulator-vdd_3v3 {
compatible = "regulator-fixed";
regulator-name = "vdd_3v3";
gpios = <&gpio2 8 GPIO_ACTIVE_LOW>;
};
-&pcie1_ep {
- gpios = <&gpio2 8 GPIO_ACTIVE_LOW>;
-};
-
&mcasp3 {
#sound-dai-cells = <0>;
assigned-clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP3_CLKCTRL 24>;
};
};
- pca0: pca9552@60 {
+ pca0: pca9552@61 {
compatible = "nxp,pca9552";
- reg = <0x60>;
+ reg = <0x61>;
#address-cells = <1>;
#size-cells = <0>;
status = "okay";
};
-&i2c13 {
- status = "okay";
-};
-
-&i2c14 {
- status = "okay";
-};
-
-&i2c15 {
- status = "okay";
-};
-
-&i2c0 {
- status = "okay";
-};
-
-&i2c1 {
- status = "okay";
-};
-
-&i2c2 {
- status = "okay";
-};
-
-&i2c3 {
- status = "okay";
-
- power-supply@68 {
- compatible = "ibm,cffps2";
- reg = <0x68>;
- };
-
- power-supply@69 {
- compatible = "ibm,cffps2";
- reg = <0x69>;
- };
-
- power-supply@6a {
- compatible = "ibm,cffps2";
- reg = <0x6a>;
- };
-
- power-supply@6b {
- compatible = "ibm,cffps2";
- reg = <0x6b>;
- };
-};
-
-&i2c4 {
- status = "okay";
-
- tmp275@48 {
- compatible = "ti,tmp275";
- reg = <0x48>;
- };
-
- tmp275@49 {
- compatible = "ti,tmp275";
- reg = <0x49>;
- };
-
- tmp275@4a {
- compatible = "ti,tmp275";
- reg = <0x4a>;
- };
-};
-
-&i2c5 {
- status = "okay";
-
- tmp275@48 {
- compatible = "ti,tmp275";
- reg = <0x48>;
- };
-
- tmp275@49 {
- compatible = "ti,tmp275";
- reg = <0x49>;
- };
-};
-
-&i2c6 {
- status = "okay";
-
- tmp275@48 {
- compatible = "ti,tmp275";
- reg = <0x48>;
- };
-
- tmp275@4a {
- compatible = "ti,tmp275";
- reg = <0x4a>;
- };
-
- tmp275@4b {
- compatible = "ti,tmp275";
- reg = <0x4b>;
- };
-};
-
-&i2c7 {
- status = "okay";
-
- si7021-a20@20 {
- compatible = "silabs,si7020";
- reg = <0x20>;
- };
-
- tmp275@48 {
- compatible = "ti,tmp275";
- reg = <0x48>;
- };
-
- max31785@52 {
- compatible = "maxim,max31785a";
- reg = <0x52>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- fan@0 {
- compatible = "pmbus-fan";
- reg = <0>;
- tach-pulses = <2>;
- };
-
- fan@1 {
- compatible = "pmbus-fan";
- reg = <1>;
- tach-pulses = <2>;
- };
-
- fan@2 {
- compatible = "pmbus-fan";
- reg = <2>;
- tach-pulses = <2>;
- };
-
- fan@3 {
- compatible = "pmbus-fan";
- reg = <3>;
- tach-pulses = <2>;
- };
- };
-
- pca0: pca9552@60 {
- compatible = "nxp,pca9552";
- reg = <0x60>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- gpio-controller;
- #gpio-cells = <2>;
-
- gpio@0 {
- reg = <0>;
- };
-
- gpio@1 {
- reg = <1>;
- };
-
- gpio@2 {
- reg = <2>;
- };
-
- gpio@3 {
- reg = <3>;
- };
-
- gpio@4 {
- reg = <4>;
- };
-
- gpio@5 {
- reg = <5>;
- };
-
- gpio@6 {
- reg = <6>;
- };
-
- gpio@7 {
- reg = <7>;
- };
-
- gpio@8 {
- reg = <8>;
- };
-
- gpio@9 {
- reg = <9>;
- };
-
- gpio@10 {
- reg = <10>;
- };
-
- gpio@11 {
- reg = <11>;
- };
-
- gpio@12 {
- reg = <12>;
- };
-
- gpio@13 {
- reg = <13>;
- };
-
- gpio@14 {
- reg = <14>;
- };
-
- gpio@15 {
- reg = <15>;
- };
- };
-
- dps: dps310@76 {
- compatible = "infineon,dps310";
- reg = <0x76>;
- #io-channel-cells = <0>;
- };
-};
-
-&i2c8 {
- status = "okay";
-
- ucd90320@b {
- compatible = "ti,ucd90160";
- reg = <0x0b>;
- };
-
- ucd90320@c {
- compatible = "ti,ucd90160";
- reg = <0x0c>;
- };
-
- ucd90320@11 {
- compatible = "ti,ucd90160";
- reg = <0x11>;
- };
-
- rtc@32 {
- compatible = "epson,rx8900";
- reg = <0x32>;
- };
-
- tmp275@48 {
- compatible = "ti,tmp275";
- reg = <0x48>;
- };
-
- tmp275@4a {
- compatible = "ti,tmp275";
- reg = <0x4a>;
- };
-};
-
-&i2c9 {
- status = "okay";
-
- ir35221@42 {
- compatible = "infineon,ir35221";
- reg = <0x42>;
- };
-
- ir35221@43 {
- compatible = "infineon,ir35221";
- reg = <0x43>;
- };
-
- ir35221@44 {
- compatible = "infineon,ir35221";
- reg = <0x44>;
- };
-
- tmp423a@4c {
- compatible = "ti,tmp423";
- reg = <0x4c>;
- };
-
- tmp423b@4d {
- compatible = "ti,tmp423";
- reg = <0x4d>;
- };
-
- ir35221@72 {
- compatible = "infineon,ir35221";
- reg = <0x72>;
- };
-
- ir35221@73 {
- compatible = "infineon,ir35221";
- reg = <0x73>;
- };
-
- ir35221@74 {
- compatible = "infineon,ir35221";
- reg = <0x74>;
- };
-};
-
-&i2c10 {
- status = "okay";
-
- ir35221@42 {
- compatible = "infineon,ir35221";
- reg = <0x42>;
- };
-
- ir35221@43 {
- compatible = "infineon,ir35221";
- reg = <0x43>;
- };
-
- ir35221@44 {
- compatible = "infineon,ir35221";
- reg = <0x44>;
- };
-
- tmp423a@4c {
- compatible = "ti,tmp423";
- reg = <0x4c>;
- };
-
- tmp423b@4d {
- compatible = "ti,tmp423";
- reg = <0x4d>;
- };
-
- ir35221@72 {
- compatible = "infineon,ir35221";
- reg = <0x72>;
- };
-
- ir35221@73 {
- compatible = "infineon,ir35221";
- reg = <0x73>;
- };
-
- ir35221@74 {
- compatible = "infineon,ir35221";
- reg = <0x74>;
- };
-};
-
-&i2c11 {
- status = "okay";
-
- tmp275@48 {
- compatible = "ti,tmp275";
- reg = <0x48>;
- };
-
- tmp275@49 {
- compatible = "ti,tmp275";
- reg = <0x49>;
- };
-};
-
-&i2c12 {
- status = "okay";
-};
-
&i2c13 {
status = "okay";
};
};
-&fmc {
- status = "okay";
- flash@0 {
- status = "okay";
- m25p,fast-read;
- label = "bmc";
- spi-max-frequency = <50000000>;
-#include "openbmc-flash-layout-128.dtsi"
- };
-
- flash@1 {
- status = "okay";
- m25p,fast-read;
- label = "alt-bmc";
- spi-max-frequency = <50000000>;
- };
-};
-
-&spi1 {
- status = "okay";
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_spi1_default>;
-
- flash@0 {
- status = "okay";
- m25p,fast-read;
- label = "pnor";
- spi-max-frequency = <100000000>;
- };
-};
-
&mac2 {
status = "okay";
pinctrl-names = "default";
&emmc {
status = "okay";
+};
+
+&fsim0 {
+ status = "okay";
+
#address-cells = <2>;
#size-cells = <0>;
status = "okay";
};
-&i2c0 {
- status = "okay";
-};
-
-&i2c1 {
- status = "okay";
-};
-
-&i2c2 {
- status = "okay";
-};
-
-&i2c3 {
- status = "okay";
-
- bmp: bmp280@77 {
- compatible = "bosch,bmp280";
- reg = <0x77>;
- #io-channel-cells = <1>;
- };
-
- max31785@52 {
- compatible = "maxim,max31785a";
- reg = <0x52>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- fan@0 {
- compatible = "pmbus-fan";
- reg = <0>;
- tach-pulses = <2>;
- maxim,fan-rotor-input = "tach";
- maxim,fan-pwm-freq = <25000>;
- maxim,fan-dual-tach;
- maxim,fan-no-watchdog;
- maxim,fan-no-fault-ramp;
- maxim,fan-ramp = <2>;
- maxim,fan-fault-pin-mon;
- };
-
- fan@1 {
- compatible = "pmbus-fan";
- reg = <1>;
- tach-pulses = <2>;
- maxim,fan-rotor-input = "tach";
- maxim,fan-pwm-freq = <25000>;
- maxim,fan-dual-tach;
- maxim,fan-no-watchdog;
- maxim,fan-no-fault-ramp;
- maxim,fan-ramp = <2>;
- maxim,fan-fault-pin-mon;
- };
-
- fan@2 {
- compatible = "pmbus-fan";
- reg = <2>;
- tach-pulses = <2>;
- maxim,fan-rotor-input = "tach";
- maxim,fan-pwm-freq = <25000>;
- maxim,fan-dual-tach;
- maxim,fan-no-watchdog;
- maxim,fan-no-fault-ramp;
- maxim,fan-ramp = <2>;
- maxim,fan-fault-pin-mon;
- };
-
- fan@3 {
- compatible = "pmbus-fan";
- reg = <3>;
- tach-pulses = <2>;
- maxim,fan-rotor-input = "tach";
- maxim,fan-pwm-freq = <25000>;
- maxim,fan-dual-tach;
- maxim,fan-no-watchdog;
- maxim,fan-no-fault-ramp;
- maxim,fan-ramp = <2>;
- maxim,fan-fault-pin-mon;
- };
- };
-
- dps: dps310@76 {
- compatible = "infineon,dps310";
- reg = <0x76>;
- #io-channel-cells = <0>;
- };
-
- pca0: pca9552@60 {
- compatible = "nxp,pca9552";
- reg = <0x60>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- gpio-controller;
- #gpio-cells = <2>;
-
- gpio@0 {
- reg = <0>;
- type = <PCA955X_TYPE_GPIO>;
- };
-
- gpio@1 {
- reg = <1>;
- type = <PCA955X_TYPE_GPIO>;
- };
-
- gpio@2 {
- reg = <2>;
- type = <PCA955X_TYPE_GPIO>;
- };
-
- gpio@3 {
- reg = <3>;
- type = <PCA955X_TYPE_GPIO>;
- };
-
- gpio@4 {
- reg = <4>;
- type = <PCA955X_TYPE_GPIO>;
- };
-
- gpio@5 {
- reg = <5>;
- type = <PCA955X_TYPE_GPIO>;
- };
-
- gpio@6 {
- reg = <6>;
- type = <PCA955X_TYPE_GPIO>;
- };
-
- gpio@7 {
- reg = <7>;
- type = <PCA955X_TYPE_GPIO>;
- };
-
- gpio@8 {
- reg = <8>;
- type = <PCA955X_TYPE_GPIO>;
- };
-
- gpio@9 {
- reg = <9>;
- type = <PCA955X_TYPE_GPIO>;
- };
-
- gpio@10 {
- reg = <10>;
- type = <PCA955X_TYPE_GPIO>;
- };
-
- gpio@11 {
- reg = <11>;
- type = <PCA955X_TYPE_GPIO>;
- };
-
- gpio@12 {
- reg = <12>;
- type = <PCA955X_TYPE_GPIO>;
- };
-
- gpio@13 {
- reg = <13>;
- type = <PCA955X_TYPE_GPIO>;
- };
-
- gpio@14 {
- reg = <14>;
- type = <PCA955X_TYPE_GPIO>;
- };
-
- gpio@15 {
- reg = <15>;
- type = <PCA955X_TYPE_GPIO>;
- };
- };
-
- power-supply@68 {
- compatible = "ibm,cffps1";
- reg = <0x68>;
- };
-
- power-supply@69 {
- compatible = "ibm,cffps1";
- reg = <0x69>;
- };
-};
-
-&i2c4 {
- status = "okay";
-
- tmp423a@4c {
- compatible = "ti,tmp423";
- reg = <0x4c>;
- };
-
- ir35221@70 {
- compatible = "infineon,ir35221";
- reg = <0x70>;
- };
-
- ir35221@71 {
- compatible = "infineon,ir35221";
- reg = <0x71>;
- };
-};
-
-&i2c5 {
- status = "okay";
-
- tmp423a@4c {
- compatible = "ti,tmp423";
- reg = <0x4c>;
- };
-
- ir35221@70 {
- compatible = "infineon,ir35221";
- reg = <0x70>;
- };
-
- ir35221@71 {
- compatible = "infineon,ir35221";
- reg = <0x71>;
- };
-};
-
-&i2c7 {
- status = "okay";
-};
-
-&i2c9 {
- status = "okay";
-
- tmp275@4a {
- compatible = "ti,tmp275";
- reg = <0x4a>;
- };
-};
-
-&i2c10 {
- status = "okay";
-};
-
-&i2c11 {
- status = "okay";
-
- pca9552: pca9552@60 {
- compatible = "nxp,pca9552";
- reg = <0x60>;
- #address-cells = <1>;
- #size-cells = <0>;
- gpio-controller;
- #gpio-cells = <2>;
-
- gpio-line-names = "PS_SMBUS_RESET_N", "APSS_RESET_N",
- "GPU0_TH_OVERT_N_BUFF", "GPU1_TH_OVERT_N_BUFF",
- "GPU2_TH_OVERT_N_BUFF", "GPU3_TH_OVERT_N_BUFF",
- "GPU4_TH_OVERT_N_BUFF", "GPU5_TH_OVERT_N_BUFF",
- "GPU0_PWR_GOOD_BUFF", "GPU1_PWR_GOOD_BUFF",
- "GPU2_PWR_GOOD_BUFF", "GPU3_PWR_GOOD_BUFF",
- "GPU4_PWR_GOOD_BUFF", "GPU5_PWR_GOOD_BUFF",
- "12V_BREAKER_FLT_N", "THROTTLE_UNLATCHED_N";
-
- gpio@0 {
- reg = <0>;
- type = <PCA955X_TYPE_GPIO>;
- };
-
- gpio@1 {
- reg = <1>;
- type = <PCA955X_TYPE_GPIO>;
- };
-
- gpio@2 {
- reg = <2>;
- type = <PCA955X_TYPE_GPIO>;
- };
-
- gpio@3 {
- reg = <3>;
- type = <PCA955X_TYPE_GPIO>;
- };
-
- gpio@4 {
- reg = <4>;
- type = <PCA955X_TYPE_GPIO>;
- };
-
- gpio@5 {
- reg = <5>;
- type = <PCA955X_TYPE_GPIO>;
- };
-
- gpio@6 {
- reg = <6>;
- type = <PCA955X_TYPE_GPIO>;
- };
-
- gpio@7 {
- reg = <7>;
- type = <PCA955X_TYPE_GPIO>;
- };
-
- gpio@8 {
- reg = <8>;
- type = <PCA955X_TYPE_GPIO>;
- };
-
- gpio@9 {
- reg = <9>;
- type = <PCA955X_TYPE_GPIO>;
- };
-
- gpio@10 {
- reg = <10>;
- type = <PCA955X_TYPE_GPIO>;
- };
-
- gpio@11 {
- reg = <11>;
- type = <PCA955X_TYPE_GPIO>;
- };
-
- gpio@12 {
- reg = <12>;
- type = <PCA955X_TYPE_GPIO>;
- };
-
- gpio@13 {
- reg = <13>;
- type = <PCA955X_TYPE_GPIO>;
- };
-
- gpio@14 {
- reg = <14>;
- type = <PCA955X_TYPE_GPIO>;
- };
-
- gpio@15 {
- reg = <15>;
- type = <PCA955X_TYPE_GPIO>;
- };
- };
-
- rtc@32 {
- compatible = "epson,rx8900";
- reg = <0x32>;
- };
-
- eeprom@51 {
- compatible = "atmel,24c64";
- reg = <0x51>;
- };
-
- ucd90160@64 {
- compatible = "ti,ucd90160";
- reg = <0x64>;
- };
-};
-
-&i2c12 {
- status = "okay";
-};
-
-&i2c13 {
- status = "okay";
-};
-
&pinctrl {
/* Hog these as no driver is probed for the entire LPC block */
pinctrl-names = "default";
spi-max-frequency = <50000000>;
status = "disabled";
};
-
- fsim0: fsi@1e79b000 {
- compatible = "aspeed,ast2600-fsi-master", "fsi-master";
- reg = <0x1e79b000 0x94>;
- interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_fsi1_default>;
- clocks = <&syscon ASPEED_CLK_GATE_FSICLK>;
- status = "disabled";
- };
-
- fsim1: fsi@1e79b100 {
- compatible = "aspeed,ast2600-fsi-master", "fsi-master";
- reg = <0x1e79b100 0x94>;
- interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_fsi2_default>;
- clocks = <&syscon ASPEED_CLK_GATE_FSICLK>;
- status = "disabled";
- };
};
mdio0: mdio@1e650000 {
ranges = <0 0x1e78a000 0x1000>;
};
+ fsim0: fsi@1e79b000 {
+ compatible = "aspeed,ast2600-fsi-master", "fsi-master";
+ reg = <0x1e79b000 0x94>;
+ interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fsi1_default>;
+ clocks = <&syscon ASPEED_CLK_GATE_FSICLK>;
+ status = "disabled";
+ };
+
+ fsim1: fsi@1e79b100 {
+ compatible = "aspeed,ast2600-fsi-master", "fsi-master";
+ reg = <0x1e79b100 0x94>;
+ interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fsi2_default>;
+ clocks = <&syscon ASPEED_CLK_GATE_FSICLK>;
+ status = "disabled";
+ };
};
};
};
/dts-v1/;
#include "imx6dl.dtsi"
-#include "imx6qdl-icore.dtsi"
+#include "imx6qdl-icore-1.5.dtsi"
/ {
model = "Engicam i.CoreM6 DualLite/Solo MIPI Starter Kit";
#sound-dai-cells = <0>;
clocks = <&clk_ext_audio_codec>;
VDDA-supply = <®_3p3v>;
- VDDIO-supply = <®_3p3v>;
+ VDDIO-supply = <&sw2_reg>;
};
};
};
rtc@56 {
- compatible = "rv3029c2";
+ compatible = "microcrystal,rv3029";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_rtc_hw300>;
reg = <0x56>;
vin-supply = <&vgen5_reg>;
};
-®_vdd3p0 {
- vin-supply = <&sw2_reg>;
-};
-
®_vdd2p5 {
vin-supply = <&vgen5_reg>;
};
vin-supply = <&sw2_reg>;
};
-®_vdd3p0 {
- vin-supply = <&sw2_reg>;
-};
-
®_vdd2p5 {
vin-supply = <&sw2_reg>;
};
status = "okay";
};
-®_3p0 {
- vin-supply = <&sw2_reg>;
-};
-
&snvs_poweroff {
status = "okay";
};
vin-supply = <&vgen6_reg>;
};
-®_vdd3p0 {
- vin-supply = <&sw2_reg>;
-};
-
®_vdd2p5 {
vin-supply = <&vgen6_reg>;
};
vin-supply = <&vgen6_reg>;
};
-®_vdd3p0 {
- vin-supply = <&sw2_reg>;
-};
-
®_vdd2p5 {
vin-supply = <&vgen6_reg>;
};
reg = <0x80000000 0x10000000>;
};
};
+
+&gpmi {
+ status = "okay";
+};
#address-cells = <1>;
#size-cells = <0>;
- cpu0: cpu@0 {
+ cpu0: cpu@f00 {
compatible = "arm,cortex-a7";
device_type = "cpu";
- reg = <0>;
+ reg = <0xf00>;
};
};
&aobus {
pmu: pmu@e0 {
compatible = "amlogic,meson8-pmu", "syscon";
- reg = <0xe0 0x8>;
+ reg = <0xe0 0x18>;
};
pinctrl_aobus: pinctrl@84 {
twsi1: i2c@d4011000 {
compatible = "mrvl,mmp-twsi";
- reg = <0xd4011000 0x1000>;
+ reg = <0xd4011000 0x70>;
interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&soc_clocks MMP2_CLK_TWSI0>;
resets = <&soc_clocks MMP2_CLK_TWSI0>;
twsi2: i2c@d4031000 {
compatible = "mrvl,mmp-twsi";
- reg = <0xd4031000 0x1000>;
+ reg = <0xd4031000 0x70>;
interrupt-parent = <&twsi_mux>;
interrupts = <0>;
clocks = <&soc_clocks MMP2_CLK_TWSI1>;
twsi3: i2c@d4032000 {
compatible = "mrvl,mmp-twsi";
- reg = <0xd4032000 0x1000>;
+ reg = <0xd4032000 0x70>;
interrupt-parent = <&twsi_mux>;
interrupts = <1>;
clocks = <&soc_clocks MMP2_CLK_TWSI2>;
twsi4: i2c@d4033000 {
compatible = "mrvl,mmp-twsi";
- reg = <0xd4033000 0x1000>;
+ reg = <0xd4033000 0x70>;
interrupt-parent = <&twsi_mux>;
interrupts = <2>;
clocks = <&soc_clocks MMP2_CLK_TWSI3>;
twsi5: i2c@d4033800 {
compatible = "mrvl,mmp-twsi";
- reg = <0xd4033800 0x1000>;
+ reg = <0xd4033800 0x70>;
interrupt-parent = <&twsi_mux>;
interrupts = <3>;
clocks = <&soc_clocks MMP2_CLK_TWSI4>;
twsi6: i2c@d4034000 {
compatible = "mrvl,mmp-twsi";
- reg = <0xd4034000 0x1000>;
+ reg = <0xd4034000 0x70>;
interrupt-parent = <&twsi_mux>;
interrupts = <4>;
clocks = <&soc_clocks MMP2_CLK_TWSI5>;
initial-mode = <1>; /* initialize in HUB mode */
disabled-ports = <1>;
intn-gpios = <&pio 7 5 GPIO_ACTIVE_HIGH>; /* PH5 */
- reset-gpios = <&pio 4 16 GPIO_ACTIVE_HIGH>; /* PE16 */
+ reset-gpios = <&pio 4 16 GPIO_ACTIVE_LOW>; /* PE16 */
connect-gpios = <&pio 4 17 GPIO_ACTIVE_HIGH>; /* PE17 */
refclk-frequency = <19200000>;
};
#if !defined(ZIMAGE) && defined(CONFIG_ARM_ARCH_TIMER)
@ make CNTP_* and CNTPCT accessible from PL1
mrc p15, 0, r7, c0, c1, 1 @ ID_PFR1
- lsr r7, #16
- and r7, #0xf
- cmp r7, #1
- bne 1f
+ ubfx r7, r7, #16, #4
+ teq r7, #0
+ beq 1f
mrc p15, 4, r7, c14, c1, 0 @ CNTHCTL
orr r7, r7, #3 @ PL1PCEN | PL1PCTEN
mcr p15, 4, r7, c14, c1, 0 @ CNTHCTL
asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
int
-copy_thread(unsigned long clone_flags, unsigned long stack_start,
- unsigned long stk_sz, struct task_struct *p)
+copy_thread_tls(unsigned long clone_flags, unsigned long stack_start,
+ unsigned long stk_sz, struct task_struct *p, unsigned long tls)
{
struct thread_info *thread = task_thread_info(p);
struct pt_regs *childregs = task_pt_regs(p);
clear_ptrace_hw_breakpoint(p);
if (clone_flags & CLONE_SETTLS)
- thread->tp_value[0] = childregs->ARM_r3;
+ thread->tp_value[0] = tls;
thread->tp_value[1] = get_tpuser();
thread_notify(THREAD_NOTIFY_COPY, thread);
select PM_GENERIC_DOMAINS if PM
select PM_GENERIC_DOMAINS_OF if PM && OF
select REGMAP_MMIO
+ select RESET_CONTROLLER
select HAVE_IDE
select PINCTRL_SINGLE
ret = clk_prepare_enable(clk);
if (ret)
return ret;
- rate = clk_get_rate(clk) / 2;
+ rate = clk_get_rate(clk);
} else if (cpu_is_pj4()) {
rate = 6500000;
} else {
bool
select ARCH_HAS_BANDGAP
select ARCH_HAS_HOLES_MEMORYMODEL
+ select ARCH_HAS_RESET_CONTROLLER
select ARCH_OMAP
select CLKSRC_MMIO
select GENERIC_IRQ_CHIP
select OMAP_DM_TIMER
select OMAP_GPMC
select PINCTRL
+ select RESET_CONTROLLER
select SOC_BUS
select TI_SYSC
select OMAP_IRQCHIP
select CLKSRC_TI_32K
- select ARCH_HAS_RESET_CONTROLLER
help
Systems based on OMAP2, OMAP3, OMAP4 or OMAP5
static struct clockdomain *ti_sysc_find_one_clockdomain(struct clk *clk)
{
+ struct clk_hw *hw = __clk_get_hw(clk);
struct clockdomain *clkdm = NULL;
struct clk_hw_omap *hwclk;
- hwclk = to_clk_hw_omap(__clk_get_hw(clk));
+ hwclk = to_clk_hw_omap(hw);
+ if (!omap2_clk_is_hw_omap(hw))
+ return NULL;
+
if (hwclk && hwclk->clkdm_name)
clkdm = clkdm_lookup(hwclk->clkdm_name);
select HAVE_CMPXCHG_DOUBLE
select HAVE_CMPXCHG_LOCAL
select HAVE_CONTEXT_TRACKING
+ select HAVE_COPY_THREAD_TLS
select HAVE_DEBUG_BUGVERBOSE
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_CONTIGUOUS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_FUNCTION_ARG_ACCESS_API
+ select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_RCU_TABLE_FREE
select HAVE_RSEQ
select HAVE_STACKPROTECTOR
config ARCH_PROC_KCORE_TEXT
def_bool y
+config BROKEN_GAS_INST
+ def_bool !$(as-instr,1:\n.inst 0\n.rept . - 1b\n\nnop\n.endr\n)
+
config KASAN_SHADOW_OFFSET
hex
depends on KASAN
If unsure, say Y.
+config ARM64_WORKAROUND_SPECULATIVE_AT_VHE
+ bool
+
config ARM64_ERRATUM_1165522
bool "Cortex-A76: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation"
default y
+ select ARM64_WORKAROUND_SPECULATIVE_AT_VHE
help
This option adds a workaround for ARM Cortex-A76 erratum 1165522.
If unsure, say Y.
+config ARM64_ERRATUM_1530923
+ bool "Cortex-A55: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation"
+ default y
+ select ARM64_WORKAROUND_SPECULATIVE_AT_VHE
+ help
+ This option adds a workaround for ARM Cortex-A55 erratum 1530923.
+
+ Affected Cortex-A55 cores (r0p0, r0p1, r1p0, r2p0) could end up with
+ corrupted TLBs by speculating an AT instruction during a guest
+ context switch.
+
+ If unsure, say Y.
+
config ARM64_ERRATUM_1286807
bool "Cortex-A76: Modification of the translation table for a virtual address might lead to read-after-read ordering violation"
default y
invalidated has been observed by other observers. The
workaround repeats the TLBI+DSB operation.
+config ARM64_WORKAROUND_SPECULATIVE_AT_NVHE
+ bool
+
config ARM64_ERRATUM_1319367
bool "Cortex-A57/A72: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation"
default y
+ select ARM64_WORKAROUND_SPECULATIVE_AT_NVHE
help
This option adds workarounds for ARM Cortex-A57 erratum 1319537
and A72 erratum 1319367
instruction if the cpu does not implement the feature.
config ARM64_LSE_ATOMICS
+ bool
+ default ARM64_USE_LSE_ATOMICS
+ depends on $(as-instr,.arch_extension lse)
+
+config ARM64_USE_LSE_ATOMICS
bool "Atomic instructions"
depends on JUMP_LABEL
default y
endmenu
+menu "ARMv8.5 architectural features"
+
+config ARM64_E0PD
+ bool "Enable support for E0PD"
+ default y
+ help
+ E0PD (part of the ARMv8.5 extensions) allows us to ensure
+ that EL0 accesses made via TTBR1 always fault in constant time,
+ providing protection for KASLR similar to that provided by KPTI, but
+ with lower overhead and without disrupting legitimate access to
+ kernel memory such as SPE.
+
+ This option enables E0PD for TTBR1 where available.
+
+config ARCH_RANDOM
+ bool "Enable support for random number generation"
+ default y
+ help
+ Random number generation (part of the ARMv8.5 Extensions)
+ provides a high bandwidth, cryptographically secure
+ hardware random number generator.
+
+endmenu
+
config ARM64_SVE
bool "ARM Scalable Vector Extension support"
default y
config ARM64_PSEUDO_NMI
bool "Support for NMI-like interrupts"
- select CONFIG_ARM_GIC_V3
+ select ARM_GIC_V3
help
Adds support for mimicking Non-Maskable Interrupts through the use of
GIC interrupt priority. This support requires version 3 or later of
endif
endif
-# Check for binutils support for specific extensions
-lseinstr := $(call as-instr,.arch_extension lse,-DCONFIG_AS_LSE=1)
-
-ifeq ($(CONFIG_ARM64_LSE_ATOMICS), y)
- ifeq ($(lseinstr),)
+ifeq ($(CONFIG_ARM64_USE_LSE_ATOMICS), y)
+ ifneq ($(CONFIG_ARM64_LSE_ATOMICS), y)
$(warning LSE atomics not supported by binutils)
endif
endif
return 0; \
}' | $(CC) -S -x c -o "$$TMP" -,,-DCONFIG_CC_HAS_K_CONSTRAINT=1)
-ifeq ($(CONFIG_ARM64), y)
-brokengasinst := $(call as-instr,1:\n.inst 0\n.rept . - 1b\n\nnop\n.endr\n,,-DCONFIG_BROKEN_GAS_INST=1)
-
- ifneq ($(brokengasinst),)
+ifeq ($(CONFIG_BROKEN_GAS_INST),y)
$(warning Detected assembler with broken .inst; disassembly will be unreliable)
- endif
endif
-KBUILD_CFLAGS += -mgeneral-regs-only $(lseinstr) $(brokengasinst) \
+KBUILD_CFLAGS += -mgeneral-regs-only \
$(compat_vdso) $(cc_has_k_constraint)
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
KBUILD_CFLAGS += $(call cc-disable-warning, psabi)
-KBUILD_AFLAGS += $(lseinstr) $(brokengasinst) $(compat_vdso)
+KBUILD_AFLAGS += $(compat_vdso)
KBUILD_CFLAGS += $(call cc-option,-mabi=lp64)
KBUILD_AFLAGS += $(call cc-option,-mabi=lp64)
OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
-targets := Image Image.gz
+targets := Image Image.bz2 Image.gz Image.lz4 Image.lzma Image.lzo
$(obj)/Image: vmlinux FORCE
$(call if_changed,objcopy)
pinctrl-names = "default";
pinctrl-0 = <&mmc2_pins>;
vmmc-supply = <®_dcdc1>;
- vqmmc-supply = <®_dcdc1>;
+ vqmmc-supply = <®_eldo1>;
bus-width = <8>;
non-removable;
cap-mmc-hw-reset;
&mmc1 {
pinctrl-names = "default";
pinctrl-0 = <&mmc1_pins>;
- vmmc-supply = <®_aldo2>;
+ vmmc-supply = <®_dcdc1>;
vqmmc-supply = <®_dldo4>;
mmc-pwrseq = <&wifi_pwrseq>;
bus-width = <4>;
pmu {
compatible = "arm,armv8-pmuv3";
- interrupts = <0 120 8>,
- <0 121 8>,
- <0 122 8>,
- <0 123 8>;
+ interrupts = <0 170 4>,
+ <0 171 4>,
+ <0 172 4>,
+ <0 173 4>;
interrupt-affinity = <&cpu0>,
<&cpu1>,
<&cpu2>,
};
gpio-keys {
- compatible = "gpio-keys-polled";
- poll-interval = <100>;
+ compatible = "gpio-keys";
key1 {
label = "A";
linux,code = <BTN_0>;
gpios = <&gpio GPIOH_6 GPIO_ACTIVE_LOW>;
+ interrupt-parent = <&gpio_intc>;
+ interrupts = <34 IRQ_TYPE_EDGE_BOTH>;
};
key2 {
label = "B";
linux,code = <BTN_1>;
gpios = <&gpio GPIOH_7 GPIO_ACTIVE_LOW>;
+ interrupt-parent = <&gpio_intc>;
+ interrupts = <35 IRQ_TYPE_EDGE_BOTH>;
};
key3 {
label = "C";
linux,code = <BTN_2>;
gpios = <&gpio_ao GPIOAO_2 GPIO_ACTIVE_LOW>;
+ interrupt-parent = <&gpio_intc>;
+ interrupts = <2 IRQ_TYPE_EDGE_BOTH>;
+ };
+
+ mic_mute {
+ label = "MicMute";
+ linux,code = <SW_MUTE_DEVICE>;
+ linux,input-type = <EV_SW>;
+ gpios = <&gpio_ao GPIOE_2 GPIO_ACTIVE_LOW>;
+ interrupt-parent = <&gpio_intc>;
+ interrupts = <99 IRQ_TYPE_EDGE_BOTH>;
+ };
+
+ power_key {
+ label = "PowerKey";
+ linux,code = <KEY_POWER>;
+ gpios = <&gpio_ao GPIOAO_3 GPIO_ACTIVE_LOW>;
+ interrupt-parent = <&gpio_intc>;
+ interrupts = <3 IRQ_TYPE_EDGE_BOTH>;
};
};
bluetooth {
compatible = "brcm,bcm43438-bt";
+ interrupt-parent = <&gpio_intc>;
+ interrupts = <95 IRQ_TYPE_LEVEL_HIGH>;
shutdown-gpios = <&gpio GPIOX_17 GPIO_ACTIVE_HIGH>;
max-speed = <2000000>;
clocks = <&wifi32k>;
dcfg: syscon@1e00000 {
compatible = "fsl,ls1028a-dcfg", "syscon";
reg = <0x0 0x1e00000 0x0 0x10000>;
- big-endian;
+ little-endian;
};
rst: syscon@1e60000 {
reg = <0x30bd0000 0x10000>;
interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MM_CLK_SDMA1_ROOT>,
- <&clk IMX8MM_CLK_SDMA1_ROOT>;
+ <&clk IMX8MM_CLK_AHB>;
clock-names = "ipg", "ahb";
#dma-cells = <3>;
fsl,sdma-ram-script-name = "imx/sdma/sdma-imx7d.bin";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_imu>;
interrupt-parent = <&gpio3>;
- interrupts = <19 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <19 IRQ_TYPE_LEVEL_HIGH>;
vdd-supply = <®_3v3_p>;
vddio-supply = <®_3v3_p>;
};
pmu {
compatible = "arm,armv8-pmuv3";
- interrupts = <0 120 8>,
- <0 121 8>,
- <0 122 8>,
- <0 123 8>;
+ interrupts = <0 170 4>,
+ <0 171 4>,
+ <0 172 4>,
+ <0 173 4>;
interrupt-affinity = <&cpu0>,
<&cpu1>,
<&cpu2>,
ir-receiver {
compatible = "gpio-ir-receiver";
- gpios = <&gpio2 RK_PA2 GPIO_ACTIVE_HIGH>;
+ gpios = <&gpio2 RK_PA2 GPIO_ACTIVE_LOW>;
+ linux,rc-map-name = "rc-beelink-gs1";
};
};
static inline void apply_alternatives_module(void *start, size_t length) { }
#endif
-#define ALTINSTR_ENTRY(feature,cb) \
+#define ALTINSTR_ENTRY(feature) \
" .word 661b - .\n" /* label */ \
- " .if " __stringify(cb) " == 0\n" \
" .word 663f - .\n" /* new instruction */ \
- " .else\n" \
+ " .hword " __stringify(feature) "\n" /* feature bit */ \
+ " .byte 662b-661b\n" /* source len */ \
+ " .byte 664f-663f\n" /* replacement len */
+
+#define ALTINSTR_ENTRY_CB(feature, cb) \
+ " .word 661b - .\n" /* label */ \
" .word " __stringify(cb) "- .\n" /* callback */ \
- " .endif\n" \
" .hword " __stringify(feature) "\n" /* feature bit */ \
" .byte 662b-661b\n" /* source len */ \
" .byte 664f-663f\n" /* replacement len */
*
* Alternatives with callbacks do not generate replacement instructions.
*/
-#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled, cb) \
+#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled) \
".if "__stringify(cfg_enabled)" == 1\n" \
"661:\n\t" \
oldinstr "\n" \
"662:\n" \
".pushsection .altinstructions,\"a\"\n" \
- ALTINSTR_ENTRY(feature,cb) \
+ ALTINSTR_ENTRY(feature) \
".popsection\n" \
- " .if " __stringify(cb) " == 0\n" \
".pushsection .altinstr_replacement, \"a\"\n" \
"663:\n\t" \
newinstr "\n" \
".popsection\n\t" \
".org . - (664b-663b) + (662b-661b)\n\t" \
".org . - (662b-661b) + (664b-663b)\n" \
- ".else\n\t" \
+ ".endif\n"
+
+#define __ALTERNATIVE_CFG_CB(oldinstr, feature, cfg_enabled, cb) \
+ ".if "__stringify(cfg_enabled)" == 1\n" \
+ "661:\n\t" \
+ oldinstr "\n" \
+ "662:\n" \
+ ".pushsection .altinstructions,\"a\"\n" \
+ ALTINSTR_ENTRY_CB(feature, cb) \
+ ".popsection\n" \
"663:\n\t" \
"664:\n\t" \
- ".endif\n" \
".endif\n"
#define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...) \
- __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg), 0)
+ __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg))
#define ALTERNATIVE_CB(oldinstr, cb) \
- __ALTERNATIVE_CFG(oldinstr, "NOT_AN_INSTRUCTION", ARM64_CB_PATCH, 1, cb)
+ __ALTERNATIVE_CFG_CB(oldinstr, ARM64_CB_PATCH, 1, cb)
#else
#include <asm/assembler.h>
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARCHRANDOM_H
+#define _ASM_ARCHRANDOM_H
+
+#ifdef CONFIG_ARCH_RANDOM
+
+#include <linux/random.h>
+#include <asm/cpufeature.h>
+
+static inline bool __arm64_rndr(unsigned long *v)
+{
+ bool ok;
+
+ /*
+ * Reads of RNDR set PSTATE.NZCV to 0b0000 on success,
+ * and set PSTATE.NZCV to 0b0100 otherwise.
+ */
+ asm volatile(
+ __mrs_s("%0", SYS_RNDR_EL0) "\n"
+ " cset %w1, ne\n"
+ : "=r" (*v), "=r" (ok)
+ :
+ : "cc");
+
+ return ok;
+}
+
+static inline bool __must_check arch_get_random_long(unsigned long *v)
+{
+ return false;
+}
+
+static inline bool __must_check arch_get_random_int(unsigned int *v)
+{
+ return false;
+}
+
+static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
+{
+ /*
+ * Only support the generic interface after we have detected
+ * the system wide capability, avoiding complexity with the
+ * cpufeature code and with potential scheduling between CPUs
+ * with and without the feature.
+ */
+ if (!cpus_have_const_cap(ARM64_HAS_RNG))
+ return false;
+
+ return __arm64_rndr(v);
+}
+
+static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
+{
+ unsigned long val;
+ bool ok = arch_get_random_seed_long(&val);
+
+ *v = val;
+ return ok;
+}
+
+static inline bool __init __early_cpu_has_rndr(void)
+{
+ /* Open code as we run prior to the first call to cpufeature. */
+ unsigned long ftr = read_sysreg_s(SYS_ID_AA64ISAR0_EL1);
+ return (ftr >> ID_AA64ISAR0_RNDR_SHIFT) & 0xf;
+}
+
+#else
+
+static inline bool __arm64_rndr(unsigned long *v) { return false; }
+static inline bool __init __early_cpu_has_rndr(void) { return false; }
+
+#endif /* CONFIG_ARCH_RANDOM */
+#endif /* _ASM_ARCHRANDOM_H */
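A sketch of how a caller might consume this interface once capabilities are
finalized; seed_pool() is a hypothetical stand-in for the random core's own
mixing logic, not a real kernel function:

	unsigned long seed;

	/* Returns false until the ARM64_HAS_RNG capability is established. */
	if (arch_get_random_seed_long(&seed))
		seed_pool(&seed, sizeof(seed));	/* hypothetical consumer */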
msr daif, \flags
.endm
- /* Only on aarch64 pstate, PSR_D_BIT is different for aarch32 */
- .macro inherit_daif, pstate:req, tmp:req
- and \tmp, \pstate, #(PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
- msr daif, \tmp
- .endm
-
/* IRQ is the lowest priority flag, unconditionally unmask the rest. */
.macro enable_da_f
msr daifclr, #(8 | 4 | 1)
9990:
.endm
-/*
- * SMP data memory barrier
- */
- .macro smp_dmb, opt
- dmb \opt
- .endm
-
/*
* RAS Error Synchronization barrier
*/
b.ne 9998b
.endm
-/*
- * Annotate a function as position independent, i.e., safe to be called before
- * the kernel virtual mapping is activated.
- */
-#define ENDPIPROC(x) \
- .globl __pi_##x; \
- .type __pi_##x, %function; \
- .set __pi_##x, x; \
- .size __pi_##x, . - x; \
- ENDPROC(x)
-
/*
* Annotate a function as being unsuitable for kprobes.
*/
#include <linux/stringify.h>
-#if IS_ENABLED(CONFIG_ARM64_LSE_ATOMICS) && IS_ENABLED(CONFIG_AS_LSE)
+#ifdef CONFIG_ARM64_LSE_ATOMICS
#define __LL_SC_FALLBACK(asm_ops) \
" b 3f\n" \
" .subsection 1\n" \
static inline void __lse_atomic_##op(int i, atomic_t *v) \
{ \
asm volatile( \
+ __LSE_PREAMBLE \
" " #asm_op " %w[i], %[v]\n" \
: [i] "+r" (i), [v] "+Q" (v->counter) \
: "r" (v)); \
static inline int __lse_atomic_fetch_##op##name(int i, atomic_t *v) \
{ \
asm volatile( \
+ __LSE_PREAMBLE \
" " #asm_op #mb " %w[i], %w[i], %[v]" \
: [i] "+r" (i), [v] "+Q" (v->counter) \
: "r" (v) \
u32 tmp; \
\
asm volatile( \
+ __LSE_PREAMBLE \
" ldadd" #mb " %w[i], %w[tmp], %[v]\n" \
" add %w[i], %w[i], %w[tmp]" \
: [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \
static inline void __lse_atomic_and(int i, atomic_t *v)
{
asm volatile(
+ __LSE_PREAMBLE
" mvn %w[i], %w[i]\n"
" stclr %w[i], %[v]"
: [i] "+&r" (i), [v] "+Q" (v->counter)
static inline int __lse_atomic_fetch_and##name(int i, atomic_t *v) \
{ \
asm volatile( \
+ __LSE_PREAMBLE \
" mvn %w[i], %w[i]\n" \
" ldclr" #mb " %w[i], %w[i], %[v]" \
: [i] "+&r" (i), [v] "+Q" (v->counter) \
static inline void __lse_atomic_sub(int i, atomic_t *v)
{
asm volatile(
+ __LSE_PREAMBLE
" neg %w[i], %w[i]\n"
" stadd %w[i], %[v]"
: [i] "+&r" (i), [v] "+Q" (v->counter)
u32 tmp; \
\
asm volatile( \
+ __LSE_PREAMBLE \
" neg %w[i], %w[i]\n" \
" ldadd" #mb " %w[i], %w[tmp], %[v]\n" \
" add %w[i], %w[i], %w[tmp]" \
static inline int __lse_atomic_fetch_sub##name(int i, atomic_t *v) \
{ \
asm volatile( \
+ __LSE_PREAMBLE \
" neg %w[i], %w[i]\n" \
" ldadd" #mb " %w[i], %w[i], %[v]" \
: [i] "+&r" (i), [v] "+Q" (v->counter) \
static inline void __lse_atomic64_##op(s64 i, atomic64_t *v) \
{ \
asm volatile( \
+ __LSE_PREAMBLE \
" " #asm_op " %[i], %[v]\n" \
: [i] "+r" (i), [v] "+Q" (v->counter) \
: "r" (v)); \
static inline long __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)\
{ \
asm volatile( \
+ __LSE_PREAMBLE \
" " #asm_op #mb " %[i], %[i], %[v]" \
: [i] "+r" (i), [v] "+Q" (v->counter) \
: "r" (v) \
unsigned long tmp; \
\
asm volatile( \
+ __LSE_PREAMBLE \
" ldadd" #mb " %[i], %x[tmp], %[v]\n" \
" add %[i], %[i], %x[tmp]" \
: [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \
static inline void __lse_atomic64_and(s64 i, atomic64_t *v)
{
asm volatile(
+ __LSE_PREAMBLE
" mvn %[i], %[i]\n"
" stclr %[i], %[v]"
: [i] "+&r" (i), [v] "+Q" (v->counter)
static inline long __lse_atomic64_fetch_and##name(s64 i, atomic64_t *v) \
{ \
asm volatile( \
+ __LSE_PREAMBLE \
" mvn %[i], %[i]\n" \
" ldclr" #mb " %[i], %[i], %[v]" \
: [i] "+&r" (i), [v] "+Q" (v->counter) \
static inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
{
asm volatile(
+ __LSE_PREAMBLE
" neg %[i], %[i]\n"
" stadd %[i], %[v]"
: [i] "+&r" (i), [v] "+Q" (v->counter)
unsigned long tmp; \
\
asm volatile( \
+ __LSE_PREAMBLE \
" neg %[i], %[i]\n" \
" ldadd" #mb " %[i], %x[tmp], %[v]\n" \
" add %[i], %[i], %x[tmp]" \
static inline long __lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v) \
{ \
asm volatile( \
+ __LSE_PREAMBLE \
" neg %[i], %[i]\n" \
" ldadd" #mb " %[i], %[i], %[v]" \
: [i] "+&r" (i), [v] "+Q" (v->counter) \
unsigned long tmp;
asm volatile(
+ __LSE_PREAMBLE
"1: ldr %x[tmp], %[v]\n"
" subs %[ret], %x[tmp], #1\n"
" b.lt 2f\n"
unsigned long tmp; \
\
asm volatile( \
+ __LSE_PREAMBLE \
" mov %" #w "[tmp], %" #w "[old]\n" \
" cas" #mb #sfx "\t%" #w "[tmp], %" #w "[new], %[v]\n" \
" mov %" #w "[ret], %" #w "[tmp]" \
register unsigned long x4 asm ("x4") = (unsigned long)ptr; \
\
asm volatile( \
+ __LSE_PREAMBLE \
" casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\
" eor %[old1], %[old1], %[oldval1]\n" \
" eor %[old2], %[old2], %[oldval2]\n" \
}
#define ip_fast_csum ip_fast_csum
+extern unsigned int do_csum(const unsigned char *buff, int len);
+#define do_csum do_csum
+
#include <asm-generic/checksum.h>
#endif /* __ASM_CHECKSUM_H */
u32 reg_id_isar3;
u32 reg_id_isar4;
u32 reg_id_isar5;
+ u32 reg_id_isar6;
u32 reg_id_mmfr0;
u32 reg_id_mmfr1;
u32 reg_id_mmfr2;
#define ARM64_SSBS 34
#define ARM64_WORKAROUND_1418040 35
#define ARM64_HAS_SB 36
-#define ARM64_WORKAROUND_1165522 37
+#define ARM64_WORKAROUND_SPECULATIVE_AT_VHE 37
#define ARM64_HAS_ADDRESS_AUTH_ARCH 38
#define ARM64_HAS_ADDRESS_AUTH_IMP_DEF 39
#define ARM64_HAS_GENERIC_AUTH_ARCH 40
#define ARM64_WORKAROUND_CAVIUM_TX2_219_TVM 45
#define ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM 46
#define ARM64_WORKAROUND_1542419 47
-#define ARM64_WORKAROUND_1319367 48
+#define ARM64_WORKAROUND_SPECULATIVE_AT_NVHE 48
+#define ARM64_HAS_E0PD 49
+#define ARM64_HAS_RNG 50
-#define ARM64_NCAPS 49
+#define ARM64_NCAPS 51
#endif /* __ASM_CPUCAPS_H */
system_uses_irq_prio_masking();
}
+static inline bool system_capabilities_finalized(void)
+{
+ return static_branch_likely(&arm64_const_caps_ready);
+}
+
#define ARM64_BP_HARDEN_UNKNOWN -1
#define ARM64_BP_HARDEN_WA_NEEDED 0
#define ARM64_BP_HARDEN_NOT_REQUIRED 1
#define QCOM_CPU_PART_FALKOR_V1 0x800
#define QCOM_CPU_PART_FALKOR 0xC00
#define QCOM_CPU_PART_KRYO 0x200
+#define QCOM_CPU_PART_KRYO_3XX_SILVER 0x803
+#define QCOM_CPU_PART_KRYO_4XX_SILVER 0x805
#define NVIDIA_CPU_PART_DENVER 0x003
#define NVIDIA_CPU_PART_CARMEL 0x004
#define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1)
#define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR)
#define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)
+#define MIDR_QCOM_KRYO_3XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_3XX_SILVER)
+#define MIDR_QCOM_KRYO_4XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_SILVER)
#define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER)
#define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL)
#define MIDR_FUJITSU_A64FX MIDR_CPU_MODEL(ARM_CPU_IMP_FUJITSU, FUJITSU_CPU_PART_A64FX)
trace_hardirqs_off();
}
-static inline unsigned long local_daif_save(void)
+static inline unsigned long local_daif_save_flags(void)
{
unsigned long flags;
flags |= PSR_I_BIT;
}
+ return flags;
+}
+
+static inline unsigned long local_daif_save(void)
+{
+ unsigned long flags;
+
+ flags = local_daif_save_flags();
+
local_daif_mask();
return flags;
void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs);
void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr);
void do_cp15instr(unsigned int esr, struct pt_regs *regs);
-void el0_svc_handler(struct pt_regs *regs);
-void el0_svc_compat_handler(struct pt_regs *regs);
+void do_el0_svc(struct pt_regs *regs);
+void do_el0_svc_compat(struct pt_regs *regs);
void do_el0_ia_bp_hardening(unsigned long addr, unsigned int esr,
struct pt_regs *regs);
#define KERNEL_HWCAP_SVESM4 __khwcap2_feature(SVESM4)
#define KERNEL_HWCAP_FLAGM2 __khwcap2_feature(FLAGM2)
#define KERNEL_HWCAP_FRINT __khwcap2_feature(FRINT)
+#define KERNEL_HWCAP_SVEI8MM __khwcap2_feature(SVEI8MM)
+#define KERNEL_HWCAP_SVEF32MM __khwcap2_feature(SVEF32MM)
+#define KERNEL_HWCAP_SVEF64MM __khwcap2_feature(SVEF64MM)
+#define KERNEL_HWCAP_SVEBF16 __khwcap2_feature(SVEBF16)
+#define KERNEL_HWCAP_I8MM __khwcap2_feature(I8MM)
+#define KERNEL_HWCAP_BF16 __khwcap2_feature(BF16)
+#define KERNEL_HWCAP_DGH __khwcap2_feature(DGH)
+#define KERNEL_HWCAP_RNG __khwcap2_feature(RNG)
/*
* This yields a mask that user programs can use to figure out what
struct kimage_arch {
void *dtb;
unsigned long dtb_mem;
+ /* Core ELF header buffer */
+ void *elf_headers;
+ unsigned long elf_headers_mem;
+ unsigned long elf_headers_sz;
};
extern const struct kexec_file_ops kexec_image_ops;
* wrong, and hyp will crash and burn when it uses any
* cpus_have_const_cap() wrapper.
*/
- BUG_ON(!static_branch_likely(&arm64_const_caps_ready));
+ BUG_ON(!system_capabilities_finalized());
__kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr, tpidr_el2);
/*
return true;
/* Some implementations have defects that confine them to VHE */
- if (cpus_have_cap(ARM64_WORKAROUND_1165522))
+ if (cpus_have_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE))
return true;
return false;
write_sysreg(kvm_get_vttbr(kvm), vttbr_el2);
/*
- * ARM erratum 1165522 requires the actual execution of the above
- * before we can switch to the EL1/EL0 translation regime used by
+ * ARM errata 1165522 and 1530923 require the actual execution of the
+ * above before we can switch to the EL1/EL0 translation regime used by
* the guest.
*/
- asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_1165522));
+ asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT_VHE));
}
#endif /* __ARM64_KVM_HYP_H__ */
#define __ALIGN .align 2
#define __ALIGN_STR ".align 2"
+/*
+ * Annotate a function as position independent, i.e., safe to be called before
+ * the kernel virtual mapping is activated.
+ */
+#define SYM_FUNC_START_PI(x) \
+ SYM_FUNC_START_ALIAS(__pi_##x); \
+ SYM_FUNC_START(x)
+
+#define SYM_FUNC_START_WEAK_PI(x) \
+ SYM_FUNC_START_ALIAS(__pi_##x); \
+ SYM_FUNC_START_WEAK(x)
+
+#define SYM_FUNC_END_PI(x) \
+ SYM_FUNC_END(x); \
+ SYM_FUNC_END_ALIAS(__pi_##x)
+
#endif
#include <asm/atomic_ll_sc.h>
-#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
+#ifdef CONFIG_ARM64_LSE_ATOMICS
+
+#define __LSE_PREAMBLE ".arch armv8-a+lse\n"
#include <linux/compiler_types.h>
#include <linux/export.h>
#include <asm/atomic_lse.h>
#include <asm/cpucaps.h>
-__asm__(".arch_extension lse");
-
extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
extern struct static_key_false arm64_const_caps_ready;
/* In-line patching at runtime */
#define ARM64_LSE_ATOMIC_INSN(llsc, lse) \
- ALTERNATIVE(llsc, lse, ARM64_HAS_LSE_ATOMICS)
+ ALTERNATIVE(llsc, __LSE_PREAMBLE lse, ARM64_HAS_LSE_ATOMICS)
-#else /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
+#else /* CONFIG_ARM64_LSE_ATOMICS */
static inline bool system_uses_lse_atomics(void) { return false; }
#define ARM64_LSE_ATOMIC_INSN(llsc, lse) llsc
-#endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
+#endif /* CONFIG_ARM64_LSE_ATOMICS */
#endif /* __ASM_LSE_H */
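/*
 * Illustrative sketch (not part of the patch): the patching idiom that
 * ARM64_LSE_ATOMIC_INSN enables. The LL/SC loop and the LSE sequence
 * must occupy the same number of instructions, hence the NOP padding;
 * the ".arch" preamble emits no bytes. The kernel's actual atomics are
 * generated in asm/atomic_ll_sc.h and asm/atomic_lse.h, so treat the
 * function below as an assumption-laden example only.
 */
static inline void example_atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC: retry until the exclusive store succeeds */
"	prfm	pstl1strm, %[v]\n"
"1:	ldxr	%w[result], %[v]\n"
"	add	%w[result], %w[result], %w[i]\n"
"	stxr	%w[tmp], %w[result], %[v]\n"
"	cbnz	%w[tmp], 1b",
	/* LSE: a single atomic add, padded to the same length */
"	stadd	%w[i], %[v]\n"
"	nop\n	nop\n	nop\n	nop")
	: [result] "=&r" (result), [tmp] "=&r" (tmp), [v] "+Q" (v->counter)
	: [i] "r" (i));
}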
*/
#define ASID(mm) ((mm)->context.id.counter & 0xffff)
-static inline bool arm64_kernel_unmapped_at_el0(void)
-{
- return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) &&
- cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
-}
+extern bool arm64_use_ng_mappings;
-static inline bool arm64_kernel_use_ng_mappings(void)
+static inline bool arm64_kernel_unmapped_at_el0(void)
{
- bool tx1_bug;
-
- /* What's a kpti? Use global mappings if we don't know. */
- if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
- return false;
-
- /*
- * Note: this function is called before the CPU capabilities have
- * been configured, so our early mappings will be global. If we
- * later determine that kpti is required, then
- * kpti_install_ng_mappings() will make them non-global.
- */
- if (arm64_kernel_unmapped_at_el0())
- return true;
-
- if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
- return false;
-
- /*
- * KASLR is enabled so we're going to be enabling kpti on non-broken
- * CPUs regardless of their susceptibility to Meltdown. Rather
- * than force everybody to go through the G -> nG dance later on,
- * just put down non-global mappings from the beginning.
- */
- if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) {
- tx1_bug = false;
-#ifndef MODULE
- } else if (!static_branch_likely(&arm64_const_caps_ready)) {
- extern const struct midr_range cavium_erratum_27456_cpus[];
-
- tx1_bug = is_midr_in_range_list(read_cpuid_id(),
- cavium_erratum_27456_cpus);
-#endif
- } else {
- tx1_bug = __cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456);
- }
-
- return !tx1_bug && kaslr_offset() > 0;
+ return arm64_use_ng_mappings;
}
typedef void (*bp_hardening_cb_t)(void);
pgprot_t prot, bool page_mappings_only);
extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot);
extern void mark_linear_text_alias_ro(void);
+extern bool kaslr_requires_kpti(void);
#define INIT_MM_CONTEXT(name) \
.pgd = init_pg_dir,
#define PUD_TABLE_BIT (_AT(pudval_t, 1) << 1)
#define PUD_TYPE_MASK (_AT(pudval_t, 3) << 0)
#define PUD_TYPE_SECT (_AT(pudval_t, 1) << 0)
+#define PUD_SECT_RDONLY (_AT(pudval_t, 1) << 7) /* AP[2] */
/*
* Level 2 descriptor (PMD).
#define TCR_HD (UL(1) << 40)
#define TCR_NFD0 (UL(1) << 53)
#define TCR_NFD1 (UL(1) << 54)
+#define TCR_E0PD0 (UL(1) << 55)
+#define TCR_E0PD1 (UL(1) << 56)
/*
* TTBR.
#define _PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
#define _PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
-#define PTE_MAYBE_NG (arm64_kernel_use_ng_mappings() ? PTE_NG : 0)
-#define PMD_MAYBE_NG (arm64_kernel_use_ng_mappings() ? PMD_SECT_NG : 0)
+#define PTE_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PTE_NG : 0)
+#define PMD_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PMD_SECT_NG : 0)
#define PROT_DEFAULT (_PROT_DEFAULT | PTE_MAYBE_NG)
#define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_MAYBE_NG)
#define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE)
#define PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN)
-#define PAGE_EXECONLY __pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN)
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
#define __P010 PAGE_READONLY
#define __P011 PAGE_READONLY
-#define __P100 PAGE_EXECONLY
+#define __P100 PAGE_READONLY_EXEC
#define __P101 PAGE_READONLY_EXEC
#define __P110 PAGE_READONLY_EXEC
#define __P111 PAGE_READONLY_EXEC
#define __S001 PAGE_READONLY
#define __S010 PAGE_SHARED
#define __S011 PAGE_SHARED
-#define __S100 PAGE_EXECONLY
+#define __S100 PAGE_READONLY_EXEC
#define __S101 PAGE_READONLY_EXEC
#define __S110 PAGE_SHARED_EXEC
#define __S111 PAGE_SHARED_EXEC
#define pte_dirty(pte) (pte_sw_dirty(pte) || pte_hw_dirty(pte))
#define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID))
-/*
- * Execute-only user mappings do not have the PTE_USER bit set. All valid
- * kernel mappings have the PTE_UXN bit set.
- */
#define pte_valid_not_user(pte) \
- ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
+ ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
#define pte_valid_young(pte) \
((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
#define pte_valid_user(pte) \
/*
* p??_access_permitted() is true for valid user mappings (subject to the
- * write permission check) other than user execute-only which do not have the
- * PTE_USER bit set. PROT_NONE mappings do not have the PTE_VALID bit set.
+ * write permission check). PROT_NONE mappings do not have the PTE_VALID bit
+ * set.
*/
#define pte_access_permitted(pte, write) \
(pte_valid_user(pte) && (!(write) || pte_write(pte)))
#include <asm-generic/sections.h>
extern char __alt_instructions[], __alt_instructions_end[];
-extern char __exception_text_start[], __exception_text_end[];
extern char __hibernate_exit_text_start[], __hibernate_exit_text_end[];
extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
extern char __hyp_text_start[], __hyp_text_end[];
static __must_check inline bool may_use_simd(void)
{
/*
+ * We must make sure that SVE has been initialized properly
+ * before using SIMD in the kernel.
* fpsimd_context_busy is only set while preemption is disabled,
* and is clear whenever preemption is enabled. Since
* this_cpu_read() is atomic w.r.t. preemption, fpsimd_context_busy
* migrated, and if it's clear we cannot be migrated to a CPU
* where it is set.
*/
- return !in_irq() && !irqs_disabled() && !in_nmi() &&
- !this_cpu_read(fpsimd_context_busy);
+ return !WARN_ON(!system_capabilities_finalized()) &&
+ system_supports_fpsimd() &&
+ !in_irq() && !irqs_disabled() && !in_nmi() &&
+ !this_cpu_read(fpsimd_context_busy);
}
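/*
 * Illustrative caller pattern (a sketch, not from this patch): kernel
 * code that wants to use NEON checks may_use_simd() and falls back to a
 * scalar path otherwise. do_neon_work() and do_scalar_work() are
 * assumed placeholders.
 */
static void example_simd_user(u8 *dst, const u8 *src, size_t len)
{
	if (may_use_simd()) {
		kernel_neon_begin();
		do_neon_work(dst, src, len);	/* NEON-accelerated path */
		kernel_neon_end();
	} else {
		do_scalar_work(dst, src, len);	/* integer fallback */
	}
}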
#else /* ! CONFIG_KERNEL_MODE_NEON */
#define SYS_ID_ISAR4_EL1 sys_reg(3, 0, 0, 2, 4)
#define SYS_ID_ISAR5_EL1 sys_reg(3, 0, 0, 2, 5)
#define SYS_ID_MMFR4_EL1 sys_reg(3, 0, 0, 2, 6)
+#define SYS_ID_ISAR6_EL1 sys_reg(3, 0, 0, 2, 7)
#define SYS_MVFR0_EL1 sys_reg(3, 0, 0, 3, 0)
#define SYS_MVFR1_EL1 sys_reg(3, 0, 0, 3, 1)
#define SYS_CTR_EL0 sys_reg(3, 3, 0, 0, 1)
#define SYS_DCZID_EL0 sys_reg(3, 3, 0, 0, 7)
+#define SYS_RNDR_EL0 sys_reg(3, 3, 2, 4, 0)
+#define SYS_RNDRRS_EL0 sys_reg(3, 3, 2, 4, 1)
+
#define SYS_PMCR_EL0 sys_reg(3, 3, 9, 12, 0)
#define SYS_PMCNTENSET_EL0 sys_reg(3, 3, 9, 12, 1)
#define SYS_PMCNTENCLR_EL0 sys_reg(3, 3, 9, 12, 2)
SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN |\
ENDIAN_SET_EL1 | SCTLR_EL1_UCI | SCTLR_EL1_RES1)
+/* MAIR_ELx memory attributes (used by Linux) */
+#define MAIR_ATTR_DEVICE_nGnRnE UL(0x00)
+#define MAIR_ATTR_DEVICE_nGnRE UL(0x04)
+#define MAIR_ATTR_DEVICE_GRE UL(0x0c)
+#define MAIR_ATTR_NORMAL_NC UL(0x44)
+#define MAIR_ATTR_NORMAL_WT UL(0xbb)
+#define MAIR_ATTR_NORMAL UL(0xff)
+#define MAIR_ATTR_MASK UL(0xff)
+
+/* Position the attr at the correct index */
+#define MAIR_ATTRIDX(attr, idx) ((attr) << ((idx) * 8))
+
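/*
 * A sketch of how the attribute encodings and MAIR_ATTRIDX() combine.
 * The MT_* index values below mirror the memory-type indices used
 * elsewhere in the arch code and are assumptions for illustration:
 */
#define MT_DEVICE_nGnRnE	0
#define MT_NORMAL_NC		1
#define MT_NORMAL		2

#define EXAMPLE_MAIR_EL1					\
	(MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) |	\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) |		\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL))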
/* id_aa64isar0 */
+#define ID_AA64ISAR0_RNDR_SHIFT 60
#define ID_AA64ISAR0_TS_SHIFT 52
#define ID_AA64ISAR0_FHM_SHIFT 48
#define ID_AA64ISAR0_DP_SHIFT 44
#define ID_AA64ISAR0_AES_SHIFT 4
/* id_aa64isar1 */
+#define ID_AA64ISAR1_I8MM_SHIFT 52
+#define ID_AA64ISAR1_DGH_SHIFT 48
+#define ID_AA64ISAR1_BF16_SHIFT 44
+#define ID_AA64ISAR1_SPECRES_SHIFT 40
#define ID_AA64ISAR1_SB_SHIFT 36
#define ID_AA64ISAR1_FRINTTS_SHIFT 32
#define ID_AA64ISAR1_GPI_SHIFT 28
#define ID_AA64PFR1_SSBS_PSTATE_INSNS 2
/* id_aa64zfr0 */
+#define ID_AA64ZFR0_F64MM_SHIFT 56
+#define ID_AA64ZFR0_F32MM_SHIFT 52
+#define ID_AA64ZFR0_I8MM_SHIFT 44
#define ID_AA64ZFR0_SM4_SHIFT 40
#define ID_AA64ZFR0_SHA3_SHIFT 32
+#define ID_AA64ZFR0_BF16_SHIFT 20
#define ID_AA64ZFR0_BITPERM_SHIFT 16
#define ID_AA64ZFR0_AES_SHIFT 4
#define ID_AA64ZFR0_SVEVER_SHIFT 0
+#define ID_AA64ZFR0_F64MM 0x1
+#define ID_AA64ZFR0_F32MM 0x1
+#define ID_AA64ZFR0_I8MM 0x1
+#define ID_AA64ZFR0_BF16 0x1
#define ID_AA64ZFR0_SM4 0x1
#define ID_AA64ZFR0_SHA3 0x1
#define ID_AA64ZFR0_BITPERM 0x1
#define ID_AA64MMFR1_VMIDBITS_16 2
/* id_aa64mmfr2 */
+#define ID_AA64MMFR2_E0PD_SHIFT 60
#define ID_AA64MMFR2_FWB_SHIFT 40
#define ID_AA64MMFR2_AT_SHIFT 32
#define ID_AA64MMFR2_LVA_SHIFT 16
#define ID_ISAR5_AES_SHIFT 4
#define ID_ISAR5_SEVL_SHIFT 0
+#define ID_ISAR6_I8MM_SHIFT 24
+#define ID_ISAR6_BF16_SHIFT 20
+#define ID_ISAR6_SPECRES_SHIFT 16
+#define ID_ISAR6_SB_SHIFT 12
+#define ID_ISAR6_FHM_SHIFT 8
+#define ID_ISAR6_DP_SHIFT 4
+#define ID_ISAR6_JSCVT_SHIFT 0
+
#define MVFR0_FPROUND_SHIFT 28
#define MVFR0_FPSHVEC_SHIFT 24
#define MVFR0_FPSQRT_SHIFT 20
#endif
#define __ARCH_WANT_SYS_CLONE
-#define __ARCH_WANT_SYS_CLONE3
#ifndef __COMPAT_SYSCALL_NR
#include <uapi/asm/unistd.h>
#define HWCAP2_SVESM4 (1 << 6)
#define HWCAP2_FLAGM2 (1 << 7)
#define HWCAP2_FRINT (1 << 8)
+#define HWCAP2_SVEI8MM (1 << 9)
+#define HWCAP2_SVEF32MM (1 << 10)
+#define HWCAP2_SVEF64MM (1 << 11)
+#define HWCAP2_SVEBF16 (1 << 12)
+#define HWCAP2_I8MM (1 << 13)
+#define HWCAP2_BF16 (1 << 14)
+#define HWCAP2_DGH (1 << 15)
+#define HWCAP2_RNG (1 << 16)
#endif /* _UAPI__ASM_HWCAP_H */
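/*
 * Illustrative only (not part of the patch): userspace discovers these
 * bits through the auxiliary vector, e.g.:
 */
#include <sys/auxv.h>
#include <asm/hwcap.h>

static int cpu_has_rng(void)
{
	return !!(getauxval(AT_HWCAP2) & HWCAP2_RNG);
}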
#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_SET_GET_RLIMIT
#define __ARCH_WANT_TIME32_SYSCALLS
+#define __ARCH_WANT_SYS_CLONE3
#include <asm-generic/unistd.h>
if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
return err;
- current_flags = arch_local_save_flags();
+ current_flags = local_daif_save_flags();
/*
* SEA can interrupt SError, mask it and describe this as an NMI so
};
/*
- * Invoked as late_initcall, since not needed before init spawned.
+ * Invoked as core_initcall, which guarantees that the instruction
+ * emulation is ready before userspace starts.
*/
static int __init armv8_deprecated_init(void)
{
mov x0, #HVC_SOFT_RESTART
hvc #0 // no return
-1: mov x18, x1 // entry
+1: mov x8, x1 // entry
mov x0, x2 // arg0
mov x1, x3 // arg1
mov x2, x4 // arg2
- br x18
+ br x8
ENDPROC(__cpu_soft_restart)
.popsection
MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
+ MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
+ MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
{ /* sentinel */ }
};
};
#endif
+#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT_VHE
+static const struct midr_range erratum_speculative_at_vhe_list[] = {
+#ifdef CONFIG_ARM64_ERRATUM_1165522
+ /* Cortex A76 r0p0 to r2p0 */
+ MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_1530923
+ /* Cortex A55 r0p0 to r2p0 */
+ MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 2, 0),
+#endif
+ {},
+};
+#endif
+
const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
{
ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
},
#endif
-#ifdef CONFIG_ARM64_ERRATUM_1165522
+#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT_VHE
{
- /* Cortex-A76 r0p0 to r2p0 */
- .desc = "ARM erratum 1165522",
- .capability = ARM64_WORKAROUND_1165522,
- ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
+ .desc = "ARM errata 1165522, 1530923",
+ .capability = ARM64_WORKAROUND_SPECULATIVE_AT_VHE,
+ ERRATA_MIDR_RANGE_LIST(erratum_speculative_at_vhe_list),
},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1463225
#ifdef CONFIG_ARM64_ERRATUM_1319367
{
.desc = "ARM erratum 1319367",
- .capability = ARM64_WORKAROUND_1319367,
+ .capability = ARM64_WORKAROUND_SPECULATIVE_AT_NVHE,
ERRATA_MIDR_RANGE_LIST(ca57_a72),
},
#endif
#define COMPAT_ELF_HWCAP_DEFAULT \
(COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
- COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
- COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
- COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
+ COMPAT_HWCAP_TLS|COMPAT_HWCAP_IDIV|\
COMPAT_HWCAP_LPAE)
unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
unsigned int compat_elf_hwcap2 __read_mostly;
/* Need also bit for ARM64_CB_PATCH */
DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);
+bool arm64_use_ng_mappings = false;
+EXPORT_SYMBOL(arm64_use_ng_mappings);
+
/*
* Flag to indicate if we have computed the system wide
* capabilities based on the boot time active CPUs. This
* will be used to determine if a new booting CPU should
* go through the verification process to make sure that it
* supports the system capabilities, without using a hotplug
- * notifier.
+ * notifier. This is also used to decide whether we can use
+ * the fast path for checking constant CPU caps.
*/
-static bool sys_caps_initialised;
-
-static inline void set_sys_caps_initialised(void)
+DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
+EXPORT_SYMBOL(arm64_const_caps_ready);
+static inline void finalize_system_capabilities(void)
{
- sys_caps_initialised = true;
+ static_branch_enable(&arm64_const_caps_ready);
}
static int dump_cpu_hwcaps(struct notifier_block *self, unsigned long v, void *p)
* sync with the documentation of the CPU feature register ABI.
*/
static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RNDR_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TS_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_FHM_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0),
};
static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_I8MM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DGH_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_BF16_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SPECRES_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SB_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FRINTTS_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
};
static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = {
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_F64MM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_F32MM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_I8MM_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SM4_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SHA3_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BF16_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BITPERM_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
};
static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_E0PD_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_FWB_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_AT_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
ARM64_FTR_END,
};
+static const struct arm64_ftr_bits ftr_id_isar6[] = {
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_I8MM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_BF16_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_SPECRES_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_SB_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_FHM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_DP_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_JSCVT_SHIFT, 4, 0),
+ ARM64_FTR_END,
+};
+
static const struct arm64_ftr_bits ftr_id_pfr0[] = {
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0), /* State3 */
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0), /* State2 */
ARM64_FTR_REG(SYS_ID_ISAR4_EL1, ftr_generic_32bits),
ARM64_FTR_REG(SYS_ID_ISAR5_EL1, ftr_id_isar5),
ARM64_FTR_REG(SYS_ID_MMFR4_EL1, ftr_id_mmfr4),
+ ARM64_FTR_REG(SYS_ID_ISAR6_EL1, ftr_id_isar6),
/* Op1 = 0, CRn = 0, CRm = 3 */
ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_generic_32bits),
init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
+ init_cpu_ftr_reg(SYS_ID_ISAR6_EL1, info->reg_id_isar6);
init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
info->reg_id_isar4, boot->reg_id_isar4);
taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu,
info->reg_id_isar5, boot->reg_id_isar5);
+ taint |= check_update_ftr_reg(SYS_ID_ISAR6_EL1, cpu,
+ info->reg_id_isar6, boot->reg_id_isar6);
/*
* Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
/* Probe vector lengths, unless we already gave up on SVE */
if (id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) &&
- !sys_caps_initialised)
+ !system_capabilities_finalized())
sve_update_vq_map();
}
read_sysreg_case(SYS_ID_ISAR3_EL1);
read_sysreg_case(SYS_ID_ISAR4_EL1);
read_sysreg_case(SYS_ID_ISAR5_EL1);
+ read_sysreg_case(SYS_ID_ISAR6_EL1);
read_sysreg_case(SYS_MVFR0_EL1);
read_sysreg_case(SYS_MVFR1_EL1);
read_sysreg_case(SYS_MVFR2_EL1);
return has_cpuid_feature(entry, scope);
}
+/*
+ * This check is triggered during early boot, before the cpufeature
+ * framework is initialised. Checking the status on the local CPU allows
+ * the boot CPU to detect the need for non-global mappings and thus avoid
+ * a pagetable rewrite after all the CPUs are booted. The check is run
+ * again on each individual CPU, so we reach a consistent state once all
+ * SMP CPUs are up and can then switch to non-global mappings if required.
+ */
+bool kaslr_requires_kpti(void)
+{
+ if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+ return false;
+
+ /*
+ * E0PD does a similar job to KPTI so can be used instead
+ * where available.
+ */
+ if (IS_ENABLED(CONFIG_ARM64_E0PD)) {
+ u64 mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);
+ if (cpuid_feature_extract_unsigned_field(mmfr2,
+ ID_AA64MMFR2_E0PD_SHIFT))
+ return false;
+ }
+
+ /*
+ * Systems affected by Cavium erratum 27456 are incompatible
+ * with KPTI.
+ */
+ if (IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) {
+ extern const struct midr_range cavium_erratum_27456_cpus[];
+
+ if (is_midr_in_range_list(read_cpuid_id(),
+ cavium_erratum_27456_cpus))
+ return false;
+ }
+
+ return kaslr_offset() > 0;
+}
+
static bool __meltdown_safe = true;
static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
static const struct midr_range kpti_safe_list[] = {
MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
+ MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
}
/* Useful for KASLR robustness */
- if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0) {
+ if (kaslr_requires_kpti()) {
if (!__kpti_forced) {
str = "KASLR";
__kpti_forced = 1;
extern kpti_remap_fn idmap_kpti_install_ng_mappings;
kpti_remap_fn *remap_fn;
- static bool kpti_applied = false;
int cpu = smp_processor_id();
/*
* it already or we have KASLR enabled and therefore have not
* created any global mappings at all.
*/
- if (kpti_applied || kaslr_offset() > 0)
+ if (arm64_use_ng_mappings)
return;
remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
cpu_uninstall_idmap();
if (!cpu)
- kpti_applied = true;
+ arm64_use_ng_mappings = true;
return;
}
}
#endif /* CONFIG_ARM64_PTR_AUTH */
+#ifdef CONFIG_ARM64_E0PD
+static void cpu_enable_e0pd(struct arm64_cpu_capabilities const *cap)
+{
+ if (this_cpu_has_cap(ARM64_HAS_E0PD))
+ sysreg_clear_set(tcr_el1, 0, TCR_E0PD1);
+}
+#endif /* CONFIG_ARM64_E0PD */
+
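/*
 * Background sketch (an aside, not part of the diff): with
 * TCR_EL1.E0PD1 set, EL0 accesses to the upper (TTBR1) half of the
 * address space fault in constant time, blunting Meltdown-style
 * probing without unmapping the kernel. This is why
 * kaslr_requires_kpti() treats E0PD as an alternative to KPTI.
 */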
#ifdef CONFIG_ARM64_PSEUDO_NMI
static bool enable_pseudo_nmi;
.cpu_enable = cpu_enable_pan,
},
#endif /* CONFIG_ARM64_PAN */
-#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
+#ifdef CONFIG_ARM64_LSE_ATOMICS
{
.desc = "LSE atomic instructions",
.capability = ARM64_HAS_LSE_ATOMICS,
.sign = FTR_UNSIGNED,
.min_field_value = 2,
},
-#endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
+#endif /* CONFIG_ARM64_LSE_ATOMICS */
{
.desc = "Software prefetching using PRFM",
.capability = ARM64_HAS_NO_HW_PREFETCH,
{
/* FP/SIMD is not implemented */
.capability = ARM64_HAS_NO_FPSIMD,
- .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE,
.min_field_value = 0,
.matches = has_no_fpsimd,
},
.sign = FTR_UNSIGNED,
.min_field_value = 1,
},
+#endif
+#ifdef CONFIG_ARM64_E0PD
+ {
+ .desc = "E0PD",
+ .capability = ARM64_HAS_E0PD,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .sys_reg = SYS_ID_AA64MMFR2_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64MMFR2_E0PD_SHIFT,
+ .matches = has_cpuid_feature,
+ .min_field_value = 1,
+ .cpu_enable = cpu_enable_e0pd,
+ },
+#endif
+#ifdef CONFIG_ARCH_RANDOM
+ {
+ .desc = "Random Number Generator",
+ .capability = ARM64_HAS_RNG,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_cpuid_feature,
+ .sys_reg = SYS_ID_AA64ISAR0_EL1,
+ .field_pos = ID_AA64ISAR0_RNDR_SHIFT,
+ .sign = FTR_UNSIGNED,
+ .min_field_value = 1,
+ },
#endif
{},
};
.match_list = list, \
}
+#define HWCAP_CAP_MATCH(match, cap_type, cap) \
+ { \
+ __HWCAP_CAP(#cap, cap_type, cap) \
+ .matches = match, \
+ }
+
#ifdef CONFIG_ARM64_PTR_AUTH
static const struct arm64_cpu_capabilities ptr_auth_hwcap_addr_matches[] = {
{
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDFHM),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FLAGM),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_FLAGM2),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RNDR_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RNG),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_FP),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FPHP),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_ASIMD),
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_ILRCPC),
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FRINTTS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FRINT),
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_SB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SB),
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_BF16_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_BF16),
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DGH_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DGH),
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_I8MM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_I8MM),
HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_USCAT),
#ifdef CONFIG_ARM64_SVE
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, KERNEL_HWCAP_SVE),
HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_AES_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_AES, CAP_HWCAP, KERNEL_HWCAP_SVEAES),
HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_AES_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_AES_PMULL, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL),
HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_BITPERM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_BITPERM, CAP_HWCAP, KERNEL_HWCAP_SVEBITPERM),
+ HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_BF16_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_BF16, CAP_HWCAP, KERNEL_HWCAP_SVEBF16),
HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SHA3_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_SHA3, CAP_HWCAP, KERNEL_HWCAP_SVESHA3),
HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SM4_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_SM4, CAP_HWCAP, KERNEL_HWCAP_SVESM4),
+ HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_I8MM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_I8MM, CAP_HWCAP, KERNEL_HWCAP_SVEI8MM),
+ HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_F32MM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_F32MM, CAP_HWCAP, KERNEL_HWCAP_SVEF32MM),
+ HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_F64MM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_F64MM, CAP_HWCAP, KERNEL_HWCAP_SVEF64MM),
#endif
HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, KERNEL_HWCAP_SSBS),
#ifdef CONFIG_ARM64_PTR_AUTH
{},
};
+#ifdef CONFIG_COMPAT
+static bool compat_has_neon(const struct arm64_cpu_capabilities *cap, int scope)
+{
+ /*
+ * Check that all of MVFR1_EL1.{SIMDSP, SIMDInt, SIMDLS} are available,
+ * in line with the arm32 check in vfp_init(). The check is kept
+ * future-proof by only requiring the value to be non-zero.
+ */
+ u32 mvfr1;
+
+ WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
+ if (scope == SCOPE_SYSTEM)
+ mvfr1 = read_sanitised_ftr_reg(SYS_MVFR1_EL1);
+ else
+ mvfr1 = read_sysreg_s(SYS_MVFR1_EL1);
+
+ return cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDSP_SHIFT) &&
+ cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDINT_SHIFT) &&
+ cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDLS_SHIFT);
+}
+#endif
+
static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
#ifdef CONFIG_COMPAT
+ HWCAP_CAP_MATCH(compat_has_neon, CAP_COMPAT_HWCAP, COMPAT_HWCAP_NEON),
+ HWCAP_CAP(SYS_MVFR1_EL1, MVFR1_SIMDFMAC_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv4),
+ /* Arm v8 mandates MVFR0.FPDP == {0, 2}, so piggy-back on this for the presence of VFP support */
+ HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFP),
+ HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv3),
HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
* Otherwise, this CPU should verify that it has all the system
* advertised capabilities.
*/
- if (!sys_caps_initialised)
+ if (!system_capabilities_finalized())
update_cpu_capabilities(SCOPE_LOCAL_CPU);
else
verify_local_cpu_capabilities();
enable_cpu_capabilities(SCOPE_BOOT_CPU);
}
-DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
-EXPORT_SYMBOL(arm64_const_caps_ready);
-
-static void __init mark_const_caps_ready(void)
-{
- static_branch_enable(&arm64_const_caps_ready);
-}
-
bool this_cpu_has_cap(unsigned int n)
{
if (!WARN_ON(preemptible()) && n < ARM64_NCAPS) {
u32 cwg;
setup_system_capabilities();
- mark_const_caps_ready();
setup_elf_hwcaps(arm64_elf_hwcaps);
if (system_supports_32bit_el0())
minsigstksz_setup();
/* Advertise that we have computed the system capabilities */
- set_sys_caps_initialised();
+ finalize_system_capabilities();
/*
* Check for sane CTR_EL0.CWG value.
"svesm4",
"flagm2",
"frint",
+ "svei8mm",
+ "svef32mm",
+ "svef64mm",
+ "svebf16",
+ "i8mm",
+ "bf16",
+ "dgh",
+ "rng",
NULL
};
info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1);
info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1);
info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1);
+ info->reg_id_isar6 = read_cpuid(ID_ISAR6_EL1);
info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1);
info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1);
info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1);
}
NOKPROBE_SYMBOL(el1_pc);
-static void el1_undef(struct pt_regs *regs)
+static void notrace el1_undef(struct pt_regs *regs)
{
local_daif_inherit(regs);
do_undefinstr(regs);
}
NOKPROBE_SYMBOL(el1_undef);
-static void el1_inv(struct pt_regs *regs, unsigned long esr)
+static void notrace el1_inv(struct pt_regs *regs, unsigned long esr)
{
local_daif_inherit(regs);
bad_mode(regs, 0, esr);
if (system_uses_irq_prio_masking())
gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
- el0_svc_handler(regs);
+ do_el0_svc(regs);
}
NOKPROBE_SYMBOL(el0_svc);
if (system_uses_irq_prio_masking())
gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
- el0_svc_compat_handler(regs);
+ do_el0_svc_compat(regs);
}
NOKPROBE_SYMBOL(el0_svc_compat);
.macro kernel_ventry, el, label, regsize = 64
.align 7
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-alternative_if ARM64_UNMAP_KERNEL_AT_EL0
.if \el == 0
+alternative_if ARM64_UNMAP_KERNEL_AT_EL0
.if \regsize == 64
mrs x30, tpidrro_el0
msr tpidrro_el0, xzr
.else
mov x30, xzr
.endif
- .endif
alternative_else_nop_endif
+ .endif
#endif
sub sp, sp, #S_FRAME_SIZE
.if \el == 0
clear_gp_regs
mrs x21, sp_el0
- ldr_this_cpu tsk, __entry_task, x20 // Ensure MDSCR_EL1.SS is clear,
- ldr x19, [tsk, #TSK_TI_FLAGS] // since we can unmask debug
- disable_step_tsk x19, x20 // exceptions when scheduling.
+ ldr_this_cpu tsk, __entry_task, x20
+ msr sp_el0, tsk
+
+ // Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions
+ // when scheduling.
+ ldr x19, [tsk, #TSK_TI_FLAGS]
+ disable_step_tsk x19, x20
apply_ssbd 1, x22, x23
str w21, [sp, #S_SYSCALLNO]
.endif
- /*
- * Set sp_el0 to current thread_info.
- */
- .if \el == 0
- msr sp_el0, tsk
- .endif
-
/* Save pmr */
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
mrs_s x20, SYS_ICC_PMR_EL1
mov x0, sp
bl el0_sync_handler
b ret_to_user
+ENDPROC(el0_sync)
#ifdef CONFIG_COMPAT
.align 6
mov x0, sp
bl el0_sync_compat_handler
b ret_to_user
-ENDPROC(el0_sync)
+ENDPROC(el0_sync_compat)
.align 6
el0_irq_compat:
kernel_entry 0, 32
b el0_irq_naked
+ENDPROC(el0_irq_compat)
el0_error_compat:
kernel_entry 0, 32
b el0_error_naked
+ENDPROC(el0_error_compat)
#endif
.align 6
*/
static void task_fpsimd_load(void)
{
+ WARN_ON(!system_supports_fpsimd());
WARN_ON(!have_cpu_fpsimd_context());
if (system_supports_sve() && test_thread_flag(TIF_SVE))
this_cpu_ptr(&fpsimd_last_state);
/* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */
+ WARN_ON(!system_supports_fpsimd());
WARN_ON(!have_cpu_fpsimd_context());
if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
struct fpsimd_last_state_struct *last =
this_cpu_ptr(&fpsimd_last_state);
+ WARN_ON(!system_supports_fpsimd());
last->st = &current->thread.uw.fpsimd_state;
last->sve_state = current->thread.sve_state;
last->sve_vl = current->thread.sve_vl;
struct fpsimd_last_state_struct *last =
this_cpu_ptr(&fpsimd_last_state);
+ WARN_ON(!system_supports_fpsimd());
WARN_ON(!in_softirq() && !irqs_disabled());
last->st = st;
*/
void fpsimd_restore_current_state(void)
{
- if (!system_supports_fpsimd())
+ /*
+ * For tasks created before we detected the absence of FP/SIMD
+ * (e.g. init), TIF_FOREIGN_FPSTATE may have been set via
+ * fpsimd_thread_switch() and then inherited by child processes.
+ * If we later detect that the system doesn't support FP/SIMD,
+ * we must clear the flag for all tasks to indicate that the
+ * FPSTATE is clean (as we can't have one), to avoid looping
+ * forever in do_notify_resume().
+ */
+ if (!system_supports_fpsimd()) {
+ clear_thread_flag(TIF_FOREIGN_FPSTATE);
return;
+ }
get_cpu_fpsimd_context();
*/
void fpsimd_update_current_state(struct user_fpsimd_state const *state)
{
- if (!system_supports_fpsimd())
+ if (WARN_ON(!system_supports_fpsimd()))
return;
get_cpu_fpsimd_context();
void fpsimd_flush_task_state(struct task_struct *t)
{
t->thread.fpsimd_cpu = NR_CPUS;
-
+ /*
+ * If we don't support fpsimd, bail out after we have
+ * reset the fpsimd_cpu for this task and cleared the
+ * FPSTATE.
+ */
+ if (!system_supports_fpsimd())
+ return;
barrier();
set_tsk_thread_flag(t, TIF_FOREIGN_FPSTATE);
*/
static void fpsimd_flush_cpu_state(void)
{
+ WARN_ON(!system_supports_fpsimd());
__this_cpu_write(fpsimd_last_state.st, NULL);
set_thread_flag(TIF_FOREIGN_FPSTATE);
}
*/
void fpsimd_save_and_flush_cpu_state(void)
{
+ if (!system_supports_fpsimd())
+ return;
WARN_ON(preemptible());
__get_cpu_fpsimd_context();
fpsimd_save();
}
EXPORT_SYMBOL(arch_hibernation_header_restore);
-/*
- * Copies length bytes, starting at src_start into an new page,
- * perform cache maintentance, then maps it at the specified address low
- * address as executable.
- *
- * This is used by hibernate to copy the code it needs to execute when
- * overwriting the kernel text. This function generates a new set of page
- * tables, which it loads into ttbr0.
- *
- * Length is provided as we probably only want 4K of data, even on a 64K
- * page system.
- */
-static int create_safe_exec_page(void *src_start, size_t length,
- unsigned long dst_addr,
- phys_addr_t *phys_dst_addr,
- void *(*allocator)(gfp_t mask),
- gfp_t mask)
+static int trans_pgd_map_page(pgd_t *trans_pgd, void *page,
+ unsigned long dst_addr,
+ pgprot_t pgprot)
{
- int rc = 0;
- pgd_t *trans_pgd;
pgd_t *pgdp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
- unsigned long dst = (unsigned long)allocator(mask);
-
- if (!dst) {
- rc = -ENOMEM;
- goto out;
- }
-
- memcpy((void *)dst, src_start, length);
- __flush_icache_range(dst, dst + length);
-
- trans_pgd = allocator(mask);
- if (!trans_pgd) {
- rc = -ENOMEM;
- goto out;
- }
pgdp = pgd_offset_raw(trans_pgd, dst_addr);
if (pgd_none(READ_ONCE(*pgdp))) {
- pudp = allocator(mask);
- if (!pudp) {
- rc = -ENOMEM;
- goto out;
- }
+ pudp = (void *)get_safe_page(GFP_ATOMIC);
+ if (!pudp)
+ return -ENOMEM;
pgd_populate(&init_mm, pgdp, pudp);
}
pudp = pud_offset(pgdp, dst_addr);
if (pud_none(READ_ONCE(*pudp))) {
- pmdp = allocator(mask);
- if (!pmdp) {
- rc = -ENOMEM;
- goto out;
- }
+ pmdp = (void *)get_safe_page(GFP_ATOMIC);
+ if (!pmdp)
+ return -ENOMEM;
pud_populate(&init_mm, pudp, pmdp);
}
pmdp = pmd_offset(pudp, dst_addr);
if (pmd_none(READ_ONCE(*pmdp))) {
- ptep = allocator(mask);
- if (!ptep) {
- rc = -ENOMEM;
- goto out;
- }
+ ptep = (void *)get_safe_page(GFP_ATOMIC);
+ if (!ptep)
+ return -ENOMEM;
pmd_populate_kernel(&init_mm, pmdp, ptep);
}
ptep = pte_offset_kernel(pmdp, dst_addr);
- set_pte(ptep, pfn_pte(virt_to_pfn(dst), PAGE_KERNEL_EXEC));
+ set_pte(ptep, pfn_pte(virt_to_pfn(page), PAGE_KERNEL_EXEC));
+
+ return 0;
+}
+
+/*
+ * Copies length bytes, starting at src_start, into a new page,
+ * performs cache maintenance, then maps the page at the specified
+ * low address as executable.
+ *
+ * This is used by hibernate to copy the code it needs to execute when
+ * overwriting the kernel text. This function generates a new set of page
+ * tables, which it loads into ttbr0.
+ *
+ * Length is provided as we probably only want 4K of data, even on a 64K
+ * page system.
+ */
+static int create_safe_exec_page(void *src_start, size_t length,
+ unsigned long dst_addr,
+ phys_addr_t *phys_dst_addr)
+{
+ void *page = (void *)get_safe_page(GFP_ATOMIC);
+ pgd_t *trans_pgd;
+ int rc;
+
+ if (!page)
+ return -ENOMEM;
+
+ memcpy(page, src_start, length);
+ __flush_icache_range((unsigned long)page, (unsigned long)page + length);
+
+ trans_pgd = (void *)get_safe_page(GFP_ATOMIC);
+ if (!trans_pgd)
+ return -ENOMEM;
+
+ rc = trans_pgd_map_page(trans_pgd, page, dst_addr,
+ PAGE_KERNEL_EXEC);
+ if (rc)
+ return rc;
/*
* Load our new page tables. A strict BBM approach requires that we
*/
cpu_set_reserved_ttbr0();
local_flush_tlb_all();
- write_sysreg(phys_to_ttbr(virt_to_phys(pgdp)), ttbr0_el1);
+ write_sysreg(phys_to_ttbr(virt_to_phys(trans_pgd)), ttbr0_el1);
isb();
- *phys_dst_addr = virt_to_phys((void *)dst);
+ *phys_dst_addr = virt_to_phys(page);
-out:
- return rc;
+ return 0;
}
#define dcache_clean_range(start, end) __flush_dcache_area(start, (end - start))
return -ENOMEM;
} else {
set_pud(dst_pudp,
- __pud(pud_val(pud) & ~PMD_SECT_RDONLY));
+ __pud(pud_val(pud) & ~PUD_SECT_RDONLY));
}
} while (dst_pudp++, src_pudp++, addr = next, addr != end);
return 0;
}
+static int trans_pgd_create_copy(pgd_t **dst_pgdp, unsigned long start,
+ unsigned long end)
+{
+ int rc;
+ pgd_t *trans_pgd = (pgd_t *)get_safe_page(GFP_ATOMIC);
+
+ if (!trans_pgd) {
+ pr_err("Failed to allocate memory for temporary page tables.\n");
+ return -ENOMEM;
+ }
+
+ rc = copy_page_tables(trans_pgd, start, end);
+ if (!rc)
+ *dst_pgdp = trans_pgd;
+
+ return rc;
+}
+
/*
* Setup then Resume from the hibernate image using swsusp_arch_suspend_exit().
*
*/
int swsusp_arch_resume(void)
{
- int rc = 0;
+ int rc;
void *zero_page;
size_t exit_size;
pgd_t *tmp_pg_dir;
* Create a second copy of just the linear map, and use this when
* restoring.
*/
- tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
- if (!tmp_pg_dir) {
- pr_err("Failed to allocate memory for temporary page tables.\n");
- rc = -ENOMEM;
- goto out;
- }
- rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, PAGE_END);
+ rc = trans_pgd_create_copy(&tmp_pg_dir, PAGE_OFFSET, PAGE_END);
if (rc)
- goto out;
+ return rc;
/*
* We need a zero page that is zero before & after resume in order to
zero_page = (void *)get_safe_page(GFP_ATOMIC);
if (!zero_page) {
pr_err("Failed to allocate zero page.\n");
- rc = -ENOMEM;
- goto out;
+ return -ENOMEM;
}
/*
*/
rc = create_safe_exec_page(__hibernate_exit_text_start, exit_size,
(unsigned long)hibernate_exit,
- &phys_hibernate_exit,
- (void *)get_safe_page, GFP_ATOMIC);
+ &phys_hibernate_exit);
if (rc) {
pr_err("Failed to create safe executable page for hibernate_exit code.\n");
- goto out;
+ return rc;
}
/*
resume_hdr.reenter_kernel, restore_pblist,
resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));
-out:
- return rc;
+ return 0;
}
int hibernate_resume_nonboot_cpu_disable(void)
return 0;
}
+ /*
+ * Mix in any entropy obtainable architecturally, open-coded
+ * since this runs extremely early.
+ */
+ if (__early_cpu_has_rndr()) {
+ unsigned long raw;
+
+ if (__arm64_rndr(&raw))
+ seed ^= raw;
+ }
+
if (!seed) {
kaslr_status = KASLR_DISABLED_NO_SEED;
return 0;
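/*
 * A sketch of the __arm64_rndr() helper used above (the real definition
 * lives in asm/archrandom.h): RNDR sets PSTATE.Z on failure, so success
 * is signalled via the condition flags.
 */
static inline bool __arm64_rndr(unsigned long *v)
{
	bool ok;

	asm volatile(
		__mrs_s("%0", SYS_RNDR_EL0) "\n"
	"	cset %w1, ne\n"
	: "=r" (*v), "=r" (ok)
	:
	: "cc");

	return ok;
}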
struct kexec_segment *kernel_segment;
int ret;
- /* We don't support crash kernels yet. */
- if (image->type == KEXEC_TYPE_CRASH)
- return ERR_PTR(-EOPNOTSUPP);
-
/*
* We require a kernel with an unambiguous Image header. Per
* Documentation/arm64/booting.rst, this is the case when image_size
kexec_image_info(kimage);
- pr_debug("%s:%d: control_code_page: %p\n", __func__, __LINE__,
- kimage->control_code_page);
- pr_debug("%s:%d: reboot_code_buffer_phys: %pa\n", __func__, __LINE__,
- &reboot_code_buffer_phys);
- pr_debug("%s:%d: reboot_code_buffer: %p\n", __func__, __LINE__,
- reboot_code_buffer);
- pr_debug("%s:%d: relocate_new_kernel: %p\n", __func__, __LINE__,
- arm64_relocate_new_kernel);
- pr_debug("%s:%d: relocate_new_kernel_size: 0x%lx(%lu) bytes\n",
- __func__, __LINE__, arm64_relocate_new_kernel_size,
- arm64_relocate_new_kernel_size);
-
/*
* Copy arm64_relocate_new_kernel to the reboot_code_buffer for use
* after the kernel is shut down.
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/random.h>
+#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <asm/byteorder.h>
/* relevant device tree properties */
+#define FDT_PROP_KEXEC_ELFHDR "linux,elfcorehdr"
+#define FDT_PROP_MEM_RANGE "linux,usable-memory-range"
#define FDT_PROP_INITRD_START "linux,initrd-start"
#define FDT_PROP_INITRD_END "linux,initrd-end"
#define FDT_PROP_BOOTARGS "bootargs"
vfree(image->arch.dtb);
image->arch.dtb = NULL;
+ vfree(image->arch.elf_headers);
+ image->arch.elf_headers = NULL;
+ image->arch.elf_headers_sz = 0;
+
return kexec_image_post_load_cleanup_default(image);
}
off = ret;
+ ret = fdt_delprop(dtb, off, FDT_PROP_KEXEC_ELFHDR);
+ if (ret && ret != -FDT_ERR_NOTFOUND)
+ goto out;
+ ret = fdt_delprop(dtb, off, FDT_PROP_MEM_RANGE);
+ if (ret && ret != -FDT_ERR_NOTFOUND)
+ goto out;
+
+ if (image->type == KEXEC_TYPE_CRASH) {
+ /* add linux,elfcorehdr */
+ ret = fdt_appendprop_addrrange(dtb, 0, off,
+ FDT_PROP_KEXEC_ELFHDR,
+ image->arch.elf_headers_mem,
+ image->arch.elf_headers_sz);
+ if (ret)
+ return (ret == -FDT_ERR_NOSPACE ? -ENOMEM : -EINVAL);
+
+ /* add linux,usable-memory-range */
+ ret = fdt_appendprop_addrrange(dtb, 0, off,
+ FDT_PROP_MEM_RANGE,
+ crashk_res.start,
+ crashk_res.end - crashk_res.start + 1);
+ if (ret)
+ return (ret == -FDT_ERR_NOSPACE ? -ENOMEM : -EINVAL);
+ }
+
/* add bootargs */
if (cmdline) {
ret = fdt_setprop_string(dtb, off, FDT_PROP_BOOTARGS, cmdline);
}
/*
- * More space needed so that we can add initrd, bootargs, kaslr-seed, and
- * rng-seed.
+ * More space needed so that we can add initrd, bootargs, kaslr-seed,
+ * rng-seed, usable-memory-range and elfcorehdr.
*/
#define DTB_EXTRA_SPACE 0x1000
}
}
+static int prepare_elf_headers(void **addr, unsigned long *sz)
+{
+ struct crash_mem *cmem;
+ unsigned int nr_ranges;
+ int ret;
+ u64 i;
+ phys_addr_t start, end;
+
+ nr_ranges = 1; /* for exclusion of crashkernel region */
+ for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,
+ MEMBLOCK_NONE, &start, &end, NULL)
+ nr_ranges++;
+
+ cmem = kmalloc(sizeof(struct crash_mem) +
+ sizeof(struct crash_mem_range) * nr_ranges, GFP_KERNEL);
+ if (!cmem)
+ return -ENOMEM;
+
+ cmem->max_nr_ranges = nr_ranges;
+ cmem->nr_ranges = 0;
+ for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,
+ MEMBLOCK_NONE, &start, &end, NULL) {
+ cmem->ranges[cmem->nr_ranges].start = start;
+ cmem->ranges[cmem->nr_ranges].end = end - 1;
+ cmem->nr_ranges++;
+ }
+
+ /* Exclude crashkernel region */
+ ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
+
+ if (!ret)
+ ret = crash_prepare_elf64_headers(cmem, true, addr, sz);
+
+ kfree(cmem);
+ return ret;
+}
+
int load_other_segments(struct kimage *image,
unsigned long kernel_load_addr,
unsigned long kernel_size,
char *cmdline)
{
struct kexec_buf kbuf;
- void *dtb = NULL;
- unsigned long initrd_load_addr = 0, dtb_len;
+ void *headers, *dtb = NULL;
+ unsigned long headers_sz, initrd_load_addr = 0, dtb_len;
int ret = 0;
kbuf.image = image;
/* not allocate anything below the kernel */
kbuf.buf_min = kernel_load_addr + kernel_size;
+ /* load elf core header */
+ if (image->type == KEXEC_TYPE_CRASH) {
+ ret = prepare_elf_headers(&headers, &headers_sz);
+ if (ret) {
+ pr_err("Preparing elf core header failed\n");
+ goto out_err;
+ }
+
+ kbuf.buffer = headers;
+ kbuf.bufsz = headers_sz;
+ kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
+ kbuf.memsz = headers_sz;
+ kbuf.buf_align = SZ_64K; /* largest supported page size */
+ kbuf.buf_max = ULONG_MAX;
+ kbuf.top_down = true;
+
+ ret = kexec_add_buffer(&kbuf);
+ if (ret) {
+ vfree(headers);
+ goto out_err;
+ }
+ image->arch.elf_headers = headers;
+ image->arch.elf_headers_mem = kbuf.mem;
+ image->arch.elf_headers_sz = headers_sz;
+
+ pr_debug("Loaded elf core header at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
+ image->arch.elf_headers_mem, headers_sz, headers_sz);
+ }
+
/* load initrd */
if (initrd) {
kbuf.buffer = initrd;
asmlinkage void ret_from_fork(void) asm("ret_from_fork");
-int copy_thread(unsigned long clone_flags, unsigned long stack_start,
- unsigned long stk_sz, struct task_struct *p)
+int copy_thread_tls(unsigned long clone_flags, unsigned long stack_start,
+ unsigned long stk_sz, struct task_struct *p, unsigned long tls)
{
struct pt_regs *childregs = task_pt_regs(p);
}
/*
- * If a TLS pointer was passed to clone (4th argument), use it
- * for the new thread.
+ * If a TLS pointer was passed to clone, use it for the new
+ * thread.
*/
if (clone_flags & CLONE_SETTLS)
- p->thread.uw.tp_value = childregs->regs[3];
+ p->thread.uw.tp_value = tls;
} else {
memset(childregs, 0, sizeof(struct pt_regs));
childregs->pstate = PSR_MODE_EL1h;
* Only allow a task to be preempted once cpufeatures have been
* enabled.
*/
- if (static_branch_likely(&arm64_const_caps_ready))
+ if (system_capabilities_finalized())
preempt_schedule_irq();
}
return 0;
}
+static int fpr_active(struct task_struct *target, const struct user_regset *regset)
+{
+ if (!system_supports_fpsimd())
+ return -ENODEV;
+ return regset->n;
+}
+
/*
* TODO: update fp accessors for lazy context switching (sync/flush hwstate)
*/
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
+ if (!system_supports_fpsimd())
+ return -EINVAL;
+
if (target == current)
fpsimd_preserve_current_state();
{
int ret;
+ if (!system_supports_fpsimd())
+ return -EINVAL;
+
ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);
if (ret)
return ret;
*/
.size = sizeof(u32),
.align = sizeof(u32),
+ .active = fpr_active,
.get = fpr_get,
.set = fpr_set
},
compat_ulong_t fpscr;
int ret, vregs_end_pos;
+ if (!system_supports_fpsimd())
+ return -EINVAL;
+
uregs = &target->thread.uw.fpsimd_state;
if (target == current)
compat_ulong_t fpscr;
int ret, vregs_end_pos;
+ if (!system_supports_fpsimd())
+ return -EINVAL;
+
uregs = &target->thread.uw.fpsimd_state;
vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
.size = sizeof(compat_ulong_t),
.align = sizeof(compat_ulong_t),
+ .active = fpr_active,
.get = compat_vfp_get,
.set = compat_vfp_set
},
*cmdline_p = boot_command_line;
+ /*
+ * If we know now that we are going to need KPTI then use non-global
+ * mappings from the start, avoiding the cost of rewriting
+ * everything later.
+ */
+ arm64_use_ng_mappings = kaslr_requires_kpti();
+
early_fixmap_init();
early_ioremap_init();
goto done;
case FPSIMD_MAGIC:
+ if (!system_supports_fpsimd())
+ goto invalid;
if (user->fpsimd)
goto invalid;
if (err == 0)
err = parse_user_sigframe(&user, sf);
- if (err == 0) {
+ if (err == 0 && system_supports_fpsimd()) {
if (!user.fpsimd)
return -EINVAL;
err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
- if (err == 0) {
+ if (err == 0 && system_supports_fpsimd()) {
struct fpsimd_context __user *fpsimd_ctx =
apply_user_offset(user, user->fpsimd_offset);
err |= preserve_fpsimd_context(fpsimd_ctx);
err |= !valid_user_regs(&regs->user_regs, current);
aux = (struct compat_aux_sigframe __user *) sf->uc.uc_regspace;
- if (err == 0)
+ if (err == 0 && system_supports_fpsimd())
err |= compat_restore_vfp_context(&aux->vfp);
return err;
aux = (struct compat_aux_sigframe __user *) sf->uc.uc_regspace;
- if (err == 0)
+ if (err == 0 && system_supports_fpsimd())
err |= compat_preserve_vfp_context(&aux->vfp);
__put_user_error(0, &aux->end_magic, err);
/* Unsupported */
if (state == ARM64_SSBD_UNKNOWN)
- return -EINVAL;
+ return -ENODEV;
/* Treat the unaffected/mitigated state separately */
if (state == ARM64_SSBD_MITIGATED) {
{
switch (arm64_get_ssbd_state()) {
case ARM64_SSBD_UNKNOWN:
- return -EINVAL;
+ return -ENODEV;
case ARM64_SSBD_FORCE_ENABLE:
return PR_SPEC_DISABLE;
case ARM64_SSBD_KERNEL:
sve_user_disable();
}
-void el0_svc_handler(struct pt_regs *regs)
+void do_el0_svc(struct pt_regs *regs)
{
sve_user_discard();
el0_svc_common(regs, regs->regs[8], __NR_syscalls, sys_call_table);
}
#ifdef CONFIG_COMPAT
-void el0_svc_compat_handler(struct pt_regs *regs)
+void do_el0_svc_compat(struct pt_regs *regs)
{
el0_svc_common(regs, regs->regs[7], __NR_compat_syscalls,
compat_sys_call_table);
.text
.pushsection .hyp.text, "ax"
+/*
+ * We treat x18 as callee-saved as the host may use it as a platform
+ * register (e.g. for shadow call stack).
+ */
.macro save_callee_saved_regs ctxt
+ str x18, [\ctxt, #CPU_XREG_OFFSET(18)]
stp x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
stp x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
stp x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
.endm
.macro restore_callee_saved_regs ctxt
+ // We require that \ctxt is not in x18-x28
+ ldr x18, [\ctxt, #CPU_XREG_OFFSET(18)]
ldp x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
ldp x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
ldp x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
// x0: vcpu
// x1: host context
// x2-x17: clobbered by macros
- // x18: guest context
+ // x29: guest context
// Store the host regs
save_callee_saved_regs x1
ret
1:
- add x18, x0, #VCPU_CONTEXT
+ add x29, x0, #VCPU_CONTEXT
// Macro ptrauth_switch_to_guest format:
// ptrauth_switch_to_guest(guest cxt, tmp1, tmp2, tmp3)
// The below macro to restore guest keys is not implemented in C code
// as it may cause Pointer Authentication key signing mismatch errors
// when this feature is enabled for kernel code.
- ptrauth_switch_to_guest x18, x0, x1, x2
+ ptrauth_switch_to_guest x29, x0, x1, x2
// Restore guest regs x0-x17
- ldp x0, x1, [x18, #CPU_XREG_OFFSET(0)]
- ldp x2, x3, [x18, #CPU_XREG_OFFSET(2)]
- ldp x4, x5, [x18, #CPU_XREG_OFFSET(4)]
- ldp x6, x7, [x18, #CPU_XREG_OFFSET(6)]
- ldp x8, x9, [x18, #CPU_XREG_OFFSET(8)]
- ldp x10, x11, [x18, #CPU_XREG_OFFSET(10)]
- ldp x12, x13, [x18, #CPU_XREG_OFFSET(12)]
- ldp x14, x15, [x18, #CPU_XREG_OFFSET(14)]
- ldp x16, x17, [x18, #CPU_XREG_OFFSET(16)]
-
- // Restore guest regs x19-x29, lr
- restore_callee_saved_regs x18
-
- // Restore guest reg x18
- ldr x18, [x18, #CPU_XREG_OFFSET(18)]
+ ldp x0, x1, [x29, #CPU_XREG_OFFSET(0)]
+ ldp x2, x3, [x29, #CPU_XREG_OFFSET(2)]
+ ldp x4, x5, [x29, #CPU_XREG_OFFSET(4)]
+ ldp x6, x7, [x29, #CPU_XREG_OFFSET(6)]
+ ldp x8, x9, [x29, #CPU_XREG_OFFSET(8)]
+ ldp x10, x11, [x29, #CPU_XREG_OFFSET(10)]
+ ldp x12, x13, [x29, #CPU_XREG_OFFSET(12)]
+ ldp x14, x15, [x29, #CPU_XREG_OFFSET(14)]
+ ldp x16, x17, [x29, #CPU_XREG_OFFSET(16)]
+
+ // Restore guest regs x18-x29, lr
+ restore_callee_saved_regs x29
// Do not touch any register after this!
eret
// Retrieve the guest regs x0-x1 from the stack
ldp x2, x3, [sp], #16 // x0, x1
- // Store the guest regs x0-x1 and x4-x18
+ // Store the guest regs x0-x1 and x4-x17
stp x2, x3, [x1, #CPU_XREG_OFFSET(0)]
stp x4, x5, [x1, #CPU_XREG_OFFSET(4)]
stp x6, x7, [x1, #CPU_XREG_OFFSET(6)]
stp x12, x13, [x1, #CPU_XREG_OFFSET(12)]
stp x14, x15, [x1, #CPU_XREG_OFFSET(14)]
stp x16, x17, [x1, #CPU_XREG_OFFSET(16)]
- str x18, [x1, #CPU_XREG_OFFSET(18)]
- // Store the guest regs x19-x29, lr
+ // Store the guest regs x18-x29, lr
save_callee_saved_regs x1
get_host_ctxt x2, x3
/* Check whether the FP regs were dirtied while in the host-side run loop: */
static bool __hyp_text update_fp_enabled(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
+ /*
+ * When the system doesn't support FP/SIMD, we cannot rely on
+ * the _TIF_FOREIGN_FPSTATE flag. However, we always inject an
+ * abort on the very first access to FP and thus we should never
+ * see KVM_ARM64_FP_ENABLED. For added safety, make sure we always
+ * trap the accesses.
+ */
+ if (!system_supports_fpsimd() ||
+ vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
KVM_ARM64_FP_HOST);
write_sysreg(val, cptr_el2);
- if (cpus_have_const_cap(ARM64_WORKAROUND_1319367)) {
+ if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;
isb();
write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
/*
- * ARM erratum 1165522 requires the actual execution of the above
- * before we can switch to the EL2/EL0 translation regime used by
+ * ARM errata 1165522 and 1530923 require the actual execution of the
+ * above before we can switch to the EL2/EL0 translation regime used by
* the host.
*/
- asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_1165522));
+ asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT_VHE));
write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
write_sysreg(vectors, vbar_el1);
{
u64 mdcr_el2 = read_sysreg(mdcr_el2);
- if (cpus_have_const_cap(ARM64_WORKAROUND_1319367)) {
+ if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
u64 val;
/*
write_sysreg(ctxt->sys_regs[MPIDR_EL1], vmpidr_el2);
write_sysreg(ctxt->sys_regs[CSSELR_EL1], csselr_el1);
- if (!cpus_have_const_cap(ARM64_WORKAROUND_1319367)) {
+ if (!cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], SYS_SCTLR);
write_sysreg_el1(ctxt->sys_regs[TCR_EL1], SYS_TCR);
} else if (!ctxt->__hyp_running_vcpu) {
write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1);
write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1);
- if (cpus_have_const_cap(ARM64_WORKAROUND_1319367) &&
+ if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE) &&
ctxt->__hyp_running_vcpu) {
/*
* Must only be done for host registers, hence the context
local_irq_save(cxt->flags);
- if (cpus_have_const_cap(ARM64_WORKAROUND_1165522)) {
+ if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE)) {
/*
- * For CPUs that are affected by ARM erratum 1165522, we
- * cannot trust stage-1 to be in a correct state at that
+ * For CPUs that are affected by ARM errata 1165522 or 1530923,
+ * we cannot trust stage-1 to be in a correct state at that
* point. Since we do not want to force a full load of the
* vcpu state, we prevent the EL1 page-table walker to
* allocate new TLBs. This is done by setting the EPD bits
static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
struct tlb_inv_context *cxt)
{
- if (cpus_have_const_cap(ARM64_WORKAROUND_1319367)) {
+ if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
u64 val;
/*
write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
isb();
- if (cpus_have_const_cap(ARM64_WORKAROUND_1165522)) {
+ if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE)) {
/* Restore the registers to what they were */
write_sysreg_el1(cxt->tcr, SYS_TCR);
write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
{
write_sysreg(0, vttbr_el2);
- if (cpus_have_const_cap(ARM64_WORKAROUND_1319367)) {
+ if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
/* Ensure write of the host VMID */
isb();
/* Restore the host's TCR_EL1 */
ID_SANITISED(ID_ISAR4_EL1),
ID_SANITISED(ID_ISAR5_EL1),
ID_SANITISED(ID_MMFR4_EL1),
- ID_UNALLOCATED(2,7),
+ ID_SANITISED(ID_ISAR6_EL1),
/* CRm=3 */
ID_SANITISED(MVFR0_EL1),
# SPDX-License-Identifier: GPL-2.0
lib-y := clear_user.o delay.o copy_from_user.o \
copy_to_user.o copy_in_user.o copy_page.o \
- clear_page.o memchr.o memcpy.o memmove.o memset.o \
- memcmp.o strcmp.o strncmp.o strlen.o strnlen.o \
- strchr.o strrchr.o tishift.o
+ clear_page.o csum.o memchr.o memcpy.o memmove.o \
+ memset.o memcmp.o strcmp.o strncmp.o strlen.o \
+ strnlen.o strchr.o strrchr.o tishift.o
ifeq ($(CONFIG_KERNEL_MODE_NEON), y)
obj-$(CONFIG_XOR_BLOCKS) += xor-neon.o
* Parameters:
* x0 - dest
*/
-ENTRY(clear_page)
+SYM_FUNC_START(clear_page)
mrs x1, dczid_el0
and w1, w1, #0xf
mov x2, #4
tst x0, #(PAGE_SIZE - 1)
b.ne 1b
ret
-ENDPROC(clear_page)
+SYM_FUNC_END(clear_page)
EXPORT_SYMBOL(clear_page)
*
* Alignment fixed up by hardware.
*/
-ENTRY(__arch_clear_user)
+SYM_FUNC_START(__arch_clear_user)
mov x2, x1 // save the size for fixup return
subs x1, x1, #8
b.mi 2f
uao_user_alternative 9f, strb, sttrb, wzr, x0, 0
5: mov x0, #0
ret
-ENDPROC(__arch_clear_user)
+SYM_FUNC_END(__arch_clear_user)
EXPORT_SYMBOL(__arch_clear_user)
.section .fixup,"ax"
.endm
end .req x5
-ENTRY(__arch_copy_from_user)
+SYM_FUNC_START(__arch_copy_from_user)
add end, x0, x2
#include "copy_template.S"
mov x0, #0 // Nothing to copy
ret
-ENDPROC(__arch_copy_from_user)
+SYM_FUNC_END(__arch_copy_from_user)
EXPORT_SYMBOL(__arch_copy_from_user)
.section .fixup,"ax"
end .req x5
-ENTRY(__arch_copy_in_user)
+SYM_FUNC_START(__arch_copy_in_user)
add end, x0, x2
#include "copy_template.S"
mov x0, #0
ret
-ENDPROC(__arch_copy_in_user)
+SYM_FUNC_END(__arch_copy_in_user)
EXPORT_SYMBOL(__arch_copy_in_user)
.section .fixup,"ax"
* x0 - dest
* x1 - src
*/
-ENTRY(copy_page)
+SYM_FUNC_START(copy_page)
alternative_if ARM64_HAS_NO_HW_PREFETCH
// Prefetch three cache lines ahead.
prfm pldl1strm, [x1, #128]
ldp x14, x15, [x1, #96]
ldp x16, x17, [x1, #112]
- mov x18, #(PAGE_SIZE - 128)
+ add x0, x0, #256
add x1, x1, #128
1:
- subs x18, x18, #128
+ tst x0, #(PAGE_SIZE - 1)
alternative_if ARM64_HAS_NO_HW_PREFETCH
prfm pldl1strm, [x1, #384]
alternative_else_nop_endif
- stnp x2, x3, [x0]
+ stnp x2, x3, [x0, #-256]
ldp x2, x3, [x1]
- stnp x4, x5, [x0, #16]
+ stnp x4, x5, [x0, #16 - 256]
ldp x4, x5, [x1, #16]
- stnp x6, x7, [x0, #32]
+ stnp x6, x7, [x0, #32 - 256]
ldp x6, x7, [x1, #32]
- stnp x8, x9, [x0, #48]
+ stnp x8, x9, [x0, #48 - 256]
ldp x8, x9, [x1, #48]
- stnp x10, x11, [x0, #64]
+ stnp x10, x11, [x0, #64 - 256]
ldp x10, x11, [x1, #64]
- stnp x12, x13, [x0, #80]
+ stnp x12, x13, [x0, #80 - 256]
ldp x12, x13, [x1, #80]
- stnp x14, x15, [x0, #96]
+ stnp x14, x15, [x0, #96 - 256]
ldp x14, x15, [x1, #96]
- stnp x16, x17, [x0, #112]
+ stnp x16, x17, [x0, #112 - 256]
ldp x16, x17, [x1, #112]
add x0, x0, #128
add x1, x1, #128
- b.gt 1b
+ b.ne 1b
- stnp x2, x3, [x0]
- stnp x4, x5, [x0, #16]
- stnp x6, x7, [x0, #32]
- stnp x8, x9, [x0, #48]
- stnp x10, x11, [x0, #64]
- stnp x12, x13, [x0, #80]
- stnp x14, x15, [x0, #96]
- stnp x16, x17, [x0, #112]
+ stnp x2, x3, [x0, #-256]
+ stnp x4, x5, [x0, #16 - 256]
+ stnp x6, x7, [x0, #32 - 256]
+ stnp x8, x9, [x0, #48 - 256]
+ stnp x10, x11, [x0, #64 - 256]
+ stnp x12, x13, [x0, #80 - 256]
+ stnp x14, x15, [x0, #96 - 256]
+ stnp x16, x17, [x0, #112 - 256]
ret
-ENDPROC(copy_page)
+SYM_FUNC_END(copy_page)
EXPORT_SYMBOL(copy_page)
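
The rewritten loop drops the x18 down-counter (x18 is the platform / shadow-call-stack register and must stay untouched): the destination pointer is biased by 256, the stores go through negative offsets, and the loop ends when the pointer reaches the next page boundary. A minimal C model of that control flow, assuming a 4K page and a page-aligned dst, purely for illustration:

/* Sketch only: models the biased-pointer loop control of copy_page. */
#include <stdint.h>
#include <string.h>

#define PAGE_SIZE 4096

void copy_page_model(unsigned char *dst, const unsigned char *src)
{
	unsigned char batch[128];
	int done;

	memcpy(batch, src, 128);	/* initial ldp block */
	dst += 256;			/* "add x0, x0, #256" bias */
	src += 128;
	do {
		/* "tst x0, #(PAGE_SIZE - 1)": flags taken at loop top */
		done = ((uintptr_t)dst & (PAGE_SIZE - 1)) == 0;
		memcpy(dst - 256, batch, 128);	/* stnp ... [x0, #-256] */
		memcpy(batch, src, 128);	/* load the next block */
		dst += 128;
		src += 128;
	} while (!done);			/* "b.ne 1b" */
	memcpy(dst - 256, batch, 128);		/* trailing stnp block */
}

With the bias in place the pointer only becomes page-aligned again once every 128-byte block of the page has been written, so no separate counter register is needed.
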
.endm
end .req x5
-ENTRY(__arch_copy_to_user)
+SYM_FUNC_START(__arch_copy_to_user)
add end, x0, x2
#include "copy_template.S"
mov x0, #0
ret
-ENDPROC(__arch_copy_to_user)
+SYM_FUNC_END(__arch_copy_to_user)
EXPORT_SYMBOL(__arch_copy_to_user)
.section .fixup,"ax"
.endm
.align 5
-ENTRY(crc32_le)
+SYM_FUNC_START(crc32_le)
alternative_if_not ARM64_HAS_CRC32
b crc32_le_base
alternative_else_nop_endif
__crc32
-ENDPROC(crc32_le)
+SYM_FUNC_END(crc32_le)
.align 5
-ENTRY(__crc32c_le)
+SYM_FUNC_START(__crc32c_le)
alternative_if_not ARM64_HAS_CRC32
b __crc32c_le_base
alternative_else_nop_endif
__crc32 c
-ENDPROC(__crc32c_le)
+SYM_FUNC_END(__crc32c_le)
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2019-2020 Arm Ltd.
+
+#include <linux/compiler.h>
+#include <linux/kasan-checks.h>
+#include <linux/kernel.h>
+
+#include <net/checksum.h>
+
+/* Looks dumb, but generates nice-ish code */
+static u64 accumulate(u64 sum, u64 data)
+{
+ __uint128_t tmp = (__uint128_t)sum + data;
+ return tmp + (tmp >> 64);
+}
+
+unsigned int do_csum(const unsigned char *buff, int len)
+{
+ unsigned int offset, shift, sum;
+ const u64 *ptr;
+ u64 data, sum64 = 0;
+
+ if (unlikely(len == 0))
+ return 0;
+
+ offset = (unsigned long)buff & 7;
+ /*
+ * This is to all intents and purposes safe, since rounding down cannot
+ * result in a different page or cache line being accessed, and @buff
+ * should absolutely not be pointing to anything read-sensitive. We do,
+ * however, have to be careful not to piss off KASAN, which means using
+ * unchecked reads to accommodate the head and tail, for which we'll
+ * compensate with an explicit check up-front.
+ */
+ kasan_check_read(buff, len);
+ ptr = (u64 *)(buff - offset);
+ len = len + offset - 8;
+
+ /*
+ * Head: zero out any excess leading bytes. Shifting back by the same
+ * amount should be at least as fast as any other way of handling the
+ * odd/even alignment, and means we can ignore it until the very end.
+ */
+ shift = offset * 8;
+ data = READ_ONCE_NOCHECK(*ptr++);
+#ifdef __LITTLE_ENDIAN
+ data = (data >> shift) << shift;
+#else
+ data = (data << shift) >> shift;
+#endif
+
+ /*
+ * Body: straightforward aligned loads from here on (the paired loads
+ * underlying the quadword type still only need dword alignment). The
+ * main loop strictly excludes the tail, so the second loop will always
+ * run at least once.
+ */
+ while (unlikely(len > 64)) {
+ __uint128_t tmp1, tmp2, tmp3, tmp4;
+
+ tmp1 = READ_ONCE_NOCHECK(*(__uint128_t *)ptr);
+ tmp2 = READ_ONCE_NOCHECK(*(__uint128_t *)(ptr + 2));
+ tmp3 = READ_ONCE_NOCHECK(*(__uint128_t *)(ptr + 4));
+ tmp4 = READ_ONCE_NOCHECK(*(__uint128_t *)(ptr + 6));
+
+ len -= 64;
+ ptr += 8;
+
+ /* This is the "don't dump the carry flag into a GPR" idiom */
+ tmp1 += (tmp1 >> 64) | (tmp1 << 64);
+ tmp2 += (tmp2 >> 64) | (tmp2 << 64);
+ tmp3 += (tmp3 >> 64) | (tmp3 << 64);
+ tmp4 += (tmp4 >> 64) | (tmp4 << 64);
+ tmp1 = ((tmp1 >> 64) << 64) | (tmp2 >> 64);
+ tmp1 += (tmp1 >> 64) | (tmp1 << 64);
+ tmp3 = ((tmp3 >> 64) << 64) | (tmp4 >> 64);
+ tmp3 += (tmp3 >> 64) | (tmp3 << 64);
+ tmp1 = ((tmp1 >> 64) << 64) | (tmp3 >> 64);
+ tmp1 += (tmp1 >> 64) | (tmp1 << 64);
+ tmp1 = ((tmp1 >> 64) << 64) | sum64;
+ tmp1 += (tmp1 >> 64) | (tmp1 << 64);
+ sum64 = tmp1 >> 64;
+ }
+ while (len > 8) {
+ __uint128_t tmp;
+
+ sum64 = accumulate(sum64, data);
+ tmp = READ_ONCE_NOCHECK(*(__uint128_t *)ptr);
+
+ len -= 16;
+ ptr += 2;
+
+#ifdef __LITTLE_ENDIAN
+ data = tmp >> 64;
+ sum64 = accumulate(sum64, tmp);
+#else
+ data = tmp;
+ sum64 = accumulate(sum64, tmp >> 64);
+#endif
+ }
+ if (len > 0) {
+ sum64 = accumulate(sum64, data);
+ data = READ_ONCE_NOCHECK(*ptr);
+ len -= 8;
+ }
+ /*
+ * Tail: zero any over-read bytes similarly to the head, again
+ * preserving odd/even alignment.
+ */
+ shift = len * -8;
+#ifdef __LITTLE_ENDIAN
+ data = (data << shift) >> shift;
+#else
+ data = (data >> shift) << shift;
+#endif
+ sum64 = accumulate(sum64, data);
+
+ /* Finally, folding */
+ sum64 += (sum64 >> 32) | (sum64 << 32);
+ sum = sum64 >> 32;
+ sum += (sum >> 16) | (sum << 16);
+ if (offset & 1)
+ return (u16)swab32(sum);
+
+ return sum >> 16;
+}
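
Two folding idioms carry the whole routine: accumulate() performs an end-around carry through a 128-bit temporary, and the final lines fold the 64-bit accumulator down to the 16-bit ones'-complement result. A self-contained sketch of both, assuming only standard C with __uint128_t support:

#include <stdint.h>

/* End-around carry: the carry out of a 64-bit ones'-complement add is
 * added back into the low bits, exactly as accumulate() does above. */
static uint64_t accumulate_model(uint64_t sum, uint64_t data)
{
	__uint128_t tmp = (__uint128_t)sum + data;

	return (uint64_t)(tmp + (tmp >> 64));	/* tmp >> 64 is 0 or 1 */
}

/* Folding the 64-bit accumulator to 16 bits, mirroring the tail of
 * do_csum(): each rotate-and-add halves the width while preserving
 * the ones'-complement sum. */
static uint16_t fold_model(uint64_t sum64)
{
	uint32_t sum;

	sum64 += (sum64 >> 32) | (sum64 << 32);	/* 64 -> 32 bits */
	sum = sum64 >> 32;
	sum += (sum >> 16) | (sum << 16);	/* 32 -> 16 bits */
	return sum >> 16;
}
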
* Returns:
* x0 - address of first occurrence of 'c' or 0
*/
-WEAK(memchr)
+SYM_FUNC_START_WEAK_PI(memchr)
and w1, w1, #0xff
1: subs x2, x2, #1
b.mi 2f
ret
2: mov x0, #0
ret
-ENDPIPROC(memchr)
+SYM_FUNC_END_PI(memchr)
EXPORT_SYMBOL_NOKASAN(memchr)
limit_wd .req x12
mask .req x13
-WEAK(memcmp)
+SYM_FUNC_START_WEAK_PI(memcmp)
cbz limit, .Lret0
eor tmp1, src1, src2
tst tmp1, #7
.Lret0:
mov result, #0
ret
-ENDPIPROC(memcmp)
+SYM_FUNC_END_PI(memcmp)
EXPORT_SYMBOL_NOKASAN(memcmp)
.endm
.weak memcpy
-ENTRY(__memcpy)
-ENTRY(memcpy)
+SYM_FUNC_START_ALIAS(__memcpy)
+SYM_FUNC_START_PI(memcpy)
#include "copy_template.S"
ret
-ENDPIPROC(memcpy)
+SYM_FUNC_END_PI(memcpy)
EXPORT_SYMBOL(memcpy)
-ENDPROC(__memcpy)
+SYM_FUNC_END_ALIAS(__memcpy)
EXPORT_SYMBOL(__memcpy)
D_h .req x14
.weak memmove
-ENTRY(__memmove)
-ENTRY(memmove)
+SYM_FUNC_START_ALIAS(__memmove)
+SYM_FUNC_START_PI(memmove)
cmp dstin, src
b.lo __memcpy
add tmp1, src, count
tst count, #0x3f
b.ne .Ltail63
ret
-ENDPIPROC(memmove)
+SYM_FUNC_END_PI(memmove)
EXPORT_SYMBOL(memmove)
-ENDPROC(__memmove)
+SYM_FUNC_END_ALIAS(__memmove)
EXPORT_SYMBOL(__memmove)
tmp3 .req x9
.weak memset
-ENTRY(__memset)
-ENTRY(memset)
+SYM_FUNC_START_ALIAS(__memset)
+SYM_FUNC_START_PI(memset)
mov dst, dstin /* Preserve return value. */
and A_lw, val, #255
orr A_lw, A_lw, A_lw, lsl #8
ands count, count, zva_bits_x
b.ne .Ltail_maybe_long
ret
-ENDPIPROC(memset)
+SYM_FUNC_END_PI(memset)
EXPORT_SYMBOL(memset)
-ENDPROC(__memset)
+SYM_FUNC_END_ALIAS(__memset)
EXPORT_SYMBOL(__memset)
* Returns:
* x0 - address of first occurrence of 'c' or 0
*/
-WEAK(strchr)
+SYM_FUNC_START_WEAK(strchr)
and w1, w1, #0xff
1: ldrb w2, [x0], #1
cmp w2, w1
cmp w2, w1
csel x0, x0, xzr, eq
ret
-ENDPROC(strchr)
+SYM_FUNC_END(strchr)
EXPORT_SYMBOL_NOKASAN(strchr)
zeroones .req x10
pos .req x11
-WEAK(strcmp)
+SYM_FUNC_START_WEAK_PI(strcmp)
eor tmp1, src1, src2
mov zeroones, #REP8_01
tst tmp1, #7
lsr data1, data1, #56
sub result, data1, data2, lsr #56
ret
-ENDPIPROC(strcmp)
+SYM_FUNC_END_PI(strcmp)
EXPORT_SYMBOL_NOKASAN(strcmp)
#define REP8_7f 0x7f7f7f7f7f7f7f7f
#define REP8_80 0x8080808080808080
-WEAK(strlen)
+SYM_FUNC_START_WEAK_PI(strlen)
mov zeroones, #REP8_01
bic src, srcin, #15
ands tmp1, srcin, #15
csinv data1, data1, xzr, le
csel data2, data2, data2a, le
b .Lrealigned
-ENDPIPROC(strlen)
+SYM_FUNC_END_PI(strlen)
EXPORT_SYMBOL_NOKASAN(strlen)
mask .req x14
endloop .req x15
-WEAK(strncmp)
+SYM_FUNC_START_WEAK_PI(strncmp)
cbz limit, .Lret0
eor tmp1, src1, src2
mov zeroones, #REP8_01
.Lret0:
mov result, #0
ret
-ENDPIPROC(strncmp)
+SYM_FUNC_END_PI(strncmp)
EXPORT_SYMBOL_NOKASAN(strncmp)
#define REP8_7f 0x7f7f7f7f7f7f7f7f
#define REP8_80 0x8080808080808080
-WEAK(strnlen)
+SYM_FUNC_START_WEAK_PI(strnlen)
cbz limit, .Lhit_limit
mov zeroones, #REP8_01
bic src, srcin, #15
.Lhit_limit:
mov len, limit
ret
-ENDPIPROC(strnlen)
+SYM_FUNC_END_PI(strnlen)
EXPORT_SYMBOL_NOKASAN(strnlen)
* Returns:
* x0 - address of last occurrence of 'c' or 0
*/
-WEAK(strrchr)
+SYM_FUNC_START_WEAK_PI(strrchr)
mov x3, #0
and w1, w1, #0xff
1: ldrb w2, [x0], #1
b 1b
2: mov x0, x3
ret
-ENDPIPROC(strrchr)
+SYM_FUNC_END_PI(strrchr)
EXPORT_SYMBOL_NOKASAN(strrchr)
#include <asm/assembler.h>
-ENTRY(__ashlti3)
+SYM_FUNC_START(__ashlti3)
cbz x2, 1f
mov x3, #64
sub x3, x3, x2
lsl x1, x0, x1
mov x0, x2
ret
-ENDPROC(__ashlti3)
+SYM_FUNC_END(__ashlti3)
EXPORT_SYMBOL(__ashlti3)
-ENTRY(__ashrti3)
+SYM_FUNC_START(__ashrti3)
cbz x2, 1f
mov x3, #64
sub x3, x3, x2
asr x0, x1, x0
mov x1, x2
ret
-ENDPROC(__ashrti3)
+SYM_FUNC_END(__ashrti3)
EXPORT_SYMBOL(__ashrti3)
-ENTRY(__lshrti3)
+SYM_FUNC_START(__lshrti3)
cbz x2, 1f
mov x3, #64
sub x3, x3, x2
lsr x0, x1, x0
mov x1, x2
ret
-ENDPROC(__lshrti3)
+SYM_FUNC_END(__lshrti3)
EXPORT_SYMBOL(__lshrti3)
* - start - virtual start address of region
* - end - virtual end address of region
*/
-ENTRY(__flush_icache_range)
+SYM_FUNC_START(__flush_icache_range)
/* FALLTHROUGH */
/*
* - start - virtual start address of region
* - end - virtual end address of region
*/
-ENTRY(__flush_cache_user_range)
+SYM_FUNC_START(__flush_cache_user_range)
uaccess_ttbr0_enable x2, x3, x4
alternative_if ARM64_HAS_CACHE_IDC
dsb ishst
9:
mov x0, #-EFAULT
b 1b
-ENDPROC(__flush_icache_range)
-ENDPROC(__flush_cache_user_range)
+SYM_FUNC_END(__flush_icache_range)
+SYM_FUNC_END(__flush_cache_user_range)
/*
* invalidate_icache_range(start,end)
* - start - virtual start address of region
* - end - virtual end address of region
*/
-ENTRY(invalidate_icache_range)
+SYM_FUNC_START(invalidate_icache_range)
alternative_if ARM64_HAS_CACHE_DIC
mov x0, xzr
isb
2:
mov x0, #-EFAULT
b 1b
-ENDPROC(invalidate_icache_range)
+SYM_FUNC_END(invalidate_icache_range)
/*
* __flush_dcache_area(kaddr, size)
* - kaddr - kernel address
* - size - size in question
*/
-ENTRY(__flush_dcache_area)
+SYM_FUNC_START_PI(__flush_dcache_area)
dcache_by_line_op civac, sy, x0, x1, x2, x3
ret
-ENDPIPROC(__flush_dcache_area)
+SYM_FUNC_END_PI(__flush_dcache_area)
/*
* __clean_dcache_area_pou(kaddr, size)
* - kaddr - kernel address
* - size - size in question
*/
-ENTRY(__clean_dcache_area_pou)
+SYM_FUNC_START(__clean_dcache_area_pou)
alternative_if ARM64_HAS_CACHE_IDC
dsb ishst
ret
alternative_else_nop_endif
dcache_by_line_op cvau, ish, x0, x1, x2, x3
ret
-ENDPROC(__clean_dcache_area_pou)
+SYM_FUNC_END(__clean_dcache_area_pou)
/*
* __inval_dcache_area(kaddr, size)
* - kaddr - kernel address
* - size - size in question
*/
-ENTRY(__inval_dcache_area)
+SYM_FUNC_START_LOCAL(__dma_inv_area)
+SYM_FUNC_START_PI(__inval_dcache_area)
/* FALLTHROUGH */
/*
* - start - virtual start address of region
* - size - size in question
*/
-__dma_inv_area:
add x1, x1, x0
dcache_line_size x2, x3
sub x3, x2, #1
b.lo 2b
dsb sy
ret
-ENDPIPROC(__inval_dcache_area)
-ENDPROC(__dma_inv_area)
+SYM_FUNC_END_PI(__inval_dcache_area)
+SYM_FUNC_END(__dma_inv_area)
/*
* __clean_dcache_area_poc(kaddr, size)
* - kaddr - kernel address
* - size - size in question
*/
-ENTRY(__clean_dcache_area_poc)
+SYM_FUNC_START_LOCAL(__dma_clean_area)
+SYM_FUNC_START_PI(__clean_dcache_area_poc)
/* FALLTHROUGH */
/*
* - start - virtual start address of region
* - size - size in question
*/
-__dma_clean_area:
dcache_by_line_op cvac, sy, x0, x1, x2, x3
ret
-ENDPIPROC(__clean_dcache_area_poc)
-ENDPROC(__dma_clean_area)
+SYM_FUNC_END_PI(__clean_dcache_area_poc)
+SYM_FUNC_END(__dma_clean_area)
/*
* __clean_dcache_area_pop(kaddr, size)
* - kaddr - kernel address
* - size - size in question
*/
-ENTRY(__clean_dcache_area_pop)
+SYM_FUNC_START_PI(__clean_dcache_area_pop)
alternative_if_not ARM64_HAS_DCPOP
b __clean_dcache_area_poc
alternative_else_nop_endif
dcache_by_line_op cvap, sy, x0, x1, x2, x3
ret
-ENDPIPROC(__clean_dcache_area_pop)
+SYM_FUNC_END_PI(__clean_dcache_area_pop)
/*
* __dma_flush_area(start, size)
* - start - virtual start address of region
* - size - size in question
*/
-ENTRY(__dma_flush_area)
+SYM_FUNC_START_PI(__dma_flush_area)
dcache_by_line_op civac, sy, x0, x1, x2, x3
ret
-ENDPIPROC(__dma_flush_area)
+SYM_FUNC_END_PI(__dma_flush_area)
/*
* __dma_map_area(start, size, dir)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(__dma_map_area)
+SYM_FUNC_START_PI(__dma_map_area)
cmp w2, #DMA_FROM_DEVICE
b.eq __dma_inv_area
b __dma_clean_area
-ENDPIPROC(__dma_map_area)
+SYM_FUNC_END_PI(__dma_map_area)
/*
* __dma_unmap_area(start, size, dir)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(__dma_unmap_area)
+SYM_FUNC_START_PI(__dma_unmap_area)
cmp w2, #DMA_TO_DEVICE
b.ne __dma_inv_area
ret
-ENDPIPROC(__dma_unmap_area)
+SYM_FUNC_END_PI(__dma_unmap_area)
#define ASID_MASK (~GENMASK(asid_bits - 1, 0))
#define ASID_FIRST_VERSION (1UL << asid_bits)
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-#define NUM_USER_ASIDS (ASID_FIRST_VERSION >> 1)
-#define asid2idx(asid) (((asid) & ~ASID_MASK) >> 1)
-#define idx2asid(idx) (((idx) << 1) & ~ASID_MASK)
-#else
-#define NUM_USER_ASIDS (ASID_FIRST_VERSION)
+#define NUM_USER_ASIDS ASID_FIRST_VERSION
#define asid2idx(asid) ((asid) & ~ASID_MASK)
#define idx2asid(idx) asid2idx(idx)
-#endif
/* Get the ASIDBits supported by the current CPU */
static u32 get_cpu_asid_bits(void)
}
}
+static void set_kpti_asid_bits(void)
+{
+ unsigned int len = BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(unsigned long);
+ /*
+	 * In the case of KPTI, kernel/user ASIDs are allocated in
+	 * pairs; the bottom bit distinguishes the two: if it is
+	 * set, then the ASID will map only userspace. Thus mark
+	 * even ASIDs as reserved for the kernel.
+ */
+ memset(asid_map, 0xaa, len);
+}
+
+static void set_reserved_asid_bits(void)
+{
+ if (arm64_kernel_unmapped_at_el0())
+ set_kpti_asid_bits();
+ else
+ bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
+}
+
static void flush_context(void)
{
int i;
u64 asid;
/* Update the list of reserved ASIDs and the ASID bitmap. */
- bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
+ set_reserved_asid_bits();
for_each_possible_cpu(i) {
asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
panic("Failed to allocate bitmap for %lu ASIDs\n",
NUM_USER_ASIDS);
+ /*
+ * We cannot call set_reserved_asid_bits() here because CPU
+ * caps are not finalized yet, so it is safer to assume KPTI
+	 * and reserve kernel ASIDs from the beginning.
+ */
+ if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
+ set_kpti_asid_bits();
+
pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS);
return 0;
}
const struct fault_info *inf;
struct mm_struct *mm = current->mm;
vm_fault_t fault, major = 0;
- unsigned long vm_flags = VM_READ | VM_WRITE;
+ unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
if (kprobe_page_fault(regs, esr))
pgprot_t set_mask, pgprot_t clear_mask)
{
unsigned long start = addr;
- unsigned long size = PAGE_SIZE*numpages;
+ unsigned long size = PAGE_SIZE * numpages;
unsigned long end = start + size;
struct vm_struct *area;
int i;
#define TCR_KASAN_FLAGS 0
#endif
-#define MAIR(attr, mt) ((attr) << ((mt) * 8))
+/* Default MAIR_EL1 */
+#define MAIR_EL1_SET \
+ (MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) | \
+ MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) | \
+ MAIR_ATTRIDX(MAIR_ATTR_DEVICE_GRE, MT_DEVICE_GRE) | \
+ MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) | \
+ MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) | \
+ MAIR_ATTRIDX(MAIR_ATTR_NORMAL_WT, MT_NORMAL_WT))
#ifdef CONFIG_CPU_PM
/**
*
* x0: virtual address of context pointer
*/
-ENTRY(cpu_do_suspend)
+SYM_FUNC_START(cpu_do_suspend)
mrs x2, tpidr_el0
mrs x3, tpidrro_el0
mrs x4, contextidr_el1
stp x10, x11, [x0, #64]
stp x12, x13, [x0, #80]
ret
-ENDPROC(cpu_do_suspend)
+SYM_FUNC_END(cpu_do_suspend)
/**
* cpu_do_resume - restore CPU register context
* x0: Address of context pointer
*/
.pushsection ".idmap.text", "awx"
-ENTRY(cpu_do_resume)
+SYM_FUNC_START(cpu_do_resume)
ldp x2, x3, [x0]
ldp x4, x5, [x0, #16]
ldp x6, x8, [x0, #32]
isb
ret
-ENDPROC(cpu_do_resume)
+SYM_FUNC_END(cpu_do_resume)
.popsection
#endif
*
* - pgd_phys - physical address of new TTB
*/
-ENTRY(cpu_do_switch_mm)
+SYM_FUNC_START(cpu_do_switch_mm)
mrs x2, ttbr1_el1
mmid x1, x1 // get mm->context.id
phys_to_ttbr x3, x0
msr ttbr0_el1, x3 // now update TTBR0
isb
b post_ttbr_update_workaround // Back to C code...
-ENDPROC(cpu_do_switch_mm)
+SYM_FUNC_END(cpu_do_switch_mm)
.pushsection ".idmap.text", "awx"
* This is the low-level counterpart to cpu_replace_ttbr1, and should not be
* called by anything else. It can only be executed from a TTBR0 mapping.
*/
-ENTRY(idmap_cpu_replace_ttbr1)
+SYM_FUNC_START(idmap_cpu_replace_ttbr1)
save_and_disable_daif flags=x2
__idmap_cpu_set_reserved_ttbr1 x1, x3
restore_daif x2
ret
-ENDPROC(idmap_cpu_replace_ttbr1)
+SYM_FUNC_END(idmap_cpu_replace_ttbr1)
.popsection
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
*/
__idmap_kpti_flag:
.long 1
-ENTRY(idmap_kpti_install_ng_mappings)
+SYM_FUNC_START(idmap_kpti_install_ng_mappings)
cpu .req w0
num_cpus .req w1
swapper_pa .req x2
/* We're the boot CPU. Wait for the others to catch up */
sevl
1: wfe
- ldaxr w18, [flag_ptr]
- eor w18, w18, num_cpus
- cbnz w18, 1b
+ ldaxr w17, [flag_ptr]
+ eor w17, w17, num_cpus
+ cbnz w17, 1b
/* We need to walk swapper, so turn off the MMU. */
pre_disable_mmu_workaround
- mrs x18, sctlr_el1
- bic x18, x18, #SCTLR_ELx_M
- msr sctlr_el1, x18
+ mrs x17, sctlr_el1
+ bic x17, x17, #SCTLR_ELx_M
+ msr sctlr_el1, x17
isb
/* Everybody is enjoying the idmap, so we can rewrite swapper. */
isb
/* We're done: fire up the MMU again */
- mrs x18, sctlr_el1
- orr x18, x18, #SCTLR_ELx_M
- msr sctlr_el1, x18
+ mrs x17, sctlr_el1
+ orr x17, x17, #SCTLR_ELx_M
+ msr sctlr_el1, x17
isb
/*
b.ne do_pte
b next_pmd
+ .unreq cpu
+ .unreq num_cpus
+ .unreq swapper_pa
+ .unreq cur_pgdp
+ .unreq end_pgdp
+ .unreq pgd
+ .unreq cur_pudp
+ .unreq end_pudp
+ .unreq pud
+ .unreq cur_pmdp
+ .unreq end_pmdp
+ .unreq pmd
+ .unreq cur_ptep
+ .unreq end_ptep
+ .unreq pte
+
/* Secondary CPUs end up here */
__idmap_kpti_secondary:
/* Uninstall swapper before surgery begins */
- __idmap_cpu_set_reserved_ttbr1 x18, x17
+ __idmap_cpu_set_reserved_ttbr1 x16, x17
/* Increment the flag to let the boot CPU we're ready */
-1: ldxr w18, [flag_ptr]
- add w18, w18, #1
- stxr w17, w18, [flag_ptr]
+1: ldxr w16, [flag_ptr]
+ add w16, w16, #1
+ stxr w17, w16, [flag_ptr]
cbnz w17, 1b
/* Wait for the boot CPU to finish messing around with swapper */
sevl
1: wfe
- ldxr w18, [flag_ptr]
- cbnz w18, 1b
+ ldxr w16, [flag_ptr]
+ cbnz w16, 1b
/* All done, act like nothing happened */
- offset_ttbr1 swapper_ttb, x18
+ offset_ttbr1 swapper_ttb, x16
msr ttbr1_el1, swapper_ttb
isb
ret
- .unreq cpu
- .unreq num_cpus
- .unreq swapper_pa
.unreq swapper_ttb
.unreq flag_ptr
- .unreq cur_pgdp
- .unreq end_pgdp
- .unreq pgd
- .unreq cur_pudp
- .unreq end_pudp
- .unreq pud
- .unreq cur_pmdp
- .unreq end_pmdp
- .unreq pmd
- .unreq cur_ptep
- .unreq end_ptep
- .unreq pte
-ENDPROC(idmap_kpti_install_ng_mappings)
+SYM_FUNC_END(idmap_kpti_install_ng_mappings)
.popsection
#endif
* value of the SCTLR_EL1 register.
*/
.pushsection ".idmap.text", "awx"
-ENTRY(__cpu_setup)
+SYM_FUNC_START(__cpu_setup)
tlbi vmalle1 // Invalidate local TLB
dsb nsh
enable_dbg // since this is per-cpu
reset_pmuserenr_el0 x0 // Disable PMU access from EL0
/*
- * Memory region attributes for LPAE:
- *
- * n = AttrIndx[2:0]
- * n MAIR
- * DEVICE_nGnRnE 000 00000000
- * DEVICE_nGnRE 001 00000100
- * DEVICE_GRE 010 00001100
- * NORMAL_NC 011 01000100
- * NORMAL 100 11111111
- * NORMAL_WT 101 10111011
+ * Memory region attributes
*/
- ldr x5, =MAIR(0x00, MT_DEVICE_nGnRnE) | \
- MAIR(0x04, MT_DEVICE_nGnRE) | \
- MAIR(0x0c, MT_DEVICE_GRE) | \
- MAIR(0x44, MT_NORMAL_NC) | \
- MAIR(0xff, MT_NORMAL) | \
- MAIR(0xbb, MT_NORMAL_WT)
+ mov_q x5, MAIR_EL1_SET
msr mair_el1, x5
/*
* Prepare SCTLR
#endif /* CONFIG_ARM64_HW_AFDBM */
msr tcr_el1, x10
ret // return to head.S
-ENDPROC(__cpu_setup)
+SYM_FUNC_END(__cpu_setup)
#define XEN_IMM 0xEA1
#define HYPERCALL_SIMPLE(hypercall) \
-ENTRY(HYPERVISOR_##hypercall) \
+SYM_FUNC_START(HYPERVISOR_##hypercall) \
mov x16, #__HYPERVISOR_##hypercall; \
hvc XEN_IMM; \
ret; \
-ENDPROC(HYPERVISOR_##hypercall)
+SYM_FUNC_END(HYPERVISOR_##hypercall)
#define HYPERCALL0 HYPERCALL_SIMPLE
#define HYPERCALL1 HYPERCALL_SIMPLE
HYPERCALL2(vm_assist);
HYPERCALL3(dm_op);
-ENTRY(privcmd_call)
+SYM_FUNC_START(privcmd_call)
mov x16, x0
mov x0, x1
mov x1, x2
*/
uaccess_ttbr0_disable x6, x7
ret
-ENDPROC(privcmd_call);
+SYM_FUNC_END(privcmd_call);
select HAVE_AOUT if MMU
select HAVE_ASM_MODVERSIONS
select HAVE_DEBUG_BUGVERBOSE
+ select HAVE_COPY_THREAD_TLS
select GENERIC_IRQ_SHOW
select GENERIC_ATOMIC64
select HAVE_UID16
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_XXHASH=m
+CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_LIB_BLAKE2S=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
CONFIG_CRC64=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_EARLY_PRINTK=y
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_SORT=m
CONFIG_REED_SOLOMON_TEST=m
CONFIG_TEST_MEMCAT_P=m
CONFIG_TEST_STACKINIT=m
CONFIG_TEST_MEMINIT=m
-CONFIG_EARLY_PRINTK=y
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_XXHASH=m
+CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_LIB_BLAKE2S=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
CONFIG_CRC64=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_EARLY_PRINTK=y
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_SORT=m
CONFIG_REED_SOLOMON_TEST=m
CONFIG_TEST_MEMCAT_P=m
CONFIG_TEST_STACKINIT=m
CONFIG_TEST_MEMINIT=m
-CONFIG_EARLY_PRINTK=y
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_XXHASH=m
+CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_LIB_BLAKE2S=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
CONFIG_CRC64=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_EARLY_PRINTK=y
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_SORT=m
CONFIG_REED_SOLOMON_TEST=m
CONFIG_TEST_MEMCAT_P=m
CONFIG_TEST_STACKINIT=m
CONFIG_TEST_MEMINIT=m
-CONFIG_EARLY_PRINTK=y
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_XXHASH=m
+CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_LIB_BLAKE2S=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
CONFIG_CRC64=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_EARLY_PRINTK=y
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_SORT=m
CONFIG_REED_SOLOMON_TEST=m
CONFIG_TEST_MEMCAT_P=m
CONFIG_TEST_STACKINIT=m
CONFIG_TEST_MEMINIT=m
-CONFIG_EARLY_PRINTK=y
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_XXHASH=m
+CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_LIB_BLAKE2S=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
CONFIG_CRC64=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_EARLY_PRINTK=y
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_SORT=m
CONFIG_REED_SOLOMON_TEST=m
CONFIG_TEST_MEMCAT_P=m
CONFIG_TEST_STACKINIT=m
CONFIG_TEST_MEMINIT=m
-CONFIG_EARLY_PRINTK=y
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_XXHASH=m
+CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_LIB_BLAKE2S=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
CONFIG_CRC64=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_EARLY_PRINTK=y
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_SORT=m
CONFIG_REED_SOLOMON_TEST=m
CONFIG_TEST_MEMCAT_P=m
CONFIG_TEST_STACKINIT=m
CONFIG_TEST_MEMINIT=m
-CONFIG_EARLY_PRINTK=y
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_XXHASH=m
+CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_LIB_BLAKE2S=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
CONFIG_CRC64=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_EARLY_PRINTK=y
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_SORT=m
CONFIG_REED_SOLOMON_TEST=m
CONFIG_TEST_MEMCAT_P=m
CONFIG_TEST_STACKINIT=m
CONFIG_TEST_MEMINIT=m
-CONFIG_EARLY_PRINTK=y
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_XXHASH=m
+CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_LIB_BLAKE2S=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
CONFIG_CRC64=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_EARLY_PRINTK=y
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_SORT=m
CONFIG_REED_SOLOMON_TEST=m
CONFIG_TEST_MEMCAT_P=m
CONFIG_TEST_STACKINIT=m
CONFIG_TEST_MEMINIT=m
-CONFIG_EARLY_PRINTK=y
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_XXHASH=m
+CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_LIB_BLAKE2S=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
CONFIG_CRC64=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_EARLY_PRINTK=y
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_SORT=m
CONFIG_REED_SOLOMON_TEST=m
CONFIG_TEST_MEMCAT_P=m
CONFIG_TEST_STACKINIT=m
CONFIG_TEST_MEMINIT=m
-CONFIG_EARLY_PRINTK=y
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_XXHASH=m
+CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_LIB_BLAKE2S=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
CONFIG_CRC64=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_EARLY_PRINTK=y
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_SORT=m
CONFIG_REED_SOLOMON_TEST=m
CONFIG_TEST_MEMCAT_P=m
CONFIG_TEST_STACKINIT=m
CONFIG_TEST_MEMINIT=m
-CONFIG_EARLY_PRINTK=y
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_XXHASH=m
+CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_LIB_BLAKE2S=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
CONFIG_CRC64=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_XXHASH=m
+CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_LIB_BLAKE2S=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
CONFIG_CRC64=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_EARLY_PRINTK=y
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_SORT=m
CONFIG_REED_SOLOMON_TEST=m
CONFIG_TEST_MEMCAT_P=m
CONFIG_TEST_STACKINIT=m
CONFIG_TEST_MEMINIT=m
-CONFIG_EARLY_PRINTK=y
#define __ARCH_WANT_SYS_SIGPROCMASK
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_VFORK
+#define __ARCH_WANT_SYS_CLONE3
#endif /* _ASM_M68K_UNISTD_H_ */
lea %sp@(24),%sp
rts
+ENTRY(__sys_clone3)
+ SAVE_SWITCH_STACK
+ pea %sp@(SWITCH_STACK_SIZE)
+ jbsr m68k_clone3
+ lea %sp@(28),%sp
+ rts
+
ENTRY(sys_sigreturn)
SAVE_SWITCH_STACK
movel %sp,%sp@- | switch_stack pointer
#include <linux/init_task.h>
#include <linux/mqueue.h>
#include <linux/rcupdate.h>
-
+#include <linux/syscalls.h>
#include <linux/uaccess.h>
+
#include <asm/traps.h>
#include <asm/machdep.h>
#include <asm/setup.h>
* on top of pt_regs, which means that sys_clone() arguments would be
* buried. We could, of course, copy them, but it's too costly for no
* good reason - generic clone() would have to copy them *again* for
- * do_fork() anyway. So in this case it's actually better to pass pt_regs *
- * and extract arguments for do_fork() from there. Eventually we might
- * go for calling do_fork() directly from the wrapper, but only after we
- * are finished with do_fork() prototype conversion.
+ * _do_fork() anyway. So in this case it's actually better to pass pt_regs *
+ * and extract arguments for _do_fork() from there. Eventually we might
+ * go for calling _do_fork() directly from the wrapper, but only after we
+ * are finished with _do_fork() prototype conversion.
*/
asmlinkage int m68k_clone(struct pt_regs *regs)
{
/* regs will be equal to current_pt_regs() */
- return do_fork(regs->d1, regs->d2, 0,
- (int __user *)regs->d3, (int __user *)regs->d4);
+ struct kernel_clone_args args = {
+ .flags = regs->d1 & ~CSIGNAL,
+ .pidfd = (int __user *)regs->d3,
+ .child_tid = (int __user *)regs->d4,
+ .parent_tid = (int __user *)regs->d3,
+ .exit_signal = regs->d1 & CSIGNAL,
+ .stack = regs->d2,
+ .tls = regs->d5,
+ };
+
+ if (!legacy_clone_args_valid(&args))
+ return -EINVAL;
+
+ return _do_fork(&args);
+}
+
+/*
+ * Because extra registers are saved on the stack after the sys_clone3()
+ * arguments, this C wrapper extracts them from pt_regs * and then calls the
+ * generic sys_clone3() implementation.
+ */
+asmlinkage int m68k_clone3(struct pt_regs *regs)
+{
+ return sys_clone3((struct clone_args __user *)regs->d1, regs->d2);
}
-int copy_thread(unsigned long clone_flags, unsigned long usp,
- unsigned long arg, struct task_struct *p)
+int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
+ unsigned long arg, struct task_struct *p,
+ unsigned long tls)
{
struct fork_frame {
struct switch_stack sw;
p->thread.usp = usp ?: rdusp();
if (clone_flags & CLONE_SETTLS)
- task_thread_info(p)->tp_value = frame->regs.d5;
+ task_thread_info(p)->tp_value = tls;
#ifdef CONFIG_FPU
if (!FPU_IS_EMU) {
432 common fsmount sys_fsmount
433 common fspick sys_fspick
434 common pidfd_open sys_pidfd_open
-# 435 reserved for clone3
+435 common clone3 __sys_clone3
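
With the table entry in place, userspace reaches the new system call through the raw syscall(2) interface, since glibc ships no wrapper. A minimal hedged sketch, assuming kernel headers recent enough to provide struct clone_args and __NR_clone3:

#define _GNU_SOURCE
#include <linux/sched.h>	/* struct clone_args */
#include <signal.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Behaves like a plain fork(): no CLONE_* flags, SIGCHLD on exit.
 * Returns the child's PID in the parent and 0 in the child. */
static long fork_via_clone3(void)
{
	struct clone_args args;

	memset(&args, 0, sizeof(args));
	args.exit_signal = SIGCHLD;
	return syscall(__NR_clone3, &args, sizeof(args));
}
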
#define PG_dcache_dirty PG_arch_1
void flush_icache_range(unsigned long start, unsigned long end);
+#define flush_icache_range flush_icache_range
+
void flush_icache_page(struct vm_area_struct *vma, struct page *page);
+#define flush_icache_page flush_icache_page
+
#ifdef CONFIG_CPU_CACHE_ALIASING
void flush_cache_mm(struct mm_struct *mm);
void flush_cache_dup_mm(struct mm_struct *mm);
#define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&(mapping)->i_pages)
#else
-#include <asm-generic/cacheflush.h>
-#undef flush_icache_range
-#undef flush_icache_page
-#undef flush_icache_user_range
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
unsigned long addr, int len);
+#define flush_icache_user_range flush_icache_user_range
+
+#include <asm-generic/cacheflush.h>
#endif
#endif /* __NDS32_CACHEFLUSH_H__ */
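
The reshuffle above works because asm-generic/cacheflush.h only supplies a fallback for each hook that has not already been #defined, so the arch overrides must precede the #include instead of being #undef'd after it. The pattern, in sketch form:

/* sketch: declare the arch override, then flag it so the generic
 * header skips its fallback definition */
void flush_icache_range(unsigned long start, unsigned long end);
#define flush_icache_range flush_icache_range

#include <asm-generic/cacheflush.h>	/* defaults for everything else */
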
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
-#define pmd_off_k(address) pmd_offset(pgd_offset_k(address), address)
+#define pmd_off_k(address) pmd_offset(pud_offset(p4d_offset(pgd_offset_k(address), (address)), (address)), (address))
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
/*
select HAVE_FTRACE_MCOUNT_RECORD if HAVE_DYNAMIC_FTRACE
select HAVE_KPROBES_ON_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS
+ select HAVE_COPY_THREAD_TLS
help
The PA-RISC microprocessor is designed by Hewlett-Packard and used
static int count;
print_pa_hwpath(dev, hw_path);
- pr_info("%d. %s at 0x%px [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }",
- ++count, dev->name, (void*) dev->hpa.start, hw_path, dev->id.hw_type,
+ pr_info("%d. %s at %pap [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }",
+ ++count, dev->name, &(dev->hpa.start), hw_path, dev->id.hw_type,
dev->id.hversion_rev, dev->id.hversion, dev->id.sversion);
if (dev->num_addrs) {
* Copy architecture-specific thread state
*/
int
-copy_thread(unsigned long clone_flags, unsigned long usp,
- unsigned long kthread_arg, struct task_struct *p)
+copy_thread_tls(unsigned long clone_flags, unsigned long usp,
+ unsigned long kthread_arg, struct task_struct *p, unsigned long tls)
{
struct pt_regs *cregs = &(p->thread.regs);
void *stack = task_stack_page(p);
cregs->ksp = (unsigned long)stack + THREAD_SZ_ALGN + FRAME_SIZE;
cregs->kpc = (unsigned long) &child_return;
- /* Setup thread TLS area from the 4th parameter in clone */
+ /* Setup thread TLS area */
if (clone_flags & CLONE_SETTLS)
- cregs->cr27 = cregs->gr[23];
+ cregs->cr27 = tls;
}
return 0;
pmd = (pmd_t *) __pa(pmd);
}
- pgd_populate(NULL, pg_dir, __va(pmd));
+ pud_populate(NULL, (pud_t *)pg_dir, __va(pmd));
#endif
pg_dir++;
config PPC_UV
bool "Ultravisor support"
depends on KVM_BOOK3S_HV_POSSIBLE
- select ZONE_DEVICE
- select DEV_PAGEMAP_OPS
- select DEVICE_PRIVATE
- select MEMORY_HOTPLUG
- select MEMORY_HOTREMOVE
+ depends on DEVICE_PRIVATE
default n
help
This option paravirtualizes the kernel to run in POWER platforms that
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe1000 0x1000>;
+ fsl,erratum-a011043; /* must ignore read errors */
pcsphy0: ethernet-phy@0 {
reg = <0x0>;
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xf1000 0x1000>;
+ fsl,erratum-a011043; /* must ignore read errors */
pcsphy6: ethernet-phy@0 {
reg = <0x0>;
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe3000 0x1000>;
+ fsl,erratum-a011043; /* must ignore read errors */
pcsphy1: ethernet-phy@0 {
reg = <0x0>;
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xf3000 0x1000>;
+ fsl,erratum-a011043; /* must ignore read errors */
pcsphy7: ethernet-phy@0 {
reg = <0x0>;
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe1000 0x1000>;
+ fsl,erratum-a011043; /* must ignore read errors */
pcsphy0: ethernet-phy@0 {
reg = <0x0>;
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe3000 0x1000>;
+ fsl,erratum-a011043; /* must ignore read errors */
pcsphy1: ethernet-phy@0 {
reg = <0x0>;
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe5000 0x1000>;
+ fsl,erratum-a011043; /* must ignore read errors */
pcsphy2: ethernet-phy@0 {
reg = <0x0>;
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe7000 0x1000>;
+ fsl,erratum-a011043; /* must ignore read errors */
pcsphy3: ethernet-phy@0 {
reg = <0x0>;
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe9000 0x1000>;
+ fsl,erratum-a011043; /* must ignore read errors */
pcsphy4: ethernet-phy@0 {
reg = <0x0>;
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xeb000 0x1000>;
+ fsl,erratum-a011043; /* must ignore read errors */
pcsphy5: ethernet-phy@0 {
reg = <0x0>;
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xf1000 0x1000>;
+ fsl,erratum-a011043; /* must ignore read errors */
pcsphy14: ethernet-phy@0 {
reg = <0x0>;
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xf3000 0x1000>;
+ fsl,erratum-a011043; /* must ignore read errors */
pcsphy15: ethernet-phy@0 {
reg = <0x0>;
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe1000 0x1000>;
+ fsl,erratum-a011043; /* must ignore read errors */
pcsphy8: ethernet-phy@0 {
reg = <0x0>;
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe3000 0x1000>;
+ fsl,erratum-a011043; /* must ignore read errors */
pcsphy9: ethernet-phy@0 {
reg = <0x0>;
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe5000 0x1000>;
+ fsl,erratum-a011043; /* must ignore read errors */
pcsphy10: ethernet-phy@0 {
reg = <0x0>;
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe7000 0x1000>;
+ fsl,erratum-a011043; /* must ignore read errors */
pcsphy11: ethernet-phy@0 {
reg = <0x0>;
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe9000 0x1000>;
+ fsl,erratum-a011043; /* must ignore read errors */
pcsphy12: ethernet-phy@0 {
reg = <0x0>;
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xeb000 0x1000>;
+ fsl,erratum-a011043; /* must ignore read errors */
pcsphy13: ethernet-phy@0 {
reg = <0x0>;
*
*/
#define MAX_USER_CONTEXT ((ASM_CONST(1) << CONTEXT_BITS) - 2)
+
+// The + 2 accounts for INVALID_REGION and one more to avoid overlap with the kernel
#define MIN_USER_CONTEXT (MAX_KERNEL_CTX_CNT + MAX_VMALLOC_CTX_CNT + \
- MAX_IO_CTX_CNT + MAX_VMEMMAP_CTX_CNT)
+ MAX_IO_CTX_CNT + MAX_VMEMMAP_CTX_CNT + 2)
+
/*
* For platforms that support on 65bit VA we limit the context bits
*/
#define XIVE_ESB_VAL_P 0x2
#define XIVE_ESB_VAL_Q 0x1
+#define XIVE_ESB_INVALID 0xFF
/*
* Thread Management (aka "TM") registers
enum irqchip_irq_state which, bool *state)
{
struct xive_irq_data *xd = irq_data_get_irq_handler_data(data);
+ u8 pq;
switch (which) {
case IRQCHIP_STATE_ACTIVE:
- *state = !xd->stale_p &&
- (xd->saved_p ||
- !!(xive_esb_read(xd, XIVE_ESB_GET) & XIVE_ESB_VAL_P));
+ pq = xive_esb_read(xd, XIVE_ESB_GET);
+
+ /*
+	 * An ESB value of all ones means we could not get the
+	 * PQ state of the interrupt through MMIO. This can
+	 * happen, for example, when querying a PHB interrupt
+ * while the PHB is in an error state. We consider the
+ * interrupt to be inactive in that case.
+ */
+ *state = (pq != XIVE_ESB_INVALID) && !xd->stale_p &&
+ (xd->saved_p || !!(pq & XIVE_ESB_VAL_P));
return 0;
default:
return -EINVAL;
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
select HAVE_ARCH_MMAP_RND_BITS if MMU
select ARCH_HAS_GCOV_PROFILE_ALL
+ select HAVE_COPY_THREAD_TLS
config ARCH_MMAP_RND_BITS_MIN
default 18 if 64BIT
#include <linux/ftrace.h>
#include <asm-generic/asm-prototypes.h>
+long long __lshrti3(long long a, int b);
+long long __ashrti3(long long a, int b);
+long long __ashlti3(long long a, int b);
+
#endif /* _ASM_RISCV_PROTOTYPES_H */
+++ /dev/null
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * SiFive L2 Cache Controller header file
- *
- */
-
-#ifndef _ASM_RISCV_SIFIVE_L2_CACHE_H
-#define _ASM_RISCV_SIFIVE_L2_CACHE_H
-
-extern int register_sifive_l2_error_notifier(struct notifier_block *nb);
-extern int unregister_sifive_l2_error_notifier(struct notifier_block *nb);
-
-#define SIFIVE_L2_ERR_TYPE_CE 0
-#define SIFIVE_L2_ERR_TYPE_UE 1
-
-#endif /* _ASM_RISCV_SIFIVE_L2_CACHE_H */
#ifdef CONFIG_SMP
li t0, CONFIG_NR_CPUS
- bgeu a0, t0, .Lsecondary_park
+ blt a0, t0, .Lgood_cores
+ tail .Lsecondary_park
+.Lgood_cores:
#endif
/* Pick one hart to run the main boot sequence */
tail smp_callin
#endif
-.align 2
-.Lsecondary_park:
- /* We lack SMP support or have too many harts, so park this hart */
- wfi
- j .Lsecondary_park
END(_start)
#ifdef CONFIG_RISCV_M_MODE
#ifdef CONFIG_FPU
csrr t0, CSR_MISA
andi t0, t0, (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D)
- bnez t0, .Lreset_regs_done
+	beqz	t0, .Lreset_regs_done
li t1, SR_FS
csrs CSR_STATUS, t1
END(reset_regs)
#endif /* CONFIG_RISCV_M_MODE */
+.section ".text", "ax",@progbits
+.align 2
+.Lsecondary_park:
+ /* We lack SMP support or have too many harts, so park this hart */
+ wfi
+ j .Lsecondary_park
+
__PAGE_ALIGNED_BSS
/* Empty zero page */
.balign PAGE_SIZE
return 0;
}
-int copy_thread(unsigned long clone_flags, unsigned long usp,
- unsigned long arg, struct task_struct *p)
+int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
+ unsigned long arg, struct task_struct *p, unsigned long tls)
{
struct pt_regs *childregs = task_pt_regs(p);
if (usp) /* User fork */
childregs->sp = usp;
if (clone_flags & CLONE_SETTLS)
- childregs->tp = childregs->a5;
+ childregs->tp = tls;
childregs->a0 = 0; /* Return value of fork() */
p->thread.ra = (unsigned long)ret_from_fork;
}
cmd_vdsold = $(CC) $(KBUILD_CFLAGS) $(call cc-option, -no-pie) -nostdlib -nostartfiles $(SYSCFLAGS_$(@F)) \
-Wl,-T,$(filter-out FORCE,$^) -o $@.tmp && \
$(CROSS_COMPILE)objcopy \
- $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@
+ $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \
+ rm $@.tmp
# install commands for the unstripped file
quiet_cmd_vdso_install = INSTALL $@
*/
#include <linux/linkage.h>
+#include <asm-generic/export.h>
-ENTRY(__lshrti3)
+SYM_FUNC_START(__lshrti3)
beqz a2, .L1
li a5,64
sub a5,a5,a2
- addi sp,sp,-16
sext.w a4,a5
blez a5, .L2
sext.w a2,a2
- sll a4,a1,a4
srl a0,a0,a2
- srl a1,a1,a2
+ sll a4,a1,a4
+ srl a2,a1,a2
or a0,a0,a4
- sd a1,8(sp)
- sd a0,0(sp)
- ld a0,0(sp)
- ld a1,8(sp)
- addi sp,sp,16
- ret
+ mv a1,a2
.L1:
ret
.L2:
- negw a4,a4
- srl a1,a1,a4
- sd a1,0(sp)
- sd zero,8(sp)
- ld a0,0(sp)
- ld a1,8(sp)
- addi sp,sp,16
+ negw a0,a4
+ li a2,0
+ srl a0,a1,a0
+ mv a1,a2
+ ret
+SYM_FUNC_END(__lshrti3)
+EXPORT_SYMBOL(__lshrti3)
+
+SYM_FUNC_START(__ashrti3)
+ beqz a2, .L3
+ li a5,64
+ sub a5,a5,a2
+ sext.w a4,a5
+ blez a5, .L4
+ sext.w a2,a2
+ srl a0,a0,a2
+ sll a4,a1,a4
+ sra a2,a1,a2
+ or a0,a0,a4
+ mv a1,a2
+.L3:
+ ret
+.L4:
+ negw a0,a4
+ srai a2,a1,0x3f
+ sra a0,a1,a0
+ mv a1,a2
+ ret
+SYM_FUNC_END(__ashrti3)
+EXPORT_SYMBOL(__ashrti3)
+
+SYM_FUNC_START(__ashlti3)
+ beqz a2, .L5
+ li a5,64
+ sub a5,a5,a2
+ sext.w a4,a5
+ blez a5, .L6
+ sext.w a2,a2
+ sll a1,a1,a2
+ srl a4,a0,a4
+ sll a2,a0,a2
+ or a1,a1,a4
+ mv a0,a2
+.L5:
+ ret
+.L6:
+ negw a1,a4
+ li a2,0
+ sll a1,a0,a1
+ mv a0,a2
ret
-ENDPROC(__lshrti3)
+SYM_FUNC_END(__ashlti3)
+EXPORT_SYMBOL(__ashlti3)
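
All three helpers follow the same split: on RV64 a 128-bit value lives in a register pair (low half in a0, high half in a1), and a shift either moves bits across the halves or, for counts of 64 and up, collapses to a single shift of one half. A hedged C model of the logical right shift, written for clarity rather than as the kernel implementation:

#include <stdint.h>

struct u128_pair {
	uint64_t lo;	/* a0 */
	uint64_t hi;	/* a1 */
};

static struct u128_pair lshrti3_model(struct u128_pair a, unsigned int b)
{
	struct u128_pair r = a;

	if (b == 0)		/* "beqz a2, .L1" */
		return r;
	if (b < 64) {		/* low bits come from both halves */
		r.lo = (a.lo >> b) | (a.hi << (64 - b));
		r.hi = a.hi >> b;
	} else {		/* ".L2": only the high half survives */
		r.lo = a.hi >> (b - 64);
		r.hi = 0;
	}
	return r;
}
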
if (!early_ipl_comp_list_addr)
return;
- if (ipl_block.hdr.flags & IPL_PL_FLAG_IPLSR)
+ if (ipl_block.hdr.flags & IPL_PL_FLAG_SIPL)
pr_info("Linux is running with Secure-IPL enabled\n");
else
pr_info("Linux is running with Secure-IPL disabled\n");
select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_DEBUG_KMEMLEAK
select HAVE_DEBUG_BUGVERBOSE
+ select HAVE_COPY_THREAD_TLS
select GENERIC_IRQ_SHOW
select GENERIC_CPU_DEVICES
select GENERIC_CLOCKEVENTS
extern unsigned long getreg(struct task_struct *child, int regno);
extern int putreg(struct task_struct *child, int regno, unsigned long value);
-extern int arch_copy_tls(struct task_struct *new);
+extern int arch_set_tls(struct task_struct *new, unsigned long tls);
extern void clear_flushed_tls(struct task_struct *task);
extern int syscall_trace_enter(struct pt_regs *regs);
extern void syscall_trace_leave(struct pt_regs *regs);
userspace(&current->thread.regs.regs, current_thread_info()->aux_fp_regs);
}
-int copy_thread(unsigned long clone_flags, unsigned long sp,
- unsigned long arg, struct task_struct * p)
+int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
+ unsigned long arg, struct task_struct * p, unsigned long tls)
{
void (*handler)(void);
int kthread = current->flags & PF_KTHREAD;
* Set a new TLS for the child thread?
*/
if (clone_flags & CLONE_SETTLS)
- ret = arch_copy_tls(p);
+ ret = arch_set_tls(p, tls);
}
return ret;
leal efi32_config(%ebp), %eax
movl %eax, efi_config(%ebp)
+ /* Disable paging */
+ movl %cr0, %eax
+ btrl $X86_CR0_PG_BIT, %eax
+ movl %eax, %cr0
+
jmp startup_32
SYM_FUNC_END(efi32_stub_entry)
#endif
#define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC 0x1910
#define PCI_DEVICE_ID_INTEL_SKL_SD_IMC 0x190f
#define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC 0x191f
+#define PCI_DEVICE_ID_INTEL_SKL_E3_IMC 0x1918
#define PCI_DEVICE_ID_INTEL_KBL_Y_IMC 0x590c
#define PCI_DEVICE_ID_INTEL_KBL_U_IMC 0x5904
#define PCI_DEVICE_ID_INTEL_KBL_UQ_IMC 0x5914
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC),
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
},
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_E3_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
{ /* IMC */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_Y_IMC),
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Quad Core */
IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Dual Core */
IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Quad Core */
+ IMC_DEV(SKL_E3_IMC, &skl_uncore_pci_driver), /* Xeon E3 V5 Gen Core processor */
IMC_DEV(KBL_Y_IMC, &skl_uncore_pci_driver), /* 7th Gen Core Y */
IMC_DEV(KBL_U_IMC, &skl_uncore_pci_driver), /* 7th Gen Core U */
IMC_DEV(KBL_UQ_IMC, &skl_uncore_pci_driver), /* 7th Gen Core U Quad Core */
#define SNR_M2M_PCI_PMON_BOX_CTL 0x438
#define SNR_M2M_PCI_PMON_UMASK_EXT 0xff
-/* SNR PCIE3 */
-#define SNR_PCIE3_PCI_PMON_CTL0 0x508
-#define SNR_PCIE3_PCI_PMON_CTR0 0x4e8
-#define SNR_PCIE3_PCI_PMON_BOX_CTL 0x4e4
-
/* SNR IMC */
#define SNR_IMC_MMIO_PMON_FIXED_CTL 0x54
#define SNR_IMC_MMIO_PMON_FIXED_CTR 0x38
.format_group = &snr_m2m_uncore_format_group,
};
-static struct intel_uncore_type snr_uncore_pcie3 = {
- .name = "pcie3",
- .num_counters = 4,
- .num_boxes = 1,
- .perf_ctr_bits = 48,
- .perf_ctr = SNR_PCIE3_PCI_PMON_CTR0,
- .event_ctl = SNR_PCIE3_PCI_PMON_CTL0,
- .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
- .box_ctl = SNR_PCIE3_PCI_PMON_BOX_CTL,
- .ops = &ivbep_uncore_pci_ops,
- .format_group = &ivbep_uncore_format_group,
-};
-
enum {
SNR_PCI_UNCORE_M2M,
- SNR_PCI_UNCORE_PCIE3,
};
static struct intel_uncore_type *snr_pci_uncores[] = {
[SNR_PCI_UNCORE_M2M] = &snr_uncore_m2m,
- [SNR_PCI_UNCORE_PCIE3] = &snr_uncore_pcie3,
NULL,
};
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, SNR_PCI_UNCORE_M2M, 0),
},
- { /* PCIe3 */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x334a),
- .driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 0, SNR_PCI_UNCORE_PCIE3, 0),
- },
{ /* end: all zeroes */ }
};
INTEL_UNCORE_EVENT_DESC(write, "event=0xff,umask=0x21"),
INTEL_UNCORE_EVENT_DESC(write.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(write.unit, "MiB"),
+ { /* end: all zeroes */ },
};
static struct intel_uncore_ops snr_uncore_imc_freerunning_ops = {
#define INTEL_FAM6_ATOM_TREMONT_D 0x86 /* Jacobsville */
#define INTEL_FAM6_ATOM_TREMONT 0x96 /* Elkhart Lake */
+#define INTEL_FAM6_ATOM_TREMONT_L 0x9C /* Jasper Lake */
/* Xeon Phi */
#if IS_ENABLED(CONFIG_INTEL_PMC_IPC)
-int intel_pmc_ipc_simple_command(int cmd, int sub);
-int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen,
- u32 *out, u32 outlen, u32 dptr, u32 sptr);
int intel_pmc_ipc_command(u32 cmd, u32 sub, u8 *in, u32 inlen,
u32 *out, u32 outlen);
int intel_pmc_s0ix_counter_read(u64 *data);
-int intel_pmc_gcr_read(u32 offset, u32 *data);
int intel_pmc_gcr_read64(u32 offset, u64 *data);
-int intel_pmc_gcr_write(u32 offset, u32 data);
-int intel_pmc_gcr_update(u32 offset, u32 mask, u32 val);
#else
-static inline int intel_pmc_ipc_simple_command(int cmd, int sub)
-{
- return -EINVAL;
-}
-
-static inline int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen,
- u32 *out, u32 outlen, u32 dptr, u32 sptr)
-{
- return -EINVAL;
-}
-
static inline int intel_pmc_ipc_command(u32 cmd, u32 sub, u8 *in, u32 inlen,
u32 *out, u32 outlen)
{
return -EINVAL;
}
-static inline int intel_pmc_gcr_read(u32 offset, u32 *data)
-{
- return -EINVAL;
-}
-
static inline int intel_pmc_gcr_read64(u32 offset, u64 *data)
{
return -EINVAL;
}
-static inline int intel_pmc_gcr_write(u32 offset, u32 data)
-{
- return -EINVAL;
-}
-
-static inline int intel_pmc_gcr_update(u32 offset, u32 mask, u32 val)
-{
- return -EINVAL;
-}
-
#endif /*CONFIG_INTEL_PMC_IPC*/
#endif
/* Read single register */
int intel_scu_ipc_ioread8(u16 addr, u8 *data);
-/* Read two sequential registers */
-int intel_scu_ipc_ioread16(u16 addr, u16 *data);
-
-/* Read four sequential registers */
-int intel_scu_ipc_ioread32(u16 addr, u32 *data);
-
/* Read a vector */
int intel_scu_ipc_readv(u16 *addr, u8 *data, int len);
/* Write single register */
int intel_scu_ipc_iowrite8(u16 addr, u8 data);
-/* Write two sequential registers */
-int intel_scu_ipc_iowrite16(u16 addr, u16 data);
-
-/* Write four sequential registers */
-int intel_scu_ipc_iowrite32(u16 addr, u32 data);
-
/* Write a vector */
int intel_scu_ipc_writev(u16 *addr, u8 *data, int len);
int intel_scu_ipc_simple_command(int cmd, int sub);
int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
u32 *out, int outlen);
-int intel_scu_ipc_raw_command(int cmd, int sub, u8 *in, int inlen,
- u32 *out, int outlen, u32 dptr, u32 sptr);
-
-/* I2C control api */
-int intel_scu_ipc_i2c_cntrl(u32 addr, u32 *data);
-
-/* Update FW version */
-int intel_scu_ipc_fw_update(u8 *buffer, u32 length);
extern struct blocking_notifier_head intel_scu_notifier;
struct telemetry_unit_config {
struct telemetry_evtmap *telem_evts;
void __iomem *regmap;
- u32 ssram_base_addr;
u8 ssram_evts_used;
u8 curr_period;
u8 max_period;
u8 min_period;
- u32 ssram_size;
-
};
struct telemetry_plt_config {
extern bool phys_mem_access_encrypted(unsigned long phys_addr,
unsigned long size);
+/**
+ * iosubmit_cmds512 - copy data to single MMIO location, in 512-bit units
+ * @__dst: destination, in MMIO space (must be 512-bit aligned)
+ * @src: source
+ * @count: number of 512-bit quantities to submit
+ *
+ * Submit data from kernel space to MMIO space, in units of 512 bits at a
+ * time. Order of access is not guaranteed, nor is a memory barrier
+ * performed afterwards.
+ *
+ * Warning: Do not use this helper unless your driver has checked that the CPU
+ * instruction is supported on the platform.
+ */
+static inline void iosubmit_cmds512(void __iomem *__dst, const void *src,
+ size_t count)
+{
+ /*
+	 * Note that this isn't an "on-stack copy", just a definition of
+	 * "dst" as a pointer to 64 bytes of stuff that is going to be
+	 * overwritten.
+ * In the MOVDIR64B case that may be needed as you can use the
+ * MOVDIR64B instruction to copy arbitrary memory around. This trick
+ * lets the compiler know how much gets clobbered.
+ */
+ volatile struct { char _[64]; } *dst = __dst;
+ const u8 *from = src;
+ const u8 *end = from + count * 64;
+
+ while (from < end) {
+ /* MOVDIR64B [rdx], rax */
+ asm volatile(".byte 0x66, 0x0f, 0x38, 0xf8, 0x02"
+ : "=m" (dst)
+ : "d" (from), "a" (dst));
+ from += 64;
+ }
+}
+
#endif /* _ASM_X86_IO_H */
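
A usage sketch for the new helper, with hypothetical names throughout (submit_batch, portal, desc_ring): a driver that has verified MOVDIR64B support hands the device portal and a batch of 64-byte descriptors straight to iosubmit_cmds512():

/* Hypothetical in-kernel driver fragment; the feature check is the
 * driver's responsibility, as the comment above warns. */
static int submit_batch(void __iomem *portal, const void *desc_ring,
			size_t count)
{
	if (!boot_cpu_has(X86_FEATURE_MOVDIR64B))
		return -ENODEV;

	iosubmit_cmds512(portal, desc_ring, count);
	/* No write ordering or completion is implied; completion must
	 * be observed through the device's own status mechanism. */
	return 0;
}
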
enum mce_notifier_prios {
MCE_PRIO_FIRST = INT_MAX,
- MCE_PRIO_SRAO = INT_MAX - 1,
+ MCE_PRIO_UC = INT_MAX - 1,
MCE_PRIO_EXTLOG = INT_MAX - 2,
MCE_PRIO_NFIT = INT_MAX - 3,
MCE_PRIO_EDAC = INT_MAX - 4,
/* These may be used by multiple smca_hwid_mcatypes */
enum smca_bank_types {
SMCA_LS = 0, /* Load Store */
+ SMCA_LS_V2, /* Load Store */
SMCA_IF, /* Instruction Fetch */
SMCA_L2_CACHE, /* L2 Cache */
SMCA_DE, /* Decoder Unit */
static inline void load_ucode_amd_ap(unsigned int family) {}
static inline int __init
save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; }
-void reload_ucode_amd(void) {}
+static inline void reload_ucode_amd(void) {}
#endif
#endif /* _ASM_X86_MICROCODE_AMD_H */
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4 0x1494
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4 0x1444
+#define PCI_DEVICE_ID_AMD_19H_DF_F4 0x1654
/* Protect the PCI config register pairs used for SMN and DF indirect access. */
static DEFINE_MUTEX(smn_mutex);
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
{}
};
EXPORT_SYMBOL_GPL(amd_nb_misc_ids);
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
{}
};
return;
clear_all:
- clear_cpu_cap(c, X86_FEATURE_SME);
+ setup_clear_cpu_cap(X86_FEATURE_SME);
clear_sev:
- clear_cpu_cap(c, X86_FEATURE_SEV);
+ setup_clear_cpu_cap(X86_FEATURE_SEV);
}
}
static struct smca_bank_name smca_names[] = {
[SMCA_LS] = { "load_store", "Load Store Unit" },
+ [SMCA_LS_V2] = { "load_store", "Load Store Unit" },
[SMCA_IF] = { "insn_fetch", "Instruction Fetch Unit" },
[SMCA_L2_CACHE] = { "l2_cache", "L2 Cache" },
[SMCA_DE] = { "decode_unit", "Decode Unit" },
/* ZN Core (HWID=0xB0) MCA types */
{ SMCA_LS, HWID_MCATYPE(0xB0, 0x0), 0x1FFFFF },
+ { SMCA_LS_V2, HWID_MCATYPE(0xB0, 0x10), 0xFFFFFF },
{ SMCA_IF, HWID_MCATYPE(0xB0, 0x1), 0x3FFF },
{ SMCA_L2_CACHE, HWID_MCATYPE(0xB0, 0x2), 0xF },
{ SMCA_DE, HWID_MCATYPE(0xB0, 0x3), 0x1FF },
#include "internal.h"
-static DEFINE_MUTEX(mce_log_mutex);
-
/* sysfs synchronization */
static DEFINE_MUTEX(mce_sysfs_mutex);
if (!mce_gen_pool_add(m))
irq_work_queue(&mce_irq_work);
}
-
-void mce_inject_log(struct mce *m)
-{
- mutex_lock(&mce_log_mutex);
- mce_log(m);
- mutex_unlock(&mce_log_mutex);
-}
-EXPORT_SYMBOL_GPL(mce_inject_log);
-
-static struct notifier_block mce_srao_nb;
+EXPORT_SYMBOL_GPL(mce_log);
/*
- * We run the default notifier if we have only the SRAO, the first and the
+ * We run the default notifier if we have only the UC, the first and the
* default notifier registered. I.e., the mandatory NUM_DEFAULT_NOTIFIERS
* notifiers registered on the chain.
*/
.priority = MCE_PRIO_FIRST,
};
-static int srao_decode_notifier(struct notifier_block *nb, unsigned long val,
- void *data)
+static int uc_decode_notifier(struct notifier_block *nb, unsigned long val,
+ void *data)
{
struct mce *mce = (struct mce *)data;
unsigned long pfn;
- if (!mce)
+ if (!mce || !mce_usable_address(mce))
return NOTIFY_DONE;
- if (mce_usable_address(mce) && (mce->severity == MCE_AO_SEVERITY)) {
- pfn = mce->addr >> PAGE_SHIFT;
- if (!memory_failure(pfn, 0))
- set_mce_nospec(pfn);
- }
+ if (mce->severity != MCE_AO_SEVERITY &&
+ mce->severity != MCE_DEFERRED_SEVERITY)
+ return NOTIFY_DONE;
+
+ pfn = mce->addr >> PAGE_SHIFT;
+ if (!memory_failure(pfn, 0))
+ set_mce_nospec(pfn);
return NOTIFY_OK;
}
-static struct notifier_block mce_srao_nb = {
- .notifier_call = srao_decode_notifier,
- .priority = MCE_PRIO_SRAO,
+
+static struct notifier_block mce_uc_nb = {
+ .notifier_call = uc_decode_notifier,
+ .priority = MCE_PRIO_UC,
};
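
For illustration, a hypothetical consumer of the same decode chain would register its own notifier_block and receive each struct mce through the data pointer (the names and the priority slot below are illustrative, not from the patch):

	static int my_mce_decode(struct notifier_block *nb, unsigned long val,
				 void *data)
	{
		struct mce *m = data;

		if (!m)
			return NOTIFY_DONE;

		pr_debug("CPU%d bank %d status 0x%llx\n",
			 m->extcpu, m->bank, m->status);
		return NOTIFY_OK;
	}

	static struct notifier_block my_mce_nb = {
		.notifier_call	= my_mce_decode,
		.priority	= MCE_PRIO_LOWEST,	/* assumed priority slot */
	};

	/* At init time: mce_register_decode_chain(&my_mce_nb); */
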
static int mce_default_notifier(struct notifier_block *nb, unsigned long val,
log_it:
error_seen = true;
- mce_read_aux(&m, i);
+ if (flags & MCP_DONTLOG)
+ goto clear_it;
+ mce_read_aux(&m, i);
m.severity = mce_severity(&m, mca_cfg.tolerant, NULL, false);
-
/*
* Don't get the IP here because it's unlikely to
* have anything to do with the actual error location.
*/
- if (!(flags & MCP_DONTLOG) && !mca_cfg.dont_log_ce)
- mce_log(&m);
- else if (mce_usable_address(&m)) {
- /*
- * Although we skipped logging this, we still want
- * to take action. Add to the pool so the registered
- * notifiers will see it.
- */
- if (!mce_gen_pool_add(&m))
- mce_schedule_work();
- }
+ if (mca_cfg.dont_log_ce && !mce_usable_address(&m))
+ goto clear_it;
+
+ mce_log(&m);
+
+clear_it:
/*
* Clear state for this bank.
*/
static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
struct pt_regs *regs)
{
- char *tmp;
+ char *tmp = *msg;
int i;
for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
DECLARE_BITMAP(toclear, MAX_NR_BANKS);
struct mca_config *cfg = &mca_cfg;
int cpu = smp_processor_id();
- char *msg = "Unknown";
struct mce m, *final;
+ char *msg = NULL;
int worst = 0;
/*
ist_end_non_atomic();
} else {
if (!fixup_exception(regs, X86_TRAP_MC, error_code, 0))
- mce_panic("Failed kernel mode recovery", &m, NULL);
+ mce_panic("Failed kernel mode recovery", &m, msg);
}
out_ist:
{
mcheck_intel_therm_init();
mce_register_decode_chain(&first_nb);
- mce_register_decode_chain(&mce_srao_nb);
+ mce_register_decode_chain(&mce_uc_nb);
mce_register_decode_chain(&mce_default_nb);
mcheck_vendor_init_severity();
i_mce.status |= MCI_STATUS_SYNDV;
if (inj_type == SW_INJ) {
- mce_inject_log(&i_mce);
+ mce_log(&i_mce);
return;
}
}
#endif
-void mce_inject_log(struct mce *m);
-
/*
* We consider records to be equivalent if bank+status+addr+misc all match.
* This is only used when the system is going down because of a fatal error
*temp = (msr_val >> 16) & 0x7F;
}
-static void throttle_active_work(struct work_struct *work)
+static void __maybe_unused throttle_active_work(struct work_struct *work)
{
struct _thermal_state *state = container_of(to_delayed_work(work),
struct _thermal_state, therm_work);
{
struct thermal_state *state = &per_cpu(thermal_state, cpu);
struct device *dev = get_cpu_device(cpu);
+ u32 l;
state->package_throttle.level = PACKAGE_LEVEL;
state->core_throttle.level = CORE_LEVEL;
INIT_DELAYED_WORK(&state->package_throttle.therm_work, throttle_active_work);
INIT_DELAYED_WORK(&state->core_throttle.therm_work, throttle_active_work);
+ /* Unmask the thermal vector after the above workqueues are initialized. */
+ l = apic_read(APIC_LVTTHMR);
+ apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
+
return thermal_throttle_add_dev(dev, cpu);
}
rdmsr(MSR_IA32_MISC_ENABLE, l, h);
wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h);
- /* Unmask the thermal vector: */
- l = apic_read(APIC_LVTTHMR);
- apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
-
pr_info_once("CPU0: Thermal monitoring enabled (%s)\n",
tm2 ? "TM2" : "TM1");
if (static_branch_unlikely(&rdt_mon_enable_key))
rmdir_mondata_subdir_allrdtgrp(r, d->id);
list_del(&d->list);
- if (is_mbm_enabled())
+ if (r->mon_capable && is_mbm_enabled())
cancel_delayed_work(&d->mbm_over);
if (is_llc_occupancy_enabled() && has_busy_rmid(r, d)) {
/*
struct rdt_domain *d;
int cpu;
- if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
- return -ENOMEM;
-
if (level == RDT_RESOURCE_L3)
update = l3_qos_cfg_update;
else if (level == RDT_RESOURCE_L2)
else
return -EINVAL;
+ if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
+ return -ENOMEM;
+
r_l = &rdt_resources_all[level];
list_for_each_entry(d, &r_l->domains, list) {
/* Pick one CPU from each domain instance to update MSR */
return 0;
}
-int arch_copy_tls(struct task_struct *new)
+int arch_set_tls(struct task_struct *new, unsigned long tls)
{
struct user_desc info;
int idx, ret = -EFAULT;
- if (copy_from_user(&info,
- (void __user *) UPT_SI(&new->thread.regs.regs),
- sizeof(info)))
+ if (copy_from_user(&info, (void __user *) tls, sizeof(info)))
goto out;
ret = -EINVAL;
{
}
-int arch_copy_tls(struct task_struct *t)
+int arch_set_tls(struct task_struct *t, unsigned long tls)
{
/*
* If CLONE_SETTLS is set, we need to save the thread id
- * (which is argument 5, child_tid, of clone) so it can be set
- * during context switches.
+ * so it can be set during context switches.
*/
- t->thread.arch.fs = t->thread.regs.regs.gp[R8 / sizeof(long)];
+ t->thread.arch.fs = tls;
return 0;
}
select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL
select HAVE_ARCH_TRACEHOOK
+ select HAVE_COPY_THREAD_TLS
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_CONTIGUOUS
select HAVE_EXIT_THREAD
* involved. Much simpler to just not copy those live frames across.
*/
-int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
- unsigned long thread_fn_arg, struct task_struct *p)
+int copy_thread_tls(unsigned long clone_flags, unsigned long usp_thread_fn,
+ unsigned long thread_fn_arg, struct task_struct *p,
+ unsigned long tls)
{
struct pt_regs *childregs = task_pt_regs(p);
childregs->syscall = regs->syscall;
- /* The thread pointer is passed in the '4th argument' (= a5) */
if (clone_flags & CLONE_SETTLS)
- childregs->threadptr = childregs->areg[5];
+ childregs->threadptr = tls;
} else {
p->thread.ra = MAKE_RA_FOR_CALL(
(unsigned long)ret_from_kernel_thread, 1);
config BLK_DEV_INTEGRITY
bool "Block layer data integrity support"
- select CRC_T10DIF if BLK_DEV_INTEGRITY
---help---
Some storage devices allow extra information to be
stored/retrieved to help protect the data. The block layer
T10/SCSI Data Integrity Field or the T13/ATA External Path
Protection. If in doubt, say N.
+config BLK_DEV_INTEGRITY_T10
+ tristate
+ depends on BLK_DEV_INTEGRITY
+ select CRC_T10DIF
+
config BLK_DEV_ZONED
bool "Zoned block device support"
select MQ_IOSCHED_DEADLINE
obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
obj-$(CONFIG_BLK_CMDLINE_PARSER) += cmdline-parser.o
-obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o blk-integrity.o t10-pi.o
+obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o blk-integrity.o
+obj-$(CONFIG_BLK_DEV_INTEGRITY_T10) += t10-pi.o
obj-$(CONFIG_BLK_MQ_PCI) += blk-mq-pci.o
obj-$(CONFIG_BLK_MQ_VIRTIO) += blk-mq-virtio.o
obj-$(CONFIG_BLK_MQ_RDMA) += blk-mq-rdma.o
}
#define bfq_class_idle(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
-#define bfq_class_rt(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_RT)
#define bfq_sample_valid(samples) ((samples) > 80)
*/
static u64 bfq_delta(unsigned long service, unsigned long weight)
{
- u64 d = (u64)service << WFQ_SERVICE_SHIFT;
-
- do_div(d, weight);
- return d;
+ return div64_ul((u64)service << WFQ_SERVICE_SHIFT, weight);
}
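
The rewrite folds the shift-then-divide pair into one expression: do_div() divides its first argument in place and returns the remainder, while div64_ul() simply returns the quotient. A small equivalence sketch, assuming WFQ_SERVICE_SHIFT (22 in bfq) is in scope:

	static u64 wfq_delta_both_ways(unsigned long service, unsigned long weight)
	{
		u64 d = (u64)service << WFQ_SERVICE_SHIFT;
		u64 q = div64_ul((u64)service << WFQ_SERVICE_SHIFT, weight);

		do_div(d, weight);	/* old style: d now holds the quotient */
		WARN_ON(d != q);	/* new style computes the same value */
		return q;
	}
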
/**
}
EXPORT_SYMBOL(zero_fill_bio_iter);
+/**
+ * bio_truncate - truncate the bio to the new size @new_size
+ * @bio: the bio to be truncated
+ * @new_size: new size for truncating the bio
+ *
+ * Description:
+ * Truncate the bio to the new size @new_size. If bio_op(bio) is
+ * REQ_OP_READ, zero the truncated part. This function should only
+ * be used for handling corner cases, such as a bio extending past
+ * the end of the device (EOD).
+ */
void bio_truncate(struct bio *bio, unsigned new_size)
{
struct bio_vec bv;
if (new_size >= bio->bi_iter.bi_size)
return;
- if (bio_data_dir(bio) != READ)
+ if (bio_op(bio) != REQ_OP_READ)
goto exit;
bio_for_each_segment(bv, bio, iter) {
unsigned long mask = queue_segment_boundary(q);
offset = mask & (page_to_phys(start_page) + offset);
- return min_t(unsigned long, mask - offset + 1,
- queue_max_segment_size(q));
+
+ /*
+ * Overflow may be triggered in case of a zero page physical address
+ * on a 32-bit arch; use the queue's max segment size when that happens.
+ */
+ return min_not_zero(mask - offset + 1,
+ (unsigned long)queue_max_segment_size(q));
}
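
The wraparound is easy to reproduce with concrete numbers: with the default boundary mask of 0xffffffff on a 32-bit arch, a page at physical address 0 with offset 0 makes mask - offset + 1 wrap to 0, and the old min_t() returned that 0 as the segment size. A standalone userspace model (illustrative values):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t mask = 0xffffffff;	/* default segment boundary mask, 32-bit */
		uint32_t offset = 0;		/* zero physical address, zero offset */
		uint32_t max_seg = 65536;	/* queue_max_segment_size(), say */
		uint32_t len = mask - offset + 1;	/* wraps to 0 */

		uint32_t old = len < max_seg ? len : max_seg;		/* min_t: 0 */
		uint32_t new = len ? (len < max_seg ? len : max_seg)
				   : max_seg;				/* min_not_zero: 65536 */

		printf("old=%u new=%u\n", old, new);
		return 0;
	}
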
/**
}
EXPORT_SYMBOL(blk_mq_complete_request);
+/**
+ * blk_mq_start_request - Start processing a request
+ * @rq: Pointer to request to be started
+ *
+ * Function used by device drivers to notify the block layer that a request
+ * is going to be processed now, so the block layer can do proper initializations
+ * such as starting the timeout timer.
+ */
void blk_mq_start_request(struct request *rq)
{
struct request_queue *q = rq->q;
return (queued + errors) != 0;
}
+/**
+ * __blk_mq_run_hw_queue - Run a hardware queue.
+ * @hctx: Pointer to the hardware queue to run.
+ *
+ * Send pending requests to the hardware.
+ */
static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
{
int srcu_idx;
return next_cpu;
}
+/**
+ * __blk_mq_delay_run_hw_queue - Run (or schedule to run) a hardware queue.
+ * @hctx: Pointer to the hardware queue to run.
+ * @async: If we want to run the queue asynchronously.
+ * @msecs: Milliseconds of delay to wait before running the queue.
+ *
+ * If !@async, try to run the queue now. Else, run the queue asynchronously
+ * with a delay of @msecs.
+ */
static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
unsigned long msecs)
{
msecs_to_jiffies(msecs));
}
+/**
+ * blk_mq_delay_run_hw_queue - Run a hardware queue asynchronously.
+ * @hctx: Pointer to the hardware queue to run.
+ * @msecs: Milliseconds of delay to wait before running the queue.
+ *
+ * Run a hardware queue asynchronously with a delay of @msecs.
+ */
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
{
__blk_mq_delay_run_hw_queue(hctx, true, msecs);
}
EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
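
A usage note (hypothetical driver code): a ->queue_rq() implementation that runs out of device resources typically returns BLK_STS_RESOURCE and schedules a delayed rerun of the queue, e.g.:

	/* In a driver's ->queue_rq() slow path; my_hw_has_free_slot() is a
	 * hypothetical helper standing in for a device-specific check.
	 */
	if (!my_hw_has_free_slot(hctx)) {
		blk_mq_delay_run_hw_queue(hctx, 3);	/* retry in 3 ms */
		return BLK_STS_RESOURCE;
	}
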
+/**
+ * blk_mq_run_hw_queue - Start to run a hardware queue.
+ * @hctx: Pointer to the hardware queue to run.
+ * @async: If we want to run the queue asynchronously.
+ *
+ * Check if the request queue is not in a quiesced state and if there are
+ * pending requests to be sent. If this is true, run the queue to send requests
+ * to hardware.
+ */
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
int srcu_idx;
}
EXPORT_SYMBOL(blk_mq_run_hw_queue);
+/**
+ * blk_mq_run_hw_queues - Run all hardware queues in a request queue.
+ * @q: Pointer to the request queue to run.
+ * @async: If we want to run the queue asynchronously.
+ */
void blk_mq_run_hw_queues(struct request_queue *q, bool async)
{
struct blk_mq_hw_ctx *hctx;
blk_mq_hctx_mark_pending(hctx, ctx);
}
-/*
+/**
+ * blk_mq_request_bypass_insert - Insert a request at dispatch list.
+ * @rq: Pointer to request to be inserted.
+ * @run_queue: If we should run the hardware queue after inserting the request.
+ *
* Should only be used carefully, when the caller knows we want to
* bypass a potential IO scheduler on the target device.
*/
struct request *rqa = container_of(a, struct request, queuelist);
struct request *rqb = container_of(b, struct request, queuelist);
- if (rqa->mq_ctx < rqb->mq_ctx)
- return -1;
- else if (rqa->mq_ctx > rqb->mq_ctx)
- return 1;
- else if (rqa->mq_hctx < rqb->mq_hctx)
- return -1;
- else if (rqa->mq_hctx > rqb->mq_hctx)
- return 1;
+ if (rqa->mq_ctx != rqb->mq_ctx)
+ return rqa->mq_ctx > rqb->mq_ctx;
+ if (rqa->mq_hctx != rqb->mq_hctx)
+ return rqa->mq_hctx > rqb->mq_hctx;
return blk_rq_pos(rqa) > blk_rq_pos(rqb);
}
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
- struct blk_mq_hw_ctx *this_hctx;
- struct blk_mq_ctx *this_ctx;
- struct request_queue *this_q;
- struct request *rq;
LIST_HEAD(list);
- LIST_HEAD(rq_list);
- unsigned int depth;
+ if (list_empty(&plug->mq_list))
+ return;
list_splice_init(&plug->mq_list, &list);
if (plug->rq_count > 2 && plug->multiple_queues)
plug->rq_count = 0;
- this_q = NULL;
- this_hctx = NULL;
- this_ctx = NULL;
- depth = 0;
-
- while (!list_empty(&list)) {
- rq = list_entry_rq(list.next);
- list_del_init(&rq->queuelist);
- BUG_ON(!rq->q);
- if (rq->mq_hctx != this_hctx || rq->mq_ctx != this_ctx) {
- if (this_hctx) {
- trace_block_unplug(this_q, depth, !from_schedule);
- blk_mq_sched_insert_requests(this_hctx, this_ctx,
- &rq_list,
- from_schedule);
- }
-
- this_q = rq->q;
- this_ctx = rq->mq_ctx;
- this_hctx = rq->mq_hctx;
- depth = 0;
+ do {
+ struct list_head rq_list;
+ struct request *rq, *head_rq = list_entry_rq(list.next);
+ struct list_head *pos = &head_rq->queuelist; /* skip first */
+ struct blk_mq_hw_ctx *this_hctx = head_rq->mq_hctx;
+ struct blk_mq_ctx *this_ctx = head_rq->mq_ctx;
+ unsigned int depth = 1;
+
+ list_for_each_continue(pos, &list) {
+ rq = list_entry_rq(pos);
+ BUG_ON(!rq->q);
+ if (rq->mq_hctx != this_hctx || rq->mq_ctx != this_ctx)
+ break;
+ depth++;
}
- depth++;
- list_add_tail(&rq->queuelist, &rq_list);
- }
-
- /*
- * If 'this_hctx' is set, we know we have entries to complete
- * on 'rq_list'. Do those.
- */
- if (this_hctx) {
- trace_block_unplug(this_q, depth, !from_schedule);
+ list_cut_before(&rq_list, &list, pos);
+ trace_block_unplug(head_rq->q, depth, !from_schedule);
blk_mq_sched_insert_requests(this_hctx, this_ctx, &rq_list,
from_schedule);
- }
+ } while(!list_empty(&list));
}
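
The rewritten loop batches consecutive plugged requests that share an (hctx, ctx) pair and hands each batch to the scheduler in one call, using list_cut_before() to detach everything before the cursor. A toy userspace model of the same walk (keys stand in for hctx/ctx pairs, data is illustrative):

	#include <stdio.h>

	int main(void)
	{
		int key[] = { 1, 1, 1, 2, 2, 3 };	/* per-request (hctx, ctx) key */
		int n = 6, i = 0;

		while (i < n) {
			int depth = 1;

			/* Extend the run while the key matches the head request. */
			while (i + depth < n && key[i + depth] == key[i])
				depth++;
			/* list_cut_before() analogue: dispatch the run as one batch. */
			printf("dispatch batch: key=%d depth=%d\n", key[i], depth);
			i += depth;
		}
		return 0;
	}
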
static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
return BLK_STS_OK;
}
+/**
+ * blk_mq_try_issue_directly - Try to send a request directly to device driver.
+ * @hctx: Pointer of the associated hardware queue.
+ * @rq: Pointer to request to be sent.
+ * @cookie: Request queue cookie.
+ *
+ * If the device has enough resources to accept a new request now, send the
+ * request directly to the device driver. Otherwise, insert it into the
+ * hctx->dispatch list so it can be retried later. Requests inserted into
+ * this list have higher priority.
+ */
static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
struct request *rq, blk_qc_t *cookie)
{
}
}
+/**
+ * blk_mq_make_request - Create and send a request to block device.
+ * @q: Request queue pointer.
+ * @bio: Bio pointer.
+ *
+ * Builds up a request structure from @q and @bio and sends it to the device. The
+ * request may not be queued directly to hardware if:
+ * * This request can be merged with another one
+ * * We want to place request at plug queue for possible future merging
+ * * There is an IO scheduler active at this queue
+ *
+ * It will not queue the request if there is an error with the bio or during
+ * request creation.
+ *
+ * Returns: Request queue cookie.
+ */
static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
{
const int is_sync = op_is_sync(bio->bi_opf);
plug = blk_mq_plug(q, bio);
if (unlikely(is_flush_fua)) {
- /* bypass scheduler for flush rq */
+ /* Bypass scheduler for flush requests */
blk_insert_flush(rq);
blk_mq_run_hw_queue(data.hctx, true);
} else if (plug && (q->nr_hw_queues == 1 || q->mq_ops->commit_rqs ||
blk_add_rq_to_plug(plug, rq);
} else if (q->elevator) {
+ /* Insert the request at the IO scheduler queue */
blk_mq_sched_insert_request(rq, false, true, true);
} else if (plug && !blk_queue_nomerges(q)) {
/*
}
} else if ((q->nr_hw_queues > 1 && is_sync) ||
!data.hctx->dispatch_busy) {
+ /*
+ * There is no scheduler and we can try to send directly
+ * to the hardware.
+ */
blk_mq_try_issue_directly(data.hctx, rq, &cookie);
} else {
+ /* Default case. */
blk_mq_sched_insert_request(rq, false, true, true);
}
* storage device can address. The default of 512 covers most
* hardware.
**/
-void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
+void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
{
q->limits.logical_block_size = size;
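
The widening matters as soon as a device reports a logical block size that does not fit in 16 bits: with the old unsigned short parameter, a 64 KiB block size would silently truncate to 0. For example:

	/* 65536 overflows unsigned short (max 65535) and would have been
	 * stored as 0; with unsigned int it is carried through intact.
	 */
	blk_queue_logical_block_size(q, 65536);
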
break;
}
- bio->bi_opf = op;
+ bio->bi_opf = op | REQ_SYNC;
bio->bi_iter.bi_sector = sector;
sector += zone_sectors;
const char *dname;
int err;
+ /*
+ * Partitions are not supported on zoned block devices that are used
+ * as such: refuse them on host managed devices, and fall back to
+ * non-zoned operation on host aware ones.
+ */
+ switch (disk->queue->limits.zoned) {
+ case BLK_ZONED_HM:
+ pr_warn("%s: partitions not supported on host managed zoned block device\n",
+ disk->disk_name);
+ return ERR_PTR(-ENXIO);
+ case BLK_ZONED_HA:
+ pr_info("%s: disabling host aware zoned block device support due to partitions\n",
+ disk->disk_name);
+ disk->queue->limits.zoned = BLK_ZONED_NONE;
+ break;
+ case BLK_ZONED_NONE:
+ break;
+ }
+
err = disk_expand_part_tbl(disk, partno);
if (err)
return ERR_PTR(err);
part = add_partition(disk, p, from, size, state->parts[p].flags,
&state->parts[p].info);
- if (IS_ERR(part)) {
+ if (IS_ERR(part) && PTR_ERR(part) != -ENXIO) {
printk(KERN_ERR " %s: p%d could not be added: %ld\n",
disk->disk_name, p, -PTR_ERR(part));
return true;
}
/*
- * Partitions are not supported on zoned block devices.
+ * Partitions are not supported on host managed zoned block devices.
*/
- if (bdev_is_zoned(bdev)) {
- pr_warn("%s: ignoring partition table on zoned block device\n",
+ if (disk->queue->limits.zoned == BLK_ZONED_HM) {
+ pr_warn("%s: ignoring partition table on host managed zoned block device\n",
disk->disk_name);
ret = 0;
goto out_free_state;
BUG_ON (!data || !frags);
if (size < 2 * VBLK_SIZE_HEAD) {
- ldm_error("Value of size is to small.");
+ ldm_error("Value of size is too small.");
return false;
}
#include <linux/t10-pi.h>
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
+#include <linux/module.h>
#include <net/checksum.h>
typedef __be16 (csum_fn) (void *, unsigned int);
.complete_fn = t10_pi_type3_complete,
};
EXPORT_SYMBOL(t10_pi_type3_ip);
+
+MODULE_LICENSE("GPL");
config ACPI_PROCESSOR_CSTATE
def_bool y
+ depends on ACPI_PROCESSOR
depends on IA64 || X86
config ACPI_PROCESSOR_IDLE
acpi_scan_add_handler_with_hotplug(&processor_handler, "processor");
acpi_scan_add_handler(&processor_container_handler);
}
+
+#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
+/**
+ * acpi_processor_claim_cst_control - Request _CST control from the platform.
+ */
+bool acpi_processor_claim_cst_control(void)
+{
+ static bool cst_control_claimed;
+ acpi_status status;
+
+ if (!acpi_gbl_FADT.cst_control || cst_control_claimed)
+ return true;
+
+ status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
+ acpi_gbl_FADT.cst_control, 8);
+ if (ACPI_FAILURE(status)) {
+ pr_warn("ACPI: Failed to claim processor _CST control\n");
+ return false;
+ }
+
+ cst_control_claimed = true;
+ return true;
+}
+EXPORT_SYMBOL_GPL(acpi_processor_claim_cst_control);
+
+/**
+ * acpi_processor_evaluate_cst - Evaluate the processor _CST control method.
+ * @handle: ACPI handle of the processor object containing the _CST.
+ * @cpu: The numeric ID of the target CPU.
+ * @info: Object to write the C-states information into.
+ *
+ * Extract the C-state information for the given CPU from the output of the _CST
+ * control method under the corresponding ACPI processor object (or processor
+ * device object) and populate @info with it.
+ *
+ * If any ACPI_ADR_SPACE_FIXED_HARDWARE C-states are found, invoke
+ * acpi_processor_ffh_cstate_probe() to verify them and update the
+ * cpu_cstate_entry data for @cpu.
+ */
+int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu,
+ struct acpi_processor_power *info)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *cst;
+ acpi_status status;
+ u64 count;
+ int last_index = 0;
+ int i, ret = 0;
+
+ status = acpi_evaluate_object(handle, "_CST", NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_debug(handle, "No _CST\n");
+ return -ENODEV;
+ }
+
+ cst = buffer.pointer;
+
+ /* There must be at least 2 elements. */
+ if (!cst || cst->type != ACPI_TYPE_PACKAGE || cst->package.count < 2) {
+ acpi_handle_warn(handle, "Invalid _CST output\n");
+ ret = -EFAULT;
+ goto end;
+ }
+
+ count = cst->package.elements[0].integer.value;
+
+ /* Validate the number of C-states. */
+ if (count < 1 || count != cst->package.count - 1) {
+ acpi_handle_warn(handle, "Inconsistent _CST data\n");
+ ret = -EFAULT;
+ goto end;
+ }
+
+ for (i = 1; i <= count; i++) {
+ union acpi_object *element;
+ union acpi_object *obj;
+ struct acpi_power_register *reg;
+ struct acpi_processor_cx cx;
+
+ /*
+ * If there is not enough space for all C-states, skip the
+ * excess ones and log a warning.
+ */
+ if (last_index >= ACPI_PROCESSOR_MAX_POWER - 1) {
+ acpi_handle_warn(handle,
+ "No room for more idle states (limit: %d)\n",
+ ACPI_PROCESSOR_MAX_POWER - 1);
+ break;
+ }
+
+ memset(&cx, 0, sizeof(cx));
+
+ element = &cst->package.elements[i];
+ if (element->type != ACPI_TYPE_PACKAGE)
+ continue;
+
+ if (element->package.count != 4)
+ continue;
+
+ obj = &element->package.elements[0];
+
+ if (obj->type != ACPI_TYPE_BUFFER)
+ continue;
+
+ reg = (struct acpi_power_register *)obj->buffer.pointer;
+
+ obj = &element->package.elements[1];
+ if (obj->type != ACPI_TYPE_INTEGER)
+ continue;
+
+ cx.type = obj->integer.value;
+ /*
+ * There are known cases in which the _CST output does not
+ * contain C1, so if the type of the first state found is not
+ * C1, leave an empty slot for C1 to be filled in later.
+ */
+ if (i == 1 && cx.type != ACPI_STATE_C1)
+ last_index = 1;
+
+ cx.address = reg->address;
+ cx.index = last_index + 1;
+
+ if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
+ if (!acpi_processor_ffh_cstate_probe(cpu, &cx, reg)) {
+ /*
+ * In the majority of cases _CST describes C1 as
+ * a FIXED_HARDWARE C-state, but if the command
+ * line forbids using MWAIT, use CSTATE_HALT for
+ * C1 regardless.
+ */
+ if (cx.type == ACPI_STATE_C1 &&
+ boot_option_idle_override == IDLE_NOMWAIT) {
+ cx.entry_method = ACPI_CSTATE_HALT;
+ snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
+ } else {
+ cx.entry_method = ACPI_CSTATE_FFH;
+ }
+ } else if (cx.type == ACPI_STATE_C1) {
+ /*
+ * In the special case of C1, FIXED_HARDWARE can
+ * be handled by executing the HLT instruction.
+ */
+ cx.entry_method = ACPI_CSTATE_HALT;
+ snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
+ } else {
+ continue;
+ }
+ } else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+ cx.entry_method = ACPI_CSTATE_SYSTEMIO;
+ snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
+ cx.address);
+ } else {
+ continue;
+ }
+
+ if (cx.type == ACPI_STATE_C1)
+ cx.valid = 1;
+
+ obj = &element->package.elements[2];
+ if (obj->type != ACPI_TYPE_INTEGER)
+ continue;
+
+ cx.latency = obj->integer.value;
+
+ obj = &element->package.elements[3];
+ if (obj->type != ACPI_TYPE_INTEGER)
+ continue;
+
+ memcpy(&info->states[++last_index], &cx, sizeof(cx));
+ }
+
+ acpi_handle_info(handle, "Found %d idle states\n", last_index);
+
+ info->count = last_index;
+
+ end:
+ kfree(buffer.pointer);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(acpi_processor_evaluate_cst);
+#endif /* CONFIG_ACPI_PROCESSOR_CSTATE */
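
A minimal caller sketch (hypothetical, assuming a valid processor handle and CPU id; the struct is static here only to keep the large states[] array off the stack):

	static int my_probe_cstates(acpi_handle handle, u32 cpu)
	{
		static struct acpi_processor_power info;
		int ret;

		ret = acpi_processor_evaluate_cst(handle, cpu, &info);
		if (ret)
			return ret;	/* -ENODEV: no _CST; -EFAULT: malformed output */

		pr_info("CPU%u: _CST reported %d usable idle states\n",
			cpu, info.count);
		return 0;
	}
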
if (register_count) {
/*
* if the function of acpi_video_register is already called,
- * don't register the acpi_vide_bus again and return no error.
+ * don't register the acpi_video_bus again and return no error.
*/
goto leave;
}
*
* Module Name: acapps - common include for ACPI applications/tools
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
/* Common info for tool signons */
#define ACPICA_NAME "Intel ACPI Component Architecture"
-#define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2019 Intel Corporation"
+#define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2020 Intel Corporation"
#if ACPI_MACHINE_WIDTH == 64
#define ACPI_WIDTH " (64-bit version)"
*
* Name: accommon.h - Common include files for generation of ACPICA source
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: acapps - common include for ACPI applications/tools
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Name: acdebug.h - ACPI/AML debugger
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Name: acdispat.h - dispatcher (parser to interpreter interface)
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Name: acevents.h - Event subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Name: acglobal.h - Declarations for global variables
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Name: achware.h -- hardware specific interfaces
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Name: acinterp.h - Interpreter subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Name: aclocal.h - Internal data types used across the ACPI subsystem
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Name: acmacros.h - C macros for the entire subsystem.
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Name: acnamesp.h - Namespace subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Name: acobject.h - Definition of union acpi_operand_object (Internal object only)
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
/* The buffer_field is different in that it is part of a Buffer, not an op_region */
struct acpi_object_buffer_field {
- ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO union acpi_operand_object *buffer_obj; /* Containing Buffer object */
+ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO u8 is_create_field; /* Special case for objects created by create_field() */
+ union acpi_operand_object *buffer_obj; /* Containing Buffer object */
};
/******************************************************************************
*
* Name: acopcode.h - AML opcode information for the AML parser and interpreter
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: acparser.h - AML Parser subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Name: acpredef - Information table for ACPI predefined methods and objects
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Name: acresrc.h - Resource Manager function prototypes
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Name: acstruct.h - Internal structs
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Name: actables.h - ACPI table management
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Name: acutils.h -- prototypes for the common (subsystem-wide) procedures
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
* Declarations and definitions contained herein are derived
* directly from the ACPI specification.
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: amlresrc.h - AML resource descriptors
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: dbhistry - debugger HISTORY command
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
if (ACPI_FAILURE(status)
|| temp64 >= ACPI_NUM_PREDEFINED_REGIONS) {
acpi_os_printf
- ("Invalid adress space ID: must be between 0 and %u inclusive\n",
+ ("Invalid address space ID: must be between 0 and %u inclusive\n",
ACPI_NUM_PREDEFINED_REGIONS - 1);
return (AE_OK);
}
* Module Name: dsargs - Support for execution of dynamic arguments for static
* objects (regions, fields, buffer fields, etc.)
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
* Module Name: dscontrol - Support for execution control opcodes -
* if/else/while/return
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: dsdebug - Parser/Interpreter interface - debugging
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: dsfield - Dispatcher field routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
* FUNCTION: acpi_ds_get_field_names
*
* PARAMETERS: info - create_field info structure
- * ` walk_state - Current method state
+ * walk_state - Current method state
* arg - First parser arg for the field name list
*
* RETURN: Status
*
* Module Name: dsinit - Object initialization namespace walk
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: dsmethod - Parser/Interpreter interface - control method parsing
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: dsobject - Dispatcher object management routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: dsopcode - Dispatcher support for regions and fields
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
}
obj_desc->buffer_field.buffer_obj = buffer_desc;
+ obj_desc->buffer_field.is_create_field =
+ aml_opcode == AML_CREATE_FIELD_OP;
/* Reference count for buffer_desc inherits obj_desc count */
*
* Module Name: dspkginit - Completion of deferred package initialization
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
* Module Name: dswexec - Dispatcher method execution callbacks;
* dispatch to interpreter.
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: dswload - Dispatcher first pass namespace load callbacks
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op,
walk_state));
+ /*
+ * Disassembler: handle create field operators here.
+ *
+ * create_buffer_field is a deferred op that is typically processed in load
+ * pass 2. However, disassembly of control method contents walk the parse
+ * tree with ACPI_PARSE_LOAD_PASS1 and AML_CREATE operators are processed
+ * in a later walk. This is a problem when there is a control method that
+ * has the same name as the AML_CREATE object. In this case, any use of the
+ * name segment will be detected as a method call rather than a reference
+ * to a buffer field.
+ *
+ * This earlier creation during disassembly solves this issue by inserting
+ * the named object in the ACPI namespace so that references to this name
+ * resolve to a name string rather than a method call.
+ */
+ if ((walk_state->parse_flags & ACPI_PARSE_DISASSEMBLE) &&
+ (walk_state->op_info->flags & AML_CREATE)) {
+ status = acpi_ds_create_buffer_field(op, walk_state);
+ return_ACPI_STATUS(status);
+ }
+
/* We are only interested in opcodes that have an associated name */
if (!(walk_state->op_info->flags & (AML_NAMED | AML_FIELD))) {
*
* Module Name: dswload2 - Dispatcher second pass namespace load callbacks
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: dswscope - Scope stack manipulation
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: dswstate - Dispatcher parse tree walk management routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: evevent - Fixed Event handling and dispatch
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: evglock - Global Lock support
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: evgpe - General Purpose Event handling and dispatch
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: evgpeblk - GPE block creation and initialization.
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: evgpeinit - System GPE initialization and update
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: evgpeutil - GPE utilities
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: evhandler - Support for Address Space handlers
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: evmisc - Miscellaneous event manager support functions
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: evregion - Operation Region support
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: evrgnini- ACPI address_space (op_region) init
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: evxface - External interfaces for ACPI events
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: evxfevnt - External Interfaces, ACPI event disable/enable
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: evxfgpe - External Interfaces for General Purpose Events (GPEs)
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
* Module Name: evxfregn - External Interfaces, ACPI Operation Regions and
* Address Spaces.
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: exconcat - Concatenate-type AML operators
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: exconfig - Namespace reconfiguration (Load/Unload opcodes)
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: exconvrt - Object conversion routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: excreate - Named object creation
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: exdebug - Support for stores to the AML Debug Object
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: exdump - Interpreter debug output routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: exfield - AML execution - field_unit read/write
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
* RETURN: Status
*
* DESCRIPTION: Read from a named field. Returns either an Integer or a
- * Buffer, depending on the size of the field.
+ * Buffer, depending on the size of the field and whether the
+ * field was created by the create_field() operator.
*
******************************************************************************/
* the use of arithmetic operators on the returned value if the
* field size is equal or smaller than an Integer.
*
+ * However, all buffer fields created by the create_field() operator need
+ * to remain buffers to match other AML interpreter implementations.
+ *
* Note: Field.length is in bits.
*/
buffer_length =
(acpi_size)ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->field.bit_length);
- if (buffer_length > acpi_gbl_integer_byte_width) {
+ if (buffer_length > acpi_gbl_integer_byte_width ||
+ (obj_desc->common.type == ACPI_TYPE_BUFFER_FIELD &&
+ obj_desc->buffer_field.is_create_field)) {
/* Field is too large for an Integer, create a Buffer instead */
*
* Module Name: exfldio - Aml Field I/O
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: exmisc - ACPI AML (p-code) execution - specific opcodes
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: exmutex - ASL Mutex Acquire/Release functions
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: exnames - interpreter/scanner name load/execute
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: exoparg1 - AML execution - opcodes with 1 argument
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: exoparg2 - AML execution - opcodes with 2 arguments
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: exoparg3 - AML execution - opcodes with 3 arguments
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: exoparg6 - AML execution - opcodes with 6 arguments
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: exprep - ACPI AML field prep utilities
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: exregion - ACPI default op_region (address space) handlers
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: exresnte - AML Interpreter object resolution
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: exresolv - AML Interpreter object resolution
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: exresop - AML Interpreter operand/object resolution
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: exserial - field_unit support for serial address spaces
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: exstore - AML Interpreter object store support
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
* Module Name: exstoren - AML Interpreter object store support,
* Store to Node (namespace object)
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: exstorob - AML object store support, store to object
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: exsystem - Interface to OS services
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: extrace - Support for interpreter execution tracing
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: exutils - interpreter/scanner utilities
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: hwacpi - ACPI Hardware Initialization/Mode Interface
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
* Name: hwesleep.c - ACPI Hardware Sleep/Wake Support functions for the
* extended FADT-V5 sleep registers.
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: hwgpe - Low level GPE enable/disable/clear functions
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
* Name: hwsleep.c - ACPI Hardware Sleep/Wake Support functions for the
* original/legacy sleep/PM registers.
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Name: hwtimer.c - ACPI Power Management Timer Interface
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: hwvalid - I/O request validation
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: hwxface - Public ACPICA hardware interfaces
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Name: hwxfsleep.c - ACPI Hardware Sleep/Wake External Interfaces
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: nsarguments - Validation of args for ACPI predefined methods
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
* Module Name: nsconvert - Object conversions for objects returned by
* predefined methods
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: nsdump - table dumping routines for debug
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: nsdump - table dumping routines for debug
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: nsinit - namespace initialization
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: nsload - namespace loading/expanding/contracting procedures
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: nsparse - namespace interface to AML parser
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: nspredef - Validation of ACPI predefined methods and objects
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: nsprepkg - Validation of package objects for predefined names
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: nsrepair - Repair for objects returned by predefined methods
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
* Module Name: nsrepair2 - Repair for objects returned by specific
* predefined methods
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
* Module Name: nsutils - Utilities for accessing ACPI namespace, accessing
* parents and siblings and Scope manipulation
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: nswalk - Functions for walking the ACPI namespace
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
* Module Name: nsxfname - Public interfaces to the ACPI subsystem
* ACPI Namespace oriented interfaces
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: psargs - Parse AML opcode arguments
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: psloop - Main AML parse loop
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: psobject - Support for parse objects
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: psopcode - Parser/Interpreter opcode information table
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: psopinfo - AML opcode information functions and dispatch tables
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: psparse - Parser top level AML parse routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: psscope - Parser scope stack management routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: pstree - Parser op tree manipulation/traversal/search
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: psutils - Parser miscellaneous utilities (Parser only)
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: pswalk - Parser routines to walk parsed op tree(s)
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: psxface - Parser external interfaces
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: tbdata - Table manager data structure functions
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: tbfadt - FADT table utilities
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: tbfind - find table
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: tbinstal - ACPI table installation and removal
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: tbprint - Table output utilities
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: tbutils - ACPI Table utilities
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: tbxface - ACPI table-oriented external interfaces
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: tbxfload - Table load/unload external interfaces
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: tbxfroot - Find the root ACPI table (RSDT)
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: utaddress - op_region address range check
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: utalloc - local memory allocation routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: utascii - Utility ascii functions
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: utbuffer - Buffer dump routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: utcache - local cache allocation routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: utcopy - Internal to external object translation utilities
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: utdebug - Debug print/trace routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: utdecode - Utility decoding routines (value-to-string)
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: uteval - Object evaluation
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: utglobal - Global variables for the ACPI subsystem
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: uthex -- Hex/ASCII support functions
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: utids - support for device Ids - HID, UID, CID, SUB, CLS
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: utinit - Common ACPI subsystem initialization
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: utlock - Reader/Writer lock interfaces
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: utobject - ACPI object create/delete/size/cache routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: utosi - Support for the _OSI predefined control method
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: utpredef - support functions for predefined names
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: utprint - Formatted printing routines
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: uttrack - Memory allocation tracking routines (debug only)
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: utuuid -- UUID support functions
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: utxface - External interfaces, miscellaneous utility functions
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: utxfinit - External interfaces for ACPICA initialization
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
switch (generic->notify.type) {
case ACPI_HEST_NOTIFY_POLLED:
- timer_setup(&ghes->timer, ghes_poll_func, TIMER_DEFERRABLE);
+ timer_setup(&ghes->timer, ghes_poll_func, 0);
ghes_add_timer(ghes);
break;
case ACPI_HEST_NOTIFY_EXTERNAL:
return status;
}
+struct iort_workaround_oem_info {
+ char oem_id[ACPI_OEM_ID_SIZE + 1];
+ char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
+ u32 oem_revision;
+};
+
+static bool apply_id_count_workaround;
+
+static struct iort_workaround_oem_info wa_info[] __initdata = {
+ {
+ .oem_id = "HISI ",
+ .oem_table_id = "HIP07 ",
+ .oem_revision = 0,
+ }, {
+ .oem_id = "HISI ",
+ .oem_table_id = "HIP08 ",
+ .oem_revision = 0,
+ }
+};
+
+static void __init
+iort_check_id_count_workaround(struct acpi_table_header *tbl)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(wa_info); i++) {
+ if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) &&
+ !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
+ wa_info[i].oem_revision == tbl->oem_revision) {
+ apply_id_count_workaround = true;
+ pr_warn(FW_BUG "ID count for ID mapping entry is wrong, applying workaround\n");
+ break;
+ }
+ }
+}
+
+static inline u32 iort_get_map_max(struct acpi_iort_id_mapping *map)
+{
+ u32 map_max = map->input_base + map->id_count;
+
+	/*
+	 * Revision D of the IORT specification (Section 3, table 4, page 9)
+	 * defines "Number of IDs" as the number of IDs in the range minus
+	 * one, but the IORT code ignored the "minus one", and some firmware
+	 * did too, so apply a workaround here to stay compatible with both
+	 * spec-compliant and non-compliant firmware.
+	 */
+ if (apply_id_count_workaround)
+ map_max--;
+
+ return map_max;
+}
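+
+/*
+ * For illustration (hypothetical values): a spec-compliant entry with
+ * input_base = 0x1000 and id_count = 7 covers IDs 0x1000..0x1007, so
+ * iort_get_map_max() returns 0x1007.  Firmware matching wa_info[]
+ * reports id_count = 8 for the same range, and decrementing map_max
+ * recovers the same inclusive maximum.
+ */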
+
static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
u32 *rid_out)
{
return -ENXIO;
}
- if (rid_in < map->input_base ||
- (rid_in >= map->input_base + map->id_count))
+ if (rid_in < map->input_base || rid_in > iort_get_map_max(map))
return -ENXIO;
*rid_out = map->output_base + (rid_in - map->input_base);
return;
}
+ iort_check_id_count_workaround(iort_table);
iort_init_platform_devices();
}
#define PREFIX "ACPI: "
#define ACPI_BATTERY_VALUE_UNKNOWN 0xFFFFFFFF
+#define ACPI_BATTERY_CAPACITY_VALID(capacity) \
+ ((capacity) != 0 && (capacity) != ACPI_BATTERY_VALUE_UNKNOWN)
#define ACPI_BATTERY_DEVICE_NAME "Battery"
static bool acpi_battery_is_degraded(struct acpi_battery *battery)
{
- return battery->full_charge_capacity && battery->design_capacity &&
+ return ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity) &&
+ ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity) &&
battery->full_charge_capacity < battery->design_capacity;
}
enum power_supply_property psp,
union power_supply_propval *val)
{
- int ret = 0;
+ int full_capacity = ACPI_BATTERY_VALUE_UNKNOWN, ret = 0;
struct acpi_battery *battery = to_acpi_battery(psy);
if (acpi_battery_present(battery)) {
break;
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
- if (battery->design_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
+ if (!ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity))
ret = -ENODEV;
else
val->intval = battery->design_capacity * 1000;
break;
case POWER_SUPPLY_PROP_CHARGE_FULL:
case POWER_SUPPLY_PROP_ENERGY_FULL:
- if (battery->full_charge_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
+ if (!ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity))
ret = -ENODEV;
else
val->intval = battery->full_charge_capacity * 1000;
val->intval = battery->capacity_now * 1000;
break;
case POWER_SUPPLY_PROP_CAPACITY:
- if (battery->capacity_now && battery->full_charge_capacity)
- val->intval = battery->capacity_now * 100/
- battery->full_charge_capacity;
+ if (ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity))
+ full_capacity = battery->full_charge_capacity;
+ else if (ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity))
+ full_capacity = battery->design_capacity;
+
+ if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN ||
+ full_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
+ ret = -ENODEV;
else
- val->intval = 0;
+			val->intval = battery->capacity_now * 100 /
+					full_capacity;
break;
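	/*
	 * Example for the CAPACITY computation above (hypothetical values):
	 * capacity_now = 4000 with a valid full_charge_capacity of 5000
	 * reports 80; if only design_capacity is valid it serves as the
	 * divisor instead, and if neither is valid the read fails with
	 * -ENODEV.
	 */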
case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
if (battery->state & ACPI_BATTERY_STATE_CRITICAL)
POWER_SUPPLY_PROP_SERIAL_NUMBER,
};
+static enum power_supply_property charge_battery_full_cap_broken_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_SERIAL_NUMBER,
+};
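+/*
+ * Note: compared with charge_battery_props, the list above omits
+ * POWER_SUPPLY_PROP_CHARGE_FULL and POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ * which cannot be reported when both capacity figures are invalid.
+ */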
+
static enum power_supply_property energy_battery_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
static int sysfs_add_battery(struct acpi_battery *battery)
{
struct power_supply_config psy_cfg = { .drv_data = battery, };
+ bool full_cap_broken = false;
+
+ if (!ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity) &&
+ !ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity))
+ full_cap_broken = true;
if (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) {
- battery->bat_desc.properties = charge_battery_props;
- battery->bat_desc.num_properties =
- ARRAY_SIZE(charge_battery_props);
- } else if (battery->full_charge_capacity == 0) {
- battery->bat_desc.properties =
- energy_battery_full_cap_broken_props;
- battery->bat_desc.num_properties =
- ARRAY_SIZE(energy_battery_full_cap_broken_props);
+ if (full_cap_broken) {
+ battery->bat_desc.properties =
+ charge_battery_full_cap_broken_props;
+ battery->bat_desc.num_properties =
+ ARRAY_SIZE(charge_battery_full_cap_broken_props);
+ } else {
+ battery->bat_desc.properties = charge_battery_props;
+ battery->bat_desc.num_properties =
+ ARRAY_SIZE(charge_battery_props);
+ }
} else {
- battery->bat_desc.properties = energy_battery_props;
- battery->bat_desc.num_properties =
- ARRAY_SIZE(energy_battery_props);
+ if (full_cap_broken) {
+ battery->bat_desc.properties =
+ energy_battery_full_cap_broken_props;
+ battery->bat_desc.num_properties =
+ ARRAY_SIZE(energy_battery_full_cap_broken_props);
+ } else {
+ battery->bat_desc.properties = energy_battery_props;
+ battery->bat_desc.num_properties =
+ ARRAY_SIZE(energy_battery_props);
+ }
}
battery->bat_desc.name = acpi_device_bid(battery->device);
},
.driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
},
+ {
+ /*
+	 * Razer Blade Stealth 13 late 2019: the LID device only sends a
+	 * notification on close, not on open, and _LID always returns
+	 * closed.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Razer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Razer Blade Stealth 13 Late 2019"),
+ },
+ .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
+ },
{}
};
*/
static const struct acpi_device_id special_pm_ids[] = {
{"PNP0C0B", }, /* Generic ACPI fan */
+ {"INT1044", }, /* Fan for Tiger Lake generation */
{"INT3404", }, /* Fan */
{}
};
}
static const struct acpi_device_id int3407_device_ids[] = {
+ {"INT1047", 0},
{"INT3407", 0},
{"", 0},
};
#define INT3401_DEVICE 0X01
static const struct acpi_device_id int340x_thermal_device_ids[] = {
+ {"INT1040"},
+ {"INT1043"},
+ {"INT1044"},
+ {"INT1047"},
{"INT3400"},
{"INT3401", INT3401_DEVICE},
{"INT3402"},
/* --------------------------------------------------------------------------
Event Management
-------------------------------------------------------------------------- */
-static struct acpi_ec_query_handler *
-acpi_ec_get_query_handler(struct acpi_ec_query_handler *handler)
-{
- if (handler)
- kref_get(&handler->kref);
- return handler;
-}
-
static struct acpi_ec_query_handler *
acpi_ec_get_query_handler_by_value(struct acpi_ec *ec, u8 value)
{
struct acpi_ec_query_handler *handler;
- bool found = false;
mutex_lock(&ec->mutex);
list_for_each_entry(handler, &ec->list, node) {
if (value == handler->query_bit) {
- found = true;
- break;
+ kref_get(&handler->kref);
+ mutex_unlock(&ec->mutex);
+ return handler;
}
}
mutex_unlock(&ec->mutex);
- return found ? acpi_ec_get_query_handler(handler) : NULL;
+ return NULL;
}
static void acpi_ec_query_handler_release(struct kref *kref)
static const struct acpi_device_id fan_device_ids[] = {
{"PNP0C0B", 0},
+ {"INT1044", 0},
{"INT3404", 0},
{"", 0},
};
#define FAN_PM_OPS_PTR NULL
#endif
+#define ACPI_FPS_NAME_LEN 20
+
struct acpi_fan_fps {
u64 control;
u64 trip_point;
u64 speed;
u64 noise_level;
u64 power;
+ char name[ACPI_FPS_NAME_LEN];
+ struct device_attribute dev_attr;
};
struct acpi_fan_fif {
return fps1->speed - fps2->speed;
}
+static ssize_t show_state(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct acpi_fan_fps *fps = container_of(attr, struct acpi_fan_fps, dev_attr);
+ int count;
+
+ if (fps->control == 0xFFFFFFFF || fps->control > 100)
+ count = snprintf(buf, PAGE_SIZE, "not-defined:");
+ else
+ count = snprintf(buf, PAGE_SIZE, "%lld:", fps->control);
+
+	if (fps->trip_point == 0xFFFFFFFF || fps->trip_point > 9)
+		count += snprintf(&buf[count], PAGE_SIZE - count, "not-defined:");
+	else
+		count += snprintf(&buf[count], PAGE_SIZE - count, "%lld:", fps->trip_point);
+
+	if (fps->speed == 0xFFFFFFFF)
+		count += snprintf(&buf[count], PAGE_SIZE - count, "not-defined:");
+	else
+		count += snprintf(&buf[count], PAGE_SIZE - count, "%lld:", fps->speed);
+
+	if (fps->noise_level == 0xFFFFFFFF)
+		count += snprintf(&buf[count], PAGE_SIZE - count, "not-defined:");
+	else
+		count += snprintf(&buf[count], PAGE_SIZE - count, "%lld:", fps->noise_level * 100);
+
+	if (fps->power == 0xFFFFFFFF)
+		count += snprintf(&buf[count], PAGE_SIZE - count, "not-defined\n");
+	else
+		count += snprintf(&buf[count], PAGE_SIZE - count, "%lld\n", fps->power);
+
+ return count;
+}
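+
+/*
+ * Hypothetical output of a stateN attribute built above: a state with
+ * control = 50, trip_point = 2, speed = 3000, noise_level = 35 and
+ * power = 1500 reads back as "50:2:3000:3500:1500" (noise_level is
+ * multiplied by 100 before printing), with "not-defined" substituted
+ * for any field the firmware reports as 0xFFFFFFFF.
+ */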
+
static int acpi_fan_get_fps(struct acpi_device *device)
{
struct acpi_fan *fan = acpi_driver_data(device);
}
for (i = 0; i < fan->fps_count; i++) {
struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
- struct acpi_buffer fps = { sizeof(fan->fps[i]), &fan->fps[i] };
+ struct acpi_buffer fps = { offsetof(struct acpi_fan_fps, name),
+ &fan->fps[i] };
status = acpi_extract_package(&obj->package.elements[i + 1],
&format, &fps);
if (ACPI_FAILURE(status)) {
dev_err(&device->dev, "Invalid _FPS element\n");
- break;
+ goto err;
}
}
sort(fan->fps, fan->fps_count, sizeof(*fan->fps),
acpi_fan_speed_cmp, NULL);
+ for (i = 0; i < fan->fps_count; ++i) {
+ struct acpi_fan_fps *fps = &fan->fps[i];
+
+ snprintf(fps->name, ACPI_FPS_NAME_LEN, "state%d", i);
+ fps->dev_attr.show = show_state;
+ fps->dev_attr.store = NULL;
+ fps->dev_attr.attr.name = fps->name;
+ fps->dev_attr.attr.mode = 0444;
+ status = sysfs_create_file(&device->dev.kobj, &fps->dev_attr.attr);
+ if (status) {
+ int j;
+
+ for (j = 0; j < i; ++j)
+ sysfs_remove_file(&device->dev.kobj, &fan->fps[j].dev_attr.attr);
+ break;
+ }
+ }
+
err:
kfree(obj);
return status;
platform_set_drvdata(pdev, fan);
if (acpi_fan_is_acpi4(device)) {
- if (acpi_fan_get_fif(device) || acpi_fan_get_fps(device))
- goto end;
+ result = acpi_fan_get_fif(device);
+ if (result)
+ return result;
+
+ result = acpi_fan_get_fps(device);
+ if (result)
+ return result;
+
fan->acpi4 = true;
} else {
result = acpi_device_update_power(device, NULL);
if (result) {
dev_err(&device->dev, "Failed to set initial power state\n");
- goto end;
+ goto err_end;
}
}
&fan_cooling_ops);
if (IS_ERR(cdev)) {
result = PTR_ERR(cdev);
- goto end;
+ goto err_end;
}
dev_dbg(&pdev->dev, "registered as cooling_device%d\n", cdev->id);
result = sysfs_create_link(&cdev->device.kobj,
&pdev->dev.kobj,
"device");
- if (result)
+ if (result) {
dev_err(&pdev->dev, "Failed to create sysfs link 'device'\n");
+ goto err_end;
+ }
+
+ return 0;
+
+err_end:
+ if (fan->acpi4) {
+ int i;
+
+ for (i = 0; i < fan->fps_count; ++i)
+ sysfs_remove_file(&device->dev.kobj, &fan->fps[i].dev_attr.attr);
+ }
-end:
return result;
}
{
struct acpi_fan *fan = platform_get_drvdata(pdev);
+ if (fan->acpi4) {
+ struct acpi_device *device = ACPI_COMPANION(&pdev->dev);
+ int i;
+
+ for (i = 0; i < fan->fps_count; ++i)
+ sysfs_remove_file(&device->dev.kobj, &fan->fps[i].dev_attr.attr);
+ }
sysfs_remove_link(&pdev->dev.kobj, "thermal_cooling");
sysfs_remove_link(&fan->cdev->device.kobj, "device");
thermal_cooling_device_unregister(fan->cdev);
*
* Return: The cache structure and the level we terminated with.
*/
-static int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr,
- int local_level,
- struct acpi_subtable_header *res,
- struct acpi_pptt_cache **found,
- int level, int type)
+static unsigned int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr,
+ unsigned int local_level,
+ struct acpi_subtable_header *res,
+ struct acpi_pptt_cache **found,
+ unsigned int level, int type)
{
struct acpi_pptt_cache *cache;
if (*found != NULL && cache != *found)
pr_warn("Found duplicate cache level/type unable to determine uniqueness\n");
- pr_debug("Found cache @ level %d\n", level);
+ pr_debug("Found cache @ level %u\n", level);
*found = cache;
/*
* continue looking at this node's resource list
return local_level;
}
-static struct acpi_pptt_cache *acpi_find_cache_level(struct acpi_table_header *table_hdr,
- struct acpi_pptt_processor *cpu_node,
- int *starting_level, int level,
- int type)
+static struct acpi_pptt_cache *
+acpi_find_cache_level(struct acpi_table_header *table_hdr,
+ struct acpi_pptt_processor *cpu_node,
+ unsigned int *starting_level, unsigned int level,
+ int type)
{
struct acpi_subtable_header *res;
- int number_of_levels = *starting_level;
+ unsigned int number_of_levels = *starting_level;
int resource = 0;
struct acpi_pptt_cache *ret = NULL;
- int local_level;
+ unsigned int local_level;
/* walk down from processor node */
while ((res = acpi_get_pptt_resource(table_hdr, cpu_node, resource))) {
unsigned int level,
struct acpi_pptt_processor **node)
{
- int total_levels = 0;
+ unsigned int total_levels = 0;
struct acpi_pptt_cache *found = NULL;
struct acpi_pptt_processor *cpu_node;
u8 acpi_type = acpi_cache_type(type);
- pr_debug("Looking for CPU %d's level %d cache type %d\n",
+ pr_debug("Looking for CPU %d's level %u cache type %d\n",
acpi_cpu_id, level, acpi_type);
cpu_node = acpi_find_processor_node(table_hdr, acpi_cpu_id);
static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
- acpi_status status;
- u64 count;
- int current_count;
- int i, ret = 0;
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *cst;
+ int ret;
if (nocst)
return -ENODEV;
- current_count = 0;
-
- status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
- if (ACPI_FAILURE(status)) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
- return -ENODEV;
- }
-
- cst = buffer.pointer;
-
- /* There must be at least 2 elements */
- if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
- pr_err("not enough elements in _CST\n");
- ret = -EFAULT;
- goto end;
- }
-
- count = cst->package.elements[0].integer.value;
+ ret = acpi_processor_evaluate_cst(pr->handle, pr->id, &pr->power);
+ if (ret)
+ return ret;
- /* Validate number of power states. */
- if (count < 1 || count != cst->package.count - 1) {
- pr_err("count given by _CST is not valid\n");
- ret = -EFAULT;
- goto end;
- }
+ /*
+ * It is expected that there will be at least 2 states, C1 and
+ * something else (C2 or C3), so fail if that is not the case.
+ */
+ if (pr->power.count < 2)
+ return -EFAULT;
- /* Tell driver that at least _CST is supported. */
pr->flags.has_cst = 1;
-
- for (i = 1; i <= count; i++) {
- union acpi_object *element;
- union acpi_object *obj;
- struct acpi_power_register *reg;
- struct acpi_processor_cx cx;
-
- memset(&cx, 0, sizeof(cx));
-
- element = &(cst->package.elements[i]);
- if (element->type != ACPI_TYPE_PACKAGE)
- continue;
-
- if (element->package.count != 4)
- continue;
-
- obj = &(element->package.elements[0]);
-
- if (obj->type != ACPI_TYPE_BUFFER)
- continue;
-
- reg = (struct acpi_power_register *)obj->buffer.pointer;
-
- if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
- (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
- continue;
-
- /* There should be an easy way to extract an integer... */
- obj = &(element->package.elements[1]);
- if (obj->type != ACPI_TYPE_INTEGER)
- continue;
-
- cx.type = obj->integer.value;
- /*
- * Some buggy BIOSes won't list C1 in _CST -
- * Let acpi_processor_get_power_info_default() handle them later
- */
- if (i == 1 && cx.type != ACPI_STATE_C1)
- current_count++;
-
- cx.address = reg->address;
- cx.index = current_count + 1;
-
- cx.entry_method = ACPI_CSTATE_SYSTEMIO;
- if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
- if (acpi_processor_ffh_cstate_probe
- (pr->id, &cx, reg) == 0) {
- cx.entry_method = ACPI_CSTATE_FFH;
- } else if (cx.type == ACPI_STATE_C1) {
- /*
- * C1 is a special case where FIXED_HARDWARE
- * can be handled in non-MWAIT way as well.
- * In that case, save this _CST entry info.
- * Otherwise, ignore this info and continue.
- */
- cx.entry_method = ACPI_CSTATE_HALT;
- snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
- } else {
- continue;
- }
- if (cx.type == ACPI_STATE_C1 &&
- (boot_option_idle_override == IDLE_NOMWAIT)) {
- /*
- * In most cases the C1 space_id obtained from
- * _CST object is FIXED_HARDWARE access mode.
- * But when the option of idle=halt is added,
- * the entry_method type should be changed from
- * CSTATE_FFH to CSTATE_HALT.
- * When the option of idle=nomwait is added,
- * the C1 entry_method type should be
- * CSTATE_HALT.
- */
- cx.entry_method = ACPI_CSTATE_HALT;
- snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
- }
- } else {
- snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
- cx.address);
- }
-
- if (cx.type == ACPI_STATE_C1) {
- cx.valid = 1;
- }
-
- obj = &(element->package.elements[2]);
- if (obj->type != ACPI_TYPE_INTEGER)
- continue;
-
- cx.latency = obj->integer.value;
-
- obj = &(element->package.elements[3]);
- if (obj->type != ACPI_TYPE_INTEGER)
- continue;
-
- current_count++;
- memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));
-
- /*
- * We support total ACPI_PROCESSOR_MAX_POWER - 1
- * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
- */
- if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
- pr_warn("Limiting number of power states to max (%d)\n",
- ACPI_PROCESSOR_MAX_POWER);
- pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
- break;
- }
- }
-
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
- current_count));
-
- /* Validate number of power states discovered */
- if (current_count < 2)
- ret = -EFAULT;
-
- end:
- kfree(buffer.pointer);
-
- return ret;
+ return 0;
}
static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
static inline void acpi_processor_cstate_first_run_checks(void)
{
- acpi_status status;
static int first_run;
if (first_run)
max_cstate);
first_run++;
- if (acpi_gbl_FADT.cst_control && !nocst) {
- status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
- acpi_gbl_FADT.cst_control, 8);
- if (ACPI_FAILURE(status))
- ACPI_EXCEPTION((AE_INFO, status,
- "Notifying BIOS of _CST ability failed"));
- }
+ if (nocst)
+ return;
+
+ acpi_processor_claim_cst_control();
}
#else
DMI_MATCH(DMI_PRODUCT_NAME, "102434U"),
},
},
+ {
+ .callback = video_detect_force_native,
+ .ident = "Lenovo E41-25",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "81FS"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ .ident = "Lenovo E41-45",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82BK"),
+ },
+ },
{
/* https://bugzilla.redhat.com/show_bug.cgi?id=1217249 */
.callback = video_detect_force_native,
DMI_MATCH(DMI_PRODUCT_NAME, "Precision 7510"),
},
},
+
+ /*
+	 * Desktops that falsely report a backlight and that our heuristics
+	 * fail to catch.
+ */
{
.callback = video_detect_force_none,
.ident = "Dell OptiPlex 9020M",
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 9020M"),
},
},
+ {
+ .callback = video_detect_force_none,
+ .ident = "MSI MS-7721",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MSI"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MS-7721"),
+ },
+ },
{ },
};
void *cmd_tbl;
u32 opts;
const u32 cmd_fis_len = 5; /* five dwords */
- unsigned int n_elem;
/*
* Fill in command table information. First, the header,
memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
}
- n_elem = 0;
if (qc->flags & ATA_QCFLAG_DMAMAP)
- n_elem = acard_ahci_fill_sg(qc, cmd_tbl);
+ acard_ahci_fill_sg(qc, cmd_tbl);
/*
* Fill in command slot information.
BRCM_SATA_BCM7425 = 1,
BRCM_SATA_BCM7445,
BRCM_SATA_NSP,
+ BRCM_SATA_BCM7216,
};
enum brcm_ahci_quirks {
.port_ops = &ahci_brcm_platform_ops,
};
-#ifdef CONFIG_PM_SLEEP
static int brcm_ahci_suspend(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
struct brcm_ahci_priv *priv = hpriv->plat_data;
+ int ret;
brcm_sata_phys_disable(priv);
- return ahci_platform_suspend(dev);
+ if (IS_ENABLED(CONFIG_PM_SLEEP))
+ ret = ahci_platform_suspend(dev);
+ else
+ ret = 0;
+
+ if (priv->version != BRCM_SATA_BCM7216)
+ reset_control_assert(priv->rcdev);
+
+ return ret;
}
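
/*
 * ahci_platform_suspend() is only built with CONFIG_PM_SLEEP; the
 * IS_ENABLED() test above lets the compiler drop the call at build
 * time, so this helper can also back the shutdown path below
 * regardless of the PM configuration.
 */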
-static int brcm_ahci_resume(struct device *dev)
+static int __maybe_unused brcm_ahci_resume(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
struct brcm_ahci_priv *priv = hpriv->plat_data;
- int ret;
+ int ret = 0;
+
+ if (priv->version == BRCM_SATA_BCM7216)
+ ret = reset_control_reset(priv->rcdev);
+ else
+ ret = reset_control_deassert(priv->rcdev);
+ if (ret)
+ return ret;
/* Make sure clocks are turned on before re-configuration */
ret = ahci_platform_enable_clks(hpriv);
ahci_platform_disable_clks(hpriv);
return ret;
}
-#endif
static struct scsi_host_template ahci_platform_sht = {
AHCI_SHT(DRV_NAME),
{.compatible = "brcm,bcm7445-ahci", .data = (void *)BRCM_SATA_BCM7445},
{.compatible = "brcm,bcm63138-ahci", .data = (void *)BRCM_SATA_BCM7445},
{.compatible = "brcm,bcm-nsp-ahci", .data = (void *)BRCM_SATA_NSP},
+ {.compatible = "brcm,bcm7216-ahci", .data = (void *)BRCM_SATA_BCM7216},
{},
};
MODULE_DEVICE_TABLE(of, ahci_of_match);
{
const struct of_device_id *of_id;
struct device *dev = &pdev->dev;
+ const char *reset_name = NULL;
struct brcm_ahci_priv *priv;
struct ahci_host_priv *hpriv;
struct resource *res;
if (IS_ERR(priv->top_ctrl))
return PTR_ERR(priv->top_ctrl);
- /* Reset is optional depending on platform */
- priv->rcdev = devm_reset_control_get(&pdev->dev, "ahci");
- if (!IS_ERR_OR_NULL(priv->rcdev))
- reset_control_deassert(priv->rcdev);
+	/* Reset is optional depending on platform, and its name varies too */
+ if (priv->version == BRCM_SATA_BCM7216)
+ reset_name = "rescal";
+ else
+ reset_name = "ahci";
+
+ priv->rcdev = devm_reset_control_get_optional(&pdev->dev, reset_name);
+ if (IS_ERR(priv->rcdev))
+ return PTR_ERR(priv->rcdev);
hpriv = ahci_platform_get_resources(pdev, 0);
- if (IS_ERR(hpriv)) {
- ret = PTR_ERR(hpriv);
- goto out_reset;
- }
+ if (IS_ERR(hpriv))
+ return PTR_ERR(hpriv);
hpriv->plat_data = priv;
hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP | AHCI_HFLAG_NO_WRITE_TO_RO;
break;
}
+ if (priv->version == BRCM_SATA_BCM7216)
+ ret = reset_control_reset(priv->rcdev);
+ else
+ ret = reset_control_deassert(priv->rcdev);
+ if (ret)
+ return ret;
+
ret = ahci_platform_enable_clks(hpriv);
if (ret)
goto out_reset;
out_disable_clks:
ahci_platform_disable_clks(hpriv);
out_reset:
- if (!IS_ERR_OR_NULL(priv->rcdev))
+ if (priv->version != BRCM_SATA_BCM7216)
reset_control_assert(priv->rcdev);
return ret;
}
return 0;
}
+static void brcm_ahci_shutdown(struct platform_device *pdev)
+{
+ int ret;
+
+	/* All resources are released via devres, but unlike in a proper
+	 * remove our device is not going away, so calling brcm_ahci_suspend()
+	 * here to do explicit power management is appropriate.
+	 */
+ ret = brcm_ahci_suspend(&pdev->dev);
+ if (ret)
+ dev_err(&pdev->dev, "failed to shutdown\n");
+}
+
static SIMPLE_DEV_PM_OPS(ahci_brcm_pm_ops, brcm_ahci_suspend, brcm_ahci_resume);
static struct platform_driver brcm_ahci_driver = {
.probe = brcm_ahci_probe,
.remove = brcm_ahci_remove,
+ .shutdown = brcm_ahci_shutdown,
.driver = {
.name = DRV_NAME,
.of_match_table = ahci_of_match,
priv->aapl_bus_id = bidp ? *bidp : 0;
/* Fixup missing Apple bus ID in case of media-bay */
- if (priv->mediabay && bidp == 0)
+ if (priv->mediabay && !bidp)
priv->aapl_bus_id = 1;
}
here = (eni_vcc->descr+skip) & (eni_vcc->words-1);
dma[j++] = (here << MID_DMA_COUNT_SHIFT) | (vcc->vci
<< MID_DMA_VCI_SHIFT) | MID_DT_JK;
- j++;
+ dma[j++] = 0;
}
here = (eni_vcc->descr+size+skip) & (eni_vcc->words-1);
if (!eff) size += skip;
if (size != eff) {
dma[j++] = (here << MID_DMA_COUNT_SHIFT) |
(vcc->vci << MID_DMA_VCI_SHIFT) | MID_DT_JK;
- j++;
+ dma[j++] = 0;
}
if (!j || j > 2*RX_DMA_BUF) {
printk(KERN_CRIT DEV_LABEL "!j or j too big!!!\n");
}
if (!to) {
printk ("No more free channels for FS50..\n");
+ kfree(vcc);
return -EBUSY;
}
vcc->channo = dev->channo;
if (((DO_DIRECTION(rxtp) && dev->atm_vccs[vcc->channo])) ||
( DO_DIRECTION(txtp) && test_bit (vcc->channo, dev->tx_inuse))) {
printk ("Channel is in use for FS155.\n");
+ kfree(vcc);
return -EBUSY;
}
}
tc, sizeof (struct fs_transmit_config));
if (!tc) {
fs_dprintk (FS_DEBUG_OPEN, "fs: can't alloc transmit_config.\n");
+ kfree(vcc);
return -ENOMEM;
}
filechk_fwbin = \
echo "/* Generated by $(src)/Makefile */" ;\
echo " .section .rodata" ;\
- echo " .p2align $(ASM_ALIGN)" ;\
+ echo " .p2align 4" ;\
echo "_fw_$(FWSTR)_bin:" ;\
echo " .incbin \"$(fwdir)/$(FWNAME)\"" ;\
echo "_fw_end:" ;\
int retval;
if (rpmflags & RPM_GET_PUT) {
- if (!atomic_dec_and_test(&dev->power.usage_count))
+ if (!atomic_dec_and_test(&dev->power.usage_count)) {
+ trace_rpm_usage_rcuidle(dev, rpmflags);
return 0;
+ }
}
might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
int retval;
if (rpmflags & RPM_GET_PUT) {
- if (!atomic_dec_and_test(&dev->power.usage_count))
+ if (!atomic_dec_and_test(&dev->power.usage_count)) {
+ trace_rpm_usage_rcuidle(dev, rpmflags);
return 0;
+ }
}
might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
retval = dev->power.disable_depth > 0 ? -EINVAL :
dev->power.runtime_status == RPM_ACTIVE
&& atomic_inc_not_zero(&dev->power.usage_count);
+ trace_rpm_usage_rcuidle(dev, 0);
spin_unlock_irqrestore(&dev->power.lock, flags);
return retval;
}
dev->power.runtime_auto = true;
if (atomic_dec_and_test(&dev->power.usage_count))
rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
+ else
+ trace_rpm_usage_rcuidle(dev, RPM_AUTO | RPM_ASYNC);
out:
spin_unlock_irq(&dev->power.lock);
if (!old_use || old_delay >= 0) {
atomic_inc(&dev->power.usage_count);
rpm_resume(dev, 0);
+ } else {
+ trace_rpm_usage_rcuidle(dev, 0);
}
}
break;
}
+ if (!next_ws)
+ print_wakeup_source_stats(m, &deleted_ws);
+
return next_ws;
}
return i2c_smbus_write_byte_data(i2c, reg, val);
}
-static struct regmap_bus regmap_smbus_byte = {
+static const struct regmap_bus regmap_smbus_byte = {
.reg_write = regmap_smbus_byte_reg_write,
.reg_read = regmap_smbus_byte_reg_read,
};
return i2c_smbus_write_word_data(i2c, reg, val);
}
-static struct regmap_bus regmap_smbus_word = {
+static const struct regmap_bus regmap_smbus_word = {
.reg_write = regmap_smbus_word_reg_write,
.reg_read = regmap_smbus_word_reg_read,
};
return i2c_smbus_write_word_swapped(i2c, reg, val);
}
-static struct regmap_bus regmap_smbus_word_swapped = {
+static const struct regmap_bus regmap_smbus_word_swapped = {
.reg_write = regmap_smbus_word_write_swapped,
.reg_read = regmap_smbus_word_read_swapped,
};
return -EIO;
}
-static struct regmap_bus regmap_i2c = {
+static const struct regmap_bus regmap_i2c = {
.write = regmap_i2c_write,
.gather_write = regmap_i2c_gather_write,
.read = regmap_i2c_read,
return -EIO;
}
-static struct regmap_bus regmap_i2c_smbus_i2c_block = {
+static const struct regmap_bus regmap_i2c_smbus_i2c_block = {
.write = regmap_i2c_smbus_i2c_write,
.read = regmap_i2c_smbus_i2c_read,
.max_raw_read = I2C_SMBUS_BLOCK_MAX,
WARN_ON(!map->bus);
- /* Check for unwritable registers before we start */
- for (i = 0; i < val_len / map->format.val_bytes; i++)
- if (!regmap_writeable(map,
- reg + regmap_get_offset(map, i)))
- return -EINVAL;
+ /* Check for unwritable or noinc registers in range
+ * before we start
+ */
+ if (!regmap_writeable_noinc(map, reg)) {
+ for (i = 0; i < val_len / map->format.val_bytes; i++) {
+ unsigned int element =
+ reg + regmap_get_offset(map, i);
+ if (!regmap_writeable(map, element) ||
+ regmap_writeable_noinc(map, element))
+ return -EINVAL;
+ }
+ }
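+
+	/*
+	 * Example: a bulk write covering reg..reg+3 now fails with -EINVAL
+	 * if any register in that window is unwritable or marked noinc,
+	 * while a write that starts at a noinc register skips the
+	 * per-element walk, since every value streams to that one address.
+	 */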
if (!map->cache_bypass && map->format.parse_val) {
unsigned int ival;
if (!prop->length)
return NULL;
- if (prop->is_array)
- return prop->pointer;
-
- return &prop->value;
+ return prop->is_inline ? &prop->value : prop->pointer;
}
static const void *property_entry_find(const struct property_entry *props,
static void property_entry_free_data(const struct property_entry *p)
{
- const void *pointer = property_get_pointer(p);
const char * const *src_str;
size_t i, nval;
- if (p->is_array) {
- if (p->type == DEV_PROP_STRING && p->pointer) {
- src_str = p->pointer;
- nval = p->length / sizeof(const char *);
- for (i = 0; i < nval; i++)
- kfree(src_str[i]);
- }
- kfree(pointer);
- } else if (p->type == DEV_PROP_STRING) {
- kfree(p->value.str);
+ if (p->type == DEV_PROP_STRING) {
+ src_str = property_get_pointer(p);
+ nval = p->length / sizeof(*src_str);
+ for (i = 0; i < nval; i++)
+ kfree(src_str[i]);
}
+
+ if (!p->is_inline)
+ kfree(p->pointer);
+
kfree(p->name);
}
-static const char * const *
-property_copy_string_array(const struct property_entry *src)
+static bool property_copy_string_array(const char **dst_ptr,
+ const char * const *src_ptr,
+ size_t nval)
{
- const char **d;
- const char * const *src_str = src->pointer;
- size_t nval = src->length / sizeof(*d);
int i;
- d = kcalloc(nval, sizeof(*d), GFP_KERNEL);
- if (!d)
- return NULL;
-
for (i = 0; i < nval; i++) {
- d[i] = kstrdup(src_str[i], GFP_KERNEL);
- if (!d[i] && src_str[i]) {
+ dst_ptr[i] = kstrdup(src_ptr[i], GFP_KERNEL);
+ if (!dst_ptr[i] && src_ptr[i]) {
while (--i >= 0)
- kfree(d[i]);
- kfree(d);
- return NULL;
+ kfree(dst_ptr[i]);
+ return false;
}
}
- return d;
+ return true;
}
static int property_entry_copy_data(struct property_entry *dst,
const struct property_entry *src)
{
const void *pointer = property_get_pointer(src);
- const void *new;
-
- if (src->is_array) {
- if (!src->length)
- return -ENODATA;
-
- if (src->type == DEV_PROP_STRING) {
- new = property_copy_string_array(src);
- if (!new)
- return -ENOMEM;
- } else {
- new = kmemdup(pointer, src->length, GFP_KERNEL);
- if (!new)
- return -ENOMEM;
- }
+ void *dst_ptr;
+ size_t nval;
+
+ /*
+ * Properties with no data should not be marked as stored
+ * out of line.
+ */
+ if (!src->is_inline && !src->length)
+ return -ENODATA;
+
+ /*
+ * Reference properties are never stored inline as
+ * they are too big.
+ */
+ if (src->type == DEV_PROP_REF && src->is_inline)
+ return -EINVAL;
- dst->is_array = true;
- dst->pointer = new;
- } else if (src->type == DEV_PROP_STRING) {
- new = kstrdup(src->value.str, GFP_KERNEL);
- if (!new && src->value.str)
+ if (src->length <= sizeof(dst->value)) {
+ dst_ptr = &dst->value;
+ dst->is_inline = true;
+ } else {
+ dst_ptr = kmalloc(src->length, GFP_KERNEL);
+ if (!dst_ptr)
return -ENOMEM;
+ dst->pointer = dst_ptr;
+ }
- dst->value.str = new;
+ if (src->type == DEV_PROP_STRING) {
+ nval = src->length / sizeof(const char *);
+ if (!property_copy_string_array(dst_ptr, pointer, nval)) {
+ if (!dst->is_inline)
+ kfree(dst->pointer);
+ return -ENOMEM;
+ }
} else {
- dst->value = src->value;
+ memcpy(dst_ptr, pointer, src->length);
}
dst->length = src->length;
dst->type = src->type;
dst->name = kstrdup(src->name, GFP_KERNEL);
- if (!dst->name)
- goto out_free_data;
+ if (!dst->name) {
+ property_entry_free_data(dst);
+ return -ENOMEM;
+ }
return 0;
-
-out_free_data:
- property_entry_free_data(dst);
- return -ENOMEM;
}
/**
struct fwnode_reference_args *args)
{
struct swnode *swnode = to_swnode(fwnode);
- const struct software_node_reference *ref;
+ const struct software_node_ref_args *ref_array;
+ const struct software_node_ref_args *ref;
const struct property_entry *prop;
struct fwnode_handle *refnode;
+ u32 nargs_prop_val;
+ int error;
int i;
- if (!swnode || !swnode->node->references)
+ if (!swnode)
return -ENOENT;
- for (ref = swnode->node->references; ref->name; ref++)
- if (!strcmp(ref->name, propname))
- break;
+ prop = property_entry_get(swnode->node->properties, propname);
+ if (!prop)
+ return -ENOENT;
+
+ if (prop->type != DEV_PROP_REF)
+ return -EINVAL;
- if (!ref->name || index > (ref->nrefs - 1))
+ /*
+ * We expect that references are never stored inline, even
+ * single ones, as they are too big.
+ */
+ if (prop->is_inline)
+ return -EINVAL;
+
+ if (index * sizeof(*ref) >= prop->length)
return -ENOENT;
- refnode = software_node_fwnode(ref->refs[index].node);
+ ref_array = prop->pointer;
+ ref = &ref_array[index];
+
+ refnode = software_node_fwnode(ref->node);
if (!refnode)
return -ENOENT;
if (nargs_prop) {
- prop = property_entry_get(swnode->node->properties, nargs_prop);
- if (!prop)
- return -EINVAL;
+ error = property_entry_read_int_array(swnode->node->properties,
+ nargs_prop, sizeof(u32),
+ &nargs_prop_val, 1);
+ if (error)
+ return error;
- nargs = prop->value.u32_data;
+ nargs = nargs_prop_val;
}
if (nargs > NR_FWNODE_REFERENCE_ARGS)
args->nargs = nargs;
for (i = 0; i < nargs; i++)
- args->args[i] = ref->refs[index].args[i];
+ args->args[i] = ref->args[i];
return 0;
}
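
/*
 * With references stored as a flat array of struct software_node_ref_args,
 * index validation above reduces to a length check: entry "index" exists
 * iff index * sizeof(*ref) < prop->length.
 */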
The module name will be test_async_driver_probe.ko
If unsure say N.
+config KUNIT_DRIVER_PE_TEST
+ bool "KUnit Tests for property entry API"
+ depends on KUNIT=y
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_TEST_ASYNC_DRIVER_PROBE) += test_async_driver_probe.o
+
+obj-$(CONFIG_KUNIT_DRIVER_PE_TEST) += property-entry-test.o
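+
+# Usage note (assuming a KUnit-enabled build): with CONFIG_KUNIT=y and
+# CONFIG_KUNIT_DRIVER_PE_TEST=y, the suite below registers itself via
+# kunit_test_suite() and runs automatically at boot, e.g. when invoked
+# through tools/testing/kunit/kunit.py with a matching .kunitconfig.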
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+// Unit tests for property entries API
+//
+// Copyright 2019 Google LLC.
+
+#include <kunit/test.h>
+#include <linux/property.h>
+#include <linux/types.h>
+
+static void pe_test_uints(struct kunit *test)
+{
+ static const struct property_entry entries[] = {
+ PROPERTY_ENTRY_U8("prop-u8", 8),
+ PROPERTY_ENTRY_U16("prop-u16", 16),
+ PROPERTY_ENTRY_U32("prop-u32", 32),
+ PROPERTY_ENTRY_U64("prop-u64", 64),
+ { }
+ };
+
+ struct fwnode_handle *node;
+ u8 val_u8, array_u8[2];
+ u16 val_u16, array_u16[2];
+ u32 val_u32, array_u32[2];
+ u64 val_u64, array_u64[2];
+ int error;
+
+ node = fwnode_create_software_node(entries, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
+
+ error = fwnode_property_read_u8(node, "prop-u8", &val_u8);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)val_u8, 8);
+
+ error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 1);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u8[0], 8);
+
+ error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 2);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u8(node, "no-prop-u8", &val_u8);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u8_array(node, "no-prop-u8", array_u8, 1);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u16(node, "prop-u16", &val_u16);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)val_u16, 16);
+
+ error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 1);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u16[0], 16);
+
+ error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 2);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u16(node, "no-prop-u16", &val_u16);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u16_array(node, "no-prop-u16", array_u16, 1);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u32(node, "prop-u32", &val_u32);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)val_u32, 32);
+
+ error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 1);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u32[0], 32);
+
+ error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 2);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u32(node, "no-prop-u32", &val_u32);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u32_array(node, "no-prop-u32", array_u32, 1);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u64(node, "prop-u64", &val_u64);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)val_u64, 64);
+
+ error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 1);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u64[0], 64);
+
+ error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 2);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u64(node, "no-prop-u64", &val_u64);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u64_array(node, "no-prop-u64", array_u64, 1);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ fwnode_remove_software_node(node);
+}
+
+static void pe_test_uint_arrays(struct kunit *test)
+{
+ static const u8 a_u8[16] = { 8, 9 };
+ static const u16 a_u16[16] = { 16, 17 };
+ static const u32 a_u32[16] = { 32, 33 };
+ static const u64 a_u64[16] = { 64, 65 };
+ static const struct property_entry entries[] = {
+ PROPERTY_ENTRY_U8_ARRAY("prop-u8", a_u8),
+ PROPERTY_ENTRY_U16_ARRAY("prop-u16", a_u16),
+ PROPERTY_ENTRY_U32_ARRAY("prop-u32", a_u32),
+ PROPERTY_ENTRY_U64_ARRAY("prop-u64", a_u64),
+ { }
+ };
+
+ struct fwnode_handle *node;
+ u8 val_u8, array_u8[32];
+ u16 val_u16, array_u16[32];
+ u32 val_u32, array_u32[32];
+ u64 val_u64, array_u64[32];
+ int error;
+
+ node = fwnode_create_software_node(entries, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
+
+ error = fwnode_property_read_u8(node, "prop-u8", &val_u8);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)val_u8, 8);
+
+ error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 1);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u8[0], 8);
+
+ error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 2);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u8[0], 8);
+ KUNIT_EXPECT_EQ(test, (int)array_u8[1], 9);
+
+ error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 17);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u8(node, "no-prop-u8", &val_u8);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u8_array(node, "no-prop-u8", array_u8, 1);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u16(node, "prop-u16", &val_u16);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)val_u16, 16);
+
+ error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 1);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u16[0], 16);
+
+ error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 2);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u16[0], 16);
+ KUNIT_EXPECT_EQ(test, (int)array_u16[1], 17);
+
+ error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 17);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u16(node, "no-prop-u16", &val_u16);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u16_array(node, "no-prop-u16", array_u16, 1);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u32(node, "prop-u32", &val_u32);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)val_u32, 32);
+
+ error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 1);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u32[0], 32);
+
+ error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 2);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u32[0], 32);
+ KUNIT_EXPECT_EQ(test, (int)array_u32[1], 33);
+
+ error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 17);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u32(node, "no-prop-u32", &val_u32);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u32_array(node, "no-prop-u32", array_u32, 1);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u64(node, "prop-u64", &val_u64);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)val_u64, 64);
+
+ error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 1);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u64[0], 64);
+
+ error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 2);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, (int)array_u64[0], 64);
+ KUNIT_EXPECT_EQ(test, (int)array_u64[1], 65);
+
+ error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 17);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u64(node, "no-prop-u64", &val_u64);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u64_array(node, "no-prop-u64", array_u64, 1);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ fwnode_remove_software_node(node);
+}
+
+static void pe_test_strings(struct kunit *test)
+{
+ static const char *strings[] = {
+ "string-a",
+ "string-b",
+ };
+
+ static const struct property_entry entries[] = {
+ PROPERTY_ENTRY_STRING("str", "single"),
+ PROPERTY_ENTRY_STRING("empty", ""),
+ PROPERTY_ENTRY_STRING_ARRAY("strs", strings),
+ { }
+ };
+
+ struct fwnode_handle *node;
+ const char *str;
+ const char *strs[10];
+ int error;
+
+ node = fwnode_create_software_node(entries, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
+
+ error = fwnode_property_read_string(node, "str", &str);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_STREQ(test, str, "single");
+
+ error = fwnode_property_read_string_array(node, "str", strs, 1);
+ KUNIT_EXPECT_EQ(test, error, 1);
+ KUNIT_EXPECT_STREQ(test, strs[0], "single");
+
+ /* asking for more data returns what we have */
+ error = fwnode_property_read_string_array(node, "str", strs, 2);
+ KUNIT_EXPECT_EQ(test, error, 1);
+ KUNIT_EXPECT_STREQ(test, strs[0], "single");
+
+ error = fwnode_property_read_string(node, "no-str", &str);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_string_array(node, "no-str", strs, 1);
+ KUNIT_EXPECT_LT(test, error, 0);
+
+ error = fwnode_property_read_string(node, "empty", &str);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_STREQ(test, str, "");
+
+ error = fwnode_property_read_string_array(node, "strs", strs, 3);
+ KUNIT_EXPECT_EQ(test, error, 2);
+ KUNIT_EXPECT_STREQ(test, strs[0], "string-a");
+ KUNIT_EXPECT_STREQ(test, strs[1], "string-b");
+
+ error = fwnode_property_read_string_array(node, "strs", strs, 1);
+ KUNIT_EXPECT_EQ(test, error, 1);
+ KUNIT_EXPECT_STREQ(test, strs[0], "string-a");
+
+ /* NULL argument -> returns size */
+ error = fwnode_property_read_string_array(node, "strs", NULL, 0);
+ KUNIT_EXPECT_EQ(test, error, 2);
+
+ /* accessing array as single value */
+ error = fwnode_property_read_string(node, "strs", &str);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_STREQ(test, str, "string-a");
+
+ fwnode_remove_software_node(node);
+}
+
+static void pe_test_bool(struct kunit *test)
+{
+ static const struct property_entry entries[] = {
+ PROPERTY_ENTRY_BOOL("prop"),
+ { }
+ };
+
+ struct fwnode_handle *node;
+
+ node = fwnode_create_software_node(entries, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
+
+ KUNIT_EXPECT_TRUE(test, fwnode_property_read_bool(node, "prop"));
+ KUNIT_EXPECT_FALSE(test, fwnode_property_read_bool(node, "not-prop"));
+
+ fwnode_remove_software_node(node);
+}
+
+/* Verifies that small U8 array is stored inline when property is copied */
+static void pe_test_move_inline_u8(struct kunit *test)
+{
+ static const u8 u8_array_small[8] = { 1, 2, 3, 4 };
+ static const u8 u8_array_big[128] = { 5, 6, 7, 8 };
+ static const struct property_entry entries[] = {
+ PROPERTY_ENTRY_U8_ARRAY("small", u8_array_small),
+ PROPERTY_ENTRY_U8_ARRAY("big", u8_array_big),
+ { }
+ };
+
+ struct property_entry *copy;
+ const u8 *data_ptr;
+
+ copy = property_entries_dup(entries);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, copy);
+
+ KUNIT_EXPECT_TRUE(test, copy[0].is_inline);
+	data_ptr = (u8 *)&copy[0].value;
+ KUNIT_EXPECT_EQ(test, (int)data_ptr[0], 1);
+ KUNIT_EXPECT_EQ(test, (int)data_ptr[1], 2);
+
+ KUNIT_EXPECT_FALSE(test, copy[1].is_inline);
+ data_ptr = copy[1].pointer;
+ KUNIT_EXPECT_EQ(test, (int)data_ptr[0], 5);
+ KUNIT_EXPECT_EQ(test, (int)data_ptr[1], 6);
+
+ property_entries_free(copy);
+}
+
+/* Verifies that single string array is stored inline when property is copied */
+static void pe_test_move_inline_str(struct kunit *test)
+{
+ static char *str_array_small[] = { "a" };
+ static char *str_array_big[] = { "b", "c", "d", "e" };
+ static char *str_array_small_empty[] = { "" };
+ static struct property_entry entries[] = {
+ PROPERTY_ENTRY_STRING_ARRAY("small", str_array_small),
+ PROPERTY_ENTRY_STRING_ARRAY("big", str_array_big),
+ PROPERTY_ENTRY_STRING_ARRAY("small-empty", str_array_small_empty),
+ { }
+ };
+
+ struct property_entry *copy;
+ const char * const *data_ptr;
+
+ copy = property_entries_dup(entries);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, copy);
+
+ KUNIT_EXPECT_TRUE(test, copy[0].is_inline);
+ KUNIT_EXPECT_STREQ(test, copy[0].value.str[0], "a");
+
+ KUNIT_EXPECT_FALSE(test, copy[1].is_inline);
+ data_ptr = copy[1].pointer;
+ KUNIT_EXPECT_STREQ(test, data_ptr[0], "b");
+ KUNIT_EXPECT_STREQ(test, data_ptr[1], "c");
+
+ KUNIT_EXPECT_TRUE(test, copy[2].is_inline);
+ KUNIT_EXPECT_STREQ(test, copy[2].value.str[0], "");
+
+ property_entries_free(copy);
+}
+
+/* Handling of reference properties */
+static void pe_test_reference(struct kunit *test)
+{
+ static const struct software_node nodes[] = {
+ { .name = "1", },
+ { .name = "2", },
+ { }
+ };
+
+ static const struct software_node_ref_args refs[] = {
+ {
+ .node = &nodes[0],
+ .nargs = 0,
+ },
+ {
+ .node = &nodes[1],
+ .nargs = 2,
+ .args = { 3, 4 },
+ },
+ };
+
+ const struct property_entry entries[] = {
+ PROPERTY_ENTRY_REF("ref-1", &nodes[0]),
+ PROPERTY_ENTRY_REF("ref-2", &nodes[1], 1, 2),
+ PROPERTY_ENTRY_REF_ARRAY("ref-3", refs),
+ { }
+ };
+
+ struct fwnode_handle *node;
+ struct fwnode_reference_args ref;
+ int error;
+
+ error = software_node_register_nodes(nodes);
+ KUNIT_ASSERT_EQ(test, error, 0);
+
+ node = fwnode_create_software_node(entries, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
+
+ error = fwnode_property_get_reference_args(node, "ref-1", NULL,
+ 0, 0, &ref);
+ KUNIT_ASSERT_EQ(test, error, 0);
+ KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[0]);
+ KUNIT_EXPECT_EQ(test, ref.nargs, 0U);
+
+ /* wrong index */
+ error = fwnode_property_get_reference_args(node, "ref-1", NULL,
+ 0, 1, &ref);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_get_reference_args(node, "ref-2", NULL,
+ 1, 0, &ref);
+ KUNIT_ASSERT_EQ(test, error, 0);
+ KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[1]);
+ KUNIT_EXPECT_EQ(test, ref.nargs, 1U);
+ KUNIT_EXPECT_EQ(test, ref.args[0], 1LLU);
+
+ /* asking for more args, padded with zero data */
+ error = fwnode_property_get_reference_args(node, "ref-2", NULL,
+ 3, 0, &ref);
+ KUNIT_ASSERT_EQ(test, error, 0);
+ KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[1]);
+ KUNIT_EXPECT_EQ(test, ref.nargs, 3U);
+ KUNIT_EXPECT_EQ(test, ref.args[0], 1LLU);
+ KUNIT_EXPECT_EQ(test, ref.args[1], 2LLU);
+ KUNIT_EXPECT_EQ(test, ref.args[2], 0LLU);
+
+ /* wrong index */
+ error = fwnode_property_get_reference_args(node, "ref-2", NULL,
+ 2, 1, &ref);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ /* array of references */
+ error = fwnode_property_get_reference_args(node, "ref-3", NULL,
+ 0, 0, &ref);
+ KUNIT_ASSERT_EQ(test, error, 0);
+ KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[0]);
+ KUNIT_EXPECT_EQ(test, ref.nargs, 0U);
+
+ /* second reference in the array */
+ error = fwnode_property_get_reference_args(node, "ref-3", NULL,
+ 2, 1, &ref);
+ KUNIT_ASSERT_EQ(test, error, 0);
+ KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[1]);
+ KUNIT_EXPECT_EQ(test, ref.nargs, 2U);
+ KUNIT_EXPECT_EQ(test, ref.args[0], 3LLU);
+ KUNIT_EXPECT_EQ(test, ref.args[1], 4LLU);
+
+ /* wrong index */
+ error = fwnode_property_get_reference_args(node, "ref-1", NULL,
+ 0, 2, &ref);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ fwnode_remove_software_node(node);
+ software_node_unregister_nodes(nodes);
+}
+
+static struct kunit_case property_entry_test_cases[] = {
+ KUNIT_CASE(pe_test_uints),
+ KUNIT_CASE(pe_test_uint_arrays),
+ KUNIT_CASE(pe_test_strings),
+ KUNIT_CASE(pe_test_bool),
+ KUNIT_CASE(pe_test_move_inline_u8),
+ KUNIT_CASE(pe_test_move_inline_str),
+ KUNIT_CASE(pe_test_reference),
+ { }
+};
+
+static struct kunit_suite property_entry_test_suite = {
+ .name = "property-entry",
+ .test_cases = property_entry_test_cases,
+};
+
+kunit_test_suite(property_entry_test_suite);
return BLK_STS_IOERR;
case BLK_ZONE_COND_EMPTY:
case BLK_ZONE_COND_IMP_OPEN:
+ case BLK_ZONE_COND_EXP_OPEN:
+ case BLK_ZONE_COND_CLOSED:
/* Writes must be at the write pointer position */
if (sector != zone->wp)
return BLK_STS_IOERR;
- if (zone->cond == BLK_ZONE_COND_EMPTY)
+ if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
zone->cond = BLK_ZONE_COND_IMP_OPEN;
zone->wp += nr_sectors;
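
/*
 * Net effect of the checks above: a write at the write pointer of an
 * explicitly opened zone leaves it in BLK_ZONE_COND_EXP_OPEN, while
 * writes to EMPTY, CLOSED or implicitly opened zones move (or keep)
 * the zone in BLK_ZONE_COND_IMP_OPEN.
 */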
return -EINVAL;
}
+ /* Always add a slot for main clocks fck and ick even if unused */
+ if (!nr_fck)
+ ddata->nr_clocks++;
+ if (!nr_ick)
+ ddata->nr_clocks++;
+
ddata->clocks = devm_kcalloc(ddata->dev,
ddata->nr_clocks, sizeof(*ddata->clocks),
GFP_KERNEL);
struct clk *clock;
int i, error;
- if (!ddata->clocks)
+ if (!ddata->clocks || ddata->nr_clocks < SYSC_OPTFCK0 + 1)
return 0;
for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) {
struct clk *clock;
int i;
- if (!ddata->clocks)
+ if (!ddata->clocks || ddata->nr_clocks < SYSC_OPTFCK0 + 1)
return;
for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) {
priv->response_read = true;
ret_size = min_t(ssize_t, size, priv->response_length);
- if (!ret_size) {
+ if (ret_size <= 0) {
priv->response_length = 0;
goto out;
}
struct work_struct timeout_work;
struct work_struct async_work;
wait_queue_head_t async_wait;
- size_t response_length;
+ ssize_t response_length;
bool response_read;
bool command_enqueued;
}
static DEVICE_ATTR_RO(timeouts);
-static struct attribute *tpm_dev_attrs[] = {
+static ssize_t tpm_version_major_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tpm_chip *chip = to_tpm_chip(dev);
+
+ return sprintf(buf, "%s\n", chip->flags & TPM_CHIP_FLAG_TPM2
+ ? "2" : "1");
+}
+static DEVICE_ATTR_RO(tpm_version_major);
+
+static struct attribute *tpm1_dev_attrs[] = {
&dev_attr_pubek.attr,
&dev_attr_pcrs.attr,
&dev_attr_enabled.attr,
&dev_attr_cancel.attr,
&dev_attr_durations.attr,
&dev_attr_timeouts.attr,
+ &dev_attr_tpm_version_major.attr,
NULL,
};
-static const struct attribute_group tpm_dev_group = {
- .attrs = tpm_dev_attrs,
+static struct attribute *tpm2_dev_attrs[] = {
+ &dev_attr_tpm_version_major.attr,
+ NULL
+};
+
+static const struct attribute_group tpm1_dev_group = {
+ .attrs = tpm1_dev_attrs,
+};
+
+static const struct attribute_group tpm2_dev_group = {
+ .attrs = tpm2_dev_attrs,
};
void tpm_sysfs_add_device(struct tpm_chip *chip)
{
- if (chip->flags & TPM_CHIP_FLAG_TPM2)
- return;
-
WARN_ON(chip->groups_cnt != 0);
- chip->groups[chip->groups_cnt++] = &tpm_dev_group;
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ chip->groups[chip->groups_cnt++] = &tpm2_dev_group;
+ else
+ chip->groups[chip->groups_cnt++] = &tpm1_dev_group;
}
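
/*
 * Usage example (hypothetical device path): both TPM 1.2 and TPM 2.0
 * chips now report their major version in sysfs:
 *
 *   $ cat /sys/class/tpm/tpm0/tpm_version_major
 *   2
 *
 * TPM 1.2 chips additionally keep the legacy attribute group (pubek,
 * pcrs, enabled, ...), which is never registered for TPM 2.0.
 */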
if (wait_startup(chip, 0) != 0) {
rc = -ENODEV;
- goto err_start;
+ goto out_err;
}
/* Take control of the TPM's interrupt hardware and shut it off */
rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask);
if (rc < 0)
- goto err_start;
+ goto out_err;
intmask |= TPM_INTF_CMD_READY_INT | TPM_INTF_LOCALITY_CHANGE_INT |
TPM_INTF_DATA_AVAIL_INT | TPM_INTF_STS_VALID_INT;
rc = tpm_chip_start(chip);
if (rc)
- goto err_start;
-
+ goto out_err;
rc = tpm2_probe(chip);
+ tpm_chip_stop(chip);
if (rc)
- goto err_probe;
+ goto out_err;
rc = tpm_tis_read32(priv, TPM_DID_VID(0), &vendor);
if (rc < 0)
- goto err_probe;
+ goto out_err;
priv->manufacturer_id = vendor;
rc = tpm_tis_read8(priv, TPM_RID(0), &rid);
if (rc < 0)
- goto err_probe;
+ goto out_err;
dev_info(dev, "%s TPM (device-id 0x%X, rev-id %d)\n",
(chip->flags & TPM_CHIP_FLAG_TPM2) ? "2.0" : "1.2",
probe = probe_itpm(chip);
if (probe < 0) {
rc = -ENODEV;
- goto err_probe;
+ goto out_err;
}
/* Figure out the capabilities */
rc = tpm_tis_read32(priv, TPM_INTF_CAPS(priv->locality), &intfcaps);
if (rc < 0)
- goto err_probe;
+ goto out_err;
dev_dbg(dev, "TPM interface capabilities (0x%x):\n",
intfcaps);
if (tpm_get_timeouts(chip)) {
dev_err(dev, "Could not get TPM timeouts and durations\n");
rc = -ENODEV;
- goto err_probe;
+ goto out_err;
}
- chip->flags |= TPM_CHIP_FLAG_IRQ;
if (irq) {
tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED,
irq);
}
}
- tpm_chip_stop(chip);
-
rc = tpm_chip_register(chip);
if (rc)
- goto err_start;
-
- return 0;
+ goto out_err;
-err_probe:
- tpm_chip_stop(chip);
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, false);
-err_start:
+ return 0;
+out_err:
if ((chip->ops != NULL) && (chip->ops->clk_enable != NULL))
chip->ops->clk_enable(chip, false);
if (core->flags & CLK_IS_CRITICAL) {
unsigned long flags;
- clk_core_prepare(core);
+ ret = clk_core_prepare(core);
+ if (ret)
+ goto out;
flags = clk_enable_lock();
- clk_core_enable(core);
+ ret = clk_core_enable(core);
clk_enable_unlock(flags);
+ if (ret) {
+ clk_core_unprepare(core);
+ goto out;
+ }
}
clk_core_reparent_orphans_nolock();
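
The error handling added here follows the usual clk rule: a failed enable must roll back the earlier prepare. A consumer-side sketch of the same pairing, using the public clk API rather than the clk_core_* internals shown above:

#include <linux/clk.h>

static int example_enable_clock(struct clk *clk)
{
	int ret;

	ret = clk_prepare(clk);		/* may sleep */
	if (ret)
		return ret;

	ret = clk_enable(clk);		/* usable from atomic context */
	if (ret) {
		clk_unprepare(clk);	/* roll back on failure */
		return ret;
	}

	return 0;	/* balanced later by clk_disable_unprepare(clk) */
}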
static const char *ssp_parent_names[] = {"vctcxo_4", "vctcxo_2", "vctcxo", "pll1_16"};
static DEFINE_SPINLOCK(timer_lock);
-static const char *timer_parent_names[] = {"clk32", "vctcxo_2", "vctcxo_4", "vctcxo"};
+static const char *timer_parent_names[] = {"clk32", "vctcxo_4", "vctcxo_2", "vctcxo"};
static DEFINE_SPINLOCK(reset_lock);
.name = "hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc = {
.name = "hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc hlos1_vote_aggre_noc_mmu_tbu1_gdsc = {
.name = "hlos1_vote_aggre_noc_mmu_tbu1_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc hlos1_vote_aggre_noc_mmu_tbu2_gdsc = {
.name = "hlos1_vote_aggre_noc_mmu_tbu2_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = {
.name = "hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc = {
.name = "hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf_gdsc = {
.name = "hlos1_vote_mmnoc_mmu_tbu_sf_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
};
static struct clk_regmap *gcc_sdm845_clocks[] = {
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/clk.h>
#include "clk.h"
#include "clk-cpu.h"
exynos5x_subcmus);
}
+ /*
+ * Keep top part of G3D clock path enabled permanently to ensure
+ * that the internal busses get their clock regardless of the
+ * main G3D clock enablement status.
+ */
+ clk_prepare_enable(__clk_lookup("mout_sw_aclk_g3d"));
+
samsung_clk_of_add_provider(np, ctx);
}
*/
static const char * const ar100_r_apb2_parents[] = { "osc24M", "osc32k",
- "pll-periph0", "iosc" };
+ "iosc", "pll-periph0" };
static const struct ccu_mux_var_prediv ar100_r_apb2_predivs[] = {
- { .index = 2, .shift = 0, .width = 5 },
+ { .index = 3, .shift = 0, .width = 5 },
};
static struct ccu_div ar100_clk = {
static CLK_FIXED_FACTOR_HW(r_ahb_clk, "r-ahb", &ar100_clk.common.hw, 1, 1, 0);
-static struct ccu_div r_apb1_clk = {
- .div = _SUNXI_CCU_DIV(0, 2),
-
- .common = {
- .reg = 0x00c,
- .hw.init = CLK_HW_INIT("r-apb1",
- "r-ahb",
- &ccu_div_ops,
- 0),
- },
-};
+static SUNXI_CCU_M(r_apb1_clk, "r-apb1", "r-ahb", 0x00c, 0, 2, 0);
static struct ccu_div r_apb2_clk = {
.div = _SUNXI_CCU_DIV_FLAGS(8, 2, CLK_DIVIDER_POWER_OF_TWO),
static CLK_FIXED_FACTOR_HW(ahb0_clk, "ahb0", &ar100_clk.common.hw, 1, 1, 0);
-static struct ccu_div apb0_clk = {
- .div = _SUNXI_CCU_DIV_FLAGS(0, 2, CLK_DIVIDER_POWER_OF_TWO),
-
- .common = {
- .reg = 0x0c,
- .hw.init = CLK_HW_INIT_HW("apb0",
- &ahb0_clk.hw,
- &ccu_div_ops,
- 0),
- },
-};
-
-static SUNXI_CCU_M(a83t_apb0_clk, "apb0", "ahb0", 0x0c, 0, 2, 0);
+static SUNXI_CCU_M(apb0_clk, "apb0", "ahb0", 0x0c, 0, 2, 0);
/*
* Define the parent as an array that can be reused to save space
static struct ccu_common *sun8i_a83t_r_ccu_clks[] = {
&ar100_clk.common,
- &a83t_apb0_clk.common,
+ &apb0_clk.common,
&apb0_pio_clk.common,
&apb0_ir_clk.common,
&apb0_timer_clk.common,
.hws = {
[CLK_AR100] = &ar100_clk.common.hw,
[CLK_AHB0] = &ahb0_clk.hw,
- [CLK_APB0] = &a83t_apb0_clk.common.hw,
+ [CLK_APB0] = &apb0_clk.common.hw,
[CLK_APB0_PIO] = &apb0_pio_clk.common.hw,
[CLK_APB0_IR] = &apb0_ir_clk.common.hw,
[CLK_APB0_TIMER] = &apb0_timer_clk.common.hw,
static void __init sun8i_a83t_r_ccu_setup(struct device_node *node)
{
- /* Fix apb0 bus gate parents here */
- apb0_gate_parent[0] = &a83t_apb0_clk.common.hw;
-
sunxi_r_ccu_init(node, &sun8i_a83t_r_ccu_desc);
}
CLK_OF_DECLARE(sun8i_a83t_r_ccu, "allwinner,sun8i-a83t-r-ccu",
.reg = 0x1f0,
.features = CCU_FEATURE_FIXED_PREDIV,
.hw.init = CLK_HW_INIT_PARENTS("outa", out_parents,
- &ccu_mp_ops, 0),
+ &ccu_mp_ops,
+ CLK_SET_RATE_PARENT),
}
};
.reg = 0x1f4,
.features = CCU_FEATURE_FIXED_PREDIV,
.hw.init = CLK_HW_INIT_PARENTS("outb", out_parents,
- &ccu_mp_ops, 0),
+ &ccu_mp_ops,
+ CLK_SET_RATE_PARENT),
}
};
[CLK_MBUS] = &mbus_clk.common.hw,
[CLK_MIPI_CSI] = &mipi_csi_clk.common.hw,
},
- .num = CLK_NUMBER,
+ .num = CLK_PLL_DDR1 + 1,
};
static struct clk_hw_onecell_data sun8i_v3_hw_clks = {
[CLK_MBUS] = &mbus_clk.common.hw,
[CLK_MIPI_CSI] = &mipi_csi_clk.common.hw,
},
- .num = CLK_NUMBER,
+ .num = CLK_I2S0 + 1,
};
static struct ccu_reset_map sun8i_v3s_ccu_resets[] = {
#define CLK_PLL_DDR1 74
-#define CLK_NUMBER (CLK_I2S0 + 1)
-
#endif /* _CCU_SUN8I_H3_H_ */
periph_banks = banks;
clks = kcalloc(num, sizeof(struct clk *), GFP_KERNEL);
- if (!clks)
+ if (!clks) {
kfree(periph_clk_enb_refcnt);
+ return NULL;
+ }
clk_num = num;
cinfo->iobase = of_iomap(node, 0);
cinfo->dev = &pdev->dev;
pm_runtime_enable(cinfo->dev);
- pm_runtime_irq_safe(cinfo->dev);
pm_runtime_get_sync(cinfo->dev);
atl_write(cinfo, DRA7_ATL_PCLKMUX_REG(0), DRA7_ATL_PCLKMUX);
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
struct private_data *priv = policy->driver_data;
+ cpufreq_cpu_put(policy);
+
return brcm_avs_get_frequency(priv->base);
}
static struct cppc_cpudata **all_cpu_data;
struct cppc_workaround_oem_info {
- char oem_id[ACPI_OEM_ID_SIZE +1];
+ char oem_id[ACPI_OEM_ID_SIZE + 1];
char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
u32 oem_revision;
};
for (i = 0; i < ARRAY_SIZE(wa_info); i++) {
if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) &&
!memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
- wa_info[i].oem_revision == tbl->oem_revision)
+ wa_info[i].oem_revision == tbl->oem_revision) {
apply_hisi_workaround = true;
+ break;
+ }
}
+
+ acpi_put_table(tbl);
}
/* Callback function used to retrieve the max frequency from DMI */
{ .compatible = "fsl,imx8mq", },
{ .compatible = "fsl,imx8mm", },
{ .compatible = "fsl,imx8mn", },
+ { .compatible = "fsl,imx8mp", },
{ .compatible = "marvell,armadaxp", },
{ .compatible = "mediatek,mt8176", },
{ .compatible = "mediatek,mt8183", },
+ { .compatible = "nvidia,tegra20", },
+ { .compatible = "nvidia,tegra30", },
{ .compatible = "nvidia,tegra124", },
{ .compatible = "nvidia,tegra210", },
if (ret)
return ret;
- if (of_machine_is_compatible("fsl,imx8mn"))
+ if (of_machine_is_compatible("fsl,imx8mn") ||
+ of_machine_is_compatible("fsl,imx8mp"))
speed_grade = (cell_value & IMX8MN_OCOTP_CFG3_SPEED_GRADE_MASK)
>> OCOTP_CFG3_SPEED_GRADE_SHIFT;
else
if (of_machine_is_compatible("fsl,imx8mm") ||
of_machine_is_compatible("fsl,imx8mq"))
speed_grade = 1;
- if (of_machine_is_compatible("fsl,imx8mn"))
+ if (of_machine_is_compatible("fsl,imx8mn") ||
+ of_machine_is_compatible("fsl,imx8mp"))
speed_grade = 0xb;
}
/**
* struct global_params - Global parameters, mostly tunable via sysfs.
* @no_turbo: Whether or not to use turbo P-states.
- * @turbo_disabled: Whethet or not turbo P-states are available at all,
+ * @turbo_disabled: Whether or not turbo P-states are available at all,
* based on the MSR_IA32_MISC_ENABLE value and whether or
* not the maximum reported turbo P-state is different from
* the maximum reported non-turbo one.
static int kirkwood_cpufreq_probe(struct platform_device *pdev)
{
struct device_node *np;
- struct resource *res;
int err;
priv.dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv.base = devm_ioremap_resource(&pdev->dev, res);
+ priv.base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv.base))
return PTR_ERR(priv.base);
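
Several hunks in this series perform the same conversion. The helper is shorthand for the two-call sequence it replaces; a sketch of the equivalence (example_map is a hypothetical function):

#include <linux/io.h>
#include <linux/platform_device.h>

static void __iomem *example_map(struct platform_device *pdev)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	return devm_ioremap_resource(&pdev->dev, res);
	/* equivalent one-liner:
	 * return devm_platform_ioremap_resource(pdev, 0);
	 */
}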
u32 cpu_freq;
spin_lock_irqsave(&loongson2_wait_lock, flags);
- cpu_freq = LOONGSON_CHIPCFG(0);
- LOONGSON_CHIPCFG(0) &= ~0x7; /* Put CPU into wait mode */
- LOONGSON_CHIPCFG(0) = cpu_freq; /* Restore CPU state */
+ cpu_freq = readl(LOONGSON_CHIPCFG);
+ /* Put CPU into wait mode */
+ writel(readl(LOONGSON_CHIPCFG) & ~0x7, LOONGSON_CHIPCFG);
+ /* Restore CPU state */
+ writel(cpu_freq, LOONGSON_CHIPCFG);
spin_unlock_irqrestore(&loongson2_wait_lock, flags);
local_irq_enable();
}
{
struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
int ret;
+ struct cpufreq_policy *policy;
mutex_lock(&cpufreq_lock);
*/
if (s3c_freq->is_dvs) {
pr_debug("cpufreq: leave dvs on reboot\n");
- ret = cpufreq_driver_target(cpufreq_cpu_get(0), FREQ_SLEEP, 0);
+
+ policy = cpufreq_cpu_get(0);
+ if (!policy) {
+ pr_debug("cpufreq: get no policy for cpu0\n");
+ return NOTIFY_BAD;
+ }
+
+ ret = cpufreq_driver_target(policy, FREQ_SLEEP, 0);
+ cpufreq_cpu_put(policy);
+
if (ret < 0)
return NOTIFY_BAD;
}
unsigned long event, void *ptr)
{
int ret;
+ struct cpufreq_policy *policy;
+
+ policy = cpufreq_cpu_get(0);
+ if (!policy) {
+ pr_debug("cpufreq: get no policy for cpu0\n");
+ return NOTIFY_BAD;
+ }
+
+ ret = cpufreq_driver_target(policy, SLEEP_FREQ, 0);
+ cpufreq_cpu_put(policy);
- ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, 0);
if (ret < 0)
return NOTIFY_BAD;
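
The cpufreq fixes in this group all converge on one pattern: cpufreq_cpu_get() returns a referenced policy (or NULL), and every successful get must be paired with a put. A hedged sketch of that pattern with a hypothetical helper:

#include <linux/cpufreq.h>

static int example_set_cpu0_freq(unsigned int freq)
{
	struct cpufreq_policy *policy;
	int ret;

	policy = cpufreq_cpu_get(0);	/* takes a reference; may be NULL */
	if (!policy)
		return -ENODEV;

	ret = cpufreq_driver_target(policy, freq, 0);
	cpufreq_cpu_put(policy);	/* always drop the reference */

	return ret;
}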
{
struct tegra186_cpufreq_data *data;
struct tegra_bpmp *bpmp;
- struct resource *res;
unsigned int i = 0, err;
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (IS_ERR(bpmp))
return PTR_ERR(bpmp);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->regs = devm_ioremap_resource(&pdev->dev, res);
+ data->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->regs)) {
err = PTR_ERR(data->regs);
goto put_bpmp;
config ARM_BIG_LITTLE_CPUIDLE
bool "Support for ARM big.LITTLE processors"
- depends on ARCH_VEXPRESS_TC2_PM || ARCH_EXYNOS
+ depends on ARCH_VEXPRESS_TC2_PM || ARCH_EXYNOS || COMPILE_TEST
depends on MCPM && !ARM64
select ARM_CPU_SUSPEND
select CPU_IDLE_MULTIPLE_DRIVERS
config ARM_KIRKWOOD_CPUIDLE
bool "CPU Idle Driver for Marvell Kirkwood SoCs"
- depends on MACH_KIRKWOOD && !ARM64
+ depends on (MACH_KIRKWOOD || COMPILE_TEST) && !ARM64
help
This adds the CPU Idle driver for Marvell Kirkwood SoCs.
config ARM_ZYNQ_CPUIDLE
bool "CPU Idle Driver for Xilinx Zynq processors"
- depends on ARCH_ZYNQ && !ARM64
+ depends on (ARCH_ZYNQ || COMPILE_TEST) && !ARM64
help
Select this to enable cpuidle on Xilinx Zynq processors.
config ARM_AT91_CPUIDLE
bool "Cpu Idle Driver for the AT91 processors"
default y
- depends on ARCH_AT91 && !ARM64
+ depends on (ARCH_AT91 || COMPILE_TEST) && !ARM64
help
Select this to enable cpuidle for AT91 processors.
config ARM_EXYNOS_CPUIDLE
bool "Cpu Idle Driver for the Exynos processors"
- depends on ARCH_EXYNOS && !ARM64
+ depends on (ARCH_EXYNOS || COMPILE_TEST) && !ARM64
select ARCH_NEEDS_CPU_IDLE_COUPLED if SMP
help
Select this to enable cpuidle for Exynos processors.
config ARM_MVEBU_V7_CPUIDLE
bool "CPU Idle Driver for mvebu v7 family processors"
- depends on ARCH_MVEBU && !ARM64
+ depends on (ARCH_MVEBU || COMPILE_TEST) && !ARM64
help
Select this to enable cpuidle on Armada 370, 38x and XP processors.
* @coupled_cpus: mask of cpus that are part of the coupled set
* @requested_state: array of requested states for cpus in the coupled set
* @ready_waiting_counts: combined count of cpus in ready or waiting loops
+ * @abort_barrier: synchronisation point for abort cases
* @online_count: count of cpus that are online
* @refcnt: reference count of cpuidle devices that are using this struct
* @prevent: flag to prevent coupled idle while a cpu is hotplugging
/**
* cpuidle_coupled_poke_others - wake up all other cpus that may be waiting
- * @dev: struct cpuidle_device for this cpu
+ * @this_cpu: target cpu
* @coupled: the struct coupled that contains the current cpu
*
* Calls cpuidle_coupled_poke on all other online cpus.
/**
* cpuidle_coupled_set_waiting - mark this cpu as in the wait loop
- * @dev: struct cpuidle_device for this cpu
+ * @cpu: target cpu
* @coupled: the struct coupled that contains the current cpu
* @next_state: the index in drv->states of the requested state for this cpu
*
/**
* cpuidle_coupled_set_not_waiting - mark this cpu as leaving the wait loop
- * @dev: struct cpuidle_device for this cpu
+ * @cpu: target cpu
* @coupled: the struct coupled that contains the current cpu
*
* Removes the requested idle state for the specified cpuidle device.
/**
* cpuidle_coupled_clear_pokes - spin until the poke interrupt is processed
- * @cpu - this cpu
+ * @cpu: this cpu
*
* Turns on interrupts and spins until any outstanding poke interrupts have
* been processed and the poke bit has been cleared.
static int __init clps711x_cpuidle_probe(struct platform_device *pdev)
{
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- clps711x_halt = devm_ioremap_resource(&pdev->dev, res);
+ clps711x_halt = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(clps711x_halt))
return PTR_ERR(clps711x_halt);
/* Initialize CPU idle by registering the idle states */
static int kirkwood_cpuidle_probe(struct platform_device *pdev)
{
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ddr_operation_base = devm_ioremap_resource(&pdev->dev, res);
+ ddr_operation_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ddr_operation_base))
return PTR_ERR(ddr_operation_base);
* cpuidle_find_deepest_state - Find the deepest available idle state.
* @drv: cpuidle driver for the given CPU.
* @dev: cpuidle device for the given CPU.
+ * @latency_limit_ns: Idle state exit latency limit
+ *
+ * Return: the index of the deepest available idle state.
*/
int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev,
if (!try_module_get(drv->owner))
return -EINVAL;
- for (i = 0; i < drv->state_count; i++)
+ for (i = 0; i < drv->state_count; i++) {
if (drv->states[i].flags & CPUIDLE_FLAG_UNUSABLE)
dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_DRIVER;
+ if (drv->states[i].flags & CPUIDLE_FLAG_OFF)
+ dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_USER;
+ }
+
per_cpu(cpuidle_devices, dev->cpu) = dev;
list_add(&dev->device_list, &cpuidle_detected_devices);
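
CPUIDLE_FLAG_OFF, handled above, lets a driver ship an idle state disabled by default while still allowing users to enable it through sysfs. A sketch of how a driver might declare such a state; the driver, names and latency values are invented for illustration:

#include <linux/cpuidle.h>
#include <linux/module.h>

static int example_enter(struct cpuidle_device *dev,
			 struct cpuidle_driver *drv, int index)
{
	/* a real driver would enter the hardware idle state here */
	return index;
}

static struct cpuidle_driver example_idle_driver = {
	.name = "example_idle",
	.owner = THIS_MODULE,
	.states = {
		{
			.name = "C1",
			.desc = "shallow state",
			.exit_latency = 2,	/* us */
			.target_residency = 5,	/* us */
			.enter = example_enter,
		},
		{
			.name = "C2",
			.desc = "deep state, disabled by default",
			.exit_latency = 100,
			.target_residency = 400,
			.flags = CPUIDLE_FLAG_OFF, /* off until enabled via sysfs */
			.enter = example_enter,
		},
	},
	.state_count = 2,
};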
{
int i;
- drv->refcnt = 0;
-
/*
* Use all possible CPUs as the default, because if the kernel boots
* with some CPUs offline and then we online one of them, the CPU
*/
static void __cpuidle_unregister_driver(struct cpuidle_driver *drv)
{
- if (WARN_ON(drv->refcnt > 0))
- return;
-
if (drv->bctimer) {
drv->bctimer = 0;
on_each_cpu_mask(drv->cpumask, cpuidle_setup_broadcast_timer,
}
EXPORT_SYMBOL_GPL(cpuidle_get_cpu_driver);
-/**
- * cpuidle_driver_ref - get a reference to the driver.
- *
- * Increment the reference counter of the cpuidle driver associated with
- * the current CPU.
- *
- * Returns a pointer to the driver, or NULL if the current CPU has no driver.
- */
-struct cpuidle_driver *cpuidle_driver_ref(void)
-{
- struct cpuidle_driver *drv;
-
- spin_lock(&cpuidle_driver_lock);
-
- drv = cpuidle_get_driver();
- if (drv)
- drv->refcnt++;
-
- spin_unlock(&cpuidle_driver_lock);
- return drv;
-}
-
-/**
- * cpuidle_driver_unref - puts down the refcount for the driver
- *
- * Decrement the reference counter of the cpuidle driver associated with
- * the current CPU.
- */
-void cpuidle_driver_unref(void)
-{
- struct cpuidle_driver *drv;
-
- spin_lock(&cpuidle_driver_lock);
-
- drv = cpuidle_get_driver();
- if (drv && !WARN_ON(drv->refcnt <= 0))
- drv->refcnt--;
-
- spin_unlock(&cpuidle_driver_lock);
-}
-
/**
* cpuidle_driver_state_disabled - Disable or enable an idle state
* @drv: cpuidle driver owning the state
* pattern detection.
*/
cpu_data->intervals[cpu_data->interval_idx++] = measured_ns;
- if (cpu_data->interval_idx > INTERVALS)
+ if (cpu_data->interval_idx >= INTERVALS)
cpu_data->interval_idx = 0;
}
/**
* cpuidle_add_interface - add CPU global sysfs attributes
+ * @dev: the target device
*/
int cpuidle_add_interface(struct device *dev)
{
/**
* cpuidle_remove_interface - remove CPU global sysfs attributes
+ * @dev: the target device
*/
void cpuidle_remove_interface(struct device *dev)
{
return size;
}
+static ssize_t show_state_default_status(struct cpuidle_state *state,
+ struct cpuidle_state_usage *state_usage,
+ char *buf)
+{
+ return sprintf(buf, "%s\n",
+ state->flags & CPUIDLE_FLAG_OFF ? "disabled" : "enabled");
+}
+
define_one_state_ro(name, show_state_name);
define_one_state_ro(desc, show_state_desc);
define_one_state_ro(latency, show_state_exit_latency);
define_one_state_rw(disable, show_state_disable, store_state_disable);
define_one_state_ro(above, show_state_above);
define_one_state_ro(below, show_state_below);
+define_one_state_ro(default_status, show_state_default_status);
static struct attribute *cpuidle_state_default_attrs[] = {
&attr_name.attr,
&attr_disable.attr,
&attr_above.attr,
&attr_below.attr,
+ &attr_default_status.attr,
NULL
};
/**
* cpuidle_add_driver_sysfs - adds the driver name sysfs attribute
- * @device: the target device
+ * @dev: the target device
*/
static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev)
{
/**
* cpuidle_remove_driver_sysfs - removes the driver name sysfs attribute
- * @device: the target device
+ * @dev: the target device
*/
static void cpuidle_remove_driver_sysfs(struct cpuidle_device *dev)
{
int req_id;
/* Status of the SEC request */
- int fake_busy;
+ atomic_t fake_busy;
};
/**
};
struct sec_dfx {
- u64 send_cnt;
- u64 recv_cnt;
+ atomic64_t send_cnt;
+ atomic64_t recv_cnt;
};
struct sec_debug {
return;
}
- __sync_add_and_fetch(&req->ctx->sec->debug.dfx.recv_cnt, 1);
+ atomic64_inc(&req->ctx->sec->debug.dfx.recv_cnt);
req->ctx->req_op->buf_unmap(req->ctx, req);
mutex_lock(&qp_ctx->req_lock);
ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
mutex_unlock(&qp_ctx->req_lock);
- __sync_add_and_fetch(&ctx->sec->debug.dfx.send_cnt, 1);
+ atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
if (ret == -EBUSY)
return -ENOBUFS;
if (!ret) {
- if (req->fake_busy)
+ if (atomic_read(&req->fake_busy))
ret = -EBUSY;
else
ret = -EINPROGRESS;
if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt)
sec_update_iv(req);
- if (__sync_bool_compare_and_swap(&req->fake_busy, 1, 0))
+ if (atomic_cmpxchg(&req->fake_busy, 1, 0) == 1)
sk_req->base.complete(&sk_req->base, -EINPROGRESS);
sk_req->base.complete(&sk_req->base, req->err_type);
}
if (ctx->fake_req_limit <= atomic_inc_return(&qp_ctx->pending_reqs))
- req->fake_busy = 1;
+ atomic_set(&req->fake_busy, 1);
else
- req->fake_busy = 0;
+ atomic_set(&req->fake_busy, 0);
ret = ctx->req_op->get_res(ctx, req);
if (ret) {
.write = sec_debug_write,
};
+static int debugfs_atomic64_t_get(void *data, u64 *val)
+{
+ *val = atomic64_read((atomic64_t *)data);
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(fops_atomic64_t_ro, debugfs_atomic64_t_get, NULL,
+ "%lld\n");
+
static int sec_core_debug_init(struct sec_dev *sec)
{
struct hisi_qm *qm = &sec->qm;
debugfs_create_regset32("regs", 0444, tmp_d, regset);
- debugfs_create_u64("send_cnt", 0444, tmp_d, &dfx->send_cnt);
+ debugfs_create_file("send_cnt", 0444, tmp_d, &dfx->send_cnt,
+ &fops_atomic64_t_ro);
- debugfs_create_u64("recv_cnt", 0444, tmp_d, &dfx->recv_cnt);
+ debugfs_create_file("recv_cnt", 0444, tmp_d, &dfx->recv_cnt,
+ &fops_atomic64_t_ro);
return 0;
}
comment "DEVFREQ Drivers"
config ARM_EXYNOS_BUS_DEVFREQ
- tristate "ARM EXYNOS Generic Memory Bus DEVFREQ Driver"
+ tristate "ARM Exynos Generic Memory Bus DEVFREQ Driver"
depends on ARCH_EXYNOS || COMPILE_TEST
select DEVFREQ_GOV_SIMPLE_ONDEMAND
select DEVFREQ_GOV_PASSIVE
and adjusts the operating frequencies and voltages with OPP support.
This does not yet operate with optimal voltages.
+config ARM_IMX8M_DDRC_DEVFREQ
+ tristate "i.MX8M DDRC DEVFREQ Driver"
+ depends on (ARCH_MXC && HAVE_ARM_SMCCC) || \
+ (COMPILE_TEST && HAVE_ARM_SMCCC)
+ select DEVFREQ_GOV_SIMPLE_ONDEMAND
+ select DEVFREQ_GOV_USERSPACE
+ help
+ This adds the DEVFREQ driver for the i.MX8M DDR Controller. It allows
+ adjusting the DRAM frequency.
+
config ARM_TEGRA_DEVFREQ
tristate "NVIDIA Tegra30/114/124/210 DEVFREQ Driver"
depends on ARCH_TEGRA_3x_SOC || ARCH_TEGRA_114_SOC || \
config ARM_RK3399_DMC_DEVFREQ
tristate "ARM RK3399 DMC DEVFREQ Driver"
- depends on ARCH_ROCKCHIP
+ depends on (ARCH_ROCKCHIP && HAVE_ARM_SMCCC) || \
+ (COMPILE_TEST && HAVE_ARM_SMCCC)
select DEVFREQ_EVENT_ROCKCHIP_DFI
select DEVFREQ_GOV_SIMPLE_ONDEMAND
select PM_DEVFREQ_EVENT
help
- This adds the DEVFREQ driver for the RK3399 DMC(Dynamic Memory Controller).
- It sets the frequency for the memory controller and reads the usage counts
- from hardware.
+ This adds the DEVFREQ driver for the RK3399 DMC(Dynamic Memory Controller).
+ It sets the frequency for the memory controller and reads the usage counts
+ from hardware.
source "drivers/devfreq/event/Kconfig"
# DEVFREQ Drivers
obj-$(CONFIG_ARM_EXYNOS_BUS_DEVFREQ) += exynos-bus.o
+obj-$(CONFIG_ARM_IMX8M_DDRC_DEVFREQ) += imx8m-ddrc.o
obj-$(CONFIG_ARM_RK3399_DMC_DEVFREQ) += rk3399_dmc.o
obj-$(CONFIG_ARM_TEGRA_DEVFREQ) += tegra30-devfreq.o
obj-$(CONFIG_ARM_TEGRA20_DEVFREQ) += tegra20-devfreq.o
/**
* devfreq_event_remove_edev() - Remove the devfreq-event device registered.
- * @dev : the devfreq-event device
+ * @edev : the devfreq-event device
*
- * Note that this function remove the registered devfreq-event device.
+ * Note that this function removes the registered devfreq-event device.
*/
int devfreq_event_remove_edev(struct devfreq_event_dev *edev)
{
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/sched.h>
+#include <linux/debugfs.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/init.h>
#define HZ_PER_KHZ 1000
static struct class *devfreq_class;
+static struct dentry *devfreq_debugfs;
/*
* devfreq core provides delayed work based load monitoring helper
int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
{
int lev, prev_lev, ret = 0;
- unsigned long cur_time;
+ u64 cur_time;
lockdep_assert_held(&devfreq->lock);
- cur_time = jiffies;
+ cur_time = get_jiffies_64();
/* Immediately exit if previous_freq is not initialized yet. */
if (!devfreq->previous_freq)
goto out;
}
- devfreq->time_in_state[prev_lev] +=
- cur_time - devfreq->last_stat_updated;
+ devfreq->stats.time_in_state[prev_lev] +=
+ cur_time - devfreq->stats.last_update;
lev = devfreq_get_freq_level(devfreq, freq);
if (lev < 0) {
}
if (lev != prev_lev) {
- devfreq->trans_table[(prev_lev *
- devfreq->profile->max_state) + lev]++;
- devfreq->total_trans++;
+ devfreq->stats.trans_table[
+ (prev_lev * devfreq->profile->max_state) + lev]++;
+ devfreq->stats.total_trans++;
}
out:
- devfreq->last_stat_updated = cur_time;
+ devfreq->stats.last_update = cur_time;
return ret;
}
EXPORT_SYMBOL(devfreq_update_status);
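
The switch to u64 timestamps matters on 32-bit kernels, where (unsigned long)jiffies wraps after roughly 49.7 days at HZ=1000 and would corrupt accumulated deltas. A minimal sketch of the wrap-safe delta pattern, assuming a hypothetical example_last_update:

#include <linux/jiffies.h>

static u64 example_last_update;

static u64 example_elapsed_ms(void)
{
	u64 now = get_jiffies_64();	/* 64-bit, effectively wrap-free */
	u64 delta = now - example_last_update;

	example_last_update = now;
	return jiffies64_to_msecs(delta);
}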
msecs_to_jiffies(devfreq->profile->polling_ms));
out_update:
- devfreq->last_stat_updated = jiffies;
+ devfreq->stats.last_update = get_jiffies_64();
devfreq->stop_polling = false;
if (devfreq->profile->get_cur_freq &&
goto err_out;
}
- devfreq->trans_table = devm_kzalloc(&devfreq->dev,
+ devfreq->stats.trans_table = devm_kzalloc(&devfreq->dev,
array3_size(sizeof(unsigned int),
devfreq->profile->max_state,
devfreq->profile->max_state),
GFP_KERNEL);
- if (!devfreq->trans_table) {
+ if (!devfreq->stats.trans_table) {
mutex_unlock(&devfreq->lock);
err = -ENOMEM;
goto err_devfreq;
}
- devfreq->time_in_state = devm_kcalloc(&devfreq->dev,
+ devfreq->stats.time_in_state = devm_kcalloc(&devfreq->dev,
devfreq->profile->max_state,
- sizeof(unsigned long),
+ sizeof(*devfreq->stats.time_in_state),
GFP_KERNEL);
- if (!devfreq->time_in_state) {
+ if (!devfreq->stats.time_in_state) {
mutex_unlock(&devfreq->lock);
err = -ENOMEM;
goto err_devfreq;
}
- devfreq->last_stat_updated = jiffies;
+ devfreq->stats.total_trans = 0;
+ devfreq->stats.last_update = get_jiffies_64();
srcu_init_notifier_head(&devfreq->transition_notifier_list);
}
EXPORT_SYMBOL(devfreq_remove_governor);
+static ssize_t name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct devfreq *devfreq = to_devfreq(dev);
+ return sprintf(buf, "%s\n", dev_name(devfreq->dev.parent));
+}
+static DEVICE_ATTR_RO(name);
+
static ssize_t governor_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%lu\n", min_freq);
}
+static DEVICE_ATTR_RW(min_freq);
static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
return count;
}
-static DEVICE_ATTR_RW(min_freq);
static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr,
char *buf)
devfreq->profile->freq_table[i]);
for (j = 0; j < max_state; j++)
len += sprintf(buf + len, "%10u",
- devfreq->trans_table[(i * max_state) + j]);
- len += sprintf(buf + len, "%10u\n",
- jiffies_to_msecs(devfreq->time_in_state[i]));
+ devfreq->stats.trans_table[(i * max_state) + j]);
+
+ len += sprintf(buf + len, "%10llu\n", (u64)
+ jiffies64_to_msecs(devfreq->stats.time_in_state[i]));
}
len += sprintf(buf + len, "Total transition : %u\n",
- devfreq->total_trans);
+ devfreq->stats.total_trans);
return len;
}
-static DEVICE_ATTR_RO(trans_stat);
+
+static ssize_t trans_stat_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct devfreq *df = to_devfreq(dev);
+ int err, value;
+
+ if (df->profile->max_state == 0)
+ return count;
+
+ err = kstrtoint(buf, 10, &value);
+ if (err || value != 0)
+ return -EINVAL;
+
+ mutex_lock(&df->lock);
+ memset(df->stats.time_in_state, 0, (df->profile->max_state *
+ sizeof(*df->stats.time_in_state)));
+ memset(df->stats.trans_table, 0, array3_size(sizeof(unsigned int),
+ df->profile->max_state,
+ df->profile->max_state));
+ df->stats.total_trans = 0;
+ df->stats.last_update = get_jiffies_64();
+ mutex_unlock(&df->lock);
+
+ return count;
+}
+static DEVICE_ATTR_RW(trans_stat);
static struct attribute *devfreq_attrs[] = {
+ &dev_attr_name.attr,
&dev_attr_governor.attr,
&dev_attr_available_governors.attr,
&dev_attr_cur_freq.attr,
};
ATTRIBUTE_GROUPS(devfreq);
+/**
+ * devfreq_summary_show() - Show the summary of the devfreq devices
+ * @s: seq_file instance to show the summary of devfreq devices
+ * @data: not used
+ *
+ * Show the summary of the devfreq devices via the 'devfreq_summary' debugfs
+ * file, so that users can check the detailed state of each devfreq device.
+ *
+ * Return 0 always, because it only shows information without changing any data.
+ */
+static int devfreq_summary_show(struct seq_file *s, void *data)
+{
+ struct devfreq *devfreq;
+ struct devfreq *p_devfreq = NULL;
+ unsigned long cur_freq, min_freq, max_freq;
+ unsigned int polling_ms;
+
+ seq_printf(s, "%-30s %-10s %-10s %-15s %10s %12s %12s %12s\n",
+ "dev_name",
+ "dev",
+ "parent_dev",
+ "governor",
+ "polling_ms",
+ "cur_freq_Hz",
+ "min_freq_Hz",
+ "max_freq_Hz");
+ seq_printf(s, "%30s %10s %10s %15s %10s %12s %12s %12s\n",
+ "------------------------------",
+ "----------",
+ "----------",
+ "---------------",
+ "----------",
+ "------------",
+ "------------",
+ "------------");
+
+ mutex_lock(&devfreq_list_lock);
+
+ list_for_each_entry_reverse(devfreq, &devfreq_list, node) {
+#if IS_ENABLED(CONFIG_DEVFREQ_GOV_PASSIVE)
+ if (!strncmp(devfreq->governor_name, DEVFREQ_GOV_PASSIVE,
+ DEVFREQ_NAME_LEN)) {
+ struct devfreq_passive_data *data = devfreq->data;
+
+ if (data)
+ p_devfreq = data->parent;
+ } else {
+ p_devfreq = NULL;
+ }
+#endif
+
+ mutex_lock(&devfreq->lock);
+ cur_freq = devfreq->previous_freq;
+ get_freq_range(devfreq, &min_freq, &max_freq);
+ polling_ms = devfreq->profile->polling_ms;
+ mutex_unlock(&devfreq->lock);
+
+ seq_printf(s,
+ "%-30s %-10s %-10s %-15s %10d %12ld %12ld %12ld\n",
+ dev_name(devfreq->dev.parent),
+ dev_name(&devfreq->dev),
+ p_devfreq ? dev_name(&p_devfreq->dev) : "null",
+ devfreq->governor_name,
+ polling_ms,
+ cur_freq,
+ min_freq,
+ max_freq);
+ }
+
+ mutex_unlock(&devfreq_list_lock);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(devfreq_summary);
+
static int __init devfreq_init(void)
{
devfreq_class = class_create(THIS_MODULE, "devfreq");
}
devfreq_class->dev_groups = devfreq_groups;
+ devfreq_debugfs = debugfs_create_dir("devfreq", NULL);
+ debugfs_create_file("devfreq_summary", 0444,
+ devfreq_debugfs, NULL,
+ &devfreq_summary_fops);
+
return 0;
}
subsys_initcall(devfreq_init);
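
devfreq_summary relies on DEFINE_SHOW_ATTRIBUTE(), which generates the single_open()-based file_operations for one show callback. A self-contained sketch with an invented name:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int example_summary_show(struct seq_file *s, void *data)
{
	seq_puts(s, "hello\n");
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(example_summary);	/* generates example_summary_fops */

/* registration:
 * debugfs_create_file("example_summary", 0444, NULL, NULL,
 *		       &example_summary_fops);
 */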
/**
* devm_devfreq_register_notifier()
- - Resource-managed devfreq_register_notifier()
+ * - Resource-managed devfreq_register_notifier()
* @dev: The devfreq user device. (parent of devfreq)
* @devfreq: The devfreq object.
* @nb: The notifier block to be unregistered.
/**
* devm_devfreq_unregister_notifier()
- - Resource-managed devfreq_unregister_notifier()
+ * - Resource-managed devfreq_unregister_notifier()
* @dev: The devfreq user device. (parent of devfreq)
* @devfreq: The devfreq object.
* @nb: The notifier block to be unregistered.
if PM_DEVFREQ_EVENT
config DEVFREQ_EVENT_EXYNOS_NOCP
- tristate "EXYNOS NoC (Network On Chip) Probe DEVFREQ event Driver"
+ tristate "Exynos NoC (Network On Chip) Probe DEVFREQ event Driver"
depends on ARCH_EXYNOS || COMPILE_TEST
select PM_OPP
select REGMAP_MMIO
(Network on Chip) Probe counters to measure the bandwidth of AXI bus.
config DEVFREQ_EVENT_EXYNOS_PPMU
- tristate "EXYNOS PPMU (Platform Performance Monitoring Unit) DEVFREQ event Driver"
+ tristate "Exynos PPMU (Platform Performance Monitoring Unit) DEVFREQ event Driver"
depends on ARCH_EXYNOS || COMPILE_TEST
select PM_OPP
help
config DEVFREQ_EVENT_ROCKCHIP_DFI
tristate "ROCKCHIP DFI DEVFREQ event Driver"
- depends on ARCH_ROCKCHIP
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
help
	This adds the devfreq-event driver for Rockchip SoCs. It provides a DFI
	(DDR Monitor Module) driver to count the DDR load.
// SPDX-License-Identifier: GPL-2.0-only
/*
- * exynos-nocp.c - EXYNOS NoC (Network On Chip) Probe support
+ * exynos-nocp.c - Exynos NoC (Network On Chip) Probe support
*
* Copyright (c) 2016 Samsung Electronics Co., Ltd.
* Author : Chanwoo Choi <cw00.choi@samsung.com>
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * exynos-nocp.h - EXYNOS NoC (Network on Chip) Probe header file
+ * exynos-nocp.h - Exynos NoC (Network on Chip) Probe header file
*
* Copyright (c) 2016 Samsung Electronics Co., Ltd.
* Author : Chanwoo Choi <cw00.choi@samsung.com>
// SPDX-License-Identifier: GPL-2.0-only
/*
- * exynos_ppmu.c - EXYNOS PPMU (Platform Performance Monitoring Unit) support
+ * exynos_ppmu.c - Exynos PPMU (Platform Performance Monitoring Unit) support
*
* Copyright (c) 2014-2015 Samsung Electronics Co., Ltd.
* Author : Chanwoo Choi <cw00.choi@samsung.com>
PPMU_EVENT(dmc1_1),
};
-static int exynos_ppmu_find_ppmu_id(struct devfreq_event_dev *edev)
+static int __exynos_ppmu_find_ppmu_id(const char *edev_name)
{
int i;
for (i = 0; i < ARRAY_SIZE(ppmu_events); i++)
- if (!strcmp(edev->desc->name, ppmu_events[i].name))
+ if (!strcmp(edev_name, ppmu_events[i].name))
return ppmu_events[i].id;
return -EINVAL;
}
+static int exynos_ppmu_find_ppmu_id(struct devfreq_event_dev *edev)
+{
+ return __exynos_ppmu_find_ppmu_id(edev->desc->name);
+}
+
/*
* The devfreq-event ops structure for PPMU v1.1
*/
* use default if not.
*/
if (info->ppmu_type == EXYNOS_TYPE_PPMU_V2) {
- struct devfreq_event_dev edev;
int id;
/* Not all registers take the same value for
* read+write data count.
*/
- edev.desc = &desc[j];
- id = exynos_ppmu_find_ppmu_id(&edev);
+ id = __exynos_ppmu_find_ppmu_id(desc[j].name);
switch (id) {
case PPMU_PMNCNT0:
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * exynos_ppmu.h - EXYNOS PPMU header file
+ * exynos_ppmu.h - Exynos PPMU header file
*
* Copyright (c) 2015 Samsung Electronics Co., Ltd.
* Author : Chanwoo Choi <cw00.choi@samsung.com>
{
struct device *dev = &pdev->dev;
struct rockchip_dfi *data;
- struct resource *res;
struct devfreq_event_desc *desc;
struct device_node *np = pdev->dev.of_node, *node;
if (!data)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->regs = devm_ioremap_resource(&pdev->dev, res);
+ data->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->regs))
return PTR_ERR(data->regs);
node = of_parse_phandle(np, "rockchip,pmu", 0);
if (node) {
data->regmap_pmu = syscon_node_to_regmap(node);
+ of_node_put(node);
if (IS_ERR(data->regmap_pmu))
return PTR_ERR(data->regmap_pmu);
}
#include <linux/device.h>
#include <linux/export.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
-#include <linux/slab.h>
#define DEFAULT_SATURATION_RATIO 40
ret = exynos_bus_get_event(bus, &edata);
if (ret < 0) {
+ dev_err(dev, "failed to get event from devfreq-event devices\n");
stat->total_time = stat->busy_time = 0;
goto err;
}
return ret;
}
-static int exynos_bus_probe(struct platform_device *pdev)
+static int exynos_bus_profile_init(struct exynos_bus *bus,
+ struct devfreq_dev_profile *profile)
{
- struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node, *node;
- struct devfreq_dev_profile *profile;
+ struct device *dev = bus->dev;
struct devfreq_simple_ondemand_data *ondemand_data;
- struct devfreq_passive_data *passive_data;
- struct devfreq *parent_devfreq;
- struct exynos_bus *bus;
- int ret, max_state;
- unsigned long min_freq, max_freq;
- bool passive = false;
-
- if (!np) {
- dev_err(dev, "failed to find devicetree node\n");
- return -EINVAL;
- }
-
- bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL);
- if (!bus)
- return -ENOMEM;
- mutex_init(&bus->lock);
- bus->dev = &pdev->dev;
- platform_set_drvdata(pdev, bus);
-
- profile = devm_kzalloc(dev, sizeof(*profile), GFP_KERNEL);
- if (!profile)
- return -ENOMEM;
-
- node = of_parse_phandle(dev->of_node, "devfreq", 0);
- if (node) {
- of_node_put(node);
- passive = true;
- } else {
- ret = exynos_bus_parent_parse_of(np, bus);
- if (ret < 0)
- return ret;
- }
-
- /* Parse the device-tree to get the resource information */
- ret = exynos_bus_parse_of(np, bus);
- if (ret < 0)
- goto err_reg;
-
- if (passive)
- goto passive;
+ int ret;
/* Initialize the struct profile and governor data for parent device */
profile->polling_ms = 50;
profile->exit = exynos_bus_exit;
ondemand_data = devm_kzalloc(dev, sizeof(*ondemand_data), GFP_KERNEL);
- if (!ondemand_data) {
- ret = -ENOMEM;
- goto err;
- }
+ if (!ondemand_data)
+ return -ENOMEM;
+
ondemand_data->upthreshold = 40;
ondemand_data->downdifferential = 5;
ondemand_data);
if (IS_ERR(bus->devfreq)) {
dev_err(dev, "failed to add devfreq device\n");
- ret = PTR_ERR(bus->devfreq);
- goto err;
+ return PTR_ERR(bus->devfreq);
}
/* Register opp_notifier to catch the change of OPP */
ret = devm_devfreq_register_opp_notifier(dev, bus->devfreq);
if (ret < 0) {
dev_err(dev, "failed to register opp notifier\n");
- goto err;
+ return ret;
}
/*
ret = exynos_bus_enable_edev(bus);
if (ret < 0) {
dev_err(dev, "failed to enable devfreq-event devices\n");
- goto err;
+ return ret;
}
ret = exynos_bus_set_event(bus);
if (ret < 0) {
dev_err(dev, "failed to set event to devfreq-event devices\n");
- goto err;
+ goto err_edev;
}
- goto out;
-passive:
+ return 0;
+
+err_edev:
+ if (exynos_bus_disable_edev(bus))
+ dev_warn(dev, "failed to disable the devfreq-event devices\n");
+
+ return ret;
+}
+
+static int exynos_bus_profile_init_passive(struct exynos_bus *bus,
+ struct devfreq_dev_profile *profile)
+{
+ struct device *dev = bus->dev;
+ struct devfreq_passive_data *passive_data;
+ struct devfreq *parent_devfreq;
+
/* Initialize the struct profile and governor data for passive device */
profile->target = exynos_bus_target;
profile->exit = exynos_bus_passive_exit;
/* Get the instance of parent devfreq device */
parent_devfreq = devfreq_get_devfreq_by_phandle(dev, 0);
- if (IS_ERR(parent_devfreq)) {
- ret = -EPROBE_DEFER;
- goto err;
- }
+ if (IS_ERR(parent_devfreq))
+ return -EPROBE_DEFER;
passive_data = devm_kzalloc(dev, sizeof(*passive_data), GFP_KERNEL);
- if (!passive_data) {
- ret = -ENOMEM;
- goto err;
- }
+ if (!passive_data)
+ return -ENOMEM;
+
passive_data->parent = parent_devfreq;
/* Add devfreq device for exynos bus with passive governor */
if (IS_ERR(bus->devfreq)) {
dev_err(dev,
"failed to add devfreq dev with passive governor\n");
- ret = PTR_ERR(bus->devfreq);
- goto err;
+ return PTR_ERR(bus->devfreq);
+ }
+
+ return 0;
+}
+
+static int exynos_bus_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node, *node;
+ struct devfreq_dev_profile *profile;
+ struct exynos_bus *bus;
+ int ret, max_state;
+ unsigned long min_freq, max_freq;
+ bool passive = false;
+
+ if (!np) {
+ dev_err(dev, "failed to find devicetree node\n");
+ return -EINVAL;
}
-out:
+ bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL);
+ if (!bus)
+ return -ENOMEM;
+ mutex_init(&bus->lock);
+ bus->dev = &pdev->dev;
+ platform_set_drvdata(pdev, bus);
+
+ profile = devm_kzalloc(dev, sizeof(*profile), GFP_KERNEL);
+ if (!profile)
+ return -ENOMEM;
+
+ node = of_parse_phandle(dev->of_node, "devfreq", 0);
+ if (node) {
+ of_node_put(node);
+ passive = true;
+ } else {
+ ret = exynos_bus_parent_parse_of(np, bus);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Parse the device-tree to get the resource information */
+ ret = exynos_bus_parse_of(np, bus);
+ if (ret < 0)
+ goto err_reg;
+
+ if (passive)
+ ret = exynos_bus_profile_init_passive(bus, profile);
+ else
+ ret = exynos_bus_profile_init(bus, profile);
+
+ if (ret < 0)
+ goto err;
+
max_state = bus->devfreq->profile->max_state;
min_freq = (bus->devfreq->profile->freq_table[0] / 1000);
max_freq = (bus->devfreq->profile->freq_table[max_state - 1] / 1000);
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/devfreq.h>
+#include <linux/pm_opp.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/arm-smccc.h>
+
+#define IMX_SIP_DDR_DVFS 0xc2000004
+
+/* Query available frequencies. */
+#define IMX_SIP_DDR_DVFS_GET_FREQ_COUNT 0x10
+#define IMX_SIP_DDR_DVFS_GET_FREQ_INFO 0x11
+
+/*
+ * This should be in a 1:1 mapping with devicetree OPPs but
+ * firmware provides additional info.
+ */
+struct imx8m_ddrc_freq {
+ unsigned long rate;
+ unsigned long smcarg;
+ int dram_core_parent_index;
+ int dram_alt_parent_index;
+ int dram_apb_parent_index;
+};
+
+/* Hardware limitation */
+#define IMX8M_DDRC_MAX_FREQ_COUNT 4
+
+/*
+ * i.MX8M DRAM Controller clocks have the following structure (abridged):
+ *
+ * +----------+       |\            +------+
+ * | dram_pll |-------|M| dram_core |      |
+ * +----------+       |U|---------->|  D   |
+ *                /---|X|           |  D   |
+ * dram_alt_root  |   |/            |  R   |
+ *      |         |                 |  C   |
+ * +---------+    |                 |      |
+ * |FIX DIV/4|    |                 |      |
+ * +---------+    |                 |      |
+ * composite:     |                 |      |
+ * +----------+   |                 |      |
+ * | dram_alt |---/                 |      |
+ * +----------+                     |      |
+ * | dram_apb |-------------------->|      |
+ * +----------+                     +------+
+ *
+ * The dram_pll is used for higher rates and dram_alt is used for lower rates.
+ *
+ * Frequency switching is implemented in TF-A (via SMC call) and can change the
+ * configuration of the clocks, including mux parents. The dram_alt and
+ * dram_apb clocks are "imx composite" and their parent can change too.
+ *
+ * We need to prepare/enable the new mux parents ahead of switching and
+ * update their information afterwards.
+ */
+struct imx8m_ddrc {
+ struct devfreq_dev_profile profile;
+ struct devfreq *devfreq;
+
+ /* For frequency switching: */
+ struct clk *dram_core;
+ struct clk *dram_pll;
+ struct clk *dram_alt;
+ struct clk *dram_apb;
+
+ int freq_count;
+ struct imx8m_ddrc_freq freq_table[IMX8M_DDRC_MAX_FREQ_COUNT];
+};
+
+static struct imx8m_ddrc_freq *imx8m_ddrc_find_freq(struct imx8m_ddrc *priv,
+ unsigned long rate)
+{
+ struct imx8m_ddrc_freq *freq;
+ int i;
+
+ /*
+ * Firmware reports values in MT/s, so we round down from Hz.
+ * The rounding is extra generous to ensure a match.
+ */
+ rate = DIV_ROUND_CLOSEST(rate, 250000);
+ for (i = 0; i < priv->freq_count; ++i) {
+ freq = &priv->freq_table[i];
+ if (freq->rate == rate ||
+ freq->rate + 1 == rate ||
+ freq->rate - 1 == rate)
+ return freq;
+ }
+
+ return NULL;
+}
+
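Worked example of the conversion above: a dram_core rate of 750000000 Hz divides to 3000, so firmware table entries of 2999, 3000 or 3001 MT/s all match. The same check in isolation (example_rate_matches is hypothetical):

#include <linux/kernel.h>	/* DIV_ROUND_CLOSEST */

static bool example_rate_matches(unsigned long rate_hz, unsigned long mts)
{
	unsigned long conv = DIV_ROUND_CLOSEST(rate_hz, 250000);

	/* 750000000 Hz -> 3000, so 2999..3001 MT/s all match */
	return conv == mts || conv + 1 == mts || conv - 1 == mts;
}
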
+static void imx8m_ddrc_smc_set_freq(int target_freq)
+{
+ struct arm_smccc_res res;
+ u32 online_cpus = 0;
+ int cpu;
+
+ local_irq_disable();
+
+ for_each_online_cpu(cpu)
+ online_cpus |= (1 << (cpu * 8));
+
+ /* Change the DDR frequency */
+ arm_smccc_smc(IMX_SIP_DDR_DVFS, target_freq, online_cpus,
+ 0, 0, 0, 0, 0, &res);
+
+ local_irq_enable();
+}
+
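The online-CPU mask built above packs one byte per CPU, with the low bit of each byte set when that CPU is online. With CPUs 0-3 online it evaluates as follows; the rendezvous interpretation is an assumption about the TF-A side, which this driver only calls into:

/* (1 << (0 * 8)) | (1 << (1 * 8)) | (1 << (2 * 8)) | (1 << (3 * 8))
 *   == 0x01 | 0x0100 | 0x010000 | 0x01000000
 *   == 0x01010101
 *
 * Presumably this tells the firmware which cores are running while the
 * DRAM is retrained (assumption; the SMC ABI is owned by TF-A).
 */
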
+static struct clk *clk_get_parent_by_index(struct clk *clk, int index)
+{
+ struct clk_hw *hw;
+
+ hw = clk_hw_get_parent_by_index(__clk_get_hw(clk), index);
+
+ return hw ? hw->clk : NULL;
+}
+
+static int imx8m_ddrc_set_freq(struct device *dev, struct imx8m_ddrc_freq *freq)
+{
+ struct imx8m_ddrc *priv = dev_get_drvdata(dev);
+ struct clk *new_dram_core_parent;
+ struct clk *new_dram_alt_parent;
+ struct clk *new_dram_apb_parent;
+ int ret;
+
+ /*
+ * Fetch new parents
+ *
+ * new_dram_alt_parent and new_dram_apb_parent are optional but
+ * new_dram_core_parent is not.
+ */
+ new_dram_core_parent = clk_get_parent_by_index(
+ priv->dram_core, freq->dram_core_parent_index - 1);
+ if (!new_dram_core_parent) {
+ dev_err(dev, "failed to fetch new dram_core parent\n");
+ return -EINVAL;
+ }
+ if (freq->dram_alt_parent_index) {
+ new_dram_alt_parent = clk_get_parent_by_index(
+ priv->dram_alt,
+ freq->dram_alt_parent_index - 1);
+ if (!new_dram_alt_parent) {
+ dev_err(dev, "failed to fetch new dram_alt parent\n");
+ return -EINVAL;
+ }
+ } else
+ new_dram_alt_parent = NULL;
+
+ if (freq->dram_apb_parent_index) {
+ new_dram_apb_parent = clk_get_parent_by_index(
+ priv->dram_apb,
+ freq->dram_apb_parent_index - 1);
+ if (!new_dram_apb_parent) {
+ dev_err(dev, "failed to fetch new dram_apb parent\n");
+ return -EINVAL;
+ }
+ } else
+ new_dram_apb_parent = NULL;
+
+ /* increase reference counts and ensure clks are ON before switch */
+ ret = clk_prepare_enable(new_dram_core_parent);
+ if (ret) {
+ dev_err(dev, "failed to enable new dram_core parent: %d\n",
+ ret);
+ goto out;
+ }
+ ret = clk_prepare_enable(new_dram_alt_parent);
+ if (ret) {
+ dev_err(dev, "failed to enable new dram_alt parent: %d\n",
+ ret);
+ goto out_disable_core_parent;
+ }
+ ret = clk_prepare_enable(new_dram_apb_parent);
+ if (ret) {
+ dev_err(dev, "failed to enable new dram_apb parent: %d\n",
+ ret);
+ goto out_disable_alt_parent;
+ }
+
+ imx8m_ddrc_smc_set_freq(freq->smcarg);
+
+ /* update parents in clk tree after switch. */
+ ret = clk_set_parent(priv->dram_core, new_dram_core_parent);
+ if (ret)
+ dev_warn(dev, "failed to set dram_core parent: %d\n", ret);
+ if (new_dram_alt_parent) {
+ ret = clk_set_parent(priv->dram_alt, new_dram_alt_parent);
+ if (ret)
+ dev_warn(dev, "failed to set dram_alt parent: %d\n",
+ ret);
+ }
+ if (new_dram_apb_parent) {
+ ret = clk_set_parent(priv->dram_apb, new_dram_apb_parent);
+ if (ret)
+ dev_warn(dev, "failed to set dram_apb parent: %d\n",
+ ret);
+ }
+
+ /*
+ * Explicitly refresh dram PLL rate.
+ *
+ * Even if it's marked with CLK_GET_RATE_NOCACHE the rate will not be
+ * automatically refreshed when clk_get_rate is called on children.
+ */
+ clk_get_rate(priv->dram_pll);
+
+ /*
+ * clk_set_parent() transfers the reference count from the old parent,
+ * so now we drop the extra reference counts taken during the switch.
+ */
+ clk_disable_unprepare(new_dram_apb_parent);
+out_disable_alt_parent:
+ clk_disable_unprepare(new_dram_alt_parent);
+out_disable_core_parent:
+ clk_disable_unprepare(new_dram_core_parent);
+out:
+ return ret;
+}
+
+static int imx8m_ddrc_target(struct device *dev, unsigned long *freq, u32 flags)
+{
+ struct imx8m_ddrc *priv = dev_get_drvdata(dev);
+ struct imx8m_ddrc_freq *freq_info;
+ struct dev_pm_opp *new_opp;
+ unsigned long old_freq, new_freq;
+ int ret;
+
+ new_opp = devfreq_recommended_opp(dev, freq, flags);
+ if (IS_ERR(new_opp)) {
+ ret = PTR_ERR(new_opp);
+ dev_err(dev, "failed to get recommended opp: %d\n", ret);
+ return ret;
+ }
+ dev_pm_opp_put(new_opp);
+
+ old_freq = clk_get_rate(priv->dram_core);
+ if (*freq == old_freq)
+ return 0;
+
+ freq_info = imx8m_ddrc_find_freq(priv, *freq);
+ if (!freq_info)
+ return -EINVAL;
+
+ /*
+ * Read back the clk rate to verify switch was correct and so that
+ * we can report it on all error paths.
+ */
+ ret = imx8m_ddrc_set_freq(dev, freq_info);
+
+ new_freq = clk_get_rate(priv->dram_core);
+ if (ret)
+ dev_err(dev, "ddrc failed freq switch to %lu from %lu: error %d. now at %lu\n",
+ *freq, old_freq, ret, new_freq);
+ else if (*freq != new_freq)
+ dev_err(dev, "ddrc failed freq update to %lu from %lu, now at %lu\n",
+ *freq, old_freq, new_freq);
+ else
+ dev_dbg(dev, "ddrc freq set to %lu (was %lu)\n",
+ *freq, old_freq);
+
+ return ret;
+}
+
+static int imx8m_ddrc_get_cur_freq(struct device *dev, unsigned long *freq)
+{
+ struct imx8m_ddrc *priv = dev_get_drvdata(dev);
+
+ *freq = clk_get_rate(priv->dram_core);
+
+ return 0;
+}
+
+static int imx8m_ddrc_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *stat)
+{
+ struct imx8m_ddrc *priv = dev_get_drvdata(dev);
+
+ stat->busy_time = 0;
+ stat->total_time = 0;
+ stat->current_frequency = clk_get_rate(priv->dram_core);
+
+ return 0;
+}
+
+static int imx8m_ddrc_init_freq_info(struct device *dev)
+{
+ struct imx8m_ddrc *priv = dev_get_drvdata(dev);
+ struct arm_smccc_res res;
+ int index;
+
+ /* An error here means DDR DVFS API not supported by firmware */
+ arm_smccc_smc(IMX_SIP_DDR_DVFS, IMX_SIP_DDR_DVFS_GET_FREQ_COUNT,
+ 0, 0, 0, 0, 0, 0, &res);
+ priv->freq_count = res.a0;
+ if (priv->freq_count <= 0 ||
+ priv->freq_count > IMX8M_DDRC_MAX_FREQ_COUNT)
+ return -ENODEV;
+
+ for (index = 0; index < priv->freq_count; ++index) {
+ struct imx8m_ddrc_freq *freq = &priv->freq_table[index];
+
+ arm_smccc_smc(IMX_SIP_DDR_DVFS, IMX_SIP_DDR_DVFS_GET_FREQ_INFO,
+ index, 0, 0, 0, 0, 0, &res);
+ /* Result should be strictly positive */
+ if ((long)res.a0 <= 0)
+ return -ENODEV;
+
+ freq->rate = res.a0;
+ freq->smcarg = index;
+ freq->dram_core_parent_index = res.a1;
+ freq->dram_alt_parent_index = res.a2;
+ freq->dram_apb_parent_index = res.a3;
+
+ /* dram_core has 2 options: dram_pll or dram_alt_root */
+ if (freq->dram_core_parent_index != 1 &&
+ freq->dram_core_parent_index != 2)
+ return -ENODEV;
+ /* dram_apb and dram_alt have exactly 8 possible parents */
+ if (freq->dram_alt_parent_index > 8 ||
+ freq->dram_apb_parent_index > 8)
+ return -ENODEV;
+ /* dram_core from alt requires explicit dram_alt parent */
+ if (freq->dram_core_parent_index == 2 &&
+ freq->dram_alt_parent_index == 0)
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int imx8m_ddrc_check_opps(struct device *dev)
+{
+ struct imx8m_ddrc *priv = dev_get_drvdata(dev);
+ struct imx8m_ddrc_freq *freq_info;
+ struct dev_pm_opp *opp;
+ unsigned long freq;
+ int i, opp_count;
+
+ /* Enumerate DT OPPs and disable those not supported by firmware */
+ opp_count = dev_pm_opp_get_opp_count(dev);
+ if (opp_count < 0)
+ return opp_count;
+ for (i = 0, freq = 0; i < opp_count; ++i, ++freq) {
+ opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+ if (IS_ERR(opp)) {
+ dev_err(dev, "Failed enumerating OPPs: %ld\n",
+ PTR_ERR(opp));
+ return PTR_ERR(opp);
+ }
+ dev_pm_opp_put(opp);
+
+ freq_info = imx8m_ddrc_find_freq(priv, freq);
+ if (!freq_info) {
+ dev_info(dev, "Disable unsupported OPP %luHz %luMT/s\n",
+ freq, DIV_ROUND_CLOSEST(freq, 250000));
+ dev_pm_opp_disable(dev, freq);
+ }
+ }
+
+ return 0;
+}
+
+static void imx8m_ddrc_exit(struct device *dev)
+{
+ dev_pm_opp_of_remove_table(dev);
+}
+
+static int imx8m_ddrc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct imx8m_ddrc *priv;
+ const char *gov = DEVFREQ_GOV_USERSPACE;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, priv);
+
+ ret = imx8m_ddrc_init_freq_info(dev);
+ if (ret) {
+ dev_err(dev, "failed to init firmware freq info: %d\n", ret);
+ return ret;
+ }
+
+ priv->dram_core = devm_clk_get(dev, "core");
+ if (IS_ERR(priv->dram_core)) {
+ ret = PTR_ERR(priv->dram_core);
+ dev_err(dev, "failed to fetch core clock: %d\n", ret);
+ return ret;
+ }
+ priv->dram_pll = devm_clk_get(dev, "pll");
+ if (IS_ERR(priv->dram_pll)) {
+ ret = PTR_ERR(priv->dram_pll);
+ dev_err(dev, "failed to fetch pll clock: %d\n", ret);
+ return ret;
+ }
+ priv->dram_alt = devm_clk_get(dev, "alt");
+ if (IS_ERR(priv->dram_alt)) {
+ ret = PTR_ERR(priv->dram_alt);
+ dev_err(dev, "failed to fetch alt clock: %d\n", ret);
+ return ret;
+ }
+ priv->dram_apb = devm_clk_get(dev, "apb");
+ if (IS_ERR(priv->dram_apb)) {
+ ret = PTR_ERR(priv->dram_apb);
+ dev_err(dev, "failed to fetch apb clock: %d\n", ret);
+ return ret;
+ }
+
+ ret = dev_pm_opp_of_add_table(dev);
+ if (ret < 0) {
+ dev_err(dev, "failed to get OPP table\n");
+ return ret;
+ }
+
+ ret = imx8m_ddrc_check_opps(dev);
+ if (ret < 0)
+ goto err;
+
+ priv->profile.polling_ms = 1000;
+ priv->profile.target = imx8m_ddrc_target;
+ priv->profile.get_dev_status = imx8m_ddrc_get_dev_status;
+ priv->profile.exit = imx8m_ddrc_exit;
+ priv->profile.get_cur_freq = imx8m_ddrc_get_cur_freq;
+ priv->profile.initial_freq = clk_get_rate(priv->dram_core);
+
+ priv->devfreq = devm_devfreq_add_device(dev, &priv->profile,
+ gov, NULL);
+ if (IS_ERR(priv->devfreq)) {
+ ret = PTR_ERR(priv->devfreq);
+ dev_err(dev, "failed to add devfreq device: %d\n", ret);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ dev_pm_opp_of_remove_table(dev);
+ return ret;
+}
+
+static const struct of_device_id imx8m_ddrc_of_match[] = {
+ { .compatible = "fsl,imx8m-ddrc", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, imx8m_ddrc_of_match);
+
+static struct platform_driver imx8m_ddrc_platdrv = {
+ .probe = imx8m_ddrc_probe,
+ .driver = {
+ .name = "imx8m-ddrc-devfreq",
+ .of_match_table = of_match_ptr(imx8m_ddrc_of_match),
+ },
+};
+module_platform_driver(imx8m_ddrc_platdrv);
+
+MODULE_DESCRIPTION("i.MX8M DDR Controller frequency driver");
+MODULE_AUTHOR("Leonard Crestez <leonard.crestez@nxp.com>");
+MODULE_LICENSE("GPL v2");
if (res.a0) {
dev_err(dev, "Failed to set dram param: %ld\n",
res.a0);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_edev;
}
}
}
node = of_parse_phandle(np, "rockchip,pmu", 0);
if (node) {
data->regmap_pmu = syscon_node_to_regmap(node);
- if (IS_ERR(data->regmap_pmu))
- return PTR_ERR(data->regmap_pmu);
+ of_node_put(node);
+ if (IS_ERR(data->regmap_pmu)) {
+ ret = PTR_ERR(data->regmap_pmu);
+ goto err_edev;
+ }
}
regmap_read(data->regmap_pmu, RK3399_PMUGRF_OS_REG2, &val);
data->odt_dis_freq = data->timing.lpddr4_odt_dis_freq;
break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_edev;
};
arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, 0, 0,
*/
if (dev_pm_opp_of_add_table(dev)) {
dev_err(dev, "Invalid operating-points in device tree.\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_edev;
}
of_property_read_u32(np, "upthreshold",
err_free_opp:
dev_pm_opp_of_remove_table(&pdev->dev);
+err_edev:
+ devfreq_event_disable_edev(data->edev);
+
return ret;
}
the capability to offload memcpy, xor and pq computation
for raid5/6.
+config HISI_DMA
+ tristate "HiSilicon DMA Engine support"
+ depends on ARM64 || (COMPILE_TEST && PCI_MSI)
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Support the HiSilicon Kunpeng DMA engine.
+
config IMG_MDC_DMA
tristate "IMG MDC support"
depends on MIPS || COMPILE_TEST
Enable DMA support for Intel Low Power Subsystem such as found on
Intel Skylake PCH.
+config INTEL_IDXD
+ tristate "Intel Data Accelerators support"
+ depends on PCI && X86_64
+ select DMA_ENGINE
+ select SBITMAP
+ help
+ Enable support for the Intel(R) data accelerators present
+ in Intel Xeon CPUs.
+
+ Say Y if you have such a platform.
+
+ If unsure, say N.
+
config INTEL_IOATDMA
tristate "Intel I/OAT DMA support"
depends on PCI && X86_64
16 to 32 channels for peripheral to memory or memory to memory
transfers.
+config PLX_DMA
+ tristate "PLX ExpressLane PEX Switch DMA Engine Support"
+ depends on PCI
+ select DMA_ENGINE
+ help
+ Some PLX ExpressLane PCI Switches support additional DMA engines.
+ These are exposed via extra functions on the switch's
+ upstream port. Each function exposes one DMA channel.
+
config SIRF_DMA
tristate "CSR SiRFprimaII/SiRFmarco DMA support"
depends on ARCH_SIRF
obj-$(CONFIG_MCF_EDMA) += mcf-edma.o fsl-edma-common.o
obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o
obj-$(CONFIG_FSL_RAID) += fsl_raid.o
+obj-$(CONFIG_HISI_DMA) += hisi_dma.o
obj-$(CONFIG_HSU_DMA) += hsu/
obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
obj-$(CONFIG_IMX_DMA) += imx-dma.o
obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
obj-$(CONFIG_INTEL_IDMA64) += idma64.o
obj-$(CONFIG_INTEL_IOATDMA) += ioat/
+obj-$(CONFIG_INTEL_IDXD) += idxd/
obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o
obj-$(CONFIG_K3_DMA) += k3dma.o
obj-$(CONFIG_OWL_DMA) += owl-dma.o
obj-$(CONFIG_PCH_DMA) += pch_dma.o
obj-$(CONFIG_PL330_DMA) += pl330.o
+obj-$(CONFIG_PLX_DMA) += plx_dma.o
obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
obj-$(CONFIG_PXA_DMA) += pxa_dma.o
obj-$(CONFIG_RENESAS_DMA) += sh/
/* stop DMA activity */
if (c->desc) {
- if (c->desc->vd.tx.flags & DMA_PREP_INTERRUPT)
- vchan_terminate_vdesc(&c->desc->vd);
- else
- vchan_vdesc_fini(&c->desc->vd);
+ vchan_terminate_vdesc(&c->desc->vd);
c->desc = NULL;
bcm2835_dma_abort(c);
}
struct dma_device *dma_dev;
struct axi_dmac *dmac;
struct resource *res;
+ struct regmap *regmap;
int ret;
dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
platform_set_drvdata(pdev, dmac);
- devm_regmap_init_mmio(&pdev->dev, dmac->base, &axi_dmac_regmap_config);
+ regmap = devm_regmap_init_mmio(&pdev->dev, dmac->base,
+ &axi_dmac_regmap_config);
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ goto err_free_irq;
+ }
return 0;
+err_free_irq:
+ free_irq(dmac->irq, dmac);
err_unregister_of:
of_dma_controller_free(pdev->dev.of_node);
err_unregister_device:
.flags = JZ_SOC_DATA_PROGRAMMABLE_DMA,
};
+static const struct jz4780_dma_soc_data x1830_dma_soc_data = {
+ .nb_channels = 32,
+ .transfer_ord_max = 7,
+ .flags = JZ_SOC_DATA_PROGRAMMABLE_DMA,
+};
+
static const struct of_device_id jz4780_dma_dt_match[] = {
{ .compatible = "ingenic,jz4740-dma", .data = &jz4740_dma_soc_data },
{ .compatible = "ingenic,jz4725b-dma", .data = &jz4725b_dma_soc_data },
{ .compatible = "ingenic,jz4770-dma", .data = &jz4770_dma_soc_data },
{ .compatible = "ingenic,jz4780-dma", .data = &jz4780_dma_soc_data },
{ .compatible = "ingenic,x1000-dma", .data = &x1000_dma_soc_data },
+ { .compatible = "ingenic,x1830-dma", .data = &x1830_dma_soc_data },
{},
};
MODULE_DEVICE_TABLE(of, jz4780_dma_dt_match);
/* --- sysfs implementation --- */
+#define DMA_SLAVE_NAME "slave"
+
/**
* dev_to_dma_chan - convert a device pointer to its sysfs container object
* @dev - device node
/* --- client and device registration --- */
-#define dma_device_satisfies_mask(device, mask) \
- __dma_device_satisfies_mask((device), &(mask))
-static int
-__dma_device_satisfies_mask(struct dma_device *device,
- const dma_cap_mask_t *want)
+/**
+ * dma_cap_mask_all - enable iteration over all operation types
+ */
+static dma_cap_mask_t dma_cap_mask_all;
+
+/**
+ * dma_chan_tbl_ent - tracks channel allocations per core/operation
+ * @chan - associated channel for this entry
+ */
+struct dma_chan_tbl_ent {
+ struct dma_chan *chan;
+};
+
+/**
+ * channel_table - percpu lookup table for memory-to-memory offload providers
+ */
+static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
+
+static int __init dma_channel_table_init(void)
+{
+ enum dma_transaction_type cap;
+ int err = 0;
+
+ bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
+
+ /* 'interrupt', 'private', and 'slave' are channel capabilities,
+ * but are not associated with an operation so they do not need
+ * an entry in the channel_table
+ */
+ clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
+ clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
+ clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
+
+ for_each_dma_cap_mask(cap, dma_cap_mask_all) {
+ channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
+ if (!channel_table[cap]) {
+ err = -ENOMEM;
+ break;
+ }
+ }
+
+ if (err) {
+ pr_err("dmaengine dma_channel_table_init failure: %d\n", err);
+ for_each_dma_cap_mask(cap, dma_cap_mask_all)
+ free_percpu(channel_table[cap]);
+ }
+
+ return err;
+}
+arch_initcall(dma_channel_table_init);
+
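channel_table is a classic per-cpu lookup table. A stripped-down sketch of the alloc_percpu()/per_cpu_ptr() pattern it uses, with invented names:

#include <linux/cpumask.h>
#include <linux/percpu.h>

struct example_ent {
	int value;
};

static struct example_ent __percpu *example_table;

static int example_table_init(void)
{
	int cpu;

	example_table = alloc_percpu(struct example_ent);
	if (!example_table)
		return -ENOMEM;

	for_each_possible_cpu(cpu)
		per_cpu_ptr(example_table, cpu)->value = 0;

	return 0;	/* freed later with free_percpu(example_table) */
}
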
+/**
+ * dma_chan_is_local - returns true if the channel is in the same numa-node as
+ * the cpu
+ */
+static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
+{
+ int node = dev_to_node(chan->device->dev);
+ return node == NUMA_NO_NODE ||
+ cpumask_test_cpu(cpu, cpumask_of_node(node));
+}
+
+/**
+ * min_chan - returns the channel with min count and in the same numa-node as
+ * the cpu
+ * @cap: capability to match
+ * @cpu: cpu index which the channel should be close to
+ *
+ * If some channels are close to the given cpu, the one with the lowest
+ * reference count is returned. Otherwise, cpu is ignored and only the
+ * reference count is taken into account.
+ * Must be called under dma_list_mutex.
+ */
+static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
+{
+ struct dma_device *device;
+ struct dma_chan *chan;
+ struct dma_chan *min = NULL;
+ struct dma_chan *localmin = NULL;
+
+ list_for_each_entry(device, &dma_device_list, global_node) {
+ if (!dma_has_cap(cap, device->cap_mask) ||
+ dma_has_cap(DMA_PRIVATE, device->cap_mask))
+ continue;
+ list_for_each_entry(chan, &device->channels, device_node) {
+ if (!chan->client_count)
+ continue;
+ if (!min || chan->table_count < min->table_count)
+ min = chan;
+
+ if (dma_chan_is_local(chan, cpu))
+ if (!localmin ||
+ chan->table_count < localmin->table_count)
+ localmin = chan;
+ }
+ }
+
+ chan = localmin ? localmin : min;
+
+ if (chan)
+ chan->table_count++;
+
+ return chan;
+}
+
+/**
+ * dma_channel_rebalance - redistribute the available channels
+ *
+ * Optimize for cpu isolation (each cpu gets a dedicated channel for an
+ * operation type) in the SMP case, and operation isolation (avoid
+ * multi-tasking channels) in the non-SMP case. Must be called under
+ * dma_list_mutex.
+ */
+static void dma_channel_rebalance(void)
+{
+ struct dma_chan *chan;
+ struct dma_device *device;
+ int cpu;
+ int cap;
+
+ /* undo the last distribution */
+ for_each_dma_cap_mask(cap, dma_cap_mask_all)
+ for_each_possible_cpu(cpu)
+ per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
+
+ list_for_each_entry(device, &dma_device_list, global_node) {
+ if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
+ continue;
+ list_for_each_entry(chan, &device->channels, device_node)
+ chan->table_count = 0;
+ }
+
+ /* don't populate the channel_table if no clients are available */
+ if (!dmaengine_ref_count)
+ return;
+
+ /* redistribute available channels */
+ for_each_dma_cap_mask(cap, dma_cap_mask_all)
+ for_each_online_cpu(cpu) {
+ chan = min_chan(cap, cpu);
+ per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
+ }
+}
+
+static int dma_device_satisfies_mask(struct dma_device *device,
+ const dma_cap_mask_t *want)
{
dma_cap_mask_t has;
static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
- return chan->device->dev->driver->owner;
+ return chan->device->owner;
}
/**
}
}
+static void dma_device_release(struct kref *ref)
+{
+ struct dma_device *device = container_of(ref, struct dma_device, ref);
+
+ list_del_rcu(&device->global_node);
+ dma_channel_rebalance();
+
+ if (device->device_release)
+ device->device_release(device);
+}
+
+static void dma_device_put(struct dma_device *device)
+{
+ lockdep_assert_held(&dma_list_mutex);
+ kref_put(&device->ref, dma_device_release);
+}
+
/**
* dma_chan_get - try to grab a dma channel's parent driver module
* @chan - channel to grab
if (!try_module_get(owner))
return -ENODEV;
+ ret = kref_get_unless_zero(&chan->device->ref);
+ if (!ret) {
+ ret = -ENODEV;
+ goto module_put_out;
+ }
+
/* allocate upon first client reference */
if (chan->device->device_alloc_chan_resources) {
ret = chan->device->device_alloc_chan_resources(chan);
return 0;
err_out:
+ dma_device_put(chan->device);
+module_put_out:
module_put(owner);
return ret;
}
return;
chan->client_count--;
- module_put(dma_chan_to_owner(chan));
/* This channel is not in use anymore, free it */
if (!chan->client_count && chan->device->device_free_chan_resources) {
chan->router = NULL;
chan->route_data = NULL;
}
+
+ dma_device_put(chan->device);
+ module_put(dma_chan_to_owner(chan));
}
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
}
EXPORT_SYMBOL(dma_sync_wait);
-/**
- * dma_cap_mask_all - enable iteration over all operation types
- */
-static dma_cap_mask_t dma_cap_mask_all;
-
-/**
- * dma_chan_tbl_ent - tracks channel allocations per core/operation
- * @chan - associated channel for this entry
- */
-struct dma_chan_tbl_ent {
- struct dma_chan *chan;
-};
-
-/**
- * channel_table - percpu lookup table for memory-to-memory offload providers
- */
-static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
-
-static int __init dma_channel_table_init(void)
-{
- enum dma_transaction_type cap;
- int err = 0;
-
- bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
-
- /* 'interrupt', 'private', and 'slave' are channel capabilities,
- * but are not associated with an operation so they do not need
- * an entry in the channel_table
- */
- clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
- clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
- clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
-
- for_each_dma_cap_mask(cap, dma_cap_mask_all) {
- channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
- if (!channel_table[cap]) {
- err = -ENOMEM;
- break;
- }
- }
-
- if (err) {
- pr_err("initialization failure\n");
- for_each_dma_cap_mask(cap, dma_cap_mask_all)
- free_percpu(channel_table[cap]);
- }
-
- return err;
-}
-arch_initcall(dma_channel_table_init);
-
/**
* dma_find_channel - find a channel to carry out the operation
* @tx_type: transaction type
}
EXPORT_SYMBOL(dma_issue_pending_all);
-/**
- * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
- */
-static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
-{
- int node = dev_to_node(chan->device->dev);
- return node == NUMA_NO_NODE ||
- cpumask_test_cpu(cpu, cpumask_of_node(node));
-}
-
-/**
- * min_chan - returns the channel with min count and in the same numa-node as the cpu
- * @cap: capability to match
- * @cpu: cpu index which the channel should be close to
- *
- * If some channels are close to the given cpu, the one with the lowest
- * reference count is returned. Otherwise, cpu is ignored and only the
- * reference count is taken into account.
- * Must be called under dma_list_mutex.
- */
-static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
-{
- struct dma_device *device;
- struct dma_chan *chan;
- struct dma_chan *min = NULL;
- struct dma_chan *localmin = NULL;
-
- list_for_each_entry(device, &dma_device_list, global_node) {
- if (!dma_has_cap(cap, device->cap_mask) ||
- dma_has_cap(DMA_PRIVATE, device->cap_mask))
- continue;
- list_for_each_entry(chan, &device->channels, device_node) {
- if (!chan->client_count)
- continue;
- if (!min || chan->table_count < min->table_count)
- min = chan;
-
- if (dma_chan_is_local(chan, cpu))
- if (!localmin ||
- chan->table_count < localmin->table_count)
- localmin = chan;
- }
- }
-
- chan = localmin ? localmin : min;
-
- if (chan)
- chan->table_count++;
-
- return chan;
-}
-
-/**
- * dma_channel_rebalance - redistribute the available channels
- *
- * Optimize for cpu isolation (each cpu gets a dedicated channel for an
- * operation type) in the SMP case, and operation isolation (avoid
- * multi-tasking channels) in the non-SMP case. Must be called under
- * dma_list_mutex.
- */
-static void dma_channel_rebalance(void)
-{
- struct dma_chan *chan;
- struct dma_device *device;
- int cpu;
- int cap;
-
- /* undo the last distribution */
- for_each_dma_cap_mask(cap, dma_cap_mask_all)
- for_each_possible_cpu(cpu)
- per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
-
- list_for_each_entry(device, &dma_device_list, global_node) {
- if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
- continue;
- list_for_each_entry(chan, &device->channels, device_node)
- chan->table_count = 0;
- }
-
- /* don't populate the channel_table if no clients are available */
- if (!dmaengine_ref_count)
- return;
-
- /* redistribute available channels */
- for_each_dma_cap_mask(cap, dma_cap_mask_all)
- for_each_online_cpu(cpu) {
- chan = min_chan(cap, cpu);
- per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
- }
-}
-
int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
{
struct dma_device *device;
{
struct dma_chan *chan;
- if (mask && !__dma_device_satisfies_mask(dev, mask)) {
+ if (mask && !dma_device_satisfies_mask(dev, mask)) {
dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
return NULL;
}
if (has_acpi_companion(dev) && !chan)
chan = acpi_dma_request_slave_chan_by_name(dev, name);
- if (chan) {
- /* Valid channel found or requester needs to be deferred */
- if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
- return chan;
- }
+ if (PTR_ERR(chan) == -EPROBE_DEFER)
+ return chan;
+
+ if (!IS_ERR_OR_NULL(chan))
+ goto found;
/* Try to find the channel via the DMA filter map(s) */
mutex_lock(&dma_list_mutex);
}
mutex_unlock(&dma_list_mutex);
- return chan ? chan : ERR_PTR(-EPROBE_DEFER);
+ if (!IS_ERR_OR_NULL(chan))
+ goto found;
+
+ return ERR_PTR(-EPROBE_DEFER);
+
+found:
+ chan->slave = dev;
+ chan->name = kasprintf(GFP_KERNEL, "dma:%s", name);
+ if (!chan->name)
+ return ERR_PTR(-ENOMEM);
+
+ if (sysfs_create_link(&chan->dev->device.kobj, &dev->kobj,
+ DMA_SLAVE_NAME))
+ dev_err(dev, "Cannot create DMA %s symlink\n", DMA_SLAVE_NAME);
+ if (sysfs_create_link(&dev->kobj, &chan->dev->device.kobj, chan->name))
+ dev_err(dev, "Cannot create DMA %s symlink\n", chan->name);
+ return chan;
}
EXPORT_SYMBOL_GPL(dma_request_chan);
/* drop PRIVATE cap enabled by __dma_request_channel() */
if (--chan->device->privatecnt == 0)
dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
+ if (chan->slave) {
+ sysfs_remove_link(&chan->slave->kobj, chan->name);
+ kfree(chan->name);
+ chan->name = NULL;
+ chan->slave = NULL;
+ }
+ sysfs_remove_link(&chan->dev->device.kobj, DMA_SLAVE_NAME);
mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);
*/
void dmaengine_put(void)
{
- struct dma_device *device;
+ struct dma_device *device, *_d;
struct dma_chan *chan;
mutex_lock(&dma_list_mutex);
dmaengine_ref_count--;
BUG_ON(dmaengine_ref_count < 0);
/* drop channel references */
- list_for_each_entry(device, &dma_device_list, global_node) {
+ list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
continue;
list_for_each_entry(chan, &device->channels, device_node)
return 0;
}
+static int __dma_async_device_channel_register(struct dma_device *device,
+ struct dma_chan *chan,
+ int chan_id)
+{
+ int rc = 0;
+ int chancnt = device->chancnt;
+ atomic_t *idr_ref;
+ struct dma_chan *tchan;
+
+ tchan = list_first_entry_or_null(&device->channels,
+ struct dma_chan, device_node);
+ if (tchan && tchan->dev) {
+ idr_ref = tchan->dev->idr_ref;
+ } else {
+ idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
+ if (!idr_ref)
+ return -ENOMEM;
+ atomic_set(idr_ref, 0);
+ }
+
+ chan->local = alloc_percpu(typeof(*chan->local));
+ if (!chan->local) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
+ if (!chan->dev) {
+ free_percpu(chan->local);
+ chan->local = NULL;
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ /*
+ * When the chan_id is a negative value, we are dynamically adding
+ * the channel. Otherwise the channel is being statically enumerated.
+ */
+ chan->chan_id = chan_id < 0 ? chancnt : chan_id;
+ chan->dev->device.class = &dma_devclass;
+ chan->dev->device.parent = device->dev;
+ chan->dev->chan = chan;
+ chan->dev->idr_ref = idr_ref;
+ chan->dev->dev_id = device->dev_id;
+ atomic_inc(idr_ref);
+ dev_set_name(&chan->dev->device, "dma%dchan%d",
+ device->dev_id, chan->chan_id);
+
+ rc = device_register(&chan->dev->device);
+ if (rc)
+ goto err_out;
+ chan->client_count = 0;
+ device->chancnt = chan->chan_id + 1;
+
+ return 0;
+
+ err_out:
+ free_percpu(chan->local);
+ kfree(chan->dev);
+ if (atomic_dec_return(idr_ref) == 0)
+ kfree(idr_ref);
+ return rc;
+}
+
+int dma_async_device_channel_register(struct dma_device *device,
+ struct dma_chan *chan)
+{
+ int rc;
+
+ rc = __dma_async_device_channel_register(device, chan, -1);
+ if (rc < 0)
+ return rc;
+
+ dma_channel_rebalance();
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dma_async_device_channel_register);
+
+static void __dma_async_device_channel_unregister(struct dma_device *device,
+ struct dma_chan *chan)
+{
+ WARN_ONCE(!device->device_release && chan->client_count,
+ "%s called while %d clients hold a reference\n",
+ __func__, chan->client_count);
+ mutex_lock(&dma_list_mutex);
+ list_del(&chan->device_node);
+ device->chancnt--;
+ chan->dev->chan = NULL;
+ mutex_unlock(&dma_list_mutex);
+ device_unregister(&chan->dev->device);
+ free_percpu(chan->local);
+}
+
+void dma_async_device_channel_unregister(struct dma_device *device,
+ struct dma_chan *chan)
+{
+ __dma_async_device_channel_unregister(device, chan);
+ dma_channel_rebalance();
+}
+EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister);
+
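dma_async_device_channel_register()/..._unregister() are the new hot-plug entry points: a driver can add or remove individual channels on an already registered dma_device instead of tearing the whole device down. A hedged sketch, assuming the driver has already allocated my_chan and that the channel is linked into device->channels before registering (the rebalance pass walks that list); my_hotplug_channel() and the rollback policy are illustrative:

static int my_hotplug_channel(struct dma_device *my_dev,
			      struct dma_chan *my_chan)	/* illustrative */
{
	int rc;

	my_chan->device = my_dev;
	/* dma_channel_rebalance() walks device->channels, so link first */
	list_add_tail(&my_chan->device_node, &my_dev->channels);

	rc = dma_async_device_channel_register(my_dev, my_chan);
	if (rc)
		list_del(&my_chan->device_node);
	return rc;
}

Teardown is a single dma_async_device_channel_unregister(my_dev, my_chan) call; as the code above shows, it unlinks the channel from device->channels itself.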
/**
* dma_async_device_register - registers DMA devices found
* @device: &dma_device
+ *
+ * After calling this routine the structure should not be freed except in the
+ * device_release() callback which will be called after
+ * dma_async_device_unregister() is called and no further references are taken.
*/
int dma_async_device_register(struct dma_device *device)
{
- int chancnt = 0, rc;
+ int rc, i = 0;
struct dma_chan* chan;
- atomic_t *idr_ref;
if (!device)
return -ENODEV;
return -EIO;
}
+ device->owner = device->dev->driver->owner;
+
if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
dev_err(device->dev,
"Device claims capability %s, but op is not defined\n",
return -EIO;
}
+ if (!device->device_release)
+ dev_warn(device->dev,
+ "WARN: Device release is not defined so it is not safe to unbind this driver while in use\n");
+
+ kref_init(&device->ref);
+
/* note: this only matters in the
* CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
*/
if (device_has_all_tx_types(device))
dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
- idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
- if (!idr_ref)
- return -ENOMEM;
rc = get_dma_id(device);
- if (rc != 0) {
- kfree(idr_ref);
+ if (rc != 0)
return rc;
- }
-
- atomic_set(idr_ref, 0);
/* represent channels in sysfs. Probably want devs too */
list_for_each_entry(chan, &device->channels, device_node) {
- rc = -ENOMEM;
- chan->local = alloc_percpu(typeof(*chan->local));
- if (chan->local == NULL)
- goto err_out;
- chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
- if (chan->dev == NULL) {
- free_percpu(chan->local);
- chan->local = NULL;
+ rc = __dma_async_device_channel_register(device, chan, i++);
+ if (rc < 0)
goto err_out;
- }
-
- chan->chan_id = chancnt++;
- chan->dev->device.class = &dma_devclass;
- chan->dev->device.parent = device->dev;
- chan->dev->chan = chan;
- chan->dev->idr_ref = idr_ref;
- chan->dev->dev_id = device->dev_id;
- atomic_inc(idr_ref);
- dev_set_name(&chan->dev->device, "dma%dchan%d",
- device->dev_id, chan->chan_id);
-
- rc = device_register(&chan->dev->device);
- if (rc) {
- free_percpu(chan->local);
- chan->local = NULL;
- kfree(chan->dev);
- atomic_dec(idr_ref);
- goto err_out;
- }
- chan->client_count = 0;
- }
-
- if (!chancnt) {
- dev_err(device->dev, "%s: device has no channels!\n", __func__);
- rc = -ENODEV;
- goto err_out;
}
- device->chancnt = chancnt;
-
mutex_lock(&dma_list_mutex);
/* take references on public channels */
if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
err_out:
/* if we never registered a channel just release the idr */
- if (atomic_read(idr_ref) == 0) {
+ if (!device->chancnt) {
ida_free(&dma_ida, device->dev_id);
- kfree(idr_ref);
return rc;
}
*/
void dma_async_device_unregister(struct dma_device *device)
{
- struct dma_chan *chan;
+ struct dma_chan *chan, *n;
+
+ list_for_each_entry_safe(chan, n, &device->channels, device_node)
+ __dma_async_device_channel_unregister(device, chan);
mutex_lock(&dma_list_mutex);
- list_del_rcu(&device->global_node);
+ /*
+ * setting DMA_PRIVATE ensures the device being torn down will not
+ * be used in the channel_table
+ */
+ dma_cap_set(DMA_PRIVATE, device->cap_mask);
dma_channel_rebalance();
+ dma_device_put(device);
mutex_unlock(&dma_list_mutex);
-
- list_for_each_entry(chan, &device->channels, device_node) {
- WARN_ONCE(chan->client_count,
- "%s called while %d clients hold a reference\n",
- __func__, chan->client_count);
- mutex_lock(&dma_list_mutex);
- chan->dev->chan = NULL;
- mutex_unlock(&dma_list_mutex);
- device_unregister(&chan->dev->device);
- free_percpu(chan->local);
- }
}
EXPORT_SYMBOL(dma_async_device_unregister);
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);
+static inline int desc_check_and_set_metadata_mode(
+ struct dma_async_tx_descriptor *desc, enum dma_desc_metadata_mode mode)
+{
+ /* Make sure that the metadata mode is not mixed */
+ if (!desc->desc_metadata_mode) {
+ if (dmaengine_is_metadata_mode_supported(desc->chan, mode))
+ desc->desc_metadata_mode = mode;
+ else
+ return -ENOTSUPP;
+ } else if (desc->desc_metadata_mode != mode) {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc,
+ void *data, size_t len)
+{
+ int ret;
+
+ if (!desc)
+ return -EINVAL;
+
+ ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_CLIENT);
+ if (ret)
+ return ret;
+
+ if (!desc->metadata_ops || !desc->metadata_ops->attach)
+ return -ENOTSUPP;
+
+ return desc->metadata_ops->attach(desc, data, len);
+}
+EXPORT_SYMBOL_GPL(dmaengine_desc_attach_metadata);
+
+void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
+ size_t *payload_len, size_t *max_len)
+{
+ int ret;
+
+ if (!desc)
+ return ERR_PTR(-EINVAL);
+
+ ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (!desc->metadata_ops || !desc->metadata_ops->get_ptr)
+ return ERR_PTR(-ENOTSUPP);
+
+ return desc->metadata_ops->get_ptr(desc, payload_len, max_len);
+}
+EXPORT_SYMBOL_GPL(dmaengine_desc_get_metadata_ptr);
+
+int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
+ size_t payload_len)
+{
+ int ret;
+
+ if (!desc)
+ return -EINVAL;
+
+ ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
+ if (ret)
+ return ret;
+
+ if (!desc->metadata_ops || !desc->metadata_ops->set_len)
+ return -ENOTSUPP;
+
+ return desc->metadata_ops->set_len(desc, payload_len);
+}
+EXPORT_SYMBOL_GPL(dmaengine_desc_set_metadata_len);
+
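Taken together, these three exports implement both per-descriptor metadata modes: in DESC_METADATA_CLIENT the client owns the buffer and attaches it with dmaengine_desc_attach_metadata(), while in DESC_METADATA_ENGINE the engine owns the buffer and the client borrows a pointer into it. A hedged sketch of the engine-mode flow in the memory-to-device direction; desc is a prepared descriptor, and my_meta/my_meta_len are illustrative client data:

	size_t payload_len, max_len;
	void *buf;

	buf = dmaengine_desc_get_metadata_ptr(desc, &payload_len, &max_len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	payload_len = min_t(size_t, my_meta_len, max_len);	/* illustrative bound */
	memcpy(buf, my_meta, payload_len);			/* fill engine-owned buffer */
	dmaengine_desc_set_metadata_len(desc, payload_len);
	dmaengine_submit(desc);

The first metadata call on a descriptor latches its mode, so mixing attach() with get_ptr()/set_len() on the same descriptor fails with -EINVAL, as desc_check_and_set_metadata_mode() above enforces.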
/* dma_wait_for_async_tx - spin wait for a transaction to complete
* @tx: in-flight transaction to wait on
*/
return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);
-
-
state->last = complete;
state->used = used;
state->residue = 0;
+ state->in_flight_bytes = 0;
}
return dma_async_is_complete(cookie, complete, used);
}
state->residue = residue;
}
+static inline void dma_set_in_flight_bytes(struct dma_tx_state *state,
+ u32 in_flight_bytes)
+{
+ if (state)
+ state->in_flight_bytes = in_flight_bytes;
+}
+
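in_flight_bytes complements residue: residue counts bytes the hardware has not yet transferred, while in_flight_bytes counts bytes already fetched from the source but not yet written to the destination. A provider would report both from its device_tx_status() hook; a minimal sketch in which my_hw_residue()/my_hw_in_flight() stand in for hypothetical hardware readbacks:

static enum dma_status my_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				    struct dma_tx_state *txstate)
{
	enum dma_status ret = dma_cookie_status(chan, cookie, txstate);

	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	dma_set_residue(txstate, my_hw_residue(chan));		/* hypothetical */
	dma_set_in_flight_bytes(txstate, my_hw_in_flight(chan));	/* hypothetical */
	return ret;
}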
struct dmaengine_desc_callback {
dma_async_tx_callback callback;
dma_async_tx_callback_result callback_result;
return (cb->callback) ? true : false;
}
+struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
+struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
+
#endif
vchan_get_all_descriptors(&chan->vc, &head);
- /*
- * As vchan_dma_desc_free_list can access to desc_allocated list
- * we need to call it in vc.lock context.
- */
- vchan_dma_desc_free_list(&chan->vc, &head);
-
spin_unlock_irqrestore(&chan->vc.lock, flags);
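+ /* vchan_dma_desc_free_list() may take vc.lock itself, so free after unlocking */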
+ vchan_dma_desc_free_list(&chan->vc, &head);
+
dev_vdbg(dchan2dev(dchan), "terminated: %s\n", axi_chan_name(chan));
return 0;
u32 ch = fsl_chan->vchan.chan.chan_id;
void __iomem *muxaddr;
unsigned int chans_per_mux, ch_off;
+ int endian_diff[4] = {3, 1, -1, -3};
u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs;
chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr;
ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
+
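+ /*
+ * On controllers with byte-swapped mux registers (e.g. LS1028A), remap
+ * the channel offset within each 32-bit word: 0 <-> 3, 1 <-> 2.
+ */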
+ if (fsl_chan->edma->drvdata->mux_swap)
+ ch_off += endian_diff[ch_off % 4];
+
muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
slot = EDMAMUX_CHCFG_SOURCE(slot);
enum edma_version version;
u32 dmamuxs;
bool has_dmaclk;
+ bool mux_swap;
int (*setup_irq)(struct platform_device *pdev,
struct fsl_edma_engine *fsl_edma);
};
.setup_irq = fsl_edma_irq_init,
};
+static struct fsl_edma_drvdata ls1028a_data = {
+ .version = v1,
+ .dmamuxs = DMAMUX_NR,
+ .mux_swap = true,
+ .setup_irq = fsl_edma_irq_init,
+};
+
static struct fsl_edma_drvdata imx7ulp_data = {
.version = v3,
.dmamuxs = 1,
static const struct of_device_id fsl_edma_dt_ids[] = {
{ .compatible = "fsl,vf610-edma", .data = &vf610_data},
+ { .compatible = "fsl,ls1028a-edma", .data = &ls1028a_data},
{ .compatible = "fsl,imx7ulp-edma", .data = &imx7ulp_data},
{ /* sentinel */ }
};
vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
- if (!fsl_queue->comp_pool && !fsl_queue->comp_pool)
+ if (!fsl_queue->comp_pool && !fsl_queue->desc_pool)
return;
list_for_each_entry_safe(comp_temp, _comp_temp,
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2019 HiSilicon Limited. */
+#include <linux/bitfield.h>
+#include <linux/dmaengine.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include "virt-dma.h"
+
+#define HISI_DMA_SQ_BASE_L 0x0
+#define HISI_DMA_SQ_BASE_H 0x4
+#define HISI_DMA_SQ_DEPTH 0x8
+#define HISI_DMA_SQ_TAIL_PTR 0xc
+#define HISI_DMA_CQ_BASE_L 0x10
+#define HISI_DMA_CQ_BASE_H 0x14
+#define HISI_DMA_CQ_DEPTH 0x18
+#define HISI_DMA_CQ_HEAD_PTR 0x1c
+#define HISI_DMA_CTRL0 0x20
+#define HISI_DMA_CTRL0_QUEUE_EN_S 0
+#define HISI_DMA_CTRL0_QUEUE_PAUSE_S 4
+#define HISI_DMA_CTRL1 0x24
+#define HISI_DMA_CTRL1_QUEUE_RESET_S 0
+#define HISI_DMA_Q_FSM_STS 0x30
+#define HISI_DMA_FSM_STS_MASK GENMASK(3, 0)
+#define HISI_DMA_INT_STS 0x40
+#define HISI_DMA_INT_STS_MASK GENMASK(12, 0)
+#define HISI_DMA_INT_MSK 0x44
+#define HISI_DMA_MODE 0x217c
+#define HISI_DMA_OFFSET 0x100
+
+#define HISI_DMA_MSI_NUM 30
+#define HISI_DMA_CHAN_NUM 30
+#define HISI_DMA_Q_DEPTH_VAL 1024
+
+#define PCI_BAR_2 2
+
+enum hisi_dma_mode {
+ EP = 0,
+ RC,
+};
+
+enum hisi_dma_chan_status {
+ DISABLE = -1,
+ IDLE = 0,
+ RUN,
+ CPL,
+ PAUSE,
+ HALT,
+ ABORT,
+ WAIT,
+ BUFFCLR,
+};
+
+struct hisi_dma_sqe {
+ __le32 dw0;
+#define OPCODE_MASK GENMASK(3, 0)
+#define OPCODE_SMALL_PACKAGE 0x1
+#define OPCODE_M2M 0x4
+#define LOCAL_IRQ_EN BIT(8)
+#define ATTR_SRC_MASK GENMASK(14, 12)
+ __le32 dw1;
+ __le32 dw2;
+#define ATTR_DST_MASK GENMASK(26, 24)
+ __le32 length;
+ __le64 src_addr;
+ __le64 dst_addr;
+};
+
+struct hisi_dma_cqe {
+ __le32 rsv0;
+ __le32 rsv1;
+ __le16 sq_head;
+ __le16 rsv2;
+ __le16 rsv3;
+ __le16 w0;
+#define STATUS_MASK GENMASK(15, 1)
+#define STATUS_SUCC 0x0
+#define VALID_BIT BIT(0)
+};
+
+struct hisi_dma_desc {
+ struct virt_dma_desc vd;
+ struct hisi_dma_sqe sqe;
+};
+
+struct hisi_dma_chan {
+ struct virt_dma_chan vc;
+ struct hisi_dma_dev *hdma_dev;
+ struct hisi_dma_sqe *sq;
+ struct hisi_dma_cqe *cq;
+ dma_addr_t sq_dma;
+ dma_addr_t cq_dma;
+ u32 sq_tail;
+ u32 cq_head;
+ u32 qp_num;
+ enum hisi_dma_chan_status status;
+ struct hisi_dma_desc *desc;
+};
+
+struct hisi_dma_dev {
+ struct pci_dev *pdev;
+ void __iomem *base;
+ struct dma_device dma_dev;
+ u32 chan_num;
+ u32 chan_depth;
+ struct hisi_dma_chan chan[];
+};
+
+static inline struct hisi_dma_chan *to_hisi_dma_chan(struct dma_chan *c)
+{
+ return container_of(c, struct hisi_dma_chan, vc.chan);
+}
+
+static inline struct hisi_dma_desc *to_hisi_dma_desc(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct hisi_dma_desc, vd);
+}
+
+static inline void hisi_dma_chan_write(void __iomem *base, u32 reg, u32 index,
+ u32 val)
+{
+ writel_relaxed(val, base + reg + index * HISI_DMA_OFFSET);
+}
+
+static inline void hisi_dma_update_bit(void __iomem *addr, u32 pos, bool val)
+{
+ u32 tmp;
+
+ tmp = readl_relaxed(addr);
+ tmp = val ? tmp | BIT(pos) : tmp & ~BIT(pos);
+ writel_relaxed(tmp, addr);
+}
+
+static void hisi_dma_free_irq_vectors(void *data)
+{
+ pci_free_irq_vectors(data);
+}
+
+static void hisi_dma_pause_dma(struct hisi_dma_dev *hdma_dev, u32 index,
+ bool pause)
+{
+ void __iomem *addr = hdma_dev->base + HISI_DMA_CTRL0 + index *
+ HISI_DMA_OFFSET;
+
+ hisi_dma_update_bit(addr, HISI_DMA_CTRL0_QUEUE_PAUSE_S, pause);
+}
+
+static void hisi_dma_enable_dma(struct hisi_dma_dev *hdma_dev, u32 index,
+ bool enable)
+{
+ void __iomem *addr = hdma_dev->base + HISI_DMA_CTRL0 + index *
+ HISI_DMA_OFFSET;
+
+ hisi_dma_update_bit(addr, HISI_DMA_CTRL0_QUEUE_EN_S, enable);
+}
+
+static void hisi_dma_mask_irq(struct hisi_dma_dev *hdma_dev, u32 qp_index)
+{
+ hisi_dma_chan_write(hdma_dev->base, HISI_DMA_INT_MSK, qp_index,
+ HISI_DMA_INT_STS_MASK);
+}
+
+static void hisi_dma_unmask_irq(struct hisi_dma_dev *hdma_dev, u32 qp_index)
+{
+ void __iomem *base = hdma_dev->base;
+
+ hisi_dma_chan_write(base, HISI_DMA_INT_STS, qp_index,
+ HISI_DMA_INT_STS_MASK);
+ hisi_dma_chan_write(base, HISI_DMA_INT_MSK, qp_index, 0);
+}
+
+static void hisi_dma_do_reset(struct hisi_dma_dev *hdma_dev, u32 index)
+{
+ void __iomem *addr = hdma_dev->base + HISI_DMA_CTRL1 + index *
+ HISI_DMA_OFFSET;
+
+ hisi_dma_update_bit(addr, HISI_DMA_CTRL1_QUEUE_RESET_S, 1);
+}
+
+static void hisi_dma_reset_qp_point(struct hisi_dma_dev *hdma_dev, u32 index)
+{
+ hisi_dma_chan_write(hdma_dev->base, HISI_DMA_SQ_TAIL_PTR, index, 0);
+ hisi_dma_chan_write(hdma_dev->base, HISI_DMA_CQ_HEAD_PTR, index, 0);
+}
+
+static void hisi_dma_reset_hw_chan(struct hisi_dma_chan *chan)
+{
+ struct hisi_dma_dev *hdma_dev = chan->hdma_dev;
+ u32 index = chan->qp_num, tmp;
+ int ret;
+
+ hisi_dma_pause_dma(hdma_dev, index, true);
+ hisi_dma_enable_dma(hdma_dev, index, false);
+ hisi_dma_mask_irq(hdma_dev, index);
+
+ ret = readl_relaxed_poll_timeout(hdma_dev->base +
+ HISI_DMA_Q_FSM_STS + index * HISI_DMA_OFFSET, tmp,
+ FIELD_GET(HISI_DMA_FSM_STS_MASK, tmp) != RUN, 10, 1000);
+ if (ret) {
+ dev_err(&hdma_dev->pdev->dev, "disable channel timeout!\n");
+ WARN_ON(1);
+ }
+
+ hisi_dma_do_reset(hdma_dev, index);
+ hisi_dma_reset_qp_point(hdma_dev, index);
+ hisi_dma_pause_dma(hdma_dev, index, false);
+ hisi_dma_enable_dma(hdma_dev, index, true);
+ hisi_dma_unmask_irq(hdma_dev, index);
+
+ ret = readl_relaxed_poll_timeout(hdma_dev->base +
+ HISI_DMA_Q_FSM_STS + index * HISI_DMA_OFFSET, tmp,
+ FIELD_GET(HISI_DMA_FSM_STS_MASK, tmp) == IDLE, 10, 1000);
+ if (ret) {
+ dev_err(&hdma_dev->pdev->dev, "reset channel timeout!\n");
+ WARN_ON(1);
+ }
+}
+
+static void hisi_dma_free_chan_resources(struct dma_chan *c)
+{
+ struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
+ struct hisi_dma_dev *hdma_dev = chan->hdma_dev;
+
+ hisi_dma_reset_hw_chan(chan);
+ vchan_free_chan_resources(&chan->vc);
+
+ memset(chan->sq, 0, sizeof(struct hisi_dma_sqe) * hdma_dev->chan_depth);
+ memset(chan->cq, 0, sizeof(struct hisi_dma_cqe) * hdma_dev->chan_depth);
+ chan->sq_tail = 0;
+ chan->cq_head = 0;
+ chan->status = DISABLE;
+}
+
+static void hisi_dma_desc_free(struct virt_dma_desc *vd)
+{
+ kfree(to_hisi_dma_desc(vd));
+}
+
+static struct dma_async_tx_descriptor *
+hisi_dma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dst, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
+ struct hisi_dma_desc *desc;
+
+ desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ desc->sqe.length = cpu_to_le32(len);
+ desc->sqe.src_addr = cpu_to_le64(src);
+ desc->sqe.dst_addr = cpu_to_le64(dst);
+
+ return vchan_tx_prep(&chan->vc, &desc->vd, flags);
+}
+
+static enum dma_status
+hisi_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ return dma_cookie_status(c, cookie, txstate);
+}
+
+static void hisi_dma_start_transfer(struct hisi_dma_chan *chan)
+{
+ struct hisi_dma_sqe *sqe = chan->sq + chan->sq_tail;
+ struct hisi_dma_dev *hdma_dev = chan->hdma_dev;
+ struct hisi_dma_desc *desc;
+ struct virt_dma_desc *vd;
+
+ vd = vchan_next_desc(&chan->vc);
+ if (!vd) {
+ dev_err(&hdma_dev->pdev->dev, "no issued task!\n");
+ chan->desc = NULL;
+ return;
+ }
+ list_del(&vd->node);
+ desc = to_hisi_dma_desc(vd);
+ chan->desc = desc;
+
+ memcpy(sqe, &desc->sqe, sizeof(struct hisi_dma_sqe));
+
+ /* update the remaining fields in the sqe */
+ sqe->dw0 = cpu_to_le32(FIELD_PREP(OPCODE_MASK, OPCODE_M2M));
+ sqe->dw0 |= cpu_to_le32(LOCAL_IRQ_EN);
+
+ /* make sure data has been updated in sqe */
+ wmb();
+
+ /* update sq tail, point to new sqe position */
+ chan->sq_tail = (chan->sq_tail + 1) % hdma_dev->chan_depth;
+
+ /* update sq_tail to trigger a new task */
+ hisi_dma_chan_write(hdma_dev->base, HISI_DMA_SQ_TAIL_PTR, chan->qp_num,
+ chan->sq_tail);
+}
+
+static void hisi_dma_issue_pending(struct dma_chan *c)
+{
+ struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+
+ if (vchan_issue_pending(&chan->vc))
+ hisi_dma_start_transfer(chan);
+
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+}
+
+static int hisi_dma_terminate_all(struct dma_chan *c)
+{
+ struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+
+ hisi_dma_pause_dma(chan->hdma_dev, chan->qp_num, true);
+ if (chan->desc) {
+ vchan_terminate_vdesc(&chan->desc->vd);
+ chan->desc = NULL;
+ }
+
+ vchan_get_all_descriptors(&chan->vc, &head);
+
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ vchan_dma_desc_free_list(&chan->vc, &head);
+ hisi_dma_pause_dma(chan->hdma_dev, chan->qp_num, false);
+
+ return 0;
+}
+
+static void hisi_dma_synchronize(struct dma_chan *c)
+{
+ struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
+
+ vchan_synchronize(&chan->vc);
+}
+
+static int hisi_dma_alloc_qps_mem(struct hisi_dma_dev *hdma_dev)
+{
+ size_t sq_size = sizeof(struct hisi_dma_sqe) * hdma_dev->chan_depth;
+ size_t cq_size = sizeof(struct hisi_dma_cqe) * hdma_dev->chan_depth;
+ struct device *dev = &hdma_dev->pdev->dev;
+ struct hisi_dma_chan *chan;
+ int i;
+
+ for (i = 0; i < hdma_dev->chan_num; i++) {
+ chan = &hdma_dev->chan[i];
+ chan->sq = dmam_alloc_coherent(dev, sq_size, &chan->sq_dma,
+ GFP_KERNEL);
+ if (!chan->sq)
+ return -ENOMEM;
+
+ chan->cq = dmam_alloc_coherent(dev, cq_size, &chan->cq_dma,
+ GFP_KERNEL);
+ if (!chan->cq)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void hisi_dma_init_hw_qp(struct hisi_dma_dev *hdma_dev, u32 index)
+{
+ struct hisi_dma_chan *chan = &hdma_dev->chan[index];
+ u32 hw_depth = hdma_dev->chan_depth - 1;
+ void __iomem *base = hdma_dev->base;
+
+ /* set sq, cq base */
+ hisi_dma_chan_write(base, HISI_DMA_SQ_BASE_L, index,
+ lower_32_bits(chan->sq_dma));
+ hisi_dma_chan_write(base, HISI_DMA_SQ_BASE_H, index,
+ upper_32_bits(chan->sq_dma));
+ hisi_dma_chan_write(base, HISI_DMA_CQ_BASE_L, index,
+ lower_32_bits(chan->cq_dma));
+ hisi_dma_chan_write(base, HISI_DMA_CQ_BASE_H, index,
+ upper_32_bits(chan->cq_dma));
+
+ /* set sq, cq depth */
+ hisi_dma_chan_write(base, HISI_DMA_SQ_DEPTH, index, hw_depth);
+ hisi_dma_chan_write(base, HISI_DMA_CQ_DEPTH, index, hw_depth);
+
+ /* init sq tail and cq head */
+ hisi_dma_chan_write(base, HISI_DMA_SQ_TAIL_PTR, index, 0);
+ hisi_dma_chan_write(base, HISI_DMA_CQ_HEAD_PTR, index, 0);
+}
+
+static void hisi_dma_enable_qp(struct hisi_dma_dev *hdma_dev, u32 qp_index)
+{
+ hisi_dma_init_hw_qp(hdma_dev, qp_index);
+ hisi_dma_unmask_irq(hdma_dev, qp_index);
+ hisi_dma_enable_dma(hdma_dev, qp_index, true);
+}
+
+static void hisi_dma_disable_qp(struct hisi_dma_dev *hdma_dev, u32 qp_index)
+{
+ hisi_dma_reset_hw_chan(&hdma_dev->chan[qp_index]);
+}
+
+static void hisi_dma_enable_qps(struct hisi_dma_dev *hdma_dev)
+{
+ int i;
+
+ for (i = 0; i < hdma_dev->chan_num; i++) {
+ hdma_dev->chan[i].qp_num = i;
+ hdma_dev->chan[i].hdma_dev = hdma_dev;
+ hdma_dev->chan[i].vc.desc_free = hisi_dma_desc_free;
+ vchan_init(&hdma_dev->chan[i].vc, &hdma_dev->dma_dev);
+ hisi_dma_enable_qp(hdma_dev, i);
+ }
+}
+
+static void hisi_dma_disable_qps(struct hisi_dma_dev *hdma_dev)
+{
+ int i;
+
+ for (i = 0; i < hdma_dev->chan_num; i++) {
+ hisi_dma_disable_qp(hdma_dev, i);
+ tasklet_kill(&hdma_dev->chan[i].vc.task);
+ }
+}
+
+static irqreturn_t hisi_dma_irq(int irq, void *data)
+{
+ struct hisi_dma_chan *chan = data;
+ struct hisi_dma_dev *hdma_dev = chan->hdma_dev;
+ struct hisi_dma_desc *desc;
+ struct hisi_dma_cqe *cqe;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+
+ desc = chan->desc;
+ cqe = chan->cq + chan->cq_head;
+ if (desc) {
+ if (FIELD_GET(STATUS_MASK, le16_to_cpu(cqe->w0)) == STATUS_SUCC) {
+ chan->cq_head = (chan->cq_head + 1) %
+ hdma_dev->chan_depth;
+ hisi_dma_chan_write(hdma_dev->base,
+ HISI_DMA_CQ_HEAD_PTR, chan->qp_num,
+ chan->cq_head);
+ vchan_cookie_complete(&desc->vd);
+ } else {
+ dev_err(&hdma_dev->pdev->dev, "task error!\n");
+ }
+
+ chan->desc = NULL;
+ }
+
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static int hisi_dma_request_qps_irq(struct hisi_dma_dev *hdma_dev)
+{
+ struct pci_dev *pdev = hdma_dev->pdev;
+ int i, ret;
+
+ for (i = 0; i < hdma_dev->chan_num; i++) {
+ ret = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i),
+ hisi_dma_irq, IRQF_SHARED, "hisi_dma",
+ &hdma_dev->chan[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/* This function enables all hw channels in a device */
+static int hisi_dma_enable_hw_channels(struct hisi_dma_dev *hdma_dev)
+{
+ int ret;
+
+ ret = hisi_dma_alloc_qps_mem(hdma_dev);
+ if (ret) {
+ dev_err(&hdma_dev->pdev->dev, "fail to allocate qp memory!\n");
+ return ret;
+ }
+
+ ret = hisi_dma_request_qps_irq(hdma_dev);
+ if (ret) {
+ dev_err(&hdma_dev->pdev->dev, "fail to request qp irq!\n");
+ return ret;
+ }
+
+ hisi_dma_enable_qps(hdma_dev);
+
+ return 0;
+}
+
+static void hisi_dma_disable_hw_channels(void *data)
+{
+ hisi_dma_disable_qps(data);
+}
+
+static void hisi_dma_set_mode(struct hisi_dma_dev *hdma_dev,
+ enum hisi_dma_mode mode)
+{
+ writel_relaxed(mode == RC ? 1 : 0, hdma_dev->base + HISI_DMA_MODE);
+}
+
+static int hisi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct hisi_dma_dev *hdma_dev;
+ struct dma_device *dma_dev;
+ size_t dev_size;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ dev_err(dev, "failed to enable device mem!\n");
+ return ret;
+ }
+
+ ret = pcim_iomap_regions(pdev, 1 << PCI_BAR_2, pci_name(pdev));
+ if (ret) {
+ dev_err(dev, "failed to remap I/O region!\n");
+ return ret;
+ }
+
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (ret)
+ return ret;
+
+ ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (ret)
+ return ret;
+
+ dev_size = sizeof(struct hisi_dma_chan) * HISI_DMA_CHAN_NUM +
+ sizeof(*hdma_dev);
+ hdma_dev = devm_kzalloc(dev, dev_size, GFP_KERNEL);
+ if (!hdma_dev)
+ return -ENOMEM;
+
+ hdma_dev->base = pcim_iomap_table(pdev)[PCI_BAR_2];
+ hdma_dev->pdev = pdev;
+ hdma_dev->chan_num = HISI_DMA_CHAN_NUM;
+ hdma_dev->chan_depth = HISI_DMA_Q_DEPTH_VAL;
+
+ pci_set_drvdata(pdev, hdma_dev);
+ pci_set_master(pdev);
+
+ ret = pci_alloc_irq_vectors(pdev, HISI_DMA_MSI_NUM, HISI_DMA_MSI_NUM,
+ PCI_IRQ_MSI);
+ if (ret < 0) {
+ dev_err(dev, "Failed to allocate MSI vectors!\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(dev, hisi_dma_free_irq_vectors, pdev);
+ if (ret)
+ return ret;
+
+ dma_dev = &hdma_dev->dma_dev;
+ dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+ dma_dev->device_free_chan_resources = hisi_dma_free_chan_resources;
+ dma_dev->device_prep_dma_memcpy = hisi_dma_prep_dma_memcpy;
+ dma_dev->device_tx_status = hisi_dma_tx_status;
+ dma_dev->device_issue_pending = hisi_dma_issue_pending;
+ dma_dev->device_terminate_all = hisi_dma_terminate_all;
+ dma_dev->device_synchronize = hisi_dma_synchronize;
+ dma_dev->directions = BIT(DMA_MEM_TO_MEM);
+ dma_dev->dev = dev;
+ INIT_LIST_HEAD(&dma_dev->channels);
+
+ hisi_dma_set_mode(hdma_dev, RC);
+
+ ret = hisi_dma_enable_hw_channels(hdma_dev);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable hw channel!\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(dev, hisi_dma_disable_hw_channels,
+ hdma_dev);
+ if (ret)
+ return ret;
+
+ ret = dmaenginem_async_device_register(dma_dev);
+ if (ret < 0)
+ dev_err(dev, "failed to register device!\n");
+
+ return ret;
+}
+
+static const struct pci_device_id hisi_dma_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, 0xa122) },
+ { 0, }
+};
+
+static struct pci_driver hisi_dma_pci_driver = {
+ .name = "hisi_dma",
+ .id_table = hisi_dma_pci_tbl,
+ .probe = hisi_dma_probe,
+};
+
+module_pci_driver(hisi_dma_pci_driver);
+
+MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
+MODULE_AUTHOR("Zhenfa Qiu <qiuzhenfa@hisilicon.com>");
+MODULE_DESCRIPTION("HiSilicon Kunpeng DMA controller driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, hisi_dma_pci_tbl);
--- /dev/null
+obj-$(CONFIG_INTEL_IDXD) += idxd.o
+idxd-y := init.o irq.o device.o sysfs.o submit.o dma.o cdev.o
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/sched/task.h>
+#include <linux/intel-svm.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <uapi/linux/idxd.h>
+#include "registers.h"
+#include "idxd.h"
+
+struct idxd_cdev_context {
+ const char *name;
+ dev_t devt;
+ struct ida minor_ida;
+};
+
+/*
+ * ictx is an array of cdev contexts, one per accelerator type;
+ * enum idxd_type is used as the index.
+ */
+static struct idxd_cdev_context ictx[IDXD_TYPE_MAX] = {
+ { .name = "dsa" },
+};
+
+struct idxd_user_context {
+ struct idxd_wq *wq;
+ struct task_struct *task;
+ unsigned int flags;
+};
+
+enum idxd_cdev_cleanup {
+ CDEV_NORMAL = 0,
+ CDEV_FAILED,
+};
+
+static void idxd_cdev_dev_release(struct device *dev)
+{
+ dev_dbg(dev, "releasing cdev device\n");
+ kfree(dev);
+}
+
+static struct device_type idxd_cdev_device_type = {
+ .name = "idxd_cdev",
+ .release = idxd_cdev_dev_release,
+};
+
+static inline struct idxd_cdev *inode_idxd_cdev(struct inode *inode)
+{
+ struct cdev *cdev = inode->i_cdev;
+
+ return container_of(cdev, struct idxd_cdev, cdev);
+}
+
+static inline struct idxd_wq *idxd_cdev_wq(struct idxd_cdev *idxd_cdev)
+{
+ return container_of(idxd_cdev, struct idxd_wq, idxd_cdev);
+}
+
+static inline struct idxd_wq *inode_wq(struct inode *inode)
+{
+ return idxd_cdev_wq(inode_idxd_cdev(inode));
+}
+
+static int idxd_cdev_open(struct inode *inode, struct file *filp)
+{
+ struct idxd_user_context *ctx;
+ struct idxd_device *idxd;
+ struct idxd_wq *wq;
+ struct device *dev;
+ struct idxd_cdev *idxd_cdev;
+
+ wq = inode_wq(inode);
+ idxd = wq->idxd;
+ dev = &idxd->pdev->dev;
+ idxd_cdev = &wq->idxd_cdev;
+
+ dev_dbg(dev, "%s called\n", __func__);
+
+ if (idxd_wq_refcount(wq) > 1 && wq_dedicated(wq))
+ return -EBUSY;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->wq = wq;
+ filp->private_data = ctx;
+ idxd_wq_get(wq);
+ return 0;
+}
+
+static int idxd_cdev_release(struct inode *node, struct file *filep)
+{
+ struct idxd_user_context *ctx = filep->private_data;
+ struct idxd_wq *wq = ctx->wq;
+ struct idxd_device *idxd = wq->idxd;
+ struct device *dev = &idxd->pdev->dev;
+
+ dev_dbg(dev, "%s called\n", __func__);
+ filep->private_data = NULL;
+
+ kfree(ctx);
+ idxd_wq_put(wq);
+ return 0;
+}
+
+static int check_vma(struct idxd_wq *wq, struct vm_area_struct *vma,
+ const char *func)
+{
+ struct device *dev = &wq->idxd->pdev->dev;
+
+ if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
+ dev_info_ratelimited(dev,
+ "%s: %s: mapping too large: %lu\n",
+ current->comm, func,
+ vma->vm_end - vma->vm_start);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct idxd_user_context *ctx = filp->private_data;
+ struct idxd_wq *wq = ctx->wq;
+ struct idxd_device *idxd = wq->idxd;
+ struct pci_dev *pdev = idxd->pdev;
+ phys_addr_t base = pci_resource_start(pdev, IDXD_WQ_BAR);
+ unsigned long pfn;
+ int rc;
+
+ dev_dbg(&pdev->dev, "%s called\n", __func__);
+ rc = check_vma(wq, vma, __func__);
+ if (rc < 0)
+ return rc;
+
+ vma->vm_flags |= VM_DONTCOPY;
+ pfn = (base + idxd_get_wq_portal_full_offset(wq->id,
+ IDXD_PORTAL_LIMITED)) >> PAGE_SHIFT;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ vma->vm_private_data = ctx;
+
+ return io_remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE,
+ vma->vm_page_prot);
+}
+
+static __poll_t idxd_cdev_poll(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ struct idxd_user_context *ctx = filp->private_data;
+ struct idxd_wq *wq = ctx->wq;
+ struct idxd_device *idxd = wq->idxd;
+ struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
+ unsigned long flags;
+ __poll_t out = 0;
+
+ poll_wait(filp, &idxd_cdev->err_queue, wait);
+ spin_lock_irqsave(&idxd->dev_lock, flags);
+ if (idxd->sw_err.valid)
+ out = EPOLLIN | EPOLLRDNORM;
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+
+ return out;
+}
+
+static const struct file_operations idxd_cdev_fops = {
+ .owner = THIS_MODULE,
+ .open = idxd_cdev_open,
+ .release = idxd_cdev_release,
+ .mmap = idxd_cdev_mmap,
+ .poll = idxd_cdev_poll,
+};
+
+int idxd_cdev_get_major(struct idxd_device *idxd)
+{
+ return MAJOR(ictx[idxd->type].devt);
+}
+
+static int idxd_wq_cdev_dev_setup(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
+ struct idxd_cdev_context *cdev_ctx;
+ struct device *dev;
+ int minor, rc;
+
+ idxd_cdev->dev = kzalloc(sizeof(*idxd_cdev->dev), GFP_KERNEL);
+ if (!idxd_cdev->dev)
+ return -ENOMEM;
+
+ dev = idxd_cdev->dev;
+ dev->parent = &idxd->pdev->dev;
+ dev_set_name(dev, "%s/wq%u.%u", idxd_get_dev_name(idxd),
+ idxd->id, wq->id);
+ dev->bus = idxd_get_bus_type(idxd);
+
+ cdev_ctx = &ictx[wq->idxd->type];
+ minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL);
+ if (minor < 0) {
+ rc = minor;
+ goto ida_err;
+ }
+
+ dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor);
+ dev->type = &idxd_cdev_device_type;
+ rc = device_register(dev);
+ if (rc < 0) {
+ dev_err(&idxd->pdev->dev, "device register failed\n");
+ put_device(dev);
+ goto dev_reg_err;
+ }
+ idxd_cdev->minor = minor;
+
+ return 0;
+
+ dev_reg_err:
+ ida_simple_remove(&cdev_ctx->minor_ida, MINOR(dev->devt));
+ ida_err:
+ kfree(dev);
+ idxd_cdev->dev = NULL;
+ return rc;
+}
+
+static void idxd_wq_cdev_cleanup(struct idxd_wq *wq,
+ enum idxd_cdev_cleanup cdev_state)
+{
+ struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
+ struct idxd_cdev_context *cdev_ctx;
+
+ cdev_ctx = &ictx[wq->idxd->type];
+ if (cdev_state == CDEV_NORMAL)
+ cdev_del(&idxd_cdev->cdev);
+ device_unregister(idxd_cdev->dev);
+ /*
+ * The device_type->release() will be called on the device and free
+ * the allocated struct device. We can just forget it.
+ */
+ ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor);
+ idxd_cdev->dev = NULL;
+ idxd_cdev->minor = -1;
+}
+
+int idxd_wq_add_cdev(struct idxd_wq *wq)
+{
+ struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
+ struct cdev *cdev = &idxd_cdev->cdev;
+ struct device *dev;
+ int rc;
+
+ rc = idxd_wq_cdev_dev_setup(wq);
+ if (rc < 0)
+ return rc;
+
+ dev = idxd_cdev->dev;
+ cdev_init(cdev, &idxd_cdev_fops);
+ cdev_set_parent(cdev, &dev->kobj);
+ rc = cdev_add(cdev, dev->devt, 1);
+ if (rc) {
+ dev_dbg(&wq->idxd->pdev->dev, "cdev_add failed: %d\n", rc);
+ idxd_wq_cdev_cleanup(wq, CDEV_FAILED);
+ return rc;
+ }
+
+ init_waitqueue_head(&idxd_cdev->err_queue);
+ return 0;
+}
+
+void idxd_wq_del_cdev(struct idxd_wq *wq)
+{
+ idxd_wq_cdev_cleanup(wq, CDEV_NORMAL);
+}
+
+int idxd_cdev_register(void)
+{
+ int rc, i;
+
+ for (i = 0; i < IDXD_TYPE_MAX; i++) {
+ ida_init(&ictx[i].minor_ida);
+ rc = alloc_chrdev_region(&ictx[i].devt, 0, MINORMASK,
+ ictx[i].name);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+void idxd_cdev_remove(void)
+{
+ int i;
+
+ for (i = 0; i < IDXD_TYPE_MAX; i++) {
+ unregister_chrdev_region(ictx[i].devt, MINORMASK);
+ ida_destroy(&ictx[i].minor_ida);
+ }
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/dmaengine.h>
+#include <uapi/linux/idxd.h>
+#include "../dmaengine.h"
+#include "idxd.h"
+#include "registers.h"
+
+static int idxd_cmd_wait(struct idxd_device *idxd, u32 *status, int timeout);
+static int idxd_cmd_send(struct idxd_device *idxd, int cmd_code, u32 operand);
+
+/* Interrupt control bits */
+int idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
+{
+ struct pci_dev *pdev = idxd->pdev;
+ int msixcnt = pci_msix_vec_count(pdev);
+ union msix_perm perm;
+ u32 offset;
+
+ if (vec_id < 0 || vec_id >= msixcnt)
+ return -EINVAL;
+
+ offset = idxd->msix_perm_offset + vec_id * 8;
+ perm.bits = ioread32(idxd->reg_base + offset);
+ perm.ignore = 1;
+ iowrite32(perm.bits, idxd->reg_base + offset);
+
+ return 0;
+}
+
+void idxd_mask_msix_vectors(struct idxd_device *idxd)
+{
+ struct pci_dev *pdev = idxd->pdev;
+ int msixcnt = pci_msix_vec_count(pdev);
+ int i, rc;
+
+ for (i = 0; i < msixcnt; i++) {
+ rc = idxd_mask_msix_vector(idxd, i);
+ if (rc < 0)
+ dev_warn(&pdev->dev,
+ "Failed disabling msix vec %d\n", i);
+ }
+}
+
+int idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
+{
+ struct pci_dev *pdev = idxd->pdev;
+ int msixcnt = pci_msix_vec_count(pdev);
+ union msix_perm perm;
+ u32 offset;
+
+ if (vec_id < 0 || vec_id >= msixcnt)
+ return -EINVAL;
+
+ offset = idxd->msix_perm_offset + vec_id * 8;
+ perm.bits = ioread32(idxd->reg_base + offset);
+ perm.ignore = 0;
+ iowrite32(perm.bits, idxd->reg_base + offset);
+
+ return 0;
+}
+
+void idxd_unmask_error_interrupts(struct idxd_device *idxd)
+{
+ union genctrl_reg genctrl;
+
+ genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
+ genctrl.softerr_int_en = 1;
+ iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
+}
+
+void idxd_mask_error_interrupts(struct idxd_device *idxd)
+{
+ union genctrl_reg genctrl;
+
+ genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
+ genctrl.softerr_int_en = 0;
+ iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
+}
+
+static void free_hw_descs(struct idxd_wq *wq)
+{
+ int i;
+
+ for (i = 0; i < wq->num_descs; i++)
+ kfree(wq->hw_descs[i]);
+
+ kfree(wq->hw_descs);
+}
+
+static int alloc_hw_descs(struct idxd_wq *wq, int num)
+{
+ struct device *dev = &wq->idxd->pdev->dev;
+ int i;
+ int node = dev_to_node(dev);
+
+ wq->hw_descs = kcalloc_node(num, sizeof(struct dsa_hw_desc *),
+ GFP_KERNEL, node);
+ if (!wq->hw_descs)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++) {
+ wq->hw_descs[i] = kzalloc_node(sizeof(*wq->hw_descs[i]),
+ GFP_KERNEL, node);
+ if (!wq->hw_descs[i]) {
+ free_hw_descs(wq);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static void free_descs(struct idxd_wq *wq)
+{
+ int i;
+
+ for (i = 0; i < wq->num_descs; i++)
+ kfree(wq->descs[i]);
+
+ kfree(wq->descs);
+}
+
+static int alloc_descs(struct idxd_wq *wq, int num)
+{
+ struct device *dev = &wq->idxd->pdev->dev;
+ int i;
+ int node = dev_to_node(dev);
+
+ wq->descs = kcalloc_node(num, sizeof(struct idxd_desc *),
+ GFP_KERNEL, node);
+ if (!wq->descs)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++) {
+ wq->descs[i] = kzalloc_node(sizeof(*wq->descs[i]),
+ GFP_KERNEL, node);
+ if (!wq->descs[i]) {
+ free_descs(wq);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+/* WQ control bits */
+int idxd_wq_alloc_resources(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct idxd_group *group = wq->group;
+ struct device *dev = &idxd->pdev->dev;
+ int rc, num_descs, i;
+
+ if (wq->type != IDXD_WQT_KERNEL)
+ return 0;
+
+ num_descs = wq->size +
+ idxd->hw.gen_cap.max_descs_per_engine * group->num_engines;
+ wq->num_descs = num_descs;
+
+ rc = alloc_hw_descs(wq, num_descs);
+ if (rc < 0)
+ return rc;
+
+ wq->compls_size = num_descs * sizeof(struct dsa_completion_record);
+ wq->compls = dma_alloc_coherent(dev, wq->compls_size,
+ &wq->compls_addr, GFP_KERNEL);
+ if (!wq->compls) {
+ rc = -ENOMEM;
+ goto fail_alloc_compls;
+ }
+
+ rc = alloc_descs(wq, num_descs);
+ if (rc < 0)
+ goto fail_alloc_descs;
+
+ rc = sbitmap_init_node(&wq->sbmap, num_descs, -1, GFP_KERNEL,
+ dev_to_node(dev));
+ if (rc < 0)
+ goto fail_sbitmap_init;
+
+ for (i = 0; i < num_descs; i++) {
+ struct idxd_desc *desc = wq->descs[i];
+
+ desc->hw = wq->hw_descs[i];
+ desc->completion = &wq->compls[i];
+ desc->compl_dma = wq->compls_addr +
+ sizeof(struct dsa_completion_record) * i;
+ desc->id = i;
+ desc->wq = wq;
+
+ dma_async_tx_descriptor_init(&desc->txd, &wq->dma_chan);
+ desc->txd.tx_submit = idxd_dma_tx_submit;
+ }
+
+ return 0;
+
+ fail_sbitmap_init:
+ free_descs(wq);
+ fail_alloc_descs:
+ dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
+ fail_alloc_compls:
+ free_hw_descs(wq);
+ return rc;
+}
+
+void idxd_wq_free_resources(struct idxd_wq *wq)
+{
+ struct device *dev = &wq->idxd->pdev->dev;
+
+ if (wq->type != IDXD_WQT_KERNEL)
+ return;
+
+ free_hw_descs(wq);
+ free_descs(wq);
+ dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
+ sbitmap_free(&wq->sbmap);
+}
+
+int idxd_wq_enable(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct device *dev = &idxd->pdev->dev;
+ u32 status;
+ int rc;
+
+ lockdep_assert_held(&idxd->dev_lock);
+
+ if (wq->state == IDXD_WQ_ENABLED) {
+ dev_dbg(dev, "WQ %d already enabled\n", wq->id);
+ return -ENXIO;
+ }
+
+ rc = idxd_cmd_send(idxd, IDXD_CMD_ENABLE_WQ, wq->id);
+ if (rc < 0)
+ return rc;
+ rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
+ if (rc < 0)
+ return rc;
+
+ if (status != IDXD_CMDSTS_SUCCESS &&
+ status != IDXD_CMDSTS_ERR_WQ_ENABLED) {
+ dev_dbg(dev, "WQ enable failed: %#x\n", status);
+ return -ENXIO;
+ }
+
+ wq->state = IDXD_WQ_ENABLED;
+ dev_dbg(dev, "WQ %d enabled\n", wq->id);
+ return 0;
+}
+
+int idxd_wq_disable(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct device *dev = &idxd->pdev->dev;
+ u32 status, operand;
+ int rc;
+
+ lockdep_assert_held(&idxd->dev_lock);
+ dev_dbg(dev, "Disabling WQ %d\n", wq->id);
+
+ if (wq->state != IDXD_WQ_ENABLED) {
+ dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
+ return 0;
+ }
+
+ operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
+ rc = idxd_cmd_send(idxd, IDXD_CMD_DISABLE_WQ, operand);
+ if (rc < 0)
+ return rc;
+ rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
+ if (rc < 0)
+ return rc;
+
+ if (status != IDXD_CMDSTS_SUCCESS) {
+ dev_dbg(dev, "WQ disable failed: %#x\n", status);
+ return -ENXIO;
+ }
+
+ wq->state = IDXD_WQ_DISABLED;
+ dev_dbg(dev, "WQ %d disabled\n", wq->id);
+ return 0;
+}
+
+int idxd_wq_map_portal(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct pci_dev *pdev = idxd->pdev;
+ struct device *dev = &pdev->dev;
+ resource_size_t start;
+
+ start = pci_resource_start(pdev, IDXD_WQ_BAR);
+ start = start + wq->id * IDXD_PORTAL_SIZE;
+
+ wq->dportal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE);
+ if (!wq->dportal)
+ return -ENOMEM;
+ dev_dbg(dev, "wq %d portal mapped at %p\n", wq->id, wq->dportal);
+
+ return 0;
+}
+
+void idxd_wq_unmap_portal(struct idxd_wq *wq)
+{
+ struct device *dev = &wq->idxd->pdev->dev;
+
+ devm_iounmap(dev, wq->dportal);
+}
+
+/* Device control bits */
+static inline bool idxd_is_enabled(struct idxd_device *idxd)
+{
+ union gensts_reg gensts;
+
+ gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
+
+ if (gensts.state == IDXD_DEVICE_STATE_ENABLED)
+ return true;
+ return false;
+}
+
+static int idxd_cmd_wait(struct idxd_device *idxd, u32 *status, int timeout)
+{
+ u32 sts, to = timeout;
+
+ lockdep_assert_held(&idxd->dev_lock);
+ sts = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
+ while (sts & IDXD_CMDSTS_ACTIVE && --to) {
+ cpu_relax();
+ sts = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
+ }
+
+ if (to == 0 && sts & IDXD_CMDSTS_ACTIVE) {
+ dev_warn(&idxd->pdev->dev, "%s timed out!\n", __func__);
+ *status = 0;
+ return -EBUSY;
+ }
+
+ *status = sts;
+ return 0;
+}
+
+static int idxd_cmd_send(struct idxd_device *idxd, int cmd_code, u32 operand)
+{
+ union idxd_command_reg cmd;
+ int rc;
+ u32 status;
+
+ lockdep_assert_held(&idxd->dev_lock);
+ rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
+ if (rc < 0)
+ return rc;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cmd = cmd_code;
+ cmd.operand = operand;
+ dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n",
+ __func__, cmd_code, operand);
+ iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
+
+ return 0;
+}
+
+int idxd_device_enable(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+ int rc;
+ u32 status;
+
+ lockdep_assert_held(&idxd->dev_lock);
+ if (idxd_is_enabled(idxd)) {
+ dev_dbg(dev, "Device already enabled\n");
+ return -ENXIO;
+ }
+
+ rc = idxd_cmd_send(idxd, IDXD_CMD_ENABLE_DEVICE, 0);
+ if (rc < 0)
+ return rc;
+ rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
+ if (rc < 0)
+ return rc;
+
+ /* If the command is successful or if the device was enabled */
+ if (status != IDXD_CMDSTS_SUCCESS &&
+ status != IDXD_CMDSTS_ERR_DEV_ENABLED) {
+ dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
+ return -ENXIO;
+ }
+
+ idxd->state = IDXD_DEV_ENABLED;
+ return 0;
+}
+
+int idxd_device_disable(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+ int rc;
+ u32 status;
+
+ lockdep_assert_held(&idxd->dev_lock);
+ if (!idxd_is_enabled(idxd)) {
+ dev_dbg(dev, "Device is not enabled\n");
+ return 0;
+ }
+
+ rc = idxd_cmd_send(idxd, IDXD_CMD_DISABLE_DEVICE, 0);
+ if (rc < 0)
+ return rc;
+ rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
+ if (rc < 0)
+ return rc;
+
+ /* If the command is successful or if the device was disabled */
+ if (status != IDXD_CMDSTS_SUCCESS &&
+ !(status & IDXD_CMDSTS_ERR_DIS_DEV_EN)) {
+ dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
+ rc = -ENXIO;
+ return rc;
+ }
+
+ idxd->state = IDXD_DEV_CONF_READY;
+ return 0;
+}
+
+int __idxd_device_reset(struct idxd_device *idxd)
+{
+ u32 status;
+ int rc;
+
+ rc = idxd_cmd_send(idxd, IDXD_CMD_RESET_DEVICE, 0);
+ if (rc < 0)
+ return rc;
+ rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+int idxd_device_reset(struct idxd_device *idxd)
+{
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&idxd->dev_lock, flags);
+ rc = __idxd_device_reset(idxd);
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ return rc;
+}
+
+/* Device configuration bits */
+static void idxd_group_config_write(struct idxd_group *group)
+{
+ struct idxd_device *idxd = group->idxd;
+ struct device *dev = &idxd->pdev->dev;
+ int i;
+ u32 grpcfg_offset;
+
+ dev_dbg(dev, "Writing group %d cfg registers\n", group->id);
+
+ /* setup GRPWQCFG */
+ for (i = 0; i < 4; i++) {
+ grpcfg_offset = idxd->grpcfg_offset +
+ group->id * 64 + i * sizeof(u64);
+ iowrite64(group->grpcfg.wqs[i],
+ idxd->reg_base + grpcfg_offset);
+ dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
+ group->id, i, grpcfg_offset,
+ ioread64(idxd->reg_base + grpcfg_offset));
+ }
+
+ /* setup GRPENGCFG */
+ grpcfg_offset = idxd->grpcfg_offset + group->id * 64 + 32;
+ iowrite64(group->grpcfg.engines, idxd->reg_base + grpcfg_offset);
+ dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
+ grpcfg_offset, ioread64(idxd->reg_base + grpcfg_offset));
+
+ /* setup GRPFLAGS */
+ grpcfg_offset = idxd->grpcfg_offset + group->id * 64 + 40;
+ iowrite32(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset);
+ dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n",
+ group->id, grpcfg_offset,
+ ioread32(idxd->reg_base + grpcfg_offset));
+}
+
+static int idxd_groups_config_write(struct idxd_device *idxd)
+{
+ union gencfg_reg reg;
+ int i;
+ struct device *dev = &idxd->pdev->dev;
+
+ /* Setup bandwidth token limit */
+ if (idxd->token_limit) {
+ reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
+ reg.token_limit = idxd->token_limit;
+ iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
+ }
+
+ dev_dbg(dev, "GENCFG(%#x): %#x\n", IDXD_GENCFG_OFFSET,
+ ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET));
+
+ for (i = 0; i < idxd->max_groups; i++) {
+ struct idxd_group *group = &idxd->groups[i];
+
+ idxd_group_config_write(group);
+ }
+
+ return 0;
+}
+
+static int idxd_wq_config_write(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct device *dev = &idxd->pdev->dev;
+ u32 wq_offset;
+ int i;
+
+ if (!wq->group)
+ return 0;
+
+ memset(&wq->wqcfg, 0, sizeof(union wqcfg));
+
+ /* byte 0-3 */
+ wq->wqcfg.wq_size = wq->size;
+
+ if (wq->size == 0) {
+ dev_warn(dev, "Incorrect work queue size: 0\n");
+ return -EINVAL;
+ }
+
+ /* bytes 4-7 */
+ wq->wqcfg.wq_thresh = wq->threshold;
+
+ /* byte 8-11 */
+ wq->wqcfg.priv = !!(wq->type == IDXD_WQT_KERNEL);
+ wq->wqcfg.mode = 1;
+
+ wq->wqcfg.priority = wq->priority;
+
+ /* bytes 12-15 */
+ wq->wqcfg.max_xfer_shift = idxd->hw.gen_cap.max_xfer_shift;
+ wq->wqcfg.max_batch_shift = idxd->hw.gen_cap.max_batch_shift;
+
+ dev_dbg(dev, "WQ %d CFGs\n", wq->id);
+ for (i = 0; i < 8; i++) {
+ wq_offset = idxd->wqcfg_offset + wq->id * 32 + i * sizeof(u32);
+ iowrite32(wq->wqcfg.bits[i], idxd->reg_base + wq_offset);
+ dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
+ wq->id, i, wq_offset,
+ ioread32(idxd->reg_base + wq_offset));
+ }
+
+ return 0;
+}
+
+static int idxd_wqs_config_write(struct idxd_device *idxd)
+{
+ int i, rc;
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ rc = idxd_wq_config_write(wq);
+ if (rc < 0)
+ return rc;
+ }
+
+ return 0;
+}
+
+static void idxd_group_flags_setup(struct idxd_device *idxd)
+{
+ int i;
+
+	/* Traffic class A defaults to 0 and B to 1 when left unconfigured */
+ for (i = 0; i < idxd->max_groups; i++) {
+ struct idxd_group *group = &idxd->groups[i];
+
+ if (group->tc_a == -1)
+ group->grpcfg.flags.tc_a = 0;
+ else
+ group->grpcfg.flags.tc_a = group->tc_a;
+ if (group->tc_b == -1)
+ group->grpcfg.flags.tc_b = 1;
+ else
+ group->grpcfg.flags.tc_b = group->tc_b;
+ group->grpcfg.flags.use_token_limit = group->use_token_limit;
+ group->grpcfg.flags.tokens_reserved = group->tokens_reserved;
+ if (group->tokens_allowed)
+ group->grpcfg.flags.tokens_allowed =
+ group->tokens_allowed;
+ else
+ group->grpcfg.flags.tokens_allowed = idxd->max_tokens;
+ }
+}
+
+static int idxd_engines_setup(struct idxd_device *idxd)
+{
+ int i, engines = 0;
+ struct idxd_engine *eng;
+ struct idxd_group *group;
+
+ for (i = 0; i < idxd->max_groups; i++) {
+ group = &idxd->groups[i];
+ group->grpcfg.engines = 0;
+ }
+
+ for (i = 0; i < idxd->max_engines; i++) {
+ eng = &idxd->engines[i];
+ group = eng->group;
+
+ if (!group)
+ continue;
+
+ group->grpcfg.engines |= BIT(eng->id);
+ engines++;
+ }
+
+ if (!engines)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int idxd_wqs_setup(struct idxd_device *idxd)
+{
+ struct idxd_wq *wq;
+ struct idxd_group *group;
+ int i, j, configured = 0;
+ struct device *dev = &idxd->pdev->dev;
+
+ for (i = 0; i < idxd->max_groups; i++) {
+ group = &idxd->groups[i];
+ for (j = 0; j < 4; j++)
+ group->grpcfg.wqs[j] = 0;
+ }
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ wq = &idxd->wqs[i];
+ group = wq->group;
+
+ if (!wq->group)
+ continue;
+ if (!wq->size)
+ continue;
+
+ if (!wq_dedicated(wq)) {
+ dev_warn(dev, "No shared workqueue support.\n");
+ return -EINVAL;
+ }
+
+ group->grpcfg.wqs[wq->id / 64] |= BIT(wq->id % 64);
+ configured++;
+ }
+
+ if (configured == 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+int idxd_device_config(struct idxd_device *idxd)
+{
+ int rc;
+
+ lockdep_assert_held(&idxd->dev_lock);
+ rc = idxd_wqs_setup(idxd);
+ if (rc < 0)
+ return rc;
+
+ rc = idxd_engines_setup(idxd);
+ if (rc < 0)
+ return rc;
+
+ idxd_group_flags_setup(idxd);
+
+ rc = idxd_wqs_config_write(idxd);
+ if (rc < 0)
+ return rc;
+
+ rc = idxd_groups_config_write(idxd);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/dmaengine.h>
+#include <uapi/linux/idxd.h>
+#include "../dmaengine.h"
+#include "registers.h"
+#include "idxd.h"
+
+static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c)
+{
+ return container_of(c, struct idxd_wq, dma_chan);
+}
+
+void idxd_dma_complete_txd(struct idxd_desc *desc,
+ enum idxd_complete_type comp_type)
+{
+ struct dma_async_tx_descriptor *tx;
+ struct dmaengine_result res;
+ int complete = 1;
+
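+	/* A zero completion status means the hardware is still working on it */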
+ if (desc->completion->status == DSA_COMP_SUCCESS)
+ res.result = DMA_TRANS_NOERROR;
+ else if (desc->completion->status)
+ res.result = DMA_TRANS_WRITE_FAILED;
+ else if (comp_type == IDXD_COMPLETE_ABORT)
+ res.result = DMA_TRANS_ABORTED;
+ else
+ complete = 0;
+
+ tx = &desc->txd;
+ if (complete && tx->cookie) {
+ dma_cookie_complete(tx);
+ dma_descriptor_unmap(tx);
+ dmaengine_desc_get_callback_invoke(tx, &res);
+ tx->callback = NULL;
+ tx->callback_result = NULL;
+ }
+}
+
+static void op_flag_setup(unsigned long flags, u32 *desc_flags)
+{
+ *desc_flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR;
+ if (flags & DMA_PREP_INTERRUPT)
+ *desc_flags |= IDXD_OP_FLAG_RCI;
+}
+
+static inline void set_completion_address(struct idxd_desc *desc,
+ u64 *compl_addr)
+{
+ *compl_addr = desc->compl_dma;
+}
+
+static inline void idxd_prep_desc_common(struct idxd_wq *wq,
+ struct dsa_hw_desc *hw, char opcode,
+ u64 addr_f1, u64 addr_f2, u64 len,
+ u64 compl, u32 flags)
+{
+ struct idxd_device *idxd = wq->idxd;
+
+ hw->flags = flags;
+ hw->opcode = opcode;
+ hw->src_addr = addr_f1;
+ hw->dst_addr = addr_f2;
+ hw->xfer_size = len;
+ hw->priv = !!(wq->type == IDXD_WQT_KERNEL);
+ hw->completion_addr = compl;
+
+	/*
+	 * Descriptor completion vectors are 1 through num_wq_irqs; vector 0
+	 * is reserved for error and administrative events. Round-robin
+	 * across the work queue vectors.
+	 */
+ wq->vec_ptr = (wq->vec_ptr % idxd->num_wq_irqs) + 1;
+ hw->int_handle = wq->vec_ptr;
+}
+
+static struct dma_async_tx_descriptor *
+idxd_dma_submit_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
+ dma_addr_t dma_src, size_t len, unsigned long flags)
+{
+ struct idxd_wq *wq = to_idxd_wq(c);
+ u32 desc_flags;
+ struct idxd_device *idxd = wq->idxd;
+ struct idxd_desc *desc;
+
+ if (wq->state != IDXD_WQ_ENABLED)
+ return NULL;
+
+ if (len > idxd->max_xfer_bytes)
+ return NULL;
+
+ op_flag_setup(flags, &desc_flags);
+ desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
+ if (IS_ERR(desc))
+ return NULL;
+
+ idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_MEMMOVE,
+ dma_src, dma_dest, len, desc->compl_dma,
+ desc_flags);
+
+ desc->txd.flags = flags;
+
+ return &desc->txd;
+}
+
+static int idxd_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct idxd_wq *wq = to_idxd_wq(chan);
+ struct device *dev = &wq->idxd->pdev->dev;
+
+ idxd_wq_get(wq);
+ dev_dbg(dev, "%s: client_count: %d\n", __func__,
+ idxd_wq_refcount(wq));
+ return 0;
+}
+
+static void idxd_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct idxd_wq *wq = to_idxd_wq(chan);
+ struct device *dev = &wq->idxd->pdev->dev;
+
+ idxd_wq_put(wq);
+ dev_dbg(dev, "%s: client_count: %d\n", __func__,
+ idxd_wq_refcount(wq));
+}
+
+static enum dma_status idxd_dma_tx_status(struct dma_chan *dma_chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ return dma_cookie_status(dma_chan, cookie, txstate);
+}
+
+/*
+ * issue_pending() does not need to do anything since tx_submit() does the job
+ * already.
+ */
+static void idxd_dma_issue_pending(struct dma_chan *dma_chan)
+{
+}
+
+dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct dma_chan *c = tx->chan;
+ struct idxd_wq *wq = to_idxd_wq(c);
+ dma_cookie_t cookie;
+ int rc;
+ struct idxd_desc *desc = container_of(tx, struct idxd_desc, txd);
+
+ cookie = dma_cookie_assign(tx);
+
+ rc = idxd_submit_desc(wq, desc);
+ if (rc < 0) {
+ idxd_free_desc(wq, desc);
+ return rc;
+ }
+
+ return cookie;
+}
+
+static void idxd_dma_release(struct dma_device *device)
+{
+}
+
+int idxd_register_dma_device(struct idxd_device *idxd)
+{
+ struct dma_device *dma = &idxd->dma_dev;
+
+ INIT_LIST_HEAD(&dma->channels);
+ dma->dev = &idxd->pdev->dev;
+
+ dma->device_release = idxd_dma_release;
+
+ if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) {
+ dma_cap_set(DMA_MEMCPY, dma->cap_mask);
+ dma->device_prep_dma_memcpy = idxd_dma_submit_memcpy;
+ }
+
+ dma->device_tx_status = idxd_dma_tx_status;
+ dma->device_issue_pending = idxd_dma_issue_pending;
+ dma->device_alloc_chan_resources = idxd_dma_alloc_chan_resources;
+ dma->device_free_chan_resources = idxd_dma_free_chan_resources;
+
+ return dma_async_device_register(&idxd->dma_dev);
+}
+
+void idxd_unregister_dma_device(struct idxd_device *idxd)
+{
+ dma_async_device_unregister(&idxd->dma_dev);
+}
+
+int idxd_register_dma_channel(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct dma_device *dma = &idxd->dma_dev;
+ struct dma_chan *chan = &wq->dma_chan;
+ int rc;
+
+ memset(&wq->dma_chan, 0, sizeof(struct dma_chan));
+ chan->device = dma;
+ list_add_tail(&chan->device_node, &dma->channels);
+ rc = dma_async_device_channel_register(dma, chan);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+void idxd_unregister_dma_channel(struct idxd_wq *wq)
+{
+ dma_async_device_channel_unregister(&wq->idxd->dma_dev, &wq->dma_chan);
+}
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#ifndef _IDXD_H_
+#define _IDXD_H_
+
+#include <linux/sbitmap.h>
+#include <linux/dmaengine.h>
+#include <linux/percpu-rwsem.h>
+#include <linux/wait.h>
+#include <linux/cdev.h>
+#include "registers.h"
+
+#define IDXD_DRIVER_VERSION "1.00"
+
+extern struct kmem_cache *idxd_desc_pool;
+
+#define IDXD_REG_TIMEOUT 50
+#define IDXD_DRAIN_TIMEOUT 5000
+
+enum idxd_type {
+ IDXD_TYPE_UNKNOWN = -1,
+ IDXD_TYPE_DSA = 0,
+ IDXD_TYPE_MAX
+};
+
+#define IDXD_NAME_SIZE 128
+
+struct idxd_device_driver {
+ struct device_driver drv;
+};
+
+struct idxd_irq_entry {
+ struct idxd_device *idxd;
+ int id;
+ struct llist_head pending_llist;
+ struct list_head work_list;
+};
+
+struct idxd_group {
+ struct device conf_dev;
+ struct idxd_device *idxd;
+ struct grpcfg grpcfg;
+ int id;
+ int num_engines;
+ int num_wqs;
+ bool use_token_limit;
+ u8 tokens_allowed;
+ u8 tokens_reserved;
+ int tc_a;
+ int tc_b;
+};
+
+#define IDXD_MAX_PRIORITY 0xf
+
+enum idxd_wq_state {
+ IDXD_WQ_DISABLED = 0,
+ IDXD_WQ_ENABLED,
+};
+
+enum idxd_wq_flag {
+ WQ_FLAG_DEDICATED = 0,
+};
+
+enum idxd_wq_type {
+ IDXD_WQT_NONE = 0,
+ IDXD_WQT_KERNEL,
+ IDXD_WQT_USER,
+};
+
+struct idxd_cdev {
+ struct cdev cdev;
+ struct device *dev;
+ int minor;
+ struct wait_queue_head err_queue;
+};
+
+#define IDXD_ALLOCATED_BATCH_SIZE 128U
+#define WQ_NAME_SIZE 1024
+#define WQ_TYPE_SIZE 10
+
+enum idxd_op_type {
+ IDXD_OP_BLOCK = 0,
+ IDXD_OP_NONBLOCK = 1,
+};
+
+enum idxd_complete_type {
+ IDXD_COMPLETE_NORMAL = 0,
+ IDXD_COMPLETE_ABORT,
+};
+
+struct idxd_wq {
+ void __iomem *dportal;
+ struct device conf_dev;
+ struct idxd_cdev idxd_cdev;
+ struct idxd_device *idxd;
+ int id;
+ enum idxd_wq_type type;
+ struct idxd_group *group;
+ int client_count;
+ struct mutex wq_lock; /* mutex for workqueue */
+ u32 size;
+ u32 threshold;
+ u32 priority;
+ enum idxd_wq_state state;
+ unsigned long flags;
+ union wqcfg wqcfg;
+ atomic_t dq_count; /* dedicated queue flow control */
+ u32 vec_ptr; /* interrupt steering */
+ struct dsa_hw_desc **hw_descs;
+ int num_descs;
+ struct dsa_completion_record *compls;
+ dma_addr_t compls_addr;
+ int compls_size;
+ struct idxd_desc **descs;
+ struct sbitmap sbmap;
+ struct dma_chan dma_chan;
+ struct percpu_rw_semaphore submit_lock;
+ wait_queue_head_t submit_waitq;
+ char name[WQ_NAME_SIZE + 1];
+};
+
+struct idxd_engine {
+ struct device conf_dev;
+ int id;
+ struct idxd_group *group;
+ struct idxd_device *idxd;
+};
+
+/* shadow registers */
+struct idxd_hw {
+ u32 version;
+ union gen_cap_reg gen_cap;
+ union wq_cap_reg wq_cap;
+ union group_cap_reg group_cap;
+ union engine_cap_reg engine_cap;
+ struct opcap opcap;
+};
+
+enum idxd_device_state {
+ IDXD_DEV_HALTED = -1,
+ IDXD_DEV_DISABLED = 0,
+ IDXD_DEV_CONF_READY,
+ IDXD_DEV_ENABLED,
+};
+
+enum idxd_device_flag {
+ IDXD_FLAG_CONFIGURABLE = 0,
+};
+
+struct idxd_device {
+ enum idxd_type type;
+ struct device conf_dev;
+ struct list_head list;
+ struct idxd_hw hw;
+ enum idxd_device_state state;
+ unsigned long flags;
+ int id;
+ int major;
+
+ struct pci_dev *pdev;
+ void __iomem *reg_base;
+
+ spinlock_t dev_lock; /* spinlock for device */
+ struct idxd_group *groups;
+ struct idxd_wq *wqs;
+ struct idxd_engine *engines;
+
+ int num_groups;
+
+ u32 msix_perm_offset;
+ u32 wqcfg_offset;
+ u32 grpcfg_offset;
+ u32 perfmon_offset;
+
+ u64 max_xfer_bytes;
+ u32 max_batch_size;
+ int max_groups;
+ int max_engines;
+ int max_tokens;
+ int max_wqs;
+ int max_wq_size;
+ int token_limit;
+ int nr_tokens; /* non-reserved tokens */
+
+ union sw_err_reg sw_err;
+
+ struct msix_entry *msix_entries;
+ int num_wq_irqs;
+ struct idxd_irq_entry *irq_entries;
+
+ struct dma_device dma_dev;
+};
+
+/* IDXD software descriptor */
+struct idxd_desc {
+ struct dsa_hw_desc *hw;
+ dma_addr_t desc_dma;
+ struct dsa_completion_record *completion;
+ dma_addr_t compl_dma;
+ struct dma_async_tx_descriptor txd;
+ struct llist_node llnode;
+ struct list_head list;
+ int id;
+ struct idxd_wq *wq;
+};
+
+#define confdev_to_idxd(dev) container_of(dev, struct idxd_device, conf_dev)
+#define confdev_to_wq(dev) container_of(dev, struct idxd_wq, conf_dev)
+
+extern struct bus_type dsa_bus_type;
+
+static inline bool wq_dedicated(struct idxd_wq *wq)
+{
+ return test_bit(WQ_FLAG_DEDICATED, &wq->flags);
+}
+
+enum idxd_portal_prot {
+ IDXD_PORTAL_UNLIMITED = 0,
+ IDXD_PORTAL_LIMITED,
+};
+
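+/*
+ * Each work queue owns IDXD_PORTAL_SIZE (four 4KB pages) of submission
+ * portal space in the WQ BAR; the first page is the unlimited portal and
+ * the second the limited one, hence the prot * 0x1000 offset.
+ */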
+static inline int idxd_get_wq_portal_offset(enum idxd_portal_prot prot)
+{
+ return prot * 0x1000;
+}
+
+static inline int idxd_get_wq_portal_full_offset(int wq_id,
+ enum idxd_portal_prot prot)
+{
+ return ((wq_id * 4) << PAGE_SHIFT) + idxd_get_wq_portal_offset(prot);
+}
+
+static inline void idxd_set_type(struct idxd_device *idxd)
+{
+ struct pci_dev *pdev = idxd->pdev;
+
+ if (pdev->device == PCI_DEVICE_ID_INTEL_DSA_SPR0)
+ idxd->type = IDXD_TYPE_DSA;
+ else
+ idxd->type = IDXD_TYPE_UNKNOWN;
+}
+
+static inline void idxd_wq_get(struct idxd_wq *wq)
+{
+ wq->client_count++;
+}
+
+static inline void idxd_wq_put(struct idxd_wq *wq)
+{
+ wq->client_count--;
+}
+
+static inline int idxd_wq_refcount(struct idxd_wq *wq)
+{
+ return wq->client_count;
+}
+
+const char *idxd_get_dev_name(struct idxd_device *idxd);
+int idxd_register_bus_type(void);
+void idxd_unregister_bus_type(void);
+int idxd_setup_sysfs(struct idxd_device *idxd);
+void idxd_cleanup_sysfs(struct idxd_device *idxd);
+int idxd_register_driver(void);
+void idxd_unregister_driver(void);
+struct bus_type *idxd_get_bus_type(struct idxd_device *idxd);
+
+/* device interrupt control */
+irqreturn_t idxd_irq_handler(int vec, void *data);
+irqreturn_t idxd_misc_thread(int vec, void *data);
+irqreturn_t idxd_wq_thread(int irq, void *data);
+void idxd_mask_error_interrupts(struct idxd_device *idxd);
+void idxd_unmask_error_interrupts(struct idxd_device *idxd);
+void idxd_mask_msix_vectors(struct idxd_device *idxd);
+int idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id);
+int idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id);
+
+/* device control */
+int idxd_device_enable(struct idxd_device *idxd);
+int idxd_device_disable(struct idxd_device *idxd);
+int idxd_device_reset(struct idxd_device *idxd);
+int __idxd_device_reset(struct idxd_device *idxd);
+void idxd_device_cleanup(struct idxd_device *idxd);
+int idxd_device_config(struct idxd_device *idxd);
+void idxd_device_wqs_clear_state(struct idxd_device *idxd);
+
+/* work queue control */
+int idxd_wq_alloc_resources(struct idxd_wq *wq);
+void idxd_wq_free_resources(struct idxd_wq *wq);
+int idxd_wq_enable(struct idxd_wq *wq);
+int idxd_wq_disable(struct idxd_wq *wq);
+int idxd_wq_map_portal(struct idxd_wq *wq);
+void idxd_wq_unmap_portal(struct idxd_wq *wq);
+
+/* submission */
+int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
+struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype);
+void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc);
+
+/* dmaengine */
+int idxd_register_dma_device(struct idxd_device *idxd);
+void idxd_unregister_dma_device(struct idxd_device *idxd);
+int idxd_register_dma_channel(struct idxd_wq *wq);
+void idxd_unregister_dma_channel(struct idxd_wq *wq);
+void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
+void idxd_dma_complete_txd(struct idxd_desc *desc,
+ enum idxd_complete_type comp_type);
+dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx);
+
+/* cdev */
+int idxd_cdev_register(void);
+void idxd_cdev_remove(void);
+int idxd_cdev_get_major(struct idxd_device *idxd);
+int idxd_wq_add_cdev(struct idxd_wq *wq);
+void idxd_wq_del_cdev(struct idxd_wq *wq);
+
+#endif
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
+#include <linux/aer.h>
+#include <linux/fs.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <uapi/linux/idxd.h>
+#include <linux/dmaengine.h>
+#include "../dmaengine.h"
+#include "registers.h"
+#include "idxd.h"
+
+MODULE_VERSION(IDXD_DRIVER_VERSION);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Intel Corporation");
+
+#define DRV_NAME "idxd"
+
+static struct idr idxd_idrs[IDXD_TYPE_MAX];
+static DEFINE_MUTEX(idxd_idr_lock);
+
+static struct pci_device_id idxd_pci_tbl[] = {
+ /* DSA ver 1.0 platforms */
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_DSA_SPR0) },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);
+
+static char *idxd_name[] = {
+ "dsa",
+};
+
+const char *idxd_get_dev_name(struct idxd_device *idxd)
+{
+ return idxd_name[idxd->type];
+}
+
+static int idxd_setup_interrupts(struct idxd_device *idxd)
+{
+ struct pci_dev *pdev = idxd->pdev;
+ struct device *dev = &pdev->dev;
+ struct msix_entry *msix;
+ struct idxd_irq_entry *irq_entry;
+ int i, msixcnt;
+ int rc = 0;
+
+ msixcnt = pci_msix_vec_count(pdev);
+	if (msixcnt < 0) {
+		rc = msixcnt;
+		dev_err(dev, "Not MSI-X interrupt capable.\n");
+		goto err_no_irq;
+	}
+
+ idxd->msix_entries = devm_kzalloc(dev, sizeof(struct msix_entry) *
+ msixcnt, GFP_KERNEL);
+ if (!idxd->msix_entries) {
+ rc = -ENOMEM;
+ goto err_no_irq;
+ }
+
+ for (i = 0; i < msixcnt; i++)
+ idxd->msix_entries[i].entry = i;
+
+ rc = pci_enable_msix_exact(pdev, idxd->msix_entries, msixcnt);
+ if (rc) {
+ dev_err(dev, "Failed enabling %d MSIX entries.\n", msixcnt);
+ goto err_no_irq;
+ }
+ dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);
+
+ /*
+ * We implement 1 completion list per MSI-X entry except for
+ * entry 0, which is for errors and others.
+ */
+ idxd->irq_entries = devm_kcalloc(dev, msixcnt,
+ sizeof(struct idxd_irq_entry),
+ GFP_KERNEL);
+ if (!idxd->irq_entries) {
+ rc = -ENOMEM;
+ goto err_no_irq;
+ }
+
+ for (i = 0; i < msixcnt; i++) {
+ idxd->irq_entries[i].id = i;
+ idxd->irq_entries[i].idxd = idxd;
+ }
+
+ msix = &idxd->msix_entries[0];
+ irq_entry = &idxd->irq_entries[0];
+ rc = devm_request_threaded_irq(dev, msix->vector, idxd_irq_handler,
+ idxd_misc_thread, 0, "idxd-misc",
+ irq_entry);
+ if (rc < 0) {
+ dev_err(dev, "Failed to allocate misc interrupt.\n");
+ goto err_no_irq;
+ }
+
+ dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n",
+ msix->vector);
+
+ /* first MSI-X entry is not for wq interrupts */
+ idxd->num_wq_irqs = msixcnt - 1;
+
+ for (i = 1; i < msixcnt; i++) {
+ msix = &idxd->msix_entries[i];
+ irq_entry = &idxd->irq_entries[i];
+
+ init_llist_head(&idxd->irq_entries[i].pending_llist);
+ INIT_LIST_HEAD(&idxd->irq_entries[i].work_list);
+ rc = devm_request_threaded_irq(dev, msix->vector,
+ idxd_irq_handler,
+ idxd_wq_thread, 0,
+ "idxd-portal", irq_entry);
+ if (rc < 0) {
+ dev_err(dev, "Failed to allocate irq %d.\n",
+ msix->vector);
+ goto err_no_irq;
+ }
+ dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n",
+ i, msix->vector);
+ }
+
+ idxd_unmask_error_interrupts(idxd);
+
+ return 0;
+
+ err_no_irq:
+ /* Disable error interrupt generation */
+ idxd_mask_error_interrupts(idxd);
+ pci_disable_msix(pdev);
+ dev_err(dev, "No usable interrupts\n");
+ return rc;
+}
+
+static void idxd_wqs_free_lock(struct idxd_device *idxd)
+{
+ int i;
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ percpu_free_rwsem(&wq->submit_lock);
+ }
+}
+
+static int idxd_setup_internals(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+ int i;
+
+ idxd->groups = devm_kcalloc(dev, idxd->max_groups,
+ sizeof(struct idxd_group), GFP_KERNEL);
+ if (!idxd->groups)
+ return -ENOMEM;
+
+ for (i = 0; i < idxd->max_groups; i++) {
+ idxd->groups[i].idxd = idxd;
+ idxd->groups[i].id = i;
+ idxd->groups[i].tc_a = -1;
+ idxd->groups[i].tc_b = -1;
+ }
+
+ idxd->wqs = devm_kcalloc(dev, idxd->max_wqs, sizeof(struct idxd_wq),
+ GFP_KERNEL);
+ if (!idxd->wqs)
+ return -ENOMEM;
+
+ idxd->engines = devm_kcalloc(dev, idxd->max_engines,
+ sizeof(struct idxd_engine), GFP_KERNEL);
+ if (!idxd->engines)
+ return -ENOMEM;
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+ int rc;
+
+ wq->id = i;
+ wq->idxd = idxd;
+ mutex_init(&wq->wq_lock);
+ atomic_set(&wq->dq_count, 0);
+ init_waitqueue_head(&wq->submit_waitq);
+ wq->idxd_cdev.minor = -1;
+ rc = percpu_init_rwsem(&wq->submit_lock);
+ if (rc < 0) {
+ idxd_wqs_free_lock(idxd);
+ return rc;
+ }
+ }
+
+ for (i = 0; i < idxd->max_engines; i++) {
+ idxd->engines[i].idxd = idxd;
+ idxd->engines[i].id = i;
+ }
+
+ return 0;
+}
+
+static void idxd_read_table_offsets(struct idxd_device *idxd)
+{
+ union offsets_reg offsets;
+ struct device *dev = &idxd->pdev->dev;
+
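+	/* The table offset register reports locations in 0x100-byte units */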
+ offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET);
+ offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET
+ + sizeof(u64));
+ idxd->grpcfg_offset = offsets.grpcfg * 0x100;
+ dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset);
+ idxd->wqcfg_offset = offsets.wqcfg * 0x100;
+ dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n",
+ idxd->wqcfg_offset);
+ idxd->msix_perm_offset = offsets.msix_perm * 0x100;
+ dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n",
+ idxd->msix_perm_offset);
+ idxd->perfmon_offset = offsets.perfmon * 0x100;
+ dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
+}
+
+static void idxd_read_caps(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+ int i;
+
+ /* reading generic capabilities */
+ idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET);
+ dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits);
+ idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
+ dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
+ idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift;
+ dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
+ if (idxd->hw.gen_cap.config_en)
+ set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);
+
+ /* reading group capabilities */
+ idxd->hw.group_cap.bits =
+ ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET);
+ dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits);
+ idxd->max_groups = idxd->hw.group_cap.num_groups;
+ dev_dbg(dev, "max groups: %u\n", idxd->max_groups);
+ idxd->max_tokens = idxd->hw.group_cap.total_tokens;
+ dev_dbg(dev, "max tokens: %u\n", idxd->max_tokens);
+ idxd->nr_tokens = idxd->max_tokens;
+
+ /* read engine capabilities */
+ idxd->hw.engine_cap.bits =
+ ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET);
+ dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits);
+ idxd->max_engines = idxd->hw.engine_cap.num_engines;
+ dev_dbg(dev, "max engines: %u\n", idxd->max_engines);
+
+ /* read workqueue capabilities */
+ idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET);
+ dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits);
+ idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size;
+ dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size);
+ idxd->max_wqs = idxd->hw.wq_cap.num_wqs;
+ dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs);
+
+ /* reading operation capabilities */
+ for (i = 0; i < 4; i++) {
+ idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base +
+ IDXD_OPCAP_OFFSET + i * sizeof(u64));
+ dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]);
+ }
+}
+
+static struct idxd_device *idxd_alloc(struct pci_dev *pdev,
+ void __iomem * const *iomap)
+{
+ struct device *dev = &pdev->dev;
+ struct idxd_device *idxd;
+
+ idxd = devm_kzalloc(dev, sizeof(struct idxd_device), GFP_KERNEL);
+ if (!idxd)
+ return NULL;
+
+ idxd->pdev = pdev;
+ idxd->reg_base = iomap[IDXD_MMIO_BAR];
+ spin_lock_init(&idxd->dev_lock);
+
+ return idxd;
+}
+
+static int idxd_probe(struct idxd_device *idxd)
+{
+ struct pci_dev *pdev = idxd->pdev;
+ struct device *dev = &pdev->dev;
+ int rc;
+
+ dev_dbg(dev, "%s entered and resetting device\n", __func__);
+ rc = idxd_device_reset(idxd);
+ if (rc < 0)
+ return rc;
+ dev_dbg(dev, "IDXD reset complete\n");
+
+ idxd_read_caps(idxd);
+ idxd_read_table_offsets(idxd);
+
+ rc = idxd_setup_internals(idxd);
+ if (rc)
+ goto err_setup;
+
+ rc = idxd_setup_interrupts(idxd);
+ if (rc)
+ goto err_setup;
+
+ dev_dbg(dev, "IDXD interrupt setup complete.\n");
+
+ mutex_lock(&idxd_idr_lock);
+ idxd->id = idr_alloc(&idxd_idrs[idxd->type], idxd, 0, 0, GFP_KERNEL);
+ mutex_unlock(&idxd_idr_lock);
+ if (idxd->id < 0) {
+ rc = -ENOMEM;
+ goto err_idr_fail;
+ }
+
+ idxd->major = idxd_cdev_get_major(idxd);
+
+ dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
+ return 0;
+
+ err_idr_fail:
+ idxd_mask_error_interrupts(idxd);
+ idxd_mask_msix_vectors(idxd);
+ err_setup:
+ return rc;
+}
+
+static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ void __iomem * const *iomap;
+ struct device *dev = &pdev->dev;
+ struct idxd_device *idxd;
+ int rc;
+ unsigned int mask;
+
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ dev_dbg(dev, "Mapping BARs\n");
+ mask = (1 << IDXD_MMIO_BAR);
+ rc = pcim_iomap_regions(pdev, mask, DRV_NAME);
+ if (rc)
+ return rc;
+
+ iomap = pcim_iomap_table(pdev);
+ if (!iomap)
+ return -ENOMEM;
+
+ dev_dbg(dev, "Set DMA masks\n");
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (rc)
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc)
+ return rc;
+
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (rc)
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc)
+ return rc;
+
+ dev_dbg(dev, "Alloc IDXD context\n");
+ idxd = idxd_alloc(pdev, iomap);
+ if (!idxd)
+ return -ENOMEM;
+
+ idxd_set_type(idxd);
+
+ dev_dbg(dev, "Set PCI master\n");
+ pci_set_master(pdev);
+ pci_set_drvdata(pdev, idxd);
+
+ idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
+ rc = idxd_probe(idxd);
+ if (rc) {
+ dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
+ return -ENODEV;
+ }
+
+ rc = idxd_setup_sysfs(idxd);
+ if (rc) {
+ dev_err(dev, "IDXD sysfs setup failed\n");
+ return -ENODEV;
+ }
+
+ idxd->state = IDXD_DEV_CONF_READY;
+
+ dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
+ idxd->hw.version);
+
+ return 0;
+}
+
+static void idxd_flush_pending_llist(struct idxd_irq_entry *ie)
+{
+ struct idxd_desc *desc, *itr;
+ struct llist_node *head;
+
+ head = llist_del_all(&ie->pending_llist);
+ if (!head)
+ return;
+
+ llist_for_each_entry_safe(desc, itr, head, llnode) {
+ idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
+ idxd_free_desc(desc->wq, desc);
+ }
+}
+
+static void idxd_flush_work_list(struct idxd_irq_entry *ie)
+{
+ struct idxd_desc *desc, *iter;
+
+ list_for_each_entry_safe(desc, iter, &ie->work_list, list) {
+ list_del(&desc->list);
+ idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
+ idxd_free_desc(desc->wq, desc);
+ }
+}
+
+static void idxd_shutdown(struct pci_dev *pdev)
+{
+ struct idxd_device *idxd = pci_get_drvdata(pdev);
+ int rc, i;
+ struct idxd_irq_entry *irq_entry;
+ int msixcnt = pci_msix_vec_count(pdev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&idxd->dev_lock, flags);
+ rc = idxd_device_disable(idxd);
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ if (rc)
+ dev_err(&pdev->dev, "Disabling device failed\n");
+
+ dev_dbg(&pdev->dev, "%s called\n", __func__);
+ idxd_mask_msix_vectors(idxd);
+ idxd_mask_error_interrupts(idxd);
+
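+	/* Quiesce each vector, then abort anything still queued on it */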
+ for (i = 0; i < msixcnt; i++) {
+ irq_entry = &idxd->irq_entries[i];
+ synchronize_irq(idxd->msix_entries[i].vector);
+ if (i == 0)
+ continue;
+ idxd_flush_pending_llist(irq_entry);
+ idxd_flush_work_list(irq_entry);
+ }
+}
+
+static void idxd_remove(struct pci_dev *pdev)
+{
+ struct idxd_device *idxd = pci_get_drvdata(pdev);
+
+ dev_dbg(&pdev->dev, "%s called\n", __func__);
+ idxd_cleanup_sysfs(idxd);
+ idxd_shutdown(pdev);
+ idxd_wqs_free_lock(idxd);
+ mutex_lock(&idxd_idr_lock);
+ idr_remove(&idxd_idrs[idxd->type], idxd->id);
+ mutex_unlock(&idxd_idr_lock);
+}
+
+static struct pci_driver idxd_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = idxd_pci_tbl,
+ .probe = idxd_pci_probe,
+ .remove = idxd_remove,
+ .shutdown = idxd_shutdown,
+};
+
+static int __init idxd_init_module(void)
+{
+ int err, i;
+
+	/*
+	 * If the CPU does not support MOVDIR64B (the 64-byte atomic store
+	 * used for descriptor submission), there is no point in enumerating
+	 * the device; we cannot drive it.
+	 */
+ if (!boot_cpu_has(X86_FEATURE_MOVDIR64B)) {
+ pr_warn("idxd driver failed to load without MOVDIR64B.\n");
+ return -ENODEV;
+ }
+
+ pr_info("%s: Intel(R) Accelerator Devices Driver %s\n",
+ DRV_NAME, IDXD_DRIVER_VERSION);
+
+ for (i = 0; i < IDXD_TYPE_MAX; i++)
+ idr_init(&idxd_idrs[i]);
+
+ err = idxd_register_bus_type();
+ if (err < 0)
+ return err;
+
+ err = idxd_register_driver();
+ if (err < 0)
+ goto err_idxd_driver_register;
+
+ err = idxd_cdev_register();
+ if (err)
+ goto err_cdev_register;
+
+ err = pci_register_driver(&idxd_pci_driver);
+ if (err)
+ goto err_pci_register;
+
+ return 0;
+
+err_pci_register:
+ idxd_cdev_remove();
+err_cdev_register:
+ idxd_unregister_driver();
+err_idxd_driver_register:
+ idxd_unregister_bus_type();
+ return err;
+}
+module_init(idxd_init_module);
+
+static void __exit idxd_exit_module(void)
+{
+ pci_unregister_driver(&idxd_pci_driver);
+ idxd_cdev_remove();
+ idxd_unregister_bus_type();
+}
+module_exit(idxd_exit_module);
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/dmaengine.h>
+#include <uapi/linux/idxd.h>
+#include "../dmaengine.h"
+#include "idxd.h"
+#include "registers.h"
+
+void idxd_device_wqs_clear_state(struct idxd_device *idxd)
+{
+ int i;
+
+ lockdep_assert_held(&idxd->dev_lock);
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ wq->state = IDXD_WQ_DISABLED;
+ }
+}
+
+static int idxd_restart(struct idxd_device *idxd)
+{
+ int i, rc;
+
+ lockdep_assert_held(&idxd->dev_lock);
+
+ rc = __idxd_device_reset(idxd);
+ if (rc < 0)
+ goto out;
+
+ rc = idxd_device_config(idxd);
+ if (rc < 0)
+ goto out;
+
+ rc = idxd_device_enable(idxd);
+ if (rc < 0)
+ goto out;
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ if (wq->state == IDXD_WQ_ENABLED) {
+ rc = idxd_wq_enable(wq);
+ if (rc < 0) {
+ dev_warn(&idxd->pdev->dev,
+ "Unable to re-enable wq %s\n",
+ dev_name(&wq->conf_dev));
+ }
+ }
+ }
+
+ return 0;
+
+ out:
+ idxd_device_wqs_clear_state(idxd);
+ idxd->state = IDXD_DEV_HALTED;
+ return rc;
+}
+
+irqreturn_t idxd_irq_handler(int vec, void *data)
+{
+ struct idxd_irq_entry *irq_entry = data;
+ struct idxd_device *idxd = irq_entry->idxd;
+
+ idxd_mask_msix_vector(idxd, irq_entry->id);
+ return IRQ_WAKE_THREAD;
+}
+
+irqreturn_t idxd_misc_thread(int vec, void *data)
+{
+ struct idxd_irq_entry *irq_entry = data;
+ struct idxd_device *idxd = irq_entry->idxd;
+ struct device *dev = &idxd->pdev->dev;
+ union gensts_reg gensts;
+ u32 cause, val = 0;
+ int i, rc;
+ bool err = false;
+
+ cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+
+ if (cause & IDXD_INTC_ERR) {
+ spin_lock_bh(&idxd->dev_lock);
+ for (i = 0; i < 4; i++)
+ idxd->sw_err.bits[i] = ioread64(idxd->reg_base +
+ IDXD_SWERR_OFFSET + i * sizeof(u64));
+ iowrite64(IDXD_SWERR_ACK, idxd->reg_base + IDXD_SWERR_OFFSET);
+
+ if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) {
+ int id = idxd->sw_err.wq_idx;
+ struct idxd_wq *wq = &idxd->wqs[id];
+
+ if (wq->type == IDXD_WQT_USER)
+ wake_up_interruptible(&wq->idxd_cdev.err_queue);
+ } else {
+ int i;
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ if (wq->type == IDXD_WQT_USER)
+ wake_up_interruptible(&wq->idxd_cdev.err_queue);
+ }
+ }
+
+ spin_unlock_bh(&idxd->dev_lock);
+ val |= IDXD_INTC_ERR;
+
+ for (i = 0; i < 4; i++)
+ dev_warn(dev, "err[%d]: %#16.16llx\n",
+ i, idxd->sw_err.bits[i]);
+ err = true;
+ }
+
+ if (cause & IDXD_INTC_CMD) {
+		/* Driver does not use command interrupts */
+ val |= IDXD_INTC_CMD;
+ }
+
+ if (cause & IDXD_INTC_OCCUPY) {
+ /* Driver does not utilize occupancy interrupt */
+ val |= IDXD_INTC_OCCUPY;
+ }
+
+ if (cause & IDXD_INTC_PERFMON_OVFL) {
+ /*
+ * Driver does not utilize perfmon counter overflow interrupt
+ * yet.
+ */
+ val |= IDXD_INTC_PERFMON_OVFL;
+ }
+
+ val ^= cause;
+ if (val)
+ dev_warn_once(dev, "Unexpected interrupt cause bits set: %#x\n",
+ val);
+
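+	/* Writing the observed cause bits back acknowledges the interrupt */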
+ iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+ if (!err)
+ return IRQ_HANDLED;
+
+ gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
+ if (gensts.state == IDXD_DEVICE_STATE_HALT) {
+ spin_lock_bh(&idxd->dev_lock);
+ if (gensts.reset_type == IDXD_DEVICE_RESET_SOFTWARE) {
+ rc = idxd_restart(idxd);
+ if (rc < 0)
+ dev_err(&idxd->pdev->dev,
+					"idxd restart failed, device halted.\n");
+ } else {
+ idxd_device_wqs_clear_state(idxd);
+ idxd->state = IDXD_DEV_HALTED;
+ dev_err(&idxd->pdev->dev,
+ "idxd halted, need %s.\n",
+ gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
+ "FLR" : "system reset");
+ }
+ spin_unlock_bh(&idxd->dev_lock);
+ }
+
+ idxd_unmask_msix_vector(idxd, irq_entry->id);
+ return IRQ_HANDLED;
+}
+
+static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
+ int *processed)
+{
+ struct idxd_desc *desc, *t;
+ struct llist_node *head;
+ int queued = 0;
+
+ head = llist_del_all(&irq_entry->pending_llist);
+ if (!head)
+ return 0;
+
+ llist_for_each_entry_safe(desc, t, head, llnode) {
+ if (desc->completion->status) {
+ idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
+ idxd_free_desc(desc->wq, desc);
+ (*processed)++;
+ } else {
+ list_add_tail(&desc->list, &irq_entry->work_list);
+ queued++;
+ }
+ }
+
+ return queued;
+}
+
+static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
+ int *processed)
+{
+ struct list_head *node, *next;
+ int queued = 0;
+
+ if (list_empty(&irq_entry->work_list))
+ return 0;
+
+ list_for_each_safe(node, next, &irq_entry->work_list) {
+ struct idxd_desc *desc =
+ container_of(node, struct idxd_desc, list);
+
+ if (desc->completion->status) {
+ list_del(&desc->list);
+ /* process and callback */
+ idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
+ idxd_free_desc(desc->wq, desc);
+ (*processed)++;
+ } else {
+ queued++;
+ }
+ }
+
+ return queued;
+}
+
+irqreturn_t idxd_wq_thread(int irq, void *data)
+{
+ struct idxd_irq_entry *irq_entry = data;
+ int rc, processed = 0, retry = 0;
+
+ /*
+	 * There are two lists we are processing. The pending_llist is where
+	 * the submitter adds all the submitted descriptors after sending them
+	 * to the workqueue. It's a lockless singly linked list. The work_list
+	 * is the standard Linux doubly linked list. We are in a scenario of
+ * multiple producers and a single consumer. The producers are all
+ * the kernel submitters of descriptors, and the consumer is the
+ * kernel irq handler thread for the msix vector when using threaded
+ * irq. To work with the restrictions of llist to remain lockless,
+ * we are doing the following steps:
+ * 1. Iterate through the work_list and process any completed
+ * descriptor. Delete the completed entries during iteration.
+ * 2. llist_del_all() from the pending list.
+ * 3. Iterate through the llist that was deleted from the pending list
+ * and process the completed entries.
+ * 4. If the entry is still waiting on hardware, list_add_tail() to
+ * the work_list.
+ * 5. Repeat until no more descriptors.
+ */
+ do {
+ rc = irq_process_work_list(irq_entry, &processed);
+ if (rc != 0) {
+ retry++;
+ continue;
+ }
+
+ rc = irq_process_pending_llist(irq_entry, &processed);
+ } while (rc != 0 && retry != 10);
+
+ idxd_unmask_msix_vector(irq_entry->idxd, irq_entry->id);
+
+ if (processed == 0)
+ return IRQ_NONE;
+
+ return IRQ_HANDLED;
+}
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#ifndef _IDXD_REGISTERS_H_
+#define _IDXD_REGISTERS_H_
+
+/* PCI Config */
+#define PCI_DEVICE_ID_INTEL_DSA_SPR0 0x0b25
+
+#define IDXD_MMIO_BAR 0
+#define IDXD_WQ_BAR 2
+#define IDXD_PORTAL_SIZE 0x4000
+
+/* MMIO Device BAR0 Registers */
+#define IDXD_VER_OFFSET 0x00
+#define IDXD_VER_MAJOR_MASK 0xf0
+#define IDXD_VER_MINOR_MASK 0x0f
+#define GET_IDXD_VER_MAJOR(x) (((x) & IDXD_VER_MAJOR_MASK) >> 4)
+#define GET_IDXD_VER_MINOR(x) ((x) & IDXD_VER_MINOR_MASK)
+
+union gen_cap_reg {
+ struct {
+ u64 block_on_fault:1;
+ u64 overlap_copy:1;
+ u64 cache_control_mem:1;
+ u64 cache_control_cache:1;
+ u64 rsvd:3;
+ u64 int_handle_req:1;
+ u64 dest_readback:1;
+ u64 drain_readback:1;
+ u64 rsvd2:6;
+ u64 max_xfer_shift:5;
+ u64 max_batch_shift:4;
+ u64 max_ims_mult:6;
+ u64 config_en:1;
+ u64 max_descs_per_engine:8;
+ u64 rsvd3:24;
+ };
+ u64 bits;
+} __packed;
+#define IDXD_GENCAP_OFFSET 0x10
+
+union wq_cap_reg {
+ struct {
+ u64 total_wq_size:16;
+ u64 num_wqs:8;
+ u64 rsvd:24;
+ u64 shared_mode:1;
+ u64 dedicated_mode:1;
+ u64 rsvd2:1;
+ u64 priority:1;
+ u64 occupancy:1;
+ u64 occupancy_int:1;
+ u64 rsvd3:10;
+ };
+ u64 bits;
+} __packed;
+#define IDXD_WQCAP_OFFSET 0x20
+
+union group_cap_reg {
+ struct {
+ u64 num_groups:8;
+ u64 total_tokens:8;
+ u64 token_en:1;
+ u64 token_limit:1;
+ u64 rsvd:46;
+ };
+ u64 bits;
+} __packed;
+#define IDXD_GRPCAP_OFFSET 0x30
+
+union engine_cap_reg {
+ struct {
+ u64 num_engines:8;
+ u64 rsvd:56;
+ };
+ u64 bits;
+} __packed;
+
+#define IDXD_ENGCAP_OFFSET 0x38
+
+#define IDXD_OPCAP_NOOP 0x0001
+#define IDXD_OPCAP_BATCH 0x0002
+#define IDXD_OPCAP_MEMMOVE 0x0008
+struct opcap {
+ u64 bits[4];
+};
+
+#define IDXD_OPCAP_OFFSET 0x40
+
+#define IDXD_TABLE_OFFSET 0x60
+union offsets_reg {
+ struct {
+ u64 grpcfg:16;
+ u64 wqcfg:16;
+ u64 msix_perm:16;
+ u64 ims:16;
+ u64 perfmon:16;
+ u64 rsvd:48;
+ };
+ u64 bits[2];
+} __packed;
+
+#define IDXD_GENCFG_OFFSET 0x80
+union gencfg_reg {
+ struct {
+ u32 token_limit:8;
+ u32 rsvd:4;
+ u32 user_int_en:1;
+ u32 rsvd2:19;
+ };
+ u32 bits;
+} __packed;
+
+#define IDXD_GENCTRL_OFFSET 0x88
+union genctrl_reg {
+ struct {
+ u32 softerr_int_en:1;
+ u32 rsvd:31;
+ };
+ u32 bits;
+} __packed;
+
+#define IDXD_GENSTATS_OFFSET 0x90
+union gensts_reg {
+ struct {
+ u32 state:2;
+ u32 reset_type:2;
+ u32 rsvd:28;
+ };
+ u32 bits;
+} __packed;
+
+enum idxd_device_status_state {
+ IDXD_DEVICE_STATE_DISABLED = 0,
+ IDXD_DEVICE_STATE_ENABLED,
+ IDXD_DEVICE_STATE_DRAIN,
+ IDXD_DEVICE_STATE_HALT,
+};
+
+enum idxd_device_reset_type {
+ IDXD_DEVICE_RESET_SOFTWARE = 0,
+ IDXD_DEVICE_RESET_FLR,
+ IDXD_DEVICE_RESET_WARM,
+ IDXD_DEVICE_RESET_COLD,
+};
+
+#define IDXD_INTCAUSE_OFFSET 0x98
+#define IDXD_INTC_ERR 0x01
+#define IDXD_INTC_CMD 0x02
+#define IDXD_INTC_OCCUPY 0x04
+#define IDXD_INTC_PERFMON_OVFL 0x08
+
+#define IDXD_CMD_OFFSET 0xa0
+union idxd_command_reg {
+ struct {
+ u32 operand:20;
+ u32 cmd:5;
+ u32 rsvd:6;
+ u32 int_req:1;
+ };
+ u32 bits;
+} __packed;
+
+enum idxd_cmd {
+ IDXD_CMD_ENABLE_DEVICE = 1,
+ IDXD_CMD_DISABLE_DEVICE,
+ IDXD_CMD_DRAIN_ALL,
+ IDXD_CMD_ABORT_ALL,
+ IDXD_CMD_RESET_DEVICE,
+ IDXD_CMD_ENABLE_WQ,
+ IDXD_CMD_DISABLE_WQ,
+ IDXD_CMD_DRAIN_WQ,
+ IDXD_CMD_ABORT_WQ,
+ IDXD_CMD_RESET_WQ,
+ IDXD_CMD_DRAIN_PASID,
+ IDXD_CMD_ABORT_PASID,
+ IDXD_CMD_REQUEST_INT_HANDLE,
+};
+
+#define IDXD_CMDSTS_OFFSET 0xa8
+union cmdsts_reg {
+ struct {
+ u8 err;
+ u16 result;
+ u8 rsvd:7;
+ u8 active:1;
+ };
+ u32 bits;
+} __packed;
+#define IDXD_CMDSTS_ACTIVE 0x80000000
+
+enum idxd_cmdsts_err {
+ IDXD_CMDSTS_SUCCESS = 0,
+ IDXD_CMDSTS_INVAL_CMD,
+ IDXD_CMDSTS_INVAL_WQIDX,
+ IDXD_CMDSTS_HW_ERR,
+ /* enable device errors */
+ IDXD_CMDSTS_ERR_DEV_ENABLED = 0x10,
+ IDXD_CMDSTS_ERR_CONFIG,
+ IDXD_CMDSTS_ERR_BUSMASTER_EN,
+ IDXD_CMDSTS_ERR_PASID_INVAL,
+ IDXD_CMDSTS_ERR_WQ_SIZE_ERANGE,
+ IDXD_CMDSTS_ERR_GRP_CONFIG,
+ IDXD_CMDSTS_ERR_GRP_CONFIG2,
+ IDXD_CMDSTS_ERR_GRP_CONFIG3,
+ IDXD_CMDSTS_ERR_GRP_CONFIG4,
+ /* enable wq errors */
+ IDXD_CMDSTS_ERR_DEV_NOTEN = 0x20,
+ IDXD_CMDSTS_ERR_WQ_ENABLED,
+ IDXD_CMDSTS_ERR_WQ_SIZE,
+ IDXD_CMDSTS_ERR_WQ_PRIOR,
+ IDXD_CMDSTS_ERR_WQ_MODE,
+ IDXD_CMDSTS_ERR_BOF_EN,
+ IDXD_CMDSTS_ERR_PASID_EN,
+ IDXD_CMDSTS_ERR_MAX_BATCH_SIZE,
+ IDXD_CMDSTS_ERR_MAX_XFER_SIZE,
+ /* disable device errors */
+ IDXD_CMDSTS_ERR_DIS_DEV_EN = 0x31,
+ /* disable WQ, drain WQ, abort WQ, reset WQ */
+ IDXD_CMDSTS_ERR_DEV_NOT_EN,
+ /* request interrupt handle */
+ IDXD_CMDSTS_ERR_INVAL_INT_IDX = 0x41,
+ IDXD_CMDSTS_ERR_NO_HANDLE,
+};
+
+#define IDXD_SWERR_OFFSET 0xc0
+#define IDXD_SWERR_VALID 0x00000001
+#define IDXD_SWERR_OVERFLOW 0x00000002
+#define IDXD_SWERR_ACK (IDXD_SWERR_VALID | IDXD_SWERR_OVERFLOW)
+union sw_err_reg {
+ struct {
+ u64 valid:1;
+ u64 overflow:1;
+ u64 desc_valid:1;
+ u64 wq_idx_valid:1;
+ u64 batch:1;
+ u64 fault_rw:1;
+ u64 priv:1;
+ u64 rsvd:1;
+ u64 error:8;
+ u64 wq_idx:8;
+ u64 rsvd2:8;
+ u64 operation:8;
+ u64 pasid:20;
+ u64 rsvd3:4;
+
+ u64 batch_idx:16;
+ u64 rsvd4:16;
+ u64 invalid_flags:32;
+
+ u64 fault_addr;
+
+ u64 rsvd5;
+ };
+ u64 bits[4];
+} __packed;
+
+union msix_perm {
+ struct {
+ u32 rsvd:2;
+ u32 ignore:1;
+ u32 pasid_en:1;
+ u32 rsvd2:8;
+ u32 pasid:20;
+ };
+ u32 bits;
+} __packed;
+
+union group_flags {
+ struct {
+ u32 tc_a:3;
+ u32 tc_b:3;
+ u32 rsvd:1;
+ u32 use_token_limit:1;
+ u32 tokens_reserved:8;
+ u32 rsvd2:4;
+ u32 tokens_allowed:8;
+ u32 rsvd3:4;
+ };
+ u32 bits;
+} __packed;
+
+struct grpcfg {
+ u64 wqs[4];
+ u64 engines;
+ union group_flags flags;
+} __packed;
+
+union wqcfg {
+ struct {
+ /* bytes 0-3 */
+ u16 wq_size;
+ u16 rsvd;
+
+ /* bytes 4-7 */
+ u16 wq_thresh;
+ u16 rsvd1;
+
+ /* bytes 8-11 */
+ u32 mode:1; /* shared or dedicated */
+ u32 bof:1; /* block on fault */
+ u32 rsvd2:2;
+ u32 priority:4;
+ u32 pasid:20;
+ u32 pasid_en:1;
+ u32 priv:1;
+ u32 rsvd3:2;
+
+ /* bytes 12-15 */
+ u32 max_xfer_shift:5;
+ u32 max_batch_shift:4;
+ u32 rsvd4:23;
+
+ /* bytes 16-19 */
+ u16 occupancy_inth;
+ u16 occupancy_table_sel:1;
+ u16 rsvd5:15;
+
+ /* bytes 20-23 */
+ u16 occupancy_limit;
+ u16 occupancy_int_en:1;
+ u16 rsvd6:15;
+
+ /* bytes 24-27 */
+ u16 occupancy;
+ u16 occupancy_int:1;
+ u16 rsvd7:12;
+ u16 mode_support:1;
+ u16 wq_state:2;
+
+ /* bytes 28-31 */
+ u32 rsvd8;
+ };
+ u32 bits[8];
+} __packed;
+#endif
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <uapi/linux/idxd.h>
+#include "idxd.h"
+#include "registers.h"
+
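+/*
+ * Descriptor allocation is flow controlled by wq->dq_count, capped at
+ * wq->size. Fast-path submitters take wq->submit_lock for read; a blocking
+ * submitter that finds the queue full retakes it for write, holding off
+ * new readers until the waiter gets a slot.
+ */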
+struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype)
+{
+ struct idxd_desc *desc;
+ int idx;
+ struct idxd_device *idxd = wq->idxd;
+
+ if (idxd->state != IDXD_DEV_ENABLED)
+ return ERR_PTR(-EIO);
+
+ if (optype == IDXD_OP_BLOCK)
+ percpu_down_read(&wq->submit_lock);
+ else if (!percpu_down_read_trylock(&wq->submit_lock))
+ return ERR_PTR(-EBUSY);
+
+ if (!atomic_add_unless(&wq->dq_count, 1, wq->size)) {
+ int rc;
+
+ if (optype == IDXD_OP_NONBLOCK) {
+ percpu_up_read(&wq->submit_lock);
+ return ERR_PTR(-EAGAIN);
+ }
+
+ percpu_up_read(&wq->submit_lock);
+ percpu_down_write(&wq->submit_lock);
+ rc = wait_event_interruptible(wq->submit_waitq,
+ atomic_add_unless(&wq->dq_count,
+ 1, wq->size) ||
+ idxd->state != IDXD_DEV_ENABLED);
+ percpu_up_write(&wq->submit_lock);
+ if (rc < 0)
+ return ERR_PTR(-EINTR);
+ if (idxd->state != IDXD_DEV_ENABLED)
+ return ERR_PTR(-EIO);
+ } else {
+ percpu_up_read(&wq->submit_lock);
+ }
+
+ idx = sbitmap_get(&wq->sbmap, 0, false);
+ if (idx < 0) {
+ atomic_dec(&wq->dq_count);
+ return ERR_PTR(-EAGAIN);
+ }
+
+ desc = wq->descs[idx];
+ memset(desc->hw, 0, sizeof(struct dsa_hw_desc));
+ memset(desc->completion, 0, sizeof(struct dsa_completion_record));
+ return desc;
+}
+
+void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc)
+{
+ atomic_dec(&wq->dq_count);
+
+ sbitmap_clear_bit(&wq->sbmap, desc->id);
+ wake_up(&wq->submit_waitq);
+}
+
+int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
+{
+ struct idxd_device *idxd = wq->idxd;
+ int vec = desc->hw->int_handle;
+ void __iomem *portal;
+
+ if (idxd->state != IDXD_DEV_ENABLED)
+ return -EIO;
+
+ portal = wq->dportal + idxd_get_wq_portal_offset(IDXD_PORTAL_UNLIMITED);
+ /*
+ * The wmb() flushes writes to coherent DMA data before possibly
+ * triggering a DMA read. The wmb() is necessary even on UP because
+ * the recipient is a device.
+ */
+ wmb();
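+	/* iosubmit_cmds512() copies the 64-byte descriptor with MOVDIR64B */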
+ iosubmit_cmds512(portal, desc->hw, 1);
+
+	/*
+	 * Add the descriptor to the lockless pending list of the irq_entry
+	 * it was assigned to, so the completion thread can find it.
+	 */
+ if (desc->hw->flags & IDXD_OP_FLAG_RCI)
+ llist_add(&desc->llnode,
+ &idxd->irq_entries[vec].pending_llist);
+
+ return 0;
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <uapi/linux/idxd.h>
+#include "registers.h"
+#include "idxd.h"
+
+static char *idxd_wq_type_names[] = {
+ [IDXD_WQT_NONE] = "none",
+ [IDXD_WQT_KERNEL] = "kernel",
+ [IDXD_WQT_USER] = "user",
+};
+
+static void idxd_conf_device_release(struct device *dev)
+{
+ dev_dbg(dev, "%s for %s\n", __func__, dev_name(dev));
+}
+
+static struct device_type idxd_group_device_type = {
+ .name = "group",
+ .release = idxd_conf_device_release,
+};
+
+static struct device_type idxd_wq_device_type = {
+ .name = "wq",
+ .release = idxd_conf_device_release,
+};
+
+static struct device_type idxd_engine_device_type = {
+ .name = "engine",
+ .release = idxd_conf_device_release,
+};
+
+static struct device_type dsa_device_type = {
+ .name = "dsa",
+ .release = idxd_conf_device_release,
+};
+
+static inline bool is_dsa_dev(struct device *dev)
+{
+ return dev ? dev->type == &dsa_device_type : false;
+}
+
+static inline bool is_idxd_dev(struct device *dev)
+{
+ return is_dsa_dev(dev);
+}
+
+static inline bool is_idxd_wq_dev(struct device *dev)
+{
+ return dev ? dev->type == &idxd_wq_device_type : false;
+}
+
+static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
+{
+ if (wq->type == IDXD_WQT_KERNEL &&
+ strcmp(wq->name, "dmaengine") == 0)
+ return true;
+ return false;
+}
+
+static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
+{
+	return wq->type == IDXD_WQT_USER;
+}
+
+static int idxd_config_bus_match(struct device *dev,
+ struct device_driver *drv)
+{
+ int matched = 0;
+
+ if (is_idxd_dev(dev)) {
+ struct idxd_device *idxd = confdev_to_idxd(dev);
+
+ if (idxd->state != IDXD_DEV_CONF_READY)
+ return 0;
+ matched = 1;
+ } else if (is_idxd_wq_dev(dev)) {
+ struct idxd_wq *wq = confdev_to_wq(dev);
+ struct idxd_device *idxd = wq->idxd;
+
+ if (idxd->state < IDXD_DEV_CONF_READY)
+ return 0;
+
+ if (wq->state != IDXD_WQ_DISABLED) {
+ dev_dbg(dev, "%s not disabled\n", dev_name(dev));
+ return 0;
+ }
+ matched = 1;
+ }
+
+ if (matched)
+ dev_dbg(dev, "%s matched\n", dev_name(dev));
+
+ return matched;
+}
+
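+/*
+ * Binding a device on the dsa bus either configures and enables a whole
+ * idxd device or brings up a single work queue, depending on which
+ * conf_dev is probed, e.g. (hypothetical names):
+ *	echo dsa0 > /sys/bus/dsa/drivers/dsa/bind
+ *	echo wq0.0 > /sys/bus/dsa/drivers/dsa/bind
+ */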
+static int idxd_config_bus_probe(struct device *dev)
+{
+ int rc;
+ unsigned long flags;
+
+ dev_dbg(dev, "%s called\n", __func__);
+
+ if (is_idxd_dev(dev)) {
+ struct idxd_device *idxd = confdev_to_idxd(dev);
+
+ if (idxd->state != IDXD_DEV_CONF_READY) {
+ dev_warn(dev, "Device not ready for config\n");
+ return -EBUSY;
+ }
+
+ if (!try_module_get(THIS_MODULE))
+ return -ENXIO;
+
+ spin_lock_irqsave(&idxd->dev_lock, flags);
+
+ /* Perform IDXD configuration and enabling */
+ rc = idxd_device_config(idxd);
+ if (rc < 0) {
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ dev_warn(dev, "Device config failed: %d\n", rc);
+ return rc;
+ }
+
+ /* start device */
+ rc = idxd_device_enable(idxd);
+ if (rc < 0) {
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ dev_warn(dev, "Device enable failed: %d\n", rc);
+ return rc;
+ }
+
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ dev_info(dev, "Device %s enabled\n", dev_name(dev));
+
+		rc = idxd_register_dma_device(idxd);
+		if (rc < 0) {
+			dev_dbg(dev, "Failed to register dmaengine device\n");
+			return rc;
+		}
+ return 0;
+ } else if (is_idxd_wq_dev(dev)) {
+ struct idxd_wq *wq = confdev_to_wq(dev);
+ struct idxd_device *idxd = wq->idxd;
+
+ mutex_lock(&wq->wq_lock);
+
+ if (idxd->state != IDXD_DEV_ENABLED) {
+ mutex_unlock(&wq->wq_lock);
+ dev_warn(dev, "Enabling while device not enabled.\n");
+ return -EPERM;
+ }
+
+ if (wq->state != IDXD_WQ_DISABLED) {
+ mutex_unlock(&wq->wq_lock);
+ dev_warn(dev, "WQ %d already enabled.\n", wq->id);
+ return -EBUSY;
+ }
+
+ if (!wq->group) {
+ mutex_unlock(&wq->wq_lock);
+ dev_warn(dev, "WQ not attached to group.\n");
+ return -EINVAL;
+ }
+
+ if (strlen(wq->name) == 0) {
+ mutex_unlock(&wq->wq_lock);
+ dev_warn(dev, "WQ name not set.\n");
+ return -EINVAL;
+ }
+
+ rc = idxd_wq_alloc_resources(wq);
+ if (rc < 0) {
+ mutex_unlock(&wq->wq_lock);
+ dev_warn(dev, "WQ resource alloc failed\n");
+ return rc;
+ }
+
+ spin_lock_irqsave(&idxd->dev_lock, flags);
+ rc = idxd_device_config(idxd);
+ if (rc < 0) {
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ mutex_unlock(&wq->wq_lock);
+ dev_warn(dev, "Writing WQ %d config failed: %d\n",
+ wq->id, rc);
+ return rc;
+ }
+
+ rc = idxd_wq_enable(wq);
+ if (rc < 0) {
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ mutex_unlock(&wq->wq_lock);
+ dev_warn(dev, "WQ %d enabling failed: %d\n",
+ wq->id, rc);
+ return rc;
+ }
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+
+		rc = idxd_wq_map_portal(wq);
+		if (rc < 0) {
+			dev_warn(dev, "wq portal mapping failed: %d\n", rc);
+			spin_lock_irqsave(&idxd->dev_lock, flags);
+			if (idxd_wq_disable(wq) < 0)
+				dev_warn(dev, "IDXD wq disable failed\n");
+			spin_unlock_irqrestore(&idxd->dev_lock, flags);
+			mutex_unlock(&wq->wq_lock);
+			return rc;
+		}
+
+ wq->client_count = 0;
+
+ dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev));
+
+ if (is_idxd_wq_dmaengine(wq)) {
+ rc = idxd_register_dma_channel(wq);
+ if (rc < 0) {
+ dev_dbg(dev, "DMA channel register failed\n");
+ mutex_unlock(&wq->wq_lock);
+ return rc;
+ }
+ } else if (is_idxd_wq_cdev(wq)) {
+ rc = idxd_wq_add_cdev(wq);
+ if (rc < 0) {
+ dev_dbg(dev, "Cdev creation failed\n");
+ mutex_unlock(&wq->wq_lock);
+ return rc;
+ }
+ }
+
+ mutex_unlock(&wq->wq_lock);
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+static void disable_wq(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct device *dev = &idxd->pdev->dev;
+ unsigned long flags;
+ int rc;
+
+ mutex_lock(&wq->wq_lock);
+ dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
+ if (wq->state == IDXD_WQ_DISABLED) {
+ mutex_unlock(&wq->wq_lock);
+ return;
+ }
+
+ if (is_idxd_wq_dmaengine(wq))
+ idxd_unregister_dma_channel(wq);
+ else if (is_idxd_wq_cdev(wq))
+ idxd_wq_del_cdev(wq);
+
+	if (idxd_wq_refcount(wq))
+		dev_warn(dev, "Clients still have a claim on wq %d: %d\n",
+			 wq->id, idxd_wq_refcount(wq));
+
+ idxd_wq_unmap_portal(wq);
+
+ spin_lock_irqsave(&idxd->dev_lock, flags);
+ rc = idxd_wq_disable(wq);
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+
+ idxd_wq_free_resources(wq);
+ wq->client_count = 0;
+ mutex_unlock(&wq->wq_lock);
+
+ if (rc < 0)
+ dev_warn(dev, "Failed to disable %s: %d\n",
+ dev_name(&wq->conf_dev), rc);
+ else
+ dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
+}
+
+static int idxd_config_bus_remove(struct device *dev)
+{
+ int rc;
+ unsigned long flags;
+
+ dev_dbg(dev, "%s called for %s\n", __func__, dev_name(dev));
+
+ /* disable workqueue here */
+ if (is_idxd_wq_dev(dev)) {
+ struct idxd_wq *wq = confdev_to_wq(dev);
+
+ disable_wq(wq);
+ } else if (is_idxd_dev(dev)) {
+ struct idxd_device *idxd = confdev_to_idxd(dev);
+ int i;
+
+ dev_dbg(dev, "%s removing dev %s\n", __func__,
+ dev_name(&idxd->conf_dev));
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ if (wq->state == IDXD_WQ_DISABLED)
+ continue;
+			dev_warn(dev, "Disabling %s while wq %d is active.\n",
+				 dev_name(&idxd->conf_dev), i);
+ device_release_driver(&wq->conf_dev);
+ }
+
+ idxd_unregister_dma_device(idxd);
+ spin_lock_irqsave(&idxd->dev_lock, flags);
+ rc = idxd_device_disable(idxd);
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ module_put(THIS_MODULE);
+ if (rc < 0)
+ dev_warn(dev, "Device disable failed\n");
+ else
+ dev_info(dev, "Device %s disabled\n", dev_name(dev));
+
+ }
+
+ return 0;
+}
+
+static void idxd_config_bus_shutdown(struct device *dev)
+{
+ dev_dbg(dev, "%s called\n", __func__);
+}
+
+struct bus_type dsa_bus_type = {
+ .name = "dsa",
+ .match = idxd_config_bus_match,
+ .probe = idxd_config_bus_probe,
+ .remove = idxd_config_bus_remove,
+ .shutdown = idxd_config_bus_shutdown,
+};
+
+static struct bus_type *idxd_bus_types[] = {
+ &dsa_bus_type
+};
+
+static struct idxd_device_driver dsa_drv = {
+ .drv = {
+ .name = "dsa",
+ .bus = &dsa_bus_type,
+ .owner = THIS_MODULE,
+ .mod_name = KBUILD_MODNAME,
+ },
+};
+
+static struct idxd_device_driver *idxd_drvs[] = {
+ &dsa_drv
+};
+
+struct bus_type *idxd_get_bus_type(struct idxd_device *idxd)
+{
+ return idxd_bus_types[idxd->type];
+}
+
+static struct device_type *idxd_get_device_type(struct idxd_device *idxd)
+{
+ if (idxd->type == IDXD_TYPE_DSA)
+ return &dsa_device_type;
+ else
+ return NULL;
+}
+
+/* IDXD generic driver setup */
+int idxd_register_driver(void)
+{
+ int i, rc;
+
+ for (i = 0; i < IDXD_TYPE_MAX; i++) {
+ rc = driver_register(&idxd_drvs[i]->drv);
+ if (rc < 0)
+ goto drv_fail;
+ }
+
+ return 0;
+
+drv_fail:
+	while (--i >= 0)
+		driver_unregister(&idxd_drvs[i]->drv);
+ return rc;
+}
+
+void idxd_unregister_driver(void)
+{
+ int i;
+
+ for (i = 0; i < IDXD_TYPE_MAX; i++)
+ driver_unregister(&idxd_drvs[i]->drv);
+}
+
+/* IDXD engine attributes */
+static ssize_t engine_group_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_engine *engine =
+ container_of(dev, struct idxd_engine, conf_dev);
+
+ if (engine->group)
+ return sprintf(buf, "%d\n", engine->group->id);
+ else
+ return sprintf(buf, "%d\n", -1);
+}
+
+static ssize_t engine_group_id_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_engine *engine =
+ container_of(dev, struct idxd_engine, conf_dev);
+ struct idxd_device *idxd = engine->idxd;
+ long id;
+ int rc;
+ struct idxd_group *prevg, *group;
+
+ rc = kstrtol(buf, 10, &id);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (id > idxd->max_groups - 1 || id < -1)
+ return -EINVAL;
+
+ if (id == -1) {
+ if (engine->group) {
+ engine->group->num_engines--;
+ engine->group = NULL;
+ }
+ return count;
+ }
+
+ group = &idxd->groups[id];
+ prevg = engine->group;
+
+ if (prevg)
+ prevg->num_engines--;
+	engine->group = group;
+ engine->group->num_engines++;
+
+ return count;
+}
+
+static struct device_attribute dev_attr_engine_group =
+ __ATTR(group_id, 0644, engine_group_id_show,
+ engine_group_id_store);
+
+static struct attribute *idxd_engine_attributes[] = {
+ &dev_attr_engine_group.attr,
+ NULL,
+};
+
+static const struct attribute_group idxd_engine_attribute_group = {
+ .attrs = idxd_engine_attributes,
+};
+
+static const struct attribute_group *idxd_engine_attribute_groups[] = {
+ &idxd_engine_attribute_group,
+ NULL,
+};
+
+/* Group attributes */
+
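+/*
+ * Tokens are the device's shared bandwidth resource; nr_tokens tracks
+ * how many remain unreserved across all groups.
+ */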
+static void idxd_set_free_tokens(struct idxd_device *idxd)
+{
+ int i, tokens;
+
+ for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
+ struct idxd_group *g = &idxd->groups[i];
+
+ tokens += g->tokens_reserved;
+ }
+
+ idxd->nr_tokens = idxd->max_tokens - tokens;
+}
+
+static ssize_t group_tokens_reserved_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+
+ return sprintf(buf, "%u\n", group->tokens_reserved);
+}
+
+static ssize_t group_tokens_reserved_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_device *idxd = group->idxd;
+ unsigned long val;
+ int rc;
+
+ rc = kstrtoul(buf, 10, &val);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (idxd->state == IDXD_DEV_ENABLED)
+ return -EPERM;
+
+ if (idxd->token_limit == 0)
+ return -EPERM;
+
+ if (val > idxd->max_tokens)
+ return -EINVAL;
+
+ if (val > idxd->nr_tokens)
+ return -EINVAL;
+
+ group->tokens_reserved = val;
+ idxd_set_free_tokens(idxd);
+ return count;
+}
+
+static struct device_attribute dev_attr_group_tokens_reserved =
+ __ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
+ group_tokens_reserved_store);
+
+static ssize_t group_tokens_allowed_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+
+ return sprintf(buf, "%u\n", group->tokens_allowed);
+}
+
+static ssize_t group_tokens_allowed_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_device *idxd = group->idxd;
+ unsigned long val;
+ int rc;
+
+ rc = kstrtoul(buf, 10, &val);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (idxd->state == IDXD_DEV_ENABLED)
+ return -EPERM;
+
+ if (idxd->token_limit == 0)
+ return -EPERM;
+ if (val < 4 * group->num_engines ||
+ val > group->tokens_reserved + idxd->nr_tokens)
+ return -EINVAL;
+
+ group->tokens_allowed = val;
+ return count;
+}
+
+static struct device_attribute dev_attr_group_tokens_allowed =
+ __ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
+ group_tokens_allowed_store);
+
+static ssize_t group_use_token_limit_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+
+ return sprintf(buf, "%u\n", group->use_token_limit);
+}
+
+static ssize_t group_use_token_limit_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_device *idxd = group->idxd;
+ unsigned long val;
+ int rc;
+
+ rc = kstrtoul(buf, 10, &val);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (idxd->state == IDXD_DEV_ENABLED)
+ return -EPERM;
+
+ if (idxd->token_limit == 0)
+ return -EPERM;
+
+ group->use_token_limit = !!val;
+ return count;
+}
+
+static struct device_attribute dev_attr_group_use_token_limit =
+ __ATTR(use_token_limit, 0644, group_use_token_limit_show,
+ group_use_token_limit_store);
+
+static ssize_t group_engines_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+ int i, rc = 0;
+ char *tmp = buf;
+ struct idxd_device *idxd = group->idxd;
+
+ for (i = 0; i < idxd->max_engines; i++) {
+ struct idxd_engine *engine = &idxd->engines[i];
+
+ if (!engine->group)
+ continue;
+
+ if (engine->group->id == group->id)
+ rc += sprintf(tmp + rc, "engine%d.%d ",
+ idxd->id, engine->id);
+ }
+
+ /* replace the trailing space with a newline; guard the empty case */
+ if (rc)
+ rc--;
+ rc += sprintf(tmp + rc, "\n");
+
+ return rc;
+}
+
+static struct device_attribute dev_attr_group_engines =
+ __ATTR(engines, 0444, group_engines_show, NULL);
+
+static ssize_t group_work_queues_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+ int i, rc = 0;
+ char *tmp = buf;
+ struct idxd_device *idxd = group->idxd;
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ if (!wq->group)
+ continue;
+
+ if (wq->group->id == group->id)
+ rc += sprintf(tmp + rc, "wq%d.%d ",
+ idxd->id, wq->id);
+ }
+
+ /* replace the trailing space with a newline; guard the empty case */
+ if (rc)
+ rc--;
+ rc += sprintf(tmp + rc, "\n");
+
+ return rc;
+}
+
+static struct device_attribute dev_attr_group_work_queues =
+ __ATTR(work_queues, 0444, group_work_queues_show, NULL);
+
+static ssize_t group_traffic_class_a_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+
+ return sprintf(buf, "%d\n", group->tc_a);
+}
+
+static ssize_t group_traffic_class_a_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_device *idxd = group->idxd;
+ long val;
+ int rc;
+
+ rc = kstrtol(buf, 10, &val);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (idxd->state == IDXD_DEV_ENABLED)
+ return -EPERM;
+
+ if (val < 0 || val > 7)
+ return -EINVAL;
+
+ group->tc_a = val;
+ return count;
+}
+
+static struct device_attribute dev_attr_group_traffic_class_a =
+ __ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
+ group_traffic_class_a_store);
+
+static ssize_t group_traffic_class_b_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+
+ return sprintf(buf, "%d\n", group->tc_b);
+}
+
+static ssize_t group_traffic_class_b_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_group *group =
+ container_of(dev, struct idxd_group, conf_dev);
+ struct idxd_device *idxd = group->idxd;
+ long val;
+ int rc;
+
+ rc = kstrtol(buf, 10, &val);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (idxd->state == IDXD_DEV_ENABLED)
+ return -EPERM;
+
+ if (val < 0 || val > 7)
+ return -EINVAL;
+
+ group->tc_b = val;
+ return count;
+}
+
+static struct device_attribute dev_attr_group_traffic_class_b =
+ __ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
+ group_traffic_class_b_store);
+
+static struct attribute *idxd_group_attributes[] = {
+ &dev_attr_group_work_queues.attr,
+ &dev_attr_group_engines.attr,
+ &dev_attr_group_use_token_limit.attr,
+ &dev_attr_group_tokens_allowed.attr,
+ &dev_attr_group_tokens_reserved.attr,
+ &dev_attr_group_traffic_class_a.attr,
+ &dev_attr_group_traffic_class_b.attr,
+ NULL,
+};
+
+static const struct attribute_group idxd_group_attribute_group = {
+ .attrs = idxd_group_attributes,
+};
+
+static const struct attribute_group *idxd_group_attribute_groups[] = {
+ &idxd_group_attribute_group,
+ NULL,
+};
+
+/* IDXD work queue attribs */
+static ssize_t wq_clients_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ return sprintf(buf, "%d\n", wq->client_count);
+}
+
+static struct device_attribute dev_attr_wq_clients =
+ __ATTR(clients, 0444, wq_clients_show, NULL);
+
+static ssize_t wq_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ switch (wq->state) {
+ case IDXD_WQ_DISABLED:
+ return sprintf(buf, "disabled\n");
+ case IDXD_WQ_ENABLED:
+ return sprintf(buf, "enabled\n");
+ }
+
+ return sprintf(buf, "unknown\n");
+}
+
+static struct device_attribute dev_attr_wq_state =
+ __ATTR(state, 0444, wq_state_show, NULL);
+
+static ssize_t wq_group_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ if (wq->group)
+ return sprintf(buf, "%u\n", wq->group->id);
+ else
+ return sprintf(buf, "-1\n");
+}
+
+static ssize_t wq_group_id_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_device *idxd = wq->idxd;
+ long id;
+ int rc;
+ struct idxd_group *prevg, *group;
+
+ rc = kstrtol(buf, 10, &id);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (wq->state != IDXD_WQ_DISABLED)
+ return -EPERM;
+
+ if (id > idxd->max_groups - 1 || id < -1)
+ return -EINVAL;
+
+ if (id == -1) {
+ if (wq->group) {
+ wq->group->num_wqs--;
+ wq->group = NULL;
+ }
+ return count;
+ }
+
+ group = &idxd->groups[id];
+ prevg = wq->group;
+
+ if (prevg)
+ prevg->num_wqs--;
+ wq->group = group;
+ group->num_wqs++;
+ return count;
+}
+
+static struct device_attribute dev_attr_wq_group_id =
+ __ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);
+
+static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ return sprintf(buf, "%s\n",
+ wq_dedicated(wq) ? "dedicated" : "shared");
+}
+
+static ssize_t wq_mode_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_device *idxd = wq->idxd;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (wq->state != IDXD_WQ_DISABLED)
+ return -EPERM;
+
+ if (sysfs_streq(buf, "dedicated")) {
+ set_bit(WQ_FLAG_DEDICATED, &wq->flags);
+ wq->threshold = 0;
+ } else {
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static struct device_attribute dev_attr_wq_mode =
+ __ATTR(mode, 0644, wq_mode_show, wq_mode_store);
+
+static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ return sprintf(buf, "%u\n", wq->size);
+}
+
+static ssize_t wq_size_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ unsigned long size;
+ struct idxd_device *idxd = wq->idxd;
+ int rc;
+
+ rc = kstrtoul(buf, 10, &size);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (wq->state != IDXD_WQ_DISABLED)
+ return -EPERM;
+
+ if (size > idxd->max_wq_size)
+ return -EINVAL;
+
+ wq->size = size;
+ return count;
+}
+
+static struct device_attribute dev_attr_wq_size =
+ __ATTR(size, 0644, wq_size_show, wq_size_store);
+
+static ssize_t wq_priority_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ return sprintf(buf, "%u\n", wq->priority);
+}
+
+static ssize_t wq_priority_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ unsigned long prio;
+ struct idxd_device *idxd = wq->idxd;
+ int rc;
+
+ rc = kstrtoul(buf, 10, &prio);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (wq->state != IDXD_WQ_DISABLED)
+ return -EPERM;
+
+ if (prio > IDXD_MAX_PRIORITY)
+ return -EINVAL;
+
+ wq->priority = prio;
+ return count;
+}
+
+static struct device_attribute dev_attr_wq_priority =
+ __ATTR(priority, 0644, wq_priority_show, wq_priority_store);
+
+static ssize_t wq_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ switch (wq->type) {
+ case IDXD_WQT_KERNEL:
+ return sprintf(buf, "%s\n",
+ idxd_wq_type_names[IDXD_WQT_KERNEL]);
+ case IDXD_WQT_USER:
+ return sprintf(buf, "%s\n",
+ idxd_wq_type_names[IDXD_WQT_USER]);
+ case IDXD_WQT_NONE:
+ default:
+ return sprintf(buf, "%s\n",
+ idxd_wq_type_names[IDXD_WQT_NONE]);
+ }
+}
+
+static ssize_t wq_type_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ enum idxd_wq_type old_type;
+
+ if (wq->state != IDXD_WQ_DISABLED)
+ return -EPERM;
+
+ old_type = wq->type;
+ if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
+ wq->type = IDXD_WQT_KERNEL;
+ else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
+ wq->type = IDXD_WQT_USER;
+ else
+ wq->type = IDXD_WQT_NONE;
+
+ /* If we are changing queue type, clear the name */
+ if (wq->type != old_type)
+ memset(wq->name, 0, WQ_NAME_SIZE + 1);
+
+ return count;
+}
+
+static struct device_attribute dev_attr_wq_type =
+ __ATTR(type, 0644, wq_type_show, wq_type_store);
+
+static ssize_t wq_name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ return sprintf(buf, "%s\n", wq->name);
+}
+
+static ssize_t wq_name_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ if (wq->state != IDXD_WQ_DISABLED)
+ return -EPERM;
+
+ if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
+ return -EINVAL;
+
+ memset(wq->name, 0, WQ_NAME_SIZE + 1);
+ strncpy(wq->name, buf, WQ_NAME_SIZE);
+ strreplace(wq->name, '\n', '\0');
+ return count;
+}
+
+static struct device_attribute dev_attr_wq_name =
+ __ATTR(name, 0644, wq_name_show, wq_name_store);
+
+static ssize_t wq_cdev_minor_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ return sprintf(buf, "%d\n", wq->idxd_cdev.minor);
+}
+
+static struct device_attribute dev_attr_wq_cdev_minor =
+ __ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);
+
+static struct attribute *idxd_wq_attributes[] = {
+ &dev_attr_wq_clients.attr,
+ &dev_attr_wq_state.attr,
+ &dev_attr_wq_group_id.attr,
+ &dev_attr_wq_mode.attr,
+ &dev_attr_wq_size.attr,
+ &dev_attr_wq_priority.attr,
+ &dev_attr_wq_type.attr,
+ &dev_attr_wq_name.attr,
+ &dev_attr_wq_cdev_minor.attr,
+ NULL,
+};
+
+static const struct attribute_group idxd_wq_attribute_group = {
+ .attrs = idxd_wq_attributes,
+};
+
+static const struct attribute_group *idxd_wq_attribute_groups[] = {
+ &idxd_wq_attribute_group,
+ NULL,
+};
+
+/* IDXD device attribs */
+static ssize_t max_work_queues_size_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%u\n", idxd->max_wq_size);
+}
+static DEVICE_ATTR_RO(max_work_queues_size);
+
+static ssize_t max_groups_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%u\n", idxd->max_groups);
+}
+static DEVICE_ATTR_RO(max_groups);
+
+static ssize_t max_work_queues_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%u\n", idxd->max_wqs);
+}
+static DEVICE_ATTR_RO(max_work_queues);
+
+static ssize_t max_engines_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%u\n", idxd->max_engines);
+}
+static DEVICE_ATTR_RO(max_engines);
+
+static ssize_t numa_node_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
+}
+static DEVICE_ATTR_RO(numa_node);
+
+static ssize_t max_batch_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%u\n", idxd->max_batch_size);
+}
+static DEVICE_ATTR_RO(max_batch_size);
+
+static ssize_t max_transfer_size_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%llu\n", idxd->max_xfer_bytes);
+}
+static DEVICE_ATTR_RO(max_transfer_size);
+
+static ssize_t op_cap_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%#llx\n", idxd->hw.opcap.bits[0]);
+}
+static DEVICE_ATTR_RO(op_cap);
+
+static ssize_t configurable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%u\n",
+ test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
+}
+static DEVICE_ATTR_RO(configurable);
+
+static ssize_t clients_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+ unsigned long flags;
+ int count = 0, i;
+
+ spin_lock_irqsave(&idxd->dev_lock, flags);
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ count += wq->client_count;
+ }
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+
+ return sprintf(buf, "%d\n", count);
+}
+static DEVICE_ATTR_RO(clients);
+
+static ssize_t state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ switch (idxd->state) {
+ case IDXD_DEV_DISABLED:
+ case IDXD_DEV_CONF_READY:
+ return sprintf(buf, "disabled\n");
+ case IDXD_DEV_ENABLED:
+ return sprintf(buf, "enabled\n");
+ case IDXD_DEV_HALTED:
+ return sprintf(buf, "halted\n");
+ }
+
+ return sprintf(buf, "unknown\n");
+}
+static DEVICE_ATTR_RO(state);
+
+static ssize_t errors_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+ int i, out = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&idxd->dev_lock, flags);
+ for (i = 0; i < 4; i++)
+ out += sprintf(buf + out, "%#018llx ", idxd->sw_err.bits[i]);
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ out--;
+ out += sprintf(buf + out, "\n");
+ return out;
+}
+static DEVICE_ATTR_RO(errors);
+
+static ssize_t max_tokens_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%u\n", idxd->max_tokens);
+}
+static DEVICE_ATTR_RO(max_tokens);
+
+static ssize_t token_limit_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%u\n", idxd->token_limit);
+}
+
+static ssize_t token_limit_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+ unsigned long val;
+ int rc;
+
+ rc = kstrtoul(buf, 10, &val);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (idxd->state == IDXD_DEV_ENABLED)
+ return -EPERM;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (!idxd->hw.group_cap.token_limit)
+ return -EPERM;
+
+ if (val > idxd->hw.group_cap.total_tokens)
+ return -EINVAL;
+
+ idxd->token_limit = val;
+ return count;
+}
+static DEVICE_ATTR_RW(token_limit);
+
+static ssize_t cdev_major_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%u\n", idxd->major);
+}
+static DEVICE_ATTR_RO(cdev_major);
+
+static struct attribute *idxd_device_attributes[] = {
+ &dev_attr_max_groups.attr,
+ &dev_attr_max_work_queues.attr,
+ &dev_attr_max_work_queues_size.attr,
+ &dev_attr_max_engines.attr,
+ &dev_attr_numa_node.attr,
+ &dev_attr_max_batch_size.attr,
+ &dev_attr_max_transfer_size.attr,
+ &dev_attr_op_cap.attr,
+ &dev_attr_configurable.attr,
+ &dev_attr_clients.attr,
+ &dev_attr_state.attr,
+ &dev_attr_errors.attr,
+ &dev_attr_max_tokens.attr,
+ &dev_attr_token_limit.attr,
+ &dev_attr_cdev_major.attr,
+ NULL,
+};
+
+static const struct attribute_group idxd_device_attribute_group = {
+ .attrs = idxd_device_attributes,
+};
+
+static const struct attribute_group *idxd_attribute_groups[] = {
+ &idxd_device_attribute_group,
+ NULL,
+};
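+
+/*
+ * The attribute groups above expose the device, wq, group and engine
+ * knobs in sysfs.  A hypothetical configuration sequence from
+ * userspace could look like this (bus and device names are
+ * illustrative and depend on the registered bus type and on
+ * enumeration order):
+ *
+ *	echo 0 > /sys/bus/dsa/devices/wq0.0/group_id
+ *	echo dedicated > /sys/bus/dsa/devices/wq0.0/mode
+ *	echo 16 > /sys/bus/dsa/devices/wq0.0/size
+ */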
+
+static int idxd_setup_engine_sysfs(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+ int i, rc;
+
+ for (i = 0; i < idxd->max_engines; i++) {
+ struct idxd_engine *engine = &idxd->engines[i];
+
+ engine->conf_dev.parent = &idxd->conf_dev;
+ dev_set_name(&engine->conf_dev, "engine%d.%d",
+ idxd->id, engine->id);
+ engine->conf_dev.bus = idxd_get_bus_type(idxd);
+ engine->conf_dev.groups = idxd_engine_attribute_groups;
+ engine->conf_dev.type = &idxd_engine_device_type;
+ dev_dbg(dev, "Engine device register: %s\n",
+ dev_name(&engine->conf_dev));
+ rc = device_register(&engine->conf_dev);
+ if (rc < 0) {
+ put_device(&engine->conf_dev);
+ goto cleanup;
+ }
+ }
+
+ return 0;
+
+cleanup:
+ while (i--) {
+ struct idxd_engine *engine = &idxd->engines[i];
+
+ device_unregister(&engine->conf_dev);
+ }
+ return rc;
+}
+
+static int idxd_setup_group_sysfs(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+ int i, rc;
+
+ for (i = 0; i < idxd->max_groups; i++) {
+ struct idxd_group *group = &idxd->groups[i];
+
+ group->conf_dev.parent = &idxd->conf_dev;
+ dev_set_name(&group->conf_dev, "group%d.%d",
+ idxd->id, group->id);
+ group->conf_dev.bus = idxd_get_bus_type(idxd);
+ group->conf_dev.groups = idxd_group_attribute_groups;
+ group->conf_dev.type = &idxd_group_device_type;
+ dev_dbg(dev, "Group device register: %s\n",
+ dev_name(&group->conf_dev));
+ rc = device_register(&group->conf_dev);
+ if (rc < 0) {
+ put_device(&group->conf_dev);
+ goto cleanup;
+ }
+ }
+
+ return 0;
+
+cleanup:
+ while (i--) {
+ struct idxd_group *group = &idxd->groups[i];
+
+ device_unregister(&group->conf_dev);
+ }
+ return rc;
+}
+
+static int idxd_setup_wq_sysfs(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+ int i, rc;
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ wq->conf_dev.parent = &idxd->conf_dev;
+ dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
+ wq->conf_dev.bus = idxd_get_bus_type(idxd);
+ wq->conf_dev.groups = idxd_wq_attribute_groups;
+ wq->conf_dev.type = &idxd_wq_device_type;
+ dev_dbg(dev, "WQ device register: %s\n",
+ dev_name(&wq->conf_dev));
+ rc = device_register(&wq->conf_dev);
+ if (rc < 0) {
+ put_device(&wq->conf_dev);
+ goto cleanup;
+ }
+ }
+
+ return 0;
+
+cleanup:
+ while (i--) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ device_unregister(&wq->conf_dev);
+ }
+ return rc;
+}
+
+static int idxd_setup_device_sysfs(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+ int rc;
+ char devname[IDXD_NAME_SIZE];
+
+ sprintf(devname, "%s%d", idxd_get_dev_name(idxd), idxd->id);
+ idxd->conf_dev.parent = dev;
+ dev_set_name(&idxd->conf_dev, "%s", devname);
+ idxd->conf_dev.bus = idxd_get_bus_type(idxd);
+ idxd->conf_dev.groups = idxd_attribute_groups;
+ idxd->conf_dev.type = idxd_get_device_type(idxd);
+
+ dev_dbg(dev, "IDXD device register: %s\n", dev_name(&idxd->conf_dev));
+ rc = device_register(&idxd->conf_dev);
+ if (rc < 0) {
+ put_device(&idxd->conf_dev);
+ return rc;
+ }
+
+ return 0;
+}
+
+int idxd_setup_sysfs(struct idxd_device *idxd)
+{
+ struct device *dev = &idxd->pdev->dev;
+ int rc;
+
+ rc = idxd_setup_device_sysfs(idxd);
+ if (rc < 0) {
+ dev_dbg(dev, "Device sysfs registering failed: %d\n", rc);
+ return rc;
+ }
+
+ rc = idxd_setup_wq_sysfs(idxd);
+ if (rc < 0) {
+ /* TODO: unwind the device sysfs registration above */
+ dev_dbg(dev, "Work Queue sysfs registering failed: %d\n", rc);
+ return rc;
+ }
+
+ rc = idxd_setup_group_sysfs(idxd);
+ if (rc < 0) {
+ /* TODO: unwind the device and wq sysfs registrations above */
+ dev_dbg(dev, "Group sysfs registering failed: %d\n", rc);
+ return rc;
+ }
+
+ rc = idxd_setup_engine_sysfs(idxd);
+ if (rc < 0) {
+ /* TODO: unwind the device, wq and group sysfs registrations above */
+ dev_dbg(dev, "Engine sysfs registering failed: %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+void idxd_cleanup_sysfs(struct idxd_device *idxd)
+{
+ int i;
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ device_unregister(&wq->conf_dev);
+ }
+
+ for (i = 0; i < idxd->max_engines; i++) {
+ struct idxd_engine *engine = &idxd->engines[i];
+
+ device_unregister(&engine->conf_dev);
+ }
+
+ for (i = 0; i < idxd->max_groups; i++) {
+ struct idxd_group *group = &idxd->groups[i];
+
+ device_unregister(&group->conf_dev);
+ }
+
+ device_unregister(&idxd->conf_dev);
+}
+
+int idxd_register_bus_type(void)
+{
+ int i, rc;
+
+ for (i = 0; i < IDXD_TYPE_MAX; i++) {
+ rc = bus_register(idxd_bus_types[i]);
+ if (rc < 0)
+ goto bus_err;
+ }
+
+ return 0;
+
+bus_err:
+ /* unwind only the bus types that were successfully registered */
+ while (--i >= 0)
+ bus_unregister(idxd_bus_types[i]);
+ return rc;
+}
+
+void idxd_unregister_bus_type(void)
+{
+ int i;
+
+ for (i = 0; i < IDXD_TYPE_MAX; i++)
+ bus_unregister(idxd_bus_types[i]);
+}
return;
}
sdmac->desc = desc = to_sdma_desc(&vd->tx);
- /*
- * Do not delete the node in desc_issued list in cyclic mode, otherwise
- * the desc allocated will never be freed in vchan_dma_desc_free_list
- */
- if (!(sdmac->flags & IMX_DMA_SG_LOOP))
- list_del(&vd->node);
+
+ list_del(&vd->node);
sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
spin_lock_irqsave(&sdmac->vc.lock, flags);
vchan_get_all_descriptors(&sdmac->vc, &head);
- sdmac->desc = NULL;
spin_unlock_irqrestore(&sdmac->vc.lock, flags);
vchan_dma_desc_free_list(&sdmac->vc, &head);
sdmac->context_loaded = false;
}
-static int sdma_disable_channel_async(struct dma_chan *chan)
+static int sdma_terminate_all(struct dma_chan *chan)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&sdmac->vc.lock, flags);
sdma_disable_channel(chan);
- if (sdmac->desc)
+ if (sdmac->desc) {
+ vchan_terminate_vdesc(&sdmac->desc->vd);
+ sdmac->desc = NULL;
schedule_work(&sdmac->terminate_worker);
+ }
+
+ spin_unlock_irqrestore(&sdmac->vc.lock, flags);
return 0;
}
struct sdma_channel *sdmac = to_sdma_chan(chan);
struct sdma_engine *sdma = sdmac->sdma;
- sdma_disable_channel_async(chan);
+ sdma_terminate_all(chan);
sdma_channel_synchronize(chan);
struct dma_tx_state *txstate)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
- struct sdma_desc *desc;
+ struct sdma_desc *desc = NULL;
u32 residue;
struct virt_dma_desc *vd;
enum dma_status ret;
return ret;
spin_lock_irqsave(&sdmac->vc.lock, flags);
+
vd = vchan_find_desc(&sdmac->vc, cookie);
- if (vd) {
+ if (vd)
desc = to_sdma_desc(&vd->tx);
+ else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie)
+ desc = sdmac->desc;
+
+ if (desc) {
if (sdmac->flags & IMX_DMA_SG_LOOP)
residue = (desc->num_bd - desc->buf_ptail) *
desc->period_len - desc->chn_real_count;
else
residue = desc->chn_count - desc->chn_real_count;
- } else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie) {
- residue = sdmac->desc->chn_count - sdmac->desc->chn_real_count;
} else {
residue = 0;
}
+
spin_unlock_irqrestore(&sdmac->vc.lock, flags);
dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
sdma->dma_device.device_config = sdma_config;
- sdma->dma_device.device_terminate_all = sdma_disable_channel_async;
+ sdma->dma_device.device_terminate_all = sdma_terminate_all;
sdma->dma_device.device_synchronize = sdma_channel_synchronize;
sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS;
sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS;
ioat_kobject_del(ioat_dma);
dma_async_device_unregister(dma);
-
- dma_pool_destroy(ioat_dma->completion_pool);
-
- INIT_LIST_HEAD(&dma->channels);
}
/**
dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);
for (i = 0; i < dma->chancnt; i++) {
- ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL);
+ ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
if (!ioat_chan)
break;
return;
ioat_stop(ioat_chan);
- ioat_reset_hw(ioat_chan);
- /* Put LTR to idle */
- if (ioat_dma->version >= IOAT_VER_3_4)
- writeb(IOAT_CHAN_LTR_SWSEL_IDLE,
- ioat_chan->reg_base + IOAT_CHAN_LTR_SWSEL_OFFSET);
+ if (!test_bit(IOAT_CHAN_DOWN, &ioat_chan->state)) {
+ ioat_reset_hw(ioat_chan);
+
+ /* Put LTR to idle */
+ if (ioat_dma->version >= IOAT_VER_3_4)
+ writeb(IOAT_CHAN_LTR_SWSEL_IDLE,
+ ioat_chan->reg_base +
+ IOAT_CHAN_LTR_SWSEL_OFFSET);
+ }
spin_lock_bh(&ioat_chan->cleanup_lock);
spin_lock_bh(&ioat_chan->prep_lock);
.err_handler = &ioat_err_handler,
};
+static void release_ioatdma(struct dma_device *device)
+{
+ struct ioatdma_device *d = to_ioatdma_device(device);
+ int i;
+
+ for (i = 0; i < IOAT_MAX_CHANS; i++)
+ kfree(d->idx[i]);
+
+ dma_pool_destroy(d->completion_pool);
+ kfree(d);
+}
+
static struct ioatdma_device *
alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase)
{
- struct device *dev = &pdev->dev;
- struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
+ struct ioatdma_device *d = kzalloc(sizeof(*d), GFP_KERNEL);
if (!d)
return NULL;
d->pdev = pdev;
d->reg_base = iobase;
+ d->dma_dev.device_release = release_ioatdma;
return d;
}
if (!device)
return;
+ ioat_shutdown(pdev);
+
dev_err(&pdev->dev, "Removing dma and dca services\n");
if (device->dca) {
unregister_dca_provider(device->dca, &pdev->dev);
spin_lock_irqsave(&c->vc.lock, flags);
vchan_get_all_descriptors(&c->vc, &head);
- vchan_dma_desc_free_list(&c->vc, &head);
spin_unlock_irqrestore(&c->vc.lock, flags);
+ vchan_dma_desc_free_list(&c->vc, &head);
+
return 0;
}
#include <linux/of.h>
#include <linux/of_dma.h>
+#include "dmaengine.h"
+
static LIST_HEAD(of_dma_list);
static DEFINE_MUTEX(of_dma_lock);
}
vchan_get_all_descriptors(&vchan->vc, &head);
- vchan_dma_desc_free_list(&vchan->vc, &head);
spin_unlock_irqrestore(&vchan->vc.lock, flags);
+ vchan_dma_desc_free_list(&vchan->vc, &head);
+
return 0;
}
{
struct amba_device *pcdev = to_amba_device(dev);
- pm_runtime_disable(dev);
-
- if (!pm_runtime_status_suspended(dev)) {
- /* amba did not disable the clock */
- amba_pclk_disable(pcdev);
- }
+ pm_runtime_force_suspend(dev);
amba_pclk_unprepare(pcdev);
return 0;
if (ret)
return ret;
- if (!pm_runtime_status_suspended(dev))
- ret = amba_pclk_enable(pcdev);
-
- pm_runtime_enable(dev);
+ pm_runtime_force_resume(dev);
return ret;
}
-static SIMPLE_DEV_PM_OPS(pl330_pm, pl330_suspend, pl330_resume);
+static const struct dev_pm_ops pl330_pm = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(pl330_suspend, pl330_resume)
+};
static int
pl330_probe(struct amba_device *adev, const struct amba_id *id)
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microsemi Switchtec(tm) PCIe Management Driver
+ * Copyright (c) 2019, Logan Gunthorpe <logang@deltatee.com>
+ * Copyright (c) 2019, GigaIO Networks, Inc
+ */
+
+#include "dmaengine.h"
+
+#include <linux/circ_buf.h>
+#include <linux/dmaengine.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+MODULE_DESCRIPTION("PLX ExpressLane PEX PCI Switch DMA Engine");
+MODULE_VERSION("0.1");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Logan Gunthorpe");
+
+#define PLX_REG_DESC_RING_ADDR 0x214
+#define PLX_REG_DESC_RING_ADDR_HI 0x218
+#define PLX_REG_DESC_RING_NEXT_ADDR 0x21C
+#define PLX_REG_DESC_RING_COUNT 0x220
+#define PLX_REG_DESC_RING_LAST_ADDR 0x224
+#define PLX_REG_DESC_RING_LAST_SIZE 0x228
+#define PLX_REG_PREF_LIMIT 0x234
+#define PLX_REG_CTRL 0x238
+#define PLX_REG_CTRL2 0x23A
+#define PLX_REG_INTR_CTRL 0x23C
+#define PLX_REG_INTR_STATUS 0x23E
+
+#define PLX_REG_PREF_LIMIT_PREF_FOUR 8
+
+#define PLX_REG_CTRL_GRACEFUL_PAUSE BIT(0)
+#define PLX_REG_CTRL_ABORT BIT(1)
+#define PLX_REG_CTRL_WRITE_BACK_EN BIT(2)
+#define PLX_REG_CTRL_START BIT(3)
+#define PLX_REG_CTRL_RING_STOP_MODE BIT(4)
+#define PLX_REG_CTRL_DESC_MODE_BLOCK (0 << 5)
+#define PLX_REG_CTRL_DESC_MODE_ON_CHIP (1 << 5)
+#define PLX_REG_CTRL_DESC_MODE_OFF_CHIP (2 << 5)
+#define PLX_REG_CTRL_DESC_INVALID BIT(8)
+#define PLX_REG_CTRL_GRACEFUL_PAUSE_DONE BIT(9)
+#define PLX_REG_CTRL_ABORT_DONE BIT(10)
+#define PLX_REG_CTRL_IMM_PAUSE_DONE BIT(12)
+#define PLX_REG_CTRL_IN_PROGRESS BIT(30)
+
+#define PLX_REG_CTRL_RESET_VAL (PLX_REG_CTRL_DESC_INVALID | \
+ PLX_REG_CTRL_GRACEFUL_PAUSE_DONE | \
+ PLX_REG_CTRL_ABORT_DONE | \
+ PLX_REG_CTRL_IMM_PAUSE_DONE)
+
+#define PLX_REG_CTRL_START_VAL (PLX_REG_CTRL_WRITE_BACK_EN | \
+ PLX_REG_CTRL_DESC_MODE_OFF_CHIP | \
+ PLX_REG_CTRL_START | \
+ PLX_REG_CTRL_RESET_VAL)
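+
+/*
+ * The *_DONE and DESC_INVALID bits appear to be write-one-to-clear
+ * status bits: both the stop and start paths write
+ * PLX_REG_CTRL_RESET_VAL back to the control register to acknowledge
+ * them before (re)starting the engine.
+ */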
+
+#define PLX_REG_CTRL2_MAX_TXFR_SIZE_64B 0
+#define PLX_REG_CTRL2_MAX_TXFR_SIZE_128B 1
+#define PLX_REG_CTRL2_MAX_TXFR_SIZE_256B 2
+#define PLX_REG_CTRL2_MAX_TXFR_SIZE_512B 3
+#define PLX_REG_CTRL2_MAX_TXFR_SIZE_1KB 4
+#define PLX_REG_CTRL2_MAX_TXFR_SIZE_2KB 5
+#define PLX_REG_CTRL2_MAX_TXFR_SIZE_4B 7
+
+#define PLX_REG_INTR_CTRL_ERROR_EN BIT(0)
+#define PLX_REG_INTR_CTRL_INV_DESC_EN BIT(1)
+#define PLX_REG_INTR_CTRL_ABORT_DONE_EN BIT(3)
+#define PLX_REG_INTR_CTRL_PAUSE_DONE_EN BIT(4)
+#define PLX_REG_INTR_CTRL_IMM_PAUSE_DONE_EN BIT(5)
+
+#define PLX_REG_INTR_STATUS_ERROR BIT(0)
+#define PLX_REG_INTR_STATUS_INV_DESC BIT(1)
+#define PLX_REG_INTR_STATUS_DESC_DONE BIT(2)
+#define PLX_REG_INTR_STATUS_ABORT_DONE BIT(3)
+
+struct plx_dma_hw_std_desc {
+ __le32 flags_and_size;
+ __le16 dst_addr_hi;
+ __le16 src_addr_hi;
+ __le32 dst_addr_lo;
+ __le32 src_addr_lo;
+};
+
+#define PLX_DESC_SIZE_MASK 0x7ffffff
+#define PLX_DESC_FLAG_VALID BIT(31)
+#define PLX_DESC_FLAG_INT_WHEN_DONE BIT(30)
+
+#define PLX_DESC_WB_SUCCESS BIT(30)
+#define PLX_DESC_WB_RD_FAIL BIT(29)
+#define PLX_DESC_WB_WR_FAIL BIT(28)
+
+#define PLX_DMA_RING_COUNT 2048
+
+struct plx_dma_desc {
+ struct dma_async_tx_descriptor txd;
+ struct plx_dma_hw_std_desc *hw;
+ u32 orig_size;
+};
+
+struct plx_dma_dev {
+ struct dma_device dma_dev;
+ struct dma_chan dma_chan;
+ struct pci_dev __rcu *pdev;
+ void __iomem *bar;
+ struct tasklet_struct desc_task;
+
+ spinlock_t ring_lock;
+ bool ring_active;
+ int head;
+ int tail;
+ struct plx_dma_hw_std_desc *hw_ring;
+ dma_addr_t hw_ring_dma;
+ struct plx_dma_desc **desc_ring;
+};
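+
+/*
+ * head is the next free ring slot (advanced in prep), tail is the
+ * next slot to complete (advanced in the descriptor task); both are
+ * free-running and reduced modulo PLX_DMA_RING_COUNT only when
+ * indexing the ring.
+ */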
+
+static struct plx_dma_dev *chan_to_plx_dma_dev(struct dma_chan *c)
+{
+ return container_of(c, struct plx_dma_dev, dma_chan);
+}
+
+static struct plx_dma_desc *to_plx_desc(struct dma_async_tx_descriptor *txd)
+{
+ return container_of(txd, struct plx_dma_desc, txd);
+}
+
+static struct plx_dma_desc *plx_dma_get_desc(struct plx_dma_dev *plxdev, int i)
+{
+ return plxdev->desc_ring[i & (PLX_DMA_RING_COUNT - 1)];
+}
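+
+/*
+ * Note: the index mask above relies on PLX_DMA_RING_COUNT being a
+ * power of two.
+ */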
+
+static void plx_dma_process_desc(struct plx_dma_dev *plxdev)
+{
+ struct dmaengine_result res;
+ struct plx_dma_desc *desc;
+ u32 flags;
+
+ spin_lock_bh(&plxdev->ring_lock);
+
+ while (plxdev->tail != plxdev->head) {
+ desc = plx_dma_get_desc(plxdev, plxdev->tail);
+
+ flags = le32_to_cpu(READ_ONCE(desc->hw->flags_and_size));
+
+ if (flags & PLX_DESC_FLAG_VALID)
+ break;
+
+ res.residue = desc->orig_size - (flags & PLX_DESC_SIZE_MASK);
+
+ if (flags & PLX_DESC_WB_SUCCESS)
+ res.result = DMA_TRANS_NOERROR;
+ else if (flags & PLX_DESC_WB_WR_FAIL)
+ res.result = DMA_TRANS_WRITE_FAILED;
+ else
+ res.result = DMA_TRANS_READ_FAILED;
+
+ dma_cookie_complete(&desc->txd);
+ dma_descriptor_unmap(&desc->txd);
+ dmaengine_desc_get_callback_invoke(&desc->txd, &res);
+ desc->txd.callback = NULL;
+ desc->txd.callback_result = NULL;
+
+ plxdev->tail++;
+ }
+
+ spin_unlock_bh(&plxdev->ring_lock);
+}
+
+static void plx_dma_abort_desc(struct plx_dma_dev *plxdev)
+{
+ struct dmaengine_result res;
+ struct plx_dma_desc *desc;
+
+ plx_dma_process_desc(plxdev);
+
+ spin_lock_bh(&plxdev->ring_lock);
+
+ while (plxdev->tail != plxdev->head) {
+ desc = plx_dma_get_desc(plxdev, plxdev->tail);
+
+ res.residue = desc->orig_size;
+ res.result = DMA_TRANS_ABORTED;
+
+ dma_cookie_complete(&desc->txd);
+ dma_descriptor_unmap(&desc->txd);
+ dmaengine_desc_get_callback_invoke(&desc->txd, &res);
+ desc->txd.callback = NULL;
+ desc->txd.callback_result = NULL;
+
+ plxdev->tail++;
+ }
+
+ spin_unlock_bh(&plxdev->ring_lock);
+}
+
+static void __plx_dma_stop(struct plx_dma_dev *plxdev)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+ u32 val;
+
+ val = readl(plxdev->bar + PLX_REG_CTRL);
+ if (!(val & ~PLX_REG_CTRL_GRACEFUL_PAUSE))
+ return;
+
+ writel(PLX_REG_CTRL_RESET_VAL | PLX_REG_CTRL_GRACEFUL_PAUSE,
+ plxdev->bar + PLX_REG_CTRL);
+
+ while (!time_after(jiffies, timeout)) {
+ val = readl(plxdev->bar + PLX_REG_CTRL);
+ if (val & PLX_REG_CTRL_GRACEFUL_PAUSE_DONE)
+ break;
+
+ cpu_relax();
+ }
+
+ if (!(val & PLX_REG_CTRL_GRACEFUL_PAUSE_DONE))
+ dev_err(plxdev->dma_dev.dev,
+ "Timeout waiting for graceful pause!\n");
+
+ writel(PLX_REG_CTRL_RESET_VAL | PLX_REG_CTRL_GRACEFUL_PAUSE,
+ plxdev->bar + PLX_REG_CTRL);
+
+ writel(0, plxdev->bar + PLX_REG_DESC_RING_COUNT);
+ writel(0, plxdev->bar + PLX_REG_DESC_RING_ADDR);
+ writel(0, plxdev->bar + PLX_REG_DESC_RING_ADDR_HI);
+ writel(0, plxdev->bar + PLX_REG_DESC_RING_NEXT_ADDR);
+}
+
+static void plx_dma_stop(struct plx_dma_dev *plxdev)
+{
+ rcu_read_lock();
+ if (!rcu_dereference(plxdev->pdev)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ __plx_dma_stop(plxdev);
+
+ rcu_read_unlock();
+}
+
+static void plx_dma_desc_task(unsigned long data)
+{
+ struct plx_dma_dev *plxdev = (void *)data;
+
+ plx_dma_process_desc(plxdev);
+}
+
+static struct dma_async_tx_descriptor *plx_dma_prep_memcpy(struct dma_chan *c,
+ dma_addr_t dma_dst, dma_addr_t dma_src, size_t len,
+ unsigned long flags)
+ __acquires(plxdev->ring_lock)
+{
+ struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(c);
+ struct plx_dma_desc *plxdesc;
+
+ spin_lock_bh(&plxdev->ring_lock);
+ if (!plxdev->ring_active)
+ goto err_unlock;
+
+ if (!CIRC_SPACE(plxdev->head, plxdev->tail, PLX_DMA_RING_COUNT))
+ goto err_unlock;
+
+ if (len > PLX_DESC_SIZE_MASK)
+ goto err_unlock;
+
+ plxdesc = plx_dma_get_desc(plxdev, plxdev->head);
+ plxdev->head++;
+
+ plxdesc->hw->dst_addr_lo = cpu_to_le32(lower_32_bits(dma_dst));
+ plxdesc->hw->dst_addr_hi = cpu_to_le16(upper_32_bits(dma_dst));
+ plxdesc->hw->src_addr_lo = cpu_to_le32(lower_32_bits(dma_src));
+ plxdesc->hw->src_addr_hi = cpu_to_le16(upper_32_bits(dma_src));
+
+ plxdesc->orig_size = len;
+
+ if (flags & DMA_PREP_INTERRUPT)
+ len |= PLX_DESC_FLAG_INT_WHEN_DONE;
+
+ plxdesc->hw->flags_and_size = cpu_to_le32(len);
+ plxdesc->txd.flags = flags;
+
+ /* return with the lock held, it will be released in tx_submit */
+
+ return &plxdesc->txd;
+
+err_unlock:
+ /*
+ * Keep sparse happy by restoring an even lock count on
+ * this lock.
+ */
+ __acquire(plxdev->ring_lock);
+
+ spin_unlock_bh(&plxdev->ring_lock);
+ return NULL;
+}
+
+static dma_cookie_t plx_dma_tx_submit(struct dma_async_tx_descriptor *desc)
+ __releases(plxdev->ring_lock)
+{
+ struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(desc->chan);
+ struct plx_dma_desc *plxdesc = to_plx_desc(desc);
+ dma_cookie_t cookie;
+
+ cookie = dma_cookie_assign(desc);
+
+ /*
+ * Ensure the descriptor updates are visible to the dma device
+ * before setting the valid bit.
+ */
+ wmb();
+
+ plxdesc->hw->flags_and_size |= cpu_to_le32(PLX_DESC_FLAG_VALID);
+
+ spin_unlock_bh(&plxdev->ring_lock);
+
+ return cookie;
+}
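+
+/*
+ * plx_dma_prep_memcpy() returns with ring_lock held and the lock is
+ * only dropped here, so a client's prep/submit pair cannot be
+ * interleaved with another submitter.  A minimal (hypothetical)
+ * client sequence using the standard dmaengine API:
+ *
+ *	txd = dmaengine_prep_dma_memcpy(chan, dst, src, len,
+ *					DMA_PREP_INTERRUPT);
+ *	cookie = dmaengine_submit(txd);      (drops ring_lock)
+ *	dma_async_issue_pending(chan);       (starts the hardware)
+ */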
+
+static enum dma_status plx_dma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+ struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);
+ enum dma_status ret;
+
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (ret == DMA_COMPLETE)
+ return ret;
+
+ plx_dma_process_desc(plxdev);
+
+ return dma_cookie_status(chan, cookie, txstate);
+}
+
+static void plx_dma_issue_pending(struct dma_chan *chan)
+{
+ struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);
+
+ rcu_read_lock();
+ if (!rcu_dereference(plxdev->pdev)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ /*
+ * Ensure the valid bits are visible before starting the
+ * DMA engine.
+ */
+ wmb();
+
+ writew(PLX_REG_CTRL_START_VAL, plxdev->bar + PLX_REG_CTRL);
+
+ rcu_read_unlock();
+}
+
+static irqreturn_t plx_dma_isr(int irq, void *devid)
+{
+ struct plx_dma_dev *plxdev = devid;
+ u32 status;
+
+ status = readw(plxdev->bar + PLX_REG_INTR_STATUS);
+
+ if (!status)
+ return IRQ_NONE;
+
+ if (status & PLX_REG_INTR_STATUS_DESC_DONE && plxdev->ring_active)
+ tasklet_schedule(&plxdev->desc_task);
+
+ writew(status, plxdev->bar + PLX_REG_INTR_STATUS);
+
+ return IRQ_HANDLED;
+}
+
+static int plx_dma_alloc_desc(struct plx_dma_dev *plxdev)
+{
+ struct plx_dma_desc *desc;
+ int i;
+
+ plxdev->desc_ring = kcalloc(PLX_DMA_RING_COUNT,
+ sizeof(*plxdev->desc_ring), GFP_KERNEL);
+ if (!plxdev->desc_ring)
+ return -ENOMEM;
+
+ for (i = 0; i < PLX_DMA_RING_COUNT; i++) {
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ goto free_and_exit;
+
+ dma_async_tx_descriptor_init(&desc->txd, &plxdev->dma_chan);
+ desc->txd.tx_submit = plx_dma_tx_submit;
+ desc->hw = &plxdev->hw_ring[i];
+
+ plxdev->desc_ring[i] = desc;
+ }
+
+ return 0;
+
+free_and_exit:
+ for (i = 0; i < PLX_DMA_RING_COUNT; i++)
+ kfree(plxdev->desc_ring[i]);
+ kfree(plxdev->desc_ring);
+ return -ENOMEM;
+}
+
+static int plx_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);
+ size_t ring_sz = PLX_DMA_RING_COUNT * sizeof(*plxdev->hw_ring);
+ int rc;
+
+ plxdev->head = plxdev->tail = 0;
+ plxdev->hw_ring = dma_alloc_coherent(plxdev->dma_dev.dev, ring_sz,
+ &plxdev->hw_ring_dma, GFP_KERNEL);
+ if (!plxdev->hw_ring)
+ return -ENOMEM;
+
+ rc = plx_dma_alloc_desc(plxdev);
+ if (rc)
+ goto out_free_hw_ring;
+
+ rcu_read_lock();
+ if (!rcu_dereference(plxdev->pdev)) {
+ rcu_read_unlock();
+ rc = -ENODEV;
+ goto out_free_hw_ring;
+ }
+
+ writel(PLX_REG_CTRL_RESET_VAL, plxdev->bar + PLX_REG_CTRL);
+ writel(lower_32_bits(plxdev->hw_ring_dma),
+ plxdev->bar + PLX_REG_DESC_RING_ADDR);
+ writel(upper_32_bits(plxdev->hw_ring_dma),
+ plxdev->bar + PLX_REG_DESC_RING_ADDR_HI);
+ writel(lower_32_bits(plxdev->hw_ring_dma),
+ plxdev->bar + PLX_REG_DESC_RING_NEXT_ADDR);
+ writel(PLX_DMA_RING_COUNT, plxdev->bar + PLX_REG_DESC_RING_COUNT);
+ writel(PLX_REG_PREF_LIMIT_PREF_FOUR, plxdev->bar + PLX_REG_PREF_LIMIT);
+
+ plxdev->ring_active = true;
+
+ rcu_read_unlock();
+
+ return PLX_DMA_RING_COUNT;
+
+out_free_hw_ring:
+ dma_free_coherent(plxdev->dma_dev.dev, ring_sz, plxdev->hw_ring,
+ plxdev->hw_ring_dma);
+ return rc;
+}
+
+static void plx_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);
+ size_t ring_sz = PLX_DMA_RING_COUNT * sizeof(*plxdev->hw_ring);
+ struct pci_dev *pdev;
+ int irq = -1;
+ int i;
+
+ spin_lock_bh(&plxdev->ring_lock);
+ plxdev->ring_active = false;
+ spin_unlock_bh(&plxdev->ring_lock);
+
+ plx_dma_stop(plxdev);
+
+ rcu_read_lock();
+ pdev = rcu_dereference(plxdev->pdev);
+ if (pdev)
+ irq = pci_irq_vector(pdev, 0);
+ rcu_read_unlock();
+
+ if (irq > 0)
+ synchronize_irq(irq);
+
+ tasklet_kill(&plxdev->desc_task);
+
+ plx_dma_abort_desc(plxdev);
+
+ for (i = 0; i < PLX_DMA_RING_COUNT; i++)
+ kfree(plxdev->desc_ring[i]);
+
+ kfree(plxdev->desc_ring);
+ dma_free_coherent(plxdev->dma_dev.dev, ring_sz, plxdev->hw_ring,
+ plxdev->hw_ring_dma);
+}
+
+static void plx_dma_release(struct dma_device *dma_dev)
+{
+ struct plx_dma_dev *plxdev =
+ container_of(dma_dev, struct plx_dma_dev, dma_dev);
+
+ put_device(dma_dev->dev);
+ kfree(plxdev);
+}
+
+static int plx_dma_create(struct pci_dev *pdev)
+{
+ struct plx_dma_dev *plxdev;
+ struct dma_device *dma;
+ struct dma_chan *chan;
+ int rc;
+
+ plxdev = kzalloc(sizeof(*plxdev), GFP_KERNEL);
+ if (!plxdev)
+ return -ENOMEM;
+
+ rc = request_irq(pci_irq_vector(pdev, 0), plx_dma_isr, 0,
+ KBUILD_MODNAME, plxdev);
+ if (rc) {
+ kfree(plxdev);
+ return rc;
+ }
+
+ spin_lock_init(&plxdev->ring_lock);
+ tasklet_init(&plxdev->desc_task, plx_dma_desc_task,
+ (unsigned long)plxdev);
+
+ RCU_INIT_POINTER(plxdev->pdev, pdev);
+ plxdev->bar = pcim_iomap_table(pdev)[0];
+
+ dma = &plxdev->dma_dev;
+ dma->chancnt = 1;
+ INIT_LIST_HEAD(&dma->channels);
+ dma_cap_set(DMA_MEMCPY, dma->cap_mask);
+ dma->copy_align = DMAENGINE_ALIGN_1_BYTE;
+ dma->dev = get_device(&pdev->dev);
+
+ dma->device_alloc_chan_resources = plx_dma_alloc_chan_resources;
+ dma->device_free_chan_resources = plx_dma_free_chan_resources;
+ dma->device_prep_dma_memcpy = plx_dma_prep_memcpy;
+ dma->device_issue_pending = plx_dma_issue_pending;
+ dma->device_tx_status = plx_dma_tx_status;
+ dma->device_release = plx_dma_release;
+
+ chan = &plxdev->dma_chan;
+ chan->device = dma;
+ dma_cookie_init(chan);
+ list_add_tail(&chan->device_node, &dma->channels);
+
+ rc = dma_async_device_register(dma);
+ if (rc) {
+ pci_err(pdev, "Failed to register dma device: %d\n", rc);
+ free_irq(pci_irq_vector(pdev, 0), plxdev);
+ kfree(plxdev);
+ return rc;
+ }
+
+ pci_set_drvdata(pdev, plxdev);
+
+ return 0;
+}
+
+static int plx_dma_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ int rc;
+
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (rc)
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc)
+ return rc;
+
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (rc)
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc)
+ return rc;
+
+ rc = pcim_iomap_regions(pdev, 1, KBUILD_MODNAME);
+ if (rc)
+ return rc;
+
+ rc = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (rc <= 0)
+ return rc;
+
+ pci_set_master(pdev);
+
+ rc = plx_dma_create(pdev);
+ if (rc)
+ goto err_free_irq_vectors;
+
+ pci_info(pdev, "PLX DMA Channel Registered\n");
+
+ return 0;
+
+err_free_irq_vectors:
+ pci_free_irq_vectors(pdev);
+ return rc;
+}
+
+static void plx_dma_remove(struct pci_dev *pdev)
+{
+ struct plx_dma_dev *plxdev = pci_get_drvdata(pdev);
+
+ free_irq(pci_irq_vector(pdev, 0), plxdev);
+
+ rcu_assign_pointer(plxdev->pdev, NULL);
+ synchronize_rcu();
+
+ spin_lock_bh(&plxdev->ring_lock);
+ plxdev->ring_active = false;
+ spin_unlock_bh(&plxdev->ring_lock);
+
+ __plx_dma_stop(plxdev);
+ plx_dma_abort_desc(plxdev);
+
+ plxdev->bar = NULL;
+ dma_async_device_unregister(&plxdev->dma_dev);
+
+ pci_free_irq_vectors(pdev);
+}
+
+static const struct pci_device_id plx_dma_pci_tbl[] = {
+ {
+ .vendor = PCI_VENDOR_ID_PLX,
+ .device = 0x87D0,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .class = PCI_CLASS_SYSTEM_OTHER << 8,
+ .class_mask = 0xFFFFFFFF,
+ },
+ {0}
+};
+MODULE_DEVICE_TABLE(pci, plx_dma_pci_tbl);
+
+static struct pci_driver plx_dma_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = plx_dma_pci_tbl,
+ .probe = plx_dma_probe,
+ .remove = plx_dma_remove,
+};
+module_pci_driver(plx_dma_pci_driver);
s3c24xx_dma_start_next_sg(s3cchan, txd);
}
-static void s3c24xx_dma_free_txd_list(struct s3c24xx_dma_engine *s3cdma,
- struct s3c24xx_dma_chan *s3cchan)
-{
- LIST_HEAD(head);
-
- vchan_get_all_descriptors(&s3cchan->vc, &head);
- vchan_dma_desc_free_list(&s3cchan->vc, &head);
-}
-
/*
* Try to allocate a physical channel. When successful, assign it to
* this virtual channel, and initiate the next descriptor. The
{
struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
+ LIST_HEAD(head);
unsigned long flags;
- int ret = 0;
+ int ret;
spin_lock_irqsave(&s3cchan->vc.lock, flags);
}
/* Dequeue jobs not yet fired as well */
- s3c24xx_dma_free_txd_list(s3cdma, s3cchan);
+
+ vchan_get_all_descriptors(&s3cchan->vc, &head);
+
+ spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
+
+ vchan_dma_desc_free_list(&s3cchan->vc, &head);
+
+ return 0;
+
unlock:
spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
/* Basic sanity check */
if (pdata->num_phy_channels > MAX_DMA_CHANNELS) {
- dev_err(&pdev->dev, "to many dma channels %d, max %d\n",
+ dev_err(&pdev->dev, "too many dma channels %d, max %d\n",
pdata->num_phy_channels, MAX_DMA_CHANNELS);
return -EINVAL;
}
kfree(chan->desc);
chan->desc = NULL;
vchan_get_all_descriptors(&chan->vchan, &head);
- vchan_dma_desc_free_list(&chan->vchan, &head);
sf_pdma_disclaim_chan(chan);
spin_unlock_irqrestore(&chan->vchan.lock, flags);
+ vchan_dma_desc_free_list(&chan->vchan, &head);
}
static size_t sf_pdma_desc_residue(struct sf_pdma_chan *chan,
chan->desc = NULL;
chan->xfer_err = false;
vchan_get_all_descriptors(&chan->vchan, &head);
- vchan_dma_desc_free_list(&chan->vchan, &head);
spin_unlock_irqrestore(&chan->vchan.lock, flags);
+ vchan_dma_desc_free_list(&chan->vchan, &head);
return 0;
}
dma_addr_t src, dest;
u32 endpoints;
int nr_periods, offset, plength, i;
+ u8 ram_type, io_mode, linear_mode;
if (!is_slave_direction(dir)) {
dev_err(chan2dev(chan), "Invalid DMA direction\n");
return NULL;
}
- if (vchan->is_dedicated) {
- /*
- * As we are using this just for audio data, we need to use
- * normal DMA. There is nothing stopping us from supporting
- * dedicated DMA here as well, so if a client comes up and
- * requires it, it will be simple to implement it.
- */
- dev_err(chan2dev(chan),
- "Cyclic transfers are only supported on Normal DMA\n");
- return NULL;
- }
-
contract = generate_dma_contract();
if (!contract)
return NULL;
contract->is_cyclic = 1;
- /* Figure out the endpoints and the address we need */
+ if (vchan->is_dedicated) {
+ io_mode = SUN4I_DDMA_ADDR_MODE_IO;
+ linear_mode = SUN4I_DDMA_ADDR_MODE_LINEAR;
+ ram_type = SUN4I_DDMA_DRQ_TYPE_SDRAM;
+ } else {
+ io_mode = SUN4I_NDMA_ADDR_MODE_IO;
+ linear_mode = SUN4I_NDMA_ADDR_MODE_LINEAR;
+ ram_type = SUN4I_NDMA_DRQ_TYPE_SDRAM;
+ }
+
if (dir == DMA_MEM_TO_DEV) {
src = buf;
dest = sconfig->dst_addr;
- endpoints = SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM) |
- SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) |
- SUN4I_DMA_CFG_DST_ADDR_MODE(SUN4I_NDMA_ADDR_MODE_IO);
+ endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) |
+ SUN4I_DMA_CFG_DST_ADDR_MODE(io_mode) |
+ SUN4I_DMA_CFG_SRC_DRQ_TYPE(ram_type);
} else {
src = sconfig->src_addr;
dest = buf;
- endpoints = SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
- SUN4I_DMA_CFG_SRC_ADDR_MODE(SUN4I_NDMA_ADDR_MODE_IO) |
- SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM);
+ endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(ram_type) |
+ SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
+ SUN4I_DMA_CFG_SRC_ADDR_MODE(io_mode);
}
/*
dest = buf + offset;
/* Make the promise */
- promise = generate_ndma_promise(chan, src, dest,
- plength, sconfig, dir);
+ if (vchan->is_dedicated)
+ promise = generate_ddma_promise(chan, src, dest,
+ plength, sconfig);
+ else
+ promise = generate_ndma_promise(chan, src, dest,
+ plength, sconfig, dir);
+
if (!promise) {
/* TODO: should we free everything? */
return NULL;
}
spin_lock_irqsave(&vchan->vc.lock, flags);
- vchan_dma_desc_free_list(&vchan->vc, &head);
/* Clear these so the vchan is usable again */
vchan->processing = NULL;
vchan->pchan = NULL;
spin_unlock_irqrestore(&vchan->vc.lock, flags);
+ vchan_dma_desc_free_list(&vchan->vc, &head);
+
return 0;
}
Enable support for the TI sDMA (System DMA or DMA4) controller. This
DMA engine is found on OMAP and DRA7xx parts.
+config TI_K3_UDMA
+ bool "Texas Instruments UDMA support"
+ depends on ARCH_K3 || COMPILE_TEST
+ depends on TI_SCI_PROTOCOL
+ depends on TI_SCI_INTA_IRQCHIP
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ select TI_K3_RINGACC
+ select TI_K3_PSIL
+ help
+ Enable support for the TI UDMA (Unified DMA) controller. This
+ DMA engine is used in AM65x and J721E SoCs.
+
+config TI_K3_UDMA_GLUE_LAYER
+ bool "Texas Instruments UDMA Glue layer for non DMAengine users"
+ depends on ARCH_K3 || COMPILE_TEST
+ depends on TI_K3_UDMA
+ help
+ Say y here to support the K3 NAVSS DMA glue interface.
+ If unsure, say N.
+
+config TI_K3_PSIL
+ bool
+
config TI_DMA_CROSSBAR
bool
obj-$(CONFIG_TI_CPPI41) += cppi41.o
obj-$(CONFIG_TI_EDMA) += edma.o
obj-$(CONFIG_DMA_OMAP) += omap-dma.o
+obj-$(CONFIG_TI_K3_UDMA) += k3-udma.o
+obj-$(CONFIG_TI_K3_UDMA_GLUE_LAYER) += k3-udma-glue.o
+obj-$(CONFIG_TI_K3_PSIL) += k3-psil.o k3-psil-am654.o k3-psil-j721e.o
obj-$(CONFIG_TI_DMA_CROSSBAR) += dma-crossbar.o
if (!info)
return -ENODEV;
- pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- dev_err(dev, "pm_runtime_get_sync() failed\n");
- return ret;
- }
-
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret)
return ret;
platform_set_drvdata(pdev, ecc);
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm_runtime_get_sync() failed\n");
+ pm_runtime_disable(dev);
+ return ret;
+ }
+
/* Get eDMA3 configuration from IP */
ret = edma_setup_from_hw(dev, info, ecc);
if (ret)
- return ret;
+ goto err_disable_pm;
/* Allocate memory based on the information we got from the IP */
ecc->slave_chans = devm_kcalloc(dev, ecc->num_channels,
sizeof(*ecc->slave_chans), GFP_KERNEL);
- if (!ecc->slave_chans)
- return -ENOMEM;
ecc->slot_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots),
sizeof(unsigned long), GFP_KERNEL);
- if (!ecc->slot_inuse)
- return -ENOMEM;
ecc->channels_mask = devm_kcalloc(dev,
BITS_TO_LONGS(ecc->num_channels),
sizeof(unsigned long), GFP_KERNEL);
- if (!ecc->channels_mask)
- return -ENOMEM;
+ if (!ecc->slave_chans || !ecc->slot_inuse || !ecc->channels_mask) {
+ ret = -ENOMEM;
+ goto err_disable_pm;
+ }
/* Mark all channels available initially */
bitmap_fill(ecc->channels_mask, ecc->num_channels);
ecc);
if (ret) {
dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret);
- return ret;
+ goto err_disable_pm;
}
ecc->ccint = irq;
}
ecc);
if (ret) {
dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret);
- return ret;
+ goto err_disable_pm;
}
ecc->ccerrint = irq;
}
ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY);
if (ecc->dummy_slot < 0) {
dev_err(dev, "Can't allocate PaRAM dummy slot\n");
- return ecc->dummy_slot;
+ ret = ecc->dummy_slot;
+ goto err_disable_pm;
}
queue_priority_mapping = info->queue_priority_mapping;
err_reg1:
edma_free_slot(ecc, ecc->dummy_slot);
+err_disable_pm:
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
return ret;
}
if (ecc->dma_memcpy)
dma_async_device_unregister(ecc->dma_memcpy);
edma_free_slot(ecc, ecc->dummy_slot);
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
return 0;
}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+
+#include <linux/kernel.h>
+
+#include "k3-psil-priv.h"
+
+#define PSIL_PDMA_XY_TR(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ }, \
+ }
+
+#define PSIL_PDMA_XY_PKT(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .pkt_mode = 1, \
+ }, \
+ }
+
+#define PSIL_ETHERNET(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 16, \
+ }, \
+ }
+
+#define PSIL_SA2UL(x, tx) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 64, \
+ .notdpkt = tx, \
+ }, \
+ }
+
+/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
+static struct psil_ep am654_src_ep_map[] = {
+ /* SA2UL */
+ PSIL_SA2UL(0x4000, 0),
+ PSIL_SA2UL(0x4001, 0),
+ PSIL_SA2UL(0x4002, 0),
+ PSIL_SA2UL(0x4003, 0),
+ /* PRU_ICSSG0 */
+ PSIL_ETHERNET(0x4100),
+ PSIL_ETHERNET(0x4101),
+ PSIL_ETHERNET(0x4102),
+ PSIL_ETHERNET(0x4103),
+ /* PRU_ICSSG1 */
+ PSIL_ETHERNET(0x4200),
+ PSIL_ETHERNET(0x4201),
+ PSIL_ETHERNET(0x4202),
+ PSIL_ETHERNET(0x4203),
+ /* PRU_ICSSG2 */
+ PSIL_ETHERNET(0x4300),
+ PSIL_ETHERNET(0x4301),
+ PSIL_ETHERNET(0x4302),
+ PSIL_ETHERNET(0x4303),
+ /* PDMA0 - McASPs */
+ PSIL_PDMA_XY_TR(0x4400),
+ PSIL_PDMA_XY_TR(0x4401),
+ PSIL_PDMA_XY_TR(0x4402),
+ /* PDMA1 - SPI0-4 */
+ PSIL_PDMA_XY_PKT(0x4500),
+ PSIL_PDMA_XY_PKT(0x4501),
+ PSIL_PDMA_XY_PKT(0x4502),
+ PSIL_PDMA_XY_PKT(0x4503),
+ PSIL_PDMA_XY_PKT(0x4504),
+ PSIL_PDMA_XY_PKT(0x4505),
+ PSIL_PDMA_XY_PKT(0x4506),
+ PSIL_PDMA_XY_PKT(0x4507),
+ PSIL_PDMA_XY_PKT(0x4508),
+ PSIL_PDMA_XY_PKT(0x4509),
+ PSIL_PDMA_XY_PKT(0x450a),
+ PSIL_PDMA_XY_PKT(0x450b),
+ PSIL_PDMA_XY_PKT(0x450c),
+ PSIL_PDMA_XY_PKT(0x450d),
+ PSIL_PDMA_XY_PKT(0x450e),
+ PSIL_PDMA_XY_PKT(0x450f),
+ PSIL_PDMA_XY_PKT(0x4510),
+ PSIL_PDMA_XY_PKT(0x4511),
+ PSIL_PDMA_XY_PKT(0x4512),
+ PSIL_PDMA_XY_PKT(0x4513),
+ /* PDMA1 - USART0-2 */
+ PSIL_PDMA_XY_PKT(0x4514),
+ PSIL_PDMA_XY_PKT(0x4515),
+ PSIL_PDMA_XY_PKT(0x4516),
+ /* CPSW0 */
+ PSIL_ETHERNET(0x7000),
+ /* MCU_PDMA0 - ADCs */
+ PSIL_PDMA_XY_TR(0x7100),
+ PSIL_PDMA_XY_TR(0x7101),
+ PSIL_PDMA_XY_TR(0x7102),
+ PSIL_PDMA_XY_TR(0x7103),
+ /* MCU_PDMA1 - MCU_SPI0-2 */
+ PSIL_PDMA_XY_PKT(0x7200),
+ PSIL_PDMA_XY_PKT(0x7201),
+ PSIL_PDMA_XY_PKT(0x7202),
+ PSIL_PDMA_XY_PKT(0x7203),
+ PSIL_PDMA_XY_PKT(0x7204),
+ PSIL_PDMA_XY_PKT(0x7205),
+ PSIL_PDMA_XY_PKT(0x7206),
+ PSIL_PDMA_XY_PKT(0x7207),
+ PSIL_PDMA_XY_PKT(0x7208),
+ PSIL_PDMA_XY_PKT(0x7209),
+ PSIL_PDMA_XY_PKT(0x720a),
+ PSIL_PDMA_XY_PKT(0x720b),
+ /* MCU_PDMA1 - MCU_USART0 */
+ PSIL_PDMA_XY_PKT(0x7212),
+};
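+
+/*
+ * Note: the destination (TX) thread IDs below appear to be the
+ * matching source IDs with bit 15 set (e.g. 0x4000 -> 0xc000,
+ * 0x7000 -> 0xf000).
+ */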
+
+/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */
+static struct psil_ep am654_dst_ep_map[] = {
+ /* SA2UL */
+ PSIL_SA2UL(0xc000, 1),
+ PSIL_SA2UL(0xc001, 1),
+ /* PRU_ICSSG0 */
+ PSIL_ETHERNET(0xc100),
+ PSIL_ETHERNET(0xc101),
+ PSIL_ETHERNET(0xc102),
+ PSIL_ETHERNET(0xc103),
+ PSIL_ETHERNET(0xc104),
+ PSIL_ETHERNET(0xc105),
+ PSIL_ETHERNET(0xc106),
+ PSIL_ETHERNET(0xc107),
+ /* PRU_ICSSG1 */
+ PSIL_ETHERNET(0xc200),
+ PSIL_ETHERNET(0xc201),
+ PSIL_ETHERNET(0xc202),
+ PSIL_ETHERNET(0xc203),
+ PSIL_ETHERNET(0xc204),
+ PSIL_ETHERNET(0xc205),
+ PSIL_ETHERNET(0xc206),
+ PSIL_ETHERNET(0xc207),
+ /* PRU_ICSSG2 */
+ PSIL_ETHERNET(0xc300),
+ PSIL_ETHERNET(0xc301),
+ PSIL_ETHERNET(0xc302),
+ PSIL_ETHERNET(0xc303),
+ PSIL_ETHERNET(0xc304),
+ PSIL_ETHERNET(0xc305),
+ PSIL_ETHERNET(0xc306),
+ PSIL_ETHERNET(0xc307),
+ /* CPSW0 */
+ PSIL_ETHERNET(0xf000),
+ PSIL_ETHERNET(0xf001),
+ PSIL_ETHERNET(0xf002),
+ PSIL_ETHERNET(0xf003),
+ PSIL_ETHERNET(0xf004),
+ PSIL_ETHERNET(0xf005),
+ PSIL_ETHERNET(0xf006),
+ PSIL_ETHERNET(0xf007),
+};
+
+struct psil_ep_map am654_ep_map = {
+ .name = "am654",
+ .src = am654_src_ep_map,
+ .src_count = ARRAY_SIZE(am654_src_ep_map),
+ .dst = am654_dst_ep_map,
+ .dst_count = ARRAY_SIZE(am654_dst_ep_map),
+};
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+
+#include <linux/kernel.h>
+
+#include "k3-psil-priv.h"
+
+#define PSIL_PDMA_XY_TR(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ }, \
+ }
+
+#define PSIL_PDMA_XY_PKT(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .pkt_mode = 1, \
+ }, \
+ }
+
+#define PSIL_PDMA_MCASP(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .pdma_acc32 = 1, \
+ .pdma_burst = 1, \
+ }, \
+ }
+
+#define PSIL_ETHERNET(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 16, \
+ }, \
+ }
+
+#define PSIL_SA2UL(x, tx) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 64, \
+ .notdpkt = tx, \
+ }, \
+ }
+
+/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
+static struct psil_ep j721e_src_ep_map[] = {
+ /* SA2UL */
+ PSIL_SA2UL(0x4000, 0),
+ PSIL_SA2UL(0x4001, 0),
+ PSIL_SA2UL(0x4002, 0),
+ PSIL_SA2UL(0x4003, 0),
+ /* PRU_ICSSG0 */
+ PSIL_ETHERNET(0x4100),
+ PSIL_ETHERNET(0x4101),
+ PSIL_ETHERNET(0x4102),
+ PSIL_ETHERNET(0x4103),
+ /* PRU_ICSSG1 */
+ PSIL_ETHERNET(0x4200),
+ PSIL_ETHERNET(0x4201),
+ PSIL_ETHERNET(0x4202),
+ PSIL_ETHERNET(0x4203),
+ /* PDMA6 (PSIL_PDMA_MCASP_G0) - McASP0-2 */
+ PSIL_PDMA_MCASP(0x4400),
+ PSIL_PDMA_MCASP(0x4401),
+ PSIL_PDMA_MCASP(0x4402),
+ /* PDMA7 (PSIL_PDMA_MCASP_G1) - McASP3-11 */
+ PSIL_PDMA_MCASP(0x4500),
+ PSIL_PDMA_MCASP(0x4501),
+ PSIL_PDMA_MCASP(0x4502),
+ PSIL_PDMA_MCASP(0x4503),
+ PSIL_PDMA_MCASP(0x4504),
+ PSIL_PDMA_MCASP(0x4505),
+ PSIL_PDMA_MCASP(0x4506),
+ PSIL_PDMA_MCASP(0x4507),
+ PSIL_PDMA_MCASP(0x4508),
+ /* PDMA8 (PDMA_MISC_G0) - SPI0-1 */
+ PSIL_PDMA_XY_PKT(0x4600),
+ PSIL_PDMA_XY_PKT(0x4601),
+ PSIL_PDMA_XY_PKT(0x4602),
+ PSIL_PDMA_XY_PKT(0x4603),
+ PSIL_PDMA_XY_PKT(0x4604),
+ PSIL_PDMA_XY_PKT(0x4605),
+ PSIL_PDMA_XY_PKT(0x4606),
+ PSIL_PDMA_XY_PKT(0x4607),
+ /* PDMA9 (PDMA_MISC_G1) - SPI2-3 */
+ PSIL_PDMA_XY_PKT(0x460c),
+ PSIL_PDMA_XY_PKT(0x460d),
+ PSIL_PDMA_XY_PKT(0x460e),
+ PSIL_PDMA_XY_PKT(0x460f),
+ PSIL_PDMA_XY_PKT(0x4610),
+ PSIL_PDMA_XY_PKT(0x4611),
+ PSIL_PDMA_XY_PKT(0x4612),
+ PSIL_PDMA_XY_PKT(0x4613),
+ /* PDMA10 (PDMA_MISC_G2) - SPI4-5 */
+ PSIL_PDMA_XY_PKT(0x4618),
+ PSIL_PDMA_XY_PKT(0x4619),
+ PSIL_PDMA_XY_PKT(0x461a),
+ PSIL_PDMA_XY_PKT(0x461b),
+ PSIL_PDMA_XY_PKT(0x461c),
+ PSIL_PDMA_XY_PKT(0x461d),
+ PSIL_PDMA_XY_PKT(0x461e),
+ PSIL_PDMA_XY_PKT(0x461f),
+ /* PDMA11 (PDMA_MISC_G3) */
+ PSIL_PDMA_XY_PKT(0x4624),
+ PSIL_PDMA_XY_PKT(0x4625),
+ PSIL_PDMA_XY_PKT(0x4626),
+ PSIL_PDMA_XY_PKT(0x4627),
+ PSIL_PDMA_XY_PKT(0x4628),
+ PSIL_PDMA_XY_PKT(0x4629),
+ PSIL_PDMA_XY_PKT(0x4630),
+ PSIL_PDMA_XY_PKT(0x463a),
+ /* PDMA13 (PDMA_USART_G0) - UART0-1 */
+ PSIL_PDMA_XY_PKT(0x4700),
+ PSIL_PDMA_XY_PKT(0x4701),
+ /* PDMA14 (PDMA_USART_G1) - UART2-3 */
+ PSIL_PDMA_XY_PKT(0x4702),
+ PSIL_PDMA_XY_PKT(0x4703),
+ /* PDMA15 (PDMA_USART_G2) - UART4-9 */
+ PSIL_PDMA_XY_PKT(0x4704),
+ PSIL_PDMA_XY_PKT(0x4705),
+ PSIL_PDMA_XY_PKT(0x4706),
+ PSIL_PDMA_XY_PKT(0x4707),
+ PSIL_PDMA_XY_PKT(0x4708),
+ PSIL_PDMA_XY_PKT(0x4709),
+ /* CPSW9 */
+ PSIL_ETHERNET(0x4a00),
+ /* CPSW0 */
+ PSIL_ETHERNET(0x7000),
+ /* MCU_PDMA0 (MCU_PDMA_MISC_G0) - SPI0 */
+ PSIL_PDMA_XY_PKT(0x7100),
+ PSIL_PDMA_XY_PKT(0x7101),
+ PSIL_PDMA_XY_PKT(0x7102),
+ PSIL_PDMA_XY_PKT(0x7103),
+ /* MCU_PDMA1 (MCU_PDMA_MISC_G1) - SPI1-2 */
+ PSIL_PDMA_XY_PKT(0x7200),
+ PSIL_PDMA_XY_PKT(0x7201),
+ PSIL_PDMA_XY_PKT(0x7202),
+ PSIL_PDMA_XY_PKT(0x7203),
+ PSIL_PDMA_XY_PKT(0x7204),
+ PSIL_PDMA_XY_PKT(0x7205),
+ PSIL_PDMA_XY_PKT(0x7206),
+ PSIL_PDMA_XY_PKT(0x7207),
+ /* MCU_PDMA2 (MCU_PDMA_MISC_G2) - UART0 */
+ PSIL_PDMA_XY_PKT(0x7300),
+ /* MCU_PDMA_ADC - ADC0-1 */
+ PSIL_PDMA_XY_TR(0x7400),
+ PSIL_PDMA_XY_TR(0x7401),
+ PSIL_PDMA_XY_TR(0x7402),
+ PSIL_PDMA_XY_TR(0x7403),
+ /* SA2UL */
+ PSIL_SA2UL(0x7500, 0),
+ PSIL_SA2UL(0x7501, 0),
+};
+
+/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */
+static struct psil_ep j721e_dst_ep_map[] = {
+ /* SA2UL */
+ PSIL_SA2UL(0xc000, 1),
+ PSIL_SA2UL(0xc001, 1),
+ /* PRU_ICSSG0 */
+ PSIL_ETHERNET(0xc100),
+ PSIL_ETHERNET(0xc101),
+ PSIL_ETHERNET(0xc102),
+ PSIL_ETHERNET(0xc103),
+ PSIL_ETHERNET(0xc104),
+ PSIL_ETHERNET(0xc105),
+ PSIL_ETHERNET(0xc106),
+ PSIL_ETHERNET(0xc107),
+ /* PRU_ICSSG1 */
+ PSIL_ETHERNET(0xc200),
+ PSIL_ETHERNET(0xc201),
+ PSIL_ETHERNET(0xc202),
+ PSIL_ETHERNET(0xc203),
+ PSIL_ETHERNET(0xc204),
+ PSIL_ETHERNET(0xc205),
+ PSIL_ETHERNET(0xc206),
+ PSIL_ETHERNET(0xc207),
+ /* CPSW9 */
+ PSIL_ETHERNET(0xca00),
+ PSIL_ETHERNET(0xca01),
+ PSIL_ETHERNET(0xca02),
+ PSIL_ETHERNET(0xca03),
+ PSIL_ETHERNET(0xca04),
+ PSIL_ETHERNET(0xca05),
+ PSIL_ETHERNET(0xca06),
+ PSIL_ETHERNET(0xca07),
+ /* CPSW0 */
+ PSIL_ETHERNET(0xf000),
+ PSIL_ETHERNET(0xf001),
+ PSIL_ETHERNET(0xf002),
+ PSIL_ETHERNET(0xf003),
+ PSIL_ETHERNET(0xf004),
+ PSIL_ETHERNET(0xf005),
+ PSIL_ETHERNET(0xf006),
+ PSIL_ETHERNET(0xf007),
+ /* SA2UL */
+ PSIL_SA2UL(0xf500, 1),
+};
+
+struct psil_ep_map j721e_ep_map = {
+ .name = "j721e",
+ .src = j721e_src_ep_map,
+ .src_count = ARRAY_SIZE(j721e_src_ep_map),
+ .dst = j721e_dst_ep_map,
+ .dst_count = ARRAY_SIZE(j721e_dst_ep_map),
+};
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#ifndef K3_PSIL_PRIV_H_
+#define K3_PSIL_PRIV_H_
+
+#include <linux/dma/k3-psil.h>
+
+struct psil_ep {
+ u32 thread_id;
+ struct psil_endpoint_config ep_config;
+};
+
+/**
+ * struct psil_ep_map - PSI-L thread ID configuration maps
+ * @name: Name of the map, set it to the name of the SoC
+ * @src: Array of source PSI-L thread configurations
+ * @src_count: Number of entries in the src array
+ * @dst: Array of destination PSI-L thread configurations
+ * @dst_count: Number of entries in the dst array
+ *
+ * In case of symmetric configuration for a matching src/dst thread (for example
+ * 0x4400 and 0xc400) only the src configuration can be present. If no dst
+ * configuration is found, the code will look for (dst_thread_id & ~0x8000)
+ * to find the symmetric match.
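+ *
+ * For example, a lookup for destination thread 0xc400 with no entry in
+ * @dst falls back to the @src entry for thread 0x4400, since
+ * 0xc400 & ~0x8000 == 0x4400.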
+ */
+struct psil_ep_map {
+ char *name;
+ struct psil_ep *src;
+ int src_count;
+ struct psil_ep *dst;
+ int dst_count;
+};
+
+struct psil_endpoint_config *psil_get_ep_config(u32 thread_id);
+
+/* SoC PSI-L endpoint maps */
+extern struct psil_ep_map am654_ep_map;
+extern struct psil_ep_map j721e_ep_map;
+
+#endif /* K3_PSIL_PRIV_H_ */
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+
+#include "k3-psil-priv.h"
+
+static DEFINE_MUTEX(ep_map_mutex);
+static struct psil_ep_map *soc_ep_map;
+
+struct psil_endpoint_config *psil_get_ep_config(u32 thread_id)
+{
+ int i;
+
+ mutex_lock(&ep_map_mutex);
+ if (!soc_ep_map) {
+ if (of_machine_is_compatible("ti,am654")) {
+ soc_ep_map = &am654_ep_map;
+ } else if (of_machine_is_compatible("ti,j721e")) {
+ soc_ep_map = &j721e_ep_map;
+ } else {
+ pr_err("PSIL: No compatible machine found for map\n");
+ return ERR_PTR(-ENOTSUPP);
+ }
+ pr_debug("%s: Using map for %s\n", __func__, soc_ep_map->name);
+ }
+ mutex_unlock(&ep_map_mutex);
+
+ if (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET && soc_ep_map->dst) {
+ /* check in destination thread map */
+ for (i = 0; i < soc_ep_map->dst_count; i++) {
+ if (soc_ep_map->dst[i].thread_id == thread_id)
+ return &soc_ep_map->dst[i].ep_config;
+ }
+ }
+
+ thread_id &= ~K3_PSIL_DST_THREAD_ID_OFFSET;
+ if (soc_ep_map->src) {
+ for (i = 0; i < soc_ep_map->src_count; i++) {
+ if (soc_ep_map->src[i].thread_id == thread_id)
+ return &soc_ep_map->src[i].ep_config;
+ }
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+EXPORT_SYMBOL_GPL(psil_get_ep_config);
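+
+/*
+ * Illustrative usage sketch (not a normative part of this patch): callers
+ * must check the return value with IS_ERR(), for example:
+ *
+ * struct psil_endpoint_config *cfg = psil_get_ep_config(0x4400);
+ *
+ * if (IS_ERR(cfg))
+ * return PTR_ERR(cfg);
+ */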
+
+int psil_set_new_ep_config(struct device *dev, const char *name,
+ struct psil_endpoint_config *ep_config)
+{
+ struct psil_endpoint_config *dst_ep_config;
+ struct of_phandle_args dma_spec;
+ u32 thread_id;
+ int index;
+
+ if (!dev || !dev->of_node)
+ return -EINVAL;
+
+ index = of_property_match_string(dev->of_node, "dma-names", name);
+ if (index < 0)
+ return index;
+
+ if (of_parse_phandle_with_args(dev->of_node, "dmas", "#dma-cells",
+ index, &dma_spec))
+ return -ENOENT;
+
+ thread_id = dma_spec.args[0];
+
+ dst_ep_config = psil_get_ep_config(thread_id);
+ if (IS_ERR(dst_ep_config)) {
+ pr_err("PSIL: thread ID 0x%04x not defined in map\n",
+ thread_id);
+ of_node_put(dma_spec.np);
+ return PTR_ERR(dst_ep_config);
+ }
+
+ memcpy(dst_ep_config, ep_config, sizeof(*dst_ep_config));
+
+ of_node_put(dma_spec.np);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(psil_set_new_ep_config);
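+
+/*
+ * Illustrative device tree fragment (the node label is an assumption, not
+ * defined here) that psil_set_new_ep_config() resolves through the
+ * "dma-names"/"dmas" properties:
+ *
+ * dmas = <&main_udmap 0xc400>, <&main_udmap 0x4400>;
+ * dma-names = "tx", "rx";
+ */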
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * K3 NAVSS DMA glue interface
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ *
+ */
+
+#include <linux/atomic.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/soc/ti/k3-ringacc.h>
+#include <linux/dma/ti-cppi5.h>
+#include <linux/dma/k3-udma-glue.h>
+
+#include "k3-udma.h"
+#include "k3-psil-priv.h"
+
+struct k3_udma_glue_common {
+ struct device *dev;
+ struct udma_dev *udmax;
+ const struct udma_tisci_rm *tisci_rm;
+ struct k3_ringacc *ringacc;
+ u32 src_thread;
+ u32 dst_thread;
+
+ u32 hdesc_size;
+ bool epib;
+ u32 psdata_size;
+ u32 swdata_size;
+};
+
+struct k3_udma_glue_tx_channel {
+ struct k3_udma_glue_common common;
+
+ struct udma_tchan *udma_tchanx;
+ int udma_tchan_id;
+
+ struct k3_ring *ringtx;
+ struct k3_ring *ringtxcq;
+
+ bool psil_paired;
+
+ int virq;
+
+ atomic_t free_pkts;
+ bool tx_pause_on_err;
+ bool tx_filt_einfo;
+ bool tx_filt_pswords;
+ bool tx_supr_tdpkt;
+};
+
+struct k3_udma_glue_rx_flow {
+ struct udma_rflow *udma_rflow;
+ int udma_rflow_id;
+ struct k3_ring *ringrx;
+ struct k3_ring *ringrxfdq;
+
+ int virq;
+};
+
+struct k3_udma_glue_rx_channel {
+ struct k3_udma_glue_common common;
+
+ struct udma_rchan *udma_rchanx;
+ int udma_rchan_id;
+ bool remote;
+
+ bool psil_paired;
+
+ u32 swdata_size;
+ int flow_id_base;
+
+ struct k3_udma_glue_rx_flow *flows;
+ u32 flow_num;
+ u32 flows_ready;
+};
+
+#define K3_UDMAX_TDOWN_TIMEOUT_US 1000
+
+static int of_k3_udma_glue_parse(struct device_node *udmax_np,
+ struct k3_udma_glue_common *common)
+{
+ common->ringacc = of_k3_ringacc_get_by_phandle(udmax_np,
+ "ti,ringacc");
+ if (IS_ERR(common->ringacc))
+ return PTR_ERR(common->ringacc);
+
+ common->udmax = of_xudma_dev_get(udmax_np, NULL);
+ if (IS_ERR(common->udmax))
+ return PTR_ERR(common->udmax);
+
+ common->tisci_rm = xudma_dev_get_tisci_rm(common->udmax);
+
+ return 0;
+}
+
+static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
+ const char *name, struct k3_udma_glue_common *common,
+ bool tx_chn)
+{
+ struct psil_endpoint_config *ep_config;
+ struct of_phandle_args dma_spec;
+ u32 thread_id;
+ int ret = 0;
+ int index;
+
+ if (unlikely(!name))
+ return -EINVAL;
+
+ index = of_property_match_string(chn_np, "dma-names", name);
+ if (index < 0)
+ return index;
+
+ if (of_parse_phandle_with_args(chn_np, "dmas", "#dma-cells", index,
+ &dma_spec))
+ return -ENOENT;
+
+ thread_id = dma_spec.args[0];
+
+ if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
+ ret = -EINVAL;
+ goto out_put_spec;
+ }
+
+ if (!tx_chn && (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
+ ret = -EINVAL;
+ goto out_put_spec;
+ }
+
+ /* get psil endpoint config */
+ ep_config = psil_get_ep_config(thread_id);
+ if (IS_ERR(ep_config)) {
+ dev_err(common->dev,
+ "No configuration for psi-l thread 0x%04x\n",
+ thread_id);
+ ret = PTR_ERR(ep_config);
+ goto out_put_spec;
+ }
+
+ common->epib = ep_config->needs_epib;
+ common->psdata_size = ep_config->psd_size;
+
+ if (tx_chn)
+ common->dst_thread = thread_id;
+ else
+ common->src_thread = thread_id;
+
+ ret = of_k3_udma_glue_parse(dma_spec.np, common);
+
+out_put_spec:
+ of_node_put(dma_spec.np);
+ return ret;
+}
+
+static void k3_udma_glue_dump_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
+{
+ struct device *dev = tx_chn->common.dev;
+
+ dev_dbg(dev, "dump_tx_chn:\n"
+ "udma_tchan_id: %d\n"
+ "src_thread: %08x\n"
+ "dst_thread: %08x\n",
+ tx_chn->udma_tchan_id,
+ tx_chn->common.src_thread,
+ tx_chn->common.dst_thread);
+}
+
+static void k3_udma_glue_dump_tx_rt_chn(struct k3_udma_glue_tx_channel *chn,
+ char *mark)
+{
+ struct device *dev = chn->common.dev;
+
+ dev_dbg(dev, "=== dump ===> %s\n", mark);
+ dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_CTL_REG,
+ xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG));
+ dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_PEER_RT_EN_REG,
+ xudma_tchanrt_read(chn->udma_tchanx,
+ UDMA_TCHAN_RT_PEER_RT_EN_REG));
+ dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_PCNT_REG,
+ xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_PCNT_REG));
+ dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_BCNT_REG,
+ xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_BCNT_REG));
+ dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_SBCNT_REG,
+ xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_SBCNT_REG));
+}
+
+static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
+{
+ const struct udma_tisci_rm *tisci_rm = tx_chn->common.tisci_rm;
+ struct ti_sci_msg_rm_udmap_tx_ch_cfg req;
+
+ memset(&req, 0, sizeof(req));
+
+ req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID;
+ req.nav_id = tisci_rm->tisci_dev_id;
+ req.index = tx_chn->udma_tchan_id;
+ if (tx_chn->tx_pause_on_err)
+ req.tx_pause_on_err = 1;
+ if (tx_chn->tx_filt_einfo)
+ req.tx_filt_einfo = 1;
+ if (tx_chn->tx_filt_pswords)
+ req.tx_filt_pswords = 1;
+ req.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
+ if (tx_chn->tx_supr_tdpkt)
+ req.tx_supr_tdpkt = 1;
+ req.tx_fetch_size = tx_chn->common.hdesc_size >> 2; /* in 32-bit words */
+ req.txcq_qnum = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
+
+ return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
+}
+
+struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
+ const char *name, struct k3_udma_glue_tx_channel_cfg *cfg)
+{
+ struct k3_udma_glue_tx_channel *tx_chn;
+ int ret;
+
+ tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
+ if (!tx_chn)
+ return ERR_PTR(-ENOMEM);
+
+ tx_chn->common.dev = dev;
+ tx_chn->common.swdata_size = cfg->swdata_size;
+ tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
+ tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
+ tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
+ tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;
+
+ /* parse the OF binding of the UDMAP channel */
+ ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
+ &tx_chn->common, true);
+ if (ret)
+ goto err;
+
+ tx_chn->common.hdesc_size = cppi5_hdesc_calc_size(tx_chn->common.epib,
+ tx_chn->common.psdata_size,
+ tx_chn->common.swdata_size);
+
+ /* request and cfg UDMAP TX channel */
+ tx_chn->udma_tchanx = xudma_tchan_get(tx_chn->common.udmax, -1);
+ if (IS_ERR(tx_chn->udma_tchanx)) {
+ ret = PTR_ERR(tx_chn->udma_tchanx);
+ dev_err(dev, "UDMAX tchanx get err %d\n", ret);
+ goto err;
+ }
+ tx_chn->udma_tchan_id = xudma_tchan_get_id(tx_chn->udma_tchanx);
+
+ atomic_set(&tx_chn->free_pkts, cfg->txcq_cfg.size);
+
+ /* request and cfg rings */
+ tx_chn->ringtx = k3_ringacc_request_ring(tx_chn->common.ringacc,
+ tx_chn->udma_tchan_id, 0);
+ if (!tx_chn->ringtx) {
+ ret = -ENODEV;
+ dev_err(dev, "Failed to get TX ring %u\n",
+ tx_chn->udma_tchan_id);
+ goto err;
+ }
+
+ tx_chn->ringtxcq = k3_ringacc_request_ring(tx_chn->common.ringacc,
+ -1, 0);
+ if (!tx_chn->ringtxcq) {
+ ret = -ENODEV;
+ dev_err(dev, "Failed to get TXCQ ring\n");
+ goto err;
+ }
+
+ ret = k3_ringacc_ring_cfg(tx_chn->ringtx, &cfg->tx_cfg);
+ if (ret) {
+ dev_err(dev, "Failed to cfg ringtx %d\n", ret);
+ goto err;
+ }
+
+ ret = k3_ringacc_ring_cfg(tx_chn->ringtxcq, &cfg->txcq_cfg);
+ if (ret) {
+ dev_err(dev, "Failed to cfg ringtx %d\n", ret);
+ goto err;
+ }
+
+ /* request and cfg psi-l */
+ tx_chn->common.src_thread =
+ xudma_dev_get_psil_base(tx_chn->common.udmax) +
+ tx_chn->udma_tchan_id;
+
+ ret = k3_udma_glue_cfg_tx_chn(tx_chn);
+ if (ret) {
+ dev_err(dev, "Failed to cfg tchan %d\n", ret);
+ goto err;
+ }
+
+ ret = xudma_navss_psil_pair(tx_chn->common.udmax,
+ tx_chn->common.src_thread,
+ tx_chn->common.dst_thread);
+ if (ret) {
+ dev_err(dev, "PSI-L request err %d\n", ret);
+ goto err;
+ }
+
+ tx_chn->psil_paired = true;
+
+ /* reset TX RT registers */
+ k3_udma_glue_disable_tx_chn(tx_chn);
+
+ k3_udma_glue_dump_tx_chn(tx_chn);
+
+ return tx_chn;
+
+err:
+ k3_udma_glue_release_tx_chn(tx_chn);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn);
+
+void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
+{
+ if (tx_chn->psil_paired) {
+ xudma_navss_psil_unpair(tx_chn->common.udmax,
+ tx_chn->common.src_thread,
+ tx_chn->common.dst_thread);
+ tx_chn->psil_paired = false;
+ }
+
+ if (!IS_ERR_OR_NULL(tx_chn->udma_tchanx))
+ xudma_tchan_put(tx_chn->common.udmax,
+ tx_chn->udma_tchanx);
+
+ if (tx_chn->ringtxcq)
+ k3_ringacc_ring_free(tx_chn->ringtxcq);
+
+ if (tx_chn->ringtx)
+ k3_ringacc_ring_free(tx_chn->ringtx);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_release_tx_chn);
+
+int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+ struct cppi5_host_desc_t *desc_tx,
+ dma_addr_t desc_dma)
+{
+ u32 ringtxcq_id;
+
+ if (!atomic_add_unless(&tx_chn->free_pkts, -1, 0))
+ return -ENOMEM;
+
+ ringtxcq_id = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
+ cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, ringtxcq_id);
+
+ return k3_ringacc_ring_push(tx_chn->ringtx, &desc_dma);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_push_tx_chn);
+
+int k3_udma_glue_pop_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+ dma_addr_t *desc_dma)
+{
+ int ret;
+
+ ret = k3_ringacc_ring_pop(tx_chn->ringtxcq, desc_dma);
+ if (!ret)
+ atomic_inc(&tx_chn->free_pkts);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_pop_tx_chn);
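+
+/*
+ * Note on the intended pairing (descriptive, inferred from the code above):
+ * free_pkts is seeded from the TXCQ ring size and bounds the number of
+ * descriptors in flight. k3_udma_glue_push_tx_chn() decrements it before
+ * queueing on ringtx; k3_udma_glue_pop_tx_chn() increments it once a
+ * completed descriptor is taken off ringtxcq.
+ */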
+
+int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
+{
+ u32 txrt_ctl;
+
+ txrt_ctl = UDMA_PEER_RT_EN_ENABLE;
+ xudma_tchanrt_write(tx_chn->udma_tchanx,
+ UDMA_TCHAN_RT_PEER_RT_EN_REG,
+ txrt_ctl);
+
+ txrt_ctl = xudma_tchanrt_read(tx_chn->udma_tchanx,
+ UDMA_TCHAN_RT_CTL_REG);
+ txrt_ctl |= UDMA_CHAN_RT_CTL_EN;
+ xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG,
+ txrt_ctl);
+
+ k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn en");
+ return 0;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_enable_tx_chn);
+
+void k3_udma_glue_disable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
+{
+ k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis1");
+
+ xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG, 0);
+
+ xudma_tchanrt_write(tx_chn->udma_tchanx,
+ UDMA_TCHAN_RT_PEER_RT_EN_REG, 0);
+ k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis2");
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_disable_tx_chn);
+
+void k3_udma_glue_tdown_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+ bool sync)
+{
+ int i = 0;
+ u32 val;
+
+ k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown1");
+
+ xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG,
+ UDMA_CHAN_RT_CTL_EN | UDMA_CHAN_RT_CTL_TDOWN);
+
+ val = xudma_tchanrt_read(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG);
+
+ while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
+ val = xudma_tchanrt_read(tx_chn->udma_tchanx,
+ UDMA_TCHAN_RT_CTL_REG);
+ udelay(1);
+ if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
+ dev_err(tx_chn->common.dev, "TX tdown timeout\n");
+ break;
+ }
+ i++;
+ }
+
+ val = xudma_tchanrt_read(tx_chn->udma_tchanx,
+ UDMA_TCHAN_RT_PEER_RT_EN_REG);
+ if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
+ dev_err(tx_chn->common.dev, "TX tdown peer not stopped\n");
+ k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown2");
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_tx_chn);
+
+void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+ void *data,
+ void (*cleanup)(void *data, dma_addr_t desc_dma))
+{
+ dma_addr_t desc_dma;
+ int occ_tx, i, ret;
+
+ /* reset TXCQ as it is not an input for UDMA - expected to be empty */
+ if (tx_chn->ringtxcq)
+ k3_ringacc_ring_reset(tx_chn->ringtxcq);
+
+ /*
+ * The TXQ reset needs to be done in a special way, as it is an input for
+ * UDMA and its state is cached by UDMA, so:
+ * 1) save TXQ occ
+ * 2) clean up TXQ and call callback .cleanup() for each desc
+ * 3) reset TXQ in a special way
+ */
+ occ_tx = k3_ringacc_ring_get_occ(tx_chn->ringtx);
+ dev_dbg(tx_chn->common.dev, "TX reset occ_tx %u\n", occ_tx);
+
+ for (i = 0; i < occ_tx; i++) {
+ ret = k3_ringacc_ring_pop(tx_chn->ringtx, &desc_dma);
+ if (ret) {
+ dev_err(tx_chn->common.dev, "TX reset pop %d\n", ret);
+ break;
+ }
+ cleanup(data, desc_dma);
+ }
+
+ k3_ringacc_ring_reset_dma(tx_chn->ringtx, occ_tx);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_reset_tx_chn);
+
+u32 k3_udma_glue_tx_get_hdesc_size(struct k3_udma_glue_tx_channel *tx_chn)
+{
+ return tx_chn->common.hdesc_size;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_hdesc_size);
+
+u32 k3_udma_glue_tx_get_txcq_id(struct k3_udma_glue_tx_channel *tx_chn)
+{
+ return k3_ringacc_get_ring_id(tx_chn->ringtxcq);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_txcq_id);
+
+int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn)
+{
+ tx_chn->virq = k3_ringacc_get_ring_irq_num(tx_chn->ringtxcq);
+
+ return tx_chn->virq;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_irq);
+
+static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
+{
+ const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
+ struct ti_sci_msg_rm_udmap_rx_ch_cfg req;
+ int ret;
+
+ memset(&req, 0, sizeof(req));
+
+ req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
+
+ req.nav_id = tisci_rm->tisci_dev_id;
+ req.index = rx_chn->udma_rchan_id;
+ req.rx_fetch_size = rx_chn->common.hdesc_size >> 2; /* in 32-bit words */
+ /*
+ * TODO: we can't support rxcq_qnum/RCHAN[a]_RCQ cfg with current sysfw
+ * and udmax impl, so just configure it to invalid value.
+ * req.rxcq_qnum = k3_ringacc_get_ring_id(rx_chn->flows[0].ringrx);
+ */
+ req.rxcq_qnum = 0xFFFF;
+ if (rx_chn->flow_num && rx_chn->flow_id_base != rx_chn->udma_rchan_id) {
+ /* Default flow + extra ones */
+ req.flowid_start = rx_chn->flow_id_base;
+ req.flowid_cnt = rx_chn->flow_num;
+ }
+ req.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
+
+ ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
+ if (ret)
+ dev_err(rx_chn->common.dev, "rchan%d cfg failed %d\n",
+ rx_chn->udma_rchan_id, ret);
+
+ return ret;
+}
+
+static void k3_udma_glue_release_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_num)
+{
+ struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
+
+ if (IS_ERR_OR_NULL(flow->udma_rflow))
+ return;
+
+ if (flow->ringrxfdq)
+ k3_ringacc_ring_free(flow->ringrxfdq);
+
+ if (flow->ringrx)
+ k3_ringacc_ring_free(flow->ringrx);
+
+ xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
+ flow->udma_rflow = NULL;
+ rx_chn->flows_ready--;
+}
+
+static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_idx,
+ struct k3_udma_glue_rx_flow_cfg *flow_cfg)
+{
+ struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
+ const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
+ struct device *dev = rx_chn->common.dev;
+ struct ti_sci_msg_rm_udmap_flow_cfg req;
+ int rx_ring_id;
+ int rx_ringfdq_id;
+ int ret = 0;
+
+ flow->udma_rflow = xudma_rflow_get(rx_chn->common.udmax,
+ flow->udma_rflow_id);
+ if (IS_ERR(flow->udma_rflow)) {
+ ret = PTR_ERR(flow->udma_rflow);
+ dev_err(dev, "UDMAX rflow get err %d\n", ret);
+ goto err;
+ }
+
+ if (flow->udma_rflow_id != xudma_rflow_get_id(flow->udma_rflow)) {
+ xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
+ return -ENODEV;
+ }
+
+ /* request and cfg rings */
+ flow->ringrx = k3_ringacc_request_ring(rx_chn->common.ringacc,
+ flow_cfg->ring_rxq_id, 0);
+ if (!flow->ringrx) {
+ ret = -ENODEV;
+ dev_err(dev, "Failed to get RX ring\n");
+ goto err;
+ }
+
+ flow->ringrxfdq = k3_ringacc_request_ring(rx_chn->common.ringacc,
+ flow_cfg->ring_rxfdq0_id, 0);
+ if (!flow->ringrxfdq) {
+ ret = -ENODEV;
+ dev_err(dev, "Failed to get RXFDQ ring\n");
+ goto err;
+ }
+
+ ret = k3_ringacc_ring_cfg(flow->ringrx, &flow_cfg->rx_cfg);
+ if (ret) {
+ dev_err(dev, "Failed to cfg ringrx %d\n", ret);
+ goto err;
+ }
+
+ ret = k3_ringacc_ring_cfg(flow->ringrxfdq, &flow_cfg->rxfdq_cfg);
+ if (ret) {
+ dev_err(dev, "Failed to cfg ringrxfdq %d\n", ret);
+ goto err;
+ }
+
+ if (rx_chn->remote) {
+ rx_ring_id = TI_SCI_RESOURCE_NULL;
+ rx_ringfdq_id = TI_SCI_RESOURCE_NULL;
+ } else {
+ rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
+ rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);
+ }
+
+ memset(&req, 0, sizeof(req));
+
+ req.valid_params =
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
+ req.nav_id = tisci_rm->tisci_dev_id;
+ req.flow_index = flow->udma_rflow_id;
+ if (rx_chn->common.epib)
+ req.rx_einfo_present = 1;
+ if (rx_chn->common.psdata_size)
+ req.rx_psinfo_present = 1;
+ if (flow_cfg->rx_error_handling)
+ req.rx_error_handling = 1;
+ req.rx_desc_type = 0;
+ req.rx_dest_qnum = rx_ring_id;
+ req.rx_src_tag_hi_sel = 0;
+ req.rx_src_tag_lo_sel = flow_cfg->src_tag_lo_sel;
+ req.rx_dest_tag_hi_sel = 0;
+ req.rx_dest_tag_lo_sel = 0;
+ req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
+ req.rx_fdq1_qnum = rx_ringfdq_id;
+ req.rx_fdq2_qnum = rx_ringfdq_id;
+ req.rx_fdq3_qnum = rx_ringfdq_id;
+
+ ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
+ if (ret) {
+ dev_err(dev, "flow%d config failed: %d\n", flow->udma_rflow_id,
+ ret);
+ goto err;
+ }
+
+ rx_chn->flows_ready++;
+ dev_dbg(dev, "flow%d config done. ready:%d\n",
+ flow->udma_rflow_id, rx_chn->flows_ready);
+
+ return 0;
+err:
+ k3_udma_glue_release_rx_flow(rx_chn, flow_idx);
+ return ret;
+}
+
+static void k3_udma_glue_dump_rx_chn(struct k3_udma_glue_rx_channel *chn)
+{
+ struct device *dev = chn->common.dev;
+
+ dev_dbg(dev, "dump_rx_chn:\n"
+ "udma_rchan_id: %d\n"
+ "src_thread: %08x\n"
+ "dst_thread: %08x\n"
+ "epib: %d\n"
+ "hdesc_size: %u\n"
+ "psdata_size: %u\n"
+ "swdata_size: %u\n"
+ "flow_id_base: %d\n"
+ "flow_num: %d\n",
+ chn->udma_rchan_id,
+ chn->common.src_thread,
+ chn->common.dst_thread,
+ chn->common.epib,
+ chn->common.hdesc_size,
+ chn->common.psdata_size,
+ chn->common.swdata_size,
+ chn->flow_id_base,
+ chn->flow_num);
+}
+
+static void k3_udma_glue_dump_rx_rt_chn(struct k3_udma_glue_rx_channel *chn,
+ char *mark)
+{
+ struct device *dev = chn->common.dev;
+
+ dev_dbg(dev, "=== dump ===> %s\n", mark);
+
+ dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_CTL_REG,
+ xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG));
+ dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_PEER_RT_EN_REG,
+ xudma_rchanrt_read(chn->udma_rchanx,
+ UDMA_RCHAN_RT_PEER_RT_EN_REG));
+ dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_PCNT_REG,
+ xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_PCNT_REG));
+ dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_BCNT_REG,
+ xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_BCNT_REG));
+ dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_SBCNT_REG,
+ xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_SBCNT_REG));
+}
+
+static int
+k3_udma_glue_allocate_rx_flows(struct k3_udma_glue_rx_channel *rx_chn,
+ struct k3_udma_glue_rx_channel_cfg *cfg)
+{
+ int ret;
+
+ /* default rflow */
+ if (cfg->flow_id_use_rxchan_id)
+ return 0;
+
+ /* not GP rflows */
+ if (rx_chn->flow_id_base != -1 &&
+ !xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
+ return 0;
+
+ /* Allocate range of GP rflows */
+ ret = xudma_alloc_gp_rflow_range(rx_chn->common.udmax,
+ rx_chn->flow_id_base,
+ rx_chn->flow_num);
+ if (ret < 0) {
+ dev_err(rx_chn->common.dev, "UDMAX reserve_rflow %d cnt:%d err: %d\n",
+ rx_chn->flow_id_base, rx_chn->flow_num, ret);
+ return ret;
+ }
+ rx_chn->flow_id_base = ret;
+
+ return 0;
+}
+
+static struct k3_udma_glue_rx_channel *
+k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
+ struct k3_udma_glue_rx_channel_cfg *cfg)
+{
+ struct k3_udma_glue_rx_channel *rx_chn;
+ int ret, i;
+
+ if (cfg->flow_id_num <= 0)
+ return ERR_PTR(-EINVAL);
+
+ if (cfg->flow_id_num != 1 &&
+ (cfg->def_flow_cfg || cfg->flow_id_use_rxchan_id))
+ return ERR_PTR(-EINVAL);
+
+ rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
+ if (!rx_chn)
+ return ERR_PTR(-ENOMEM);
+
+ rx_chn->common.dev = dev;
+ rx_chn->common.swdata_size = cfg->swdata_size;
+ rx_chn->remote = false;
+
+ /* parse the OF binding of the UDMAP channel */
+ ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
+ &rx_chn->common, false);
+ if (ret)
+ goto err;
+
+ rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
+ rx_chn->common.psdata_size,
+ rx_chn->common.swdata_size);
+
+ /* request and cfg UDMAP RX channel */
+ rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax, -1);
+ if (IS_ERR(rx_chn->udma_rchanx)) {
+ ret = PTR_ERR(rx_chn->udma_rchanx);
+ dev_err(dev, "UDMAX rchanx get err %d\n", ret);
+ goto err;
+ }
+ rx_chn->udma_rchan_id = xudma_rchan_get_id(rx_chn->udma_rchanx);
+
+ rx_chn->flow_num = cfg->flow_id_num;
+ rx_chn->flow_id_base = cfg->flow_id_base;
+
+ /* Use RX channel id as flow id: target dev can't generate flow_id */
+ if (cfg->flow_id_use_rxchan_id)
+ rx_chn->flow_id_base = rx_chn->udma_rchan_id;
+
+ rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
+ sizeof(*rx_chn->flows), GFP_KERNEL);
+ if (!rx_chn->flows) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < rx_chn->flow_num; i++)
+ rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;
+
+ /* request and cfg psi-l */
+ rx_chn->common.dst_thread =
+ xudma_dev_get_psil_base(rx_chn->common.udmax) +
+ rx_chn->udma_rchan_id;
+
+ ret = k3_udma_glue_cfg_rx_chn(rx_chn);
+ if (ret) {
+ dev_err(dev, "Failed to cfg rchan %d\n", ret);
+ goto err;
+ }
+
+ /* init the default RX flow only if flow_num == 1 */
+ if (cfg->def_flow_cfg) {
+ ret = k3_udma_glue_cfg_rx_flow(rx_chn, 0, cfg->def_flow_cfg);
+ if (ret)
+ goto err;
+ }
+
+ ret = xudma_navss_psil_pair(rx_chn->common.udmax,
+ rx_chn->common.src_thread,
+ rx_chn->common.dst_thread);
+ if (ret) {
+ dev_err(dev, "PSI-L request err %d\n", ret);
+ goto err;
+ }
+
+ rx_chn->psil_paired = true;
+
+ /* reset RX RT registers */
+ k3_udma_glue_disable_rx_chn(rx_chn);
+
+ k3_udma_glue_dump_rx_chn(rx_chn);
+
+ return rx_chn;
+
+err:
+ k3_udma_glue_release_rx_chn(rx_chn);
+ return ERR_PTR(ret);
+}
+
+static struct k3_udma_glue_rx_channel *
+k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name,
+ struct k3_udma_glue_rx_channel_cfg *cfg)
+{
+ struct k3_udma_glue_rx_channel *rx_chn;
+ int ret, i;
+
+ if (cfg->flow_id_num <= 0 ||
+ cfg->flow_id_use_rxchan_id ||
+ cfg->def_flow_cfg ||
+ cfg->flow_id_base < 0)
+ return ERR_PTR(-EINVAL);
+
+ /*
+ * The remote RX channel is under the control of a remote CPU core, so
+ * Linux can only request it and manipulate it through dedicated RX flows
+ */
+
+ rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
+ if (!rx_chn)
+ return ERR_PTR(-ENOMEM);
+
+ rx_chn->common.dev = dev;
+ rx_chn->common.swdata_size = cfg->swdata_size;
+ rx_chn->remote = true;
+ rx_chn->udma_rchan_id = -1;
+ rx_chn->flow_num = cfg->flow_id_num;
+ rx_chn->flow_id_base = cfg->flow_id_base;
+ rx_chn->psil_paired = false;
+
+ /* parse the OF binding of the UDMAP channel */
+ ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
+ &rx_chn->common, false);
+ if (ret)
+ goto err;
+
+ rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
+ rx_chn->common.psdata_size,
+ rx_chn->common.swdata_size);
+
+ rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
+ sizeof(*rx_chn->flows), GFP_KERNEL);
+ if (!rx_chn->flows) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < rx_chn->flow_num; i++)
+ rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;
+
+ k3_udma_glue_dump_rx_chn(rx_chn);
+
+ return rx_chn;
+
+err:
+ k3_udma_glue_release_rx_chn(rx_chn);
+ return ERR_PTR(ret);
+}
+
+struct k3_udma_glue_rx_channel *
+k3_udma_glue_request_rx_chn(struct device *dev, const char *name,
+ struct k3_udma_glue_rx_channel_cfg *cfg)
+{
+ if (cfg->remote)
+ return k3_udma_glue_request_remote_rx_chn(dev, name, cfg);
+ else
+ return k3_udma_glue_request_rx_chn_priv(dev, name, cfg);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_request_rx_chn);
+
+void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
+{
+ int i;
+
+ if (IS_ERR_OR_NULL(rx_chn->common.udmax))
+ return;
+
+ if (rx_chn->psil_paired) {
+ xudma_navss_psil_unpair(rx_chn->common.udmax,
+ rx_chn->common.src_thread,
+ rx_chn->common.dst_thread);
+ rx_chn->psil_paired = false;
+ }
+
+ for (i = 0; i < rx_chn->flow_num; i++)
+ k3_udma_glue_release_rx_flow(rx_chn, i);
+
+ if (xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
+ xudma_free_gp_rflow_range(rx_chn->common.udmax,
+ rx_chn->flow_id_base,
+ rx_chn->flow_num);
+
+ if (!IS_ERR_OR_NULL(rx_chn->udma_rchanx))
+ xudma_rchan_put(rx_chn->common.udmax,
+ rx_chn->udma_rchanx);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_release_rx_chn);
+
+int k3_udma_glue_rx_flow_init(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_idx,
+ struct k3_udma_glue_rx_flow_cfg *flow_cfg)
+{
+ if (flow_idx >= rx_chn->flow_num)
+ return -EINVAL;
+
+ return k3_udma_glue_cfg_rx_flow(rx_chn, flow_idx, flow_cfg);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_init);
+
+u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_idx)
+{
+ struct k3_udma_glue_rx_flow *flow;
+
+ if (flow_idx >= rx_chn->flow_num)
+ return -EINVAL;
+
+ flow = &rx_chn->flows[flow_idx];
+
+ return k3_ringacc_get_ring_id(flow->ringrxfdq);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_get_fdq_id);
+
+u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn)
+{
+ return rx_chn->flow_id_base;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_flow_id_base);
+
+int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_idx)
+{
+ struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
+ const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
+ struct device *dev = rx_chn->common.dev;
+ struct ti_sci_msg_rm_udmap_flow_cfg req;
+ int rx_ring_id;
+ int rx_ringfdq_id;
+ int ret = 0;
+
+ if (!rx_chn->remote)
+ return -EINVAL;
+
+ rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
+ rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);
+
+ memset(&req, 0, sizeof(req));
+
+ req.valid_params =
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
+ req.nav_id = tisci_rm->tisci_dev_id;
+ req.flow_index = flow->udma_rflow_id;
+ req.rx_dest_qnum = rx_ring_id;
+ req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
+ req.rx_fdq1_qnum = rx_ringfdq_id;
+ req.rx_fdq2_qnum = rx_ringfdq_id;
+ req.rx_fdq3_qnum = rx_ringfdq_id;
+
+ ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
+ if (ret) {
+ dev_err(dev, "flow%d enable failed: %d\n", flow->udma_rflow_id,
+ ret);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_enable);
+
+int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_idx)
+{
+ struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
+ const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
+ struct device *dev = rx_chn->common.dev;
+ struct ti_sci_msg_rm_udmap_flow_cfg req;
+ int ret = 0;
+
+ if (!rx_chn->remote)
+ return -EINVAL;
+
+ memset(&req, 0, sizeof(req));
+ req.valid_params =
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
+ req.nav_id = tisci_rm->tisci_dev_id;
+ req.flow_index = flow->udma_rflow_id;
+ req.rx_dest_qnum = TI_SCI_RESOURCE_NULL;
+ req.rx_fdq0_sz0_qnum = TI_SCI_RESOURCE_NULL;
+ req.rx_fdq1_qnum = TI_SCI_RESOURCE_NULL;
+ req.rx_fdq2_qnum = TI_SCI_RESOURCE_NULL;
+ req.rx_fdq3_qnum = TI_SCI_RESOURCE_NULL;
+
+ ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
+ if (ret) {
+ dev_err(dev, "flow%d disable failed: %d\n", flow->udma_rflow_id,
+ ret);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_disable);
+
+int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
+{
+ u32 rxrt_ctl;
+
+ if (rx_chn->remote)
+ return -EINVAL;
+
+ if (rx_chn->flows_ready < rx_chn->flow_num)
+ return -EINVAL;
+
+ rxrt_ctl = xudma_rchanrt_read(rx_chn->udma_rchanx,
+ UDMA_RCHAN_RT_CTL_REG);
+ rxrt_ctl |= UDMA_CHAN_RT_CTL_EN;
+ xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG,
+ rxrt_ctl);
+
+ xudma_rchanrt_write(rx_chn->udma_rchanx,
+ UDMA_RCHAN_RT_PEER_RT_EN_REG,
+ UDMA_PEER_RT_EN_ENABLE);
+
+ k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt en");
+ return 0;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_enable_rx_chn);
+
+void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
+{
+ k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis1");
+
+ xudma_rchanrt_write(rx_chn->udma_rchanx,
+ UDMA_RCHAN_RT_PEER_RT_EN_REG,
+ 0);
+ xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG, 0);
+
+ k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis2");
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_disable_rx_chn);
+
+void k3_udma_glue_tdown_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+ bool sync)
+{
+ int i = 0;
+ u32 val;
+
+ if (rx_chn->remote)
+ return;
+
+ k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown1");
+
+ xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_RCHAN_RT_PEER_RT_EN_REG,
+ UDMA_PEER_RT_EN_ENABLE | UDMA_PEER_RT_EN_TEARDOWN);
+
+ val = xudma_rchanrt_read(rx_chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG);
+
+ while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
+ val = xudma_rchanrt_read(rx_chn->udma_rchanx,
+ UDMA_RCHAN_RT_CTL_REG);
+ udelay(1);
+ if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
+ dev_err(rx_chn->common.dev, "RX tdown timeout\n");
+ break;
+ }
+ i++;
+ }
+
+ val = xudma_rchanrt_read(rx_chn->udma_rchanx,
+ UDMA_RCHAN_RT_PEER_RT_EN_REG);
+ if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
+ dev_err(rx_chn->common.dev, "TX tdown peer not stopped\n");
+ k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown2");
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_rx_chn);
+
+void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_num, void *data,
+ void (*cleanup)(void *data, dma_addr_t desc_dma), bool skip_fdq)
+{
+ struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
+ struct device *dev = rx_chn->common.dev;
+ dma_addr_t desc_dma;
+ int occ_rx, i, ret;
+
+ /* reset RXCQ as it is not an input for UDMA - expected to be empty */
+ occ_rx = k3_ringacc_ring_get_occ(flow->ringrx);
+ dev_dbg(dev, "RX reset flow %u occ_rx %u\n", flow_num, occ_rx);
+ if (flow->ringrx)
+ k3_ringacc_ring_reset(flow->ringrx);
+
+ /* Skip RX FDQ in case one FDQ is used for the set of flows */
+ if (skip_fdq)
+ return;
+
+ /*
+ * The RX FDQ reset needs to be done in a special way, as it is an input
+ * for UDMA and its state is cached by UDMA, so:
+ * 1) save RX FDQ occ
+ * 2) clean up RX FDQ and call callback .cleanup() for each desc
+ * 3) reset RX FDQ in a special way
+ */
+ occ_rx = k3_ringacc_ring_get_occ(flow->ringrxfdq);
+ dev_dbg(dev, "RX reset flow %u occ_rx_fdq %u\n", flow_num, occ_rx);
+
+ for (i = 0; i < occ_rx; i++) {
+ ret = k3_ringacc_ring_pop(flow->ringrxfdq, &desc_dma);
+ if (ret) {
+ dev_err(dev, "RX reset pop %d\n", ret);
+ break;
+ }
+ cleanup(data, desc_dma);
+ }
+
+ k3_ringacc_ring_reset_dma(flow->ringrxfdq, occ_rx);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_reset_rx_chn);
+
+int k3_udma_glue_push_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_num, struct cppi5_host_desc_t *desc_rx,
+ dma_addr_t desc_dma)
+{
+ struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
+
+ return k3_ringacc_ring_push(flow->ringrxfdq, &desc_dma);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_push_rx_chn);
+
+int k3_udma_glue_pop_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_num, dma_addr_t *desc_dma)
+{
+ struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
+
+ return k3_ringacc_ring_pop(flow->ringrx, desc_dma);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_pop_rx_chn);
+
+int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_num)
+{
+ struct k3_udma_glue_rx_flow *flow;
+
+ flow = &rx_chn->flows[flow_num];
+
+ flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx);
+
+ return flow->virq;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_irq);
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+
+int xudma_navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
+{
+ return navss_psil_pair(ud, src_thread, dst_thread);
+}
+EXPORT_SYMBOL(xudma_navss_psil_pair);
+
+int xudma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
+{
+ return navss_psil_unpair(ud, src_thread, dst_thread);
+}
+EXPORT_SYMBOL(xudma_navss_psil_unpair);
+
+struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property)
+{
+ struct device_node *udma_node = np;
+ struct platform_device *pdev;
+ struct udma_dev *ud;
+
+ if (property) {
+ udma_node = of_parse_phandle(np, property, 0);
+ if (!udma_node) {
+ pr_err("UDMA node is not found\n");
+ return ERR_PTR(-ENODEV);
+ }
+ }
+
+ pdev = of_find_device_by_node(udma_node);
+ if (!pdev) {
+ pr_debug("UDMA device not found\n");
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ if (np != udma_node)
+ of_node_put(udma_node);
+
+ ud = platform_get_drvdata(pdev);
+ if (!ud) {
+ pr_debug("UDMA has not been probed\n");
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ return ud;
+}
+EXPORT_SYMBOL(of_xudma_dev_get);
+
+u32 xudma_dev_get_psil_base(struct udma_dev *ud)
+{
+ return ud->psil_base;
+}
+EXPORT_SYMBOL(xudma_dev_get_psil_base);
+
+struct udma_tisci_rm *xudma_dev_get_tisci_rm(struct udma_dev *ud)
+{
+ return &ud->tisci_rm;
+}
+EXPORT_SYMBOL(xudma_dev_get_tisci_rm);
+
+int xudma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
+{
+ return __udma_alloc_gp_rflow_range(ud, from, cnt);
+}
+EXPORT_SYMBOL(xudma_alloc_gp_rflow_range);
+
+int xudma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
+{
+ return __udma_free_gp_rflow_range(ud, from, cnt);
+}
+EXPORT_SYMBOL(xudma_free_gp_rflow_range);
+
+bool xudma_rflow_is_gp(struct udma_dev *ud, int id)
+{
+ return !test_bit(id, ud->rflow_gp_map);
+}
+EXPORT_SYMBOL(xudma_rflow_is_gp);
+
+#define XUDMA_GET_PUT_RESOURCE(res) \
+struct udma_##res *xudma_##res##_get(struct udma_dev *ud, int id) \
+{ \
+ return __udma_reserve_##res(ud, false, id); \
+} \
+EXPORT_SYMBOL(xudma_##res##_get); \
+ \
+void xudma_##res##_put(struct udma_dev *ud, struct udma_##res *p) \
+{ \
+ clear_bit(p->id, ud->res##_map); \
+} \
+EXPORT_SYMBOL(xudma_##res##_put)
+XUDMA_GET_PUT_RESOURCE(tchan);
+XUDMA_GET_PUT_RESOURCE(rchan);
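+
+/*
+ * The macro above expands to the exported wrappers xudma_tchan_get(),
+ * xudma_tchan_put(), xudma_rchan_get() and xudma_rchan_put(), thin shims
+ * around the internal __udma_reserve_*() and clear_bit() helpers.
+ */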
+
+struct udma_rflow *xudma_rflow_get(struct udma_dev *ud, int id)
+{
+ return __udma_get_rflow(ud, id);
+}
+EXPORT_SYMBOL(xudma_rflow_get);
+
+void xudma_rflow_put(struct udma_dev *ud, struct udma_rflow *p)
+{
+ __udma_put_rflow(ud, p);
+}
+EXPORT_SYMBOL(xudma_rflow_put);
+
+#define XUDMA_GET_RESOURCE_ID(res) \
+int xudma_##res##_get_id(struct udma_##res *p) \
+{ \
+ return p->id; \
+} \
+EXPORT_SYMBOL(xudma_##res##_get_id)
+XUDMA_GET_RESOURCE_ID(tchan);
+XUDMA_GET_RESOURCE_ID(rchan);
+XUDMA_GET_RESOURCE_ID(rflow);
+
+/* Exported register access functions */
+#define XUDMA_RT_IO_FUNCTIONS(res) \
+u32 xudma_##res##rt_read(struct udma_##res *p, int reg) \
+{ \
+ return udma_##res##rt_read(p, reg); \
+} \
+EXPORT_SYMBOL(xudma_##res##rt_read); \
+ \
+void xudma_##res##rt_write(struct udma_##res *p, int reg, u32 val) \
+{ \
+ udma_##res##rt_write(p, reg, val); \
+} \
+EXPORT_SYMBOL(xudma_##res##rt_write)
+XUDMA_RT_IO_FUNCTIONS(tchan);
+XUDMA_RT_IO_FUNCTIONS(rchan);
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+#include <linux/soc/ti/k3-ringacc.h>
+#include <linux/soc/ti/ti_sci_protocol.h>
+#include <linux/soc/ti/ti_sci_inta_msi.h>
+#include <linux/dma/ti-cppi5.h>
+
+#include "../virt-dma.h"
+#include "k3-udma.h"
+#include "k3-psil-priv.h"
+
+struct udma_static_tr {
+ u8 elsize; /* RPSTR0 */
+ u16 elcnt; /* RPSTR0 */
+ u16 bstcnt; /* RPSTR1 */
+};
+
+#define K3_UDMA_MAX_RFLOWS 1024
+#define K3_UDMA_DEFAULT_RING_SIZE 16
+
+/* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */
+#define UDMA_RFLOW_SRCTAG_NONE 0
+#define UDMA_RFLOW_SRCTAG_CFG_TAG 1
+#define UDMA_RFLOW_SRCTAG_FLOW_ID 2
+#define UDMA_RFLOW_SRCTAG_SRC_TAG 4
+
+#define UDMA_RFLOW_DSTTAG_NONE 0
+#define UDMA_RFLOW_DSTTAG_CFG_TAG 1
+#define UDMA_RFLOW_DSTTAG_FLOW_ID 2
+#define UDMA_RFLOW_DSTTAG_DST_TAG_LO 4
+#define UDMA_RFLOW_DSTTAG_DST_TAG_HI 5
+
+struct udma_chan;
+
+enum udma_mmr {
+ MMR_GCFG = 0,
+ MMR_RCHANRT,
+ MMR_TCHANRT,
+ MMR_LAST,
+};
+
+static const char * const mmr_names[] = { "gcfg", "rchanrt", "tchanrt" };
+
+struct udma_tchan {
+ void __iomem *reg_rt;
+
+ int id;
+ struct k3_ring *t_ring; /* Transmit ring */
+ struct k3_ring *tc_ring; /* Transmit Completion ring */
+};
+
+struct udma_rflow {
+ int id;
+ struct k3_ring *fd_ring; /* Free Descriptor ring */
+ struct k3_ring *r_ring; /* Receive ring */
+};
+
+struct udma_rchan {
+ void __iomem *reg_rt;
+
+ int id;
+};
+
+#define UDMA_FLAG_PDMA_ACC32 BIT(0)
+#define UDMA_FLAG_PDMA_BURST BIT(1)
+
+struct udma_match_data {
+ u32 psil_base;
+ bool enable_memcpy_support;
+ u32 flags;
+ u32 statictr_z_mask;
+ u32 rchan_oes_offset;
+
+ u8 tpl_levels;
+ u32 level_start_idx[];
+};
+
+struct udma_dev {
+ struct dma_device ddev;
+ struct device *dev;
+ void __iomem *mmrs[MMR_LAST];
+ const struct udma_match_data *match_data;
+
+ size_t desc_align; /* alignment to use for descriptors */
+
+ struct udma_tisci_rm tisci_rm;
+
+ struct k3_ringacc *ringacc;
+
+ struct work_struct purge_work;
+ struct list_head desc_to_purge;
+ spinlock_t lock;
+
+ int tchan_cnt;
+ int echan_cnt;
+ int rchan_cnt;
+ int rflow_cnt;
+ unsigned long *tchan_map;
+ unsigned long *rchan_map;
+ unsigned long *rflow_gp_map;
+ unsigned long *rflow_gp_map_allocated;
+ unsigned long *rflow_in_use;
+
+ struct udma_tchan *tchans;
+ struct udma_rchan *rchans;
+ struct udma_rflow *rflows;
+
+ struct udma_chan *channels;
+ u32 psil_base;
+};
+
+struct udma_hwdesc {
+ size_t cppi5_desc_size;
+ void *cppi5_desc_vaddr;
+ dma_addr_t cppi5_desc_paddr;
+
+ /* TR descriptor internal pointers */
+ void *tr_req_base;
+ struct cppi5_tr_resp_t *tr_resp_base;
+};
+
+struct udma_desc {
+ struct virt_dma_desc vd;
+
+ bool terminated;
+
+ enum dma_transfer_direction dir;
+
+ struct udma_static_tr static_tr;
+ u32 residue;
+
+ unsigned int sglen;
+ unsigned int desc_idx; /* Only used for cyclic in packet mode */
+ unsigned int tr_idx;
+
+ u32 metadata_size;
+ void *metadata; /* pointer to provided metadata buffer (EPIB, PSdata) */
+
+ unsigned int hwdesc_count;
+ struct udma_hwdesc hwdesc[];
+};
+
+enum udma_chan_state {
+ UDMA_CHAN_IS_IDLE = 0, /* not active, no teardown is in progress */
+ UDMA_CHAN_IS_ACTIVE, /* Normal operation */
+ UDMA_CHAN_IS_TERMINATING, /* channel is being terminated */
+};
+
+struct udma_tx_drain {
+ struct delayed_work work;
+ unsigned long jiffie;
+ u32 residue;
+};
+
+struct udma_chan_config {
+ bool pkt_mode; /* TR or packet */
+ bool needs_epib; /* EPIB is needed for the communication or not */
+ u32 psd_size; /* size of Protocol Specific Data */
+ u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
+ u32 hdesc_size; /* Size of a packet descriptor in packet mode */
+ bool notdpkt; /* Suppress sending TDC packet */
+ int remote_thread_id;
+ u32 src_thread;
+ u32 dst_thread;
+ enum psil_endpoint_type ep_type;
+ bool enable_acc32;
+ bool enable_burst;
+ enum udma_tp_level channel_tpl; /* Channel Throughput Level */
+
+ enum dma_transfer_direction dir;
+};
+
+struct udma_chan {
+ struct virt_dma_chan vc;
+ struct dma_slave_config cfg;
+ struct udma_dev *ud;
+ struct udma_desc *desc;
+ struct udma_desc *terminated_desc;
+ struct udma_static_tr static_tr;
+ char *name;
+
+ struct udma_tchan *tchan;
+ struct udma_rchan *rchan;
+ struct udma_rflow *rflow;
+
+ bool psil_paired;
+
+ int irq_num_ring;
+ int irq_num_udma;
+
+ bool cyclic;
+ bool paused;
+
+ enum udma_chan_state state;
+ struct completion teardown_completed;
+
+ struct udma_tx_drain tx_drain;
+
+ u32 bcnt; /* number of bytes completed since the start of the channel */
+ u32 in_ring_cnt; /* number of descriptors in flight */
+
+ /* Channel configuration parameters */
+ struct udma_chan_config config;
+
+ /* dmapool for packet mode descriptors */
+ bool use_dma_pool;
+ struct dma_pool *hdesc_pool;
+
+ u32 id;
+};
+
+static inline struct udma_dev *to_udma_dev(struct dma_device *d)
+{
+ return container_of(d, struct udma_dev, ddev);
+}
+
+static inline struct udma_chan *to_udma_chan(struct dma_chan *c)
+{
+ return container_of(c, struct udma_chan, vc.chan);
+}
+
+static inline struct udma_desc *to_udma_desc(struct dma_async_tx_descriptor *t)
+{
+ return container_of(t, struct udma_desc, vd.tx);
+}
+
+/* Generic register access functions */
+static inline u32 udma_read(void __iomem *base, int reg)
+{
+ return readl(base + reg);
+}
+
+static inline void udma_write(void __iomem *base, int reg, u32 val)
+{
+ writel(val, base + reg);
+}
+
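+/*
+ * Read-modify-write helper: the register is only written back when the
+ * masked value actually changes, avoiding a redundant write.
+ */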
+static inline void udma_update_bits(void __iomem *base, int reg,
+ u32 mask, u32 val)
+{
+ u32 tmp, orig;
+
+ orig = readl(base + reg);
+ tmp = orig & ~mask;
+ tmp |= (val & mask);
+
+ if (tmp != orig)
+ writel(tmp, base + reg);
+}
+
+/* TCHANRT */
+static inline u32 udma_tchanrt_read(struct udma_tchan *tchan, int reg)
+{
+ if (!tchan)
+ return 0;
+ return udma_read(tchan->reg_rt, reg);
+}
+
+static inline void udma_tchanrt_write(struct udma_tchan *tchan, int reg,
+ u32 val)
+{
+ if (!tchan)
+ return;
+ udma_write(tchan->reg_rt, reg, val);
+}
+
+static inline void udma_tchanrt_update_bits(struct udma_tchan *tchan, int reg,
+ u32 mask, u32 val)
+{
+ if (!tchan)
+ return;
+ udma_update_bits(tchan->reg_rt, reg, mask, val);
+}
+
+/* RCHANRT */
+static inline u32 udma_rchanrt_read(struct udma_rchan *rchan, int reg)
+{
+ if (!rchan)
+ return 0;
+ return udma_read(rchan->reg_rt, reg);
+}
+
+static inline void udma_rchanrt_write(struct udma_rchan *rchan, int reg,
+ u32 val)
+{
+ if (!rchan)
+ return;
+ udma_write(rchan->reg_rt, reg, val);
+}
+
+static inline void udma_rchanrt_update_bits(struct udma_rchan *rchan, int reg,
+ u32 mask, u32 val)
+{
+ if (!rchan)
+ return;
+ udma_update_bits(rchan->reg_rt, reg, mask, val);
+}
+
+static int navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
+{
+ struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+
+ dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
+ return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
+ tisci_rm->tisci_navss_dev_id,
+ src_thread, dst_thread);
+}
+
+static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
+ u32 dst_thread)
+{
+ struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+
+ dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
+ return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
+ tisci_rm->tisci_navss_dev_id,
+ src_thread, dst_thread);
+}
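+
+/*
+ * Note (illustrative): both helpers above force bit 15
+ * (K3_PSIL_DST_THREAD_ID_OFFSET, 0x8000) on the destination thread, so a
+ * caller passing dst_thread 0x4400 is paired against thread 0xc400.
+ */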
+
+static void udma_reset_uchan(struct udma_chan *uc)
+{
+ memset(&uc->config, 0, sizeof(uc->config));
+ uc->config.remote_thread_id = -1;
+ uc->state = UDMA_CHAN_IS_IDLE;
+}
+
+static void udma_dump_chan_stdata(struct udma_chan *uc)
+{
+ struct device *dev = uc->ud->dev;
+ u32 offset;
+ int i;
+
+ if (uc->config.dir == DMA_MEM_TO_DEV || uc->config.dir == DMA_MEM_TO_MEM) {
+ dev_dbg(dev, "TCHAN State data:\n");
+ for (i = 0; i < 32; i++) {
+ offset = UDMA_TCHAN_RT_STDATA_REG + i * 4;
+ dev_dbg(dev, "TRT_STDATA[%02d]: 0x%08x\n", i,
+ udma_tchanrt_read(uc->tchan, offset));
+ }
+ }
+
+ if (uc->config.dir == DMA_DEV_TO_MEM || uc->config.dir == DMA_MEM_TO_MEM) {
+ dev_dbg(dev, "RCHAN State data:\n");
+ for (i = 0; i < 32; i++) {
+ offset = UDMA_RCHAN_RT_STDATA_REG + i * 4;
+ dev_dbg(dev, "RRT_STDATA[%02d]: 0x%08x\n", i,
+ udma_rchanrt_read(uc->rchan, offset));
+ }
+ }
+}
+
+static inline dma_addr_t udma_curr_cppi5_desc_paddr(struct udma_desc *d,
+ int idx)
+{
+ return d->hwdesc[idx].cppi5_desc_paddr;
+}
+
+static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc *d, int idx)
+{
+ return d->hwdesc[idx].cppi5_desc_vaddr;
+}
+
+static struct udma_desc *udma_udma_desc_from_paddr(struct udma_chan *uc,
+ dma_addr_t paddr)
+{
+ struct udma_desc *d = uc->terminated_desc;
+
+ if (d) {
+ dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
+ d->desc_idx);
+
+ if (desc_paddr != paddr)
+ d = NULL;
+ }
+
+ if (!d) {
+ d = uc->desc;
+ if (d) {
+ dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
+ d->desc_idx);
+
+ if (desc_paddr != paddr)
+ d = NULL;
+ }
+ }
+
+ return d;
+}
+
+static void udma_free_hwdesc(struct udma_chan *uc, struct udma_desc *d)
+{
+ if (uc->use_dma_pool) {
+ int i;
+
+ for (i = 0; i < d->hwdesc_count; i++) {
+ if (!d->hwdesc[i].cppi5_desc_vaddr)
+ continue;
+
+ dma_pool_free(uc->hdesc_pool,
+ d->hwdesc[i].cppi5_desc_vaddr,
+ d->hwdesc[i].cppi5_desc_paddr);
+
+ d->hwdesc[i].cppi5_desc_vaddr = NULL;
+ }
+ } else if (d->hwdesc[0].cppi5_desc_vaddr) {
+ struct udma_dev *ud = uc->ud;
+
+ dma_free_coherent(ud->dev, d->hwdesc[0].cppi5_desc_size,
+ d->hwdesc[0].cppi5_desc_vaddr,
+ d->hwdesc[0].cppi5_desc_paddr);
+
+ d->hwdesc[0].cppi5_desc_vaddr = NULL;
+ }
+}
+
+static void udma_purge_desc_work(struct work_struct *work)
+{
+ struct udma_dev *ud = container_of(work, typeof(*ud), purge_work);
+ struct virt_dma_desc *vd, *_vd;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&ud->lock, flags);
+ list_splice_tail_init(&ud->desc_to_purge, &head);
+ spin_unlock_irqrestore(&ud->lock, flags);
+
+ list_for_each_entry_safe(vd, _vd, &head, node) {
+ struct udma_chan *uc = to_udma_chan(vd->tx.chan);
+ struct udma_desc *d = to_udma_desc(&vd->tx);
+
+ udma_free_hwdesc(uc, d);
+ list_del(&vd->node);
+ kfree(d);
+ }
+
+ /* If more to purge, schedule the work again */
+ if (!list_empty(&ud->desc_to_purge))
+ schedule_work(&ud->purge_work);
+}
+
+static void udma_desc_free(struct virt_dma_desc *vd)
+{
+ struct udma_dev *ud = to_udma_dev(vd->tx.chan->device);
+ struct udma_chan *uc = to_udma_chan(vd->tx.chan);
+ struct udma_desc *d = to_udma_desc(&vd->tx);
+ unsigned long flags;
+
+ if (uc->terminated_desc == d)
+ uc->terminated_desc = NULL;
+
+ if (uc->use_dma_pool) {
+ udma_free_hwdesc(uc, d);
+ kfree(d);
+ return;
+ }
+
+ spin_lock_irqsave(&ud->lock, flags);
+ list_add_tail(&vd->node, &ud->desc_to_purge);
+ spin_unlock_irqrestore(&ud->lock, flags);
+
+ schedule_work(&ud->purge_work);
+}
+
+static bool udma_is_chan_running(struct udma_chan *uc)
+{
+ u32 trt_ctl = 0;
+ u32 rrt_ctl = 0;
+
+ if (uc->tchan)
+ trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
+ if (uc->rchan)
+ rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
+
+ if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
+ return true;
+
+ return false;
+}
+
+static bool udma_is_chan_paused(struct udma_chan *uc)
+{
+ u32 val, pause_mask;
+
+ switch (uc->desc->dir) {
+ case DMA_DEV_TO_MEM:
+ val = udma_rchanrt_read(uc->rchan,
+ UDMA_RCHAN_RT_PEER_RT_EN_REG);
+ pause_mask = UDMA_PEER_RT_EN_PAUSE;
+ break;
+ case DMA_MEM_TO_DEV:
+ val = udma_tchanrt_read(uc->tchan,
+ UDMA_TCHAN_RT_PEER_RT_EN_REG);
+ pause_mask = UDMA_PEER_RT_EN_PAUSE;
+ break;
+ case DMA_MEM_TO_MEM:
+ val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
+ pause_mask = UDMA_CHAN_RT_CTL_PAUSE;
+ break;
+ default:
+ return false;
+ }
+
+ if (val & pause_mask)
+ return true;
+
+ return false;
+}
+
+static void udma_sync_for_device(struct udma_chan *uc, int idx)
+{
+ struct udma_desc *d = uc->desc;
+
+ if (uc->cyclic && uc->config.pkt_mode) {
+ dma_sync_single_for_device(uc->ud->dev,
+ d->hwdesc[idx].cppi5_desc_paddr,
+ d->hwdesc[idx].cppi5_desc_size,
+ DMA_TO_DEVICE);
+ } else {
+ int i;
+
+ for (i = 0; i < d->hwdesc_count; i++) {
+ if (!d->hwdesc[i].cppi5_desc_vaddr)
+ continue;
+
+ dma_sync_single_for_device(uc->ud->dev,
+ d->hwdesc[i].cppi5_desc_paddr,
+ d->hwdesc[i].cppi5_desc_size,
+ DMA_TO_DEVICE);
+ }
+ }
+}
+
+static int udma_push_to_ring(struct udma_chan *uc, int idx)
+{
+ struct udma_desc *d = uc->desc;
+
+ struct k3_ring *ring = NULL;
+ int ret = -EINVAL;
+
+ switch (uc->config.dir) {
+ case DMA_DEV_TO_MEM:
+ ring = uc->rflow->fd_ring;
+ break;
+ case DMA_MEM_TO_DEV:
+ case DMA_MEM_TO_MEM:
+ ring = uc->tchan->t_ring;
+ break;
+ default:
+ break;
+ }
+
+ if (ring) {
+ dma_addr_t desc_addr = udma_curr_cppi5_desc_paddr(d, idx);
+
+ wmb(); /* Ensure that writes are not moved over this point */
+ udma_sync_for_device(uc, idx);
+ ret = k3_ringacc_ring_push(ring, &desc_addr);
+ uc->in_ring_cnt++;
+ }
+
+ return ret;
+}
+
+static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
+{
+ struct k3_ring *ring = NULL;
+ int ret = -ENOENT;
+
+ switch (uc->config.dir) {
+ case DMA_DEV_TO_MEM:
+ ring = uc->rflow->r_ring;
+ break;
+ case DMA_MEM_TO_DEV:
+ case DMA_MEM_TO_MEM:
+ ring = uc->tchan->tc_ring;
+ break;
+ default:
+ break;
+ }
+
+ if (ring && k3_ringacc_ring_get_occ(ring)) {
+ struct udma_desc *d = NULL;
+
+ ret = k3_ringacc_ring_pop(ring, addr);
+ if (ret)
+ return ret;
+
+ /* Teardown completion */
+ if (cppi5_desc_is_tdcm(*addr))
+ return ret;
+
+ d = udma_udma_desc_from_paddr(uc, *addr);
+
+ if (d)
+ dma_sync_single_for_cpu(uc->ud->dev, *addr,
+ d->hwdesc[0].cppi5_desc_size,
+ DMA_FROM_DEVICE);
+ rmb(); /* Ensure that reads are not moved before this point */
+
+ if (!ret)
+ uc->in_ring_cnt--;
+ }
+
+ return ret;
+}
+
+static void udma_reset_rings(struct udma_chan *uc)
+{
+ struct k3_ring *ring1 = NULL;
+ struct k3_ring *ring2 = NULL;
+
+ switch (uc->config.dir) {
+ case DMA_DEV_TO_MEM:
+ if (uc->rchan) {
+ ring1 = uc->rflow->fd_ring;
+ ring2 = uc->rflow->r_ring;
+ }
+ break;
+ case DMA_MEM_TO_DEV:
+ case DMA_MEM_TO_MEM:
+ if (uc->tchan) {
+ ring1 = uc->tchan->t_ring;
+ ring2 = uc->tchan->tc_ring;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (ring1)
+ k3_ringacc_ring_reset_dma(ring1,
+ k3_ringacc_ring_get_occ(ring1));
+ if (ring2)
+ k3_ringacc_ring_reset(ring2);
+
+ /* Make sure we are not leaking memory by a stalled descriptor */
+ if (uc->terminated_desc) {
+ udma_desc_free(&uc->terminated_desc->vd);
+ uc->terminated_desc = NULL;
+ }
+
+ uc->in_ring_cnt = 0;
+}
+
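+/*
+ * The channel realtime counters decrement by the value written to them,
+ * so writing back the just-read value clears them.
+ */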
+static void udma_reset_counters(struct udma_chan *uc)
+{
+ u32 val;
+
+ if (uc->tchan) {
+ val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_BCNT_REG, val);
+
+ val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG);
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG, val);
+
+ val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PCNT_REG);
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PCNT_REG, val);
+
+ val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG, val);
+ }
+
+ if (uc->rchan) {
+ val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_BCNT_REG);
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_BCNT_REG, val);
+
+ val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG);
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG, val);
+
+ val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PCNT_REG);
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PCNT_REG, val);
+
+ val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG);
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG, val);
+ }
+
+ uc->bcnt = 0;
+}
+
+static int udma_reset_chan(struct udma_chan *uc, bool hard)
+{
+ switch (uc->config.dir) {
+ case DMA_DEV_TO_MEM:
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, 0);
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
+ break;
+ case DMA_MEM_TO_DEV:
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, 0);
+ break;
+ case DMA_MEM_TO_MEM:
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Reset all counters */
+ udma_reset_counters(uc);
+
+ /* Hard reset: re-initialize the channel to reset */
+ if (hard) {
+ struct udma_chan_config ucc_backup;
+ int ret;
+
+ memcpy(&ucc_backup, &uc->config, sizeof(uc->config));
+ uc->ud->ddev.device_free_chan_resources(&uc->vc.chan);
+
+ /* restore the channel configuration */
+ memcpy(&uc->config, &ucc_backup, sizeof(uc->config));
+ ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan);
+ if (ret)
+ return ret;
+
+ /*
+ * Setting forced teardown after forced reset helps to recover
+ * the rchan.
+ */
+ if (uc->config.dir == DMA_DEV_TO_MEM)
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
+ UDMA_CHAN_RT_CTL_EN |
+ UDMA_CHAN_RT_CTL_TDOWN |
+ UDMA_CHAN_RT_CTL_FTDOWN);
+ }
+ uc->state = UDMA_CHAN_IS_IDLE;
+
+ return 0;
+}
+
+static void udma_start_desc(struct udma_chan *uc)
+{
+ struct udma_chan_config *ucc = &uc->config;
+
+ if (ucc->pkt_mode && (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) {
+ int i;
+
+ /* Push all descriptors to ring for packet mode cyclic or RX */
+ for (i = 0; i < uc->desc->sglen; i++)
+ udma_push_to_ring(uc, i);
+ } else {
+ udma_push_to_ring(uc, 0);
+ }
+}
+
+static bool udma_chan_needs_reconfiguration(struct udma_chan *uc)
+{
+ /* Only PDMAs have staticTR */
+ if (uc->config.ep_type == PSIL_EP_NATIVE)
+ return false;
+
+ /* Check if the staticTR configuration has changed for TX */
+ if (memcmp(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr)))
+ return true;
+
+ return false;
+}
+
+static int udma_start(struct udma_chan *uc)
+{
+ struct virt_dma_desc *vd = vchan_next_desc(&uc->vc);
+
+ if (!vd) {
+ uc->desc = NULL;
+ return -ENOENT;
+ }
+
+ list_del(&vd->node);
+
+ uc->desc = to_udma_desc(&vd->tx);
+
+ /* Channel is already running and does not need reconfiguration */
+ if (udma_is_chan_running(uc) && !udma_chan_needs_reconfiguration(uc)) {
+ udma_start_desc(uc);
+ goto out;
+ }
+
+ /* Make sure that we clear the teardown bit, if it is set */
+ udma_reset_chan(uc, false);
+
+ /* Push descriptors before we start the channel */
+ udma_start_desc(uc);
+
+ switch (uc->desc->dir) {
+ case DMA_DEV_TO_MEM:
+ /* Config remote TR */
+ if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
+ u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
+ PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
+ const struct udma_match_data *match_data =
+ uc->ud->match_data;
+
+ if (uc->config.enable_acc32)
+ val |= PDMA_STATIC_TR_XY_ACC32;
+ if (uc->config.enable_burst)
+ val |= PDMA_STATIC_TR_XY_BURST;
+
+ udma_rchanrt_write(uc->rchan,
+ UDMA_RCHAN_RT_PEER_STATIC_TR_XY_REG, val);
+
+ udma_rchanrt_write(uc->rchan,
+ UDMA_RCHAN_RT_PEER_STATIC_TR_Z_REG,
+ PDMA_STATIC_TR_Z(uc->desc->static_tr.bstcnt,
+ match_data->statictr_z_mask));
+
+ /* save the current staticTR configuration */
+ memcpy(&uc->static_tr, &uc->desc->static_tr,
+ sizeof(uc->static_tr));
+ }
+
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
+ UDMA_CHAN_RT_CTL_EN);
+
+ /* Enable remote */
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
+ UDMA_PEER_RT_EN_ENABLE);
+
+ break;
+ case DMA_MEM_TO_DEV:
+ /* Config remote TR */
+ if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
+ u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
+ PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
+
+ if (uc->config.enable_acc32)
+ val |= PDMA_STATIC_TR_XY_ACC32;
+ if (uc->config.enable_burst)
+ val |= PDMA_STATIC_TR_XY_BURST;
+
+ udma_tchanrt_write(uc->tchan,
+ UDMA_TCHAN_RT_PEER_STATIC_TR_XY_REG, val);
+
+ /* save the current staticTR configuration */
+ memcpy(&uc->static_tr, &uc->desc->static_tr,
+ sizeof(uc->static_tr));
+ }
+
+ /* Enable remote */
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
+ UDMA_PEER_RT_EN_ENABLE);
+
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
+ UDMA_CHAN_RT_CTL_EN);
+
+ break;
+ case DMA_MEM_TO_MEM:
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
+ UDMA_CHAN_RT_CTL_EN);
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
+ UDMA_CHAN_RT_CTL_EN);
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ uc->state = UDMA_CHAN_IS_ACTIVE;
+out:
+
+ return 0;
+}
+
+static int udma_stop(struct udma_chan *uc)
+{
+ enum udma_chan_state old_state = uc->state;
+
+ uc->state = UDMA_CHAN_IS_TERMINATING;
+ reinit_completion(&uc->teardown_completed);
+
+ switch (uc->config.dir) {
+ case DMA_DEV_TO_MEM:
+ udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
+ UDMA_PEER_RT_EN_ENABLE |
+ UDMA_PEER_RT_EN_TEARDOWN);
+ break;
+ case DMA_MEM_TO_DEV:
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
+ UDMA_PEER_RT_EN_ENABLE |
+ UDMA_PEER_RT_EN_FLUSH);
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
+ UDMA_CHAN_RT_CTL_EN |
+ UDMA_CHAN_RT_CTL_TDOWN);
+ break;
+ case DMA_MEM_TO_MEM:
+ udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
+ UDMA_CHAN_RT_CTL_EN |
+ UDMA_CHAN_RT_CTL_TDOWN);
+ break;
+ default:
+ uc->state = old_state;
+ complete_all(&uc->teardown_completed);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
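+/*
+ * In packet mode cyclic operation the completed descriptor is recycled:
+ * reset it to its original state and push it back to the free descriptor
+ * ring.
+ */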
+static void udma_cyclic_packet_elapsed(struct udma_chan *uc)
+{
+ struct udma_desc *d = uc->desc;
+ struct cppi5_host_desc_t *h_desc;
+
+ h_desc = d->hwdesc[d->desc_idx].cppi5_desc_vaddr;
+ cppi5_hdesc_reset_to_original(h_desc);
+ udma_push_to_ring(uc, d->desc_idx);
+ d->desc_idx = (d->desc_idx + 1) % d->sglen;
+}
+
+static inline void udma_fetch_epib(struct udma_chan *uc, struct udma_desc *d)
+{
+ struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr;
+
+ memcpy(d->metadata, h_desc->epib, d->metadata_size);
+}
+
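+/*
+ * For MEM_TO_DEV transfers towards a PDMA the ring completion only means
+ * that UDMA has pushed the data out; the peer may still be draining it.
+ * Compare the local and peer byte counters to decide whether the transfer
+ * has really finished.
+ */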
+static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d)
+{
+ u32 peer_bcnt, bcnt;
+
+ /* Only TX towards PDMA is affected */
+ if (uc->config.ep_type == PSIL_EP_NATIVE ||
+ uc->config.dir != DMA_MEM_TO_DEV)
+ return true;
+
+ peer_bcnt = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
+ bcnt = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
+
+ if (peer_bcnt < bcnt) {
+ uc->tx_drain.residue = bcnt - peer_bcnt;
+ uc->tx_drain.jiffie = jiffies;
+ return false;
+ }
+
+ return true;
+}
+
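+/*
+ * Poll for the TX drain to complete: estimate the drain rate from the
+ * residue change per jiffy and check again after roughly a third of the
+ * projected remaining time, falling back to one second when no progress
+ * is seen.
+ */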
+static void udma_check_tx_completion(struct work_struct *work)
+{
+ struct udma_chan *uc = container_of(work, typeof(*uc),
+ tx_drain.work.work);
+ bool desc_done = true;
+ u32 residue_diff;
+ unsigned long jiffie_diff, delay;
+
+ if (uc->desc) {
+ residue_diff = uc->tx_drain.residue;
+ jiffie_diff = uc->tx_drain.jiffie;
+ desc_done = udma_is_desc_really_done(uc, uc->desc);
+ }
+
+ if (!desc_done) {
+ jiffie_diff = uc->tx_drain.jiffie - jiffie_diff;
+ residue_diff -= uc->tx_drain.residue;
+ if (residue_diff) {
+ /* Try to guess when we should check next time */
+ residue_diff /= jiffie_diff;
+ delay = uc->tx_drain.residue / residue_diff / 3;
+ if (jiffies_to_msecs(delay) < 5)
+ delay = 0;
+ } else {
+ /* No progress, check again in 1 second */
+ delay = HZ;
+ }
+
+ schedule_delayed_work(&uc->tx_drain.work, delay);
+ } else if (uc->desc) {
+ struct udma_desc *d = uc->desc;
+
+ uc->bcnt += d->residue;
+ udma_start(uc);
+ vchan_cookie_complete(&d->vd);
+ }
+}
+
+static irqreturn_t udma_ring_irq_handler(int irq, void *data)
+{
+ struct udma_chan *uc = data;
+ struct udma_desc *d;
+ unsigned long flags;
+ dma_addr_t paddr = 0;
+
+ if (udma_pop_from_ring(uc, &paddr) || !paddr)
+ return IRQ_HANDLED;
+
+ spin_lock_irqsave(&uc->vc.lock, flags);
+
+ /* Teardown completion message */
+ if (cppi5_desc_is_tdcm(paddr)) {
+ /* Compensate our internal pop/push counter */
+ uc->in_ring_cnt++;
+
+ complete_all(&uc->teardown_completed);
+
+ if (uc->terminated_desc) {
+ udma_desc_free(&uc->terminated_desc->vd);
+ uc->terminated_desc = NULL;
+ }
+
+ if (!uc->desc)
+ udma_start(uc);
+
+ goto out;
+ }
+
+ d = udma_udma_desc_from_paddr(uc, paddr);
+
+ if (d) {
+ dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
+ d->desc_idx);
+ if (desc_paddr != paddr) {
+ dev_err(uc->ud->dev, "not matching descriptors!\n");
+ goto out;
+ }
+
+ if (uc->cyclic) {
+ /* push the descriptor back to the ring */
+ if (d == uc->desc) {
+ udma_cyclic_packet_elapsed(uc);
+ vchan_cyclic_callback(&d->vd);
+ }
+ } else {
+ bool desc_done = false;
+
+ if (d == uc->desc) {
+ desc_done = udma_is_desc_really_done(uc, d);
+
+ if (desc_done) {
+ uc->bcnt += d->residue;
+ udma_start(uc);
+ } else {
+ schedule_delayed_work(&uc->tx_drain.work,
+ 0);
+ }
+ }
+
+ if (desc_done)
+ vchan_cookie_complete(&d->vd);
+ }
+ }
+out:
+ spin_unlock_irqrestore(&uc->vc.lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t udma_udma_irq_handler(int irq, void *data)
+{
+ struct udma_chan *uc = data;
+ struct udma_desc *d;
+ unsigned long flags;
+
+ spin_lock_irqsave(&uc->vc.lock, flags);
+ d = uc->desc;
+ if (d) {
+ d->tr_idx = (d->tr_idx + 1) % d->sglen;
+
+ if (uc->cyclic) {
+ vchan_cyclic_callback(&d->vd);
+ } else {
+ /* TODO: figure out the real amount of data */
+ uc->bcnt += d->residue;
+ udma_start(uc);
+ vchan_cookie_complete(&d->vd);
+ }
+ }
+
+ spin_unlock_irqrestore(&uc->vc.lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * __udma_alloc_gp_rflow_range - alloc range of GP RX flows
+ * @ud: UDMA device
+ * @from: Start the search from this flow id number
+ * @cnt: Number of consecutive flow ids to allocate
+ *
+ * Allocate a range of RX flow ids for future use; these flows can be
+ * requested only by explicit flow id number. If @from is set to -1 it will
+ * try to find the first free range. If @from is a positive value it will
+ * force allocation only of the specified range of flows.
+ *
+ * Returns -ENOMEM if a free range can't be found,
+ * -EEXIST if the requested range is busy,
+ * -EINVAL if wrong input values are passed.
+ * Returns the flow id on success.
+ */
+static int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
+{
+ int start, tmp_from;
+ DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);
+
+ tmp_from = from;
+ if (tmp_from < 0)
+ tmp_from = ud->rchan_cnt;
+ /* default flows can't be allocated; they are accessible only by id */
+ if (tmp_from < ud->rchan_cnt)
+ return -EINVAL;
+
+ if (tmp_from + cnt > ud->rflow_cnt)
+ return -EINVAL;
+
+ bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated,
+ ud->rflow_cnt);
+
+ start = bitmap_find_next_zero_area(tmp,
+ ud->rflow_cnt,
+ tmp_from, cnt, 0);
+ if (start >= ud->rflow_cnt)
+ return -ENOMEM;
+
+ if (from >= 0 && start != from)
+ return -EEXIST;
+
+ bitmap_set(ud->rflow_gp_map_allocated, start, cnt);
+ return start;
+}
+
+static int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
+{
+ if (from < ud->rchan_cnt)
+ return -EINVAL;
+ if (from + cnt > ud->rflow_cnt)
+ return -EINVAL;
+
+ bitmap_clear(ud->rflow_gp_map_allocated, from, cnt);
+ return 0;
+}
+
+static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id)
+{
+ /*
+ * An attempt to request an rflow by ID can be made for any rflow as
+ * long as it is not in use, with the assumption that the caller knows
+ * what it is doing. TI-SCI FW will perform an additional permission
+ * check anyway, so it's safe.
+ */
+
+ if (id < 0 || id >= ud->rflow_cnt)
+ return ERR_PTR(-ENOENT);
+
+ if (test_bit(id, ud->rflow_in_use))
+ return ERR_PTR(-ENOENT);
+
+ /* GP rflow has to be allocated first */
+ if (!test_bit(id, ud->rflow_gp_map) &&
+ !test_bit(id, ud->rflow_gp_map_allocated))
+ return ERR_PTR(-EINVAL);
+
+ dev_dbg(ud->dev, "get rflow%d\n", id);
+ set_bit(id, ud->rflow_in_use);
+ return &ud->rflows[id];
+}
+
+static void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow)
+{
+ if (!test_bit(rflow->id, ud->rflow_in_use)) {
+ dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id);
+ return;
+ }
+
+ dev_dbg(ud->dev, "put rflow%d\n", rflow->id);
+ clear_bit(rflow->id, ud->rflow_in_use);
+}
+
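+/*
+ * UDMA_RESERVE_RESOURCE() instantiates __udma_reserve_tchan() and
+ * __udma_reserve_rchan(): a specific channel id can be requested, or with
+ * id == -1 the first free channel starting from the given throughput
+ * level's start index is taken.
+ */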
+#define UDMA_RESERVE_RESOURCE(res) \
+static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \
+ enum udma_tp_level tpl, \
+ int id) \
+{ \
+ if (id >= 0) { \
+ if (test_bit(id, ud->res##_map)) { \
+ dev_err(ud->dev, "res##%d is in use\n", id); \
+ return ERR_PTR(-ENOENT); \
+ } \
+ } else { \
+ int start; \
+ \
+ if (tpl >= ud->match_data->tpl_levels) \
+ tpl = ud->match_data->tpl_levels - 1; \
+ \
+ start = ud->match_data->level_start_idx[tpl]; \
+ \
+ id = find_next_zero_bit(ud->res##_map, ud->res##_cnt, \
+ start); \
+ if (id == ud->res##_cnt) { \
+ return ERR_PTR(-ENOENT); \
+ } \
+ } \
+ \
+ set_bit(id, ud->res##_map); \
+ return &ud->res##s[id]; \
+}
+
+UDMA_RESERVE_RESOURCE(tchan);
+UDMA_RESERVE_RESOURCE(rchan);
+
+static int udma_get_tchan(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+
+ if (uc->tchan) {
+ dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
+ uc->id, uc->tchan->id);
+ return 0;
+ }
+
+ uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl, -1);
+ if (IS_ERR(uc->tchan))
+ return PTR_ERR(uc->tchan);
+
+ return 0;
+}
+
+static int udma_get_rchan(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+
+ if (uc->rchan) {
+ dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
+ uc->id, uc->rchan->id);
+ return 0;
+ }
+
+ uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl, -1);
+ if (IS_ERR(uc->rchan))
+ return PTR_ERR(uc->rchan);
+
+ return 0;
+}
+
+static int udma_get_chan_pair(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+ const struct udma_match_data *match_data = ud->match_data;
+ int chan_id, end;
+
+ if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
+ dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
+ uc->id, uc->tchan->id);
+ return 0;
+ }
+
+ if (uc->tchan) {
+ dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
+ uc->id, uc->tchan->id);
+ return -EBUSY;
+ } else if (uc->rchan) {
+ dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
+ uc->id, uc->rchan->id);
+ return -EBUSY;
+ }
+
+ /* Can be optimized, but let's have it like this for now */
+ end = min(ud->tchan_cnt, ud->rchan_cnt);
+ /* Try to use the highest TPL channel pair for MEM_TO_MEM channels */
+ chan_id = match_data->level_start_idx[match_data->tpl_levels - 1];
+ for (; chan_id < end; chan_id++) {
+ if (!test_bit(chan_id, ud->tchan_map) &&
+ !test_bit(chan_id, ud->rchan_map))
+ break;
+ }
+
+ if (chan_id == end)
+ return -ENOENT;
+
+ set_bit(chan_id, ud->tchan_map);
+ set_bit(chan_id, ud->rchan_map);
+ uc->tchan = &ud->tchans[chan_id];
+ uc->rchan = &ud->rchans[chan_id];
+
+ return 0;
+}
+
+static int udma_get_rflow(struct udma_chan *uc, int flow_id)
+{
+ struct udma_dev *ud = uc->ud;
+
+ if (!uc->rchan) {
+ dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id);
+ return -EINVAL;
+ }
+
+ if (uc->rflow) {
+ dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
+ uc->id, uc->rflow->id);
+ return 0;
+ }
+
+ uc->rflow = __udma_get_rflow(ud, flow_id);
+ if (IS_ERR(uc->rflow))
+ return PTR_ERR(uc->rflow);
+
+ return 0;
+}
+
+static void udma_put_rchan(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+
+ if (uc->rchan) {
+ dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
+ uc->rchan->id);
+ clear_bit(uc->rchan->id, ud->rchan_map);
+ uc->rchan = NULL;
+ }
+}
+
+static void udma_put_tchan(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+
+ if (uc->tchan) {
+ dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
+ uc->tchan->id);
+ clear_bit(uc->tchan->id, ud->tchan_map);
+ uc->tchan = NULL;
+ }
+}
+
+static void udma_put_rflow(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+
+ if (uc->rflow) {
+ dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
+ uc->rflow->id);
+ __udma_put_rflow(ud, uc->rflow);
+ uc->rflow = NULL;
+ }
+}
+
+static void udma_free_tx_resources(struct udma_chan *uc)
+{
+ if (!uc->tchan)
+ return;
+
+ k3_ringacc_ring_free(uc->tchan->t_ring);
+ k3_ringacc_ring_free(uc->tchan->tc_ring);
+ uc->tchan->t_ring = NULL;
+ uc->tchan->tc_ring = NULL;
+
+ udma_put_tchan(uc);
+}
+
+static int udma_alloc_tx_resources(struct udma_chan *uc)
+{
+ struct k3_ring_cfg ring_cfg;
+ struct udma_dev *ud = uc->ud;
+ int ret;
+
+ ret = udma_get_tchan(uc);
+ if (ret)
+ return ret;
+
+ uc->tchan->t_ring = k3_ringacc_request_ring(ud->ringacc,
+ uc->tchan->id, 0);
+ if (!uc->tchan->t_ring) {
+ ret = -EBUSY;
+ goto err_tx_ring;
+ }
+
+ uc->tchan->tc_ring = k3_ringacc_request_ring(ud->ringacc, -1, 0);
+ if (!uc->tchan->tc_ring) {
+ ret = -EBUSY;
+ goto err_txc_ring;
+ }
+
+ memset(&ring_cfg, 0, sizeof(ring_cfg));
+ ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
+ ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
+ ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
+
+ ret = k3_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
+ ret |= k3_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);
+
+ if (ret)
+ goto err_ringcfg;
+
+ return 0;
+
+err_ringcfg:
+ k3_ringacc_ring_free(uc->tchan->tc_ring);
+ uc->tchan->tc_ring = NULL;
+err_txc_ring:
+ k3_ringacc_ring_free(uc->tchan->t_ring);
+ uc->tchan->t_ring = NULL;
+err_tx_ring:
+ udma_put_tchan(uc);
+
+ return ret;
+}
+
+static void udma_free_rx_resources(struct udma_chan *uc)
+{
+ if (!uc->rchan)
+ return;
+
+ if (uc->rflow) {
+ struct udma_rflow *rflow = uc->rflow;
+
+ k3_ringacc_ring_free(rflow->fd_ring);
+ k3_ringacc_ring_free(rflow->r_ring);
+ rflow->fd_ring = NULL;
+ rflow->r_ring = NULL;
+
+ udma_put_rflow(uc);
+ }
+
+ udma_put_rchan(uc);
+}
+
+static int udma_alloc_rx_resources(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+ struct k3_ring_cfg ring_cfg;
+ struct udma_rflow *rflow;
+ int fd_ring_id;
+ int ret;
+
+ ret = udma_get_rchan(uc);
+ if (ret)
+ return ret;
+
+ /* For MEM_TO_MEM we don't need rflow or rings */
+ if (uc->config.dir == DMA_MEM_TO_MEM)
+ return 0;
+
+ ret = udma_get_rflow(uc, uc->rchan->id);
+ if (ret) {
+ ret = -EBUSY;
+ goto err_rflow;
+ }
+
+ rflow = uc->rflow;
+ fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id;
+ rflow->fd_ring = k3_ringacc_request_ring(ud->ringacc, fd_ring_id, 0);
+ if (!rflow->fd_ring) {
+ ret = -EBUSY;
+ goto err_rx_ring;
+ }
+
+ rflow->r_ring = k3_ringacc_request_ring(ud->ringacc, -1, 0);
+ if (!rflow->r_ring) {
+ ret = -EBUSY;
+ goto err_rxc_ring;
+ }
+
+ memset(&ring_cfg, 0, sizeof(ring_cfg));
+
+ if (uc->config.pkt_mode)
+ ring_cfg.size = SG_MAX_SEGMENTS;
+ else
+ ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
+
+ ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
+ ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
+
+ ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);
+ ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
+ ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);
+
+ if (ret)
+ goto err_ringcfg;
+
+ return 0;
+
+err_ringcfg:
+ k3_ringacc_ring_free(rflow->r_ring);
+ rflow->r_ring = NULL;
+err_rxc_ring:
+ k3_ringacc_ring_free(rflow->fd_ring);
+ rflow->fd_ring = NULL;
+err_rx_ring:
+ udma_put_rflow(uc);
+err_rflow:
+ udma_put_rchan(uc);
+
+ return ret;
+}
+
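+/*
+ * Bitmasks of the fields in the TI-SCI channel configuration requests
+ * below which carry valid data.
+ */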
+#define TISCI_TCHAN_VALID_PARAMS ( \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID)
+
+#define TISCI_RCHAN_VALID_PARAMS ( \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID)
+
+static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+ struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+ const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
+ struct udma_tchan *tchan = uc->tchan;
+ struct udma_rchan *rchan = uc->rchan;
+ int ret = 0;
+
+ /* Non synchronized - mem to mem type of transfer */
+ int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
+ struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
+ struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
+
+ req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS;
+ req_tx.nav_id = tisci_rm->tisci_dev_id;
+ req_tx.index = tchan->id;
+ req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
+ req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
+ req_tx.txcq_qnum = tc_ring;
+
+ ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
+ if (ret) {
+ dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
+ return ret;
+ }
+
+ req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS;
+ req_rx.nav_id = tisci_rm->tisci_dev_id;
+ req_rx.index = rchan->id;
+ req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
+ req_rx.rxcq_qnum = tc_ring;
+ req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
+
+ ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
+ if (ret)
+ dev_err(ud->dev, "rchan%d alloc failed %d\n", rchan->id, ret);
+
+ return ret;
+}
+
+static int udma_tisci_tx_channel_config(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+ struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+ const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
+ struct udma_tchan *tchan = uc->tchan;
+ int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
+ struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
+ u32 mode, fetch_size;
+ int ret = 0;
+
+ if (uc->config.pkt_mode) {
+ mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
+ fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
+ uc->config.psd_size, 0);
+ } else {
+ mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
+ fetch_size = sizeof(struct cppi5_desc_hdr_t);
+ }
+
+ req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS;
+ req_tx.nav_id = tisci_rm->tisci_dev_id;
+ req_tx.index = tchan->id;
+ req_tx.tx_chan_type = mode;
+ req_tx.tx_supr_tdpkt = uc->config.notdpkt;
+ req_tx.tx_fetch_size = fetch_size >> 2;
+ req_tx.txcq_qnum = tc_ring;
+
+ ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
+ if (ret)
+ dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
+
+ return ret;
+}
+
+static int udma_tisci_rx_channel_config(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+ struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+ const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
+ struct udma_rchan *rchan = uc->rchan;
+ int fd_ring = k3_ringacc_get_ring_id(uc->rflow->fd_ring);
+ int rx_ring = k3_ringacc_get_ring_id(uc->rflow->r_ring);
+ struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
+ struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
+ u32 mode, fetch_size;
+ int ret = 0;
+
+ if (uc->config.pkt_mode) {
+ mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
+ fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
+ uc->config.psd_size, 0);
+ } else {
+ mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
+ fetch_size = sizeof(struct cppi5_desc_hdr_t);
+ }
+
+ req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS;
+ req_rx.nav_id = tisci_rm->tisci_dev_id;
+ req_rx.index = rchan->id;
+ req_rx.rx_fetch_size = fetch_size >> 2;
+ req_rx.rxcq_qnum = rx_ring;
+ req_rx.rx_chan_type = mode;
+
+ ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
+ if (ret) {
+ dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
+ return ret;
+ }
+
+ flow_req.valid_params =
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
+
+ flow_req.nav_id = tisci_rm->tisci_dev_id;
+ flow_req.flow_index = rchan->id;
+
+ if (uc->config.needs_epib)
+ flow_req.rx_einfo_present = 1;
+ else
+ flow_req.rx_einfo_present = 0;
+ if (uc->config.psd_size)
+ flow_req.rx_psinfo_present = 1;
+ else
+ flow_req.rx_psinfo_present = 0;
+ flow_req.rx_error_handling = 1;
+ flow_req.rx_dest_qnum = rx_ring;
+ flow_req.rx_src_tag_hi_sel = UDMA_RFLOW_SRCTAG_NONE;
+ flow_req.rx_src_tag_lo_sel = UDMA_RFLOW_SRCTAG_SRC_TAG;
+ flow_req.rx_dest_tag_hi_sel = UDMA_RFLOW_DSTTAG_DST_TAG_HI;
+ flow_req.rx_dest_tag_lo_sel = UDMA_RFLOW_DSTTAG_DST_TAG_LO;
+ flow_req.rx_fdq0_sz0_qnum = fd_ring;
+ flow_req.rx_fdq1_qnum = fd_ring;
+ flow_req.rx_fdq2_qnum = fd_ring;
+ flow_req.rx_fdq3_qnum = fd_ring;
+
+ ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
+
+ if (ret)
+ dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret);
+
+ return ret;
+}
+
+static int udma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+ struct udma_dev *ud = to_udma_dev(chan->device);
+ const struct udma_match_data *match_data = ud->match_data;
+ struct k3_ring *irq_ring;
+ u32 irq_udma_idx;
+ int ret;
+
+ if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) {
+ uc->use_dma_pool = true;
+ /* In case of MEM_TO_MEM we have a maximum of two TRs */
+ if (uc->config.dir == DMA_MEM_TO_MEM) {
+ uc->config.hdesc_size = cppi5_trdesc_calc_size(
+ sizeof(struct cppi5_tr_type15_t), 2);
+ uc->config.pkt_mode = false;
+ }
+ }
+
+ if (uc->use_dma_pool) {
+ uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
+ uc->config.hdesc_size,
+ ud->desc_align,
+ 0);
+ if (!uc->hdesc_pool) {
+ dev_err(ud->ddev.dev,
+ "Descriptor pool allocation failed\n");
+ uc->use_dma_pool = false;
+ return -ENOMEM;
+ }
+ }
+
+ /*
+ * Make sure that the completion is in a known state:
+ * No teardown, the channel is idle
+ */
+ reinit_completion(&uc->teardown_completed);
+ complete_all(&uc->teardown_completed);
+ uc->state = UDMA_CHAN_IS_IDLE;
+
+ switch (uc->config.dir) {
+ case DMA_MEM_TO_MEM:
+ /* Non synchronized - mem to mem type of transfer */
+ dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
+ uc->id);
+
+ ret = udma_get_chan_pair(uc);
+ if (ret)
+ return ret;
+
+ ret = udma_alloc_tx_resources(uc);
+ if (ret)
+ return ret;
+
+ ret = udma_alloc_rx_resources(uc);
+ if (ret) {
+ udma_free_tx_resources(uc);
+ return ret;
+ }
+
+ uc->config.src_thread = ud->psil_base + uc->tchan->id;
+ uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
+ K3_PSIL_DST_THREAD_ID_OFFSET;
+
+ irq_ring = uc->tchan->tc_ring;
+ irq_udma_idx = uc->tchan->id;
+
+ ret = udma_tisci_m2m_channel_config(uc);
+ break;
+ case DMA_MEM_TO_DEV:
+ /* Slave transfer synchronized - mem to dev (TX) transfer */
+ dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
+ uc->id);
+
+ ret = udma_alloc_tx_resources(uc);
+ if (ret) {
+ uc->config.remote_thread_id = -1;
+ return ret;
+ }
+
+ uc->config.src_thread = ud->psil_base + uc->tchan->id;
+ uc->config.dst_thread = uc->config.remote_thread_id;
+ uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
+
+ irq_ring = uc->tchan->tc_ring;
+ irq_udma_idx = uc->tchan->id;
+
+ ret = udma_tisci_tx_channel_config(uc);
+ break;
+ case DMA_DEV_TO_MEM:
+ /* Slave transfer synchronized - dev to mem (RX) transfer */
+ dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
+ uc->id);
+
+ ret = udma_alloc_rx_resources(uc);
+ if (ret) {
+ uc->config.remote_thread_id = -1;
+ return ret;
+ }
+
+ uc->config.src_thread = uc->config.remote_thread_id;
+ uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
+ K3_PSIL_DST_THREAD_ID_OFFSET;
+
+ irq_ring = uc->rflow->r_ring;
+ irq_udma_idx = match_data->rchan_oes_offset + uc->rchan->id;
+
+ ret = udma_tisci_rx_channel_config(uc);
+ break;
+ default:
+ /* Cannot happen */
+ dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
+ __func__, uc->id, uc->config.dir);
+ return -EINVAL;
+ }
+
+ /* check if the channel configuration was successful */
+ if (ret)
+ goto err_res_free;
+
+ if (udma_is_chan_running(uc)) {
+ dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
+ udma_stop(uc);
+ if (udma_is_chan_running(uc)) {
+ dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
+ goto err_res_free;
+ }
+ }
+
+ /* PSI-L pairing */
+ ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
+ if (ret) {
+ dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
+ uc->config.src_thread, uc->config.dst_thread);
+ goto err_res_free;
+ }
+
+ uc->psil_paired = true;
+
+ uc->irq_num_ring = k3_ringacc_get_ring_irq_num(irq_ring);
+ if (uc->irq_num_ring <= 0) {
+ dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
+ k3_ringacc_get_ring_id(irq_ring));
+ ret = -EINVAL;
+ goto err_psi_free;
+ }
+
+ ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
+ IRQF_TRIGGER_HIGH, uc->name, uc);
+ if (ret) {
+ dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
+ goto err_irq_free;
+ }
+
+ /* Event from UDMA (TR events) only needed for slave TR mode channels */
+ if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) {
+ uc->irq_num_udma = ti_sci_inta_msi_get_virq(ud->dev,
+ irq_udma_idx);
+ if (uc->irq_num_udma <= 0) {
+ dev_err(ud->dev, "Failed to get udma irq (index: %u)\n",
+ irq_udma_idx);
+ free_irq(uc->irq_num_ring, uc);
+ ret = -EINVAL;
+ goto err_irq_free;
+ }
+
+ ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
+ uc->name, uc);
+ if (ret) {
+ dev_err(ud->dev, "chan%d: UDMA irq request failed\n",
+ uc->id);
+ free_irq(uc->irq_num_ring, uc);
+ goto err_irq_free;
+ }
+ } else {
+ uc->irq_num_udma = 0;
+ }
+
+ udma_reset_rings(uc);
+
+ INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
+ udma_check_tx_completion);
+ return 0;
+
+err_irq_free:
+ uc->irq_num_ring = 0;
+ uc->irq_num_udma = 0;
+err_psi_free:
+ navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
+ uc->psil_paired = false;
+err_res_free:
+ udma_free_tx_resources(uc);
+ udma_free_rx_resources(uc);
+
+ udma_reset_uchan(uc);
+
+ if (uc->use_dma_pool) {
+ dma_pool_destroy(uc->hdesc_pool);
+ uc->use_dma_pool = false;
+ }
+
+ return ret;
+}
+
+static int udma_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *cfg)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+
+ memcpy(&uc->cfg, cfg, sizeof(uc->cfg));
+
+ return 0;
+}
+
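+/*
+ * The descriptor memory for a TR based transfer holds the CPPI5 header in
+ * the first tr_size bytes, followed by tr_count TR request records and the
+ * TR response array.
+ */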
+static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc,
+ size_t tr_size, int tr_count,
+ enum dma_transfer_direction dir)
+{
+ struct udma_hwdesc *hwdesc;
+ struct cppi5_desc_hdr_t *tr_desc;
+ struct udma_desc *d;
+ u32 reload_count = 0;
+ u32 ring_id;
+
+ switch (tr_size) {
+ case 16:
+ case 32:
+ case 64:
+ case 128:
+ break;
+ default:
+ dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size);
+ return NULL;
+ }
+
+ /* We have only one descriptor containing multiple TRs */
+ d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT);
+ if (!d)
+ return NULL;
+
+ d->sglen = tr_count;
+
+ d->hwdesc_count = 1;
+ hwdesc = &d->hwdesc[0];
+
+ /* Allocate memory for DMA ring descriptor */
+ if (uc->use_dma_pool) {
+ hwdesc->cppi5_desc_size = uc->config.hdesc_size;
+ hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
+ GFP_NOWAIT,
+ &hwdesc->cppi5_desc_paddr);
+ } else {
+ hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size,
+ tr_count);
+ hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
+ uc->ud->desc_align);
+ hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev,
+ hwdesc->cppi5_desc_size,
+ &hwdesc->cppi5_desc_paddr,
+ GFP_NOWAIT);
+ }
+
+ if (!hwdesc->cppi5_desc_vaddr) {
+ kfree(d);
+ return NULL;
+ }
+
+ /* Start of the TR req records */
+ hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
+ /* Start address of the TR response array */
+ hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count;
+
+ tr_desc = hwdesc->cppi5_desc_vaddr;
+
+ if (uc->cyclic)
+ reload_count = CPPI5_INFO0_TRDESC_RLDCNT_INFINITE;
+
+ if (dir == DMA_DEV_TO_MEM)
+ ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
+ else
+ ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
+
+ cppi5_trdesc_init(tr_desc, tr_count, tr_size, 0, reload_count);
+ cppi5_desc_set_pktids(tr_desc, uc->id,
+ CPPI5_INFO1_DESC_FLOWID_DEFAULT);
+ cppi5_desc_set_retpolicy(tr_desc, 0, ring_id);
+
+ return d;
+}
+
+static struct udma_desc *
+udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
+ unsigned int sglen, enum dma_transfer_direction dir,
+ unsigned long tx_flags, void *context)
+{
+ enum dma_slave_buswidth dev_width;
+ struct scatterlist *sgent;
+ struct udma_desc *d;
+ size_t tr_size;
+ struct cppi5_tr_type1_t *tr_req = NULL;
+ unsigned int i;
+ u32 burst;
+
+ if (dir == DMA_DEV_TO_MEM) {
+ dev_width = uc->cfg.src_addr_width;
+ burst = uc->cfg.src_maxburst;
+ } else if (dir == DMA_MEM_TO_DEV) {
+ dev_width = uc->cfg.dst_addr_width;
+ burst = uc->cfg.dst_maxburst;
+ } else {
+ dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
+ return NULL;
+ }
+
+ if (!burst)
+ burst = 1;
+
+ /* Now allocate and setup the descriptor. */
+ tr_size = sizeof(struct cppi5_tr_type1_t);
+ d = udma_alloc_tr_desc(uc, tr_size, sglen, dir);
+ if (!d)
+ return NULL;
+
+ d->sglen = sglen;
+
+ tr_req = d->hwdesc[0].tr_req_base;
+ for_each_sg(sgl, sgent, sglen, i) {
+ d->residue += sg_dma_len(sgent);
+
+ cppi5_tr_init(&tr_req[i].flags, CPPI5_TR_TYPE1, false, false,
+ CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+ cppi5_tr_csf_set(&tr_req[i].flags, CPPI5_TR_CSF_SUPR_EVT);
+
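+ /*
+ * One type1 TR per SG entry: icnt0 bytes per burst, icnt1 bursts,
+ * advancing by dim1 bytes between bursts.
+ */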
+ tr_req[i].addr = sg_dma_address(sgent);
+ tr_req[i].icnt0 = burst * dev_width;
+ tr_req[i].dim1 = burst * dev_width;
+ tr_req[i].icnt1 = sg_dma_len(sgent) / tr_req[i].icnt0;
+ }
+
+ cppi5_tr_csf_set(&tr_req[i - 1].flags, CPPI5_TR_CSF_EOP);
+
+ return d;
+}
+
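+/*
+ * PDMA peers do not fetch TRs; their element size (X), element count (Y)
+ * and burst count (Z) are programmed through the peer static TR registers
+ * instead.
+ */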
+static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
+ enum dma_slave_buswidth dev_width,
+ u16 elcnt)
+{
+ if (uc->config.ep_type != PSIL_EP_PDMA_XY)
+ return 0;
+
+ /* Bus width translates to the element size (ES) */
+ switch (dev_width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ d->static_tr.elsize = 0;
+ break;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ d->static_tr.elsize = 1;
+ break;
+ case DMA_SLAVE_BUSWIDTH_3_BYTES:
+ d->static_tr.elsize = 2;
+ break;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ d->static_tr.elsize = 3;
+ break;
+ case DMA_SLAVE_BUSWIDTH_8_BYTES:
+ d->static_tr.elsize = 4;
+ break;
+ default: /* not reached */
+ return -EINVAL;
+ }
+
+ d->static_tr.elcnt = elcnt;
+
+ /*
+ * The PDMA must close the packet when the channel is in packet mode.
+ * For TR mode when the channel is not cyclic we also need the PDMA to
+ * close the packet, otherwise the transfer will stall because the PDMA
+ * holds on to the data it has received from the peripheral.
+ */
+ if (uc->config.pkt_mode || !uc->cyclic) {
+ unsigned int div = dev_width * elcnt;
+
+ if (uc->cyclic)
+ d->static_tr.bstcnt = d->residue / d->sglen / div;
+ else
+ d->static_tr.bstcnt = d->residue / div;
+
+ if (uc->config.dir == DMA_DEV_TO_MEM &&
+ d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
+ return -EINVAL;
+ } else {
+ d->static_tr.bstcnt = 0;
+ }
+
+ return 0;
+}
+
+static struct udma_desc *
+udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl,
+ unsigned int sglen, enum dma_transfer_direction dir,
+ unsigned long tx_flags, void *context)
+{
+ struct scatterlist *sgent;
+ struct cppi5_host_desc_t *h_desc = NULL;
+ struct udma_desc *d;
+ u32 ring_id;
+ unsigned int i;
+
+ d = kzalloc(sizeof(*d) + sglen * sizeof(d->hwdesc[0]), GFP_NOWAIT);
+ if (!d)
+ return NULL;
+
+ d->sglen = sglen;
+ d->hwdesc_count = sglen;
+
+ if (dir == DMA_DEV_TO_MEM)
+ ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
+ else
+ ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
+
+ for_each_sg(sgl, sgent, sglen, i) {
+ struct udma_hwdesc *hwdesc = &d->hwdesc[i];
+ dma_addr_t sg_addr = sg_dma_address(sgent);
+ struct cppi5_host_desc_t *desc;
+ size_t sg_len = sg_dma_len(sgent);
+
+ hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
+ GFP_NOWAIT,
+ &hwdesc->cppi5_desc_paddr);
+ if (!hwdesc->cppi5_desc_vaddr) {
+ dev_err(uc->ud->dev,
+ "descriptor%d allocation failed\n", i);
+
+ udma_free_hwdesc(uc, d);
+ kfree(d);
+ return NULL;
+ }
+
+ d->residue += sg_len;
+ hwdesc->cppi5_desc_size = uc->config.hdesc_size;
+ desc = hwdesc->cppi5_desc_vaddr;
+
+ if (i == 0) {
+ cppi5_hdesc_init(desc, 0, 0);
+ /* Flow and Packet ID */
+ cppi5_desc_set_pktids(&desc->hdr, uc->id,
+ CPPI5_INFO1_DESC_FLOWID_DEFAULT);
+ cppi5_desc_set_retpolicy(&desc->hdr, 0, ring_id);
+ } else {
+ cppi5_hdesc_reset_hbdesc(desc);
+ cppi5_desc_set_retpolicy(&desc->hdr, 0, 0xffff);
+ }
+
+ /* attach the sg buffer to the descriptor */
+ cppi5_hdesc_attach_buf(desc, sg_addr, sg_len, sg_addr, sg_len);
+
+ /* Attach link as host buffer descriptor */
+ if (h_desc)
+ cppi5_hdesc_link_hbdesc(h_desc,
+ hwdesc->cppi5_desc_paddr);
+
+ if (dir == DMA_MEM_TO_DEV)
+ h_desc = desc;
+ }
+
+ if (d->residue >= SZ_4M) {
+ dev_err(uc->ud->dev,
+ "%s: Transfer size %u is over the supported 4M range\n",
+ __func__, d->residue);
+ udma_free_hwdesc(uc, d);
+ kfree(d);
+ return NULL;
+ }
+
+ h_desc = d->hwdesc[0].cppi5_desc_vaddr;
+ cppi5_hdesc_set_pktlen(h_desc, d->residue);
+
+ return d;
+}
+
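+/*
+ * Client metadata is carried in the CPPI5 host descriptor: the EPIB words
+ * are followed by the protocol specific (psdata) words.
+ */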
+static int udma_attach_metadata(struct dma_async_tx_descriptor *desc,
+ void *data, size_t len)
+{
+ struct udma_desc *d = to_udma_desc(desc);
+ struct udma_chan *uc = to_udma_chan(desc->chan);
+ struct cppi5_host_desc_t *h_desc;
+ u32 psd_size = len;
+ u32 flags = 0;
+
+ if (!uc->config.pkt_mode || !uc->config.metadata_size)
+ return -ENOTSUPP;
+
+ if (!data || len > uc->config.metadata_size)
+ return -EINVAL;
+
+ if (uc->config.needs_epib && len < CPPI5_INFO0_HDESC_EPIB_SIZE)
+ return -EINVAL;
+
+ h_desc = d->hwdesc[0].cppi5_desc_vaddr;
+ if (d->dir == DMA_MEM_TO_DEV)
+ memcpy(h_desc->epib, data, len);
+
+ if (uc->config.needs_epib)
+ psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
+
+ d->metadata = data;
+ d->metadata_size = len;
+ if (uc->config.needs_epib)
+ flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
+
+ cppi5_hdesc_update_flags(h_desc, flags);
+ cppi5_hdesc_update_psdata_size(h_desc, psd_size);
+
+ return 0;
+}
+
+static void *udma_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
+ size_t *payload_len, size_t *max_len)
+{
+ struct udma_desc *d = to_udma_desc(desc);
+ struct udma_chan *uc = to_udma_chan(desc->chan);
+ struct cppi5_host_desc_t *h_desc;
+
+ if (!uc->config.pkt_mode || !uc->config.metadata_size)
+ return ERR_PTR(-ENOTSUPP);
+
+ h_desc = d->hwdesc[0].cppi5_desc_vaddr;
+
+ *max_len = uc->config.metadata_size;
+
+ *payload_len = cppi5_hdesc_epib_present(&h_desc->hdr) ?
+ CPPI5_INFO0_HDESC_EPIB_SIZE : 0;
+ *payload_len += cppi5_hdesc_get_psdata_size(h_desc);
+
+ return h_desc->epib;
+}
+
+static int udma_set_metadata_len(struct dma_async_tx_descriptor *desc,
+ size_t payload_len)
+{
+ struct udma_desc *d = to_udma_desc(desc);
+ struct udma_chan *uc = to_udma_chan(desc->chan);
+ struct cppi5_host_desc_t *h_desc;
+ u32 psd_size = payload_len;
+ u32 flags = 0;
+
+ if (!uc->config.pkt_mode || !uc->config.metadata_size)
+ return -ENOTSUPP;
+
+ if (payload_len > uc->config.metadata_size)
+ return -EINVAL;
+
+ if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE)
+ return -EINVAL;
+
+ h_desc = d->hwdesc[0].cppi5_desc_vaddr;
+
+ if (uc->config.needs_epib) {
+ psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
+ flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
+ }
+
+ cppi5_hdesc_update_flags(h_desc, flags);
+ cppi5_hdesc_update_psdata_size(h_desc, psd_size);
+
+ return 0;
+}
+
+static struct dma_descriptor_metadata_ops metadata_ops = {
+ .attach = udma_attach_metadata,
+ .get_ptr = udma_get_metadata_ptr,
+ .set_len = udma_set_metadata_len,
+};
+
+static struct dma_async_tx_descriptor *
+udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sglen, enum dma_transfer_direction dir,
+ unsigned long tx_flags, void *context)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+ enum dma_slave_buswidth dev_width;
+ struct udma_desc *d;
+ u32 burst;
+
+ if (dir != uc->config.dir) {
+ dev_err(chan->device->dev,
+ "%s: chan%d is for %s, not supporting %s\n",
+ __func__, uc->id,
+ dmaengine_get_direction_text(uc->config.dir),
+ dmaengine_get_direction_text(dir));
+ return NULL;
+ }
+
+ if (dir == DMA_DEV_TO_MEM) {
+ dev_width = uc->cfg.src_addr_width;
+ burst = uc->cfg.src_maxburst;
+ } else if (dir == DMA_MEM_TO_DEV) {
+ dev_width = uc->cfg.dst_addr_width;
+ burst = uc->cfg.dst_maxburst;
+ } else {
+ dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
+ return NULL;
+ }
+
+ if (!burst)
+ burst = 1;
+
+ if (uc->config.pkt_mode)
+ d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags,
+ context);
+ else
+ d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags,
+ context);
+
+ if (!d)
+ return NULL;
+
+ d->dir = dir;
+ d->desc_idx = 0;
+ d->tr_idx = 0;
+
+ /* static TR for remote PDMA */
+ if (udma_configure_statictr(uc, d, dev_width, burst)) {
+ dev_err(uc->ud->dev,
+ "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
+ __func__, d->static_tr.bstcnt);
+
+ udma_free_hwdesc(uc, d);
+ kfree(d);
+ return NULL;
+ }
+
+ if (uc->config.metadata_size)
+ d->vd.tx.metadata_ops = &metadata_ops;
+
+ return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
+}
+
+static struct udma_desc *
+udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
+ size_t buf_len, size_t period_len,
+ enum dma_transfer_direction dir, unsigned long flags)
+{
+ enum dma_slave_buswidth dev_width;
+ struct udma_desc *d;
+ size_t tr_size;
+ struct cppi5_tr_type1_t *tr_req;
+ unsigned int i;
+ unsigned int periods = buf_len / period_len;
+ u32 burst;
+
+ if (dir == DMA_DEV_TO_MEM) {
+ dev_width = uc->cfg.src_addr_width;
+ burst = uc->cfg.src_maxburst;
+ } else if (dir == DMA_MEM_TO_DEV) {
+ dev_width = uc->cfg.dst_addr_width;
+ burst = uc->cfg.dst_maxburst;
+ } else {
+ dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
+ return NULL;
+ }
+
+ if (!burst)
+ burst = 1;
+
+ /* Now allocate and setup the descriptor. */
+ tr_size = sizeof(struct cppi5_tr_type1_t);
+ d = udma_alloc_tr_desc(uc, tr_size, periods, dir);
+ if (!d)
+ return NULL;
+
+ tr_req = d->hwdesc[0].tr_req_base;
+ for (i = 0; i < periods; i++) {
+ cppi5_tr_init(&tr_req[i].flags, CPPI5_TR_TYPE1, false, false,
+ CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+
+ tr_req[i].addr = buf_addr + period_len * i;
+ tr_req[i].icnt0 = dev_width;
+ tr_req[i].icnt1 = period_len / dev_width;
+ tr_req[i].dim1 = dev_width;
+
+ if (!(flags & DMA_PREP_INTERRUPT))
+ cppi5_tr_csf_set(&tr_req[i].flags,
+ CPPI5_TR_CSF_SUPR_EVT);
+ }
+
+ return d;
+}
+
+static struct udma_desc *
+udma_prep_dma_cyclic_pkt(struct udma_chan *uc, dma_addr_t buf_addr,
+ size_t buf_len, size_t period_len,
+ enum dma_transfer_direction dir, unsigned long flags)
+{
+ struct udma_desc *d;
+ u32 ring_id;
+ int i;
+ int periods = buf_len / period_len;
+
+ if (periods > (K3_UDMA_DEFAULT_RING_SIZE - 1))
+ return NULL;
+
+ if (period_len >= SZ_4M)
+ return NULL;
+
+ d = kzalloc(sizeof(*d) + periods * sizeof(d->hwdesc[0]), GFP_NOWAIT);
+ if (!d)
+ return NULL;
+
+ d->hwdesc_count = periods;
+
+ /* TODO: re-check this... */
+ if (dir == DMA_DEV_TO_MEM)
+ ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
+ else
+ ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
+
+ for (i = 0; i < periods; i++) {
+ struct udma_hwdesc *hwdesc = &d->hwdesc[i];
+ dma_addr_t period_addr = buf_addr + (period_len * i);
+ struct cppi5_host_desc_t *h_desc;
+
+ hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
+ GFP_NOWAIT,
+ &hwdesc->cppi5_desc_paddr);
+ if (!hwdesc->cppi5_desc_vaddr) {
+ dev_err(uc->ud->dev,
+ "descriptor%d allocation failed\n", i);
+
+ udma_free_hwdesc(uc, d);
+ kfree(d);
+ return NULL;
+ }
+
+ hwdesc->cppi5_desc_size = uc->config.hdesc_size;
+ h_desc = hwdesc->cppi5_desc_vaddr;
+
+ cppi5_hdesc_init(h_desc, 0, 0);
+ cppi5_hdesc_set_pktlen(h_desc, period_len);
+
+ /* Flow and Packet ID */
+ cppi5_desc_set_pktids(&h_desc->hdr, uc->id,
+ CPPI5_INFO1_DESC_FLOWID_DEFAULT);
+ cppi5_desc_set_retpolicy(&h_desc->hdr, 0, ring_id);
+
+ /* attach each period to a new descriptor */
+ cppi5_hdesc_attach_buf(h_desc,
+ period_addr, period_len,
+ period_addr, period_len);
+ }
+
+ return d;
+}
+
+static struct dma_async_tx_descriptor *
+udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction dir,
+ unsigned long flags)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+ enum dma_slave_buswidth dev_width;
+ struct udma_desc *d;
+ u32 burst;
+
+ if (dir != uc->config.dir) {
+ dev_err(chan->device->dev,
+ "%s: chan%d is for %s, not supporting %s\n",
+ __func__, uc->id,
+ dmaengine_get_direction_text(uc->config.dir),
+ dmaengine_get_direction_text(dir));
+ return NULL;
+ }
+
+ uc->cyclic = true;
+
+ if (dir == DMA_DEV_TO_MEM) {
+ dev_width = uc->cfg.src_addr_width;
+ burst = uc->cfg.src_maxburst;
+ } else if (dir == DMA_MEM_TO_DEV) {
+ dev_width = uc->cfg.dst_addr_width;
+ burst = uc->cfg.dst_maxburst;
+ } else {
+ dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
+ return NULL;
+ }
+
+ if (!burst)
+ burst = 1;
+
+ if (uc->config.pkt_mode)
+ d = udma_prep_dma_cyclic_pkt(uc, buf_addr, buf_len, period_len,
+ dir, flags);
+ else
+ d = udma_prep_dma_cyclic_tr(uc, buf_addr, buf_len, period_len,
+ dir, flags);
+
+ if (!d)
+ return NULL;
+
+ d->sglen = buf_len / period_len;
+
+ d->dir = dir;
+ d->residue = buf_len;
+
+ /* static TR for remote PDMA */
+ if (udma_configure_statictr(uc, d, dev_width, burst)) {
+ dev_err(uc->ud->dev,
+ "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
+ __func__, d->static_tr.bstcnt);
+
+ udma_free_hwdesc(uc, d);
+ kfree(d);
+ return NULL;
+ }
+
+ if (uc->config.metadata_size)
+ d->vd.tx.metadata_ops = &metadata_ops;
+
+ return vchan_tx_prep(&uc->vc, &d->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *
+udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+ size_t len, unsigned long tx_flags)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+ struct udma_desc *d;
+ struct cppi5_tr_type15_t *tr_req;
+ int num_tr;
+ size_t tr_size = sizeof(struct cppi5_tr_type15_t);
+ u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
+
+ if (uc->config.dir != DMA_MEM_TO_MEM) {
+ dev_err(chan->device->dev,
+ "%s: chan%d is for %s, not supporting %s\n",
+ __func__, uc->id,
+ dmaengine_get_direction_text(uc->config.dir),
+ dmaengine_get_direction_text(DMA_MEM_TO_MEM));
+ return NULL;
+ }
+
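+ /*
+ * The TR counts are 16 bits wide, so a copy of SZ_64K or more has to
+ * be split into two TRs: tr0 for equal sized blocks and tr1 for the
+ * remainder.
+ */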
+ if (len < SZ_64K) {
+ num_tr = 1;
+ tr0_cnt0 = len;
+ tr0_cnt1 = 1;
+ } else {
+ unsigned long align_to = __ffs(src | dest);
+
+ if (align_to > 3)
+ align_to = 3;
+ /*
+ * Keep it simple: tr0 moves SZ_64K-alignment sized blocks and
+ * tr1 moves the remainder
+ */
+ num_tr = 2;
+ tr0_cnt0 = (SZ_64K - BIT(align_to));
+ if (len / tr0_cnt0 >= SZ_64K) {
+ dev_err(uc->ud->dev, "size %zu is not supported\n",
+ len);
+ return NULL;
+ }
+
+ tr0_cnt1 = len / tr0_cnt0;
+ tr1_cnt0 = len % tr0_cnt0;
+ }
+
+ d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM);
+ if (!d)
+ return NULL;
+
+ d->dir = DMA_MEM_TO_MEM;
+ d->desc_idx = 0;
+ d->tr_idx = 0;
+ d->residue = len;
+
+ tr_req = d->hwdesc[0].tr_req_base;
+
+ cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
+ CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+ cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);
+
+ tr_req[0].addr = src;
+ tr_req[0].icnt0 = tr0_cnt0;
+ tr_req[0].icnt1 = tr0_cnt1;
+ tr_req[0].icnt2 = 1;
+ tr_req[0].icnt3 = 1;
+ tr_req[0].dim1 = tr0_cnt0;
+
+ tr_req[0].daddr = dest;
+ tr_req[0].dicnt0 = tr0_cnt0;
+ tr_req[0].dicnt1 = tr0_cnt1;
+ tr_req[0].dicnt2 = 1;
+ tr_req[0].dicnt3 = 1;
+ tr_req[0].ddim1 = tr0_cnt0;
+
+ if (num_tr == 2) {
+ cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
+ CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+ cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);
+
+ tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
+ tr_req[1].icnt0 = tr1_cnt0;
+ tr_req[1].icnt1 = 1;
+ tr_req[1].icnt2 = 1;
+ tr_req[1].icnt3 = 1;
+
+ tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
+ tr_req[1].dicnt0 = tr1_cnt0;
+ tr_req[1].dicnt1 = 1;
+ tr_req[1].dicnt2 = 1;
+ tr_req[1].dicnt3 = 1;
+ }
+
+ cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP);
+
+ if (uc->config.metadata_size)
+ d->vd.tx.metadata_ops = &metadata_ops;
+
+ return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
+}
+
+static void udma_issue_pending(struct dma_chan *chan)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&uc->vc.lock, flags);
+
+ /* If we have something pending and no active descriptor, then */
+ if (vchan_issue_pending(&uc->vc) && !uc->desc) {
+ /*
+ * start a descriptor if the channel is NOT [marked as
+ * terminating _and_ it is still running (teardown has not
+ * completed yet)].
+ */
+ if (!(uc->state == UDMA_CHAN_IS_TERMINATING &&
+ udma_is_chan_running(uc)))
+ udma_start(uc);
+ }
+
+ spin_unlock_irqrestore(&uc->vc.lock, flags);
+}
+
+static enum dma_status udma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+ enum dma_status ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&uc->vc.lock, flags);
+
+ ret = dma_cookie_status(chan, cookie, txstate);
+
+ if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc))
+ ret = DMA_PAUSED;
+
+ if (ret == DMA_COMPLETE || !txstate)
+ goto out;
+
+ if (uc->desc && uc->desc->vd.tx.cookie == cookie) {
+ u32 peer_bcnt = 0;
+ u32 bcnt = 0;
+ u32 residue = uc->desc->residue;
+ u32 delay = 0;
+
+ if (uc->desc->dir == DMA_MEM_TO_DEV) {
+ bcnt = udma_tchanrt_read(uc->tchan,
+ UDMA_TCHAN_RT_SBCNT_REG);
+
+ if (uc->config.ep_type != PSIL_EP_NATIVE) {
+ peer_bcnt = udma_tchanrt_read(uc->tchan,
+ UDMA_TCHAN_RT_PEER_BCNT_REG);
+
+ if (bcnt > peer_bcnt)
+ delay = bcnt - peer_bcnt;
+ }
+ } else if (uc->desc->dir == DMA_DEV_TO_MEM) {
+ bcnt = udma_rchanrt_read(uc->rchan,
+ UDMA_RCHAN_RT_BCNT_REG);
+
+ if (uc->config.ep_type != PSIL_EP_NATIVE) {
+ peer_bcnt = udma_rchanrt_read(uc->rchan,
+ UDMA_RCHAN_RT_PEER_BCNT_REG);
+
+ if (peer_bcnt > bcnt)
+ delay = peer_bcnt - bcnt;
+ }
+ } else {
+ bcnt = udma_tchanrt_read(uc->tchan,
+ UDMA_TCHAN_RT_BCNT_REG);
+ }
+
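+ /*
+ * uc->bcnt accumulates the bytes completed by earlier descriptors
+ * on this channel; subtract it to get the progress of the current
+ * descriptor.
+ */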
+ bcnt -= uc->bcnt;
+ if (bcnt && !(bcnt % uc->desc->residue))
+ residue = 0;
+ else
+ residue -= bcnt % uc->desc->residue;
+
+ if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) {
+ ret = DMA_COMPLETE;
+ delay = 0;
+ }
+
+ dma_set_residue(txstate, residue);
+ dma_set_in_flight_bytes(txstate, delay);
+
+ } else {
+ ret = DMA_COMPLETE;
+ }
+
+out:
+ spin_unlock_irqrestore(&uc->vc.lock, flags);
+ return ret;
+}
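
For context, a minimal client-side sketch of how the residue and in-flight bytes reported above would be consumed; the channel and cookie are placeholders, and in_flight_bytes is the field filled by dma_set_in_flight_bytes() in this series:

#include <linux/dmaengine.h>

static void poll_progress(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;
	enum dma_status status = dmaengine_tx_status(chan, cookie, &state);

	if (status == DMA_IN_PROGRESS || status == DMA_PAUSED)
		pr_info("residue=%u in_flight=%u\n",
			state.residue, state.in_flight_bytes);
}
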
+
+static int udma_pause(struct dma_chan *chan)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+
+ if (!uc->desc)
+ return -EINVAL;
+
+ /* pause the channel */
+ switch (uc->desc->dir) {
+ case DMA_DEV_TO_MEM:
+ udma_rchanrt_update_bits(uc->rchan,
+ UDMA_RCHAN_RT_PEER_RT_EN_REG,
+ UDMA_PEER_RT_EN_PAUSE,
+ UDMA_PEER_RT_EN_PAUSE);
+ break;
+ case DMA_MEM_TO_DEV:
+ udma_tchanrt_update_bits(uc->tchan,
+ UDMA_TCHAN_RT_PEER_RT_EN_REG,
+ UDMA_PEER_RT_EN_PAUSE,
+ UDMA_PEER_RT_EN_PAUSE);
+ break;
+ case DMA_MEM_TO_MEM:
+ udma_tchanrt_update_bits(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
+ UDMA_CHAN_RT_CTL_PAUSE,
+ UDMA_CHAN_RT_CTL_PAUSE);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int udma_resume(struct dma_chan *chan)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+
+ if (!uc->desc)
+ return -EINVAL;
+
+ /* resume the channel */
+ switch (uc->desc->dir) {
+ case DMA_DEV_TO_MEM:
+ udma_rchanrt_update_bits(uc->rchan,
+ UDMA_RCHAN_RT_PEER_RT_EN_REG,
+ UDMA_PEER_RT_EN_PAUSE, 0);
+
+ break;
+ case DMA_MEM_TO_DEV:
+ udma_tchanrt_update_bits(uc->tchan,
+ UDMA_TCHAN_RT_PEER_RT_EN_REG,
+ UDMA_PEER_RT_EN_PAUSE, 0);
+ break;
+ case DMA_MEM_TO_MEM:
+ udma_tchanrt_update_bits(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
+ UDMA_CHAN_RT_CTL_PAUSE, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int udma_terminate_all(struct dma_chan *chan)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&uc->vc.lock, flags);
+
+ if (udma_is_chan_running(uc))
+ udma_stop(uc);
+
+ if (uc->desc) {
+ uc->terminated_desc = uc->desc;
+ uc->desc = NULL;
+ uc->terminated_desc->terminated = true;
+ cancel_delayed_work(&uc->tx_drain.work);
+ }
+
+ uc->paused = false;
+
+ vchan_get_all_descriptors(&uc->vc, &head);
+ spin_unlock_irqrestore(&uc->vc.lock, flags);
+ vchan_dma_desc_free_list(&uc->vc, &head);
+
+ return 0;
+}
+
+static void udma_synchronize(struct dma_chan *chan)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+ unsigned long timeout = msecs_to_jiffies(1000);
+
+ vchan_synchronize(&uc->vc);
+
+ if (uc->state == UDMA_CHAN_IS_TERMINATING) {
+ timeout = wait_for_completion_timeout(&uc->teardown_completed,
+ timeout);
+ if (!timeout) {
+ dev_warn(uc->ud->dev, "chan%d teardown timeout!\n",
+ uc->id);
+ udma_dump_chan_stdata(uc);
+ udma_reset_chan(uc, true);
+ }
+ }
+
+ udma_reset_chan(uc, false);
+ if (udma_is_chan_running(uc))
+ dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id);
+
+ cancel_delayed_work_sync(&uc->tx_drain.work);
+ udma_reset_rings(uc);
+}
+
+static void udma_desc_pre_callback(struct virt_dma_chan *vc,
+ struct virt_dma_desc *vd,
+ struct dmaengine_result *result)
+{
+ struct udma_chan *uc = to_udma_chan(&vc->chan);
+ struct udma_desc *d;
+
+ if (!vd)
+ return;
+
+ d = to_udma_desc(&vd->tx);
+
+ if (d->metadata_size)
+ udma_fetch_epib(uc, d);
+
+ /* Provide residue information for the client */
+ if (result) {
+ void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx);
+
+ if (cppi5_desc_get_type(desc_vaddr) ==
+ CPPI5_INFO0_DESC_TYPE_VAL_HOST) {
+ result->residue = d->residue -
+ cppi5_hdesc_get_pktlen(desc_vaddr);
+ if (result->residue)
+ result->result = DMA_TRANS_ABORTED;
+ else
+ result->result = DMA_TRANS_NOERROR;
+ } else {
+ result->residue = 0;
+ result->result = DMA_TRANS_NOERROR;
+ }
+ }
+}
+
+/*
+ * This tasklet handles the completion of a DMA descriptor by
+ * invoking its callback and then freeing the descriptor.
+ */
+static void udma_vchan_complete(unsigned long arg)
+{
+ struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
+ struct virt_dma_desc *vd, *_vd;
+ struct dmaengine_desc_callback cb;
+ LIST_HEAD(head);
+
+ spin_lock_irq(&vc->lock);
+ list_splice_tail_init(&vc->desc_completed, &head);
+ vd = vc->cyclic;
+ if (vd) {
+ vc->cyclic = NULL;
+ dmaengine_desc_get_callback(&vd->tx, &cb);
+ } else {
+ memset(&cb, 0, sizeof(cb));
+ }
+ spin_unlock_irq(&vc->lock);
+
+ udma_desc_pre_callback(vc, vd, NULL);
+ dmaengine_desc_callback_invoke(&cb, NULL);
+
+ list_for_each_entry_safe(vd, _vd, &head, node) {
+ struct dmaengine_result result;
+
+ dmaengine_desc_get_callback(&vd->tx, &cb);
+
+ list_del(&vd->node);
+
+ udma_desc_pre_callback(vc, vd, &result);
+ dmaengine_desc_callback_invoke(&cb, &result);
+
+ vchan_vdesc_fini(vd);
+ }
+}
+
+static void udma_free_chan_resources(struct dma_chan *chan)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+ struct udma_dev *ud = to_udma_dev(chan->device);
+
+ udma_terminate_all(chan);
+ if (uc->terminated_desc) {
+ udma_reset_chan(uc, false);
+ udma_reset_rings(uc);
+ }
+
+ cancel_delayed_work_sync(&uc->tx_drain.work);
+ destroy_delayed_work_on_stack(&uc->tx_drain.work);
+
+ if (uc->irq_num_ring > 0) {
+ free_irq(uc->irq_num_ring, uc);
+
+ uc->irq_num_ring = 0;
+ }
+ if (uc->irq_num_udma > 0) {
+ free_irq(uc->irq_num_udma, uc);
+
+ uc->irq_num_udma = 0;
+ }
+
+ /* Release PSI-L pairing */
+ if (uc->psil_paired) {
+ navss_psil_unpair(ud, uc->config.src_thread,
+ uc->config.dst_thread);
+ uc->psil_paired = false;
+ }
+
+ vchan_free_chan_resources(&uc->vc);
+ tasklet_kill(&uc->vc.task);
+
+ udma_free_tx_resources(uc);
+ udma_free_rx_resources(uc);
+ udma_reset_uchan(uc);
+
+ if (uc->use_dma_pool) {
+ dma_pool_destroy(uc->hdesc_pool);
+ uc->use_dma_pool = false;
+ }
+}
+
+static struct platform_driver udma_driver;
+
+static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+ struct udma_chan_config *ucc;
+ struct psil_endpoint_config *ep_config;
+ struct udma_chan *uc;
+ struct udma_dev *ud;
+ u32 *args;
+
+ if (chan->device->dev->driver != &udma_driver.driver)
+ return false;
+
+ uc = to_udma_chan(chan);
+ ucc = &uc->config;
+ ud = uc->ud;
+ args = param;
+
+ ucc->remote_thread_id = args[0];
+
+ if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)
+ ucc->dir = DMA_MEM_TO_DEV;
+ else
+ ucc->dir = DMA_DEV_TO_MEM;
+
+ ep_config = psil_get_ep_config(ucc->remote_thread_id);
+ if (IS_ERR(ep_config)) {
+		dev_err(ud->dev, "No configuration for PSI-L thread 0x%04x\n",
+ ucc->remote_thread_id);
+ ucc->dir = DMA_MEM_TO_MEM;
+ ucc->remote_thread_id = -1;
+ return false;
+ }
+
+ ucc->pkt_mode = ep_config->pkt_mode;
+ ucc->channel_tpl = ep_config->channel_tpl;
+ ucc->notdpkt = ep_config->notdpkt;
+ ucc->ep_type = ep_config->ep_type;
+
+ if (ucc->ep_type != PSIL_EP_NATIVE) {
+ const struct udma_match_data *match_data = ud->match_data;
+
+ if (match_data->flags & UDMA_FLAG_PDMA_ACC32)
+ ucc->enable_acc32 = ep_config->pdma_acc32;
+ if (match_data->flags & UDMA_FLAG_PDMA_BURST)
+ ucc->enable_burst = ep_config->pdma_burst;
+ }
+
+ ucc->needs_epib = ep_config->needs_epib;
+ ucc->psd_size = ep_config->psd_size;
+ ucc->metadata_size =
+ (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) +
+ ucc->psd_size;
+
+ if (ucc->pkt_mode)
+ ucc->hdesc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
+ ucc->metadata_size, ud->desc_align);
+
+ dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n", uc->id,
+ ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir));
+
+ return true;
+}
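
A stand-alone illustration of the direction decoding in the filter above (not driver code): PSI-L destination threads carry K3_PSIL_DST_THREAD_ID_OFFSET (0x8000 on these SoCs) in their ID, which is how the filter derives MEM_TO_DEV vs DEV_TO_MEM.

#include <stdio.h>

#define K3_PSIL_DST_THREAD_ID_OFFSET 0x8000

int main(void)
{
	unsigned int thread = 0xc400;	/* example dma-cells value from DT */

	printf("0x%04x is a %s thread\n", thread,
	       (thread & K3_PSIL_DST_THREAD_ID_OFFSET) ?
	       "dst (MEM_TO_DEV)" : "src (DEV_TO_MEM)");
	return 0;
}
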
+
+static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct udma_dev *ud = ofdma->of_dma_data;
+ dma_cap_mask_t mask = ud->ddev.cap_mask;
+ struct dma_chan *chan;
+
+ if (dma_spec->args_count != 1)
+ return NULL;
+
+ chan = __dma_request_channel(&mask, udma_dma_filter_fn,
+ &dma_spec->args[0], ofdma->of_node);
+ if (!chan) {
+		dev_err(ud->dev, "failed to get channel in %s\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return chan;
+}
+
+static struct udma_match_data am654_main_data = {
+ .psil_base = 0x1000,
+ .enable_memcpy_support = true,
+ .statictr_z_mask = GENMASK(11, 0),
+ .rchan_oes_offset = 0x2000,
+ .tpl_levels = 2,
+ .level_start_idx = {
+ [0] = 8, /* Normal channels */
+ [1] = 0, /* High Throughput channels */
+ },
+};
+
+static struct udma_match_data am654_mcu_data = {
+ .psil_base = 0x6000,
+ .enable_memcpy_support = true, /* TEST: DMA domains */
+ .statictr_z_mask = GENMASK(11, 0),
+ .rchan_oes_offset = 0x2000,
+ .tpl_levels = 2,
+ .level_start_idx = {
+ [0] = 2, /* Normal channels */
+ [1] = 0, /* High Throughput channels */
+ },
+};
+
+static struct udma_match_data j721e_main_data = {
+ .psil_base = 0x1000,
+ .enable_memcpy_support = true,
+ .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
+ .statictr_z_mask = GENMASK(23, 0),
+ .rchan_oes_offset = 0x400,
+ .tpl_levels = 3,
+ .level_start_idx = {
+ [0] = 16, /* Normal channels */
+ [1] = 4, /* High Throughput channels */
+ [2] = 0, /* Ultra High Throughput channels */
+ },
+};
+
+static struct udma_match_data j721e_mcu_data = {
+ .psil_base = 0x6000,
+ .enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */
+ .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
+ .statictr_z_mask = GENMASK(23, 0),
+ .rchan_oes_offset = 0x400,
+ .tpl_levels = 2,
+ .level_start_idx = {
+ [0] = 2, /* Normal channels */
+ [1] = 0, /* High Throughput channels */
+ },
+};
+
+static const struct of_device_id udma_of_match[] = {
+ {
+ .compatible = "ti,am654-navss-main-udmap",
+ .data = &am654_main_data,
+ },
+ {
+ .compatible = "ti,am654-navss-mcu-udmap",
+ .data = &am654_mcu_data,
+ }, {
+ .compatible = "ti,j721e-navss-main-udmap",
+ .data = &j721e_main_data,
+ }, {
+ .compatible = "ti,j721e-navss-mcu-udmap",
+ .data = &j721e_mcu_data,
+ },
+ { /* Sentinel */ },
+};
+
+static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
+{
+ struct resource *res;
+ int i;
+
+ for (i = 0; i < MMR_LAST; i++) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ mmr_names[i]);
+ ud->mmrs[i] = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ud->mmrs[i]))
+ return PTR_ERR(ud->mmrs[i]);
+ }
+
+ return 0;
+}
+
+static int udma_setup_resources(struct udma_dev *ud)
+{
+ struct device *dev = ud->dev;
+ int ch_count, ret, i, j;
+ u32 cap2, cap3;
+ struct ti_sci_resource_desc *rm_desc;
+ struct ti_sci_resource *rm_res, irq_res;
+ struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+ static const char * const range_names[] = { "ti,sci-rm-range-tchan",
+ "ti,sci-rm-range-rchan",
+ "ti,sci-rm-range-rflow" };
+
+ cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
+ cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
+
+ ud->rflow_cnt = cap3 & 0x3fff;
+ ud->tchan_cnt = cap2 & 0x1ff;
+ ud->echan_cnt = (cap2 >> 9) & 0x1ff;
+ ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
+ ch_count = ud->tchan_cnt + ud->rchan_cnt;
+
+ ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
+ sizeof(unsigned long), GFP_KERNEL);
+ ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
+ GFP_KERNEL);
+ ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
+ sizeof(unsigned long), GFP_KERNEL);
+ ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
+ GFP_KERNEL);
+ ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ ud->rflow_gp_map_allocated = devm_kcalloc(dev,
+ BITS_TO_LONGS(ud->rflow_cnt),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
+ GFP_KERNEL);
+
+ if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map ||
+ !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans ||
+ !ud->rflows || !ud->rflow_in_use)
+ return -ENOMEM;
+
+ /*
+	 * RX flows with the same IDs as RX channels are reserved to be used
+	 * as default flows if the remote HW can't generate flow_ids. Those
+	 * RX flows can only be requested explicitly, by ID.
+ */
+ bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt);
+
+ /* by default no GP rflows are assigned to Linux */
+ bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt);
+
+ /* Get resource ranges from tisci */
+ for (i = 0; i < RM_RANGE_LAST; i++)
+ tisci_rm->rm_ranges[i] =
+ devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
+ tisci_rm->tisci_dev_id,
+ (char *)range_names[i]);
+
+ /* tchan ranges */
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
+ if (IS_ERR(rm_res)) {
+ bitmap_zero(ud->tchan_map, ud->tchan_cnt);
+ } else {
+ bitmap_fill(ud->tchan_map, ud->tchan_cnt);
+ for (i = 0; i < rm_res->sets; i++) {
+ rm_desc = &rm_res->desc[i];
+ bitmap_clear(ud->tchan_map, rm_desc->start,
+ rm_desc->num);
+ dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n",
+ rm_desc->start, rm_desc->num);
+ }
+ }
+ irq_res.sets = rm_res->sets;
+
+ /* rchan and matching default flow ranges */
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
+ if (IS_ERR(rm_res)) {
+ bitmap_zero(ud->rchan_map, ud->rchan_cnt);
+ } else {
+ bitmap_fill(ud->rchan_map, ud->rchan_cnt);
+ for (i = 0; i < rm_res->sets; i++) {
+ rm_desc = &rm_res->desc[i];
+ bitmap_clear(ud->rchan_map, rm_desc->start,
+ rm_desc->num);
+ dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n",
+ rm_desc->start, rm_desc->num);
+ }
+ }
+
+ irq_res.sets += rm_res->sets;
+ irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
+ for (i = 0; i < rm_res->sets; i++) {
+ irq_res.desc[i].start = rm_res->desc[i].start;
+ irq_res.desc[i].num = rm_res->desc[i].num;
+ }
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
+ for (j = 0; j < rm_res->sets; j++, i++) {
+ irq_res.desc[i].start = rm_res->desc[j].start +
+ ud->match_data->rchan_oes_offset;
+ irq_res.desc[i].num = rm_res->desc[j].num;
+ }
+ ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
+ kfree(irq_res.desc);
+ if (ret) {
+ dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
+ return ret;
+ }
+
+ /* GP rflow ranges */
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
+ if (IS_ERR(rm_res)) {
+ /* all gp flows are assigned exclusively to Linux */
+ bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt,
+ ud->rflow_cnt - ud->rchan_cnt);
+ } else {
+ for (i = 0; i < rm_res->sets; i++) {
+ rm_desc = &rm_res->desc[i];
+ bitmap_clear(ud->rflow_gp_map, rm_desc->start,
+ rm_desc->num);
+ dev_dbg(dev, "ti-sci-res: rflow: %d:%d\n",
+ rm_desc->start, rm_desc->num);
+ }
+ }
+
+ ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
+ ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
+ if (!ch_count)
+ return -ENODEV;
+
+ ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
+ GFP_KERNEL);
+ if (!ud->channels)
+ return -ENOMEM;
+
+ dev_info(dev, "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
+ ch_count,
+ ud->tchan_cnt - bitmap_weight(ud->tchan_map, ud->tchan_cnt),
+ ud->rchan_cnt - bitmap_weight(ud->rchan_map, ud->rchan_cnt),
+ ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map,
+ ud->rflow_cnt));
+
+ return ch_count;
+}
+
+#define TI_UDMAC_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
+
+static int udma_probe(struct platform_device *pdev)
+{
+ struct device_node *navss_node = pdev->dev.parent->of_node;
+ struct device *dev = &pdev->dev;
+ struct udma_dev *ud;
+ const struct of_device_id *match;
+ int i, ret;
+ int ch_count;
+
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (ret)
+		dev_err(dev, "failed to set DMA mask\n");
+
+ ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL);
+ if (!ud)
+ return -ENOMEM;
+
+ ret = udma_get_mmrs(pdev, ud);
+ if (ret)
+ return ret;
+
+ ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci");
+ if (IS_ERR(ud->tisci_rm.tisci))
+ return PTR_ERR(ud->tisci_rm.tisci);
+
+ ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id",
+ &ud->tisci_rm.tisci_dev_id);
+ if (ret) {
+ dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
+ return ret;
+ }
+ pdev->id = ud->tisci_rm.tisci_dev_id;
+
+ ret = of_property_read_u32(navss_node, "ti,sci-dev-id",
+ &ud->tisci_rm.tisci_navss_dev_id);
+ if (ret) {
+ dev_err(dev, "NAVSS ti,sci-dev-id read failure %d\n", ret);
+ return ret;
+ }
+
+ ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops;
+ ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops;
+
+ ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
+ if (IS_ERR(ud->ringacc))
+ return PTR_ERR(ud->ringacc);
+
+ dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
+ DOMAIN_BUS_TI_SCI_INTA_MSI);
+ if (!dev->msi_domain) {
+ dev_err(dev, "Failed to get MSI domain\n");
+ return -EPROBE_DEFER;
+ }
+
+ match = of_match_node(udma_of_match, dev->of_node);
+ if (!match) {
+ dev_err(dev, "No compatible match found\n");
+ return -ENODEV;
+ }
+ ud->match_data = match->data;
+
+ dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask);
+ dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask);
+
+ ud->ddev.device_alloc_chan_resources = udma_alloc_chan_resources;
+ ud->ddev.device_config = udma_slave_config;
+ ud->ddev.device_prep_slave_sg = udma_prep_slave_sg;
+ ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic;
+ ud->ddev.device_issue_pending = udma_issue_pending;
+ ud->ddev.device_tx_status = udma_tx_status;
+ ud->ddev.device_pause = udma_pause;
+ ud->ddev.device_resume = udma_resume;
+ ud->ddev.device_terminate_all = udma_terminate_all;
+ ud->ddev.device_synchronize = udma_synchronize;
+
+ ud->ddev.device_free_chan_resources = udma_free_chan_resources;
+ ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS;
+ ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS;
+ ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+ ud->ddev.copy_align = DMAENGINE_ALIGN_8_BYTES;
+ ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT |
+ DESC_METADATA_ENGINE;
+ if (ud->match_data->enable_memcpy_support) {
+ dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask);
+ ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy;
+ ud->ddev.directions |= BIT(DMA_MEM_TO_MEM);
+ }
+
+ ud->ddev.dev = dev;
+ ud->dev = dev;
+ ud->psil_base = ud->match_data->psil_base;
+
+ INIT_LIST_HEAD(&ud->ddev.channels);
+ INIT_LIST_HEAD(&ud->desc_to_purge);
+
+ ch_count = udma_setup_resources(ud);
+ if (ch_count <= 0)
+ return ch_count;
+
+ spin_lock_init(&ud->lock);
+ INIT_WORK(&ud->purge_work, udma_purge_desc_work);
+
+ ud->desc_align = 64;
+ if (ud->desc_align < dma_get_cache_alignment())
+ ud->desc_align = dma_get_cache_alignment();
+
+ for (i = 0; i < ud->tchan_cnt; i++) {
+ struct udma_tchan *tchan = &ud->tchans[i];
+
+ tchan->id = i;
+ tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000;
+ }
+
+ for (i = 0; i < ud->rchan_cnt; i++) {
+ struct udma_rchan *rchan = &ud->rchans[i];
+
+ rchan->id = i;
+ rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000;
+ }
+
+ for (i = 0; i < ud->rflow_cnt; i++) {
+ struct udma_rflow *rflow = &ud->rflows[i];
+
+ rflow->id = i;
+ }
+
+ for (i = 0; i < ch_count; i++) {
+ struct udma_chan *uc = &ud->channels[i];
+
+ uc->ud = ud;
+ uc->vc.desc_free = udma_desc_free;
+ uc->id = i;
+ uc->tchan = NULL;
+ uc->rchan = NULL;
+ uc->config.remote_thread_id = -1;
+ uc->config.dir = DMA_MEM_TO_MEM;
+ uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
+ dev_name(dev), i);
+
+ vchan_init(&uc->vc, &ud->ddev);
+ /* Use custom vchan completion handling */
+ tasklet_init(&uc->vc.task, udma_vchan_complete,
+ (unsigned long)&uc->vc);
+ init_completion(&uc->teardown_completed);
+ }
+
+ ret = dma_async_device_register(&ud->ddev);
+ if (ret) {
+ dev_err(dev, "failed to register slave DMA engine: %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, ud);
+
+ ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud);
+ if (ret) {
+ dev_err(dev, "failed to register of_dma controller\n");
+ dma_async_device_unregister(&ud->ddev);
+ }
+
+ return ret;
+}
+
+static struct platform_driver udma_driver = {
+ .driver = {
+ .name = "ti-udma",
+ .of_match_table = udma_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = udma_probe,
+};
+builtin_platform_driver(udma_driver);
+
+/* Private interfaces to UDMA */
+#include "k3-udma-private.c"
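
To show how the xlate/filter path above is reached, here is a hedged sketch of a slave client requesting a channel by name; the "tx" name, device, and FIFO address are placeholders:

#include <linux/dmaengine.h>

static int request_udma_tx(struct device *dev, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction = DMA_MEM_TO_DEV,
		.dst_addr = fifo_addr,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst = 1,
	};
	struct dma_chan *chan;

	/* Resolved through udma_of_xlate() + udma_dma_filter_fn() above */
	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	return dmaengine_slave_config(chan, &cfg);
}
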
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#ifndef K3_UDMA_H_
+#define K3_UDMA_H_
+
+#include <linux/soc/ti/ti_sci_protocol.h>
+
+/* Global registers */
+#define UDMA_REV_REG 0x0
+#define UDMA_PERF_CTL_REG 0x4
+#define UDMA_EMU_CTL_REG 0x8
+#define UDMA_PSIL_TO_REG 0x10
+#define UDMA_UTC_CTL_REG 0x1c
+#define UDMA_CAP_REG(i) (0x20 + ((i) * 4))
+#define UDMA_RX_FLOW_ID_FW_OES_REG 0x80
+#define UDMA_RX_FLOW_ID_FW_STATUS_REG 0x88
+
+/* TX chan RT regs */
+#define UDMA_TCHAN_RT_CTL_REG 0x0
+#define UDMA_TCHAN_RT_SWTRIG_REG 0x8
+#define UDMA_TCHAN_RT_STDATA_REG 0x80
+
+#define UDMA_TCHAN_RT_PEER_REG(i) (0x200 + ((i) * 0x4))
+#define UDMA_TCHAN_RT_PEER_STATIC_TR_XY_REG \
+ UDMA_TCHAN_RT_PEER_REG(0) /* PSI-L: 0x400 */
+#define UDMA_TCHAN_RT_PEER_STATIC_TR_Z_REG \
+ UDMA_TCHAN_RT_PEER_REG(1) /* PSI-L: 0x401 */
+#define UDMA_TCHAN_RT_PEER_BCNT_REG \
+ UDMA_TCHAN_RT_PEER_REG(4) /* PSI-L: 0x404 */
+#define UDMA_TCHAN_RT_PEER_RT_EN_REG \
+ UDMA_TCHAN_RT_PEER_REG(8) /* PSI-L: 0x408 */
+
+#define UDMA_TCHAN_RT_PCNT_REG 0x400
+#define UDMA_TCHAN_RT_BCNT_REG 0x408
+#define UDMA_TCHAN_RT_SBCNT_REG 0x410
+
+/* RX chan RT regs */
+#define UDMA_RCHAN_RT_CTL_REG 0x0
+#define UDMA_RCHAN_RT_SWTRIG_REG 0x8
+#define UDMA_RCHAN_RT_STDATA_REG 0x80
+
+#define UDMA_RCHAN_RT_PEER_REG(i) (0x200 + ((i) * 0x4))
+#define UDMA_RCHAN_RT_PEER_STATIC_TR_XY_REG \
+ UDMA_RCHAN_RT_PEER_REG(0) /* PSI-L: 0x400 */
+#define UDMA_RCHAN_RT_PEER_STATIC_TR_Z_REG \
+ UDMA_RCHAN_RT_PEER_REG(1) /* PSI-L: 0x401 */
+#define UDMA_RCHAN_RT_PEER_BCNT_REG \
+ UDMA_RCHAN_RT_PEER_REG(4) /* PSI-L: 0x404 */
+#define UDMA_RCHAN_RT_PEER_RT_EN_REG \
+ UDMA_RCHAN_RT_PEER_REG(8) /* PSI-L: 0x408 */
+
+#define UDMA_RCHAN_RT_PCNT_REG 0x400
+#define UDMA_RCHAN_RT_BCNT_REG 0x408
+#define UDMA_RCHAN_RT_SBCNT_REG 0x410
+
+/* UDMA_TCHAN_RT_CTL_REG/UDMA_RCHAN_RT_CTL_REG */
+#define UDMA_CHAN_RT_CTL_EN BIT(31)
+#define UDMA_CHAN_RT_CTL_TDOWN BIT(30)
+#define UDMA_CHAN_RT_CTL_PAUSE BIT(29)
+#define UDMA_CHAN_RT_CTL_FTDOWN BIT(28)
+#define UDMA_CHAN_RT_CTL_ERROR BIT(0)
+
+/* UDMA_TCHAN_RT_PEER_RT_EN_REG/UDMA_RCHAN_RT_PEER_RT_EN_REG (PSI-L: 0x408) */
+#define UDMA_PEER_RT_EN_ENABLE BIT(31)
+#define UDMA_PEER_RT_EN_TEARDOWN BIT(30)
+#define UDMA_PEER_RT_EN_PAUSE BIT(29)
+#define UDMA_PEER_RT_EN_FLUSH BIT(28)
+#define UDMA_PEER_RT_EN_IDLE BIT(1)
+
+/*
+ * UDMA_TCHAN_RT_PEER_STATIC_TR_XY_REG /
+ * UDMA_RCHAN_RT_PEER_STATIC_TR_XY_REG
+ */
+#define PDMA_STATIC_TR_X_MASK GENMASK(26, 24)
+#define PDMA_STATIC_TR_X_SHIFT (24)
+#define PDMA_STATIC_TR_Y_MASK GENMASK(11, 0)
+#define PDMA_STATIC_TR_Y_SHIFT (0)
+
+#define PDMA_STATIC_TR_Y(x) \
+ (((x) << PDMA_STATIC_TR_Y_SHIFT) & PDMA_STATIC_TR_Y_MASK)
+#define PDMA_STATIC_TR_X(x) \
+ (((x) << PDMA_STATIC_TR_X_SHIFT) & PDMA_STATIC_TR_X_MASK)
+
+#define PDMA_STATIC_TR_XY_ACC32 BIT(30)
+#define PDMA_STATIC_TR_XY_BURST BIT(31)
+
+/*
+ * UDMA_TCHAN_RT_PEER_STATIC_TR_Z_REG /
+ * UDMA_RCHAN_RT_PEER_STATIC_TR_Z_REG
+ */
+#define PDMA_STATIC_TR_Z(x, mask) ((x) & (mask))
+
+struct udma_dev;
+struct udma_tchan;
+struct udma_rchan;
+struct udma_rflow;
+
+enum udma_rm_range {
+ RM_RANGE_TCHAN = 0,
+ RM_RANGE_RCHAN,
+ RM_RANGE_RFLOW,
+ RM_RANGE_LAST,
+};
+
+struct udma_tisci_rm {
+ const struct ti_sci_handle *tisci;
+ const struct ti_sci_rm_udmap_ops *tisci_udmap_ops;
+ u32 tisci_dev_id;
+
+ /* tisci information for PSI-L thread pairing/unpairing */
+ const struct ti_sci_rm_psil_ops *tisci_psil_ops;
+ u32 tisci_navss_dev_id;
+
+ struct ti_sci_resource *rm_ranges[RM_RANGE_LAST];
+};
+
+/* Direct access to UDMA low-level resources for the glue layer */
+int xudma_navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread);
+int xudma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
+ u32 dst_thread);
+
+struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property);
+void xudma_dev_put(struct udma_dev *ud);
+u32 xudma_dev_get_psil_base(struct udma_dev *ud);
+struct udma_tisci_rm *xudma_dev_get_tisci_rm(struct udma_dev *ud);
+
+int xudma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt);
+int xudma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt);
+
+struct udma_tchan *xudma_tchan_get(struct udma_dev *ud, int id);
+struct udma_rchan *xudma_rchan_get(struct udma_dev *ud, int id);
+struct udma_rflow *xudma_rflow_get(struct udma_dev *ud, int id);
+
+void xudma_tchan_put(struct udma_dev *ud, struct udma_tchan *p);
+void xudma_rchan_put(struct udma_dev *ud, struct udma_rchan *p);
+void xudma_rflow_put(struct udma_dev *ud, struct udma_rflow *p);
+
+int xudma_tchan_get_id(struct udma_tchan *p);
+int xudma_rchan_get_id(struct udma_rchan *p);
+int xudma_rflow_get_id(struct udma_rflow *p);
+
+u32 xudma_tchanrt_read(struct udma_tchan *tchan, int reg);
+void xudma_tchanrt_write(struct udma_tchan *tchan, int reg, u32 val);
+u32 xudma_rchanrt_read(struct udma_rchan *rchan, int reg);
+void xudma_rchanrt_write(struct udma_rchan *rchan, int reg, u32 val);
+bool xudma_rflow_is_gp(struct udma_dev *ud, int id);
+
+#endif /* K3_UDMA_H_ */
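
As a usage note on the PDMA_STATIC_TR_* macros above, a minimal sketch of composing the XY word, assuming X carries the element-size code and Y the element count (which is how the driver programs UDMA_*CHAN_RT_PEER_STATIC_TR_XY_REG); pdma_static_tr_xy() is a hypothetical helper, and the acc32/burst bits apply only when the PSI-L endpoint supports them:

static u32 pdma_static_tr_xy(u8 elsize, u16 elcnt, bool acc32, bool burst)
{
	u32 xy = PDMA_STATIC_TR_X(elsize) | PDMA_STATIC_TR_Y(elcnt);

	if (acc32)
		xy |= PDMA_STATIC_TR_XY_ACC32;
	if (burst)
		xy |= PDMA_STATIC_TR_XY_BURST;
	return xy;
}
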
struct virt_dma_desc *vd, *_vd;
list_for_each_entry_safe(vd, _vd, head, node) {
- if (dmaengine_desc_test_reuse(&vd->tx)) {
- list_move_tail(&vd->node, &vc->desc_allocated);
- } else {
- dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
- list_del(&vd->node);
- vc->desc_free(vd);
- }
+ list_del(&vd->node);
+ vchan_vdesc_fini(vd);
}
}
EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
INIT_LIST_HEAD(&vc->desc_submitted);
INIT_LIST_HEAD(&vc->desc_issued);
INIT_LIST_HEAD(&vc->desc_completed);
+ INIT_LIST_HEAD(&vc->desc_terminated);
tasklet_init(&vc->task, vchan_complete, (unsigned long)vc);
struct list_head desc_submitted;
struct list_head desc_issued;
struct list_head desc_completed;
+ struct list_head desc_terminated;
struct virt_dma_desc *cyclic;
- struct virt_dma_desc *vd_terminated;
};
static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
{
struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
- if (dmaengine_desc_test_reuse(&vd->tx))
+ if (dmaengine_desc_test_reuse(&vd->tx)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&vc->lock, flags);
list_add(&vd->node, &vc->desc_allocated);
- else
+ spin_unlock_irqrestore(&vc->lock, flags);
+ } else {
vc->desc_free(vd);
+ }
}
/**
{
struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
- /* free up stuck descriptor */
- if (vc->vd_terminated)
- vchan_vdesc_fini(vc->vd_terminated);
+ list_add_tail(&vd->node, &vc->desc_terminated);
- vc->vd_terminated = vd;
if (vc->cyclic == vd)
vc->cyclic = NULL;
}
list_splice_tail_init(&vc->desc_submitted, head);
list_splice_tail_init(&vc->desc_issued, head);
list_splice_tail_init(&vc->desc_completed, head);
+ list_splice_tail_init(&vc->desc_terminated, head);
}
static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
*/
static inline void vchan_synchronize(struct virt_dma_chan *vc)
{
+ LIST_HEAD(head);
unsigned long flags;
tasklet_kill(&vc->task);
spin_lock_irqsave(&vc->lock, flags);
- if (vc->vd_terminated) {
- vchan_vdesc_fini(vc->vd_terminated);
- vc->vd_terminated = NULL;
- }
+
+ list_splice_tail_init(&vc->desc_terminated, &head);
+
spin_unlock_irqrestore(&vc->lock, flags);
+
+ vchan_dma_desc_free_list(vc, &head);
}
#endif
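
A hedged client-side view of the new desc_terminated flow above: terminate_async() parks the in-flight descriptor on the list, and a later synchronize reaps it through vchan_synchronize().

#include <linux/dmaengine.h>

static void stop_channel(struct dma_chan *chan)
{
	/* Queues the active descriptor on desc_terminated */
	dmaengine_terminate_async(chan);
	/* Kills the completion tasklet and frees the terminated list */
	dmaengine_synchronize(chan);
}
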
/* Max transfer size per descriptor */
#define ZYNQMP_DMA_MAX_TRANS_LEN 0x40000000
+/* Max burst lengths */
+#define ZYNQMP_DMA_MAX_DST_BURST_LEN 32768U
+#define ZYNQMP_DMA_MAX_SRC_BURST_LEN 32768U
+
/* Reset values for data attributes */
#define ZYNQMP_DMA_AXCACHE_VAL 0xF
-#define ZYNQMP_DMA_ARLEN_RST_VAL 0xF
-#define ZYNQMP_DMA_AWLEN_RST_VAL 0xF
#define ZYNQMP_DMA_SRC_ISSUE_RST_VAL 0x1F
static void zynqmp_dma_config(struct zynqmp_dma_chan *chan)
{
- u32 val;
+ u32 val, burst_val;
val = readl(chan->regs + ZYNQMP_DMA_CTRL0);
val |= ZYNQMP_DMA_POINT_TYPE_SG;
writel(val, chan->regs + ZYNQMP_DMA_CTRL0);
val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR);
+ burst_val = __ilog2_u32(chan->src_burst_len);
val = (val & ~ZYNQMP_DMA_ARLEN) |
- (chan->src_burst_len << ZYNQMP_DMA_ARLEN_OFST);
+ ((burst_val << ZYNQMP_DMA_ARLEN_OFST) & ZYNQMP_DMA_ARLEN);
+ burst_val = __ilog2_u32(chan->dst_burst_len);
val = (val & ~ZYNQMP_DMA_AWLEN) |
- (chan->dst_burst_len << ZYNQMP_DMA_AWLEN_OFST);
+ ((burst_val << ZYNQMP_DMA_AWLEN_OFST) & ZYNQMP_DMA_AWLEN);
writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR);
}
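
A stand-alone illustration (not driver code) of the burst programming above: the requested maxburst is clamped to [1, 32768] and the register field holds log2 of the burst length, so a non-power-of-two request is effectively rounded down.

#include <stdio.h>

static unsigned int ilog2_u32(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int maxburst = 200;	/* requested, already within [1, 32768] */
	unsigned int burst_val = ilog2_u32(maxburst);

	printf("ARLEN field = %u -> %u-beat bursts\n",
	       burst_val, 1U << burst_val);	/* 7 -> 128 */
	return 0;
}
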
{
struct zynqmp_dma_chan *chan = to_chan(dchan);
- chan->src_burst_len = config->src_maxburst;
- chan->dst_burst_len = config->dst_maxburst;
+ chan->src_burst_len = clamp(config->src_maxburst, 1U,
+ ZYNQMP_DMA_MAX_SRC_BURST_LEN);
+ chan->dst_burst_len = clamp(config->dst_maxburst, 1U,
+ ZYNQMP_DMA_MAX_DST_BURST_LEN);
return 0;
}
return PTR_ERR(chan->regs);
chan->bus_width = ZYNQMP_DMA_BUS_WIDTH_64;
- chan->dst_burst_len = ZYNQMP_DMA_AWLEN_RST_VAL;
- chan->src_burst_len = ZYNQMP_DMA_ARLEN_RST_VAL;
+ chan->dst_burst_len = ZYNQMP_DMA_MAX_DST_BURST_LEN;
+ chan->src_burst_len = ZYNQMP_DMA_MAX_SRC_BURST_LEN;
err = of_property_read_u32(node, "xlnx,bus-width", &chan->bus_width);
if (err < 0) {
dev_err(&pdev->dev, "missing xlnx,bus-width property\n");
tristate "Texas Instruments DDR3 ECC Controller"
depends on ARCH_KEYSTONE || SOC_DRA7XX
help
- Support for error detection and correction on the
- TI SoCs.
+ Support for error detection and correction on the TI SoCs.
config EDAC_QCOM
tristate "QCOM EDAC Controller"
scrubval = scrubrates[i].scrubval;
- if (pvt->fam == 0x17 || pvt->fam == 0x18) {
+ if (pvt->umc) {
__f17h_set_scrubval(pvt, scrubval);
} else if (pvt->fam == 0x15 && pvt->model == 0x60) {
f15h_select_dct(pvt, 0);
int i, retval = -EINVAL;
u32 scrubval = 0;
- switch (pvt->fam) {
- case 0x15:
- /* Erratum #505 */
- if (pvt->model < 0x10)
- f15h_select_dct(pvt, 0);
-
- if (pvt->model == 0x60)
- amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
- break;
-
- case 0x17:
- case 0x18:
+ if (pvt->umc) {
amd64_read_pci_cfg(pvt->F6, F17H_SCR_BASE_ADDR, &scrubval);
if (scrubval & BIT(0)) {
amd64_read_pci_cfg(pvt->F6, F17H_SCR_LIMIT_ADDR, &scrubval);
} else {
scrubval = 0;
}
- break;
+ } else if (pvt->fam == 0x15) {
+ /* Erratum #505 */
+ if (pvt->model < 0x10)
+ f15h_select_dct(pvt, 0);
- default:
+ if (pvt->model == 0x60)
+ amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
+ } else {
amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
- break;
}
scrubval = scrubval & 0x001F;
{
u32 dram_ctrl, dcsm;
+ if (pvt->umc) {
+ if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5))
+ pvt->dram_type = MEM_LRDDR4;
+ else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4))
+ pvt->dram_type = MEM_RDDR4;
+ else
+ pvt->dram_type = MEM_DDR4;
+ return;
+ }
+
switch (pvt->fam) {
case 0xf:
if (pvt->ext_model >= K8_REV_F)
case 0x16:
goto ddr3;
- case 0x17:
- case 0x18:
- if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5))
- pvt->dram_type = MEM_LRDDR4;
- else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4))
- pvt->dram_type = MEM_RDDR4;
- else
- pvt->dram_type = MEM_DDR4;
- return;
-
default:
WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam);
pvt->dram_type = MEM_EMPTY;
.dbam_to_cs = f17_addr_mask_to_cs_size,
}
},
+ [F19_CPUS] = {
+ .ctl_name = "F19h",
+ .f0_id = PCI_DEVICE_ID_AMD_19H_DF_F0,
+ .f6_id = PCI_DEVICE_ID_AMD_19H_DF_F6,
+ .max_mcs = 8,
+ .ops = {
+ .early_channel_count = f17_early_channel_count,
+ .dbam_to_cs = f17_addr_mask_to_cs_size,
+ }
+ },
};
/*
family_types[F17_CPUS].ctl_name = "F18h";
break;
+ case 0x19:
+ fam_type = &family_types[F19_CPUS];
+ pvt->ops = &family_types[F19_CPUS].ops;
+ family_types[F19_CPUS].ctl_name = "F19h";
+ break;
+
default:
amd64_err("Unsupported family!\n");
return NULL;
struct mem_ctl_info *mci;
struct amd64_pvt *pvt;
- mci = find_mci_by_dev(&F3->dev);
- WARN_ON(!mci);
-
/* Remove from EDAC CORE tracking list */
mci = edac_mc_del_mc(&F3->dev);
if (!mci)
{ X86_VENDOR_AMD, 0x16, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
{ X86_VENDOR_AMD, 0x17, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
{ X86_VENDOR_HYGON, 0x18, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
+ { X86_VENDOR_AMD, 0x19, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
{ }
};
MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F6 0x1496
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F0 0x1440
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F6 0x1446
+#define PCI_DEVICE_ID_AMD_19H_DF_F0 0x1650
+#define PCI_DEVICE_ID_AMD_19H_DF_F6 0x1656
/*
* Function 1 - Address Map
F17_M10H_CPUS,
F17_M30H_CPUS,
F17_M70H_CPUS,
+ F19_CPUS,
NUM_FAMILIES,
};
if (!np) {
dev_err(mci->pdev, "dt: missing /memory node\n");
return -ENODEV;
- };
+ }
rc = of_address_to_resource(np, 0, &r);
if (rc) {
dev_err(mci->pdev, "dt: failed requesting resource for /memory node\n");
return rc;
- };
+ }
dev_dbg(mci->pdev, "dt: /memory node resources: first page r.start=0x%x, resource_size=0x%x, PAGE_SHIFT macro=0x%x\n",
r.start, resource_size(&r), PAGE_SHIFT);
return a & ((1 << 16) - 1);
}
-static inline u32 i5100_redmemb_ecc_locator(u32 a)
-{
- return a & ((1 << 18) - 1);
-}
-
static inline u32 i5100_recmema_merr(u32 a)
{
return i5100_nrecmema_merr(a);
u32 dw;
u32 dw2;
unsigned syndrome = 0;
- unsigned ecc_loc = 0;
unsigned merr;
unsigned bank;
unsigned rank;
pci_read_config_dword(pdev, I5100_REDMEMA, &dw2);
syndrome = dw2;
pci_read_config_dword(pdev, I5100_REDMEMB, &dw2);
- ecc_loc = i5100_redmemb_ecc_locator(dw2);
}
if (i5100_validlog_recmemvalid(dw)) {
#include "mce_amd.h"
-static struct amd_decoder_ops *fam_ops;
+static struct amd_decoder_ops fam_ops;
static u8 xec_mask = 0xf;
"L2 Fill Data error",
};
+static const char * const smca_ls2_mce_desc[] = {
+ "An ECC error was detected on a data cache read by a probe or victimization",
+ "An ECC error or L2 poison was detected on a data cache read by a load",
+ "An ECC error was detected on a data cache read-modify-write by a store",
+ "An ECC error or poison bit mismatch was detected on a tag read by a probe or victimization",
+ "An ECC error or poison bit mismatch was detected on a tag read by a load",
+ "An ECC error or poison bit mismatch was detected on a tag read by a store",
+ "An ECC error was detected on an EMEM read by a load",
+ "An ECC error was detected on an EMEM read-modify-write by a store",
+ "A parity error was detected in an L1 TLB entry by any access",
+ "A parity error was detected in an L2 TLB entry by any access",
+ "A parity error was detected in a PWC entry by any access",
+ "A parity error was detected in an STQ entry by any access",
+ "A parity error was detected in an LDQ entry by any access",
+ "A parity error was detected in a MAB entry by any access",
+ "A parity error was detected in an SCB entry state field by any access",
+ "A parity error was detected in an SCB entry address field by any access",
+ "A parity error was detected in an SCB entry data field by any access",
+ "A parity error was detected in a WCB entry by any access",
+ "A poisoned line was detected in an SCB entry by any access",
+ "A SystemReadDataError error was reported on read data returned from L2 for a load",
+ "A SystemReadDataError error was reported on read data returned from L2 for an SCB store",
+ "A SystemReadDataError error was reported on read data returned from L2 for a WCB store",
+ "A hardware assertion error was reported",
+ "A parity error was detected in an STLF, SCB EMEM entry or SRB store data by any access",
+};
+
static const char * const smca_if_mce_desc[] = {
"Op Cache Microtag Probe Port Parity Error",
"IC Microtag or Full Tag Multi-hit Error",
static struct smca_mce_desc smca_mce_descs[] = {
[SMCA_LS] = { smca_ls_mce_desc, ARRAY_SIZE(smca_ls_mce_desc) },
+ [SMCA_LS_V2] = { smca_ls2_mce_desc, ARRAY_SIZE(smca_ls2_mce_desc) },
[SMCA_IF] = { smca_if_mce_desc, ARRAY_SIZE(smca_if_mce_desc) },
[SMCA_L2_CACHE] = { smca_l2_mce_desc, ARRAY_SIZE(smca_l2_mce_desc) },
[SMCA_DE] = { smca_de_mce_desc, ARRAY_SIZE(smca_de_mce_desc) },
: (xec ? "multimatch" : "parity")));
return;
}
- } else if (fam_ops->mc0_mce(ec, xec))
+ } else if (fam_ops.mc0_mce(ec, xec))
;
else
pr_emerg(HW_ERR "Corrupted MC0 MCE info?\n");
pr_cont("Hardware Assert.\n");
else
goto wrong_mc1_mce;
- } else if (fam_ops->mc1_mce(ec, xec))
+ } else if (fam_ops.mc1_mce(ec, xec))
;
else
goto wrong_mc1_mce;
pr_emerg(HW_ERR "MC2 Error: ");
- if (!fam_ops->mc2_mce(ec, xec))
+ if (!fam_ops.mc2_mce(ec, xec))
pr_cont(HW_ERR "Corrupted MC2 MCE info?\n");
}
if (m->tsc)
pr_emerg(HW_ERR "TSC: %llu\n", m->tsc);
- if (!fam_ops)
+ /* Doesn't matter which member to test. */
+ if (!fam_ops.mc0_mce)
goto err_code;
switch (m->bank) {
c->x86_vendor != X86_VENDOR_HYGON)
return -ENODEV;
- fam_ops = kzalloc(sizeof(struct amd_decoder_ops), GFP_KERNEL);
- if (!fam_ops)
- return -ENOMEM;
+ if (boot_cpu_has(X86_FEATURE_SMCA)) {
+ xec_mask = 0x3f;
+ goto out;
+ }
switch (c->x86) {
case 0xf:
- fam_ops->mc0_mce = k8_mc0_mce;
- fam_ops->mc1_mce = k8_mc1_mce;
- fam_ops->mc2_mce = k8_mc2_mce;
+ fam_ops.mc0_mce = k8_mc0_mce;
+ fam_ops.mc1_mce = k8_mc1_mce;
+ fam_ops.mc2_mce = k8_mc2_mce;
break;
case 0x10:
- fam_ops->mc0_mce = f10h_mc0_mce;
- fam_ops->mc1_mce = k8_mc1_mce;
- fam_ops->mc2_mce = k8_mc2_mce;
+ fam_ops.mc0_mce = f10h_mc0_mce;
+ fam_ops.mc1_mce = k8_mc1_mce;
+ fam_ops.mc2_mce = k8_mc2_mce;
break;
case 0x11:
- fam_ops->mc0_mce = k8_mc0_mce;
- fam_ops->mc1_mce = k8_mc1_mce;
- fam_ops->mc2_mce = k8_mc2_mce;
+ fam_ops.mc0_mce = k8_mc0_mce;
+ fam_ops.mc1_mce = k8_mc1_mce;
+ fam_ops.mc2_mce = k8_mc2_mce;
break;
case 0x12:
- fam_ops->mc0_mce = f12h_mc0_mce;
- fam_ops->mc1_mce = k8_mc1_mce;
- fam_ops->mc2_mce = k8_mc2_mce;
+ fam_ops.mc0_mce = f12h_mc0_mce;
+ fam_ops.mc1_mce = k8_mc1_mce;
+ fam_ops.mc2_mce = k8_mc2_mce;
break;
case 0x14:
- fam_ops->mc0_mce = cat_mc0_mce;
- fam_ops->mc1_mce = cat_mc1_mce;
- fam_ops->mc2_mce = k8_mc2_mce;
+ fam_ops.mc0_mce = cat_mc0_mce;
+ fam_ops.mc1_mce = cat_mc1_mce;
+ fam_ops.mc2_mce = k8_mc2_mce;
break;
case 0x15:
xec_mask = c->x86_model == 0x60 ? 0x3f : 0x1f;
- fam_ops->mc0_mce = f15h_mc0_mce;
- fam_ops->mc1_mce = f15h_mc1_mce;
- fam_ops->mc2_mce = f15h_mc2_mce;
+ fam_ops.mc0_mce = f15h_mc0_mce;
+ fam_ops.mc1_mce = f15h_mc1_mce;
+ fam_ops.mc2_mce = f15h_mc2_mce;
break;
case 0x16:
xec_mask = 0x1f;
- fam_ops->mc0_mce = cat_mc0_mce;
- fam_ops->mc1_mce = cat_mc1_mce;
- fam_ops->mc2_mce = f16h_mc2_mce;
+ fam_ops.mc0_mce = cat_mc0_mce;
+ fam_ops.mc1_mce = cat_mc1_mce;
+ fam_ops.mc2_mce = f16h_mc2_mce;
break;
case 0x17:
case 0x18:
- xec_mask = 0x3f;
- if (!boot_cpu_has(X86_FEATURE_SMCA)) {
- printk(KERN_WARNING "Decoding supported only on Scalable MCA processors.\n");
- goto err_out;
- }
- break;
+ pr_warn("Decoding supported only on Scalable MCA processors.\n");
+ return -EINVAL;
default:
printk(KERN_WARNING "Huh? What family is it: 0x%x?!\n", c->x86);
- goto err_out;
+ return -EINVAL;
}
+out:
pr_info("MCE: In-kernel MCE decoding enabled.\n");
mce_register_decode_chain(&amd_mce_dec_nb);
return 0;
-
-err_out:
- kfree(fam_ops);
- fam_ops = NULL;
- return -EINVAL;
}
early_initcall(mce_amd_init);
static void __exit mce_amd_exit(void)
{
mce_unregister_decode_chain(&amd_mce_dec_nb);
- kfree(fam_ops);
}
MODULE_DESCRIPTION("AMD MCE decoder");
#include <linux/edac.h>
#include <linux/platform_device.h>
#include "edac_module.h"
-#include <asm/sifive_l2_cache.h>
+#include <soc/sifive/sifive_l2_cache.h>
#define DRVNAME "sifive_edac"
p->dci = edac_device_alloc_ctl_info(0, "sifive_ecc", 1, "sifive_ecc",
1, 1, NULL, 0,
edac_device_alloc_index());
- if (IS_ERR(p->dci))
- return PTR_ERR(p->dci);
+ if (!p->dci)
+ return -ENOMEM;
p->dci->dev = &pdev->dev;
p->dci->mod_name = "Sifive ECC Manager";
pdev = pci_get_device(PCI_VENDOR_ID_INTEL, did, NULL);
if (!pdev) {
- skx_printk(KERN_ERR, "Can't get tolm/tohm\n");
+ edac_dbg(2, "Can't get tolm/tohm\n");
return -ENODEV;
}
fw_shm_pool = tee_shm_alloc(pvt_data.ctx, MAX_SHM_MEM_SZ,
TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
if (IS_ERR(fw_shm_pool)) {
- tee_client_close_context(pvt_data.ctx);
dev_err(pvt_data.dev, "tee_shm_alloc failed\n");
err = PTR_ERR(fw_shm_pool);
goto out_sess;
static const struct font_desc *font;
static u32 efi_x, efi_y;
static u64 fb_base;
-static pgprot_t fb_prot;
+static bool fb_wb;
static void *efi_fb;
/*
if (!earlycon_console || !(earlycon_console->flags & CON_ENABLED))
return 0;
- if (pgprot_val(fb_prot) == pgprot_val(PAGE_KERNEL))
- efi_fb = memremap(fb_base, screen_info.lfb_size, MEMREMAP_WB);
- else
- efi_fb = memremap(fb_base, screen_info.lfb_size, MEMREMAP_WC);
+ efi_fb = memremap(fb_base, screen_info.lfb_size,
+ fb_wb ? MEMREMAP_WB : MEMREMAP_WC);
return efi_fb ? 0 : -ENOMEM;
}
static __ref void *efi_earlycon_map(unsigned long start, unsigned long len)
{
+ pgprot_t fb_prot;
+
if (efi_fb)
return efi_fb + start;
+ fb_prot = fb_wb ? PAGE_KERNEL : pgprot_writecombine(PAGE_KERNEL);
return early_memremap_prot(fb_base + start, len, pgprot_val(fb_prot));
}
if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
fb_base |= (u64)screen_info.ext_lfb_base << 32;
- if (opt && !strcmp(opt, "ram"))
- fb_prot = PAGE_KERNEL;
- else
- fb_prot = pgprot_writecombine(PAGE_KERNEL);
+ fb_wb = opt && !strcmp(opt, "ram");
si = &screen_info;
xres = si->lfb_width;
{
efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID;
efi_status_t status;
- struct efi_rng_protocol *rng;
+ struct efi_rng_protocol *rng = NULL;
status = efi_call_early(locate_protocol, &rng_proto, NULL,
(void **)&rng);
efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID;
efi_guid_t rng_algo_raw = EFI_RNG_ALGORITHM_RAW;
efi_guid_t rng_table_guid = LINUX_EFI_RANDOM_SEED_TABLE_GUID;
- struct efi_rng_protocol *rng;
- struct linux_efi_random_seed *seed;
+ struct efi_rng_protocol *rng = NULL;
+ struct linux_efi_random_seed *seed = NULL;
efi_status_t status;
status = efi_call_early(locate_protocol, &rng_proto, NULL,
tristate "Cavium ThunderX/OCTEON-TX GPIO"
depends on ARCH_THUNDER || (64BIT && COMPILE_TEST)
depends on PCI_MSI
- select GPIOLIB_IRQCHIP
select IRQ_DOMAIN_HIERARCHY
select IRQ_FASTEOI_HIERARCHY_HANDLERS
help
config GPIO_MAX77620
tristate "GPIO support for PMIC MAX77620 and MAX20024"
depends on MFD_MAX77620
+ select GPIOLIB_IRQCHIP
help
GPIO driver for MAX77620 and MAX20024 PMIC from Maxim Semiconductor.
MAX77620 PMIC has 8 pins that can be configured as GPIOs. The
mutex_lock(&chip->lock);
if (test_bit(FLAG_REQUESTED, &desc->flags) &&
- !test_bit(FLAG_IS_OUT, &desc->flags)) {
+ !test_bit(FLAG_IS_OUT, &desc->flags)) {
curr = __gpio_mockup_get(chip, offset);
if (curr == value)
goto out;
irq_type = irq_get_trigger_type(irq);
if ((value == 1 && (irq_type & IRQ_TYPE_EDGE_RISING)) ||
- (value == 0 && (irq_type & IRQ_TYPE_EDGE_FALLING)))
+ (value == 0 && (irq_type & IRQ_TYPE_EDGE_FALLING)))
irq_sim_fire(sim, offset);
}
struct thunderx_gpio {
struct gpio_chip chip;
u8 __iomem *register_base;
+ struct irq_domain *irqd;
struct msix_entry *msix_entries; /* per line MSI-X */
struct thunderx_line *line_entries; /* per line irq info */
raw_spinlock_t lock;
}
}
-static void thunderx_gpio_irq_ack(struct irq_data *d)
+static void thunderx_gpio_irq_ack(struct irq_data *data)
{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct thunderx_gpio *txgpio = gpiochip_get_data(gc);
+ struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
writeq(GPIO_INTR_INTR,
- txgpio->register_base + intr_reg(irqd_to_hwirq(d)));
+ txline->txgpio->register_base + intr_reg(txline->line));
}
-static void thunderx_gpio_irq_mask(struct irq_data *d)
+static void thunderx_gpio_irq_mask(struct irq_data *data)
{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct thunderx_gpio *txgpio = gpiochip_get_data(gc);
+ struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
writeq(GPIO_INTR_ENA_W1C,
- txgpio->register_base + intr_reg(irqd_to_hwirq(d)));
+ txline->txgpio->register_base + intr_reg(txline->line));
}
-static void thunderx_gpio_irq_mask_ack(struct irq_data *d)
+static void thunderx_gpio_irq_mask_ack(struct irq_data *data)
{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct thunderx_gpio *txgpio = gpiochip_get_data(gc);
+ struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
writeq(GPIO_INTR_ENA_W1C | GPIO_INTR_INTR,
- txgpio->register_base + intr_reg(irqd_to_hwirq(d)));
+ txline->txgpio->register_base + intr_reg(txline->line));
}
-static void thunderx_gpio_irq_unmask(struct irq_data *d)
+static void thunderx_gpio_irq_unmask(struct irq_data *data)
{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct thunderx_gpio *txgpio = gpiochip_get_data(gc);
+ struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
writeq(GPIO_INTR_ENA_W1S,
- txgpio->register_base + intr_reg(irqd_to_hwirq(d)));
+ txline->txgpio->register_base + intr_reg(txline->line));
}
-static int thunderx_gpio_irq_set_type(struct irq_data *d,
+static int thunderx_gpio_irq_set_type(struct irq_data *data,
unsigned int flow_type)
{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct thunderx_gpio *txgpio = gpiochip_get_data(gc);
- struct thunderx_line *txline =
- &txgpio->line_entries[irqd_to_hwirq(d)];
+ struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
+ struct thunderx_gpio *txgpio = txline->txgpio;
u64 bit_cfg;
- irqd_set_trigger_type(d, flow_type);
+ irqd_set_trigger_type(data, flow_type);
bit_cfg = txline->fil_bits | GPIO_BIT_CFG_INT_EN;
if (flow_type & IRQ_TYPE_EDGE_BOTH) {
- irq_set_handler_locked(d, handle_fasteoi_ack_irq);
+ irq_set_handler_locked(data, handle_fasteoi_ack_irq);
bit_cfg |= GPIO_BIT_CFG_INT_TYPE;
} else {
- irq_set_handler_locked(d, handle_fasteoi_mask_irq);
+ irq_set_handler_locked(data, handle_fasteoi_mask_irq);
}
raw_spin_lock(&txgpio->lock);
irq_chip_disable_parent(data);
}
+static int thunderx_gpio_irq_request_resources(struct irq_data *data)
+{
+ struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
+ struct thunderx_gpio *txgpio = txline->txgpio;
+ int r;
+
+ r = gpiochip_lock_as_irq(&txgpio->chip, txline->line);
+ if (r)
+ return r;
+
+ r = irq_chip_request_resources_parent(data);
+ if (r)
+ gpiochip_unlock_as_irq(&txgpio->chip, txline->line);
+
+ return r;
+}
+
+static void thunderx_gpio_irq_release_resources(struct irq_data *data)
+{
+ struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
+ struct thunderx_gpio *txgpio = txline->txgpio;
+
+ irq_chip_release_resources_parent(data);
+
+ gpiochip_unlock_as_irq(&txgpio->chip, txline->line);
+}
+
/*
* Interrupts are chained from underlying MSI-X vectors. We have
* these irq_chip functions to be able to handle level triggering
.irq_unmask = thunderx_gpio_irq_unmask,
.irq_eoi = irq_chip_eoi_parent,
.irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_request_resources = thunderx_gpio_irq_request_resources,
+ .irq_release_resources = thunderx_gpio_irq_release_resources,
.irq_set_type = thunderx_gpio_irq_set_type,
.flags = IRQCHIP_SET_TYPE_MASKED
};
-static int thunderx_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
- unsigned int child,
- unsigned int child_type,
- unsigned int *parent,
- unsigned int *parent_type)
+static int thunderx_gpio_irq_translate(struct irq_domain *d,
+ struct irq_fwspec *fwspec,
+ irq_hw_number_t *hwirq,
+ unsigned int *type)
{
- struct thunderx_gpio *txgpio = gpiochip_get_data(gc);
-
- *parent = txgpio->base_msi + (2 * child);
- *parent_type = IRQ_TYPE_LEVEL_HIGH;
+ struct thunderx_gpio *txgpio = d->host_data;
+
+ if (WARN_ON(fwspec->param_count < 2))
+ return -EINVAL;
+ if (fwspec->param[0] >= txgpio->chip.ngpio)
+ return -EINVAL;
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
return 0;
}
+static int thunderx_gpio_irq_alloc(struct irq_domain *d, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct thunderx_line *txline = arg;
+
+ return irq_domain_set_hwirq_and_chip(d, virq, txline->line,
+ &thunderx_gpio_irq_chip, txline);
+}
+
+static const struct irq_domain_ops thunderx_gpio_irqd_ops = {
+ .alloc = thunderx_gpio_irq_alloc,
+ .translate = thunderx_gpio_irq_translate
+};
+
+static int thunderx_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
+{
+ struct thunderx_gpio *txgpio = gpiochip_get_data(chip);
+
+ return irq_find_mapping(txgpio->irqd, offset);
+}
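
A hedged consumer-side sketch: with the custom hierarchy above, gpiod_to_irq() resolves through thunderx_gpio_to_irq() and txgpio->irqd; the handler, name, and private data are placeholders.

#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>

static int hook_line_irq(struct gpio_desc *desc, irq_handler_t handler,
			 void *priv)
{
	int irq = gpiod_to_irq(desc);	/* resolves via ->to_irq() above */

	if (irq < 0)
		return irq;
	return request_irq(irq, handler, IRQF_TRIGGER_RISING,
			   "thunderx-line", priv);
}
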
+
static int thunderx_gpio_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct device *dev = &pdev->dev;
struct thunderx_gpio *txgpio;
struct gpio_chip *chip;
- struct gpio_irq_chip *girq;
int ngpio, i;
int err = 0;
}
txgpio->msix_entries = devm_kcalloc(dev,
- ngpio, sizeof(struct msix_entry),
- GFP_KERNEL);
+ ngpio, sizeof(struct msix_entry),
+ GFP_KERNEL);
if (!txgpio->msix_entries) {
err = -ENOMEM;
goto out;
if (err < 0)
goto out;
+ /*
+	 * Push the GPIO-specific irqdomain onto the hierarchy created as
+	 * a side effect of pci_enable_msix().
+ */
+ txgpio->irqd = irq_domain_create_hierarchy(irq_get_irq_data(txgpio->msix_entries[0].vector)->domain,
+ 0, 0, of_node_to_fwnode(dev->of_node),
+ &thunderx_gpio_irqd_ops, txgpio);
+ if (!txgpio->irqd) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* Push on irq_data and the domain for each line. */
+ for (i = 0; i < ngpio; i++) {
+ err = irq_domain_push_irq(txgpio->irqd,
+ txgpio->msix_entries[i].vector,
+ &txgpio->line_entries[i]);
+ if (err < 0)
+ dev_err(dev, "irq_domain_push_irq: %d\n", err);
+ }
+
chip->label = KBUILD_MODNAME;
chip->parent = dev;
chip->owner = THIS_MODULE;
chip->set = thunderx_gpio_set;
chip->set_multiple = thunderx_gpio_set_multiple;
chip->set_config = thunderx_gpio_set_config;
- girq = &chip->irq;
- girq->chip = &thunderx_gpio_irq_chip;
- girq->fwnode = of_node_to_fwnode(dev->of_node);
- girq->parent_domain =
- irq_get_irq_data(txgpio->msix_entries[0].vector)->domain;
- girq->child_to_parent_hwirq = thunderx_gpio_child_to_parent_hwirq;
- girq->handler = handle_bad_irq;
- girq->default_type = IRQ_TYPE_NONE;
-
+ chip->to_irq = thunderx_gpio_to_irq;
err = devm_gpiochip_add_data(dev, chip, txgpio);
if (err)
goto out;
- /* Push on irq_data and the domain for each line. */
- for (i = 0; i < ngpio; i++) {
- err = irq_domain_push_irq(chip->irq.domain,
- txgpio->msix_entries[i].vector,
- chip);
- if (err < 0)
- dev_err(dev, "irq_domain_push_irq: %d\n", err);
- }
-
dev_info(dev, "ThunderX GPIO: %d lines with base %d.\n",
ngpio, chip->base);
return 0;
struct thunderx_gpio *txgpio = pci_get_drvdata(pdev);
for (i = 0; i < txgpio->chip.ngpio; i++)
- irq_domain_pop_irq(txgpio->chip.irq.domain,
+ irq_domain_pop_irq(txgpio->irqd,
txgpio->msix_entries[i].vector);
- irq_domain_remove(txgpio->chip.irq.domain);
+ irq_domain_remove(txgpio->irqd);
pci_set_drvdata(pdev, NULL);
}
unsigned int bank_num;
for (bank_num = 0; bank_num < gpio->p_data->max_bank; bank_num++) {
+ writel_relaxed(ZYNQ_GPIO_IXR_DISABLE_ALL, gpio->base_addr +
+ ZYNQ_GPIO_INTDIS_OFFSET(bank_num));
writel_relaxed(gpio->context.datalsw[bank_num],
gpio->base_addr +
ZYNQ_GPIO_DATA_LSW_OFFSET(bank_num));
writel_relaxed(gpio->context.dirm[bank_num],
gpio->base_addr +
ZYNQ_GPIO_DIRM_OFFSET(bank_num));
- writel_relaxed(gpio->context.int_en[bank_num],
- gpio->base_addr +
- ZYNQ_GPIO_INTEN_OFFSET(bank_num));
writel_relaxed(gpio->context.int_type[bank_num],
gpio->base_addr +
ZYNQ_GPIO_INTTYPE_OFFSET(bank_num));
writel_relaxed(gpio->context.int_any[bank_num],
gpio->base_addr +
ZYNQ_GPIO_INTANY_OFFSET(bank_num));
+ writel_relaxed(~(gpio->context.int_en[bank_num]),
+ gpio->base_addr +
+ ZYNQ_GPIO_INTEN_OFFSET(bank_num));
}
}
#include "gpiolib.h"
#include "gpiolib-acpi.h"
+#define QUIRK_NO_EDGE_EVENTS_ON_BOOT 0x01l
+#define QUIRK_NO_WAKEUP 0x02l
+
static int run_edge_events_on_boot = -1;
module_param(run_edge_events_on_boot, int, 0444);
MODULE_PARM_DESC(run_edge_events_on_boot,
"Run edge _AEI event-handlers at boot: 0=no, 1=yes, -1=auto");
+static int honor_wakeup = -1;
+module_param(honor_wakeup, int, 0444);
+MODULE_PARM_DESC(honor_wakeup,
+ "Honor the ACPI wake-capable flag: 0=no, 1=yes, -1=auto");
+
/**
* struct acpi_gpio_event - ACPI GPIO event handler data
*
event->handle = evt_handle;
event->handler = handler;
event->irq = irq;
- event->irq_is_wake = agpio->wake_capable == ACPI_WAKE_CAPABLE;
+ event->irq_is_wake = honor_wakeup && agpio->wake_capable == ACPI_WAKE_CAPABLE;
event->pin = pin;
event->desc = desc;
/* We must use _sync so that this runs after the first deferred_probe run */
late_initcall_sync(acpi_gpio_handle_deferred_request_irqs);
-static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = {
+static const struct dmi_system_id gpiolib_acpi_quirks[] = {
{
/*
* The Minix Neo Z83-4 has a micro-USB-B id-pin handler for
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "MINIX"),
DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"),
- }
+ },
+ .driver_data = (void *)QUIRK_NO_EDGE_EVENTS_ON_BOOT,
},
{
/*
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Wortmann_AG"),
DMI_MATCH(DMI_PRODUCT_NAME, "TERRA_PAD_1061"),
- }
+ },
+ .driver_data = (void *)QUIRK_NO_EDGE_EVENTS_ON_BOOT,
+ },
+ {
+ /*
+		 * Various HP X2 10 Cherry Trail models use an external
+		 * embedded controller connected via I2C plus an ACPI GPIO
+		 * event handler. The embedded controller generates various
+		 * spurious wakeup events when suspended, so disable wakeup
+		 * for its handler (it uses the only ACPI GPIO event handler).
+		 * This breaks wakeup on lid open; the user needs to press
+		 * the power button to wake the system. The alternative is
+		 * suspend simply not working, which is worse.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP x2 Detachable 10-p0XX"),
+ },
+ .driver_data = (void *)QUIRK_NO_WAKEUP,
},
{} /* Terminating entry */
};
static int acpi_gpio_setup_params(void)
{
+ const struct dmi_system_id *id;
+ long quirks = 0;
+
+ id = dmi_first_match(gpiolib_acpi_quirks);
+ if (id)
+ quirks = (long)id->driver_data;
+
if (run_edge_events_on_boot < 0) {
- if (dmi_check_system(run_edge_events_on_boot_blacklist))
+ if (quirks & QUIRK_NO_EDGE_EVENTS_ON_BOOT)
run_edge_events_on_boot = 0;
else
run_edge_events_on_boot = 1;
}
+ if (honor_wakeup < 0) {
+ if (quirks & QUIRK_NO_WAKEUP)
+ honor_wakeup = 0;
+ else
+ honor_wakeup = 1;
+ }
+
return 0;
}
enum of_gpio_flags *flags,
int index)
{
- /*
- * Handle MMC "cd-inverted" and "wp-inverted" semantics.
- */
- if (IS_ENABLED(CONFIG_MMC)) {
- /*
- * Active low is the default according to the
- * SDHCI specification and the device tree
- * bindings. However the code in the current
- * kernel was written such that the phandle
- * flags were always respected, and "cd-inverted"
- * would invert the flag from the device phandle.
- */
- if (!strcmp(propname, "cd-gpios")) {
- if (of_property_read_bool(np, "cd-inverted"))
- *flags ^= OF_GPIO_ACTIVE_LOW;
- }
- if (!strcmp(propname, "wp-gpios")) {
- if (of_property_read_bool(np, "wp-inverted"))
- *flags ^= OF_GPIO_ACTIVE_LOW;
- }
- }
/*
* Some GPIO fixed regulator quirks.
* Note that active low is the default.
}
EXPORT_SYMBOL_GPL(gpiod_is_active_low);
+/**
+ * gpiod_toggle_active_low - toggle whether a GPIO is active-low or not
+ * @desc: the gpio descriptor to change
+ */
+void gpiod_toggle_active_low(struct gpio_desc *desc)
+{
+ VALIDATE_DESC_VOID(desc);
+ change_bit(FLAG_ACTIVE_LOW, &desc->flags);
+}
+EXPORT_SYMBOL_GPL(gpiod_toggle_active_low);
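
A hedged sketch of the intended use: the MMC "cd-inverted" special-casing removed from gpiolib-of above can now be handled by the consumer on the descriptor it already holds; np and cd are placeholders.

#include <linux/gpio/consumer.h>
#include <linux/of.h>

static void fixup_cd_polarity(struct device_node *np, struct gpio_desc *cd)
{
	/* Replaces the "cd-inverted" handling dropped from gpiolib-of */
	if (of_property_read_bool(np, "cd-inverted"))
		gpiod_toggle_active_low(cd);
}
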
+
/* I/O calls are only valid after configuration completed; the relevant
* "is this a valid GPIO" error checks should already have been done.
*
int amdgpu_mcbp = 0;
int amdgpu_discovery = -1;
int amdgpu_mes = 0;
-int amdgpu_noretry = 1;
+int amdgpu_noretry;
int amdgpu_force_asic_type = -1;
struct amdgpu_mgpu_info mgpu_info = {
module_param_named(mes, amdgpu_mes, int, 0444);
MODULE_PARM_DESC(noretry,
- "Disable retry faults (0 = retry enabled, 1 = retry disabled (default))");
+ "Disable retry faults (0 = retry enabled (default), 1 = retry disabled)");
module_param_named(noretry, amdgpu_noretry, int, 0644);
/**
{0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
/* Renoir */
- {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU|AMD_EXP_HW_SUPPORT},
+ {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
/* Navi12 */
{0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT},
.driver_features =
DRIVER_USE_AGP | DRIVER_ATOMIC |
DRIVER_GEM |
- DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ,
+ DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ |
+ DRIVER_SYNCOBJ_TIMELINE,
.load = amdgpu_driver_load_kms,
.open = amdgpu_driver_open_kms,
.postclose = amdgpu_driver_postclose_kms,
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
- SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000)
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x03fbe1fe)
};
static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev,
return color_space;
}
-static void reduce_mode_colour_depth(struct dc_crtc_timing *timing_out)
-{
- if (timing_out->display_color_depth <= COLOR_DEPTH_888)
- return;
-
- timing_out->display_color_depth--;
-}
-
-static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_out,
- const struct drm_display_info *info)
+static bool adjust_colour_depth_from_display_info(
+ struct dc_crtc_timing *timing_out,
+ const struct drm_display_info *info)
{
+ enum dc_color_depth depth = timing_out->display_color_depth;
int normalized_clk;
- if (timing_out->display_color_depth <= COLOR_DEPTH_888)
- return;
do {
normalized_clk = timing_out->pix_clk_100hz / 10;
/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
normalized_clk /= 2;
/* Adjust the pixel clock per the HDMI spec, based on colour depth */
- switch (timing_out->display_color_depth) {
+ switch (depth) {
+ case COLOR_DEPTH_888:
+ break;
case COLOR_DEPTH_101010:
normalized_clk = (normalized_clk * 30) / 24;
break;
normalized_clk = (normalized_clk * 48) / 24;
break;
default:
- return;
+ /* The above depths are the only ones valid for HDMI. */
+ return false;
}
- if (normalized_clk <= info->max_tmds_clock)
- return;
- reduce_mode_colour_depth(timing_out);
-
- } while (timing_out->display_color_depth > COLOR_DEPTH_888);
-
+ if (normalized_clk <= info->max_tmds_clock) {
+ timing_out->display_color_depth = depth;
+ return true;
+ }
+ } while (--depth > COLOR_DEPTH_666);
+ return false;
}
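/*
 * Worked example, illustrative only: a 4:4:4 mode with a 300 MHz pixel
 * clock needs 300 * 30 / 24 = 375 MHz of TMDS bandwidth at 10 bpc; if
 * the sink reports max_tmds_clock = 340 MHz, the loop above steps the
 * depth down and settles on COLOR_DEPTH_888, where the clock is
 * unscaled and fits.
 */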
static void fill_stream_properties_from_drm_display_mode(
stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
- if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
- adjust_colour_depth_from_display_info(timing_out, info);
+ if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+ if (!adjust_colour_depth_from_display_info(timing_out, info) &&
+ drm_mode_is_420_also(info, mode_in) &&
+ timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
+ timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
+ adjust_colour_depth_from_display_info(timing_out, info);
+ }
+ }
}
static void fill_audio_info(struct audio_info *audio_info,
}
case SIGNAL_TYPE_EDP: {
- read_current_link_settings_on_detect(link);
detect_edp_sink_caps(link);
+ read_current_link_settings_on_detect(link);
sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
sink_caps.signal = SIGNAL_TYPE_EDP;
break;
memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
idx += req->u.i2c_read.transactions[i].num_bytes;
- buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5;
+ buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4;
buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
idx++;
}
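/*
 * Layout note, illustrative only: in the DP sideband I2C_READ request,
 * the byte built above packs No_Stop_Bit into bit 4 and
 * I2C_Transaction_Delay into bits 3:0, which is why the corrected shift
 * is 4 rather than 5.
 */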
txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
mstb->tx_slots[txmsg->seqno] = NULL;
}
+ mgr->is_waiting_for_dwn_reply = false;
+
}
out:
if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
}
mutex_unlock(&mgr->qlock);
+ drm_dp_mst_kick_tx(mgr);
return ret;
}
return parent_lct + 1;
}
-static int drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt)
+static bool drm_dp_mst_is_dp_mst_end_device(u8 pdt, bool mcs)
+{
+ switch (pdt) {
+ case DP_PEER_DEVICE_DP_LEGACY_CONV:
+ case DP_PEER_DEVICE_SST_SINK:
+ return true;
+ case DP_PEER_DEVICE_MST_BRANCHING:
+ /* An MST branch reporting no message support is an SST branch device */
+ if (!mcs)
+ return true;
+
+ return false;
+ }
+ return true;
+}
+
+static int
+drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
+ bool new_mcs)
{
struct drm_dp_mst_topology_mgr *mgr = port->mgr;
struct drm_dp_mst_branch *mstb;
u8 rad[8], lct;
int ret = 0;
- if (port->pdt == new_pdt)
+ if (port->pdt == new_pdt && port->mcs == new_mcs)
return 0;
/* Teardown the old pdt, if there is one */
- switch (port->pdt) {
- case DP_PEER_DEVICE_DP_LEGACY_CONV:
- case DP_PEER_DEVICE_SST_SINK:
- /*
- * If the new PDT would also have an i2c bus, don't bother
- * with reregistering it
- */
- if (new_pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
- new_pdt == DP_PEER_DEVICE_SST_SINK) {
- port->pdt = new_pdt;
- return 0;
- }
+ if (port->pdt != DP_PEER_DEVICE_NONE) {
+ if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
+ /*
+ * If the new PDT would also have an i2c bus,
+ * don't bother with reregistering it
+ */
+ if (new_pdt != DP_PEER_DEVICE_NONE &&
+ drm_dp_mst_is_dp_mst_end_device(new_pdt, new_mcs)) {
+ port->pdt = new_pdt;
+ port->mcs = new_mcs;
+ return 0;
+ }
- /* remove i2c over sideband */
- drm_dp_mst_unregister_i2c_bus(&port->aux);
- break;
- case DP_PEER_DEVICE_MST_BRANCHING:
- mutex_lock(&mgr->lock);
- drm_dp_mst_topology_put_mstb(port->mstb);
- port->mstb = NULL;
- mutex_unlock(&mgr->lock);
- break;
+ /* remove i2c over sideband */
+ drm_dp_mst_unregister_i2c_bus(&port->aux);
+ } else {
+ mutex_lock(&mgr->lock);
+ drm_dp_mst_topology_put_mstb(port->mstb);
+ port->mstb = NULL;
+ mutex_unlock(&mgr->lock);
+ }
}
port->pdt = new_pdt;
- switch (port->pdt) {
- case DP_PEER_DEVICE_DP_LEGACY_CONV:
- case DP_PEER_DEVICE_SST_SINK:
- /* add i2c over sideband */
- ret = drm_dp_mst_register_i2c_bus(&port->aux);
- break;
+ port->mcs = new_mcs;
- case DP_PEER_DEVICE_MST_BRANCHING:
- lct = drm_dp_calculate_rad(port, rad);
- mstb = drm_dp_add_mst_branch_device(lct, rad);
- if (!mstb) {
- ret = -ENOMEM;
- DRM_ERROR("Failed to create MSTB for port %p", port);
- goto out;
- }
+ if (port->pdt != DP_PEER_DEVICE_NONE) {
+ if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
+ /* add i2c over sideband */
+ ret = drm_dp_mst_register_i2c_bus(&port->aux);
+ } else {
+ lct = drm_dp_calculate_rad(port, rad);
+ mstb = drm_dp_add_mst_branch_device(lct, rad);
+ if (!mstb) {
+ ret = -ENOMEM;
+ DRM_ERROR("Failed to create MSTB for port %p",
+ port);
+ goto out;
+ }
- mutex_lock(&mgr->lock);
- port->mstb = mstb;
- mstb->mgr = port->mgr;
- mstb->port_parent = port;
+ mutex_lock(&mgr->lock);
+ port->mstb = mstb;
+ mstb->mgr = port->mgr;
+ mstb->port_parent = port;
- /*
- * Make sure this port's memory allocation stays
- * around until its child MSTB releases it
- */
- drm_dp_mst_get_port_malloc(port);
- mutex_unlock(&mgr->lock);
+ /*
+ * Make sure this port's memory allocation stays
+ * around until its child MSTB releases it
+ */
+ drm_dp_mst_get_port_malloc(port);
+ mutex_unlock(&mgr->lock);
- /* And make sure we send a link address for this */
- ret = 1;
- break;
+ /* And make sure we send a link address for this */
+ ret = 1;
+ }
}
out:
goto error;
}
- if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
- port->pdt == DP_PEER_DEVICE_SST_SINK) &&
- port->port_num >= DP_MST_LOGICAL_PORT_0) {
+ if (port->pdt != DP_PEER_DEVICE_NONE &&
+ drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
port->cached_edid = drm_get_edid(port->connector,
&port->aux.ddc);
drm_connector_set_tile_property(port->connector);
struct drm_dp_mst_port *port;
int old_ddps = 0, ret;
u8 new_pdt = DP_PEER_DEVICE_NONE;
+ bool new_mcs = 0;
bool created = false, send_link_addr = false, changed = false;
port = drm_dp_get_port(mstb, port_msg->port_number);
port->input = port_msg->input_port;
if (!port->input)
new_pdt = port_msg->peer_device_type;
- port->mcs = port_msg->mcs;
+ new_mcs = port_msg->mcs;
port->ddps = port_msg->ddps;
port->ldps = port_msg->legacy_device_plug_status;
port->dpcd_rev = port_msg->dpcd_revision;
}
}
- ret = drm_dp_port_set_pdt(port, new_pdt);
+ ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
if (ret == 1) {
send_link_addr = true;
} else if (ret < 0) {
* we're coming out of suspend. In this case, always resend the link
* address if there's an MSTB on this port
*/
- if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING)
+ if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
+ port->mcs)
send_link_addr = true;
if (port->connector)
{
struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
struct drm_dp_mst_port *port;
- int old_ddps, ret;
+ int old_ddps, old_input, ret, i;
u8 new_pdt;
+ bool new_mcs;
bool dowork = false, create_connector = false;
port = drm_dp_get_port(mstb, conn_stat->port_number);
}
old_ddps = port->ddps;
+ old_input = port->input;
port->input = conn_stat->input_port;
- port->mcs = conn_stat->message_capability_status;
port->ldps = conn_stat->legacy_device_plug_status;
port->ddps = conn_stat->displayport_device_plug_status;
}
new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
-
- ret = drm_dp_port_set_pdt(port, new_pdt);
+ new_mcs = conn_stat->message_capability_status;
+ ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
if (ret == 1) {
dowork = true;
} else if (ret < 0) {
dowork = false;
}
+ if (!old_input && old_ddps != port->ddps && !port->ddps) {
+ for (i = 0; i < mgr->max_payloads; i++) {
+ struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
+ struct drm_dp_mst_port *port_validated;
+
+ if (!vcpi)
+ continue;
+
+ port_validated =
+ container_of(vcpi, struct drm_dp_mst_port, vcpi);
+ port_validated =
+ drm_dp_mst_topology_get_port_validated(mgr, port_validated);
+ if (!port_validated) {
+ mutex_lock(&mgr->payload_lock);
+ vcpi->num_slots = 0;
+ mutex_unlock(&mgr->payload_lock);
+ } else {
+ drm_dp_mst_topology_put_port(port_validated);
+ }
+ }
+ }
+
if (port->connector)
drm_modeset_unlock(&mgr->base.lock);
else if (create_connector)
ret = process_single_tx_qlock(mgr, txmsg, false);
if (ret == 1) {
/* txmsg is sent it should be in the slots now */
+ mgr->is_waiting_for_dwn_reply = true;
list_del(&txmsg->next);
} else if (ret) {
DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
+ mgr->is_waiting_for_dwn_reply = false;
list_del(&txmsg->next);
if (txmsg->seqno != -1)
txmsg->dst->tx_slots[txmsg->seqno] = NULL;
drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
}
- if (list_is_singular(&mgr->tx_msg_downq))
+ if (list_is_singular(&mgr->tx_msg_downq) &&
+ !mgr->is_waiting_for_dwn_reply)
process_single_down_tx_qlock(mgr);
mutex_unlock(&mgr->qlock);
}
mutex_lock(&mgr->qlock);
txmsg->state = DRM_DP_SIDEBAND_TX_RX;
mstb->tx_slots[slot] = NULL;
+ mgr->is_waiting_for_dwn_reply = false;
mutex_unlock(&mgr->qlock);
wake_up_all(&mgr->tx_waitq);
no_msg:
drm_dp_mst_topology_put_mstb(mstb);
clear_down_rep_recv:
+ mutex_lock(&mgr->qlock);
+ mgr->is_waiting_for_dwn_reply = false;
+ mutex_unlock(&mgr->qlock);
memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
return 0;
switch (port->pdt) {
case DP_PEER_DEVICE_NONE:
case DP_PEER_DEVICE_MST_BRANCHING:
+ if (!port->mcs)
+ ret = connector_status_connected;
break;
case DP_PEER_DEVICE_SST_SINK:
struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
mutex_lock(&mgr->qlock);
- if (!list_empty(&mgr->tx_msg_downq))
+ if (!list_empty(&mgr->tx_msg_downq) && !mgr->is_waiting_for_dwn_reply)
process_single_down_tx_qlock(mgr);
mutex_unlock(&mgr->qlock);
}
if (port->connector)
port->mgr->cbs->destroy_connector(port->mgr, port->connector);
- drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE);
+ drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs);
drm_dp_mst_put_port_malloc(port);
}
* Changes to struct fb_var_screeninfo are currently not pushed back
* to KMS, hence fail if different settings are requested.
*/
- if (var->bits_per_pixel != fb->format->cpp[0] * 8 ||
+ if (var->bits_per_pixel > fb->format->cpp[0] * 8 ||
var->xres > fb->width || var->yres > fb->height ||
var->xres_virtual > fb->width || var->yres_virtual > fb->height) {
DRM_DEBUG("fb requested width/height/bpp can't fit in current fb "
drm_fb_helper_fill_pixel_fmt(var, fb->format->depth);
}
+ /*
+ * Likewise, bits_per_pixel should be rounded up to a supported value.
+ */
+ var->bits_per_pixel = fb->format->cpp[0] * 8;
+
/*
* drm fbdev emulation doesn't support changing the pixel format at all,
* so reject all pixel format changing requests.
}
/* Force CDCLK to 2*BCLK as long as we need audio powered. */
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ if (IS_GEMINILAKE(dev_priv))
glk_force_audio_cdclk(dev_priv, true);
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
/* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. */
if (--dev_priv->audio_power_refcount == 0)
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ if (IS_GEMINILAKE(dev_priv))
glk_force_audio_cdclk(dev_priv, false);
intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO, cookie);
{
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- i915_reg_t reg;
- u32 trans_ddi_func_ctl2_val;
if (old_crtc_state->master_transcoder == INVALID_TRANSCODER)
return;
DRM_DEBUG_KMS("Disabling Transcoder Port Sync on Slave Transcoder %s\n",
transcoder_name(old_crtc_state->cpu_transcoder));
- reg = TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder);
- trans_ddi_func_ctl2_val = ~(PORT_SYNC_MODE_ENABLE |
- PORT_SYNC_MODE_MASTER_SELECT_MASK);
- I915_WRITE(reg, trans_ddi_func_ctl2_val);
+ I915_WRITE(TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder), 0);
}
static void intel_fdi_normal_train(struct intel_crtc *crtc)
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
-static __always_inline u32 __busy_read_flag(u8 id)
+static __always_inline u32 __busy_read_flag(u16 id)
{
- if (id == (u8)I915_ENGINE_CLASS_INVALID)
+ if (id == (u16)I915_ENGINE_CLASS_INVALID)
return 0xffff0000u;
GEM_BUG_ON(id >= 16);
return 0x10000u << id;
}
-static __always_inline u32 __busy_write_id(u8 id)
+static __always_inline u32 __busy_write_id(u16 id)
{
/*
* The uABI guarantees an active writer is also amongst the read
* last_read - hence we always set both read and write busy for
* last_write.
*/
- if (id == (u8)I915_ENGINE_CLASS_INVALID)
+ if (id == (u16)I915_ENGINE_CLASS_INVALID)
return 0xffffffffu;
return (id + 1) | __busy_read_flag(id);
}
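/*
 * Worked example, illustrative only: for uabi_class 2 the read flag is
 * 0x10000 << 2 = 0x40000 and the write id is (2 + 1) | 0x40000 =
 * 0x40003, so a single u32 encodes both the busy-for-read class bits
 * and the (one-based) writing class.
 */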
static __always_inline unsigned int
-__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u8 id))
+__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u16 id))
{
const struct i915_request *rq;
return 0;
/* Beware type-expansion follies! */
- BUILD_BUG_ON(!typecheck(u8, rq->engine->uabi_class));
+ BUILD_BUG_ON(!typecheck(u16, rq->engine->uabi_class));
return flag(rq->engine->uabi_class);
}
static struct sg_table *
__i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
- struct page **pvec, int num_pages)
+ struct page **pvec, unsigned long num_pages)
{
unsigned int max_segment = i915_sg_segment_size();
struct sg_table *st;
{
struct get_pages_work *work = container_of(_work, typeof(*work), work);
struct drm_i915_gem_object *obj = work->obj;
- const int npages = obj->base.size >> PAGE_SHIFT;
+ const unsigned long npages = obj->base.size >> PAGE_SHIFT;
+ unsigned long pinned;
struct page **pvec;
- int pinned, ret;
+ int ret;
ret = -ENOMEM;
pinned = 0;
static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
- const int num_pages = obj->base.size >> PAGE_SHIFT;
+ const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
struct mm_struct *mm = obj->userptr.mm->mm;
struct page **pvec;
struct sg_table *pages;
if (err)
return err;
+ err = i915_active_acquire(&vma->active);
+ if (err)
+ goto err_unpin;
+
/*
* And mark it as a globally pinned object to let the shrinker know
* it cannot reclaim the object until we release it.
vma->obj->mm.dirty = true;
return 0;
+
+err_unpin:
+ i915_vma_unpin(vma);
+ return err;
}
static void __context_unpin_state(struct i915_vma *vma)
{
i915_vma_make_shrinkable(vma);
+ i915_active_release(&vma->active);
__i915_vma_unpin(vma);
}
+static int __ring_active(struct intel_ring *ring)
+{
+ int err;
+
+ err = i915_active_acquire(&ring->vma->active);
+ if (err)
+ return err;
+
+ err = intel_ring_pin(ring);
+ if (err)
+ goto err_active;
+
+ return 0;
+
+err_active:
+ i915_active_release(&ring->vma->active);
+ return err;
+}
+
+static void __ring_retire(struct intel_ring *ring)
+{
+ intel_ring_unpin(ring);
+ i915_active_release(&ring->vma->active);
+}
+
__i915_active_call
static void __intel_context_retire(struct i915_active *active)
{
__context_unpin_state(ce->state);
intel_timeline_unpin(ce->timeline);
- intel_ring_unpin(ce->ring);
+ __ring_retire(ce->ring);
intel_context_put(ce);
}
intel_context_get(ce);
- err = intel_ring_pin(ce->ring);
+ err = __ring_active(ce->ring);
if (err)
goto err_put;
err_timeline:
intel_timeline_unpin(ce->timeline);
err_ring:
- intel_ring_unpin(ce->ring);
+ __ring_retire(ce->ring);
err_put:
intel_context_put(ce);
return err;
u8 class;
u8 instance;
- u8 uabi_class;
- u8 uabi_instance;
+ u16 uabi_class;
+ u16 uabi_instance;
u32 uabi_capabilities;
u32 context_size;
/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
batch = gen8_emit_flush_coherentl3_wa(engine, batch);
+ /* WaClearSlmSpaceAtContextSwitch:skl,bxt,kbl,glk,cfl */
+ batch = gen8_emit_pipe_control(batch,
+ PIPE_CONTROL_FLUSH_L3 |
+ PIPE_CONTROL_STORE_DATA_INDEX |
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_QW_WRITE,
+ LRC_PPHWSP_SCRATCH_ADDR);
+
batch = emit_lri(batch, lri, ARRAY_SIZE(lri));
/* WaMediaPoolStateCmdInWABB:bxt,glk */
ve->base.gt = siblings[0]->gt;
ve->base.uncore = siblings[0]->uncore;
ve->base.id = -1;
+
ve->base.class = OTHER_CLASS;
ve->base.uabi_class = I915_ENGINE_CLASS_INVALID;
ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
+ ve->base.uabi_instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
/*
* The decision on whether to submit a request using semaphores
int len;
u32 *cs;
- flags |= MI_MM_SPACE_GTT;
- if (IS_HASWELL(i915))
- /* These flags are for resource streamer on HSW+ */
- flags |= HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN;
- else
- /* We need to save the extended state for powersaving modes */
- flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN;
-
len = 4;
if (IS_GEN(i915, 7))
len += 2 + (num_engines ? 4 * num_engines + 6 : 0);
}
if (ce->state) {
- u32 hw_flags;
+ u32 flags;
GEM_BUG_ON(rq->engine->id != RCS0);
- /*
- * The kernel context(s) is treated as pure scratch and is not
- * expected to retain any state (as we sacrifice it during
- * suspend and on resume it may be corrupted). This is ok,
- * as nothing actually executes using the kernel context; it
- * is purely used for flushing user contexts.
- */
- hw_flags = 0;
- if (i915_gem_context_is_kernel(rq->gem_context))
- hw_flags = MI_RESTORE_INHIBIT;
+ /* For resource streamer on HSW+ and power context elsewhere */
+ BUILD_BUG_ON(HSW_MI_RS_SAVE_STATE_EN != MI_SAVE_EXT_STATE_EN);
+ BUILD_BUG_ON(HSW_MI_RS_RESTORE_STATE_EN != MI_RESTORE_EXT_STATE_EN);
+
+ flags = MI_SAVE_EXT_STATE_EN | MI_MM_SPACE_GTT;
+ if (!i915_gem_context_is_kernel(rq->gem_context))
+ flags |= MI_RESTORE_EXT_STATE_EN;
+ else
+ flags |= MI_RESTORE_INHIBIT;
- ret = mi_set_context(rq, hw_flags);
+ ret = mi_set_context(rq, flags);
if (ret)
return ret;
}
(IS_BROADWELL(dev_priv) || IS_GEN(dev_priv, 9))
/* WaRsDisableCoarsePowerGating:skl,cnl */
-#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
- (IS_CANNONLAKE(dev_priv) || IS_GEN(dev_priv, 9))
+#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
+ (IS_CANNONLAKE(dev_priv) || \
+ IS_SKL_GT3(dev_priv) || \
+ IS_SKL_GT4(dev_priv))
#define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4)
#define HAS_GMBUS_BURST_READ(dev_priv) (INTEL_GEN(dev_priv) >= 10 || \
pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
do {
+ GEM_BUG_ON(iter->sg->length < I915_GTT_PAGE_SIZE);
vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;
iter->dma += I915_GTT_PAGE_SIZE;
vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt));
do {
+ GEM_BUG_ON(iter.sg->length < I915_GTT_PAGE_SIZE);
vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
iter.dma += I915_GTT_PAGE_SIZE;
static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
{
- struct i915_vma *vma, *vn;
+ struct i915_vma *vma;
bool flush = false;
int open;
open = atomic_xchg(&ggtt->vm.open, 0);
/* clflush objects bound into the GGTT and rebind them. */
- list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
+ list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) {
struct drm_i915_gem_object *obj = vma->obj;
if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
continue;
- if (!__i915_vma_unbind(vma))
- continue;
-
clear_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma));
WARN_ON(i915_vma_bind(vma,
obj ? obj->cache_level : 0,
hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
pmu->timer.function = i915_sample;
- if (!is_igp(i915))
+ if (!is_igp(i915)) {
pmu->name = kasprintf(GFP_KERNEL,
- "i915-%s",
+ "i915_%s",
dev_name(i915->drm.dev));
- else
+ if (pmu->name) {
+ /* tools/perf reserves colons as special. */
+ strreplace((char *)pmu->name, ':', '_');
+ }
+ } else {
pmu->name = "i915";
+ }
if (!pmu->name)
goto err;
#define CPSSUNIT_CLKGATE_DIS REG_BIT(9)
#define UNSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9434)
-#define VFUNIT_CLKGATE_DIS (1 << 20)
+#define VFUNIT_CLKGATE_DIS REG_BIT(20)
+#define HSUNIT_CLKGATE_DIS REG_BIT(8)
+#define VSUNIT_CLKGATE_DIS REG_BIT(3)
+
+#define UNSLICE_UNIT_LEVEL_CLKGATE2 _MMIO(0x94e4)
+#define VSUNIT_CLKGATE_DIS_TGL REG_BIT(19)
+#define PSDUNIT_CLKGATE_DIS REG_BIT(5)
#define INF_UNIT_LEVEL_CLKGATE _MMIO(0x9560)
#define CGPSF_CLKGATE_DIS (1 << 3)
/* WaEnable32PlaneMode:icl */
I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
_MASKED_BIT_ENABLE(GEN11_ENABLE_32_PLANE_MODE));
+
+ /*
+ * Wa_1408615072:icl,ehl (vsunit)
+ * Wa_1407596294:icl,ehl (hsunit)
+ */
+ intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE,
+ 0, VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
+
+ /* Wa_1407352427:icl,ehl */
+ intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE2,
+ 0, PSDUNIT_CLKGATE_DIS);
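/*
 * Note, illustrative only: intel_uncore_rmw(uncore, reg, clear, set) is
 * a read-modify-write helper, so a clear mask of 0 simply ORs the
 * clock-gating disable bits into each register.
 */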
}
static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
#ifndef __I915_SELFTESTS_RANDOM_H__
#define __I915_SELFTESTS_RANDOM_H__
+#include <linux/math64.h>
#include <linux/random.h>
#include "../i915_selftest.h"
static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ struct panfrost_file_priv *priv = file->driver_priv;
struct panfrost_gem_object *bo;
struct drm_panfrost_create_bo *args = data;
+ struct panfrost_gem_mapping *mapping;
if (!args->size || args->pad ||
(args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP)))
if (IS_ERR(bo))
return PTR_ERR(bo);
- args->offset = bo->node.start << PAGE_SHIFT;
+ mapping = panfrost_gem_mapping_get(bo, priv);
+ if (!mapping) {
+ drm_gem_object_put_unlocked(&bo->base.base);
+ return -EINVAL;
+ }
+
+ args->offset = mapping->mmnode.start << PAGE_SHIFT;
+ panfrost_gem_mapping_put(mapping);
return 0;
}
struct drm_panfrost_submit *args,
struct panfrost_job *job)
{
+ struct panfrost_file_priv *priv = file_priv->driver_priv;
+ struct panfrost_gem_object *bo;
+ unsigned int i;
+ int ret;
+
job->bo_count = args->bo_handle_count;
if (!job->bo_count)
if (!job->implicit_fences)
return -ENOMEM;
- return drm_gem_objects_lookup(file_priv,
- (void __user *)(uintptr_t)args->bo_handles,
- job->bo_count, &job->bos);
+ ret = drm_gem_objects_lookup(file_priv,
+ (void __user *)(uintptr_t)args->bo_handles,
+ job->bo_count, &job->bos);
+ if (ret)
+ return ret;
+
+ job->mappings = kvmalloc_array(job->bo_count,
+ sizeof(struct panfrost_gem_mapping *),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!job->mappings)
+ return -ENOMEM;
+
+ for (i = 0; i < job->bo_count; i++) {
+ struct panfrost_gem_mapping *mapping;
+
+ bo = to_panfrost_bo(job->bos[i]);
+ mapping = panfrost_gem_mapping_get(bo, priv);
+ if (!mapping) {
+ ret = -EINVAL;
+ break;
+ }
+
+ job->mappings[i] = mapping;
+ }
+
+ return ret;
}
/**
static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct panfrost_file_priv *priv = file_priv->driver_priv;
struct drm_panfrost_get_bo_offset *args = data;
+ struct panfrost_gem_mapping *mapping;
struct drm_gem_object *gem_obj;
struct panfrost_gem_object *bo;
}
bo = to_panfrost_bo(gem_obj);
- args->offset = bo->node.start << PAGE_SHIFT;
-
+ mapping = panfrost_gem_mapping_get(bo, priv);
drm_gem_object_put_unlocked(gem_obj);
+
+ if (!mapping)
+ return -EINVAL;
+
+ args->offset = mapping->mmnode.start << PAGE_SHIFT;
+ panfrost_gem_mapping_put(mapping);
return 0;
}
static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct panfrost_file_priv *priv = file_priv->driver_priv;
struct drm_panfrost_madvise *args = data;
struct panfrost_device *pfdev = dev->dev_private;
struct drm_gem_object *gem_obj;
+ struct panfrost_gem_object *bo;
+ int ret = 0;
gem_obj = drm_gem_object_lookup(file_priv, args->handle);
if (!gem_obj) {
return -ENOENT;
}
+ bo = to_panfrost_bo(gem_obj);
+
mutex_lock(&pfdev->shrinker_lock);
+ mutex_lock(&bo->mappings.lock);
+ if (args->madv == PANFROST_MADV_DONTNEED) {
+ struct panfrost_gem_mapping *first;
+
+ first = list_first_entry(&bo->mappings.list,
+ struct panfrost_gem_mapping,
+ node);
+
+ /*
+ * If we want to mark the BO purgeable, there must be only one
+ * user: the caller FD.
+ * We could do something smarter and mark the BO purgeable only
+ * when all its users have marked it purgeable, but globally
+ * visible/shared BOs are likely to never be marked purgeable
+ * anyway, so let's not bother.
+ */
+ if (!list_is_singular(&bo->mappings.list) ||
+ WARN_ON_ONCE(first->mmu != &priv->mmu)) {
+ ret = -EINVAL;
+ goto out_unlock_mappings;
+ }
+ }
+
args->retained = drm_gem_shmem_madvise(gem_obj, args->madv);
if (args->retained) {
- struct panfrost_gem_object *bo = to_panfrost_bo(gem_obj);
-
if (args->madv == PANFROST_MADV_DONTNEED)
list_add_tail(&bo->base.madv_list,
&pfdev->shrinker_list);
else if (args->madv == PANFROST_MADV_WILLNEED)
list_del_init(&bo->base.madv_list);
}
+
+out_unlock_mappings:
+ mutex_unlock(&bo->mappings.lock);
mutex_unlock(&pfdev->shrinker_lock);
drm_gem_object_put_unlocked(gem_obj);
- return 0;
+ return ret;
}
int panfrost_unstable_ioctl_check(void)
list_del_init(&bo->base.madv_list);
mutex_unlock(&pfdev->shrinker_lock);
+ /*
+ * If we still have mappings attached to the BO, there's a problem in
+ * our refcounting.
+ */
+ WARN_ON_ONCE(!list_empty(&bo->mappings.list));
+
if (bo->sgts) {
int i;
int n_sgt = bo->base.base.size / SZ_2M;
drm_gem_shmem_free_object(obj);
}
+struct panfrost_gem_mapping *
+panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
+ struct panfrost_file_priv *priv)
+{
+ struct panfrost_gem_mapping *iter, *mapping = NULL;
+
+ mutex_lock(&bo->mappings.lock);
+ list_for_each_entry(iter, &bo->mappings.list, node) {
+ if (iter->mmu == &priv->mmu) {
+ kref_get(&iter->refcount);
+ mapping = iter;
+ break;
+ }
+ }
+ mutex_unlock(&bo->mappings.lock);
+
+ return mapping;
+}
+
+static void
+panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping)
+{
+ struct panfrost_file_priv *priv;
+
+ if (mapping->active)
+ panfrost_mmu_unmap(mapping);
+
+ priv = container_of(mapping->mmu, struct panfrost_file_priv, mmu);
+ spin_lock(&priv->mm_lock);
+ if (drm_mm_node_allocated(&mapping->mmnode))
+ drm_mm_remove_node(&mapping->mmnode);
+ spin_unlock(&priv->mm_lock);
+}
+
+static void panfrost_gem_mapping_release(struct kref *kref)
+{
+ struct panfrost_gem_mapping *mapping;
+
+ mapping = container_of(kref, struct panfrost_gem_mapping, refcount);
+
+ panfrost_gem_teardown_mapping(mapping);
+ drm_gem_object_put_unlocked(&mapping->obj->base.base);
+ kfree(mapping);
+}
+
+void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping)
+{
+ if (!mapping)
+ return;
+
+ kref_put(&mapping->refcount, panfrost_gem_mapping_release);
+}
+
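/*
 * Usage sketch, illustrative only: every lookup pairs with a put, and
 * the BO reference taken when the mapping was created is dropped only
 * from panfrost_gem_mapping_release() once the last kref is gone:
 *
 *	mapping = panfrost_gem_mapping_get(bo, priv);
 *	if (!mapping)
 *		return -EINVAL;
 *	... use mapping->mmnode ...
 *	panfrost_gem_mapping_put(mapping);
 */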
+void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo)
+{
+ struct panfrost_gem_mapping *mapping;
+
+ mutex_lock(&bo->mappings.lock);
+ list_for_each_entry(mapping, &bo->mappings.list, node)
+ panfrost_gem_teardown_mapping(mapping);
+ mutex_unlock(&bo->mappings.lock);
+}
+
int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
int ret;
struct panfrost_gem_object *bo = to_panfrost_bo(obj);
unsigned long color = bo->noexec ? PANFROST_BO_NOEXEC : 0;
struct panfrost_file_priv *priv = file_priv->driver_priv;
+ struct panfrost_gem_mapping *mapping;
+
+ mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
+ if (!mapping)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&mapping->node);
+ kref_init(&mapping->refcount);
+ drm_gem_object_get(obj);
+ mapping->obj = bo;
/*
* Executable buffers cannot cross a 16MB boundary as the program
else
align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;
- bo->mmu = &priv->mmu;
+ mapping->mmu = &priv->mmu;
spin_lock(&priv->mm_lock);
- ret = drm_mm_insert_node_generic(&priv->mm, &bo->node,
+ ret = drm_mm_insert_node_generic(&priv->mm, &mapping->mmnode,
size >> PAGE_SHIFT, align, color, 0);
spin_unlock(&priv->mm_lock);
if (ret)
- return ret;
+ goto err;
if (!bo->is_heap) {
- ret = panfrost_mmu_map(bo);
- if (ret) {
- spin_lock(&priv->mm_lock);
- drm_mm_remove_node(&bo->node);
- spin_unlock(&priv->mm_lock);
- }
+ ret = panfrost_mmu_map(mapping);
+ if (ret)
+ goto err;
}
+
+ mutex_lock(&bo->mappings.lock);
+ WARN_ON(bo->base.madv != PANFROST_MADV_WILLNEED);
+ list_add_tail(&mapping->node, &bo->mappings.list);
+ mutex_unlock(&bo->mappings.lock);
+
+err:
+ if (ret)
+ panfrost_gem_mapping_put(mapping);
return ret;
}
void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
{
- struct panfrost_gem_object *bo = to_panfrost_bo(obj);
struct panfrost_file_priv *priv = file_priv->driver_priv;
+ struct panfrost_gem_object *bo = to_panfrost_bo(obj);
+ struct panfrost_gem_mapping *mapping = NULL, *iter;
- if (bo->is_mapped)
- panfrost_mmu_unmap(bo);
+ mutex_lock(&bo->mappings.lock);
+ list_for_each_entry(iter, &bo->mappings.list, node) {
+ if (iter->mmu == &priv->mmu) {
+ mapping = iter;
+ list_del(&iter->node);
+ break;
+ }
+ }
+ mutex_unlock(&bo->mappings.lock);
- spin_lock(&priv->mm_lock);
- if (drm_mm_node_allocated(&bo->node))
- drm_mm_remove_node(&bo->node);
- spin_unlock(&priv->mm_lock);
+ panfrost_gem_mapping_put(mapping);
}
static int panfrost_gem_pin(struct drm_gem_object *obj)
if (!obj)
return NULL;
+ INIT_LIST_HEAD(&obj->mappings.list);
+ mutex_init(&obj->mappings.lock);
obj->base.base.funcs = &panfrost_gem_funcs;
return &obj->base.base;
struct drm_gem_shmem_object base;
struct sg_table *sgts;
- struct panfrost_mmu *mmu;
- struct drm_mm_node node;
- bool is_mapped :1;
+ /*
+ * Use a list for now. If searching a mapping ever becomes the
+ * bottleneck, we should consider using an RB-tree, or even better,
+ * let the core store drm_gem_object_mapping entries (where we
+ * could place driver specific data) instead of drm_gem_object ones
+ * in its drm_file->object_idr table.
+ *
+ * struct drm_gem_object_mapping {
+ * struct drm_gem_object *obj;
+ * void *driver_priv;
+ * };
+ */
+ struct {
+ struct list_head list;
+ struct mutex lock;
+ } mappings;
+
bool noexec :1;
bool is_heap :1;
};
+struct panfrost_gem_mapping {
+ struct list_head node;
+ struct kref refcount;
+ struct panfrost_gem_object *obj;
+ struct drm_mm_node mmnode;
+ struct panfrost_mmu *mmu;
+ bool active :1;
+};
+
static inline
struct panfrost_gem_object *to_panfrost_bo(struct drm_gem_object *obj)
{
return container_of(to_drm_gem_shmem_obj(obj), struct panfrost_gem_object, base);
}
-static inline
-struct panfrost_gem_object *drm_mm_node_to_panfrost_bo(struct drm_mm_node *node)
+static inline struct panfrost_gem_mapping *
+drm_mm_node_to_panfrost_mapping(struct drm_mm_node *node)
{
- return container_of(node, struct panfrost_gem_object, node);
+ return container_of(node, struct panfrost_gem_mapping, mmnode);
}
struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size);
void panfrost_gem_close(struct drm_gem_object *obj,
struct drm_file *file_priv);
+struct panfrost_gem_mapping *
+panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
+ struct panfrost_file_priv *priv);
+void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping);
+void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo);
+
void panfrost_gem_shrinker_init(struct drm_device *dev);
void panfrost_gem_shrinker_cleanup(struct drm_device *dev);
static bool panfrost_gem_purge(struct drm_gem_object *obj)
{
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+ struct panfrost_gem_object *bo = to_panfrost_bo(obj);
if (!mutex_trylock(&shmem->pages_lock))
return false;
- panfrost_mmu_unmap(to_panfrost_bo(obj));
+ panfrost_gem_teardown_mappings(bo);
drm_gem_shmem_purge_locked(obj);
mutex_unlock(&shmem->pages_lock);
dma_fence_put(job->done_fence);
dma_fence_put(job->render_done_fence);
- if (job->bos) {
+ if (job->mappings) {
for (i = 0; i < job->bo_count; i++)
+ panfrost_gem_mapping_put(job->mappings[i]);
+ kvfree(job->mappings);
+ }
+
+ if (job->bos) {
+ struct panfrost_gem_object *bo;
+
+ for (i = 0; i < job->bo_count; i++) {
+ bo = to_panfrost_bo(job->bos[i]);
drm_gem_object_put_unlocked(job->bos[i]);
+ }
+
kvfree(job->bos);
}
/* Exclusive fences we have taken from the BOs to wait for */
struct dma_fence **implicit_fences;
+ struct panfrost_gem_mapping **mappings;
struct drm_gem_object **bos;
u32 bo_count;
return 0;
}
-int panfrost_mmu_map(struct panfrost_gem_object *bo)
+int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
{
+ struct panfrost_gem_object *bo = mapping->obj;
struct drm_gem_object *obj = &bo->base.base;
struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
struct sg_table *sgt;
int prot = IOMMU_READ | IOMMU_WRITE;
- if (WARN_ON(bo->is_mapped))
+ if (WARN_ON(mapping->active))
return 0;
if (bo->noexec)
if (WARN_ON(IS_ERR(sgt)))
return PTR_ERR(sgt);
- mmu_map_sg(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, prot, sgt);
- bo->is_mapped = true;
+ mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,
+ prot, sgt);
+ mapping->active = true;
return 0;
}
-void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
+void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping)
{
+ struct panfrost_gem_object *bo = mapping->obj;
struct drm_gem_object *obj = &bo->base.base;
struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
- struct io_pgtable_ops *ops = bo->mmu->pgtbl_ops;
- u64 iova = bo->node.start << PAGE_SHIFT;
- size_t len = bo->node.size << PAGE_SHIFT;
+ struct io_pgtable_ops *ops = mapping->mmu->pgtbl_ops;
+ u64 iova = mapping->mmnode.start << PAGE_SHIFT;
+ size_t len = mapping->mmnode.size << PAGE_SHIFT;
size_t unmapped_len = 0;
- if (WARN_ON(!bo->is_mapped))
+ if (WARN_ON(!mapping->active))
return;
- dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx", bo->mmu->as, iova, len);
+ dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx",
+ mapping->mmu->as, iova, len);
while (unmapped_len < len) {
size_t unmapped_page;
unmapped_len += pgsize;
}
- panfrost_mmu_flush_range(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, len);
- bo->is_mapped = false;
+ panfrost_mmu_flush_range(pfdev, mapping->mmu,
+ mapping->mmnode.start << PAGE_SHIFT, len);
+ mapping->active = false;
}
static void mmu_tlb_inv_context_s1(void *cookie)
free_io_pgtable_ops(mmu->pgtbl_ops);
}
-static struct panfrost_gem_object *
-addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr)
+static struct panfrost_gem_mapping *
+addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr)
{
- struct panfrost_gem_object *bo = NULL;
+ struct panfrost_gem_mapping *mapping = NULL;
struct panfrost_file_priv *priv;
struct drm_mm_node *node;
u64 offset = addr >> PAGE_SHIFT;
drm_mm_for_each_node(node, &priv->mm) {
if (offset >= node->start &&
offset < (node->start + node->size)) {
- bo = drm_mm_node_to_panfrost_bo(node);
- drm_gem_object_get(&bo->base.base);
+ mapping = drm_mm_node_to_panfrost_mapping(node);
+
+ kref_get(&mapping->refcount);
break;
}
}
spin_unlock(&priv->mm_lock);
out:
spin_unlock(&pfdev->as_lock);
- return bo;
+ return mapping;
}
#define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)
u64 addr)
{
int ret, i;
+ struct panfrost_gem_mapping *bomapping;
struct panfrost_gem_object *bo;
struct address_space *mapping;
pgoff_t page_offset;
struct sg_table *sgt;
struct page **pages;
- bo = addr_to_drm_mm_node(pfdev, as, addr);
- if (!bo)
+ bomapping = addr_to_mapping(pfdev, as, addr);
+ if (!bomapping)
return -ENOENT;
+ bo = bomapping->obj;
if (!bo->is_heap) {
dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
- bo->node.start << PAGE_SHIFT);
+ bomapping->mmnode.start << PAGE_SHIFT);
ret = -EINVAL;
goto err_bo;
}
- WARN_ON(bo->mmu->as != as);
+ WARN_ON(bomapping->mmu->as != as);
/* Assume 2MB alignment and size multiple */
addr &= ~((u64)SZ_2M - 1);
page_offset = addr >> PAGE_SHIFT;
- page_offset -= bo->node.start;
+ page_offset -= bomapping->mmnode.start;
mutex_lock(&bo->base.pages_lock);
goto err_map;
}
- mmu_map_sg(pfdev, bo->mmu, addr, IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
+ mmu_map_sg(pfdev, bomapping->mmu, addr,
+ IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
- bo->is_mapped = true;
+ bomapping->active = true;
dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
- drm_gem_object_put_unlocked(&bo->base.base);
+ panfrost_gem_mapping_put(bomapping);
return 0;
#ifndef __PANFROST_MMU_H__
#define __PANFROST_MMU_H__
-struct panfrost_gem_object;
+struct panfrost_gem_mapping;
struct panfrost_file_priv;
struct panfrost_mmu;
-int panfrost_mmu_map(struct panfrost_gem_object *bo);
-void panfrost_mmu_unmap(struct panfrost_gem_object *bo);
+int panfrost_mmu_map(struct panfrost_gem_mapping *mapping);
+void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping);
int panfrost_mmu_init(struct panfrost_device *pfdev);
void panfrost_mmu_fini(struct panfrost_device *pfdev);
#define V4_SHADERS_PER_COREGROUP 4
struct panfrost_perfcnt {
- struct panfrost_gem_object *bo;
+ struct panfrost_gem_mapping *mapping;
size_t bosize;
void *buf;
struct panfrost_file_priv *user;
int ret;
reinit_completion(&pfdev->perfcnt->dump_comp);
- gpuva = pfdev->perfcnt->bo->node.start << PAGE_SHIFT;
+ gpuva = pfdev->perfcnt->mapping->mmnode.start << PAGE_SHIFT;
gpu_write(pfdev, GPU_PERFCNT_BASE_LO, gpuva);
gpu_write(pfdev, GPU_PERFCNT_BASE_HI, gpuva >> 32);
gpu_write(pfdev, GPU_INT_CLEAR,
if (IS_ERR(bo))
return PTR_ERR(bo);
- perfcnt->bo = to_panfrost_bo(&bo->base);
-
/* Map the perfcnt buf in the address space attached to file_priv. */
- ret = panfrost_gem_open(&perfcnt->bo->base.base, file_priv);
+ ret = panfrost_gem_open(&bo->base, file_priv);
if (ret)
goto err_put_bo;
+ perfcnt->mapping = panfrost_gem_mapping_get(to_panfrost_bo(&bo->base),
+ user);
+ if (!perfcnt->mapping) {
+ ret = -EINVAL;
+ goto err_close_bo;
+ }
+
perfcnt->buf = drm_gem_shmem_vmap(&bo->base);
if (IS_ERR(perfcnt->buf)) {
ret = PTR_ERR(perfcnt->buf);
- goto err_close_bo;
+ goto err_put_mapping;
}
/*
if (panfrost_has_hw_issue(pfdev, HW_ISSUE_8186))
gpu_write(pfdev, GPU_PRFCNT_TILER_EN, 0xffffffff);
+ /* The BO ref is retained by the mapping. */
+ drm_gem_object_put_unlocked(&bo->base);
+
return 0;
err_vunmap:
- drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf);
+ drm_gem_shmem_vunmap(&bo->base, perfcnt->buf);
+err_put_mapping:
+ panfrost_gem_mapping_put(perfcnt->mapping);
err_close_bo:
- panfrost_gem_close(&perfcnt->bo->base.base, file_priv);
+ panfrost_gem_close(&bo->base, file_priv);
err_put_bo:
drm_gem_object_put_unlocked(&bo->base);
return ret;
GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF));
perfcnt->user = NULL;
- drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf);
+ drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base.base, perfcnt->buf);
perfcnt->buf = NULL;
- panfrost_gem_close(&perfcnt->bo->base.base, file_priv);
- drm_gem_object_put_unlocked(&perfcnt->bo->base.base);
- perfcnt->bo = NULL;
+ panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv);
+ panfrost_gem_mapping_put(perfcnt->mapping);
+ perfcnt->mapping = NULL;
pm_runtime_mark_last_busy(pfdev->dev);
pm_runtime_put_autosuspend(pfdev->dev);
struct cdn_dp_port *port[MAX_PHY];
u8 ports;
u8 max_lanes;
- u8 max_rate;
+ unsigned int max_rate;
u8 lanes;
int active_port;
WARN_ON(!tcon->quirks->has_channel_0);
- tcon->dclk_min_div = 1;
+ tcon->dclk_min_div = tcon->quirks->dclk_min_div;
tcon->dclk_max_div = 127;
sun4i_tcon0_mode_set_common(tcon, mode);
static const struct sun4i_tcon_quirks sun4i_a10_quirks = {
.has_channel_0 = true,
.has_channel_1 = true,
+ .dclk_min_div = 4,
.set_mux = sun4i_a10_tcon_set_mux,
};
static const struct sun4i_tcon_quirks sun5i_a13_quirks = {
.has_channel_0 = true,
.has_channel_1 = true,
+ .dclk_min_div = 4,
.set_mux = sun5i_a13_tcon_set_mux,
};
.has_channel_1 = true,
.has_lvds_alt = true,
.needs_de_be_mux = true,
+ .dclk_min_div = 1,
.set_mux = sun6i_tcon_set_mux,
};
.has_channel_0 = true,
.has_channel_1 = true,
.needs_de_be_mux = true,
+ .dclk_min_div = 1,
};
static const struct sun4i_tcon_quirks sun7i_a20_quirks = {
.has_channel_0 = true,
.has_channel_1 = true,
+ .dclk_min_div = 4,
/* Same display pipeline structure as A10 */
.set_mux = sun4i_a10_tcon_set_mux,
};
static const struct sun4i_tcon_quirks sun8i_a33_quirks = {
.has_channel_0 = true,
.has_lvds_alt = true,
+ .dclk_min_div = 1,
};
static const struct sun4i_tcon_quirks sun8i_a83t_lcd_quirks = {
.supports_lvds = true,
.has_channel_0 = true,
+ .dclk_min_div = 1,
};
static const struct sun4i_tcon_quirks sun8i_a83t_tv_quirks = {
static const struct sun4i_tcon_quirks sun8i_v3s_quirks = {
.has_channel_0 = true,
+ .dclk_min_div = 1,
};
static const struct sun4i_tcon_quirks sun9i_a80_tcon_lcd_quirks = {
- .has_channel_0 = true,
- .needs_edp_reset = true,
+ .has_channel_0 = true,
+ .needs_edp_reset = true,
+ .dclk_min_div = 1,
};
static const struct sun4i_tcon_quirks sun9i_a80_tcon_tv_quirks = {
bool needs_de_be_mux; /* sun6i needs mux to select backend */
bool needs_edp_reset; /* a80 edp reset needed for tcon0 access */
bool supports_lvds; /* Does the TCON support an LVDS output? */
+ u8 dclk_min_div; /* minimum divider for TCON0 DCLK */
/* callback to handle tcon muxing options */
int (*set_mux)(struct sun4i_tcon *, const struct drm_encoder *);
if (!objs)
return;
virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
+ virtio_gpu_array_lock_resv(objs);
virtio_gpu_cmd_transfer_to_host_2d
(vgdev, 0,
plane->state->crtc_w,
struct hid_usage *usage, __s32 value)
{
if ((usage->hid & HID_USAGE_PAGE) == 0xff310000 &&
- (usage->hid & HID_USAGE) != 0x00 && !usage->type) {
+ (usage->hid & HID_USAGE) != 0x00 &&
+ (usage->hid & HID_USAGE) != 0xff && !usage->type) {
hid_warn(hdev, "Unmapped Asus vendor usagepage code 0x%02x\n",
usage->hid & HID_USAGE);
}
offset = report->size;
report->size += parser->global.report_size * parser->global.report_count;
+ /* Total size check: Allow for possible report index byte */
+ if (report->size > (HID_MAX_BUFFER_SIZE - 1) << 3) {
+ hid_err(parser->device, "report is too long\n");
+ return -1;
+ }
+
if (!parser->local.usage_index) /* Ignore padding fields */
return 0;
#define USB_VENDOR_ID_ITE 0x048d
#define USB_DEVICE_ID_ITE_LENOVO_YOGA 0x8386
#define USB_DEVICE_ID_ITE_LENOVO_YOGA2 0x8350
+#define I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720 0x837a
#define USB_DEVICE_ID_ITE_LENOVO_YOGA900 0x8396
#define USB_DEVICE_ID_ITE8595 0x8595
#define USB_DEVICE_ID_LG_MULTITOUCH 0x0064
#define USB_DEVICE_ID_LG_MELFAS_MT 0x6007
#define I2C_DEVICE_ID_LG_8001 0x8001
+#define I2C_DEVICE_ID_LG_7010 0x7010
#define USB_VENDOR_ID_LOGITECH 0x046d
#define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e
#define USB_DEVICE_ID_SYNAPTICS_LTS2 0x1d10
#define USB_DEVICE_ID_SYNAPTICS_HD 0x0ac3
#define USB_DEVICE_ID_SYNAPTICS_QUAD_HD 0x1ac3
+#define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012 0x2968
#define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710
#define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5 0x81a7
}
mapped:
- if (device->driver->input_mapped && device->driver->input_mapped(device,
- hidinput, field, usage, &bit, &max) < 0)
- goto ignore;
+ if (device->driver->input_mapped &&
+ device->driver->input_mapped(device, hidinput, field, usage,
+ &bit, &max) < 0) {
+ /*
+ * The driver indicated that no further generic handling
+ * of the usage is desired.
+ */
+ return;
+ }
set_bit(usage->type, input->evbit);
set_bit(MSC_SCAN, input->mscbit);
}
-ignore:
return;
+ignore:
+ usage->type = 0;
+ usage->code = 0;
}
static void hidinput_handle_scroll(struct hid_usage *usage,
static const struct hid_device_id ite_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) },
{ HID_USB_DEVICE(USB_VENDOR_ID_258A, USB_DEVICE_ID_258A_6A88) },
+ /* ITE8595 USB kbd ctlr, with Synaptics touchpad connected to it. */
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS,
+ USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012) },
{ }
};
MODULE_DEVICE_TABLE(hid, ite_devices);
#define HIDPP_REPORT_LONG_LENGTH 20
#define HIDPP_REPORT_VERY_LONG_MAX_LENGTH 64
+#define HIDPP_REPORT_SHORT_SUPPORTED BIT(0)
+#define HIDPP_REPORT_LONG_SUPPORTED BIT(1)
+#define HIDPP_REPORT_VERY_LONG_SUPPORTED BIT(2)
+
#define HIDPP_SUB_ID_CONSUMER_VENDOR_KEYS 0x03
#define HIDPP_SUB_ID_ROLLER 0x05
#define HIDPP_SUB_ID_MOUSE_EXTRA_BTNS 0x06
#define HIDPP_CAPABILITY_HIDPP20_BATTERY BIT(1)
#define HIDPP_CAPABILITY_BATTERY_MILEAGE BIT(2)
#define HIDPP_CAPABILITY_BATTERY_LEVEL_STATUS BIT(3)
+#define HIDPP_CAPABILITY_BATTERY_VOLTAGE BIT(4)
/*
* There are two hidpp protocols in use, the first version hidpp10 is known
struct hidpp_battery {
u8 feature_index;
u8 solar_feature_index;
+ u8 voltage_feature_index;
struct power_supply_desc desc;
struct power_supply *ps;
char name[64];
int status;
int capacity;
int level;
+ int voltage;
+ int charge_type;
bool online;
};
unsigned long quirks;
unsigned long capabilities;
+ u8 supported_reports;
struct hidpp_battery battery;
struct hidpp_scroll_counter vertical_wheel_counter;
+
+ u8 wireless_feature_index;
};
/* HID++ 1.0 error codes */
struct hidpp_report *message;
int ret, max_count;
+ /* Send as long report if short reports are not supported. */
+ if (report_id == REPORT_ID_HIDPP_SHORT &&
+ !(hidpp_dev->supported_reports & HIDPP_REPORT_SHORT_SUPPORTED))
+ report_id = REPORT_ID_HIDPP_LONG;
+
switch (report_id) {
case REPORT_ID_HIDPP_SHORT:
max_count = HIDPP_REPORT_SHORT_LENGTH - 4;
(answer->fap.params[0] == question->fap.funcindex_clientid);
}
-static inline bool hidpp_report_is_connect_event(struct hidpp_report *report)
+static inline bool hidpp_report_is_connect_event(struct hidpp_device *hidpp,
+ struct hidpp_report *report)
{
- return (report->report_id == REPORT_ID_HIDPP_SHORT) &&
- (report->rap.sub_id == 0x41);
+ return (hidpp->wireless_feature_index &&
+ (report->fap.feature_index == hidpp->wireless_feature_index)) ||
+ ((report->report_id == REPORT_ID_HIDPP_SHORT) &&
+ (report->rap.sub_id == 0x41));
}
/**
return 0;
}
+/* -------------------------------------------------------------------------- */
+/* 0x1001: Battery voltage */
+/* -------------------------------------------------------------------------- */
+
+#define HIDPP_PAGE_BATTERY_VOLTAGE 0x1001
+
+#define CMD_BATTERY_VOLTAGE_GET_BATTERY_VOLTAGE 0x00
+
+#define EVENT_BATTERY_VOLTAGE_STATUS_BROADCAST 0x00
+
+static int hidpp20_battery_map_status_voltage(u8 data[3], int *voltage,
+ int *level, int *charge_type)
+{
+ int status;
+ long charge_sts = (long)data[2];
+
+ *level = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN;
+ switch (data[2] & 0xe0) {
+ case 0x00:
+ status = POWER_SUPPLY_STATUS_CHARGING;
+ break;
+ case 0x20:
+ status = POWER_SUPPLY_STATUS_FULL;
+ *level = POWER_SUPPLY_CAPACITY_LEVEL_FULL;
+ break;
+ case 0x40:
+ status = POWER_SUPPLY_STATUS_DISCHARGING;
+ break;
+ case 0xe0:
+ status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+ default:
+ status = POWER_SUPPLY_STATUS_UNKNOWN;
+ }
+
+ *charge_type = POWER_SUPPLY_CHARGE_TYPE_STANDARD;
+ if (test_bit(3, &charge_sts))
+ *charge_type = POWER_SUPPLY_CHARGE_TYPE_FAST;
+ if (test_bit(4, &charge_sts))
+ *charge_type = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+
+ if (test_bit(5, &charge_sts))
+ *level = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
+
+ *voltage = get_unaligned_be16(data);
+
+ return status;
+}
+
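/*
 * Worked example, illustrative only: data = { 0x0f, 0xa0, 0x08 }
 * decodes to a voltage of 0x0fa0 = 4000 mV, a status of "charging"
 * (top three bits clear), and POWER_SUPPLY_CHARGE_TYPE_FAST (bit 3
 * set).
 */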
+static int hidpp20_battery_get_battery_voltage(struct hidpp_device *hidpp,
+ u8 feature_index,
+ int *status, int *voltage,
+ int *level, int *charge_type)
+{
+ struct hidpp_report response;
+ int ret;
+ u8 *params = (u8 *)response.fap.params;
+
+ ret = hidpp_send_fap_command_sync(hidpp, feature_index,
+ CMD_BATTERY_VOLTAGE_GET_BATTERY_VOLTAGE,
+ NULL, 0, &response);
+
+ if (ret > 0) {
+ hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n",
+ __func__, ret);
+ return -EPROTO;
+ }
+ if (ret)
+ return ret;
+
+ hidpp->capabilities |= HIDPP_CAPABILITY_BATTERY_VOLTAGE;
+
+ *status = hidpp20_battery_map_status_voltage(params, voltage,
+ level, charge_type);
+
+ return 0;
+}
+
+static int hidpp20_query_battery_voltage_info(struct hidpp_device *hidpp)
+{
+ u8 feature_type;
+ int ret;
+ int status, voltage, level, charge_type;
+
+ if (hidpp->battery.voltage_feature_index == 0xff) {
+ ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_BATTERY_VOLTAGE,
+ &hidpp->battery.voltage_feature_index,
+ &feature_type);
+ if (ret)
+ return ret;
+ }
+
+ ret = hidpp20_battery_get_battery_voltage(hidpp,
+ hidpp->battery.voltage_feature_index,
+ &status, &voltage, &level, &charge_type);
+
+ if (ret)
+ return ret;
+
+ hidpp->battery.status = status;
+ hidpp->battery.voltage = voltage;
+ hidpp->battery.level = level;
+ hidpp->battery.charge_type = charge_type;
+ hidpp->battery.online = status != POWER_SUPPLY_STATUS_NOT_CHARGING;
+
+ return 0;
+}
+
+static int hidpp20_battery_voltage_event(struct hidpp_device *hidpp,
+ u8 *data, int size)
+{
+ struct hidpp_report *report = (struct hidpp_report *)data;
+ int status, voltage, level, charge_type;
+
+ if (report->fap.feature_index != hidpp->battery.voltage_feature_index ||
+ report->fap.funcindex_clientid != EVENT_BATTERY_VOLTAGE_STATUS_BROADCAST)
+ return 0;
+
+ status = hidpp20_battery_map_status_voltage(report->fap.params, &voltage,
+ &level, &charge_type);
+
+ hidpp->battery.online = status != POWER_SUPPLY_STATUS_NOT_CHARGING;
+
+ if (voltage != hidpp->battery.voltage || status != hidpp->battery.status) {
+ hidpp->battery.voltage = voltage;
+ hidpp->battery.status = status;
+ hidpp->battery.level = level;
+ hidpp->battery.charge_type = charge_type;
+ if (hidpp->battery.ps)
+ power_supply_changed(hidpp->battery.ps);
+ }
+ return 0;
+}
+
static enum power_supply_property hidpp_battery_props[] = {
POWER_SUPPLY_PROP_ONLINE,
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_SERIAL_NUMBER,
0, /* placeholder for POWER_SUPPLY_PROP_CAPACITY, */
0, /* placeholder for POWER_SUPPLY_PROP_CAPACITY_LEVEL, */
+ 0, /* placeholder for POWER_SUPPLY_PROP_VOLTAGE_NOW, */
};
static int hidpp_battery_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_SERIAL_NUMBER:
val->strval = hidpp->hid_dev->uniq;
break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ /* hardware reports voltage in mV. sysfs expects uV */
+ val->intval = hidpp->battery.voltage * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ val->intval = hidpp->battery.charge_type;
+ break;
default:
ret = -EINVAL;
break;
return ret;
}
+/* -------------------------------------------------------------------------- */
+/* 0x1d4b: Wireless device status */
+/* -------------------------------------------------------------------------- */
+#define HIDPP_PAGE_WIRELESS_DEVICE_STATUS 0x1d4b
+
+static int hidpp_set_wireless_feature_index(struct hidpp_device *hidpp)
+{
+ u8 feature_type;
+ int ret;
+
+ ret = hidpp_root_get_feature(hidpp,
+ HIDPP_PAGE_WIRELESS_DEVICE_STATUS,
+ &hidpp->wireless_feature_index,
+ &feature_type);
+
+ return ret;
+}
+
/* -------------------------------------------------------------------------- */
/* 0x2120: Hi-resolution scrolling */
/* -------------------------------------------------------------------------- */
}
}
- if (unlikely(hidpp_report_is_connect_event(report))) {
+ if (unlikely(hidpp_report_is_connect_event(hidpp, report))) {
atomic_set(&hidpp->connected,
!(report->rap.params[0] & (1 << 6)));
if (schedule_work(&hidpp->work) == 0)
ret = hidpp_solar_battery_event(hidpp, data, size);
if (ret != 0)
return ret;
+ ret = hidpp20_battery_voltage_event(hidpp, data, size);
+ if (ret != 0)
+ return ret;
}
if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP10_BATTERY) {
hidpp->battery.feature_index = 0xff;
hidpp->battery.solar_feature_index = 0xff;
+ hidpp->battery.voltage_feature_index = 0xff;
if (hidpp->protocol_major >= 2) {
if (hidpp->quirks & HIDPP_QUIRK_CLASS_K750)
ret = hidpp_solar_request_battery_event(hidpp);
- else
- ret = hidpp20_query_battery_info(hidpp);
+ else {
+ ret = hidpp20_query_battery_voltage_info(hidpp);
+ if (ret)
+ ret = hidpp20_query_battery_info(hidpp);
+ }
if (ret)
return ret;
if (!battery_props)
return -ENOMEM;
- num_battery_props = ARRAY_SIZE(hidpp_battery_props) - 2;
+ num_battery_props = ARRAY_SIZE(hidpp_battery_props) - 3;
if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_MILEAGE)
battery_props[num_battery_props++] =
battery_props[num_battery_props++] =
POWER_SUPPLY_PROP_CAPACITY_LEVEL;
+ if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_VOLTAGE)
+ battery_props[num_battery_props++] =
+ POWER_SUPPLY_PROP_VOLTAGE_NOW;
+
battery = &hidpp->battery;
n = atomic_inc_return(&battery_no) - 1;
else
hidpp10_query_battery_status(hidpp);
} else if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP20_BATTERY) {
- hidpp20_query_battery_info(hidpp);
+ if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_VOLTAGE)
+ hidpp20_query_battery_voltage_info(hidpp);
+ else
+ hidpp20_query_battery_info(hidpp);
}
if (hidpp->battery.ps)
power_supply_changed(hidpp->battery.ps);
return report->field[0]->report_count + 1;
}
-static bool hidpp_validate_device(struct hid_device *hdev)
+static u8 hidpp_validate_device(struct hid_device *hdev)
{
struct hidpp_device *hidpp = hid_get_drvdata(hdev);
- int id, report_length, supported_reports = 0;
+ int id, report_length;
+ u8 supported_reports = 0;
id = REPORT_ID_HIDPP_SHORT;
report_length = hidpp_get_report_length(hdev, id);
if (report_length < HIDPP_REPORT_SHORT_LENGTH)
goto bad_device;
- supported_reports++;
+ supported_reports |= HIDPP_REPORT_SHORT_SUPPORTED;
}
id = REPORT_ID_HIDPP_LONG;
if (report_length < HIDPP_REPORT_LONG_LENGTH)
goto bad_device;
- supported_reports++;
+ supported_reports |= HIDPP_REPORT_LONG_SUPPORTED;
}
id = REPORT_ID_HIDPP_VERY_LONG;
report_length > HIDPP_REPORT_VERY_LONG_MAX_LENGTH)
goto bad_device;
- supported_reports++;
+ supported_reports |= HIDPP_REPORT_VERY_LONG_SUPPORTED;
hidpp->very_long_report_length = report_length;
}
/*
* Make sure the device is HID++ capable, otherwise treat as generic HID
*/
- if (!hidpp_validate_device(hdev)) {
+ hidpp->supported_reports = hidpp_validate_device(hdev);
+
+ if (!hidpp->supported_reports) {
hid_set_drvdata(hdev, NULL);
devm_kfree(&hdev->dev, hidpp);
return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (ret < 0) {
dev_err(&hdev->dev, "%s:hid_hw_open returned error:%d\n",
__func__, ret);
- hid_hw_stop(hdev);
goto hid_hw_open_fail;
}
hidpp_overwrite_name(hdev);
}
+ if (connected && hidpp->protocol_major >= 2) {
+ ret = hidpp_set_wireless_feature_index(hidpp);
+ if (ret == -ENOENT)
+ hidpp->wireless_feature_index = 0;
+ else if (ret)
+ goto hid_hw_init_fail;
+ }
+
if (connected && (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)) {
ret = wtp_get_config(hidpp);
if (ret)
{ LDJ_DEVICE(0x4071), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
{ /* Mouse Logitech MX Master 2S */
LDJ_DEVICE(0x4069), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { /* Mouse Logitech MX Master 3 */
+ LDJ_DEVICE(0x4082), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
{ /* Mouse Logitech Performance MX */
LDJ_DEVICE(0x101a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
{ /* Keyboard logitech K400 */
{ /* MX5500 keyboard over Bluetooth */
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb30b),
.driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
+ { /* MX Master mouse over Bluetooth */
+ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb012),
+ .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01e),
+ .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { /* MX Master 3 mouse over Bluetooth */
+ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb023),
+ .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
{}
};
tool = MT_TOOL_DIAL;
else if (unlikely(!confidence_state)) {
tool = MT_TOOL_PALM;
- if (!active &&
+ if (!active && mt &&
input_mt_is_active(&mt->slots[slotnum])) {
/*
* The non-confidence was reported for
{ .driver_data = MT_CLS_LG,
HID_USB_DEVICE(USB_VENDOR_ID_LG,
USB_DEVICE_ID_LG_MELFAS_MT) },
+ { .driver_data = MT_CLS_LG,
+ HID_DEVICE(BUS_I2C, HID_GROUP_GENERIC,
+ USB_VENDOR_ID_LG, I2C_DEVICE_ID_LG_7010) },
/* MosArt panels */
{ .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD2, USB_DEVICE_ID_SMARTJOY_DUAL_PLUS), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE), HID_QUIRK_MULTI_INPUT },
{ 0 }
};
if (steam->quirks & STEAM_QUIRK_WIRELESS) {
hid_info(hdev, "Steam wireless receiver connected");
+ /* If using a wireless adaptor, ask for connection status */
+ steam->connected = false;
steam_request_conn_status(steam);
} else {
+ /* A wired connection is always present */
+ steam->connected = true;
ret = steam_register(steam);
if (ret) {
hid_err(hdev,
static __poll_t hidraw_poll(struct file *file, poll_table *wait)
{
struct hidraw_list *list = file->private_data;
+ __poll_t mask = EPOLLOUT | EPOLLWRNORM; /* hidraw is always writable */
poll_wait(file, &list->hidraw->wait, wait);
if (list->head != list->tail)
- return EPOLLIN | EPOLLRDNORM | EPOLLOUT;
+ mask |= EPOLLIN | EPOLLRDNORM;
if (!list->hidraw->exist)
- return EPOLLERR | EPOLLHUP;
- return 0;
+ mask |= EPOLLERR | EPOLLHUP;
+ return mask;
}
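Because hidraw writes are issued synchronously and never queue, the fixed handler advertises EPOLLOUT | EPOLLWRNORM unconditionally and ORs in the read and error bits instead of returning a single state exclusively. A hedged userspace sketch of the visible effect (the device node is illustrative):

	#include <fcntl.h>
	#include <poll.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		struct pollfd pfd;

		pfd.fd = open("/dev/hidraw0", O_RDWR);	/* illustrative node */
		if (pfd.fd < 0)
			return 1;
		pfd.events = POLLIN | POLLOUT;
		if (poll(&pfd, 1, 1000) > 0) {
			if (pfd.revents & POLLOUT)
				puts("writable");	/* now always reported */
			if (pfd.revents & POLLIN)
				puts("input report pending");
		}
		close(pfd.fd);
		return 0;
	}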
static int hidraw_open(struct inode *inode, struct file *file)
-EFAULT : len;
break;
}
+
+ if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGRAWUNIQ(0))) {
+ int len = strlen(hid->uniq) + 1;
+ if (len > _IOC_SIZE(cmd))
+ len = _IOC_SIZE(cmd);
+ ret = copy_to_user(user_arg, hid->uniq, len) ?
+ -EFAULT : len;
+ break;
+ }
}
ret = -ENOTTY;
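The new ioctl follows the HIDIOCGRAWNAME/HIDIOCGRAWPHYS pattern, encoding the buffer size in the command and returning the copied length. A minimal usage sketch, assuming a kernel with this change (the hidraw node is illustrative; for Bluetooth devices uniq typically holds the MAC address):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/hidraw.h>

	int main(void)
	{
		char uniq[64] = "";
		int fd = open("/dev/hidraw0", O_RDONLY);	/* illustrative */

		if (fd < 0)
			return 1;
		if (ioctl(fd, HIDIOCGRAWUNIQ(sizeof(uniq)), uniq) >= 0)
			printf("uniq: %s\n", uniq);
		close(fd);
		return 0;
	}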
#define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1)
#define I2C_HID_QUIRK_BOGUS_IRQ BIT(4)
#define I2C_HID_QUIRK_RESET_ON_RESUME BIT(5)
+#define I2C_HID_QUIRK_BAD_INPUT_SIZE BIT(6)
+
/* flags */
#define I2C_HID_STARTED 0
I2C_HID_QUIRK_BOGUS_IRQ },
{ USB_VENDOR_ID_ALPS_JP, HID_ANY_ID,
I2C_HID_QUIRK_RESET_ON_RESUME },
+ { USB_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720,
+ I2C_HID_QUIRK_BAD_INPUT_SIZE },
{ 0, 0 }
};
}
if ((ret_size > size) || (ret_size < 2)) {
- dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n",
- __func__, size, ret_size);
- return;
+ if (ihid->quirks & I2C_HID_QUIRK_BAD_INPUT_SIZE) {
+ ihid->inbuf[0] = size & 0xff;
+ ihid->inbuf[1] = size >> 8;
+ ret_size = size;
+ } else {
+ dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n",
+ __func__, size, ret_size);
+ return;
+ }
}
i2c_hid_dbg(ihid, "input: %*ph\n", ret_size, ihid->inbuf);
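For context, i2c-hid input transfers start with a 16-bit little-endian length that should cover the whole report; the Y720's ITE controller puts a bogus value there, so the quirk rewrites the header with the known transfer size rather than dropping the report. A standalone sketch of that framing, with hypothetical helper names:

	#include <stddef.h>
	#include <stdint.h>

	/* Hypothetical helpers illustrating the 2-byte LE length header. */
	static uint16_t i2c_hid_hdr_len(const uint8_t *inbuf)
	{
		return inbuf[0] | (inbuf[1] << 8);
	}

	static void i2c_hid_patch_hdr(uint8_t *inbuf, size_t xfer_size)
	{
		/* What the BAD_INPUT_SIZE quirk does: trust the size that
		 * was requested and rewrite the header so the report is
		 * still parsed. */
		inbuf[0] = xfer_size & 0xff;
		inbuf[1] = xfer_size >> 8;
	}

	int main(void)
	{
		uint8_t buf[64] = { 0xff, 0xff };	/* bogus device length */

		if (i2c_hid_hdr_len(buf) > sizeof(buf))
			i2c_hid_patch_hdr(buf, sizeof(buf));
		return i2c_hid_hdr_len(buf) == sizeof(buf) ? 0 : 1;
	}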
#define ICL_MOBILE_DEVICE_ID 0x34FC
#define SPT_H_DEVICE_ID 0xA135
#define CML_LP_DEVICE_ID 0x02FC
+#define CMP_H_DEVICE_ID 0x06FC
#define EHL_Ax_DEVICE_ID 0x4BB3
+#define TGL_LP_DEVICE_ID 0xA0FC
#define REVISION_ID_CHT_A0 0x6
#define REVISION_ID_CHT_Ax_SI 0x0
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, ICL_MOBILE_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, CML_LP_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CMP_H_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, EHL_Ax_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, TGL_LP_DEVICE_ID)},
{0, }
};
MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
static __poll_t uhid_char_poll(struct file *file, poll_table *wait)
{
struct uhid_device *uhid = file->private_data;
+ __poll_t mask = EPOLLOUT | EPOLLWRNORM; /* uhid is always writable */
poll_wait(file, &uhid->waitq, wait);
if (uhid->head != uhid->tail)
- return EPOLLIN | EPOLLRDNORM;
+ mask |= EPOLLIN | EPOLLRDNORM;
- return 0;
+ return mask;
}
static const struct file_operations uhid_fops = {
return 0;
}
+static int __hiddev_open(struct hiddev *hiddev, struct file *file)
+{
+ struct hiddev_list *list;
+ int error;
+
+ lockdep_assert_held(&hiddev->existancelock);
+
+ list = vzalloc(sizeof(*list));
+ if (!list)
+ return -ENOMEM;
+
+ mutex_init(&list->thread_lock);
+ list->hiddev = hiddev;
+
+ if (!hiddev->open++) {
+ error = hid_hw_power(hiddev->hid, PM_HINT_FULLON);
+ if (error < 0)
+ goto err_drop_count;
+
+ error = hid_hw_open(hiddev->hid);
+ if (error < 0)
+ goto err_normal_power;
+ }
+
+ spin_lock_irq(&hiddev->list_lock);
+ list_add_tail(&list->node, &hiddev->list);
+ spin_unlock_irq(&hiddev->list_lock);
+
+ file->private_data = list;
+
+ return 0;
+
+err_normal_power:
+ hid_hw_power(hiddev->hid, PM_HINT_NORMAL);
+err_drop_count:
+ hiddev->open--;
+ vfree(list);
+ return error;
+}
+
/*
* open file op
*/
static int hiddev_open(struct inode *inode, struct file *file)
{
- struct hiddev_list *list;
struct usb_interface *intf;
struct hid_device *hid;
struct hiddev *hiddev;
intf = usbhid_find_interface(iminor(inode));
if (!intf)
return -ENODEV;
+
hid = usb_get_intfdata(intf);
hiddev = hid->hiddev;
- if (!(list = vzalloc(sizeof(struct hiddev_list))))
- return -ENOMEM;
- mutex_init(&list->thread_lock);
- list->hiddev = hiddev;
- file->private_data = list;
-
- /*
- * no need for locking because the USB major number
- * is shared which usbcore guards against disconnect
- */
- if (list->hiddev->exist) {
- if (!list->hiddev->open++) {
- res = hid_hw_open(hiddev->hid);
- if (res < 0)
- goto bail;
- }
- } else {
- res = -ENODEV;
- goto bail;
- }
-
- spin_lock_irq(&list->hiddev->list_lock);
- list_add_tail(&list->node, &hiddev->list);
- spin_unlock_irq(&list->hiddev->list_lock);
-
mutex_lock(&hiddev->existancelock);
- /*
- * recheck exist with existance lock held to
- * avoid opening a disconnected device
- */
- if (!list->hiddev->exist) {
- res = -ENODEV;
- goto bail_unlock;
- }
- if (!list->hiddev->open++)
- if (list->hiddev->exist) {
- struct hid_device *hid = hiddev->hid;
- res = hid_hw_power(hid, PM_HINT_FULLON);
- if (res < 0)
- goto bail_unlock;
- res = hid_hw_open(hid);
- if (res < 0)
- goto bail_normal_power;
- }
- mutex_unlock(&hiddev->existancelock);
- return 0;
-bail_normal_power:
- hid_hw_power(hid, PM_HINT_NORMAL);
-bail_unlock:
+ res = hiddev->exist ? __hiddev_open(hiddev, file) : -ENODEV;
mutex_unlock(&hiddev->existancelock);
- spin_lock_irq(&list->hiddev->list_lock);
- list_del(&list->node);
- spin_unlock_irq(&list->hiddev->list_lock);
-bail:
- file->private_data = NULL;
- vfree(list);
return res;
}
(hdev->product == 0x34d || hdev->product == 0x34e || /* MobileStudio Pro */
hdev->product == 0x357 || hdev->product == 0x358 || /* Intuos Pro 2 */
hdev->product == 0x392 || /* Intuos Pro 2 */
- hdev->product == 0x398 || hdev->product == 0x399)) { /* MobileStudio Pro */
+ hdev->product == 0x398 || hdev->product == 0x399 || /* MobileStudio Pro */
+ hdev->product == 0x3AA)) { /* MobileStudio Pro */
value = (field->logical_maximum - value);
if (hdev->product == 0x357 || hdev->product == 0x358 ||
hdev->product == 0x392)
value = wacom_offset_rotation(input, usage, value, 3, 16);
else if (hdev->product == 0x34d || hdev->product == 0x34e ||
- hdev->product == 0x398 || hdev->product == 0x399)
+ hdev->product == 0x398 || hdev->product == 0x399 ||
+ hdev->product == 0x3AA)
value = wacom_offset_rotation(input, usage, value, 1, 2);
}
else {
This driver can also be built as a module. If so, the module
will be called adm1031.
+config SENSORS_ADM1177
+ tristate "Analog Devices ADM1177 and compatibles"
+ depends on I2C
+ help
+ If you say yes here you get support for Analog Devices ADM1177
+ sensor chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called adm1177.
+
config SENSORS_ADM9240
tristate "Analog Devices ADM9240 and compatibles"
depends on I2C
This driver can also be built as a module. If so, the module
will be called atxp1.
+config SENSORS_DRIVETEMP
+ tristate "Hard disk drives with temperature sensors"
+ depends on SCSI && ATA
+ help
+ If you say yes you get support for the temperature sensor on
+ hard disk drives.
+
+ This driver can also be built as a module. If so, the module
+ will be called drivetemp.
+
config SENSORS_DS620
tristate "Dallas Semiconductor DS620"
depends on I2C
will be called max197.
config SENSORS_MAX31722
-tristate "MAX31722 temperature sensor"
+ tristate "MAX31722 temperature sensor"
depends on SPI
help
Support for the Maxim Integrated MAX31722/MAX31723 digital
This driver can also be built as a module. If so, the module
will be called max31722.
+config SENSORS_MAX31730
+ tristate "MAX31730 temperature sensor"
+ depends on I2C
+ help
+ Support for the Maxim Integrated MAX31730 3-Channel Remote
+ Temperature Sensor.
+
+ This driver can also be built as a module. If so, the module
+ will be called max31730.
+
config SENSORS_MAX6621
tristate "Maxim MAX6621 sensor chip"
depends on I2C
will be called w83627hf.
config SENSORS_W83627EHF
- tristate "Winbond W83627EHF/EHG/DHG/UHG, W83667HG, NCT6775F, NCT6776F"
+ tristate "Winbond W83627EHF/EHG/DHG/UHG, W83667HG"
depends on !PPC
select HWMON_VID
help
the Core 2 Duo. And also the W83627UHG, which is a stripped down
version of the W83627DHG (as far as hardware monitoring goes.)
- This driver also supports Nuvoton W83667HG, W83667HG-B, NCT6775F
- (also known as W83667HG-I), and NCT6776F.
+ This driver also supports Nuvoton W83667HG and W83667HG-B.
This driver can also be built as a module. If so, the module
will be called w83627ehf.
obj-$(CONFIG_SENSORS_ADM1026) += adm1026.o
obj-$(CONFIG_SENSORS_ADM1029) += adm1029.o
obj-$(CONFIG_SENSORS_ADM1031) += adm1031.o
+obj-$(CONFIG_SENSORS_ADM1177) += adm1177.o
obj-$(CONFIG_SENSORS_ADM9240) += adm9240.o
obj-$(CONFIG_SENSORS_ADS7828) += ads7828.o
obj-$(CONFIG_SENSORS_ADS7871) += ads7871.o
obj-$(CONFIG_SENSORS_DA9055) += da9055-hwmon.o
obj-$(CONFIG_SENSORS_DELL_SMM) += dell-smm-hwmon.o
obj-$(CONFIG_SENSORS_DME1737) += dme1737.o
+obj-$(CONFIG_SENSORS_DRIVETEMP) += drivetemp.o
obj-$(CONFIG_SENSORS_DS620) += ds620.o
obj-$(CONFIG_SENSORS_DS1621) += ds1621.o
obj-$(CONFIG_SENSORS_EMC1403) += emc1403.o
obj-$(CONFIG_SENSORS_MAX1668) += max1668.o
obj-$(CONFIG_SENSORS_MAX197) += max197.o
obj-$(CONFIG_SENSORS_MAX31722) += max31722.o
+obj-$(CONFIG_SENSORS_MAX31730) += max31730.o
obj-$(CONFIG_SENSORS_MAX6621) += max6621.o
obj-$(CONFIG_SENSORS_MAX6639) += max6639.o
obj-$(CONFIG_SENSORS_MAX6642) += max6642.o
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ADM1177 Hot Swap Controller and Digital Power Monitor with Soft Start Pin
+ *
+ * Copyright 2015-2019 Analog Devices Inc.
+ */
+
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/hwmon.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+
+/* Command Byte Operations */
+#define ADM1177_CMD_V_CONT BIT(0)
+#define ADM1177_CMD_I_CONT BIT(2)
+#define ADM1177_CMD_VRANGE BIT(4)
+
+/* Extended Register */
+#define ADM1177_REG_ALERT_TH 2
+
+#define ADM1177_BITS 12
+
+/**
+ * struct adm1177_state - driver instance specific data
+ * @client pointer to i2c client
+ * @reg regulator info for the power supply of the device
+ * @r_sense_uohm current sense resistor value
+ * @alert_threshold_ua current limit for shutdown
+ * @vrange_high internal voltage divider
+ */
+struct adm1177_state {
+ struct i2c_client *client;
+ struct regulator *reg;
+ u32 r_sense_uohm;
+ u32 alert_threshold_ua;
+ bool vrange_high;
+};
+
+static int adm1177_read_raw(struct adm1177_state *st, u8 num, u8 *data)
+{
+ return i2c_master_recv(st->client, data, num);
+}
+
+static int adm1177_write_cmd(struct adm1177_state *st, u8 cmd)
+{
+ return i2c_smbus_write_byte(st->client, cmd);
+}
+
+static int adm1177_write_alert_thr(struct adm1177_state *st,
+ u32 alert_threshold_ua)
+{
+ u64 val;
+ int ret;
+
+ val = 0xFFULL * alert_threshold_ua * st->r_sense_uohm;
+ val = div_u64(val, 105840000U);
+ val = div_u64(val, 1000U);
+ if (val > 0xFF)
+ val = 0xFF;
+
+ ret = i2c_smbus_write_byte_data(st->client, ADM1177_REG_ALERT_TH,
+ val);
+ if (ret)
+ return ret;
+
+ st->alert_threshold_ua = alert_threshold_ua;
+ return 0;
+}
+
+static int adm1177_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct adm1177_state *st = dev_get_drvdata(dev);
+ u8 data[3];
+ long dummy;
+ int ret;
+
+ switch (type) {
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_input:
+ ret = adm1177_read_raw(st, 3, data);
+ if (ret < 0)
+ return ret;
+ dummy = (data[1] << 4) | (data[2] & 0xF);
+ /*
+ * convert to milliamperes
+ * ((105.84mV / 4096) x raw) / senseResistor(ohm)
+ */
+ *val = div_u64((105840000ull * dummy),
+ 4096 * st->r_sense_uohm);
+ return 0;
+ case hwmon_curr_max_alarm:
+ *val = st->alert_threshold_ua;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ case hwmon_in:
+ ret = adm1177_read_raw(st, 3, data);
+ if (ret < 0)
+ return ret;
+ dummy = (data[0] << 4) | (data[2] >> 4);
+ /*
+ * convert to millivolts based on resistor division
+ * (V_fullscale / 4096) * raw
+ */
+ if (st->vrange_high)
+ dummy *= 26350;
+ else
+ dummy *= 6650;
+
+ *val = DIV_ROUND_CLOSEST(dummy, 4096);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int adm1177_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct adm1177_state *st = dev_get_drvdata(dev);
+
+ switch (type) {
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_max_alarm:
+ adm1177_write_alert_thr(st, val);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t adm1177_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct adm1177_state *st = data;
+
+ switch (type) {
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_input:
+ return 0444;
+ }
+ break;
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_input:
+ if (st->r_sense_uohm)
+ return 0444;
+ return 0;
+ case hwmon_curr_max_alarm:
+ if (st->r_sense_uohm)
+ return 0644;
+ return 0;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static const struct hwmon_channel_info *adm1177_info[] = {
+ HWMON_CHANNEL_INFO(curr,
+ HWMON_C_INPUT | HWMON_C_MAX_ALARM),
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT),
+ NULL
+};
+
+static const struct hwmon_ops adm1177_hwmon_ops = {
+ .is_visible = adm1177_is_visible,
+ .read = adm1177_read,
+ .write = adm1177_write,
+};
+
+static const struct hwmon_chip_info adm1177_chip_info = {
+ .ops = &adm1177_hwmon_ops,
+ .info = adm1177_info,
+};
+
+static void adm1177_remove(void *data)
+{
+ struct adm1177_state *st = data;
+
+ regulator_disable(st->reg);
+}
+
+static int adm1177_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct device *hwmon_dev;
+ struct adm1177_state *st;
+ u32 alert_threshold_ua;
+ int ret;
+
+ st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ st->client = client;
+
+ st->reg = devm_regulator_get_optional(&client->dev, "vref");
+ if (IS_ERR(st->reg)) {
+ if (PTR_ERR(st->reg) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ st->reg = NULL;
+ } else {
+ ret = regulator_enable(st->reg);
+ if (ret)
+ return ret;
+ ret = devm_add_action_or_reset(&client->dev, adm1177_remove,
+ st);
+ if (ret)
+ return ret;
+ }
+
+ if (device_property_read_u32(dev, "shunt-resistor-micro-ohms",
+ &st->r_sense_uohm))
+ st->r_sense_uohm = 0;
+ if (device_property_read_u32(dev, "adi,shutdown-threshold-microamp",
+ &alert_threshold_ua)) {
+ if (st->r_sense_uohm)
+ /*
+ * set maximum default value from datasheet based on
+ * shunt-resistor
+ */
+ alert_threshold_ua = div_u64(105840000000,
+ st->r_sense_uohm);
+ else
+ alert_threshold_ua = 0;
+ }
+ st->vrange_high = device_property_read_bool(dev,
+ "adi,vrange-high-enable");
+ if (alert_threshold_ua && st->r_sense_uohm)
+ adm1177_write_alert_thr(st, alert_threshold_ua);
+
+ ret = adm1177_write_cmd(st, ADM1177_CMD_V_CONT |
+ ADM1177_CMD_I_CONT |
+ (st->vrange_high ? 0 : ADM1177_CMD_VRANGE));
+ if (ret)
+ return ret;
+
+ hwmon_dev =
+ devm_hwmon_device_register_with_info(dev, client->name, st,
+ &adm1177_chip_info, NULL);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct i2c_device_id adm1177_id[] = {
+ {"adm1177", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, adm1177_id);
+
+static const struct of_device_id adm1177_dt_ids[] = {
+ { .compatible = "adi,adm1177" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, adm1177_dt_ids);
+
+static struct i2c_driver adm1177_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "adm1177",
+ .of_match_table = adm1177_dt_ids,
+ },
+ .probe = adm1177_probe,
+ .id_table = adm1177_id,
+};
+module_i2c_driver(adm1177_driver);
+
+MODULE_AUTHOR("Beniamin Bia <beniamin.bia@analog.com>");
+MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
+MODULE_DESCRIPTION("Analog Devices ADM1177 ADC driver");
+MODULE_LICENSE("GPL v2");
long reg;
if (bypass_attn & (1 << channel))
- reg = (volt * 1024) / 2250;
+ reg = DIV_ROUND_CLOSEST(volt * 1024, 2250);
else
- reg = (volt * r[1] * 1024) / ((r[0] + r[1]) * 2250);
+ reg = DIV_ROUND_CLOSEST(volt * r[1] * 1024,
+ (r[0] + r[1]) * 2250);
return clamp_val(reg, 0, 1023) & (0xff << 2);
}
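Switching to DIV_ROUND_CLOSEST halves the worst-case quantization error of the 10-bit limit register. A worked example, with the macro expanded for non-negative operands: for volt = 3300 mV on an unattenuated channel, 3300 * 1024 / 2250 = 1501.87, so plain division loses almost a full LSB while rounding to closest stays within half an LSB:

	#include <stdio.h>

	/* Simplified for non-negative operands. */
	#define DIV_ROUND_CLOSEST(x, d) (((x) + ((d) / 2)) / (d))

	int main(void)
	{
		long volt = 3300;				/* millivolts */
		long trunc = (volt * 1024) / 2250;		/* old: 1501 */
		long round = DIV_ROUND_CLOSEST(volt * 1024, 2250); /* new: 1502 */

		printf("truncated=%ld rounded=%ld\n", trunc, round);
		return 0;
	}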
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hwmon client for disk and solid state drives with temperature sensors
+ * Copyright (C) 2019 Zodiac Inflight Innovations
+ *
+ * With input from:
+ * Hwmon client for S.M.A.R.T. hard disk drives with temperature sensors.
+ * (C) 2018 Linus Walleij
+ *
+ * hwmon: Driver for SCSI/ATA temperature sensors
+ * by Constantin Baranov <const@mimas.ru>, submitted September 2009
+ *
+ * This driver supports reporting the temperature of SATA drives. It can be
+ * easily extended to report the temperature of SCSI drives.
+ *
+ * The primary means to read drive temperatures and temperature limits
+ * for ATA drives is the SCT Command Transport feature set as specified in
+ * ATA8-ACS.
+ * It can be used to read the current drive temperature, temperature limits,
+ * and historic minimum and maximum temperatures. The SCT Command Transport
+ * feature set is documented in "AT Attachment 8 - ATA/ATAPI Command Set
+ * (ATA8-ACS)".
+ *
+ * If the SCT Command Transport feature set is not available, drive temperatures
+ * may be readable through SMART attributes. Since SMART attributes are not well
+ * defined, this method is only used as a fallback mechanism.
+ *
+ * There are three SMART attributes which may report drive temperatures.
+ * Those are defined as follows (from
+ * http://www.cropel.com/library/smart-attribute-list.aspx).
+ *
+ * 190 Temperature Temperature, monitored by a sensor somewhere inside
+ * the drive. Raw value typically holds the actual
+ * temperature (hexadecimal) in its rightmost two digits.
+ *
+ * 194 Temperature Temperature, monitored by a sensor somewhere inside
+ * the drive. Raw value typically holds the actual
+ * temperature (hexadecimal) in its rightmost two digits.
+ *
+ * 231 Temperature Temperature, monitored by a sensor somewhere inside
+ * the drive. Raw value typically holds the actual
+ * temperature (hexadecimal) in its rightmost two digits.
+ *
+ * Wikipedia defines attributes a bit differently.
+ *
+ * 190 Temperature Value is equal to (100-temp. °C), allowing manufacturer
+ * Difference or to set a minimum threshold which corresponds to a
+ * Airflow maximum temperature. This also follows the convention of
+ * Temperature 100 being a best-case value and lower values being
+ * undesirable. However, some older drives may instead
+ * report raw Temperature (identical to 0xC2) or
+ * Temperature minus 50 here.
+ * 194 Temperature or Indicates the device temperature, if the appropriate
+ * Temperature sensor is fitted. Lowest byte of the raw value contains
+ * Celsius the exact temperature value (Celsius degrees).
+ * 231 Life Left Indicates the approximate SSD life left, in terms of
+ * (SSDs) or program/erase cycles or available reserved blocks.
+ * Temperature A normalized value of 100 represents a new drive, with
+ * a threshold value at 10 indicating a need for
+ * replacement. A value of 0 may mean that the drive is
+ * operating in read-only mode to allow data recovery.
+ * Previously (pre-2010) occasionally used for Drive
+ * Temperature (more typically reported at 0xC2).
+ *
+ * The common denominator is that the first raw byte reports the temperature
+ * in degrees C on almost all drives. Some drives may report a fractional
+ * temperature in the second raw byte.
+ *
+ * Known exceptions (from libatasmart):
+ * - SAMSUNG SV0412H and SAMSUNG SV1204H report the temperature in tenths
+ *   of a degree C in the first two raw bytes.
+ * - A few Maxtor drives report an unknown or bad value in attribute 194.
+ * - Certain Apple SSD drives report an unknown value in attribute 190.
+ * Only certain firmware versions are affected.
+ *
+ * Those exceptions affect older ATA drives and are currently ignored.
+ * Also, the second raw byte (possibly reporting the fractional temperature)
+ * is currently ignored.
+ *
+ * Many drives also report temperature limits in additional SMART data raw
+ * bytes. The format of those is not well defined and varies widely.
+ * The driver does not currently attempt to report those limits.
+ *
+ * According to data in smartmontools, attribute 231 is rarely used to report
+ * drive temperatures. At the same time, several drives report SSD life left
+ * in attribute 231, but do not support temperature sensors. For this reason,
+ * attribute 231 is currently ignored.
+ *
+ * Following the above definitions, temperatures are reported as follows.
+ * - If SCT Command Transport is supported, it is used to read the
+ *   temperature and, if available, temperature limits.
+ * - Otherwise, if SMART attribute 194 is supported, it is used to read
+ * the temperature.
+ * - Otherwise, if SMART attribute 190 is supported, it is used to read
+ * the temperature.
+ */
+
+#include <linux/ata.h>
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/hwmon.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_driver.h>
+#include <scsi/scsi_proto.h>
+
+struct drivetemp_data {
+ struct list_head list; /* list of instantiated devices */
+ struct mutex lock; /* protect data buffer accesses */
+ struct scsi_device *sdev; /* SCSI device */
+ struct device *dev; /* instantiating device */
+ struct device *hwdev; /* hardware monitoring device */
+ u8 smartdata[ATA_SECT_SIZE]; /* local buffer */
+ int (*get_temp)(struct drivetemp_data *st, u32 attr, long *val);
+ bool have_temp_lowest; /* lowest temp in SCT status */
+ bool have_temp_highest; /* highest temp in SCT status */
+ bool have_temp_min; /* have min temp */
+ bool have_temp_max; /* have max temp */
+ bool have_temp_lcrit; /* have lower critical limit */
+ bool have_temp_crit; /* have critical limit */
+ int temp_min; /* min temp */
+ int temp_max; /* max temp */
+ int temp_lcrit; /* lower critical limit */
+ int temp_crit; /* critical limit */
+};
+
+static LIST_HEAD(drivetemp_devlist);
+
+#define ATA_MAX_SMART_ATTRS 30
+#define SMART_TEMP_PROP_190 190
+#define SMART_TEMP_PROP_194 194
+
+#define SCT_STATUS_REQ_ADDR 0xe0
+#define SCT_STATUS_VERSION_LOW 0 /* log byte offsets */
+#define SCT_STATUS_VERSION_HIGH 1
+#define SCT_STATUS_TEMP 200
+#define SCT_STATUS_TEMP_LOWEST 201
+#define SCT_STATUS_TEMP_HIGHEST 202
+#define SCT_READ_LOG_ADDR 0xe1
+#define SMART_READ_LOG 0xd5
+#define SMART_WRITE_LOG 0xd6
+
+#define INVALID_TEMP 0x80
+
+#define temp_is_valid(temp) ((temp) != INVALID_TEMP)
+#define temp_from_sct(temp) (((s8)(temp)) * 1000)
+
+static inline bool ata_id_smart_supported(u16 *id)
+{
+ return id[ATA_ID_COMMAND_SET_1] & BIT(0);
+}
+
+static inline bool ata_id_smart_enabled(u16 *id)
+{
+ return id[ATA_ID_CFS_ENABLE_1] & BIT(0);
+}
+
+static int drivetemp_scsi_command(struct drivetemp_data *st,
+ u8 ata_command, u8 feature,
+ u8 lba_low, u8 lba_mid, u8 lba_high)
+{
+ u8 scsi_cmd[MAX_COMMAND_SIZE];
+ int data_dir;
+
+ memset(scsi_cmd, 0, sizeof(scsi_cmd));
+ scsi_cmd[0] = ATA_16;
+ if (ata_command == ATA_CMD_SMART && feature == SMART_WRITE_LOG) {
+ scsi_cmd[1] = (5 << 1); /* PIO Data-out */
+ /*
+ * No off.line or cc, write to dev, block count in sector count
+ * field.
+ */
+ scsi_cmd[2] = 0x06;
+ data_dir = DMA_TO_DEVICE;
+ } else {
+ scsi_cmd[1] = (4 << 1); /* PIO Data-in */
+ /*
+ * No off.line or cc, read from dev, block count in sector count
+ * field.
+ */
+ scsi_cmd[2] = 0x0e;
+ data_dir = DMA_FROM_DEVICE;
+ }
+ scsi_cmd[4] = feature;
+ scsi_cmd[6] = 1; /* 1 sector */
+ scsi_cmd[8] = lba_low;
+ scsi_cmd[10] = lba_mid;
+ scsi_cmd[12] = lba_high;
+ scsi_cmd[14] = ata_command;
+
+ return scsi_execute_req(st->sdev, scsi_cmd, data_dir,
+ st->smartdata, ATA_SECT_SIZE, NULL, HZ, 5,
+ NULL);
+}
+
+static int drivetemp_ata_command(struct drivetemp_data *st, u8 feature,
+ u8 select)
+{
+ return drivetemp_scsi_command(st, ATA_CMD_SMART, feature, select,
+ ATA_SMART_LBAM_PASS, ATA_SMART_LBAH_PASS);
+}
+
+static int drivetemp_get_smarttemp(struct drivetemp_data *st, u32 attr,
+ long *temp)
+{
+ u8 *buf = st->smartdata;
+ bool have_temp = false;
+ u8 temp_raw;
+ u8 csum;
+ int err;
+ int i;
+
+ err = drivetemp_ata_command(st, ATA_SMART_READ_VALUES, 0);
+ if (err)
+ return err;
+
+ /* Checksum the read value table */
+ csum = 0;
+ for (i = 0; i < ATA_SECT_SIZE; i++)
+ csum += buf[i];
+ if (csum) {
+ dev_dbg(&st->sdev->sdev_gendev,
+ "checksum error reading SMART values\n");
+ return -EIO;
+ }
+
+ for (i = 0; i < ATA_MAX_SMART_ATTRS; i++) {
+ u8 *attr = buf + i * 12;
+ int id = attr[2];
+
+ if (!id)
+ continue;
+
+ if (id == SMART_TEMP_PROP_190) {
+ temp_raw = attr[7];
+ have_temp = true;
+ }
+ if (id == SMART_TEMP_PROP_194) {
+ temp_raw = attr[7];
+ have_temp = true;
+ break;
+ }
+ }
+
+ if (have_temp) {
+ *temp = temp_raw * 1000;
+ return 0;
+ }
+
+ return -ENXIO;
+}
+
+static int drivetemp_get_scttemp(struct drivetemp_data *st, u32 attr, long *val)
+{
+ u8 *buf = st->smartdata;
+ int err;
+
+ err = drivetemp_ata_command(st, SMART_READ_LOG, SCT_STATUS_REQ_ADDR);
+ if (err)
+ return err;
+ switch (attr) {
+ case hwmon_temp_input:
+ *val = temp_from_sct(buf[SCT_STATUS_TEMP]);
+ break;
+ case hwmon_temp_lowest:
+ *val = temp_from_sct(buf[SCT_STATUS_TEMP_LOWEST]);
+ break;
+ case hwmon_temp_highest:
+ *val = temp_from_sct(buf[SCT_STATUS_TEMP_HIGHEST]);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+ return err;
+}
+
+static int drivetemp_identify_sata(struct drivetemp_data *st)
+{
+ struct scsi_device *sdev = st->sdev;
+ u8 *buf = st->smartdata;
+ struct scsi_vpd *vpd;
+ bool is_ata, is_sata;
+ bool have_sct_data_table;
+ bool have_sct_temp;
+ bool have_smart;
+ bool have_sct;
+ u16 *ata_id;
+ u16 version;
+ long temp;
+ int err;
+
+ /* SCSI-ATA Translation present? */
+ rcu_read_lock();
+ vpd = rcu_dereference(sdev->vpd_pg89);
+
+ /*
+ * Verify that ATA IDENTIFY DEVICE data is included in ATA Information
+ * VPD and that the drive implements the SATA protocol.
+ */
+ if (!vpd || vpd->len < 572 || vpd->data[56] != ATA_CMD_ID_ATA ||
+ vpd->data[36] != 0x34) {
+ rcu_read_unlock();
+ return -ENODEV;
+ }
+ ata_id = (u16 *)&vpd->data[60];
+ is_ata = ata_id_is_ata(ata_id);
+ is_sata = ata_id_is_sata(ata_id);
+ have_sct = ata_id_sct_supported(ata_id);
+ have_sct_data_table = ata_id_sct_data_tables(ata_id);
+ have_smart = ata_id_smart_supported(ata_id) &&
+ ata_id_smart_enabled(ata_id);
+
+ rcu_read_unlock();
+
+ /* bail out if this is not a SATA device */
+ if (!is_ata || !is_sata)
+ return -ENODEV;
+ if (!have_sct)
+ goto skip_sct;
+
+ err = drivetemp_ata_command(st, SMART_READ_LOG, SCT_STATUS_REQ_ADDR);
+ if (err)
+ goto skip_sct;
+
+ version = (buf[SCT_STATUS_VERSION_HIGH] << 8) |
+ buf[SCT_STATUS_VERSION_LOW];
+ if (version != 2 && version != 3)
+ goto skip_sct;
+
+ have_sct_temp = temp_is_valid(buf[SCT_STATUS_TEMP]);
+ if (!have_sct_temp)
+ goto skip_sct;
+
+ st->have_temp_lowest = temp_is_valid(buf[SCT_STATUS_TEMP_LOWEST]);
+ st->have_temp_highest = temp_is_valid(buf[SCT_STATUS_TEMP_HIGHEST]);
+
+ if (!have_sct_data_table)
+ goto skip_sct;
+
+ /* Request and read temperature history table */
+ memset(buf, '\0', sizeof(st->smartdata));
+ buf[0] = 5; /* data table command */
+ buf[2] = 1; /* read table */
+ buf[4] = 2; /* temperature history table */
+
+ err = drivetemp_ata_command(st, SMART_WRITE_LOG, SCT_STATUS_REQ_ADDR);
+ if (err)
+ goto skip_sct_data;
+
+ err = drivetemp_ata_command(st, SMART_READ_LOG, SCT_READ_LOG_ADDR);
+ if (err)
+ goto skip_sct_data;
+
+ /*
+ * Temperature limits per AT Attachment 8 -
+ * ATA/ATAPI Command Set (ATA8-ACS)
+ */
+ st->have_temp_max = temp_is_valid(buf[6]);
+ st->have_temp_crit = temp_is_valid(buf[7]);
+ st->have_temp_min = temp_is_valid(buf[8]);
+ st->have_temp_lcrit = temp_is_valid(buf[9]);
+
+ st->temp_max = temp_from_sct(buf[6]);
+ st->temp_crit = temp_from_sct(buf[7]);
+ st->temp_min = temp_from_sct(buf[8]);
+ st->temp_lcrit = temp_from_sct(buf[9]);
+
+skip_sct_data:
+ if (have_sct_temp) {
+ st->get_temp = drivetemp_get_scttemp;
+ return 0;
+ }
+skip_sct:
+ if (!have_smart)
+ return -ENODEV;
+ st->get_temp = drivetemp_get_smarttemp;
+ return drivetemp_get_smarttemp(st, hwmon_temp_input, &temp);
+}
+
+static int drivetemp_identify(struct drivetemp_data *st)
+{
+ struct scsi_device *sdev = st->sdev;
+
+ /* Bail out immediately if there is no inquiry data */
+ if (!sdev->inquiry || sdev->inquiry_len < 16)
+ return -ENODEV;
+
+ /* Disk device? */
+ if (sdev->type != TYPE_DISK && sdev->type != TYPE_ZBC)
+ return -ENODEV;
+
+ return drivetemp_identify_sata(st);
+}
+
+static int drivetemp_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct drivetemp_data *st = dev_get_drvdata(dev);
+ int err = 0;
+
+ if (type != hwmon_temp)
+ return -EINVAL;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_lowest:
+ case hwmon_temp_highest:
+ mutex_lock(&st->lock);
+ err = st->get_temp(st, attr, val);
+ mutex_unlock(&st->lock);
+ break;
+ case hwmon_temp_lcrit:
+ *val = st->temp_lcrit;
+ break;
+ case hwmon_temp_min:
+ *val = st->temp_min;
+ break;
+ case hwmon_temp_max:
+ *val = st->temp_max;
+ break;
+ case hwmon_temp_crit:
+ *val = st->temp_crit;
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+ return err;
+}
+
+static umode_t drivetemp_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct drivetemp_data *st = data;
+
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ return 0444;
+ case hwmon_temp_lowest:
+ if (st->have_temp_lowest)
+ return 0444;
+ break;
+ case hwmon_temp_highest:
+ if (st->have_temp_highest)
+ return 0444;
+ break;
+ case hwmon_temp_min:
+ if (st->have_temp_min)
+ return 0444;
+ break;
+ case hwmon_temp_max:
+ if (st->have_temp_max)
+ return 0444;
+ break;
+ case hwmon_temp_lcrit:
+ if (st->have_temp_lcrit)
+ return 0444;
+ break;
+ case hwmon_temp_crit:
+ if (st->have_temp_crit)
+ return 0444;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static const struct hwmon_channel_info *drivetemp_info[] = {
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT |
+ HWMON_T_LOWEST | HWMON_T_HIGHEST |
+ HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_LCRIT | HWMON_T_CRIT),
+ NULL
+};
+
+static const struct hwmon_ops drivetemp_ops = {
+ .is_visible = drivetemp_is_visible,
+ .read = drivetemp_read,
+};
+
+static const struct hwmon_chip_info drivetemp_chip_info = {
+ .ops = &drivetemp_ops,
+ .info = drivetemp_info,
+};
+
+/*
+ * The device argument points to sdev->sdev_dev. Its parent is
+ * sdev->sdev_gendev, which we can use to get the scsi_device pointer.
+ */
+static int drivetemp_add(struct device *dev, struct class_interface *intf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev->parent);
+ struct drivetemp_data *st;
+ int err;
+
+ st = kzalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ st->sdev = sdev;
+ st->dev = dev;
+ mutex_init(&st->lock);
+
+ if (drivetemp_identify(st)) {
+ err = -ENODEV;
+ goto abort;
+ }
+
+ st->hwdev = hwmon_device_register_with_info(dev->parent, "drivetemp",
+ st, &drivetemp_chip_info,
+ NULL);
+ if (IS_ERR(st->hwdev)) {
+ err = PTR_ERR(st->hwdev);
+ goto abort;
+ }
+
+ list_add(&st->list, &drivetemp_devlist);
+ return 0;
+
+abort:
+ kfree(st);
+ return err;
+}
+
+static void drivetemp_remove(struct device *dev, struct class_interface *intf)
+{
+ struct drivetemp_data *st, *tmp;
+
+ list_for_each_entry_safe(st, tmp, &drivetemp_devlist, list) {
+ if (st->dev == dev) {
+ list_del(&st->list);
+ hwmon_device_unregister(st->hwdev);
+ kfree(st);
+ break;
+ }
+ }
+}
+
+static struct class_interface drivetemp_interface = {
+ .add_dev = drivetemp_add,
+ .remove_dev = drivetemp_remove,
+};
+
+static int __init drivetemp_init(void)
+{
+ return scsi_register_interface(&drivetemp_interface);
+}
+
+static void __exit drivetemp_exit(void)
+{
+ scsi_unregister_interface(&drivetemp_interface);
+}
+
+module_init(drivetemp_init);
+module_exit(drivetemp_exit);
+
+MODULE_AUTHOR("Guenter Roeck <linus@roeck-us.net>");
+MODULE_DESCRIPTION("Hard drive temperature monitor");
+MODULE_LICENSE("GPL");
#define to_hwmon_attr(d) \
container_of(d, struct hwmon_device_attribute, dev_attr)
+#define to_dev_attr(a) container_of(a, struct device_attribute, attr)
/*
* Thermal zone information
* also provides the sensor index.
*/
struct hwmon_thermal_data {
- struct hwmon_device *hwdev; /* Reference to hwmon device */
+ struct device *dev; /* Reference to hwmon device */
int index; /* sensor index */
};
NULL
};
+static void hwmon_free_attrs(struct attribute **attrs)
+{
+ int i;
+
+ for (i = 0; attrs[i]; i++) {
+ struct device_attribute *dattr = to_dev_attr(attrs[i]);
+ struct hwmon_device_attribute *hattr = to_hwmon_attr(dattr);
+
+ kfree(hattr);
+ }
+ kfree(attrs);
+}
+
static void hwmon_dev_release(struct device *dev)
{
- kfree(to_hwmon_device(dev));
+ struct hwmon_device *hwdev = to_hwmon_device(dev);
+
+ if (hwdev->group.attrs)
+ hwmon_free_attrs(hwdev->group.attrs);
+ kfree(hwdev->groups);
+ kfree(hwdev);
}
static struct class hwmon_class = {
static int hwmon_thermal_get_temp(void *data, int *temp)
{
struct hwmon_thermal_data *tdata = data;
- struct hwmon_device *hwdev = tdata->hwdev;
+ struct hwmon_device *hwdev = to_hwmon_device(tdata->dev);
int ret;
long t;
- ret = hwdev->chip->ops->read(&hwdev->dev, hwmon_temp, hwmon_temp_input,
+ ret = hwdev->chip->ops->read(tdata->dev, hwmon_temp, hwmon_temp_input,
tdata->index, &t);
if (ret < 0)
return ret;
.get_temp = hwmon_thermal_get_temp,
};
-static int hwmon_thermal_add_sensor(struct device *dev,
- struct hwmon_device *hwdev, int index)
+static int hwmon_thermal_add_sensor(struct device *dev, int index)
{
struct hwmon_thermal_data *tdata;
struct thermal_zone_device *tzd;
if (!tdata)
return -ENOMEM;
- tdata->hwdev = hwdev;
+ tdata->dev = dev;
tdata->index = index;
- tzd = devm_thermal_zone_of_sensor_register(&hwdev->dev, index, tdata,
+ tzd = devm_thermal_zone_of_sensor_register(dev, index, tdata,
&hwmon_thermal_ops);
/*
* If CONFIG_THERMAL_OF is disabled, this returns -ENODEV,
return 0;
}
#else
-static int hwmon_thermal_add_sensor(struct device *dev,
- struct hwmon_device *hwdev, int index)
+static int hwmon_thermal_add_sensor(struct device *dev, int index)
{
return 0;
}
static int hwmon_attr_base(enum hwmon_sensor_types type)
{
- if (type == hwmon_in)
+ if (type == hwmon_in || type == hwmon_intrusion)
return 0;
return 1;
}
(type == hwmon_fan && attr == hwmon_fan_label);
}
-static struct attribute *hwmon_genattr(struct device *dev,
- const void *drvdata,
+static struct attribute *hwmon_genattr(const void *drvdata,
enum hwmon_sensor_types type,
u32 attr,
int index,
if ((mode & 0222) && !ops->write)
return ERR_PTR(-EINVAL);
- hattr = devm_kzalloc(dev, sizeof(*hattr), GFP_KERNEL);
+ hattr = kzalloc(sizeof(*hattr), GFP_KERNEL);
if (!hattr)
return ERR_PTR(-ENOMEM);
};
static const char * const hwmon_temp_attr_templates[] = {
+ [hwmon_temp_enable] = "temp%d_enable",
[hwmon_temp_input] = "temp%d_input",
[hwmon_temp_type] = "temp%d_type",
[hwmon_temp_lcrit] = "temp%d_lcrit",
};
static const char * const hwmon_in_attr_templates[] = {
+ [hwmon_in_enable] = "in%d_enable",
[hwmon_in_input] = "in%d_input",
[hwmon_in_min] = "in%d_min",
[hwmon_in_max] = "in%d_max",
[hwmon_in_max_alarm] = "in%d_max_alarm",
[hwmon_in_lcrit_alarm] = "in%d_lcrit_alarm",
[hwmon_in_crit_alarm] = "in%d_crit_alarm",
- [hwmon_in_enable] = "in%d_enable",
};
static const char * const hwmon_curr_attr_templates[] = {
+ [hwmon_curr_enable] = "curr%d_enable",
[hwmon_curr_input] = "curr%d_input",
[hwmon_curr_min] = "curr%d_min",
[hwmon_curr_max] = "curr%d_max",
};
static const char * const hwmon_power_attr_templates[] = {
+ [hwmon_power_enable] = "power%d_enable",
[hwmon_power_average] = "power%d_average",
[hwmon_power_average_interval] = "power%d_average_interval",
[hwmon_power_average_interval_max] = "power%d_interval_max",
};
static const char * const hwmon_energy_attr_templates[] = {
+ [hwmon_energy_enable] = "energy%d_enable",
[hwmon_energy_input] = "energy%d_input",
[hwmon_energy_label] = "energy%d_label",
};
static const char * const hwmon_humidity_attr_templates[] = {
+ [hwmon_humidity_enable] = "humidity%d_enable",
[hwmon_humidity_input] = "humidity%d_input",
[hwmon_humidity_label] = "humidity%d_label",
[hwmon_humidity_min] = "humidity%d_min",
};
static const char * const hwmon_fan_attr_templates[] = {
+ [hwmon_fan_enable] = "fan%d_enable",
[hwmon_fan_input] = "fan%d_input",
[hwmon_fan_label] = "fan%d_label",
[hwmon_fan_min] = "fan%d_min",
[hwmon_pwm_freq] = "pwm%d_freq",
};
+static const char * const hwmon_intrusion_attr_templates[] = {
+ [hwmon_intrusion_alarm] = "intrusion%d_alarm",
+ [hwmon_intrusion_beep] = "intrusion%d_beep",
+};
+
static const char * const *__templates[] = {
[hwmon_chip] = hwmon_chip_attrs,
[hwmon_temp] = hwmon_temp_attr_templates,
[hwmon_humidity] = hwmon_humidity_attr_templates,
[hwmon_fan] = hwmon_fan_attr_templates,
[hwmon_pwm] = hwmon_pwm_attr_templates,
+ [hwmon_intrusion] = hwmon_intrusion_attr_templates,
};
static const int __templates_size[] = {
[hwmon_humidity] = ARRAY_SIZE(hwmon_humidity_attr_templates),
[hwmon_fan] = ARRAY_SIZE(hwmon_fan_attr_templates),
[hwmon_pwm] = ARRAY_SIZE(hwmon_pwm_attr_templates),
+ [hwmon_intrusion] = ARRAY_SIZE(hwmon_intrusion_attr_templates),
};
static int hwmon_num_channel_attrs(const struct hwmon_channel_info *info)
return n;
}
-static int hwmon_genattrs(struct device *dev,
- const void *drvdata,
+static int hwmon_genattrs(const void *drvdata,
struct attribute **attrs,
const struct hwmon_ops *ops,
const struct hwmon_channel_info *info)
attr_mask &= ~BIT(attr);
if (attr >= template_size)
return -EINVAL;
- a = hwmon_genattr(dev, drvdata, info->type, attr, i,
+ a = hwmon_genattr(drvdata, info->type, attr, i,
templates[attr], ops);
if (IS_ERR(a)) {
if (PTR_ERR(a) != -ENOENT)
}
static struct attribute **
-__hwmon_create_attrs(struct device *dev, const void *drvdata,
- const struct hwmon_chip_info *chip)
+__hwmon_create_attrs(const void *drvdata, const struct hwmon_chip_info *chip)
{
int ret, i, aindex = 0, nattrs = 0;
struct attribute **attrs;
if (nattrs == 0)
return ERR_PTR(-EINVAL);
- attrs = devm_kcalloc(dev, nattrs + 1, sizeof(*attrs), GFP_KERNEL);
+ attrs = kcalloc(nattrs + 1, sizeof(*attrs), GFP_KERNEL);
if (!attrs)
return ERR_PTR(-ENOMEM);
for (i = 0; chip->info[i]; i++) {
- ret = hwmon_genattrs(dev, drvdata, &attrs[aindex], chip->ops,
+ ret = hwmon_genattrs(drvdata, &attrs[aindex], chip->ops,
chip->info[i]);
- if (ret < 0)
+ if (ret < 0) {
+ hwmon_free_attrs(attrs);
return ERR_PTR(ret);
+ }
aindex += ret;
}
for (i = 0; groups[i]; i++)
ngroups++;
- hwdev->groups = devm_kcalloc(dev, ngroups, sizeof(*groups),
- GFP_KERNEL);
+ hwdev->groups = kcalloc(ngroups, sizeof(*groups), GFP_KERNEL);
if (!hwdev->groups) {
err = -ENOMEM;
goto free_hwmon;
}
- attrs = __hwmon_create_attrs(dev, drvdata, chip);
+ attrs = __hwmon_create_attrs(drvdata, chip);
if (IS_ERR(attrs)) {
err = PTR_ERR(attrs);
goto free_hwmon;
hwmon_temp_input, j))
continue;
if (info[i]->config[j] & HWMON_T_INPUT) {
- err = hwmon_thermal_add_sensor(dev,
- hwdev, j);
+ err = hwmon_thermal_add_sensor(hdev, j);
if (err) {
device_unregister(hdev);
/*
return hdev;
free_hwmon:
- kfree(hwdev);
+ hwmon_dev_release(hdev);
ida_remove:
ida_simple_remove(&hwmon_ida, id);
return ERR_PTR(err);
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * k10temp.c - AMD Family 10h/11h/12h/14h/15h/16h processor hardware monitoring
+ * k10temp.c - AMD Family 10h/11h/12h/14h/15h/16h/17h
+ * processor hardware monitoring
*
* Copyright (c) 2009 Clemens Ladisch <clemens@ladisch.de>
+ * Copyright (c) 2020 Guenter Roeck <linux@roeck-us.net>
+ *
+ * Implementation notes:
+ * - CCD register address information as well as the calculation to
+ * convert raw register values is from https://github.com/ocerman/zenpower.
+ * The information is not confirmed from chip datasheets, but experiments
+ * suggest that it provides reasonable temperature values.
+ * - Register addresses to read chip voltage and current are also from
+ * https://github.com/ocerman/zenpower, and not confirmed from chip
+ * datasheets. Current calibration is board specific and not typically
+ * shared by board vendors. For this reason, current values are
+ * normalized to report 1A/LSB for core current and 0.25A/LSB for SoC
+ * current. Reported values can be adjusted using the sensors configuration
+ * file.
*/
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#endif
/* CPUID function 0x80000001, ebx */
-#define CPUID_PKGTYPE_MASK 0xf0000000
+#define CPUID_PKGTYPE_MASK GENMASK(31, 28)
#define CPUID_PKGTYPE_F 0x00000000
#define CPUID_PKGTYPE_AM2R2_AM3 0x10000000
/* DRAM controller (PCI function 2) */
#define REG_DCT0_CONFIG_HIGH 0x094
-#define DDR3_MODE 0x00000100
+#define DDR3_MODE BIT(8)
/* miscellaneous (PCI function 3) */
#define REG_HARDWARE_THERMAL_CONTROL 0x64
-#define HTC_ENABLE 0x00000001
+#define HTC_ENABLE BIT(0)
#define REG_REPORTED_TEMPERATURE 0xa4
#define REG_NORTHBRIDGE_CAPABILITIES 0xe8
-#define NB_CAP_HTC 0x00000400
+#define NB_CAP_HTC BIT(10)
/*
* For F15h M60h and M70h, REG_HARDWARE_THERMAL_CONTROL
/* F17h M01h Access through SMN */
#define F17H_M01H_REPORTED_TEMP_CTRL_OFFSET 0x00059800
+#define F17H_M70H_CCD_TEMP(x) (0x00059954 + ((x) * 4))
+#define F17H_M70H_CCD_TEMP_VALID BIT(11)
+#define F17H_M70H_CCD_TEMP_MASK GENMASK(10, 0)
+
+#define F17H_M01H_SVI 0x0005A000
+#define F17H_M01H_SVI_TEL_PLANE0 (F17H_M01H_SVI + 0xc)
+#define F17H_M01H_SVI_TEL_PLANE1 (F17H_M01H_SVI + 0x10)
+
+#define CUR_TEMP_SHIFT 21
+#define CUR_TEMP_RANGE_SEL_MASK BIT(19)
+
+#define CFACTOR_ICORE 1000000 /* 1A / LSB */
+#define CFACTOR_ISOC 250000 /* 0.25A / LSB */
+
struct k10temp_data {
struct pci_dev *pdev;
void (*read_htcreg)(struct pci_dev *pdev, u32 *regval);
int temp_offset;
u32 temp_adjust_mask;
bool show_tdie;
+ u32 show_tccd;
+ u32 svi_addr[2];
+ bool show_current;
+ int cfactor[2];
};
struct tctl_offset {
{ 0x17, "AMD Ryzen Threadripper 29", 27000 }, /* 29{20,50,70,90}[W]X */
};
+static bool is_threadripper(void)
+{
+ return strstr(boot_cpu_data.x86_model_id, "Threadripper");
+}
+
+static bool is_epyc(void)
+{
+ return strstr(boot_cpu_data.x86_model_id, "EPYC");
+}
+
static void read_htcreg_pci(struct pci_dev *pdev, u32 *regval)
{
pci_read_config_dword(pdev, REG_HARDWARE_THERMAL_CONTROL, regval);
F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, regval);
}
-static unsigned int get_raw_temp(struct k10temp_data *data)
+static long get_raw_temp(struct k10temp_data *data)
{
- unsigned int temp;
u32 regval;
+ long temp;
data->read_tempreg(data->pdev, ®val);
- temp = (regval >> 21) * 125;
+ temp = (regval >> CUR_TEMP_SHIFT) * 125;
if (regval & data->temp_adjust_mask)
temp -= 49000;
return temp;
}
-static ssize_t temp1_input_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct k10temp_data *data = dev_get_drvdata(dev);
- unsigned int temp = get_raw_temp(data);
+static const char *k10temp_temp_label[] = {
+ "Tdie",
+ "Tctl",
+ "Tccd1",
+ "Tccd2",
+ "Tccd3",
+ "Tccd4",
+ "Tccd5",
+ "Tccd6",
+ "Tccd7",
+ "Tccd8",
+};
- if (temp > data->temp_offset)
- temp -= data->temp_offset;
- else
- temp = 0;
+static const char *k10temp_in_label[] = {
+ "Vcore",
+ "Vsoc",
+};
- return sprintf(buf, "%u\n", temp);
-}
+static const char *k10temp_curr_label[] = {
+ "Icore",
+ "Isoc",
+};
-static ssize_t temp2_input_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static int k10temp_read_labels(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
{
- struct k10temp_data *data = dev_get_drvdata(dev);
- unsigned int temp = get_raw_temp(data);
-
- return sprintf(buf, "%u\n", temp);
+ switch (type) {
+ case hwmon_temp:
+ *str = k10temp_temp_label[channel];
+ break;
+ case hwmon_in:
+ *str = k10temp_in_label[channel];
+ break;
+ case hwmon_curr:
+ *str = k10temp_curr_label[channel];
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
}
-static ssize_t temp_label_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static int k10temp_read_curr(struct device *dev, u32 attr, int channel,
+ long *val)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct k10temp_data *data = dev_get_drvdata(dev);
+ u32 regval;
- return sprintf(buf, "%s\n", attr->index ? "Tctl" : "Tdie");
+ switch (attr) {
+ case hwmon_curr_input:
+ amd_smn_read(amd_pci_dev_to_node_id(data->pdev),
+ data->svi_addr[channel], ®val);
+ *val = DIV_ROUND_CLOSEST(data->cfactor[channel] *
+ (regval & 0xff),
+ 1000);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
}
-static ssize_t temp1_max_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static int k10temp_read_in(struct device *dev, u32 attr, int channel, long *val)
{
- return sprintf(buf, "%d\n", 70 * 1000);
+ struct k10temp_data *data = dev_get_drvdata(dev);
+ u32 regval;
+
+ switch (attr) {
+ case hwmon_in_input:
+ amd_smn_read(amd_pci_dev_to_node_id(data->pdev),
+ data->svi_addr[channel], ®val);
+ regval = (regval >> 16) & 0xff;
+ *val = DIV_ROUND_CLOSEST(155000 - regval * 625, 100);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
}
-static ssize_t temp_crit_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static int k10temp_read_temp(struct device *dev, u32 attr, int channel,
+ long *val)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct k10temp_data *data = dev_get_drvdata(dev);
- int show_hyst = attr->index;
u32 regval;
- int value;
- data->read_htcreg(data->pdev, ®val);
- value = ((regval >> 16) & 0x7f) * 500 + 52000;
- if (show_hyst)
- value -= ((regval >> 24) & 0xf) * 500;
- return sprintf(buf, "%d\n", value);
+ switch (attr) {
+ case hwmon_temp_input:
+ switch (channel) {
+ case 0: /* Tdie */
+ *val = get_raw_temp(data) - data->temp_offset;
+ if (*val < 0)
+ *val = 0;
+ break;
+ case 1: /* Tctl */
+ *val = get_raw_temp(data);
+ if (*val < 0)
+ *val = 0;
+ break;
+ case 2 ... 9: /* Tccd{1-8} */
+ amd_smn_read(amd_pci_dev_to_node_id(data->pdev),
+ F17H_M70H_CCD_TEMP(channel - 2), ®val);
+ *val = (regval & F17H_M70H_CCD_TEMP_MASK) * 125 - 49000;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ case hwmon_temp_max:
+ *val = 70 * 1000;
+ break;
+ case hwmon_temp_crit:
+ data->read_htcreg(data->pdev, ®val);
+ *val = ((regval >> 16) & 0x7f) * 500 + 52000;
+ break;
+ case hwmon_temp_crit_hyst:
+ data->read_htcreg(data->pdev, ®val);
+ *val = (((regval >> 16) & 0x7f)
+ - ((regval >> 24) & 0xf)) * 500 + 52000;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
}
-static DEVICE_ATTR_RO(temp1_input);
-static DEVICE_ATTR_RO(temp1_max);
-static SENSOR_DEVICE_ATTR_RO(temp1_crit, temp_crit, 0);
-static SENSOR_DEVICE_ATTR_RO(temp1_crit_hyst, temp_crit, 1);
-
-static SENSOR_DEVICE_ATTR_RO(temp1_label, temp_label, 0);
-static DEVICE_ATTR_RO(temp2_input);
-static SENSOR_DEVICE_ATTR_RO(temp2_label, temp_label, 1);
+static int k10temp_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ switch (type) {
+ case hwmon_temp:
+ return k10temp_read_temp(dev, attr, channel, val);
+ case hwmon_in:
+ return k10temp_read_in(dev, attr, channel, val);
+ case hwmon_curr:
+ return k10temp_read_curr(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
-static umode_t k10temp_is_visible(struct kobject *kobj,
- struct attribute *attr, int index)
+static umode_t k10temp_is_visible(const void *_data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct k10temp_data *data = dev_get_drvdata(dev);
+ const struct k10temp_data *data = _data;
struct pci_dev *pdev = data->pdev;
u32 reg;
- switch (index) {
- case 0 ... 1: /* temp1_input, temp1_max */
- default:
- break;
- case 2 ... 3: /* temp1_crit, temp1_crit_hyst */
- if (!data->read_htcreg)
- return 0;
-
- pci_read_config_dword(pdev, REG_NORTHBRIDGE_CAPABILITIES,
- ®);
- if (!(reg & NB_CAP_HTC))
- return 0;
-
- data->read_htcreg(data->pdev, ®);
- if (!(reg & HTC_ENABLE))
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ switch (channel) {
+ case 0: /* Tdie, or Tctl if we don't show it */
+ break;
+ case 1: /* Tctl */
+ if (!data->show_tdie)
+ return 0;
+ break;
+ case 2 ... 9: /* Tccd{1-8} */
+ if (!(data->show_tccd & BIT(channel - 2)))
+ return 0;
+ break;
+ default:
+ return 0;
+ }
+ break;
+ case hwmon_temp_max:
+ if (channel || data->show_tdie)
+ return 0;
+ break;
+ case hwmon_temp_crit:
+ case hwmon_temp_crit_hyst:
+ if (channel || !data->read_htcreg)
+ return 0;
+
+ pci_read_config_dword(pdev,
+ REG_NORTHBRIDGE_CAPABILITIES,
+ ®);
+ if (!(reg & NB_CAP_HTC))
+ return 0;
+
+ data->read_htcreg(data->pdev, ®);
+ if (!(reg & HTC_ENABLE))
+ return 0;
+ break;
+ case hwmon_temp_label:
+ /* No labels if we don't show the die temperature */
+ if (!data->show_tdie)
+ return 0;
+ switch (channel) {
+ case 0: /* Tdie */
+ case 1: /* Tctl */
+ break;
+ case 2 ... 9: /* Tccd{1-8} */
+ if (!(data->show_tccd & BIT(channel - 2)))
+ return 0;
+ break;
+ default:
+ return 0;
+ }
+ break;
+ default:
return 0;
+ }
break;
- case 4 ... 6: /* temp1_label, temp2_input, temp2_label */
- if (!data->show_tdie)
+ case hwmon_in:
+ case hwmon_curr:
+ if (!data->show_current)
return 0;
break;
+ default:
+ return 0;
}
- return attr->mode;
+ return 0444;
}
-static struct attribute *k10temp_attrs[] = {
- &dev_attr_temp1_input.attr,
- &dev_attr_temp1_max.attr,
- &sensor_dev_attr_temp1_crit.dev_attr.attr,
- &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
- &sensor_dev_attr_temp1_label.dev_attr.attr,
- &dev_attr_temp2_input.attr,
- &sensor_dev_attr_temp2_label.dev_attr.attr,
- NULL
-};
-
-static const struct attribute_group k10temp_group = {
- .attrs = k10temp_attrs,
- .is_visible = k10temp_is_visible,
-};
-__ATTRIBUTE_GROUPS(k10temp);
-
static bool has_erratum_319(struct pci_dev *pdev)
{
u32 pkg_type, reg_dram_cfg;
(boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_stepping <= 2);
}
-static int k10temp_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+#ifdef CONFIG_DEBUG_FS
+
+static void k10temp_smn_regs_show(struct seq_file *s, struct pci_dev *pdev,
+ u32 addr, int count)
+{
+ u32 reg;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ if (!(i & 3))
+ seq_printf(s, "0x%06x: ", addr + i * 4);
+ amd_smn_read(amd_pci_dev_to_node_id(pdev), addr + i * 4, ®);
+ seq_printf(s, "%08x ", reg);
+ if ((i & 3) == 3)
+ seq_puts(s, "\n");
+ }
+}
+
+static int svi_show(struct seq_file *s, void *unused)
+{
+ struct k10temp_data *data = s->private;
+
+ k10temp_smn_regs_show(s, data->pdev, F17H_M01H_SVI, 32);
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(svi);
+
+static int thm_show(struct seq_file *s, void *unused)
+{
+ struct k10temp_data *data = s->private;
+
+ k10temp_smn_regs_show(s, data->pdev,
+ F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, 256);
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(thm);
+
+static void k10temp_debugfs_cleanup(void *ddir)
+{
+ debugfs_remove_recursive(ddir);
+}
+
+static void k10temp_init_debugfs(struct k10temp_data *data)
+{
+ struct dentry *debugfs;
+ char name[32];
+
+ /* Only show debugfs data for Family 17h/18h CPUs */
+ if (!data->show_tdie)
+ return;
+
+ scnprintf(name, sizeof(name), "k10temp-%s", pci_name(data->pdev));
+
+ debugfs = debugfs_create_dir(name, NULL);
+ if (debugfs) {
+ debugfs_create_file("svi", 0444, debugfs, data, &svi_fops);
+ debugfs_create_file("thm", 0444, debugfs, data, &thm_fops);
+ devm_add_action_or_reset(&data->pdev->dev,
+ k10temp_debugfs_cleanup, debugfs);
+ }
+}
+
+#else
+
+static void k10temp_init_debugfs(struct k10temp_data *data)
+{
+}
+
+#endif
+
+static const struct hwmon_channel_info *k10temp_info[] = {
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MAX |
+ HWMON_T_CRIT | HWMON_T_CRIT_HYST |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL),
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL),
+ HWMON_CHANNEL_INFO(curr,
+ HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_LABEL),
+ NULL
+};
+
+static const struct hwmon_ops k10temp_hwmon_ops = {
+ .is_visible = k10temp_is_visible,
+ .read = k10temp_read,
+ .read_string = k10temp_read_labels,
+};
+
+static const struct hwmon_chip_info k10temp_chip_info = {
+ .ops = &k10temp_hwmon_ops,
+ .info = k10temp_info,
+};
+
+static void k10temp_get_ccd_support(struct pci_dev *pdev,
+ struct k10temp_data *data, int limit)
+{
+ u32 regval;
+ int i;
+
+ for (i = 0; i < limit; i++) {
+ amd_smn_read(amd_pci_dev_to_node_id(pdev),
+ F17H_M70H_CCD_TEMP(i), ®val);
+ if (regval & F17H_M70H_CCD_TEMP_VALID)
+ data->show_tccd |= BIT(i);
+ }
+}
+
+static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int unreliable = has_erratum_319(pdev);
struct device *dev = &pdev->dev;
data->read_htcreg = read_htcreg_nb_f15;
data->read_tempreg = read_tempreg_nb_f15;
} else if (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) {
- data->temp_adjust_mask = 0x80000;
+ data->temp_adjust_mask = CUR_TEMP_RANGE_SEL_MASK;
data->read_tempreg = read_tempreg_nb_f17;
data->show_tdie = true;
+
+ switch (boot_cpu_data.x86_model) {
+ case 0x1: /* Zen */
+ case 0x8: /* Zen+ */
+ case 0x11: /* Zen APU */
+ case 0x18: /* Zen+ APU */
+ data->show_current = !is_threadripper() && !is_epyc();
+ data->svi_addr[0] = F17H_M01H_SVI_TEL_PLANE0;
+ data->svi_addr[1] = F17H_M01H_SVI_TEL_PLANE1;
+ data->cfactor[0] = CFACTOR_ICORE;
+ data->cfactor[1] = CFACTOR_ISOC;
+ k10temp_get_ccd_support(pdev, data, 4);
+ break;
+ case 0x31: /* Zen2 Threadripper */
+ case 0x71: /* Zen2 */
+ data->show_current = !is_threadripper() && !is_epyc();
+ data->cfactor[0] = CFACTOR_ICORE;
+ data->cfactor[1] = CFACTOR_ISOC;
+ data->svi_addr[0] = F17H_M01H_SVI_TEL_PLANE1;
+ data->svi_addr[1] = F17H_M01H_SVI_TEL_PLANE0;
+ k10temp_get_ccd_support(pdev, data, 8);
+ break;
+ }
} else {
data->read_htcreg = read_htcreg_pci;
data->read_tempreg = read_tempreg_pci;
}
}
- hwmon_dev = devm_hwmon_device_register_with_groups(dev, "k10temp", data,
- k10temp_groups);
- return PTR_ERR_OR_ZERO(hwmon_dev);
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, "k10temp", data,
+ &k10temp_chip_info,
+ NULL);
+ if (IS_ERR(hwmon_dev))
+ return PTR_ERR(hwmon_dev);
+
+ k10temp_init_debugfs(data);
+
+ return 0;
}
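A worked decoding of the formulas above, using hypothetical register values; as the implementation notes say, the layout comes from the zenpower project and is not confirmed by datasheets. The SVI telemetry word carries the voltage code in bits 23:16 (1.55 V minus 6.25 mV per step) and the current code in bits 7:0 (scaled by the normalized per-rail factor), and a CCD reading converts as raw * 0.125 °C - 49 °C:

	#include <stdio.h>

	#define DIV_ROUND_CLOSEST(x, d) (((x) + ((d) / 2)) / (d))
	#define CFACTOR_ICORE		1000000L	/* 1 A / LSB */
	#define CCD_TEMP_MASK		0x7ffL		/* GENMASK(10, 0) */

	int main(void)
	{
		unsigned long svi = 0x00640080;	/* hypothetical SVI word */
		unsigned long ccd = 0x262;	/* hypothetical CCD reading */

		long mv = DIV_ROUND_CLOSEST(155000L -
					    ((svi >> 16) & 0xff) * 625, 100);
		long ma = DIV_ROUND_CLOSEST(CFACTOR_ICORE * (svi & 0xff), 1000);
		long mc = (ccd & CCD_TEMP_MASK) * 125 - 49000;

		/* 925 mV, 128000 mA (normalized), 27250 millidegrees C */
		printf("%ld mV, %ld mA, %ld mC\n", mv, ma, mc);
		return 0;
	}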
static const struct pci_device_id k10temp_id_table[] = {
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for MAX31730 3-Channel Remote Temperature Sensor
+ *
+ * Copyright (c) 2019 Guenter Roeck <linux@roeck-us.net>
+ */
+
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/hwmon.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+/* Addresses scanned */
+static const unsigned short normal_i2c[] = { 0x1c, 0x1d, 0x1e, 0x1f, 0x4c,
+ 0x4d, 0x4e, 0x4f, I2C_CLIENT_END };
+
+/* The MAX31730 registers */
+#define MAX31730_REG_TEMP 0x00
+#define MAX31730_REG_CONF 0x13
+#define MAX31730_STOP BIT(7)
+#define MAX31730_EXTRANGE BIT(1)
+#define MAX31730_REG_TEMP_OFFSET 0x16
+#define MAX31730_TEMP_OFFSET_BASELINE 0x77
+#define MAX31730_REG_OFFSET_ENABLE 0x17
+#define MAX31730_REG_TEMP_MAX 0x20
+#define MAX31730_REG_TEMP_MIN 0x30
+#define MAX31730_REG_STATUS_HIGH 0x32
+#define MAX31730_REG_STATUS_LOW 0x33
+#define MAX31730_REG_CHANNEL_ENABLE 0x35
+#define MAX31730_REG_TEMP_FAULT 0x36
+
+#define MAX31730_REG_MFG_ID 0x50
+#define MAX31730_MFG_ID 0x4d
+#define MAX31730_REG_MFG_REV 0x51
+#define MAX31730_MFG_REV 0x01
+
+#define MAX31730_TEMP_MIN (-128000)
+#define MAX31730_TEMP_MAX 127937
+
+/* Each client has this additional data */
+struct max31730_data {
+ struct i2c_client *client;
+ u8 orig_conf;
+ u8 current_conf;
+ u8 offset_enable;
+ u8 channel_enable;
+};
+
+/*-----------------------------------------------------------------------*/
+
+static inline long max31730_reg_to_mc(s16 temp)
+{
+ return DIV_ROUND_CLOSEST((temp >> 4) * 1000, 16);
+}
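The temperature registers are 16-bit, left-justified with four unused low bits at 0.0625 °C per step, which is why the helper above shifts right by 4 and scales by 1000/16. A worked instance as a standalone sketch (the register value is hypothetical):

	#include <stdint.h>
	#include <stdio.h>

	/* Simplified for non-negative operands. */
	#define DIV_ROUND_CLOSEST(x, d) (((x) + ((d) / 2)) / (d))

	static long reg_to_mc(int16_t temp)	/* mirrors max31730_reg_to_mc() */
	{
		return DIV_ROUND_CLOSEST((temp >> 4) * 1000L, 16);
	}

	int main(void)
	{
		/* 0x1900 >> 4 = 400 steps; 400 * 1000 / 16 = 25000 mC. */
		printf("%ld mC\n", reg_to_mc(0x1900));	/* 25 °C */
		return 0;
	}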
+
+static int max31730_write_config(struct max31730_data *data, u8 set_mask,
+ u8 clr_mask)
+{
+ u8 value;
+
+ clr_mask |= MAX31730_EXTRANGE;
+ value = data->current_conf & ~clr_mask;
+ value |= set_mask;
+
+ if (data->current_conf != value) {
+ s32 err;
+
+ err = i2c_smbus_write_byte_data(data->client, MAX31730_REG_CONF,
+ value);
+ if (err)
+ return err;
+ data->current_conf = value;
+ }
+ return 0;
+}
+
+static int max31730_set_enable(struct i2c_client *client, int reg,
+ u8 *confdata, int channel, bool enable)
+{
+ u8 regval = *confdata;
+ int err;
+
+ if (enable)
+ regval |= BIT(channel);
+ else
+ regval &= ~BIT(channel);
+
+ if (regval != *confdata) {
+ err = i2c_smbus_write_byte_data(client, reg, regval);
+ if (err)
+ return err;
+ *confdata = regval;
+ }
+ return 0;
+}
+
+static int max31730_set_offset_enable(struct max31730_data *data, int channel,
+ bool enable)
+{
+ return max31730_set_enable(data->client, MAX31730_REG_OFFSET_ENABLE,
+ &data->offset_enable, channel, enable);
+}
+
+static int max31730_set_channel_enable(struct max31730_data *data, int channel,
+ bool enable)
+{
+ return max31730_set_enable(data->client, MAX31730_REG_CHANNEL_ENABLE,
+ &data->channel_enable, channel, enable);
+}
+
+static int max31730_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct max31730_data *data = dev_get_drvdata(dev);
+ int regval, reg, offset;
+
+ if (type != hwmon_temp)
+ return -EINVAL;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ if (!(data->channel_enable & BIT(channel)))
+ return -ENODATA;
+ reg = MAX31730_REG_TEMP + (channel * 2);
+ break;
+ case hwmon_temp_max:
+ reg = MAX31730_REG_TEMP_MAX + (channel * 2);
+ break;
+ case hwmon_temp_min:
+ reg = MAX31730_REG_TEMP_MIN;
+ break;
+ case hwmon_temp_enable:
+ *val = !!(data->channel_enable & BIT(channel));
+ return 0;
+ case hwmon_temp_offset:
+ if (!channel)
+ return -EINVAL;
+ if (!(data->offset_enable & BIT(channel))) {
+ *val = 0;
+ return 0;
+ }
+ offset = i2c_smbus_read_byte_data(data->client,
+ MAX31730_REG_TEMP_OFFSET);
+ if (offset < 0)
+ return offset;
+ *val = (offset - MAX31730_TEMP_OFFSET_BASELINE) * 125;
+ return 0;
+ case hwmon_temp_fault:
+ regval = i2c_smbus_read_byte_data(data->client,
+ MAX31730_REG_TEMP_FAULT);
+ if (regval < 0)
+ return regval;
+ *val = !!(regval & BIT(channel));
+ return 0;
+ case hwmon_temp_min_alarm:
+ regval = i2c_smbus_read_byte_data(data->client,
+ MAX31730_REG_STATUS_LOW);
+ if (regval < 0)
+ return regval;
+ *val = !!(regval & BIT(channel));
+ return 0;
+ case hwmon_temp_max_alarm:
+ regval = i2c_smbus_read_byte_data(data->client,
+ MAX31730_REG_STATUS_HIGH);
+ if (regval < 0)
+ return regval;
+ *val = !!(regval & BIT(channel));
+ return 0;
+ default:
+ return -EINVAL;
+ }
+ regval = i2c_smbus_read_word_swapped(data->client, reg);
+ if (regval < 0)
+ return regval;
+
+ *val = max31730_reg_to_mc(regval);
+
+ return 0;
+}
+
+static int max31730_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct max31730_data *data = dev_get_drvdata(dev);
+ int reg, err;
+
+ if (type != hwmon_temp)
+ return -EINVAL;
+
+ switch (attr) {
+ case hwmon_temp_max:
+ reg = MAX31730_REG_TEMP_MAX + channel * 2;
+ break;
+ case hwmon_temp_min:
+ reg = MAX31730_REG_TEMP_MIN;
+ break;
+ case hwmon_temp_enable:
+ if (val != 0 && val != 1)
+ return -EINVAL;
+ return max31730_set_channel_enable(data, channel, val);
+ case hwmon_temp_offset:
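+		/*
+		 * The offset register spans 0x00..0xff in steps of 125
+		 * millidegrees C, with the 0x77 (= 119) baseline meaning no
+		 * offset, hence the -14875..17000 range enforced below.
+		 */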
+ val = clamp_val(val, -14875, 17000) + 14875;
+ val = DIV_ROUND_CLOSEST(val, 125);
+ err = max31730_set_offset_enable(data, channel,
+ val != MAX31730_TEMP_OFFSET_BASELINE);
+ if (err)
+ return err;
+ return i2c_smbus_write_byte_data(data->client,
+ MAX31730_REG_TEMP_OFFSET, val);
+ default:
+ return -EINVAL;
+ }
+
+ val = clamp_val(val, MAX31730_TEMP_MIN, MAX31730_TEMP_MAX);
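+	/* e.g. 26125 millidegrees C -> (26125 * 16 / 1000) << 4 = 0x1a20 */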
+ val = DIV_ROUND_CLOSEST(val << 4, 1000) << 4;
+
+ return i2c_smbus_write_word_swapped(data->client, reg, (u16)val);
+}
+
+static umode_t max31730_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_min_alarm:
+ case hwmon_temp_max_alarm:
+ case hwmon_temp_fault:
+ return 0444;
+ case hwmon_temp_min:
+ return channel ? 0444 : 0644;
+ case hwmon_temp_offset:
+ case hwmon_temp_enable:
+ case hwmon_temp_max:
+ return 0644;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static const struct hwmon_channel_info *max31730_info[] = {
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_ENABLE |
+ HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM,
+ HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_OFFSET | HWMON_T_ENABLE |
+ HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM |
+ HWMON_T_FAULT,
+ HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_OFFSET | HWMON_T_ENABLE |
+ HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM |
+ HWMON_T_FAULT,
+ HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_OFFSET | HWMON_T_ENABLE |
+ HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM |
+ HWMON_T_FAULT
+ ),
+ NULL
+};
+
+static const struct hwmon_ops max31730_hwmon_ops = {
+ .is_visible = max31730_is_visible,
+ .read = max31730_read,
+ .write = max31730_write,
+};
+
+static const struct hwmon_chip_info max31730_chip_info = {
+ .ops = &max31730_hwmon_ops,
+ .info = max31730_info,
+};
+
+static void max31730_remove(void *data)
+{
+ struct max31730_data *max31730 = data;
+ struct i2c_client *client = max31730->client;
+
+ i2c_smbus_write_byte_data(client, MAX31730_REG_CONF,
+ max31730->orig_conf);
+}
+
+static int
+max31730_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct device *hwmon_dev;
+ struct max31730_data *data;
+ int status, err;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA))
+ return -EIO;
+
+ data = devm_kzalloc(dev, sizeof(struct max31730_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->client = client;
+
+ /* Cache original configuration and enable status */
+ status = i2c_smbus_read_byte_data(client, MAX31730_REG_CHANNEL_ENABLE);
+ if (status < 0)
+ return status;
+ data->channel_enable = status;
+
+ status = i2c_smbus_read_byte_data(client, MAX31730_REG_OFFSET_ENABLE);
+ if (status < 0)
+ return status;
+ data->offset_enable = status;
+
+ status = i2c_smbus_read_byte_data(client, MAX31730_REG_CONF);
+ if (status < 0)
+ return status;
+ data->orig_conf = status;
+ data->current_conf = status;
+
+ err = max31730_write_config(data,
+ data->channel_enable ? 0 : MAX31730_STOP,
+ data->channel_enable ? MAX31730_STOP : 0);
+ if (err)
+ return err;
+
+ dev_set_drvdata(dev, data);
+
+ err = devm_add_action_or_reset(dev, max31730_remove, data);
+ if (err)
+ return err;
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ data,
+ &max31730_chip_info,
+ NULL);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct i2c_device_id max31730_ids[] = {
+ { "max31730", 0, },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max31730_ids);
+
+static const struct of_device_id __maybe_unused max31730_of_match[] = {
+ {
+ .compatible = "maxim,max31730",
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, max31730_of_match);
+
+static bool max31730_check_reg_temp(struct i2c_client *client,
+ int reg)
+{
+ int regval;
+
+ regval = i2c_smbus_read_byte_data(client, reg + 1);
+ return regval < 0 || (regval & 0x0f);
+}
+
+/* Return 0 if detection is successful, -ENODEV otherwise */
+static int max31730_detect(struct i2c_client *client,
+ struct i2c_board_info *info)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ int regval;
+ int i;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_WORD_DATA))
+ return -ENODEV;
+
+ regval = i2c_smbus_read_byte_data(client, MAX31730_REG_MFG_ID);
+ if (regval != MAX31730_MFG_ID)
+ return -ENODEV;
+ regval = i2c_smbus_read_byte_data(client, MAX31730_REG_MFG_REV);
+ if (regval != MAX31730_MFG_REV)
+ return -ENODEV;
+
+	/* lower 4 bits of the temperature and limit registers must be 0 */
+ if (max31730_check_reg_temp(client, MAX31730_REG_TEMP_MIN))
+ return -ENODEV;
+
+ for (i = 0; i < 4; i++) {
+ if (max31730_check_reg_temp(client, MAX31730_REG_TEMP + i * 2))
+ return -ENODEV;
+ if (max31730_check_reg_temp(client,
+ MAX31730_REG_TEMP_MAX + i * 2))
+ return -ENODEV;
+ }
+
+ strlcpy(info->type, "max31730", I2C_NAME_SIZE);
+
+ return 0;
+}
+
+static int __maybe_unused max31730_suspend(struct device *dev)
+{
+ struct max31730_data *data = dev_get_drvdata(dev);
+
+ return max31730_write_config(data, MAX31730_STOP, 0);
+}
+
+static int __maybe_unused max31730_resume(struct device *dev)
+{
+ struct max31730_data *data = dev_get_drvdata(dev);
+
+ return max31730_write_config(data, 0, MAX31730_STOP);
+}
+
+static SIMPLE_DEV_PM_OPS(max31730_pm_ops, max31730_suspend, max31730_resume);
+
+static struct i2c_driver max31730_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "max31730",
+ .of_match_table = of_match_ptr(max31730_of_match),
+ .pm = &max31730_pm_ops,
+ },
+ .probe = max31730_probe,
+ .id_table = max31730_ids,
+ .detect = max31730_detect,
+ .address_list = normal_i2c,
+};
+
+module_i2c_driver(max31730_driver);
+
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
+MODULE_DESCRIPTION("MAX31730 driver");
+MODULE_LICENSE("GPL");
static const u8 REG_VOLTAGE[5] = { 0x09, 0x0a, 0x0c, 0x0d, 0x0e };
static const u8 REG_VOLTAGE_LIMIT_LSB[2][5] = {
- { 0x40, 0x00, 0x42, 0x44, 0x46 },
- { 0x3f, 0x00, 0x41, 0x43, 0x45 },
+ { 0x46, 0x00, 0x40, 0x42, 0x44 },
+ { 0x45, 0x00, 0x3f, 0x41, 0x43 },
};
static const u8 REG_VOLTAGE_LIMIT_MSB[5] = { 0x48, 0x00, 0x47, 0x47, 0x48 };
struct nct7802_data {
struct regmap *regmap;
struct mutex access_lock; /* for multi-byte read and write operations */
+ u8 in_status;
+ struct mutex in_alarm_lock;
};
static ssize_t temp_type_show(struct device *dev,
return err ? : count;
}
+static ssize_t in_alarm_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+ struct nct7802_data *data = dev_get_drvdata(dev);
+ int volt, min, max, ret;
+ unsigned int val;
+
+ mutex_lock(&data->in_alarm_lock);
+
+ /*
+ * The SMI Voltage status register is the only register giving a status
+	 * for voltages. A bit is set for each input crossing a threshold, in
+	 * either direction, but whether the input is now inside or outside its
+	 * limits is not reported. Also, this register is cleared on read.
+	 * Note: this is not explicitly spelled out in the datasheet, but was
+	 * determined by experiment.
+ * To deal with this we use a status cache with one validity bit and
+ * one status bit for each input. Validity is cleared at startup and
+ * each time the register reports a change, and the status is processed
+ * by software based on current input value and limits.
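+	 * As implemented below, bits 0-3 of in_status cache the alarm state
+	 * for the inputs (indexed by sattr->index) and bits 4-7 hold the
+	 * corresponding validity bits.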
+ */
+ ret = regmap_read(data->regmap, 0x1e, &val); /* SMI Voltage status */
+ if (ret < 0)
+ goto abort;
+
+ /* invalidate cached status for all inputs crossing a threshold */
+ data->in_status &= ~((val & 0x0f) << 4);
+
+ /* if cached status for requested input is invalid, update it */
+ if (!(data->in_status & (0x10 << sattr->index))) {
+ ret = nct7802_read_voltage(data, sattr->nr, 0);
+ if (ret < 0)
+ goto abort;
+ volt = ret;
+
+ ret = nct7802_read_voltage(data, sattr->nr, 1);
+ if (ret < 0)
+ goto abort;
+ min = ret;
+
+ ret = nct7802_read_voltage(data, sattr->nr, 2);
+ if (ret < 0)
+ goto abort;
+ max = ret;
+
+ if (volt < min || volt > max)
+ data->in_status |= (1 << sattr->index);
+ else
+ data->in_status &= ~(1 << sattr->index);
+
+ data->in_status |= 0x10 << sattr->index;
+ }
+
+ ret = sprintf(buf, "%u\n", !!(data->in_status & (1 << sattr->index)));
+abort:
+ mutex_unlock(&data->in_alarm_lock);
+ return ret;
+}
+
static ssize_t temp_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
static SENSOR_DEVICE_ATTR_2_RO(in0_input, in, 0, 0);
static SENSOR_DEVICE_ATTR_2_RW(in0_min, in, 0, 1);
static SENSOR_DEVICE_ATTR_2_RW(in0_max, in, 0, 2);
-static SENSOR_DEVICE_ATTR_2_RO(in0_alarm, alarm, 0x1e, 3);
+static SENSOR_DEVICE_ATTR_2_RO(in0_alarm, in_alarm, 0, 3);
static SENSOR_DEVICE_ATTR_2_RW(in0_beep, beep, 0x5a, 3);
static SENSOR_DEVICE_ATTR_2_RO(in1_input, in, 1, 0);
static SENSOR_DEVICE_ATTR_2_RO(in2_input, in, 2, 0);
static SENSOR_DEVICE_ATTR_2_RW(in2_min, in, 2, 1);
static SENSOR_DEVICE_ATTR_2_RW(in2_max, in, 2, 2);
-static SENSOR_DEVICE_ATTR_2_RO(in2_alarm, alarm, 0x1e, 0);
+static SENSOR_DEVICE_ATTR_2_RO(in2_alarm, in_alarm, 2, 0);
static SENSOR_DEVICE_ATTR_2_RW(in2_beep, beep, 0x5a, 0);
static SENSOR_DEVICE_ATTR_2_RO(in3_input, in, 3, 0);
static SENSOR_DEVICE_ATTR_2_RW(in3_min, in, 3, 1);
static SENSOR_DEVICE_ATTR_2_RW(in3_max, in, 3, 2);
-static SENSOR_DEVICE_ATTR_2_RO(in3_alarm, alarm, 0x1e, 1);
+static SENSOR_DEVICE_ATTR_2_RO(in3_alarm, in_alarm, 3, 1);
static SENSOR_DEVICE_ATTR_2_RW(in3_beep, beep, 0x5a, 1);
static SENSOR_DEVICE_ATTR_2_RO(in4_input, in, 4, 0);
static SENSOR_DEVICE_ATTR_2_RW(in4_min, in, 4, 1);
static SENSOR_DEVICE_ATTR_2_RW(in4_max, in, 4, 2);
-static SENSOR_DEVICE_ATTR_2_RO(in4_alarm, alarm, 0x1e, 2);
+static SENSOR_DEVICE_ATTR_2_RO(in4_alarm, in_alarm, 4, 2);
static SENSOR_DEVICE_ATTR_2_RW(in4_beep, beep, 0x5a, 2);
static struct attribute *nct7802_in_attrs[] = {
return PTR_ERR(data->regmap);
mutex_init(&data->access_lock);
+ mutex_init(&data->in_alarm_lock);
ret = nct7802_init_chip(data);
if (ret < 0)
help
If you say yes here you get hardware monitoring support for generic
PMBus devices, including but not limited to ADP4000, BMR453, BMR454,
- MDT040, NCP4200, NCP4208, PDT003, PDT006, PDT012, TPS40400, TPS544B20,
- TPS544B25, TPS544C20, TPS544C25, and UDT020.
+ MAX20796, MDT040, NCP4200, NCP4208, PDT003, PDT006, PDT012, TPS40400,
+ TPS544B20, TPS544B25, TPS544C20, TPS544C25, and UDT020.
This driver can also be built as a module. If so, the module will
be called pmbus.
This driver can also be built as a module. If so, the module will
be called max16064.
+config SENSORS_MAX20730
+ tristate "Maxim MAX20730, MAX20734, MAX20743"
+ help
+ If you say yes here you get hardware monitoring support for Maxim
+ MAX20730, MAX20734, and MAX20743.
+
+ This driver can also be built as a module. If so, the module will
+ be called max20730.
+
config SENSORS_MAX20751
tristate "Maxim MAX20751"
help
be called tps40422.
config SENSORS_TPS53679
- tristate "TI TPS53679"
+ tristate "TI TPS53679, TPS53688"
help
If you say yes here you get hardware monitoring support for TI
- TPS53679.
+	  TPS53679 and TPS53688.
This driver can also be built as a module. If so, the module will
be called tps53679.
config SENSORS_UCD9000
- tristate "TI UCD90120, UCD90124, UCD90160, UCD9090, UCD90910"
+ tristate "TI UCD90120, UCD90124, UCD90160, UCD90320, UCD9090, UCD90910"
help
If you say yes here you get hardware monitoring support for TI
- UCD90120, UCD90124, UCD90160, UCD9090, UCD90910, Sequencer and System
- Health Controllers.
+ UCD90120, UCD90124, UCD90160, UCD90320, UCD9090, UCD90910, Sequencer
+ and System Health Controllers.
This driver can also be built as a module. If so, the module will
be called ucd9000.
This driver can also be built as a module. If so, the module will
be called ucd9200.
+config SENSORS_XDPE122
+ tristate "Infineon XDPE122 family"
+ help
+ If you say yes here you get hardware monitoring support for Infineon
+	  XDPE12254 and XDPE12284 devices.
+
+ This driver can also be built as a module. If so, the module will
+ be called xdpe12284.
+
config SENSORS_ZL6100
tristate "Intersil ZL6100 and compatibles"
help
obj-$(CONFIG_SENSORS_LTC2978) += ltc2978.o
obj-$(CONFIG_SENSORS_LTC3815) += ltc3815.o
obj-$(CONFIG_SENSORS_MAX16064) += max16064.o
+obj-$(CONFIG_SENSORS_MAX20730) += max20730.o
obj-$(CONFIG_SENSORS_MAX20751) += max20751.o
obj-$(CONFIG_SENSORS_MAX31785) += max31785.o
obj-$(CONFIG_SENSORS_MAX34440) += max34440.o
obj-$(CONFIG_SENSORS_TPS53679) += tps53679.o
obj-$(CONFIG_SENSORS_UCD9000) += ucd9000.o
obj-$(CONFIG_SENSORS_UCD9200) += ucd9200.o
+obj-$(CONFIG_SENSORS_XDPE122) += xdpe12284.o
obj-$(CONFIG_SENSORS_ZL6100) += zl6100.o
#define CFFPS_FRU_CMD 0x9A
#define CFFPS_PN_CMD 0x9B
+#define CFFPS_HEADER_CMD 0x9C
#define CFFPS_SN_CMD 0x9E
+#define CFFPS_MAX_POWER_OUT_CMD 0xA7
#define CFFPS_CCIN_CMD 0xBD
#define CFFPS_FW_CMD 0xFA
#define CFFPS1_FW_NUM_BYTES 4
#define CFFPS2_FW_NUM_WORDS 3
#define CFFPS_SYS_CONFIG_CMD 0xDA
+#define CFFPS_12VCS_VOUT_CMD 0xDE
#define CFFPS_INPUT_HISTORY_CMD 0xD6
#define CFFPS_INPUT_HISTORY_SIZE 100
#define CFFPS_MFR_VAUX_FAULT BIT(6)
#define CFFPS_MFR_CURRENT_SHARE_WARNING BIT(7)
-/*
- * LED off state actually relinquishes LED control to PSU firmware, so it can
- * turn on the LED for faults.
- */
-#define CFFPS_LED_OFF 0
#define CFFPS_LED_BLINK BIT(0)
#define CFFPS_LED_ON BIT(1)
+#define CFFPS_LED_OFF BIT(2)
#define CFFPS_BLINK_RATE_MS 250
enum {
CFFPS_DEBUGFS_INPUT_HISTORY = 0,
CFFPS_DEBUGFS_FRU,
CFFPS_DEBUGFS_PN,
+ CFFPS_DEBUGFS_HEADER,
CFFPS_DEBUGFS_SN,
+ CFFPS_DEBUGFS_MAX_POWER_OUT,
CFFPS_DEBUGFS_CCIN,
CFFPS_DEBUGFS_FW,
+ CFFPS_DEBUGFS_ON_OFF_CONFIG,
CFFPS_DEBUGFS_NUM_ENTRIES
};
psu->input_history.byte_count);
}
-static ssize_t ibm_cffps_debugfs_op(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t ibm_cffps_debugfs_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
{
u8 cmd;
int i, rc;
int *idxp = file->private_data;
int idx = *idxp;
struct ibm_cffps *psu = to_psu(idxp, idx);
- char data[I2C_SMBUS_BLOCK_MAX] = { 0 };
+ char data[I2C_SMBUS_BLOCK_MAX + 2] = { 0 };
pmbus_set_page(psu->client, 0);
case CFFPS_DEBUGFS_PN:
cmd = CFFPS_PN_CMD;
break;
+ case CFFPS_DEBUGFS_HEADER:
+ cmd = CFFPS_HEADER_CMD;
+ break;
case CFFPS_DEBUGFS_SN:
cmd = CFFPS_SN_CMD;
break;
+ case CFFPS_DEBUGFS_MAX_POWER_OUT:
+ rc = i2c_smbus_read_word_swapped(psu->client,
+ CFFPS_MAX_POWER_OUT_CMD);
+ if (rc < 0)
+ return rc;
+
+ rc = snprintf(data, I2C_SMBUS_BLOCK_MAX, "%d", rc);
+ goto done;
case CFFPS_DEBUGFS_CCIN:
rc = i2c_smbus_read_word_swapped(psu->client, CFFPS_CCIN_CMD);
if (rc < 0)
return -EOPNOTSUPP;
}
goto done;
+ case CFFPS_DEBUGFS_ON_OFF_CONFIG:
+ rc = i2c_smbus_read_byte_data(psu->client,
+ PMBUS_ON_OFF_CONFIG);
+ if (rc < 0)
+ return rc;
+
+ rc = snprintf(data, 3, "%02x", rc);
+ goto done;
default:
return -EINVAL;
}
return simple_read_from_buffer(buf, count, ppos, data, rc);
}
+static ssize_t ibm_cffps_debugfs_write(struct file *file,
+ const char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ u8 data;
+ ssize_t rc;
+ int *idxp = file->private_data;
+ int idx = *idxp;
+ struct ibm_cffps *psu = to_psu(idxp, idx);
+
+ switch (idx) {
+ case CFFPS_DEBUGFS_ON_OFF_CONFIG:
+ pmbus_set_page(psu->client, 0);
+
+ rc = simple_write_to_buffer(&data, 1, ppos, buf, count);
+ if (rc <= 0)
+ return rc;
+
+ rc = i2c_smbus_write_byte_data(psu->client,
+ PMBUS_ON_OFF_CONFIG, data);
+ if (rc)
+ return rc;
+
+ rc = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
static const struct file_operations ibm_cffps_fops = {
.llseek = noop_llseek,
- .read = ibm_cffps_debugfs_op,
+ .read = ibm_cffps_debugfs_read,
+ .write = ibm_cffps_debugfs_write,
.open = simple_open,
};
if (mfr & CFFPS_MFR_PS_KILL)
rc |= PB_STATUS_OFF;
break;
+ case PMBUS_VIRT_READ_VMON:
+ rc = pmbus_read_word_data(client, page, CFFPS_12VCS_VOUT_CMD);
+ break;
default:
rc = -ENODATA;
break;
rc = devm_led_classdev_register(dev, &psu->led);
if (rc)
dev_warn(dev, "failed to register led class: %d\n", rc);
+ else
+ i2c_smbus_write_byte_data(client, CFFPS_SYS_CONFIG_CMD,
+ CFFPS_LED_OFF);
}
static struct pmbus_driver_info ibm_cffps_info[] = {
PMBUS_HAVE_TEMP2 | PMBUS_HAVE_TEMP3 |
PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT |
PMBUS_HAVE_STATUS_INPUT | PMBUS_HAVE_STATUS_TEMP |
- PMBUS_HAVE_STATUS_FAN12,
+ PMBUS_HAVE_STATUS_FAN12 | PMBUS_HAVE_VMON,
.func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT |
PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2 | PMBUS_HAVE_TEMP3 |
PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT,
debugfs_create_file("part_number", 0444, ibm_cffps_dir,
&psu->debugfs_entries[CFFPS_DEBUGFS_PN],
&ibm_cffps_fops);
+ debugfs_create_file("header", 0444, ibm_cffps_dir,
+ &psu->debugfs_entries[CFFPS_DEBUGFS_HEADER],
+ &ibm_cffps_fops);
debugfs_create_file("serial_number", 0444, ibm_cffps_dir,
&psu->debugfs_entries[CFFPS_DEBUGFS_SN],
&ibm_cffps_fops);
+ debugfs_create_file("max_power_out", 0444, ibm_cffps_dir,
+ &psu->debugfs_entries[CFFPS_DEBUGFS_MAX_POWER_OUT],
+ &ibm_cffps_fops);
debugfs_create_file("ccin", 0444, ibm_cffps_dir,
&psu->debugfs_entries[CFFPS_DEBUGFS_CCIN],
&ibm_cffps_fops);
debugfs_create_file("fw_version", 0444, ibm_cffps_dir,
&psu->debugfs_entries[CFFPS_DEBUGFS_FW],
&ibm_cffps_fops);
+ debugfs_create_file("on_off_config", 0644, ibm_cffps_dir,
+ &psu->debugfs_entries[CFFPS_DEBUGFS_ON_OFF_CONFIG],
+ &ibm_cffps_fops);
return 0;
}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for MAX20730, MAX20734, and MAX20743 Integrated, Step-Down
+ * Switching Regulators
+ *
+ * Copyright 2019 Google LLC.
+ */
+
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/pmbus.h>
+#include <linux/util_macros.h>
+#include "pmbus.h"
+
+enum chips {
+ max20730,
+ max20734,
+ max20743
+};
+
+struct max20730_data {
+ enum chips id;
+ struct pmbus_driver_info info;
+ struct mutex lock; /* Used to protect against parallel writes */
+ u16 mfr_devset1;
+};
+
+#define to_max20730_data(x) container_of(x, struct max20730_data, info)
+
+#define MAX20730_MFR_DEVSET1 0xd2
+
+/*
+ * Convert discrete value to direct data format. Strictly speaking, all passed
+ * values are constants, so we could do that calculation manually. On the
+ * downside, that would make the driver more difficult to maintain, so let's
+ * use this approach.
+ */
+static u16 val_to_direct(int v, enum pmbus_sensor_classes class,
+ const struct pmbus_driver_info *info)
+{
+ int R = info->R[class] - 3; /* take milli-units into account */
+ int b = info->b[class] * 1000;
+ long d;
+
+ d = v * info->m[class] + b;
+ /*
+ * R < 0 is true for all callers, so we don't need to bother
+ * about the R > 0 case.
+ */
+ while (R < 0) {
+ d = DIV_ROUND_CLOSEST(d, 10);
+ R++;
+ }
+ return (u16)d;
+}
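+
+/*
+ * Worked example (illustrative): with the MAX20730 temperature coefficients
+ * below (m = 21, b = 5887, R = -1), val_to_direct(150000, PSC_TEMPERATURE,
+ * info) evaluates to (150000 * 21 + 5887000) / 10000 = 904. direct_to_val()
+ * is the inverse conversion.
+ */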
+
+static long direct_to_val(u16 w, enum pmbus_sensor_classes class,
+ const struct pmbus_driver_info *info)
+{
+ int R = info->R[class] - 3;
+ int b = info->b[class] * 1000;
+ int m = info->m[class];
+ long d = (s16)w;
+
+ if (m == 0)
+ return 0;
+
+ while (R < 0) {
+ d *= 10;
+ R++;
+ }
+ d = (d - b) / m;
+ return d;
+}
+
+static u32 max_current[][5] = {
+ [max20730] = { 13000, 16600, 20100, 23600 },
+ [max20734] = { 21000, 27000, 32000, 38000 },
+ [max20743] = { 18900, 24100, 29200, 34100 },
+};
+
+static int max20730_read_word_data(struct i2c_client *client, int page, int reg)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ const struct max20730_data *data = to_max20730_data(info);
+ int ret = 0;
+ u32 max_c;
+
+ switch (reg) {
+ case PMBUS_OT_FAULT_LIMIT:
+ switch ((data->mfr_devset1 >> 11) & 0x3) {
+ case 0x0:
+ ret = val_to_direct(150000, PSC_TEMPERATURE, info);
+ break;
+ case 0x1:
+ ret = val_to_direct(130000, PSC_TEMPERATURE, info);
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+ break;
+ case PMBUS_IOUT_OC_FAULT_LIMIT:
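+		/* bits 6:5 of MFR_DEVSET1 select one of four current limits */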
+ max_c = max_current[data->id][(data->mfr_devset1 >> 5) & 0x3];
+ ret = val_to_direct(max_c, PSC_CURRENT_OUT, info);
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+ return ret;
+}
+
+static int max20730_write_word_data(struct i2c_client *client, int page,
+ int reg, u16 word)
+{
+ struct pmbus_driver_info *info;
+ struct max20730_data *data;
+ u16 devset1;
+ int ret = 0;
+ int idx;
+
+ info = (struct pmbus_driver_info *)pmbus_get_driver_info(client);
+ data = to_max20730_data(info);
+
+ mutex_lock(&data->lock);
+ devset1 = data->mfr_devset1;
+
+ switch (reg) {
+ case PMBUS_OT_FAULT_LIMIT:
+ devset1 &= ~(BIT(11) | BIT(12));
+ if (direct_to_val(word, PSC_TEMPERATURE, info) < 140000)
+ devset1 |= BIT(11);
+ break;
+ case PMBUS_IOUT_OC_FAULT_LIMIT:
+ devset1 &= ~(BIT(5) | BIT(6));
+
+ idx = find_closest(direct_to_val(word, PSC_CURRENT_OUT, info),
+ max_current[data->id], 4);
+ devset1 |= (idx << 5);
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+
+ if (!ret && devset1 != data->mfr_devset1) {
+ ret = i2c_smbus_write_word_data(client, MAX20730_MFR_DEVSET1,
+ devset1);
+ if (!ret) {
+ data->mfr_devset1 = devset1;
+ pmbus_clear_cache(client);
+ }
+ }
+ mutex_unlock(&data->lock);
+ return ret;
+}
+
+static const struct pmbus_driver_info max20730_info[] = {
+ [max20730] = {
+ .pages = 1,
+ .read_word_data = max20730_read_word_data,
+ .write_word_data = max20730_write_word_data,
+
+ /* Source : Maxim AN6042 */
+ .format[PSC_TEMPERATURE] = direct,
+ .m[PSC_TEMPERATURE] = 21,
+ .b[PSC_TEMPERATURE] = 5887,
+ .R[PSC_TEMPERATURE] = -1,
+
+ .format[PSC_VOLTAGE_IN] = direct,
+ .m[PSC_VOLTAGE_IN] = 3609,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = -2,
+
+ /*
+ * Values in the datasheet are adjusted for temperature and
+ * for the relationship between Vin and Vout.
+		 * Unfortunately, the datasheet suggests that Vout measurement
+		 * may be scaled with a resistor array. This is indeed the case
+		 * at least on the evaluation boards. As a result, any in-driver
+ * adjustments would either be wrong or require elaborate means
+ * to configure the scaling. Instead of doing that, just report
+ * raw values and let userspace handle adjustments.
+ */
+ .format[PSC_CURRENT_OUT] = direct,
+ .m[PSC_CURRENT_OUT] = 153,
+ .b[PSC_CURRENT_OUT] = 4976,
+ .R[PSC_CURRENT_OUT] = -1,
+
+ .format[PSC_VOLTAGE_OUT] = linear,
+
+ .func[0] = PMBUS_HAVE_VIN |
+ PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ },
+ [max20734] = {
+ .pages = 1,
+ .read_word_data = max20730_read_word_data,
+ .write_word_data = max20730_write_word_data,
+
+ /* Source : Maxim AN6209 */
+ .format[PSC_TEMPERATURE] = direct,
+ .m[PSC_TEMPERATURE] = 21,
+ .b[PSC_TEMPERATURE] = 5887,
+ .R[PSC_TEMPERATURE] = -1,
+
+ .format[PSC_VOLTAGE_IN] = direct,
+ .m[PSC_VOLTAGE_IN] = 3592,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = -2,
+
+ .format[PSC_CURRENT_OUT] = direct,
+ .m[PSC_CURRENT_OUT] = 111,
+ .b[PSC_CURRENT_OUT] = 3461,
+ .R[PSC_CURRENT_OUT] = -1,
+
+ .format[PSC_VOLTAGE_OUT] = linear,
+
+ .func[0] = PMBUS_HAVE_VIN |
+ PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ },
+ [max20743] = {
+ .pages = 1,
+ .read_word_data = max20730_read_word_data,
+ .write_word_data = max20730_write_word_data,
+
+ /* Source : Maxim AN6042 */
+ .format[PSC_TEMPERATURE] = direct,
+ .m[PSC_TEMPERATURE] = 21,
+ .b[PSC_TEMPERATURE] = 5887,
+ .R[PSC_TEMPERATURE] = -1,
+
+ .format[PSC_VOLTAGE_IN] = direct,
+ .m[PSC_VOLTAGE_IN] = 3597,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = -2,
+
+ .format[PSC_CURRENT_OUT] = direct,
+ .m[PSC_CURRENT_OUT] = 95,
+ .b[PSC_CURRENT_OUT] = 5014,
+ .R[PSC_CURRENT_OUT] = -1,
+
+ .format[PSC_VOLTAGE_OUT] = linear,
+
+ .func[0] = PMBUS_HAVE_VIN |
+ PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ },
+};
+
+static int max20730_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ u8 buf[I2C_SMBUS_BLOCK_MAX + 1];
+ struct max20730_data *data;
+ enum chips chip_id;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_BYTE_DATA |
+ I2C_FUNC_SMBUS_READ_WORD_DATA |
+ I2C_FUNC_SMBUS_BLOCK_DATA))
+ return -ENODEV;
+
+ ret = i2c_smbus_read_block_data(client, PMBUS_MFR_ID, buf);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to read Manufacturer ID\n");
+ return ret;
+ }
+ if (ret != 5 || strncmp(buf, "MAXIM", 5)) {
+ buf[ret] = '\0';
+ dev_err(dev, "Unsupported Manufacturer ID '%s'\n", buf);
+ return -ENODEV;
+ }
+
+ /*
+ * The chips support reading PMBUS_MFR_MODEL. On both MAX20730
+ * and MAX20734, reading it returns M20743. Presumably that is
+ * the reason why the command is not documented. Unfortunately,
+ * that means that there is no reliable means to detect the chip.
+ * However, we can at least detect the chip series. Compare
+ * the returned value against 'M20743' and bail out if there is
+ * a mismatch. If that doesn't work for all chips, we may have
+ * to remove this check.
+ */
+ ret = i2c_smbus_read_block_data(client, PMBUS_MFR_MODEL, buf);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read Manufacturer Model\n");
+ return ret;
+ }
+ if (ret != 6 || strncmp(buf, "M20743", 6)) {
+ buf[ret] = '\0';
+ dev_err(dev, "Unsupported Manufacturer Model '%s'\n", buf);
+ return -ENODEV;
+ }
+
+ ret = i2c_smbus_read_block_data(client, PMBUS_MFR_REVISION, buf);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read Manufacturer Revision\n");
+ return ret;
+ }
+ if (ret != 1 || buf[0] != 'F') {
+ buf[ret] = '\0';
+ dev_err(dev, "Unsupported Manufacturer Revision '%s'\n", buf);
+ return -ENODEV;
+ }
+
+ if (client->dev.of_node)
+ chip_id = (enum chips)of_device_get_match_data(dev);
+ else
+ chip_id = id->driver_data;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ data->id = chip_id;
+ mutex_init(&data->lock);
+ memcpy(&data->info, &max20730_info[chip_id], sizeof(data->info));
+
+ ret = i2c_smbus_read_word_data(client, MAX20730_MFR_DEVSET1);
+ if (ret < 0)
+ return ret;
+ data->mfr_devset1 = ret;
+
+ return pmbus_do_probe(client, id, &data->info);
+}
+
+static const struct i2c_device_id max20730_id[] = {
+ { "max20730", max20730 },
+ { "max20734", max20734 },
+ { "max20743", max20743 },
+ { },
+};
+
+MODULE_DEVICE_TABLE(i2c, max20730_id);
+
+static const struct of_device_id max20730_of_match[] = {
+ { .compatible = "maxim,max20730", .data = (void *)max20730 },
+ { .compatible = "maxim,max20734", .data = (void *)max20734 },
+ { .compatible = "maxim,max20743", .data = (void *)max20743 },
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, max20730_of_match);
+
+static struct i2c_driver max20730_driver = {
+ .driver = {
+ .name = "max20730",
+ .of_match_table = max20730_of_match,
+ },
+ .probe = max20730_probe,
+ .remove = pmbus_do_remove,
+ .id_table = max20730_id,
+};
+
+module_i2c_driver(max20730_driver);
+
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
+MODULE_DESCRIPTION("PMBus driver for Maxim MAX20730 / MAX20734 / MAX20743");
+MODULE_LICENSE("GPL");
.pages = 1,
.format[PSC_VOLTAGE_IN] = linear,
.format[PSC_VOLTAGE_OUT] = vid,
- .vrm_version = vr12,
+ .vrm_version[0] = vr12,
.format[PSC_TEMPERATURE] = linear,
.format[PSC_CURRENT_OUT] = linear,
.format[PSC_POWER] = linear,
}
if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE)) {
- int vout_mode;
+ int vout_mode, i;
vout_mode = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE);
if (vout_mode >= 0 && vout_mode != 0xff) {
break;
case 1:
info->format[PSC_VOLTAGE_OUT] = vid;
- info->vrm_version = vr11;
+ for (i = 0; i < info->pages; i++)
+ info->vrm_version[i] = vr11;
break;
case 2:
info->format[PSC_VOLTAGE_OUT] = direct;
{"dps460", (kernel_ulong_t)&pmbus_info_one_skip},
{"dps650ab", (kernel_ulong_t)&pmbus_info_one_skip},
{"dps800", (kernel_ulong_t)&pmbus_info_one_skip},
+ {"max20796", (kernel_ulong_t)&pmbus_info_one},
{"mdt040", (kernel_ulong_t)&pmbus_info_one},
{"ncp4200", (kernel_ulong_t)&pmbus_info_one},
{"ncp4208", (kernel_ulong_t)&pmbus_info_one},
PMBUS_CLEAR_FAULTS = 0x03,
PMBUS_PHASE = 0x04,
+ PMBUS_WRITE_PROTECT = 0x10,
+
PMBUS_CAPABILITY = 0x19,
PMBUS_QUERY = 0x1A,
*/
#define PB_OPERATION_CONTROL_ON BIT(7)
+/*
+ * WRITE_PROTECT
+ */
+#define PB_WP_ALL BIT(7) /* all but WRITE_PROTECT */
+#define PB_WP_OP BIT(6) /* all but WP, OPERATION, PAGE */
+#define PB_WP_VOUT BIT(5) /* all but WP, OPERATION, PAGE, VOUT, ON_OFF */
+
+#define PB_WP_ANY (PB_WP_ALL | PB_WP_OP | PB_WP_VOUT)
+
/*
* CAPABILITY
*/
#define PMBUS_PAGE_VIRTUAL BIT(31)
enum pmbus_data_format { linear = 0, direct, vid };
-enum vrm_version { vr11 = 0, vr12, vr13 };
+enum vrm_version { vr11 = 0, vr12, vr13, imvp9, amd625mv };
struct pmbus_driver_info {
int pages; /* Total number of pages */
enum pmbus_data_format format[PSC_NUM_CLASSES];
- enum vrm_version vrm_version;
+ enum vrm_version vrm_version[PMBUS_PAGES]; /* vrm version per page */
/*
* Support one set of coefficients for each sensor type
* Used for chips providing data in direct mode.
long val = sensor->data;
long rv = 0;
- switch (data->info->vrm_version) {
+ switch (data->info->vrm_version[sensor->page]) {
case vr11:
if (val >= 0x02 && val <= 0xb2)
rv = DIV_ROUND_CLOSEST(160000 - (val - 2) * 625, 100);
if (val >= 0x01)
rv = 500 + (val - 1) * 10;
break;
+ case imvp9:
+ if (val >= 0x01)
+ rv = 200 + (val - 1) * 10;
+ break;
+ case amd625mv:
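+		/* 6.25 mV steps down from 1.55 V: 0x00 -> 1550 mV, 0xd8 -> 200 mV */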
+ if (val >= 0x0 && val <= 0xd8)
+ rv = DIV_ROUND_CLOSEST(155000 - val * 625, 100);
+ break;
}
return rv;
}
snprintf(sensor->name, sizeof(sensor->name), "%s%d",
name, seq);
+ if (data->flags & PMBUS_WRITE_PROTECTED)
+ readonly = true;
+
sensor->page = page;
sensor->reg = reg;
sensor->class = class;
if (ret >= 0 && (ret & PB_CAPABILITY_ERROR_CHECK))
client->flags |= I2C_CLIENT_PEC;
+ /*
+	 * Check if the chip is write protected. If it is, we cannot clear
+ * faults, and we should not try it. Also, in that case, writes into
+ * limit registers need to be disabled.
+ */
+ ret = i2c_smbus_read_byte_data(client, PMBUS_WRITE_PROTECT);
+ if (ret > 0 && (ret & PB_WP_ANY))
+ data->flags |= PMBUS_WRITE_PROTECTED | PMBUS_SKIP_STATUS_CHECK;
+
if (data->info->pages)
pmbus_clear_faults(client);
else
static int pxe1610_identify(struct i2c_client *client,
struct pmbus_driver_info *info)
{
- if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE)) {
- u8 vout_mode;
- int ret;
-
- /* Read the register with VOUT scaling value.*/
- ret = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE);
- if (ret < 0)
- return ret;
-
- vout_mode = ret & GENMASK(4, 0);
-
- switch (vout_mode) {
- case 1:
- info->vrm_version = vr12;
- break;
- case 2:
- info->vrm_version = vr13;
- break;
- default:
- return -ENODEV;
+ int i;
+
+ for (i = 0; i < PXE1610_NUM_PAGES; i++) {
+ if (pmbus_check_byte_register(client, i, PMBUS_VOUT_MODE)) {
+ u8 vout_mode;
+ int ret;
+
+			/* Read the register with VOUT scaling value. */
+ ret = pmbus_read_byte_data(client, i, PMBUS_VOUT_MODE);
+ if (ret < 0)
+ return ret;
+
+ vout_mode = ret & GENMASK(4, 0);
+
+ switch (vout_mode) {
+ case 1:
+ info->vrm_version[i] = vr12;
+ break;
+ case 2:
+ info->vrm_version[i] = vr13;
+ break;
+ default:
+ return -ENODEV;
+ }
}
}
struct pmbus_driver_info *info)
{
u8 vout_params;
- int ret;
-
- /* Read the register with VOUT scaling value.*/
- ret = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE);
- if (ret < 0)
- return ret;
-
- vout_params = ret & GENMASK(4, 0);
-
- switch (vout_params) {
- case TPS53679_PROT_VR13_10MV:
- case TPS53679_PROT_VR12_5_10MV:
- info->vrm_version = vr13;
- break;
- case TPS53679_PROT_VR13_5MV:
- case TPS53679_PROT_VR12_5MV:
- case TPS53679_PROT_IMVP8_5MV:
- info->vrm_version = vr12;
- break;
- default:
- return -EINVAL;
+ int i, ret;
+
+ for (i = 0; i < TPS53679_PAGE_NUM; i++) {
+		/* Read the register with VOUT scaling value. */
+ ret = pmbus_read_byte_data(client, i, PMBUS_VOUT_MODE);
+ if (ret < 0)
+ return ret;
+
+ vout_params = ret & GENMASK(4, 0);
+
+ switch (vout_params) {
+ case TPS53679_PROT_VR13_10MV:
+ case TPS53679_PROT_VR12_5_10MV:
+ info->vrm_version[i] = vr13;
+ break;
+ case TPS53679_PROT_VR13_5MV:
+ case TPS53679_PROT_VR12_5MV:
+ case TPS53679_PROT_IMVP8_5MV:
+ info->vrm_version[i] = vr12;
+ break;
+ default:
+ return -EINVAL;
+ }
}
return 0;
static const struct i2c_device_id tps53679_id[] = {
{"tps53679", 0},
+ {"tps53688", 0},
{}
};
static const struct of_device_id __maybe_unused tps53679_of_match[] = {
{.compatible = "ti,tps53679"},
+ {.compatible = "ti,tps53688"},
{}
};
MODULE_DEVICE_TABLE(of, tps53679_of_match);
#include <linux/gpio/driver.h>
#include "pmbus.h"
-enum chips { ucd9000, ucd90120, ucd90124, ucd90160, ucd9090, ucd90910 };
+enum chips { ucd9000, ucd90120, ucd90124, ucd90160, ucd90320, ucd9090,
+ ucd90910 };
#define UCD9000_MONITOR_CONFIG 0xd5
#define UCD9000_NUM_PAGES 0xd6
#define UCD9000_GPIO_OUTPUT 1
#define UCD9000_MON_TYPE(x) (((x) >> 5) & 0x07)
-#define UCD9000_MON_PAGE(x) ((x) & 0x0f)
+#define UCD9000_MON_PAGE(x) ((x) & 0x1f)
#define UCD9000_MON_VOLTAGE 1
#define UCD9000_MON_TEMPERATURE 2
#define UCD9000_GPIO_NAME_LEN 16
#define UCD9090_NUM_GPIOS 23
#define UCD901XX_NUM_GPIOS 26
+#define UCD90320_NUM_GPIOS 84
#define UCD90910_NUM_GPIOS 26
#define UCD9000_DEBUGFS_NAME_LEN 24
#define UCD9000_GPI_COUNT 8
+#define UCD90320_GPI_COUNT 32
struct ucd9000_data {
u8 fan_data[UCD9000_NUM_FAN][I2C_SMBUS_BLOCK_MAX];
{"ucd90120", ucd90120},
{"ucd90124", ucd90124},
{"ucd90160", ucd90160},
+ {"ucd90320", ucd90320},
{"ucd9090", ucd9090},
{"ucd90910", ucd90910},
{}
.compatible = "ti,ucd90160",
.data = (void *)ucd90160
},
+ {
+ .compatible = "ti,ucd90320",
+ .data = (void *)ucd90320
+ },
{
.compatible = "ti,ucd9090",
.data = (void *)ucd9090
case ucd90160:
data->gpio.ngpio = UCD901XX_NUM_GPIOS;
break;
+ case ucd90320:
+ data->gpio.ngpio = UCD90320_NUM_GPIOS;
+ break;
case ucd90910:
data->gpio.ngpio = UCD90910_NUM_GPIOS;
break;
struct ucd9000_debugfs_entry *entry = data;
struct i2c_client *client = entry->client;
u8 buffer[I2C_SMBUS_BLOCK_MAX];
- int ret;
+ int ret, i;
ret = ucd9000_get_mfr_status(client, buffer);
if (ret < 0)
return ret;
/*
- * Attribute only created for devices with gpi fault bits at bits
- * 16-23, which is the second byte of the response.
+ * GPI fault bits are in sets of 8, two bytes from end of response.
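+	 * For example, with an 8-byte MFR_STATUS response (ret == 8), GPI
+	 * faults 1-8 (index 0-7) are reported in buffer[5] and faults 9-16
+	 * in buffer[4].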
*/
- *val = !!(buffer[1] & BIT(entry->index));
+ i = ret - 3 - entry->index / 8;
+ if (i >= 0)
+ *val = !!(buffer[i] & BIT(entry->index % 8));
return 0;
}
{
struct dentry *debugfs;
struct ucd9000_debugfs_entry *entries;
- int i;
+ int i, gpi_count;
char name[UCD9000_DEBUGFS_NAME_LEN];
debugfs = pmbus_get_debugfs_dir(client);
/*
* Of the chips this driver supports, only the UCD9090, UCD90160,
- * and UCD90910 report GPI faults in their MFR_STATUS register, so only
- * create the GPI fault debugfs attributes for those chips.
+ * UCD90320, and UCD90910 report GPI faults in their MFR_STATUS
+ * register, so only create the GPI fault debugfs attributes for those
+ * chips.
*/
if (mid->driver_data == ucd9090 || mid->driver_data == ucd90160 ||
- mid->driver_data == ucd90910) {
+ mid->driver_data == ucd90320 || mid->driver_data == ucd90910) {
+ gpi_count = mid->driver_data == ucd90320 ? UCD90320_GPI_COUNT
+ : UCD9000_GPI_COUNT;
entries = devm_kcalloc(&client->dev,
- UCD9000_GPI_COUNT, sizeof(*entries),
+ gpi_count, sizeof(*entries),
GFP_KERNEL);
if (!entries)
return -ENOMEM;
- for (i = 0; i < UCD9000_GPI_COUNT; i++) {
+ for (i = 0; i < gpi_count; i++) {
entries[i].client = client;
entries[i].index = i;
scnprintf(name, UCD9000_DEBUGFS_NAME_LEN,
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Hardware monitoring driver for Infineon Multi-phase Digital VR Controllers
+ *
+ * Copyright (c) 2020 Mellanox Technologies. All rights reserved.
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include "pmbus.h"
+
+#define XDPE122_PROT_VR12_5MV 0x01 /* VR12.0 mode, 5-mV DAC */
+#define XDPE122_PROT_VR12_5_10MV 0x02 /* VR12.5 mode, 10-mV DAC */
+#define XDPE122_PROT_IMVP9_10MV 0x03 /* IMVP9 mode, 10-mV DAC */
+#define XDPE122_AMD_625MV 0x10 /* AMD mode 6.25mV */
+#define XDPE122_PAGE_NUM 2
+
+static int xdpe122_identify(struct i2c_client *client,
+ struct pmbus_driver_info *info)
+{
+ u8 vout_params;
+ int i, ret;
+
+ for (i = 0; i < XDPE122_PAGE_NUM; i++) {
+		/* Read the register with VOUT scaling value. */
+ ret = pmbus_read_byte_data(client, i, PMBUS_VOUT_MODE);
+ if (ret < 0)
+ return ret;
+
+ vout_params = ret & GENMASK(4, 0);
+
+ switch (vout_params) {
+ case XDPE122_PROT_VR12_5_10MV:
+ info->vrm_version[i] = vr13;
+ break;
+ case XDPE122_PROT_VR12_5MV:
+ info->vrm_version[i] = vr12;
+ break;
+ case XDPE122_PROT_IMVP9_10MV:
+ info->vrm_version[i] = imvp9;
+ break;
+ case XDPE122_AMD_625MV:
+ info->vrm_version[i] = amd625mv;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static struct pmbus_driver_info xdpe122_info = {
+ .pages = XDPE122_PAGE_NUM,
+ .format[PSC_VOLTAGE_IN] = linear,
+ .format[PSC_VOLTAGE_OUT] = vid,
+ .format[PSC_TEMPERATURE] = linear,
+ .format[PSC_CURRENT_IN] = linear,
+ .format[PSC_CURRENT_OUT] = linear,
+ .format[PSC_POWER] = linear,
+ .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP |
+ PMBUS_HAVE_POUT | PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT,
+ .func[1] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP |
+ PMBUS_HAVE_POUT | PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT,
+ .identify = xdpe122_identify,
+};
+
+static int xdpe122_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct pmbus_driver_info *info;
+
+ info = devm_kmemdup(&client->dev, &xdpe122_info, sizeof(*info),
+ GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ return pmbus_do_probe(client, id, info);
+}
+
+static const struct i2c_device_id xdpe122_id[] = {
+ {"xdpe12254", 0},
+ {"xdpe12284", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, xdpe122_id);
+
+static const struct of_device_id __maybe_unused xdpe122_of_match[] = {
+	{.compatible = "infineon,xdpe12254"},
+	{.compatible = "infineon,xdpe12284"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, xdpe122_of_match);
+
+static struct i2c_driver xdpe122_driver = {
+ .driver = {
+ .name = "xdpe12284",
+ .of_match_table = of_match_ptr(xdpe122_of_match),
+ },
+ .probe = xdpe122_probe,
+ .remove = pmbus_do_remove,
+ .id_table = xdpe122_id,
+};
+
+module_i2c_driver(xdpe122_driver);
+
+MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>");
+MODULE_DESCRIPTION("PMBus driver for Infineon XDPE122 family");
+MODULE_LICENSE("GPL");
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int pwm_fan_suspend(struct device *dev)
+static int pwm_fan_disable(struct device *dev)
{
struct pwm_fan_ctx *ctx = dev_get_drvdata(dev);
struct pwm_args args;
return 0;
}
+static void pwm_fan_shutdown(struct platform_device *pdev)
+{
+ pwm_fan_disable(&pdev->dev);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int pwm_fan_suspend(struct device *dev)
+{
+ return pwm_fan_disable(dev);
+}
+
static int pwm_fan_resume(struct device *dev)
{
struct pwm_fan_ctx *ctx = dev_get_drvdata(dev);
static struct platform_driver pwm_fan_driver = {
.probe = pwm_fan_probe,
+ .shutdown = pwm_fan_shutdown,
.driver = {
.name = "pwm-fan",
.pm = &pwm_fan_pm,
* w83627uhg 8 2 2 3 0xa230 0xc1 0x5ca3
* w83667hg 9 5 3 3 0xa510 0xc1 0x5ca3
* w83667hg-b 9 5 3 4 0xb350 0xc1 0x5ca3
- * nct6775f 9 4 3 9 0xb470 0xc1 0x5ca3
- * nct6776f 9 5 3 9 0xC330 0xc1 0x5ca3
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
enum kinds {
w83627ehf, w83627dhg, w83627dhg_p, w83627uhg,
- w83667hg, w83667hg_b, nct6775, nct6776,
+ w83667hg, w83667hg_b,
};
/* used to set data->name = w83627ehf_device_names[data->sio_kind] */
"w83627uhg",
"w83667hg",
"w83667hg",
- "nct6775",
- "nct6776",
};
static unsigned short force_id;
module_param(force_id, ushort, 0);
MODULE_PARM_DESC(force_id, "Override the detected device ID");
-static unsigned short fan_debounce;
-module_param(fan_debounce, ushort, 0);
-MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal");
-
#define DRVNAME "w83627ehf"
/*
#define SIO_W83627UHG_ID 0xa230
#define SIO_W83667HG_ID 0xa510
#define SIO_W83667HG_B_ID 0xb350
-#define SIO_NCT6775_ID 0xb470
-#define SIO_NCT6776_ID 0xc330
#define SIO_ID_MASK 0xFFF0
static inline void
#define W83627EHF_REG_DIODE 0x59
#define W83627EHF_REG_SMI_OVT 0x4C
-/* NCT6775F has its own fan divider registers */
-#define NCT6775_REG_FANDIV1 0x506
-#define NCT6775_REG_FANDIV2 0x507
-#define NCT6775_REG_FAN_DEBOUNCE 0xf0
-
#define W83627EHF_REG_ALARM1 0x459
#define W83627EHF_REG_ALARM2 0x45A
#define W83627EHF_REG_ALARM3 0x45B
static const u16 W83627EHF_REG_TEMP_OFFSET[] = { 0x454, 0x455, 0x456 };
-static const u16 NCT6775_REG_TARGET[] = { 0x101, 0x201, 0x301 };
-static const u16 NCT6775_REG_FAN_MODE[] = { 0x102, 0x202, 0x302 };
-static const u16 NCT6775_REG_FAN_STOP_OUTPUT[] = { 0x105, 0x205, 0x305 };
-static const u16 NCT6775_REG_FAN_START_OUTPUT[] = { 0x106, 0x206, 0x306 };
-static const u16 NCT6775_REG_FAN_STOP_TIME[] = { 0x107, 0x207, 0x307 };
-static const u16 NCT6775_REG_PWM[] = { 0x109, 0x209, 0x309 };
-static const u16 NCT6775_REG_FAN_MAX_OUTPUT[] = { 0x10a, 0x20a, 0x30a };
-static const u16 NCT6775_REG_FAN_STEP_OUTPUT[] = { 0x10b, 0x20b, 0x30b };
-static const u16 NCT6775_REG_FAN[] = { 0x630, 0x632, 0x634, 0x636, 0x638 };
-static const u16 NCT6776_REG_FAN_MIN[] = { 0x63a, 0x63c, 0x63e, 0x640, 0x642};
-
-static const u16 NCT6775_REG_TEMP[]
- = { 0x27, 0x150, 0x250, 0x73, 0x75, 0x77, 0x62b, 0x62c, 0x62d };
-static const u16 NCT6775_REG_TEMP_CONFIG[]
- = { 0, 0x152, 0x252, 0, 0, 0, 0x628, 0x629, 0x62A };
-static const u16 NCT6775_REG_TEMP_HYST[]
- = { 0x3a, 0x153, 0x253, 0, 0, 0, 0x673, 0x678, 0x67D };
-static const u16 NCT6775_REG_TEMP_OVER[]
- = { 0x39, 0x155, 0x255, 0, 0, 0, 0x672, 0x677, 0x67C };
-static const u16 NCT6775_REG_TEMP_SOURCE[]
- = { 0x621, 0x622, 0x623, 0x100, 0x200, 0x300, 0x624, 0x625, 0x626 };
-
static const char *const w83667hg_b_temp_label[] = {
"SYSTIN",
"CPUTIN",
"PECI Agent 4"
};
-static const char *const nct6775_temp_label[] = {
- "",
- "SYSTIN",
- "CPUTIN",
- "AUXTIN",
- "AMD SB-TSI",
- "PECI Agent 0",
- "PECI Agent 1",
- "PECI Agent 2",
- "PECI Agent 3",
- "PECI Agent 4",
- "PECI Agent 5",
- "PECI Agent 6",
- "PECI Agent 7",
- "PCH_CHIP_CPU_MAX_TEMP",
- "PCH_CHIP_TEMP",
- "PCH_CPU_TEMP",
- "PCH_MCH_TEMP",
- "PCH_DIM0_TEMP",
- "PCH_DIM1_TEMP",
- "PCH_DIM2_TEMP",
- "PCH_DIM3_TEMP"
-};
-
-static const char *const nct6776_temp_label[] = {
- "",
- "SYSTIN",
- "CPUTIN",
- "AUXTIN",
- "SMBUSMASTER 0",
- "SMBUSMASTER 1",
- "SMBUSMASTER 2",
- "SMBUSMASTER 3",
- "SMBUSMASTER 4",
- "SMBUSMASTER 5",
- "SMBUSMASTER 6",
- "SMBUSMASTER 7",
- "PECI Agent 0",
- "PECI Agent 1",
- "PCH_CHIP_CPU_MAX_TEMP",
- "PCH_CHIP_TEMP",
- "PCH_CPU_TEMP",
- "PCH_MCH_TEMP",
- "PCH_DIM0_TEMP",
- "PCH_DIM1_TEMP",
- "PCH_DIM2_TEMP",
- "PCH_DIM3_TEMP",
- "BYTE_TEMP"
-};
-
-#define NUM_REG_TEMP ARRAY_SIZE(NCT6775_REG_TEMP)
+#define NUM_REG_TEMP ARRAY_SIZE(W83627EHF_REG_TEMP)
static int is_word_sized(u16 reg)
{
return 1350000U / (reg << divreg);
}
-static unsigned int fan_from_reg13(u16 reg, unsigned int divreg)
-{
- if ((reg & 0xff1f) == 0xff1f)
- return 0;
-
- reg = (reg & 0x1f) | ((reg & 0xff00) >> 3);
-
- if (reg == 0)
- return 0;
-
- return 1350000U / reg;
-}
-
-static unsigned int fan_from_reg16(u16 reg, unsigned int divreg)
-{
- if (reg == 0 || reg == 0xffff)
- return 0;
-
- /*
- * Even though the registers are 16 bit wide, the fan divisor
- * still applies.
- */
- return 1350000U / (reg << divreg);
-}
-
static inline unsigned int
div_from_reg(u8 reg)
{
int addr; /* IO base of hw monitor block */
const char *name;
- struct device *hwmon_dev;
struct mutex lock;
u16 reg_temp[NUM_REG_TEMP];
u8 temp_src[NUM_REG_TEMP];
const char * const *temp_label;
- const u16 *REG_PWM;
- const u16 *REG_TARGET;
- const u16 *REG_FAN;
- const u16 *REG_FAN_MIN;
- const u16 *REG_FAN_START_OUTPUT;
- const u16 *REG_FAN_STOP_OUTPUT;
- const u16 *REG_FAN_STOP_TIME;
const u16 *REG_FAN_MAX_OUTPUT;
const u16 *REG_FAN_STEP_OUTPUT;
const u16 *scale_in;
- unsigned int (*fan_from_reg)(u16 reg, unsigned int divreg);
- unsigned int (*fan_from_reg_min)(u16 reg, unsigned int divreg);
-
struct mutex update_lock;
char valid; /* !=0 if following fields are valid */
unsigned long last_updated; /* In jiffies */
u8 fan_div[5];
u8 has_fan; /* some fan inputs can be disabled */
u8 has_fan_min; /* some fans don't have min register */
- bool has_fan_div;
u8 temp_type[3];
s8 temp_offset[3];
s16 temp[9];
u16 have_temp_offset;
u8 in6_skip:1;
u8 temp3_val_only:1;
+ u8 have_vid:1;
#ifdef CONFIG_PM
/* Remember extra register values over suspend/resume */
return w83627ehf_write_value(data, reg, value);
}
-/* This function assumes that the caller holds data->update_lock */
-static void nct6775_write_fan_div(struct w83627ehf_data *data, int nr)
-{
- u8 reg;
-
- switch (nr) {
- case 0:
- reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV1) & 0x70)
- | (data->fan_div[0] & 0x7);
- w83627ehf_write_value(data, NCT6775_REG_FANDIV1, reg);
- break;
- case 1:
- reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV1) & 0x7)
- | ((data->fan_div[1] << 4) & 0x70);
- w83627ehf_write_value(data, NCT6775_REG_FANDIV1, reg);
- break;
- case 2:
- reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV2) & 0x70)
- | (data->fan_div[2] & 0x7);
- w83627ehf_write_value(data, NCT6775_REG_FANDIV2, reg);
- break;
- case 3:
- reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV2) & 0x7)
- | ((data->fan_div[3] << 4) & 0x70);
- w83627ehf_write_value(data, NCT6775_REG_FANDIV2, reg);
- break;
- }
-}
-
/* This function assumes that the caller holds data->update_lock */
static void w83627ehf_write_fan_div(struct w83627ehf_data *data, int nr)
{
}
}
-static void w83627ehf_write_fan_div_common(struct device *dev,
- struct w83627ehf_data *data, int nr)
-{
- struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
-
- if (sio_data->kind == nct6776)
- ; /* no dividers, do nothing */
- else if (sio_data->kind == nct6775)
- nct6775_write_fan_div(data, nr);
- else
- w83627ehf_write_fan_div(data, nr);
-}
-
-static void nct6775_update_fan_div(struct w83627ehf_data *data)
-{
- u8 i;
-
- i = w83627ehf_read_value(data, NCT6775_REG_FANDIV1);
- data->fan_div[0] = i & 0x7;
- data->fan_div[1] = (i & 0x70) >> 4;
- i = w83627ehf_read_value(data, NCT6775_REG_FANDIV2);
- data->fan_div[2] = i & 0x7;
- if (data->has_fan & (1<<3))
- data->fan_div[3] = (i & 0x70) >> 4;
-}
-
static void w83627ehf_update_fan_div(struct w83627ehf_data *data)
{
int i;
}
}
-static void w83627ehf_update_fan_div_common(struct device *dev,
- struct w83627ehf_data *data)
-{
- struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
-
- if (sio_data->kind == nct6776)
- ; /* no dividers, do nothing */
- else if (sio_data->kind == nct6775)
- nct6775_update_fan_div(data);
- else
- w83627ehf_update_fan_div(data);
-}
-
-static void nct6775_update_pwm(struct w83627ehf_data *data)
-{
- int i;
- int pwmcfg, fanmodecfg;
-
- for (i = 0; i < data->pwm_num; i++) {
- pwmcfg = w83627ehf_read_value(data,
- W83627EHF_REG_PWM_ENABLE[i]);
- fanmodecfg = w83627ehf_read_value(data,
- NCT6775_REG_FAN_MODE[i]);
- data->pwm_mode[i] =
- ((pwmcfg >> W83627EHF_PWM_MODE_SHIFT[i]) & 1) ? 0 : 1;
- data->pwm_enable[i] = ((fanmodecfg >> 4) & 7) + 1;
- data->tolerance[i] = fanmodecfg & 0x0f;
- data->pwm[i] = w83627ehf_read_value(data, data->REG_PWM[i]);
- }
-}
-
static void w83627ehf_update_pwm(struct w83627ehf_data *data)
{
int i;
((pwmcfg >> W83627EHF_PWM_MODE_SHIFT[i]) & 1) ? 0 : 1;
data->pwm_enable[i] = ((pwmcfg >> W83627EHF_PWM_ENABLE_SHIFT[i])
& 3) + 1;
- data->pwm[i] = w83627ehf_read_value(data, data->REG_PWM[i]);
+ data->pwm[i] = w83627ehf_read_value(data, W83627EHF_REG_PWM[i]);
data->tolerance[i] = (tolerance >> (i == 1 ? 4 : 0)) & 0x0f;
}
}
-static void w83627ehf_update_pwm_common(struct device *dev,
- struct w83627ehf_data *data)
-{
- struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
-
- if (sio_data->kind == nct6775 || sio_data->kind == nct6776)
- nct6775_update_pwm(data);
- else
- w83627ehf_update_pwm(data);
-}
-
static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
{
struct w83627ehf_data *data = dev_get_drvdata(dev);
- struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
-
int i;
mutex_lock(&data->update_lock);
if (time_after(jiffies, data->last_updated + HZ + HZ/2)
|| !data->valid) {
/* Fan clock dividers */
- w83627ehf_update_fan_div_common(dev, data);
+ w83627ehf_update_fan_div(data);
/* Measured voltages and limits */
for (i = 0; i < data->in_num; i++) {
if (!(data->has_fan & (1 << i)))
continue;
- reg = w83627ehf_read_value(data, data->REG_FAN[i]);
- data->rpm[i] = data->fan_from_reg(reg,
- data->fan_div[i]);
+ reg = w83627ehf_read_value(data, W83627EHF_REG_FAN[i]);
+ data->rpm[i] = fan_from_reg8(reg, data->fan_div[i]);
if (data->has_fan_min & (1 << i))
data->fan_min[i] = w83627ehf_read_value(data,
- data->REG_FAN_MIN[i]);
+ W83627EHF_REG_FAN_MIN[i]);
/*
* If we failed to measure the fan speed and clock
* divider can be increased, let's try that for next
* time
*/
- if (data->has_fan_div
- && (reg >= 0xff || (sio_data->kind == nct6775
- && reg == 0x00))
- && data->fan_div[i] < 0x07) {
+ if (reg >= 0xff && data->fan_div[i] < 0x07) {
dev_dbg(dev,
"Increasing fan%d clock divider from %u to %u\n",
i + 1, div_from_reg(data->fan_div[i]),
div_from_reg(data->fan_div[i] + 1));
data->fan_div[i]++;
- w83627ehf_write_fan_div_common(dev, data, i);
+ w83627ehf_write_fan_div(data, i);
/* Preserve min limit if possible */
if ((data->has_fan_min & (1 << i))
&& data->fan_min[i] >= 2
&& data->fan_min[i] != 255)
w83627ehf_write_value(data,
- data->REG_FAN_MIN[i],
+ W83627EHF_REG_FAN_MIN[i],
(data->fan_min[i] /= 2));
}
}
- w83627ehf_update_pwm_common(dev, data);
+ w83627ehf_update_pwm(data);
for (i = 0; i < data->pwm_num; i++) {
if (!(data->has_fan & (1 << i)))
data->fan_start_output[i] =
w83627ehf_read_value(data,
- data->REG_FAN_START_OUTPUT[i]);
+ W83627EHF_REG_FAN_START_OUTPUT[i]);
data->fan_stop_output[i] =
w83627ehf_read_value(data,
- data->REG_FAN_STOP_OUTPUT[i]);
+ W83627EHF_REG_FAN_STOP_OUTPUT[i]);
data->fan_stop_time[i] =
w83627ehf_read_value(data,
- data->REG_FAN_STOP_TIME[i]);
+ W83627EHF_REG_FAN_STOP_TIME[i]);
if (data->REG_FAN_MAX_OUTPUT &&
data->REG_FAN_MAX_OUTPUT[i] != 0xff)
data->target_temp[i] =
w83627ehf_read_value(data,
- data->REG_TARGET[i]) &
+ W83627EHF_REG_TARGET[i]) &
(data->pwm_mode[i] == 1 ? 0x7f : 0xff);
}
return data;
}
-/*
- * Sysfs callback functions
- */
-#define show_in_reg(reg) \
-static ssize_t \
-show_##reg(struct device *dev, struct device_attribute *attr, \
- char *buf) \
-{ \
- struct w83627ehf_data *data = w83627ehf_update_device(dev); \
- struct sensor_device_attribute *sensor_attr = \
- to_sensor_dev_attr(attr); \
- int nr = sensor_attr->index; \
- return sprintf(buf, "%ld\n", in_from_reg(data->reg[nr], nr, \
- data->scale_in)); \
-}
-show_in_reg(in)
-show_in_reg(in_min)
-show_in_reg(in_max)
-
#define store_in_reg(REG, reg) \
-static ssize_t \
-store_in_##reg(struct device *dev, struct device_attribute *attr, \
- const char *buf, size_t count) \
+static int \
+store_in_##reg(struct device *dev, struct w83627ehf_data *data, int channel, \
+ long val) \
{ \
- struct w83627ehf_data *data = dev_get_drvdata(dev); \
- struct sensor_device_attribute *sensor_attr = \
- to_sensor_dev_attr(attr); \
- int nr = sensor_attr->index; \
- unsigned long val; \
- int err; \
- err = kstrtoul(buf, 10, &val); \
- if (err < 0) \
- return err; \
+ if (val < 0) \
+ return -EINVAL; \
mutex_lock(&data->update_lock); \
- data->in_##reg[nr] = in_to_reg(val, nr, data->scale_in); \
- w83627ehf_write_value(data, W83627EHF_REG_IN_##REG(nr), \
- data->in_##reg[nr]); \
+ data->in_##reg[channel] = in_to_reg(val, channel, data->scale_in); \
+ w83627ehf_write_value(data, W83627EHF_REG_IN_##REG(channel), \
+ data->in_##reg[channel]); \
mutex_unlock(&data->update_lock); \
- return count; \
+ return 0; \
}
store_in_reg(MIN, min)
store_in_reg(MAX, max)
-static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct w83627ehf_data *data = w83627ehf_update_device(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- return sprintf(buf, "%u\n", (data->alarms >> nr) & 0x01);
-}
-
-static struct sensor_device_attribute sda_in_input[] = {
- SENSOR_ATTR(in0_input, S_IRUGO, show_in, NULL, 0),
- SENSOR_ATTR(in1_input, S_IRUGO, show_in, NULL, 1),
- SENSOR_ATTR(in2_input, S_IRUGO, show_in, NULL, 2),
- SENSOR_ATTR(in3_input, S_IRUGO, show_in, NULL, 3),
- SENSOR_ATTR(in4_input, S_IRUGO, show_in, NULL, 4),
- SENSOR_ATTR(in5_input, S_IRUGO, show_in, NULL, 5),
- SENSOR_ATTR(in6_input, S_IRUGO, show_in, NULL, 6),
- SENSOR_ATTR(in7_input, S_IRUGO, show_in, NULL, 7),
- SENSOR_ATTR(in8_input, S_IRUGO, show_in, NULL, 8),
- SENSOR_ATTR(in9_input, S_IRUGO, show_in, NULL, 9),
-};
-
-static struct sensor_device_attribute sda_in_alarm[] = {
- SENSOR_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0),
- SENSOR_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1),
- SENSOR_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2),
- SENSOR_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3),
- SENSOR_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8),
- SENSOR_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 21),
- SENSOR_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 20),
- SENSOR_ATTR(in7_alarm, S_IRUGO, show_alarm, NULL, 16),
- SENSOR_ATTR(in8_alarm, S_IRUGO, show_alarm, NULL, 17),
- SENSOR_ATTR(in9_alarm, S_IRUGO, show_alarm, NULL, 19),
-};
-
-static struct sensor_device_attribute sda_in_min[] = {
- SENSOR_ATTR(in0_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 0),
- SENSOR_ATTR(in1_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 1),
- SENSOR_ATTR(in2_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 2),
- SENSOR_ATTR(in3_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 3),
- SENSOR_ATTR(in4_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 4),
- SENSOR_ATTR(in5_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 5),
- SENSOR_ATTR(in6_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 6),
- SENSOR_ATTR(in7_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 7),
- SENSOR_ATTR(in8_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 8),
- SENSOR_ATTR(in9_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 9),
-};
-
-static struct sensor_device_attribute sda_in_max[] = {
- SENSOR_ATTR(in0_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 0),
- SENSOR_ATTR(in1_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 1),
- SENSOR_ATTR(in2_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 2),
- SENSOR_ATTR(in3_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 3),
- SENSOR_ATTR(in4_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 4),
- SENSOR_ATTR(in5_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 5),
- SENSOR_ATTR(in6_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 6),
- SENSOR_ATTR(in7_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 7),
- SENSOR_ATTR(in8_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 8),
- SENSOR_ATTR(in9_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 9),
-};
-
-static ssize_t
-show_fan(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct w83627ehf_data *data = w83627ehf_update_device(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- return sprintf(buf, "%d\n", data->rpm[nr]);
-}
-
-static ssize_t
-show_fan_min(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct w83627ehf_data *data = w83627ehf_update_device(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- return sprintf(buf, "%d\n",
- data->fan_from_reg_min(data->fan_min[nr],
- data->fan_div[nr]));
-}
-
-static ssize_t
-show_fan_div(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct w83627ehf_data *data = w83627ehf_update_device(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- return sprintf(buf, "%u\n", div_from_reg(data->fan_div[nr]));
-}
-
-static ssize_t
-store_fan_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static int
+store_fan_min(struct device *dev, struct w83627ehf_data *data, int channel,
+ long val)
{
- struct w83627ehf_data *data = dev_get_drvdata(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- unsigned long val;
- int err;
unsigned int reg;
u8 new_div;
- err = kstrtoul(buf, 10, &val);
- if (err < 0)
- return err;
+ if (val < 0)
+ return -EINVAL;
mutex_lock(&data->update_lock);
- if (!data->has_fan_div) {
- /*
- * Only NCT6776F for now, so we know that this is a 13 bit
- * register
- */
- if (!val) {
- val = 0xff1f;
- } else {
- if (val > 1350000U)
- val = 135000U;
- val = 1350000U / val;
- val = (val & 0x1f) | ((val << 3) & 0xff00);
- }
- data->fan_min[nr] = val;
- goto done; /* Leave fan divider alone */
- }
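+	/* RPM = 1350000 / (reg * divider); pick the reg/divider pair that fits */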
if (!val) {
/* No min limit, alarm disabled */
- data->fan_min[nr] = 255;
- new_div = data->fan_div[nr]; /* No change */
- dev_info(dev, "fan%u low limit and alarm disabled\n", nr + 1);
+ data->fan_min[channel] = 255;
+ new_div = data->fan_div[channel]; /* No change */
+ dev_info(dev, "fan%u low limit and alarm disabled\n",
+ channel + 1);
} else if ((reg = 1350000U / val) >= 128 * 255) {
/*
* Speed below this value cannot possibly be represented,
* even with the highest divider (128)
*/
- data->fan_min[nr] = 254;
+ data->fan_min[channel] = 254;
new_div = 7; /* 128 == (1 << 7) */
dev_warn(dev,
"fan%u low limit %lu below minimum %u, set to minimum\n",
- nr + 1, val, data->fan_from_reg_min(254, 7));
+ channel + 1, val, fan_from_reg8(254, 7));
} else if (!reg) {
/*
* Speed above this value cannot possibly be represented,
* even with the lowest divider (1)
*/
- data->fan_min[nr] = 1;
+ data->fan_min[channel] = 1;
new_div = 0; /* 1 == (1 << 0) */
dev_warn(dev,
"fan%u low limit %lu above maximum %u, set to maximum\n",
- nr + 1, val, data->fan_from_reg_min(1, 0));
+ channel + 1, val, fan_from_reg8(1, 0));
} else {
/*
* Automatically pick the best divider, i.e. the one such
reg >>= 1;
new_div++;
}
- data->fan_min[nr] = reg;
+ data->fan_min[channel] = reg;
}
/*
* Write both the fan clock divider (if it changed) and the new
* fan min (unconditionally)
*/
- if (new_div != data->fan_div[nr]) {
+ if (new_div != data->fan_div[channel]) {
dev_dbg(dev, "fan%u clock divider changed from %u to %u\n",
- nr + 1, div_from_reg(data->fan_div[nr]),
+ channel + 1, div_from_reg(data->fan_div[channel]),
div_from_reg(new_div));
- data->fan_div[nr] = new_div;
- w83627ehf_write_fan_div_common(dev, data, nr);
+ data->fan_div[channel] = new_div;
+ w83627ehf_write_fan_div(data, channel);
/* Give the chip time to sample a new speed value */
data->last_updated = jiffies;
}
-done:
- w83627ehf_write_value(data, data->REG_FAN_MIN[nr],
- data->fan_min[nr]);
- mutex_unlock(&data->update_lock);
-
- return count;
-}
-
-static struct sensor_device_attribute sda_fan_input[] = {
- SENSOR_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0),
- SENSOR_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 1),
- SENSOR_ATTR(fan3_input, S_IRUGO, show_fan, NULL, 2),
- SENSOR_ATTR(fan4_input, S_IRUGO, show_fan, NULL, 3),
- SENSOR_ATTR(fan5_input, S_IRUGO, show_fan, NULL, 4),
-};
-static struct sensor_device_attribute sda_fan_alarm[] = {
- SENSOR_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6),
- SENSOR_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7),
- SENSOR_ATTR(fan3_alarm, S_IRUGO, show_alarm, NULL, 11),
- SENSOR_ATTR(fan4_alarm, S_IRUGO, show_alarm, NULL, 10),
- SENSOR_ATTR(fan5_alarm, S_IRUGO, show_alarm, NULL, 23),
-};
-
-static struct sensor_device_attribute sda_fan_min[] = {
- SENSOR_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan_min,
- store_fan_min, 0),
- SENSOR_ATTR(fan2_min, S_IWUSR | S_IRUGO, show_fan_min,
- store_fan_min, 1),
- SENSOR_ATTR(fan3_min, S_IWUSR | S_IRUGO, show_fan_min,
- store_fan_min, 2),
- SENSOR_ATTR(fan4_min, S_IWUSR | S_IRUGO, show_fan_min,
- store_fan_min, 3),
- SENSOR_ATTR(fan5_min, S_IWUSR | S_IRUGO, show_fan_min,
- store_fan_min, 4),
-};
-
-static struct sensor_device_attribute sda_fan_div[] = {
- SENSOR_ATTR(fan1_div, S_IRUGO, show_fan_div, NULL, 0),
- SENSOR_ATTR(fan2_div, S_IRUGO, show_fan_div, NULL, 1),
- SENSOR_ATTR(fan3_div, S_IRUGO, show_fan_div, NULL, 2),
- SENSOR_ATTR(fan4_div, S_IRUGO, show_fan_div, NULL, 3),
- SENSOR_ATTR(fan5_div, S_IRUGO, show_fan_div, NULL, 4),
-};
-
-static ssize_t
-show_temp_label(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct w83627ehf_data *data = w83627ehf_update_device(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- return sprintf(buf, "%s\n", data->temp_label[data->temp_src[nr]]);
-}
+ w83627ehf_write_value(data, W83627EHF_REG_FAN_MIN[channel],
+ data->fan_min[channel]);
+ mutex_unlock(&data->update_lock);
-#define show_temp_reg(addr, reg) \
-static ssize_t \
-show_##reg(struct device *dev, struct device_attribute *attr, \
- char *buf) \
-{ \
- struct w83627ehf_data *data = w83627ehf_update_device(dev); \
- struct sensor_device_attribute *sensor_attr = \
- to_sensor_dev_attr(attr); \
- int nr = sensor_attr->index; \
- return sprintf(buf, "%d\n", LM75_TEMP_FROM_REG(data->reg[nr])); \
+ return 0;
}
-show_temp_reg(reg_temp, temp);
-show_temp_reg(reg_temp_over, temp_max);
-show_temp_reg(reg_temp_hyst, temp_max_hyst);
#define store_temp_reg(addr, reg) \
-static ssize_t \
-store_##reg(struct device *dev, struct device_attribute *attr, \
- const char *buf, size_t count) \
+static int \
+store_##reg(struct device *dev, struct w83627ehf_data *data, int channel, \
+ long val) \
{ \
- struct w83627ehf_data *data = dev_get_drvdata(dev); \
- struct sensor_device_attribute *sensor_attr = \
- to_sensor_dev_attr(attr); \
- int nr = sensor_attr->index; \
- int err; \
- long val; \
- err = kstrtol(buf, 10, &val); \
- if (err < 0) \
- return err; \
mutex_lock(&data->update_lock); \
- data->reg[nr] = LM75_TEMP_TO_REG(val); \
- w83627ehf_write_temp(data, data->addr[nr], data->reg[nr]); \
+ data->reg[channel] = LM75_TEMP_TO_REG(val); \
+ w83627ehf_write_temp(data, data->addr[channel], data->reg[channel]); \
mutex_unlock(&data->update_lock); \
- return count; \
+ return 0; \
}
store_temp_reg(reg_temp_over, temp_max);
store_temp_reg(reg_temp_hyst, temp_max_hyst);
-static ssize_t
-show_temp_offset(struct device *dev, struct device_attribute *attr, char *buf)
+static int
+store_temp_offset(struct device *dev, struct w83627ehf_data *data, int channel,
+ long val)
{
- struct w83627ehf_data *data = w83627ehf_update_device(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
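+	/* The offset register is a signed 8-bit degree count; val is in millidegrees */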
+ val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127);
- return sprintf(buf, "%d\n",
- data->temp_offset[sensor_attr->index] * 1000);
+ mutex_lock(&data->update_lock);
+ data->temp_offset[channel] = val;
+ w83627ehf_write_value(data, W83627EHF_REG_TEMP_OFFSET[channel], val);
+ mutex_unlock(&data->update_lock);
+ return 0;
}
-static ssize_t
-store_temp_offset(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static int
+store_pwm_mode(struct device *dev, struct w83627ehf_data *data, int channel,
+ long val)
{
- struct w83627ehf_data *data = dev_get_drvdata(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- long val;
- int err;
-
- err = kstrtol(buf, 10, &val);
- if (err < 0)
- return err;
+ u16 reg;
- val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127);
+ if (val < 0 || val > 1)
+ return -EINVAL;
mutex_lock(&data->update_lock);
- data->temp_offset[nr] = val;
- w83627ehf_write_value(data, W83627EHF_REG_TEMP_OFFSET[nr], val);
+ reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[channel]);
+ data->pwm_mode[channel] = val;
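+	/* The chip bit selects DC output when set; sysfs uses 0 = DC, 1 = PWM */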
+ reg &= ~(1 << W83627EHF_PWM_MODE_SHIFT[channel]);
+ if (!val)
+ reg |= 1 << W83627EHF_PWM_MODE_SHIFT[channel];
+ w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[channel], reg);
mutex_unlock(&data->update_lock);
- return count;
+ return 0;
}
-static ssize_t
-show_temp_type(struct device *dev, struct device_attribute *attr, char *buf)
+static int
+store_pwm(struct device *dev, struct w83627ehf_data *data, int channel,
+ long val)
{
- struct w83627ehf_data *data = w83627ehf_update_device(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- return sprintf(buf, "%d\n", (int)data->temp_type[nr]);
-}
-
-static struct sensor_device_attribute sda_temp_input[] = {
- SENSOR_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0),
- SENSOR_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1),
- SENSOR_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2),
- SENSOR_ATTR(temp4_input, S_IRUGO, show_temp, NULL, 3),
- SENSOR_ATTR(temp5_input, S_IRUGO, show_temp, NULL, 4),
- SENSOR_ATTR(temp6_input, S_IRUGO, show_temp, NULL, 5),
- SENSOR_ATTR(temp7_input, S_IRUGO, show_temp, NULL, 6),
- SENSOR_ATTR(temp8_input, S_IRUGO, show_temp, NULL, 7),
- SENSOR_ATTR(temp9_input, S_IRUGO, show_temp, NULL, 8),
-};
-
-static struct sensor_device_attribute sda_temp_label[] = {
- SENSOR_ATTR(temp1_label, S_IRUGO, show_temp_label, NULL, 0),
- SENSOR_ATTR(temp2_label, S_IRUGO, show_temp_label, NULL, 1),
- SENSOR_ATTR(temp3_label, S_IRUGO, show_temp_label, NULL, 2),
- SENSOR_ATTR(temp4_label, S_IRUGO, show_temp_label, NULL, 3),
- SENSOR_ATTR(temp5_label, S_IRUGO, show_temp_label, NULL, 4),
- SENSOR_ATTR(temp6_label, S_IRUGO, show_temp_label, NULL, 5),
- SENSOR_ATTR(temp7_label, S_IRUGO, show_temp_label, NULL, 6),
- SENSOR_ATTR(temp8_label, S_IRUGO, show_temp_label, NULL, 7),
- SENSOR_ATTR(temp9_label, S_IRUGO, show_temp_label, NULL, 8),
-};
-
-static struct sensor_device_attribute sda_temp_max[] = {
- SENSOR_ATTR(temp1_max, S_IRUGO | S_IWUSR, show_temp_max,
- store_temp_max, 0),
- SENSOR_ATTR(temp2_max, S_IRUGO | S_IWUSR, show_temp_max,
- store_temp_max, 1),
- SENSOR_ATTR(temp3_max, S_IRUGO | S_IWUSR, show_temp_max,
- store_temp_max, 2),
- SENSOR_ATTR(temp4_max, S_IRUGO | S_IWUSR, show_temp_max,
- store_temp_max, 3),
- SENSOR_ATTR(temp5_max, S_IRUGO | S_IWUSR, show_temp_max,
- store_temp_max, 4),
- SENSOR_ATTR(temp6_max, S_IRUGO | S_IWUSR, show_temp_max,
- store_temp_max, 5),
- SENSOR_ATTR(temp7_max, S_IRUGO | S_IWUSR, show_temp_max,
- store_temp_max, 6),
- SENSOR_ATTR(temp8_max, S_IRUGO | S_IWUSR, show_temp_max,
- store_temp_max, 7),
- SENSOR_ATTR(temp9_max, S_IRUGO | S_IWUSR, show_temp_max,
- store_temp_max, 8),
-};
+ val = clamp_val(val, 0, 255);
-static struct sensor_device_attribute sda_temp_max_hyst[] = {
- SENSOR_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
- store_temp_max_hyst, 0),
- SENSOR_ATTR(temp2_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
- store_temp_max_hyst, 1),
- SENSOR_ATTR(temp3_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
- store_temp_max_hyst, 2),
- SENSOR_ATTR(temp4_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
- store_temp_max_hyst, 3),
- SENSOR_ATTR(temp5_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
- store_temp_max_hyst, 4),
- SENSOR_ATTR(temp6_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
- store_temp_max_hyst, 5),
- SENSOR_ATTR(temp7_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
- store_temp_max_hyst, 6),
- SENSOR_ATTR(temp8_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
- store_temp_max_hyst, 7),
- SENSOR_ATTR(temp9_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
- store_temp_max_hyst, 8),
-};
+ mutex_lock(&data->update_lock);
+ data->pwm[channel] = val;
+ w83627ehf_write_value(data, W83627EHF_REG_PWM[channel], val);
+ mutex_unlock(&data->update_lock);
+ return 0;
+}
-static struct sensor_device_attribute sda_temp_alarm[] = {
- SENSOR_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4),
- SENSOR_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 5),
- SENSOR_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 13),
-};
+static int
+store_pwm_enable(struct device *dev, struct w83627ehf_data *data, int channel,
+ long val)
+{
+ u16 reg;
-static struct sensor_device_attribute sda_temp_type[] = {
- SENSOR_ATTR(temp1_type, S_IRUGO, show_temp_type, NULL, 0),
- SENSOR_ATTR(temp2_type, S_IRUGO, show_temp_type, NULL, 1),
- SENSOR_ATTR(temp3_type, S_IRUGO, show_temp_type, NULL, 2),
-};
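+	/*
+	 * 1 = manual, 2 = thermal cruise, 3 = fan speed cruise,
+	 * 4 = Smart Fan III; higher values are accepted only if the
+	 * BIOS already had them selected.
+	 */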
+ if (!val || val < 0 ||
+ (val > 4 && val != data->pwm_enable_orig[channel]))
+ return -EINVAL;
-static struct sensor_device_attribute sda_temp_offset[] = {
- SENSOR_ATTR(temp1_offset, S_IRUGO | S_IWUSR, show_temp_offset,
- store_temp_offset, 0),
- SENSOR_ATTR(temp2_offset, S_IRUGO | S_IWUSR, show_temp_offset,
- store_temp_offset, 1),
- SENSOR_ATTR(temp3_offset, S_IRUGO | S_IWUSR, show_temp_offset,
- store_temp_offset, 2),
-};
+ mutex_lock(&data->update_lock);
+ data->pwm_enable[channel] = val;
+ reg = w83627ehf_read_value(data,
+ W83627EHF_REG_PWM_ENABLE[channel]);
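+	/* The 2-bit mode field stores the sysfs value minus one */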
+ reg &= ~(0x03 << W83627EHF_PWM_ENABLE_SHIFT[channel]);
+ reg |= (val - 1) << W83627EHF_PWM_ENABLE_SHIFT[channel];
+ w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[channel],
+ reg);
+ mutex_unlock(&data->update_lock);
+ return 0;
+}
-#define show_pwm_reg(reg) \
+#define show_tol_temp(reg) \
static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
- char *buf) \
+ char *buf) \
{ \
- struct w83627ehf_data *data = w83627ehf_update_device(dev); \
+ struct w83627ehf_data *data = w83627ehf_update_device(dev->parent); \
struct sensor_device_attribute *sensor_attr = \
to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
- return sprintf(buf, "%d\n", data->reg[nr]); \
+ return sprintf(buf, "%d\n", data->reg[nr] * 1000); \
}
-show_pwm_reg(pwm_mode)
-show_pwm_reg(pwm_enable)
-show_pwm_reg(pwm)
+show_tol_temp(tolerance)
+show_tol_temp(target_temp)
static ssize_t
-store_pwm_mode(struct device *dev, struct device_attribute *attr,
+store_target_temp(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct w83627ehf_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
int nr = sensor_attr->index;
- unsigned long val;
+ long val;
int err;
- u16 reg;
- err = kstrtoul(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err < 0)
return err;
- if (val > 1)
- return -EINVAL;
-
- /* On NCT67766F, DC mode is only supported for pwm1 */
- if (sio_data->kind == nct6776 && nr && val != 1)
- return -EINVAL;
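+	/* The target temperature register is 7 bits wide, in degrees C */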
+ val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 127);
mutex_lock(&data->update_lock);
- reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[nr]);
- data->pwm_mode[nr] = val;
- reg &= ~(1 << W83627EHF_PWM_MODE_SHIFT[nr]);
- if (!val)
- reg |= 1 << W83627EHF_PWM_MODE_SHIFT[nr];
- w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[nr], reg);
- mutex_unlock(&data->update_lock);
- return count;
-}
-
-static ssize_t
-store_pwm(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct w83627ehf_data *data = dev_get_drvdata(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- unsigned long val;
- int err;
-
- err = kstrtoul(buf, 10, &val);
- if (err < 0)
- return err;
-
- val = clamp_val(val, 0, 255);
-
- mutex_lock(&data->update_lock);
- data->pwm[nr] = val;
- w83627ehf_write_value(data, data->REG_PWM[nr], val);
- mutex_unlock(&data->update_lock);
- return count;
-}
-
-static ssize_t
-store_pwm_enable(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct w83627ehf_data *data = dev_get_drvdata(dev);
- struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- unsigned long val;
- int err;
- u16 reg;
-
- err = kstrtoul(buf, 10, &val);
- if (err < 0)
- return err;
-
- if (!val || (val > 4 && val != data->pwm_enable_orig[nr]))
- return -EINVAL;
- /* SmartFan III mode is not supported on NCT6776F */
- if (sio_data->kind == nct6776 && val == 4)
- return -EINVAL;
-
- mutex_lock(&data->update_lock);
- data->pwm_enable[nr] = val;
- if (sio_data->kind == nct6775 || sio_data->kind == nct6776) {
- reg = w83627ehf_read_value(data,
- NCT6775_REG_FAN_MODE[nr]);
- reg &= 0x0f;
- reg |= (val - 1) << 4;
- w83627ehf_write_value(data,
- NCT6775_REG_FAN_MODE[nr], reg);
- } else {
- reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[nr]);
- reg &= ~(0x03 << W83627EHF_PWM_ENABLE_SHIFT[nr]);
- reg |= (val - 1) << W83627EHF_PWM_ENABLE_SHIFT[nr];
- w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[nr], reg);
- }
- mutex_unlock(&data->update_lock);
- return count;
-}
-
-
-#define show_tol_temp(reg) \
-static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
- char *buf) \
-{ \
- struct w83627ehf_data *data = w83627ehf_update_device(dev); \
- struct sensor_device_attribute *sensor_attr = \
- to_sensor_dev_attr(attr); \
- int nr = sensor_attr->index; \
- return sprintf(buf, "%d\n", data->reg[nr] * 1000); \
-}
-
-show_tol_temp(tolerance)
-show_tol_temp(target_temp)
-
-static ssize_t
-store_target_temp(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct w83627ehf_data *data = dev_get_drvdata(dev);
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- long val;
- int err;
-
- err = kstrtol(buf, 10, &val);
- if (err < 0)
- return err;
-
- val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 127);
-
- mutex_lock(&data->update_lock);
- data->target_temp[nr] = val;
- w83627ehf_write_value(data, data->REG_TARGET[nr], val);
+ data->target_temp[nr] = val;
+ w83627ehf_write_value(data, W83627EHF_REG_TARGET[nr], val);
mutex_unlock(&data->update_lock);
return count;
}
const char *buf, size_t count)
{
struct w83627ehf_data *data = dev_get_drvdata(dev);
- struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
u16 reg;
val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 15);
mutex_lock(&data->update_lock);
- if (sio_data->kind == nct6775 || sio_data->kind == nct6776) {
- /* Limit tolerance further for NCT6776F */
- if (sio_data->kind == nct6776 && val > 7)
- val = 7;
- reg = w83627ehf_read_value(data, NCT6775_REG_FAN_MODE[nr]);
+ reg = w83627ehf_read_value(data, W83627EHF_REG_TOLERANCE[nr]);
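+	/* Only channel 1 keeps its tolerance in the high nibble */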
+ if (nr == 1)
+ reg = (reg & 0x0f) | (val << 4);
+ else
reg = (reg & 0xf0) | val;
- w83627ehf_write_value(data, NCT6775_REG_FAN_MODE[nr], reg);
- } else {
- reg = w83627ehf_read_value(data, W83627EHF_REG_TOLERANCE[nr]);
- if (nr == 1)
- reg = (reg & 0x0f) | (val << 4);
- else
- reg = (reg & 0xf0) | val;
- w83627ehf_write_value(data, W83627EHF_REG_TOLERANCE[nr], reg);
- }
+ w83627ehf_write_value(data, W83627EHF_REG_TOLERANCE[nr], reg);
data->tolerance[nr] = val;
mutex_unlock(&data->update_lock);
return count;
}
-static struct sensor_device_attribute sda_pwm[] = {
- SENSOR_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 0),
- SENSOR_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 1),
- SENSOR_ATTR(pwm3, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 2),
- SENSOR_ATTR(pwm4, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 3),
-};
-
-static struct sensor_device_attribute sda_pwm_mode[] = {
- SENSOR_ATTR(pwm1_mode, S_IWUSR | S_IRUGO, show_pwm_mode,
- store_pwm_mode, 0),
- SENSOR_ATTR(pwm2_mode, S_IWUSR | S_IRUGO, show_pwm_mode,
- store_pwm_mode, 1),
- SENSOR_ATTR(pwm3_mode, S_IWUSR | S_IRUGO, show_pwm_mode,
- store_pwm_mode, 2),
- SENSOR_ATTR(pwm4_mode, S_IWUSR | S_IRUGO, show_pwm_mode,
- store_pwm_mode, 3),
-};
-
-static struct sensor_device_attribute sda_pwm_enable[] = {
- SENSOR_ATTR(pwm1_enable, S_IWUSR | S_IRUGO, show_pwm_enable,
- store_pwm_enable, 0),
- SENSOR_ATTR(pwm2_enable, S_IWUSR | S_IRUGO, show_pwm_enable,
- store_pwm_enable, 1),
- SENSOR_ATTR(pwm3_enable, S_IWUSR | S_IRUGO, show_pwm_enable,
- store_pwm_enable, 2),
- SENSOR_ATTR(pwm4_enable, S_IWUSR | S_IRUGO, show_pwm_enable,
- store_pwm_enable, 3),
-};
-
-static struct sensor_device_attribute sda_target_temp[] = {
- SENSOR_ATTR(pwm1_target, S_IWUSR | S_IRUGO, show_target_temp,
- store_target_temp, 0),
- SENSOR_ATTR(pwm2_target, S_IWUSR | S_IRUGO, show_target_temp,
- store_target_temp, 1),
- SENSOR_ATTR(pwm3_target, S_IWUSR | S_IRUGO, show_target_temp,
- store_target_temp, 2),
- SENSOR_ATTR(pwm4_target, S_IWUSR | S_IRUGO, show_target_temp,
- store_target_temp, 3),
-};
-
-static struct sensor_device_attribute sda_tolerance[] = {
- SENSOR_ATTR(pwm1_tolerance, S_IWUSR | S_IRUGO, show_tolerance,
- store_tolerance, 0),
- SENSOR_ATTR(pwm2_tolerance, S_IWUSR | S_IRUGO, show_tolerance,
- store_tolerance, 1),
- SENSOR_ATTR(pwm3_tolerance, S_IWUSR | S_IRUGO, show_tolerance,
- store_tolerance, 2),
- SENSOR_ATTR(pwm4_tolerance, S_IWUSR | S_IRUGO, show_tolerance,
- store_tolerance, 3),
-};
+static SENSOR_DEVICE_ATTR(pwm1_target, 0644, show_target_temp,
+ store_target_temp, 0);
+static SENSOR_DEVICE_ATTR(pwm2_target, 0644, show_target_temp,
+ store_target_temp, 1);
+static SENSOR_DEVICE_ATTR(pwm3_target, 0644, show_target_temp,
+ store_target_temp, 2);
+static SENSOR_DEVICE_ATTR(pwm4_target, 0644, show_target_temp,
+ store_target_temp, 3);
+
+static SENSOR_DEVICE_ATTR(pwm1_tolerance, 0644, show_tolerance,
+ store_tolerance, 0);
+static SENSOR_DEVICE_ATTR(pwm2_tolerance, 0644, show_tolerance,
+ store_tolerance, 1);
+static SENSOR_DEVICE_ATTR(pwm3_tolerance, 0644, show_tolerance,
+ store_tolerance, 2);
+static SENSOR_DEVICE_ATTR(pwm4_tolerance, 0644, show_tolerance,
+ store_tolerance, 3);
/* Smart Fan registers */
static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
- struct w83627ehf_data *data = w83627ehf_update_device(dev); \
+ struct w83627ehf_data *data = w83627ehf_update_device(dev->parent); \
struct sensor_device_attribute *sensor_attr = \
to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
val = clamp_val(val, 1, 255); \
mutex_lock(&data->update_lock); \
data->reg[nr] = val; \
- w83627ehf_write_value(data, data->REG_##REG[nr], val); \
+ w83627ehf_write_value(data, REG[nr], val); \
mutex_unlock(&data->update_lock); \
return count; \
}
-fan_functions(fan_start_output, FAN_START_OUTPUT)
-fan_functions(fan_stop_output, FAN_STOP_OUTPUT)
-fan_functions(fan_max_output, FAN_MAX_OUTPUT)
-fan_functions(fan_step_output, FAN_STEP_OUTPUT)
+fan_functions(fan_start_output, W83627EHF_REG_FAN_START_OUTPUT)
+fan_functions(fan_stop_output, W83627EHF_REG_FAN_STOP_OUTPUT)
+fan_functions(fan_max_output, data->REG_FAN_MAX_OUTPUT)
+fan_functions(fan_step_output, data->REG_FAN_STEP_OUTPUT)
#define fan_time_functions(reg, REG) \
static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
- struct w83627ehf_data *data = w83627ehf_update_device(dev); \
+ struct w83627ehf_data *data = w83627ehf_update_device(dev->parent); \
struct sensor_device_attribute *sensor_attr = \
to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
val = step_time_to_reg(val, data->pwm_mode[nr]); \
mutex_lock(&data->update_lock); \
data->reg[nr] = val; \
- w83627ehf_write_value(data, data->REG_##REG[nr], val); \
+ w83627ehf_write_value(data, REG[nr], val); \
mutex_unlock(&data->update_lock); \
return count; \
} \
-fan_time_functions(fan_stop_time, FAN_STOP_TIME)
-
-static ssize_t name_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct w83627ehf_data *data = dev_get_drvdata(dev);
-
- return sprintf(buf, "%s\n", data->name);
-}
-static DEVICE_ATTR_RO(name);
-
-static struct sensor_device_attribute sda_sf3_arrays_fan4[] = {
- SENSOR_ATTR(pwm4_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time,
- store_fan_stop_time, 3),
- SENSOR_ATTR(pwm4_start_output, S_IWUSR | S_IRUGO, show_fan_start_output,
- store_fan_start_output, 3),
- SENSOR_ATTR(pwm4_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output,
- store_fan_stop_output, 3),
- SENSOR_ATTR(pwm4_max_output, S_IWUSR | S_IRUGO, show_fan_max_output,
- store_fan_max_output, 3),
- SENSOR_ATTR(pwm4_step_output, S_IWUSR | S_IRUGO, show_fan_step_output,
- store_fan_step_output, 3),
-};
-
-static struct sensor_device_attribute sda_sf3_arrays_fan3[] = {
- SENSOR_ATTR(pwm3_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time,
- store_fan_stop_time, 2),
- SENSOR_ATTR(pwm3_start_output, S_IWUSR | S_IRUGO, show_fan_start_output,
- store_fan_start_output, 2),
- SENSOR_ATTR(pwm3_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output,
- store_fan_stop_output, 2),
-};
-
-static struct sensor_device_attribute sda_sf3_arrays[] = {
- SENSOR_ATTR(pwm1_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time,
- store_fan_stop_time, 0),
- SENSOR_ATTR(pwm2_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time,
- store_fan_stop_time, 1),
- SENSOR_ATTR(pwm1_start_output, S_IWUSR | S_IRUGO, show_fan_start_output,
- store_fan_start_output, 0),
- SENSOR_ATTR(pwm2_start_output, S_IWUSR | S_IRUGO, show_fan_start_output,
- store_fan_start_output, 1),
- SENSOR_ATTR(pwm1_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output,
- store_fan_stop_output, 0),
- SENSOR_ATTR(pwm2_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output,
- store_fan_stop_output, 1),
-};
+fan_time_functions(fan_stop_time, W83627EHF_REG_FAN_STOP_TIME)
+
+static SENSOR_DEVICE_ATTR(pwm4_stop_time, 0644, show_fan_stop_time,
+ store_fan_stop_time, 3);
+static SENSOR_DEVICE_ATTR(pwm4_start_output, 0644, show_fan_start_output,
+ store_fan_start_output, 3);
+static SENSOR_DEVICE_ATTR(pwm4_stop_output, 0644, show_fan_stop_output,
+ store_fan_stop_output, 3);
+static SENSOR_DEVICE_ATTR(pwm4_max_output, 0644, show_fan_max_output,
+ store_fan_max_output, 3);
+static SENSOR_DEVICE_ATTR(pwm4_step_output, 0644, show_fan_step_output,
+ store_fan_step_output, 3);
+
+static SENSOR_DEVICE_ATTR(pwm3_stop_time, 0644, show_fan_stop_time,
+ store_fan_stop_time, 2);
+static SENSOR_DEVICE_ATTR(pwm3_start_output, 0644, show_fan_start_output,
+ store_fan_start_output, 2);
+static SENSOR_DEVICE_ATTR(pwm3_stop_output, 0644, show_fan_stop_output,
+ store_fan_stop_output, 2);
+
+static SENSOR_DEVICE_ATTR(pwm1_stop_time, 0644, show_fan_stop_time,
+ store_fan_stop_time, 0);
+static SENSOR_DEVICE_ATTR(pwm2_stop_time, 0644, show_fan_stop_time,
+ store_fan_stop_time, 1);
+static SENSOR_DEVICE_ATTR(pwm1_start_output, 0644, show_fan_start_output,
+ store_fan_start_output, 0);
+static SENSOR_DEVICE_ATTR(pwm2_start_output, 0644, show_fan_start_output,
+ store_fan_start_output, 1);
+static SENSOR_DEVICE_ATTR(pwm1_stop_output, 0644, show_fan_stop_output,
+ store_fan_stop_output, 0);
+static SENSOR_DEVICE_ATTR(pwm2_stop_output, 0644, show_fan_stop_output,
+ store_fan_stop_output, 1);
/*
* pwm1 and pwm3 don't support max and step settings on all chips.
* Need to check support while generating/removing attribute files.
*/
-static struct sensor_device_attribute sda_sf3_max_step_arrays[] = {
- SENSOR_ATTR(pwm1_max_output, S_IWUSR | S_IRUGO, show_fan_max_output,
- store_fan_max_output, 0),
- SENSOR_ATTR(pwm1_step_output, S_IWUSR | S_IRUGO, show_fan_step_output,
- store_fan_step_output, 0),
- SENSOR_ATTR(pwm2_max_output, S_IWUSR | S_IRUGO, show_fan_max_output,
- store_fan_max_output, 1),
- SENSOR_ATTR(pwm2_step_output, S_IWUSR | S_IRUGO, show_fan_step_output,
- store_fan_step_output, 1),
- SENSOR_ATTR(pwm3_max_output, S_IWUSR | S_IRUGO, show_fan_max_output,
- store_fan_max_output, 2),
- SENSOR_ATTR(pwm3_step_output, S_IWUSR | S_IRUGO, show_fan_step_output,
- store_fan_step_output, 2),
-};
+static SENSOR_DEVICE_ATTR(pwm1_max_output, 0644, show_fan_max_output,
+ store_fan_max_output, 0);
+static SENSOR_DEVICE_ATTR(pwm1_step_output, 0644, show_fan_step_output,
+ store_fan_step_output, 0);
+static SENSOR_DEVICE_ATTR(pwm2_max_output, 0644, show_fan_max_output,
+ store_fan_max_output, 1);
+static SENSOR_DEVICE_ATTR(pwm2_step_output, 0644, show_fan_step_output,
+ store_fan_step_output, 1);
+static SENSOR_DEVICE_ATTR(pwm3_max_output, 0644, show_fan_max_output,
+ store_fan_max_output, 2);
+static SENSOR_DEVICE_ATTR(pwm3_step_output, 0644, show_fan_step_output,
+ store_fan_step_output, 2);
static ssize_t
cpu0_vid_show(struct device *dev, struct device_attribute *attr, char *buf)
struct w83627ehf_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm));
}
static DEVICE_ATTR_RO(cpu0_vid);
/* Case open detection */
-
-static ssize_t
-show_caseopen(struct device *dev, struct device_attribute *attr, char *buf)
+static int
+clear_caseopen(struct device *dev, struct w83627ehf_data *data, int channel,
+ long val)
{
- struct w83627ehf_data *data = w83627ehf_update_device(dev);
-
- return sprintf(buf, "%d\n",
- !!(data->caseopen & to_sensor_dev_attr_2(attr)->index));
-}
-
-static ssize_t
-clear_caseopen(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct w83627ehf_data *data = dev_get_drvdata(dev);
- unsigned long val;
- u16 reg, mask;
+ const u16 mask = 0x80;
+ u16 reg;
- if (kstrtoul(buf, 10, &val) || val != 0)
+ if (val != 0 || channel != 0)
return -EINVAL;
- mask = to_sensor_dev_attr_2(attr)->nr;
-
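+	/* Setting bit 7 of the CASEOPEN clear register resets the latched alarm */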
mutex_lock(&data->update_lock);
reg = w83627ehf_read_value(data, W83627EHF_REG_CASEOPEN_CLR);
w83627ehf_write_value(data, W83627EHF_REG_CASEOPEN_CLR, reg | mask);
data->valid = 0; /* Force cache refresh */
mutex_unlock(&data->update_lock);
- return count;
+ return 0;
}
-static struct sensor_device_attribute_2 sda_caseopen[] = {
- SENSOR_ATTR_2(intrusion0_alarm, S_IWUSR | S_IRUGO, show_caseopen,
- clear_caseopen, 0x80, 0x10),
- SENSOR_ATTR_2(intrusion1_alarm, S_IWUSR | S_IRUGO, show_caseopen,
- clear_caseopen, 0x40, 0x40),
-};
-
-/*
- * Driver and device management
- */
-
-static void w83627ehf_device_remove_files(struct device *dev)
+static umode_t w83627ehf_attrs_visible(struct kobject *kobj,
+ struct attribute *a, int n)
{
- /*
- * some entries in the following arrays may not have been used in
- * device_create_file(), but device_remove_file() will ignore them
- */
- int i;
+	struct device *dev = kobj_to_dev(kobj);
struct w83627ehf_data *data = dev_get_drvdata(dev);
+ struct device_attribute *devattr;
+ struct sensor_device_attribute *sda;
+
+ devattr = container_of(a, struct device_attribute, attr);
+
+	/* cpu0_vid is a plain device attribute, not a sensor attribute */
+ if (devattr->show == cpu0_vid_show && data->have_vid)
+ return a->mode;
+
+	sda = to_sensor_dev_attr(devattr);
+
+ if (sda->index < 2 &&
+ (devattr->show == show_fan_stop_time ||
+ devattr->show == show_fan_start_output ||
+ devattr->show == show_fan_stop_output))
+ return a->mode;
+
+ if (sda->index < 3 &&
+ (devattr->show == show_fan_max_output ||
+ devattr->show == show_fan_step_output) &&
+ data->REG_FAN_STEP_OUTPUT &&
+ data->REG_FAN_STEP_OUTPUT[sda->index] != 0xff)
+ return a->mode;
+
+	/* If fan3 and fan4 are enabled, create their attribute files */
+ if (sda->index == 2 &&
+ (data->has_fan & (1 << 2)) && data->pwm_num >= 3 &&
+ (devattr->show == show_fan_stop_time ||
+ devattr->show == show_fan_start_output ||
+ devattr->show == show_fan_stop_output))
+ return a->mode;
+
+ if (sda->index == 3 &&
+ (data->has_fan & (1 << 3)) && data->pwm_num >= 4 &&
+ (devattr->show == show_fan_stop_time ||
+ devattr->show == show_fan_start_output ||
+ devattr->show == show_fan_stop_output ||
+ devattr->show == show_fan_max_output ||
+ devattr->show == show_fan_step_output))
+ return a->mode;
+
+ if ((devattr->show == show_target_temp ||
+ devattr->show == show_tolerance) &&
+ (data->has_fan & (1 << sda->index)) &&
+ sda->index < data->pwm_num)
+ return a->mode;
- for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays); i++)
- device_remove_file(dev, &sda_sf3_arrays[i].dev_attr);
- for (i = 0; i < ARRAY_SIZE(sda_sf3_max_step_arrays); i++) {
- struct sensor_device_attribute *attr =
- &sda_sf3_max_step_arrays[i];
- if (data->REG_FAN_STEP_OUTPUT &&
- data->REG_FAN_STEP_OUTPUT[attr->index] != 0xff)
- device_remove_file(dev, &attr->dev_attr);
- }
- for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan3); i++)
- device_remove_file(dev, &sda_sf3_arrays_fan3[i].dev_attr);
- for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan4); i++)
- device_remove_file(dev, &sda_sf3_arrays_fan4[i].dev_attr);
- for (i = 0; i < data->in_num; i++) {
- if ((i == 6) && data->in6_skip)
- continue;
- device_remove_file(dev, &sda_in_input[i].dev_attr);
- device_remove_file(dev, &sda_in_alarm[i].dev_attr);
- device_remove_file(dev, &sda_in_min[i].dev_attr);
- device_remove_file(dev, &sda_in_max[i].dev_attr);
- }
- for (i = 0; i < 5; i++) {
- device_remove_file(dev, &sda_fan_input[i].dev_attr);
- device_remove_file(dev, &sda_fan_alarm[i].dev_attr);
- device_remove_file(dev, &sda_fan_div[i].dev_attr);
- device_remove_file(dev, &sda_fan_min[i].dev_attr);
- }
- for (i = 0; i < data->pwm_num; i++) {
- device_remove_file(dev, &sda_pwm[i].dev_attr);
- device_remove_file(dev, &sda_pwm_mode[i].dev_attr);
- device_remove_file(dev, &sda_pwm_enable[i].dev_attr);
- device_remove_file(dev, &sda_target_temp[i].dev_attr);
- device_remove_file(dev, &sda_tolerance[i].dev_attr);
- }
- for (i = 0; i < NUM_REG_TEMP; i++) {
- if (!(data->have_temp & (1 << i)))
- continue;
- device_remove_file(dev, &sda_temp_input[i].dev_attr);
- device_remove_file(dev, &sda_temp_label[i].dev_attr);
- if (i == 2 && data->temp3_val_only)
- continue;
- device_remove_file(dev, &sda_temp_max[i].dev_attr);
- device_remove_file(dev, &sda_temp_max_hyst[i].dev_attr);
- if (i > 2)
- continue;
- device_remove_file(dev, &sda_temp_alarm[i].dev_attr);
- device_remove_file(dev, &sda_temp_type[i].dev_attr);
- device_remove_file(dev, &sda_temp_offset[i].dev_attr);
- }
+ return 0;
+}
- device_remove_file(dev, &sda_caseopen[0].dev_attr);
- device_remove_file(dev, &sda_caseopen[1].dev_attr);
+/* These groups handle the non-standard attributes exposed by this device */
+static struct attribute *w83627ehf_attrs[] = {
+
+ &sensor_dev_attr_pwm1_stop_time.dev_attr.attr,
+ &sensor_dev_attr_pwm1_start_output.dev_attr.attr,
+ &sensor_dev_attr_pwm1_stop_output.dev_attr.attr,
+ &sensor_dev_attr_pwm1_max_output.dev_attr.attr,
+ &sensor_dev_attr_pwm1_step_output.dev_attr.attr,
+ &sensor_dev_attr_pwm1_target.dev_attr.attr,
+ &sensor_dev_attr_pwm1_tolerance.dev_attr.attr,
+
+ &sensor_dev_attr_pwm2_stop_time.dev_attr.attr,
+ &sensor_dev_attr_pwm2_start_output.dev_attr.attr,
+ &sensor_dev_attr_pwm2_stop_output.dev_attr.attr,
+ &sensor_dev_attr_pwm2_max_output.dev_attr.attr,
+ &sensor_dev_attr_pwm2_step_output.dev_attr.attr,
+ &sensor_dev_attr_pwm2_target.dev_attr.attr,
+ &sensor_dev_attr_pwm2_tolerance.dev_attr.attr,
+
+ &sensor_dev_attr_pwm3_stop_time.dev_attr.attr,
+ &sensor_dev_attr_pwm3_start_output.dev_attr.attr,
+ &sensor_dev_attr_pwm3_stop_output.dev_attr.attr,
+ &sensor_dev_attr_pwm3_max_output.dev_attr.attr,
+ &sensor_dev_attr_pwm3_step_output.dev_attr.attr,
+ &sensor_dev_attr_pwm3_target.dev_attr.attr,
+ &sensor_dev_attr_pwm3_tolerance.dev_attr.attr,
+
+ &sensor_dev_attr_pwm4_stop_time.dev_attr.attr,
+ &sensor_dev_attr_pwm4_start_output.dev_attr.attr,
+ &sensor_dev_attr_pwm4_stop_output.dev_attr.attr,
+ &sensor_dev_attr_pwm4_max_output.dev_attr.attr,
+ &sensor_dev_attr_pwm4_step_output.dev_attr.attr,
+ &sensor_dev_attr_pwm4_target.dev_attr.attr,
+ &sensor_dev_attr_pwm4_tolerance.dev_attr.attr,
+
+ &dev_attr_cpu0_vid.attr,
+ NULL
+};
- device_remove_file(dev, &dev_attr_name);
- device_remove_file(dev, &dev_attr_cpu0_vid);
-}
+static const struct attribute_group w83627ehf_group = {
+ .attrs = w83627ehf_attrs,
+ .is_visible = w83627ehf_attrs_visible,
+};
+
+static const struct attribute_group *w83627ehf_groups[] = {
+ &w83627ehf_group,
+ NULL
+};
+
+/*
+ * Driver and device management
+ */
/* Get the monitoring functions started */
static inline void w83627ehf_init_device(struct w83627ehf_data *data,
}
}
-static void w82627ehf_swap_tempreg(struct w83627ehf_data *data,
- int r1, int r2)
-{
- swap(data->temp_src[r1], data->temp_src[r2]);
- swap(data->reg_temp[r1], data->reg_temp[r2]);
- swap(data->reg_temp_over[r1], data->reg_temp_over[r2]);
- swap(data->reg_temp_hyst[r1], data->reg_temp_hyst[r2]);
- swap(data->reg_temp_config[r1], data->reg_temp_config[r2]);
-}
-
static void
w83627ehf_set_temp_reg_ehf(struct w83627ehf_data *data, int n_temp)
{
w83627ehf_check_fan_inputs(const struct w83627ehf_sio_data *sio_data,
struct w83627ehf_data *data)
{
- int fan3pin, fan4pin, fan4min, fan5pin, regval;
+ int fan3pin, fan4pin, fan5pin, regval;
/* The W83627UHG is simple, only two fan inputs, no config */
if (sio_data->kind == w83627uhg) {
}
/* fan4 and fan5 share some pins with the GPIO and serial flash */
- if (sio_data->kind == nct6775) {
- /* On NCT6775, fan4 shares pins with the fdc interface */
- fan3pin = 1;
- fan4pin = !(superio_inb(sio_data->sioreg, 0x2A) & 0x80);
- fan4min = 0;
- fan5pin = 0;
- } else if (sio_data->kind == nct6776) {
- bool gpok = superio_inb(sio_data->sioreg, 0x27) & 0x80;
-
- superio_select(sio_data->sioreg, W83627EHF_LD_HWM);
- regval = superio_inb(sio_data->sioreg, SIO_REG_ENABLE);
-
- if (regval & 0x80)
- fan3pin = gpok;
- else
- fan3pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x40);
-
- if (regval & 0x40)
- fan4pin = gpok;
- else
- fan4pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x01);
-
- if (regval & 0x20)
- fan5pin = gpok;
- else
- fan5pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x02);
-
- fan4min = fan4pin;
- } else if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) {
+ if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) {
fan3pin = 1;
fan4pin = superio_inb(sio_data->sioreg, 0x27) & 0x40;
fan5pin = superio_inb(sio_data->sioreg, 0x27) & 0x20;
- fan4min = fan4pin;
} else {
fan3pin = 1;
fan4pin = !(superio_inb(sio_data->sioreg, 0x29) & 0x06);
fan5pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x02);
- fan4min = fan4pin;
}
data->has_fan = data->has_fan_min = 0x03; /* fan1 and fan2 */
data->has_fan |= (fan3pin << 2);
data->has_fan_min |= (fan3pin << 2);
- if (sio_data->kind == nct6775 || sio_data->kind == nct6776) {
- /*
- * NCT6775F and NCT6776F don't have the W83627EHF_REG_FANDIV1
- * register
- */
- data->has_fan |= (fan4pin << 3) | (fan5pin << 4);
- data->has_fan_min |= (fan4min << 3) | (fan5pin << 4);
- } else {
- /*
- * It looks like fan4 and fan5 pins can be alternatively used
- * as fan on/off switches, but fan5 control is write only :/
- * We assume that if the serial interface is disabled, designers
- * connected fan5 as input unless they are emitting log 1, which
- * is not the default.
- */
- regval = w83627ehf_read_value(data, W83627EHF_REG_FANDIV1);
- if ((regval & (1 << 2)) && fan4pin) {
- data->has_fan |= (1 << 3);
- data->has_fan_min |= (1 << 3);
+ /*
+ * It looks like fan4 and fan5 pins can be alternatively used
+ * as fan on/off switches, but fan5 control is write only :/
+ * We assume that if the serial interface is disabled, designers
+ * connected fan5 as input unless they are emitting log 1, which
+ * is not the default.
+ */
+ regval = w83627ehf_read_value(data, W83627EHF_REG_FANDIV1);
+ if ((regval & (1 << 2)) && fan4pin) {
+ data->has_fan |= (1 << 3);
+ data->has_fan_min |= (1 << 3);
+ }
+ if (!(regval & (1 << 1)) && fan5pin) {
+ data->has_fan |= (1 << 4);
+ data->has_fan_min |= (1 << 4);
+ }
+}
+
+static umode_t
+w83627ehf_is_visible(const void *drvdata, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct w83627ehf_data *data = drvdata;
+
+ switch (type) {
+ case hwmon_temp:
+ /* channel 0.., name 1.. */
+ if (!(data->have_temp & (1 << channel)))
+ return 0;
+ if (attr == hwmon_temp_input || attr == hwmon_temp_label)
+ return 0444;
+ if (channel == 2 && data->temp3_val_only)
+ return 0;
+		if (attr == hwmon_temp_max)
+			return data->reg_temp_over[channel] ? 0644 : 0;
+		if (attr == hwmon_temp_max_hyst)
+			return data->reg_temp_hyst[channel] ? 0644 : 0;
+ if (channel > 2)
+ return 0;
+ if (attr == hwmon_temp_alarm || attr == hwmon_temp_type)
+ return 0444;
+		if (attr == hwmon_temp_offset)
+			return (data->have_temp_offset & (1 << channel)) ? 0644 : 0;
+ break;
+
+ case hwmon_fan:
+ /* channel 0.., name 1.. */
+ if (!(data->has_fan & (1 << channel)))
+ return 0;
+ if (attr == hwmon_fan_input || attr == hwmon_fan_alarm)
+ return 0444;
+ if (attr == hwmon_fan_div) {
+ return 0444;
}
- if (!(regval & (1 << 1)) && fan5pin) {
- data->has_fan |= (1 << 4);
- data->has_fan_min |= (1 << 4);
+	if (attr == hwmon_fan_min)
+		return (data->has_fan_min & (1 << channel)) ? 0644 : 0;
+ break;
+
+ case hwmon_in:
+ /* channel 0.., name 0.. */
+ if (channel >= data->in_num)
+ return 0;
+ if (channel == 6 && data->in6_skip)
+ return 0;
+ if (attr == hwmon_in_alarm || attr == hwmon_in_input)
+ return 0444;
+ if (attr == hwmon_in_min || attr == hwmon_in_max)
+ return 0644;
+ break;
+
+ case hwmon_pwm:
+ /* channel 0.., name 1.. */
+ if (!(data->has_fan & (1 << channel)) ||
+ channel >= data->pwm_num)
+ return 0;
+ if (attr == hwmon_pwm_mode || attr == hwmon_pwm_enable ||
+ attr == hwmon_pwm_input)
+ return 0644;
+ break;
+
+ case hwmon_intrusion:
+ return 0644;
+
+ default: /* Shouldn't happen */
+ return 0;
+ }
+
+ return 0; /* Shouldn't happen */
+}
+
+static int
+w83627ehf_do_read_temp(struct w83627ehf_data *data, u32 attr,
+ int channel, long *val)
+{
+ switch (attr) {
+ case hwmon_temp_input:
+ *val = LM75_TEMP_FROM_REG(data->temp[channel]);
+ return 0;
+ case hwmon_temp_max:
+ *val = LM75_TEMP_FROM_REG(data->temp_max[channel]);
+ return 0;
+ case hwmon_temp_max_hyst:
+ *val = LM75_TEMP_FROM_REG(data->temp_max_hyst[channel]);
+ return 0;
+ case hwmon_temp_offset:
+ *val = data->temp_offset[channel] * 1000;
+ return 0;
+ case hwmon_temp_type:
+ *val = (int)data->temp_type[channel];
+ return 0;
+ case hwmon_temp_alarm:
+ if (channel < 3) {
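+			/* Alarm status bits for temp1..temp3 */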
+ int bit[] = { 4, 5, 13 };
+ *val = (data->alarms >> bit[channel]) & 1;
+ return 0;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int
+w83627ehf_do_read_in(struct w83627ehf_data *data, u32 attr,
+ int channel, long *val)
+{
+ switch (attr) {
+ case hwmon_in_input:
+ *val = in_from_reg(data->in[channel], channel, data->scale_in);
+ return 0;
+ case hwmon_in_min:
+ *val = in_from_reg(data->in_min[channel], channel,
+ data->scale_in);
+ return 0;
+ case hwmon_in_max:
+ *val = in_from_reg(data->in_max[channel], channel,
+ data->scale_in);
+ return 0;
+ case hwmon_in_alarm:
+ if (channel < 10) {
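+			/* Alarm status bit for each voltage input */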
+ int bit[] = { 0, 1, 2, 3, 8, 21, 20, 16, 17, 19 };
+ *val = (data->alarms >> bit[channel]) & 1;
+ return 0;
+ }
+ break;
+ default:
+ break;
+ }
+ return -EOPNOTSUPP;
+}
+
+static int
+w83627ehf_do_read_fan(struct w83627ehf_data *data, u32 attr,
+ int channel, long *val)
+{
+ switch (attr) {
+ case hwmon_fan_input:
+ *val = data->rpm[channel];
+ return 0;
+ case hwmon_fan_min:
+ *val = fan_from_reg8(data->fan_min[channel],
+ data->fan_div[channel]);
+ return 0;
+ case hwmon_fan_div:
+ *val = div_from_reg(data->fan_div[channel]);
+ return 0;
+ case hwmon_fan_alarm:
+ if (channel < 5) {
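+			/* Alarm status bits for fan1..fan5 */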
+ int bit[] = { 6, 7, 11, 10, 23 };
+ *val = (data->alarms >> bit[channel]) & 1;
+ return 0;
}
+ break;
+ default:
+ break;
}
+ return -EOPNOTSUPP;
}
+static int
+w83627ehf_do_read_pwm(struct w83627ehf_data *data, u32 attr,
+ int channel, long *val)
+{
+ switch (attr) {
+ case hwmon_pwm_input:
+ *val = data->pwm[channel];
+ return 0;
+ case hwmon_pwm_enable:
+ *val = data->pwm_enable[channel];
+ return 0;
+ case hwmon_pwm_mode:
+		*val = data->pwm_mode[channel];
+ return 0;
+ default:
+ break;
+ }
+ return -EOPNOTSUPP;
+}
+
+static int
+w83627ehf_do_read_intrusion(struct w83627ehf_data *data, u32 attr,
+ int channel, long *val)
+{
+ if (attr != hwmon_intrusion_alarm || channel != 0)
+ return -EOPNOTSUPP; /* shouldn't happen */
+
+ *val = !!(data->caseopen & 0x10);
+ return 0;
+}
+
+static int
+w83627ehf_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
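+	/* The hwmon device's parent is the platform device holding our state */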
+ struct w83627ehf_data *data = w83627ehf_update_device(dev->parent);
+
+ switch (type) {
+ case hwmon_fan:
+ return w83627ehf_do_read_fan(data, attr, channel, val);
+
+ case hwmon_in:
+ return w83627ehf_do_read_in(data, attr, channel, val);
+
+ case hwmon_pwm:
+ return w83627ehf_do_read_pwm(data, attr, channel, val);
+
+ case hwmon_temp:
+ return w83627ehf_do_read_temp(data, attr, channel, val);
+
+ case hwmon_intrusion:
+ return w83627ehf_do_read_intrusion(data, attr, channel, val);
+
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int
+w83627ehf_read_string(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
+{
+ struct w83627ehf_data *data = dev_get_drvdata(dev);
+
+ switch (type) {
+ case hwmon_temp:
+ if (attr == hwmon_temp_label) {
+ *str = data->temp_label[data->temp_src[channel]];
+ return 0;
+ }
+ break;
+
+ default:
+ break;
+ }
+ /* Nothing else should be read as a string */
+ return -EOPNOTSUPP;
+}
+
+static int
+w83627ehf_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct w83627ehf_data *data = dev_get_drvdata(dev);
+
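+	/*
+	 * e.g. "echo 128 > pwm2" arrives here as type == hwmon_pwm,
+	 * attr == hwmon_pwm_input, channel == 1, val == 128.
+	 */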
+ if (type == hwmon_in && attr == hwmon_in_min)
+ return store_in_min(dev, data, channel, val);
+ if (type == hwmon_in && attr == hwmon_in_max)
+ return store_in_max(dev, data, channel, val);
+
+ if (type == hwmon_fan && attr == hwmon_fan_min)
+ return store_fan_min(dev, data, channel, val);
+
+ if (type == hwmon_temp && attr == hwmon_temp_max)
+ return store_temp_max(dev, data, channel, val);
+ if (type == hwmon_temp && attr == hwmon_temp_max_hyst)
+ return store_temp_max_hyst(dev, data, channel, val);
+ if (type == hwmon_temp && attr == hwmon_temp_offset)
+ return store_temp_offset(dev, data, channel, val);
+
+ if (type == hwmon_pwm && attr == hwmon_pwm_mode)
+ return store_pwm_mode(dev, data, channel, val);
+ if (type == hwmon_pwm && attr == hwmon_pwm_enable)
+ return store_pwm_enable(dev, data, channel, val);
+ if (type == hwmon_pwm && attr == hwmon_pwm_input)
+ return store_pwm(dev, data, channel, val);
+
+ if (type == hwmon_intrusion && attr == hwmon_intrusion_alarm)
+ return clear_caseopen(dev, data, channel, val);
+
+ return -EOPNOTSUPP;
+}
+
+static const struct hwmon_ops w83627ehf_ops = {
+ .is_visible = w83627ehf_is_visible,
+ .read = w83627ehf_read,
+ .read_string = w83627ehf_read_string,
+ .write = w83627ehf_write,
+};
+
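+/* Worst-case channel layout; w83627ehf_is_visible() trims it per chip */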
+static const struct hwmon_channel_info *w83627ehf_info[] = {
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN,
+ HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN,
+ HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN,
+ HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN,
+ HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN),
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+ HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+ HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+ HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+ HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+ HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+ HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+ HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+ HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN,
+ HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN),
+ HWMON_CHANNEL_INFO(pwm,
+ HWMON_PWM_ENABLE | HWMON_PWM_INPUT | HWMON_PWM_MODE,
+ HWMON_PWM_ENABLE | HWMON_PWM_INPUT | HWMON_PWM_MODE,
+ HWMON_PWM_ENABLE | HWMON_PWM_INPUT | HWMON_PWM_MODE,
+ HWMON_PWM_ENABLE | HWMON_PWM_INPUT | HWMON_PWM_MODE),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+ HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+ HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+ HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+ HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+ HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+ HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+ HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE,
+ HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE),
+ HWMON_CHANNEL_INFO(intrusion,
+ HWMON_INTRUSION_ALARM),
+ NULL
+};
+
+static const struct hwmon_chip_info w83627ehf_chip_info = {
+ .ops = &w83627ehf_ops,
+ .info = w83627ehf_info,
+};
+
static int w83627ehf_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
u8 en_vrm10;
int i, err = 0;
+ struct device *hwmon_dev;
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (!request_region(res->start, IOREGION_LENGTH, DRVNAME)) {
/* 627EHG and 627EHF have 10 voltage inputs; 627DHG and 667HG have 9 */
data->in_num = (sio_data->kind == w83627ehf) ? 10 : 9;
- /* 667HG, NCT6775F, and NCT6776F have 3 pwms, and 627UHG has only 2 */
+ /* 667HG has 3 pwms, and 627UHG has only 2 */
switch (sio_data->kind) {
default:
data->pwm_num = 4;
break;
case w83667hg:
case w83667hg_b:
- case nct6775:
- case nct6776:
data->pwm_num = 3;
break;
case w83627uhg:
data->have_temp = 0x07;
/* Deal with temperature register setup first. */
- if (sio_data->kind == nct6775 || sio_data->kind == nct6776) {
- int mask = 0;
-
- /*
- * Display temperature sensor output only if it monitors
- * a source other than one already reported. Always display
- * first three temperature registers, though.
- */
- for (i = 0; i < NUM_REG_TEMP; i++) {
- u8 src;
-
- data->reg_temp[i] = NCT6775_REG_TEMP[i];
- data->reg_temp_over[i] = NCT6775_REG_TEMP_OVER[i];
- data->reg_temp_hyst[i] = NCT6775_REG_TEMP_HYST[i];
- data->reg_temp_config[i] = NCT6775_REG_TEMP_CONFIG[i];
-
- src = w83627ehf_read_value(data,
- NCT6775_REG_TEMP_SOURCE[i]);
- src &= 0x1f;
- if (src && !(mask & (1 << src))) {
- data->have_temp |= 1 << i;
- mask |= 1 << src;
- }
-
- data->temp_src[i] = src;
-
- /*
- * Now do some register swapping if index 0..2 don't
- * point to SYSTIN(1), CPUIN(2), and AUXIN(3).
- * Idea is to have the first three attributes
- * report SYSTIN, CPUIN, and AUXIN if possible
- * without overriding the basic system configuration.
- */
- if (i > 0 && data->temp_src[0] != 1
- && data->temp_src[i] == 1)
- w82627ehf_swap_tempreg(data, 0, i);
- if (i > 1 && data->temp_src[1] != 2
- && data->temp_src[i] == 2)
- w82627ehf_swap_tempreg(data, 1, i);
- if (i > 2 && data->temp_src[2] != 3
- && data->temp_src[i] == 3)
- w82627ehf_swap_tempreg(data, 2, i);
- }
- if (sio_data->kind == nct6776) {
- /*
- * On NCT6776, AUXTIN and VIN3 pins are shared.
- * Only way to detect it is to check if AUXTIN is used
- * as a temperature source, and if that source is
- * enabled.
- *
- * If that is the case, disable in6, which reports VIN3.
- * Otherwise disable temp3.
- */
- if (data->temp_src[2] == 3) {
- u8 reg;
-
- if (data->reg_temp_config[2])
- reg = w83627ehf_read_value(data,
- data->reg_temp_config[2]);
- else
- reg = 0; /* Assume AUXTIN is used */
-
- if (reg & 0x01)
- data->have_temp &= ~(1 << 2);
- else
- data->in6_skip = 1;
- }
- data->temp_label = nct6776_temp_label;
- } else {
- data->temp_label = nct6775_temp_label;
- }
- data->have_temp_offset = data->have_temp & 0x07;
- for (i = 0; i < 3; i++) {
- if (data->temp_src[i] > 3)
- data->have_temp_offset &= ~(1 << i);
- }
- } else if (sio_data->kind == w83667hg_b) {
+ if (sio_data->kind == w83667hg_b) {
u8 reg;
w83627ehf_set_temp_reg_ehf(data, 4);
data->have_temp_offset = data->have_temp & 0x07;
}
- if (sio_data->kind == nct6775) {
- data->has_fan_div = true;
- data->fan_from_reg = fan_from_reg16;
- data->fan_from_reg_min = fan_from_reg8;
- data->REG_PWM = NCT6775_REG_PWM;
- data->REG_TARGET = NCT6775_REG_TARGET;
- data->REG_FAN = NCT6775_REG_FAN;
- data->REG_FAN_MIN = W83627EHF_REG_FAN_MIN;
- data->REG_FAN_START_OUTPUT = NCT6775_REG_FAN_START_OUTPUT;
- data->REG_FAN_STOP_OUTPUT = NCT6775_REG_FAN_STOP_OUTPUT;
- data->REG_FAN_STOP_TIME = NCT6775_REG_FAN_STOP_TIME;
- data->REG_FAN_MAX_OUTPUT = NCT6775_REG_FAN_MAX_OUTPUT;
- data->REG_FAN_STEP_OUTPUT = NCT6775_REG_FAN_STEP_OUTPUT;
- } else if (sio_data->kind == nct6776) {
- data->has_fan_div = false;
- data->fan_from_reg = fan_from_reg13;
- data->fan_from_reg_min = fan_from_reg13;
- data->REG_PWM = NCT6775_REG_PWM;
- data->REG_TARGET = NCT6775_REG_TARGET;
- data->REG_FAN = NCT6775_REG_FAN;
- data->REG_FAN_MIN = NCT6776_REG_FAN_MIN;
- data->REG_FAN_START_OUTPUT = NCT6775_REG_FAN_START_OUTPUT;
- data->REG_FAN_STOP_OUTPUT = NCT6775_REG_FAN_STOP_OUTPUT;
- data->REG_FAN_STOP_TIME = NCT6775_REG_FAN_STOP_TIME;
- } else if (sio_data->kind == w83667hg_b) {
- data->has_fan_div = true;
- data->fan_from_reg = fan_from_reg8;
- data->fan_from_reg_min = fan_from_reg8;
- data->REG_PWM = W83627EHF_REG_PWM;
- data->REG_TARGET = W83627EHF_REG_TARGET;
- data->REG_FAN = W83627EHF_REG_FAN;
- data->REG_FAN_MIN = W83627EHF_REG_FAN_MIN;
- data->REG_FAN_START_OUTPUT = W83627EHF_REG_FAN_START_OUTPUT;
- data->REG_FAN_STOP_OUTPUT = W83627EHF_REG_FAN_STOP_OUTPUT;
- data->REG_FAN_STOP_TIME = W83627EHF_REG_FAN_STOP_TIME;
+ if (sio_data->kind == w83667hg_b) {
data->REG_FAN_MAX_OUTPUT =
W83627EHF_REG_FAN_MAX_OUTPUT_W83667_B;
data->REG_FAN_STEP_OUTPUT =
W83627EHF_REG_FAN_STEP_OUTPUT_W83667_B;
} else {
- data->has_fan_div = true;
- data->fan_from_reg = fan_from_reg8;
- data->fan_from_reg_min = fan_from_reg8;
- data->REG_PWM = W83627EHF_REG_PWM;
- data->REG_TARGET = W83627EHF_REG_TARGET;
- data->REG_FAN = W83627EHF_REG_FAN;
- data->REG_FAN_MIN = W83627EHF_REG_FAN_MIN;
- data->REG_FAN_START_OUTPUT = W83627EHF_REG_FAN_START_OUTPUT;
- data->REG_FAN_STOP_OUTPUT = W83627EHF_REG_FAN_STOP_OUTPUT;
- data->REG_FAN_STOP_TIME = W83627EHF_REG_FAN_STOP_TIME;
data->REG_FAN_MAX_OUTPUT =
W83627EHF_REG_FAN_MAX_OUTPUT_COMMON;
data->REG_FAN_STEP_OUTPUT =
goto exit_release;
/* Read VID value */
- if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b ||
- sio_data->kind == nct6775 || sio_data->kind == nct6776) {
+ if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) {
/*
* W83667HG has different pins for VID input and output, so
* we can get the VID input values directly at logical device D
*/
superio_select(sio_data->sioreg, W83667HG_LD_VID);
data->vid = superio_inb(sio_data->sioreg, 0xe3);
- err = device_create_file(dev, &dev_attr_cpu0_vid);
- if (err) {
- superio_exit(sio_data->sioreg);
- goto exit_release;
- }
+ data->have_vid = true;
} else if (sio_data->kind != w83627uhg) {
superio_select(sio_data->sioreg, W83627EHF_LD_HWM);
if (superio_inb(sio_data->sioreg, SIO_REG_VID_CTRL) & 0x80) {
SIO_REG_VID_DATA);
if (sio_data->kind == w83627ehf) /* 6 VID pins only */
data->vid &= 0x3f;
-
- err = device_create_file(dev, &dev_attr_cpu0_vid);
- if (err) {
- superio_exit(sio_data->sioreg);
- goto exit_release;
- }
+ data->have_vid = true;
} else {
dev_info(dev,
"VID pins in output mode, CPU VID not available\n");
}
}
- if (fan_debounce &&
- (sio_data->kind == nct6775 || sio_data->kind == nct6776)) {
- u8 tmp;
-
- superio_select(sio_data->sioreg, W83627EHF_LD_HWM);
- tmp = superio_inb(sio_data->sioreg, NCT6775_REG_FAN_DEBOUNCE);
- if (sio_data->kind == nct6776)
- superio_outb(sio_data->sioreg, NCT6775_REG_FAN_DEBOUNCE,
- 0x3e | tmp);
- else
- superio_outb(sio_data->sioreg, NCT6775_REG_FAN_DEBOUNCE,
- 0x1e | tmp);
- pr_info("Enabled fan debounce for chip %s\n", data->name);
- }
-
w83627ehf_check_fan_inputs(sio_data, data);
superio_exit(sio_data->sioreg);
/* Read fan clock dividers immediately */
- w83627ehf_update_fan_div_common(dev, data);
+ w83627ehf_update_fan_div(data);
/* Read pwm data to save original values */
- w83627ehf_update_pwm_common(dev, data);
+ w83627ehf_update_pwm(data);
for (i = 0; i < data->pwm_num; i++)
data->pwm_enable_orig[i] = data->pwm_enable[i];
- /* Register sysfs hooks */
- for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays); i++) {
- err = device_create_file(dev, &sda_sf3_arrays[i].dev_attr);
- if (err)
- goto exit_remove;
- }
-
- for (i = 0; i < ARRAY_SIZE(sda_sf3_max_step_arrays); i++) {
- struct sensor_device_attribute *attr =
- &sda_sf3_max_step_arrays[i];
- if (data->REG_FAN_STEP_OUTPUT &&
- data->REG_FAN_STEP_OUTPUT[attr->index] != 0xff) {
- err = device_create_file(dev, &attr->dev_attr);
- if (err)
- goto exit_remove;
- }
- }
- /* if fan3 and fan4 are enabled create the sf3 files for them */
- if ((data->has_fan & (1 << 2)) && data->pwm_num >= 3)
- for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan3); i++) {
- err = device_create_file(dev,
- &sda_sf3_arrays_fan3[i].dev_attr);
- if (err)
- goto exit_remove;
- }
- if ((data->has_fan & (1 << 3)) && data->pwm_num >= 4)
- for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan4); i++) {
- err = device_create_file(dev,
- &sda_sf3_arrays_fan4[i].dev_attr);
- if (err)
- goto exit_remove;
- }
-
- for (i = 0; i < data->in_num; i++) {
- if ((i == 6) && data->in6_skip)
- continue;
- if ((err = device_create_file(dev, &sda_in_input[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_in_alarm[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_in_min[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_in_max[i].dev_attr)))
- goto exit_remove;
- }
-
- for (i = 0; i < 5; i++) {
- if (data->has_fan & (1 << i)) {
- if ((err = device_create_file(dev,
- &sda_fan_input[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_fan_alarm[i].dev_attr)))
- goto exit_remove;
- if (sio_data->kind != nct6776) {
- err = device_create_file(dev,
- &sda_fan_div[i].dev_attr);
- if (err)
- goto exit_remove;
- }
- if (data->has_fan_min & (1 << i)) {
- err = device_create_file(dev,
- &sda_fan_min[i].dev_attr);
- if (err)
- goto exit_remove;
- }
- if (i < data->pwm_num &&
- ((err = device_create_file(dev,
- &sda_pwm[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_pwm_mode[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_pwm_enable[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_target_temp[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_tolerance[i].dev_attr))))
- goto exit_remove;
- }
- }
-
- for (i = 0; i < NUM_REG_TEMP; i++) {
- if (!(data->have_temp & (1 << i)))
- continue;
- err = device_create_file(dev, &sda_temp_input[i].dev_attr);
- if (err)
- goto exit_remove;
- if (data->temp_label) {
- err = device_create_file(dev,
- &sda_temp_label[i].dev_attr);
- if (err)
- goto exit_remove;
- }
- if (i == 2 && data->temp3_val_only)
- continue;
- if (data->reg_temp_over[i]) {
- err = device_create_file(dev,
- &sda_temp_max[i].dev_attr);
- if (err)
- goto exit_remove;
- }
- if (data->reg_temp_hyst[i]) {
- err = device_create_file(dev,
- &sda_temp_max_hyst[i].dev_attr);
- if (err)
- goto exit_remove;
- }
- if (i > 2)
- continue;
- if ((err = device_create_file(dev,
- &sda_temp_alarm[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_temp_type[i].dev_attr)))
- goto exit_remove;
- if (data->have_temp_offset & (1 << i)) {
- err = device_create_file(dev,
- &sda_temp_offset[i].dev_attr);
- if (err)
- goto exit_remove;
- }
- }
-
- err = device_create_file(dev, &sda_caseopen[0].dev_attr);
- if (err)
- goto exit_remove;
-
- if (sio_data->kind == nct6776) {
- err = device_create_file(dev, &sda_caseopen[1].dev_attr);
- if (err)
- goto exit_remove;
- }
+ hwmon_dev = devm_hwmon_device_register_with_info(&pdev->dev,
+ data->name,
+ data,
+ &w83627ehf_chip_info,
+ w83627ehf_groups);
- err = device_create_file(dev, &dev_attr_name);
- if (err)
- goto exit_remove;
-
- data->hwmon_dev = hwmon_device_register(dev);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- goto exit_remove;
- }
+ return PTR_ERR_OR_ZERO(hwmon_dev);
- return 0;
-
-exit_remove:
- w83627ehf_device_remove_files(dev);
exit_release:
release_region(res->start, IOREGION_LENGTH);
exit:
{
struct w83627ehf_data *data = platform_get_drvdata(pdev);
- hwmon_device_unregister(data->hwmon_dev);
- w83627ehf_device_remove_files(&pdev->dev);
release_region(data->addr, IOREGION_LENGTH);
return 0;
static int w83627ehf_suspend(struct device *dev)
{
struct w83627ehf_data *data = w83627ehf_update_device(dev);
- struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
mutex_lock(&data->update_lock);
data->vbat = w83627ehf_read_value(data, W83627EHF_REG_VBAT);
- if (sio_data->kind == nct6775) {
- data->fandiv1 = w83627ehf_read_value(data, NCT6775_REG_FANDIV1);
- data->fandiv2 = w83627ehf_read_value(data, NCT6775_REG_FANDIV2);
- }
mutex_unlock(&data->update_lock);
return 0;
static int w83627ehf_resume(struct device *dev)
{
struct w83627ehf_data *data = dev_get_drvdata(dev);
- struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
int i;
mutex_lock(&data->update_lock);
if (!(data->has_fan_min & (1 << i)))
continue;
- w83627ehf_write_value(data, data->REG_FAN_MIN[i],
+ w83627ehf_write_value(data, W83627EHF_REG_FAN_MIN[i],
data->fan_min[i]);
}
/* Restore other settings */
w83627ehf_write_value(data, W83627EHF_REG_VBAT, data->vbat);
- if (sio_data->kind == nct6775) {
- w83627ehf_write_value(data, NCT6775_REG_FANDIV1, data->fandiv1);
- w83627ehf_write_value(data, NCT6775_REG_FANDIV2, data->fandiv2);
- }
/* Force re-reading all values */
data->valid = 0;
static const char sio_name_W83627UHG[] __initconst = "W83627UHG";
static const char sio_name_W83667HG[] __initconst = "W83667HG";
static const char sio_name_W83667HG_B[] __initconst = "W83667HG-B";
- static const char sio_name_NCT6775[] __initconst = "NCT6775F";
- static const char sio_name_NCT6776[] __initconst = "NCT6776F";
u16 val;
const char *sio_name;
sio_data->kind = w83667hg_b;
sio_name = sio_name_W83667HG_B;
break;
- case SIO_NCT6775_ID:
- sio_data->kind = nct6775;
- sio_name = sio_name_NCT6775;
- break;
- case SIO_NCT6776_ID:
- sio_data->kind = nct6776;
- sio_name = sio_name_NCT6776;
- break;
default:
if (val != 0xffff)
pr_debug("unsupported chip ID: 0x%04x\n", val);
drvdata->trcid = coresight_get_trace_id(drvdata->cpu);
}
-#ifdef CONFIG_CPU_PM
static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
{
int i, ret = 0;
static int etm4_cpu_pm_register(void)
{
- return cpu_pm_register_notifier(&etm4_cpu_pm_nb);
+ if (IS_ENABLED(CONFIG_CPU_PM))
+ return cpu_pm_register_notifier(&etm4_cpu_pm_nb);
+
+ return 0;
}
static void etm4_cpu_pm_unregister(void)
{
- cpu_pm_unregister_notifier(&etm4_cpu_pm_nb);
+ if (IS_ENABLED(CONFIG_CPU_PM))
+ cpu_pm_unregister_notifier(&etm4_cpu_pm_nb);
}
-#else
-static int etm4_cpu_pm_register(void) { return 0; }
-static void etm4_cpu_pm_unregister(void) { }
-#endif
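/*
 * IS_ENABLED(CONFIG_CPU_PM) evaluates to a compile-time constant, so the
 * unused branch is discarded by the compiler while both branches remain
 * visible and type-checked, removing the need for #ifdef stubs.
 */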
static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
{
static struct at91_twi_pdata sam9x60_config = {
.clk_max_div = 7,
- .clk_offset = 4,
+ .clk_offset = 3,
.has_unre_flag = true,
.has_alt_cmd = true,
.has_hold_field = true,
struct i2c_adapter adapter;
struct completion completion;
struct i2c_msg *curr_msg;
+ struct clk *bus_clk;
int num_msgs;
u32 msg_err;
u8 *msg_buf;
struct resource *mem, *irq;
int ret;
struct i2c_adapter *adap;
- struct clk *bus_clk;
struct clk *mclk;
u32 bus_clk_rate;
return PTR_ERR(mclk);
}
- bus_clk = bcm2835_i2c_register_div(&pdev->dev, mclk, i2c_dev);
+ i2c_dev->bus_clk = bcm2835_i2c_register_div(&pdev->dev, mclk, i2c_dev);
- if (IS_ERR(bus_clk)) {
+ if (IS_ERR(i2c_dev->bus_clk)) {
dev_err(&pdev->dev, "Could not register clock\n");
- return PTR_ERR(bus_clk);
+ return PTR_ERR(i2c_dev->bus_clk);
}
ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency",
bus_clk_rate = 100000;
}
- ret = clk_set_rate_exclusive(bus_clk, bus_clk_rate);
+ ret = clk_set_rate_exclusive(i2c_dev->bus_clk, bus_clk_rate);
if (ret < 0) {
dev_err(&pdev->dev, "Could not set clock frequency\n");
return ret;
}
- ret = clk_prepare_enable(bus_clk);
+ ret = clk_prepare_enable(i2c_dev->bus_clk);
if (ret) {
		dev_err(&pdev->dev, "Couldn't prepare clock\n");
return ret;
static int bcm2835_i2c_remove(struct platform_device *pdev)
{
struct bcm2835_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
- struct clk *bus_clk = devm_clk_get(i2c_dev->dev, "div");
- clk_rate_exclusive_put(bus_clk);
- clk_disable_unprepare(bus_clk);
+ clk_rate_exclusive_put(i2c_dev->bus_clk);
+ clk_disable_unprepare(i2c_dev->bus_clk);
free_irq(i2c_dev->irq, i2c_dev);
i2c_del_adapter(&i2c_dev->adapter);
adapter_data->gpio_scl = devm_gpiod_get_optional(&pdev->dev,
"scl",
GPIOD_ASIS);
- if (IS_ERR(adapter_data->gpio_scl))
- return PTR_ERR(adapter_data->gpio_scl);
+ if (IS_ERR(adapter_data->gpio_scl)) {
+ ret = PTR_ERR(adapter_data->gpio_scl);
+ goto free_both;
+ }
adapter_data->gpio_sda = devm_gpiod_get_optional(&pdev->dev,
"sda",
GPIOD_ASIS);
- if (IS_ERR(adapter_data->gpio_sda))
- return PTR_ERR(adapter_data->gpio_sda);
+ if (IS_ERR(adapter_data->gpio_sda)) {
+ ret = PTR_ERR(adapter_data->gpio_sda);
+ goto free_both;
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
}
pm_runtime_enable(&pdev->dev);
- if (!pm_runtime_enabled(&pdev->dev))
+ if (!pm_runtime_enabled(&pdev->dev)) {
ret = tegra_i2c_runtime_resume(&pdev->dev);
- else
+ if (ret < 0) {
+ dev_err(&pdev->dev, "runtime resume failed\n");
+ goto unprepare_div_clk;
+ }
+ } else {
ret = pm_runtime_get_sync(i2c_dev->dev);
-
- if (ret < 0) {
- dev_err(&pdev->dev, "runtime resume failed\n");
- goto unprepare_div_clk;
+ if (ret < 0) {
+ dev_err(&pdev->dev, "runtime resume failed\n");
+ goto disable_rpm;
+ }
}
if (i2c_dev->is_multimaster_mode) {
if (ret < 0) {
dev_err(i2c_dev->dev, "div_clk enable failed %d\n",
ret);
- goto disable_rpm;
+ goto put_rpm;
}
}
if (i2c_dev->is_multimaster_mode)
clk_disable(i2c_dev->div_clk);
-disable_rpm:
- pm_runtime_disable(&pdev->dev);
- if (!pm_runtime_status_suspended(&pdev->dev))
+put_rpm:
+ if (pm_runtime_enabled(&pdev->dev))
+ pm_runtime_put_sync(&pdev->dev);
+ else
tegra_i2c_runtime_suspend(&pdev->dev);
+disable_rpm:
+ if (pm_runtime_enabled(&pdev->dev))
+ pm_runtime_disable(&pdev->dev);
+
unprepare_div_clk:
clk_unprepare(i2c_dev->div_clk);
static int __maybe_unused tegra_i2c_suspend(struct device *dev)
{
struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev);
+ int err;
i2c_mark_adapter_suspended(&i2c_dev->adapter);
+ err = pm_runtime_force_suspend(dev);
+ if (err < 0)
+ return err;
+
return 0;
}
if (err)
return err;
+ err = pm_runtime_force_resume(dev);
+ if (err < 0)
+ return err;
+
i2c_mark_adapter_resumed(&i2c_dev->adapter);
return 0;
* If we can set SDA, we will always create a STOP to ensure additional
* pulses will do no harm. This is achieved by letting SDA follow SCL
* half a cycle later. Check the 'incomplete_write_byte' fault injector
- * for details.
+ * for details. Note that we must honour tsu:sto, 4us, but let's use 5us
+ * here for simplicity.
*/
bri->set_scl(adap, scl);
- ndelay(RECOVERY_NDELAY / 2);
+ ndelay(RECOVERY_NDELAY);
if (bri->set_sda)
bri->set_sda(adap, scl);
ndelay(RECOVERY_NDELAY / 2);
scl = !scl;
bri->set_scl(adap, scl);
/* Creating STOP again, see above */
- ndelay(RECOVERY_NDELAY / 2);
+ if (scl) {
+ /* Honour minimum tsu:sto */
+ ndelay(RECOVERY_NDELAY);
+ } else {
+ /* Honour minimum tf and thd:dat */
+ ndelay(RECOVERY_NDELAY / 2);
+ }
if (bri->set_sda)
bri->set_sda(adap, scl);
ndelay(RECOVERY_NDELAY / 2);
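/*
 * A sketch of the resulting timing, assuming the i2c core's usual
 * RECOVERY_NDELAY of 5000 ns: the full ndelay(RECOVERY_NDELAY) after
 * raising SCL gives 5 us before SDA follows, comfortably above the 4 us
 * tsu:sto minimum, while the 2.5 us half delay on the falling edge is
 * still enough for the tf and thd:dat minimums.
 */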
.groups = i3c_masterdev_groups,
};
-int i3c_bus_set_mode(struct i3c_bus *i3cbus, enum i3c_bus_mode mode,
- unsigned long max_i2c_scl_rate)
+static int i3c_bus_set_mode(struct i3c_bus *i3cbus, enum i3c_bus_mode mode,
+ unsigned long max_i2c_scl_rate)
{
struct i3c_master_controller *master = i3c_bus_to_i3c_master(i3cbus);
struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
struct i3c_master_controller *m = i3c_dev_get_master(dev);
struct dw_i3c_master *master = to_dw_i3c_master(m);
+ int pos;
+
+ pos = dw_i3c_master_get_free_pos(master);
+
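+	/*
+	 * A lower slot in the device address table has been freed (for
+	 * example by a detached device): clear this device's old DAT
+	 * entry, release its slot, and move the dynamic address down to
+	 * the lowest free position.
+	 */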
+ if (data->index > pos && pos > 0) {
+ writel(0,
+ master->regs +
+ DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
+
+ master->addrs[data->index] = 0;
+ master->free_pos |= BIT(data->index);
+
+ data->index = pos;
+ master->addrs[pos] = dev->info.dyn_addr;
+ master->free_pos &= ~BIT(pos);
+ }
writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(dev->info.dyn_addr),
master->regs +
static int dw_i3c_probe(struct platform_device *pdev)
{
struct dw_i3c_master *master;
- struct resource *res;
int ret, irq;
master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL);
if (!master)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- master->regs = devm_ioremap_resource(&pdev->dev, res);
+ master->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(master->regs))
return PTR_ERR(master->regs);
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
+#include <linux/of_device.h>
#define DEV_ID 0x0
#define DEV_ID_I3C_MASTER 0x5034
#define CTRL_HALT_EN BIT(30)
#define CTRL_MCS BIT(29)
#define CTRL_MCS_EN BIT(28)
+#define CTRL_THD_DELAY(x) (((x) << 24) & GENMASK(25, 24))
#define CTRL_HJ_DISEC BIT(8)
#define CTRL_MST_ACK BIT(7)
#define CTRL_HJ_ACK BIT(6)
#define CTRL_MIXED_FAST_BUS_MODE 2
#define CTRL_MIXED_SLOW_BUS_MODE 3
#define CTRL_BUS_MODE_MASK GENMASK(1, 0)
+#define THD_DELAY_MAX 3
#define PRESCL_CTRL0 0x14
#define PRESCL_CTRL0_I2C(x) ((x) << 16)
struct cdns_i3c_cmd cmds[0];
};
+struct cdns_i3c_data {
+ u8 thd_delay_ns;
+};
+
struct cdns_i3c_master {
struct work_struct hj_work;
struct i3c_master_controller base;
struct clk *pclk;
struct cdns_i3c_master_caps caps;
unsigned long i3c_scl_lim;
+ const struct cdns_i3c_data *devdata;
};
static inline struct cdns_i3c_master *
return 0;
}
+static u8 cdns_i3c_master_calculate_thd_delay(struct cdns_i3c_master *master)
+{
+ unsigned long sysclk_rate = clk_get_rate(master->sysclk);
+ u8 thd_delay = DIV_ROUND_UP(master->devdata->thd_delay_ns,
+ (NSEC_PER_SEC / sysclk_rate));
+
+	/* Values greater than 3 are not valid. */
+ if (thd_delay > THD_DELAY_MAX)
+ thd_delay = THD_DELAY_MAX;
+
+	/* The CTRL_THD_DELAY field value is encoded (3 - delay). */
+ return (THD_DELAY_MAX - thd_delay);
+}
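+/*
+ * Worked example (illustrative numbers only): with a 200 MHz sysclk the
+ * clock period is NSEC_PER_SEC / 200000000 = 5 ns, so a thd_delay_ns of
+ * 10 rounds up to DIV_ROUND_UP(10, 5) = 2 periods and the field is
+ * written as THD_DELAY_MAX - 2 = 1.
+ */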
+
static int cdns_i3c_master_bus_init(struct i3c_master_controller *m)
{
struct cdns_i3c_master *master = to_cdns_i3c_master(m);
* We will issue ENTDAA afterwards from the threaded IRQ handler.
*/
ctrl |= CTRL_HJ_ACK | CTRL_HJ_DISEC | CTRL_HALT_EN | CTRL_MCS_EN;
+
+ /*
+ * Configure data hold delay based on device-specific data.
+ *
+ * MIPI I3C Specification 1.0 defines non-zero minimal tHD_PP timing on
+	 * master output. This setting makes it possible to meet that timing
+	 * on the master's SoC outputs, regardless of PCB balancing.
+ */
+ ctrl |= CTRL_THD_DELAY(cdns_i3c_master_calculate_thd_delay(master));
writel(ctrl, master->regs + CTRL);
cdns_i3c_master_enable(master);
i3c_master_do_daa(&master->base);
}
+static struct cdns_i3c_data cdns_i3c_devdata = {
+ .thd_delay_ns = 10,
+};
+
+static const struct of_device_id cdns_i3c_master_of_ids[] = {
+ { .compatible = "cdns,i3c-master", .data = &cdns_i3c_devdata },
+ { /* sentinel */ },
+};
+
static int cdns_i3c_master_probe(struct platform_device *pdev)
{
struct cdns_i3c_master *master;
- struct resource *res;
int ret, irq;
u32 val;
if (!master)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- master->regs = devm_ioremap_resource(&pdev->dev, res);
+ master->devdata = of_device_get_match_data(&pdev->dev);
+ if (!master->devdata)
+ return -EINVAL;
+
+ master->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(master->regs))
return PTR_ERR(master->regs);
return 0;
}
-static const struct of_device_id cdns_i3c_master_of_ids[] = {
- { .compatible = "cdns,i3c-master" },
- { /* sentinel */ },
-};
-
static struct platform_driver cdns_i3c_master = {
.probe = cdns_i3c_master_probe,
.remove = cdns_i3c_master_remove,
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/tick.h>
unsigned long auto_demotion_disable_flags;
bool byt_auto_demotion_disable_flag;
bool disable_promotion_to_c1e;
+ bool use_acpi;
};
static const struct idle_cpu *icpu;
struct cpuidle_driver *drv, int index);
static struct cpuidle_state *cpuidle_state_table;
+/*
+ * Enable this state by default even if the ACPI _CST does not list it.
+ */
+#define CPUIDLE_FLAG_ALWAYS_ENABLE BIT(15)
+
/*
* Set this flag for states where the HW flushes the TLB for us
* and so we don't need cross-calls to keep it consistent.
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 80,
.enter = &intel_idle,
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 250,
.enter = &intel_idle,
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 500,
.enter = &intel_idle,
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
{
.name = "C1E",
.desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
mwait_idle_with_hints(eax, ecx);
}
-static void __setup_broadcast_timer(bool on)
-{
- if (on)
- tick_broadcast_enable();
- else
- tick_broadcast_disable();
-}
-
-static void auto_demotion_disable(void)
-{
- unsigned long long msr_bits;
-
- rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
- msr_bits &= ~(icpu->auto_demotion_disable_flags);
- wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
-}
-static void c1e_promotion_disable(void)
-{
- unsigned long long msr_bits;
-
- rdmsrl(MSR_IA32_POWER_CTL, msr_bits);
- msr_bits &= ~0x2;
- wrmsrl(MSR_IA32_POWER_CTL, msr_bits);
-}
-
static const struct idle_cpu idle_cpu_nehalem = {
.state_table = nehalem_cstates,
.auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE,
.disable_promotion_to_c1e = true,
};
+static const struct idle_cpu idle_cpu_nhx = {
+ .state_table = nehalem_cstates,
+ .auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE,
+ .disable_promotion_to_c1e = true,
+ .use_acpi = true,
+};
+
static const struct idle_cpu idle_cpu_atom = {
.state_table = atom_cstates,
};
.disable_promotion_to_c1e = true,
};
+static const struct idle_cpu idle_cpu_snx = {
+ .state_table = snb_cstates,
+ .disable_promotion_to_c1e = true,
+ .use_acpi = true,
+};
+
static const struct idle_cpu idle_cpu_byt = {
.state_table = byt_cstates,
.disable_promotion_to_c1e = true,
static const struct idle_cpu idle_cpu_ivt = {
.state_table = ivt_cstates,
.disable_promotion_to_c1e = true,
+ .use_acpi = true,
};
static const struct idle_cpu idle_cpu_hsw = {
.disable_promotion_to_c1e = true,
};
+static const struct idle_cpu idle_cpu_hsx = {
+ .state_table = hsw_cstates,
+ .disable_promotion_to_c1e = true,
+ .use_acpi = true,
+};
+
static const struct idle_cpu idle_cpu_bdw = {
.state_table = bdw_cstates,
.disable_promotion_to_c1e = true,
};
+static const struct idle_cpu idle_cpu_bdx = {
+ .state_table = bdw_cstates,
+ .disable_promotion_to_c1e = true,
+ .use_acpi = true,
+};
+
static const struct idle_cpu idle_cpu_skl = {
.state_table = skl_cstates,
.disable_promotion_to_c1e = true,
static const struct idle_cpu idle_cpu_skx = {
.state_table = skx_cstates,
.disable_promotion_to_c1e = true,
+ .use_acpi = true,
};
static const struct idle_cpu idle_cpu_avn = {
.state_table = avn_cstates,
.disable_promotion_to_c1e = true,
+ .use_acpi = true,
};
static const struct idle_cpu idle_cpu_knl = {
.state_table = knl_cstates,
+ .use_acpi = true,
};
static const struct idle_cpu idle_cpu_bxt = {
static const struct idle_cpu idle_cpu_dnv = {
.state_table = dnv_cstates,
.disable_promotion_to_c1e = true,
+ .use_acpi = true,
};
static const struct x86_cpu_id intel_idle_ids[] __initconst = {
- INTEL_CPU_FAM6(NEHALEM_EP, idle_cpu_nehalem),
+ INTEL_CPU_FAM6(NEHALEM_EP, idle_cpu_nhx),
INTEL_CPU_FAM6(NEHALEM, idle_cpu_nehalem),
INTEL_CPU_FAM6(NEHALEM_G, idle_cpu_nehalem),
INTEL_CPU_FAM6(WESTMERE, idle_cpu_nehalem),
- INTEL_CPU_FAM6(WESTMERE_EP, idle_cpu_nehalem),
- INTEL_CPU_FAM6(NEHALEM_EX, idle_cpu_nehalem),
+ INTEL_CPU_FAM6(WESTMERE_EP, idle_cpu_nhx),
+ INTEL_CPU_FAM6(NEHALEM_EX, idle_cpu_nhx),
INTEL_CPU_FAM6(ATOM_BONNELL, idle_cpu_atom),
INTEL_CPU_FAM6(ATOM_BONNELL_MID, idle_cpu_lincroft),
- INTEL_CPU_FAM6(WESTMERE_EX, idle_cpu_nehalem),
+ INTEL_CPU_FAM6(WESTMERE_EX, idle_cpu_nhx),
INTEL_CPU_FAM6(SANDYBRIDGE, idle_cpu_snb),
- INTEL_CPU_FAM6(SANDYBRIDGE_X, idle_cpu_snb),
+ INTEL_CPU_FAM6(SANDYBRIDGE_X, idle_cpu_snx),
INTEL_CPU_FAM6(ATOM_SALTWELL, idle_cpu_atom),
INTEL_CPU_FAM6(ATOM_SILVERMONT, idle_cpu_byt),
INTEL_CPU_FAM6(ATOM_SILVERMONT_MID, idle_cpu_tangier),
INTEL_CPU_FAM6(IVYBRIDGE, idle_cpu_ivb),
INTEL_CPU_FAM6(IVYBRIDGE_X, idle_cpu_ivt),
INTEL_CPU_FAM6(HASWELL, idle_cpu_hsw),
- INTEL_CPU_FAM6(HASWELL_X, idle_cpu_hsw),
+ INTEL_CPU_FAM6(HASWELL_X, idle_cpu_hsx),
INTEL_CPU_FAM6(HASWELL_L, idle_cpu_hsw),
INTEL_CPU_FAM6(HASWELL_G, idle_cpu_hsw),
INTEL_CPU_FAM6(ATOM_SILVERMONT_D, idle_cpu_avn),
INTEL_CPU_FAM6(BROADWELL, idle_cpu_bdw),
INTEL_CPU_FAM6(BROADWELL_G, idle_cpu_bdw),
- INTEL_CPU_FAM6(BROADWELL_X, idle_cpu_bdw),
- INTEL_CPU_FAM6(BROADWELL_D, idle_cpu_bdw),
+ INTEL_CPU_FAM6(BROADWELL_X, idle_cpu_bdx),
+ INTEL_CPU_FAM6(BROADWELL_D, idle_cpu_bdx),
INTEL_CPU_FAM6(SKYLAKE_L, idle_cpu_skl),
INTEL_CPU_FAM6(SKYLAKE, idle_cpu_skl),
INTEL_CPU_FAM6(KABYLAKE_L, idle_cpu_skl),
{}
};
-/*
- * intel_idle_probe()
+#define INTEL_CPU_FAM6_MWAIT \
+ { X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_MWAIT, 0 }
+
+static const struct x86_cpu_id intel_mwait_ids[] __initconst = {
+ INTEL_CPU_FAM6_MWAIT,
+ {}
+};
+
+static bool __init intel_idle_max_cstate_reached(int cstate)
+{
+ if (cstate + 1 > max_cstate) {
+ pr_info("max_cstate %d reached\n", max_cstate);
+ return true;
+ }
+ return false;
+}
+
+#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
+#include <acpi/processor.h>
+
+static bool no_acpi __read_mostly;
+module_param(no_acpi, bool, 0444);
+MODULE_PARM_DESC(no_acpi, "Do not use ACPI _CST for building the idle states list");
+
+static struct acpi_processor_power acpi_state_table __initdata;
+
+/**
+ * intel_idle_cst_usable - Check if the _CST information can be used.
+ *
+ * Check if all of the C-states listed by _CST in the max_cstate range are
+ * ACPI_CSTATE_FFH, which means that they should be entered via MWAIT.
*/
-static int __init intel_idle_probe(void)
+static bool __init intel_idle_cst_usable(void)
{
- unsigned int eax, ebx, ecx;
- const struct x86_cpu_id *id;
+ int cstate, limit;
- if (max_cstate == 0) {
- pr_debug("disabled\n");
- return -EPERM;
- }
+ limit = min_t(int, min_t(int, CPUIDLE_STATE_MAX, max_cstate + 1),
+ acpi_state_table.count);
- id = x86_match_cpu(intel_idle_ids);
- if (!id) {
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
- boot_cpu_data.x86 == 6)
- pr_debug("does not run on family %d model %d\n",
- boot_cpu_data.x86, boot_cpu_data.x86_model);
- return -ENODEV;
+ for (cstate = 1; cstate < limit; cstate++) {
+ struct acpi_processor_cx *cx = &acpi_state_table.states[cstate];
+
+ if (cx->entry_method != ACPI_CSTATE_FFH)
+ return false;
}
- if (!boot_cpu_has(X86_FEATURE_MWAIT)) {
- pr_debug("Please enable MWAIT in BIOS SETUP\n");
- return -ENODEV;
+ return true;
+}
+
+static bool __init intel_idle_acpi_cst_extract(void)
+{
+ unsigned int cpu;
+
+ if (no_acpi) {
+ pr_debug("Not allowed to use ACPI _CST\n");
+ return false;
}
- if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
- return -ENODEV;
+ for_each_possible_cpu(cpu) {
+ struct acpi_processor *pr = per_cpu(processors, cpu);
- cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
+ if (!pr)
+ continue;
- if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
- !(ecx & CPUID5_ECX_INTERRUPT_BREAK) ||
- !mwait_substates)
- return -ENODEV;
+ if (acpi_processor_evaluate_cst(pr->handle, cpu, &acpi_state_table))
+ continue;
- pr_debug("MWAIT substates: 0x%x\n", mwait_substates);
+ acpi_state_table.count++;
- icpu = (const struct idle_cpu *)id->driver_data;
- cpuidle_state_table = icpu->state_table;
+ if (!intel_idle_cst_usable())
+ continue;
- pr_debug("v" INTEL_IDLE_VERSION " model 0x%X\n",
- boot_cpu_data.x86_model);
+ if (!acpi_processor_claim_cst_control()) {
+ acpi_state_table.count = 0;
+ return false;
+ }
- return 0;
+ return true;
+ }
+
+ pr_debug("ACPI _CST not found or not usable\n");
+ return false;
}
-/*
- * intel_idle_cpuidle_devices_uninit()
- * Unregisters the cpuidle devices.
- */
-static void intel_idle_cpuidle_devices_uninit(void)
+static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv)
{
- int i;
- struct cpuidle_device *dev;
+ int cstate, limit = min_t(int, CPUIDLE_STATE_MAX, acpi_state_table.count);
+
+ /*
+ * If limit > 0, intel_idle_cst_usable() has returned 'true', so all of
+ * the interesting states are ACPI_CSTATE_FFH.
+ */
+ for (cstate = 1; cstate < limit; cstate++) {
+ struct acpi_processor_cx *cx;
+ struct cpuidle_state *state;
- for_each_online_cpu(i) {
- dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
- cpuidle_unregister_device(dev);
+ if (intel_idle_max_cstate_reached(cstate))
+ break;
+
+ cx = &acpi_state_table.states[cstate];
+
+ state = &drv->states[drv->state_count++];
+
+ snprintf(state->name, CPUIDLE_NAME_LEN, "C%d_ACPI", cstate);
+ strlcpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
+ state->exit_latency = cx->latency;
+ /*
+ * For C1-type C-states use the same number for both the exit
+ * latency and target residency, because that is the case for
+ * C1 in the majority of the static C-states tables above.
+ * For the other types of C-states, however, set the target
+ * residency to 3 times the exit latency which should lead to
+ * a reasonable balance between energy-efficiency and
+ * performance in the majority of interesting cases.
+ */
+ state->target_residency = cx->latency;
+ if (cx->type > ACPI_STATE_C1)
+ state->target_residency *= 3;
+
+ state->flags = MWAIT2flg(cx->address);
+ if (cx->type > ACPI_STATE_C2)
+ state->flags |= CPUIDLE_FLAG_TLB_FLUSHED;
+
+ state->enter = intel_idle;
+ state->enter_s2idle = intel_idle_s2idle;
}
}
+static bool __init intel_idle_off_by_default(u32 mwait_hint)
+{
+ int cstate, limit;
+
+ /*
+ * If there are no _CST C-states, do not disable any C-states by
+ * default.
+ */
+ if (!acpi_state_table.count)
+ return false;
+
+ limit = min_t(int, CPUIDLE_STATE_MAX, acpi_state_table.count);
+ /*
+ * If limit > 0, intel_idle_cst_usable() has returned 'true', so all of
+ * the interesting states are ACPI_CSTATE_FFH.
+ */
+ for (cstate = 1; cstate < limit; cstate++) {
+ if (acpi_state_table.states[cstate].address == mwait_hint)
+ return false;
+ }
+ return true;
+}
+#else /* !CONFIG_ACPI_PROCESSOR_CSTATE */
+static inline bool intel_idle_acpi_cst_extract(void) { return false; }
+static inline void intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) { }
+static inline bool intel_idle_off_by_default(u32 mwait_hint) { return false; }
+#endif /* !CONFIG_ACPI_PROCESSOR_CSTATE */
+
/*
* ivt_idle_state_table_update(void)
*
* Tune IVT multi-socket targets
* Assumption: num_sockets == (max_package_num + 1)
*/
-static void ivt_idle_state_table_update(void)
+static void __init ivt_idle_state_table_update(void)
{
/* IVT uses a different table for 1-2, 3-4, and > 4 sockets */
int cpu, package_num, num_sockets = 1;
/* else, 1 and 2 socket systems use default ivt_cstates */
}
-/*
- * Translate IRTL (Interrupt Response Time Limit) MSR to usec
+/**
+ * irtl_2_usec - IRTL to microseconds conversion.
+ * @irtl: IRTL MSR value.
+ *
+ * Translate the IRTL (Interrupt Response Time Limit) MSR value to microseconds.
*/
-
-static unsigned int irtl_ns_units[] = {
- 1, 32, 1024, 32768, 1048576, 33554432, 0, 0 };
-
-static unsigned long long irtl_2_usec(unsigned long long irtl)
+static unsigned long long __init irtl_2_usec(unsigned long long irtl)
{
+ static const unsigned int irtl_ns_units[] __initconst = {
+ 1, 32, 1024, 32768, 1048576, 33554432, 0, 0
+ };
unsigned long long ns;
if (!irtl)
ns = irtl_ns_units[(irtl >> 10) & 0x7];
- return div64_u64((irtl & 0x3FF) * ns, 1000);
+ return div_u64((irtl & 0x3FF) * ns, NSEC_PER_USEC);
}
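/*
 * Worked example (illustrative value): for irtl = 0x883 the time-unit
 * field (bits 12:10) is 2, selecting 1024 ns per unit, and the value
 * field (bits 9:0) is 0x83 = 131, so the limit is 131 * 1024 ns =
 * 134144 ns, which div_u64() turns into 134 usec.
 */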
+
/*
* bxt_idle_state_table_update(void)
*
* On BXT, we trust the IRTL to show the definitive maximum latency
* We use the same value for target_residency.
*/
-static void bxt_idle_state_table_update(void)
+static void __init bxt_idle_state_table_update(void)
{
unsigned long long msr;
unsigned int usec;
* On SKL-H (model 0x5e) disable C8 and C9 if:
* C10 is enabled and SGX disabled
*/
-static void sklh_idle_state_table_update(void)
+static void __init sklh_idle_state_table_update(void)
{
unsigned long long msr;
unsigned int eax, ebx, ecx, edx;
skl_cstates[5].flags |= CPUIDLE_FLAG_UNUSABLE; /* C8-SKL */
skl_cstates[6].flags |= CPUIDLE_FLAG_UNUSABLE; /* C9-SKL */
}
-/*
- * intel_idle_state_table_update()
- *
- * Update the default state_table for this CPU-id
- */
-static void intel_idle_state_table_update(void)
+static bool __init intel_idle_verify_cstate(unsigned int mwait_hint)
{
- switch (boot_cpu_data.x86_model) {
+ unsigned int mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint) + 1;
+ unsigned int num_substates = (mwait_substates >> mwait_cstate * 4) &
+ MWAIT_SUBSTATE_MASK;
+
+ /* Ignore the C-state if there are NO sub-states in CPUID for it. */
+ if (num_substates == 0)
+ return false;
+
+ if (mwait_cstate > 2 && !boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
+ mark_tsc_unstable("TSC halts in idle states deeper than C2");
+ return true;
+}
+
+static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
+{
+ int cstate;
+
+ switch (boot_cpu_data.x86_model) {
case INTEL_FAM6_IVYBRIDGE_X:
ivt_idle_state_table_update();
break;
sklh_idle_state_table_update();
break;
}
-}
-
-/*
- * intel_idle_cpuidle_driver_init()
- * allocate, initialize cpuidle_states
- */
-static void __init intel_idle_cpuidle_driver_init(void)
-{
- int cstate;
- struct cpuidle_driver *drv = &intel_idle_driver;
-
- intel_idle_state_table_update();
-
- cpuidle_poll_state_init(drv);
- drv->state_count = 1;
for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) {
- int num_substates, mwait_hint, mwait_cstate;
+ unsigned int mwait_hint;
- if ((cpuidle_state_table[cstate].enter == NULL) &&
- (cpuidle_state_table[cstate].enter_s2idle == NULL))
+ if (intel_idle_max_cstate_reached(cstate))
break;
- if (cstate + 1 > max_cstate) {
- pr_info("max_cstate %d reached\n", max_cstate);
+ if (!cpuidle_state_table[cstate].enter &&
+ !cpuidle_state_table[cstate].enter_s2idle)
break;
- }
-
- mwait_hint = flg2MWAIT(cpuidle_state_table[cstate].flags);
- mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint);
-
- /* number of sub-states for this state in CPUID.MWAIT */
- num_substates = (mwait_substates >> ((mwait_cstate + 1) * 4))
- & MWAIT_SUBSTATE_MASK;
-
- /* if NO sub-states for this state in CPUID, skip it */
- if (num_substates == 0)
- continue;
- /* if state marked as disabled, skip it */
+ /* If marked as unusable, skip this state. */
if (cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_UNUSABLE) {
pr_debug("state %s is disabled\n",
cpuidle_state_table[cstate].name);
continue;
}
+ mwait_hint = flg2MWAIT(cpuidle_state_table[cstate].flags);
+ if (!intel_idle_verify_cstate(mwait_hint))
+ continue;
- if (((mwait_cstate + 1) > 2) &&
- !boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
- mark_tsc_unstable("TSC halts in idle"
- " states deeper than C2");
+ /* Structure copy. */
+ drv->states[drv->state_count] = cpuidle_state_table[cstate];
- drv->states[drv->state_count] = /* structure copy */
- cpuidle_state_table[cstate];
+ if (icpu->use_acpi && intel_idle_off_by_default(mwait_hint) &&
+ !(cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_ALWAYS_ENABLE))
+ drv->states[drv->state_count].flags |= CPUIDLE_FLAG_OFF;
- drv->state_count += 1;
+ drv->state_count++;
}
if (icpu->byt_auto_demotion_disable_flag) {
}
}
+/*
+ * intel_idle_cpuidle_driver_init()
+ * allocate, initialize cpuidle_states
+ */
+static void __init intel_idle_cpuidle_driver_init(struct cpuidle_driver *drv)
+{
+ cpuidle_poll_state_init(drv);
+ drv->state_count = 1;
+
+ if (icpu)
+ intel_idle_init_cstates_icpu(drv);
+ else
+ intel_idle_init_cstates_acpi(drv);
+}
+
+static void auto_demotion_disable(void)
+{
+ unsigned long long msr_bits;
+
+ rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
+ msr_bits &= ~(icpu->auto_demotion_disable_flags);
+ wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
+}
+
+static void c1e_promotion_disable(void)
+{
+ unsigned long long msr_bits;
+
+ rdmsrl(MSR_IA32_POWER_CTL, msr_bits);
+ msr_bits &= ~0x2;
+ wrmsrl(MSR_IA32_POWER_CTL, msr_bits);
+}
/*
* intel_idle_cpu_init()
return -EIO;
}
+ if (!icpu)
+ return 0;
+
if (icpu->auto_demotion_disable_flags)
auto_demotion_disable();
struct cpuidle_device *dev;
if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
- __setup_broadcast_timer(true);
+ tick_broadcast_enable();
/*
* Some systems can hotplug a cpu at runtime after
return 0;
}
+/**
+ * intel_idle_cpuidle_devices_uninit - Unregister all cpuidle devices.
+ */
+static void __init intel_idle_cpuidle_devices_uninit(void)
+{
+ int i;
+
+ for_each_online_cpu(i)
+ cpuidle_unregister_device(per_cpu_ptr(intel_idle_cpuidle_devices, i));
+}
+
static int __init intel_idle_init(void)
{
+ const struct x86_cpu_id *id;
+ unsigned int eax, ebx, ecx;
int retval;
/* Do not load intel_idle at all for now if idle= is passed */
if (boot_option_idle_override != IDLE_NO_OVERRIDE)
return -ENODEV;
- retval = intel_idle_probe();
- if (retval)
- return retval;
+ if (max_cstate == 0) {
+ pr_debug("disabled\n");
+ return -EPERM;
+ }
+
+ id = x86_match_cpu(intel_idle_ids);
+ if (id) {
+ if (!boot_cpu_has(X86_FEATURE_MWAIT)) {
+ pr_debug("Please enable MWAIT in BIOS SETUP\n");
+ return -ENODEV;
+ }
+ } else {
+ id = x86_match_cpu(intel_mwait_ids);
+ if (!id)
+ return -ENODEV;
+ }
+
+ if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
+ return -ENODEV;
+
+ cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
+
+ if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
+ !(ecx & CPUID5_ECX_INTERRUPT_BREAK) ||
+ !mwait_substates)
+ return -ENODEV;
+
+ pr_debug("MWAIT substates: 0x%x\n", mwait_substates);
+
+ icpu = (const struct idle_cpu *)id->driver_data;
+ if (icpu) {
+ cpuidle_state_table = icpu->state_table;
+ if (icpu->use_acpi)
+ intel_idle_acpi_cst_extract();
+ } else if (!intel_idle_acpi_cst_extract()) {
+ return -ENODEV;
+ }
+
+ pr_debug("v" INTEL_IDLE_VERSION " model 0x%X\n",
+ boot_cpu_data.x86_model);
intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
- if (intel_idle_cpuidle_devices == NULL)
+ if (!intel_idle_cpuidle_devices)
return -ENOMEM;
- intel_idle_cpuidle_driver_init();
+ intel_idle_cpuidle_driver_init(&intel_idle_driver);
+
retval = cpuidle_register_driver(&intel_idle_driver);
if (retval) {
struct cpuidle_driver *drv = cpuidle_get_driver();
st->channel_config[channel].buf_negative =
of_property_read_bool(child, "adi,buffered-negative");
- *chan = ad7124_channel_template;
- chan->address = channel;
- chan->scan_index = channel;
- chan->channel = ain[0];
- chan->channel2 = ain[1];
-
- chan++;
+ chan[channel] = ad7124_channel_template;
+ chan[channel].address = channel;
+ chan[channel].scan_index = channel;
+ chan[channel].channel = ain[0];
+ chan[channel].channel2 = ain[1];
}
return 0;
config PMS7003
tristate "Plantower PMS7003 particulate matter sensor"
depends on SERIAL_DEV_BUS
+ select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
Say Y here to build support for the Plantower PMS7003 particulate
for (i = 0; i < ARRAY_SIZE(st_lsm6dsx_sensor_settings); i++) {
for (j = 0; j < ST_LSM6DSX_MAX_ID; j++) {
- if (id == st_lsm6dsx_sensor_settings[i].id[j].hw_id)
+ if (st_lsm6dsx_sensor_settings[i].id[j].name &&
+ id == st_lsm6dsx_sensor_settings[i].id[j].hw_id)
break;
}
if (j < ST_LSM6DSX_MAX_ID)
const unsigned long *mask, bool timestamp)
{
unsigned bytes = 0;
- int length, i;
+ int length, i, largest = 0;
/* How much space will the demuxed element take? */
for_each_set_bit(i, mask,
length = iio_storage_bytes_for_si(indio_dev, i);
bytes = ALIGN(bytes, length);
bytes += length;
+ largest = max(largest, length);
}
if (timestamp) {
length = iio_storage_bytes_for_timestamp(indio_dev);
bytes = ALIGN(bytes, length);
bytes += length;
+ largest = max(largest, length);
}
+
+ bytes = ALIGN(bytes, largest);
return bytes;
}
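/*
 * Worked example (hypothetical scan): a 2-byte channel followed by an
 * 8-byte channel and an 8-byte timestamp gives ALIGN(0, 2) + 2 = 2,
 * then ALIGN(2, 8) + 8 = 16, then ALIGN(16, 8) + 8 = 24; the final
 * ALIGN(bytes, largest) rounds the element size up to a multiple of its
 * largest member so that repeated scans in the buffer stay aligned.
 */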
if (ret < 0)
return ret;
- data->al_scale = 24000;
data->vcnl4200_al.reg = VCNL4200_AL_DATA;
data->vcnl4200_ps.reg = VCNL4200_PS_DATA;
switch (id) {
/* show 54ms in total. */
data->vcnl4200_al.sampling_rate = ktime_set(0, 54000 * 1000);
data->vcnl4200_ps.sampling_rate = ktime_set(0, 4200 * 1000);
+ data->al_scale = 24000;
break;
case VCNL4040_PROD_ID:
/* Integration time is 80ms, add 10ms. */
data->vcnl4200_al.sampling_rate = ktime_set(0, 100000 * 1000);
data->vcnl4200_ps.sampling_rate = ktime_set(0, 100000 * 1000);
+ data->al_scale = 120000;
break;
}
data->vcnl4200_al.last_measurement = ktime_set(0, 0);
int rc;
rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
- if (rc)
+ if (rc) {
dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
+ return rc;
+ }
if (mr->pages) {
rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
/* Add qp to flush list of the CQ */
bnxt_qplib_add_flush_qp(qp);
} else {
+ /* Before we complete, do WA 9060 */
+ if (do_wa9060(qp, cq, cq_cons, sw_sq_cons,
+ cqe_sq_cons)) {
+ *lib_qp = qp;
+ goto out;
+ }
if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
- /* Before we complete, do WA 9060 */
- if (do_wa9060(qp, cq, cq_cons, sw_sq_cons,
- cqe_sq_cons)) {
- *lib_qp = qp;
- goto out;
- }
cqe->status = CQ_REQ_STATUS_OK;
cqe++;
(*budget)--;
void iowait_cancel_work(struct iowait *w)
{
cancel_work_sync(&iowait_get_ib_work(w)->iowork);
- cancel_work_sync(&iowait_get_tid_work(w)->iowork);
+	/* Only cancel the TID RDMA iowork if it has been initialized */
+ if (iowait_get_tid_work(w)->iowork.func)
+ cancel_work_sync(&iowait_get_tid_work(w)->iowork);
}
/**
*/
fpsn = full_flow_psn(flow, flow->flow_state.spsn);
req->r_ack_psn = psn;
+ /*
+ * If resync_psn points to the last flow PSN for a
+ * segment and the new segment (likely from a new
+ * request) starts with a new generation number, we
+ * need to adjust resync_psn accordingly.
+ */
+ if (flow->flow_state.generation !=
+ (resync_psn >> HFI1_KDETH_BTH_SEQ_SHIFT))
+ resync_psn = mask_psn(fpsn - 1);
flow->resync_npkts +=
delta_psn(mask_psn(resync_psn + 1), fpsn);
/*
static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
struct i40iw_ucontext *ucontext;
- u64 db_addr_offset;
- u64 push_offset;
+ u64 db_addr_offset, push_offset, pfn;
ucontext = to_ucontext(context);
if (ucontext->iwdev->sc_dev.is_pf) {
if (vma->vm_pgoff == (db_addr_offset >> PAGE_SHIFT)) {
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- vma->vm_private_data = ucontext;
} else {
if ((vma->vm_pgoff - (push_offset >> PAGE_SHIFT)) % 2)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
}
- if (io_remap_pfn_range(vma, vma->vm_start,
- vma->vm_pgoff + (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >> PAGE_SHIFT),
- PAGE_SIZE, vma->vm_page_prot))
- return -EAGAIN;
+ pfn = vma->vm_pgoff +
+ (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >>
+ PAGE_SHIFT);
- return 0;
+ return rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
+ vma->vm_page_prot, NULL);
}
/**
}
}
-static void
-isert_wait4cmds(struct iscsi_conn *conn)
-{
- isert_info("iscsi_conn %p\n", conn);
-
- if (conn->sess) {
- target_sess_cmd_list_set_waiting(conn->sess->se_sess);
- target_wait_for_sess_cmds(conn->sess->se_sess);
- }
-}
-
/**
* isert_put_unsol_pending_cmds() - Drop commands waiting for
 * unsolicited dataout
ib_drain_qp(isert_conn->qp);
isert_put_unsol_pending_cmds(conn);
- isert_wait4cmds(conn);
isert_wait4logout(isert_conn);
queue_work(isert_release_wq, &isert_conn->release_work);
*/
client->tail = (client->head - 2) & (client->bufsize - 1);
- client->buffer[client->tail].input_event_sec =
- event->input_event_sec;
- client->buffer[client->tail].input_event_usec =
- event->input_event_usec;
- client->buffer[client->tail].type = EV_SYN;
- client->buffer[client->tail].code = SYN_DROPPED;
- client->buffer[client->tail].value = 0;
+ client->buffer[client->tail] = (struct input_event) {
+ .input_event_sec = event->input_event_sec,
+ .input_event_usec = event->input_event_usec,
+ .type = EV_SYN,
+ .code = SYN_DROPPED,
+ .value = 0,
+ };
client->packet_head = client->tail;
}
struct evdev_client *client;
int error;
- client = kzalloc(struct_size(client, buffer, bufsize),
- GFP_KERNEL | __GFP_NOWARN);
- if (!client)
- client = vzalloc(struct_size(client, buffer, bufsize));
+ client = kvzalloc(struct_size(client, buffer, bufsize), GFP_KERNEL);
if (!client)
return -ENOMEM;
}
}
- __clear_bit(*old_keycode, dev->keybit);
- __set_bit(ke->keycode, dev->keybit);
-
- for (i = 0; i < dev->keycodemax; i++) {
- if (input_fetch_keycode(dev, i) == *old_keycode) {
- __set_bit(*old_keycode, dev->keybit);
- break; /* Setting the bit twice is useless, so break */
+ if (*old_keycode <= KEY_MAX) {
+ __clear_bit(*old_keycode, dev->keybit);
+ for (i = 0; i < dev->keycodemax; i++) {
+ if (input_fetch_keycode(dev, i) == *old_keycode) {
+ __set_bit(*old_keycode, dev->keybit);
+ /* Setting the bit twice is useless, so break */
+ break;
+ }
}
}
+ __set_bit(ke->keycode, dev->keybit);
return 0;
}
* Simulate keyup event if keycode is not present
* in the keymap anymore
*/
- if (test_bit(EV_KEY, dev->evbit) &&
- !is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
- __test_and_clear_bit(old_keycode, dev->key)) {
+ if (old_keycode > KEY_MAX) {
+ dev_warn(dev->dev.parent ?: &dev->dev,
+ "%s: got too big old keycode %#x\n",
+ __func__, old_keycode);
+ } else if (test_bit(EV_KEY, dev->evbit) &&
+ !is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
+ __test_and_clear_bit(old_keycode, dev->key)) {
struct input_value vals[] = {
{ EV_KEY, old_keycode, 0 },
input_value_sync
return;
}
- state = (bool)msg.state;
+ /*
+	 * The response data from the SCU firmware is 4 bytes, but
+	 * ONLY the first byte is the key state; the other 3 bytes
+	 * could be garbage, so we should ONLY take the first byte
+	 * as the key state.
+ */
+ state = (bool)(msg.state & 0xff);
if (state ^ priv->keystate) {
priv->keystate = state;
int retval = 0;
retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
- 0x11, 0x40, 0x5601, 0x0, NULL, 0, 0);
+ 0x11, 0x40, 0x5601, 0x0, NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
if (retval) {
dev_dbg(&dev->dev, "%s - failed to set bit rate due to error: %d\n",
__func__, retval);
}
retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
- 0x44, 0x40, 0x0, 0x0, NULL, 0, 0);
+ 0x44, 0x40, 0x0, 0x0, NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
if (retval) {
dev_dbg(&dev->dev, "%s - failed to set resume sensitivity due to error: %d\n",
__func__, retval);
}
retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
- 0x22, 0x40, 0x0, 0x0, NULL, 0, 0);
+ 0x22, 0x40, 0x0, 0x0, NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
if (retval) {
dev_dbg(&dev->dev, "%s - failed to turn receive on due to error: %d\n",
__func__, retval);
return input_register_device(onkey->input);
}
+static const struct of_device_id max77650_onkey_of_match[] = {
+ { .compatible = "maxim,max77650-onkey" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max77650_onkey_of_match);
+
static struct platform_driver max77650_onkey_driver = {
.driver = {
.name = "max77650-onkey",
+ .of_match_table = max77650_onkey_of_match,
},
.probe = max77650_onkey_probe,
};
if (regs->enable_mask)
rc = regmap_update_bits(vib->regmap, regs->enable_addr,
- on ? regs->enable_mask : 0, val);
+ regs->enable_mask, on ? ~0 : 0);
return rc;
}
struct uinput_device *udev = input_get_drvdata(dev);
struct timespec64 ts;
- udev->buff[udev->head].type = type;
- udev->buff[udev->head].code = code;
- udev->buff[udev->head].value = value;
ktime_get_ts64(&ts);
- udev->buff[udev->head].input_event_sec = ts.tv_sec;
- udev->buff[udev->head].input_event_usec = ts.tv_nsec / NSEC_PER_USEC;
+
+ udev->buff[udev->head] = (struct input_event) {
+ .input_event_sec = ts.tv_sec,
+ .input_event_usec = ts.tv_nsec / NSEC_PER_USEC,
+ .type = type,
+ .code = code,
+ .value = value,
+ };
+
udev->head = (udev->head + 1) % UINPUT_BUFFER_SIZE;
wake_up_interruptible(&udev->waitq);
static __poll_t uinput_poll(struct file *file, poll_table *wait)
{
struct uinput_device *udev = file->private_data;
+ __poll_t mask = EPOLLOUT | EPOLLWRNORM; /* uinput is always writable */
poll_wait(file, &udev->waitq, wait);
if (udev->head != udev->tail)
- return EPOLLIN | EPOLLRDNORM;
+ mask |= EPOLLIN | EPOLLRDNORM;
- return EPOLLOUT | EPOLLWRNORM;
+ return mask;
}
static int uinput_release(struct inode *inode, struct file *file)
#define F54_NUM_TX_OFFSET 1
#define F54_NUM_RX_OFFSET 0
+/*
+ * The SMBus protocol can read at most 32 bytes at a time,
+ * but this should be fine for i2c/spi as well.
+ */
+#define F54_REPORT_DATA_SIZE 32
+
/* F54 commands */
#define F54_GET_REPORT 1
#define F54_FORCE_CAL 2
int report_size;
u8 command;
int error;
+ int i;
report_size = rmi_f54_get_report_size(f54);
if (report_size == 0) {
rmi_dbg(RMI_DEBUG_FN, &fn->dev, "Get report command completed, reading data\n");
- fifo[0] = 0;
- fifo[1] = 0;
- error = rmi_write_block(fn->rmi_dev,
- fn->fd.data_base_addr + F54_FIFO_OFFSET,
- fifo, sizeof(fifo));
- if (error) {
- dev_err(&fn->dev, "Failed to set fifo start offset\n");
- goto abort;
- }
+ for (i = 0; i < report_size; i += F54_REPORT_DATA_SIZE) {
+ int size = min(F54_REPORT_DATA_SIZE, report_size - i);
+
+ fifo[0] = i & 0xff;
+ fifo[1] = i >> 8;
+ error = rmi_write_block(fn->rmi_dev,
+ fn->fd.data_base_addr + F54_FIFO_OFFSET,
+ fifo, sizeof(fifo));
+ if (error) {
+ dev_err(&fn->dev, "Failed to set fifo start offset\n");
+ goto abort;
+ }
- error = rmi_read_block(fn->rmi_dev, fn->fd.data_base_addr +
- F54_REPORT_DATA_OFFSET, f54->report_data,
- report_size);
- if (error) {
- dev_err(&fn->dev, "%s: read [%d bytes] returned %d\n",
- __func__, report_size, error);
- goto abort;
+ error = rmi_read_block(fn->rmi_dev, fn->fd.data_base_addr +
+ F54_REPORT_DATA_OFFSET,
+ f54->report_data + i, size);
+ if (error) {
+ dev_err(&fn->dev, "%s: read [%d bytes] returned %d\n",
+ __func__, size, error);
+ goto abort;
+ }
}
abort:
/* prepare to write next block of bytes */
cur_len -= SMB_MAX_COUNT;
databuff += SMB_MAX_COUNT;
+ rmiaddr += SMB_MAX_COUNT;
}
exit:
mutex_unlock(&rmi_smb->page_mutex);
/* prepare to read next block of bytes */
cur_len -= SMB_MAX_COUNT;
databuff += SMB_MAX_COUNT;
+ rmiaddr += SMB_MAX_COUNT;
}
retval = 0;
aiptek->inputdev = inputdev;
aiptek->intf = intf;
- aiptek->ifnum = intf->altsetting[0].desc.bInterfaceNumber;
+ aiptek->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
aiptek->inDelay = 0;
aiptek->endDelay = 0;
aiptek->previousJitterable = 0;
input_set_abs_params(inputdev, ABS_WHEEL, AIPTEK_WHEEL_MIN, AIPTEK_WHEEL_MAX - 1, 0, 0);
/* Verify that a device really has an endpoint */
- if (intf->altsetting[0].desc.bNumEndpoints < 1) {
+ if (intf->cur_altsetting->desc.bNumEndpoints < 1) {
dev_err(&intf->dev,
"interface has %d endpoints, but must have minimum 1\n",
- intf->altsetting[0].desc.bNumEndpoints);
+ intf->cur_altsetting->desc.bNumEndpoints);
err = -EINVAL;
goto fail3;
}
- endpoint = &intf->altsetting[0].endpoint[0].desc;
+ endpoint = &intf->cur_altsetting->endpoint[0].desc;
/* Go set up our URB, which is called when the tablet receives
* input.
}
/* Sanity check that a device has an endpoint */
- if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) {
+ if (usbinterface->cur_altsetting->desc.bNumEndpoints < 1) {
dev_err(&usbinterface->dev,
"Invalid number of endpoints\n");
error = -EINVAL;
goto err_free_urb;
}
- /*
- * The endpoint is always altsetting 0, we know this since we know
- * this device only has one interrupt endpoint
- */
- endpoint = &usbinterface->altsetting[0].endpoint[0].desc;
+ endpoint = &usbinterface->cur_altsetting->endpoint[0].desc;
/* Some debug */
dev_dbg(&usbinterface->dev, "gtco # interfaces: %d\n", usbinterface->num_altsetting);
if (usb_endpoint_xfer_int(endpoint))
dev_dbg(&usbinterface->dev, "endpoint: we have interrupt endpoint\n");
- dev_dbg(&usbinterface->dev, "endpoint extra len:%d\n", usbinterface->altsetting[0].extralen);
+ dev_dbg(&usbinterface->dev, "interface extra len:%d\n",
+ usbinterface->cur_altsetting->extralen);
/*
* Find the HID descriptor so we can find out the size of the
input_dev->dev.parent = &usbinterface->dev;
/* Setup the URB, it will be posted later on open of input device */
- endpoint = &usbinterface->altsetting[0].endpoint[0].desc;
-
usb_fill_int_urb(gtco->urbinfo,
udev,
usb_rcvintpipe(udev,
return -ENODEV;
/* Sanity check that the device has an endpoint */
- if (intf->altsetting[0].desc.bNumEndpoints < 1) {
+ if (intf->cur_altsetting->desc.bNumEndpoints < 1) {
dev_err(&intf->dev, "Invalid number of endpoints\n");
return -EINVAL;
}
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct device *hwmon;
+ struct thermal_zone_device *thermal;
int error;
u32 reg;
bool ts_attached;
if (IS_ERR(hwmon))
return PTR_ERR(hwmon);
- devm_thermal_zone_of_sensor_register(ts->dev, 0, ts, &sun4i_ts_tz_ops);
+ thermal = devm_thermal_zone_of_sensor_register(ts->dev, 0, ts,
+ &sun4i_ts_tz_ops);
+ if (IS_ERR(thermal))
+ return PTR_ERR(thermal);
writel(TEMP_IRQ_EN(1), ts->base + TP_INT_FIFOC);
int error;
/* Check if we really have the right interface. */
- iface_desc = &interface->altsetting[0];
+ iface_desc = interface->cur_altsetting;
if (iface_desc->desc.bInterfaceClass != 0xFF)
return -ENODEV;
static void init_iommu_perf_ctr(struct amd_iommu *iommu)
{
struct pci_dev *pdev = iommu->dev;
- u64 val = 0xabcd, val2 = 0;
+ u64 val = 0xabcd, val2 = 0, save_reg = 0;
if (!iommu_feature(iommu, FEATURE_PC))
return;
amd_iommu_pc_present = true;
+ /* save the value to restore, if writable */
+ if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false))
+ goto pc_false;
+
/* Check if the performance counters can be written to */
if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) ||
(iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) ||
- (val != val2)) {
- pci_err(pdev, "Unable to write to IOMMU perf counter.\n");
- amd_iommu_pc_present = false;
- return;
- }
+ (val != val2))
+ goto pc_false;
+
+ /* restore */
+ if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true))
+ goto pc_false;
pci_info(pdev, "IOMMU performance counters supported\n");
val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
iommu->max_banks = (u8) ((val >> 12) & 0x3f);
iommu->max_counters = (u8) ((val >> 7) & 0xf);
+
+ return;
+
+pc_false:
+ pci_err(pdev, "Unable to read/write to IOMMU perf counter.\n");
+ amd_iommu_pc_present = false;
+ return;
}
static ssize_t amd_iommu_show_cap(struct device *dev,
{
struct device *dev = msi_desc_to_dev(desc);
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
- struct iommu_dma_cookie *cookie;
struct iommu_dma_msi_page *msi_page;
static DEFINE_MUTEX(msi_prepare_lock); /* see below */
return 0;
}
- cookie = domain->iova_cookie;
-
/*
* In fact the whole prepare operation should already be serialised by
* irq_domain_mutex further up the callchain, but that's pretty subtle
spin_lock_irqsave(&device_domain_lock, flags);
info = dev->archdata.iommu;
- if (info)
+ if (info && info != DEFER_DEVICE_DOMAIN_INFO
+ && info != DUMMY_DEVICE_DOMAIN_INFO)
__dmar_remove_one_dev_info(info);
spin_unlock_irqrestore(&device_domain_lock, flags);
}
group = iommu_group_get_for_dev(dev);
- if (IS_ERR(group))
- return PTR_ERR(group);
+ if (IS_ERR(group)) {
+ ret = PTR_ERR(group);
+ goto unlink;
+ }
iommu_group_put(group);
if (!get_private_domain_for_dev(dev)) {
dev_warn(dev,
"Failed to get a private domain.\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto unlink;
}
dev_info(dev,
}
return 0;
+
+unlink:
+ iommu_device_unlink(&iommu->iommu, dev);
+ return ret;
}
static void intel_iommu_remove_device(struct device *dev)
WARN_ON_ONCE(!reserve_iova(&dmar_domain->iovad, start, end));
}
+static struct iommu_group *intel_iommu_device_group(struct device *dev)
+{
+ if (dev_is_pci(dev))
+ return pci_device_group(dev);
+ return generic_device_group(dev);
+}
+
#ifdef CONFIG_INTEL_IOMMU_SVM
struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
{
.get_resv_regions = intel_iommu_get_resv_regions,
.put_resv_regions = intel_iommu_put_resv_regions,
.apply_resv_region = intel_iommu_apply_resv_region,
- .device_group = pci_device_group,
+ .device_group = intel_iommu_device_group,
.dev_has_feat = intel_iommu_dev_has_feat,
.dev_feat_enabled = intel_iommu_dev_feat_enabled,
.dev_enable_feat = intel_iommu_dev_enable_feat,
mutex_unlock(&group->mutex);
dev->iommu_group = NULL;
kobject_put(group->devices_kobj);
+ sysfs_remove_link(group->devices_kobj, device->name);
err_free_name:
kfree(device->name);
err_remove_link:
#include <linux/delay.h>
#include <asm/io.h>
-#include <asm/mach-jz4740/irq.h>
struct ingenic_intc_data {
void __iomem *base;
while (pending) {
int bit = __fls(pending);
- irq = irq_find_mapping(domain, bit + (i * 32));
+ irq = irq_linear_revmap(domain, bit + (i * 32));
generic_handle_irq(irq);
pending &= ~BIT(bit);
}
goto out_unmap_irq;
}
- domain = irq_domain_add_legacy(node, num_chips * 32,
- JZ4740_IRQ_BASE, 0,
+ domain = irq_domain_add_linear(node, num_chips * 32,
&irq_generic_chip_ops, NULL);
if (!domain) {
err = -ENOMEM;
switch (id) {
case AS_LED_FLASH:
flash->flash_node = child;
+ fwnode_handle_get(child);
break;
case AS_LED_INDICATOR:
flash->indicator_node = child;
+ fwnode_handle_get(child);
break;
default:
dev_warn(&flash->client->dev,
"unknown LED %u encountered, ignoring\n", id);
break;
}
- fwnode_handle_get(child);
}
if (!flash->flash_node) {
struct gpio_led led = {};
const char *state = NULL;
+ /*
+		 * Acquire the gpiod from DT with an uninitialized label, which
+		 * will be updated after the LED class device is registered.
+		 * Only then is the final LED name known.
+ */
led.gpiod = devm_fwnode_get_gpiod_from_child(dev, NULL, child,
GPIOD_ASIS,
- led.name);
+ NULL);
if (IS_ERR(led.gpiod)) {
fwnode_handle_put(child);
return ERR_CAST(led.gpiod);
fwnode_handle_put(child);
return ERR_PTR(ret);
}
+ /* Set gpiod label to match the corresponding LED name. */
+ gpiod_set_consumer_name(led_dat->gpiod,
+ led_dat->cdev.dev->kobj.name);
priv->num_leds++;
}
// SPDX-License-Identifier: GPL-2.0
// TI LM3532 LED driver
// Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+// http://www.ti.com/lit/ds/symlink/lm3532.pdf
#include <linux/i2c.h>
#include <linux/leds.h>
led->num_leds = fwnode_property_count_u32(child, "led-sources");
if (led->num_leds > LM3532_MAX_LED_STRINGS) {
- dev_err(&priv->client->dev, "To many LED string defined\n");
+		dev_err(&priv->client->dev, "Too many LED strings defined\n");
continue;
}
return rv;
}
+static const struct of_device_id max77650_led_of_match[] = {
+ { .compatible = "maxim,max77650-led" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max77650_led_of_match);
+
static struct platform_driver max77650_led_driver = {
.driver = {
.name = "max77650-led",
+ .of_match_table = max77650_led_of_match,
},
.probe = max77650_led_probe,
};
{
if (brightness)
set_latch_u5(LO_ULED, 0);
-
else
set_latch_u5(0, LO_ULED);
}
module_init(pattern_trig_init);
module_exit(pattern_trig_exit);
-MODULE_AUTHOR("Raphael Teysseyre <rteysseyre@gmail.com");
-MODULE_AUTHOR("Baolin Wang <baolin.wang@linaro.org");
+MODULE_AUTHOR("Raphael Teysseyre <rteysseyre@gmail.com>");
+MODULE_AUTHOR("Baolin Wang <baolin.wang@linaro.org>");
MODULE_DESCRIPTION("LED Pattern trigger");
MODULE_LICENSE("GPL v2");
struct block_device *bdev;
struct cache_sb sb;
+ struct cache_sb_disk *sb_disk;
struct bio sb_bio;
struct bio_vec sb_bv[1];
struct closure sb_write;
struct cache {
struct cache_set *set;
struct cache_sb sb;
+ struct cache_sb_disk *sb_disk;
struct bio sb_bio;
struct bio_vec sb_bv[1];
* Our temporary buffer is the same size as the btree node's
* buffer, we can just swap buffers instead of doing a big
* memcpy()
+ *
+	 * Don't worry even if 'out' is allocated from the mempool, it
+	 * can still be swapped here, because state->pool is a page
+	 * mempool created by mempool_init_page_pool(), which allocates
+	 * pages with alloc_pages().
*/
out->magic = b->set->data->magic;
i = 0;
btree_cache_used = c->btree_cache_used;
- list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) {
+ list_for_each_entry_safe_reverse(b, t, &c->btree_cache_freeable, list) {
if (nr <= 0)
goto out;
- if (++i > 3 &&
- !mca_reap(b, 0, false)) {
+ if (!mca_reap(b, 0, false)) {
mca_data_free(b);
rw_unlock(true, b);
freed++;
}
nr--;
+ i++;
}
- for (; (nr--) && i < btree_cache_used; i++) {
- if (list_empty(&c->btree_cache))
+ list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
+ if (nr <= 0 || i >= btree_cache_used)
goto out;
- b = list_first_entry(&c->btree_cache, struct btree, list);
- list_rotate_left(&c->btree_cache);
-
- if (!b->accessed &&
- !mca_reap(b, 0, false)) {
+ if (!mca_reap(b, 0, false)) {
mca_bucket_free(b);
mca_data_free(b);
rw_unlock(true, b);
freed++;
- } else
- b->accessed = 0;
+ }
+
+ nr--;
+ i++;
}
out:
mutex_unlock(&c->bucket_lock);
BUG_ON(!b->written);
b->parent = parent;
- b->accessed = 1;
for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
prefetch(b->keys.set[i].tree);
goto retry;
}
- b->accessed = 1;
b->parent = parent;
bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));
/* Key/pointer for this btree node */
BKEY_PADDED(key);
- /* Single bit - set when accessed, cleared by shrinker */
- unsigned long accessed;
unsigned long seq;
struct rw_semaphore lock;
struct cache_set *c;
/* Journalling */
+#define nr_to_fifo_front(p, front_p, mask) (((p) - (front_p)) & (mask))
+
static void btree_flush_write(struct cache_set *c)
{
struct btree *b, *t, *btree_nodes[BTREE_FLUSH_NR];
- unsigned int i, n;
+ unsigned int i, nr, ref_nr;
+ atomic_t *fifo_front_p, *now_fifo_front_p;
+ size_t mask;
if (c->journal.btree_flushing)
return;
c->journal.btree_flushing = true;
spin_unlock(&c->journal.flush_write_lock);
+ /* get the oldest journal entry and check its refcount */
+ spin_lock(&c->journal.lock);
+ fifo_front_p = &fifo_front(&c->journal.pin);
+ ref_nr = atomic_read(fifo_front_p);
+ if (ref_nr <= 0) {
+ /*
+ * do nothing if no btree node references
+ * the oldest journal entry
+ */
+ spin_unlock(&c->journal.lock);
+ goto out;
+ }
+ spin_unlock(&c->journal.lock);
+
+ mask = c->journal.pin.mask;
+ nr = 0;
atomic_long_inc(&c->flush_write);
memset(btree_nodes, 0, sizeof(btree_nodes));
- n = 0;
mutex_lock(&c->bucket_lock);
list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
+ /*
+ * It is safe to get now_fifo_front_p without holding
+ * c->journal.lock here, because we don't need to know
+		 * the exact value, just whether the front pointer of
+		 * c->journal.pin has changed.
+ */
+ now_fifo_front_p = &fifo_front(&c->journal.pin);
+ /*
+		 * If the oldest journal entry is reclaimed and the front
+		 * pointer of c->journal.pin changes, it is unnecessary to
+		 * scan c->btree_cache any further; just quit the loop and
+		 * flush out what we already have.
+ */
+ if (now_fifo_front_p != fifo_front_p)
+ break;
+ /*
+		 * Quit this loop once all matching btree nodes have been
+		 * scanned and recorded in btree_nodes[].
+ */
+ ref_nr = atomic_read(fifo_front_p);
+ if (nr >= ref_nr)
+ break;
+
if (btree_node_journal_flush(b))
pr_err("BUG: flush_write bit should not be set here!");
continue;
}
+ /*
+		 * Only select btree nodes that reference exactly the
+		 * oldest journal entry.
+		 *
+		 * If the journal entry pointed to by fifo_front_p is
+		 * reclaimed in parallel, don't worry:
+		 * - the list_for_each_xxx loop will quit when checking
+		 *   the next now_fifo_front_p.
+		 * - If there are matched nodes recorded in btree_nodes[],
+		 *   they are clean now (this is why and how the oldest
+		 *   journal entry can be reclaimed). These selected nodes
+		 *   will be ignored and skipped in the following for-loop.
+ */
+ if (nr_to_fifo_front(btree_current_write(b)->journal,
+ fifo_front_p,
+ mask) != 0) {
+ mutex_unlock(&b->write_lock);
+ continue;
+ }
+
set_btree_node_journal_flush(b);
mutex_unlock(&b->write_lock);
- btree_nodes[n++] = b;
- if (n == BTREE_FLUSH_NR)
+ btree_nodes[nr++] = b;
+ /*
+		 * To avoid holding c->bucket_lock for too long, scan at
+		 * most BTREE_FLUSH_NR matching btree nodes. If more btree
+		 * nodes reference the oldest journal entry, try to flush
+		 * them the next time btree_flush_write() is called.
+ */
+ if (nr == BTREE_FLUSH_NR)
break;
}
mutex_unlock(&c->bucket_lock);
- for (i = 0; i < n; i++) {
+ for (i = 0; i < nr; i++) {
b = btree_nodes[i];
if (!b) {
pr_err("BUG: btree_nodes[%d] is NULL", i);
mutex_unlock(&b->write_lock);
}
+out:
spin_lock(&c->journal.flush_write_lock);
c->journal.btree_flushing = false;
spin_unlock(&c->journal.flush_write_lock);
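nr_to_fifo_front() above is ring-buffer distance arithmetic: how many slots index p sits behind the front pointer, modulo the power-of-two FIFO size, so a result of zero means the node references exactly the oldest journal entry. A small stand-alone illustration, assuming a ring of eight slots:
/* Editor's sketch: distance from the front of a power-of-two ring. */
#include <stdio.h>

#define RING_SIZE 8u			/* must be a power of two */
#define MASK (RING_SIZE - 1)
#define dist_from_front(p, front) (((p) - (front)) & MASK)

int main(void)
{
	unsigned front = 6;

	/* 0: slot 6 is the oldest; 1: slot 7 is next; 3: slot 1 wraps. */
	printf("%u %u %u\n", dist_from_front(6u, front),
	       dist_from_front(7u, front), dist_from_front(1u, front));
	return 0;
}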
#include "writeback.h"
#include <linux/blkdev.h>
-#include <linux/buffer_head.h>
#include <linux/debugfs.h>
#include <linux/genhd.h>
#include <linux/idr.h>
/* Superblock */
static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
- struct page **res)
+ struct cache_sb_disk **res)
{
const char *err;
- struct cache_sb *s;
- struct buffer_head *bh = __bread(bdev, 1, SB_SIZE);
+ struct cache_sb_disk *s;
+ struct page *page;
unsigned int i;
- if (!bh)
+ page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
+ SB_OFFSET >> PAGE_SHIFT, GFP_KERNEL);
+ if (IS_ERR(page))
return "IO error";
-
- s = (struct cache_sb *) bh->b_data;
+ s = page_address(page) + offset_in_page(SB_OFFSET);
sb->offset = le64_to_cpu(s->offset);
sb->version = le64_to_cpu(s->version);
}
sb->last_mount = (u32)ktime_get_real_seconds();
- err = NULL;
-
- get_page(bh->b_page);
- *res = bh->b_page;
+ *res = s;
+ return NULL;
err:
- put_bh(bh);
+ put_page(page);
return err;
}
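read_super() now borrows the superblock page from the block device's page cache instead of going through a buffer_head, so the interesting arithmetic is splitting SB_OFFSET into a page-cache index and an in-page offset. A user-space sketch of that split; the 4 KiB page size and superblock-at-sector-8 constants are assumptions for illustration:
/* Editor's sketch: byte offset -> page-cache index + in-page offset. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1u << PAGE_SHIFT)	/* assumed 4 KiB pages */
#define SB_OFFSET  (8u * 512u)		/* assumed: superblock at sector 8 */
#define offset_in_page(off) ((off) & (PAGE_SIZE - 1))

int main(void)
{
	printf("page index %u, in-page offset %u\n",
	       SB_OFFSET >> PAGE_SHIFT, offset_in_page(SB_OFFSET));
	return 0;
}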
closure_put(&dc->sb_write);
}
-static void __write_super(struct cache_sb *sb, struct bio *bio)
+static void __write_super(struct cache_sb *sb, struct cache_sb_disk *out,
+ struct bio *bio)
{
- struct cache_sb *out = page_address(bio_first_page_all(bio));
unsigned int i;
+ bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_META;
bio->bi_iter.bi_sector = SB_SECTOR;
- bio->bi_iter.bi_size = SB_SIZE;
- bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META);
- bch_bio_map(bio, NULL);
+ __bio_add_page(bio, virt_to_page(out), SB_SIZE,
+ offset_in_page(out));
out->offset = cpu_to_le64(sb->offset);
out->version = cpu_to_le64(sb->version);
down(&dc->sb_write_mutex);
closure_init(cl, parent);
- bio_reset(bio);
+ bio_init(bio, dc->sb_bv, 1);
bio_set_dev(bio, dc->bdev);
bio->bi_end_io = write_bdev_super_endio;
bio->bi_private = dc;
closure_get(cl);
/* I/O request sent to backing device */
- __write_super(&dc->sb, bio);
+ __write_super(&dc->sb, dc->sb_disk, bio);
closure_return_with_destructor(cl, bch_write_bdev_super_unlock);
}
SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));
- bio_reset(bio);
+ bio_init(bio, ca->sb_bv, 1);
bio_set_dev(bio, ca->bdev);
bio->bi_end_io = write_super_endio;
bio->bi_private = ca;
closure_get(cl);
- __write_super(&ca->sb, bio);
+ __write_super(&ca->sb, ca->sb_disk, bio);
}
closure_return_with_destructor(cl, bcache_write_super_unlock);
mutex_unlock(&bch_register_lock);
+ if (dc->sb_disk)
+ put_page(virt_to_page(dc->sb_disk));
+
if (!IS_ERR_OR_NULL(dc->bdev))
blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
/* Cached device - bcache superblock */
-static int register_bdev(struct cache_sb *sb, struct page *sb_page,
+static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
struct block_device *bdev,
struct cached_dev *dc)
{
memcpy(&dc->sb, sb, sizeof(struct cache_sb));
dc->bdev = bdev;
dc->bdev->bd_holder = dc;
-
- bio_init(&dc->sb_bio, dc->sb_bio.bi_inline_vecs, 1);
- bio_first_bvec_all(&dc->sb_bio)->bv_page = sb_page;
- get_page(sb_page);
-
+ dc->sb_disk = sb_disk;
if (cached_dev_init(dc, sb->block_size << 9))
goto err;
for (i = 0; i < RESERVE_NR; i++)
free_fifo(&ca->free[i]);
- if (ca->sb_bio.bi_inline_vecs[0].bv_page)
- put_page(bio_first_page_all(&ca->sb_bio));
+ if (ca->sb_disk)
+ put_page(virt_to_page(ca->sb_disk));
if (!IS_ERR_OR_NULL(ca->bdev))
blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
return ret;
}
-static int register_cache(struct cache_sb *sb, struct page *sb_page,
+static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
struct block_device *bdev, struct cache *ca)
{
const char *err = NULL; /* must be set for any error case */
memcpy(&ca->sb, sb, sizeof(struct cache_sb));
ca->bdev = bdev;
ca->bdev->bd_holder = ca;
-
- bio_init(&ca->sb_bio, ca->sb_bio.bi_inline_vecs, 1);
- bio_first_bvec_all(&ca->sb_bio)->bv_page = sb_page;
- get_page(sb_page);
+ ca->sb_disk = sb_disk;
if (blk_queue_discard(bdev_get_queue(bdev)))
ca->discard = CACHE_DISCARD(&ca->sb);
static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
const char *buffer, size_t size)
{
- ssize_t ret = -EINVAL;
- const char *err = "cannot allocate memory";
+ const char *err;
char *path = NULL;
- struct cache_sb *sb = NULL;
- struct block_device *bdev = NULL;
- struct page *sb_page = NULL;
+ struct cache_sb *sb;
+ struct cache_sb_disk *sb_disk;
+ struct block_device *bdev;
+ ssize_t ret;
+ ret = -EBUSY;
+ err = "failed to reference bcache module";
if (!try_module_get(THIS_MODULE))
- return -EBUSY;
+ goto out;
/* For latest state of bcache_is_reboot */
smp_mb();
+ err = "bcache is in reboot";
if (bcache_is_reboot)
- return -EBUSY;
+ goto out_module_put;
+ ret = -ENOMEM;
+ err = "cannot allocate memory";
path = kstrndup(buffer, size, GFP_KERNEL);
if (!path)
- goto err;
+ goto out_module_put;
sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL);
if (!sb)
- goto err;
+ goto out_free_path;
+ ret = -EINVAL;
err = "failed to open device";
bdev = blkdev_get_by_path(strim(path),
FMODE_READ|FMODE_WRITE|FMODE_EXCL,
if (!IS_ERR(bdev))
bdput(bdev);
if (attr == &ksysfs_register_quiet)
- goto quiet_out;
+ goto done;
}
- goto err;
+ goto out_free_sb;
}
err = "failed to set blocksize";
if (set_blocksize(bdev, 4096))
- goto err_close;
+ goto out_blkdev_put;
- err = read_super(sb, bdev, &sb_page);
+ err = read_super(sb, bdev, &sb_disk);
if (err)
- goto err_close;
+ goto out_blkdev_put;
err = "failed to register device";
if (SB_IS_BDEV(sb)) {
struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
if (!dc)
- goto err_close;
+ goto out_put_sb_page;
mutex_lock(&bch_register_lock);
- ret = register_bdev(sb, sb_page, bdev, dc);
+ ret = register_bdev(sb, sb_disk, bdev, dc);
mutex_unlock(&bch_register_lock);
/* blkdev_put() will be called in cached_dev_free() */
if (ret < 0)
- goto err;
+ goto out_free_sb;
} else {
struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
if (!ca)
- goto err_close;
+ goto out_put_sb_page;
/* blkdev_put() will be called in bch_cache_release() */
- if (register_cache(sb, sb_page, bdev, ca) != 0)
- goto err;
+ if (register_cache(sb, sb_disk, bdev, ca) != 0)
+ goto out_free_sb;
}
-quiet_out:
- ret = size;
-out:
- if (sb_page)
- put_page(sb_page);
+
+done:
kfree(sb);
kfree(path);
module_put(THIS_MODULE);
- return ret;
+ return size;
-err_close:
- blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
-err:
- pr_info("error %s: %s", path, err);
- goto out;
+out_put_sb_page:
+ put_page(virt_to_page(sb_disk));
+out_blkdev_put:
+ blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
+out_free_sb:
+ kfree(sb);
+out_free_path:
+ kfree(path);
+ path = NULL;
+out_module_put:
+ module_put(THIS_MODULE);
+out:
+ pr_info("error %s: %s", path?path:"", err);
+ return ret;
}
#include <linux/dm-bufio.h>
#define DM_MSG_PREFIX "persistent snapshot"
-#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32 /* 16KB */
+#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32U /* 16KB */
#define DM_PREFETCH_CHUNKS 12
/* look at each page to see if there are any set bits that need to be
* flushed out to disk */
for (i = 0; i < bitmap->storage.file_pages; i++) {
- if (!bitmap->storage.filemap)
- return;
dirty = test_and_clear_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
need_write = test_and_clear_page_attr(bitmap, i,
BITMAP_PAGE_NEEDWRITE);
BITMAP_PAGE_DIRTY))
/* bitmap_unplug will handle the rest */
break;
- if (test_and_clear_page_attr(bitmap, j,
+ if (bitmap->storage.filemap &&
+ test_and_clear_page_attr(bitmap, j,
BITMAP_PAGE_NEEDWRITE)) {
write_page(bitmap, bitmap->storage.filemap[j], 0);
}
return;
md_bitmap_wait_behind_writes(mddev);
- mempool_destroy(mddev->wb_info_pool);
- mddev->wb_info_pool = NULL;
+ if (!mddev->serialize_policy)
+ mddev_destroy_serial_pool(mddev, NULL, true);
mutex_lock(&mddev->bitmap_info.mutex);
spin_lock(&mddev->lock);
goto out;
rdev_for_each(rdev, mddev)
- mddev_create_wb_pool(mddev, rdev, true);
+ mddev_create_serial_pool(mddev, rdev, true);
if (mddev_is_clustered(mddev))
md_cluster_ops->load_bitmaps(mddev, mddev->bitmap_info.nodes);
if (backlog > COUNTER_MAX)
return -EINVAL;
mddev->bitmap_info.max_write_behind = backlog;
- if (!backlog && mddev->wb_info_pool) {
- /* wb_info_pool is not needed if backlog is zero */
- mempool_destroy(mddev->wb_info_pool);
- mddev->wb_info_pool = NULL;
- } else if (backlog && !mddev->wb_info_pool) {
- /* wb_info_pool is needed since backlog is not zero */
+ if (!backlog && mddev->serial_info_pool) {
+ /* serial_info_pool is not needed if backlog is zero */
+ if (!mddev->serialize_policy)
+ mddev_destroy_serial_pool(mddev, NULL, false);
+ } else if (backlog && !mddev->serial_info_pool) {
+ /* serial_info_pool is needed since backlog is not zero */
struct md_rdev *rdev;
rdev_for_each(rdev, mddev)
- mddev_create_wb_pool(mddev, rdev, false);
+ mddev_create_serial_pool(mddev, rdev, false);
}
if (old_mwb != backlog)
md_bitmap_update_sb(mddev->bitmap);
mddev->sync_speed_max : sysctl_speed_limit_max;
}
-static int rdev_init_wb(struct md_rdev *rdev)
+static void rdev_uninit_serial(struct md_rdev *rdev)
{
- if (rdev->bdev->bd_queue->nr_hw_queues == 1)
+ if (!test_and_clear_bit(CollisionCheck, &rdev->flags))
+ return;
+
+ kvfree(rdev->serial);
+ rdev->serial = NULL;
+}
+
+static void rdevs_uninit_serial(struct mddev *mddev)
+{
+ struct md_rdev *rdev;
+
+ rdev_for_each(rdev, mddev)
+ rdev_uninit_serial(rdev);
+}
+
+static int rdev_init_serial(struct md_rdev *rdev)
+{
+	/* serial_nums equals BARRIER_BUCKETS_NR */
+ int i, serial_nums = 1 << ((PAGE_SHIFT - ilog2(sizeof(atomic_t))));
+ struct serial_in_rdev *serial = NULL;
+
+ if (test_bit(CollisionCheck, &rdev->flags))
return 0;
- spin_lock_init(&rdev->wb_list_lock);
- INIT_LIST_HEAD(&rdev->wb_list);
- init_waitqueue_head(&rdev->wb_io_wait);
- set_bit(WBCollisionCheck, &rdev->flags);
+ serial = kvmalloc(sizeof(struct serial_in_rdev) * serial_nums,
+ GFP_KERNEL);
+ if (!serial)
+ return -ENOMEM;
- return 1;
+ for (i = 0; i < serial_nums; i++) {
+ struct serial_in_rdev *serial_tmp = &serial[i];
+
+ spin_lock_init(&serial_tmp->serial_lock);
+ serial_tmp->serial_rb = RB_ROOT_CACHED;
+ init_waitqueue_head(&serial_tmp->serial_io_wait);
+ }
+
+ rdev->serial = serial;
+ set_bit(CollisionCheck, &rdev->flags);
+
+ return 0;
+}
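Worked example for the serial_nums arithmetic above, assuming 4 KiB pages and a 4-byte atomic_t: 1 << (PAGE_SHIFT - ilog2(sizeof(atomic_t))) = 1 << (12 - 2) = 1024, i.e. one struct serial_in_rdev per raid1 barrier bucket, which is what lets sector_to_idx() index rdev->serial directly later in this series.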
+
+static int rdevs_init_serial(struct mddev *mddev)
+{
+ struct md_rdev *rdev;
+ int ret = 0;
+
+ rdev_for_each(rdev, mddev) {
+ ret = rdev_init_serial(rdev);
+ if (ret)
+ break;
+ }
+
+	/* Free all resources if the pool does not exist */
+ if (ret && !mddev->serial_info_pool)
+ rdevs_uninit_serial(mddev);
+
+ return ret;
}
/*
- * Create wb_info_pool if rdev is the first multi-queue device flaged
- * with writemostly, also write-behind mode is enabled.
+ * rdev needs to enable the serialization machinery if it meets both
+ * conditions:
+ * 1. it is a multi-queue device flagged with writemostly.
+ * 2. the write-behind mode is enabled.
*/
-void mddev_create_wb_pool(struct mddev *mddev, struct md_rdev *rdev,
- bool is_suspend)
+static int rdev_need_serial(struct md_rdev *rdev)
{
- if (mddev->bitmap_info.max_write_behind == 0)
- return;
+ return (rdev && rdev->mddev->bitmap_info.max_write_behind > 0 &&
+ rdev->bdev->bd_queue->nr_hw_queues != 1 &&
+ test_bit(WriteMostly, &rdev->flags));
+}
+
+/*
+ * Init resources for the rdev(s), then create serial_info_pool if:
+ * 1. rdev is the first device for which rdev_need_serial() returns true.
+ * 2. rdev is NULL, meaning serialization should be enabled for all rdevs.
+ */
+void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
+ bool is_suspend)
+{
+ int ret = 0;
- if (!test_bit(WriteMostly, &rdev->flags) || !rdev_init_wb(rdev))
+ if (rdev && !rdev_need_serial(rdev) &&
+ !test_bit(CollisionCheck, &rdev->flags))
return;
- if (mddev->wb_info_pool == NULL) {
+ if (!is_suspend)
+ mddev_suspend(mddev);
+
+ if (!rdev)
+ ret = rdevs_init_serial(mddev);
+ else
+ ret = rdev_init_serial(rdev);
+ if (ret)
+ goto abort;
+
+ if (mddev->serial_info_pool == NULL) {
unsigned int noio_flag;
- if (!is_suspend)
- mddev_suspend(mddev);
noio_flag = memalloc_noio_save();
- mddev->wb_info_pool = mempool_create_kmalloc_pool(NR_WB_INFOS,
- sizeof(struct wb_info));
+ mddev->serial_info_pool =
+ mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
+ sizeof(struct serial_info));
memalloc_noio_restore(noio_flag);
- if (!mddev->wb_info_pool)
- pr_err("can't alloc memory pool for writemostly\n");
- if (!is_suspend)
- mddev_resume(mddev);
+ if (!mddev->serial_info_pool) {
+ rdevs_uninit_serial(mddev);
+ pr_err("can't alloc memory pool for serialization\n");
+ }
}
+
+abort:
+ if (!is_suspend)
+ mddev_resume(mddev);
}
-EXPORT_SYMBOL_GPL(mddev_create_wb_pool);
/*
- * destroy wb_info_pool if rdev is the last device flaged with WBCollisionCheck.
+ * Free resources from the rdev(s), and destroy serial_info_pool under these
+ * conditions:
+ * 1. rdev is the last device flagged with CollisionCheck.
+ * 2. the bitmap is destroyed while the policy is not enabled.
+ * 3. when disabling the policy, the pool is destroyed only when no rdev
+ *    needs it.
*/
-static void mddev_destroy_wb_pool(struct mddev *mddev, struct md_rdev *rdev)
+void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
+ bool is_suspend)
{
- if (!test_and_clear_bit(WBCollisionCheck, &rdev->flags))
+ if (rdev && !test_bit(CollisionCheck, &rdev->flags))
return;
- if (mddev->wb_info_pool) {
+ if (mddev->serial_info_pool) {
struct md_rdev *temp;
- int num = 0;
+ int num = 0; /* used to track if other rdevs need the pool */
- /*
- * Check if other rdevs need wb_info_pool.
- */
- rdev_for_each(temp, mddev)
- if (temp != rdev &&
- test_bit(WBCollisionCheck, &temp->flags))
+ if (!is_suspend)
+ mddev_suspend(mddev);
+ rdev_for_each(temp, mddev) {
+ if (!rdev) {
+ if (!mddev->serialize_policy ||
+ !rdev_need_serial(temp))
+ rdev_uninit_serial(temp);
+ else
+ num++;
+ } else if (temp != rdev &&
+ test_bit(CollisionCheck, &temp->flags))
num++;
- if (!num) {
- mddev_suspend(rdev->mddev);
- mempool_destroy(mddev->wb_info_pool);
- mddev->wb_info_pool = NULL;
- mddev_resume(rdev->mddev);
}
+
+ if (rdev)
+ rdev_uninit_serial(rdev);
+
+ if (num)
+ pr_info("The mempool could be used by other devices\n");
+ else {
+ mempool_destroy(mddev->serial_info_pool);
+ mddev->serial_info_pool = NULL;
+ }
+ if (!is_suspend)
+ mddev_resume(mddev);
}
}
pr_debug("md: bind<%s>\n", b);
if (mddev->raid_disks)
- mddev_create_wb_pool(mddev, rdev, false);
+ mddev_create_serial_pool(mddev, rdev, false);
if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
goto fail;
bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
list_del_rcu(&rdev->same_set);
pr_debug("md: unbind<%s>\n", bdevname(rdev->bdev,b));
- mddev_destroy_wb_pool(rdev->mddev, rdev);
+ mddev_destroy_serial_pool(rdev->mddev, rdev, false);
rdev->mddev = NULL;
sysfs_remove_link(&rdev->kobj, "block");
sysfs_put(rdev->sysfs_state);
}
} else if (cmd_match(buf, "writemostly")) {
set_bit(WriteMostly, &rdev->flags);
- mddev_create_wb_pool(rdev->mddev, rdev, false);
+ mddev_create_serial_pool(rdev->mddev, rdev, false);
err = 0;
} else if (cmd_match(buf, "-writemostly")) {
- mddev_destroy_wb_pool(rdev->mddev, rdev);
+ mddev_destroy_serial_pool(rdev->mddev, rdev, false);
clear_bit(WriteMostly, &rdev->flags);
err = 0;
} else if (cmd_match(buf, "blocked")) {
__ATTR(fail_last_dev, S_IRUGO | S_IWUSR, fail_last_dev_show,
fail_last_dev_store);
+static ssize_t serialize_policy_show(struct mddev *mddev, char *page)
+{
+ if (mddev->pers == NULL || (mddev->pers->level != 1))
+ return sprintf(page, "n/a\n");
+ else
+ return sprintf(page, "%d\n", mddev->serialize_policy);
+}
+
+/*
+ * Setting serialize_policy to true enforces that write IO is not
+ * reordered for raid1.
+ */
+static ssize_t
+serialize_policy_store(struct mddev *mddev, const char *buf, size_t len)
+{
+ int err;
+ bool value;
+
+ err = kstrtobool(buf, &value);
+ if (err)
+ return err;
+
+ if (value == mddev->serialize_policy)
+ return len;
+
+ err = mddev_lock(mddev);
+ if (err)
+ return err;
+ if (mddev->pers == NULL || (mddev->pers->level != 1)) {
+ pr_err("md: serialize_policy is only effective for raid1\n");
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ mddev_suspend(mddev);
+ if (value)
+ mddev_create_serial_pool(mddev, NULL, true);
+ else
+ mddev_destroy_serial_pool(mddev, NULL, true);
+ mddev->serialize_policy = value;
+ mddev_resume(mddev);
+unlock:
+ mddev_unlock(mddev);
+ return err ?: len;
+}
+
+static struct md_sysfs_entry md_serialize_policy =
+__ATTR(serialize_policy, S_IRUGO | S_IWUSR, serialize_policy_show,
+ serialize_policy_store);
+
+
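Usage note (path assumed from md's standard sysfs layout): once an array is assembled, writing 1 to /sys/block/md0/md/serialize_policy suspends the array, sets up the per-rdev serialization trees and the serial_info_pool, and resumes it; writing 0 tears them down again when no rdev still needs them. Reads return n/a for personalities other than raid1.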
static struct attribute *md_default_attrs[] = {
&md_level.attr,
&md_layout.attr,
&max_corr_read_errors.attr,
&md_consistency_policy.attr,
&md_fail_last_dev.attr,
+ &md_serialize_policy.attr,
NULL,
};
goto bitmap_abort;
if (mddev->bitmap_info.max_write_behind > 0) {
- bool creat_pool = false;
+ bool create_pool = false;
rdev_for_each(rdev, mddev) {
if (test_bit(WriteMostly, &rdev->flags) &&
- rdev_init_wb(rdev))
- creat_pool = true;
- }
- if (creat_pool && mddev->wb_info_pool == NULL) {
- mddev->wb_info_pool =
- mempool_create_kmalloc_pool(NR_WB_INFOS,
- sizeof(struct wb_info));
- if (!mddev->wb_info_pool) {
+ rdev_init_serial(rdev))
+ create_pool = true;
+ }
+ if (create_pool && mddev->serial_info_pool == NULL) {
+ mddev->serial_info_pool =
+ mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
+ sizeof(struct serial_info));
+ if (!mddev->serial_info_pool) {
err = -ENOMEM;
goto bitmap_abort;
}
mddev->in_sync = 1;
md_update_sb(mddev, 1);
}
- mempool_destroy(mddev->wb_info_pool);
- mddev->wb_info_pool = NULL;
+	/* disable the policy so rdevs free their serialization resources */
+ mddev->serialize_policy = 0;
+ mddev_destroy_serial_pool(mddev, NULL, true);
}
void md_stop_writes(struct mddev *mddev)
* be retried.
*/
#define MD_FAILFAST (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT)
+
+/*
+ * The struct embedded in rdev is used to serialize IO.
+ */
+struct serial_in_rdev {
+ struct rb_root_cached serial_rb;
+ spinlock_t serial_lock;
+ wait_queue_head_t serial_io_wait;
+};
+
/*
* MD's 'extended' device
*/
* in superblock.
*/
- /*
- * The members for check collision of write behind IOs.
- */
- struct list_head wb_list;
- spinlock_t wb_list_lock;
- wait_queue_head_t wb_io_wait;
+ struct serial_in_rdev *serial; /* used for raid1 io serialization */
struct work_struct del_work; /* used for delayed sysfs removal */
* it didn't fail, so don't use FailFast
* any more for metadata
*/
- WBCollisionCheck, /*
- * multiqueue device should check if there
- * is collision between write behind bios.
+ CollisionCheck, /*
+				 * check if there is a collision between raid1
+				 * serialized bios.
*/
};
MD_SB_NEED_REWRITE, /* metadata write needs to be repeated */
};
-#define NR_WB_INFOS 8
-/* record current range of write behind IOs */
-struct wb_info {
- sector_t lo;
- sector_t hi;
- struct list_head list;
+#define NR_SERIAL_INFOS 8
+/* record the current range of serialized IOs */
+struct serial_info {
+ struct rb_node node;
+ sector_t start; /* start sector of rb node */
+ sector_t last; /* end sector of rb node */
+ sector_t _subtree_last; /* highest sector in subtree of rb node */
};
struct mddev {
*/
struct work_struct flush_work;
struct work_struct event_work; /* used by dm to report failure event */
- mempool_t *wb_info_pool;
+ mempool_t *serial_info_pool;
void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
struct md_cluster_info *cluster_info;
unsigned int good_device_nr; /* good device num within cluster raid */
bool has_superblocks:1;
bool fail_last_dev:1;
+ bool serialize_policy:1;
};
enum recovery_flags {
extern void md_reload_sb(struct mddev *mddev, int raid_disk);
extern void md_update_sb(struct mddev *mddev, int force);
extern void md_kick_rdev_from_array(struct md_rdev * rdev);
-extern void mddev_create_wb_pool(struct mddev *mddev, struct md_rdev *rdev,
- bool is_suspend);
+extern void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
+ bool is_suspend);
+extern void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
+ bool is_suspend);
struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr);
struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev);
char b[BDEVNAME_SIZE];
char b2[BDEVNAME_SIZE];
struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
- unsigned short blksize = 512;
+ unsigned blksize = 512;
*private_conf = ERR_PTR(-ENOMEM);
if (!conf)
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
+#include <linux/interval_tree_generic.h>
#include <trace/events/block.h>
#include "raid1-10.c"
-static int check_and_add_wb(struct md_rdev *rdev, sector_t lo, sector_t hi)
+#define START(node) ((node)->start)
+#define LAST(node) ((node)->last)
+INTERVAL_TREE_DEFINE(struct serial_info, node, sector_t, _subtree_last,
+ START, LAST, static inline, raid1_rb);
+
+static int check_and_add_serial(struct md_rdev *rdev, struct r1bio *r1_bio,
+ struct serial_info *si, int idx)
{
- struct wb_info *wi, *temp_wi;
unsigned long flags;
int ret = 0;
- struct mddev *mddev = rdev->mddev;
-
- wi = mempool_alloc(mddev->wb_info_pool, GFP_NOIO);
-
- spin_lock_irqsave(&rdev->wb_list_lock, flags);
- list_for_each_entry(temp_wi, &rdev->wb_list, list) {
- /* collision happened */
- if (hi > temp_wi->lo && lo < temp_wi->hi) {
- ret = -EBUSY;
- break;
- }
+ sector_t lo = r1_bio->sector;
+ sector_t hi = lo + r1_bio->sectors;
+ struct serial_in_rdev *serial = &rdev->serial[idx];
+
+ spin_lock_irqsave(&serial->serial_lock, flags);
+ /* collision happened */
+ if (raid1_rb_iter_first(&serial->serial_rb, lo, hi))
+ ret = -EBUSY;
+ else {
+ si->start = lo;
+ si->last = hi;
+ raid1_rb_insert(si, &serial->serial_rb);
}
-
- if (!ret) {
- wi->lo = lo;
- wi->hi = hi;
- list_add(&wi->list, &rdev->wb_list);
- } else
- mempool_free(wi, mddev->wb_info_pool);
- spin_unlock_irqrestore(&rdev->wb_list_lock, flags);
+ spin_unlock_irqrestore(&serial->serial_lock, flags);
return ret;
}
-static void remove_wb(struct md_rdev *rdev, sector_t lo, sector_t hi)
+static void wait_for_serialization(struct md_rdev *rdev, struct r1bio *r1_bio)
+{
+ struct mddev *mddev = rdev->mddev;
+ struct serial_info *si;
+ int idx = sector_to_idx(r1_bio->sector);
+ struct serial_in_rdev *serial = &rdev->serial[idx];
+
+ if (WARN_ON(!mddev->serial_info_pool))
+ return;
+ si = mempool_alloc(mddev->serial_info_pool, GFP_NOIO);
+ wait_event(serial->serial_io_wait,
+ check_and_add_serial(rdev, r1_bio, si, idx) == 0);
+}
+
+static void remove_serial(struct md_rdev *rdev, sector_t lo, sector_t hi)
{
- struct wb_info *wi;
+ struct serial_info *si;
unsigned long flags;
int found = 0;
struct mddev *mddev = rdev->mddev;
-
- spin_lock_irqsave(&rdev->wb_list_lock, flags);
- list_for_each_entry(wi, &rdev->wb_list, list)
- if (hi == wi->hi && lo == wi->lo) {
- list_del(&wi->list);
- mempool_free(wi, mddev->wb_info_pool);
+ int idx = sector_to_idx(lo);
+ struct serial_in_rdev *serial = &rdev->serial[idx];
+
+ spin_lock_irqsave(&serial->serial_lock, flags);
+ for (si = raid1_rb_iter_first(&serial->serial_rb, lo, hi);
+ si; si = raid1_rb_iter_next(si, lo, hi)) {
+ if (si->start == lo && si->last == hi) {
+ raid1_rb_remove(si, &serial->serial_rb);
+ mempool_free(si, mddev->serial_info_pool);
found = 1;
break;
}
-
+ }
if (!found)
- WARN(1, "The write behind IO is not recorded\n");
- spin_unlock_irqrestore(&rdev->wb_list_lock, flags);
- wake_up(&rdev->wb_io_wait);
+ WARN(1, "The write IO is not recorded for serialization\n");
+ spin_unlock_irqrestore(&serial->serial_lock, flags);
+ wake_up(&serial->serial_io_wait);
}
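The raid1_rb_* helpers are generated by INTERVAL_TREE_DEFINE from the start/last fields of struct serial_info, and a collision is simply "the tree already holds a node intersecting [lo, hi]". A stand-alone editor's sketch of that overlap predicate over plain closed intervals (not the kernel's rb-tree implementation):
/* Editor's sketch: the overlap test behind raid1_rb_iter_first(). */
#include <stdbool.h>
#include <stdio.h>

struct range { unsigned long start, last; };

/* Closed intervals [a.start, a.last] and [b.start, b.last] collide
 * iff each one starts no later than the other one ends. */
static bool overlaps(struct range a, struct range b)
{
	return a.start <= b.last && b.start <= a.last;
}

int main(void)
{
	struct range inflight = { 100, 199 };	/* IO already serialized */
	struct range incoming = { 150, 250 };	/* new IO to the same rdev */

	/* 1: the new IO must wait on serial_io_wait before insertion. */
	printf("collide: %d\n", overlaps(inflight, incoming));
	return 0;
}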
/*
int mirror = find_bio_disk(r1_bio, bio);
struct md_rdev *rdev = conf->mirrors[mirror].rdev;
bool discard_error;
+ sector_t lo = r1_bio->sector;
+ sector_t hi = r1_bio->sector + r1_bio->sectors;
discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;
}
if (behind) {
- if (test_bit(WBCollisionCheck, &rdev->flags)) {
- sector_t lo = r1_bio->sector;
- sector_t hi = r1_bio->sector + r1_bio->sectors;
-
- remove_wb(rdev, lo, hi);
- }
+ if (test_bit(CollisionCheck, &rdev->flags))
+ remove_serial(rdev, lo, hi);
if (test_bit(WriteMostly, &rdev->flags))
atomic_dec(&r1_bio->behind_remaining);
call_bio_endio(r1_bio);
}
}
- }
+ } else if (rdev->mddev->serialize_policy)
+ remove_serial(rdev, lo, hi);
if (r1_bio->bios[mirror] == NULL)
rdev_dec_pending(rdev, conf->mddev);
for (i = 0; i < disks; i++) {
struct bio *mbio = NULL;
+ struct md_rdev *rdev = conf->mirrors[i].rdev;
if (!r1_bio->bios[i])
continue;
mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
if (r1_bio->behind_master_bio) {
- struct md_rdev *rdev = conf->mirrors[i].rdev;
-
- if (test_bit(WBCollisionCheck, &rdev->flags)) {
- sector_t lo = r1_bio->sector;
- sector_t hi = r1_bio->sector + r1_bio->sectors;
-
- wait_event(rdev->wb_io_wait,
- check_and_add_wb(rdev, lo, hi) == 0);
- }
+ if (test_bit(CollisionCheck, &rdev->flags))
+ wait_for_serialization(rdev, r1_bio);
if (test_bit(WriteMostly, &rdev->flags))
atomic_inc(&r1_bio->behind_remaining);
- }
+ } else if (mddev->serialize_policy)
+ wait_for_serialization(rdev, r1_bio);
r1_bio->bios[i] = mbio;
static int alloc_thread_groups(struct r5conf *conf, int cnt,
int *group_cnt,
- int *worker_cnt_per_group,
struct r5worker_group **worker_groups);
static ssize_t
raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
unsigned int new;
int err;
struct r5worker_group *new_groups, *old_groups;
- int group_cnt, worker_cnt_per_group;
+ int group_cnt;
if (len >= PAGE_SIZE)
return -EINVAL;
if (old_groups)
flush_workqueue(raid5_wq);
- err = alloc_thread_groups(conf, new,
- &group_cnt, &worker_cnt_per_group,
- &new_groups);
+ err = alloc_thread_groups(conf, new, &group_cnt, &new_groups);
if (!err) {
spin_lock_irq(&conf->device_lock);
conf->group_cnt = group_cnt;
- conf->worker_cnt_per_group = worker_cnt_per_group;
+ conf->worker_cnt_per_group = new;
conf->worker_groups = new_groups;
spin_unlock_irq(&conf->device_lock);
.attrs = raid5_attrs,
};
-static int alloc_thread_groups(struct r5conf *conf, int cnt,
- int *group_cnt,
- int *worker_cnt_per_group,
+static int alloc_thread_groups(struct r5conf *conf, int cnt, int *group_cnt,
struct r5worker_group **worker_groups)
{
int i, j, k;
ssize_t size;
struct r5worker *workers;
- *worker_cnt_per_group = cnt;
if (cnt == 0) {
*group_cnt = 0;
*worker_groups = NULL;
struct disk_info *disk;
char pers_name[6];
int i;
- int group_cnt, worker_cnt_per_group;
+ int group_cnt;
struct r5worker_group *new_group;
int ret;
for (i = 0; i < PENDING_IO_MAX; i++)
list_add(&conf->pending_data[i].sibling, &conf->free_list);
/* Don't enable multi-threading by default */
- if (!alloc_thread_groups(conf, 0, &group_cnt, &worker_cnt_per_group,
- &new_group)) {
+ if (!alloc_thread_groups(conf, 0, &group_cnt, &new_group)) {
conf->group_cnt = group_cnt;
- conf->worker_cnt_per_group = worker_cnt_per_group;
+ conf->worker_cnt_per_group = 0;
conf->worker_groups = new_group;
} else
goto abort;
* Function prototypes. Called from OS entry point mptctl_ioctl.
* arg contents specific to function.
*/
-static int mptctl_fw_download(unsigned long arg);
-static int mptctl_getiocinfo(unsigned long arg, unsigned int cmd);
-static int mptctl_gettargetinfo(unsigned long arg);
-static int mptctl_readtest(unsigned long arg);
-static int mptctl_mpt_command(unsigned long arg);
-static int mptctl_eventquery(unsigned long arg);
-static int mptctl_eventenable(unsigned long arg);
-static int mptctl_eventreport(unsigned long arg);
-static int mptctl_replace_fw(unsigned long arg);
-
-static int mptctl_do_reset(unsigned long arg);
-static int mptctl_hp_hostinfo(unsigned long arg, unsigned int cmd);
-static int mptctl_hp_targetinfo(unsigned long arg);
+static int mptctl_fw_download(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_getiocinfo(MPT_ADAPTER *iocp, unsigned long arg, unsigned int cmd);
+static int mptctl_gettargetinfo(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_readtest(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_mpt_command(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_eventquery(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_eventenable(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_eventreport(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_replace_fw(MPT_ADAPTER *iocp, unsigned long arg);
+
+static int mptctl_do_reset(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_hp_hostinfo(MPT_ADAPTER *iocp, unsigned long arg, unsigned int cmd);
+static int mptctl_hp_targetinfo(MPT_ADAPTER *iocp, unsigned long arg);
static int mptctl_probe(struct pci_dev *, const struct pci_device_id *);
static void mptctl_remove(struct pci_dev *);
/*
* Private function calls.
*/
-static int mptctl_do_mpt_command(struct mpt_ioctl_command karg, void __user *mfPtr);
-static int mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen);
+static int mptctl_do_mpt_command(MPT_ADAPTER *iocp, struct mpt_ioctl_command karg, void __user *mfPtr);
+static int mptctl_do_fw_download(MPT_ADAPTER *iocp, char __user *ufwbuf, size_t fwlen);
static MptSge_t *kbuf_alloc_2_sgl(int bytes, u32 dir, int sge_offset, int *frags,
struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc);
static void kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma,
* by TM and FW reloads.
*/
if ((cmd & ~IOCSIZE_MASK) == (MPTIOCINFO & ~IOCSIZE_MASK)) {
- return mptctl_getiocinfo(arg, _IOC_SIZE(cmd));
+ return mptctl_getiocinfo(iocp, arg, _IOC_SIZE(cmd));
} else if (cmd == MPTTARGETINFO) {
- return mptctl_gettargetinfo(arg);
+ return mptctl_gettargetinfo(iocp, arg);
} else if (cmd == MPTTEST) {
- return mptctl_readtest(arg);
+ return mptctl_readtest(iocp, arg);
} else if (cmd == MPTEVENTQUERY) {
- return mptctl_eventquery(arg);
+ return mptctl_eventquery(iocp, arg);
} else if (cmd == MPTEVENTENABLE) {
- return mptctl_eventenable(arg);
+ return mptctl_eventenable(iocp, arg);
} else if (cmd == MPTEVENTREPORT) {
- return mptctl_eventreport(arg);
+ return mptctl_eventreport(iocp, arg);
} else if (cmd == MPTFWREPLACE) {
- return mptctl_replace_fw(arg);
+ return mptctl_replace_fw(iocp, arg);
}
/* All of these commands require an interrupt or
return ret;
if (cmd == MPTFWDOWNLOAD)
- ret = mptctl_fw_download(arg);
+ ret = mptctl_fw_download(iocp, arg);
else if (cmd == MPTCOMMAND)
- ret = mptctl_mpt_command(arg);
+ ret = mptctl_mpt_command(iocp, arg);
else if (cmd == MPTHARDRESET)
- ret = mptctl_do_reset(arg);
+ ret = mptctl_do_reset(iocp, arg);
else if ((cmd & ~IOCSIZE_MASK) == (HP_GETHOSTINFO & ~IOCSIZE_MASK))
- ret = mptctl_hp_hostinfo(arg, _IOC_SIZE(cmd));
+ ret = mptctl_hp_hostinfo(iocp, arg, _IOC_SIZE(cmd));
else if (cmd == HP_GETTARGETINFO)
- ret = mptctl_hp_targetinfo(arg);
+ ret = mptctl_hp_targetinfo(iocp, arg);
else
ret = -EINVAL;
return ret;
}
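The common thread in these mptctl hunks: every handler used to re-resolve and re-validate the adapter from a user-supplied iocnum, so the lookup is hoisted into the dispatcher once and the verified MPT_ADAPTER pointer is passed down. A schematic editor's sketch of that refactor with hypothetical names (not the driver's API):
/* Editor's sketch: validate a handle once at the boundary, then pass
 * the resolved object to every handler instead of the raw number. */
#include <stdio.h>

struct adapter { int id; };

static struct adapter adapters[2] = { { .id = 0 }, { .id = 1 } };

static struct adapter *verify_adapter(int num)
{
	return (num >= 0 && num < 2) ? &adapters[num] : NULL;
}

/* post-refactor handler: trusts the pointer, no lookup of its own */
static int do_command(struct adapter *ioc, unsigned long arg)
{
	printf("ioc%d: arg %lu\n", ioc->id, arg);
	return 0;
}

int main(void)
{
	struct adapter *ioc = verify_adapter(1);	/* done once */

	return ioc ? do_command(ioc, 42) : -1;	/* -ENODEV in the driver */
}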
-static int mptctl_do_reset(unsigned long arg)
+static int mptctl_do_reset(MPT_ADAPTER *iocp, unsigned long arg)
{
struct mpt_ioctl_diag_reset __user *urinfo = (void __user *) arg;
struct mpt_ioctl_diag_reset krinfo;
- MPT_ADAPTER *iocp;
if (copy_from_user(&krinfo, urinfo, sizeof(struct mpt_ioctl_diag_reset))) {
printk(KERN_ERR MYNAM "%s@%d::mptctl_do_reset - "
return -EFAULT;
}
- if (mpt_verify_adapter(krinfo.hdr.iocnum, &iocp) < 0) {
- printk(KERN_DEBUG MYNAM "%s@%d::mptctl_do_reset - ioc%d not found!\n",
- __FILE__, __LINE__, krinfo.hdr.iocnum);
- return -ENODEV; /* (-6) No such device or address */
- }
-
dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "mptctl_do_reset called.\n",
iocp->name));
* -ENOMSG if FW upload returned bad status
*/
static int
-mptctl_fw_download(unsigned long arg)
+mptctl_fw_download(MPT_ADAPTER *iocp, unsigned long arg)
{
struct mpt_fw_xfer __user *ufwdl = (void __user *) arg;
struct mpt_fw_xfer kfwdl;
return -EFAULT;
}
- return mptctl_do_fw_download(kfwdl.iocnum, kfwdl.bufp, kfwdl.fwlen);
+ return mptctl_do_fw_download(iocp, kfwdl.bufp, kfwdl.fwlen);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
* -ENOMSG if FW upload returned bad status
*/
static int
-mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
+mptctl_do_fw_download(MPT_ADAPTER *iocp, char __user *ufwbuf, size_t fwlen)
{
FWDownload_t *dlmsg;
MPT_FRAME_HDR *mf;
- MPT_ADAPTER *iocp;
FWDownloadTCSGE_t *ptsge;
MptSge_t *sgl, *sgIn;
char *sgOut;
pFWDownloadReply_t ReplyMsg = NULL;
unsigned long timeleft;
- if (mpt_verify_adapter(ioc, &iocp) < 0) {
- printk(KERN_DEBUG MYNAM "ioctl_fwdl - ioc%d not found!\n",
- ioc);
- return -ENODEV; /* (-6) No such device or address */
- } else {
-
- /* Valid device. Get a message frame and construct the FW download message.
- */
- if ((mf = mpt_get_msg_frame(mptctl_id, iocp)) == NULL)
- return -EAGAIN;
- }
+ /* Valid device. Get a message frame and construct the FW download message.
+ */
+ if ((mf = mpt_get_msg_frame(mptctl_id, iocp)) == NULL)
+ return -EAGAIN;
dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT
"mptctl_do_fwdl called. mptctl_id = %xh.\n", iocp->name, mptctl_id));
iocp->name, ufwbuf));
dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: kfwdl.fwlen = %d\n",
iocp->name, (int)fwlen));
- dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: kfwdl.ioc = %04xh\n",
- iocp->name, ioc));
dlmsg = (FWDownload_t*) mf;
ptsge = (FWDownloadTCSGE_t *) &dlmsg->SGL;
* -ENODEV if no such device/adapter
*/
static int
-mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
+mptctl_getiocinfo (MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size)
{
struct mpt_ioctl_iocinfo __user *uarg = (void __user *) arg;
struct mpt_ioctl_iocinfo *karg;
- MPT_ADAPTER *ioc;
struct pci_dev *pdev;
- int iocnum;
unsigned int port;
int cim_rev;
struct scsi_device *sdev;
return PTR_ERR(karg);
}
- if (((iocnum = mpt_verify_adapter(karg->hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_getiocinfo() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- kfree(karg);
- return -ENODEV;
- }
-
/* Verify the data transfer size is correct. */
if (karg->hdr.maxDataSize != data_size) {
printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_getiocinfo - "
* -ENODEV if no such device/adapter
*/
static int
-mptctl_gettargetinfo (unsigned long arg)
+mptctl_gettargetinfo (MPT_ADAPTER *ioc, unsigned long arg)
{
struct mpt_ioctl_targetinfo __user *uarg = (void __user *) arg;
struct mpt_ioctl_targetinfo karg;
- MPT_ADAPTER *ioc;
VirtDevice *vdevice;
char *pmem;
int *pdata;
- int iocnum;
int numDevices = 0;
int lun;
int maxWordsLeft;
return -EFAULT;
}
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_gettargetinfo() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
-
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_gettargetinfo called.\n",
ioc->name));
/* Get the port number and set the maximum number of bytes
* -ENODEV if no such device/adapter
*/
static int
-mptctl_readtest (unsigned long arg)
+mptctl_readtest (MPT_ADAPTER *ioc, unsigned long arg)
{
struct mpt_ioctl_test __user *uarg = (void __user *) arg;
struct mpt_ioctl_test karg;
- MPT_ADAPTER *ioc;
- int iocnum;
if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_test))) {
printk(KERN_ERR MYNAM "%s@%d::mptctl_readtest - "
return -EFAULT;
}
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_readtest() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
-
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_readtest called.\n",
ioc->name));
/* Fill in the data and return the structure to the calling
* -ENODEV if no such device/adapter
*/
static int
-mptctl_eventquery (unsigned long arg)
+mptctl_eventquery (MPT_ADAPTER *ioc, unsigned long arg)
{
struct mpt_ioctl_eventquery __user *uarg = (void __user *) arg;
struct mpt_ioctl_eventquery karg;
- MPT_ADAPTER *ioc;
- int iocnum;
if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventquery))) {
printk(KERN_ERR MYNAM "%s@%d::mptctl_eventquery - "
return -EFAULT;
}
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_eventquery() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
-
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventquery called.\n",
ioc->name));
karg.eventEntries = MPTCTL_EVENT_LOG_SIZE;
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
-mptctl_eventenable (unsigned long arg)
+mptctl_eventenable (MPT_ADAPTER *ioc, unsigned long arg)
{
struct mpt_ioctl_eventenable __user *uarg = (void __user *) arg;
struct mpt_ioctl_eventenable karg;
- MPT_ADAPTER *ioc;
- int iocnum;
if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventenable))) {
printk(KERN_ERR MYNAM "%s@%d::mptctl_eventenable - "
return -EFAULT;
}
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_eventenable() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
-
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventenable called.\n",
ioc->name));
if (ioc->events == NULL) {
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
-mptctl_eventreport (unsigned long arg)
+mptctl_eventreport (MPT_ADAPTER *ioc, unsigned long arg)
{
struct mpt_ioctl_eventreport __user *uarg = (void __user *) arg;
struct mpt_ioctl_eventreport karg;
- MPT_ADAPTER *ioc;
- int iocnum;
int numBytes, maxEvents, max;
if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventreport))) {
return -EFAULT;
}
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_eventreport() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventreport called.\n",
ioc->name));
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
-mptctl_replace_fw (unsigned long arg)
+mptctl_replace_fw (MPT_ADAPTER *ioc, unsigned long arg)
{
struct mpt_ioctl_replace_fw __user *uarg = (void __user *) arg;
struct mpt_ioctl_replace_fw karg;
- MPT_ADAPTER *ioc;
- int iocnum;
int newFwSize;
if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_replace_fw))) {
return -EFAULT;
}
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_replace_fw() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
-
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_replace_fw called.\n",
ioc->name));
/* If caching FW, Free the old FW image
* -ENOMEM if memory allocation error
*/
static int
-mptctl_mpt_command (unsigned long arg)
+mptctl_mpt_command (MPT_ADAPTER *ioc, unsigned long arg)
{
struct mpt_ioctl_command __user *uarg = (void __user *) arg;
struct mpt_ioctl_command karg;
- MPT_ADAPTER *ioc;
- int iocnum;
int rc;
return -EFAULT;
}
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_mpt_command() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
-
- rc = mptctl_do_mpt_command (karg, &uarg->MF);
+ rc = mptctl_do_mpt_command (ioc, karg, &uarg->MF);
return rc;
}
* -EPERM if SCSI I/O and target is untagged
*/
static int
-mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
+mptctl_do_mpt_command (MPT_ADAPTER *ioc, struct mpt_ioctl_command karg, void __user *mfPtr)
{
- MPT_ADAPTER *ioc;
MPT_FRAME_HDR *mf = NULL;
MPIHeader_t *hdr;
char *psge;
dma_addr_t dma_addr_in;
dma_addr_t dma_addr_out;
int sgSize = 0; /* Num SG elements */
- int iocnum, flagsLength;
+ int flagsLength;
int sz, rc = 0;
int msgContext;
u16 req_idx;
bufIn.kptr = bufOut.kptr = NULL;
bufIn.len = bufOut.len = 0;
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_do_mpt_command() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
-
spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
if (ioc->ioc_reset_in_progress) {
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
* -ENOMEM if memory allocation error
*/
static int
-mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
+mptctl_hp_hostinfo(MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size)
{
hp_host_info_t __user *uarg = (void __user *) arg;
- MPT_ADAPTER *ioc;
struct pci_dev *pdev;
char *pbuf=NULL;
dma_addr_t buf_dma;
hp_host_info_t karg;
CONFIGPARMS cfg;
ConfigPageHeader_t hdr;
- int iocnum;
int rc, cim_rev;
ToolboxIstwiReadWriteRequest_t *IstwiRWRequest;
MPT_FRAME_HDR *mf = NULL;
return -EFAULT;
}
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_hp_hostinfo() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": mptctl_hp_hostinfo called.\n",
ioc->name));
* -ENOMEM if memory allocation error
*/
static int
-mptctl_hp_targetinfo(unsigned long arg)
+mptctl_hp_targetinfo(MPT_ADAPTER *ioc, unsigned long arg)
{
hp_target_info_t __user *uarg = (void __user *) arg;
SCSIDevicePage0_t *pg0_alloc;
SCSIDevicePage3_t *pg3_alloc;
- MPT_ADAPTER *ioc;
MPT_SCSI_HOST *hd = NULL;
hp_target_info_t karg;
- int iocnum;
int data_sz;
dma_addr_t page_dma;
CONFIGPARMS cfg;
return -EFAULT;
}
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_hp_targetinfo() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
if (karg.hdr.id >= MPT_MAX_FC_DEVICES)
return -EINVAL;
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_hp_targetinfo called.\n",
kfw.fwlen = kfw32.fwlen;
kfw.bufp = compat_ptr(kfw32.bufp);
- ret = mptctl_do_fw_download(kfw.iocnum, kfw.bufp, kfw.fwlen);
+ ret = mptctl_do_fw_download(iocp, kfw.bufp, kfw.fwlen);
mutex_unlock(&iocp->ioctl_cmds.mutex);
/* Pass new structure to do_mpt_command
*/
- ret = mptctl_do_mpt_command (karg, &uarg->MF);
+ ret = mptctl_do_mpt_command (iocp, karg, &uarg->MF);
mutex_unlock(&iocp->ioctl_cmds.mutex);
cdev = &edev->component[i];
if (cdev->dev == dev) {
enclosure_remove_links(cdev);
- device_del(&cdev->cdev);
put_device(dev);
cdev->dev = NULL;
- return device_add(&cdev->cdev);
+ return 0;
}
}
return -ENODEV;
void lkdtm_UNSET_SMEP(void)
{
-#ifdef CONFIG_X86_64
+#if IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_UML)
#define MOV_CR4_DEPTH 64
void (*direct_write_cr4)(unsigned long val);
unsigned char *insn;
native_write_cr4(cr4);
}
#else
- pr_err("FAIL: this test is x86_64-only\n");
+ pr_err("XFAIL: this test is x86_64-only\n");
#endif
}
-#ifdef CONFIG_X86_32
void lkdtm_DOUBLE_FAULT(void)
{
+#ifdef CONFIG_X86_32
/*
* Trigger #DF by setting the stack limit to zero. This clobbers
* a GDT TLS slot, which is okay because the current task will die
asm volatile ("movw %0, %%ss; addl $0, (%%esp)" ::
"r" ((unsigned short)(GDT_ENTRY_TLS_MIN << 3)));
- panic("tried to double fault but didn't die\n");
-}
+ pr_err("FAIL: tried to double fault but didn't die\n");
+#else
+ pr_err("XFAIL: this test is ia32-only\n");
#endif
+}
card->erase_arg == MMC_TRIM_ARG ?
INAND_CMD38_ARG_TRIM :
INAND_CMD38_ARG_ERASE,
- 0);
+ card->ext_csd.generic_cmd6_time);
}
if (!err)
err = mmc_erase(card, from, nr, card->erase_arg);
arg == MMC_SECURE_TRIM1_ARG ?
INAND_CMD38_ARG_SECTRIM1 :
INAND_CMD38_ARG_SECERASE,
- 0);
+ card->ext_csd.generic_cmd6_time);
if (err)
goto out_retry;
}
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
INAND_CMD38_ARG_EXT_CSD,
INAND_CMD38_ARG_SECTRIM2,
- 0);
+ card->ext_csd.generic_cmd6_time);
if (err)
goto out_retry;
}
}
for (i = 0; i < ARRAY_SIZE(freqs); i++) {
- if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
+ unsigned int freq = freqs[i];
+ if (freq > host->f_max) {
+ if (i + 1 < ARRAY_SIZE(freqs))
+ continue;
+ freq = host->f_max;
+ }
+ if (!mmc_rescan_try_freq(host, max(freq, host->f_min)))
break;
if (freqs[i] <= host->f_min)
break;
void mmc_start_host(struct mmc_host *host)
{
- host->f_init = max(freqs[0], host->f_min);
+ host->f_init = max(min(freqs[0], host->f_max), host->f_min);
host->rescan_disable = 0;
host->ios.power_mode = MMC_POWER_UNDEFINED;
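Worked example for the clamping above, assuming the usual probe table freqs[] = { 400000, 300000, 200000, 100000 } from core.c and a controller with f_max = 50 kHz, f_min = 10 kHz: f_init becomes max(min(400000, 50000), 10000) = 50 kHz, and in the rescan loop every table entry exceeds f_max, so the first three are skipped and the last iteration probes at f_max itself rather than at a rate the host cannot produce.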
struct device *dev = host->parent;
u32 bus_width, drv_type, cd_debounce_delay_ms;
int ret;
- bool cd_cap_invert, cd_gpio_invert = false;
- bool ro_cap_invert, ro_gpio_invert = false;
if (!dev || !dev_fwnode(dev))
return 0;
*/
/* Parse Card Detection */
+
if (device_property_read_bool(dev, "non-removable")) {
host->caps |= MMC_CAP_NONREMOVABLE;
} else {
- cd_cap_invert = device_property_read_bool(dev, "cd-inverted");
+ if (device_property_read_bool(dev, "cd-inverted"))
+ host->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
if (device_property_read_u32(dev, "cd-debounce-delay-ms",
&cd_debounce_delay_ms))
host->caps |= MMC_CAP_NEEDS_POLL;
ret = mmc_gpiod_request_cd(host, "cd", 0, false,
- cd_debounce_delay_ms * 1000,
- &cd_gpio_invert);
+ cd_debounce_delay_ms * 1000);
if (!ret)
dev_info(host->parent, "Got CD GPIO\n");
else if (ret != -ENOENT && ret != -ENOSYS)
return ret;
-
- /*
- * There are two ways to flag that the CD line is inverted:
- * through the cd-inverted flag and by the GPIO line itself
- * being inverted from the GPIO subsystem. This is a leftover
- * from the times when the GPIO subsystem did not make it
- * possible to flag a line as inverted.
- *
- * If the capability on the host AND the GPIO line are
- * both inverted, the end result is that the CD line is
- * not inverted.
- */
- if (cd_cap_invert ^ cd_gpio_invert)
- host->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
}
/* Parse Write Protection */
- ro_cap_invert = device_property_read_bool(dev, "wp-inverted");
- ret = mmc_gpiod_request_ro(host, "wp", 0, 0, &ro_gpio_invert);
+ if (device_property_read_bool(dev, "wp-inverted"))
+ host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+
+ ret = mmc_gpiod_request_ro(host, "wp", 0, 0);
if (!ret)
dev_info(host->parent, "Got WP GPIO\n");
else if (ret != -ENOENT && ret != -ENOSYS)
if (device_property_read_bool(dev, "disable-wp"))
host->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
- /* See the comment on CD inversion above */
- if (ro_cap_invert ^ ro_gpio_invert)
- host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
-
if (device_property_read_bool(dev, "cap-sd-highspeed"))
host->caps |= MMC_CAP_SD_HIGHSPEED;
if (device_property_read_bool(dev, "cap-mmc-highspeed"))
#include "host.h"
#include "mmc_ops.h"
-#define MMC_OPS_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
+#define MMC_OPS_TIMEOUT_MS	(10 * 60 * 1000) /* 10 min */
+#define MMC_BKOPS_TIMEOUT_MS (120 * 1000) /* 120s */
+#define MMC_CACHE_FLUSH_TIMEOUT_MS (30 * 1000) /* 30s */
static const u8 tuning_blk_pattern_4bit[] = {
0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
bool expired = false;
bool busy = false;
- /* We have an unspecified cmd timeout, use the fallback value. */
- if (!timeout_ms)
- timeout_ms = MMC_OPS_TIMEOUT_MS;
-
/*
* In cases when not allowed to poll by using CMD13 or because we aren't
* capable of polling by using ->card_busy(), then rely on waiting the
mmc_retune_hold(host);
+ if (!timeout_ms) {
+ pr_warn("%s: unspecified timeout for CMD6 - use generic\n",
+ mmc_hostname(host));
+ timeout_ms = card->ext_csd.generic_cmd6_time;
+ }
+
/*
- * If the cmd timeout and the max_busy_timeout of the host are both
- * specified, let's validate them. A failure means we need to prevent
- * the host from doing hw busy detection, which is done by converting
- * to a R1 response instead of a R1B.
+	 * If the max_busy_timeout of the host is specified, make sure it is
+	 * large enough to fit the used timeout_ms. If it is not, instruct the
+	 * host to avoid HW busy detection, by converting to an R1 response
+	 * instead of an R1B.
*/
- if (timeout_ms && host->max_busy_timeout &&
- (timeout_ms > host->max_busy_timeout))
+ if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
use_r1b_resp = false;
cmd.opcode = MMC_SWITCH;
cmd.flags = MMC_CMD_AC;
if (use_r1b_resp) {
cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
- /*
- * A busy_timeout of zero means the host can decide to use
- * whatever value it finds suitable.
- */
cmd.busy_timeout = timeout_ms;
} else {
cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
* urgent levels by using an asynchronous background task, when idle.
*/
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_BKOPS_START, 1, MMC_OPS_TIMEOUT_MS);
+ EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
if (err)
pr_warn("%s: Error %d starting bkops\n",
mmc_hostname(card->host), err);
(card->ext_csd.cache_size > 0) &&
(card->ext_csd.cache_ctrl & 1)) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_FLUSH_CACHE, 1, 0);
+ EXT_CSD_FLUSH_CACHE, 1,
+ MMC_CACHE_FLUSH_TIMEOUT_MS);
if (err)
pr_err("%s: cache flush error %d\n",
mmc_hostname(card->host), err);
struct mmc_gpio {
struct gpio_desc *ro_gpio;
struct gpio_desc *cd_gpio;
- bool override_cd_active_level;
irqreturn_t (*cd_gpio_isr)(int irq, void *dev_id);
char *ro_label;
char *cd_label;
return -ENOSYS;
cansleep = gpiod_cansleep(ctx->cd_gpio);
- if (ctx->override_cd_active_level) {
- int value = cansleep ?
- gpiod_get_raw_value_cansleep(ctx->cd_gpio) :
- gpiod_get_raw_value(ctx->cd_gpio);
- return !value ^ !!(host->caps2 & MMC_CAP2_CD_ACTIVE_HIGH);
- }
-
return cansleep ?
gpiod_get_value_cansleep(ctx->cd_gpio) :
gpiod_get_value(ctx->cd_gpio);
* @idx: index of the GPIO to obtain in the consumer
* @override_active_level: ignore %GPIO_ACTIVE_LOW flag
* @debounce: debounce time in microseconds
- * @gpio_invert: will return whether the GPIO line is inverted or not, set
- * to NULL to ignore
*
* Note that this must be called prior to mmc_add_host()
* otherwise the caller must also call mmc_gpiod_request_cd_irq().
*/
int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
unsigned int idx, bool override_active_level,
- unsigned int debounce, bool *gpio_invert)
+ unsigned int debounce)
{
struct mmc_gpio *ctx = host->slot.handler_priv;
struct gpio_desc *desc;
ctx->cd_debounce_delay_ms = debounce / 1000;
}
- if (gpio_invert)
- *gpio_invert = !gpiod_is_active_low(desc);
+ /* override forces default (active-low) polarity ... */
+ if (override_active_level && !gpiod_is_active_low(desc))
+ gpiod_toggle_active_low(desc);
+
+ /* ... or active-high */
+ if (host->caps2 & MMC_CAP2_CD_ACTIVE_HIGH)
+ gpiod_toggle_active_low(desc);
- ctx->override_cd_active_level = override_active_level;
ctx->cd_gpio = desc;
return 0;
* @con_id: function within the GPIO consumer
* @idx: index of the GPIO to obtain in the consumer
* @debounce: debounce time in microseconds
- * @gpio_invert: will return whether the GPIO line is inverted or not,
- * set to NULL to ignore
*
* Returns zero on success, else an error.
*/
int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
- unsigned int idx,
- unsigned int debounce, bool *gpio_invert)
+ unsigned int idx, unsigned int debounce)
{
struct mmc_gpio *ctx = host->slot.handler_priv;
struct gpio_desc *desc;
return ret;
}
- if (gpio_invert)
- *gpio_invert = !gpiod_is_active_low(desc);
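+ /*
+ * Polarity is now handled here: toggle the descriptor's active-low
+ * flag when the host declares an active-high write-protect line.
+ */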
+ if (host->caps2 & MMC_CAP2_RO_ACTIVE_HIGH)
+ gpiod_toggle_active_low(desc);
ctx->ro_gpio = desc;
depends on ARCH_QCOM || (ARM && COMPILE_TEST)
depends on MMC_SDHCI_PLTFM
select MMC_SDHCI_IO_ACCESSORS
+ select MMC_CQHCI
help
This selects the Secure Digital Host Controller Interface (SDHCI)
support present in Qualcomm SOCs. The controller supports
tristate "Broadcom SDIO/SD/MMC support"
depends on ARCH_BRCMSTB || BMIPS_GENERIC
depends on MMC_SDHCI_PLTFM
+ select MMC_CQHCI
default y
help
This selects support for the SDIO/SD/MMC Host Controller on
depends on MMC_SDHCI_PLTFM && OF
select THERMAL
imply TI_SOC_THERMAL
+ select MMC_SDHCI_EXTERNAL_DMA if DMA_ENGINE
help
This selects the Secure Digital Host Controller Interface (SDHCI)
support present in TI's DRA7 SOCs. The controller supports
help
This selects support for the SD/MMC Host Controller on
Actions Semi Owl SoCs.
+
+config MMC_SDHCI_EXTERNAL_DMA
+ bool
{
struct atmel_mci *host = dev_get_drvdata(dev);
- pinctrl_pm_select_default_state(dev);
+ pinctrl_select_default_state(dev);
return clk_prepare_enable(host->mck);
}
goto out2;
}
- r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!r) {
- dev_err(&pdev->dev, "no IRQ defined\n");
+ host->irq = platform_get_irq(pdev, 0);
+ if (host->irq < 0)
goto out3;
- }
- host->irq = r->start;
mmc->ops = &au1xmmc_ops;
host->dma_chan = NULL;
host->dma_desc = NULL;
- host->dma_chan_rxtx = dma_request_slave_channel(dev, "rx-tx");
+ host->dma_chan_rxtx = dma_request_chan(dev, "rx-tx");
+ if (IS_ERR(host->dma_chan_rxtx)) {
+ ret = PTR_ERR(host->dma_chan_rxtx);
+ host->dma_chan_rxtx = NULL;
+
+ if (ret == -EPROBE_DEFER)
+ goto err;
+
+ /* Ignore errors to fall back to PIO mode */
+ }
+
clk = devm_clk_get(dev, NULL);
if (IS_ERR(clk)) {
return ret;
host->base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
- if (!host->base)
- return -EINVAL;
+ if (!host->base) {
+ ret = -EINVAL;
+ goto error;
+ }
/* On ThunderX these are identical */
host->dma_base = host->base;
host->reg_off_dma = 0x160;
host->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(host->clk))
- return PTR_ERR(host->clk);
+ if (IS_ERR(host->clk)) {
+ ret = PTR_ERR(host->clk);
+ goto error;
+ }
ret = clk_prepare_enable(host->clk);
if (ret)
- return ret;
+ goto error;
host->sys_freq = clk_get_rate(host->clk);
spin_lock_init(&host->irq_handler_lock);
}
}
clk_disable_unprepare(host->clk);
+ pci_release_regions(pdev);
return ret;
}
writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));
clk_disable_unprepare(host->clk);
+ pci_release_regions(pdev);
}
static const struct pci_device_id thunder_mmc_id_table[] = {
mmc->caps |= pdata->caps;
/* Register a cd gpio, if there is not one, enable polling */
- ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
+ ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
if (ret == -EPROBE_DEFER)
return ret;
else if (ret)
mmc->caps |= MMC_CAP_NEEDS_POLL;
- ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL);
+ ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0);
if (ret == -EPROBE_DEFER)
return ret;
if (!host->dms)
return -ENOMEM;
- host->dms->ch = dma_request_slave_channel(host->dev, "rx-tx");
- if (!host->dms->ch) {
+ host->dms->ch = dma_request_chan(host->dev, "rx-tx");
+ if (IS_ERR(host->dms->ch)) {
+ int ret = PTR_ERR(host->dms->ch);
+
dev_err(host->dev, "Failed to get external DMA channel.\n");
kfree(host->dms);
host->dms = NULL;
- return -ENXIO;
+ return ret;
}
return 0;
static int jz4740_mmc_resume(struct device *dev)
{
- return pinctrl_pm_select_default_state(dev);
+ return pinctrl_select_default_state(dev);
}
static SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend,
bool dram_access_quirk;
struct pinctrl *pinctrl;
- struct pinctrl_state *pins_default;
struct pinctrl_state *pins_clk_gate;
unsigned int bounce_buf_size;
u32 cfg;
if (host->pins_clk_gate)
- pinctrl_select_state(host->pinctrl, host->pins_default);
+ pinctrl_select_default_state(host->dev);
/* Make sure the clock is not stopped in the controller */
cfg = readl(host->regs + SD_EMMC_CFG);
goto free_host;
}
- host->pins_default = pinctrl_lookup_state(host->pinctrl,
- PINCTRL_STATE_DEFAULT);
- if (IS_ERR(host->pins_default)) {
- ret = PTR_ERR(host->pins_default);
- goto free_host;
- }
-
host->pins_clk_gate = pinctrl_lookup_state(host->pinctrl,
"clk-gate");
if (IS_ERR(host->pins_clk_gate)) {
struct platform_device *slot_pdev;
struct mmc_host *mmc;
struct meson_mx_mmc_host *host;
- struct resource *res;
int ret, irq;
u32 conf;
platform_set_drvdata(pdev, host);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->base = devm_ioremap_resource(host->controller_dev, res);
+ host->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->base)) {
ret = PTR_ERR(host->base);
goto error_free_mmc;
* SPI protocol. Another is that when chipselect is released while
* the card returns BUSY status, the clock must issue several cycles
* with chipselect high before the card will stop driving its output.
+ *
+ * SPI_CS_HIGH means "asserted" here. In some cases like when using
+ * GPIOs for chip select, SPI_CS_HIGH is set but will be logically
+ * inverted by gpiolib, so to make sure the line is actually driven
+ * high we toggle the default with an XOR, as we do here.
*/
- host->spi->mode |= SPI_CS_HIGH;
+ host->spi->mode ^= SPI_CS_HIGH;
if (spi_setup(host->spi) != 0) {
/* Just warn; most cards work without it. */
dev_warn(&host->spi->dev,
"can't change chip-select polarity\n");
- host->spi->mode &= ~SPI_CS_HIGH;
+ host->spi->mode ^= SPI_CS_HIGH;
} else {
mmc_spi_readbytes(host, 18);
- host->spi->mode &= ~SPI_CS_HIGH;
+ host->spi->mode ^= SPI_CS_HIGH;
if (spi_setup(host->spi) != 0) {
/* Wot, we can't get the same setup we had before? */
dev_err(&host->spi->dev,
* Index 0 is card detect
* Old boardfiles were specifying 1 ms as debounce
*/
- status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1000, NULL);
+ status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1000);
if (status == -EPROBE_DEFER)
goto fail_add_host;
if (!status) {
mmc_detect_change(mmc, 0);
/* Index 1 is write protect/read only */
- status = mmc_gpiod_request_ro(mmc, NULL, 1, 0, NULL);
+ status = mmc_gpiod_request_ro(mmc, NULL, 1, 0);
if (status == -EPROBE_DEFER)
goto fail_add_host;
if (!status)
.cmdreg_srsp = MCI_CPSM_RESPONSE,
.datalength_bits = 24,
.datactrl_blocksz = 11,
+ .datactrl_any_blocksz = true,
+ .dma_power_of_2 = true,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.st_sdio = true,
.st_clkdiv = true,
.datactrl_mask_ddrmode = MCI_DPSM_ST_DDRMODE,
.datalength_bits = 24,
.datactrl_blocksz = 11,
+ .datactrl_any_blocksz = true,
+ .dma_power_of_2 = true,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.st_sdio = true,
.st_clkdiv = true,
.datacnt_useless = true,
.datalength_bits = 25,
.datactrl_blocksz = 14,
+ .datactrl_any_blocksz = true,
.stm32_idmabsize_mask = GENMASK(12, 5),
.busy_timeout = true,
.busy_detect = true,
.data_cmd_enable = MCI_CPSM_QCOM_DATCMD,
.datalength_bits = 24,
.datactrl_blocksz = 11,
+ .datactrl_any_blocksz = true,
.pwrreg_powerup = MCI_PWR_UP,
.f_max = 208000000,
.explicit_mclk_control = true,
static int mmci_validate_data(struct mmci_host *host,
struct mmc_data *data)
{
+ struct variant_data *variant = host->variant;
+
if (!data)
return 0;
-
- if (!is_power_of_2(data->blksz)) {
+ if (!is_power_of_2(data->blksz) && !variant->datactrl_any_blocksz) {
dev_err(mmc_dev(host->mmc),
"unsupported block size (%d bytes)\n", data->blksz);
return -EINVAL;
"Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
data->sg_len, data->blksz, data->blocks, data->flags);
- host->ops->dma_start(host, &datactrl);
+ ret = host->ops->dma_start(host, &datactrl);
+ if (ret)
+ return ret;
/* Trigger the DMA transfer */
mmci_write_datactrlreg(host, datactrl);
host->dma_priv = dmae;
- dmae->rx_channel = dma_request_slave_channel(mmc_dev(host->mmc),
- "rx");
- dmae->tx_channel = dma_request_slave_channel(mmc_dev(host->mmc),
- "tx");
+ dmae->rx_channel = dma_request_chan(mmc_dev(host->mmc), "rx");
+ if (IS_ERR(dmae->rx_channel)) {
+ int ret = PTR_ERR(dmae->rx_channel);
+ dmae->rx_channel = NULL;
+ return ret;
+ }
+
+ dmae->tx_channel = dma_request_chan(mmc_dev(host->mmc), "tx");
+ if (IS_ERR(dmae->tx_channel)) {
+ if (PTR_ERR(dmae->tx_channel) == -EPROBE_DEFER)
+ dev_warn(mmc_dev(host->mmc),
+ "Deferred probe for TX channel ignored\n");
+ dmae->tx_channel = NULL;
+ }
/*
* If only an RX channel is specified, the driver will
if (data->blksz * data->blocks <= variant->fifosize)
return -EINVAL;
+ /*
+ * This is necessary to get SDIO working on the Ux500. We do not yet
+ * know if this is a bug in:
+ * - The Ux500 DMA controller (DMA40)
+ * - The MMCI DMA interface on the Ux500
+ * Power-of-two block sizes (such as 64 bytes) are sent regularly
+ * during SDIO traffic and those work fine, so for those we enable
+ * DMA transfers.
+ */
+ if (host->variant->dma_power_of_2 && !is_power_of_2(data->blksz))
+ return -EINVAL;
+
device = chan->device;
nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len,
mmc_get_dma_dir(data));
int mmci_dmae_start(struct mmci_host *host, unsigned int *datactrl)
{
struct mmci_dmae_priv *dmae = host->dma_priv;
+ int ret;
host->dma_in_progress = true;
- dmaengine_submit(dmae->desc_current);
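+ /*
+ * dmaengine_submit() returns a cookie; dma_submit_error() converts
+ * a negative cookie into an error code.
+ */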
+ ret = dma_submit_error(dmaengine_submit(dmae->desc_current));
+ if (ret < 0) {
+ host->dma_in_progress = false;
+ return ret;
+ }
dma_async_issue_pending(dmae->cur);
*datactrl |= MCI_DPSM_DMAENABLE;
} else if (host->variant->busy_timeout && busy_resp &&
status & MCI_DATATIMEOUT) {
cmd->error = -ETIMEDOUT;
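+ /* Wake the threaded handler to reset the controller, see mmci_irq_thread(). */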
+ host->irq_action = IRQ_WAKE_THREAD;
} else {
cmd->resp[0] = readl(base + MMCIRESPONSE0);
cmd->resp[1] = readl(base + MMCIRESPONSE1);
return;
}
}
- mmci_request_end(host, host->mrq);
+
+ if (host->irq_action != IRQ_WAKE_THREAD)
+ mmci_request_end(host, host->mrq);
+
} else if (sbc) {
mmci_start_command(host, host->mrq->cmd, 0);
} else if (!host->variant->datactrl_first &&
{
struct mmci_host *host = dev_id;
u32 status;
- int ret = 0;
spin_lock(&host->lock);
+ host->irq_action = IRQ_HANDLED;
do {
status = readl(host->base + MMCISTATUS);
if (host->variant->busy_detect_flag)
status &= ~host->variant->busy_detect_flag;
- ret = 1;
} while (status);
spin_unlock(&host->lock);
- return IRQ_RETVAL(ret);
+ return host->irq_action;
+}
+
+/*
+ * mmci_irq_thread() - A threaded IRQ handler that manages a reset of the HW.
+ *
+ * A reset is needed for some variants, where a data timeout for an R1B
+ * request causes the DPSM to stay busy (non-functional).
+ */
+static irqreturn_t mmci_irq_thread(int irq, void *dev_id)
+{
+ struct mmci_host *host = dev_id;
+ unsigned long flags;
+
+ if (host->rst) {
+ reset_control_assert(host->rst);
+ udelay(2);
+ reset_control_deassert(host->rst);
+ }
+
+ spin_lock_irqsave(&host->lock, flags);
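+ /*
+ * The reset clears the controller registers; restore the clock,
+ * power and interrupt-mask settings from their cached values.
+ */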
+ writel(host->clk_reg, host->base + MMCICLOCK);
+ writel(host->pwr_reg, host->base + MMCIPOWER);
+ writel(MCI_IRQENABLE | host->variant->start_err,
+ host->base + MMCIMASK0);
+
+ host->irq_action = IRQ_HANDLED;
+ mmci_request_end(host, host->mrq);
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return host->irq_action;
}
static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
pinctrl_select_state(host->pinctrl, host->pins_opendrain);
else
- pinctrl_select_state(host->pinctrl, host->pins_default);
+ pinctrl_select_default_state(mmc_dev(mmc));
}
/*
goto host_free;
}
- host->pins_default = pinctrl_lookup_state(host->pinctrl,
- PINCTRL_STATE_DEFAULT);
- if (IS_ERR(host->pins_default)) {
- dev_err(mmc_dev(mmc), "Can't select default pins\n");
- ret = PTR_ERR(host->pins_default);
- goto host_free;
- }
-
host->pins_opendrain = pinctrl_lookup_state(host->pinctrl,
MMCI_PINCTRL_STATE_OPENDRAIN);
if (IS_ERR(host->pins_opendrain)) {
* silently if these do not exist
*/
if (!np) {
- ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
+ ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
if (ret == -EPROBE_DEFER)
goto clk_disable;
- ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL);
+ ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0);
if (ret == -EPROBE_DEFER)
goto clk_disable;
}
- ret = devm_request_irq(&dev->dev, dev->irq[0], mmci_irq, IRQF_SHARED,
- DRIVER_NAME " (cmd)", host);
+ ret = devm_request_threaded_irq(&dev->dev, dev->irq[0], mmci_irq,
+ mmci_irq_thread, IRQF_SHARED,
+ DRIVER_NAME " (cmd)", host);
if (ret)
goto clk_disable;
struct mmci_host *host = mmc_priv(mmc);
clk_prepare_enable(host->clk);
mmci_restore(host);
- pinctrl_pm_select_default_state(dev);
+ pinctrl_select_default_state(dev);
}
return 0;
* @stm32_clkdiv: true if using a STM32-specific clock divider algorithm
* @datactrl_mask_ddrmode: ddr mode mask in datactrl register.
* @datactrl_mask_sdio: SDIO enable mask in datactrl register
- * @datactrl_blksz: block size in power of two
+ * @datactrl_blocksz: block size in power of two
+ * @datactrl_any_blocksz: true if any block sizes are accepted by the
+ * hardware, such as with some SDIO traffic that
+ * sends odd-sized packets.
+ * @dma_power_of_2: DMA only works with blocks that are a power of 2.
* @datactrl_first: true if data must be setup before send command
* @datacnt_useless: true if you could not use datacnt register to read
* remaining data
unsigned int datactrl_mask_ddrmode;
unsigned int datactrl_mask_sdio;
unsigned int datactrl_blocksz;
+ u8 datactrl_any_blocksz:1;
+ u8 dma_power_of_2:1;
u8 datactrl_first:1;
u8 datacnt_useless:1;
u8 st_sdio:1;
struct mmci_host_ops *ops;
struct variant_data *variant;
struct pinctrl *pinctrl;
- struct pinctrl_state *pins_default;
struct pinctrl_state *pins_opendrain;
u8 hw_designer;
struct timer_list timer;
unsigned int oldstat;
+ u32 irq_action;
/* pio stuff */
struct sg_mapping_iter sg_miter;
if (ret)
goto host_free;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->base = devm_ioremap_resource(&pdev->dev, res);
+ host->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->base)) {
ret = PTR_ERR(host->base);
goto host_free;
struct mmc_host *mmc = NULL;
struct mvsd_host *host = NULL;
const struct mbus_dram_target_info *dram;
- struct resource *r;
int ret, irq;
if (!np) {
dev_err(&pdev->dev, "no DT node\n");
return -ENODEV;
}
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
- if (!r || irq < 0)
+ if (irq < 0)
return -ENXIO;
mmc = mmc_alloc_host(sizeof(struct mvsd_host), &pdev->dev);
spin_lock_init(&host->lock);
- host->base = devm_ioremap_resource(&pdev->dev, r);
+ host->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->base)) {
ret = PTR_ERR(host->base);
goto out;
mxcmci_writel(host, host->default_irq_mask, MMC_REG_INT_CNTR);
if (!host->pdata) {
- host->dma = dma_request_slave_channel(&pdev->dev, "rx-tx");
+ host->dma = dma_request_chan(&pdev->dev, "rx-tx");
+ if (IS_ERR(host->dma)) {
+ if (PTR_ERR(host->dma) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto out_clk_put;
+ }
+
+ /* Ignore errors to fall back to PIO mode */
+ host->dma = NULL;
+ }
} else {
res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
if (res) {
goto out_clk_disable;
}
- ssp->dmach = dma_request_slave_channel(&pdev->dev, "rx-tx");
- if (!ssp->dmach) {
+ ssp->dmach = dma_request_chan(&pdev->dev, "rx-tx");
+ if (IS_ERR(ssp->dmach)) {
dev_err(mmc_dev(host->mmc),
"%s: failed to request dma\n", __func__);
- ret = -ENODEV;
+ ret = PTR_ERR(ssp->dmach);
goto out_clk_disable;
}
ret = PTR_ERR(p);
goto err_free_irq;
}
- if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT))) {
- dev_info(host->dev, "missing default pinctrl state\n");
- devm_pinctrl_put(p);
- ret = -EINVAL;
- goto err_free_irq;
- }
if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_IDLE))) {
dev_info(host->dev, "missing idle pinctrl state\n");
if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
(host->flags & HSMMC_SDIO_IRQ_ENABLED)) {
- pinctrl_pm_select_default_state(host->dev);
+ pinctrl_select_default_state(host->dev);
/* irq lost, if pinmux incorrect */
OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
OMAP_HSMMC_WRITE(host->base, ISE, CIRQ_EN);
OMAP_HSMMC_WRITE(host->base, IE, CIRQ_EN);
} else {
- pinctrl_pm_select_default_state(host->dev);
+ pinctrl_select_default_state(host->dev);
}
spin_unlock_irqrestore(&host->irq_lock, flags);
return 0;
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- owl_host->dma = dma_request_slave_channel(&pdev->dev, "mmc");
- if (!owl_host->dma) {
+ owl_host->dma = dma_request_chan(&pdev->dev, "mmc");
+ if (IS_ERR(owl_host->dma)) {
dev_err(owl_host->dev, "Failed to get external DMA channel.\n");
- ret = -ENXIO;
+ ret = PTR_ERR(owl_host->dma);
goto err_free_host;
}
platform_set_drvdata(pdev, mmc);
- host->dma_chan_rx = dma_request_slave_channel(dev, "rx");
- if (host->dma_chan_rx == NULL) {
+ host->dma_chan_rx = dma_request_chan(dev, "rx");
+ if (IS_ERR(host->dma_chan_rx)) {
dev_err(dev, "unable to request rx dma channel\n");
- ret = -ENODEV;
+ ret = PTR_ERR(host->dma_chan_rx);
+ host->dma_chan_rx = NULL;
goto out;
}
- host->dma_chan_tx = dma_request_slave_channel(dev, "tx");
- if (host->dma_chan_tx == NULL) {
+ host->dma_chan_tx = dma_request_chan(dev, "tx");
+ if (IS_ERR(host->dma_chan_tx)) {
dev_err(dev, "unable to request tx dma channel\n");
- ret = -ENODEV;
+ ret = PTR_ERR(host->dma_chan_tx);
+ host->dma_chan_tx = NULL;
goto out;
}
}
/* FIXME: should we pass detection delay to debounce? */
- ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
+ ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
if (ret && ret != -ENOENT) {
dev_err(dev, "Failed requesting gpio_cd\n");
goto out;
}
- ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL);
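+ /*
+ * Declare the RO polarity before requesting the GPIO, as the
+ * slot-gpio core now applies MMC_CAP2_RO_ACTIVE_HIGH itself.
+ */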
+ if (!host->pdata->gpio_card_ro_invert)
+ mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+
+ ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0);
if (ret && ret != -ENOENT) {
dev_err(dev, "Failed requesting gpio_ro\n");
goto out;
}
- if (!ret) {
+ if (!ret)
host->use_ro_gpio = true;
- mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
- 0 : MMC_CAP2_RO_ACTIVE_HIGH;
- }
if (host->pdata->init)
host->pdata->init(dev, pxamci_detect_irq, mmc);
struct renesas_sdhi_scc {
unsigned long clk_rate; /* clock rate for SDR104 */
- u32 tap; /* sampling clock position for SDR104 */
- u32 tap_hs400; /* sampling clock position for HS400 */
+ u32 tap; /* sampling clock position for SDR104/HS400 (8 TAP) */
+ u32 tap_hs400_4tap; /* sampling clock position for HS400 (4 TAP) */
};
struct renesas_sdhi_of_data {
unsigned short max_segs;
};
+struct renesas_sdhi_quirks {
+ bool hs400_disabled;
+ bool hs400_4taps;
+};
+
struct tmio_mmc_dma {
enum dma_slave_buswidth dma_buswidth;
bool (*filter)(struct dma_chan *chan, void *arg);
struct clk *clk_cd;
struct tmio_mmc_data mmc_data;
struct tmio_mmc_dma dma_priv;
+ const struct renesas_sdhi_quirks *quirks;
struct pinctrl *pinctrl;
struct pinctrl_state *pins_default, *pins_uhs;
void __iomem *scc_ctl;
#define SDHI_VER_GEN3_SD 0xcc10
#define SDHI_VER_GEN3_SDMMC 0xcd10
-struct renesas_sdhi_quirks {
- bool hs400_disabled;
- bool hs400_4taps;
-};
-
static void renesas_sdhi_sdbuf_width(struct tmio_mmc_host *host, int width)
{
u32 val;
0x4 << SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_SHIFT);
- if (host->pdata->flags & TMIO_MMC_HAVE_4TAP_HS400)
+ if (priv->quirks && priv->quirks->hs400_4taps)
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET,
host->tap_set / 2);
static bool renesas_sdhi_check_scc_error(struct tmio_mmc_host *host)
{
struct renesas_sdhi *priv = host_to_priv(host);
- bool use_4tap = host->pdata->flags & TMIO_MMC_HAVE_4TAP_HS400;
+ bool use_4tap = priv->quirks && priv->quirks->hs400_4taps;
/*
* Skip checking SCC errors when running on 4 taps in HS400 mode as
};
static const struct soc_device_attribute sdhi_quirks_match[] = {
+ { .soc_id = "r8a774a1", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 },
{ .soc_id = "r8a7795", .revision = "ES1.*", .data = &sdhi_quirks_4tap_nohs400 },
{ .soc_id = "r8a7795", .revision = "ES2.0", .data = &sdhi_quirks_4tap },
{ .soc_id = "r8a7796", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 },
- { .soc_id = "r8a774a1", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 },
{ .soc_id = "r8a77980", .data = &sdhi_quirks_nohs400 },
{ /* Sentinel. */ },
};
if (!priv)
return -ENOMEM;
+ priv->quirks = quirks;
mmc_data = &priv->mmc_data;
dma_priv = &priv->dma_priv;
if (quirks && quirks->hs400_disabled)
host->mmc->caps2 &= ~(MMC_CAP2_HS400 | MMC_CAP2_HS400_ES);
- if (quirks && quirks->hs400_4taps)
- mmc_data->flags |= TMIO_MMC_HAVE_4TAP_HS400;
-
/* For some SoC, we disable internal WP. GPIO may override this */
if (mmc_can_gpio_ro(host->mmc))
mmc_data->capabilities2 &= ~MMC_CAP2_NO_WRITE_PROTECT;
host->mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR |
MMC_CAP2_HS400_1_8V))) {
const struct renesas_sdhi_scc *taps = of_data->taps;
+ bool use_4tap = priv->quirks && priv->quirks->hs400_4taps;
bool hit = false;
for (i = 0; i < of_data->taps_num; i++) {
if (taps[i].clk_rate == 0 ||
taps[i].clk_rate == host->mmc->f_max) {
priv->scc_tappos = taps->tap;
- priv->scc_tappos_hs400 = taps->tap_hs400;
+ priv->scc_tappos_hs400 = use_4tap ?
+ taps->tap_hs400_4tap :
+ taps->tap;
hit = true;
break;
}
}
if (!hit)
- dev_warn(&host->pdev->dev, "Unknown clock rate for SDR104\n");
+ dev_warn(&host->pdev->dev, "Unknown clock rate for tuning\n");
host->init_tuning = renesas_sdhi_init_tuning;
host->prepare_tuning = renesas_sdhi_prepare_tuning;
{
.clk_rate = 0,
.tap = 0x00000300,
- .tap_hs400 = 0x00000704,
+ .tap_hs400_4tap = 0x00000100,
},
};
- * Whitelist of specific R-Car Gen3 SoC ES versions to use this DMAC
- * implementation as others may use a different implementation.
+ * Quirks of specific R-Car Gen3 SoC ES versions for this DMAC
+ * implementation; SoCs not listed here need no special handling.
*/
-static const struct soc_device_attribute soc_whitelist[] = {
- /* specific ones */
+static const struct soc_device_attribute soc_dma_quirks[] = {
{ .soc_id = "r7s9210",
.data = (void *)BIT(SDHI_INTERNAL_DMAC_ADDR_MODE_FIXED_ONLY) },
{ .soc_id = "r8a7795", .revision = "ES1.*",
.data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) },
{ .soc_id = "r8a7796", .revision = "ES1.0",
.data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) },
- /* generic ones */
- { .soc_id = "r8a774a1" },
- { .soc_id = "r8a774b1" },
- { .soc_id = "r8a774c0" },
- { .soc_id = "r8a77470" },
- { .soc_id = "r8a7795" },
- { .soc_id = "r8a7796" },
- { .soc_id = "r8a77965" },
- { .soc_id = "r8a77970" },
- { .soc_id = "r8a77980" },
- { .soc_id = "r8a77990" },
- { .soc_id = "r8a77995" },
{ /* sentinel */ }
};
static int renesas_sdhi_internal_dmac_probe(struct platform_device *pdev)
{
- const struct soc_device_attribute *soc = soc_device_match(soc_whitelist);
+ const struct soc_device_attribute *soc = soc_device_match(soc_dma_quirks);
struct device *dev = &pdev->dev;
- if (!soc)
- return -ENODEV;
-
- global_flags |= (unsigned long)soc->data;
+ if (soc)
+ global_flags |= (unsigned long)soc->data;
dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL);
if (!dev->dma_parms)
mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
/* If we get -ENOENT we have no card detect GPIO line */
- ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
+ ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
if (ret != -ENOENT) {
dev_err(&pdev->dev, "error requesting GPIO for CD %d\n",
ret);
return ret;
}
- ret = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0, NULL);
+ ret = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0);
if (ret != -ENOENT) {
dev_err(&pdev->dev, "error requesting GPIO for WP %d\n",
ret);
if (sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD)) {
bool v = sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL);
- err = mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0, NULL);
+ err = mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0);
if (err) {
if (err == -EPROBE_DEFER)
goto err_free;
#include <linux/mmc/host.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
#include "sdhci-pltfm.h"
+#include "cqhci.h"
-static const struct sdhci_ops sdhci_brcmstb_ops = {
+#define SDHCI_VENDOR 0x78
+#define SDHCI_VENDOR_ENHANCED_STRB 0x1
+
+#define BRCMSTB_PRIV_FLAGS_NO_64BIT BIT(0)
+#define BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT BIT(1)
+
+#define SDHCI_ARASAN_CQE_BASE_ADDR 0x200
+
+struct sdhci_brcmstb_priv {
+ void __iomem *cfg_regs;
+ bool has_cqe;
+};
+
+struct brcmstb_match_priv {
+ void (*hs400es)(struct mmc_host *mmc, struct mmc_ios *ios);
+ struct sdhci_ops *ops;
+ unsigned int flags;
+};
+
+static void sdhci_brcmstb_hs400es(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ u32 reg;
+
+ dev_dbg(mmc_dev(mmc), "%s(): Setting HS400-Enhanced-Strobe mode\n",
+ __func__);
+ reg = readl(host->ioaddr + SDHCI_VENDOR);
+ if (ios->enhanced_strobe)
+ reg |= SDHCI_VENDOR_ENHANCED_STRB;
+ else
+ reg &= ~SDHCI_VENDOR_ENHANCED_STRB;
+ writel(reg, host->ioaddr + SDHCI_VENDOR);
+}
+
+static void sdhci_brcmstb_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ u16 clk;
+
+ host->mmc->actual_clock = 0;
+
+ clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+ if (clock == 0)
+ return;
+
+ sdhci_enable_clk(host, clk);
+}
+
+static void sdhci_brcmstb_set_uhs_signaling(struct sdhci_host *host,
+ unsigned int timing)
+{
+ u16 ctrl_2;
+
+ dev_dbg(mmc_dev(host->mmc), "%s: Setting UHS signaling for %d timing\n",
+ __func__, timing);
+ ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ /* Select Bus Speed Mode for host */
+ ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
+ if ((timing == MMC_TIMING_MMC_HS200) ||
+ (timing == MMC_TIMING_UHS_SDR104))
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
+ else if (timing == MMC_TIMING_UHS_SDR12)
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
+ else if (timing == MMC_TIMING_SD_HS ||
+ timing == MMC_TIMING_MMC_HS ||
+ timing == MMC_TIMING_UHS_SDR25)
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
+ else if (timing == MMC_TIMING_UHS_SDR50)
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
+ else if ((timing == MMC_TIMING_UHS_DDR50) ||
+ (timing == MMC_TIMING_MMC_DDR52))
+ ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
+ else if (timing == MMC_TIMING_MMC_HS400)
+ ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
+ sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
+}
+
+static void sdhci_brcmstb_dumpregs(struct mmc_host *mmc)
+{
+ sdhci_dumpregs(mmc_priv(mmc));
+}
+
+static void sdhci_brcmstb_cqe_enable(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ u32 reg;
+
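+ /* Drain any data left in the read buffer before enabling CQE. */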
+ reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
+ while (reg & SDHCI_DATA_AVAILABLE) {
+ sdhci_readl(host, SDHCI_BUFFER);
+ reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
+ }
+
+ sdhci_cqe_enable(mmc);
+}
+
+static const struct cqhci_host_ops sdhci_brcmstb_cqhci_ops = {
+ .enable = sdhci_brcmstb_cqe_enable,
+ .disable = sdhci_cqe_disable,
+ .dumpregs = sdhci_brcmstb_dumpregs,
+};
+
+static struct sdhci_ops sdhci_brcmstb_ops = {
.set_clock = sdhci_set_clock,
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
};
-static const struct sdhci_pltfm_data sdhci_brcmstb_pdata = {
+static struct sdhci_ops sdhci_brcmstb_ops_7216 = {
+ .set_clock = sdhci_brcmstb_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_brcmstb_set_uhs_signaling,
+};
+
+static struct brcmstb_match_priv match_priv_7425 = {
+ .flags = BRCMSTB_PRIV_FLAGS_NO_64BIT |
+ BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT,
.ops = &sdhci_brcmstb_ops,
};
+static struct brcmstb_match_priv match_priv_7445 = {
+ .flags = BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT,
+ .ops = &sdhci_brcmstb_ops,
+};
+
+static const struct brcmstb_match_priv match_priv_7216 = {
+ .hs400es = sdhci_brcmstb_hs400es,
+ .ops = &sdhci_brcmstb_ops_7216,
+};
+
+static const struct of_device_id sdhci_brcm_of_match[] = {
+ { .compatible = "brcm,bcm7425-sdhci", .data = &match_priv_7425 },
+ { .compatible = "brcm,bcm7445-sdhci", .data = &match_priv_7445 },
+ { .compatible = "brcm,bcm7216-sdhci", .data = &match_priv_7216 },
+ {},
+};
+
+static u32 sdhci_brcmstb_cqhci_irq(struct sdhci_host *host, u32 intmask)
+{
+ int cmd_error = 0;
+ int data_error = 0;
+
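+ /*
+ * sdhci_cqe_irq() returns true when the interrupt belongs to the
+ * CQE; otherwise hand the mask back for legacy SDHCI handling.
+ */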
+ if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
+ return intmask;
+
+ cqhci_irq(host->mmc, intmask, cmd_error, data_error);
+
+ return 0;
+}
+
+static int sdhci_brcmstb_add_host(struct sdhci_host *host,
+ struct sdhci_brcmstb_priv *priv)
+{
+ struct cqhci_host *cq_host;
+ bool dma64;
+ int ret;
+
+ if (!priv->has_cqe)
+ return sdhci_add_host(host);
+
+ dev_dbg(mmc_dev(host->mmc), "CQE is enabled\n");
+ host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
+ ret = sdhci_setup_host(host);
+ if (ret)
+ return ret;
+
+ cq_host = devm_kzalloc(mmc_dev(host->mmc),
+ sizeof(*cq_host), GFP_KERNEL);
+ if (!cq_host) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ cq_host->mmio = host->ioaddr + SDHCI_ARASAN_CQE_BASE_ADDR;
+ cq_host->ops = &sdhci_brcmstb_cqhci_ops;
+
+ dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
+ if (dma64) {
+ dev_dbg(mmc_dev(host->mmc), "Using 64 bit DMA\n");
+ cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
+ cq_host->quirks |= CQHCI_QUIRK_SHORT_TXFR_DESC_SZ;
+ }
+
+ ret = cqhci_init(cq_host, host->mmc, dma64);
+ if (ret)
+ goto cleanup;
+
+ ret = __sdhci_add_host(host);
+ if (ret)
+ goto cleanup;
+
+ return 0;
+
+cleanup:
+ sdhci_cleanup_host(host);
+ return ret;
+}
+
static int sdhci_brcmstb_probe(struct platform_device *pdev)
{
- struct sdhci_host *host;
+ const struct brcmstb_match_priv *match_priv;
+ struct sdhci_pltfm_data brcmstb_pdata;
struct sdhci_pltfm_host *pltfm_host;
+ const struct of_device_id *match;
+ struct sdhci_brcmstb_priv *priv;
+ struct sdhci_host *host;
+ struct resource *iomem;
+ bool has_cqe = false;
struct clk *clk;
int res;
+ match = of_match_node(sdhci_brcm_of_match, pdev->dev.of_node);
+ match_priv = match->data;
+
+ dev_dbg(&pdev->dev, "Probe found match for %s\n", match->compatible);
+
clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) {
+ if (PTR_ERR(clk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
dev_err(&pdev->dev, "Clock not found in Device Tree\n");
clk = NULL;
}
if (res)
return res;
- host = sdhci_pltfm_init(pdev, &sdhci_brcmstb_pdata, 0);
+ memset(&brcmstb_pdata, 0, sizeof(brcmstb_pdata));
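+ /*
+ * The DT "supports-cqe" property opts in to the CQE path and routes
+ * interrupts through the CQHCI dispatcher.
+ */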
+ if (device_property_read_bool(&pdev->dev, "supports-cqe")) {
+ has_cqe = true;
+ match_priv->ops->irq = sdhci_brcmstb_cqhci_irq;
+ }
+ brcmstb_pdata.ops = match_priv->ops;
+ host = sdhci_pltfm_init(pdev, &brcmstb_pdata,
+ sizeof(struct sdhci_brcmstb_priv));
if (IS_ERR(host)) {
res = PTR_ERR(host);
goto err_clk;
}
+ pltfm_host = sdhci_priv(host);
+ priv = sdhci_pltfm_priv(pltfm_host);
+ priv->has_cqe = has_cqe;
+
+ /* Map in the non-standard CFG registers */
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ priv->cfg_regs = devm_ioremap_resource(&pdev->dev, iomem);
+ if (IS_ERR(priv->cfg_regs)) {
+ res = PTR_ERR(priv->cfg_regs);
+ goto err;
+ }
+
sdhci_get_of_property(pdev);
res = mmc_of_parse(host->mmc);
if (res)
goto err;
+ /*
+ * If the chip supports enhanced strobe and it is enabled, hook up
+ * the callback.
+ */
+ if (match_priv->hs400es &&
+ (host->mmc->caps2 & MMC_CAP2_HS400_ES))
+ host->mmc_host_ops.hs400_enhanced_strobe = match_priv->hs400es;
+
/*
* Supply the existing CAPS, but clear the UHS modes. This
* will allow these modes to be specified by device tree
* properties through mmc_of_parse().
*/
host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
- if (of_device_is_compatible(pdev->dev.of_node, "brcm,bcm7425-sdhci"))
+ if (match_priv->flags & BRCMSTB_PRIV_FLAGS_NO_64BIT)
host->caps &= ~SDHCI_CAN_64BIT;
host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
- SDHCI_SUPPORT_DDR50);
- host->quirks |= SDHCI_QUIRK_MISSING_CAPS |
- SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+ SDHCI_SUPPORT_DDR50);
+ host->quirks |= SDHCI_QUIRK_MISSING_CAPS;
+
+ if (match_priv->flags & BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT)
+ host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
- res = sdhci_add_host(host);
+ res = sdhci_brcmstb_add_host(host, priv);
if (res)
goto err;
- pltfm_host = sdhci_priv(host);
pltfm_host->clk = clk;
return res;
return res;
}
-static const struct of_device_id sdhci_brcm_of_match[] = {
- { .compatible = "brcm,bcm7425-sdhci" },
- { .compatible = "brcm,bcm7445-sdhci" },
- {},
-};
+static void sdhci_brcmstb_shutdown(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = sdhci_pltfm_unregister(pdev);
+ if (ret)
+ dev_err(&pdev->dev, "failed to shutdown\n");
+}
+
MODULE_DEVICE_TABLE(of, sdhci_brcm_of_match);
static struct platform_driver sdhci_brcmstb_driver = {
},
.probe = sdhci_brcmstb_probe,
.remove = sdhci_pltfm_unregister,
+ .shutdown = sdhci_brcmstb_shutdown,
};
module_platform_driver(sdhci_brcmstb_driver);
return 0;
}
-static inline void *sdhci_cdns_priv(struct sdhci_host *host)
+static void *sdhci_cdns_priv(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct pltfm_imx_data {
u32 scratchpad;
struct pinctrl *pinctrl;
- struct pinctrl_state *pins_default;
struct pinctrl_state *pins_100mhz;
struct pinctrl_state *pins_200mhz;
const struct esdhc_soc_data *socdata;
dev_dbg(mmc_dev(host->mmc), "change pinctrl state for uhs %d\n", uhs);
if (IS_ERR(imx_data->pinctrl) ||
- IS_ERR(imx_data->pins_default) ||
IS_ERR(imx_data->pins_100mhz) ||
IS_ERR(imx_data->pins_200mhz))
return -EINVAL;
break;
default:
/* back to default state for other legacy timing */
- pinctrl = imx_data->pins_default;
+ return pinctrl_select_default_state(mmc_dev(host->mmc));
}
return pinctrl_select_state(imx_data->pinctrl, pinctrl);
mmc_of_parse_voltage(np, &host->ocr_mask);
- if (esdhc_is_usdhc(imx_data) && !IS_ERR(imx_data->pins_default)) {
+ if (esdhc_is_usdhc(imx_data)) {
imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl,
ESDHC_PINCTRL_STATE_100MHZ);
imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl,
host->mmc->parent->platform_data);
/* write_protect */
if (boarddata->wp_type == ESDHC_WP_GPIO) {
- err = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0, NULL);
+ host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+
+ err = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0);
if (err) {
dev_err(mmc_dev(host->mmc),
"failed to request write-protect gpio!\n");
return err;
}
- host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
}
/* card_detect */
switch (boarddata->cd_type) {
case ESDHC_CD_GPIO:
- err = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0, NULL);
+ err = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0);
if (err) {
dev_err(mmc_dev(host->mmc),
"failed to request card-detect gpio!\n");
goto disable_ahb_clk;
}
- imx_data->pins_default = pinctrl_lookup_state(imx_data->pinctrl,
- PINCTRL_STATE_DEFAULT);
- if (IS_ERR(imx_data->pins_default))
- dev_warn(mmc_dev(host->mmc), "could not get default state\n");
-
if (esdhc_is_usdhc(imx_data)) {
host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
host->mmc->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR;
{
struct sdhci_host *host;
struct device *dev = &pdev->dev;
- struct resource *res;
int irq, ret = 0;
struct f_sdhost_priv *priv;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "%s: no irq specified\n", __func__);
+ if (irq < 0)
return irq;
- }
host = sdhci_alloc_host(dev, sizeof(struct f_sdhost_priv));
if (IS_ERR(host))
host->ops = &sdhci_milbeaut_ops;
host->irq = irq;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ host->ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->ioaddr)) {
ret = PTR_ERR(host->ioaddr);
goto err;
#include <linux/regulator/consumer.h>
#include "sdhci-pltfm.h"
+#include "cqhci.h"
#define CORE_MCI_VERSION 0x50
#define CORE_VERSION_MAJOR_SHIFT 28
#define msm_host_writel(msm_host, val, host, offset) \
msm_host->var_ops->msm_writel_relaxed(val, host, offset)
+/* CQHCI vendor specific registers */
+#define CQHCI_VENDOR_CFG1 0xA00
+#define CQHCI_VENDOR_DIS_RST_ON_CQ_EN (0x3 << 13)
+
struct sdhci_msm_offset {
u32 core_hc_mode;
u32 core_mci_data_cnt;
__sdhci_msm_set_clock(host, clock);
}
+/*****************************************************************************\
+ * *
+ * MSM Command Queue Engine (CQE) *
+ * *
+\*****************************************************************************/
+
+static u32 sdhci_msm_cqe_irq(struct sdhci_host *host, u32 intmask)
+{
+ int cmd_error = 0;
+ int data_error = 0;
+
+ if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
+ return intmask;
+
+ cqhci_irq(host->mmc, intmask, cmd_error, data_error);
+ return 0;
+}
+
+static void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ unsigned long flags;
+ u32 ctrl;
+
+ /*
+ * When CQE is halted, the legacy SDHCI path operates only
+ * on 16-byte descriptors in 64-bit mode.
+ */
+ if (host->flags & SDHCI_USE_64_BIT_DMA)
+ host->desc_sz = 16;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ /*
+ * During CQE command transfers, the command complete bit gets
+ * latched. Software must clear the command complete interrupt status
+ * when CQE is halted or disabled; otherwise an unexpected SDHCI
+ * legacy interrupt is triggered when CQE is halted/disabled.
+ */
+ ctrl = sdhci_readl(host, SDHCI_INT_ENABLE);
+ ctrl |= SDHCI_INT_RESPONSE;
+ sdhci_writel(host, ctrl, SDHCI_INT_ENABLE);
+ sdhci_writel(host, SDHCI_INT_RESPONSE, SDHCI_INT_STATUS);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ sdhci_cqe_disable(mmc, recovery);
+}
+
+static const struct cqhci_host_ops sdhci_msm_cqhci_ops = {
+ .enable = sdhci_cqe_enable,
+ .disable = sdhci_msm_cqe_disable,
+};
+
+static int sdhci_msm_cqe_add_host(struct sdhci_host *host,
+ struct platform_device *pdev)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ struct cqhci_host *cq_host;
+ bool dma64;
+ u32 cqcfg;
+ int ret;
+
+ /*
+ * When CQE is halted, the SDHC operates only on 16-byte ADMA
+ * descriptors, so ensure the ADMA table is allocated for 16-byte
+ * descriptors.
+ */
+ if (host->caps & SDHCI_CAN_64BIT)
+ host->alloc_desc_sz = 16;
+
+ ret = sdhci_setup_host(host);
+ if (ret)
+ return ret;
+
+ cq_host = cqhci_pltfm_init(pdev);
+ if (IS_ERR(cq_host)) {
+ ret = PTR_ERR(cq_host);
+ dev_err(&pdev->dev, "cqhci-pltfm init: failed: %d\n", ret);
+ goto cleanup;
+ }
+
+ msm_host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
+ cq_host->ops = &sdhci_msm_cqhci_ops;
+
+ dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
+
+ ret = cqhci_init(cq_host, host->mmc, dma64);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: CQE init: failed (%d)\n",
+ mmc_hostname(host->mmc), ret);
+ goto cleanup;
+ }
+
+ /* Disable cqe reset due to cqe enable signal */
+ cqcfg = cqhci_readl(cq_host, CQHCI_VENDOR_CFG1);
+ cqcfg |= CQHCI_VENDOR_DIS_RST_ON_CQ_EN;
+ cqhci_writel(cq_host, cqcfg, CQHCI_VENDOR_CFG1);
+
+ /*
+ * The SDHC expects 12-byte ADMA descriptors until CQE is enabled.
+ * Limit desc_sz to 12 so that data commands sent during card
+ * initialization (before CQE gets enabled) execute without issue.
+ */
+ if (host->flags & SDHCI_USE_64_BIT_DMA)
+ host->desc_sz = 12;
+
+ ret = __sdhci_add_host(host);
+ if (ret)
+ goto cleanup;
+
+ dev_info(&pdev->dev, "%s: CQE init: success\n",
+ mmc_hostname(host->mmc));
+ return ret;
+
+cleanup:
+ sdhci_cleanup_host(host);
+ return ret;
+}
+
/*
* Platform specific register write functions. This is so that, if any
* register write needs to be followed up by platform specific actions,
.set_uhs_signaling = sdhci_msm_set_uhs_signaling,
.write_w = sdhci_msm_writew,
.write_b = sdhci_msm_writeb,
+ .irq = sdhci_msm_cqe_irq,
};
static const struct sdhci_pltfm_data sdhci_msm_pdata = {
struct sdhci_host *host;
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_msm_host *msm_host;
- struct resource *core_memres;
struct clk *clk;
int ret;
u16 host_version, core_minor;
u8 core_major;
const struct sdhci_msm_offset *msm_offset;
const struct sdhci_msm_variant_info *var_info;
+ struct device_node *node = pdev->dev.of_node;
host = sdhci_pltfm_init(pdev, &sdhci_msm_pdata, sizeof(*msm_host));
if (IS_ERR(host))
}
if (!msm_host->mci_removed) {
- core_memres = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- msm_host->core_mem = devm_ioremap_resource(&pdev->dev,
- core_memres);
-
+ msm_host->core_mem = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(msm_host->core_mem)) {
ret = PTR_ERR(msm_host->core_mem);
goto clk_disable;
pm_runtime_use_autosuspend(&pdev->dev);
host->mmc_host_ops.execute_tuning = sdhci_msm_execute_tuning;
- ret = sdhci_add_host(host);
+ if (of_property_read_bool(node, "supports-cqe"))
+ ret = sdhci_msm_cqe_add_host(host, pdev);
+ else
+ ret = sdhci_add_host(host);
if (ret)
goto pm_runtime_disable;
sdhci_msm_set_regulator_caps(msm_host);
#define SDHCI_AT91_PRESET_COMMON_CONF 0x400 /* drv type B, programmable clock mode */
+struct sdhci_at91_soc_data {
+ const struct sdhci_pltfm_data *pdata;
+ bool baseclk_is_generated_internally;
+ unsigned int divider_for_baseclk;
+};
+
struct sdhci_at91_priv {
+ const struct sdhci_at91_soc_data *soc_data;
struct clk *hclock;
struct clk *gck;
struct clk *mainck;
.set_power = sdhci_at91_set_power,
};
-static const struct sdhci_pltfm_data soc_data_sama5d2 = {
+static const struct sdhci_pltfm_data sdhci_sama5d2_pdata = {
.ops = &sdhci_at91_sama5d2_ops,
};
+static const struct sdhci_at91_soc_data soc_data_sama5d2 = {
+ .pdata = &sdhci_sama5d2_pdata,
+ .baseclk_is_generated_internally = false,
+};
+
+static const struct sdhci_at91_soc_data soc_data_sam9x60 = {
+ .pdata = &sdhci_sama5d2_pdata,
+ .baseclk_is_generated_internally = true,
+ .divider_for_baseclk = 2,
+};
+
static const struct of_device_id sdhci_at91_dt_match[] = {
{ .compatible = "atmel,sama5d2-sdhci", .data = &soc_data_sama5d2 },
+ { .compatible = "microchip,sam9x60-sdhci", .data = &soc_data_sam9x60 },
{}
};
MODULE_DEVICE_TABLE(of, sdhci_at91_dt_match);
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_at91_priv *priv = sdhci_pltfm_priv(pltfm_host);
- int ret;
unsigned int caps0, caps1;
unsigned int clk_base, clk_mul;
- unsigned int gck_rate, real_gck_rate;
+ unsigned int gck_rate, clk_base_rate;
unsigned int preset_div;
- /*
- * The mult clock is provided by as a generated clock by the PMC
- * controller. In order to set the rate of gck, we have to get the
- * base clock rate and the clock mult from capabilities.
- */
clk_prepare_enable(priv->hclock);
caps0 = readl(host->ioaddr + SDHCI_CAPABILITIES);
caps1 = readl(host->ioaddr + SDHCI_CAPABILITIES_1);
- clk_base = (caps0 & SDHCI_CLOCK_V3_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT;
- clk_mul = (caps1 & SDHCI_CLOCK_MUL_MASK) >> SDHCI_CLOCK_MUL_SHIFT;
- gck_rate = clk_base * 1000000 * (clk_mul + 1);
- ret = clk_set_rate(priv->gck, gck_rate);
- if (ret < 0) {
- dev_err(dev, "failed to set gck");
- clk_disable_unprepare(priv->hclock);
- return ret;
- }
- /*
- * We need to check if we have the requested rate for gck because in
- * some cases this rate could be not supported. If it happens, the rate
- * is the closest one gck can provide. We have to update the value
- * of clk mul.
- */
- real_gck_rate = clk_get_rate(priv->gck);
- if (real_gck_rate != gck_rate) {
- clk_mul = real_gck_rate / (clk_base * 1000000) - 1;
- caps1 &= (~SDHCI_CLOCK_MUL_MASK);
- caps1 |= ((clk_mul << SDHCI_CLOCK_MUL_SHIFT) &
- SDHCI_CLOCK_MUL_MASK);
- /* Set capabilities in r/w mode. */
- writel(SDMMC_CACR_KEY | SDMMC_CACR_CAPWREN,
- host->ioaddr + SDMMC_CACR);
- writel(caps1, host->ioaddr + SDHCI_CAPABILITIES_1);
- /* Set capabilities in ro mode. */
- writel(0, host->ioaddr + SDMMC_CACR);
- dev_info(dev, "update clk mul to %u as gck rate is %u Hz\n",
- clk_mul, real_gck_rate);
- }
+
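+ /*
+ * The base clock is either generated internally from gck using a
+ * fixed SoC-specific divider, or supplied externally via mainck.
+ */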
+ gck_rate = clk_get_rate(priv->gck);
+ if (priv->soc_data->baseclk_is_generated_internally)
+ clk_base_rate = gck_rate / priv->soc_data->divider_for_baseclk;
+ else
+ clk_base_rate = clk_get_rate(priv->mainck);
+
+ clk_base = clk_base_rate / 1000000;
+ clk_mul = gck_rate / clk_base_rate - 1;
+
+ caps0 &= ~SDHCI_CLOCK_V3_BASE_MASK;
+ caps0 |= (clk_base << SDHCI_CLOCK_BASE_SHIFT) & SDHCI_CLOCK_V3_BASE_MASK;
+ caps1 &= ~SDHCI_CLOCK_MUL_MASK;
+ caps1 |= (clk_mul << SDHCI_CLOCK_MUL_SHIFT) & SDHCI_CLOCK_MUL_MASK;
+ /* Set capabilities in r/w mode. */
+ writel(SDMMC_CACR_KEY | SDMMC_CACR_CAPWREN, host->ioaddr + SDMMC_CACR);
+ writel(caps0, host->ioaddr + SDHCI_CAPABILITIES);
+ writel(caps1, host->ioaddr + SDHCI_CAPABILITIES_1);
+ /* Set capabilities in ro mode. */
+ writel(0, host->ioaddr + SDMMC_CACR);
+
+ dev_info(dev, "update clk mul to %u as gck rate is %u Hz and clk base is %u Hz\n",
+ clk_mul, gck_rate, clk_base_rate);
/*
* We have to set preset values because it depends on the clk_mul
* maximum sd clock value is 120 MHz instead of 208 MHz. For that
* reason, we need to use presets to support SDR104.
*/
- preset_div = DIV_ROUND_UP(real_gck_rate, 24000000) - 1;
+ preset_div = DIV_ROUND_UP(gck_rate, 24000000) - 1;
writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
host->ioaddr + SDHCI_PRESET_FOR_SDR12);
- preset_div = DIV_ROUND_UP(real_gck_rate, 50000000) - 1;
+ preset_div = DIV_ROUND_UP(gck_rate, 50000000) - 1;
writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
host->ioaddr + SDHCI_PRESET_FOR_SDR25);
- preset_div = DIV_ROUND_UP(real_gck_rate, 100000000) - 1;
+ preset_div = DIV_ROUND_UP(gck_rate, 100000000) - 1;
writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
host->ioaddr + SDHCI_PRESET_FOR_SDR50);
- preset_div = DIV_ROUND_UP(real_gck_rate, 120000000) - 1;
+ preset_div = DIV_ROUND_UP(gck_rate, 120000000) - 1;
writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
host->ioaddr + SDHCI_PRESET_FOR_SDR104);
- preset_div = DIV_ROUND_UP(real_gck_rate, 50000000) - 1;
+ preset_div = DIV_ROUND_UP(gck_rate, 50000000) - 1;
writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
host->ioaddr + SDHCI_PRESET_FOR_DDR50);
static int sdhci_at91_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
- const struct sdhci_pltfm_data *soc_data;
+ const struct sdhci_at91_soc_data *soc_data;
struct sdhci_host *host;
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_at91_priv *priv;
return -EINVAL;
soc_data = match->data;
- host = sdhci_pltfm_init(pdev, soc_data, sizeof(*priv));
+ host = sdhci_pltfm_init(pdev, soc_data->pdata, sizeof(*priv));
if (IS_ERR(host))
return PTR_ERR(host);
pltfm_host = sdhci_priv(host);
priv = sdhci_pltfm_priv(pltfm_host);
+ priv->soc_data = soc_data;
priv->mainck = devm_clk_get(&pdev->dev, "baseclk");
if (IS_ERR(priv->mainck)) {
- dev_err(&pdev->dev, "failed to get baseclk\n");
- return PTR_ERR(priv->mainck);
+ if (soc_data->baseclk_is_generated_internally) {
+ priv->mainck = NULL;
+ } else {
+ dev_err(&pdev->dev, "failed to get baseclk\n");
+ ret = PTR_ERR(priv->mainck);
+ goto sdhci_pltfm_free;
+ }
}
priv->hclock = devm_clk_get(&pdev->dev, "hclock");
if (IS_ERR(priv->hclock)) {
dev_err(&pdev->dev, "failed to get hclock\n");
- return PTR_ERR(priv->hclock);
+ ret = PTR_ERR(priv->hclock);
+ goto sdhci_pltfm_free;
}
priv->gck = devm_clk_get(&pdev->dev, "multclk");
if (IS_ERR(priv->gck)) {
dev_err(&pdev->dev, "failed to get multclk\n");
- return PTR_ERR(priv->gck);
+ ret = PTR_ERR(priv->gck);
+ goto sdhci_pltfm_free;
}
ret = sdhci_at91_set_clks_presets(&pdev->dev);
u16 ret;
int shift = (spec_reg & 0x2) * 8;
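+ /*
+ * SDHCI_TRANSFER_MODE is not read back from the hardware; return
+ * the shadow copy maintained by the write path instead.
+ */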
+ if (spec_reg == SDHCI_TRANSFER_MODE)
+ return pltfm_host->xfer_mode_shadow;
+
if (spec_reg == SDHCI_HOST_VERSION)
ret = value & 0xffff;
else
static void esdhc_clock_enable(struct sdhci_host *host, bool enable)
{
- u32 val;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
ktime_t timeout;
+ u32 val, clk_en;
+
+ clk_en = ESDHC_CLOCK_SDCLKEN;
+
+ /*
+ * IPGEN/HCKEN/PEREN bits exist on eSDHC whose vendor version
+ * is 2.2 or lower.
+ */
+ if (esdhc->vendor_ver <= VENDOR_V_22)
+ clk_en |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN |
+ ESDHC_CLOCK_PEREN);
val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
if (enable)
- val |= ESDHC_CLOCK_SDCLKEN;
+ val |= clk_en;
else
- val &= ~ESDHC_CLOCK_SDCLKEN;
+ val &= ~clk_en;
sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL);
- /* Wait max 20 ms */
+ /*
+ * Wait max 20 ms. If the vendor version is 2.2 or lower, do not
+ * wait for the clock-stable bit, which does not exist there.
+ */
timeout = ktime_add_ms(ktime_get(), 20);
- val = ESDHC_CLOCK_STABLE;
- while (1) {
+ while (esdhc->vendor_ver > VENDOR_V_22) {
bool timedout = ktime_after(ktime_get(), timeout);
- if (sdhci_readl(host, ESDHC_PRSSTAT) & val)
+ if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
break;
if (timedout) {
pr_err("%s: Internal clock never stabilised.\n",
mmc_hostname(host->mmc));
break;
}
- udelay(10);
+ usleep_range(10, 20);
}
}
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
- int pre_div = 1;
- int div = 1;
- int division;
+ unsigned int pre_div = 1, div = 1;
+ unsigned int clock_fixup = 0;
ktime_t timeout;
- long fixup = 0;
u32 temp;
- host->mmc->actual_clock = 0;
-
if (clock == 0) {
+ host->mmc->actual_clock = 0;
esdhc_clock_enable(host, false);
return;
}
- /* Workaround to start pre_div at 2 for VNN < VENDOR_V_23 */
+ /* Start pre_div at 2 for vendor version < 2.3. */
if (esdhc->vendor_ver < VENDOR_V_23)
pre_div = 2;
+ /* Apply any platform limit (fixup) to the requested clock. */
if (host->mmc->card && mmc_card_sd(host->mmc->card) &&
- esdhc->clk_fixup && host->mmc->ios.timing == MMC_TIMING_LEGACY)
- fixup = esdhc->clk_fixup->sd_dflt_max_clk;
+ esdhc->clk_fixup && host->mmc->ios.timing == MMC_TIMING_LEGACY)
+ clock_fixup = esdhc->clk_fixup->sd_dflt_max_clk;
else if (esdhc->clk_fixup)
- fixup = esdhc->clk_fixup->max_clk[host->mmc->ios.timing];
-
- if (fixup && clock > fixup)
- clock = fixup;
+ clock_fixup = esdhc->clk_fixup->max_clk[host->mmc->ios.timing];
- temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
- temp &= ~(ESDHC_CLOCK_SDCLKEN | ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN |
- ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK);
- sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
+ if (clock_fixup == 0 || clock < clock_fixup)
+ clock_fixup = clock;
- while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
+ /* Calculate pre_div and div. */
+ while (host->max_clk / pre_div / 16 > clock_fixup && pre_div < 256)
pre_div *= 2;
- while (host->max_clk / pre_div / div > clock && div < 16)
+ while (host->max_clk / pre_div / div > clock_fixup && div < 16)
div++;
+ esdhc->div_ratio = pre_div * div;
+
+ /* For the limited-clock-division quirk, constrain the divider for the HS400 200 MHz clock. */
if (esdhc->quirk_limited_clk_division &&
clock == MMC_HS200_MAX_DTR &&
(host->mmc->ios.timing == MMC_TIMING_MMC_HS400 ||
host->flags & SDHCI_HS400_TUNING)) {
- division = pre_div * div;
- if (division <= 4) {
+ if (esdhc->div_ratio <= 4) {
pre_div = 4;
div = 1;
- } else if (division <= 8) {
+ } else if (esdhc->div_ratio <= 8) {
pre_div = 4;
div = 2;
- } else if (division <= 12) {
+ } else if (esdhc->div_ratio <= 12) {
pre_div = 4;
div = 3;
} else {
pr_warn("%s: using unsupported clock division.\n",
mmc_hostname(host->mmc));
}
+ esdhc->div_ratio = pre_div * div;
}
+ host->mmc->actual_clock = host->max_clk / esdhc->div_ratio;
+
dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
- clock, host->max_clk / pre_div / div);
- host->mmc->actual_clock = host->max_clk / pre_div / div;
- esdhc->div_ratio = pre_div * div;
+ clock, host->mmc->actual_clock);
+
+ /* Set clock division into register. */
pre_div >>= 1;
div--;
+ esdhc_clock_enable(host, false);
+
temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
- temp |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
- | (div << ESDHC_DIVIDER_SHIFT)
- | (pre_div << ESDHC_PREDIV_SHIFT));
+ temp &= ~ESDHC_CLOCK_MASK;
+ temp |= ((div << ESDHC_DIVIDER_SHIFT) |
+ (pre_div << ESDHC_PREDIV_SHIFT));
sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
+ /*
+ * Wait max 20 ms. If the vendor version is 2.2 or lower, do not
+ * wait for the clock-stable bit, which does not exist there.
+ */
+ timeout = ktime_add_ms(ktime_get(), 20);
+ while (esdhc->vendor_ver > VENDOR_V_22) {
+ bool timedout = ktime_after(ktime_get(), timeout);
+
+ if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
+ break;
+ if (timedout) {
+ pr_err("%s: Internal clock never stabilised.\n",
+ mmc_hostname(host->mmc));
+ break;
+ }
+ usleep_range(10, 20);
+ }
+
+ /* Additional setting for HS400. */
if (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 &&
clock == MMC_HS200_MAX_DTR) {
temp = sdhci_readl(host, ESDHC_TBCTL);
esdhc_clock_enable(host, false);
esdhc_flush_async_fifo(host);
}
-
- /* Wait max 20 ms */
- timeout = ktime_add_ms(ktime_get(), 20);
- while (1) {
- bool timedout = ktime_after(ktime_get(), timeout);
-
- if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
- break;
- if (timedout) {
- pr_err("%s: Internal clock never stabilised.\n",
- mmc_hostname(host->mmc));
- return;
- }
- udelay(10);
- }
-
- temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
- temp |= ESDHC_CLOCK_SDCLKEN;
- sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
+ esdhc_clock_enable(host, true);
}
static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
- u32 val;
+ u32 val, bus_width = 0;
+ /*
+ * For the delay-before-data-reset quirk, add a delay to make sure
+ * all DMA transfers have finished.
+ */
if (esdhc->quirk_delay_before_data_reset &&
(mask & SDHCI_RESET_DATA) &&
(host->flags & SDHCI_REQ_USE_DMA))
mdelay(5);
+ /*
+ * For a data reset on eSDHC with vendor version 2.2 or lower,
+ * save the bus-width setting so it can be restored afterwards.
+ */
+ if ((mask & SDHCI_RESET_DATA) &&
+ (esdhc->vendor_ver <= VENDOR_V_22)) {
+ val = sdhci_readl(host, ESDHC_PROCTL);
+ bus_width = val & ESDHC_CTRL_BUSWIDTH_MASK;
+ }
+
sdhci_reset(host, mask);
- sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
- sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+ /*
+ * After a data reset on eSDHC with vendor version 2.2 or lower,
+ * restore the bus-width setting and the interrupt registers.
+ */
+ if ((mask & SDHCI_RESET_DATA) &&
+ (esdhc->vendor_ver <= VENDOR_V_22)) {
+ val = sdhci_readl(host, ESDHC_PROCTL);
+ val &= ~ESDHC_CTRL_BUSWIDTH_MASK;
+ val |= bus_width;
+ sdhci_writel(host, val, ESDHC_PROCTL);
- if (mask & SDHCI_RESET_ALL) {
+ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+ }
+
+ /*
+ * On a full reset, some bits have to be cleared manually on eSDHC
+ * with spec version 3.0 or higher.
+ */
+ if ((mask & SDHCI_RESET_ALL) &&
+ (esdhc->spec_ver >= SDHCI_SPEC_300)) {
val = sdhci_readl(host, ESDHC_TBCTL);
val &= ~ESDHC_TB_EN;
sdhci_writel(host, val, ESDHC_TBCTL);
+ /*
+ * For the unreliable-pulse-detection quirk, initialize
+ * eSDHC_DLLCFG1[DLL_PD_PULSE_STRETCH_SEL] to 0.
+ */
if (esdhc->quirk_unreliable_pulse_detection) {
val = sdhci_readl(host, ESDHC_DLLCFG1);
val &= ~ESDHC_DLL_PD_PULSE_STRETCH_SEL;
}
static struct soc_device_attribute soc_tuning_erratum_type1[] = {
- { .family = "QorIQ T1023", .revision = "1.0", },
- { .family = "QorIQ T1040", .revision = "1.0", },
- { .family = "QorIQ T2080", .revision = "1.0", },
- { .family = "QorIQ LS1021A", .revision = "1.0", },
+ { .family = "QorIQ T1023", },
+ { .family = "QorIQ T1040", },
+ { .family = "QorIQ T2080", },
+ { .family = "QorIQ LS1021A", },
{ },
};
static struct soc_device_attribute soc_tuning_erratum_type2[] = {
- { .family = "QorIQ LS1012A", .revision = "1.0", },
- { .family = "QorIQ LS1043A", .revision = "1.*", },
- { .family = "QorIQ LS1046A", .revision = "1.0", },
- { .family = "QorIQ LS1080A", .revision = "1.0", },
- { .family = "QorIQ LS2080A", .revision = "1.0", },
- { .family = "QorIQ LA1575A", .revision = "1.0", },
+ { .family = "QorIQ LS1012A", },
+ { .family = "QorIQ LS1043A", },
+ { .family = "QorIQ LS1046A", },
+ { .family = "QorIQ LS1080A", },
+ { .family = "QorIQ LS2080A", },
+ { .family = "QorIQ LA1575A", },
{ },
};
esdhc_clock_enable(host, true);
}
-static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start,
+static void esdhc_tuning_window_ptr(struct sdhci_host *host, u8 *window_start,
u8 *window_end)
{
- struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
- u8 tbstat_15_8, tbstat_7_0;
u32 val;
- if (esdhc->quirk_tuning_erratum_type1) {
- *window_start = 5 * esdhc->div_ratio;
- *window_end = 3 * esdhc->div_ratio;
- return;
- }
-
/* Write TBCTL[11:8]=4'h8 */
val = sdhci_readl(host, ESDHC_TBCTL);
val &= ~(0xf << 8);
val = sdhci_readl(host, ESDHC_TBSTAT);
val = sdhci_readl(host, ESDHC_TBSTAT);
+ *window_end = val & 0xff;
+ *window_start = (val >> 8) & 0xff;
+}
+
+static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start,
+ u8 *window_end)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
+ u8 start_ptr, end_ptr;
+
+ if (esdhc->quirk_tuning_erratum_type1) {
+ *window_start = 5 * esdhc->div_ratio;
+ *window_end = 3 * esdhc->div_ratio;
+ return;
+ }
+
+ esdhc_tuning_window_ptr(host, &start_ptr, &end_ptr);
+
/* Reset data lines by setting ESDHCCTL[RSTD] */
sdhci_reset(host, SDHCI_RESET_DATA);
/* Write 32'hFFFF_FFFF to IRQSTAT register */
sdhci_writel(host, 0xFFFFFFFF, SDHCI_INT_STATUS);
- /* If TBSTAT[15:8]-TBSTAT[7:0] > 4 * div_ratio
- * or TBSTAT[7:0]-TBSTAT[15:8] > 4 * div_ratio,
+ /* If TBSTAT[15:8]-TBSTAT[7:0] > (4 * div_ratio) + 2
+ * or TBSTAT[7:0]-TBSTAT[15:8] > (4 * div_ratio) + 2,
* then program TBPTR[TB_WNDW_END_PTR] = 4 * div_ratio
* and program TBPTR[TB_WNDW_START_PTR] = 8 * div_ratio.
*/
- tbstat_7_0 = val & 0xff;
- tbstat_15_8 = (val >> 8) & 0xff;
- if (abs(tbstat_15_8 - tbstat_7_0) > (4 * esdhc->div_ratio)) {
+ if (abs(start_ptr - end_ptr) > (4 * esdhc->div_ratio + 2)) {
*window_start = 8 * esdhc->div_ratio;
*window_end = 4 * esdhc->div_ratio;
} else {
if (ret)
break;
+ /* For platforms affected by the type2 tuning erratum,
+ * tuning may succeed although eSDHC might not have
+ * tuned properly, so the tuning window needs to be checked.
+ */
+ if (esdhc->quirk_tuning_erratum_type2 &&
+ !host->tuning_err) {
+ esdhc_tuning_window_ptr(host, &window_start,
+ &window_end);
+ if (abs(window_start - window_end) >
+ (4 * esdhc->div_ratio + 2))
+ host->tuning_err = -EAGAIN;
+ }
+
/* If HW tuning fails and triggers erratum,
* try workaround.
*/
* 1/2 peripheral clock.
*/
if (of_device_is_compatible(np, "fsl,ls1046a-esdhc") ||
- of_device_is_compatible(np, "fsl,ls1028a-esdhc"))
+ of_device_is_compatible(np, "fsl,ls1028a-esdhc") ||
+ of_device_is_compatible(np, "fsl,ls1088a-esdhc"))
esdhc->peripheral_clock = clk_get_rate(clk) / 2;
else
esdhc->peripheral_clock = clk_get_rate(clk);
*/
#include <linux/delay.h>
+#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/of.h>
/* sdhci-omap controller flags */
#define SDHCI_OMAP_REQUIRE_IODELAY BIT(0)
+#define SDHCI_OMAP_SPECIAL_RESET BIT(1)
struct sdhci_omap_data {
u32 offset;
struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
- reg |= CON_DMA_MASTER;
+ reg &= ~CON_DMA_MASTER;
+ /* Switch to DMA slave mode when using external DMA */
+ if (!host->use_external_dma)
+ reg |= CON_DMA_MASTER;
+
sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg);
return 0;
sdhci_omap_start_clock(omap_host);
}
+#define MMC_TIMEOUT_US 20000 /* 20000 microseconds */
static void sdhci_omap_reset(struct sdhci_host *host, u8 mask)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
+ unsigned long limit = MMC_TIMEOUT_US;
+ unsigned long i = 0;
/* Don't reset data lines during tuning operation */
if (omap_host->is_tuning)
mask &= ~SDHCI_RESET_DATA;
+ if (omap_host->flags & SDHCI_OMAP_SPECIAL_RESET) {
+ sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
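+ /*
+ * The reset bit may not read back as set immediately after the
+ * write, so wait for it to assert before polling for it to clear.
+ */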
+ while ((!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask)) &&
+ (i++ < limit))
+ udelay(1);
+ i = 0;
+ while ((sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) &&
+ (i++ < limit))
+ udelay(1);
+
+ if (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask)
+ dev_err(mmc_dev(host->mmc),
+ "Timeout waiting on controller reset in %s\n",
+ __func__);
+ return;
+ }
+
sdhci_reset(host, mask);
}
return intmask;
}
+static void sdhci_omap_set_timeout(struct sdhci_host *host,
+ struct mmc_command *cmd)
+{
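+ /*
+ * Erase commands can take longer than the maximum hardware data
+ * timeout, so disable the data timeout interrupt and rely on the
+ * software timeout instead.
+ */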
+ if (cmd->opcode == MMC_ERASE)
+ sdhci_set_data_timeout_irq(host, false);
+
+ __sdhci_set_timeout(host, cmd);
+}
+
static struct sdhci_ops sdhci_omap_ops = {
.set_clock = sdhci_omap_set_clock,
.set_power = sdhci_omap_set_power,
.reset = sdhci_omap_reset,
.set_uhs_signaling = sdhci_omap_set_uhs_signaling,
.irq = sdhci_omap_irq,
+ .set_timeout = sdhci_omap_set_timeout,
};
static int sdhci_omap_set_capabilities(struct sdhci_omap_host *omap_host)
.offset = 0x200,
};
+static const struct sdhci_omap_data am335_data = {
+ .offset = 0x200,
+ .flags = SDHCI_OMAP_SPECIAL_RESET,
+};
+
+static const struct sdhci_omap_data am437_data = {
+ .offset = 0x200,
+ .flags = SDHCI_OMAP_SPECIAL_RESET,
+};
+
static const struct sdhci_omap_data dra7_data = {
.offset = 0x200,
.flags = SDHCI_OMAP_REQUIRE_IODELAY,
static const struct of_device_id omap_sdhci_match[] = {
{ .compatible = "ti,dra7-sdhci", .data = &dra7_data },
{ .compatible = "ti,k2g-sdhci", .data = &k2g_data },
+ { .compatible = "ti,am335-sdhci", .data = &am335_data },
+ { .compatible = "ti,am437-sdhci", .data = &am437_data },
{},
};
MODULE_DEVICE_TABLE(of, omap_sdhci_match);
const struct of_device_id *match;
struct sdhci_omap_data *data;
const struct soc_device_attribute *soc;
+ struct resource *regs;
match = of_match_device(omap_sdhci_match, dev);
if (!match)
}
offset = data->offset;
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs)
+ return -ENXIO;
+
host = sdhci_pltfm_init(pdev, &sdhci_omap_pdata,
sizeof(*omap_host));
if (IS_ERR(host)) {
omap_host->timing = MMC_TIMING_LEGACY;
omap_host->flags = data->flags;
host->ioaddr += offset;
+ host->mapbase = regs->start + offset;
mmc = host->mmc;
sdhci_get_of_property(pdev);
host->mmc_host_ops.execute_tuning = sdhci_omap_execute_tuning;
host->mmc_host_ops.enable_sdio_irq = sdhci_omap_enable_sdio_irq;
+ /* Switch to external DMA only if the "dmas" property is present */
+ if (of_find_property(dev->of_node, "dmas", NULL))
+ sdhci_switch_external_dma(host, true);
+
ret = sdhci_setup_host(host);
if (ret)
goto err_put_sync;
if (slot->cd_idx >= 0) {
ret = mmc_gpiod_request_cd(host->mmc, "cd", slot->cd_idx,
- slot->cd_override_level, 0, NULL);
+ slot->cd_override_level, 0);
if (ret && ret != -EPROBE_DEFER)
ret = mmc_gpiod_request_cd(host->mmc, NULL,
slot->cd_idx,
slot->cd_override_level,
- 0, NULL);
+ 0);
if (ret == -EPROBE_DEFER)
goto remove;
struct s3c_sdhci_platdata *pdata;
int cur_clk;
int ext_cd_irq;
- int ext_cd_gpio;
struct clk *clk_io;
struct clk *clk_bus[MAX_BUS_CLK];
struct device *dev = &pdev->dev;
struct sdhci_host *host;
struct sdhci_s3c *sc;
- struct resource *res;
int ret, irq, ptr, clks;
if (!pdev->dev.platform_data && !pdev->dev.of_node) {
goto err_pdata_io_clk;
} else {
memcpy(pdata, pdev->dev.platform_data, sizeof(*pdata));
- sc->ext_cd_gpio = -1; /* invalid gpio number */
}
drv_data = sdhci_s3c_get_driver_data(pdev);
goto err_no_busclks;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ host->ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->ioaddr)) {
ret = PTR_ERR(host->ioaddr);
goto err_req_regs;
* We must request the IRQ after sdhci_add_host(), as the tasklet only
* gets setup in sdhci_add_host() and we oops.
*/
- ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0, NULL);
+ ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0);
if (ret == -EPROBE_DEFER)
goto err_request_cd;
if (!ret)
static int sdhci_probe(struct platform_device *pdev)
{
struct sdhci_host *host;
- struct resource *iomem;
struct spear_sdhci *sdhci;
struct device *dev;
int ret;
goto err;
}
- iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->ioaddr = devm_ioremap_resource(&pdev->dev, iomem);
+ host->ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->ioaddr)) {
ret = PTR_ERR(host->ioaddr);
dev_dbg(&pdev->dev, "unable to map iomem: %d\n", ret);
* It is optional to use GPIOs for sdhci card detection. If we
* find a descriptor using slot GPIO, we use it.
*/
- ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0, NULL);
+ ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0);
if (ret == -EPROBE_DEFER)
goto disable_clk;
misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
- if (soc_data->nvquirks & SDHCI_MISC_CTRL_ENABLE_SDR50)
+ if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE;
}
*/
#include <linux/delay.h>
+#include <linux/dmaengine.h>
#include <linux/ktime.h>
#include <linux/highmem.h>
#include <linux/io.h>
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
-static void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
+void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
{
if (enable)
host->ier |= SDHCI_INT_DATA_TIMEOUT;
sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
+EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq);
-static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
+void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
- u8 count;
-
- if (host->ops->set_timeout) {
- host->ops->set_timeout(host, cmd);
- } else {
- bool too_big = false;
-
- count = sdhci_calc_timeout(host, cmd, &too_big);
-
- if (too_big &&
- host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
- sdhci_calc_sw_timeout(host, cmd);
- sdhci_set_data_timeout_irq(host, false);
- } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
- sdhci_set_data_timeout_irq(host, true);
- }
-
- sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
+ bool too_big = false;
+ u8 count = sdhci_calc_timeout(host, cmd, &too_big);
+
+ if (too_big &&
+ host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
+ sdhci_calc_sw_timeout(host, cmd);
+ sdhci_set_data_timeout_irq(host, false);
+ } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
+ sdhci_set_data_timeout_irq(host, true);
}
+
+ sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
}
+EXPORT_SYMBOL_GPL(__sdhci_set_timeout);
-static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
+static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
- struct mmc_data *data = cmd->data;
-
- host->data_timeout = 0;
-
- if (sdhci_data_line_cmd(cmd))
- sdhci_set_timeout(host, cmd);
-
- if (!data)
- return;
+ if (host->ops->set_timeout)
+ host->ops->set_timeout(host, cmd);
+ else
+ __sdhci_set_timeout(host, cmd);
+}
+static void sdhci_initialize_data(struct sdhci_host *host,
+ struct mmc_data *data)
+{
WARN_ON(host->data);
/* Sanity checks */
host->data = data;
host->data_early = 0;
host->data->bytes_xfered = 0;
+}
+
+static inline void sdhci_set_block_info(struct sdhci_host *host,
+ struct mmc_data *data)
+{
+ /* Set the DMA boundary value and block size */
+ sdhci_writew(host,
+ SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
+ SDHCI_BLOCK_SIZE);
+ /*
+ * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count
+ * can be supported, in that case 16-bit block count register must be 0.
+ */
+ if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
+ (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) {
+ if (sdhci_readw(host, SDHCI_BLOCK_COUNT))
+ sdhci_writew(host, 0, SDHCI_BLOCK_COUNT);
+ sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT);
+ } else {
+ sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
+ }
+}
+
+static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
+{
+ struct mmc_data *data = cmd->data;
+
+ sdhci_initialize_data(host, data);
if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
struct scatterlist *sg;
sdhci_set_transfer_irqs(host);
- /* Set the DMA boundary value and block size */
- sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
- SDHCI_BLOCK_SIZE);
+ sdhci_set_block_info(host, data);
+}
- /*
- * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count
- * can be supported, in that case 16-bit block count register must be 0.
- */
- if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
- (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) {
- if (sdhci_readw(host, SDHCI_BLOCK_COUNT))
- sdhci_writew(host, 0, SDHCI_BLOCK_COUNT);
- sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT);
+#if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA)
+
+static int sdhci_external_dma_init(struct sdhci_host *host)
+{
+ int ret = 0;
+ struct mmc_host *mmc = host->mmc;
+
+ host->tx_chan = dma_request_chan(mmc->parent, "tx");
+ if (IS_ERR(host->tx_chan)) {
+ ret = PTR_ERR(host->tx_chan);
+ if (ret != -EPROBE_DEFER)
+ pr_warn("Failed to request TX DMA channel.\n");
+ host->tx_chan = NULL;
+ return ret;
+ }
+
+ host->rx_chan = dma_request_chan(mmc->parent, "rx");
+ if (IS_ERR(host->rx_chan)) {
+ if (host->tx_chan) {
+ dma_release_channel(host->tx_chan);
+ host->tx_chan = NULL;
+ }
+
+ ret = PTR_ERR(host->rx_chan);
+ if (ret != -EPROBE_DEFER)
+ pr_warn("Failed to request RX DMA channel.\n");
+ host->rx_chan = NULL;
+ }
+
+ return ret;
+}
+
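+/* Pick the DMA channel that matches the direction of this transfer. */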
+static struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
+ struct mmc_data *data)
+{
+ return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan;
+}
+
+static int sdhci_external_dma_setup(struct sdhci_host *host,
+ struct mmc_command *cmd)
+{
+ int ret, i;
+ enum dma_transfer_direction dir;
+ struct dma_async_tx_descriptor *desc;
+ struct mmc_data *data = cmd->data;
+ struct dma_chan *chan;
+ struct dma_slave_config cfg;
+ dma_cookie_t cookie;
+ int sg_cnt;
+
+ if (!host->mapbase)
+ return -EINVAL;
+
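+ /*
+ * Program both directions to the SDHCI data port; the DMA engine
+ * uses only the one matching the transfer direction.
+ */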
+ cfg.src_addr = host->mapbase + SDHCI_BUFFER;
+ cfg.dst_addr = host->mapbase + SDHCI_BUFFER;
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.src_maxburst = data->blksz / 4;
+ cfg.dst_maxburst = data->blksz / 4;
+
+ /* Sanity check: all the SG entries must be aligned by block size. */
+ for (i = 0; i < data->sg_len; i++) {
+ if ((data->sg + i)->length % data->blksz)
+ return -EINVAL;
+ }
+
+ chan = sdhci_external_dma_channel(host, data);
+
+ ret = dmaengine_slave_config(chan, &cfg);
+ if (ret)
+ return ret;
+
+ sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
+ if (sg_cnt <= 0)
+ return -EINVAL;
+
+ dir = data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
+ desc = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len, dir,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc)
+ return -EINVAL;
+
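+ /*
+ * Completion is signalled by the SDHCI data interrupt, so no
+ * DMA engine callback is needed.
+ */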
+ desc->callback = NULL;
+ desc->callback_param = NULL;
+
+ cookie = dmaengine_submit(desc);
+ if (dma_submit_error(cookie))
+ ret = cookie;
+
+ return ret;
+}
+
+static void sdhci_external_dma_release(struct sdhci_host *host)
+{
+ if (host->tx_chan) {
+ dma_release_channel(host->tx_chan);
+ host->tx_chan = NULL;
+ }
+
+ if (host->rx_chan) {
+ dma_release_channel(host->rx_chan);
+ host->rx_chan = NULL;
+ }
+
+ sdhci_switch_external_dma(host, false);
+}
+
+static void __sdhci_external_dma_prepare_data(struct sdhci_host *host,
+ struct mmc_command *cmd)
+{
+ struct mmc_data *data = cmd->data;
+
+ sdhci_initialize_data(host, data);
+
+ host->flags |= SDHCI_REQ_USE_DMA;
+ sdhci_set_transfer_irqs(host);
+
+ sdhci_set_block_info(host, data);
+}
+
+static void sdhci_external_dma_prepare_data(struct sdhci_host *host,
+ struct mmc_command *cmd)
+{
+ if (!sdhci_external_dma_setup(host, cmd)) {
+ __sdhci_external_dma_prepare_data(host, cmd);
} else {
- sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
+ sdhci_external_dma_release(host);
+ pr_err("%s: Cannot use external DMA, falling back to the DMA/PIO provided by standard SDHCI.\n",
+ mmc_hostname(host->mmc));
+ sdhci_prepare_data(host, cmd);
}
}
+static void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
+ struct mmc_command *cmd)
+{
+ struct dma_chan *chan;
+
+ if (!cmd->data)
+ return;
+
+ chan = sdhci_external_dma_channel(host, cmd->data);
+ if (chan)
+ dma_async_issue_pending(chan);
+}
+
+#else
+
+static inline int sdhci_external_dma_init(struct sdhci_host *host)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void sdhci_external_dma_release(struct sdhci_host *host)
+{
+}
+
+static inline void sdhci_external_dma_prepare_data(struct sdhci_host *host,
+ struct mmc_command *cmd)
+{
+ /* This should never happen */
+ WARN_ON_ONCE(1);
+}
+
+static inline void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
+ struct mmc_command *cmd)
+{
+}
+
+static inline struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
+ struct mmc_data *data)
+{
+ return NULL;
+}
+
+#endif
+
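+/*
+ * Let host drivers opt in or out of external DMA; when disabled, the
+ * standard SDMA/ADMA paths are used instead.
+ */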
+void sdhci_switch_external_dma(struct sdhci_host *host, bool en)
+{
+ host->use_external_dma = en;
+}
+EXPORT_SYMBOL_GPL(sdhci_switch_external_dma);
+
static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
struct mmc_request *mrq)
{
(host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
}
-static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
+static void sdhci_set_mrq_done(struct sdhci_host *host, struct mmc_request *mrq)
{
int i;
- if (host->cmd && host->cmd->mrq == mrq)
- host->cmd = NULL;
-
- if (host->data_cmd && host->data_cmd->mrq == mrq)
- host->data_cmd = NULL;
-
- if (host->data && host->data->mrq == mrq)
- host->data = NULL;
-
- if (sdhci_needs_reset(host, mrq))
- host->pending_reset = true;
-
for (i = 0; i < SDHCI_MAX_MRQS; i++) {
if (host->mrqs_done[i] == mrq) {
WARN_ON(1);
}
WARN_ON(i >= SDHCI_MAX_MRQS);
+}
+
+static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
+{
+ if (host->cmd && host->cmd->mrq == mrq)
+ host->cmd = NULL;
+
+ if (host->data_cmd && host->data_cmd->mrq == mrq)
+ host->data_cmd = NULL;
+
+ if (host->data && host->data->mrq == mrq)
+ host->data = NULL;
+
+ if (sdhci_needs_reset(host, mrq))
+ host->pending_reset = true;
+
+ sdhci_set_mrq_done(host, mrq);
sdhci_del_timer(host, mrq);
/*
* Need to send CMD12 if -
- * a) open-ended multiblock transfer (no CMD23)
+ * a) open-ended multiblock transfer not using auto CMD12 (no CMD23)
* b) error in multiblock transfer
*/
if (data->stop &&
- (data->error ||
- !data->mrq->sbc)) {
+ ((!data->mrq->sbc && !sdhci_auto_cmd12(host, data->mrq)) ||
+ data->error)) {
/*
* 'cap_cmd_during_tfr' request must not use the command line
* after mmc_command_done() has been called. It is upper layer's
}
host->cmd = cmd;
+ host->data_timeout = 0;
if (sdhci_data_line_cmd(cmd)) {
WARN_ON(host->data_cmd);
host->data_cmd = cmd;
+ sdhci_set_timeout(host, cmd);
}
- sdhci_prepare_data(host, cmd);
+ if (cmd->data) {
+ if (host->use_external_dma)
+ sdhci_external_dma_prepare_data(host, cmd);
+ else
+ sdhci_prepare_data(host, cmd);
+ }
sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);
timeout += 10 * HZ;
sdhci_mod_timer(host, cmd->mrq, timeout);
+ if (host->use_external_dma)
+ sdhci_external_dma_pre_transfer(host, cmd);
+
sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
EXPORT_SYMBOL_GPL(sdhci_send_command);
sdhci_led_activate(host);
- /*
- * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
- * requests if Auto-CMD12 is enabled.
- */
- if (sdhci_auto_cmd12(host, mrq)) {
- if (mrq->stop) {
- mrq->data->stop = NULL;
- mrq->stop = NULL;
- }
- }
-
if (!present || host->flags & SDHCI_DEVICE_DEAD) {
mrq->cmd->error = -ENOMEDIUM;
sdhci_finish_mrq(host, mrq);
if (host->flags & SDHCI_REQ_USE_DMA) {
struct mmc_data *data = mrq->data;
+ if (host->use_external_dma && data &&
+ (mrq->cmd->error || data->error)) {
+ struct dma_chan *chan = sdhci_external_dma_channel(host, data);
+
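+ /*
+ * dmaengine_terminate_sync() may sleep, so drop the host lock
+ * around it.
+ */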
+ host->mrqs_done[i] = NULL;
+ spin_unlock_irqrestore(&host->lock, flags);
+ dmaengine_terminate_sync(chan);
+ spin_lock_irqsave(&host->lock, flags);
+ sdhci_set_mrq_done(host, mrq);
+ }
+
if (data && data->host_cookie == COOKIE_MAPPED) {
if (host->bounce_buffer) {
/*
if (sdhci_can_64bit_dma(host))
host->flags |= SDHCI_USE_64_BIT_DMA;
+ if (host->use_external_dma) {
+ ret = sdhci_external_dma_init(host);
+ if (ret == -EPROBE_DEFER)
+ goto unreg;
+ /*
+ * Fall back to the DMA/PIO integrated in the standard SDHCI
+ * host instead of external DMA devices.
+ */
+ else if (ret)
+ sdhci_switch_external_dma(host, false);
+ /* Disable internal DMA sources */
+ else
+ host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
+ }
+
if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
if (host->ops->set_dma_mask)
ret = host->ops->set_dma_mask(host);
dma_addr_t dma;
void *buf;
- if (host->flags & SDHCI_USE_64_BIT_DMA) {
- host->adma_table_sz = host->adma_table_cnt *
- SDHCI_ADMA2_64_DESC_SZ(host);
- host->desc_sz = SDHCI_ADMA2_64_DESC_SZ(host);
- } else {
- host->adma_table_sz = host->adma_table_cnt *
- SDHCI_ADMA2_32_DESC_SZ;
- host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
- }
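+ /*
+ * A host driver may have preset alloc_desc_sz to reserve room for
+ * its largest 64-bit descriptor; otherwise use the default size.
+ */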
+ if (!(host->flags & SDHCI_USE_64_BIT_DMA))
+ host->alloc_desc_sz = SDHCI_ADMA2_32_DESC_SZ;
+ else if (!host->alloc_desc_sz)
+ host->alloc_desc_sz = SDHCI_ADMA2_64_DESC_SZ(host);
+
+ host->desc_sz = host->alloc_desc_sz;
+ host->adma_table_sz = host->adma_table_cnt * host->desc_sz;
host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
/*
if (host->ops->get_min_clock)
mmc->f_min = host->ops->get_min_clock(host);
else if (host->version >= SDHCI_SPEC_300) {
- if (host->clk_mul) {
- mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
+ if (host->clk_mul)
max_clk = host->max_clk * host->clk_mul;
- } else
- mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
+ /*
+ * Divided Clock Mode minimum clock rate is always less than
+ * Programmable Clock Mode minimum clock rate.
+ */
+ mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
} else
mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
host->adma_table_sz, host->align_buffer,
host->align_addr);
+
+ if (host->use_external_dma)
+ sdhci_external_dma_release(host);
+
host->adma_table = NULL;
host->align_buffer = NULL;
}
pr_info("%s: SDHCI controller on %s [%s] using %s\n",
mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
+ host->use_external_dma ? "External DMA" :
(host->flags & SDHCI_USE_ADMA) ?
(host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
host->adma_table_sz, host->align_buffer,
host->align_addr);
+ if (host->use_external_dma)
+ sdhci_external_dma_release(host);
+
host->adma_table = NULL;
host->align_buffer = NULL;
}
int irq; /* Device IRQ */
void __iomem *ioaddr; /* Mapped address */
+ phys_addr_t mapbase; /* physical address base */
char *bounce_buffer; /* For packing SDMA reads/writes */
dma_addr_t bounce_addr;
unsigned int bounce_buffer_size;
bool pending_reset; /* Cmd/data reset is pending */
bool irq_wake_enabled; /* IRQ wakeup is enabled */
bool v4_mode; /* Host Version 4 Enable */
+ bool use_external_dma; /* Host selects to use external DMA */
struct mmc_request *mrqs_done[SDHCI_MAX_MRQS]; /* Requests done */
struct mmc_command *cmd; /* Current command */
dma_addr_t adma_addr; /* Mapped ADMA descr. table */
dma_addr_t align_addr; /* Mapped bounce buffer */
- unsigned int desc_sz; /* ADMA descriptor size */
+ unsigned int desc_sz; /* ADMA current descriptor size */
+ unsigned int alloc_desc_sz; /* ADMA descr. max size host supports */
struct workqueue_struct *complete_wq; /* Request completion wq */
struct work_struct complete_work; /* Request completion work */
struct timer_list timer; /* Timer for timeouts */
struct timer_list data_timer; /* Timer for data timeouts */
+#if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA)
+ struct dma_chan *rx_chan;
+ struct dma_chan *tx_chan;
+#endif
+
u32 caps; /* CAPABILITY_0 */
u32 caps1; /* CAPABILITY_1 */
bool read_caps; /* Capability flags have been read */
void sdhci_reset_tuning(struct sdhci_host *host);
void sdhci_send_tuning(struct sdhci_host *host, u32 opcode);
void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode);
+void sdhci_switch_external_dma(struct sdhci_host *host, bool en);
+void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable);
+void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd);
#endif /* __SDHCI_HW_H */
writeb(val, host->ioaddr + reg);
}
+static int sdhci_am654_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ int err = sdhci_execute_tuning(mmc, opcode);
+
+ if (err)
+ return err;
+ /*
+ * Tuning data remains in the buffer after tuning.
+ * Do a command and data reset to get rid of it.
+ */
+ sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+
+ return 0;
+}
+
+static u32 sdhci_am654_cqhci_irq(struct sdhci_host *host, u32 intmask)
+{
+ int cmd_error = 0;
+ int data_error = 0;
+
+ if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
+ return intmask;
+
+ cqhci_irq(host->mmc, intmask, cmd_error, data_error);
+
+ return 0;
+}
+
static struct sdhci_ops sdhci_am654_ops = {
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
.get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
.set_power = sdhci_am654_set_power,
.set_clock = sdhci_am654_set_clock,
.write_b = sdhci_am654_write_b,
+ .irq = sdhci_am654_cqhci_irq,
.reset = sdhci_reset,
};
static const struct sdhci_pltfm_data sdhci_am654_pdata = {
.ops = &sdhci_am654_ops,
- .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
- SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
+ .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
};
.flags = IOMUX_PRESENT | FREQSEL_2_BIT | STRBSEL_4_BIT | DLL_PRESENT,
};
-static u32 sdhci_am654_cqhci_irq(struct sdhci_host *host, u32 intmask)
-{
- int cmd_error = 0;
- int data_error = 0;
-
- if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
- return intmask;
-
- cqhci_irq(host->mmc, intmask, cmd_error, data_error);
-
- return 0;
-}
-
static struct sdhci_ops sdhci_j721e_8bit_ops = {
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
.get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
static const struct sdhci_pltfm_data sdhci_j721e_8bit_pdata = {
.ops = &sdhci_j721e_8bit_ops,
- .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
- SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
+ .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
};
static const struct sdhci_pltfm_data sdhci_j721e_4bit_pdata = {
.ops = &sdhci_j721e_4bit_ops,
- .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
- SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
+ .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
};
struct sdhci_am654_data *sdhci_am654;
const struct of_device_id *match;
struct sdhci_host *host;
- struct resource *res;
struct clk *clk_xin;
struct device *dev = &pdev->dev;
void __iomem *base;
goto pm_runtime_disable;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(base)) {
ret = PTR_ERR(base);
goto pm_runtime_put;
goto pm_runtime_put;
}
+ host->mmc_host_ops.execute_tuning = sdhci_am654_execute_tuning;
+
ret = sdhci_am654_init(host);
if (ret)
goto pm_runtime_put;
{
struct sdhci_host *host;
struct device *dev = &pdev->dev;
- struct resource *res;
int irq, ctrl = 0, ret = 0;
struct f_sdhost_priv *priv;
u32 reg = 0;
host->ops = &sdhci_f_sdh30_ops;
host->irq = irq;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ host->ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->ioaddr)) {
ret = PTR_ERR(host->ioaddr);
goto err;
host->chan_rx = sh_mmcif_request_dma_pdata(host,
pdata->slave_id_rx);
} else {
- host->chan_tx = dma_request_slave_channel(dev, "tx");
- host->chan_rx = dma_request_slave_channel(dev, "rx");
+ host->chan_tx = dma_request_chan(dev, "tx");
+ if (IS_ERR(host->chan_tx))
+ host->chan_tx = NULL;
+ host->chan_rx = dma_request_chan(dev, "rx");
+ if (IS_ERR(host->chan_rx))
+ host->chan_rx = NULL;
}
dev_dbg(dev, "%s: got channel TX %p RX %p\n", __func__, host->chan_tx,
host->chan_rx);
struct sh_mmcif_host *host;
struct device *dev = &pdev->dev;
struct sh_mmcif_plat_data *pd = dev->platform_data;
- struct resource *res;
void __iomem *reg;
const char *name;
if (irq[0] < 0)
return -ENXIO;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg = devm_ioremap_resource(dev, res);
+ reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg))
return PTR_ERR(reg);
if (ret)
return ret;
- host->reg_base = devm_ioremap_resource(&pdev->dev,
- platform_get_resource(pdev, IORESOURCE_MEM, 0));
+ host->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->reg_base))
return PTR_ERR(host->reg_base);
{
struct tmio_mmc_host *host;
struct mmc_host *mmc;
- struct resource *res;
void __iomem *ctl;
int ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ctl = devm_ioremap_resource(&pdev->dev, res);
+ ctl = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ctl))
return ERR_CAST(ctl);
* Look for a card detect GPIO, if it fails with anything
* else than a probe deferral, just live without it.
*/
- ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
+ ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
if (ret == -EPROBE_DEFER)
return ret;
struct uniphier_sd_priv {
struct tmio_mmc_data tmio_data;
struct pinctrl *pinctrl;
- struct pinctrl_state *pinstate_default;
struct pinctrl_state *pinstate_uhs;
struct clk *clk;
struct reset_control *rst;
{
struct tmio_mmc_host *host = mmc_priv(mmc);
struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
- struct pinctrl_state *pinstate;
+ struct pinctrl_state *pinstate = NULL;
u32 val, tmp;
switch (ios->signal_voltage) {
case MMC_SIGNAL_VOLTAGE_330:
val = UNIPHIER_SD_VOLT_330;
- pinstate = priv->pinstate_default;
break;
case MMC_SIGNAL_VOLTAGE_180:
val = UNIPHIER_SD_VOLT_180;
tmp |= FIELD_PREP(UNIPHIER_SD_VOLT_MASK, val);
writel(tmp, host->ctl + UNIPHIER_SD_VOLT);
- pinctrl_select_state(priv->pinctrl, pinstate);
+ if (pinstate)
+ pinctrl_select_state(priv->pinctrl, pinstate);
+ else
+ pinctrl_select_default_state(mmc_dev(mmc));
return 0;
}
if (IS_ERR(priv->pinctrl))
return PTR_ERR(priv->pinctrl);
- priv->pinstate_default = pinctrl_lookup_state(priv->pinctrl,
- PINCTRL_STATE_DEFAULT);
- if (IS_ERR(priv->pinstate_default))
- return PTR_ERR(priv->pinstate_default);
-
priv->pinstate_uhs = pinctrl_lookup_state(priv->pinctrl, "uhs");
if (IS_ERR(priv->pinstate_uhs))
return PTR_ERR(priv->pinstate_uhs);
/* Pin control */
struct pinctrl *pinctrl;
- struct pinctrl_state *pins_default;
struct pinctrl_state *pins_uhs;
};
};
int ret;
- host->chan_tx = dma_request_slave_channel(mmc_dev(host->mmc), "tx");
+ host->chan_tx = dma_request_chan(mmc_dev(host->mmc), "tx");
dev_dbg(mmc_dev(host->mmc), "%s: TX: got channel %p\n", __func__,
host->chan_tx);
- if (!host->chan_tx)
+ if (IS_ERR(host->chan_tx)) {
+ host->chan_tx = NULL;
return;
+ }
cfg.direction = DMA_MEM_TO_DEV;
cfg.dst_addr = start + USDHI6_SD_BUF0;
if (ret < 0)
goto e_release_tx;
- host->chan_rx = dma_request_slave_channel(mmc_dev(host->mmc), "rx");
+ host->chan_rx = dma_request_chan(mmc_dev(host->mmc), "rx");
dev_dbg(mmc_dev(host->mmc), "%s: RX: got channel %p\n", __func__,
host->chan_rx);
- if (!host->chan_rx)
+ if (IS_ERR(host->chan_rx)) {
+ host->chan_rx = NULL;
goto e_release_tx;
+ }
cfg.direction = DMA_DEV_TO_MEM;
cfg.src_addr = cfg.dst_addr;
host->pins_uhs);
default:
- return pinctrl_select_state(host->pinctrl,
- host->pins_default);
+ return pinctrl_select_default_state(mmc_dev(host->mmc));
}
}
}
host->pins_uhs = pinctrl_lookup_state(host->pinctrl, "state_uhs");
- if (!IS_ERR(host->pins_uhs)) {
- host->pins_default = pinctrl_lookup_state(host->pinctrl,
- PINCTRL_STATE_DEFAULT);
-
- if (IS_ERR(host->pins_default)) {
- dev_err(dev,
- "UHS pinctrl requires a default pin state.\n");
- ret = PTR_ERR(host->pins_default);
- goto e_free_mmc;
- }
- }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
host->base = devm_ioremap_resource(dev, res);
unsigned long timeout;
u32 syscfg;
- if (state == FL_RESETING || state == FL_PREPARING_ERASE ||
+ if (state == FL_RESETTING || state == FL_PREPARING_ERASE ||
state == FL_VERIFYING_ERASE) {
int i = 21;
unsigned int intr_flags = ONENAND_INT_MASTER;
switch (state) {
- case FL_RESETING:
+ case FL_RESETTING:
intr_flags |= ONENAND_INT_RESET;
break;
case FL_PREPARING_ERASE:
struct dma_async_tx_descriptor *tx;
dma_cookie_t cookie;
- tx = dmaengine_prep_dma_memcpy(c->dma_chan, dst, src, count, 0);
+ tx = dmaengine_prep_dma_memcpy(c->dma_chan, dst, src, count,
+ DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
if (!tx) {
dev_err(&c->pdev->dev, "Failed to prepare DMA memcpy\n");
return -EIO;
* context fallback to PIO mode.
*/
if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 ||
- count < 384 || in_interrupt() || oops_in_progress )
+ count < 384 || in_interrupt() || oops_in_progress)
goto out_copy;
xtra = count & 3;
* context fallback to PIO mode.
*/
if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 ||
- count < 384 || in_interrupt() || oops_in_progress )
+ count < 384 || in_interrupt() || oops_in_progress)
goto out_copy;
dma_src = dma_map_single(dev, buf, count, DMA_TO_DEVICE);
c->gpmc_cs, c->phys_base, c->onenand.base,
c->dma_chan ? "DMA" : "PIO");
- if ((r = onenand_scan(&c->mtd, 1)) < 0)
+ r = onenand_scan(&c->mtd, 1);
+ if (r < 0)
goto err_release_dma;
freq = omap2_onenand_get_freq(c->onenand.version_id);
/* Exit OTP access mode */
this->command(mtd, ONENAND_CMD_RESET, 0, 0);
- this->wait(mtd, FL_RESETING);
+ this->wait(mtd, FL_RESETTING);
status = this->read_word(this->base + ONENAND_REG_CTRL_STATUS);
status &= 0x60;
/* Exit OTP access mode */
this->command(mtd, ONENAND_CMD_RESET, 0, 0);
- this->wait(mtd, FL_RESETING);
+ this->wait(mtd, FL_RESETTING);
return ret;
}
/* Exit OTP access mode */
this->command(mtd, ONENAND_CMD_RESET, 0, 0);
- this->wait(mtd, FL_RESETING);
+ this->wait(mtd, FL_RESETTING);
return ret;
}
/* Exit OTP access mode */
this->command(mtd, ONENAND_CMD_RESET, 0, 0);
- this->wait(mtd, FL_RESETING);
+ this->wait(mtd, FL_RESETTING);
} else {
ops.mode = MTD_OPS_PLACE_OOB;
ops.ooblen = len;
this->boundary[die] = bdry & FLEXONENAND_PI_MASK;
this->command(mtd, ONENAND_CMD_RESET, 0, 0);
- this->wait(mtd, FL_RESETING);
+ this->wait(mtd, FL_RESETTING);
printk(KERN_INFO "Die %d boundary: %d%s\n", die,
this->boundary[die], locked ? "(Locked)" : "(Unlocked)");
ret = this->wait(mtd, FL_WRITING);
out:
this->write_word(ONENAND_CMD_RESET, this->base + ONENAND_REG_COMMAND);
- this->wait(mtd, FL_RESETING);
+ this->wait(mtd, FL_RESETTING);
if (!ret)
/* Recalculate device size on boundary change*/
flexonenand_get_size(mtd);
/* Reset OneNAND to read default register values */
this->write_word(ONENAND_CMD_RESET, this->base + ONENAND_BOOTRAM);
/* Wait reset */
- this->wait(mtd, FL_RESETING);
+ this->wait(mtd, FL_RESETTING);
/* Restore system configuration 1 */
this->write_word(syscfg, this->base + ONENAND_REG_SYS_CFG1);
normal:
if (count != mtd->writesize) {
/* Copy the bufferram to memory to prevent unaligned access */
- memcpy(this->page_buf, p, mtd->writesize);
- p = this->page_buf + offset;
+ memcpy_fromio(this->page_buf, p, mtd->writesize);
+ memcpy(buffer, this->page_buf + offset, count);
+ } else {
+ memcpy_fromio(buffer, p, count);
}
- memcpy(buffer, p, count);
-
return 0;
}
/* Prepare CDMA descriptor. */
static void
cadence_nand_cdma_desc_prepare(struct cdns_nand_ctrl *cdns_ctrl,
- char nf_mem, u32 flash_ptr, char *mem_ptr,
- char *ctrl_data_ptr, u16 ctype)
+ char nf_mem, u32 flash_ptr, dma_addr_t mem_ptr,
+ dma_addr_t ctrl_data_ptr, u16 ctype)
{
struct cadence_nand_cdma_desc *cdma_desc = cdns_ctrl->cdma_desc;
cdma_desc->command_flags |= CDMA_CF_DMA_MASTER;
cdma_desc->command_flags |= CDMA_CF_INT;
- cdma_desc->memory_pointer = (uintptr_t)mem_ptr;
+ cdma_desc->memory_pointer = mem_ptr;
cdma_desc->status = 0;
cdma_desc->sync_flag_pointer = 0;
cdma_desc->sync_arguments = 0;
cdma_desc->command_type = ctype;
- cdma_desc->ctrl_data_ptr = (uintptr_t)ctrl_data_ptr;
+ cdma_desc->ctrl_data_ptr = ctrl_data_ptr;
}
static u8 cadence_nand_check_desc_error(struct cdns_nand_ctrl *cdns_ctrl,
}
cadence_nand_cdma_desc_prepare(cdns_ctrl, chip_nr, page,
- (void *)dma_buf, (void *)dma_ctrl_dat,
- ctype);
+ dma_buf, dma_ctrl_dat, ctype);
status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr);
cadence_nand_cdma_desc_prepare(cdns_ctrl,
cdns_chip->cs[chip->cur_cs],
- page, NULL, NULL,
+ page, 0, 0,
CDMA_CT_ERASE);
status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr);
if (status) {
struct resources *r = &this->resources;
int ret;
+ ret = pm_runtime_get_sync(this->dev);
+ if (ret < 0)
+ return ret;
+
ret = gpmi_reset_block(r->gpmi_regs, false);
if (ret)
goto err_out;
*/
writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_SET);
- return 0;
err_out:
+ pm_runtime_mark_last_busy(this->dev);
+ pm_runtime_put_autosuspend(this->dev);
return ret;
}
return ret;
}
+ /* Set the flag so the timing setup is restored on the next exec_op */
+ if (this->hw.clk_rate)
+ this->hw.must_apply_timings = true;
+
/* re-init the BCH registers */
ret = bch_set_geometry(this);
if (ret) {
/* Max ECC buffer length */
#define FMC2_MAX_ECC_BUF_LEN (FMC2_BCHDSRS_LEN * FMC2_MAX_SG)
+#define FMC2_TIMEOUT_US 1000
#define FMC2_TIMEOUT_MS 1000
/* Timings */
#define FMC2_PMEM 0x88
#define FMC2_PATT 0x8c
#define FMC2_HECCR 0x94
+#define FMC2_ISR 0x184
+#define FMC2_ICR 0x188
#define FMC2_CSQCR 0x200
#define FMC2_CSQCFGR1 0x204
#define FMC2_CSQCFGR2 0x208
#define FMC2_PATT_ATTHIZ(x) (((x) & 0xff) << 24)
#define FMC2_PATT_DEFAULT 0x0a0a0a0a
+/* Register: FMC2_ISR */
+#define FMC2_ISR_IHLF BIT(1)
+
+/* Register: FMC2_ICR */
+#define FMC2_ICR_CIHLF BIT(1)
+
/* Register: FMC2_CSQCR */
#define FMC2_CSQCR_CSQSTART BIT(0)
stm32_fmc2_set_buswidth_16(fmc2, true);
}
+static int stm32_fmc2_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
+{
+ struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ const struct nand_sdr_timings *timings;
+ u32 isr, sr;
+
+ /* Check that there are no pending requests to the NAND flash */
+ if (readl_relaxed_poll_timeout_atomic(fmc2->io_base + FMC2_SR, sr,
+ sr & FMC2_SR_NWRF, 1,
+ FMC2_TIMEOUT_US))
+ dev_warn(fmc2->dev, "Waitrdy timeout\n");
+
+ /* Wait tWB before the R/B# signal goes low */
+ timings = nand_get_sdr_timings(&chip->data_interface);
+ ndelay(PSEC_TO_NSEC(timings->tWB_max));
+
+ /* R/B# signal is low, clear the high-level flag */
+ writel_relaxed(FMC2_ICR_CIHLF, fmc2->io_base + FMC2_ICR);
+
+ /* Wait until the R/B# signal is high */
+ return readl_relaxed_poll_timeout_atomic(fmc2->io_base + FMC2_ISR,
+ isr, isr & FMC2_ISR_IHLF,
+ 5, 1000 * timeout_ms);
+}
+
static int stm32_fmc2_exec_op(struct nand_chip *chip,
const struct nand_operation *op,
bool check_only)
break;
case NAND_OP_WAITRDY_INSTR:
- ret = nand_soft_waitrdy(chip,
- instr->ctx.waitrdy.timeout_ms);
+ ret = stm32_fmc2_waitrdy(chip,
+ instr->ctx.waitrdy.timeout_ms);
break;
}
}
/* FTL can contain -1 entries that by default read back as all 0xFF */
if (block == -1) {
- memset(buffer, 0xFF, SM_SECTOR_SIZE);
+ if (buffer)
+ memset(buffer, 0xFF, SM_SECTOR_SIZE);
return 0;
}
if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)
return 0;
+ nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1;
+
return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]);
}
static void st_micron_set_default_init(struct spi_nor *nor)
{
nor->flags |= SNOR_F_HAS_LOCK;
+ nor->flags &= ~SNOR_F_HAS_16BIT_SR;
nor->params.quad_enable = NULL;
nor->params.set_4byte = st_micron_set_4byte;
}
static void spansion_post_sfdp_fixups(struct spi_nor *nor)
{
- struct mtd_info *mtd = &nor->mtd;
-
- if (mtd->size <= SZ_16M)
+ if (nor->params.size <= SZ_16M)
return;
nor->flags |= SNOR_F_4B_OPCODES;
#define TCAN4X5X_MODE_NORMAL BIT(7)
#define TCAN4X5X_DISABLE_WAKE_MSK (BIT(31) | BIT(30))
+#define TCAN4X5X_DISABLE_INH_MSK BIT(9)
#define TCAN4X5X_SW_RESET BIT(2)
}
}
+static int tcan4x5x_reset(struct tcan4x5x_priv *priv)
+{
+ int ret = 0;
+
+ if (priv->reset_gpio) {
+ gpiod_set_value(priv->reset_gpio, 1);
+
+ /* tpulse_width minimum 30us */
+ usleep_range(30, 100);
+ gpiod_set_value(priv->reset_gpio, 0);
+ } else {
+ ret = regmap_write(priv->regmap, TCAN4X5X_CONFIG,
+ TCAN4X5X_SW_RESET);
+ if (ret)
+ return ret;
+ }
+
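+ /* Give the device time to complete its internal reset sequence. */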
+ usleep_range(700, 1000);
+
+ return ret;
+}
+
static int regmap_spi_gather_write(void *context, const void *reg,
size_t reg_len, const void *val,
size_t val_len)
TCAN4X5X_DISABLE_WAKE_MSK, 0x00);
}
+static int tcan4x5x_disable_state(struct m_can_classdev *cdev)
+{
+ struct tcan4x5x_priv *tcan4x5x = cdev->device_data;
+
+ return regmap_update_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG,
+ TCAN4X5X_DISABLE_INH_MSK, 0x01);
+}
+
static int tcan4x5x_parse_config(struct m_can_classdev *cdev)
{
struct tcan4x5x_priv *tcan4x5x = cdev->device_data;
+ int ret;
tcan4x5x->device_wake_gpio = devm_gpiod_get(cdev->dev, "device-wake",
GPIOD_OUT_HIGH);
if (IS_ERR(tcan4x5x->device_wake_gpio)) {
- if (PTR_ERR(tcan4x5x->power) == -EPROBE_DEFER)
+ if (PTR_ERR(tcan4x5x->device_wake_gpio) == -EPROBE_DEFER)
return -EPROBE_DEFER;
tcan4x5x_disable_wake(cdev);
if (IS_ERR(tcan4x5x->reset_gpio))
tcan4x5x->reset_gpio = NULL;
- usleep_range(700, 1000);
+ ret = tcan4x5x_reset(tcan4x5x);
+ if (ret)
+ return ret;
tcan4x5x->device_state_gpio = devm_gpiod_get_optional(cdev->dev,
"device-state",
GPIOD_IN);
- if (IS_ERR(tcan4x5x->device_state_gpio))
+ if (IS_ERR(tcan4x5x->device_state_gpio)) {
tcan4x5x->device_state_gpio = NULL;
-
- tcan4x5x->power = devm_regulator_get_optional(cdev->dev,
- "vsup");
- if (PTR_ERR(tcan4x5x->power) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
+ tcan4x5x_disable_state(cdev);
+ }
return 0;
}
if (!priv)
return -ENOMEM;
+ priv->power = devm_regulator_get_optional(&spi->dev, "vsup");
+ if (IS_ERR(priv->power)) {
+ if (PTR_ERR(priv->power) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ priv->power = NULL;
+ }
+
mcan_class->device_data = priv;
m_can_class_get_clocks(mcan_class);
priv->regmap = devm_regmap_init(&spi->dev, &tcan4x5x_bus,
&spi->dev, &tcan4x5x_regmap);
- ret = tcan4x5x_parse_config(mcan_class);
+ ret = tcan4x5x_power_enable(priv->power, 1);
if (ret)
goto out_clk;
- tcan4x5x_power_enable(priv->power, 1);
+ ret = tcan4x5x_parse_config(mcan_class);
+ if (ret)
+ goto out_power;
+
+ ret = tcan4x5x_init(mcan_class);
+ if (ret)
+ goto out_power;
ret = m_can_class_register(mcan_class);
if (ret)
struct net_device *dev = napi->dev;
struct mscan_regs __iomem *regs = priv->reg_base;
struct net_device_stats *stats = &dev->stats;
- int npackets = 0;
- int ret = 1;
+ int work_done = 0;
struct sk_buff *skb;
struct can_frame *frame;
u8 canrflg;
- while (npackets < quota) {
+ while (work_done < quota) {
canrflg = in_8(®s->canrflg);
if (!(canrflg & (MSCAN_RXF | MSCAN_ERR_IF)))
break;
stats->rx_packets++;
stats->rx_bytes += frame->can_dlc;
- npackets++;
+ work_done++;
netif_receive_skb(skb);
}
- if (!(in_8(®s->canrflg) & (MSCAN_RXF | MSCAN_ERR_IF))) {
- napi_complete(&priv->napi);
- clear_bit(F_RX_PROGRESS, &priv->flags);
- if (priv->can.state < CAN_STATE_BUS_OFF)
- out_8(®s->canrier, priv->shadow_canrier);
- ret = 0;
+ if (work_done < quota) {
+ if (likely(napi_complete_done(&priv->napi, work_done))) {
+ clear_bit(F_RX_PROGRESS, &priv->flags);
+ if (priv->can.state < CAN_STATE_BUS_OFF)
+ out_8(®s->canrier, priv->shadow_canrier);
+ }
}
- return ret;
+ return work_done;
}
static irqreturn_t mscan_isr(int irq, void *dev_id)
*/
static void slcan_write_wakeup(struct tty_struct *tty)
{
- struct slcan *sl = tty->disc_data;
+ struct slcan *sl;
+
+ rcu_read_lock();
+ sl = rcu_dereference(tty->disc_data);
+ if (!sl)
+ goto out;
schedule_work(&sl->tx_work);
+out:
+ rcu_read_unlock();
}
/* Send a can_frame to a TTY queue. */
return;
spin_lock_bh(&sl->lock);
- tty->disc_data = NULL;
+ rcu_assign_pointer(tty->disc_data, NULL);
sl->tty = NULL;
spin_unlock_bh(&sl->lock);
+ synchronize_rcu();
flush_work(&sl->tx_work);
/* Flush network side */
GS_USB_BREQ_HOST_FORMAT,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
1,
- intf->altsetting[0].desc.bInterfaceNumber,
+ intf->cur_altsetting->desc.bInterfaceNumber,
hconf,
sizeof(*hconf),
1000);
GS_USB_BREQ_DEVICE_CONFIG,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
1,
- intf->altsetting[0].desc.bInterfaceNumber,
+ intf->cur_altsetting->desc.bInterfaceNumber,
dconf,
sizeof(*dconf),
1000);
struct usb_endpoint_descriptor *ep;
int i;
- iface_desc = &dev->intf->altsetting[0];
+ iface_desc = dev->intf->cur_altsetting;
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
ep = &iface_desc->endpoint[i].desc;
struct usb_endpoint_descriptor *endpoint;
int i;
- iface_desc = &dev->intf->altsetting[0];
+ iface_desc = dev->intf->cur_altsetting;
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
endpoint = &iface_desc->endpoint[i].desc;
/* Force link status for IMP port */
reg = core_readl(priv, offset);
- reg |= (MII_SW_OR | LINK_STS);
+ reg |= (MII_SW_OR | LINK_STS | GMII_SPEED_UP_2G);
core_writel(priv, reg, offset);
/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
{
u16 ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST;
+ /* Use the default high priority for management frames sent to
+ * the CPU.
+ */
+ port |= MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI;
+
return mv88e6390_g1_monitor_write(chip, ptr, port);
}
#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_INGRESS_DEST 0x2000
#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_EGRESS_DEST 0x2100
#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST 0x3000
+#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI 0x00e0
#define MV88E6390_G1_MONITOR_MGMT_CTL_DATA_MASK 0x00ff
/* Offset 0x1C: Global Control 2 */
}
static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
- phy_interface_t mode)
+ phy_interface_t mode, bool force)
{
u8 lane;
u16 cmode;
cmode = 0;
}
- /* cmode doesn't change, nothing to do for us */
- if (cmode == chip->ports[port].cmode)
+ /* cmode doesn't change, nothing to do for us unless forced */
+ if (cmode == chip->ports[port].cmode && !force)
return 0;
lane = mv88e6xxx_serdes_get_lane(chip, port);
if (port != 9 && port != 10)
return -EOPNOTSUPP;
- return mv88e6xxx_port_set_cmode(chip, port, mode);
+ return mv88e6xxx_port_set_cmode(chip, port, mode, false);
}
int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
break;
}
- return mv88e6xxx_port_set_cmode(chip, port, mode);
+ return mv88e6xxx_port_set_cmode(chip, port, mode, false);
}
static int mv88e6341_port_set_cmode_writable(struct mv88e6xxx_chip *chip,
if (err)
return err;
- return mv88e6xxx_port_set_cmode(chip, port, mode);
+ return mv88e6xxx_port_set_cmode(chip, port, mode, true);
}
int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode)
struct device *dev = &priv->spidev->dev;
struct device_node *child;
- for_each_child_of_node(ports_node, child) {
+ for_each_available_child_of_node(ports_node, child) {
struct device_node *phy_node;
phy_interface_t phy_mode;
u32 index;
if (err < 0)
goto err_exit;
+ aq_nic_set_loopback(self);
+
err = self->aq_hw_ops->hw_start(self->aq_hw);
if (err < 0)
goto err_exit;
INIT_WORK(&self->service_task, aq_nic_service_task);
- aq_nic_set_loopback(self);
-
timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0);
aq_nic_service_timer_cb(&self->service_timer);
.rx_extract_ts = hw_atl_b0_rx_extract_ts,
.extract_hwts = hw_atl_b0_extract_hwts,
.hw_set_offload = hw_atl_b0_hw_offload_set,
- .hw_get_hw_stats = hw_atl_utils_get_hw_stats,
- .hw_get_fw_version = hw_atl_utils_get_fw_version,
- .hw_set_offload = hw_atl_b0_hw_offload_set,
.hw_set_loopback = hw_atl_b0_set_loopback,
.hw_set_fc = hw_atl_b0_set_fc,
};
u32 speed;
mpi_state = hw_atl_utils_mpi_get_state(self);
- speed = mpi_state & (FW2X_RATE_100M | FW2X_RATE_1G |
- FW2X_RATE_2G5 | FW2X_RATE_5G |
- FW2X_RATE_10G);
+ speed = mpi_state >> HW_ATL_MPI_SPEED_SHIFT;
if (!speed) {
link_status->mbps = 0U;
int ethaddr_bytes = ETH_ALEN;
memset(ppattern + offset, 0xff, magicsync);
- for (j = 0; j < magicsync; j++)
- set_bit(len++, (unsigned long *) pmask);
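+ /* Set bits byte-wise; pmask is a byte array, not an unsigned long. */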
+ for (j = 0; j < magicsync; j++) {
+ pmask[len >> 3] |= BIT(len & 7);
+ len++;
+ }
for (j = 0; j < B44_MAX_PATTERNS; j++) {
if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
for (k = 0; k< ethaddr_bytes; k++) {
ppattern[offset + magicsync +
(j * ETH_ALEN) + k] = macaddr[k];
- set_bit(len++, (unsigned long *) pmask);
+ pmask[len >> 3] |= BIT(len & 7);
+ len++;
}
}
return len - 1;
ring->switch_queue = qp;
ring->switch_port = port;
ring->inspect = true;
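+ /* Map by the switch queue, which may differ from the ring index. */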
- priv->ring_map[q + port * num_tx_queues] = ring;
+ priv->ring_map[qp + port * num_tx_queues] = ring;
qp++;
}
struct net_device *slave_dev;
unsigned int num_tx_queues;
struct net_device *dev;
- unsigned int q, port;
+ unsigned int q, qp, port;
priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
if (priv->netdev != info->master)
continue;
ring->inspect = false;
- priv->ring_map[q + port * num_tx_queues] = NULL;
+ qp = ring->switch_queue;
+ priv->ring_map[qp + port * num_tx_queues] = NULL;
}
return 0;
struct flow_keys *keys1 = &f1->fkeys;
struct flow_keys *keys2 = &f2->fkeys;
- if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
- keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
- keys1->ports.ports == keys2->ports.ports &&
- keys1->basic.ip_proto == keys2->basic.ip_proto &&
- keys1->basic.n_proto == keys2->basic.n_proto &&
+ if (keys1->basic.n_proto != keys2->basic.n_proto ||
+ keys1->basic.ip_proto != keys2->basic.ip_proto)
+ return false;
+
+ if (keys1->basic.n_proto == htons(ETH_P_IP)) {
+ if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
+ keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)
+ return false;
+ } else {
+ if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src,
+ sizeof(keys1->addrs.v6addrs.src)) ||
+ memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst,
+ sizeof(keys1->addrs.v6addrs.dst)))
+ return false;
+ }
+
+ if (keys1->ports.ports == keys2->ports.ports &&
keys1->control.flags == keys2->control.flags &&
ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
return -EOPNOTSUPP;
/* The PF and it's VF-reps only support the switchdev framework */
- if (!BNXT_PF(bp))
+ if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
return -EOPNOTSUPP;
ppid->id_len = sizeof(bp->switch_id);
put_unaligned_le32(dw, &dsn[0]);
pci_read_config_dword(pdev, pos + 4, &dw);
put_unaligned_le32(dw, &dsn[4]);
+ bp->flags |= BNXT_FLAG_DSN_VALID;
return 0;
}
if (BNXT_PF(bp)) {
/* Read the adapter's DSN to use as the eswitch switch_id */
- rc = bnxt_pcie_dsn_get(bp, bp->switch_id);
- if (rc)
- goto init_err_pci_clean;
+ bnxt_pcie_dsn_get(bp, bp->switch_id);
}
/* MTU range: 60 - FW defined max */
#define BNXT_FLAG_NO_AGG_RINGS 0x20000
#define BNXT_FLAG_RX_PAGE_MODE 0x40000
#define BNXT_FLAG_MULTI_HOST 0x100000
+ #define BNXT_FLAG_DSN_VALID 0x200000
#define BNXT_FLAG_DOUBLE_DB 0x400000
#define BNXT_FLAG_CHIP_NITRO_A0 0x1000000
#define BNXT_FLAG_DIM 0x2000000
case HWRM_CFA_ENCAP_RECORD_FREE:
case HWRM_CFA_DECAP_FILTER_ALLOC:
case HWRM_CFA_DECAP_FILTER_FREE:
- case HWRM_CFA_NTUPLE_FILTER_ALLOC:
- case HWRM_CFA_NTUPLE_FILTER_FREE:
- case HWRM_CFA_NTUPLE_FILTER_CFG:
case HWRM_CFA_EM_FLOW_ALLOC:
case HWRM_CFA_EM_FLOW_FREE:
case HWRM_CFA_EM_FLOW_CFG:
struct net_device *dev;
int rc, i;
+ if (!(bp->flags & BNXT_FLAG_DSN_VALID))
+ return -ENODEV;
+
bp->vf_reps = kcalloc(num_vfs, sizeof(vf_rep), GFP_KERNEL);
if (!bp->vf_reps)
return -ENOMEM;
DMA_END_ADDR);
/* Initialize Tx NAPI */
- netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll,
- NAPI_POLL_WEIGHT);
+ netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll,
+ NAPI_POLL_WEIGHT);
}
/* Initialize a RDMA ring */
.mac_link_up = macb_mac_link_up,
};
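+/* Check whether the device tree node has a "phy-handle" property. */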
+static bool macb_phy_handle_exists(struct device_node *dn)
+{
+ dn = of_parse_phandle(dn, "phy-handle", 0);
+ of_node_put(dn);
+ return dn != NULL;
+}
+
static int macb_phylink_connect(struct macb *bp)
{
+ struct device_node *dn = bp->pdev->dev.of_node;
struct net_device *dev = bp->dev;
struct phy_device *phydev;
int ret;
- if (bp->pdev->dev.of_node &&
- of_parse_phandle(bp->pdev->dev.of_node, "phy-handle", 0)) {
- ret = phylink_of_phy_connect(bp->phylink, bp->pdev->dev.of_node,
- 0);
- if (ret) {
- netdev_err(dev, "Could not attach PHY (%d)\n", ret);
- return ret;
- }
- } else {
+ if (dn)
+ ret = phylink_of_phy_connect(bp->phylink, dn, 0);
+
+ if (!dn || (ret && !macb_phy_handle_exists(dn))) {
phydev = phy_find_first(bp->mii_bus);
if (!phydev) {
netdev_err(dev, "no PHY found\n");
/* attach the mac to the phy */
ret = phylink_connect_phy(bp->phylink, phydev);
- if (ret) {
- netdev_err(dev, "Could not attach to PHY (%d)\n", ret);
- return ret;
- }
+ }
+
+ if (ret) {
+ netdev_err(dev, "Could not attach PHY (%d)\n", ret);
+ return ret;
}
phylink_start(bp->phylink);
mgmt->rate = 0;
mgmt->hw.init = &init;
- *tx_clk = clk_register(NULL, &mgmt->hw);
+ *tx_clk = devm_clk_register(&pdev->dev, &mgmt->hw);
if (IS_ERR(*tx_clk))
return PTR_ERR(*tx_clk);
err_disable_clocks:
clk_disable_unprepare(tx_clk);
- clk_unregister(tx_clk);
clk_disable_unprepare(hclk);
clk_disable_unprepare(pclk);
clk_disable_unprepare(rx_clk);
pm_runtime_dont_use_autosuspend(&pdev->dev);
if (!pm_runtime_suspended(&pdev->dev)) {
clk_disable_unprepare(bp->tx_clk);
- clk_unregister(bp->tx_clk);
clk_disable_unprepare(bp->hclk);
clk_disable_unprepare(bp->pclk);
clk_disable_unprepare(bp->rx_clk);
if (!is_offload(adapter))
return -EOPNOTSUPP;
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
if (!(adapter->flags & FULL_INIT_DONE))
return -EIO; /* need the memory controllers */
if (copy_from_user(&t, useraddr, sizeof(t)))
static void *seq_tab_next(struct seq_file *seq, void *v, loff_t *pos)
{
v = seq_tab_get_idx(seq->private, *pos + 1);
- if (v)
- ++*pos;
+ ++(*pos);
return v;
}
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adap = pi->adapter;
+ struct ch_sched_queue qe = { 0 };
+ struct ch_sched_params p = { 0 };
struct sched_class *e;
- struct ch_sched_params p;
- struct ch_sched_queue qe;
u32 req_rate;
int err = 0;
return -EINVAL;
}
+ qe.queue = index;
+ e = cxgb4_sched_queue_lookup(dev, &qe);
+ if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CL_RL) {
+ dev_err(adap->pdev_dev,
+ "Queue %u already bound to class %u of type: %u\n",
+ index, e->idx, e->info.u.params.level);
+ return -EBUSY;
+ }
+
/* Convert from Mbps to Kbps */
req_rate = rate * 1000;
return 0;
/* Fetch any available unused or matching scheduling class */
- memset(&p, 0, sizeof(p));
p.type = SCHED_CLASS_TYPE_PACKET;
p.u.params.level = SCHED_CLASS_LEVEL_CL_RL;
p.u.params.mode = SCHED_CLASS_MODE_CLASS;
struct flow_action *actions = &cls->rule->action;
struct port_info *pi = netdev2pinfo(dev);
struct flow_action_entry *entry;
+ struct ch_sched_queue qe;
+ struct sched_class *e;
u64 max_link_rate;
u32 i, speed;
int ret;
}
}
+ for (i = 0; i < pi->nqsets; i++) {
+ memset(&qe, 0, sizeof(qe));
+ qe.queue = i;
+
+ e = cxgb4_sched_queue_lookup(dev, &qe);
+ if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CH_RL) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Some queues are already bound to different class");
+ return -EBUSY;
+ }
+ }
+
return 0;
}
+static int cxgb4_matchall_tc_bind_queues(struct net_device *dev, u32 tc)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+ struct ch_sched_queue qe;
+ int ret;
+ u32 i;
+
+ for (i = 0; i < pi->nqsets; i++) {
+ qe.queue = i;
+ qe.class = tc;
+ ret = cxgb4_sched_class_bind(dev, &qe, SCHED_QUEUE);
+ if (ret)
+ goto out_free;
+ }
+
+ return 0;
+
+out_free:
+ while (i--) {
+ qe.queue = i;
+ qe.class = SCHED_CLS_NONE;
+ cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE);
+ }
+
+ return ret;
+}
+
+static void cxgb4_matchall_tc_unbind_queues(struct net_device *dev)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+ struct ch_sched_queue qe;
+ u32 i;
+
+ for (i = 0; i < pi->nqsets; i++) {
+ qe.queue = i;
+ qe.class = SCHED_CLS_NONE;
+ cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE);
+ }
+}
+
static int cxgb4_matchall_alloc_tc(struct net_device *dev,
struct tc_cls_matchall_offload *cls)
{
struct adapter *adap = netdev2adap(dev);
struct flow_action_entry *entry;
struct sched_class *e;
+ int ret;
u32 i;
tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
return -ENOMEM;
}
+ ret = cxgb4_matchall_tc_bind_queues(dev, e->idx);
+ if (ret) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Could not bind queues to traffic class");
+ goto out_free;
+ }
+
tc_port_matchall->egress.hwtc = e->idx;
tc_port_matchall->egress.cookie = cls->cookie;
tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_ENABLED;
return 0;
+
+out_free:
+ cxgb4_sched_class_free(dev, e->idx);
+ return ret;
}
static void cxgb4_matchall_free_tc(struct net_device *dev)
struct adapter *adap = netdev2adap(dev);
tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
+ cxgb4_matchall_tc_unbind_queues(dev);
cxgb4_sched_class_free(dev, tc_port_matchall->egress.hwtc);
tc_port_matchall->egress.hwtc = SCHED_CLS_NONE;
struct port_info *pi = netdev2pinfo(dev);
struct adapter *adap = netdev2adap(dev);
u32 speed, qcount = 0, qoffset = 0;
+ u32 start_a, start_b, end_a, end_b;
int ret;
- u8 i;
+ u8 i, j;
if (!mqprio->qopt.num_tc)
return 0;
qoffset = max_t(u16, mqprio->qopt.offset[i], qoffset);
qcount += mqprio->qopt.count[i];
+ start_a = mqprio->qopt.offset[i];
+ end_a = start_a + mqprio->qopt.count[i] - 1;
+ for (j = i + 1; j < mqprio->qopt.num_tc; j++) {
+ start_b = mqprio->qopt.offset[j];
+ end_b = start_b + mqprio->qopt.count[j] - 1;
+
+ /* If queue count is 0, then the traffic
+ * belonging to this class will not use
+ * ETHOFLD queues. So, no need to validate
+ * further.
+ */
+ if (!mqprio->qopt.count[i])
+ break;
+
+ if (!mqprio->qopt.count[j])
+ continue;
+
+ if (max_t(u32, start_a, start_b) <=
+ min_t(u32, end_a, end_b)) {
+ netdev_err(dev,
+ "Queues can't overlap across tc\n");
+ return -EINVAL;
+ }
+ }
+
/* Convert byte per second to bits per second */
min_rate += (mqprio->min_rate[i] * 8);
max_rate += (mqprio->max_rate[i] * 8);
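For reference, the overlap validation added above is the standard closed-interval intersection test: two queue ranges [start_a, end_a] and [start_b, end_b] collide exactly when max(start_a, start_b) <= min(end_a, end_b). A minimal standalone sketch of the same check (plain C, illustrative names only, not the driver's code):

    #include <stdbool.h>
    #include <stdio.h>

    /* Closed intervals [a0, a1] and [b0, b1] intersect iff
     * max(a0, b0) <= min(a1, b1) -- the same test used for the
     * tc queue ranges above.
     */
    static bool ranges_overlap(unsigned int a0, unsigned int a1,
                               unsigned int b0, unsigned int b1)
    {
        unsigned int lo = a0 > b0 ? a0 : b0;
        unsigned int hi = a1 < b1 ? a1 : b1;

        return lo <= hi;
    }

    int main(void)
    {
        printf("%d\n", ranges_overlap(0, 3, 4, 7)); /* 0: disjoint tcs */
        printf("%d\n", ranges_overlap(0, 3, 2, 5)); /* 1: queues 2-3 shared */
        return 0;
    }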
static void *l2t_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
v = l2t_get_idx(seq, *pos);
- if (v)
- ++*pos;
+ ++(*pos);
return v;
}
return found;
}
+struct sched_class *cxgb4_sched_queue_lookup(struct net_device *dev,
+ struct ch_sched_queue *p)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+ struct sched_queue_entry *qe = NULL;
+ struct adapter *adap = pi->adapter;
+ struct sge_eth_txq *txq;
+
+ if (p->queue < 0 || p->queue >= pi->nqsets)
+ return NULL;
+
+ txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
+ qe = t4_sched_entry_lookup(pi, SCHED_QUEUE, txq->q.cntxt_id);
+ return qe ? &pi->sched_tbl->tab[qe->param.class] : NULL;
+}
+
static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
{
struct sched_queue_entry *qe = NULL;
return true;
}
+struct sched_class *cxgb4_sched_queue_lookup(struct net_device *dev,
+ struct ch_sched_queue *p);
int cxgb4_sched_class_bind(struct net_device *dev, void *arg,
enum sched_bind_type type);
int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
{
struct fec_enet_private *fep = netdev_priv(ndev);
u32 __iomem *theregs = (u32 __iomem *)fep->hwp;
+ struct device *dev = &fep->pdev->dev;
u32 *buf = (u32 *)regbuf;
u32 i, off;
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ return;
regs->version = fec_enet_register_version;
off >>= 2;
buf[off] = readl(&theregs[off]);
}
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
}
static int fec_enet_get_ts_info(struct net_device *ndev,
/* Interface Mode Register (IF_MODE) */
#define IF_MODE_MASK 0x00000003 /* 30-31 Mask on i/f mode bits */
-#define IF_MODE_XGMII 0x00000000 /* 30-31 XGMII (10G) interface */
+#define IF_MODE_10G 0x00000000 /* 30-31 10G interface */
#define IF_MODE_GMII 0x00000002 /* 30-31 GMII (1G) interface */
#define IF_MODE_RGMII 0x00000004
#define IF_MODE_RGMII_AUTO 0x00008000
tmp = 0;
switch (phy_if) {
case PHY_INTERFACE_MODE_XGMII:
- tmp |= IF_MODE_XGMII;
+ tmp |= IF_MODE_10G;
break;
default:
tmp |= IF_MODE_GMII;
struct mdio_fsl_priv {
struct tgec_mdio_controller __iomem *mdio_base;
bool is_little_endian;
+ bool has_a011043;
};
static u32 xgmac_read32(void __iomem *regs,
return ret;
/* Return all Fs if nothing was there */
- if (xgmac_read32(&regs->mdio_stat, endian) & MDIO_STAT_RD_ER) {
+ if ((xgmac_read32(&regs->mdio_stat, endian) & MDIO_STAT_RD_ER) &&
+ !priv->has_a011043) {
dev_err(&bus->dev,
"Error while reading PHY%d reg at %d.%hhu\n",
phy_id, dev_addr, regnum);
priv->is_little_endian = of_property_read_bool(pdev->dev.of_node,
"little-endian");
+ priv->has_a011043 = of_property_read_bool(pdev->dev.of_node,
+ "fsl,erratum-a011043");
+
ret = of_mdiobus_register(bus, np);
if (ret) {
dev_err(&pdev->dev, "cannot register MDIO bus\n");
rx->cnt = cnt;
rx->fill_cnt += work_done;
- /* restock desc ring slots */
- dma_wmb(); /* Ensure descs are visible before ringing doorbell */
gve_rx_write_doorbell(priv, rx);
return gve_rx_work_pending(rx);
}
* may have added descriptors without ringing the doorbell.
*/
- /* Ensure tx descs from a prior gve_tx are visible before
- * ringing doorbell.
- */
- dma_wmb();
gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
return NETDEV_TX_BUSY;
}
if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more())
return NETDEV_TX_OK;
- /* Ensure tx descs are visible before ringing doorbell */
- dma_wmb();
gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
return NETDEV_TX_OK;
}
skb = *out_skb = napi_alloc_skb(&ring_data->napi,
HNS_RX_HEAD_SIZE);
if (unlikely(!skb)) {
- netdev_err(ndev, "alloc rx skb fail\n");
ring->stats.sw_err_cnt++;
return -ENOMEM;
}
container_of(napi, struct hns_nic_ring_data, napi);
struct hnae_ring *ring = ring_data->ring;
-try_again:
clean_complete += ring_data->poll_one(
ring_data, budget - clean_complete,
ring_data->ex_process);
napi_complete(napi);
ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
} else {
- goto try_again;
+ return budget;
}
}
#define HNS3_INNER_VLAN_TAG 1
#define HNS3_OUTER_VLAN_TAG 2
+#define HNS3_MIN_TX_LEN 33U
+
/* hns3_pci_tbl - PCI Device ID Table
*
* Last entry must be all 0s
int bd_num = 0;
int ret;
+ /* Hardware cannot handle frames shorter than 33 bytes; pad short frames */
+ if (skb_put_padto(skb, HNS3_MIN_TX_LEN))
+ return NETDEV_TX_OK;
+
/* Prefetch the data used later */
prefetch(skb->data);
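skb_put_padto() zero-pads the frame up to the requested length and consumes the skb on failure, which is why the error path above can simply return NETDEV_TX_OK. A standalone sketch of the padding rule, assuming a caller-provided buffer with room for the minimum length:

    #include <stdio.h>
    #include <string.h>

    #define MIN_TX_LEN 33u /* mirrors HNS3_MIN_TX_LEN above */

    /* Zero-pad a frame to the minimum length the hardware accepts.
     * Returns the new length; buf must have room for MIN_TX_LEN bytes.
     */
    static size_t pad_to_min(unsigned char *buf, size_t len)
    {
        if (len < MIN_TX_LEN) {
            memset(buf + len, 0, MIN_TX_LEN - len);
            len = MIN_TX_LEN;
        }
        return len;
    }

    int main(void)
    {
        unsigned char frame[64] = { 0xde, 0xad };

        printf("%zu\n", pad_to_min(frame, 2)); /* 33 */
        return 0;
    }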
/* board specific private data structure */
struct e1000_adapter {
+ struct timer_list watchdog_timer;
struct timer_list phy_info_timer;
struct timer_list blink_timer;
struct work_struct reset_task;
- struct delayed_work watchdog_task;
-
- struct workqueue_struct *e1000_workqueue;
+ struct work_struct watchdog_task;
const struct e1000_info *ei;
}
/* guard against interrupt when we're going down */
if (!test_bit(__E1000_DOWN, &adapter->state))
- mod_delayed_work(adapter->e1000_workqueue,
- &adapter->watchdog_task, HZ);
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
/* Reset on uncorrectable ECC error */
}
/* guard against interrupt when we're going down */
if (!test_bit(__E1000_DOWN, &adapter->state))
- mod_delayed_work(adapter->e1000_workqueue,
- &adapter->watchdog_task, HZ);
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
/* Reset on uncorrectable ECC error */
hw->mac.get_link_status = true;
/* guard against interrupt when we're going down */
if (!test_bit(__E1000_DOWN, &adapter->state))
- mod_delayed_work(adapter->e1000_workqueue,
- &adapter->watchdog_task, HZ);
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
if (!test_bit(__E1000_DOWN, &adapter->state))
napi_synchronize(&adapter->napi);
+ del_timer_sync(&adapter->watchdog_timer);
del_timer_sync(&adapter->phy_info_timer);
spin_lock(&adapter->stats64_lock);
}
}
+/**
+ * e1000_watchdog - Timer Call-back
+ * @t: pointer to the timer_list, from which the adapter is recovered
+ **/
+static void e1000_watchdog(struct timer_list *t)
+{
+ struct e1000_adapter *adapter = from_timer(adapter, t, watchdog_timer);
+
+ /* Do the rest outside of interrupt context */
+ schedule_work(&adapter->watchdog_task);
+
+ /* TODO: make this use queue_delayed_work() */
+}
+
static void e1000_watchdog_task(struct work_struct *work)
{
struct e1000_adapter *adapter = container_of(work,
struct e1000_adapter,
- watchdog_task.work);
+ watchdog_task);
struct net_device *netdev = adapter->netdev;
struct e1000_mac_info *mac = &adapter->hw.mac;
struct e1000_phy_info *phy = &adapter->hw.phy;
/* Reset the timer */
if (!test_bit(__E1000_DOWN, &adapter->state))
- queue_delayed_work(adapter->e1000_workqueue,
- &adapter->watchdog_task,
- round_jiffies(2 * HZ));
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + 2 * HZ));
}
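The revert above restores the classic two-stage watchdog: a timer callback that runs in atomic context and only schedules work, and a work item that does the slow part in process context. A minimal userspace model of that handoff (illustrative types only, not the driver's timer_list/work_struct API):

    #include <stdio.h>

    /* The "timer" only flags that work is due; the "task" does the
     * slow part. Illustrative model only -- no kernel APIs.
     */
    struct adapter {
        int link_up;
        int work_pending;
    };

    static void watchdog_timer(struct adapter *ad)
    {
        ad->work_pending = 1; /* atomic context: defer, don't sleep */
    }

    static void watchdog_task(struct adapter *ad)
    {
        ad->work_pending = 0;
        printf("link is %s\n", ad->link_up ? "up" : "down");
    }

    int main(void)
    {
        struct adapter ad = { .link_up = 1 };

        watchdog_timer(&ad); /* timer fires in softirq context */
        if (ad.work_pending)
            watchdog_task(&ad); /* worker runs later, may sleep */
        return 0;
    }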
#define E1000_TX_FLAGS_CSUM 0x00000001
goto err_eeprom;
}
- adapter->e1000_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
- e1000e_driver_name);
-
- if (!adapter->e1000_workqueue) {
- err = -ENOMEM;
- goto err_workqueue;
- }
-
- INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog_task);
- queue_delayed_work(adapter->e1000_workqueue, &adapter->watchdog_task,
- 0);
-
+ timer_setup(&adapter->watchdog_timer, e1000_watchdog, 0);
timer_setup(&adapter->phy_info_timer, e1000_update_phy_info, 0);
INIT_WORK(&adapter->reset_task, e1000_reset_task);
+ INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);
return 0;
err_register:
- flush_workqueue(adapter->e1000_workqueue);
- destroy_workqueue(adapter->e1000_workqueue);
-err_workqueue:
if (!(adapter->flags & FLAG_HAS_AMT))
e1000e_release_hw_control(adapter);
err_eeprom:
* from being rescheduled.
*/
set_bit(__E1000_DOWN, &adapter->state);
+ del_timer_sync(&adapter->watchdog_timer);
del_timer_sync(&adapter->phy_info_timer);
cancel_work_sync(&adapter->reset_task);
+ cancel_work_sync(&adapter->watchdog_task);
cancel_work_sync(&adapter->downshift_task);
cancel_work_sync(&adapter->update_phy_task);
cancel_work_sync(&adapter->print_hang_task);
- cancel_delayed_work(&adapter->watchdog_task);
- flush_workqueue(adapter->e1000_workqueue);
- destroy_workqueue(adapter->e1000_workqueue);
-
if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
cancel_work_sync(&adapter->tx_hwtstamp_work);
if (adapter->tx_hwtstamp_skb) {
(aq->api_maj_ver == 1 &&
aq->api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722))
hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
+
+ if (aq->api_maj_ver > 1 ||
+ (aq->api_maj_ver == 1 &&
+ aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_X722))
+ hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
/* fall through */
default:
break;
*/
pba_size--;
if (pba_num_size < (((u32)pba_size * 2) + 1)) {
- hw_dbg(hw, "Buffer to small for PBA data.\n");
+ hw_dbg(hw, "Buffer too small for PBA data.\n");
return I40E_ERR_PARAM;
}
return ret;
}
+/**
+ * i40e_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
+ * @vqs: virtchnl_queue_select structure containing bitmaps to validate
+ *
+ * Returns true if validation was successful, else false.
+ */
+static bool i40e_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
+{
+ if ((!vqs->rx_queues && !vqs->tx_queues) ||
+ vqs->rx_queues >= BIT(I40E_MAX_VF_QUEUES) ||
+ vqs->tx_queues >= BIT(I40E_MAX_VF_QUEUES))
+ return false;
+
+ return true;
+}
+
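For reference, the helper's bound check relies on x < BIT(n) being equivalent to "no bit n or higher is set". A standalone sketch with an assumed queue limit:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_VF_QUEUES 16u /* assumed limit, for illustration */

    /* Valid iff at least one queue is selected and no bit at or above
     * MAX_VF_QUEUES is set: x < (1 << n) <=> bits n and up are clear.
     */
    static bool validate_vqs_bitmaps(uint32_t rx, uint32_t tx)
    {
        if ((!rx && !tx) ||
            rx >= (1u << MAX_VF_QUEUES) ||
            tx >= (1u << MAX_VF_QUEUES))
            return false;
        return true;
    }

    int main(void)
    {
        printf("%d\n", validate_vqs_bitmaps(0x1, 0x0));    /* 1 */
        printf("%d\n", validate_vqs_bitmaps(0x0, 0x0));    /* 0: empty */
        printf("%d\n", validate_vqs_bitmaps(1u << 16, 0)); /* 0: out of range */
        return 0;
    }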
/**
* i40e_vc_enable_queues_msg
* @vf: pointer to the VF info
goto error_param;
}
- if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
+ if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
goto error_param;
}
- if ((vqs->rx_queues == 0 && vqs->tx_queues == 0) ||
- vqs->rx_queues > I40E_MAX_VF_QUEUES ||
- vqs->tx_queues > I40E_MAX_VF_QUEUES) {
+ if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
void iavf_disable_channels(struct iavf_adapter *adapter);
void iavf_add_cloud_filter(struct iavf_adapter *adapter);
void iavf_del_cloud_filter(struct iavf_adapter *adapter);
+struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
+ const u8 *macaddr);
#endif /* _IAVF_H_ */
*
* Returns ptr to the filter object or NULL when no memory available.
**/
-static struct
-iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
- const u8 *macaddr)
+struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
+ const u8 *macaddr)
{
struct iavf_mac_filter *f;
struct virtchnl_vf_resource *vfres = adapter->vf_res;
struct net_device *netdev = adapter->netdev;
struct iavf_hw *hw = &adapter->hw;
+ struct iavf_mac_filter *f, *ftmp;
struct iavf_vlan_filter *vlf;
struct iavf_cloud_filter *cf;
- struct iavf_mac_filter *f;
u32 reg_val;
int i = 0, err;
bool running;
spin_lock_bh(&adapter->mac_vlan_list_lock);
+ /* Delete the filter for the current MAC address; it could have
+ * been changed by the PF via an administratively set MAC.
+ * It will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES.
+ */
+ list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
+ if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) {
+ list_del(&f->list);
+ kfree(f);
+ }
+ }
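Because nodes are unlinked and freed during the walk, the loop above needs the _safe iterator, which caches the next pointer before the current entry is touched. A userspace analogue of unlink-then-free on a singly linked list (hypothetical struct, not the driver's):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct filter { /* hypothetical stand-in for iavf_mac_filter */
        char mac[18];
        struct filter *next;
    };

    /* Take the successor before freeing the current node, which is
     * the guarantee list_for_each_entry_safe() gives in the kernel.
     */
    static void delete_matching(struct filter **head, const char *mac)
    {
        struct filter **pp = head;

        while (*pp) {
            struct filter *f = *pp;

            if (!strcmp(f->mac, mac)) {
                *pp = f->next; /* unlink first */
                free(f);       /* then free */
            } else {
                pp = &f->next;
            }
        }
    }

    int main(void)
    {
        struct filter *b = calloc(1, sizeof(*b));
        struct filter *a = calloc(1, sizeof(*a));

        strcpy(a->mac, "aa:bb");
        strcpy(b->mac, "cc:dd");
        a->next = b;
        delete_matching(&a, "aa:bb");
        printf("%s\n", a ? a->mac : "empty"); /* cc:dd */
        return 0;
    }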
/* re-add all MAC filters */
list_for_each_entry(f, &adapter->mac_filter_list, list) {
f->add = true;
ether_addr_copy(netdev->perm_addr,
adapter->hw.mac.addr);
}
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+ iavf_add_filter(adapter, adapter->hw.mac.addr);
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
iavf_process_config(adapter);
}
break;
dev_spec->module_plugged = true;
if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) {
hw->phy.media_type = e1000_media_type_internal_serdes;
- } else if (eth_flags->e100_base_fx) {
+ } else if (eth_flags->e100_base_fx || eth_flags->e100_base_lx) {
dev_spec->sgmii_active = true;
hw->phy.media_type = e1000_media_type_internal_serdes;
} else if (eth_flags->e1000_base_t) {
break;
}
- /* do not change link mode for 100BaseFX */
- if (dev_spec->eth_flags.e100_base_fx)
- break;
-
/* change current link mode setting */
ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK;
- if (hw->phy.media_type == e1000_media_type_copper)
+ if (dev_spec->sgmii_active)
ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII;
else
ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
advertising &= ~ADVERTISED_1000baseKX_Full;
}
}
- if (eth_flags->e100_base_fx) {
+ if (eth_flags->e100_base_fx || eth_flags->e100_base_lx) {
supported |= SUPPORTED_100baseT_Full;
advertising |= ADVERTISED_100baseT_Full;
}
struct ixgbe_hw *hw = &adapter->hw;
struct hlist_node *node2;
struct ixgbe_fdir_filter *filter;
- u64 action;
+ u8 queue;
spin_lock(&adapter->fdir_perfect_lock);
hlist_for_each_entry_safe(filter, node2,
&adapter->fdir_filter_list, fdir_node) {
- action = filter->action;
- if (action != IXGBE_FDIR_DROP_QUEUE && action != 0)
- action =
- (action >> ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) - 1;
+ if (filter->action == IXGBE_FDIR_DROP_QUEUE) {
+ queue = IXGBE_FDIR_DROP_QUEUE;
+ } else {
+ u32 ring = ethtool_get_flow_spec_ring(filter->action);
+ u8 vf = ethtool_get_flow_spec_ring_vf(filter->action);
+
+ if (!vf && (ring >= adapter->num_rx_queues)) {
+ e_err(drv, "FDIR restore failed without VF, ring: %u\n",
+ ring);
+ continue;
+ } else if (vf &&
+ ((vf > adapter->num_vfs) ||
+ ring >= adapter->num_rx_queues_per_pool)) {
+ e_err(drv, "FDIR restore failed with VF, vf: %hhu, ring: %u\n",
+ vf, ring);
+ continue;
+ }
+
+ /* Map the ring onto the absolute queue index */
+ if (!vf)
+ queue = adapter->rx_ring[ring]->reg_idx;
+ else
+ queue = ((vf - 1) *
+ adapter->num_rx_queues_per_pool) + ring;
+ }
ixgbe_fdir_write_perfect_filter_82599(hw,
- &filter->filter,
- filter->sw_idx,
- (action == IXGBE_FDIR_DROP_QUEUE) ?
- IXGBE_FDIR_DROP_QUEUE :
- adapter->rx_ring[action]->reg_idx);
+ &filter->filter, filter->sw_idx, queue);
}
spin_unlock(&adapter->fdir_perfect_lock);
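The mapping above places a VF's ring inside that VF's queue pool; VF numbers in the ethtool ring cookie are 1-based, hence the (vf - 1) offset, while vf == 0 means the PF owns the ring (which the driver then maps through reg_idx). A simplified worked sketch with an assumed pool size:

    #include <stdio.h>

    #define QUEUES_PER_POOL 4u /* assumed pool size, for illustration */

    /* vf == 0: the PF owns the ring (the driver then uses reg_idx);
     * vf >= 1: VF numbering is 1-based, so the pool starts at
     * (vf - 1) * QUEUES_PER_POOL.
     */
    static unsigned int absolute_queue(unsigned int vf, unsigned int ring)
    {
        if (!vf)
            return ring;
        return (vf - 1) * QUEUES_PER_POOL + ring;
    }

    int main(void)
    {
        printf("%u\n", absolute_queue(0, 3)); /* 3: PF ring */
        printf("%u\n", absolute_queue(1, 0)); /* 0: first VF, first ring */
        printf("%u\n", absolute_queue(3, 2)); /* 10 */
        return 0;
    }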
struct ixgbe_hw *hw = &adapter->hw;
int count = 0;
- if ((netdev_uc_count(netdev)) > 10) {
- pr_err("Too many unicast filters - No Space\n");
- return -ENOSPC;
- }
-
if (!netdev_uc_empty(netdev)) {
struct netdev_hw_addr *ha;
mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
struct bpf_prog *prog, struct xdp_buff *xdp)
{
- u32 ret, act = bpf_prog_run_xdp(prog, xdp);
+ unsigned int len;
+ u32 ret, act;
+
+ len = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction;
+ act = bpf_prog_run_xdp(prog, xdp);
switch (act) {
case XDP_PASS:
if (err) {
ret = MVNETA_XDP_DROPPED;
__page_pool_put_page(rxq->page_pool,
- virt_to_head_page(xdp->data),
- xdp->data_end - xdp->data_hard_start,
- true);
+ virt_to_head_page(xdp->data),
+ len, true);
} else {
ret = MVNETA_XDP_REDIR;
}
ret = mvneta_xdp_xmit_back(pp, xdp);
if (ret != MVNETA_XDP_TX)
__page_pool_put_page(rxq->page_pool,
- virt_to_head_page(xdp->data),
- xdp->data_end - xdp->data_hard_start,
- true);
+ virt_to_head_page(xdp->data),
+ len, true);
break;
default:
bpf_warn_invalid_xdp_action(act);
case XDP_DROP:
__page_pool_put_page(rxq->page_pool,
virt_to_head_page(xdp->data),
- xdp->data_end - xdp->data_hard_start,
- true);
+ len, true);
ret = MVNETA_XDP_DROPPED;
break;
}
crdump_enable_crspace_access(dev, cr_space);
/* Get the available snapshot ID for the dumps */
- id = devlink_region_shapshot_id_get(devlink);
+ id = devlink_region_snapshot_id_get(devlink);
/* Try to capture dumps */
mlx4_crdump_collect_crspace(dev, cr_space, id);
#endif
};
+#define MLX5E_TTC_NUM_GROUPS 3
+#define MLX5E_TTC_GROUP1_SIZE (BIT(3) + MLX5E_NUM_TUNNEL_TT)
+#define MLX5E_TTC_GROUP2_SIZE BIT(1)
+#define MLX5E_TTC_GROUP3_SIZE BIT(0)
+#define MLX5E_TTC_TABLE_SIZE (MLX5E_TTC_GROUP1_SIZE +\
+ MLX5E_TTC_GROUP2_SIZE +\
+ MLX5E_TTC_GROUP3_SIZE)
+
+#define MLX5E_INNER_TTC_NUM_GROUPS 3
+#define MLX5E_INNER_TTC_GROUP1_SIZE BIT(3)
+#define MLX5E_INNER_TTC_GROUP2_SIZE BIT(1)
+#define MLX5E_INNER_TTC_GROUP3_SIZE BIT(0)
+#define MLX5E_INNER_TTC_TABLE_SIZE (MLX5E_INNER_TTC_GROUP1_SIZE +\
+ MLX5E_INNER_TTC_GROUP2_SIZE +\
+ MLX5E_INNER_TTC_GROUP3_SIZE)
+
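Moving these macros to the header lets the hairpin TTC table below be sized as the sum of its flow-group sizes rather than MLX5E_NUM_TT. As a sanity check on the arithmetic (standalone C, BIT() redefined locally):

    #include <assert.h>

    #define BIT(n) (1u << (n)) /* matches the kernel's BIT() */

    /* Inner TTC sizing from above: 8 + 2 + 1 = 11 entries. */
    static_assert(BIT(3) + BIT(1) + BIT(0) == 11, "inner TTC size");

    int main(void) { return 0; }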
#ifdef CONFIG_MLX5_EN_RXNFC
struct mlx5e_ethtool_table {
struct devlink_health_reporter *reporter, char *err_str,
struct mlx5e_err_ctx *err_ctx)
{
- if (!reporter) {
- netdev_err(priv->netdev, err_str);
+ netdev_err(priv->netdev, err_str);
+
+ if (!reporter)
return err_ctx->recover(&err_ctx->ctx);
- }
+
return devlink_health_report(reporter, err_str, err_ctx);
}
struct tx_sync_info {
u64 rcd_sn;
- s32 sync_len;
+ u32 sync_len;
int nr_frags;
skb_frag_t frags[MAX_SKB_FRAGS];
};
static enum mlx5e_ktls_sync_retval
tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
- u32 tcp_seq, struct tx_sync_info *info)
+ u32 tcp_seq, int datalen, struct tx_sync_info *info)
{
struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;
enum mlx5e_ktls_sync_retval ret = MLX5E_KTLS_SYNC_DONE;
struct tls_record_info *record;
int remaining, i = 0;
unsigned long flags;
+ bool ends_before;
spin_lock_irqsave(&tx_ctx->lock, flags);
record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);
goto out;
}
- if (unlikely(tcp_seq < tls_record_start_seq(record))) {
- ret = tls_record_is_start_marker(record) ?
- MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
+ /* There are the following cases:
+ * 1. packet ends before start marker: bypass offload.
+ * 2. packet starts before start marker and ends after it: drop,
+ * not supported, breaks contract with kernel.
+ * 3. packet ends before tls record info starts: drop,
+ * this packet was already acknowledged and its record info
+ * was released.
+ */
+ ends_before = before(tcp_seq + datalen, tls_record_start_seq(record));
+
+ if (unlikely(tls_record_is_start_marker(record))) {
+ ret = ends_before ? MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
+ goto out;
+ } else if (ends_before) {
+ ret = MLX5E_KTLS_SYNC_FAIL;
goto out;
}
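The ends_before test relies on before(), the usual TCP modular comparison: seq1 precedes seq2 when the signed 32-bit difference is negative, so it stays correct across sequence-number wraparound. A standalone sketch of that comparison (assuming before() has its usual tcp.h semantics):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* seq1 precedes seq2 iff the signed 32-bit difference is
     * negative, so the comparison survives the 2^32 wraparound.
     */
    static bool before(uint32_t seq1, uint32_t seq2)
    {
        return (int32_t)(seq1 - seq2) < 0;
    }

    int main(void)
    {
        printf("%d\n", before(100, 200));           /* 1 */
        printf("%d\n", before(0xfffffff0u, 0x10u)); /* 1: wrapped */
        printf("%d\n", before(0x10u, 0xfffffff0u)); /* 0 */
        return 0;
    }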
u8 num_wqebbs;
int i = 0;
- ret = tx_sync_info_get(priv_tx, seq, &info);
+ ret = tx_sync_info_get(priv_tx, seq, datalen, &info);
if (unlikely(ret != MLX5E_KTLS_SYNC_DONE)) {
if (ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA) {
stats->tls_skip_no_sync_data++;
goto err_out;
}
- if (unlikely(info.sync_len < 0)) {
- if (likely(datalen <= -info.sync_len))
- return MLX5E_KTLS_SYNC_DONE;
-
- stats->tls_drop_bypass_req++;
- goto err_out;
- }
-
stats->tls_ooo++;
tx_post_resync_params(sq, priv_tx, info.rcd_sn);
if (unlikely(contig_wqebbs_room < num_wqebbs))
mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
- tx_post_resync_params(sq, priv_tx, info.rcd_sn);
-
for (; i < info.nr_frags; i++) {
unsigned int orig_fsz, frag_offset = 0, n = 0;
skb_frag_t *f = &info.frags[i];
enum mlx5e_ktls_sync_retval ret =
mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq);
- if (likely(ret == MLX5E_KTLS_SYNC_DONE))
+ switch (ret) {
+ case MLX5E_KTLS_SYNC_DONE:
*wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
- else if (ret == MLX5E_KTLS_SYNC_FAIL)
+ break;
+ case MLX5E_KTLS_SYNC_SKIP_NO_DATA:
+ if (likely(!skb->decrypted))
+ goto out;
+ WARN_ON_ONCE(1);
+ /* fall-through */
+ default: /* MLX5E_KTLS_SYNC_FAIL */
goto err_out;
- else /* ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA */
- goto out;
+ }
}
priv_tx->expected_seq = seq + datalen;
return err;
}
-#define MLX5E_TTC_NUM_GROUPS 3
-#define MLX5E_TTC_GROUP1_SIZE (BIT(3) + MLX5E_NUM_TUNNEL_TT)
-#define MLX5E_TTC_GROUP2_SIZE BIT(1)
-#define MLX5E_TTC_GROUP3_SIZE BIT(0)
-#define MLX5E_TTC_TABLE_SIZE (MLX5E_TTC_GROUP1_SIZE +\
- MLX5E_TTC_GROUP2_SIZE +\
- MLX5E_TTC_GROUP3_SIZE)
-
-#define MLX5E_INNER_TTC_NUM_GROUPS 3
-#define MLX5E_INNER_TTC_GROUP1_SIZE BIT(3)
-#define MLX5E_INNER_TTC_GROUP2_SIZE BIT(1)
-#define MLX5E_INNER_TTC_GROUP3_SIZE BIT(0)
-#define MLX5E_INNER_TTC_TABLE_SIZE (MLX5E_INNER_TTC_GROUP1_SIZE +\
- MLX5E_INNER_TTC_GROUP2_SIZE +\
- MLX5E_INNER_TTC_GROUP3_SIZE)
-
static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc,
bool use_ipv)
{
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
ttc_params->indir_tirn[tt] = hp->indir_tirn[tt];
- ft_attr->max_fte = MLX5E_NUM_TT;
+ ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE;
ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
ft_attr->prio = MLX5E_TC_PRIO;
}
return kmemdup(tun_info, tun_size, GFP_KERNEL);
}
+static bool is_duplicated_encap_entry(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ int out_index,
+ struct mlx5e_encap_entry *e,
+ struct netlink_ext_ack *extack)
+{
+ int i;
+
+ for (i = 0; i < out_index; i++) {
+ if (flow->encaps[i].e != e)
+ continue;
+ NL_SET_ERR_MSG_MOD(extack, "can't duplicate encap action");
+ netdev_err(priv->netdev, "can't duplicate encap action\n");
+ return true;
+ }
+
+ return false;
+}
+
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct net_device *mirred_dev,
/* must verify if encap is valid or not */
if (e) {
+ /* Check that entry was not already attached to this flow */
+ if (is_duplicated_encap_entry(priv, flow, out_index, e, extack)) {
+ err = -EOPNOTSUPP;
+ goto out_err;
+ }
+
mutex_unlock(&esw->offloads.encap_tbl_lock);
wait_for_completion(&e->res_ready);
same_hw_devs(priv, netdev_priv(out_dev));
}
+static bool is_duplicated_output_device(struct net_device *dev,
+ struct net_device *out_dev,
+ int *ifindexes, int if_count,
+ struct netlink_ext_ack *extack)
+{
+ int i;
+
+ for (i = 0; i < if_count; i++) {
+ if (ifindexes[i] == out_dev->ifindex) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "can't duplicate output to same device");
+ netdev_err(dev, "can't duplicate output to same device: %s\n",
+ out_dev->name);
+ return true;
+ }
+ }
+
+ return false;
+}
+
static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
struct flow_action *flow_action,
struct mlx5e_tc_flow *flow,
struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
const struct ip_tunnel_info *info = NULL;
+ int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS];
bool ft_flow = mlx5e_is_ft_flow(flow);
const struct flow_action_entry *act;
+ int err, i, if_count = 0;
bool encap = false;
u32 action = 0;
- int err, i;
if (!flow_action_has_entries(flow_action))
return -EINVAL;
struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
struct net_device *uplink_upper;
+ if (is_duplicated_output_device(priv->netdev,
+ out_dev,
+ ifindexes,
+ if_count,
+ extack))
+ return -EOPNOTSUPP;
+
+ ifindexes[if_count] = out_dev->ifindex;
+ if_count++;
+
rcu_read_lock();
uplink_upper =
netdev_master_upper_dev_get_rcu(uplink_dev);
u32 rate_mbps;
int err;
+ vport_num = rpriv->rep->vport;
+ if (vport_num >= MLX5_VPORT_ECPF) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Ingress rate limit is supported only for Eswitch ports connected to VFs");
+ return -EOPNOTSUPP;
+ }
+
esw = priv->mdev->priv.eswitch;
/* rate is given in bytes/sec.
* First convert to bits/sec and then round to the nearest mbit/secs.
* 1 mbit/sec.
*/
rate_mbps = rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0;
- vport_num = rpriv->rep->vport;
-
err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
if (err)
NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");
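The conversion above rounds bytes/sec to the nearest Mbit/s by adding half a megabit (500000) before the integer division, and clamps any non-zero rate up to 1 Mbit/s. A standalone check of the arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    /* bytes/sec -> Mbit/sec, rounded to nearest, with any non-zero
     * rate clamped to at least 1 (mirrors the expression above).
     */
    static uint32_t rate_mbps(uint64_t rate)
    {
        uint64_t mbps;

        if (!rate)
            return 0;
        mbps = (rate * 8 + 500000) / 1000000;
        return mbps > 1 ? (uint32_t)mbps : 1;
    }

    int main(void)
    {
        printf("%u\n", rate_mbps(125000)); /* 1: exactly 1 Mbit/s */
        printf("%u\n", rate_mbps(187500)); /* 2: 1.5 Mbit rounds up */
        printf("%u\n", rate_mbps(1));      /* 1: clamped */
        return 0;
    }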
struct mlx5_vport *vport;
int i;
- mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
+ mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
memset(&vport->info, 0, sizeof(vport->info));
+ vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
+ }
}
/* Public E-Switch API */
*/
#define ESW_SIZE (16 * 1024 * 1024)
const unsigned int ESW_POOLS[4] = { 4 * 1024 * 1024, 1 * 1024 * 1024,
- 64 * 1024, 4 * 1024 };
+ 64 * 1024, 128 };
static int
get_sz_from_pool(struct mlx5_eswitch *esw)
return -EINVAL;
}
- mlx5_eswitch_disable(esw, false);
+ mlx5_eswitch_disable(esw, true);
mlx5_eswitch_update_num_of_vfs(esw, esw->dev->priv.sriov.num_vfs);
err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
if (err) {
int esw_offloads_enable(struct mlx5_eswitch *esw)
{
- int err;
+ struct mlx5_vport *vport;
+ int err, i;
if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) &&
MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap))
if (err)
goto err_vport_metadata;
+ /* Representor will control the vport link state */
+ mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
+ vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;
+
err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
if (err)
goto err_vports;
{
int err, err1;
- mlx5_eswitch_disable(esw, false);
+ mlx5_eswitch_disable(esw, true);
err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
}
}
-static void del_sw_fte_rcu(struct rcu_head *head)
-{
- struct fs_fte *fte = container_of(head, struct fs_fte, rcu);
- struct mlx5_flow_steering *steering = get_steering(&fte->node);
-
- kmem_cache_free(steering->ftes_cache, fte);
-}
-
static void del_sw_fte(struct fs_node *node)
{
+ struct mlx5_flow_steering *steering = get_steering(node);
struct mlx5_flow_group *fg;
struct fs_fte *fte;
int err;
rhash_fte);
WARN_ON(err);
ida_simple_remove(&fg->fte_allocator, fte->index - fg->start_index);
-
- call_rcu(&fte->rcu, del_sw_fte_rcu);
+ kmem_cache_free(steering->ftes_cache, fte);
}
static void del_hw_flow_group(struct fs_node *node)
}
static struct fs_fte *
-lookup_fte_for_write_locked(struct mlx5_flow_group *g, const u32 *match_value)
+lookup_fte_locked(struct mlx5_flow_group *g,
+ const u32 *match_value,
+ bool take_write)
{
struct fs_fte *fte_tmp;
- nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
-
- fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value, rhash_fte);
- if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
- fte_tmp = NULL;
- goto out;
- }
-
- if (!fte_tmp->node.active) {
- tree_put_node(&fte_tmp->node, false);
- fte_tmp = NULL;
- goto out;
- }
- nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
-
-out:
- up_write_ref_node(&g->node, false);
- return fte_tmp;
-}
-
-static struct fs_fte *
-lookup_fte_for_read_locked(struct mlx5_flow_group *g, const u32 *match_value)
-{
- struct fs_fte *fte_tmp;
-
- if (!tree_get_node(&g->node))
- return NULL;
-
- rcu_read_lock();
- fte_tmp = rhashtable_lookup(&g->ftes_hash, match_value, rhash_fte);
+ if (take_write)
+ nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
+ else
+ nested_down_read_ref_node(&g->node, FS_LOCK_PARENT);
+ fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value,
+ rhash_fte);
if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
- rcu_read_unlock();
fte_tmp = NULL;
goto out;
}
- rcu_read_unlock();
-
if (!fte_tmp->node.active) {
tree_put_node(&fte_tmp->node, false);
fte_tmp = NULL;
}
nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
-
out:
- tree_put_node(&g->node, false);
- return fte_tmp;
-}
-
-static struct fs_fte *
-lookup_fte_locked(struct mlx5_flow_group *g, const u32 *match_value, bool write)
-{
- if (write)
- return lookup_fte_for_write_locked(g, match_value);
+ if (take_write)
+ up_write_ref_node(&g->node, false);
else
- return lookup_fte_for_read_locked(g, match_value);
+ up_read_ref_node(&g->node);
+ return fte_tmp;
}
static struct mlx5_flow_handle *
enum fs_fte_status status;
struct mlx5_fc *counter;
struct rhash_head hash;
- struct rcu_head rcu;
int modify_mask;
};
if (err)
goto err_load;
+ if (boot) {
+ err = mlx5_devlink_register(priv_to_devlink(dev), dev->device);
+ if (err)
+ goto err_devlink_reg;
+ }
+
if (mlx5_device_registered(dev)) {
mlx5_attach_device(dev);
} else {
return err;
err_reg_dev:
+ if (boot)
+ mlx5_devlink_unregister(priv_to_devlink(dev));
+err_devlink_reg:
mlx5_unload(dev);
err_load:
if (boot)
request_module_nowait(MLX5_IB_MOD);
- err = mlx5_devlink_register(devlink, &pdev->dev);
- if (err)
- goto clean_load;
-
err = mlx5_crdump_enable(dev);
if (err)
dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err);
pci_save_state(pdev);
return 0;
-clean_load:
- mlx5_unload_one(dev, true);
-
err_load_one:
mlx5_pci_close(dev);
pci_init_err:
{ PCI_VDEVICE(MELLANOX, 0x101d) }, /* ConnectX-6 Dx */
{ PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF}, /* ConnectX Family mlx5Gen Virtual Function */
{ PCI_VDEVICE(MELLANOX, 0x101f) }, /* ConnectX-6 LX */
+ { PCI_VDEVICE(MELLANOX, 0x1021) }, /* ConnectX-7 */
{ PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */
{ PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */
{ PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */
/* We need to copy the refcount since this ste
* may have been traversed several times
*/
- refcount_set(&new_ste->refcount, refcount_read(&cur_ste->refcount));
+ new_ste->refcount = cur_ste->refcount;
/* Link old STEs rule_mem list to the new ste */
mlx5dr_rule_update_rule_member(cur_ste, new_ste);
if (!rule_mem)
return -ENOMEM;
+ INIT_LIST_HEAD(&rule_mem->list);
+ INIT_LIST_HEAD(&rule_mem->use_ste_list);
+
rule_mem->ste = ste;
list_add_tail(&rule_mem->list, &nic_rule->rule_members_list);
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */
+#include <linux/smp.h>
#include "dr_types.h"
#define QUEUE_SIZE 128
if (!in)
goto err_cqwq;
- vector = smp_processor_id() % mlx5_comp_vectors_count(mdev);
+ vector = raw_smp_processor_id() % mlx5_comp_vectors_count(mdev);
err = mlx5_vector2eqn(mdev, vector, &eqn, &irqn);
if (err) {
kvfree(in);
if (dst->next_htbl)
dst->next_htbl->pointing_ste = dst;
- refcount_set(&dst->refcount, refcount_read(&src->refcount));
+ dst->refcount = src->refcount;
INIT_LIST_HEAD(&dst->rule_list);
list_splice_tail_init(&src->rule_list, &dst->rule_list);
bool mlx5dr_ste_not_used_ste(struct mlx5dr_ste *ste)
{
- return !refcount_read(&ste->refcount);
+ return !ste->refcount;
}
/* Init one ste as a pattern for ste data array */
htbl->ste_arr = chunk->ste_arr;
htbl->hw_ste_arr = chunk->hw_ste_arr;
htbl->miss_list = chunk->miss_list;
- refcount_set(&htbl->refcount, 0);
+ htbl->refcount = 0;
for (i = 0; i < chunk->num_of_entries; i++) {
struct mlx5dr_ste *ste = &htbl->ste_arr[i];
ste->hw_ste = htbl->hw_ste_arr + i * DR_STE_SIZE_REDUCED;
ste->htbl = htbl;
- refcount_set(&ste->refcount, 0);
+ ste->refcount = 0;
INIT_LIST_HEAD(&ste->miss_list_node);
INIT_LIST_HEAD(&htbl->miss_list[i]);
INIT_LIST_HEAD(&ste->rule_list);
int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl)
{
- if (refcount_read(&htbl->refcount))
+ if (htbl->refcount)
return -EBUSY;
mlx5dr_icm_free_chunk(htbl->chunk);
struct mlx5dr_ste {
u8 *hw_ste;
/* refcount: indicates the num of rules that using this ste */
- refcount_t refcount;
+ u32 refcount;
/* attached to the miss_list head at each htbl entry */
struct list_head miss_list_node;
struct mlx5dr_ste_htbl {
u8 lu_type;
u16 byte_mask;
- refcount_t refcount;
+ u32 refcount;
struct mlx5dr_icm_chunk *chunk;
struct mlx5dr_ste *ste_arr;
u8 *hw_ste_arr;
static inline void mlx5dr_htbl_put(struct mlx5dr_ste_htbl *htbl)
{
- if (refcount_dec_and_test(&htbl->refcount))
+ htbl->refcount--;
+ if (!htbl->refcount)
mlx5dr_ste_htbl_free(htbl);
}
static inline void mlx5dr_htbl_get(struct mlx5dr_ste_htbl *htbl)
{
- refcount_inc(&htbl->refcount);
+ htbl->refcount++;
}
/* STE utils */
struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher)
{
- if (refcount_dec_and_test(&ste->refcount))
+ ste->refcount--;
+ if (!ste->refcount)
mlx5dr_ste_free(ste, matcher, nic_matcher);
}
/* initial as 0, increased only when ste appears in a new rule */
static inline void mlx5dr_ste_get(struct mlx5dr_ste *ste)
{
- refcount_inc(&ste->refcount);
+ ste->refcount++;
}
void mlx5dr_ste_set_hit_addr_by_next_htbl(u8 *hw_ste,
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
list_for_each_entry(dst, &fte->node.children, node.list) {
enum mlx5_flow_destination_type type = dst->dest_attr.type;
- u32 id;
if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
err = -ENOSPC;
goto free_actions;
}
- switch (type) {
- case MLX5_FLOW_DESTINATION_TYPE_COUNTER:
- id = dst->dest_attr.counter_id;
- tmp_action =
- mlx5dr_action_create_flow_counter(id);
- if (!tmp_action) {
- err = -ENOMEM;
- goto free_actions;
- }
- fs_dr_actions[fs_dr_num_actions++] = tmp_action;
- actions[num_actions++] = tmp_action;
- break;
+ if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+ continue;
+ switch (type) {
case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
tmp_action = create_ft_action(dev, dst);
if (!tmp_action) {
}
}
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ list_for_each_entry(dst, &fte->node.children, node.list) {
+ u32 id;
+
+ if (dst->dest_attr.type !=
+ MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+ continue;
+
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -ENOSPC;
+ goto free_actions;
+ }
+
+ id = dst->dest_attr.counter_id;
+ tmp_action =
+ mlx5dr_action_create_flow_counter(id);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+
+ fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+ actions[num_actions++] = tmp_action;
+ }
+ }
+
params.match_sz = match_sz;
params.match_buf = (u64 *)fte->val;
u64 len;
int err;
+ if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
+ this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));
if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
return NETDEV_TX_BUSY;
- if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
- struct sk_buff *skb_orig = skb;
-
- skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
- if (!skb) {
- this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
- dev_kfree_skb_any(skb_orig);
- return NETDEV_TX_OK;
- }
- dev_consume_skb_any(skb_orig);
- }
-
if (eth_skb_pad(skb)) {
this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
return NETDEV_TX_OK;
periodic_hw_stats.update_dw.work);
if (!netif_carrier_ok(mlxsw_sp_port->dev))
+ /* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as
+ * necessary when the port goes down.
+ */
goto out;
mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
return 0;
}
+static void
+mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ int i;
+
+ for (i = 0; i < TC_MAX_QUEUE; i++)
+ mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0;
+}
+
static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
char *pude_pl, void *priv)
{
} else {
netdev_info(mlxsw_sp_port->dev, "link down\n");
netif_carrier_off(mlxsw_sp_port->dev);
+ mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port);
}
}
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
+static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *mlxsw_bus_info,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+
+ mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
+ mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
+ mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
+ mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
+ mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
+ mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
+ mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
+ mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
+ mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
+ mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
+ mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
+
+ return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
+}
+
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
static struct mlxsw_driver mlxsw_sp3_driver = {
.kind = mlxsw_sp3_driver_name,
.priv_size = sizeof(struct mlxsw_sp),
- .init = mlxsw_sp2_init,
+ .init = mlxsw_sp3_init,
.fini = mlxsw_sp_fini,
.basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
.port_split = mlxsw_sp_port_split,
#include <linux/string.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
+#include <linux/mutex.h>
#include <net/net_namespace.h>
#include <net/tc_act/tc_vlan.h>
struct mlxsw_sp_fid *dummy_fid;
struct rhashtable ruleset_ht;
struct list_head rules;
+ struct mutex rules_lock; /* Protects rules list */
struct {
struct delayed_work dw;
unsigned long interval; /* ms */
goto err_ruleset_block_bind;
}
+ mutex_lock(&mlxsw_sp->acl->rules_lock);
list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
+ mutex_unlock(&mlxsw_sp->acl->rules_lock);
block->rule_count++;
block->egress_blocker_rule_count += rule->rulei->egress_bind_blocker;
return 0;
block->egress_blocker_rule_count -= rule->rulei->egress_bind_blocker;
ruleset->ht_key.block->rule_count--;
+ mutex_lock(&mlxsw_sp->acl->rules_lock);
list_del(&rule->list);
+ mutex_unlock(&mlxsw_sp->acl->rules_lock);
if (!ruleset->ht_key.chain_index &&
mlxsw_sp_acl_ruleset_is_singular(ruleset))
mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset,
struct mlxsw_sp_acl_rule *rule;
int err;
- /* Protect internal structures from changes */
- rtnl_lock();
+ mutex_lock(&acl->rules_lock);
list_for_each_entry(rule, &acl->rules, list) {
err = mlxsw_sp_acl_rule_activity_update(acl->mlxsw_sp,
rule);
if (err)
goto err_rule_update;
}
- rtnl_unlock();
+ mutex_unlock(&acl->rules_lock);
return 0;
err_rule_update:
- rtnl_unlock();
+ mutex_unlock(&acl->rules_lock);
return err;
}
acl->dummy_fid = fid;
INIT_LIST_HEAD(&acl->rules);
+ mutex_init(&acl->rules_lock);
err = mlxsw_sp_acl_tcam_init(mlxsw_sp, &acl->tcam);
if (err)
goto err_acl_ops_init;
return 0;
err_acl_ops_init:
+ mutex_destroy(&acl->rules_lock);
mlxsw_sp_fid_put(fid);
err_fid_get:
rhashtable_destroy(&acl->ruleset_ht);
cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw);
mlxsw_sp_acl_tcam_fini(mlxsw_sp, &acl->tcam);
+ mutex_destroy(&acl->rules_lock);
WARN_ON(!list_empty(&acl->rules));
mlxsw_sp_fid_put(acl->dummy_fid);
rhashtable_destroy(&acl->ruleset_ht);
return -EOPNOTSUPP;
}
+static u64
+mlxsw_sp_xstats_backlog(struct mlxsw_sp_port_xstats *xstats, int tclass_num)
+{
+ return xstats->backlog[tclass_num] +
+ xstats->backlog[tclass_num + 8];
+}
+
+static u64
+mlxsw_sp_xstats_tail_drop(struct mlxsw_sp_port_xstats *xstats, int tclass_num)
+{
+ return xstats->tail_drop[tclass_num] +
+ xstats->tail_drop[tclass_num + 8];
+}
+
static void
mlxsw_sp_qdisc_bstats_per_priority_get(struct mlxsw_sp_port_xstats *xstats,
u8 prio_bitmap, u64 *tx_packets,
&stats_base->tx_bytes);
red_base->prob_mark = xstats->ecn;
red_base->prob_drop = xstats->wred_drop[tclass_num];
- red_base->pdrop = xstats->tail_drop[tclass_num];
+ red_base->pdrop = mlxsw_sp_xstats_tail_drop(xstats, tclass_num);
stats_base->overlimits = red_base->prob_drop + red_base->prob_mark;
stats_base->drops = red_base->prob_drop + red_base->pdrop;
early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
marks = xstats->ecn - xstats_base->prob_mark;
- pdrops = xstats->tail_drop[tclass_num] - xstats_base->pdrop;
+ pdrops = mlxsw_sp_xstats_tail_drop(xstats, tclass_num) -
+ xstats_base->pdrop;
res->pdrop += pdrops;
res->prob_drop += early_drops;
overlimits = xstats->wred_drop[tclass_num] + xstats->ecn -
stats_base->overlimits;
- drops = xstats->wred_drop[tclass_num] + xstats->tail_drop[tclass_num] -
+ drops = xstats->wred_drop[tclass_num] +
+ mlxsw_sp_xstats_tail_drop(xstats, tclass_num) -
stats_base->drops;
- backlog = xstats->backlog[tclass_num];
+ backlog = mlxsw_sp_xstats_backlog(xstats, tclass_num);
_bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
stats_ptr->qstats->overlimits += overlimits;
tx_packets = stats->tx_packets - stats_base->tx_packets;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- drops += xstats->tail_drop[i];
+ drops += mlxsw_sp_xstats_tail_drop(xstats, i);
drops += xstats->wred_drop[i];
- backlog += xstats->backlog[i];
+ backlog += mlxsw_sp_xstats_backlog(xstats, i);
}
drops = drops - stats_base->drops;
stats_base->drops = 0;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- stats_base->drops += xstats->tail_drop[i];
+ stats_base->drops += mlxsw_sp_xstats_tail_drop(xstats, i);
stats_base->drops += xstats->wred_drop[i];
}
mlxsw_sp_port->tclass_qdiscs[tclass_num].handle == p->child_handle)
return 0;
+ if (!p->child_handle) {
+ /* This is an invisible FIFO replacing the original Qdisc.
+ * Ignore it--the original Qdisc's destroy will follow.
+ */
+ return 0;
+ }
+
/* See if the grafted qdisc is already offloaded on any tclass. If so,
* unoffload it.
*/
u64 len;
int err;
+ if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
+ this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));
if (mlxsw_core_skb_transmit_busy(mlxsw_sx->core, &tx_info))
return NETDEV_TX_BUSY;
- if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
- struct sk_buff *skb_orig = skb;
-
- skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
- if (!skb) {
- this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
- dev_kfree_skb_any(skb_orig);
- return NETDEV_TX_OK;
- }
- dev_consume_skb_any(skb_orig);
- }
mlxsw_sx_txhdr_construct(skb, &tx_info);
/* TX header is consumed by HW on the way so we shouldn't count its
* bytes as being sent.
netif_dbg(lp, ifup, dev, "%s: initializing sonic driver\n", __func__);
+ spin_lock_init(&lp->lock);
+
for (i = 0; i < SONIC_NUM_RRS; i++) {
struct sk_buff *skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
if (skb == NULL) {
return 0;
}
+/* Wait for the SONIC to become idle. */
+static void sonic_quiesce(struct net_device *dev, u16 mask)
+{
+ struct sonic_local * __maybe_unused lp = netdev_priv(dev);
+ int i;
+ u16 bits;
+
+ for (i = 0; i < 1000; ++i) {
+ bits = SONIC_READ(SONIC_CMD) & mask;
+ if (!bits)
+ return;
+ if (irqs_disabled() || in_interrupt())
+ udelay(20);
+ else
+ usleep_range(100, 200);
+ }
+ WARN_ONCE(1, "command deadline expired! 0x%04x\n", bits);
+}
/*
* Close the SONIC device
/*
* stop the SONIC, disable interrupts
*/
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
+ sonic_quiesce(dev, SONIC_CR_ALL);
+
SONIC_WRITE(SONIC_IMR, 0);
SONIC_WRITE(SONIC_ISR, 0x7fff);
SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
* put the Sonic into software-reset mode and
* disable all interrupts before releasing DMA buffers
*/
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
+ sonic_quiesce(dev, SONIC_CR_ALL);
+
SONIC_WRITE(SONIC_IMR, 0);
SONIC_WRITE(SONIC_ISR, 0x7fff);
SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
* wake the tx queue
* Concurrently with all of this, the SONIC is potentially writing to
* the status flags of the TDs.
- * Until some mutual exclusion is added, this code will not work with SMP. However,
- * MIPS Jazz machines and m68k Macs were all uni-processor machines.
*/
static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
struct sonic_local *lp = netdev_priv(dev);
dma_addr_t laddr;
int length;
- int entry = lp->next_tx;
+ int entry;
+ unsigned long flags;
netif_dbg(lp, tx_queued, dev, "%s: skb=%p\n", __func__, skb);
return NETDEV_TX_OK;
}
+ spin_lock_irqsave(&lp->lock, flags);
+
+ entry = lp->next_tx;
+
sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */
sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1); /* single fragment */
sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length); /* length of packet */
sonic_tda_put(dev, entry, SONIC_TD_LINK,
sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL);
- /*
- * Must set tx_skb[entry] only after clearing status, and
- * before clearing EOL and before stopping queue
- */
wmb();
lp->tx_len[entry] = length;
lp->tx_laddr[entry] = laddr;
SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
+ spin_unlock_irqrestore(&lp->lock, flags);
+
return NETDEV_TX_OK;
}
struct net_device *dev = dev_id;
struct sonic_local *lp = netdev_priv(dev);
int status;
+ unsigned long flags;
+
+ /* The lock has two purposes. Firstly, it synchronizes sonic_interrupt()
+ * with sonic_send_packet() so that the two functions can share state.
+ * Secondly, it makes sonic_interrupt() re-entrant, as that is required
+ * by macsonic which must use two IRQs with different priority levels.
+ */
+ spin_lock_irqsave(&lp->lock, flags);
+
- if (!(status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT))
+ status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
+ if (!status) {
+ spin_unlock_irqrestore(&lp->lock, flags);
return IRQ_NONE;
+ }
do {
+ SONIC_WRITE(SONIC_ISR, status); /* clear the interrupt(s) */
+
if (status & SONIC_INT_PKTRX) {
netif_dbg(lp, intr, dev, "%s: packet rx\n", __func__);
sonic_rx(dev); /* got packet(s) */
- SONIC_WRITE(SONIC_ISR, SONIC_INT_PKTRX); /* clear the interrupt */
}
if (status & SONIC_INT_TXDN) {
int td_status;
int freed_some = 0;
- /* At this point, cur_tx is the index of a TD that is one of:
- * unallocated/freed (status set & tx_skb[entry] clear)
- * allocated and sent (status set & tx_skb[entry] set )
- * allocated and not yet sent (status clear & tx_skb[entry] set )
- * still being allocated by sonic_send_packet (status clear & tx_skb[entry] clear)
+ /* The state of a Transmit Descriptor may be inferred
+ * from { tx_skb[entry], td_status } as follows.
+ * { clear, clear } => the TD has never been used
+ * { set, clear } => the TD was handed to SONIC
+ * { set, set } => the TD was handed back
+ * { clear, set } => the TD is available for re-use
*/
netif_dbg(lp, intr, dev, "%s: tx done\n", __func__);
if ((td_status = sonic_tda_get(dev, entry, SONIC_TD_STATUS)) == 0)
break;
- if (td_status & 0x0001) {
+ if (td_status & SONIC_TCR_PTX) {
lp->stats.tx_packets++;
lp->stats.tx_bytes += sonic_tda_get(dev, entry, SONIC_TD_PKTSIZE);
} else {
- lp->stats.tx_errors++;
- if (td_status & 0x0642)
+ if (td_status & (SONIC_TCR_EXD |
+ SONIC_TCR_EXC | SONIC_TCR_BCM))
lp->stats.tx_aborted_errors++;
- if (td_status & 0x0180)
+ if (td_status &
+ (SONIC_TCR_NCRS | SONIC_TCR_CRLS))
lp->stats.tx_carrier_errors++;
- if (td_status & 0x0020)
+ if (td_status & SONIC_TCR_OWC)
lp->stats.tx_window_errors++;
- if (td_status & 0x0004)
+ if (td_status & SONIC_TCR_FU)
lp->stats.tx_fifo_errors++;
}
if (freed_some || lp->tx_skb[entry] == NULL)
netif_wake_queue(dev); /* The ring is no longer full */
lp->cur_tx = entry;
- SONIC_WRITE(SONIC_ISR, SONIC_INT_TXDN); /* clear the interrupt */
}
/*
if (status & SONIC_INT_RFO) {
netif_dbg(lp, rx_err, dev, "%s: rx fifo overrun\n",
__func__);
- lp->stats.rx_fifo_errors++;
- SONIC_WRITE(SONIC_ISR, SONIC_INT_RFO); /* clear the interrupt */
}
if (status & SONIC_INT_RDE) {
netif_dbg(lp, rx_err, dev, "%s: rx descriptors exhausted\n",
__func__);
- lp->stats.rx_dropped++;
- SONIC_WRITE(SONIC_ISR, SONIC_INT_RDE); /* clear the interrupt */
}
if (status & SONIC_INT_RBAE) {
netif_dbg(lp, rx_err, dev, "%s: rx buffer area exceeded\n",
__func__);
- lp->stats.rx_dropped++;
- SONIC_WRITE(SONIC_ISR, SONIC_INT_RBAE); /* clear the interrupt */
}
/* counter overruns; all counters are 16bit wide */
- if (status & SONIC_INT_FAE) {
+ if (status & SONIC_INT_FAE)
lp->stats.rx_frame_errors += 65536;
- SONIC_WRITE(SONIC_ISR, SONIC_INT_FAE); /* clear the interrupt */
- }
- if (status & SONIC_INT_CRC) {
+ if (status & SONIC_INT_CRC)
lp->stats.rx_crc_errors += 65536;
- SONIC_WRITE(SONIC_ISR, SONIC_INT_CRC); /* clear the interrupt */
- }
- if (status & SONIC_INT_MP) {
+ if (status & SONIC_INT_MP)
lp->stats.rx_missed_errors += 65536;
- SONIC_WRITE(SONIC_ISR, SONIC_INT_MP); /* clear the interrupt */
- }
/* transmit error */
if (status & SONIC_INT_TXER) {
- if (SONIC_READ(SONIC_TCR) & SONIC_TCR_FU)
- netif_dbg(lp, tx_err, dev, "%s: tx fifo underrun\n",
- __func__);
- SONIC_WRITE(SONIC_ISR, SONIC_INT_TXER); /* clear the interrupt */
+ u16 tcr = SONIC_READ(SONIC_TCR);
+
+ netif_dbg(lp, tx_err, dev, "%s: TXER intr, TCR %04x\n",
+ __func__, tcr);
+
+ if (tcr & (SONIC_TCR_EXD | SONIC_TCR_EXC |
+ SONIC_TCR_FU | SONIC_TCR_BCM)) {
+ /* Aborted transmission. Try again. */
+ netif_stop_queue(dev);
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
+ }
}
/* bus retry */
/* ... to help debug DMA problems causing endless interrupts. */
/* Bounce the eth interface to turn on the interrupt again. */
SONIC_WRITE(SONIC_IMR, 0);
- SONIC_WRITE(SONIC_ISR, SONIC_INT_BR); /* clear the interrupt */
}
- /* load CAM done */
- if (status & SONIC_INT_LCD)
- SONIC_WRITE(SONIC_ISR, SONIC_INT_LCD); /* clear the interrupt */
- } while((status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT));
+ status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
+ } while (status);
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
return IRQ_HANDLED;
}
+/* Return the array index corresponding to a given Receive Buffer pointer. */
+static int index_from_addr(struct sonic_local *lp, dma_addr_t addr,
+ unsigned int last)
+{
+ unsigned int i = last;
+
+ do {
+ i = (i + 1) & SONIC_RRS_MASK;
+ if (addr == lp->rx_laddr[i])
+ return i;
+ } while (i != last);
+
+ return -ENOENT;
+}
+
+/* Allocate and map a new skb to be used as a receive buffer. */
+static bool sonic_alloc_rb(struct net_device *dev, struct sonic_local *lp,
+ struct sk_buff **new_skb, dma_addr_t *new_addr)
+{
+ *new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
+ if (!*new_skb)
+ return false;
+
+ if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
+ skb_reserve(*new_skb, 2);
+
+ *new_addr = dma_map_single(lp->device, skb_put(*new_skb, SONIC_RBSIZE),
+ SONIC_RBSIZE, DMA_FROM_DEVICE);
+ if (!*new_addr) {
+ dev_kfree_skb(*new_skb);
+ *new_skb = NULL;
+ return false;
+ }
+
+ return true;
+}
+
+/* Place a new receive resource in the Receive Resource Area and update RWP. */
+static void sonic_update_rra(struct net_device *dev, struct sonic_local *lp,
+ dma_addr_t old_addr, dma_addr_t new_addr)
+{
+ unsigned int entry = sonic_rr_entry(dev, SONIC_READ(SONIC_RWP));
+ unsigned int end = sonic_rr_entry(dev, SONIC_READ(SONIC_RRP));
+ u32 buf;
+
+ /* The resources in the range [RRP, RWP) belong to the SONIC. This loop
+ * scans the other resources in the RRA, those in the range [RWP, RRP).
+ */
+ do {
+ buf = (sonic_rra_get(dev, entry, SONIC_RR_BUFADR_H) << 16) |
+ sonic_rra_get(dev, entry, SONIC_RR_BUFADR_L);
+
+ if (buf == old_addr)
+ break;
+
+ entry = (entry + 1) & SONIC_RRS_MASK;
+ } while (entry != end);
+
+ WARN_ONCE(buf != old_addr, "failed to find resource!\n");
+
+ sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, new_addr >> 16);
+ sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, new_addr & 0xffff);
+
+ entry = (entry + 1) & SONIC_RRS_MASK;
+
+ SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, entry));
+}
+
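Both the RRA scan above and the descriptor loops below advance ring indices with (i + 1) & MASK, which equals (i + 1) modulo the ring size whenever the size is a power of two. A standalone sketch (ring size of 16 assumed for illustration):

    #include <stdio.h>

    #define NUM_RRS 16u /* assumed ring size; must be a power of two */
    #define RRS_MASK (NUM_RRS - 1)

    int main(void)
    {
        /* (i + 1) & MASK == (i + 1) % NUM_RRS for power-of-two sizes */
        printf("%u\n", (NUM_RRS - 1 + 1) & RRS_MASK); /* wraps to 0 */
        printf("%u\n", (3u + 1) & RRS_MASK);          /* 4 */
        return 0;
    }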
/*
* We have a good packet(s), pass it/them up the network stack.
*/
static void sonic_rx(struct net_device *dev)
{
struct sonic_local *lp = netdev_priv(dev);
- int status;
int entry = lp->cur_rx;
+ int prev_entry = lp->eol_rx;
+ bool rbe = false;
while (sonic_rda_get(dev, entry, SONIC_RD_IN_USE) == 0) {
- struct sk_buff *used_skb;
- struct sk_buff *new_skb;
- dma_addr_t new_laddr;
- u16 bufadr_l;
- u16 bufadr_h;
- int pkt_len;
-
- status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);
- if (status & SONIC_RCR_PRX) {
- /* Malloc up new buffer. */
- new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
- if (new_skb == NULL) {
- lp->stats.rx_dropped++;
+ u16 status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);
+
+ /* If the RD has LPKT set, the chip has finished with the RB */
+ if ((status & SONIC_RCR_PRX) && (status & SONIC_RCR_LPKT)) {
+ struct sk_buff *new_skb;
+ dma_addr_t new_laddr;
+ u32 addr = (sonic_rda_get(dev, entry,
+ SONIC_RD_PKTPTR_H) << 16) |
+ sonic_rda_get(dev, entry, SONIC_RD_PKTPTR_L);
+ int i = index_from_addr(lp, addr, entry);
+
+ if (i < 0) {
+ WARN_ONCE(1, "failed to find buffer!\n");
break;
}
- /* provide 16 byte IP header alignment unless DMA requires otherwise */
- if(SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
- skb_reserve(new_skb, 2);
-
- new_laddr = dma_map_single(lp->device, skb_put(new_skb, SONIC_RBSIZE),
- SONIC_RBSIZE, DMA_FROM_DEVICE);
- if (!new_laddr) {
- dev_kfree_skb(new_skb);
- printk(KERN_ERR "%s: Failed to map rx buffer, dropping packet.\n", dev->name);
+
+ if (sonic_alloc_rb(dev, lp, &new_skb, &new_laddr)) {
+ struct sk_buff *used_skb = lp->rx_skb[i];
+ int pkt_len;
+
+ /* Pass the used buffer up the stack */
+ dma_unmap_single(lp->device, addr, SONIC_RBSIZE,
+ DMA_FROM_DEVICE);
+
+ pkt_len = sonic_rda_get(dev, entry,
+ SONIC_RD_PKTLEN);
+ skb_trim(used_skb, pkt_len);
+ used_skb->protocol = eth_type_trans(used_skb,
+ dev);
+ netif_rx(used_skb);
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += pkt_len;
+
+ lp->rx_skb[i] = new_skb;
+ lp->rx_laddr[i] = new_laddr;
+ } else {
+ /* Failed to obtain a new buffer so re-use it */
+ new_laddr = addr;
lp->stats.rx_dropped++;
- break;
}
-
- /* now we have a new skb to replace it, pass the used one up the stack */
- dma_unmap_single(lp->device, lp->rx_laddr[entry], SONIC_RBSIZE, DMA_FROM_DEVICE);
- used_skb = lp->rx_skb[entry];
- pkt_len = sonic_rda_get(dev, entry, SONIC_RD_PKTLEN);
- skb_trim(used_skb, pkt_len);
- used_skb->protocol = eth_type_trans(used_skb, dev);
- netif_rx(used_skb);
- lp->stats.rx_packets++;
- lp->stats.rx_bytes += pkt_len;
-
- /* and insert the new skb */
- lp->rx_laddr[entry] = new_laddr;
- lp->rx_skb[entry] = new_skb;
-
- bufadr_l = (unsigned long)new_laddr & 0xffff;
- bufadr_h = (unsigned long)new_laddr >> 16;
- sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, bufadr_l);
- sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, bufadr_h);
- } else {
- /* This should only happen, if we enable accepting broken packets. */
- lp->stats.rx_errors++;
- if (status & SONIC_RCR_FAER)
- lp->stats.rx_frame_errors++;
- if (status & SONIC_RCR_CRCR)
- lp->stats.rx_crc_errors++;
- }
- if (status & SONIC_RCR_LPKT) {
- /*
- * this was the last packet out of the current receive buffer
- * give the buffer back to the SONIC
+ /* If RBE is already asserted when RWP advances then
+ * it's safe to clear RBE after processing this packet.
*/
- lp->cur_rwp += SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode);
- if (lp->cur_rwp >= lp->rra_end) lp->cur_rwp = lp->rra_laddr & 0xffff;
- SONIC_WRITE(SONIC_RWP, lp->cur_rwp);
- if (SONIC_READ(SONIC_ISR) & SONIC_INT_RBE) {
- netif_dbg(lp, rx_err, dev, "%s: rx buffer exhausted\n",
- __func__);
- SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE); /* clear the flag */
- }
- } else
- printk(KERN_ERR "%s: rx desc without RCR_LPKT. Shouldn't happen !?\n",
- dev->name);
+ rbe = rbe || SONIC_READ(SONIC_ISR) & SONIC_INT_RBE;
+ sonic_update_rra(dev, lp, addr, new_laddr);
+ }
/*
* give back the descriptor
*/
- sonic_rda_put(dev, entry, SONIC_RD_LINK,
- sonic_rda_get(dev, entry, SONIC_RD_LINK) | SONIC_EOL);
+ sonic_rda_put(dev, entry, SONIC_RD_STATUS, 0);
sonic_rda_put(dev, entry, SONIC_RD_IN_USE, 1);
- sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK,
- sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK) & ~SONIC_EOL);
- lp->eol_rx = entry;
- lp->cur_rx = entry = (entry + 1) & SONIC_RDS_MASK;
+
+ prev_entry = entry;
+ entry = (entry + 1) & SONIC_RDS_MASK;
+ }
+
+ lp->cur_rx = entry;
+
+ if (prev_entry != lp->eol_rx) {
+ /* Advance the EOL flag to put descriptors back into service */
+ sonic_rda_put(dev, prev_entry, SONIC_RD_LINK, SONIC_EOL |
+ sonic_rda_get(dev, prev_entry, SONIC_RD_LINK));
+ sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK, ~SONIC_EOL &
+ sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK));
+ lp->eol_rx = prev_entry;
}
+
+ if (rbe)
+ SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE);
/*
* If any worth-while packets have been received, netif_rx()
* has done a mark_bh(NET_BH) for us and will work on them
(netdev_mc_count(dev) > 15)) {
rcr |= SONIC_RCR_AMC;
} else {
+ unsigned long flags;
+
netif_dbg(lp, ifup, dev, "%s: mc_count %d\n", __func__,
netdev_mc_count(dev));
sonic_set_cam_enable(dev, 1); /* always enable our own address */
i++;
}
SONIC_WRITE(SONIC_CDC, 16);
- /* issue Load CAM command */
SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff);
+
+ /* LCAM and TXP commands can't be used simultaneously */
+ spin_lock_irqsave(&lp->lock, flags);
+ sonic_quiesce(dev, SONIC_CR_TXP);
SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
+ sonic_quiesce(dev, SONIC_CR_LCAM);
+ spin_unlock_irqrestore(&lp->lock, flags);
}
}
*/
static int sonic_init(struct net_device *dev)
{
- unsigned int cmd;
struct sonic_local *lp = netdev_priv(dev);
int i;
SONIC_WRITE(SONIC_ISR, 0x7fff);
SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
+ /* While in reset mode, clear CAM Enable register */
+ SONIC_WRITE(SONIC_CE, 0);
+
/*
* clear software reset flag, disable receiver, clear and
* enable interrupts, then completely initialize the SONIC
*/
SONIC_WRITE(SONIC_CMD, 0);
- SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS | SONIC_CR_STP);
+ sonic_quiesce(dev, SONIC_CR_ALL);
/*
* initialize the receive resource area
}
/* initialize all RRA registers */
- lp->rra_end = (lp->rra_laddr + SONIC_NUM_RRS * SIZEOF_SONIC_RR *
- SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff;
- lp->cur_rwp = (lp->rra_laddr + (SONIC_NUM_RRS - 1) * SIZEOF_SONIC_RR *
- SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff;
-
- SONIC_WRITE(SONIC_RSA, lp->rra_laddr & 0xffff);
- SONIC_WRITE(SONIC_REA, lp->rra_end);
- SONIC_WRITE(SONIC_RRP, lp->rra_laddr & 0xffff);
- SONIC_WRITE(SONIC_RWP, lp->cur_rwp);
+ SONIC_WRITE(SONIC_RSA, sonic_rr_addr(dev, 0));
+ SONIC_WRITE(SONIC_REA, sonic_rr_addr(dev, SONIC_NUM_RRS));
+ SONIC_WRITE(SONIC_RRP, sonic_rr_addr(dev, 0));
+ SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, SONIC_NUM_RRS - 1));
SONIC_WRITE(SONIC_URRA, lp->rra_laddr >> 16);
SONIC_WRITE(SONIC_EOBC, (SONIC_RBSIZE >> 1) - (lp->dma_bitmode ? 2 : 1));
netif_dbg(lp, ifup, dev, "%s: issuing RRRA command\n", __func__);
SONIC_WRITE(SONIC_CMD, SONIC_CR_RRRA);
- i = 0;
- while (i++ < 100) {
- if (SONIC_READ(SONIC_CMD) & SONIC_CR_RRRA)
- break;
- }
-
- netif_dbg(lp, ifup, dev, "%s: status=%x, i=%d\n", __func__,
- SONIC_READ(SONIC_CMD), i);
+ sonic_quiesce(dev, SONIC_CR_RRRA);
/*
* Initialize the receive descriptors so that they
* load the CAM
*/
SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
-
- i = 0;
- while (i++ < 100) {
- if (SONIC_READ(SONIC_ISR) & SONIC_INT_LCD)
- break;
- }
- netif_dbg(lp, ifup, dev, "%s: CMD=%x, ISR=%x, i=%d\n", __func__,
- SONIC_READ(SONIC_CMD), SONIC_READ(SONIC_ISR), i);
+ sonic_quiesce(dev, SONIC_CR_LCAM);
/*
* enable receiver, disable loopback
* and enable all interrupts
*/
- SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN | SONIC_CR_STP);
SONIC_WRITE(SONIC_RCR, SONIC_RCR_DEFAULT);
SONIC_WRITE(SONIC_TCR, SONIC_TCR_DEFAULT);
SONIC_WRITE(SONIC_ISR, 0x7fff);
SONIC_WRITE(SONIC_IMR, SONIC_IMR_DEFAULT);
-
- cmd = SONIC_READ(SONIC_CMD);
- if ((cmd & SONIC_CR_RXEN) == 0 || (cmd & SONIC_CR_STP) == 0)
- printk(KERN_ERR "sonic_init: failed, status=%x\n", cmd);
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN);
netif_dbg(lp, ifup, dev, "%s: new status=%x\n", __func__,
SONIC_READ(SONIC_CMD));
#define SONIC_CR_TXP 0x0002
#define SONIC_CR_HTX 0x0001
+#define SONIC_CR_ALL (SONIC_CR_LCAM | SONIC_CR_RRRA | \
+ SONIC_CR_RXEN | SONIC_CR_TXP)
+
/*
* SONIC data configuration bits
*/
#define SONIC_TCR_NCRS 0x0100
#define SONIC_TCR_CRLS 0x0080
#define SONIC_TCR_EXC 0x0040
+#define SONIC_TCR_OWC 0x0020
#define SONIC_TCR_PMB 0x0008
#define SONIC_TCR_FU 0x0004
#define SONIC_TCR_BCM 0x0002
#define SONIC_NUM_RDS SONIC_NUM_RRS /* number of receive descriptors */
#define SONIC_NUM_TDS 16 /* number of transmit descriptors */
-#define SONIC_RDS_MASK (SONIC_NUM_RDS-1)
-#define SONIC_TDS_MASK (SONIC_NUM_TDS-1)
+#define SONIC_RRS_MASK (SONIC_NUM_RRS - 1)
+#define SONIC_RDS_MASK (SONIC_NUM_RDS - 1)
+#define SONIC_TDS_MASK (SONIC_NUM_TDS - 1)
#define SONIC_RBSIZE 1520 /* size of one resource buffer */
u32 rda_laddr; /* logical DMA address of RDA */
dma_addr_t rx_laddr[SONIC_NUM_RRS]; /* logical DMA addresses of rx skbuffs */
dma_addr_t tx_laddr[SONIC_NUM_TDS]; /* logical DMA addresses of tx skbuffs */
- unsigned int rra_end;
- unsigned int cur_rwp;
unsigned int cur_rx;
unsigned int cur_tx; /* first unacked transmit packet */
unsigned int eol_rx;
int msg_enable;
struct device *device; /* generic device */
struct net_device_stats stats;
+ spinlock_t lock;
};
#define TX_TIMEOUT (3 * HZ)
as far as we can tell. */
/* OpenBSD calls this "SWO". I'd like to think that sonic_buf_put()
is a much better name. */
-static inline void sonic_buf_put(void* base, int bitmode,
+static inline void sonic_buf_put(u16 *base, int bitmode,
int offset, __u16 val)
{
if (bitmode)
#ifdef __BIG_ENDIAN
- ((__u16 *) base + (offset*2))[1] = val;
+ __raw_writew(val, base + (offset * 2) + 1);
#else
- ((__u16 *) base + (offset*2))[0] = val;
+ __raw_writew(val, base + (offset * 2) + 0);
#endif
else
- ((__u16 *) base)[offset] = val;
+ __raw_writew(val, base + (offset * 1) + 0);
}
-static inline __u16 sonic_buf_get(void* base, int bitmode,
+static inline __u16 sonic_buf_get(u16 *base, int bitmode,
int offset)
{
if (bitmode)
#ifdef __BIG_ENDIAN
- return ((volatile __u16 *) base + (offset*2))[1];
+ return __raw_readw(base + (offset * 2) + 1);
#else
- return ((volatile __u16 *) base + (offset*2))[0];
+ return __raw_readw(base + (offset * 2) + 0);
#endif
else
- return ((volatile __u16 *) base)[offset];
+ return __raw_readw(base + (offset * 1) + 0);
}
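The switch to __raw_writew()/__raw_readw() preserves the buffer's exact byte layout (no endian conversion) while still forcing real memory accesses. The indexing reflects how 32-bit bus mode widens each descriptor field; roughly:

/* bitmode != 0 (32-bit bus): every 16-bit field is widened to 32 bits,
 * and on a big-endian host the live half-word is the second one:
 *
 *   u16 index:   0      1        2      3
 *              [pad] [field0] [pad] [field1] ...
 *
 * hence base + (offset * 2) + 1 above; little-endian uses offset 0.
 */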
/* Inlines that you should actually use for reading/writing DMA buffers */
(entry * SIZEOF_SONIC_RR) + offset);
}
+static inline u16 sonic_rr_addr(struct net_device *dev, int entry)
+{
+ struct sonic_local *lp = netdev_priv(dev);
+
+ return lp->rra_laddr +
+ entry * SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode);
+}
+
+static inline u16 sonic_rr_entry(struct net_device *dev, u16 addr)
+{
+ struct sonic_local *lp = netdev_priv(dev);
+
+ return (addr - (u16)lp->rra_laddr) / (SIZEOF_SONIC_RR *
+ SONIC_BUS_SCALE(lp->dma_bitmode));
+}
+
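The two helpers are inverses over valid entries, which is presumably what the receive path needs when converting the hardware's RWP value back into a ring index:

/* For any i in [0, SONIC_NUM_RRS):
 *   sonic_rr_entry(dev, sonic_rr_addr(dev, i)) == i
 * since both scale by SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode).
 */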
static const char version[] =
"sonic.c:v0.92 20.9.98 tsbogend@alpha.franken.de\n";
break;
}
entry += p_hdr->size;
+ cond_resched();
}
p_dev->ahw->reset.seq_index = index;
}
addr += 16;
reg_read -= 16;
ret += 16;
+ cond_resched();
}
out:
mutex_unlock(&adapter->ahw->mem_lock);
buf_offset += entry->hdr.cap_size;
entry_offset += entry->hdr.offset;
buffer = fw_dump->data + buf_offset;
+ cond_resched();
}
fw_dump->clr = 1;
if (cd->tsu) {
add_tsu_reg(ARSTR);
add_tsu_reg(TSU_CTRST);
- add_tsu_reg(TSU_FWEN0);
- add_tsu_reg(TSU_FWEN1);
- add_tsu_reg(TSU_FCM);
- add_tsu_reg(TSU_BSYSL0);
- add_tsu_reg(TSU_BSYSL1);
- add_tsu_reg(TSU_PRISL0);
- add_tsu_reg(TSU_PRISL1);
- add_tsu_reg(TSU_FWSL0);
- add_tsu_reg(TSU_FWSL1);
+ if (cd->dual_port) {
+ add_tsu_reg(TSU_FWEN0);
+ add_tsu_reg(TSU_FWEN1);
+ add_tsu_reg(TSU_FCM);
+ add_tsu_reg(TSU_BSYSL0);
+ add_tsu_reg(TSU_BSYSL1);
+ add_tsu_reg(TSU_PRISL0);
+ add_tsu_reg(TSU_PRISL1);
+ add_tsu_reg(TSU_FWSL0);
+ add_tsu_reg(TSU_FWSL1);
+ }
add_tsu_reg(TSU_FWSLC);
- add_tsu_reg(TSU_QTAGM0);
- add_tsu_reg(TSU_QTAGM1);
- add_tsu_reg(TSU_FWSR);
- add_tsu_reg(TSU_FWINMK);
- add_tsu_reg(TSU_ADQT0);
- add_tsu_reg(TSU_ADQT1);
- add_tsu_reg(TSU_VTAG0);
- add_tsu_reg(TSU_VTAG1);
+ if (cd->dual_port) {
+ add_tsu_reg(TSU_QTAGM0);
+ add_tsu_reg(TSU_QTAGM1);
+ add_tsu_reg(TSU_FWSR);
+ add_tsu_reg(TSU_FWINMK);
+ add_tsu_reg(TSU_ADQT0);
+ add_tsu_reg(TSU_ADQT1);
+ add_tsu_reg(TSU_VTAG0);
+ add_tsu_reg(TSU_VTAG1);
+ }
add_tsu_reg(TSU_ADSBSY);
add_tsu_reg(TSU_TEN);
add_tsu_reg(TSU_POST1);
-MODULE_DESCRIPTION("SAMSUNG 10G/2.5G/1G Ethernet PLATFORM driver");
+MODULE_DESCRIPTION("Samsung 10G/2.5G/1G Ethernet PLATFORM driver");
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
MODULE_PARM_DESC(eee_timer, "EEE-LPI Default LS timer value");
phy_ethtool_get_wol(ndev->phydev, wol);
}
-static int ave_ethtool_set_wol(struct net_device *ndev,
- struct ethtool_wolinfo *wol)
+static int __ave_ethtool_set_wol(struct net_device *ndev,
+ struct ethtool_wolinfo *wol)
{
- int ret;
-
if (!ndev->phydev ||
(wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE)))
return -EOPNOTSUPP;
- ret = phy_ethtool_set_wol(ndev->phydev, wol);
+ return phy_ethtool_set_wol(ndev->phydev, wol);
+}
+
+static int ave_ethtool_set_wol(struct net_device *ndev,
+ struct ethtool_wolinfo *wol)
+{
+ int ret;
+
+ ret = __ave_ethtool_set_wol(ndev, wol);
if (!ret)
device_set_wakeup_enable(&ndev->dev, !!wol->wolopts);
/* set wol initial state disabled */
wol.wolopts = 0;
- ave_ethtool_set_wol(ndev, &wol);
+ __ave_ethtool_set_wol(ndev, &wol);
if (!phy_interface_is_rgmii(phydev))
phy_set_max_speed(phydev, SPEED_100);
ave_ethtool_get_wol(ndev, &wol);
wol.wolopts = priv->wolopts;
- ave_ethtool_set_wol(ndev, &wol);
+ __ave_ethtool_set_wol(ndev, &wol);
if (ndev->phydev) {
ret = phy_resume(ndev->phydev);
/* default */
break;
case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
reg |= SYSCON_EPIT | SYSCON_ETCS_INT_GMII;
break;
case PHY_INTERFACE_MODE_RMII:
* rate, which then uses the auto-reparenting feature of the
* clock driver, and enabling/disabling the clock.
*/
- if (gmac->interface == PHY_INTERFACE_MODE_RGMII) {
+ if (phy_interface_mode_is_rgmii(gmac->interface)) {
clk_set_rate(gmac->tx_clk, SUN7I_GMAC_GMII_RGMII_RATE);
clk_prepare_enable(gmac->tx_clk);
gmac->clk_enabled = 1;
static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
#ifdef CONFIG_DEBUG_FS
+static const struct net_device_ops stmmac_netdev_ops;
static void stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif
}
DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
+/* Use network device events to rename debugfs file entries.
+ */
+static int stmmac_device_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (dev->netdev_ops != &stmmac_netdev_ops)
+ goto done;
+
+ switch (event) {
+ case NETDEV_CHANGENAME:
+ if (priv->dbgfs_dir)
+ priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
+ priv->dbgfs_dir,
+ stmmac_fs_dir,
+ dev->name);
+ break;
+ }
+done:
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block stmmac_notifier = {
+ .notifier_call = stmmac_device_event,
+};
+
static void stmmac_init_fs(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
/* Entry to report the DMA HW features */
debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
&stmmac_dma_cap_fops);
+
+ register_netdevice_notifier(&stmmac_notifier);
}
static void stmmac_exit_fs(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
+ unregister_netdevice_notifier(&stmmac_notifier);
debugfs_remove_recursive(priv->dbgfs_dir);
}
#endif /* CONFIG_DEBUG_FS */
static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
struct device_node *np, struct device *dev)
{
- bool mdio = false;
+ bool mdio = !of_phy_is_fixed_link(np);
static const struct of_device_id need_mdio_ids[] = {
{ .compatible = "snps,dwc-qos-ethernet-4.10" },
{},
*mac = NULL;
}
- rc = of_get_phy_mode(np, &plat->phy_interface);
- if (rc)
- return ERR_PTR(rc);
+ plat->phy_interface = device_get_phy_mode(&pdev->dev);
+ if (plat->phy_interface < 0)
+ return ERR_PTR(plat->phy_interface);
plat->interface = stmmac_of_get_mac_mode(np);
if (plat->interface < 0)
if (attr->max_size && (attr->max_size > size))
size = attr->max_size;
- skb = netdev_alloc_skb_ip_align(priv->dev, size);
+ skb = netdev_alloc_skb(priv->dev, size);
if (!skb)
return NULL;
struct net_device *orig_ndev)
{
struct stmmac_test_priv *tpriv = pt->af_packet_priv;
+ unsigned char *src = tpriv->packet->src;
+ unsigned char *dst = tpriv->packet->dst;
struct stmmachdr *shdr;
struct ethhdr *ehdr;
struct udphdr *uhdr;
goto out;
ehdr = (struct ethhdr *)skb_mac_header(skb);
- if (tpriv->packet->dst) {
- if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst))
+ if (dst) {
+ if (!ether_addr_equal_unaligned(ehdr->h_dest, dst))
goto out;
}
if (tpriv->packet->sarc) {
- if (!ether_addr_equal(ehdr->h_source, ehdr->h_dest))
+ if (!ether_addr_equal_unaligned(ehdr->h_source, ehdr->h_dest))
goto out;
- } else if (tpriv->packet->src) {
- if (!ether_addr_equal(ehdr->h_source, tpriv->packet->src))
+ } else if (src) {
+ if (!ether_addr_equal_unaligned(ehdr->h_source, src))
goto out;
}
struct ethhdr *ehdr;
ehdr = (struct ethhdr *)skb_mac_header(skb);
- if (!ether_addr_equal(ehdr->h_source, orig_ndev->dev_addr))
+ if (!ether_addr_equal_unaligned(ehdr->h_source, orig_ndev->dev_addr))
goto out;
if (ehdr->h_proto != htons(ETH_P_PAUSE))
goto out;
if (tpriv->vlan_id) {
if (skb->vlan_proto != htons(proto))
goto out;
- if (skb->vlan_tci != tpriv->vlan_id)
+ if (skb->vlan_tci != tpriv->vlan_id) {
+ /* Means filter did not work. */
+ tpriv->ok = false;
+ complete(&tpriv->comp);
goto out;
+ }
}
ehdr = (struct ethhdr *)skb_mac_header(skb);
- if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst))
+ if (!ether_addr_equal_unaligned(ehdr->h_dest, tpriv->packet->dst))
goto out;
ihdr = ip_hdr(skb);
{
int ret, prev_cap = priv->dma_cap.vlhash;
+ if (!(priv->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
+ return -EOPNOTSUPP;
+
priv->dma_cap.vlhash = 0;
ret = __stmmac_test_vlanfilt(priv);
priv->dma_cap.vlhash = prev_cap;
{
int ret, prev_cap = priv->dma_cap.vlhash;
+ if (!(priv->dev->features & NETIF_F_HW_VLAN_STAG_FILTER))
+ return -EOPNOTSUPP;
+
priv->dma_cap.vlhash = 0;
ret = __stmmac_test_dvlanfilt(priv);
priv->dma_cap.vlhash = prev_cap;
struct stmmac_packet_attrs attr = { };
struct flow_dissector *dissector;
struct flow_cls_offload *cls;
+ int ret, old_enable = 0;
struct flow_rule *rule;
- int ret;
if (!tc_can_offload(priv->dev))
return -EOPNOTSUPP;
if (!priv->dma_cap.l3l4fnum)
return -EOPNOTSUPP;
- if (priv->rss.enable)
+ if (priv->rss.enable) {
+ old_enable = priv->rss.enable;
+ priv->rss.enable = false;
stmmac_rss_configure(priv, priv->hw, NULL,
priv->plat->rx_queues_to_use);
+ }
dissector = kzalloc(sizeof(*dissector), GFP_KERNEL);
if (!dissector) {
cleanup_dissector:
kfree(dissector);
cleanup_rss:
- if (priv->rss.enable) {
+ if (old_enable) {
+ priv->rss.enable = old_enable;
stmmac_rss_configure(priv, priv->hw, &priv->rss,
priv->plat->rx_queues_to_use);
}
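This save/disable/restore dance around stmmac_rss_configure() appears twice, here and in the second selftest below; a sketch of helpers that would factor it out (hypothetical names, not part of the patch):

static int stmmac_rss_pause(struct stmmac_priv *priv)
{
	int old_enable = priv->rss.enable;

	if (old_enable) {
		priv->rss.enable = false;
		stmmac_rss_configure(priv, priv->hw, NULL,
				     priv->plat->rx_queues_to_use);
	}
	return old_enable;
}

static void stmmac_rss_resume(struct stmmac_priv *priv, int old_enable)
{
	if (old_enable) {
		priv->rss.enable = old_enable;
		stmmac_rss_configure(priv, priv->hw, &priv->rss,
				     priv->plat->rx_queues_to_use);
	}
}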
struct stmmac_packet_attrs attr = { };
struct flow_dissector *dissector;
struct flow_cls_offload *cls;
+ int ret, old_enable = 0;
struct flow_rule *rule;
- int ret;
if (!tc_can_offload(priv->dev))
return -EOPNOTSUPP;
if (!priv->dma_cap.l3l4fnum)
return -EOPNOTSUPP;
- if (priv->rss.enable)
+ if (priv->rss.enable) {
+ old_enable = priv->rss.enable;
+ priv->rss.enable = false;
stmmac_rss_configure(priv, priv->hw, NULL,
priv->plat->rx_queues_to_use);
+ }
dissector = kzalloc(sizeof(*dissector), GFP_KERNEL);
if (!dissector) {
cleanup_dissector:
kfree(dissector);
cleanup_rss:
- if (priv->rss.enable) {
+ if (old_enable) {
+ priv->rss.enable = old_enable;
stmmac_rss_configure(priv, priv->hw, &priv->rss,
priv->plat->rx_queues_to_use);
}
struct arphdr *ahdr;
ehdr = (struct ethhdr *)skb_mac_header(skb);
- if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->src))
+ if (!ether_addr_equal_unaligned(ehdr->h_dest, tpriv->packet->src))
goto out;
ahdr = arp_hdr(skb);
{
int ret = 0;
+ /* When RSS is enabled, the filtering will be bypassed */
+ if (priv->rss.enable)
+ return -EBUSY;
+
switch (cls->command) {
case FLOW_CLS_REPLACE:
ret = tc_add_flow(priv, cls);
return NULL;
}
- if (sock->sk->sk_protocol != IPPROTO_UDP) {
+ sk = sock->sk;
+ if (sk->sk_protocol != IPPROTO_UDP ||
+ sk->sk_type != SOCK_DGRAM ||
+ (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)) {
pr_debug("socket fd=%d not UDP\n", fd);
sk = ERR_PTR(-EINVAL);
goto out_sock;
}
- lock_sock(sock->sk);
- if (sock->sk->sk_user_data) {
+ lock_sock(sk);
+ if (sk->sk_user_data) {
sk = ERR_PTR(-EBUSY);
- goto out_sock;
+ goto out_rel_sock;
}
- sk = sock->sk;
sock_hold(sk);
tuncfg.sk_user_data = gtp;
setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg);
-out_sock:
+out_rel_sock:
release_sock(sock->sk);
+out_sock:
sockfd_put(sock);
return sk;
}
/* Halt and release the rndis device */
rndis_filter_halt_device(net_dev, rndis_dev);
- net_dev->extension = NULL;
-
netvsc_device_remove(dev);
}
const struct macvlan_dev *dest;
if (vlan->mode == MACVLAN_MODE_BRIDGE) {
- const struct ethhdr *eth = (void *)skb->data;
+ const struct ethhdr *eth = skb_eth_hdr(skb);
/* send to other bridge ports directly */
if (is_multicast_ether_addr(eth->h_dest)) {
+ skb_reset_mac_header(skb);
macvlan_broadcast(skb, port, dev, MACVLAN_MODE_BRIDGE);
goto xmit_world;
}
get_random_bytes(dummy_data, NSIM_DEV_DUMMY_REGION_SIZE);
- id = devlink_region_shapshot_id_get(priv_to_devlink(nsim_dev));
+ id = devlink_region_snapshot_id_get(priv_to_devlink(nsim_dev));
err = devlink_region_snapshot_create(nsim_dev->dummy_region,
dummy_data, id, kfree);
if (err) {
Currently supports dm9161e and dm9131
config DP83822_PHY
- tristate "Texas Instruments DP83822 PHY"
+ tristate "Texas Instruments DP83822/825 PHYs"
---help---
- Supports the DP83822 PHY.
+ Supports the DP83822 and DP83825I PHYs.
config DP83TC811_PHY
- tristate "Texas Instruments DP83TC822 PHY"
+ tristate "Texas Instruments DP83TC811 PHY"
---help---
- Supports the DP83TC822 PHY.
+ Supports the DP83TC811 PHY.
config DP83848_PHY
tristate "Texas Instruments DP83848 PHY"
#define DP83867_PHYCR_FIFO_DEPTH_MAX 0x03
#define DP83867_PHYCR_FIFO_DEPTH_MASK GENMASK(15, 14)
#define DP83867_PHYCR_RESERVED_MASK BIT(11)
+#define DP83867_PHYCR_FORCE_LINK_GOOD BIT(10)
/* RGMIIDCTL bits */
#define DP83867_RGMII_TX_CLK_DELAY_MAX 0xf
usleep_range(10, 20);
- return 0;
+ /* After reset the FORCE_LINK_GOOD bit is set even though its
+ * default value should be unset. Clear FORCE_LINK_GOOD for the
+ * PHY to work properly.
+ */
+ return phy_modify(phydev, MII_DP83867_PHYCTRL,
+ DP83867_PHYCR_FORCE_LINK_GOOD, 0);
}
static struct phy_driver dp83867_driver[] = {
struct sfp_bus *bus;
int ret;
+ if (!fwnode)
+ return 0;
+
bus = sfp_bus_find_fwnode(fwnode);
if (IS_ERR(bus)) {
ret = PTR_ERR(bus);
*/
static void slip_write_wakeup(struct tty_struct *tty)
{
- struct slip *sl = tty->disc_data;
+ struct slip *sl;
+
+ rcu_read_lock();
+ sl = rcu_dereference(tty->disc_data);
+ if (!sl)
+ goto out;
schedule_work(&sl->tx_work);
+out:
+ rcu_read_unlock();
}
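This reader pairs with the teardown below in the standard RCU unpublish pattern; in outline (generic names, not the driver's):

/* reader */
rcu_read_lock();
obj = rcu_dereference(shared_ptr);
if (obj)
	schedule_work(&obj->work);
rcu_read_unlock();

/* unpublisher, which still holds its own reference to obj (sl here) */
rcu_assign_pointer(shared_ptr, NULL);
synchronize_rcu();      /* wait out readers that saw the old pointer */
flush_work(&obj->work); /* then drain any work they managed to queue */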
static void sl_tx_timeout(struct net_device *dev)
return;
spin_lock_bh(&sl->lock);
- tty->disc_data = NULL;
+ rcu_assign_pointer(tty->disc_data, NULL);
sl->tty = NULL;
spin_unlock_bh(&sl->lock);
+ synchronize_rcu();
flush_work(&sl->tx_work);
/* VSV = very important to remove timers */
if (ret != XDP_PASS) {
rcu_read_unlock();
local_bh_enable();
+ if (frags) {
+ tfile->napi.skb = NULL;
+ mutex_unlock(&tfile->napi_mutex);
+ }
return total_len;
}
}
#include <linux/mdio.h>
#include <linux/phy.h>
#include <net/ip6_checksum.h>
+#include <net/vxlan.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
return 0;
}
-static int lan78xx_linearize(struct sk_buff *skb)
-{
- return skb_linearize(skb);
-}
-
static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
struct sk_buff *skb, gfp_t flags)
{
return NULL;
}
- if (lan78xx_linearize(skb) < 0)
+ if (skb_linearize(skb)) {
+ dev_kfree_skb_any(skb);
return NULL;
+ }
tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
tasklet_schedule(&dev->bh);
}
+static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
+ struct net_device *netdev,
+ netdev_features_t features)
+{
+ if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
+ features &= ~NETIF_F_GSO_MASK;
+
+ features = vlan_features_check(skb, features);
+ features = vxlan_features_check(skb, features);
+
+ return features;
+}
+
static const struct net_device_ops lan78xx_netdev_ops = {
.ndo_open = lan78xx_open,
.ndo_stop = lan78xx_stop,
.ndo_set_features = lan78xx_set_features,
.ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid,
+ .ndo_features_check = lan78xx_features_check,
};
static void lan78xx_stat_monitor(struct timer_list *t)
/* MTU range: 68 - 9000 */
netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
+ netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);
dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
{QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
{QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */
{QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */
+ {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0800)}, /* Quectel RM500Q-GL */
/* 3. Combined interface devices matching on interface number */
{QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
#define NETNEXT_VERSION "11"
/* Information for net */
-#define NET_VERSION "10"
+#define NET_VERSION "11"
#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
#define PLA_LED_FEATURE 0xdd92
#define PLA_PHYAR 0xde00
#define PLA_BOOT_CTRL 0xe004
+#define PLA_LWAKE_CTRL_REG 0xe007
#define PLA_GPHY_INTR_IMR 0xe022
#define PLA_EEE_CR 0xe040
#define PLA_EEEP_CR 0xe080
#define PLA_TALLYCNT 0xe890
#define PLA_SFF_STS_7 0xe8de
#define PLA_PHYSTATUS 0xe908
+#define PLA_CONFIG6 0xe90a /* CONFIG6 */
#define PLA_BP_BA 0xfc26
#define PLA_BP_0 0xfc28
#define PLA_BP_1 0xfc2a
#define PLA_BP_EN 0xfc38
#define USB_USB2PHY 0xb41e
+#define USB_SSPHYLINK1 0xb426
#define USB_SSPHYLINK2 0xb428
#define USB_U2P3_CTRL 0xb460
#define USB_CSR_DUMMY1 0xb464
#define LINK_ON_WAKE_EN 0x0010
#define LINK_OFF_WAKE_EN 0x0008
+/* PLA_CONFIG6 */
+#define LANWAKE_CLR_EN BIT(0)
+
/* PLA_CONFIG5 */
#define BWF_EN 0x0040
#define MWF_EN 0x0020
/* PLA_PHY_PWR */
#define TX_10M_IDLE_EN 0x0080
#define PFM_PWM_SWITCH 0x0040
+#define TEST_IO_OFF BIT(4)
/* PLA_MAC_PWR_CTRL */
#define D3_CLK_GATED_EN 0x00004000
#define MAC_CLK_SPDWN_EN BIT(15)
/* PLA_MAC_PWR_CTRL3 */
+#define PLA_MCU_SPDWN_EN BIT(14)
#define PKT_AVAIL_SPDWN_EN 0x0100
#define SUSPEND_SPDWN_EN 0x0004
#define U1U2_SPDWN_EN 0x0002
/* PLA_BOOT_CTRL */
#define AUTOLOAD_DONE 0x0002
+/* PLA_LWAKE_CTRL_REG */
+#define LANWAKE_PIN BIT(7)
+
/* PLA_SUSPEND_FLAG */
#define LINK_CHG_EVENT BIT(0)
#define DEBUG_LTSSM 0x0082
/* PLA_EXTRA_STATUS */
+#define CUR_LINK_OK BIT(15)
#define U3P3_CHECK_EN BIT(7) /* RTL_VER_05 only */
#define LINK_CHANGE_FLAG BIT(8)
+#define POLL_LINK_CHG BIT(0)
/* USB_USB2PHY */
#define USB2PHY_SUSPEND 0x0001
#define USB2PHY_L1 0x0002
+/* USB_SSPHYLINK1 */
+#define DELAY_PHY_PWR_CHG BIT(1)
+
/* USB_SSPHYLINK2 */
#define pwd_dn_scale_mask 0x3ffe
#define pwd_dn_scale(x) ((x) << 1)
r8153_set_rx_early_timeout(tp);
r8153_set_rx_early_size(tp);
+ if (tp->version == RTL_VER_09) {
+ u32 ocp_data;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK);
+ ocp_data &= ~FC_PATCH_TASK;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data);
+ usleep_range(1000, 2000);
+ ocp_data |= FC_PATCH_TASK;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data);
+ }
+
return rtl_enable(tp);
}
r8153b_ups_en(tp, false);
r8153_queue_wake(tp, false);
rtl_runtime_suspend_enable(tp, false);
- r8153_u2p3en(tp, true);
- r8153b_u1u2en(tp, true);
+ if (tp->udev->speed != USB_SPEED_HIGH)
+ r8153b_u1u2en(tp, true);
}
}
r8153_aldps_en(tp, true);
r8152b_enable_fc(tp);
- r8153_u2p3en(tp, true);
set_bit(PHY_RESET, &tp->flags);
}
static void rtl8153_up(struct r8152 *tp)
{
+ u32 ocp_data;
+
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return;
r8153_u2p3en(tp, false);
r8153_aldps_en(tp, false);
r8153_first_init(tp);
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6);
+ ocp_data |= LANWAKE_CLR_EN;
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data);
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG);
+ ocp_data &= ~LANWAKE_PIN;
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_SSPHYLINK1);
+ ocp_data &= ~DELAY_PHY_PWR_CHG;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_SSPHYLINK1, ocp_data);
+
r8153_aldps_en(tp, true);
switch (tp->version) {
static void rtl8153_down(struct r8152 *tp)
{
+ u32 ocp_data;
+
if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
rtl_drop_queued_tx(tp);
return;
}
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6);
+ ocp_data &= ~LANWAKE_CLR_EN;
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data);
+
r8153_u1u2en(tp, false);
r8153_u2p3en(tp, false);
r8153_power_cut_en(tp, false);
static void rtl8153b_up(struct r8152 *tp)
{
+ u32 ocp_data;
+
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return;
r8153_first_init(tp);
ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_B);
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3);
+ ocp_data &= ~PLA_MCU_SPDWN_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data);
+
r8153_aldps_en(tp, true);
- r8153_u2p3en(tp, true);
- r8153b_u1u2en(tp, true);
+
+ if (tp->udev->speed != USB_SPEED_HIGH)
+ r8153b_u1u2en(tp, true);
}
static void rtl8153b_down(struct r8152 *tp)
{
+ u32 ocp_data;
+
if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
rtl_drop_queued_tx(tp);
return;
}
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3);
+ ocp_data |= PLA_MCU_SPDWN_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data);
+
r8153b_u1u2en(tp, false);
r8153_u2p3en(tp, false);
r8153b_power_cut_en(tp, false);
else
ocp_data |= DYNAMIC_BURST;
ocp_write_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1, ocp_data);
+
+ r8153_queue_wake(tp, false);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS);
+ if (rtl8152_get_speed(tp) & LINK_STATUS)
+ ocp_data |= CUR_LINK_OK;
+ else
+ ocp_data &= ~CUR_LINK_OK;
+ ocp_data |= POLL_LINK_CHG;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data);
}
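The latch-link-state-then-poll sequence just above recurs almost verbatim later in this patch; a sketch of a helper that would capture it (hypothetical, not part of the patch):

static void r8153_poll_link_chg_en(struct r8152 *tp)
{
	u32 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS);

	/* seed CUR_LINK_OK from the live link state, then let the
	 * MCU poll for subsequent changes
	 */
	if (rtl8152_get_speed(tp) & LINK_STATUS)
		ocp_data |= CUR_LINK_OK;
	else
		ocp_data &= ~CUR_LINK_OK;
	ocp_data |= POLL_LINK_CHG;
	ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data);
}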
ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY2);
ocp_write_word(tp, MCU_TYPE_USB, USB_CONNECT_TIMER, 0x0001);
r8153_power_cut_en(tp, false);
+ rtl_runtime_suspend_enable(tp, false);
r8153_u1u2en(tp, true);
r8153_mac_clk_spd(tp, false);
usb_enable_lpm(tp->udev);
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6);
+ ocp_data |= LANWAKE_CLR_EN;
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data);
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG);
+ ocp_data &= ~LANWAKE_PIN;
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG, ocp_data);
+
/* rx aggregation */
ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
r8153b_ups_en(tp, false);
r8153_queue_wake(tp, false);
rtl_runtime_suspend_enable(tp, false);
- r8153b_u1u2en(tp, true);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS);
+ if (rtl8152_get_speed(tp) & LINK_STATUS)
+ ocp_data |= CUR_LINK_OK;
+ else
+ ocp_data &= ~CUR_LINK_OK;
+ ocp_data |= POLL_LINK_CHG;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data);
+
+ if (tp->udev->speed != USB_SPEED_HIGH)
+ r8153b_u1u2en(tp, true);
usb_enable_lpm(tp->udev);
/* MAC clock speed down */
ocp_data |= MAC_CLK_SPDWN_EN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, ocp_data);
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3);
+ ocp_data &= ~PLA_MCU_SPDWN_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data);
+
+ if (tp->version == RTL_VER_09) {
+ /* Disable Test IO for 32QFN */
+ if (ocp_read_byte(tp, MCU_TYPE_PLA, 0xdc00) & BIT(5)) {
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR);
+ ocp_data |= TEST_IO_OFF;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data);
+ }
+ }
+
set_bit(GREEN_ETHERNET, &tp->flags);
/* rx aggregation */
return -ENODEV;
}
+ if (intf->cur_altsetting->desc.bNumEndpoints < 3)
+ return -ENODEV;
+
usb_reset_device(udev);
netdev = alloc_etherdev(sizeof(struct r8152));
if (!netdev) {
intf->needs_remote_wakeup = 1;
+ if (!rtl_can_wakeup(tp))
+ __rtl_set_wol(tp, 0);
+ else
+ tp->saved_wolopts = __rtl_get_wol(tp);
+
tp->rtl_ops.init(tp);
#if IS_BUILTIN(CONFIG_USB_RTL8152)
/* Retry in case request_firmware() is not ready yet. */
goto out1;
}
- if (!rtl_can_wakeup(tp))
- __rtl_set_wol(tp, 0);
-
- tp->saved_wolopts = __rtl_get_wol(tp);
if (tp->saved_wolopts)
device_set_wakeup_enable(&udev->dev, true);
else
ndst = &rt->dst;
skb_tunnel_check_pmtu(skb, ndst, VXLAN_HEADROOM);
- tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
+ tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb);
ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr),
vni, md, flags, udp_sum);
skb_tunnel_check_pmtu(skb, ndst, VXLAN6_HEADROOM);
- tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
+ tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb);
ttl = ttl ? : ip6_dst_hoplimit(ndst);
skb_scrub_packet(skb, xnet);
err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr),
},
};
-static struct ucc_tdm_info utdm_info[MAX_HDLC_NUM];
+static struct ucc_tdm_info utdm_info[UCC_MAX_NUM];
static int uhdlc_init(struct ucc_hdlc_private *priv)
{
{
struct lapbethdev *lapbeth;
- list_for_each_entry_rcu(lapbeth, &lapbeth_devices, node) {
+ list_for_each_entry_rcu(lapbeth, &lapbeth_devices, node, lockdep_rtnl_is_held()) {
if (lapbeth->ethdev == dev)
return lapbeth;
}
spin_lock_irqsave(&sdla_lock, flags);
SDLA_WINDOW(dev, addr);
- pbuf = (void *)(((int) dev->mem_start) + (addr & SDLA_ADDR_MASK));
+ pbuf = (void *)(dev->mem_start + (addr & SDLA_ADDR_MASK));
__sdla_write(dev, pbuf->buf_addr, skb->data, skb->len);
SDLA_WINDOW(dev, addr);
pbuf->opp_flag = 1;
case AIROGVLIST: ridcode = RID_APLIST; break;
case AIROGDRVNAM: ridcode = RID_DRVNAME; break;
case AIROGEHTENC: ridcode = RID_ETHERENCAP; break;
- case AIROGWEPKTMP: ridcode = RID_WEP_TEMP;
- /* Only super-user can read WEP keys */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- break;
- case AIROGWEPKNV: ridcode = RID_WEP_PERM;
- /* Only super-user can read WEP keys */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- break;
+ case AIROGWEPKTMP: ridcode = RID_WEP_TEMP; break;
+ case AIROGWEPKNV: ridcode = RID_WEP_PERM; break;
case AIROGSTAT: ridcode = RID_STATUS; break;
case AIROGSTATSD32: ridcode = RID_STATSDELTA; break;
case AIROGSTATSC32: ridcode = RID_STATS; break;
return -EINVAL;
}
- if ((iobuf = kmalloc(RIDSIZE, GFP_KERNEL)) == NULL)
+ if (ridcode == RID_WEP_TEMP || ridcode == RID_WEP_PERM) {
+ /* Only super-user can read WEP keys */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ }
+
+ if ((iobuf = kzalloc(RIDSIZE, GFP_KERNEL)) == NULL)
return -ENOMEM;
PC4500_readrid(ai,ridcode,iobuf,RIDSIZE, 1);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct iwl_station_priv *sta_priv = NULL;
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- struct iwl_device_cmd *dev_cmd;
+ struct iwl_device_tx_cmd *dev_cmd;
struct iwl_tx_cmd *tx_cmd;
__le16 fc;
u8 hdr_len;
if (unlikely(!dev_cmd))
goto drop_unlock_priv;
- memset(dev_cmd, 0, sizeof(*dev_cmd));
dev_cmd->hdr.cmd = REPLY_TX;
tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
{
union acpi_object *wifi_pkg, *data;
bool enabled;
- int i, n_profiles, tbl_rev;
- int ret = 0;
+ int i, n_profiles, tbl_rev, pos;
+ int ret = 0;
data = iwl_acpi_get_object(fwrt->dev, ACPI_EWRD_METHOD);
if (IS_ERR(data))
goto out_free;
}
- for (i = 0; i < n_profiles; i++) {
- /* the tables start at element 3 */
- int pos = 3;
+ /* the tables start at element 3 */
+ pos = 3;
+ for (i = 0; i < n_profiles; i++) {
/* The EWRD profiles officially go from 2 to 4, but we
* save them in sar_profiles[1-3] (because we don't
* have profile 0). So in the array we start from 1.
{
int ret = 0;
- /* if the FW crashed or not debug monitor cfg was given, there is
- * no point in changing the recording state
- */
- if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status) ||
- (!fwrt->trans->dbg.dest_tlv &&
- fwrt->trans->dbg.ini_dest == IWL_FW_INI_LOCATION_INVALID))
+ if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status))
return 0;
if (fw_has_capa(&fwrt->fw->ucode_capa,
/* CSR GIO */
-#define CSR_GIO_REG_VAL_L0S_ENABLED (0x00000002)
+#define CSR_GIO_REG_VAL_L0S_DISABLED (0x00000002)
/*
* UCODE-DRIVER GP (general purpose) mailbox register 1
if (!frag || frag->size || !pages)
return -EIO;
- while (pages) {
+ /*
+ * We try to allocate as many pages as we can, starting with
+ * the requested amount and going down until we can allocate
+ * something. Because of DIV_ROUND_UP(), pages can never reach
+ * 0 to end the loop on its own, so stop once pages reaches 1,
+ * which is too small anyway.
+ */
+ while (pages > 1) {
block = dma_alloc_coherent(fwrt->dev, pages * PAGE_SIZE,
&physical,
GFP_KERNEL | __GFP_NOWARN);
module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, 0444);
MODULE_PARM_DESC(nvm_file, "NVM file name");
-module_param_named(lar_disable, iwlwifi_mod_params.lar_disable, bool, 0444);
-MODULE_PARM_DESC(lar_disable, "disable LAR functionality (default: N)");
-
module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, uint, 0644);
MODULE_PARM_DESC(uapsd_disable,
"disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 3)");
* @nvm_file: specifies a external NVM file
* @uapsd_disable: disable U-APSD, see &enum iwl_uapsd_disable, default =
* IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT
- * @lar_disable: disable LAR (regulatory), default = 0
* @fw_monitor: allow to use firmware monitor
* @disable_11ac: disable VHT capabilities, default = false.
* @remove_when_gone: remove an inaccessible device from the PCIe bus.
int antenna_coupling;
char *nvm_file;
u32 uapsd_disable;
- bool lar_disable;
bool fw_monitor;
bool disable_11ac;
/**
NVM_CHANNEL_DC_HIGH = BIT(12),
};
+/**
+ * enum iwl_reg_capa_flags - global flags applied for the whole regulatory
+ * domain.
+ * @REG_CAPA_BF_CCD_LOW_BAND: Beam-forming or Cyclic Delay Diversity in the
+ * 2.4 GHz band is allowed.
+ * @REG_CAPA_BF_CCD_HIGH_BAND: Beam-forming or Cyclic Delay Diversity in the
+ * 5 GHz band is allowed.
+ * @REG_CAPA_160MHZ_ALLOWED: 11ac channel with a width of 160 MHz is allowed
+ * for this regulatory domain (valid only in 5 GHz).
+ * @REG_CAPA_80MHZ_ALLOWED: 11ac channel with a width of 80 MHz is allowed
+ * for this regulatory domain (valid only in 5 GHz).
+ * @REG_CAPA_MCS_8_ALLOWED: 11ac with MCS 8 is allowed.
+ * @REG_CAPA_MCS_9_ALLOWED: 11ac with MCS 9 is allowed.
+ * @REG_CAPA_40MHZ_FORBIDDEN: 11n channel with a width of 40 MHz is forbidden
+ * for this regulatory domain (valid only in 5 GHz).
+ * @REG_CAPA_DC_HIGH_ENABLED: DC HIGH allowed.
+ */
+enum iwl_reg_capa_flags {
+ REG_CAPA_BF_CCD_LOW_BAND = BIT(0),
+ REG_CAPA_BF_CCD_HIGH_BAND = BIT(1),
+ REG_CAPA_160MHZ_ALLOWED = BIT(2),
+ REG_CAPA_80MHZ_ALLOWED = BIT(3),
+ REG_CAPA_MCS_8_ALLOWED = BIT(4),
+ REG_CAPA_MCS_9_ALLOWED = BIT(5),
+ REG_CAPA_40MHZ_FORBIDDEN = BIT(7),
+ REG_CAPA_DC_HIGH_ENABLED = BIT(9),
+};
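A worked example of how these capability bits translate in iwl_nvm_get_regdom_bw_flags() below: with cap = REG_CAPA_80MHZ_ALLOWED alone, a 5 GHz channel keeps 80 MHz, loses 160 MHz, and gets no extra HT40 restriction from this check:

/* ch_idx >= NUM_2GHZ_CHANNELS, cap == REG_CAPA_80MHZ_ALLOWED:
 *   REG_CAPA_40MHZ_FORBIDDEN clear -> NL80211_RRF_NO_HT40 not added here
 *   REG_CAPA_80MHZ_ALLOWED set    -> NL80211_RRF_NO_80MHZ not added
 *   REG_CAPA_160MHZ_ALLOWED clear -> NL80211_RRF_NO_160MHZ added
 */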
+
static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
int chan, u32 flags)
{
struct iwl_nvm_data *
iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+ const struct iwl_fw *fw,
const __be16 *nvm_hw, const __le16 *nvm_sw,
const __le16 *nvm_calib, const __le16 *regulatory,
const __le16 *mac_override, const __le16 *phy_sku,
- u8 tx_chains, u8 rx_chains, bool lar_fw_supported)
+ u8 tx_chains, u8 rx_chains)
{
struct iwl_nvm_data *data;
bool lar_enabled;
return NULL;
}
- if (lar_fw_supported && lar_enabled)
+ if (lar_enabled &&
+ fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT))
sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR;
if (iwl_nvm_no_wide_in_5ghz(trans, cfg, nvm_hw))
static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
int ch_idx, u16 nvm_flags,
+ u16 cap_flags,
const struct iwl_cfg *cfg)
{
u32 flags = NL80211_RRF_NO_HT40;
(flags & NL80211_RRF_NO_IR))
flags |= NL80211_RRF_GO_CONCURRENT;
+ /*
+ * cap_flags is per regulatory domain, so apply it to every channel
+ */
+ if (ch_idx >= NUM_2GHZ_CHANNELS) {
+ if (cap_flags & REG_CAPA_40MHZ_FORBIDDEN)
+ flags |= NL80211_RRF_NO_HT40;
+
+ if (!(cap_flags & REG_CAPA_80MHZ_ALLOWED))
+ flags |= NL80211_RRF_NO_80MHZ;
+
+ if (!(cap_flags & REG_CAPA_160MHZ_ALLOWED))
+ flags |= NL80211_RRF_NO_160MHZ;
+ }
+
return flags;
}
struct ieee80211_regdomain *
iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
int num_of_ch, __le32 *channels, u16 fw_mcc,
- u16 geo_info)
+ u16 geo_info, u16 cap)
{
int ch_idx;
u16 ch_flags;
}
reg_rule_flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx,
- ch_flags, cfg);
+ ch_flags, cap,
+ cfg);
/* we can't continue the same rule */
if (ch_idx == 0 || prev_reg_rule_flags != reg_rule_flags ||
.id = WIDE_ID(REGULATORY_AND_NVM_GROUP, NVM_GET_INFO)
};
int ret;
- bool lar_fw_supported = !iwlwifi_mod_params.lar_disable &&
- fw_has_capa(&fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
bool empty_otp;
u32 mac_flags;
u32 sbands_flags = 0;
nvm->valid_tx_ant = (u8)le32_to_cpu(rsp->phy_sku.tx_chains);
nvm->valid_rx_ant = (u8)le32_to_cpu(rsp->phy_sku.rx_chains);
- if (le32_to_cpu(rsp->regulatory.lar_enabled) && lar_fw_supported) {
+ if (le32_to_cpu(rsp->regulatory.lar_enabled) &&
+ fw_has_capa(&fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_LAR_SUPPORT)) {
nvm->lar_enabled = true;
sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR;
}
*
* Copyright(c) 2008 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
*/
struct iwl_nvm_data *
iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+ const struct iwl_fw *fw,
const __be16 *nvm_hw, const __le16 *nvm_sw,
const __le16 *nvm_calib, const __le16 *regulatory,
const __le16 *mac_override, const __le16 *phy_sku,
- u8 tx_chains, u8 rx_chains, bool lar_fw_supported);
+ u8 tx_chains, u8 rx_chains);
/**
* iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW
struct ieee80211_regdomain *
iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
int num_of_ch, __le32 *channels, u16 fw_mcc,
- u16 geo_info);
+ u16 geo_info, u16 cap);
/**
* struct iwl_nvm_section - describes an NVM section in memory.
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
- const struct iwl_trans_ops *ops)
+ const struct iwl_trans_ops *ops,
+ unsigned int cmd_pool_size,
+ unsigned int cmd_pool_align)
{
struct iwl_trans *trans;
#ifdef CONFIG_LOCKDEP
"iwl_cmd_pool:%s", dev_name(trans->dev));
trans->dev_cmd_pool =
kmem_cache_create(trans->dev_cmd_pool_name,
- sizeof(struct iwl_device_cmd),
- sizeof(void *),
- SLAB_HWCACHE_ALIGN,
- NULL);
+ cmd_pool_size, cmd_pool_align,
+ SLAB_HWCACHE_ALIGN, NULL);
if (!trans->dev_cmd_pool)
return NULL;
};
} __packed;
+/**
+ * struct iwl_device_tx_cmd - buffer for TX command
+ * @hdr: the header
+ * @payload: the payload placeholder
+ *
+ * The actual structure is sized dynamically according to need.
+ */
+struct iwl_device_tx_cmd {
+ struct iwl_cmd_header hdr;
+ u8 payload[];
+} __packed;
+
#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
/*
int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_cmd *dev_cmd, int queue);
+ struct iwl_device_tx_cmd *dev_cmd, int queue);
void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
struct sk_buff_head *skbs);
return trans->ops->dump_data(trans, dump_mask);
}
-static inline struct iwl_device_cmd *
+static inline struct iwl_device_tx_cmd *
iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
{
- return kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC);
+ return kmem_cache_zalloc(trans->dev_cmd_pool, GFP_ATOMIC);
}
int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
- struct iwl_device_cmd *dev_cmd)
+ struct iwl_device_tx_cmd *dev_cmd)
{
kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
}
static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_cmd *dev_cmd, int queue)
+ struct iwl_device_tx_cmd *dev_cmd, int queue)
{
if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
return -EIO;
*****************************************************/
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
- const struct iwl_trans_ops *ops);
+ const struct iwl_trans_ops *ops,
+ unsigned int cmd_pool_size,
+ unsigned int cmd_pool_align);
void iwl_trans_free(struct iwl_trans *trans);
/*****************************************************
#define IWL_MVM_D3_DEBUG false
#define IWL_MVM_USE_TWT false
#define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA 10
+#define IWL_MVM_USE_NSSN_SYNC 0
#endif /* __MVM_CONSTANTS_H */
return 0;
}
+ if (!mvm->fwrt.ppag_table.enabled) {
+ IWL_DEBUG_RADIO(mvm,
+ "PPAG not enabled, command not sent.\n");
+ return 0;
+ }
+
IWL_DEBUG_RADIO(mvm, "Sending PER_PLATFORM_ANT_GAIN_CMD\n");
- IWL_DEBUG_RADIO(mvm, "PPAG is %s\n",
- mvm->fwrt.ppag_table.enabled ? "enabled" : "disabled");
for (i = 0; i < ACPI_PPAG_NUM_CHAINS; i++) {
for (j = 0; j < ACPI_PPAG_NUM_SUB_BANDS; j++) {
__le32_to_cpu(resp->n_channels),
resp->channels,
__le16_to_cpu(resp->mcc),
- __le16_to_cpu(resp->geo_info));
+ __le16_to_cpu(resp->geo_info),
+ __le16_to_cpu(resp->cap));
/* Store the return source id */
src_id = resp->source_id;
kfree(resp);
return ret;
}
+static void iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_sta *sta)
+{
+ if (likely(sta)) {
+ if (likely(iwl_mvm_tx_skb_sta(mvm, skb, sta) == 0))
+ return;
+ } else {
+ if (likely(iwl_mvm_tx_skb_non_sta(mvm, skb) == 0))
+ return;
+ }
+
+ ieee80211_free_txskb(mvm->hw, skb);
+}
+
static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
}
}
- if (sta) {
- if (iwl_mvm_tx_skb(mvm, skb, sta))
- goto drop;
- return;
- }
-
- if (iwl_mvm_tx_skb_non_sta(mvm, skb))
- goto drop;
+ iwl_mvm_tx_skb(mvm, skb, sta);
return;
drop:
ieee80211_free_txskb(hw, skb);
break;
}
- if (!txq->sta)
- iwl_mvm_tx_skb_non_sta(mvm, skb);
- else
- iwl_mvm_tx_skb(mvm, skb, txq->sta);
+ iwl_mvm_tx_skb(mvm, skb, txq->sta);
}
} while (atomic_dec_return(&mvmtxq->tx_request));
rcu_read_unlock();
return ret;
}
+static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo)
+{
+ switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
+ case RATE_MCS_CHAN_WIDTH_20:
+ rinfo->bw = RATE_INFO_BW_20;
+ break;
+ case RATE_MCS_CHAN_WIDTH_40:
+ rinfo->bw = RATE_INFO_BW_40;
+ break;
+ case RATE_MCS_CHAN_WIDTH_80:
+ rinfo->bw = RATE_INFO_BW_80;
+ break;
+ case RATE_MCS_CHAN_WIDTH_160:
+ rinfo->bw = RATE_INFO_BW_160;
+ break;
+ }
+
+ if (rate_n_flags & RATE_MCS_HT_MSK) {
+ rinfo->flags |= RATE_INFO_FLAGS_MCS;
+ rinfo->mcs = u32_get_bits(rate_n_flags, RATE_HT_MCS_INDEX_MSK);
+ rinfo->nss = u32_get_bits(rate_n_flags,
+ RATE_HT_MCS_NSS_MSK) + 1;
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
+ } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
+ rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS;
+ rinfo->mcs = u32_get_bits(rate_n_flags,
+ RATE_VHT_MCS_RATE_CODE_MSK);
+ rinfo->nss = u32_get_bits(rate_n_flags,
+ RATE_VHT_MCS_NSS_MSK) + 1;
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
+ } else if (rate_n_flags & RATE_MCS_HE_MSK) {
+ u32 gi_ltf = u32_get_bits(rate_n_flags,
+ RATE_MCS_HE_GI_LTF_MSK);
+
+ rinfo->flags |= RATE_INFO_FLAGS_HE_MCS;
+ rinfo->mcs = u32_get_bits(rate_n_flags,
+ RATE_VHT_MCS_RATE_CODE_MSK);
+ rinfo->nss = u32_get_bits(rate_n_flags,
+ RATE_VHT_MCS_NSS_MSK) + 1;
+
+ if (rate_n_flags & RATE_MCS_HE_106T_MSK) {
+ rinfo->bw = RATE_INFO_BW_HE_RU;
+ rinfo->he_ru_alloc = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ }
+
+ switch (rate_n_flags & RATE_MCS_HE_TYPE_MSK) {
+ case RATE_MCS_HE_TYPE_SU:
+ case RATE_MCS_HE_TYPE_EXT_SU:
+ if (gi_ltf == 0 || gi_ltf == 1)
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ else if (gi_ltf == 2)
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+ else if (rate_n_flags & RATE_MCS_SGI_MSK)
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ else
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+ break;
+ case RATE_MCS_HE_TYPE_MU:
+ if (gi_ltf == 0 || gi_ltf == 1)
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ else if (gi_ltf == 2)
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+ else
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+ break;
+ case RATE_MCS_HE_TYPE_TRIG:
+ if (gi_ltf == 0 || gi_ltf == 1)
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+ else
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+ break;
+ }
+
+ if (rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK)
+ rinfo->he_dcm = 1;
+ } else {
+ switch (u32_get_bits(rate_n_flags, RATE_LEGACY_RATE_MSK)) {
+ case IWL_RATE_1M_PLCP:
+ rinfo->legacy = 10;
+ break;
+ case IWL_RATE_2M_PLCP:
+ rinfo->legacy = 20;
+ break;
+ case IWL_RATE_5M_PLCP:
+ rinfo->legacy = 55;
+ break;
+ case IWL_RATE_11M_PLCP:
+ rinfo->legacy = 110;
+ break;
+ case IWL_RATE_6M_PLCP:
+ rinfo->legacy = 60;
+ break;
+ case IWL_RATE_9M_PLCP:
+ rinfo->legacy = 90;
+ break;
+ case IWL_RATE_12M_PLCP:
+ rinfo->legacy = 120;
+ break;
+ case IWL_RATE_18M_PLCP:
+ rinfo->legacy = 180;
+ break;
+ case IWL_RATE_24M_PLCP:
+ rinfo->legacy = 240;
+ break;
+ case IWL_RATE_36M_PLCP:
+ rinfo->legacy = 360;
+ break;
+ case IWL_RATE_48M_PLCP:
+ rinfo->legacy = 480;
+ break;
+ case IWL_RATE_54M_PLCP:
+ rinfo->legacy = 540;
+ break;
+ }
+ }
+}
+
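One unit worth calling out in the legacy branch above: rate_info::legacy is expressed in units of 100 kbit/s, so the mappings read as 1 Mb/s -> 10 up through 54 Mb/s -> 540.

/* e.g. IWL_RATE_11M_PLCP -> rinfo->legacy = 110, i.e. 11 Mb/s */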
static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
}
+ if (iwl_mvm_has_tlc_offload(mvm)) {
+ struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw;
+
+ iwl_mvm_set_sta_rate(lq_sta->last_rate_n_flags, &sinfo->txrate);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+ }
+
/* if beacon filtering isn't on mac80211 does it anyway */
if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
return;
bool tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
- if (iwlwifi_mod_params.lar_disable)
- return false;
-
/*
* Enable LAR only if it is supported by the FW (TLV) &&
* enabled in the NVM
int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id,
u16 len, const void *data,
u32 *status);
-int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
- struct ieee80211_sta *sta);
+int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_sta *sta);
int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb);
void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
struct iwl_tx_cmd *tx_cmd,
struct iwl_nvm_section *sections = mvm->nvm_sections;
const __be16 *hw;
const __le16 *sw, *calib, *regulatory, *mac_override, *phy_sku;
- bool lar_enabled;
int regulatory_type;
/* Checking for required sections */
- if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT) {
+ if (mvm->trans->cfg->nvm_type == IWL_NVM) {
if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
!mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) {
IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n");
(const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY_SDP].data :
(const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data;
- lar_enabled = !iwlwifi_mod_params.lar_disable &&
- fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
-
- return iwl_parse_nvm_data(mvm->trans, mvm->cfg, hw, sw, calib,
+ return iwl_parse_nvm_data(mvm->trans, mvm->cfg, mvm->fw, hw, sw, calib,
regulatory, mac_override, phy_sku,
- mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant,
- lar_enabled);
+ mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant);
}
/* Loads the NVM data stored in mvm->nvm_sections into the NIC */
static void iwl_mvm_sync_nssn(struct iwl_mvm *mvm, u8 baid, u16 nssn)
{
- struct iwl_mvm_rss_sync_notif notif = {
- .metadata.type = IWL_MVM_RXQ_NSSN_SYNC,
- .metadata.sync = 0,
- .nssn_sync.baid = baid,
- .nssn_sync.nssn = nssn,
- };
-
- iwl_mvm_sync_rx_queues_internal(mvm, (void *)¬if, sizeof(notif));
+ if (IWL_MVM_USE_NSSN_SYNC) {
+ struct iwl_mvm_rss_sync_notif notif = {
+ .metadata.type = IWL_MVM_RXQ_NSSN_SYNC,
+ .metadata.sync = 0,
+ .nssn_sync.baid = baid,
+ .nssn_sync.nssn = nssn,
+ };
+
+ iwl_mvm_sync_rx_queues_internal(mvm, (void *)¬if,
+ sizeof(notif));
+ }
}
#define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10)
cmd_size = sizeof(struct iwl_scan_config_v2);
else
cmd_size = sizeof(struct iwl_scan_config_v1);
- cmd_size += num_channels;
+ cmd_size += mvm->fw->ucode_capa.n_scan_channels;
cfg = kzalloc(cmd_size, GFP_KERNEL);
if (!cfg)
/*
* Allocates and sets the Tx cmd the driver data pointers in the skb
*/
-static struct iwl_device_cmd *
+static struct iwl_device_tx_cmd *
iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_tx_info *info, int hdrlen,
struct ieee80211_sta *sta, u8 sta_id)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct iwl_device_cmd *dev_cmd;
+ struct iwl_device_tx_cmd *dev_cmd;
struct iwl_tx_cmd *tx_cmd;
dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans);
if (unlikely(!dev_cmd))
return NULL;
- /* Make sure we zero enough of dev_cmd */
- BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) > sizeof(*tx_cmd));
- BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) > sizeof(*tx_cmd));
-
- memset(dev_cmd, 0, sizeof(dev_cmd->hdr) + sizeof(*tx_cmd));
dev_cmd->hdr.cmd = TX_CMD;
if (iwl_mvm_has_new_tx_api(mvm)) {
}
static void iwl_mvm_skb_prepare_status(struct sk_buff *skb,
- struct iwl_device_cmd *cmd)
+ struct iwl_device_tx_cmd *cmd)
{
struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info info;
- struct iwl_device_cmd *dev_cmd;
+ struct iwl_device_tx_cmd *dev_cmd;
u8 sta_id;
int hdrlen = ieee80211_hdrlen(hdr->frame_control);
__le16 fc = hdr->frame_control;
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct iwl_mvm_sta *mvmsta;
- struct iwl_device_cmd *dev_cmd;
+ struct iwl_device_tx_cmd *dev_cmd;
__le16 fc;
u16 seq_number = 0;
u8 tid = IWL_MAX_TID_COUNT;
if (WARN_ONCE(txq_id == IWL_MVM_INVALID_QUEUE, "Invalid TXQ id")) {
iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
spin_unlock(&mvmsta->lock);
- return 0;
+ return -1;
}
if (!iwl_mvm_has_new_tx_api(mvm)) {
return -1;
}
-int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
- struct ieee80211_sta *sta)
+int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_sta *sta)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct ieee80211_tx_info info;
#include "internal.h"
#include "iwl-prph.h"
+static void *_iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans,
+ size_t size,
+ dma_addr_t *phys,
+ int depth)
+{
+ void *result;
+
+ if (WARN(depth > 2,
+ "failed to allocate DMA memory not crossing 2^32 boundary"))
+ return NULL;
+
+ result = dma_alloc_coherent(trans->dev, size, phys, GFP_KERNEL);
+
+ if (!result)
+ return NULL;
+
+ if (unlikely(iwl_pcie_crosses_4g_boundary(*phys, size))) {
+ void *old = result;
+ dma_addr_t oldphys = *phys;
+
+ result = _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size,
+ phys,
+ depth + 1);
+ dma_free_coherent(trans->dev, size, old, oldphys);
+ }
+
+ return result;
+}
+
+static void *iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans,
+ size_t size,
+ dma_addr_t *phys)
+{
+ return _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size, phys, 0);
+}
+
void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)
{
struct iwl_self_init_dram *dram = &trans->init_dram;
struct iwl_context_info *ctxt_info;
struct iwl_context_info_rbd_cfg *rx_cfg;
u32 control_flags = 0, rb_size;
+ dma_addr_t phys;
int ret;
- ctxt_info = dma_alloc_coherent(trans->dev, sizeof(*ctxt_info),
- &trans_pcie->ctxt_info_dma_addr,
- GFP_KERNEL);
+ ctxt_info = iwl_pcie_ctxt_info_dma_alloc_coherent(trans,
+ sizeof(*ctxt_info),
+ &phys);
if (!ctxt_info)
return -ENOMEM;
+ trans_pcie->ctxt_info_dma_addr = phys;
+
ctxt_info->version.version = 0;
ctxt_info->version.mac_id =
cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)
struct iwl_pcie_txq_entry {
- struct iwl_device_cmd *cmd;
+ void *cmd;
struct sk_buff *skb;
/* buffer to free after command completes */
const void *free_buf;
/*****************************************************
* TX / HCMD
******************************************************/
+/*
+ * We need this inline in case dma_addr_t is only 32 bits - since the
+ * hardware is always 64-bit, the issue can still occur in that case,
+ * so use u64 for 'phys' here to force the addition to be done in
+ * 64 bits.
+ */
+static inline bool iwl_pcie_crosses_4g_boundary(u64 phys, u16 len)
+{
+ return upper_32_bits(phys) != upper_32_bits(phys + len);
+}
+
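A quick worked example of the predicate: a buffer at 0xfffff000 with len 0x2000 ends at 0x100001000, so the upper 32 bits differ and the workaround must trigger:

/* upper_32_bits(0xfffff000)          == 0x0
 * upper_32_bits(0xfffff000 + 0x2000) == 0x1  -> crosses the 4 GiB line
 */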
int iwl_pcie_tx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id,
int queue_size);
void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
struct iwl_txq *txq);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_cmd *dev_cmd, int txq_id);
+ struct iwl_device_tx_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx);
void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
struct sk_buff *skb);
#ifdef CONFIG_INET
-struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len);
+struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
+ struct sk_buff *skb);
#endif
/* common functions that are used by gen3 transport */
unsigned int timeout);
void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_cmd *dev_cmd, int txq_id);
+ struct iwl_device_tx_cmd *dev_cmd, int txq_id);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
napi = &rxq->napi;
if (napi->poll) {
+ napi_gro_flush(napi, false);
+
if (napi->rx_count) {
netif_receive_skb_list(&napi->rx_list);
INIT_LIST_HEAD(&napi->rx_list);
napi->rx_count = 0;
}
-
- napi_gro_flush(napi, false);
}
iwl_pcie_rxq_restock(trans, rxq);
#include "iwl-agn-hw.h"
#include "fw/error-dump.h"
#include "fw/dbg.h"
+#include "fw/api/tx.h"
#include "internal.h"
#include "iwl-fh.h"
u16 cap;
/*
- * HW bug W/A for instability in PCIe bus L0S->L1 transition.
- * Check if BIOS (or OS) enabled L1-ASPM on this device.
- * If so (likely), disable L0S, so device moves directly L0->L1;
- * costs negligible amount of power savings.
- * If not (unlikely), enable L0S, so there is at least some
- * power savings, even without L1.
+ * L0S states have been found to be unstable with our devices
+ * and in newer hardware they are not officially supported at
+ * all, so we must always set the L0S_DISABLED bit.
*/
+ iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_DISABLED);
+
pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
- if (lctl & PCI_EXP_LNKCTL_ASPM_L1)
- iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
- else
- iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
{
struct iwl_trans_pcie *trans_pcie;
struct iwl_trans *trans;
- int ret, addr_size;
+ int ret, addr_size, txcmd_size, txcmd_align;
+ const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2;
+
+ if (!cfg_trans->gen2) {
+ ops = &trans_ops_pcie;
+ txcmd_size = sizeof(struct iwl_tx_cmd);
+ txcmd_align = sizeof(void *);
+ } else if (cfg_trans->device_family < IWL_DEVICE_FAMILY_AX210) {
+ txcmd_size = sizeof(struct iwl_tx_cmd_gen2);
+ txcmd_align = 64;
+ } else {
+ txcmd_size = sizeof(struct iwl_tx_cmd_gen3);
+ txcmd_align = 128;
+ }
+
+ txcmd_size += sizeof(struct iwl_cmd_header);
+ txcmd_size += 36; /* biggest possible 802.11 header */
+
+ /* Ensure device TX cmd cannot reach/cross a page boundary in gen2 */
+ if (WARN_ON(cfg_trans->gen2 && txcmd_size >= txcmd_align))
+ return ERR_PTR(-EINVAL);
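The WARN_ON encodes the invariant that makes the alignment trick work; informally:

/* kmem cache objects aligned to txcmd_align start at multiples of it;
 * with txcmd_size < txcmd_align each object sits inside one aligned
 * block, and since PAGE_SIZE is a multiple of 64 and 128, no such
 * block - and hence no TX command - can straddle a page boundary.
 */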
ret = pcim_enable_device(pdev);
if (ret)
return ERR_PTR(ret);
- if (cfg_trans->gen2)
- trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
- &pdev->dev, &trans_ops_pcie_gen2);
- else
- trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
- &pdev->dev, &trans_ops_pcie);
-
+ trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, ops,
+ txcmd_size, txcmd_align);
if (!trans)
return ERR_PTR(-ENOMEM);
int idx = iwl_pcie_gen2_get_num_tbs(trans, tfd);
struct iwl_tfh_tb *tb;
+ /*
+ * Only WARN here so we know about the issue; returning an error
+ * would break the unmap path, because not every caller currently
+ * checks the return value - it can only fail when there is no more
+ * space, so callers that know there is enough space don't always
+ * check.
+ */
+ WARN(iwl_pcie_crosses_4g_boundary(addr, len),
+ "possible DMA problem with iova:0x%llx, len:%d\n",
+ (unsigned long long)addr, len);
+
if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
return -EINVAL;
tb = &tfd->tbs[idx];
return idx;
}
+static struct page *get_workaround_page(struct iwl_trans *trans,
+ struct sk_buff *skb)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct page **page_ptr;
+ struct page *ret;
+
+ page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
+
+ ret = alloc_page(GFP_ATOMIC);
+ if (!ret)
+ return NULL;
+
+ /* set the chaining pointer to the previous page if there is one */
+ *(void **)(page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
+ *page_ptr = ret;
+
+ return ret;
+}
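For orientation, an illustrative sketch of the page chain built here: each workaround page stores a pointer to the previously allocated page in its last sizeof(void *) bytes, with skb->cb holding the chain head so all pages can later be freed by walking the list.
/*
 * Illustrative layout (not upstream code):
 *
 *   skb->cb[page_offs] --> page N
 *   page N:   [ data ............ | ptr to page N-1 ]
 *   page N-1: [ data ............ | ptr to page N-2 ]
 *   ...
 *   page 0:   [ data ............ | NULL            ]
 */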
+
+/*
+ * Add a TB and if needed apply the FH HW bug workaround;
+ * meta != NULL indicates that it's a page mapping and we
+ * need to dma_unmap_page() and set the meta->tbs bit in
+ * this case.
+ */
+static int iwl_pcie_gen2_set_tb_with_wa(struct iwl_trans *trans,
+ struct sk_buff *skb,
+ struct iwl_tfh_tfd *tfd,
+ dma_addr_t phys, void *virt,
+ u16 len, struct iwl_cmd_meta *meta)
+{
+ dma_addr_t oldphys = phys;
+ struct page *page;
+ int ret;
+
+ if (unlikely(dma_mapping_error(trans->dev, phys)))
+ return -ENOMEM;
+
+ if (likely(!iwl_pcie_crosses_4g_boundary(phys, len))) {
+ ret = iwl_pcie_gen2_set_tb(trans, tfd, phys, len);
+
+ if (ret < 0)
+ goto unmap;
+
+ if (meta)
+ meta->tbs |= BIT(ret);
+
+ ret = 0;
+ goto trace;
+ }
+
+ /*
+ * Work around a hardware bug. If (as expressed in the
+ * condition above) the TB ends on a 32-bit boundary,
+ * then the next TB may be accessed with the wrong
+ * address.
+ * To work around it, copy the data elsewhere and make
+ * a new mapping for it so the device will not fail.
+ */
+
+ if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) {
+ ret = -ENOBUFS;
+ goto unmap;
+ }
+
+ page = get_workaround_page(trans, skb);
+ if (!page) {
+ ret = -ENOMEM;
+ goto unmap;
+ }
+
+ memcpy(page_address(page), virt, len);
+
+ phys = dma_map_single(trans->dev, page_address(page), len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, phys)))
+ return -ENOMEM;
+ ret = iwl_pcie_gen2_set_tb(trans, tfd, phys, len);
+ if (ret < 0) {
+ /* unmap the new allocation as single */
+ oldphys = phys;
+ meta = NULL;
+ goto unmap;
+ }
+ IWL_WARN(trans,
+ "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
+ len, (unsigned long long)oldphys, (unsigned long long)phys);
+
+ ret = 0;
+unmap:
+ if (meta)
+ dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE);
+ else
+ dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE);
+trace:
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len);
+
+ return ret;
+}
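The boundary test used above detects buffers that straddle a 2^32 address. A minimal sketch of such a check, assuming the upstream helper takes this form:
/* Sketch only: true if the upper 32 address bits change within the TB. */
static inline bool iwl_pcie_crosses_4g_boundary(u64 addr, u16 len)
{
	return upper_32_bits(addr) != upper_32_bits(addr + len);
}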
+
static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
struct sk_buff *skb,
struct iwl_tfh_tfd *tfd, int start_len,
- u8 hdr_len, struct iwl_device_cmd *dev_cmd)
+ u8 hdr_len,
+ struct iwl_device_tx_cmd *dev_cmd)
{
#ifdef CONFIG_INET
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
struct ieee80211_hdr *hdr = (void *)skb->data;
unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
u16 length, amsdu_pad;
u8 *start_hdr;
struct iwl_tso_hdr_page *hdr_page;
- struct page **page_ptr;
struct tso_t tso;
trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));
/* Our device supports 9 segments at most, it will fit in 1 page */
- hdr_page = get_page_hdr(trans, hdr_room);
+ hdr_page = get_page_hdr(trans, hdr_room, skb);
if (!hdr_page)
return -ENOMEM;
- get_page(hdr_page->page);
start_hdr = hdr_page->pos;
- page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
- *page_ptr = hdr_page->page;
/*
* Pull the ieee80211 header to be able to use TSO core,
dev_kfree_skb(csum_skb);
goto out_err;
}
+ /*
+ * No need for _with_wa, this is from the TSO page and
+ * we leave some space at the end of it so we can't hit
+ * the buggy scenario.
+ */
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
tb_phys, tb_len);
/* put the payload */
while (data_left) {
+ int ret;
+
tb_len = min_t(unsigned int, tso.size, data_left);
tb_phys = dma_map_single(trans->dev, tso.data,
tb_len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
+ ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd,
+ tb_phys, tso.data,
+ tb_len, NULL);
+ if (ret) {
dev_kfree_skb(csum_skb);
goto out_err;
}
- iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
- trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
- tb_phys, tb_len);
data_left -= tb_len;
tso_build_data(skb, &tso, tb_len);
static struct
iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
struct iwl_txq *txq,
- struct iwl_device_cmd *dev_cmd,
+ struct iwl_device_tx_cmd *dev_cmd,
struct sk_buff *skb,
struct iwl_cmd_meta *out_meta,
int hdr_len,
tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
+ /*
+ * No need for _with_wa, the first TB allocation is aligned up
+ * to a 64-byte boundary and thus can't be at the end or cross
+ * a page boundary (much less a 2^32 boundary).
+ */
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
/*
tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
goto out_err;
+ /*
+ * No need for _with_wa(), we ensure (via alignment) that the data
+ * here can never cross or end at a page boundary.
+ */
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, len);
if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd,
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
dma_addr_t tb_phys;
- int tb_idx;
+ unsigned int fragsz = skb_frag_size(frag);
+ int ret;
- if (!skb_frag_size(frag))
+ if (!fragsz)
continue;
tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
- skb_frag_size(frag), DMA_TO_DEVICE);
-
- if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
- return -ENOMEM;
- tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys,
- skb_frag_size(frag));
- trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag),
- tb_phys, skb_frag_size(frag));
- if (tb_idx < 0)
- return tb_idx;
-
- out_meta->tbs |= BIT(tb_idx);
+ fragsz, DMA_TO_DEVICE);
+ ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+ skb_frag_address(frag),
+ fragsz, out_meta);
+ if (ret)
+ return ret;
}
return 0;
static struct
iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
struct iwl_txq *txq,
- struct iwl_device_cmd *dev_cmd,
+ struct iwl_device_tx_cmd *dev_cmd,
struct sk_buff *skb,
struct iwl_cmd_meta *out_meta,
int hdr_len,
/* The first TB points to bi-directional DMA data */
memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
+ /*
+ * No need for _with_wa, the first TB allocation is aligned up
+ * to a 64-byte boundary and thus can't be at the end or cross
+ * a page boundary (much less a 2^32 boundary).
+ */
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
/*
tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
goto out_err;
+ /*
+ * No need for _with_wa(), we ensure (via alignment) that the data
+ * here can never cross or end at a page boundary.
+ */
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
tb2_len = skb_headlen(skb) - hdr_len;
if (tb2_len > 0) {
+ int ret;
+
tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
tb2_len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+ ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+ skb->data + hdr_len, tb2_len,
+ NULL);
+ if (ret)
goto out_err;
- iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len);
- trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len,
- tb_phys, tb2_len);
}
if (iwl_pcie_gen2_tx_add_frags(trans, skb, tfd, out_meta))
goto out_err;
skb_walk_frags(skb, frag) {
+ int ret;
+
tb_phys = dma_map_single(trans->dev, frag->data,
skb_headlen(frag), DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+ ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+ frag->data,
+ skb_headlen(frag), NULL);
+ if (ret)
goto out_err;
- iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, skb_headlen(frag));
- trace_iwlwifi_dev_tx_tb(trans->dev, skb, frag->data,
- tb_phys, skb_headlen(frag));
if (iwl_pcie_gen2_tx_add_frags(trans, frag, tfd, out_meta))
goto out_err;
}
static
struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
struct iwl_txq *txq,
- struct iwl_device_cmd *dev_cmd,
+ struct iwl_device_tx_cmd *dev_cmd,
struct sk_buff *skb,
struct iwl_cmd_meta *out_meta)
{
}
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_cmd *dev_cmd, int txq_id)
+ struct iwl_device_tx_cmd *dev_cmd, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_cmd_meta *out_meta;
/* don't put the packet on the ring, if there is no room */
if (unlikely(iwl_queue_space(trans, txq) < 3)) {
- struct iwl_device_cmd **dev_cmd_ptr;
+ struct iwl_device_tx_cmd **dev_cmd_ptr;
dev_cmd_ptr = (void *)((u8 *)skb->cb +
trans_pcie->dev_cmd_offs);
u8 sec_ctl = 0;
u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
__le16 bc_ent;
- struct iwl_tx_cmd *tx_cmd =
- (void *)txq->entries[txq->write_ptr].cmd->payload;
+ struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
+ struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
u8 sta_id = tx_cmd->sta_id;
scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
int read_ptr = txq->read_ptr;
u8 sta_id = 0;
__le16 bc_ent;
- struct iwl_tx_cmd *tx_cmd =
- (void *)txq->entries[read_ptr].cmd->payload;
+ struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
+ struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
struct sk_buff *skb)
{
struct page **page_ptr;
+ struct page *next;
page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
+ next = *page_ptr;
+ *page_ptr = NULL;
- if (*page_ptr) {
- __free_page(*page_ptr);
- *page_ptr = NULL;
+ while (next) {
+ struct page *tmp = next;
+
+ next = *(void **)(page_address(next) + PAGE_SIZE -
+ sizeof(void *));
+ __free_page(tmp);
}
}
while (!skb_queue_empty(&overflow_skbs)) {
struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
- struct iwl_device_cmd *dev_cmd_ptr;
+ struct iwl_device_tx_cmd *dev_cmd_ptr;
dev_cmd_ptr = *(void **)((u8 *)skb->cb +
trans_pcie->dev_cmd_offs);
}
#ifdef CONFIG_INET
-struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len)
+struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
+ struct sk_buff *skb)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->tso_hdr_page);
+ struct page **page_ptr;
+
+ page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
+
+ if (WARN_ON(*page_ptr))
+ return NULL;
if (!p->page)
goto alloc;
- /* enough room on this page */
- if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE)
- return p;
+ /*
+ * Check if there's enough room on this page
+ *
+ * Note that we put a page chaining pointer *last* in the
+ * page - we need it somewhere, and if it's there then we
+ * avoid DMA mapping the last bits of the page which may
+ * trigger the 32-bit boundary hardware bug.
+ *
+ * (see also get_workaround_page() in tx-gen2.c)
+ */
+ if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE -
+ sizeof(void *))
+ goto out;
/* We don't have enough room on this page, get a new one. */
__free_page(p->page);
if (!p->page)
return NULL;
p->pos = page_address(p->page);
+ /* set the chaining pointer to NULL */
+ *(void **)(page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
+out:
+ *page_ptr = p->page;
+ get_page(p->page);
return p;
}
static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_txq *txq, u8 hdr_len,
struct iwl_cmd_meta *out_meta,
- struct iwl_device_cmd *dev_cmd, u16 tb1_len)
+ struct iwl_device_tx_cmd *dev_cmd,
+ u16 tb1_len)
{
struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
u16 length, iv_len, amsdu_pad;
u8 *start_hdr;
struct iwl_tso_hdr_page *hdr_page;
- struct page **page_ptr;
struct tso_t tso;
/* if the packet is protected, then it must be CCMP or GCMP */
(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
/* Our device supports 9 segments at most, it will fit in 1 page */
- hdr_page = get_page_hdr(trans, hdr_room);
+ hdr_page = get_page_hdr(trans, hdr_room, skb);
if (!hdr_page)
return -ENOMEM;
- get_page(hdr_page->page);
start_hdr = hdr_page->pos;
- page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
- *page_ptr = hdr_page->page;
memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
hdr_page->pos += iv_len;
static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_txq *txq, u8 hdr_len,
struct iwl_cmd_meta *out_meta,
- struct iwl_device_cmd *dev_cmd, u16 tb1_len)
+ struct iwl_device_tx_cmd *dev_cmd,
+ u16 tb1_len)
{
/* No A-MSDU without CONFIG_INET */
WARN_ON(1);
#endif /* CONFIG_INET */
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_cmd *dev_cmd, int txq_id)
+ struct iwl_device_tx_cmd *dev_cmd, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct ieee80211_hdr *hdr;
/* don't put the packet on the ring, if there is no room */
if (unlikely(iwl_queue_space(trans, txq) < 3)) {
- struct iwl_device_cmd **dev_cmd_ptr;
+ struct iwl_device_tx_cmd **dev_cmd_ptr;
dev_cmd_ptr = (void *)((u8 *)skb->cb +
trans_pcie->dev_cmd_offs);
int hw, ap, ap_max = ie[1];
u8 hw_rate;
+ if (ap_max > MAX_RATES) {
+ lbs_deb_assoc("invalid rates\n");
+ return tlv;
+ }
/* Advance past IE header */
ie += 2;
struct cmd_ds_802_11_ad_hoc_join cmd;
u8 preamble = RADIO_PREAMBLE_SHORT;
int ret = 0;
+ int hw, i;
+ u8 rates_max;
+ u8 *rates;
/* TODO: set preamble based on scan result */
ret = lbs_set_radio(priv, preamble, 1);
if (!rates_eid) {
lbs_add_rates(cmd.bss.rates);
} else {
- int hw, i;
- u8 rates_max = rates_eid[1];
- u8 *rates = cmd.bss.rates;
+ rates_max = rates_eid[1];
+ if (rates_max > MAX_RATES) {
+ lbs_deb_join("invalid rates\n");
+ goto out;
+ }
+ rates = cmd.bss.rates;
for (hw = 0; hw < ARRAY_SIZE(lbs_rates); hw++) {
u8 hw_rate = lbs_rates[hw].bitrate / 5;
for (i = 0; i < rates_max; i++) {
return 0;
sband = dev->hw->wiphy->bands[status->band];
- if (!sband || status->rate_idx > sband->n_bitrates)
+ if (!sband || status->rate_idx >= sband->n_bitrates)
return 0;
rate = &sband->bitrates[status->rate_idx];
{
struct ieee80211_hw *hw = dev->hw;
- mt76_led_cleanup(dev);
+ if (IS_ENABLED(CONFIG_MT76_LEDS))
+ mt76_led_cleanup(dev);
mt76_tx_status_check(dev, NULL, true);
ieee80211_unregister_hw(hw);
}
cmd, sizeof(cmd), false);
rc = usb_bulk_msg(phy->udev, phy->out_urb->pipe, buffer, sizeof(cmd),
- &transferred, 0);
+ &transferred, 5000);
kfree(buffer);
if (rc || (transferred != sizeof(cmd))) {
nfc_err(&phy->udev->dev,
# SPDX-License-Identifier: GPL-2.0-only
config NVME_CORE
tristate
+ select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY
config BLK_DEV_NVME
tristate "NVM Express block device"
case NVME_SC_CAP_EXCEEDED:
return BLK_STS_NOSPC;
case NVME_SC_LBA_RANGE:
+ case NVME_SC_CMD_INTERRUPTED:
+ case NVME_SC_NS_NOT_READY:
return BLK_STS_TARGET;
case NVME_SC_BAD_ATTRIBUTES:
case NVME_SC_ONCS_NOT_SUPPORTED:
return len;
}
+static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10)
+{
+ switch (cdw10 & 0xff) {
+ case NVME_FEAT_HOST_ID:
+ return sizeof(req->sq->ctrl->hostid);
+ default:
+ return 0;
+ }
+}
+
u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
{
return le64_to_cpu(cmd->get_log_page.lpo);
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
u16 status = 0;
- if (!nvmet_check_data_len(req, 0))
+ if (!nvmet_check_data_len(req, nvmet_feat_data_len(req, cdw10)))
return;
switch (cdw10 & 0xff) {
BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head);
INIT_LIST_HEAD(&opp_table->opp_list);
kref_init(&opp_table->kref);
- kref_init(&opp_table->list_kref);
/* Secure the device table modification */
list_add(&opp_table->node, &opp_tables);
mutex_unlock(&opp_table_lock);
}
-void _opp_remove_all_static(struct opp_table *opp_table)
-{
- struct dev_pm_opp *opp, *tmp;
-
- list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
- if (!opp->dynamic)
- dev_pm_opp_put(opp);
- }
-
- opp_table->parsed_static_opps = false;
-}
-
-static void _opp_table_list_kref_release(struct kref *kref)
-{
- struct opp_table *opp_table = container_of(kref, struct opp_table,
- list_kref);
-
- _opp_remove_all_static(opp_table);
- mutex_unlock(&opp_table_lock);
-}
-
-void _put_opp_list_kref(struct opp_table *opp_table)
-{
- kref_put_mutex(&opp_table->list_kref, _opp_table_list_kref_release,
- &opp_table_lock);
-}
-
void dev_pm_opp_put_opp_table(struct opp_table *opp_table)
{
kref_put_mutex(&opp_table->kref, _opp_table_kref_release,
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
+void _opp_remove_all_static(struct opp_table *opp_table)
+{
+ struct dev_pm_opp *opp, *tmp;
+
+ mutex_lock(&opp_table->lock);
+
+ if (!opp_table->parsed_static_opps || --opp_table->parsed_static_opps)
+ goto unlock;
+
+ list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
+ if (!opp->dynamic)
+ dev_pm_opp_put_unlocked(opp);
+ }
+
+unlock:
+ mutex_unlock(&opp_table->lock);
+}
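An illustrative summary of the counter semantics implemented by the guard above:
/*
 * Illustrative semantics (not upstream code):
 *   parsed_static_opps == 0 -> static OPPs were never parsed, nothing to do
 *   parsed_static_opps  > 1 -> other users remain, only decrement
 *   parsed_static_opps == 1 -> last user, decrement and free the static OPPs
 */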
+
/**
* dev_pm_opp_remove_all_dynamic() - Remove all dynamically created OPPs
* @dev: device for which we do this operation
return;
}
- _put_opp_list_kref(opp_table);
+ _opp_remove_all_static(opp_table);
/* Drop reference taken by _find_opp_table() */
dev_pm_opp_put_opp_table(opp_table);
struct dev_pm_opp *opp;
/* OPP table is already initialized for the device */
+ mutex_lock(&opp_table->lock);
if (opp_table->parsed_static_opps) {
- kref_get(&opp_table->list_kref);
+ opp_table->parsed_static_opps++;
+ mutex_unlock(&opp_table->lock);
return 0;
}
- /*
- * Re-initialize list_kref every time we add static OPPs to the OPP
- * table as the reference count may be 0 after the last tie static OPPs
- * were removed.
- */
- kref_init(&opp_table->list_kref);
+ opp_table->parsed_static_opps = 1;
+ mutex_unlock(&opp_table->lock);
/* We have opp-table node now, iterate over it and add OPPs */
for_each_available_child_of_node(opp_table->np, np) {
dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
ret);
of_node_put(np);
- return ret;
+ goto remove_static_opp;
} else if (opp) {
count++;
}
}
/* There should be one of more OPP defined */
- if (WARN_ON(!count))
- return -ENOENT;
+ if (WARN_ON(!count)) {
+ ret = -ENOENT;
+ goto remove_static_opp;
+ }
list_for_each_entry(opp, &opp_table->opp_list, node)
pstate_count += !!opp->pstate;
if (pstate_count && pstate_count != count) {
dev_err(dev, "Not all nodes have performance state set (%d: %d)\n",
count, pstate_count);
- return -ENOENT;
+ ret = -ENOENT;
+ goto remove_static_opp;
}
if (pstate_count)
opp_table->genpd_performance_state = true;
- opp_table->parsed_static_opps = true;
-
return 0;
+
+remove_static_opp:
+ _opp_remove_all_static(opp_table);
+
+ return ret;
}
/* Initializes OPP tables based on old-deprecated bindings */
if (ret) {
dev_err(dev, "%s: Failed to add OPP %ld (%d)\n",
__func__, freq, ret);
+ _opp_remove_all_static(opp_table);
return ret;
}
nr -= 2;
* @dev_list: list of devices that share these OPPs
* @opp_list: table of opps
* @kref: for reference count of the table.
- * @list_kref: for reference count of the OPP list.
* @lock: mutex protecting the opp_list and dev_list.
* @np: struct device_node pointer for opp's DT node.
* @clock_latency_ns_max: Max clock latency in nanoseconds.
- * @parsed_static_opps: True if OPPs are initialized from DT.
+ * @parsed_static_opps: Count of devices for which OPPs are initialized from DT.
* @shared_opp: OPP is shared between multiple devices.
* @suspend_opp: Pointer to OPP to be used during device suspend.
* @genpd_virt_dev_lock: Mutex protecting the genpd virtual device pointers.
struct list_head dev_list;
struct list_head opp_list;
struct kref kref;
- struct kref list_kref;
struct mutex lock;
struct device_node *np;
/* For backward compatibility with v1 bindings */
unsigned int voltage_tolerance_v1;
- bool parsed_static_opps;
+ unsigned int parsed_static_opps;
enum opp_table_access shared_opp;
struct dev_pm_opp *suspend_opp;
#ifdef CONFIG_PCI_ATS
/*
- * Some devices have a broken ATS implementation causing IOMMU stalls.
- * Don't use ATS for those devices.
+ * Some devices require additional driver setup to enable ATS. Don't use
+ * ATS for those devices as ATS will be enabled before the driver has had a
+ * chance to load and configure the device.
*/
-static void quirk_no_ats(struct pci_dev *pdev)
+static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
{
- pci_info(pdev, "disabling ATS (broken on this device)\n");
+ if (pdev->device == 0x7340 && pdev->revision != 0xc5)
+ return;
+
+ pci_info(pdev, "disabling ATS\n");
pdev->ats_cap = 0;
}
/* AMD Stoney platform GPU */
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_no_ats);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_amd_harvest_no_ats);
+/* AMD Iceland dGPU */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats);
+/* AMD Navi14 dGPU */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats);
#endif /* CONFIG_PCI_ATS */
/* Freescale PCIe doesn't support MSI in RC mode */
if (ret < 0) {
dev_err(&pdev->dev, "cpuhp_setup_state_multi failed\n");
- goto ddr_perf_err;
+ goto cpuhp_state_err;
}
pmu->cpuhp_state = ret;
/* Register the pmu instance for cpu hotplug */
- cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node);
+ ret = cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node);
+ if (ret) {
+ dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
+ goto cpuhp_instance_err;
+ }
/* Request irq */
irq = of_irq_get(np, 0);
return 0;
ddr_perf_err:
- if (pmu->cpuhp_state)
- cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
-
+ cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
+cpuhp_instance_err:
+ cpuhp_remove_multi_state(pmu->cpuhp_state);
+cpuhp_state_err:
ida_simple_remove(&ddr_ida, pmu->id);
dev_warn(&pdev->dev, "i.MX8 DDR Perf PMU failed (%d), disabled\n", ret);
return ret;
struct ddr_pmu *pmu = platform_get_drvdata(pdev);
cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
+ cpuhp_remove_multi_state(pmu->cpuhp_state);
irq_set_affinity_hint(pmu->irq, NULL);
perf_pmu_unregister(&pmu->pmu);
hisi_pmu->ops->stop_counters(hisi_pmu);
}
+
/*
- * Read Super CPU cluster and CPU cluster ID from MPIDR_EL1.
- * If multi-threading is supported, On Huawei Kunpeng 920 SoC whose cpu
- * core is tsv110, CCL_ID is the low 3-bits in MPIDR[Aff2] and SCCL_ID
- * is the upper 5-bits of Aff2 field; while for other cpu types, SCCL_ID
- * is in MPIDR[Aff3] and CCL_ID is in MPIDR[Aff2], if not, SCCL_ID
- * is in MPIDR[Aff2] and CCL_ID is in MPIDR[Aff1].
+ * The Super CPU Cluster (SCCL) and CPU Cluster (CCL) IDs can be
+ * determined from the MPIDR_EL1, but the encoding varies by CPU:
+ *
+ * - For MT variants of TSV110:
+ * SCCL is Aff2[7:3], CCL is Aff2[2:0]
+ *
+ * - For other MT parts:
+ * SCCL is Aff3[7:0], CCL is Aff2[7:0]
+ *
+ * - For non-MT parts:
+ * SCCL is Aff2[7:0], CCL is Aff1[7:0]
*/
-static void hisi_read_sccl_and_ccl_id(int *sccl_id, int *ccl_id)
+static void hisi_read_sccl_and_ccl_id(int *scclp, int *cclp)
{
u64 mpidr = read_cpuid_mpidr();
-
- if (mpidr & MPIDR_MT_BITMASK) {
- if (read_cpuid_part_number() == HISI_CPU_PART_TSV110) {
- int aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2);
-
- if (sccl_id)
- *sccl_id = aff2 >> 3;
- if (ccl_id)
- *ccl_id = aff2 & 0x7;
- } else {
- if (sccl_id)
- *sccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 3);
- if (ccl_id)
- *ccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
- }
+ int aff3 = MPIDR_AFFINITY_LEVEL(mpidr, 3);
+ int aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2);
+ int aff1 = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ bool mt = mpidr & MPIDR_MT_BITMASK;
+ int sccl, ccl;
+
+ if (mt && read_cpuid_part_number() == HISI_CPU_PART_TSV110) {
+ sccl = aff2 >> 3;
+ ccl = aff2 & 0x7;
+ } else if (mt) {
+ sccl = aff3;
+ ccl = aff2;
} else {
- if (sccl_id)
- *sccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
- if (ccl_id)
- *ccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ sccl = aff2;
+ ccl = aff1;
}
+
+ if (scclp)
+ *scclp = sccl;
+ if (cclp)
+ *cclp = ccl;
}
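A worked example of the decode above, using hypothetical MPIDR_EL1 field values:
/*
 * Hypothetical example: on an MT TSV110 part with Aff2 = 0x2b,
 * sccl = 0x2b >> 3 = 5 and ccl = 0x2b & 0x7 = 3; on a non-MT part
 * with Aff2 = 3 and Aff1 = 1, sccl = 3 and ccl = 1.
 */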
/*
enum cpcap_gpio_mode {
CPCAP_DM_DP,
CPCAP_MDM_RX_TX,
- CPCAP_UNKNOWN,
+ CPCAP_UNKNOWN_DISABLED, /* Seems to disable USB lines */
CPCAP_OTG_DM_DP,
};
struct iio_channel *id;
struct regulator *vusb;
atomic_t active;
+ unsigned int vbus_provider:1;
+ unsigned int docked:1;
};
static bool cpcap_usb_vbus_valid(struct cpcap_phy_ddata *ddata)
static int cpcap_usb_set_uart_mode(struct cpcap_phy_ddata *ddata);
static int cpcap_usb_set_usb_mode(struct cpcap_phy_ddata *ddata);
+static void cpcap_usb_try_musb_mailbox(struct cpcap_phy_ddata *ddata,
+ enum musb_vbus_id_status status)
+{
+ int error;
+
+ error = musb_mailbox(status);
+ if (!error)
+ return;
+
+ dev_dbg(ddata->dev, "%s: musb_mailbox failed: %i\n",
+ __func__, error);
+}
+
static void cpcap_usb_detect(struct work_struct *work)
{
struct cpcap_phy_ddata *ddata;
if (error)
return;
- if (s.id_ground) {
- dev_dbg(ddata->dev, "id ground, USB host mode\n");
+ vbus = cpcap_usb_vbus_valid(ddata);
+
+ /* We need to kick the VBUS as USB A-host */
+ if (s.id_ground && ddata->vbus_provider) {
+ dev_dbg(ddata->dev, "still in USB A-host mode, kicking VBUS\n");
+
+ cpcap_usb_try_musb_mailbox(ddata, MUSB_ID_GROUND);
+
+ error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC3,
+ CPCAP_BIT_VBUSSTBY_EN |
+ CPCAP_BIT_VBUSEN_SPI,
+ CPCAP_BIT_VBUSEN_SPI);
+ if (error)
+ goto out_err;
+
+ return;
+ }
+
+ if (vbus && s.id_ground && ddata->docked) {
+ dev_dbg(ddata->dev, "still docked as A-host, signal ID down\n");
+
+ cpcap_usb_try_musb_mailbox(ddata, MUSB_ID_GROUND);
+
+ return;
+ }
+
+ /* No VBUS needed with docks */
+ if (vbus && s.id_ground && !ddata->vbus_provider) {
+ dev_dbg(ddata->dev, "connected to a dock\n");
+
+ ddata->docked = true;
+
error = cpcap_usb_set_usb_mode(ddata);
if (error)
goto out_err;
- error = musb_mailbox(MUSB_ID_GROUND);
+ cpcap_usb_try_musb_mailbox(ddata, MUSB_ID_GROUND);
+
+ /*
+ * Force check state again after musb has reoriented,
+ * otherwise devices won't enumerate after loading PHY
+ * driver.
+ */
+ schedule_delayed_work(&ddata->detect_work,
+ msecs_to_jiffies(1000));
+
+ return;
+ }
+
+ if (s.id_ground && !ddata->docked) {
+ dev_dbg(ddata->dev, "id ground, USB host mode\n");
+
+ ddata->vbus_provider = true;
+
+ error = cpcap_usb_set_usb_mode(ddata);
if (error)
goto out_err;
+ cpcap_usb_try_musb_mailbox(ddata, MUSB_ID_GROUND);
+
error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC3,
CPCAP_BIT_VBUSSTBY_EN |
CPCAP_BIT_VBUSEN_SPI,
vbus = cpcap_usb_vbus_valid(ddata);
+ /* Otherwise assume we're connected to a USB host */
if (vbus) {
- /* Are we connected to a docking station with vbus? */
- if (s.id_ground) {
- dev_dbg(ddata->dev, "connected to a dock\n");
-
- /* No VBUS needed with docks */
- error = cpcap_usb_set_usb_mode(ddata);
- if (error)
- goto out_err;
- error = musb_mailbox(MUSB_ID_GROUND);
- if (error)
- goto out_err;
-
- return;
- }
-
- /* Otherwise assume we're connected to a USB host */
dev_dbg(ddata->dev, "connected to USB host\n");
error = cpcap_usb_set_usb_mode(ddata);
if (error)
goto out_err;
- error = musb_mailbox(MUSB_VBUS_VALID);
- if (error)
- goto out_err;
+ cpcap_usb_try_musb_mailbox(ddata, MUSB_VBUS_VALID);
return;
}
+ ddata->vbus_provider = false;
+ ddata->docked = false;
+ cpcap_usb_try_musb_mailbox(ddata, MUSB_VBUS_OFF);
+
/* Default to debug UART mode */
error = cpcap_usb_set_uart_mode(ddata);
if (error)
goto out_err;
- error = musb_mailbox(MUSB_VBUS_OFF);
- if (error)
- goto out_err;
-
dev_dbg(ddata->dev, "set UART mode\n");
return;
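An illustrative summary of the detection branches above, paraphrased from the code:
/*
 * Illustrative decision order in cpcap_usb_detect():
 *   1. id ground && already vbus_provider   -> kick VBUS, stay USB A-host
 *   2. vbus && id ground && already docked  -> re-signal MUSB_ID_GROUND
 *   3. vbus && id ground && !vbus_provider  -> dock: USB mode, no VBUS needed
 *   4. id ground && !docked                 -> A-host: USB mode, enable VBUS
 *   5. vbus                                 -> B-device connected to a host
 *   6. otherwise                            -> VBUS off, debug UART mode
 */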
{
int error;
- error = cpcap_usb_gpio_set_mode(ddata, CPCAP_DM_DP);
+ /* Disable lines to prevent glitches from waking up mdm6600 */
+ error = cpcap_usb_gpio_set_mode(ddata, CPCAP_UNKNOWN_DISABLED);
if (error)
goto out_err;
if (error)
goto out_err;
+ /* Enable UART mode */
+ error = cpcap_usb_gpio_set_mode(ddata, CPCAP_DM_DP);
+ if (error)
+ goto out_err;
+
return 0;
out_err:
{
int error;
- error = cpcap_usb_gpio_set_mode(ddata, CPCAP_OTG_DM_DP);
+ /* Disable lines to prevent glitches from waking up mdm6600 */
+ error = cpcap_usb_gpio_set_mode(ddata, CPCAP_UNKNOWN_DISABLED);
if (error)
return error;
if (error)
goto out_err;
- error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC2,
- CPCAP_BIT_USBXCVREN,
- CPCAP_BIT_USBXCVREN);
- if (error)
- goto out_err;
-
error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC3,
CPCAP_BIT_PU_SPI |
CPCAP_BIT_DMPD_SPI |
if (error)
goto out_err;
+ /* Enable USB mode */
+ error = cpcap_usb_gpio_set_mode(ddata, CPCAP_OTG_DM_DP);
+ if (error)
+ goto out_err;
+
return 0;
out_err:
if (error)
dev_err(ddata->dev, "could not set UART mode\n");
- error = musb_mailbox(MUSB_VBUS_OFF);
- if (error)
- dev_err(ddata->dev, "could not set mailbox\n");
+ cpcap_usb_try_musb_mailbox(ddata, MUSB_VBUS_OFF);
usb_remove_phy(&ddata->phy);
cancel_delayed_work_sync(&ddata->detect_work);
struct phy_mdm6600 *ddata;
struct device *dev;
DECLARE_BITMAP(values, PHY_MDM6600_NR_STATUS_LINES);
- int error, i, val = 0;
+ int error;
ddata = container_of(work, struct phy_mdm6600, status_work.work);
dev = ddata->dev;
if (error)
return;
- for (i = 0; i < PHY_MDM6600_NR_STATUS_LINES; i++) {
- val |= test_bit(i, values) << i;
- dev_dbg(ddata->dev, "XXX %s: i: %i values[i]: %i val: %i\n",
- __func__, i, test_bit(i, values), val);
- }
- ddata->status = values[0];
+ ddata->status = values[0] & ((1 << PHY_MDM6600_NR_STATUS_LINES) - 1);
dev_info(dev, "modem status: %i %s\n",
ddata->status,
- phy_mdm6600_status_name[ddata->status & 7]);
+ phy_mdm6600_status_name[ddata->status]);
complete(&ddata->ack);
}
/* QPHY_V3_PCS_MISC_CLAMP_ENABLE register bits */
#define CLAMP_EN BIT(0) /* enables i/o clamp_n */
-#define PHY_INIT_COMPLETE_TIMEOUT 1000
+#define PHY_INIT_COMPLETE_TIMEOUT 10000
#define POWER_DOWN_DELAY_US_MIN 10
#define POWER_DOWN_DELAY_US_MAX 11
{
const struct pre_pll_config *cfg = pre_pll_cfg_table;
+ rate = (rate / 1000) * 1000;
+
for (; cfg->pixclock != 0; cfg++)
if (cfg->pixclock == rate && !cfg->fracdiv)
break;
{
const struct pre_pll_config *cfg = pre_pll_cfg_table;
+ rate = (rate / 1000) * 1000;
+
for (; cfg->pixclock != 0; cfg++)
if (cfg->pixclock == rate)
break;
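A worked example of the kHz truncation above, with a hypothetical rate:
/*
 * Hypothetical example: rate = 25176471 Hz gives
 * (25176471 / 1000) * 1000 = 25176000 Hz, which can then match a
 * pre-PLL table entry specified at kHz resolution.
 */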
config PINCTRL_LOCHNAGAR
tristate "Cirrus Logic Lochnagar pinctrl driver"
depends on MFD_LOCHNAGAR
+ select GPIOLIB
select PINMUX
select PINCONF
select GENERIC_PINCONF
return ret;
}
-#ifdef CONFIG_PM
-
-/**
- * pinctrl_pm_select_state() - select pinctrl state for PM
- * @dev: device to select default state for
- * @state: state to set
- */
-static int pinctrl_pm_select_state(struct device *dev,
- struct pinctrl_state *state)
+static int pinctrl_select_bound_state(struct device *dev,
+ struct pinctrl_state *state)
{
struct dev_pin_info *pins = dev->pins;
int ret;
}
/**
- * pinctrl_pm_select_default_state() - select default pinctrl state for PM
+ * pinctrl_select_default_state() - select default pinctrl state
* @dev: device to select default state for
*/
-int pinctrl_pm_select_default_state(struct device *dev)
+int pinctrl_select_default_state(struct device *dev)
{
if (!dev->pins)
return 0;
- return pinctrl_pm_select_state(dev, dev->pins->default_state);
+ return pinctrl_select_bound_state(dev, dev->pins->default_state);
+}
+EXPORT_SYMBOL_GPL(pinctrl_select_default_state);
+
+#ifdef CONFIG_PM
+
+/**
+ * pinctrl_pm_select_default_state() - select default pinctrl state for PM
+ * @dev: device to select default state for
+ */
+int pinctrl_pm_select_default_state(struct device *dev)
+{
+ return pinctrl_select_default_state(dev);
}
EXPORT_SYMBOL_GPL(pinctrl_pm_select_default_state);
if (!dev->pins)
return 0;
- return pinctrl_pm_select_state(dev, dev->pins->sleep_state);
+ return pinctrl_select_bound_state(dev, dev->pins->sleep_state);
}
EXPORT_SYMBOL_GPL(pinctrl_pm_select_sleep_state);
if (!dev->pins)
return 0;
- return pinctrl_pm_select_state(dev, dev->pins->idle_state);
+ return pinctrl_select_bound_state(dev, dev->pins->idle_state);
}
EXPORT_SYMBOL_GPL(pinctrl_pm_select_idle_state);
#endif
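A minimal sketch of a consumer of the newly exported helper outside the PM paths (the driver and function names are hypothetical; it assumes the declaration is exposed via <linux/pinctrl/consumer.h>):
#include <linux/device.h>
#include <linux/pinctrl/consumer.h>

/* Hypothetical consumer: reapply the bound default pin state after reset. */
static int foo_restore_pins(struct device *dev)
{
	/* Returns 0 when the device has no pinctrl state attached. */
	return pinctrl_select_default_state(dev);
}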
.padown_offset = SPT_PAD_OWN, \
.padcfglock_offset = SPT_PADCFGLOCK, \
.hostown_offset = SPT_HOSTSW_OWN, \
+ .is_offset = SPT_GPI_IS, \
.ie_offset = SPT_GPI_IE, \
.pin_base = (s), \
.npins = ((e) - (s) + 1), \
return ret;
meson_calc_reg_and_bit(bank, pin, REG_DS, ®, &bit);
+ bit = bit << 1;
ret = regmap_read(pc->reg_ds, reg, &val);
if (ret)
return ret;
}
- if (response->status) {
- dev_err(ec->dev,
- "EC reported failure sending keyboard LEDs command: %d",
- response->status);
- return -EIO;
- }
-
return 0;
}
{
struct wilco_keyboard_leds_msg request;
struct wilco_keyboard_leds_msg response;
+ int ret;
memset(&request, 0, sizeof(request));
request.command = WILCO_EC_COMMAND_KBBL;
request.mode = WILCO_KBBL_MODE_FLAG_PWM;
request.percent = brightness;
- return send_kbbl_msg(ec, &request, &response);
+ ret = send_kbbl_msg(ec, &request, &response);
+ if (ret < 0)
+ return ret;
+
+ if (response.status) {
+ dev_err(ec->dev,
+ "EC reported failure sending keyboard LEDs command: %d",
+ response.status);
+ return -EIO;
+ }
+
+ return 0;
}
static int kbbl_exist(struct wilco_ec_device *ec, bool *exists)
if (ret < 0)
return ret;
+ if (response.status) {
+ dev_err(ec->dev,
+ "EC reported failure sending keyboard LEDs command: %d",
+ response.status);
+ return -EIO;
+ }
+
if (response.mode & WILCO_KBBL_MODE_FLAG_PWM)
return response.percent;
* @work: work struct for deferred process
* @timer: background timer
* @vring: Tx/Rx ring
- * @spin_lock: spin lock
+ * @spin_lock: Tx/Rx spin lock
* @is_ready: ready flag
*/
struct mlxbf_tmfifo {
struct work_struct work;
struct timer_list timer;
struct mlxbf_tmfifo_vring *vring[2];
- spinlock_t spin_lock; /* spin lock */
+ spinlock_t spin_lock[2]; /* Tx/Rx spin locks */
bool is_ready;
};
writeq(*(u64 *)&hdr, fifo->tx_base + MLXBF_TMFIFO_TX_DATA);
/* Use spin-lock to protect the 'cons->tx_buf'. */
- spin_lock_irqsave(&fifo->spin_lock, flags);
+ spin_lock_irqsave(&fifo->spin_lock[0], flags);
while (size > 0) {
addr = cons->tx_buf.buf + cons->tx_buf.tail;
}
}
- spin_unlock_irqrestore(&fifo->spin_lock, flags);
+ spin_unlock_irqrestore(&fifo->spin_lock[0], flags);
}
/* Rx/Tx one word in the descriptor buffer. */
fifo->vring[is_rx] = NULL;
/* Notify upper layer that packet is done. */
- spin_lock_irqsave(&fifo->spin_lock, flags);
+ spin_lock_irqsave(&fifo->spin_lock[is_rx], flags);
vring_interrupt(0, vring->vq);
- spin_unlock_irqrestore(&fifo->spin_lock, flags);
+ spin_unlock_irqrestore(&fifo->spin_lock[is_rx], flags);
}
mlxbf_tmfifo_desc_done:
* worker handler.
*/
if (vring->vdev_id == VIRTIO_ID_CONSOLE) {
- spin_lock_irqsave(&fifo->spin_lock, flags);
+ spin_lock_irqsave(&fifo->spin_lock[0], flags);
tm_vdev = fifo->vdev[VIRTIO_ID_CONSOLE];
mlxbf_tmfifo_console_output(tm_vdev, vring);
- spin_unlock_irqrestore(&fifo->spin_lock, flags);
+ spin_unlock_irqrestore(&fifo->spin_lock[0], flags);
} else if (test_and_set_bit(MLXBF_TM_TX_LWM_IRQ,
&fifo->pend_events)) {
return true;
if (!fifo)
return -ENOMEM;
- spin_lock_init(&fifo->spin_lock);
+ spin_lock_init(&fifo->spin_lock[0]);
+ spin_lock_init(&fifo->spin_lock[1]);
INIT_WORK(&fifo->work, mlxbf_tmfifo_work_handler);
mutex_init(&fifo->lock);
item = pdata->items;
for (i = 0; i < pdata->counter; i++, item++) {
+ if (item->capability) {
+ /*
+ * Read group capability register to get actual number
+ * of interrupt capable components and set group mask
+ * accordingly.
+ */
+ ret = regmap_read(priv->regmap, item->capability,
+ ®val);
+ if (ret)
+ goto out;
+
+ item->mask = GENMASK((regval & item->mask) - 1, 0);
+ }
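A worked example of the mask computation above, with hypothetical register contents:
/*
 * Hypothetical example: if the capability register reports 4 interrupt
 * capable components, (regval & item->mask) == 4, so
 * GENMASK(4 - 1, 0) == 0x0f sets the low four bits of the group mask.
 */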
+
/* Clear group presence event. */
ret = regmap_write(priv->regmap, item->reg +
MLXREG_HOTPLUG_EVENT_OFF, 0);
config INTEL_SCU_IPC_UTIL
tristate "Intel SCU IPC utility driver"
depends on INTEL_SCU_IPC
- default y
---help---
The IPC Util driver provides an interface with the SCU enabling
low level access for debug work and updating the firmware. Say
depends on PCI && IOSF_MBI && PM
help
Power-management driver for Intel's Image Signal Processor found on
- Bay and Cherry Trail devices. This dummy driver's sole purpose is to
- turn the ISP off (put it in D3) to save power and to allow entering
- of S0ix modes.
+ Bay Trail and Cherry Trail devices. This dummy driver's sole purpose
+ is to turn the ISP off (put it in D3) to save power and to allow
+ entering of S0ix modes.
To compile this driver as a module, choose M here: the module
will be called intel_atomisp2_pm.
To compile this driver as a module, choose M here: the module
will be called pcengines-apuv2.
+config INTEL_UNCORE_FREQ_CONTROL
+ tristate "Intel Uncore frequency control driver"
+ depends on X86_64
+ help
+ This driver allows control of uncore frequency limits on
+ supported server platforms.
+ Uncore frequency controls RING/LLC (last-level cache) clocks.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel-uncore-frequency.
+
source "drivers/platform/x86/intel_speed_select_if/Kconfig"
config SYSTEM76_ACPI
obj-$(CONFIG_PCENGINES_APU2) += pcengines-apuv2.o
obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += intel_speed_select_if/
obj-$(CONFIG_SYSTEM76_ACPI) += system76_acpi.o
+obj-$(CONFIG_INTEL_UNCORE_FREQ_CONTROL) += intel-uncore-frequency.o
{ KE_KEY, 0x67, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV */
{ KE_KEY, 0x6B, { KEY_TOUCHPAD_TOGGLE } },
{ KE_IGNORE, 0x6E, }, /* Low Battery notification */
+ { KE_KEY, 0x71, { KEY_F13 } }, /* General-purpose button */
{ KE_KEY, 0x7a, { KEY_ALS_TOGGLE } }, /* Ambient Light Sensor Toggle */
{ KE_KEY, 0x7c, { KEY_MICMUTE } },
{ KE_KEY, 0x7D, { KEY_BLUETOOTH } }, /* Bluetooth Enable */
#define NOTIFY_KBD_BRTDWN 0xc5
#define NOTIFY_KBD_BRTTOGGLE 0xc7
#define NOTIFY_KBD_FBM 0x99
+#define NOTIFY_KBD_TTP 0xae
#define ASUS_WMI_FNLOCK_BIOS_DISABLED BIT(0)
#define ASUS_FAN_BOOST_MODE_SILENT_MASK 0x02
#define ASUS_FAN_BOOST_MODES_MASK 0x03
+#define ASUS_THROTTLE_THERMAL_POLICY_DEFAULT 0
+#define ASUS_THROTTLE_THERMAL_POLICY_OVERBOOST 1
+#define ASUS_THROTTLE_THERMAL_POLICY_SILENT 2
+
#define USB_INTEL_XUSB2PR 0xD0
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
u8 fan_boost_mode_mask;
u8 fan_boost_mode;
+ bool throttle_thermal_policy_available;
+ u8 throttle_thermal_policy_mode;
+
// The RSOC controls the maximum charging percentage.
bool battery_rsoc_available;
{
int ctrl_param = 0;
- /*
- * bits 0-2: level
- * bit 7: light on/off
- */
- if (asus->kbd_led_wk > 0)
- ctrl_param = 0x80 | (asus->kbd_led_wk & 0x7F);
-
+ ctrl_param = 0x80 | (asus->kbd_led_wk & 0x7F);
asus_wmi_set_devstate(ASUS_WMI_DEVID_KBD_BACKLIGHT, ctrl_param, NULL);
}
// Fan boost mode: 0 - normal, 1 - overboost, 2 - silent
static DEVICE_ATTR_RW(fan_boost_mode);
+/* Throttle thermal policy ****************************************************/
+
+static int throttle_thermal_policy_check_present(struct asus_wmi *asus)
+{
+ u32 result;
+ int err;
+
+ asus->throttle_thermal_policy_available = false;
+
+ err = asus_wmi_get_devstate(asus,
+ ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY,
+ &result);
+ if (err) {
+ if (err == -ENODEV)
+ return 0;
+ return err;
+ }
+
+ if (result & ASUS_WMI_DSTS_PRESENCE_BIT)
+ asus->throttle_thermal_policy_available = true;
+
+ return 0;
+}
+
+static int throttle_thermal_policy_write(struct asus_wmi *asus)
+{
+ int err;
+ u8 value;
+ u32 retval;
+
+ value = asus->throttle_thermal_policy_mode;
+
+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY,
+ value, &retval);
+ if (err) {
+ pr_warn("Failed to set throttle thermal policy: %d\n", err);
+ return err;
+ }
+
+ if (retval != 1) {
+ pr_warn("Failed to set throttle thermal policy (retval): 0x%x\n",
+ retval);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int throttle_thermal_policy_set_default(struct asus_wmi *asus)
+{
+ if (!asus->throttle_thermal_policy_available)
+ return 0;
+
+ asus->throttle_thermal_policy_mode = ASUS_THROTTLE_THERMAL_POLICY_DEFAULT;
+ return throttle_thermal_policy_write(asus);
+}
+
+static int throttle_thermal_policy_switch_next(struct asus_wmi *asus)
+{
+ u8 new_mode = asus->throttle_thermal_policy_mode + 1;
+
+ if (new_mode > ASUS_THROTTLE_THERMAL_POLICY_SILENT)
+ new_mode = ASUS_THROTTLE_THERMAL_POLICY_DEFAULT;
+
+ asus->throttle_thermal_policy_mode = new_mode;
+ return throttle_thermal_policy_write(asus);
+}
+
+static ssize_t throttle_thermal_policy_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ u8 mode = asus->throttle_thermal_policy_mode;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", mode);
+}
+
+static ssize_t throttle_thermal_policy_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int result;
+ u8 new_mode;
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+
+ result = kstrtou8(buf, 10, &new_mode);
+ if (result < 0)
+ return result;
+
+ if (new_mode > ASUS_THROTTLE_THERMAL_POLICY_SILENT)
+ return -EINVAL;
+
+ asus->throttle_thermal_policy_mode = new_mode;
+ throttle_thermal_policy_write(asus);
+
+ return count;
+}
+
+// Throttle thermal policy: 0 - default, 1 - overboost, 2 - silent
+static DEVICE_ATTR_RW(throttle_thermal_policy);
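An illustrative note on the resulting sysfs semantics:
/*
 * Illustrative semantics: reading throttle_thermal_policy returns the
 * current mode (0 default, 1 overboost, 2 silent); writing a value in
 * that range selects it, and anything else is rejected with an error.
 */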
+
/* Backlight ******************************************************************/
static int read_backlight_power(struct asus_wmi *asus)
return;
}
+ if (asus->throttle_thermal_policy_available && code == NOTIFY_KBD_TTP) {
+ throttle_thermal_policy_switch_next(asus);
+ return;
+ }
+
if (is_display_toggle(code) && asus->driver->quirks->no_display_toggle)
return;
&dev_attr_lid_resume.attr,
&dev_attr_als_enable.attr,
&dev_attr_fan_boost_mode.attr,
+ &dev_attr_throttle_thermal_policy.attr,
NULL
};
devid = ASUS_WMI_DEVID_ALS_ENABLE;
else if (attr == &dev_attr_fan_boost_mode.attr)
ok = asus->fan_boost_mode_available;
+ else if (attr == &dev_attr_throttle_thermal_policy.attr)
+ ok = asus->throttle_thermal_policy_available;
if (devid != -1)
ok = !(asus_wmi_get_devstate_simple(asus, devid) < 0);
if (err)
goto fail_fan_boost_mode;
+ err = throttle_thermal_policy_check_present(asus);
+ if (err)
+ goto fail_throttle_thermal_policy;
+ else
+ throttle_thermal_policy_set_default(asus);
+
err = asus_wmi_sysfs_init(asus->platform_device);
if (err)
goto fail_sysfs;
fail_input:
asus_wmi_sysfs_exit(asus->platform_device);
fail_sysfs:
+fail_throttle_thermal_policy:
fail_fan_boost_mode:
fail_platform:
kfree(asus);
#define MAX_SPEED 3
-static int temp_limits[3] = { 55000, 60000, 65000 };
+#define TEMP_LIMIT0_DEFAULT 55000
+#define TEMP_LIMIT1_DEFAULT 60000
+#define TEMP_LIMIT2_DEFAULT 65000
+
+#define HYSTERESIS_DEFAULT 3000
+
+#define SPEED_ON_AC_DEFAULT 2
+
+static int temp_limits[3] = {
+ TEMP_LIMIT0_DEFAULT, TEMP_LIMIT1_DEFAULT, TEMP_LIMIT2_DEFAULT,
+};
module_param_array(temp_limits, int, NULL, 0444);
MODULE_PARM_DESC(temp_limits,
"Millicelsius values above which the fan speed increases");
-static int hysteresis = 3000;
+static int hysteresis = HYSTERESIS_DEFAULT;
module_param(hysteresis, int, 0444);
MODULE_PARM_DESC(hysteresis,
"Hysteresis in millicelsius before lowering the fan speed");
-static int speed_on_ac = 2;
+static int speed_on_ac = SPEED_ON_AC_DEFAULT;
module_param(speed_on_ac, int, 0444);
MODULE_PARM_DESC(speed_on_ac,
"minimum fan speed to allow when system is powered by AC");
int i;
for (i = 0; i < ARRAY_SIZE(temp_limits); i++) {
- if (temp_limits[i] < 40000 || temp_limits[i] > 70000) {
+ if (temp_limits[i] < 20000 || temp_limits[i] > 90000) {
dev_err(&pdev->dev, "Invalid temp-limit %d (must be between 40000 and 70000)\n",
temp_limits[i]);
- return -EINVAL;
+ temp_limits[0] = TEMP_LIMIT0_DEFAULT;
+ temp_limits[1] = TEMP_LIMIT1_DEFAULT;
+ temp_limits[2] = TEMP_LIMIT2_DEFAULT;
+ break;
}
}
if (hysteresis < 1000 || hysteresis > 10000) {
dev_err(&pdev->dev, "Invalid hysteresis %d (must be between 1000 and 10000)\n",
hysteresis);
- return -EINVAL;
+ hysteresis = HYSTERESIS_DEFAULT;
}
if (speed_on_ac < 0 || speed_on_ac > MAX_SPEED) {
dev_err(&pdev->dev, "Invalid speed_on_ac %d (must be between 0 and 3)\n",
speed_on_ac);
- return -EINVAL;
+ speed_on_ac = SPEED_ON_AC_DEFAULT;
}
fan = devm_kzalloc(&pdev->dev, sizeof(*fan), GFP_KERNEL);
MODULE_AUTHOR("Alex Hung");
static const struct acpi_device_id intel_hid_ids[] = {
+ {"INT1051", 0},
{"INT33D5", 0},
{"", 0},
};
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Uncore Frequency Setting
+ * Copyright (c) 2019, Intel Corporation.
+ * All rights reserved.
+ *
+ * Provide an interface to set MSR 0x620 at per-die granularity. On CPU
+ * online, one control CPU is identified per die to read/write the limit.
+ * This control CPU is reassigned if it goes offline. When the last CPU
+ * in a die goes offline, the sysfs object for that die is removed.
+ * The majority of the actual code is related to creating the sysfs
+ * objects and their read/write attributes.
+ *
+ * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+ */
+
+#include <linux/cpu.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+
+#define MSR_UNCORE_RATIO_LIMIT 0x620
+#define UNCORE_FREQ_KHZ_MULTIPLIER 100000
+
+/**
+ * struct uncore_data - Encapsulate all uncore data
+ * @kobj: Kobject for the sysfs directory of this instance
+ * @stored_uncore_data: Last user changed MSR 620 value, which will be restored
+ * on system resume.
+ * @initial_min_freq_khz: Sampled minimum uncore frequency at driver init
+ * @initial_max_freq_khz: Sampled maximum uncore frequency at driver init
+ * @control_cpu: Designated CPU for a die to read/write
+ * @valid: Mark the data valid/invalid
+ *
+ * This structure is used to encapsulate all data related to uncore sysfs
+ * settings for a die/package.
+ */
+struct uncore_data {
+ struct kobject kobj;
+ u64 stored_uncore_data;
+ u32 initial_min_freq_khz;
+ u32 initial_max_freq_khz;
+ int control_cpu;
+ bool valid;
+};
+
+#define to_uncore_data(a) container_of(a, struct uncore_data, kobj)
+
+/* Max instances for uncore data, one for each die */
+static int uncore_max_entries __read_mostly;
+/* Storage for uncore data for all instances */
+static struct uncore_data *uncore_instances;
+/* Root of the all uncore sysfs kobjs */
+struct kobject uncore_root_kobj;
+/* Stores the CPU mask of the target CPUs to use during uncore read/write */
+static cpumask_t uncore_cpu_mask;
+/* CPU online callback register instance */
+static enum cpuhp_state uncore_hp_state __read_mostly;
+/* Mutex to control all mutual exclusions */
+static DEFINE_MUTEX(uncore_lock);
+
+struct uncore_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct kobject *kobj,
+ struct attribute *attr, char *buf);
+ ssize_t (*store)(struct kobject *kobj,
+ struct attribute *attr, const char *c, ssize_t count);
+};
+
+#define define_one_uncore_ro(_name) \
+static struct uncore_attr _name = \
+__ATTR(_name, 0444, show_##_name, NULL)
+
+#define define_one_uncore_rw(_name) \
+static struct uncore_attr _name = \
+__ATTR(_name, 0644, show_##_name, store_##_name)
+
+#define show_uncore_data(member_name) \
+ static ssize_t show_##member_name(struct kobject *kobj, \
+ struct attribute *attr, \
+ char *buf) \
+ { \
+ struct uncore_data *data = to_uncore_data(kobj); \
+ return scnprintf(buf, PAGE_SIZE, "%u\n", \
+ data->member_name); \
+ } \
+ define_one_uncore_ro(member_name)
+
+show_uncore_data(initial_min_freq_khz);
+show_uncore_data(initial_max_freq_khz);
+
+/* Common function to read MSR 0x620 and read min/max */
+static int uncore_read_ratio(struct uncore_data *data, unsigned int *min,
+ unsigned int *max)
+{
+ u64 cap;
+ int ret;
+
+ ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap);
+ if (ret)
+ return ret;
+
+ *max = (cap & 0x7F) * UNCORE_FREQ_KHZ_MULTIPLIER;
+ *min = ((cap & GENMASK(14, 8)) >> 8) * UNCORE_FREQ_KHZ_MULTIPLIER;
+
+ return 0;
+}
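A worked example of the MSR 0x620 decode above, using a hypothetical register value:
/*
 * Hypothetical example: cap = 0x0818
 *   max = (cap & 0x7F)        = 0x18 = 24 -> 24 * 100000 = 2400000 kHz
 *   min = ((cap >> 8) & 0x7F) = 0x08 =  8 ->  8 * 100000 =  800000 kHz
 */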
+
+/* Common function to set min/max ratios to be used by sysfs callbacks */
+static int uncore_write_ratio(struct uncore_data *data, unsigned int input,
+ int set_max)
+{
+ int ret;
+ u64 cap;
+
+ mutex_lock(&uncore_lock);
+
+ input /= UNCORE_FREQ_KHZ_MULTIPLIER;
+ if (!input || input > 0x7F) {
+ ret = -EINVAL;
+ goto finish_write;
+ }
+
+ ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap);
+ if (ret)
+ goto finish_write;
+
+ if (set_max) {
+ cap &= ~0x7F;
+ cap |= input;
+ } else {
+ cap &= ~GENMASK(14, 8);
+ cap |= (input << 8);
+ }
+
+ ret = wrmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, cap);
+ if (ret)
+ goto finish_write;
+
+ data->stored_uncore_data = cap;
+
+finish_write:
+ mutex_unlock(&uncore_lock);
+
+ return ret;
+}
+
+static ssize_t store_min_max_freq_khz(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf, ssize_t count,
+ int min_max)
+{
+ struct uncore_data *data = to_uncore_data(kobj);
+ unsigned int input;
+
+ if (kstrtouint(buf, 10, &input))
+ return -EINVAL;
+
+ uncore_write_ratio(data, input, min_max);
+
+ return count;
+}
+
+static ssize_t show_min_max_freq_khz(struct kobject *kobj,
+ struct attribute *attr,
+ char *buf, int min_max)
+{
+ struct uncore_data *data = to_uncore_data(kobj);
+ unsigned int min, max;
+ int ret;
+
+ mutex_lock(&uncore_lock);
+ ret = uncore_read_ratio(data, &min, &max);
+ mutex_unlock(&uncore_lock);
+ if (ret)
+ return ret;
+
+ if (min_max)
+ return sprintf(buf, "%u\n", max);
+
+ return sprintf(buf, "%u\n", min);
+}
+
+#define store_uncore_min_max(name, min_max) \
+ static ssize_t store_##name(struct kobject *kobj, \
+ struct attribute *attr, \
+ const char *buf, ssize_t count) \
+ { \
+ \
+ return store_min_max_freq_khz(kobj, attr, buf, count, \
+ min_max); \
+ }
+
+#define show_uncore_min_max(name, min_max) \
+ static ssize_t show_##name(struct kobject *kobj, \
+ struct attribute *attr, char *buf) \
+ { \
+ \
+ return show_min_max_freq_khz(kobj, attr, buf, min_max); \
+ }
+
+store_uncore_min_max(min_freq_khz, 0);
+store_uncore_min_max(max_freq_khz, 1);
+
+show_uncore_min_max(min_freq_khz, 0);
+show_uncore_min_max(max_freq_khz, 1);
+
+define_one_uncore_rw(min_freq_khz);
+define_one_uncore_rw(max_freq_khz);
+
+static struct attribute *uncore_attrs[] = {
+ &initial_min_freq_khz.attr,
+ &initial_max_freq_khz.attr,
+ &max_freq_khz.attr,
+ &min_freq_khz.attr,
+ NULL
+};
+
+static struct kobj_type uncore_ktype = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .default_attrs = uncore_attrs,
+};
+
+static struct kobj_type uncore_root_ktype = {
+ .sysfs_ops = &kobj_sysfs_ops,
+};
+
+/* Caller provides protection */
+static struct uncore_data *uncore_get_instance(unsigned int cpu)
+{
+ int id = topology_logical_die_id(cpu);
+
+ if (id >= 0 && id < uncore_max_entries)
+ return &uncore_instances[id];
+
+ return NULL;
+}
+
+static void uncore_add_die_entry(int cpu)
+{
+ struct uncore_data *data;
+
+ mutex_lock(&uncore_lock);
+ data = uncore_get_instance(cpu);
+ if (!data) {
+ mutex_unlock(&uncore_lock);
+ return;
+ }
+
+ if (data->valid) {
+ /* control cpu changed */
+ data->control_cpu = cpu;
+ } else {
+ char str[64];
+ int ret;
+
+ memset(data, 0, sizeof(*data));
+ sprintf(str, "package_%02d_die_%02d",
+ topology_physical_package_id(cpu),
+ topology_die_id(cpu));
+
+ uncore_read_ratio(data, &data->initial_min_freq_khz,
+ &data->initial_max_freq_khz);
+
+ ret = kobject_init_and_add(&data->kobj, &uncore_ktype,
+ &uncore_root_kobj, str);
+ if (!ret) {
+ data->control_cpu = cpu;
+ data->valid = true;
+ }
+ }
+ mutex_unlock(&uncore_lock);
+}
+
+/* Last CPU in this die is offline, so remove sysfs entries */
+static void uncore_remove_die_entry(int cpu)
+{
+ struct uncore_data *data;
+
+ mutex_lock(&uncore_lock);
+ data = uncore_get_instance(cpu);
+ if (data) {
+ kobject_put(&data->kobj);
+ data->control_cpu = -1;
+ data->valid = false;
+ }
+ mutex_unlock(&uncore_lock);
+}
+
+static int uncore_event_cpu_online(unsigned int cpu)
+{
+ int target;
+
+ /* Check if there is an online cpu in the package for uncore MSR */
+ target = cpumask_any_and(&uncore_cpu_mask, topology_die_cpumask(cpu));
+ if (target < nr_cpu_ids)
+ return 0;
+
+ /* Use this CPU on this die as a control CPU */
+ cpumask_set_cpu(cpu, &uncore_cpu_mask);
+ uncore_add_die_entry(cpu);
+
+ return 0;
+}
+
+static int uncore_event_cpu_offline(unsigned int cpu)
+{
+ int target;
+
+ /* Check if existing cpu is used for uncore MSRs */
+ if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
+ return 0;
+
+ /* Find a new cpu to set uncore MSR */
+ target = cpumask_any_but(topology_die_cpumask(cpu), cpu);
+
+ if (target < nr_cpu_ids) {
+ cpumask_set_cpu(target, &uncore_cpu_mask);
+ uncore_add_die_entry(target);
+ } else {
+ uncore_remove_die_entry(cpu);
+ }
+
+ return 0;
+}
+
+static int uncore_pm_notify(struct notifier_block *nb, unsigned long mode,
+ void *_unused)
+{
+ int cpu;
+
+ switch (mode) {
+ case PM_POST_HIBERNATION:
+ case PM_POST_RESTORE:
+ case PM_POST_SUSPEND:
+ for_each_cpu(cpu, &uncore_cpu_mask) {
+ struct uncore_data *data;
+ int ret;
+
+ data = uncore_get_instance(cpu);
+ if (!data || !data->valid || !data->stored_uncore_data)
+ continue;
+
+ ret = wrmsrl_on_cpu(cpu, MSR_UNCORE_RATIO_LIMIT,
+ data->stored_uncore_data);
+ if (ret)
+ return ret;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static struct notifier_block uncore_pm_nb = {
+ .notifier_call = uncore_pm_notify,
+};
+
+#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }
+
+static const struct x86_cpu_id intel_uncore_cpu_ids[] = {
+ ICPU(INTEL_FAM6_BROADWELL_G),
+ ICPU(INTEL_FAM6_BROADWELL_X),
+ ICPU(INTEL_FAM6_BROADWELL_D),
+ ICPU(INTEL_FAM6_SKYLAKE_X),
+ ICPU(INTEL_FAM6_ICELAKE_X),
+ ICPU(INTEL_FAM6_ICELAKE_D),
+ {}
+};
+
+static int __init intel_uncore_init(void)
+{
+ const struct x86_cpu_id *id;
+ int ret;
+
+ id = x86_match_cpu(intel_uncore_cpu_ids);
+ if (!id)
+ return -ENODEV;
+
+ uncore_max_entries = topology_max_packages() *
+ topology_max_die_per_package();
+ uncore_instances = kcalloc(uncore_max_entries,
+ sizeof(*uncore_instances), GFP_KERNEL);
+ if (!uncore_instances)
+ return -ENOMEM;
+
+ ret = kobject_init_and_add(&uncore_root_kobj, &uncore_root_ktype,
+ &cpu_subsys.dev_root->kobj,
+ "intel_uncore_frequency");
+ if (ret)
+ goto err_free;
+
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+ "platform/x86/uncore-freq:online",
+ uncore_event_cpu_online,
+ uncore_event_cpu_offline);
+ if (ret < 0)
+ goto err_rem_kobj;
+
+ uncore_hp_state = ret;
+
+ ret = register_pm_notifier(&uncore_pm_nb);
+ if (ret)
+ goto err_rem_state;
+
+ return 0;
+
+err_rem_state:
+ cpuhp_remove_state(uncore_hp_state);
+err_rem_kobj:
+ kobject_put(&uncore_root_kobj);
+err_free:
+ kfree(uncore_instances);
+
+ return ret;
+}
+module_init(intel_uncore_init)
+
+static void __exit intel_uncore_exit(void)
+{
+ int i;
+
+ unregister_pm_notifier(&uncore_pm_nb);
+ cpuhp_remove_state(uncore_hp_state);
+ for (i = 0; i < uncore_max_entries; ++i) {
+ if (uncore_instances[i].valid)
+ kobject_put(&uncore_instances[i].kobj);
+ }
+ kobject_put(&uncore_root_kobj);
+ kfree(uncore_instances);
+}
+module_exit(intel_uncore_exit)
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel Uncore Frequency Limits Driver");
// SPDX-License-Identifier: GPL-2.0
/*
- * Dummy driver for Intel's Image Signal Processor found on Bay and Cherry
- * Trail devices. The sole purpose of this driver is to allow the ISP to
- * be put in D3.
+ * Dummy driver for Intel's Image Signal Processor found on Bay Trail
+ * and Cherry Trail devices. The sole purpose of this driver is to allow
+ * the ISP to be put in D3.
*
* Copyright (C) 2018 Hans de Goede <hdegoede@redhat.com>
*
static int isp_set_power(struct pci_dev *dev, bool enable)
{
unsigned long timeout;
- u32 val = enable ? ISPSSPM0_IUNIT_POWER_ON :
- ISPSSPM0_IUNIT_POWER_OFF;
+ u32 val = enable ? ISPSSPM0_IUNIT_POWER_ON : ISPSSPM0_IUNIT_POWER_OFF;
/* Write to ISPSSPM0 bit[1:0] to power on/off the IUNIT */
iosf_mbi_modify(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM0,
/*
* There should be no IUNIT access while power-down is
- * in progress HW sighting: 4567865
+ * in progress. HW sighting: 4567865.
* Wait up to 50 ms for the IUNIT to shut down.
* And we do the same for power on.
*/
timeout = jiffies + msecs_to_jiffies(50);
- while (1) {
+ do {
u32 tmp;
/* Wait until ISPSSPM0 bit[25:24] shows the right value */
iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM0, &tmp);
tmp = (tmp & ISPSSPM0_ISPSSS_MASK) >> ISPSSPM0_ISPSSS_OFFSET;
if (tmp == val)
- break;
+ return 0;
- if (time_after(jiffies, timeout)) {
- dev_err(&dev->dev, "IUNIT power-%s timeout.\n",
- enable ? "on" : "off");
- return -EBUSY;
- }
usleep_range(1000, 2000);
- }
+ } while (time_before(jiffies, timeout));
- return 0;
+ dev_err(&dev->dev, "IUNIT power-%s timeout.\n", enable ? "on" : "off");
+ return -EBUSY;
}
static int isp_probe(struct pci_dev *dev, const struct pci_device_id *id)
INT33FE_NODE_MAX,
};
-static const struct software_node nodes[];
-
-static const struct software_node_ref_args pi3usb30532_ref = {
- &nodes[INT33FE_NODE_PI3USB30532]
-};
-
-static const struct software_node_ref_args dp_ref = {
- &nodes[INT33FE_NODE_DISPLAYPORT]
-};
-
-static struct software_node_ref_args mux_ref;
-
-static const struct software_node_reference usb_connector_refs[] = {
- { "orientation-switch", 1, &pi3usb30532_ref},
- { "mode-switch", 1, &pi3usb30532_ref},
- { "displayport", 1, &dp_ref},
- { }
-};
-
-static const struct software_node_reference fusb302_refs[] = {
- { "usb-role-switch", 1, &mux_ref},
- { }
-};
-
/*
 * Grrr I severely dislike buggy BIOSes. At least one BIOS enumerates
* the max17047 both through the INT33FE ACPI device (it is right there
{ }
};
+/*
+ * We are not using an inline property here because those are constant,
+ * and we need to adjust this one at runtime to point to the real
+ * software node.
+ */
+static struct software_node_ref_args fusb302_mux_refs[] = {
+ { .node = NULL },
+};
+
static const struct property_entry fusb302_props[] = {
PROPERTY_ENTRY_STRING("linux,extcon-name", "cht_wcove_pwrsrc"),
+ PROPERTY_ENTRY_REF_ARRAY("usb-role-switch", fusb302_mux_refs),
{ }
};
PDO_VAR(5000, 12000, 3000),
};
+static const struct software_node nodes[];
+
static const struct property_entry usb_connector_props[] = {
PROPERTY_ENTRY_STRING("data-role", "dual"),
PROPERTY_ENTRY_STRING("power-role", "dual"),
PROPERTY_ENTRY_U32_ARRAY("source-pdos", src_pdo),
PROPERTY_ENTRY_U32_ARRAY("sink-pdos", snk_pdo),
PROPERTY_ENTRY_U32("op-sink-microwatt", 2500000),
+ PROPERTY_ENTRY_REF("orientation-switch",
+ &nodes[INT33FE_NODE_PI3USB30532]),
+ PROPERTY_ENTRY_REF("mode-switch",
+ &nodes[INT33FE_NODE_PI3USB30532]),
+ PROPERTY_ENTRY_REF("displayport",
+ &nodes[INT33FE_NODE_DISPLAYPORT]),
{ }
};
static const struct software_node nodes[] = {
- { "fusb302", NULL, fusb302_props, fusb302_refs },
+ { "fusb302", NULL, fusb302_props },
{ "max17047", NULL, max17047_props },
{ "pi3usb30532" },
{ "displayport" },
- { "connector", &nodes[0], usb_connector_props, usb_connector_refs },
+ { "connector", &nodes[0], usb_connector_props },
{ }
};
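
/*
 * The reference-patching pattern used above, collected into a minimal
 * sketch (illustrative names, not part of the driver): a ref-args array
 * is declared with a NULL node, published via PROPERTY_ENTRY_REF_ARRAY()
 * and filled in at runtime just before registration. This works because
 * software_node_register_nodes() keeps using the original property
 * arrays instead of copying them.
 *
 *	static struct software_node_ref_args example_refs[] = {
 *		{ .node = NULL },	// patched before registration
 *	};
 *	static const struct property_entry example_props[] = {
 *		PROPERTY_ENTRY_REF_ARRAY("usb-role-switch", example_refs),
 *		{ }
 *	};
 *
 *	example_refs[0].node =
 *		software_node_find_by_name(NULL, "intel-xhci-usb-sw");
 *	ret = software_node_register_nodes(example_nodes);
 */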
{
software_node_unregister_nodes(nodes);
- if (mux_ref.node) {
- fwnode_handle_put(software_node_fwnode(mux_ref.node));
- mux_ref.node = NULL;
+ if (fusb302_mux_refs[0].node) {
+ fwnode_handle_put(
+ software_node_fwnode(fusb302_mux_refs[0].node));
+ fusb302_mux_refs[0].node = NULL;
}
if (data->dp) {
static int cht_int33fe_add_nodes(struct cht_int33fe_data *data)
{
+ const struct software_node *mux_ref_node;
int ret;
- ret = software_node_register_nodes(nodes);
- if (ret)
- return ret;
-
- /* The devices that are not created in this driver need extra steps. */
-
/*
* There is no ACPI device node for the USB role mux, so we need to wait
 * until the mux driver has created the software node for the mux device.
* It means we depend on the mux driver. This function will return
* -EPROBE_DEFER until the mux device is registered.
*/
- mux_ref.node = software_node_find_by_name(NULL, "intel-xhci-usb-sw");
- if (!mux_ref.node) {
- ret = -EPROBE_DEFER;
- goto err_remove_nodes;
- }
+ mux_ref_node = software_node_find_by_name(NULL, "intel-xhci-usb-sw");
+ if (!mux_ref_node)
+ return -EPROBE_DEFER;
+
+ /*
+ * Update the node used in the "usb-role-switch" property. Note that we
+ * rely on software_node_register_nodes() to use the original
+ * instance of properties instead of copying them.
+ */
+ fusb302_mux_refs[0].node = mux_ref_node;
+
+ ret = software_node_register_nodes(nodes);
+ if (ret)
+ return ret;
+
+ /* The devices that are not created in this driver need extra steps. */
/*
* The DP connector does have ACPI device node. In this case we can just
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2010 Intel Corporation
*/
input_set_capability(input, EV_KEY, KEY_POWER);
- ddata = (struct mid_pb_ddata *)id->driver_data;
+ ddata = devm_kmemdup(&pdev->dev, (void *)id->driver_data,
+ sizeof(*ddata), GFP_KERNEL);
if (!ddata)
- return -ENODATA;
+ return -ENOMEM;
ddata->dev = &pdev->dev;
ddata->irq = irq;
{"GEN2 USB2PCIE2 PLL", SPT_PMC_BIT_MPHY_CMN_LANE1},
{"DMIPCIE3 PLL", SPT_PMC_BIT_MPHY_CMN_LANE2},
{"SATA PLL", SPT_PMC_BIT_MPHY_CMN_LANE3},
- {},
+ {}
};
static const struct pmc_bit_map spt_mphy_map[] = {
{"MPHY CORE LANE 13", SPT_PMC_BIT_MPHY_LANE13},
{"MPHY CORE LANE 14", SPT_PMC_BIT_MPHY_LANE14},
{"MPHY CORE LANE 15", SPT_PMC_BIT_MPHY_LANE15},
- {},
+ {}
};
static const struct pmc_bit_map spt_pfear_map[] = {
{"CSME_SMS1", SPT_PMC_BIT_CSME_SMS1},
{"CSME_RTC", SPT_PMC_BIT_CSME_RTC},
{"CSME_PSF", SPT_PMC_BIT_CSME_PSF},
- {},
+ {}
+};
+
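+/*
+ * The pfear status is now exposed as a NULL-terminated array of bit
+ * maps so that newer platforms can chain a base generation's map with
+ * their own additions (see ext_icl_pfear_map and ext_tgl_pfear_map
+ * below).
+ */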
+static const struct pmc_bit_map *ext_spt_pfear_map[] = {
+ spt_pfear_map,
+ NULL
};
static const struct pmc_bit_map spt_ltr_show_map[] = {
};
static const struct pmc_reg_map spt_reg_map = {
- .pfear_sts = spt_pfear_map,
+ .pfear_sts = ext_spt_pfear_map,
.mphy_sts = spt_mphy_map,
.pll_sts = spt_pll_map,
.ltr_show_sts = spt_ltr_show_map,
{"SDX", BIT(4)},
{"SPE", BIT(5)},
{"Fuse", BIT(6)},
- /* Reserved for Cannon Lake but valid for Ice Lake and Comet Lake */
+ /*
+ * Reserved for Cannon Lake but valid for Ice Lake, Comet Lake,
+ * Tiger Lake and Elkhart Lake.
+ */
{"SBR8", BIT(7)},
{"CSME_FSC", BIT(0)},
{"HDA_PGD4", BIT(2)},
{"HDA_PGD5", BIT(3)},
{"HDA_PGD6", BIT(4)},
- /* Reserved for Cannon Lake but valid for Ice Lake and Comet Lake */
+ /*
+ * Reserved for Cannon Lake but valid for Ice Lake, Comet Lake,
+ * Tiger Lake and Elkhart Lake.
+ */
{"PSF6", BIT(5)},
{"PSF7", BIT(6)},
{"PSF8", BIT(7)},
+ {}
+};
+
+static const struct pmc_bit_map *ext_cnp_pfear_map[] = {
+ cnp_pfear_map,
+ NULL
+};
+static const struct pmc_bit_map icl_pfear_map[] = {
/* Ice Lake generation onwards only */
{"RES_65", BIT(0)},
{"RES_66", BIT(1)},
{}
};
+static const struct pmc_bit_map *ext_icl_pfear_map[] = {
+ cnp_pfear_map,
+ icl_pfear_map,
+ NULL
+};
+
+static const struct pmc_bit_map tgl_pfear_map[] = {
+ /* Tiger Lake and Elkhart Lake generation onwards only */
+ {"PSF9", BIT(0)},
+ {"RES_66", BIT(1)},
+ {"RES_67", BIT(2)},
+ {"RES_68", BIT(3)},
+ {"RES_69", BIT(4)},
+ {"RES_70", BIT(5)},
+ {"TBTLSX", BIT(6)},
+ {}
+};
+
+static const struct pmc_bit_map *ext_tgl_pfear_map[] = {
+ cnp_pfear_map,
+ tgl_pfear_map,
+ NULL
+};
+
static const struct pmc_bit_map cnp_slps0_dbg0_map[] = {
{"AUDIO_D3", BIT(0)},
{"OTG_D3", BIT(1)},
cnp_slps0_dbg0_map,
cnp_slps0_dbg1_map,
cnp_slps0_dbg2_map,
- NULL,
+ NULL
};
static const struct pmc_bit_map cnp_ltr_show_map[] = {
};
static const struct pmc_reg_map cnp_reg_map = {
- .pfear_sts = cnp_pfear_map,
+ .pfear_sts = ext_cnp_pfear_map,
.slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
.slps0_dbg_maps = cnp_slps0_dbg_maps,
.ltr_show_sts = cnp_ltr_show_map,
};
static const struct pmc_reg_map icl_reg_map = {
- .pfear_sts = cnp_pfear_map,
+ .pfear_sts = ext_icl_pfear_map,
.slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
.slps0_dbg_maps = cnp_slps0_dbg_maps,
.ltr_show_sts = cnp_ltr_show_map,
.ltr_ignore_max = ICL_NUM_IP_IGN_ALLOWED,
};
-static inline u8 pmc_core_reg_read_byte(struct pmc_dev *pmcdev, int offset)
-{
- return readb(pmcdev->regbase + offset);
-}
+static const struct pmc_reg_map tgl_reg_map = {
+ .pfear_sts = ext_tgl_pfear_map,
+ .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
+ .slps0_dbg_maps = cnp_slps0_dbg_maps,
+ .ltr_show_sts = cnp_ltr_show_map,
+ .msr_sts = msr_map,
+ .slps0_dbg_offset = CNP_PMC_SLPS0_DBG_OFFSET,
+ .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
+ .regmap_length = CNP_PMC_MMIO_REG_LEN,
+ .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
+ .ppfear_buckets = ICL_PPFEAR_NUM_ENTRIES,
+ .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
+ .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
+ .ltr_ignore_max = TGL_NUM_IP_IGN_ALLOWED,
+};
static inline u32 pmc_core_reg_read(struct pmc_dev *pmcdev, int reg_offset)
{
return readl(pmcdev->regbase + reg_offset);
}
-static inline void pmc_core_reg_write(struct pmc_dev *pmcdev, int
- reg_offset, u32 val)
+static inline void pmc_core_reg_write(struct pmc_dev *pmcdev, int reg_offset,
+ u32 val)
{
writel(val, pmcdev->regbase + reg_offset);
}
#if IS_ENABLED(CONFIG_DEBUG_FS)
static bool slps0_dbg_latch;
-static void pmc_core_display_map(struct seq_file *s, int index,
- u8 pf_reg, const struct pmc_bit_map *pf_map)
+static inline u8 pmc_core_reg_read_byte(struct pmc_dev *pmcdev, int offset)
+{
+ return readb(pmcdev->regbase + offset);
+}
+
+static void pmc_core_display_map(struct seq_file *s, int index, int idx, int ip,
+ u8 pf_reg, const struct pmc_bit_map **pf_map)
{
seq_printf(s, "PCH IP: %-2d - %-32s\tState: %s\n",
- index, pf_map[index].name,
- pf_map[index].bit_mask & pf_reg ? "Off" : "On");
+ ip, pf_map[idx][index].name,
+ pf_map[idx][index].bit_mask & pf_reg ? "Off" : "On");
}
static int pmc_core_ppfear_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmcdev = s->private;
- const struct pmc_bit_map *map = pmcdev->map->pfear_sts;
+ const struct pmc_bit_map **maps = pmcdev->map->pfear_sts;
u8 pf_regs[PPFEAR_MAX_NUM_ENTRIES];
- int index, iter;
+ int index, iter, idx, ip = 0;
iter = pmcdev->map->ppfear0_offset;
index < PPFEAR_MAX_NUM_ENTRIES; index++, iter++)
pf_regs[index] = pmc_core_reg_read_byte(pmcdev, iter);
- for (index = 0; map[index].name &&
- index < pmcdev->map->ppfear_buckets * 8; index++)
- pmc_core_display_map(s, index, pf_regs[index / 8], map);
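+	/*
+	 * 'index' restarts for each chained map, while 'ip' keeps
+	 * numbering the IPs across all maps.
+	 */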
+ for (idx = 0; maps[idx]; idx++) {
+ for (index = 0; maps[idx][index].name &&
+ index < pmcdev->map->ppfear_buckets * 8; ip++, index++)
+ pmc_core_display_map(s, index, idx, ip,
+ pf_regs[index / 8], maps);
+ }
return 0;
}
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_pll);
-static ssize_t pmc_core_ltr_ignore_write(struct file *file, const char __user
-*userbuf, size_t count, loff_t *ppos)
+static ssize_t pmc_core_ltr_ignore_write(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
{
struct pmc_dev *pmcdev = &pmc;
const struct pmc_reg_map *map = pmcdev->map;
u32 val, buf_size, fd;
- int err = 0;
+ int err;
buf_size = count < 64 ? count : 64;
- mutex_lock(&pmcdev->lock);
- if (kstrtou32_from_user(userbuf, buf_size, 10, &val)) {
- err = -EFAULT;
- goto out_unlock;
- }
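+	/* Parse the input before taking the lock; errors then need no unlock. */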
+ err = kstrtou32_from_user(userbuf, buf_size, 10, &val);
+ if (err)
+ return err;
+
+ mutex_lock(&pmcdev->lock);
if (val > map->ltr_ignore_max) {
err = -EINVAL;
debugfs_create_file("slp_s0_residency_usec", 0444, dir, pmcdev,
&pmc_core_dev_state);
- debugfs_create_file("pch_ip_power_gating_status", 0444, dir, pmcdev,
- &pmc_core_ppfear_fops);
+ if (pmcdev->map->pfear_sts)
+ debugfs_create_file("pch_ip_power_gating_status", 0444, dir,
+ pmcdev, &pmc_core_ppfear_fops);
debugfs_create_file("ltr_ignore", 0644, dir, pmcdev,
&pmc_core_ltr_ignore_ops);
INTEL_CPU_FAM6(ICELAKE_NNPI, icl_reg_map),
INTEL_CPU_FAM6(COMETLAKE, cnp_reg_map),
INTEL_CPU_FAM6(COMETLAKE_L, cnp_reg_map),
+ INTEL_CPU_FAM6(TIGERLAKE_L, tgl_reg_map),
+ INTEL_CPU_FAM6(TIGERLAKE, tgl_reg_map),
+ INTEL_CPU_FAM6(ATOM_TREMONT, tgl_reg_map),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_ids);
static const struct pci_device_id pmc_pci_ids[] = {
- { PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID), 0},
- { 0, },
+ { PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID) },
+ { }
};
/*
* This quirk can be used on those platforms where
- * the platform BIOS enforces 24Mhx Crystal to shutdown
+ * the platform BIOS forces the 24MHz crystal to shut down
* before PMC can assert SLP_S0#.
*/
static int quirk_xtal_ignore(const struct dmi_system_id *id)
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Intel Core SoC Power Management Controller Header File
*
#define ICL_NUM_IP_IGN_ALLOWED 20
#define ICL_PMC_LTR_WIGIG 0x1BFC
+#define TGL_NUM_IP_IGN_ALLOWED 22
+
struct pmc_bit_map {
const char *name;
u32 bit_mask;
* captures them to have a common implementation.
*/
struct pmc_reg_map {
- const struct pmc_bit_map *pfear_sts;
+ const struct pmc_bit_map **pfear_sts;
const struct pmc_bit_map *mphy_sts;
const struct pmc_bit_map *pll_sts;
const struct pmc_bit_map **slps0_dbg_maps;
INTEL_CPU_FAM6(KABYLAKE, pmc_core_device),
INTEL_CPU_FAM6(CANNONLAKE_L, pmc_core_device),
INTEL_CPU_FAM6(ICELAKE_L, pmc_core_device),
+ INTEL_CPU_FAM6(COMETLAKE, pmc_core_device),
+ INTEL_CPU_FAM6(COMETLAKE_L, pmc_core_device),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_platform_ids);
*/
#include <linux/acpi.h>
-#include <linux/atomic.h>
-#include <linux/bitops.h>
#include <linux/delay.h>
-#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io-64-nonatomic-lo-hi.h>
-#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/notifier.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
-#include <linux/pm.h>
-#include <linux/pm_qos.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-#include <linux/suspend.h>
#include <asm/intel_pmc_ipc.h>
writel(data, ipcdev.ipc_base + IPC_WRITE_BUFFER + offset);
}
-static inline u8 __maybe_unused ipc_data_readb(u32 offset)
-{
- return readb(ipcdev.ipc_base + IPC_READ_BUFFER + offset);
-}
-
static inline u32 ipc_data_readl(u32 offset)
{
return readl(ipcdev.ipc_base + IPC_READ_BUFFER + offset);
return 0;
}
-/**
- * intel_pmc_gcr_read() - Read a 32-bit PMC GCR register
- * @offset: offset of GCR register from GCR address base
- * @data: data pointer for storing the register output
- *
- * Reads the 32-bit PMC GCR register at given offset.
- *
- * Return: negative value on error or 0 on success.
- */
-int intel_pmc_gcr_read(u32 offset, u32 *data)
-{
- int ret;
-
- spin_lock(&ipcdev.gcr_lock);
-
- ret = is_gcr_valid(offset);
- if (ret < 0) {
- spin_unlock(&ipcdev.gcr_lock);
- return ret;
- }
-
- *data = readl(ipcdev.gcr_mem_base + offset);
-
- spin_unlock(&ipcdev.gcr_lock);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(intel_pmc_gcr_read);
-
/**
* intel_pmc_gcr_read64() - Read a 64-bit PMC GCR register
* @offset: offset of GCR register from GCR address base
}
EXPORT_SYMBOL_GPL(intel_pmc_gcr_read64);
-/**
- * intel_pmc_gcr_write() - Write PMC GCR register
- * @offset: offset of GCR register from GCR address base
- * @data: register update value
- *
- * Writes the PMC GCR register of given offset with given
- * value.
- *
- * Return: negative value on error or 0 on success.
- */
-int intel_pmc_gcr_write(u32 offset, u32 data)
-{
- int ret;
-
- spin_lock(&ipcdev.gcr_lock);
-
- ret = is_gcr_valid(offset);
- if (ret < 0) {
- spin_unlock(&ipcdev.gcr_lock);
- return ret;
- }
-
- writel(data, ipcdev.gcr_mem_base + offset);
-
- spin_unlock(&ipcdev.gcr_lock);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(intel_pmc_gcr_write);
-
/**
* intel_pmc_gcr_update() - Update PMC GCR register bits
* @offset: offset of GCR register from GCR address base
*
* Return: negative value on error or 0 on success.
*/
-int intel_pmc_gcr_update(u32 offset, u32 mask, u32 val)
+static int intel_pmc_gcr_update(u32 offset, u32 mask, u32 val)
{
u32 new_val;
int ret = 0;
spin_unlock(&ipcdev.gcr_lock);
return ret;
}
-EXPORT_SYMBOL_GPL(intel_pmc_gcr_update);
static int update_no_reboot_bit(void *priv, bool set)
{
*
* Return: an IPC error code or 0 on success.
*/
-int intel_pmc_ipc_simple_command(int cmd, int sub)
+static int intel_pmc_ipc_simple_command(int cmd, int sub)
{
int ret;
return ret;
}
-EXPORT_SYMBOL_GPL(intel_pmc_ipc_simple_command);
/**
* intel_pmc_ipc_raw_cmd() - IPC command with data and pointers
*
* Return: an IPC error code or 0 on success.
*/
-int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out,
- u32 outlen, u32 dptr, u32 sptr)
+static int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out,
+ u32 outlen, u32 dptr, u32 sptr)
{
u32 wbuf[4] = { 0 };
int ret;
return ret;
}
-EXPORT_SYMBOL_GPL(intel_pmc_ipc_raw_cmd);
/**
* intel_pmc_ipc_command() - IPC command with input/output data
}
return (ssize_t)count;
}
+static DEVICE_ATTR(simplecmd, 0200, NULL, intel_pmc_ipc_simple_cmd_store);
static ssize_t intel_pmc_ipc_northpeak_store(struct device *dev,
struct device_attribute *attr,
int subcmd;
int ret;
- if (kstrtoul(buf, 0, &val))
- return -EINVAL;
+ ret = kstrtoul(buf, 0, &val);
+ if (ret)
+ return ret;
if (val)
subcmd = 1;
}
return (ssize_t)count;
}
-
-static DEVICE_ATTR(simplecmd, S_IWUSR,
- NULL, intel_pmc_ipc_simple_cmd_store);
-static DEVICE_ATTR(northpeak, S_IWUSR,
- NULL, intel_pmc_ipc_northpeak_store);
+static DEVICE_ATTR(northpeak, 0200, NULL, intel_pmc_ipc_northpeak_store);
static struct attribute *intel_ipc_attrs[] = {
&dev_attr_northpeak.attr,
.attrs = intel_ipc_attrs,
};
+static const struct attribute_group *intel_ipc_groups[] = {
+ &intel_ipc_group,
+ NULL
+};
+
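+/*
+ * Groups listed in ->dev_groups are created and removed by the driver
+ * core itself, which is why the explicit sysfs_create_group() and
+ * sysfs_remove_group() calls in probe and remove are dropped below.
+ */
+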
static struct resource punit_res_array[] = {
/* Punit BIOS */
{
goto err_irq;
}
- ret = sysfs_create_group(&pdev->dev.kobj, &intel_ipc_group);
- if (ret) {
- dev_err(&pdev->dev, "Failed to create sysfs group %d\n",
- ret);
- goto err_sys;
- }
-
ipcdev.has_gcr_regs = true;
return 0;
-err_sys:
- devm_free_irq(&pdev->dev, ipcdev.irq, &ipcdev);
+
err_irq:
platform_device_unregister(ipcdev.tco_dev);
platform_device_unregister(ipcdev.punit_dev);
static int ipc_plat_remove(struct platform_device *pdev)
{
- sysfs_remove_group(&pdev->dev.kobj, &intel_ipc_group);
devm_free_irq(&pdev->dev, ipcdev.irq, &ipcdev);
platform_device_unregister(ipcdev.tco_dev);
platform_device_unregister(ipcdev.punit_dev);
.driver = {
.name = "pmc-ipc-plat",
.acpi_match_table = ACPI_PTR(ipc_acpi_ids),
+ .dev_groups = intel_ipc_groups,
},
};
#include <asm/intel_scu_ipc.h>
/* IPC defines the following message types */
-#define IPCMSG_WATCHDOG_TIMER 0xF8 /* Set Kernel Watchdog Threshold */
-#define IPCMSG_BATTERY 0xEF /* Coulomb Counter Accumulator */
-#define IPCMSG_FW_UPDATE 0xFE /* Firmware update */
-#define IPCMSG_PCNTRL 0xFF /* Power controller unit read/write */
-#define IPCMSG_FW_REVISION 0xF4 /* Get firmware revision */
+#define IPCMSG_PCNTRL 0xff /* Power controller unit read/write */
/* Command id associated with message IPCMSG_PCNTRL */
#define IPC_CMD_PCNTRL_W 0 /* Register write */
#define IPC_RWBUF_SIZE 20 /* IPC Read buffer Size */
#define IPC_IOC 0x100 /* IPC command register IOC bit */
-#define PCI_DEVICE_ID_LINCROFT 0x082a
-#define PCI_DEVICE_ID_PENWELL 0x080e
-#define PCI_DEVICE_ID_CLOVERVIEW 0x08ea
-#define PCI_DEVICE_ID_TANGIER 0x11a0
-
-/* intel scu ipc driver data */
-struct intel_scu_ipc_pdata_t {
- u32 i2c_base;
- u32 i2c_len;
- u8 irq_mode;
-};
-
-static const struct intel_scu_ipc_pdata_t intel_scu_ipc_lincroft_pdata = {
- .i2c_base = 0xff12b000,
- .i2c_len = 0x10,
- .irq_mode = 0,
-};
-
-/* Penwell and Cloverview */
-static const struct intel_scu_ipc_pdata_t intel_scu_ipc_penwell_pdata = {
- .i2c_base = 0xff12b000,
- .i2c_len = 0x10,
- .irq_mode = 1,
-};
-
-static const struct intel_scu_ipc_pdata_t intel_scu_ipc_tangier_pdata = {
- .i2c_base = 0xff00d000,
- .i2c_len = 0x10,
- .irq_mode = 0,
-};
-
struct intel_scu_ipc_dev {
struct device *dev;
void __iomem *ipc_base;
- void __iomem *i2c_base;
struct completion cmd_complete;
u8 irq_mode;
};
static struct intel_scu_ipc_dev ipcdev; /* Only one for now */
+#define IPC_STATUS 0x04
+#define IPC_STATUS_IRQ BIT(2)
+#define IPC_STATUS_ERR BIT(1)
+#define IPC_STATUS_BUSY BIT(0)
+
/*
- * IPC Read Buffer (Read Only):
- * 16 byte buffer for receiving data from SCU, if IPC command
- * processing results in response data
+ * IPC Write/Read Buffers:
+ * 16 byte buffers for sending data to and receiving data from the SCU.
*/
+#define IPC_WRITE_BUFFER 0x80
#define IPC_READ_BUFFER 0x90
-#define IPC_I2C_CNTRL_ADDR 0
-#define I2C_DATA_ADDR 0x04
+/* Timeout in jiffies */
+#define IPC_TIMEOUT (3 * HZ)
static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple calls to the SCU */
*/
static inline void ipc_command(struct intel_scu_ipc_dev *scu, u32 cmd)
{
- if (scu->irq_mode) {
- reinit_completion(&scu->cmd_complete);
- writel(cmd | IPC_IOC, scu->ipc_base);
- }
- writel(cmd, scu->ipc_base);
+ reinit_completion(&scu->cmd_complete);
+ writel(cmd | IPC_IOC, scu->ipc_base);
}
/*
*/
static inline void ipc_data_writel(struct intel_scu_ipc_dev *scu, u32 data, u32 offset)
{
- writel(data, scu->ipc_base + 0x80 + offset);
+ writel(data, scu->ipc_base + IPC_WRITE_BUFFER + offset);
}
/*
*/
static inline u8 ipc_read_status(struct intel_scu_ipc_dev *scu)
{
- return __raw_readl(scu->ipc_base + 0x04);
+ return __raw_readl(scu->ipc_base + IPC_STATUS);
}
/* Read ipc byte data */
/* Poll till the SCU clears the busy bit, or the timeout expires */
static inline int busy_loop(struct intel_scu_ipc_dev *scu)
{
- u32 status = ipc_read_status(scu);
- u32 loop_count = 100000;
+	unsigned long end = jiffies + IPC_TIMEOUT;	/* IPC_TIMEOUT is already in jiffies */
- /* break if scu doesn't reset busy bit after huge retry */
- while ((status & BIT(0)) && --loop_count) {
- udelay(1); /* scu processing time is in few u secods */
- status = ipc_read_status(scu);
- }
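+	/*
+	 * The SCU normally completes a command within a few microseconds,
+	 * so sleep briefly between polls instead of busy-waiting until the
+	 * deadline in 'end' expires.
+	 */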
+ do {
+ u32 status;
- if (status & BIT(0)) {
- dev_err(scu->dev, "IPC timed out");
- return -ETIMEDOUT;
- }
+ status = ipc_read_status(scu);
+ if (!(status & IPC_STATUS_BUSY))
+ return (status & IPC_STATUS_ERR) ? -EIO : 0;
- if (status & BIT(1))
- return -EIO;
+ usleep_range(50, 100);
+ } while (time_before(jiffies, end));
- return 0;
+ dev_err(scu->dev, "IPC timed out");
+ return -ETIMEDOUT;
}
/* Wait till the ipc ioc interrupt is received or the 3 second timeout expires */
{
int status;
- if (!wait_for_completion_timeout(&scu->cmd_complete, 3 * HZ)) {
+ if (!wait_for_completion_timeout(&scu->cmd_complete, IPC_TIMEOUT)) {
dev_err(scu->dev, "IPC timed out\n");
return -ETIMEDOUT;
}
status = ipc_read_status(scu);
- if (status & BIT(1))
+ if (status & IPC_STATUS_ERR)
return -EIO;
return 0;
}
/**
- * intel_scu_ipc_ioread8 - read a word via the SCU
- * @addr: register on SCU
- * @data: return pointer for read byte
+ * intel_scu_ipc_ioread8	- read a byte via the SCU
+ * @addr: Register on SCU
+ * @data: Return pointer for read byte
*
- * Read a single register. Returns 0 on success or an error code. All
- * locking between SCU accesses is handled for the caller.
+ * Read a single register. Returns %0 on success or an error code. All
+ * locking between SCU accesses is handled for the caller.
*
- * This function may sleep.
+ * This function may sleep.
*/
int intel_scu_ipc_ioread8(u16 addr, u8 *data)
{
EXPORT_SYMBOL(intel_scu_ipc_ioread8);
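
/*
 * A minimal usage sketch (hypothetical caller; the register address is
 * an illustrative placeholder, not one defined by this driver):
 *
 *	u8 val;
 *	int err;
 *
 *	err = intel_scu_ipc_ioread8(0x30, &val);
 *	if (!err)
 *		err = intel_scu_ipc_iowrite8(0x30, val | BIT(0));
 */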
/**
- * intel_scu_ipc_ioread16 - read a word via the SCU
- * @addr: register on SCU
- * @data: return pointer for read word
- *
- * Read a register pair. Returns 0 on success or an error code. All
- * locking between SCU accesses is handled for the caller.
- *
- * This function may sleep.
- */
-int intel_scu_ipc_ioread16(u16 addr, u16 *data)
-{
- u16 x[2] = {addr, addr + 1};
- return pwr_reg_rdwr(x, (u8 *)data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
-}
-EXPORT_SYMBOL(intel_scu_ipc_ioread16);
-
-/**
- * intel_scu_ipc_ioread32 - read a dword via the SCU
- * @addr: register on SCU
- * @data: return pointer for read dword
- *
- * Read four registers. Returns 0 on success or an error code. All
- * locking between SCU accesses is handled for the caller.
- *
- * This function may sleep.
- */
-int intel_scu_ipc_ioread32(u16 addr, u32 *data)
-{
- u16 x[4] = {addr, addr + 1, addr + 2, addr + 3};
- return pwr_reg_rdwr(x, (u8 *)data, 4, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
-}
-EXPORT_SYMBOL(intel_scu_ipc_ioread32);
-
-/**
- * intel_scu_ipc_iowrite8 - write a byte via the SCU
- * @addr: register on SCU
- * @data: byte to write
+ * intel_scu_ipc_iowrite8 - write a byte via the SCU
+ * @addr: Register on SCU
+ * @data: Byte to write
*
- * Write a single register. Returns 0 on success or an error code. All
- * locking between SCU accesses is handled for the caller.
+ * Write a single register. Returns %0 on success or an error code. All
+ * locking between SCU accesses is handled for the caller.
*
- * This function may sleep.
+ * This function may sleep.
*/
int intel_scu_ipc_iowrite8(u16 addr, u8 data)
{
EXPORT_SYMBOL(intel_scu_ipc_iowrite8);
/**
- * intel_scu_ipc_iowrite16 - write a word via the SCU
- * @addr: register on SCU
- * @data: word to write
- *
- * Write two registers. Returns 0 on success or an error code. All
- * locking between SCU accesses is handled for the caller.
+ * intel_scu_ipc_readv	- read a set of registers
+ * @addr: Register list
+ * @data: Bytes to return
+ * @len: Length of array
*
- * This function may sleep.
- */
-int intel_scu_ipc_iowrite16(u16 addr, u16 data)
-{
- u16 x[2] = {addr, addr + 1};
- return pwr_reg_rdwr(x, (u8 *)&data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
-}
-EXPORT_SYMBOL(intel_scu_ipc_iowrite16);
-
-/**
- * intel_scu_ipc_iowrite32 - write a dword via the SCU
- * @addr: register on SCU
- * @data: dword to write
+ * Read registers. Returns %0 on success or an error code. All locking
+ * between SCU accesses is handled for the caller.
*
- * Write four registers. Returns 0 on success or an error code. All
- * locking between SCU accesses is handled for the caller.
+ * The largest array length permitted by the hardware is 5 items.
*
- * This function may sleep.
- */
-int intel_scu_ipc_iowrite32(u16 addr, u32 data)
-{
- u16 x[4] = {addr, addr + 1, addr + 2, addr + 3};
- return pwr_reg_rdwr(x, (u8 *)&data, 4, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
-}
-EXPORT_SYMBOL(intel_scu_ipc_iowrite32);
-
-/**
- * intel_scu_ipc_readvv - read a set of registers
- * @addr: register list
- * @data: bytes to return
- * @len: length of array
- *
- * Read registers. Returns 0 on success or an error code. All
- * locking between SCU accesses is handled for the caller.
- *
- * The largest array length permitted by the hardware is 5 items.
- *
- * This function may sleep.
+ * This function may sleep.
*/
int intel_scu_ipc_readv(u16 *addr, u8 *data, int len)
{
EXPORT_SYMBOL(intel_scu_ipc_readv);
/**
- * intel_scu_ipc_writev - write a set of registers
- * @addr: register list
- * @data: bytes to write
- * @len: length of array
- *
- * Write registers. Returns 0 on success or an error code. All
- * locking between SCU accesses is handled for the caller.
+ * intel_scu_ipc_writev - write a set of registers
+ * @addr: Register list
+ * @data: Bytes to write
+ * @len: Length of array
*
- * The largest array length permitted by the hardware is 5 items.
+ * Write registers. Returns %0 on success or an error code. All locking
+ * between SCU accesses is handled for the caller.
*
- * This function may sleep.
+ * The largest array length permitted by the hardware is 5 items.
*
+ * This function may sleep.
*/
int intel_scu_ipc_writev(u16 *addr, u8 *data, int len)
{
EXPORT_SYMBOL(intel_scu_ipc_writev);
/**
- * intel_scu_ipc_update_register - r/m/w a register
- * @addr: register address
- * @bits: bits to update
- * @mask: mask of bits to update
- *
- * Read-modify-write power control unit register. The first data argument
- * must be register value and second is mask value
- * mask is a bitmap that indicates which bits to update.
- * 0 = masked. Don't modify this bit, 1 = modify this bit.
- * returns 0 on success or an error code.
- *
- * This function may sleep. Locking between SCU accesses is handled
- * for the caller.
+ * intel_scu_ipc_update_register - r/m/w a register
+ * @addr: Register address
+ * @bits: Bits to update
+ * @mask: Mask of bits to update
+ *
+ * Read-modify-write a power control unit register. The first data
+ * argument is the register value and the second is the mask value;
+ * the mask is a bitmap indicating which bits to update: %0 = leave
+ * this bit untouched, %1 = modify this bit. Returns %0 on success or
+ * an error code.
+ *
+ * This function may sleep. Locking between SCU accesses is handled
+ * for the caller.
*/
int intel_scu_ipc_update_register(u16 addr, u8 bits, u8 mask)
{
EXPORT_SYMBOL(intel_scu_ipc_update_register);
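
/*
 * A short sketch of the mask semantics (hypothetical register address
 * and bit values): only the bits set in the mask are rewritten, so the
 * call below sets bit 0 and clears bit 1 while leaving the remaining
 * bits untouched.
 *
 *	err = intel_scu_ipc_update_register(0x30, BIT(0), BIT(0) | BIT(1));
 */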
/**
- * intel_scu_ipc_simple_command - send a simple command
- * @cmd: command
- * @sub: sub type
+ * intel_scu_ipc_simple_command - send a simple command
+ * @cmd: Command
+ * @sub: Sub type
*
- * Issue a simple command to the SCU. Do not use this interface if
- * you must then access data as any data values may be overwritten
- * by another SCU access by the time this function returns.
+ * Issue a simple command to the SCU. Do not use this interface if you must
+ * then access data as any data values may be overwritten by another SCU
+ * access by the time this function returns.
*
- * This function may sleep. Locking for SCU accesses is handled for
- * the caller.
+ * This function may sleep. Locking for SCU accesses is handled for the
+ * caller.
*/
int intel_scu_ipc_simple_command(int cmd, int sub)
{
EXPORT_SYMBOL(intel_scu_ipc_simple_command);
/**
- * intel_scu_ipc_command - command with data
- * @cmd: command
- * @sub: sub type
- * @in: input data
- * @inlen: input length in dwords
- * @out: output data
- * @outlein: output length in dwords
- *
- * Issue a command to the SCU which involves data transfers. Do the
- * data copies under the lock but leave it for the caller to interpret
+ * intel_scu_ipc_command - command with data
+ * @cmd: Command
+ * @sub: Sub type
+ * @in: Input data
+ * @inlen: Input length in dwords
+ * @out: Output data
+ * @outlen: Output length in dwords
+ *
+ * Issue a command to the SCU which involves data transfers. Do the
+ * data copies under the lock but leave it for the caller to interpret.
*/
int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
u32 *out, int outlen)
}
EXPORT_SYMBOL(intel_scu_ipc_command);
-#define IPC_SPTR 0x08
-#define IPC_DPTR 0x0C
-
-/**
- * intel_scu_ipc_raw_command() - IPC command with data and pointers
- * @cmd: IPC command code.
- * @sub: IPC command sub type.
- * @in: input data of this IPC command.
- * @inlen: input data length in dwords.
- * @out: output data of this IPC command.
- * @outlen: output data length in dwords.
- * @sptr: data writing to SPTR register.
- * @dptr: data writing to DPTR register.
- *
- * Send an IPC command to SCU with input/output data and source/dest pointers.
- *
- * Return: an IPC error code or 0 on success.
- */
-int intel_scu_ipc_raw_command(int cmd, int sub, u8 *in, int inlen,
- u32 *out, int outlen, u32 dptr, u32 sptr)
-{
- struct intel_scu_ipc_dev *scu = &ipcdev;
- int inbuflen = DIV_ROUND_UP(inlen, 4);
- u32 inbuf[4];
- int i, err;
-
- /* Up to 16 bytes */
- if (inbuflen > 4)
- return -EINVAL;
-
- mutex_lock(&ipclock);
- if (scu->dev == NULL) {
- mutex_unlock(&ipclock);
- return -ENODEV;
- }
-
- writel(dptr, scu->ipc_base + IPC_DPTR);
- writel(sptr, scu->ipc_base + IPC_SPTR);
-
- /*
- * SRAM controller doesn't support 8-bit writes, it only
- * supports 32-bit writes, so we have to copy input data into
- * the temporary buffer, and SCU FW will use the inlen to
- * determine the actual input data length in the temporary
- * buffer.
- */
- memcpy(inbuf, in, inlen);
-
- for (i = 0; i < inbuflen; i++)
- ipc_data_writel(scu, inbuf[i], 4 * i);
-
- ipc_command(scu, (inlen << 16) | (sub << 12) | cmd);
- err = intel_scu_ipc_check_status(scu);
- if (!err) {
- for (i = 0; i < outlen; i++)
- *out++ = ipc_data_readl(scu, 4 * i);
- }
-
- mutex_unlock(&ipclock);
- return err;
-}
-EXPORT_SYMBOL_GPL(intel_scu_ipc_raw_command);
-
-/* I2C commands */
-#define IPC_I2C_WRITE 1 /* I2C Write command */
-#define IPC_I2C_READ 2 /* I2C Read command */
-
-/**
- * intel_scu_ipc_i2c_cntrl - I2C read/write operations
- * @addr: I2C address + command bits
- * @data: data to read/write
- *
- * Perform an an I2C read/write operation via the SCU. All locking is
- * handled for the caller. This function may sleep.
- *
- * Returns an error code or 0 on success.
- *
- * This has to be in the IPC driver for the locking.
- */
-int intel_scu_ipc_i2c_cntrl(u32 addr, u32 *data)
-{
- struct intel_scu_ipc_dev *scu = &ipcdev;
- u32 cmd = 0;
-
- mutex_lock(&ipclock);
- if (scu->dev == NULL) {
- mutex_unlock(&ipclock);
- return -ENODEV;
- }
- cmd = (addr >> 24) & 0xFF;
- if (cmd == IPC_I2C_READ) {
- writel(addr, scu->i2c_base + IPC_I2C_CNTRL_ADDR);
- /* Write not getting updated without delay */
- usleep_range(1000, 2000);
- *data = readl(scu->i2c_base + I2C_DATA_ADDR);
- } else if (cmd == IPC_I2C_WRITE) {
- writel(*data, scu->i2c_base + I2C_DATA_ADDR);
- usleep_range(1000, 2000);
- writel(addr, scu->i2c_base + IPC_I2C_CNTRL_ADDR);
- } else {
- dev_err(scu->dev,
- "intel_scu_ipc: I2C INVALID_CMD = 0x%x\n", cmd);
-
- mutex_unlock(&ipclock);
- return -EIO;
- }
- mutex_unlock(&ipclock);
- return 0;
-}
-EXPORT_SYMBOL(intel_scu_ipc_i2c_cntrl);
-
/*
 * The interrupt handler is called when the ioc bit of IPC_COMMAND_REG is set to 1.
 * With the ioc bit set, the caller must wait for the interrupt handler to be called
static irqreturn_t ioc(int irq, void *dev_id)
{
struct intel_scu_ipc_dev *scu = dev_id;
+ int status = ipc_read_status(scu);
- if (scu->irq_mode)
- complete(&scu->cmd_complete);
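+	/* Clear the IRQ bit in the status register to acknowledge the interrupt. */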
+ writel(status | IPC_STATUS_IRQ, scu->ipc_base + IPC_STATUS);
+ complete(&scu->cmd_complete);
return IRQ_HANDLED;
}
{
int err;
struct intel_scu_ipc_dev *scu = &ipcdev;
- struct intel_scu_ipc_pdata_t *pdata;
if (scu->dev) /* We support only one SCU */
return -EBUSY;
- pdata = (struct intel_scu_ipc_pdata_t *)id->driver_data;
- if (!pdata)
- return -ENODEV;
-
- scu->irq_mode = pdata->irq_mode;
-
err = pcim_enable_device(pdev);
if (err)
return err;
scu->ipc_base = pcim_iomap_table(pdev)[0];
- scu->i2c_base = ioremap(pdata->i2c_base, pdata->i2c_len);
- if (!scu->i2c_base)
- return -ENOMEM;
-
err = devm_request_irq(&pdev->dev, pdev->irq, ioc, 0, "intel_scu_ipc",
scu);
if (err)
return 0;
}
-#define SCU_DEVICE(id, pdata) {PCI_VDEVICE(INTEL, id), (kernel_ulong_t)&pdata}
-
static const struct pci_device_id pci_ids[] = {
- SCU_DEVICE(PCI_DEVICE_ID_LINCROFT, intel_scu_ipc_lincroft_pdata),
- SCU_DEVICE(PCI_DEVICE_ID_PENWELL, intel_scu_ipc_penwell_pdata),
- SCU_DEVICE(PCI_DEVICE_ID_CLOVERVIEW, intel_scu_ipc_penwell_pdata),
- SCU_DEVICE(PCI_DEVICE_ID_TANGIER, intel_scu_ipc_tangier_pdata),
+ { PCI_VDEVICE(INTEL, 0x080e) },
+ { PCI_VDEVICE(INTEL, 0x08ea) },
+ { PCI_VDEVICE(INTEL, 0x11a0) },
{}
};
{0x7F, 0x00, 0x0B},
{0x7F, 0x10, 0x12},
{0x7F, 0x20, 0x23},
+ {0x94, 0x03, 0x03},
+ {0x95, 0x03, 0x03},
};
static const struct isst_cmd_set_req_type isst_cmd_set_reqs[] = {
{0xD0, 0x03, 0x08},
{0x7F, 0x02, 0x00},
{0x7F, 0x08, 0x00},
+ {0x95, 0x03, 0x03},
};
struct isst_cmd {
u32 verbosity;
int err;
- if (kstrtou32_from_user(userbuf, count, 0, &verbosity))
- return -EFAULT;
+ err = kstrtou32_from_user(userbuf, count, 0, &verbosity);
+ if (err)
+ return err;
err = telemetry_set_trace_verbosity(TELEM_PSS, verbosity);
if (err) {
pr_err("Changing PSS Trace Verbosity Failed. Error %d\n", err);
- count = err;
+ return err;
}
return count;
u32 verbosity;
int err;
- if (kstrtou32_from_user(userbuf, count, 0, &verbosity))
- return -EFAULT;
+ err = kstrtou32_from_user(userbuf, count, 0, &verbosity);
+ if (err)
+ return err;
err = telemetry_set_trace_verbosity(TELEM_IOSS, verbosity);
if (err) {
pr_err("Changing IOSS Trace Verbosity Failed. Error %d\n", err);
- count = err;
+ return err;
}
return count;
static int telemetry_pltdrv_probe(struct platform_device *pdev)
{
- struct resource *res0 = NULL, *res1 = NULL;
const struct x86_cpu_id *id;
- int size, ret = -ENOMEM;
+ void __iomem *mem;
+ int ret;
id = x86_match_cpu(telemetry_cpu_ids);
if (!id)
telm_conf = (struct telemetry_plt_config *)id->driver_data;
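+	/*
+	 * devm_platform_ioremap_resource() both requests and ioremaps the
+	 * region as a managed resource, replacing the open-coded
+	 * request_mem_region()/ioremap() pairs and their unwind paths.
+	 */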
- res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res0) {
- ret = -EINVAL;
- goto out;
- }
- size = resource_size(res0);
- if (!devm_request_mem_region(&pdev->dev, res0->start, size,
- pdev->name)) {
- ret = -EBUSY;
- goto out;
- }
- telm_conf->pss_config.ssram_base_addr = res0->start;
- telm_conf->pss_config.ssram_size = size;
+ mem = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mem))
+ return PTR_ERR(mem);
- res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res1) {
- ret = -EINVAL;
- goto out;
- }
- size = resource_size(res1);
- if (!devm_request_mem_region(&pdev->dev, res1->start, size,
- pdev->name)) {
- ret = -EBUSY;
- goto out;
- }
+ telm_conf->pss_config.regmap = mem;
- telm_conf->ioss_config.ssram_base_addr = res1->start;
- telm_conf->ioss_config.ssram_size = size;
+ mem = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(mem))
+ return PTR_ERR(mem);
- telm_conf->pss_config.regmap = ioremap(
- telm_conf->pss_config.ssram_base_addr,
- telm_conf->pss_config.ssram_size);
- if (!telm_conf->pss_config.regmap) {
- ret = -ENOMEM;
- goto out;
- }
-
- telm_conf->ioss_config.regmap = ioremap(
- telm_conf->ioss_config.ssram_base_addr,
- telm_conf->ioss_config.ssram_size);
- if (!telm_conf->ioss_config.regmap) {
- ret = -ENOMEM;
- goto out;
- }
+ telm_conf->ioss_config.regmap = mem;
mutex_init(&telm_conf->telem_lock);
mutex_init(&telm_conf->telem_trace_lock);
return 0;
out:
- if (res0)
- release_mem_region(res0->start, resource_size(res0));
- if (res1)
- release_mem_region(res1->start, resource_size(res1));
- if (telm_conf->pss_config.regmap)
- iounmap(telm_conf->pss_config.regmap);
- if (telm_conf->ioss_config.regmap)
- iounmap(telm_conf->ioss_config.regmap);
dev_err(&pdev->dev, "TELEMETRY Setup Failed.\n");
return ret;
static int telemetry_pltdrv_remove(struct platform_device *pdev)
{
telemetry_clear_pltdata();
- iounmap(telm_conf->pss_config.regmap);
- iounmap(telm_conf->ioss_config.regmap);
-
return 0;
}
#define MLXPLAT_CPLD_LPC_REG_LED4_OFFSET 0x23
#define MLXPLAT_CPLD_LPC_REG_LED5_OFFSET 0x24
#define MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION 0x2a
+#define MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET 0x2b
+#define MLXPLAT_CPLD_LPC_REG_GP0_OFFSET 0x2e
#define MLXPLAT_CPLD_LPC_REG_GP1_OFFSET 0x30
#define MLXPLAT_CPLD_LPC_REG_WP1_OFFSET 0x31
#define MLXPLAT_CPLD_LPC_REG_GP2_OFFSET 0x32
#define MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET 0x41
#define MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET 0x42
#define MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET 0x43
+#define MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET 0x44
+#define MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET 0x45
#define MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET 0x50
#define MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET 0x51
#define MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET 0x52
#define MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET 0xd1
#define MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET 0xd2
#define MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET 0xd3
+#define MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET 0xe2
#define MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET 0xe3
#define MLXPLAT_CPLD_LPC_REG_TACHO1_OFFSET 0xe4
#define MLXPLAT_CPLD_LPC_REG_TACHO2_OFFSET 0xe5
#define MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET 0xf6
#define MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET 0xf7
#define MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET 0xf8
+#define MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET 0xf9
+#define MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET 0xfb
+#define MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET 0xfc
#define MLXPLAT_CPLD_LPC_IO_RANGE 0x100
#define MLXPLAT_CPLD_LPC_I2C_CH1_OFF 0xdb
#define MLXPLAT_CPLD_LPC_I2C_CH2_OFF 0xda
+#define MLXPLAT_CPLD_LPC_I2C_CH3_OFF 0xdc
#define MLXPLAT_CPLD_LPC_PIO_OFFSET 0x10000UL
#define MLXPLAT_CPLD_LPC_REG1 ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \
#define MLXPLAT_CPLD_LPC_REG2 ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \
MLXPLAT_CPLD_LPC_I2C_CH2_OFF) | \
MLXPLAT_CPLD_LPC_PIO_OFFSET)
+#define MLXPLAT_CPLD_LPC_REG3 ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \
+ MLXPLAT_CPLD_LPC_I2C_CH3_OFF) | \
+ MLXPLAT_CPLD_LPC_PIO_OFFSET)
/* Masks for aggregation, psu, pwr and fan event in CPLD related registers. */
#define MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF 0x04
#define MLXPLAT_CPLD_LOW_AGGR_MASK_I2C BIT(6)
#define MLXPLAT_CPLD_PSU_MASK GENMASK(1, 0)
#define MLXPLAT_CPLD_PWR_MASK GENMASK(1, 0)
+#define MLXPLAT_CPLD_PSU_EXT_MASK GENMASK(3, 0)
+#define MLXPLAT_CPLD_PWR_EXT_MASK GENMASK(3, 0)
#define MLXPLAT_CPLD_FAN_MASK GENMASK(3, 0)
#define MLXPLAT_CPLD_ASIC_MASK GENMASK(1, 0)
#define MLXPLAT_CPLD_FAN_NG_MASK GENMASK(5, 0)
#define MLXPLAT_CPLD_LED_LO_NIBBLE_MASK GENMASK(7, 4)
#define MLXPLAT_CPLD_LED_HI_NIBBLE_MASK GENMASK(3, 0)
+#define MLXPLAT_CPLD_VOLTREG_UPD_MASK GENMASK(5, 4)
+#define MLXPLAT_CPLD_I2C_CAP_BIT 0x04
+#define MLXPLAT_CPLD_I2C_CAP_MASK GENMASK(5, MLXPLAT_CPLD_I2C_CAP_BIT)
+
+/* Masks for aggregation for comex carriers */
+#define MLXPLAT_CPLD_AGGR_MASK_CARRIER BIT(1)
+#define MLXPLAT_CPLD_AGGR_MASK_CARR_DEF (MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF | \
+ MLXPLAT_CPLD_AGGR_MASK_CARRIER)
+#define MLXPLAT_CPLD_LOW_AGGRCX_MASK 0xc1
/* Default I2C parent bus number */
#define MLXPLAT_CPLD_PHYS_ADAPTER_DEF_NR 1
/* Maximum number of possible physical buses equipped on system */
#define MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM 16
+#define MLXPLAT_CPLD_MAX_PHYS_EXT_ADAPTER_NUM 24
/* Number of channels in group */
#define MLXPLAT_CPLD_GRP_CHNL_NUM 8
/* Start channel numbers */
#define MLXPLAT_CPLD_CH1 2
#define MLXPLAT_CPLD_CH2 10
+#define MLXPLAT_CPLD_CH3 18
/* Number of LPC attached MUX platform devices */
-#define MLXPLAT_CPLD_LPC_MUX_DEVS 2
+#define MLXPLAT_CPLD_LPC_MUX_DEVS 3
/* Hotplug devices adapter numbers */
#define MLXPLAT_CPLD_NR_NONE -1
#define MLXPLAT_CPLD_PSU_DEFAULT_NR 10
#define MLXPLAT_CPLD_PSU_MSNXXXX_NR 4
+#define MLXPLAT_CPLD_PSU_MSNXXXX_NR2 3
#define MLXPLAT_CPLD_FAN1_DEFAULT_NR 11
#define MLXPLAT_CPLD_FAN2_DEFAULT_NR 12
#define MLXPLAT_CPLD_FAN3_DEFAULT_NR 13
IORESOURCE_IO),
};
+/* Platform i2c next generation systems data */
+static struct mlxreg_core_data mlxplat_mlxcpld_i2c_ng_items_data[] = {
+ {
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+ .mask = MLXPLAT_CPLD_I2C_CAP_MASK,
+ .bit = MLXPLAT_CPLD_I2C_CAP_BIT,
+ },
+};
+
+static struct mlxreg_core_item mlxplat_mlxcpld_i2c_ng_items[] = {
+ {
+ .data = mlxplat_mlxcpld_i2c_ng_items_data,
+ },
+};
+
/* Platform next generation systems i2c data */
static struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_i2c_ng_data = {
+ .items = mlxplat_mlxcpld_i2c_ng_items,
.cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
.mask = MLXPLAT_CPLD_AGGR_MASK_COMEX,
.cell_low = MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET,
static const int mlxplat_msn21xx_channels[] = { 1, 2, 3, 4, 5, 6, 7, 8 };
/* Platform mux data */
-static struct i2c_mux_reg_platform_data mlxplat_mux_data[] = {
+static struct i2c_mux_reg_platform_data mlxplat_default_mux_data[] = {
{
.parent = 1,
.base_nr = MLXPLAT_CPLD_CH1,
};
+/* Platform mux configuration variables */
+static int mlxplat_max_adap_num;
+static int mlxplat_mux_num;
+static struct i2c_mux_reg_platform_data *mlxplat_mux_data;
+
+/* Platform extended mux data */
+static struct i2c_mux_reg_platform_data mlxplat_extended_mux_data[] = {
+ {
+ .parent = 1,
+ .base_nr = MLXPLAT_CPLD_CH1,
+ .write_only = 1,
+ .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG1,
+ .reg_size = 1,
+ .idle_in_use = 1,
+ },
+ {
+ .parent = 1,
+ .base_nr = MLXPLAT_CPLD_CH2,
+ .write_only = 1,
+ .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG3,
+ .reg_size = 1,
+ .idle_in_use = 1,
+ },
+ {
+ .parent = 1,
+ .base_nr = MLXPLAT_CPLD_CH3,
+ .write_only = 1,
+ .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG2,
+ .reg_size = 1,
+ .idle_in_use = 1,
+ },
+};
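+
+/*
+ * Three muxes of MLXPLAT_CPLD_GRP_CHNL_NUM (8) channels each, based at
+ * channels 2, 10 and 18, provide the MLXPLAT_CPLD_MAX_PHYS_EXT_ADAPTER_NUM
+ * (24) physical adapters of the extended systems.
+ */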
+
/* Platform hotplug devices */
static struct i2c_board_info mlxplat_mlxcpld_psu[] = {
{
},
};
+/* Platform hotplug comex carrier system family data */
+static struct mlxreg_core_data mlxplat_mlxcpld_comex_psu_items_data[] = {
+ {
+ .label = "psu1",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(0),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "psu2",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(1),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+};
+
/* Platform hotplug default data */
static struct mlxreg_core_data mlxplat_mlxcpld_default_psu_items_data[] = {
{
},
};
+static struct mlxreg_core_item mlxplat_mlxcpld_comex_items[] = {
+ {
+ .data = mlxplat_mlxcpld_comex_psu_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_CARRIER,
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = MLXPLAT_CPLD_PSU_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_psu),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_pwr_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_CARRIER,
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = MLXPLAT_CPLD_PWR_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_pwr),
+ .inversed = 0,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_fan_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_CARRIER,
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = MLXPLAT_CPLD_FAN_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_fan),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_asic_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic_items_data),
+ .inversed = 0,
+ .health = true,
+ },
+};
+
static
struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_default_data = {
.items = mlxplat_mlxcpld_default_items,
.mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
};
+static
+struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_comex_data = {
+ .items = mlxplat_mlxcpld_comex_items,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_comex_items),
+ .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
+ .mask = MLXPLAT_CPLD_AGGR_MASK_CARR_DEF,
+ .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET,
+ .mask_low = MLXPLAT_CPLD_LOW_AGGRCX_MASK,
+};
+
static struct mlxreg_core_data mlxplat_mlxcpld_msn21xx_pwr_items_data[] = {
{
.label = "pwr1",
.mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
};
+/* Platform hotplug extended system family data */
+static struct mlxreg_core_data mlxplat_mlxcpld_ext_psu_items_data[] = {
+ {
+ .label = "psu1",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(0),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "psu2",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(1),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "psu3",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(2),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "psu4",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(3),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+};
+
+static struct mlxreg_core_data mlxplat_mlxcpld_ext_pwr_items_data[] = {
+ {
+ .label = "pwr1",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(0),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[0],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
+ },
+ {
+ .label = "pwr2",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(1),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[1],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
+ },
+ {
+ .label = "pwr3",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(2),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[0],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR2,
+ },
+ {
+ .label = "pwr4",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(3),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[1],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR2,
+ },
+};
+
+static struct mlxreg_core_item mlxplat_mlxcpld_ext_items[] = {
+ {
+ .data = mlxplat_mlxcpld_ext_psu_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = MLXPLAT_CPLD_PSU_EXT_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_ext_psu_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_ext_pwr_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = MLXPLAT_CPLD_PWR_EXT_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_ext_pwr_items_data),
+ .inversed = 0,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_ng_fan_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = MLXPLAT_CPLD_FAN_NG_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_fan_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_asic_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic_items_data),
+ .inversed = 0,
+ .health = true,
+ },
+};
+
+static
+struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_ext_data = {
+ .items = mlxplat_mlxcpld_ext_items,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_ext_items),
+ .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
+ .mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF | MLXPLAT_CPLD_AGGR_MASK_COMEX,
+ .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
+ .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
+};
+
/* Platform led default data */
static struct mlxreg_core_data mlxplat_mlxcpld_default_led_data[] = {
{
.counter = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_led_data),
};
+/* Platform led for Comex based 100GbE systems */
+static struct mlxreg_core_data mlxplat_mlxcpld_comex_100G_led_data[] = {
+ {
+ .label = "status:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "status:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+		.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "psu:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+ {
+ .label = "psu:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+ {
+ .label = "fan1:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "fan1:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "fan2:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+ {
+ .label = "fan2:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+ {
+ .label = "fan3:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "fan3:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "fan4:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+ {
+ .label = "fan4:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+ {
+ .label = "uid:blue",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED5_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+};
+
+static struct mlxreg_core_platform_data mlxplat_comex_100G_led_data = {
+ .data = mlxplat_mlxcpld_comex_100G_led_data,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_comex_100G_led_data),
+};
+
/* Platform register access default */
static struct mlxreg_core_data mlxplat_mlxcpld_default_regs_io_data[] = {
{
.mask = GENMASK(7, 0) & ~BIT(3),
.mode = 0200,
},
+ {
+ .label = "select_iio",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0644,
+ },
{
.label = "asic_health",
.reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(3),
.mode = 0444,
},
+ {
+ .label = "reset_platform",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_soc",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0444,
+ },
{
.label = "reset_comex_wd",
.reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(1),
.mode = 0444,
},
+ {
+ .label = "reset_sw_pwr_off",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0444,
+ },
{
.label = "reset_comex_thermal",
.reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(5),
.mode = 0444,
},
+ {
+ .label = "reset_ac_pwr_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0444,
+ },
{
.label = "psu1_on",
.reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0444,
},
+ {
+ .label = "voltreg_update_status",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET,
+ .mask = MLXPLAT_CPLD_VOLTREG_UPD_MASK,
+ .bit = 5,
+ .mode = 0444,
+ },
+ {
+ .label = "vpd_wp",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0644,
+ },
+ {
+ .label = "pcie_asic_reset_dis",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0644,
+ },
+ {
+ .label = "config1",
+ .reg = MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "config2",
+ .reg = MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "ufm_version",
+ .reg = MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
};
static struct mlxreg_core_platform_data mlxplat_default_ng_regs_io_data = {
case MLXPLAT_CPLD_LPC_REG_LED3_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GP0_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGR_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION:
+ case MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GP0_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET:
return true;
}
return false;
case MLXPLAT_CPLD_LPC_REG_LED4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION:
+ case MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GP0_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET:
return true;
}
return false;
{ MLXPLAT_CPLD_LPC_REG_WD_CLEAR_WP_OFFSET, 0x00 },
};
+static const struct reg_default mlxplat_mlxcpld_regmap_comex_default[] = {
+ { MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET,
+ MLXPLAT_CPLD_LOW_AGGRCX_MASK },
+ { MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET, 0x00 },
+};
+
+static const struct reg_default mlxplat_mlxcpld_regmap_ng400[] = {
+ { MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET, 0x00 },
+ { MLXPLAT_CPLD_LPC_REG_WD1_ACT_OFFSET, 0x00 },
+ { MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET, 0x00 },
+ { MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET, 0x00 },
+};
+
struct mlxplat_mlxcpld_regmap_context {
void __iomem *base;
};
.reg_write = mlxplat_mlxcpld_reg_write,
};
+static const struct regmap_config mlxplat_mlxcpld_regmap_config_comex = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 255,
+ .cache_type = REGCACHE_FLAT,
+ .writeable_reg = mlxplat_mlxcpld_writeable_reg,
+ .readable_reg = mlxplat_mlxcpld_readable_reg,
+ .volatile_reg = mlxplat_mlxcpld_volatile_reg,
+ .reg_defaults = mlxplat_mlxcpld_regmap_comex_default,
+ .num_reg_defaults = ARRAY_SIZE(mlxplat_mlxcpld_regmap_comex_default),
+ .reg_read = mlxplat_mlxcpld_reg_read,
+ .reg_write = mlxplat_mlxcpld_reg_write,
+};
+
+static const struct regmap_config mlxplat_mlxcpld_regmap_config_ng400 = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 255,
+ .cache_type = REGCACHE_FLAT,
+ .writeable_reg = mlxplat_mlxcpld_writeable_reg,
+ .readable_reg = mlxplat_mlxcpld_readable_reg,
+ .volatile_reg = mlxplat_mlxcpld_volatile_reg,
+ .reg_defaults = mlxplat_mlxcpld_regmap_ng400,
+ .num_reg_defaults = ARRAY_SIZE(mlxplat_mlxcpld_regmap_ng400),
+ .reg_read = mlxplat_mlxcpld_reg_read,
+ .reg_write = mlxplat_mlxcpld_reg_write,
+};
+
static struct resource mlxplat_mlxcpld_resources[] = {
[0] = DEFINE_RES_IRQ_NAMED(17, "mlxreg-hotplug"),
};
{
int i;
- for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+ mlxplat_mux_data = mlxplat_default_mux_data;
+ for (i = 0; i < mlxplat_mux_num; i++) {
mlxplat_mux_data[i].values = mlxplat_default_channels[i];
mlxplat_mux_data[i].n_values =
ARRAY_SIZE(mlxplat_default_channels[i]);
mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
return 1;
-};
+}
static int __init mlxplat_dmi_msn21xx_matched(const struct dmi_system_id *dmi)
{
int i;
- for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+ mlxplat_mux_data = mlxplat_default_mux_data;
+ for (i = 0; i < mlxplat_mux_num; i++) {
mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
mlxplat_mux_data[i].n_values =
ARRAY_SIZE(mlxplat_msn21xx_channels);
mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
return 1;
-};
+}
static int __init mlxplat_dmi_msn274x_matched(const struct dmi_system_id *dmi)
{
int i;
- for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+ mlxplat_mux_data = mlxplat_default_mux_data;
+ for (i = 0; i < mlxplat_mux_num; i++) {
mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
mlxplat_mux_data[i].n_values =
ARRAY_SIZE(mlxplat_msn21xx_channels);
mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
return 1;
-};
+}
static int __init mlxplat_dmi_msn201x_matched(const struct dmi_system_id *dmi)
{
int i;
- for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+ mlxplat_mux_data = mlxplat_default_mux_data;
+ for (i = 0; i < mlxplat_mux_num; i++) {
mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
mlxplat_mux_data[i].n_values =
ARRAY_SIZE(mlxplat_msn21xx_channels);
mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
return 1;
-};
+}
static int __init mlxplat_dmi_qmb7xx_matched(const struct dmi_system_id *dmi)
{
int i;
- for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+ mlxplat_mux_data = mlxplat_default_mux_data;
+ for (i = 0; i < mlxplat_mux_num; i++) {
mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
mlxplat_mux_data[i].n_values =
ARRAY_SIZE(mlxplat_msn21xx_channels);
mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_ng;
return 1;
-};
+}
+
+static int __init mlxplat_dmi_comex_matched(const struct dmi_system_id *dmi)
+{
+ int i;
+
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_EXT_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_extended_mux_data);
+ mlxplat_mux_data = mlxplat_extended_mux_data;
+ for (i = 0; i < mlxplat_mux_num; i++) {
+ mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
+ mlxplat_mux_data[i].n_values =
+ ARRAY_SIZE(mlxplat_msn21xx_channels);
+ }
+ mlxplat_hotplug = &mlxplat_mlxcpld_comex_data;
+ mlxplat_hotplug->deferred_nr = MLXPLAT_CPLD_MAX_PHYS_EXT_ADAPTER_NUM;
+ mlxplat_led = &mlxplat_comex_100G_led_data;
+ mlxplat_regs_io = &mlxplat_default_ng_regs_io_data;
+ mlxplat_fan = &mlxplat_default_fan_data;
+ for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type2); i++)
+ mlxplat_wd_data[i] = &mlxplat_mlxcpld_wd_set_type2[i];
+ mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_comex;
+
+ return 1;
+}
+
+static int __init mlxplat_dmi_ng400_matched(const struct dmi_system_id *dmi)
+{
+ int i;
+
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+ mlxplat_mux_data = mlxplat_default_mux_data;
+ for (i = 0; i < mlxplat_mux_num; i++) {
+ mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
+ mlxplat_mux_data[i].n_values =
+ ARRAY_SIZE(mlxplat_msn21xx_channels);
+ }
+ mlxplat_hotplug = &mlxplat_mlxcpld_ext_data;
+ mlxplat_hotplug->deferred_nr =
+ mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
+ mlxplat_led = &mlxplat_default_ng_led_data;
+ mlxplat_regs_io = &mlxplat_default_ng_regs_io_data;
+ mlxplat_fan = &mlxplat_default_fan_data;
+ for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type2); i++)
+ mlxplat_wd_data[i] = &mlxplat_mlxcpld_wd_set_type2[i];
+ mlxplat_i2c = &mlxplat_mlxcpld_i2c_ng_data;
+ mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_ng400;
+
+ return 1;
+}
static const struct dmi_system_id mlxplat_dmi_table[] __initconst = {
{
DMI_MATCH(DMI_BOARD_NAME, "VMOD0007"),
},
},
+ {
+ .callback = mlxplat_dmi_comex_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "VMOD0009"),
+ },
+ },
+ {
+ .callback = mlxplat_dmi_ng400_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "VMOD0010"),
+ },
+ },
{
.callback = mlxplat_dmi_msn274x_matched,
.matches = {
/* Scan adapters from expected id to verify it is free. */
*nr = MLXPLAT_CPLD_PHYS_ADAPTER_DEF_NR;
for (i = MLXPLAT_CPLD_PHYS_ADAPTER_DEF_NR; i <
- MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM; i++) {
+ mlxplat_max_adap_num; i++) {
search_adap = i2c_get_adapter(i);
if (search_adap) {
i2c_put_adapter(search_adap);
}
/* Return with error if free id for adapter is not found. */
- if (i == MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM)
+ if (i == mlxplat_max_adap_num)
return -ENODEV;
/* Shift adapter ids, since expected parent adapter is not free. */
*nr = i;
- for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+ for (i = 0; i < mlxplat_mux_num; i++) {
shift = *nr - mlxplat_mux_data[i].parent;
mlxplat_mux_data[i].parent = *nr;
mlxplat_mux_data[i].base_nr += shift;
if (nr < 0)
goto fail_alloc;
- nr = (nr == MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM) ? -1 : nr;
+ nr = (nr == mlxplat_max_adap_num) ? -1 : nr;
if (mlxplat_i2c)
mlxplat_i2c->regmap = priv->regmap;
priv->pdev_i2c = platform_device_register_resndata(
goto fail_alloc;
}
- for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+ for (i = 0; i < mlxplat_mux_num; i++) {
priv->pdev_mux[i] = platform_device_register_resndata(
&priv->pdev_i2c->dev,
"i2c-mux-reg", i, NULL,
platform_device_unregister(priv->pdev_led);
platform_device_unregister(priv->pdev_hotplug);
- for (i = ARRAY_SIZE(mlxplat_mux_data) - 1; i >= 0 ; i--)
+ for (i = mlxplat_mux_num - 1; i >= 0 ; i--)
platform_device_unregister(priv->pdev_mux[i]);
platform_device_unregister(priv->pdev_i2c);
PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl1686-digma_citi_e200.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-digma_citi_e200.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
PROPERTY_ENTRY_U32("touchscreen-size-y", 640),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl1680-gp-electronic-t701.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-gp-electronic-t701.fw"),
{ }
};
PROPERTY_ENTRY_U32("touchscreen-size-x", 1698),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl3676-onda-v80-plus-v3.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-onda-v80-plus-v3.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
PROPERTY_ENTRY_U32("touchscreen-size-x", 1665),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl1680-onda-v820w-32g.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-onda-v820w-32g.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
PROPERTY_ENTRY_U32("touchscreen-min-y", 8),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1676),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1130),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl3680-onda-v891w-v1.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-onda-v891w-v1.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
PROPERTY_ENTRY_U32("touchscreen-size-x", 1625),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1135),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl3676-onda-v891w-v3.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-onda-v891w-v3.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
PROPERTY_ENTRY_U32("touchscreen-size-y", 880),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl1680-pipo-w2s.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-pipo-w2s.fw"),
{ }
};
.properties = pipo_w2s_props,
};
+static const struct property_entry pipo_w11_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 1),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 15),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1984),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1532),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-pipo-w11.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data pipo_w11_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = pipo_w11_props,
+};
+
static const struct property_entry pov_mobii_wintab_p800w_v20_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 32),
PROPERTY_ENTRY_U32("touchscreen-min-y", 16),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1692),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1146),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl3680-pov-mobii-wintab-p800w-v20.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-pov-mobii-wintab-p800w-v20.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
PROPERTY_ENTRY_U32("touchscreen-size-x", 1794),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1148),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl3692-pov-mobii-wintab-p800w.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-pov-mobii-wintab-p800w.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
PROPERTY_ENTRY_U32("touchscreen-size-x", 1984),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1520),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl3692-pov-mobii-wintab-p1006w-v10.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-pov-mobii-wintab-p1006w-v10.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl1680-schneider-sct101ctm.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-schneider-sct101ctm.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl1686-teclast_x98plus2.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-teclast_x98plus2.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
PROPERTY_ENTRY_U32("touchscreen-size-x", 1970),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1530),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl1680-trekstor-primebook-c11.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primebook-c11.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
static const struct property_entry trekstor_primebook_c13_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 2624),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1920),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl1680-trekstor-primebook-c13.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primebook-c13.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
static const struct property_entry trekstor_primetab_t13b_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 2500),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1900),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl1680-trekstor-primetab-t13b.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primetab-t13b.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1900),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
PROPERTY_ENTRY_U32("touchscreen-inverted-y", 1),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl3670-surftab-twin-10-1-st10432-8.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3670-surftab-twin-10-1-st10432-8.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
PROPERTY_ENTRY_U32("touchscreen-min-y", 8),
PROPERTY_ENTRY_U32("touchscreen-size-x", 884),
PROPERTY_ENTRY_U32("touchscreen-size-y", 632),
- PROPERTY_ENTRY_STRING("firmware-name",
- "gsl1686-surftab-wintron70-st70416-6.fw"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-surftab-wintron70-st70416-6.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
DMI_MATCH(DMI_PRODUCT_NAME, "W2S"),
},
},
+ {
+ /* Pipo W11 */
+ .driver_data = (void *)&pipo_w11_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PIPO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "To be filled by O.E.M."),
+ /* Above matches are too generic, add bios-ver match */
+ DMI_MATCH(DMI_BIOS_VERSION, "JS-BI-10.6-SF133GR300-GA55B-024-F"),
+ },
+ },
{
/* Ployer Momo7w (same hardware as the Trekstor ST70416-6) */
.driver_data = (void *)&trekstor_surftab_wintron70_data,
.driver_data = (void *)&trekstor_surftab_wintron70_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TrekStor"),
- DMI_MATCH(DMI_PRODUCT_NAME,
- "SurfTab wintron 7.0 ST70416-6"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SurfTab wintron 7.0 ST70416-6"),
/* Exact match, different versions need different fw */
DMI_MATCH(DMI_BIOS_VERSION, "TREK.G.WI71C.JGBMRBA05"),
},
}
static int ts_dmi_notifier_call(struct notifier_block *nb,
- unsigned long action, void *data)
+ unsigned long action, void *data)
{
struct device *dev = data;
struct i2c_client *client;
}
}
-/*
- * Compute ISA PnP checksum for first eight bytes.
- */
-static unsigned char __init isapnp_checksum(unsigned char *data)
-{
- int i, j;
- unsigned char checksum = 0x6a, bit, b;
-
- for (i = 0; i < 8; i++) {
- b = data[i];
- for (j = 0; j < 8; j++) {
- bit = 0;
- if (b & (1 << j))
- bit = 1;
- checksum =
- ((((checksum ^ (checksum >> 1)) & 0x01) ^ bit) << 7)
- | (checksum >> 1);
- }
- }
- return checksum;
-}
-
/*
* Build device list for all present ISA PnP devices.
*/
static int __init isapnp_build_device_list(void)
{
int csn;
- unsigned char header[9], checksum;
+ unsigned char header[9];
struct pnp_card *card;
u32 eisa_id;
char id[8];
for (csn = 1; csn <= isapnp_csn_count; csn++) {
isapnp_wake(csn);
isapnp_peek(header, 9);
- checksum = isapnp_checksum(header);
eisa_id = header[0] | header[1] << 8 |
header[2] << 16 | header[3] << 24;
pnp_eisa_id_to_string(eisa_id, id);
Say Y here to enable Adaptive Voltage Scaling class support.
+config QCOM_CPR
+ tristate "QCOM Core Power Reduction (CPR) support"
+ depends on POWER_AVS
+ select PM_OPP
+ select REGMAP
+ help
+ Say Y here to enable support for the CPR hardware found on Qualcomm
+ SoCs like QCS404.
+
+	  This driver populates the CPU OPP tables and adjusts them
+	  based on feedback from the CPR hardware. If you want to do
+	  CPU frequency scaling, say Y here.
+
+ To compile this driver as a module, choose M here: the module will
+	  be called qcom-cpr.
+
config ROCKCHIP_IODOMAIN
tristate "Rockchip IO domain support"
depends on POWER_AVS && ARCH_ROCKCHIP && OF
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_POWER_AVS_OMAP) += smartreflex.o
+obj-$(CONFIG_QCOM_CPR) += qcom-cpr.o
obj-$(CONFIG_ROCKCHIP_IODOMAIN) += rockchip-io-domain.o
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2019, Linaro Limited
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/debugfs.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_opp.h>
+#include <linux/interrupt.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk.h>
+#include <linux/nvmem-consumer.h>
+
+/* Register Offsets for RB-CPR and Bit Definitions */
+
+/* RBCPR Version Register */
+#define REG_RBCPR_VERSION 0
+#define RBCPR_VER_2 0x02
+#define FLAGS_IGNORE_1ST_IRQ_STATUS BIT(0)
+
+/* RBCPR Gate Count and Target Registers */
+#define REG_RBCPR_GCNT_TARGET(n) (0x60 + 4 * (n))
+
+#define RBCPR_GCNT_TARGET_TARGET_SHIFT 0
+#define RBCPR_GCNT_TARGET_TARGET_MASK GENMASK(11, 0)
+#define RBCPR_GCNT_TARGET_GCNT_SHIFT 12
+#define RBCPR_GCNT_TARGET_GCNT_MASK GENMASK(9, 0)
+
+/* RBCPR Timer Control */
+#define REG_RBCPR_TIMER_INTERVAL 0x44
+#define REG_RBIF_TIMER_ADJUST 0x4c
+
+#define RBIF_TIMER_ADJ_CONS_UP_MASK GENMASK(3, 0)
+#define RBIF_TIMER_ADJ_CONS_UP_SHIFT 0
+#define RBIF_TIMER_ADJ_CONS_DOWN_MASK GENMASK(3, 0)
+#define RBIF_TIMER_ADJ_CONS_DOWN_SHIFT 4
+#define RBIF_TIMER_ADJ_CLAMP_INT_MASK GENMASK(7, 0)
+#define RBIF_TIMER_ADJ_CLAMP_INT_SHIFT 8
+
+/* RBCPR Config Register */
+#define REG_RBIF_LIMIT 0x48
+#define RBIF_LIMIT_CEILING_MASK GENMASK(5, 0)
+#define RBIF_LIMIT_CEILING_SHIFT 6
+#define RBIF_LIMIT_FLOOR_BITS 6
+#define RBIF_LIMIT_FLOOR_MASK GENMASK(5, 0)
+
+#define RBIF_LIMIT_CEILING_DEFAULT RBIF_LIMIT_CEILING_MASK
+#define RBIF_LIMIT_FLOOR_DEFAULT 0
+
+#define REG_RBIF_SW_VLEVEL 0x94
+#define RBIF_SW_VLEVEL_DEFAULT 0x20
+
+#define REG_RBCPR_STEP_QUOT 0x80
+#define RBCPR_STEP_QUOT_STEPQUOT_MASK GENMASK(7, 0)
+#define RBCPR_STEP_QUOT_IDLE_CLK_MASK GENMASK(3, 0)
+#define RBCPR_STEP_QUOT_IDLE_CLK_SHIFT 8
+
+/* RBCPR Control Register */
+#define REG_RBCPR_CTL 0x90
+
+#define RBCPR_CTL_LOOP_EN BIT(0)
+#define RBCPR_CTL_TIMER_EN BIT(3)
+#define RBCPR_CTL_SW_AUTO_CONT_ACK_EN BIT(5)
+#define RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN BIT(6)
+#define RBCPR_CTL_COUNT_MODE BIT(10)
+#define RBCPR_CTL_UP_THRESHOLD_MASK GENMASK(3, 0)
+#define RBCPR_CTL_UP_THRESHOLD_SHIFT 24
+#define RBCPR_CTL_DN_THRESHOLD_MASK GENMASK(3, 0)
+#define RBCPR_CTL_DN_THRESHOLD_SHIFT 28
+
+/* RBCPR Ack/Nack Response */
+#define REG_RBIF_CONT_ACK_CMD 0x98
+#define REG_RBIF_CONT_NACK_CMD 0x9c
+
+/* RBCPR Result status Register */
+#define REG_RBCPR_RESULT_0 0xa0
+
+#define RBCPR_RESULT0_BUSY_SHIFT 19
+#define RBCPR_RESULT0_BUSY_MASK BIT(RBCPR_RESULT0_BUSY_SHIFT)
+#define RBCPR_RESULT0_ERROR_LT0_SHIFT 18
+#define RBCPR_RESULT0_ERROR_SHIFT 6
+#define RBCPR_RESULT0_ERROR_MASK GENMASK(11, 0)
+#define RBCPR_RESULT0_ERROR_STEPS_SHIFT 2
+#define RBCPR_RESULT0_ERROR_STEPS_MASK GENMASK(3, 0)
+#define RBCPR_RESULT0_STEP_UP_SHIFT 1
+
+/* RBCPR Interrupt Control Register */
+#define REG_RBIF_IRQ_EN(n) (0x100 + 4 * (n))
+#define REG_RBIF_IRQ_CLEAR 0x110
+#define REG_RBIF_IRQ_STATUS 0x114
+
+#define CPR_INT_DONE BIT(0)
+#define CPR_INT_MIN BIT(1)
+#define CPR_INT_DOWN BIT(2)
+#define CPR_INT_MID BIT(3)
+#define CPR_INT_UP BIT(4)
+#define CPR_INT_MAX BIT(5)
+#define CPR_INT_CLAMP BIT(6)
+#define CPR_INT_ALL (CPR_INT_DONE | CPR_INT_MIN | CPR_INT_DOWN | \
+ CPR_INT_MID | CPR_INT_UP | CPR_INT_MAX | CPR_INT_CLAMP)
+#define CPR_INT_DEFAULT (CPR_INT_UP | CPR_INT_DOWN)
+
+#define CPR_NUM_RING_OSC 8
+
+/* CPR eFuse parameters */
+#define CPR_FUSE_TARGET_QUOT_BITS_MASK GENMASK(11, 0)
+
+#define CPR_FUSE_MIN_QUOT_DIFF 50
+
+#define FUSE_REVISION_UNKNOWN (-1)
+
+enum voltage_change_dir {
+ NO_CHANGE,
+ DOWN,
+ UP,
+};
+
+struct cpr_fuse {
+ char *ring_osc;
+ char *init_voltage;
+ char *quotient;
+ char *quotient_offset;
+};
+
+struct fuse_corner_data {
+ int ref_uV;
+ int max_uV;
+ int min_uV;
+ int max_volt_scale;
+ int max_quot_scale;
+ /* fuse quot */
+ int quot_offset;
+ int quot_scale;
+ int quot_adjust;
+ /* fuse quot_offset */
+ int quot_offset_scale;
+ int quot_offset_adjust;
+};
+
+struct cpr_fuses {
+ int init_voltage_step;
+ int init_voltage_width;
+ struct fuse_corner_data *fuse_corner_data;
+};
+
+struct corner_data {
+ unsigned int fuse_corner;
+ unsigned long freq;
+};
+
+struct cpr_desc {
+ unsigned int num_fuse_corners;
+ int min_diff_quot;
+ int *step_quot;
+
+ unsigned int timer_delay_us;
+ unsigned int timer_cons_up;
+ unsigned int timer_cons_down;
+ unsigned int up_threshold;
+ unsigned int down_threshold;
+ unsigned int idle_clocks;
+ unsigned int gcnt_us;
+ unsigned int vdd_apc_step_up_limit;
+ unsigned int vdd_apc_step_down_limit;
+ unsigned int clamp_timer_interval;
+
+ struct cpr_fuses cpr_fuses;
+ bool reduce_to_fuse_uV;
+ bool reduce_to_corner_uV;
+};
+
+struct acc_desc {
+ unsigned int enable_reg;
+ u32 enable_mask;
+
+ struct reg_sequence *config;
+ struct reg_sequence *settings;
+ int num_regs_per_fuse;
+};
+
+struct cpr_acc_desc {
+ const struct cpr_desc *cpr_desc;
+ const struct acc_desc *acc_desc;
+};
+
+struct fuse_corner {
+ int min_uV;
+ int max_uV;
+ int uV;
+ int quot;
+ int step_quot;
+ const struct reg_sequence *accs;
+ int num_accs;
+ unsigned long max_freq;
+ u8 ring_osc_idx;
+};
+
+struct corner {
+ int min_uV;
+ int max_uV;
+ int uV;
+ int last_uV;
+ int quot_adjust;
+ u32 save_ctl;
+ u32 save_irq;
+ unsigned long freq;
+ struct fuse_corner *fuse_corner;
+};
+
+struct cpr_drv {
+ unsigned int num_corners;
+ unsigned int ref_clk_khz;
+
+ struct generic_pm_domain pd;
+ struct device *dev;
+ struct device *attached_cpu_dev;
+ struct mutex lock;
+ void __iomem *base;
+ struct corner *corner;
+ struct regulator *vdd_apc;
+ struct clk *cpu_clk;
+ struct regmap *tcsr;
+ bool loop_disabled;
+ u32 gcnt;
+ unsigned long flags;
+
+ struct fuse_corner *fuse_corners;
+ struct corner *corners;
+
+ const struct cpr_desc *desc;
+ const struct acc_desc *acc_desc;
+ const struct cpr_fuse *cpr_fuses;
+
+ struct dentry *debugfs;
+};
+
+static bool cpr_is_allowed(struct cpr_drv *drv)
+{
+ return !drv->loop_disabled;
+}
+
+static void cpr_write(struct cpr_drv *drv, u32 offset, u32 value)
+{
+ writel_relaxed(value, drv->base + offset);
+}
+
+static u32 cpr_read(struct cpr_drv *drv, u32 offset)
+{
+ return readl_relaxed(drv->base + offset);
+}
+
+static void
+cpr_masked_write(struct cpr_drv *drv, u32 offset, u32 mask, u32 value)
+{
+ u32 val;
+
+ val = readl_relaxed(drv->base + offset);
+ val &= ~mask;
+ val |= value & mask;
+ writel_relaxed(val, drv->base + offset);
+}
+
+static void cpr_irq_clr(struct cpr_drv *drv)
+{
+ cpr_write(drv, REG_RBIF_IRQ_CLEAR, CPR_INT_ALL);
+}
+
+static void cpr_irq_clr_nack(struct cpr_drv *drv)
+{
+ cpr_irq_clr(drv);
+ cpr_write(drv, REG_RBIF_CONT_NACK_CMD, 1);
+}
+
+static void cpr_irq_clr_ack(struct cpr_drv *drv)
+{
+ cpr_irq_clr(drv);
+ cpr_write(drv, REG_RBIF_CONT_ACK_CMD, 1);
+}
+
+static void cpr_irq_set(struct cpr_drv *drv, u32 int_bits)
+{
+ cpr_write(drv, REG_RBIF_IRQ_EN(0), int_bits);
+}
+
+static void cpr_ctl_modify(struct cpr_drv *drv, u32 mask, u32 value)
+{
+ cpr_masked_write(drv, REG_RBCPR_CTL, mask, value);
+}
+
+static void cpr_ctl_enable(struct cpr_drv *drv, struct corner *corner)
+{
+ u32 val, mask;
+ const struct cpr_desc *desc = drv->desc;
+
+ /* Program Consecutive Up & Down */
+ val = desc->timer_cons_down << RBIF_TIMER_ADJ_CONS_DOWN_SHIFT;
+ val |= desc->timer_cons_up << RBIF_TIMER_ADJ_CONS_UP_SHIFT;
+ mask = RBIF_TIMER_ADJ_CONS_UP_MASK | RBIF_TIMER_ADJ_CONS_DOWN_MASK;
+ cpr_masked_write(drv, REG_RBIF_TIMER_ADJUST, mask, val);
+ cpr_masked_write(drv, REG_RBCPR_CTL,
+ RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN |
+ RBCPR_CTL_SW_AUTO_CONT_ACK_EN,
+ corner->save_ctl);
+ cpr_irq_set(drv, corner->save_irq);
+
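+	/* Close the CPR loop only when allowed and the corner has voltage headroom */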
+ if (cpr_is_allowed(drv) && corner->max_uV > corner->min_uV)
+ val = RBCPR_CTL_LOOP_EN;
+ else
+ val = 0;
+ cpr_ctl_modify(drv, RBCPR_CTL_LOOP_EN, val);
+}
+
+static void cpr_ctl_disable(struct cpr_drv *drv)
+{
+ cpr_irq_set(drv, 0);
+ cpr_ctl_modify(drv, RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN |
+ RBCPR_CTL_SW_AUTO_CONT_ACK_EN, 0);
+ cpr_masked_write(drv, REG_RBIF_TIMER_ADJUST,
+ RBIF_TIMER_ADJ_CONS_UP_MASK |
+ RBIF_TIMER_ADJ_CONS_DOWN_MASK, 0);
+ cpr_irq_clr(drv);
+ cpr_write(drv, REG_RBIF_CONT_ACK_CMD, 1);
+ cpr_write(drv, REG_RBIF_CONT_NACK_CMD, 1);
+ cpr_ctl_modify(drv, RBCPR_CTL_LOOP_EN, 0);
+}
+
+static bool cpr_ctl_is_enabled(struct cpr_drv *drv)
+{
+ u32 reg_val;
+
+ reg_val = cpr_read(drv, REG_RBCPR_CTL);
+ return reg_val & RBCPR_CTL_LOOP_EN;
+}
+
+static bool cpr_ctl_is_busy(struct cpr_drv *drv)
+{
+ u32 reg_val;
+
+ reg_val = cpr_read(drv, REG_RBCPR_RESULT_0);
+ return reg_val & RBCPR_RESULT0_BUSY_MASK;
+}
+
+static void cpr_corner_save(struct cpr_drv *drv, struct corner *corner)
+{
+ corner->save_ctl = cpr_read(drv, REG_RBCPR_CTL);
+ corner->save_irq = cpr_read(drv, REG_RBIF_IRQ_EN(0));
+}
+
+static void cpr_corner_restore(struct cpr_drv *drv, struct corner *corner)
+{
+ u32 gcnt, ctl, irq, ro_sel, step_quot;
+ struct fuse_corner *fuse = corner->fuse_corner;
+ const struct cpr_desc *desc = drv->desc;
+ int i;
+
+ ro_sel = fuse->ring_osc_idx;
+ gcnt = drv->gcnt;
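+	/* drv->gcnt already holds the gate count in bits [21:12]; OR in the adjusted target quotient (bits [11:0]) */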
+ gcnt |= fuse->quot - corner->quot_adjust;
+
+ /* Program the step quotient and idle clocks */
+ step_quot = desc->idle_clocks << RBCPR_STEP_QUOT_IDLE_CLK_SHIFT;
+ step_quot |= fuse->step_quot & RBCPR_STEP_QUOT_STEPQUOT_MASK;
+ cpr_write(drv, REG_RBCPR_STEP_QUOT, step_quot);
+
+ /* Clear the target quotient value and gate count of all ROs */
+ for (i = 0; i < CPR_NUM_RING_OSC; i++)
+ cpr_write(drv, REG_RBCPR_GCNT_TARGET(i), 0);
+
+ cpr_write(drv, REG_RBCPR_GCNT_TARGET(ro_sel), gcnt);
+ ctl = corner->save_ctl;
+ cpr_write(drv, REG_RBCPR_CTL, ctl);
+ irq = corner->save_irq;
+ cpr_irq_set(drv, irq);
+ dev_dbg(drv->dev, "gcnt = %#08x, ctl = %#08x, irq = %#08x\n", gcnt,
+ ctl, irq);
+}
+
+static void cpr_set_acc(struct regmap *tcsr, struct fuse_corner *f,
+ struct fuse_corner *end)
+{
+ if (f == end)
+ return;
+
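+	/* Apply the ACC settings one fuse corner at a time, walking in the direction of the change */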
+ if (f < end) {
+ for (f += 1; f <= end; f++)
+ regmap_multi_reg_write(tcsr, f->accs, f->num_accs);
+ } else {
+ for (f -= 1; f >= end; f--)
+ regmap_multi_reg_write(tcsr, f->accs, f->num_accs);
+ }
+}
+
+static int cpr_pre_voltage(struct cpr_drv *drv,
+ struct fuse_corner *fuse_corner,
+ enum voltage_change_dir dir)
+{
+ struct fuse_corner *prev_fuse_corner = drv->corner->fuse_corner;
+
+ if (drv->tcsr && dir == DOWN)
+ cpr_set_acc(drv->tcsr, prev_fuse_corner, fuse_corner);
+
+ return 0;
+}
+
+static int cpr_post_voltage(struct cpr_drv *drv,
+ struct fuse_corner *fuse_corner,
+ enum voltage_change_dir dir)
+{
+ struct fuse_corner *prev_fuse_corner = drv->corner->fuse_corner;
+
+ if (drv->tcsr && dir == UP)
+ cpr_set_acc(drv->tcsr, prev_fuse_corner, fuse_corner);
+
+ return 0;
+}
+
+static int cpr_scale_voltage(struct cpr_drv *drv, struct corner *corner,
+ int new_uV, enum voltage_change_dir dir)
+{
+ int ret;
+ struct fuse_corner *fuse_corner = corner->fuse_corner;
+
+ ret = cpr_pre_voltage(drv, fuse_corner, dir);
+ if (ret)
+ return ret;
+
+ ret = regulator_set_voltage(drv->vdd_apc, new_uV, new_uV);
+ if (ret) {
+ dev_err_ratelimited(drv->dev, "failed to set apc voltage %d\n",
+ new_uV);
+ return ret;
+ }
+
+ ret = cpr_post_voltage(drv, fuse_corner, dir);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static unsigned int cpr_get_cur_perf_state(struct cpr_drv *drv)
+{
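+	/* Performance states are 1-based; 0 means no corner has been selected yet */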
+ return drv->corner ? drv->corner - drv->corners + 1 : 0;
+}
+
+static int cpr_scale(struct cpr_drv *drv, enum voltage_change_dir dir)
+{
+ u32 val, error_steps, reg_mask;
+ int last_uV, new_uV, step_uV, ret;
+ struct corner *corner;
+ const struct cpr_desc *desc = drv->desc;
+
+ if (dir != UP && dir != DOWN)
+ return 0;
+
+ step_uV = regulator_get_linear_step(drv->vdd_apc);
+ if (!step_uV)
+ return -EINVAL;
+
+ corner = drv->corner;
+
+ val = cpr_read(drv, REG_RBCPR_RESULT_0);
+
+ error_steps = val >> RBCPR_RESULT0_ERROR_STEPS_SHIFT;
+ error_steps &= RBCPR_RESULT0_ERROR_STEPS_MASK;
+ last_uV = corner->last_uV;
+
+ if (dir == UP) {
+ if (desc->clamp_timer_interval &&
+ error_steps < desc->up_threshold) {
+ /*
+ * Handle the case where another measurement started
+ * after the interrupt was triggered due to a core
+ * exiting from power collapse.
+ */
+ error_steps = max(desc->up_threshold,
+ desc->vdd_apc_step_up_limit);
+ }
+
+ if (last_uV >= corner->max_uV) {
+ cpr_irq_clr_nack(drv);
+
+ /* Maximize the UP threshold */
+ reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK;
+ reg_mask <<= RBCPR_CTL_UP_THRESHOLD_SHIFT;
+ val = reg_mask;
+ cpr_ctl_modify(drv, reg_mask, val);
+
+ /* Disable UP interrupt */
+ cpr_irq_set(drv, CPR_INT_DEFAULT & ~CPR_INT_UP);
+
+ return 0;
+ }
+
+ if (error_steps > desc->vdd_apc_step_up_limit)
+ error_steps = desc->vdd_apc_step_up_limit;
+
+ /* Calculate new voltage */
+ new_uV = last_uV + error_steps * step_uV;
+ new_uV = min(new_uV, corner->max_uV);
+
+ dev_dbg(drv->dev,
+ "UP: -> new_uV: %d last_uV: %d perf state: %u\n",
+ new_uV, last_uV, cpr_get_cur_perf_state(drv));
+ } else if (dir == DOWN) {
+ if (desc->clamp_timer_interval &&
+ error_steps < desc->down_threshold) {
+ /*
+ * Handle the case where another measurement started
+ * after the interrupt was triggered due to a core
+ * exiting from power collapse.
+ */
+ error_steps = max(desc->down_threshold,
+ desc->vdd_apc_step_down_limit);
+ }
+
+ if (last_uV <= corner->min_uV) {
+ cpr_irq_clr_nack(drv);
+
+ /* Enable auto nack down */
+ reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
+ val = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
+
+ cpr_ctl_modify(drv, reg_mask, val);
+
+ /* Disable DOWN interrupt */
+ cpr_irq_set(drv, CPR_INT_DEFAULT & ~CPR_INT_DOWN);
+
+ return 0;
+ }
+
+ if (error_steps > desc->vdd_apc_step_down_limit)
+ error_steps = desc->vdd_apc_step_down_limit;
+
+ /* Calculate new voltage */
+ new_uV = last_uV - error_steps * step_uV;
+ new_uV = max(new_uV, corner->min_uV);
+
+ dev_dbg(drv->dev,
+ "DOWN: -> new_uV: %d last_uV: %d perf state: %u\n",
+ new_uV, last_uV, cpr_get_cur_perf_state(drv));
+ }
+
+ ret = cpr_scale_voltage(drv, corner, new_uV, dir);
+ if (ret) {
+ cpr_irq_clr_nack(drv);
+ return ret;
+ }
+ drv->corner->last_uV = new_uV;
+
+ if (dir == UP) {
+ /* Disable auto nack down */
+ reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
+ val = 0;
+ } else if (dir == DOWN) {
+ /* Restore default threshold for UP */
+ reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK;
+ reg_mask <<= RBCPR_CTL_UP_THRESHOLD_SHIFT;
+ val = desc->up_threshold;
+ val <<= RBCPR_CTL_UP_THRESHOLD_SHIFT;
+ }
+
+ cpr_ctl_modify(drv, reg_mask, val);
+
+ /* Re-enable default interrupts */
+ cpr_irq_set(drv, CPR_INT_DEFAULT);
+
+ /* Ack */
+ cpr_irq_clr_ack(drv);
+
+ return 0;
+}
+
+static irqreturn_t cpr_irq_handler(int irq, void *dev)
+{
+ struct cpr_drv *drv = dev;
+ const struct cpr_desc *desc = drv->desc;
+ irqreturn_t ret = IRQ_HANDLED;
+ u32 val;
+
+ mutex_lock(&drv->lock);
+
+ val = cpr_read(drv, REG_RBIF_IRQ_STATUS);
+ if (drv->flags & FLAGS_IGNORE_1ST_IRQ_STATUS)
+ val = cpr_read(drv, REG_RBIF_IRQ_STATUS);
+
+ dev_dbg(drv->dev, "IRQ_STATUS = %#02x\n", val);
+
+ if (!cpr_ctl_is_enabled(drv)) {
+ dev_dbg(drv->dev, "CPR is disabled\n");
+ ret = IRQ_NONE;
+ } else if (cpr_ctl_is_busy(drv) && !desc->clamp_timer_interval) {
+ dev_dbg(drv->dev, "CPR measurement is not ready\n");
+ } else if (!cpr_is_allowed(drv)) {
+ val = cpr_read(drv, REG_RBCPR_CTL);
+ dev_err_ratelimited(drv->dev,
+ "Interrupt broken? RBCPR_CTL = %#02x\n",
+ val);
+ ret = IRQ_NONE;
+ } else {
+ /*
+		 * Handle the pending interrupt flags in priority order;
+		 * only the highest-priority flag set is acted on
+ */
+ if (val & CPR_INT_UP) {
+ cpr_scale(drv, UP);
+ } else if (val & CPR_INT_DOWN) {
+ cpr_scale(drv, DOWN);
+ } else if (val & CPR_INT_MIN) {
+ cpr_irq_clr_nack(drv);
+ } else if (val & CPR_INT_MAX) {
+ cpr_irq_clr_nack(drv);
+ } else if (val & CPR_INT_MID) {
+ /* RBCPR_CTL_SW_AUTO_CONT_ACK_EN is enabled */
+ dev_dbg(drv->dev, "IRQ occurred for Mid Flag\n");
+ } else {
+ dev_dbg(drv->dev,
+ "IRQ occurred for unknown flag (%#08x)\n", val);
+ }
+
+ /* Save register values for the corner */
+ cpr_corner_save(drv, drv->corner);
+ }
+
+ mutex_unlock(&drv->lock);
+
+ return ret;
+}
+
+static int cpr_enable(struct cpr_drv *drv)
+{
+ int ret;
+
+ ret = regulator_enable(drv->vdd_apc);
+ if (ret)
+ return ret;
+
+ mutex_lock(&drv->lock);
+
+ if (cpr_is_allowed(drv) && drv->corner) {
+ cpr_irq_clr(drv);
+ cpr_corner_restore(drv, drv->corner);
+ cpr_ctl_enable(drv, drv->corner);
+ }
+
+ mutex_unlock(&drv->lock);
+
+ return 0;
+}
+
+static int cpr_disable(struct cpr_drv *drv)
+{
+ int ret;
+
+ mutex_lock(&drv->lock);
+
+ if (cpr_is_allowed(drv)) {
+ cpr_ctl_disable(drv);
+ cpr_irq_clr(drv);
+ }
+
+ mutex_unlock(&drv->lock);
+
+ ret = regulator_disable(drv->vdd_apc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int cpr_config(struct cpr_drv *drv)
+{
+ int i;
+ u32 val, gcnt;
+ struct corner *corner;
+ const struct cpr_desc *desc = drv->desc;
+
+ /* Disable interrupt and CPR */
+ cpr_write(drv, REG_RBIF_IRQ_EN(0), 0);
+ cpr_write(drv, REG_RBCPR_CTL, 0);
+
+ /* Program the default HW ceiling, floor and vlevel */
+ val = (RBIF_LIMIT_CEILING_DEFAULT & RBIF_LIMIT_CEILING_MASK)
+ << RBIF_LIMIT_CEILING_SHIFT;
+ val |= RBIF_LIMIT_FLOOR_DEFAULT & RBIF_LIMIT_FLOOR_MASK;
+ cpr_write(drv, REG_RBIF_LIMIT, val);
+ cpr_write(drv, REG_RBIF_SW_VLEVEL, RBIF_SW_VLEVEL_DEFAULT);
+
+ /*
+ * Clear the target quotient value and gate count of all
+ * ring oscillators
+ */
+ for (i = 0; i < CPR_NUM_RING_OSC; i++)
+ cpr_write(drv, REG_RBCPR_GCNT_TARGET(i), 0);
+
+ /* Init and save gcnt */
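+	/* e.g. a 19200 kHz reference clock with gcnt_us = 1 would yield gcnt = 19 */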
+ gcnt = (drv->ref_clk_khz * desc->gcnt_us) / 1000;
+ gcnt = gcnt & RBCPR_GCNT_TARGET_GCNT_MASK;
+ gcnt <<= RBCPR_GCNT_TARGET_GCNT_SHIFT;
+ drv->gcnt = gcnt;
+
+ /* Program the delay count for the timer */
+ val = (drv->ref_clk_khz * desc->timer_delay_us) / 1000;
+ cpr_write(drv, REG_RBCPR_TIMER_INTERVAL, val);
+ dev_dbg(drv->dev, "Timer count: %#0x (for %d us)\n", val,
+ desc->timer_delay_us);
+
+ /* Program Consecutive Up & Down */
+ val = desc->timer_cons_down << RBIF_TIMER_ADJ_CONS_DOWN_SHIFT;
+ val |= desc->timer_cons_up << RBIF_TIMER_ADJ_CONS_UP_SHIFT;
+ val |= desc->clamp_timer_interval << RBIF_TIMER_ADJ_CLAMP_INT_SHIFT;
+ cpr_write(drv, REG_RBIF_TIMER_ADJUST, val);
+
+ /* Program the control register */
+ val = desc->up_threshold << RBCPR_CTL_UP_THRESHOLD_SHIFT;
+ val |= desc->down_threshold << RBCPR_CTL_DN_THRESHOLD_SHIFT;
+ val |= RBCPR_CTL_TIMER_EN | RBCPR_CTL_COUNT_MODE;
+ val |= RBCPR_CTL_SW_AUTO_CONT_ACK_EN;
+ cpr_write(drv, REG_RBCPR_CTL, val);
+
+ for (i = 0; i < drv->num_corners; i++) {
+ corner = &drv->corners[i];
+ corner->save_ctl = val;
+ corner->save_irq = CPR_INT_DEFAULT;
+ }
+
+ cpr_irq_set(drv, CPR_INT_DEFAULT);
+
+ val = cpr_read(drv, REG_RBCPR_VERSION);
+ if (val <= RBCPR_VER_2)
+ drv->flags |= FLAGS_IGNORE_1ST_IRQ_STATUS;
+
+ return 0;
+}
+
+static int cpr_set_performance_state(struct generic_pm_domain *domain,
+ unsigned int state)
+{
+ struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
+ struct corner *corner, *end;
+ enum voltage_change_dir dir;
+ int ret = 0, new_uV;
+
+ mutex_lock(&drv->lock);
+
+ dev_dbg(drv->dev, "%s: setting perf state: %u (prev state: %u)\n",
+ __func__, state, cpr_get_cur_perf_state(drv));
+
+ /*
+	 * Determine the new corner we're switching to.
+	 * Subtract one, since the lowest performance state is 1.
+ */
+ corner = drv->corners + state - 1;
+ end = &drv->corners[drv->num_corners - 1];
+ if (corner > end || corner < drv->corners) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ /* Determine direction */
+ if (drv->corner > corner)
+ dir = DOWN;
+ else if (drv->corner < corner)
+ dir = UP;
+ else
+ dir = NO_CHANGE;
+
+ if (cpr_is_allowed(drv))
+ new_uV = corner->last_uV;
+ else
+ new_uV = corner->uV;
+
+ if (cpr_is_allowed(drv))
+ cpr_ctl_disable(drv);
+
+ ret = cpr_scale_voltage(drv, corner, new_uV, dir);
+ if (ret)
+ goto unlock;
+
+ if (cpr_is_allowed(drv)) {
+ cpr_irq_clr(drv);
+ if (drv->corner != corner)
+ cpr_corner_restore(drv, corner);
+ cpr_ctl_enable(drv, corner);
+ }
+
+ drv->corner = corner;
+
+unlock:
+ mutex_unlock(&drv->lock);
+
+ return ret;
+}
+
+static int cpr_read_efuse(struct device *dev, const char *cname, u32 *data)
+{
+ struct nvmem_cell *cell;
+ ssize_t len;
+ char *ret;
+ int i;
+
+ *data = 0;
+
+ cell = nvmem_cell_get(dev, cname);
+ if (IS_ERR(cell)) {
+ if (PTR_ERR(cell) != -EPROBE_DEFER)
+ dev_err(dev, "undefined cell %s\n", cname);
+ return PTR_ERR(cell);
+ }
+
+ ret = nvmem_cell_read(cell, &len);
+ nvmem_cell_put(cell);
+ if (IS_ERR(ret)) {
+ dev_err(dev, "can't read cell %s\n", cname);
+ return PTR_ERR(ret);
+ }
+
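+	/* Assemble the cell's bytes into a word, least-significant byte first */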
+ for (i = 0; i < len; i++)
+ *data |= ret[i] << (8 * i);
+
+ kfree(ret);
+ dev_dbg(dev, "efuse read(%s) = %x, bytes %zd\n", cname, *data, len);
+
+ return 0;
+}
+
+static int
+cpr_populate_ring_osc_idx(struct cpr_drv *drv)
+{
+ struct fuse_corner *fuse = drv->fuse_corners;
+ struct fuse_corner *end = fuse + drv->desc->num_fuse_corners;
+ const struct cpr_fuse *fuses = drv->cpr_fuses;
+ u32 data;
+ int ret;
+
+ for (; fuse < end; fuse++, fuses++) {
+ ret = cpr_read_efuse(drv->dev, fuses->ring_osc,
+ &data);
+ if (ret)
+ return ret;
+ fuse->ring_osc_idx = data;
+ }
+
+ return 0;
+}
+
+static int cpr_read_fuse_uV(const struct cpr_desc *desc,
+ const struct fuse_corner_data *fdata,
+ const char *init_v_efuse,
+ int step_volt,
+ struct cpr_drv *drv)
+{
+ int step_size_uV, steps, uV;
+ u32 bits = 0;
+ int ret;
+
+ ret = cpr_read_efuse(drv->dev, init_v_efuse, &bits);
+ if (ret)
+ return ret;
+
+ steps = bits & ~BIT(desc->cpr_fuses.init_voltage_width - 1);
+	/* Not two's complement; instead the highest bit is the sign bit */
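+	/* e.g. with a 6-bit field, 0x23 (0b100011) decodes to -3 and 0x03 to +3 */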
+ if (bits & BIT(desc->cpr_fuses.init_voltage_width - 1))
+ steps = -steps;
+
+ step_size_uV = desc->cpr_fuses.init_voltage_step;
+
+ uV = fdata->ref_uV + steps * step_size_uV;
+ return DIV_ROUND_UP(uV, step_volt) * step_volt;
+}
+
+static int cpr_fuse_corner_init(struct cpr_drv *drv)
+{
+ const struct cpr_desc *desc = drv->desc;
+ const struct cpr_fuse *fuses = drv->cpr_fuses;
+ const struct acc_desc *acc_desc = drv->acc_desc;
+ int i;
+ unsigned int step_volt;
+ struct fuse_corner_data *fdata;
+ struct fuse_corner *fuse, *end;
+ int uV;
+ const struct reg_sequence *accs;
+ int ret;
+
+ accs = acc_desc->settings;
+
+ step_volt = regulator_get_linear_step(drv->vdd_apc);
+ if (!step_volt)
+ return -EINVAL;
+
+ /* Populate fuse_corner members */
+ fuse = drv->fuse_corners;
+ end = &fuse[desc->num_fuse_corners - 1];
+ fdata = desc->cpr_fuses.fuse_corner_data;
+
+ for (i = 0; fuse <= end; fuse++, fuses++, i++, fdata++) {
+ /*
+		 * Update SoC voltages: platforms might choose a different
+		 * regulator than the one used to characterize the algorithm
+		 * (i.e., init_voltage_step).
+ */
+ fdata->min_uV = roundup(fdata->min_uV, step_volt);
+ fdata->max_uV = roundup(fdata->max_uV, step_volt);
+
+ /* Populate uV */
+ uV = cpr_read_fuse_uV(desc, fdata, fuses->init_voltage,
+ step_volt, drv);
+ if (uV < 0)
+ return uV;
+
+ fuse->min_uV = fdata->min_uV;
+ fuse->max_uV = fdata->max_uV;
+ fuse->uV = clamp(uV, fuse->min_uV, fuse->max_uV);
+
+ if (fuse == end) {
+ /*
+ * Allow the highest fuse corner's PVS voltage to
+ * define the ceiling voltage for that corner in order
+			 * to support SoCs in which variable ceiling values
+ * are required.
+ */
+ end->max_uV = max(end->max_uV, end->uV);
+ }
+
+ /* Populate target quotient by scaling */
+ ret = cpr_read_efuse(drv->dev, fuses->quotient, &fuse->quot);
+ if (ret)
+ return ret;
+
+ fuse->quot *= fdata->quot_scale;
+ fuse->quot += fdata->quot_offset;
+ fuse->quot += fdata->quot_adjust;
+ fuse->step_quot = desc->step_quot[fuse->ring_osc_idx];
+
+ /* Populate acc settings */
+ fuse->accs = accs;
+ fuse->num_accs = acc_desc->num_regs_per_fuse;
+ accs += acc_desc->num_regs_per_fuse;
+ }
+
+ /*
+ * Restrict all fuse corner PVS voltages based upon per corner
+ * ceiling and floor voltages.
+ */
+ for (fuse = drv->fuse_corners, i = 0; fuse <= end; fuse++, i++) {
+ if (fuse->uV > fuse->max_uV)
+ fuse->uV = fuse->max_uV;
+ else if (fuse->uV < fuse->min_uV)
+ fuse->uV = fuse->min_uV;
+
+ ret = regulator_is_supported_voltage(drv->vdd_apc,
+ fuse->min_uV,
+ fuse->min_uV);
+ if (!ret) {
+ dev_err(drv->dev,
+ "min uV: %d (fuse corner: %d) not supported by regulator\n",
+ fuse->min_uV, i);
+ return -EINVAL;
+ }
+
+ ret = regulator_is_supported_voltage(drv->vdd_apc,
+ fuse->max_uV,
+ fuse->max_uV);
+ if (!ret) {
+ dev_err(drv->dev,
+ "max uV: %d (fuse corner: %d) not supported by regulator\n",
+ fuse->max_uV, i);
+ return -EINVAL;
+ }
+
+ dev_dbg(drv->dev,
+ "fuse corner %d: [%d %d %d] RO%hhu quot %d squot %d\n",
+ i, fuse->min_uV, fuse->uV, fuse->max_uV,
+ fuse->ring_osc_idx, fuse->quot, fuse->step_quot);
+ }
+
+ return 0;
+}
+
+static int cpr_calculate_scaling(const char *quot_offset,
+ struct cpr_drv *drv,
+ const struct fuse_corner_data *fdata,
+ const struct corner *corner)
+{
+ u32 quot_diff = 0;
+ unsigned long freq_diff;
+ int scaling;
+ const struct fuse_corner *fuse, *prev_fuse;
+ int ret;
+
+ fuse = corner->fuse_corner;
+ prev_fuse = fuse - 1;
+
+ if (quot_offset) {
+		ret = cpr_read_efuse(drv->dev, quot_offset, &quot_diff);
+ if (ret)
+ return ret;
+
+ quot_diff *= fdata->quot_offset_scale;
+ quot_diff += fdata->quot_offset_adjust;
+ } else {
+ quot_diff = fuse->quot - prev_fuse->quot;
+ }
+
+ freq_diff = fuse->max_freq - prev_fuse->max_freq;
+ freq_diff /= 1000000; /* Convert to MHz */
+ scaling = 1000 * quot_diff / freq_diff;
+ return min(scaling, fdata->max_quot_scale);
+}
+
+static int cpr_interpolate(const struct corner *corner, int step_volt,
+ const struct fuse_corner_data *fdata)
+{
+ unsigned long f_high, f_low, f_diff;
+ int uV_high, uV_low, uV;
+ u64 temp, temp_limit;
+ const struct fuse_corner *fuse, *prev_fuse;
+
+ fuse = corner->fuse_corner;
+ prev_fuse = fuse - 1;
+
+ f_high = fuse->max_freq;
+ f_low = prev_fuse->max_freq;
+ uV_high = fuse->uV;
+ uV_low = prev_fuse->uV;
+ f_diff = fuse->max_freq - corner->freq;
+
+ /*
+ * Don't interpolate in the wrong direction. This could happen
+ * if the adjusted fuse voltage overlaps with the previous fuse's
+ * adjusted voltage.
+ */
+ if (f_high <= f_low || uV_high <= uV_low || f_high <= corner->freq)
+ return corner->uV;
+
+ temp = f_diff * (uV_high - uV_low);
+ do_div(temp, f_high - f_low);
+
+ /*
+ * max_volt_scale has units of uV/MHz while freq values
+	 * have units of Hz. Divide the product by 1000000 to
+	 * convert it to uV.
+ */
+ temp_limit = f_diff * fdata->max_volt_scale;
+ do_div(temp_limit, 1000000);
+
+ uV = uV_high - min(temp, temp_limit);
+ return roundup(uV, step_volt);
+}
+
+static unsigned int cpr_get_fuse_corner(struct dev_pm_opp *opp)
+{
+ struct device_node *np;
+ unsigned int fuse_corner = 0;
+
+ np = dev_pm_opp_get_of_node(opp);
+ if (of_property_read_u32(np, "qcom,opp-fuse-level", &fuse_corner))
+ pr_err("%s: missing 'qcom,opp-fuse-level' property\n",
+ __func__);
+
+ of_node_put(np);
+
+ return fuse_corner;
+}
+
+static unsigned long cpr_get_opp_hz_for_req(struct dev_pm_opp *ref,
+ struct device *cpu_dev)
+{
+ u64 rate = 0;
+ struct device_node *ref_np;
+ struct device_node *desc_np;
+ struct device_node *child_np = NULL;
+ struct device_node *child_req_np = NULL;
+
+ desc_np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
+ if (!desc_np)
+ return 0;
+
+ ref_np = dev_pm_opp_get_of_node(ref);
+ if (!ref_np)
+ goto out_ref;
+
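+	/* Walk the CPU's OPP nodes until one's required-opps phandle matches our reference OPP */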
+ do {
+ of_node_put(child_req_np);
+ child_np = of_get_next_available_child(desc_np, child_np);
+ child_req_np = of_parse_phandle(child_np, "required-opps", 0);
+ } while (child_np && child_req_np != ref_np);
+
+ if (child_np && child_req_np == ref_np)
+ of_property_read_u64(child_np, "opp-hz", &rate);
+
+ of_node_put(child_req_np);
+ of_node_put(child_np);
+ of_node_put(ref_np);
+out_ref:
+ of_node_put(desc_np);
+
+ return (unsigned long) rate;
+}
+
+static int cpr_corner_init(struct cpr_drv *drv)
+{
+ const struct cpr_desc *desc = drv->desc;
+ const struct cpr_fuse *fuses = drv->cpr_fuses;
+ int i, level, scaling = 0;
+ unsigned int fnum, fc;
+ const char *quot_offset;
+ struct fuse_corner *fuse, *prev_fuse;
+ struct corner *corner, *end;
+ struct corner_data *cdata;
+ const struct fuse_corner_data *fdata;
+ bool apply_scaling;
+ unsigned long freq_diff, freq_diff_mhz;
+ unsigned long freq;
+ int step_volt = regulator_get_linear_step(drv->vdd_apc);
+ struct dev_pm_opp *opp;
+
+ if (!step_volt)
+ return -EINVAL;
+
+ corner = drv->corners;
+ end = &corner[drv->num_corners - 1];
+
+ cdata = devm_kcalloc(drv->dev, drv->num_corners,
+ sizeof(struct corner_data),
+ GFP_KERNEL);
+ if (!cdata)
+ return -ENOMEM;
+
+ /*
+ * Store maximum frequency for each fuse corner based on the frequency
+ * plan
+ */
+ for (level = 1; level <= drv->num_corners; level++) {
+ opp = dev_pm_opp_find_level_exact(&drv->pd.dev, level);
+ if (IS_ERR(opp))
+ return -EINVAL;
+ fc = cpr_get_fuse_corner(opp);
+ if (!fc) {
+ dev_pm_opp_put(opp);
+ return -EINVAL;
+ }
+ fnum = fc - 1;
+ freq = cpr_get_opp_hz_for_req(opp, drv->attached_cpu_dev);
+ if (!freq) {
+ dev_pm_opp_put(opp);
+ return -EINVAL;
+ }
+ cdata[level - 1].fuse_corner = fnum;
+ cdata[level - 1].freq = freq;
+
+ fuse = &drv->fuse_corners[fnum];
+ dev_dbg(drv->dev, "freq: %lu level: %u fuse level: %u\n",
+ freq, dev_pm_opp_get_level(opp) - 1, fnum);
+ if (freq > fuse->max_freq)
+ fuse->max_freq = freq;
+ dev_pm_opp_put(opp);
+ }
+
+ /*
+ * Get the quotient adjustment scaling factor, according to:
+ *
+ * scaling = min(1000 * (QUOT(corner_N) - QUOT(corner_N-1))
+ * / (freq(corner_N) - freq(corner_N-1)), max_factor)
+ *
+ * QUOT(corner_N): quotient read from fuse for fuse corner N
+ * QUOT(corner_N-1): quotient read from fuse for fuse corner (N - 1)
+ * freq(corner_N): max frequency in MHz supported by fuse corner N
+ * freq(corner_N-1): max frequency in MHz supported by fuse corner
+ * (N - 1)
+ *
+ * Then walk through the corners mapped to each fuse corner
+ * and calculate the quotient adjustment for each one using the
+ * following formula:
+ *
+ * quot_adjust = (freq_max - freq_corner) * scaling / 1000
+ *
+ * freq_max: max frequency in MHz supported by the fuse corner
+ * freq_corner: frequency in MHz corresponding to the corner
+ * scaling: calculated from above equation
+ *
+ *
+ * + +
+ * | v |
+ * q | f c o | f c
+ * u | c l | c
+ * o | f t | f
+ * t | c a | c
+ * | c f g | c f
+ * | e |
+ * +--------------- +----------------
+ * 0 1 2 3 4 5 6 0 1 2 3 4 5 6
+ * corner corner
+ *
+ * c = corner
+ * f = fuse corner
+ *
+ */
+ for (apply_scaling = false, i = 0; corner <= end; corner++, i++) {
+ fnum = cdata[i].fuse_corner;
+ fdata = &desc->cpr_fuses.fuse_corner_data[fnum];
+ quot_offset = fuses[fnum].quotient_offset;
+ fuse = &drv->fuse_corners[fnum];
+ if (fnum)
+ prev_fuse = &drv->fuse_corners[fnum - 1];
+ else
+ prev_fuse = NULL;
+
+ corner->fuse_corner = fuse;
+ corner->freq = cdata[i].freq;
+ corner->uV = fuse->uV;
+
+ if (prev_fuse && cdata[i - 1].freq == prev_fuse->max_freq) {
+ scaling = cpr_calculate_scaling(quot_offset, drv,
+ fdata, corner);
+ if (scaling < 0)
+ return scaling;
+
+ apply_scaling = true;
+ } else if (corner->freq == fuse->max_freq) {
+ /* This is a fuse corner; don't scale anything */
+ apply_scaling = false;
+ }
+
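+		/* Per the formula above: quot_adjust = (freq_max - freq_corner) in MHz * scaling / 1000 */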
+ if (apply_scaling) {
+ freq_diff = fuse->max_freq - corner->freq;
+ freq_diff_mhz = freq_diff / 1000000;
+ corner->quot_adjust = scaling * freq_diff_mhz / 1000;
+
+ corner->uV = cpr_interpolate(corner, step_volt, fdata);
+ }
+
+ corner->max_uV = fuse->max_uV;
+ corner->min_uV = fuse->min_uV;
+ corner->uV = clamp(corner->uV, corner->min_uV, corner->max_uV);
+ corner->last_uV = corner->uV;
+
+ /* Reduce the ceiling voltage if needed */
+ if (desc->reduce_to_corner_uV && corner->uV < corner->max_uV)
+ corner->max_uV = corner->uV;
+ else if (desc->reduce_to_fuse_uV && fuse->uV < corner->max_uV)
+ corner->max_uV = max(corner->min_uV, fuse->uV);
+
+ dev_dbg(drv->dev, "corner %d: [%d %d %d] quot %d\n", i,
+ corner->min_uV, corner->uV, corner->max_uV,
+ fuse->quot - corner->quot_adjust);
+ }
+
+ return 0;
+}
+
+static const struct cpr_fuse *cpr_get_fuses(struct cpr_drv *drv)
+{
+ const struct cpr_desc *desc = drv->desc;
+ struct cpr_fuse *fuses;
+ int i;
+
+ fuses = devm_kcalloc(drv->dev, desc->num_fuse_corners,
+ sizeof(struct cpr_fuse),
+ GFP_KERNEL);
+ if (!fuses)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < desc->num_fuse_corners; i++) {
+ char tbuf[32];
+
+ snprintf(tbuf, 32, "cpr_ring_osc%d", i + 1);
+ fuses[i].ring_osc = devm_kstrdup(drv->dev, tbuf, GFP_KERNEL);
+ if (!fuses[i].ring_osc)
+ return ERR_PTR(-ENOMEM);
+
+ snprintf(tbuf, 32, "cpr_init_voltage%d", i + 1);
+ fuses[i].init_voltage = devm_kstrdup(drv->dev, tbuf,
+ GFP_KERNEL);
+ if (!fuses[i].init_voltage)
+ return ERR_PTR(-ENOMEM);
+
+ snprintf(tbuf, 32, "cpr_quotient%d", i + 1);
+ fuses[i].quotient = devm_kstrdup(drv->dev, tbuf, GFP_KERNEL);
+ if (!fuses[i].quotient)
+ return ERR_PTR(-ENOMEM);
+
+ snprintf(tbuf, 32, "cpr_quotient_offset%d", i + 1);
+ fuses[i].quotient_offset = devm_kstrdup(drv->dev, tbuf,
+ GFP_KERNEL);
+ if (!fuses[i].quotient_offset)
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return fuses;
+}
+
+static void cpr_set_loop_allowed(struct cpr_drv *drv)
+{
+ drv->loop_disabled = false;
+}
+
+static int cpr_init_parameters(struct cpr_drv *drv)
+{
+ const struct cpr_desc *desc = drv->desc;
+ struct clk *clk;
+
+ clk = clk_get(drv->dev, "ref");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ drv->ref_clk_khz = clk_get_rate(clk) / 1000;
+ clk_put(clk);
+
+ if (desc->timer_cons_up > RBIF_TIMER_ADJ_CONS_UP_MASK ||
+ desc->timer_cons_down > RBIF_TIMER_ADJ_CONS_DOWN_MASK ||
+ desc->up_threshold > RBCPR_CTL_UP_THRESHOLD_MASK ||
+ desc->down_threshold > RBCPR_CTL_DN_THRESHOLD_MASK ||
+ desc->idle_clocks > RBCPR_STEP_QUOT_IDLE_CLK_MASK ||
+ desc->clamp_timer_interval > RBIF_TIMER_ADJ_CLAMP_INT_MASK)
+ return -EINVAL;
+
+ dev_dbg(drv->dev, "up threshold = %u, down threshold = %u\n",
+ desc->up_threshold, desc->down_threshold);
+
+ return 0;
+}
+
+static int cpr_find_initial_corner(struct cpr_drv *drv)
+{
+ unsigned long rate;
+ const struct corner *end;
+ struct corner *iter;
+ unsigned int i = 0;
+
+ if (!drv->cpu_clk) {
+ dev_err(drv->dev, "cannot get rate from NULL clk\n");
+ return -EINVAL;
+ }
+
+ end = &drv->corners[drv->num_corners - 1];
+ rate = clk_get_rate(drv->cpu_clk);
+
+ /*
+ * Some bootloaders set a CPU clock frequency that is not defined
+ * in the OPP table. When running at an unlisted frequency,
+ * cpufreq_online() will change to the OPP which has the lowest
+ * frequency, at or above the unlisted frequency.
+ * Since cpufreq_online() always "rounds up" in the case of an
+ * unlisted frequency, this function always "rounds down" in case
+ * of an unlisted frequency. That way, when cpufreq_online()
+ * triggers the first ever call to cpr_set_performance_state(),
+ * it will correctly determine the direction as UP.
+ */
+ for (iter = drv->corners; iter <= end; iter++) {
+ if (iter->freq > rate)
+ break;
+ i++;
+ if (iter->freq == rate) {
+ drv->corner = iter;
+ break;
+ }
+ if (iter->freq < rate)
+ drv->corner = iter;
+ }
+
+ if (!drv->corner) {
+ dev_err(drv->dev, "boot up corner not found\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(drv->dev, "boot up perf state: %u\n", i);
+
+ return 0;
+}
+
+static const struct cpr_desc qcs404_cpr_desc = {
+ .num_fuse_corners = 3,
+ .min_diff_quot = CPR_FUSE_MIN_QUOT_DIFF,
+ .step_quot = (int []){ 25, 25, 25, },
+ .timer_delay_us = 5000,
+ .timer_cons_up = 0,
+ .timer_cons_down = 2,
+ .up_threshold = 1,
+ .down_threshold = 3,
+ .idle_clocks = 15,
+ .gcnt_us = 1,
+ .vdd_apc_step_up_limit = 1,
+ .vdd_apc_step_down_limit = 1,
+ .cpr_fuses = {
+ .init_voltage_step = 8000,
+ .init_voltage_width = 6,
+ .fuse_corner_data = (struct fuse_corner_data[]){
+ /* fuse corner 0 */
+ {
+ .ref_uV = 1224000,
+ .max_uV = 1224000,
+ .min_uV = 1048000,
+ .max_volt_scale = 0,
+ .max_quot_scale = 0,
+ .quot_offset = 0,
+ .quot_scale = 1,
+ .quot_adjust = 0,
+ .quot_offset_scale = 5,
+ .quot_offset_adjust = 0,
+ },
+ /* fuse corner 1 */
+ {
+ .ref_uV = 1288000,
+ .max_uV = 1288000,
+ .min_uV = 1048000,
+ .max_volt_scale = 2000,
+ .max_quot_scale = 1400,
+ .quot_offset = 0,
+ .quot_scale = 1,
+ .quot_adjust = -20,
+ .quot_offset_scale = 5,
+ .quot_offset_adjust = 0,
+ },
+ /* fuse corner 2 */
+ {
+ .ref_uV = 1352000,
+ .max_uV = 1384000,
+ .min_uV = 1088000,
+ .max_volt_scale = 2000,
+ .max_quot_scale = 1400,
+ .quot_offset = 0,
+ .quot_scale = 1,
+ .quot_adjust = 0,
+ .quot_offset_scale = 5,
+ .quot_offset_adjust = 0,
+ },
+ },
+ },
+};
+
+static const struct acc_desc qcs404_acc_desc = {
+ .settings = (struct reg_sequence[]){
+ { 0xb120, 0x1041040 },
+ { 0xb124, 0x41 },
+ { 0xb120, 0x0 },
+ { 0xb124, 0x0 },
+ { 0xb120, 0x0 },
+ { 0xb124, 0x0 },
+ },
+ .config = (struct reg_sequence[]){
+ { 0xb138, 0xff },
+ { 0xb130, 0x5555 },
+ },
+ .num_regs_per_fuse = 2,
+};
+
+static const struct cpr_acc_desc qcs404_cpr_acc_desc = {
+ .cpr_desc = &qcs404_cpr_desc,
+ .acc_desc = &qcs404_acc_desc,
+};
+
+static unsigned int cpr_get_performance_state(struct generic_pm_domain *genpd,
+ struct dev_pm_opp *opp)
+{
+ return dev_pm_opp_get_level(opp);
+}
+
+static int cpr_power_off(struct generic_pm_domain *domain)
+{
+ struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
+
+ return cpr_disable(drv);
+}
+
+static int cpr_power_on(struct generic_pm_domain *domain)
+{
+ struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
+
+ return cpr_enable(drv);
+}
+
+static int cpr_pd_attach_dev(struct generic_pm_domain *domain,
+ struct device *dev)
+{
+ struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
+ const struct acc_desc *acc_desc = drv->acc_desc;
+ int ret = 0;
+
+ mutex_lock(&drv->lock);
+
+ dev_dbg(drv->dev, "attach callback for: %s\n", dev_name(dev));
+
+ /*
+ * This driver only supports scaling voltage for a CPU cluster
+ * where all CPUs in the cluster share a single regulator.
+ * Therefore, save the struct device pointer only for the first
+ * CPU device that gets attached. There is no need to do any
+ * additional initialization when further CPUs get attached.
+ */
+ if (drv->attached_cpu_dev)
+ goto unlock;
+
+ /*
+ * cpr_scale_voltage() requires the direction (if we are changing
+ * to a higher or lower OPP). The first time
+ * cpr_set_performance_state() is called, there is no previous
+ * performance state defined. Therefore, we call
+ * cpr_find_initial_corner() that gets the CPU clock frequency
+ * set by the bootloader, so that we can determine the direction
+ * the first time cpr_set_performance_state() is called.
+ */
+ drv->cpu_clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(drv->cpu_clk)) {
+ ret = PTR_ERR(drv->cpu_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(drv->dev, "could not get cpu clk: %d\n", ret);
+ goto unlock;
+ }
+ drv->attached_cpu_dev = dev;
+
+ dev_dbg(drv->dev, "using cpu clk from: %s\n",
+ dev_name(drv->attached_cpu_dev));
+
+ /*
+ * Everything related to (virtual) corners has to be initialized
+	 * here, when attaching to the power domain, since we need to know
+	 * the highest frequency associated with each fuse corner, and that
+	 * is only available after the cpufreq driver has attached to us.
+ */
+ ret = dev_pm_opp_get_opp_count(&drv->pd.dev);
+ if (ret < 0) {
+ dev_err(drv->dev, "could not get OPP count\n");
+ goto unlock;
+ }
+ drv->num_corners = ret;
+
+ if (drv->num_corners < 2) {
+ dev_err(drv->dev, "need at least 2 OPPs to use CPR\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ dev_dbg(drv->dev, "number of OPPs: %d\n", drv->num_corners);
+
+ drv->corners = devm_kcalloc(drv->dev, drv->num_corners,
+ sizeof(*drv->corners),
+ GFP_KERNEL);
+ if (!drv->corners) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ ret = cpr_corner_init(drv);
+ if (ret)
+ goto unlock;
+
+ cpr_set_loop_allowed(drv);
+
+ ret = cpr_init_parameters(drv);
+ if (ret)
+ goto unlock;
+
+ /* Configure CPR HW but keep it disabled */
+ ret = cpr_config(drv);
+ if (ret)
+ goto unlock;
+
+ ret = cpr_find_initial_corner(drv);
+ if (ret)
+ goto unlock;
+
+ if (acc_desc->config)
+ regmap_multi_reg_write(drv->tcsr, acc_desc->config,
+ acc_desc->num_regs_per_fuse);
+
+ /* Enable ACC if required */
+ if (acc_desc->enable_mask)
+ regmap_update_bits(drv->tcsr, acc_desc->enable_reg,
+ acc_desc->enable_mask,
+ acc_desc->enable_mask);
+
+unlock:
+ mutex_unlock(&drv->lock);
+
+ return ret;
+}
+
+static int cpr_debug_info_show(struct seq_file *s, void *unused)
+{
+ u32 gcnt, ro_sel, ctl, irq_status, reg, error_steps;
+ u32 step_dn, step_up, error, error_lt0, busy;
+ struct cpr_drv *drv = s->private;
+ struct fuse_corner *fuse_corner;
+ struct corner *corner;
+
+ corner = drv->corner;
+ fuse_corner = corner->fuse_corner;
+
+ seq_printf(s, "corner, current_volt = %d uV\n",
+ corner->last_uV);
+
+ ro_sel = fuse_corner->ring_osc_idx;
+ gcnt = cpr_read(drv, REG_RBCPR_GCNT_TARGET(ro_sel));
+ seq_printf(s, "rbcpr_gcnt_target (%u) = %#02X\n", ro_sel, gcnt);
+
+ ctl = cpr_read(drv, REG_RBCPR_CTL);
+ seq_printf(s, "rbcpr_ctl = %#02X\n", ctl);
+
+ irq_status = cpr_read(drv, REG_RBIF_IRQ_STATUS);
+ seq_printf(s, "rbcpr_irq_status = %#02X\n", irq_status);
+
+ reg = cpr_read(drv, REG_RBCPR_RESULT_0);
+ seq_printf(s, "rbcpr_result_0 = %#02X\n", reg);
+
+ step_dn = reg & 0x01;
+ step_up = (reg >> RBCPR_RESULT0_STEP_UP_SHIFT) & 0x01;
+ seq_printf(s, " [step_dn = %u", step_dn);
+
+ seq_printf(s, ", step_up = %u", step_up);
+
+ error_steps = (reg >> RBCPR_RESULT0_ERROR_STEPS_SHIFT)
+ & RBCPR_RESULT0_ERROR_STEPS_MASK;
+ seq_printf(s, ", error_steps = %u", error_steps);
+
+ error = (reg >> RBCPR_RESULT0_ERROR_SHIFT) & RBCPR_RESULT0_ERROR_MASK;
+ seq_printf(s, ", error = %u", error);
+
+ error_lt0 = (reg >> RBCPR_RESULT0_ERROR_LT0_SHIFT) & 0x01;
+ seq_printf(s, ", error_lt_0 = %u", error_lt0);
+
+ busy = (reg >> RBCPR_RESULT0_BUSY_SHIFT) & 0x01;
+ seq_printf(s, ", busy = %u]\n", busy);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(cpr_debug_info);
+
+static void cpr_debugfs_init(struct cpr_drv *drv)
+{
+ drv->debugfs = debugfs_create_dir("qcom_cpr", NULL);
+
+ debugfs_create_file("debug_info", 0444, drv->debugfs,
+ drv, &cpr_debug_info_fops);
+}
+
+static int cpr_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ struct cpr_drv *drv;
+ int irq, ret;
+ const struct cpr_acc_desc *data;
+ struct device_node *np;
+ u32 cpr_rev = FUSE_REVISION_UNKNOWN;
+
+ data = of_device_get_match_data(dev);
+ if (!data || !data->cpr_desc || !data->acc_desc)
+ return -EINVAL;
+
+ drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
+ if (!drv)
+ return -ENOMEM;
+ drv->dev = dev;
+ drv->desc = data->cpr_desc;
+ drv->acc_desc = data->acc_desc;
+
+ drv->fuse_corners = devm_kcalloc(dev, drv->desc->num_fuse_corners,
+ sizeof(*drv->fuse_corners),
+ GFP_KERNEL);
+ if (!drv->fuse_corners)
+ return -ENOMEM;
+
+ np = of_parse_phandle(dev->of_node, "acc-syscon", 0);
+ if (!np)
+ return -ENODEV;
+
+ drv->tcsr = syscon_node_to_regmap(np);
+ of_node_put(np);
+ if (IS_ERR(drv->tcsr))
+ return PTR_ERR(drv->tcsr);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ drv->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(drv->base))
+ return PTR_ERR(drv->base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return -EINVAL;
+
+ drv->vdd_apc = devm_regulator_get(dev, "vdd-apc");
+ if (IS_ERR(drv->vdd_apc))
+ return PTR_ERR(drv->vdd_apc);
+
+ /*
+ * Initialize the fuse corners here, since they depend only
+ * on data in the efuses. Everything related to (virtual)
+ * corners has to be initialized after attaching to the power
+ * domain, since it depends on the CPU's OPP table.
+ */
+ ret = cpr_read_efuse(dev, "cpr_fuse_revision", &cpr_rev);
+ if (ret)
+ return ret;
+
+ drv->cpr_fuses = cpr_get_fuses(drv);
+ if (IS_ERR(drv->cpr_fuses))
+ return PTR_ERR(drv->cpr_fuses);
+
+ ret = cpr_populate_ring_osc_idx(drv);
+ if (ret)
+ return ret;
+
+ ret = cpr_fuse_corner_init(drv);
+ if (ret)
+ return ret;
+
+ mutex_init(&drv->lock);
+
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ cpr_irq_handler,
+ IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+ "cpr", drv);
+ if (ret)
+ return ret;
+
+ drv->pd.name = devm_kstrdup_const(dev, dev->of_node->full_name,
+ GFP_KERNEL);
+ if (!drv->pd.name)
+ return -EINVAL;
+
+ drv->pd.power_off = cpr_power_off;
+ drv->pd.power_on = cpr_power_on;
+ drv->pd.set_performance_state = cpr_set_performance_state;
+ drv->pd.opp_to_performance_state = cpr_get_performance_state;
+ drv->pd.attach_dev = cpr_pd_attach_dev;
+
+ ret = pm_genpd_init(&drv->pd, NULL, true);
+ if (ret)
+ return ret;
+
+ ret = of_genpd_add_provider_simple(dev->of_node, &drv->pd);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, drv);
+ cpr_debugfs_init(drv);
+
+ return 0;
+}
+
+static int cpr_remove(struct platform_device *pdev)
+{
+ struct cpr_drv *drv = platform_get_drvdata(pdev);
+
+ if (cpr_is_allowed(drv)) {
+ cpr_ctl_disable(drv);
+ cpr_irq_set(drv, 0);
+ }
+
+ of_genpd_del_provider(pdev->dev.of_node);
+ pm_genpd_remove(&drv->pd);
+
+ debugfs_remove_recursive(drv->debugfs);
+
+ return 0;
+}
+
+static const struct of_device_id cpr_match_table[] = {
+ { .compatible = "qcom,qcs404-cpr", .data = &qcs404_cpr_acc_desc },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cpr_match_table);
+
+static struct platform_driver cpr_driver = {
+ .probe = cpr_probe,
+ .remove = cpr_remove,
+ .driver = {
+ .name = "qcom-cpr",
+ .of_match_table = cpr_match_table,
+ },
+};
+module_platform_driver(cpr_driver);
+
+MODULE_DESCRIPTION("Core Power Reduction (CPR) driver");
+MODULE_LICENSE("GPL v2");
INTEL_CPU_FAM6(ICELAKE_D, rapl_defaults_hsw_server),
INTEL_CPU_FAM6(COMETLAKE_L, rapl_defaults_core),
INTEL_CPU_FAM6(COMETLAKE, rapl_defaults_core),
+ INTEL_CPU_FAM6(TIGERLAKE_L, rapl_defaults_core),
INTEL_CPU_FAM6(ATOM_SILVERMONT, rapl_defaults_byt),
INTEL_CPU_FAM6(ATOM_AIRMONT, rapl_defaults_cht),
INTEL_CPU_FAM6(ATOM_GOLDMONT_PLUS, rapl_defaults_core),
INTEL_CPU_FAM6(ATOM_GOLDMONT_D, rapl_defaults_core),
INTEL_CPU_FAM6(ATOM_TREMONT_D, rapl_defaults_core),
+ INTEL_CPU_FAM6(ATOM_TREMONT_L, rapl_defaults_core),
INTEL_CPU_FAM6(XEON_PHI_KNL, rapl_defaults_hsw_server),
INTEL_CPU_FAM6(XEON_PHI_KNM, rapl_defaults_hsw_server),
struct cpuinfo_x86 *c = &cpu_data(cpu);
int ret;
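+ /* rapl_defaults is only populated for known CPU models */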
+ if (!rapl_defaults)
+ return ERR_PTR(-ENODEV);
+
rp = kzalloc(sizeof(struct rapl_package), GFP_KERNEL);
if (!rp)
return ERR_PTR(-ENOMEM);
{
struct ptp_clock *ptp = container_of(dev, struct ptp_clock, dev);
+ ptp_cleanup_pin_groups(ptp);
mutex_destroy(&ptp->tsevq_mux);
mutex_destroy(&ptp->pincfg_mux);
ida_simple_remove(&ptp_clocks_map, ptp->index);
if (ptp->pps_source)
pps_unregister_source(ptp->pps_source);
- ptp_cleanup_pin_groups(ptp);
-
posix_clock_unregister(&ptp->clock);
+
return 0;
}
EXPORT_SYMBOL(ptp_clock_unregister);
This driver can also be built as a module. If so, the module
will be called bd70528-regulator.
+config REGULATOR_BD71828
+ tristate "ROHM BD71828 Power Regulator"
+ depends on MFD_ROHM_BD71828
+ select REGULATOR_ROHM
+ help
+ This driver supports the voltage regulators on the ROHM BD71828
+ PMIC. It enables support for the software-controllable buck and
+ LDO regulators.
+
+ This driver can also be built as a module. If so, the module
+ will be called bd71828-regulator.
+
config REGULATOR_BD718XX
tristate "ROHM BD71837 Power Regulator"
depends on MFD_ROHM_BD718XX
through the regulator interface. In addition it enables
suspend-to-ram/standby transition.
+config REGULATOR_MP8859
+ tristate "MPS MP8859 regulator driver"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Say y here to support the MP8859 voltage regulator. This driver
+ supports basic operations (get/set voltage) through the regulator
+ interface.
+ Say M here if you want to include support for the regulator as a
+ module. The module will be named "mp8859".
+
+config REGULATOR_MPQ7920
+ tristate "Monolithic MPQ7920 PMIC"
+ depends on I2C && OF
+ select REGMAP_I2C
+ help
+ Say y here to support the MPQ7920 PMIC. This enables support for
+ the four software-controllable buck and five LDO regulators.
+ The driver allows the device's different power rails to be
+ controlled through the regulator interface.
+
config REGULATOR_MT6311
tristate "MediaTek MT6311 PMIC"
depends on I2C
This driver provides support for voltage regulators available
on the ARM Ltd's Versatile Express platform.
+config REGULATOR_VQMMC_IPQ4019
+ tristate "IPQ4019 VQMMC SD LDO regulator support"
+ depends on ARCH_QCOM
+ help
+ This driver provides support for the VQMMC LDO I/O
+ voltage regulator of the IPQ4019 SD/eMMC controller.
+
config REGULATOR_WM831X
tristate "Wolfson Microelectronics WM831x PMIC regulators"
depends on MFD_WM831X
obj-$(CONFIG_REGULATOR_AXP20X) += axp20x-regulator.o
obj-$(CONFIG_REGULATOR_BCM590XX) += bcm590xx-regulator.o
obj-$(CONFIG_REGULATOR_BD70528) += bd70528-regulator.o
+obj-$(CONFIG_REGULATOR_BD71828) += bd71828-regulator.o
obj-$(CONFIG_REGULATOR_BD718XX) += bd718x7-regulator.o
obj-$(CONFIG_REGULATOR_BD9571MWV) += bd9571mwv-regulator.o
obj-$(CONFIG_REGULATOR_DA903X) += da903x.o
obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o
obj-$(CONFIG_REGULATOR_MC13XXX_CORE) += mc13xxx-regulator-core.o
obj-$(CONFIG_REGULATOR_MCP16502) += mcp16502.o
+obj-$(CONFIG_REGULATOR_MP8859) += mp8859.o
+obj-$(CONFIG_REGULATOR_MPQ7920) += mpq7920.o
obj-$(CONFIG_REGULATOR_MT6311) += mt6311-regulator.o
obj-$(CONFIG_REGULATOR_MT6323) += mt6323-regulator.o
obj-$(CONFIG_REGULATOR_MT6358) += mt6358-regulator.o
obj-$(CONFIG_REGULATOR_UNIPHIER) += uniphier-regulator.o
obj-$(CONFIG_REGULATOR_VCTRL) += vctrl-regulator.o
obj-$(CONFIG_REGULATOR_VEXPRESS) += vexpress-regulator.o
+obj-$(CONFIG_REGULATOR_VQMMC_IPQ4019) += vqmmc-ipq4019-regulator.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-dcdc.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-isink.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-ldo.o
int i;
for (i = 0; i < rate_count; i++) {
- if (ramp <= slew_rates[i])
- cfg = AXP20X_DCDC2_LDO3_V_RAMP_LDO3_RATE(i);
- else
+ if (ramp > slew_rates[i])
break;
+
+ if (id == AXP20X_DCDC2)
+ cfg = AXP20X_DCDC2_LDO3_V_RAMP_DCDC2_RATE(i);
+ else
+ cfg = AXP20X_DCDC2_LDO3_V_RAMP_LDO3_RATE(i);
}
if (cfg == 0xff) {
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO1_MASK),
AXP_DESC(AXP22X, ELDO2, "eldo2", "eldoin", 700, 3300, 100,
AXP22X_ELDO2_V_OUT, AXP22X_ELDO2_V_OUT_MASK,
- AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO1_MASK),
+ AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO2_MASK),
AXP_DESC(AXP22X, ELDO3, "eldo3", "eldoin", 700, 3300, 100,
AXP22X_ELDO3_V_OUT, AXP22X_ELDO3_V_OUT_MASK,
AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO3_MASK),
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
- .set_ramp_delay = bd70528_set_ramp_delay,
};
static const struct regulator_ops bd70528_led_ops = {
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2019 ROHM Semiconductors
+// bd71828-regulator.c ROHM BD71828GW-DS1 regulator driver
+//
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/rohm-bd71828.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+
+struct reg_init {
+ unsigned int reg;
+ unsigned int mask;
+ unsigned int val;
+};
+struct bd71828_regulator_data {
+ struct regulator_desc desc;
+ const struct rohm_dvs_config dvs;
+ const struct reg_init *reg_inits;
+ int reg_init_amnt;
+};
+
+static const struct reg_init buck1_inits[] = {
+ /*
+ * DVS Buck voltages can be changed by register values or via GPIO.
+ * Use register accesses by default.
+ */
+ {
+ .reg = BD71828_REG_PS_CTRL_1,
+ .mask = BD71828_MASK_DVS_BUCK1_CTRL,
+ .val = BD71828_DVS_BUCK1_CTRL_I2C,
+ },
+};
+
+static const struct reg_init buck2_inits[] = {
+ {
+ .reg = BD71828_REG_PS_CTRL_1,
+ .mask = BD71828_MASK_DVS_BUCK2_CTRL,
+ .val = BD71828_DVS_BUCK2_CTRL_I2C,
+ },
+};
+
+static const struct reg_init buck6_inits[] = {
+ {
+ .reg = BD71828_REG_PS_CTRL_1,
+ .mask = BD71828_MASK_DVS_BUCK6_CTRL,
+ .val = BD71828_DVS_BUCK6_CTRL_I2C,
+ },
+};
+
+static const struct reg_init buck7_inits[] = {
+ {
+ .reg = BD71828_REG_PS_CTRL_1,
+ .mask = BD71828_MASK_DVS_BUCK7_CTRL,
+ .val = BD71828_DVS_BUCK7_CTRL_I2C,
+ },
+};
+
+static const struct regulator_linear_range bd71828_buck1267_volts[] = {
+ REGULATOR_LINEAR_RANGE(500000, 0x00, 0xef, 6250),
+ REGULATOR_LINEAR_RANGE(2000000, 0xf0, 0xff, 0),
+};
+
+static const struct regulator_linear_range bd71828_buck3_volts[] = {
+ REGULATOR_LINEAR_RANGE(1200000, 0x00, 0x0f, 50000),
+ REGULATOR_LINEAR_RANGE(2000000, 0x10, 0x1f, 0),
+};
+
+static const struct regulator_linear_range bd71828_buck4_volts[] = {
+ REGULATOR_LINEAR_RANGE(1000000, 0x00, 0x1f, 25000),
+ REGULATOR_LINEAR_RANGE(1800000, 0x20, 0x3f, 0),
+};
+
+static const struct regulator_linear_range bd71828_buck5_volts[] = {
+ REGULATOR_LINEAR_RANGE(2500000, 0x00, 0x0f, 50000),
+ REGULATOR_LINEAR_RANGE(3300000, 0x10, 0x1f, 0),
+};
+
+static const struct regulator_linear_range bd71828_ldo_volts[] = {
+ REGULATOR_LINEAR_RANGE(800000, 0x00, 0x31, 50000),
+ REGULATOR_LINEAR_RANGE(3300000, 0x32, 0x3f, 0),
+};
+
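+/*
+ * Map the requested ramp rate (in uV/us) to the two RAMP_DELAY bits:
+ * values 0..3 select roughly 2.5, 5, 10 and 20 mV/us, with the request
+ * rounded up to the nearest supported rate.
+ */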
+static int bd71828_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
+{
+ unsigned int val;
+
+ switch (ramp_delay) {
+ case 1 ... 2500:
+ val = 0;
+ break;
+ case 2501 ... 5000:
+ val = 1;
+ break;
+ case 5001 ... 10000:
+ val = 2;
+ break;
+ case 10001 ... 20000:
+ val = 3;
+ break;
+ default:
+ val = 3;
+ dev_err(&rdev->dev,
+ "ramp_delay: %d not supported, setting 20mV/uS",
+ ramp_delay);
+ }
+
+ /*
+ * On the BD71828 the ramp delay control register is located at
+ * offset +2 from the enable register.
+ */
+ return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg + 2,
+ BD71828_MASK_RAMP_DELAY,
+ val << (ffs(BD71828_MASK_RAMP_DELAY) - 1));
+}
+
+static int buck_set_hw_dvs_levels(struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regulator_config *cfg)
+{
+ struct bd71828_regulator_data *data;
+
+ data = container_of(desc, struct bd71828_regulator_data, desc);
+
+ return rohm_regulator_set_dvs_levels(&data->dvs, np, desc, cfg->regmap);
+}
+
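+/*
+ * LDO6 has a fixed output voltage, so the rohm,dvs-*-voltage properties
+ * act purely as per-state enable flags here: a non-zero voltage enables
+ * the rail for that state, zero disables it.
+ */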
+static int ldo6_parse_dt(struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regulator_config *cfg)
+{
+ int ret, i;
+ uint32_t uv = 0;
+ unsigned int en;
+ struct regmap *regmap = cfg->regmap;
+ static const char * const props[] = { "rohm,dvs-run-voltage",
+ "rohm,dvs-idle-voltage",
+ "rohm,dvs-suspend-voltage",
+ "rohm,dvs-lpsr-voltage" };
+ unsigned int mask[] = { BD71828_MASK_RUN_EN, BD71828_MASK_IDLE_EN,
+ BD71828_MASK_SUSP_EN, BD71828_MASK_LPSR_EN };
+
+ for (i = 0; i < ARRAY_SIZE(props); i++) {
+ ret = of_property_read_u32(np, props[i], &uv);
+ if (ret) {
+ if (ret != -EINVAL)
+ return ret;
+ continue;
+ }
+ if (uv)
+ en = 0xffffffff;
+ else
+ en = 0;
+
+ ret = regmap_update_bits(regmap, desc->enable_reg, mask[i], en);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static const struct regulator_ops bd71828_buck_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+};
+
+static const struct regulator_ops bd71828_dvs_buck_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .set_ramp_delay = bd71828_set_ramp_delay,
+};
+
+static const struct regulator_ops bd71828_ldo_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+};
+
+static const struct regulator_ops bd71828_ldo6_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+};
+
+static const struct bd71828_regulator_data bd71828_rdata[] = {
+ {
+ .desc = {
+ .name = "buck1",
+ .of_match = of_match_ptr("BUCK1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_BUCK1,
+ .ops = &bd71828_dvs_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_buck1267_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_buck1267_volts),
+ .n_voltages = BD71828_BUCK1267_VOLTS,
+ .enable_reg = BD71828_REG_BUCK1_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_BUCK1_VOLT,
+ .vsel_mask = BD71828_MASK_BUCK1267_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_BUCK1_VOLT,
+ .run_mask = BD71828_MASK_BUCK1267_VOLT,
+ .idle_reg = BD71828_REG_BUCK1_IDLE_VOLT,
+ .idle_mask = BD71828_MASK_BUCK1267_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_reg = BD71828_REG_BUCK1_SUSP_VOLT,
+ .suspend_mask = BD71828_MASK_BUCK1267_VOLT,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ /*
+ * The LPSR voltage is the same as the SUSPEND voltage.
+ * Allow setting it so that the regulator can be enabled
+ * in the LPSR state.
+ */
+ .lpsr_reg = BD71828_REG_BUCK1_SUSP_VOLT,
+ .lpsr_mask = BD71828_MASK_BUCK1267_VOLT,
+ },
+ .reg_inits = buck1_inits,
+ .reg_init_amnt = ARRAY_SIZE(buck1_inits),
+ },
+ {
+ .desc = {
+ .name = "buck2",
+ .of_match = of_match_ptr("BUCK2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_BUCK2,
+ .ops = &bd71828_dvs_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_buck1267_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_buck1267_volts),
+ .n_voltages = BD71828_BUCK1267_VOLTS,
+ .enable_reg = BD71828_REG_BUCK2_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_BUCK2_VOLT,
+ .vsel_mask = BD71828_MASK_BUCK1267_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_BUCK2_VOLT,
+ .run_mask = BD71828_MASK_BUCK1267_VOLT,
+ .idle_reg = BD71828_REG_BUCK2_IDLE_VOLT,
+ .idle_mask = BD71828_MASK_BUCK1267_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_reg = BD71828_REG_BUCK2_SUSP_VOLT,
+ .suspend_mask = BD71828_MASK_BUCK1267_VOLT,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ .lpsr_reg = BD71828_REG_BUCK2_SUSP_VOLT,
+ .lpsr_mask = BD71828_MASK_BUCK1267_VOLT,
+ },
+ .reg_inits = buck2_inits,
+ .reg_init_amnt = ARRAY_SIZE(buck2_inits),
+ },
+ {
+ .desc = {
+ .name = "buck3",
+ .of_match = of_match_ptr("BUCK3"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_BUCK3,
+ .ops = &bd71828_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_buck3_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_buck3_volts),
+ .n_voltages = BD71828_BUCK3_VOLTS,
+ .enable_reg = BD71828_REG_BUCK3_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_BUCK3_VOLT,
+ .vsel_mask = BD71828_MASK_BUCK3_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ /*
+ * BUCK3 uses a single voltage for all states, but the
+ * rail can be enabled or disabled per state. Allow
+ * setting all states so the rail can be enabled in
+ * each of them.
+ */
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_BUCK3_VOLT,
+ .idle_reg = BD71828_REG_BUCK3_VOLT,
+ .suspend_reg = BD71828_REG_BUCK3_VOLT,
+ .lpsr_reg = BD71828_REG_BUCK3_VOLT,
+ .run_mask = BD71828_MASK_BUCK3_VOLT,
+ .idle_mask = BD71828_MASK_BUCK3_VOLT,
+ .suspend_mask = BD71828_MASK_BUCK3_VOLT,
+ .lpsr_mask = BD71828_MASK_BUCK3_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ },
+ },
+ {
+ .desc = {
+ .name = "buck4",
+ .of_match = of_match_ptr("BUCK4"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_BUCK4,
+ .ops = &bd71828_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_buck4_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_buck4_volts),
+ .n_voltages = BD71828_BUCK4_VOLTS,
+ .enable_reg = BD71828_REG_BUCK4_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_BUCK4_VOLT,
+ .vsel_mask = BD71828_MASK_BUCK4_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ /*
+ * BUCK4 uses a single voltage for all states, but the
+ * rail can be enabled or disabled per state. Allow
+ * setting all states so the rail can be enabled in
+ * each of them.
+ */
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_BUCK4_VOLT,
+ .idle_reg = BD71828_REG_BUCK4_VOLT,
+ .suspend_reg = BD71828_REG_BUCK4_VOLT,
+ .lpsr_reg = BD71828_REG_BUCK4_VOLT,
+ .run_mask = BD71828_MASK_BUCK4_VOLT,
+ .idle_mask = BD71828_MASK_BUCK4_VOLT,
+ .suspend_mask = BD71828_MASK_BUCK4_VOLT,
+ .lpsr_mask = BD71828_MASK_BUCK4_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ },
+ },
+ {
+ .desc = {
+ .name = "buck5",
+ .of_match = of_match_ptr("BUCK5"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_BUCK5,
+ .ops = &bd71828_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_buck5_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_buck5_volts),
+ .n_voltages = BD71828_BUCK5_VOLTS,
+ .enable_reg = BD71828_REG_BUCK5_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_BUCK5_VOLT,
+ .vsel_mask = BD71828_MASK_BUCK5_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ /*
+ * BUCK5 uses a single voltage for all states, but the
+ * rail can be enabled or disabled per state. Allow
+ * setting all states so the rail can be enabled in
+ * each of them.
+ */
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_BUCK5_VOLT,
+ .idle_reg = BD71828_REG_BUCK5_VOLT,
+ .suspend_reg = BD71828_REG_BUCK5_VOLT,
+ .lpsr_reg = BD71828_REG_BUCK5_VOLT,
+ .run_mask = BD71828_MASK_BUCK5_VOLT,
+ .idle_mask = BD71828_MASK_BUCK5_VOLT,
+ .suspend_mask = BD71828_MASK_BUCK5_VOLT,
+ .lpsr_mask = BD71828_MASK_BUCK5_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ },
+ },
+ {
+ .desc = {
+ .name = "buck6",
+ .of_match = of_match_ptr("BUCK6"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_BUCK6,
+ .ops = &bd71828_dvs_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_buck1267_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_buck1267_volts),
+ .n_voltages = BD71828_BUCK1267_VOLTS,
+ .enable_reg = BD71828_REG_BUCK6_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_BUCK6_VOLT,
+ .vsel_mask = BD71828_MASK_BUCK1267_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_BUCK6_VOLT,
+ .run_mask = BD71828_MASK_BUCK1267_VOLT,
+ .idle_reg = BD71828_REG_BUCK6_IDLE_VOLT,
+ .idle_mask = BD71828_MASK_BUCK1267_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_reg = BD71828_REG_BUCK6_SUSP_VOLT,
+ .suspend_mask = BD71828_MASK_BUCK1267_VOLT,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ .lpsr_reg = BD71828_REG_BUCK6_SUSP_VOLT,
+ .lpsr_mask = BD71828_MASK_BUCK1267_VOLT,
+ },
+ .reg_inits = buck6_inits,
+ .reg_init_amnt = ARRAY_SIZE(buck6_inits),
+ },
+ {
+ .desc = {
+ .name = "buck7",
+ .of_match = of_match_ptr("BUCK7"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_BUCK7,
+ .ops = &bd71828_dvs_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_buck1267_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_buck1267_volts),
+ .n_voltages = BD71828_BUCK1267_VOLTS,
+ .enable_reg = BD71828_REG_BUCK7_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_BUCK7_VOLT,
+ .vsel_mask = BD71828_MASK_BUCK1267_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_BUCK7_VOLT,
+ .run_mask = BD71828_MASK_BUCK1267_VOLT,
+ .idle_reg = BD71828_REG_BUCK7_IDLE_VOLT,
+ .idle_mask = BD71828_MASK_BUCK1267_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_reg = BD71828_REG_BUCK7_SUSP_VOLT,
+ .suspend_mask = BD71828_MASK_BUCK1267_VOLT,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ .lpsr_reg = BD71828_REG_BUCK7_SUSP_VOLT,
+ .lpsr_mask = BD71828_MASK_BUCK1267_VOLT,
+ },
+ .reg_inits = buck7_inits,
+ .reg_init_amnt = ARRAY_SIZE(buck7_inits),
+ },
+ {
+ .desc = {
+ .name = "ldo1",
+ .of_match = of_match_ptr("LDO1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_LDO1,
+ .ops = &bd71828_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_ldo_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts),
+ .n_voltages = BD71828_LDO_VOLTS,
+ .enable_reg = BD71828_REG_LDO1_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_LDO1_VOLT,
+ .vsel_mask = BD71828_MASK_LDO_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ /*
+ * LDO1 uses a single voltage for all states, but the
+ * rail can be enabled or disabled per state. Allow
+ * setting all states so the rail can be enabled in
+ * each of them.
+ */
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_LDO1_VOLT,
+ .idle_reg = BD71828_REG_LDO1_VOLT,
+ .suspend_reg = BD71828_REG_LDO1_VOLT,
+ .lpsr_reg = BD71828_REG_LDO1_VOLT,
+ .run_mask = BD71828_MASK_LDO_VOLT,
+ .idle_mask = BD71828_MASK_LDO_VOLT,
+ .suspend_mask = BD71828_MASK_LDO_VOLT,
+ .lpsr_mask = BD71828_MASK_LDO_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ },
+ }, {
+ .desc = {
+ .name = "ldo2",
+ .of_match = of_match_ptr("LDO2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_LDO2,
+ .ops = &bd71828_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_ldo_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts),
+ .n_voltages = BD71828_LDO_VOLTS,
+ .enable_reg = BD71828_REG_LDO2_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_LDO2_VOLT,
+ .vsel_mask = BD71828_MASK_LDO_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ /*
+ * LDO2 uses a single voltage for all states, but the
+ * rail can be enabled or disabled per state. Allow
+ * setting all states so the rail can be enabled in
+ * each of them.
+ */
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_LDO2_VOLT,
+ .idle_reg = BD71828_REG_LDO2_VOLT,
+ .suspend_reg = BD71828_REG_LDO2_VOLT,
+ .lpsr_reg = BD71828_REG_LDO2_VOLT,
+ .run_mask = BD71828_MASK_LDO_VOLT,
+ .idle_mask = BD71828_MASK_LDO_VOLT,
+ .suspend_mask = BD71828_MASK_LDO_VOLT,
+ .lpsr_mask = BD71828_MASK_LDO_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ },
+ }, {
+ .desc = {
+ .name = "ldo3",
+ .of_match = of_match_ptr("LDO3"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_LDO3,
+ .ops = &bd71828_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_ldo_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts),
+ .n_voltages = BD71828_LDO_VOLTS,
+ .enable_reg = BD71828_REG_LDO3_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_LDO3_VOLT,
+ .vsel_mask = BD71828_MASK_LDO_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ /*
+ * LDO3 uses a single voltage for all states, but the
+ * rail can be enabled or disabled per state. Allow
+ * setting all states so the rail can be enabled in
+ * each of them.
+ */
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_LDO3_VOLT,
+ .idle_reg = BD71828_REG_LDO3_VOLT,
+ .suspend_reg = BD71828_REG_LDO3_VOLT,
+ .lpsr_reg = BD71828_REG_LDO3_VOLT,
+ .run_mask = BD71828_MASK_LDO_VOLT,
+ .idle_mask = BD71828_MASK_LDO_VOLT,
+ .suspend_mask = BD71828_MASK_LDO_VOLT,
+ .lpsr_mask = BD71828_MASK_LDO_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ },
+
+ }, {
+ .desc = {
+ .name = "ldo4",
+ .of_match = of_match_ptr("LDO4"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_LDO4,
+ .ops = &bd71828_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_ldo_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts),
+ .n_voltages = BD71828_LDO_VOLTS,
+ .enable_reg = BD71828_REG_LDO4_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_LDO4_VOLT,
+ .vsel_mask = BD71828_MASK_LDO_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ /*
+ * LDO4 uses a single voltage for all states, but the
+ * rail can be enabled or disabled per state. Allow
+ * setting all states so the rail can be enabled in
+ * each of them.
+ */
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_LDO4_VOLT,
+ .idle_reg = BD71828_REG_LDO4_VOLT,
+ .suspend_reg = BD71828_REG_LDO4_VOLT,
+ .lpsr_reg = BD71828_REG_LDO4_VOLT,
+ .run_mask = BD71828_MASK_LDO_VOLT,
+ .idle_mask = BD71828_MASK_LDO_VOLT,
+ .suspend_mask = BD71828_MASK_LDO_VOLT,
+ .lpsr_mask = BD71828_MASK_LDO_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ },
+ }, {
+ .desc = {
+ .name = "ldo5",
+ .of_match = of_match_ptr("LDO5"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_LDO5,
+ .ops = &bd71828_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_ldo_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts),
+ .n_voltages = BD71828_LDO_VOLTS,
+ .enable_reg = BD71828_REG_LDO5_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_LDO5_VOLT,
+ .vsel_mask = BD71828_MASK_LDO_VOLT,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ .owner = THIS_MODULE,
+ },
+ /*
+ * LDO5 is special: its vsel setting can be taken from either
+ * of two registers, selected by GPIO.
+ *
+ * This driver only supports the configuration where
+ * BD71828_REG_LDO5_VOLT_L is used.
+ */
+ .dvs = {
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_LDO5_VOLT,
+ .idle_reg = BD71828_REG_LDO5_VOLT,
+ .suspend_reg = BD71828_REG_LDO5_VOLT,
+ .lpsr_reg = BD71828_REG_LDO5_VOLT,
+ .run_mask = BD71828_MASK_LDO_VOLT,
+ .idle_mask = BD71828_MASK_LDO_VOLT,
+ .suspend_mask = BD71828_MASK_LDO_VOLT,
+ .lpsr_mask = BD71828_MASK_LDO_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ },
+
+ }, {
+ .desc = {
+ .name = "ldo6",
+ .of_match = of_match_ptr("LDO6"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_LDO6,
+ .ops = &bd71828_ldo6_ops,
+ .type = REGULATOR_VOLTAGE,
+ .fixed_uV = BD71828_LDO_6_VOLTAGE,
+ .n_voltages = 1,
+ .enable_reg = BD71828_REG_LDO6_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .owner = THIS_MODULE,
+ /*
+ * LDO6 only supports enable/disable for all states.
+ * Voltage for LDO6 is fixed.
+ */
+ .of_parse_cb = ldo6_parse_dt,
+ },
+ }, {
+ .desc = {
+ /* SNVS LDO in data-sheet */
+ .name = "ldo7",
+ .of_match = of_match_ptr("LDO7"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD71828_LDO_SNVS,
+ .ops = &bd71828_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd71828_ldo_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts),
+ .n_voltages = BD71828_LDO_VOLTS,
+ .enable_reg = BD71828_REG_LDO7_EN,
+ .enable_mask = BD71828_MASK_RUN_EN,
+ .vsel_reg = BD71828_REG_LDO7_VOLT,
+ .vsel_mask = BD71828_MASK_LDO_VOLT,
+ .owner = THIS_MODULE,
+ .of_parse_cb = buck_set_hw_dvs_levels,
+ },
+ .dvs = {
+ /*
+ * LDO7 uses a single voltage for all states, but the
+ * rail can be enabled or disabled per state. Allow
+ * setting all states so the rail can be enabled in
+ * each of them.
+ */
+ .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE |
+ ROHM_DVS_LEVEL_SUSPEND |
+ ROHM_DVS_LEVEL_LPSR,
+ .run_reg = BD71828_REG_LDO7_VOLT,
+ .idle_reg = BD71828_REG_LDO7_VOLT,
+ .suspend_reg = BD71828_REG_LDO7_VOLT,
+ .lpsr_reg = BD71828_REG_LDO7_VOLT,
+ .run_mask = BD71828_MASK_LDO_VOLT,
+ .idle_mask = BD71828_MASK_LDO_VOLT,
+ .suspend_mask = BD71828_MASK_LDO_VOLT,
+ .lpsr_mask = BD71828_MASK_LDO_VOLT,
+ .idle_on_mask = BD71828_MASK_IDLE_EN,
+ .suspend_on_mask = BD71828_MASK_SUSP_EN,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ },
+
+ },
+};
+
+static int bd71828_probe(struct platform_device *pdev)
+{
+ struct rohm_regmap_dev *bd71828;
+ int i, j, ret;
+ struct regulator_config config = {
+ .dev = pdev->dev.parent,
+ };
+
+ bd71828 = dev_get_drvdata(pdev->dev.parent);
+ if (!bd71828) {
+ dev_err(&pdev->dev, "No MFD driver data\n");
+ return -EINVAL;
+ }
+
+ config.regmap = bd71828->regmap;
+
+ for (i = 0; i < ARRAY_SIZE(bd71828_rdata); i++) {
+ struct regulator_dev *rdev;
+ const struct bd71828_regulator_data *rd;
+
+ rd = &bd71828_rdata[i];
+ rdev = devm_regulator_register(&pdev->dev,
+ &rd->desc, &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev,
+ "failed to register %s regulator\n",
+ rd->desc.name);
+ return PTR_ERR(rdev);
+ }
+ for (j = 0; j < rd->reg_init_amnt; j++) {
+ ret = regmap_update_bits(bd71828->regmap,
+ rd->reg_inits[j].reg,
+ rd->reg_inits[j].mask,
+ rd->reg_inits[j].val);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "regulator %s init failed\n",
+ rd->desc.name);
+ return ret;
+ }
+ }
+ }
+ return 0;
+}
+
+static struct platform_driver bd71828_regulator = {
+ .driver = {
+ .name = "bd71828-pmic"
+ },
+ .probe = bd71828_probe,
+};
+
+module_platform_driver(bd71828_regulator);
+
+MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
+MODULE_DESCRIPTION("BD71828 voltage regulator driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:bd71828-pmic");
},
};
-struct bd718xx_pmic_inits {
- const struct bd718xx_regulator_data *r_datas;
- unsigned int r_amount;
-};
-
static int bd718xx_probe(struct platform_device *pdev)
{
struct bd718xx *mfd;
struct regulator_config config = { 0 };
- struct bd718xx_pmic_inits pmic_regulators[ROHM_CHIP_TYPE_AMOUNT] = {
- [ROHM_CHIP_TYPE_BD71837] = {
- .r_datas = bd71837_regulators,
- .r_amount = ARRAY_SIZE(bd71837_regulators),
- },
- [ROHM_CHIP_TYPE_BD71847] = {
- .r_datas = bd71847_regulators,
- .r_amount = ARRAY_SIZE(bd71847_regulators),
- },
- };
-
int i, j, err;
bool use_snvs;
+ const struct bd718xx_regulator_data *reg_data;
+ unsigned int num_reg_data;
mfd = dev_get_drvdata(pdev->dev.parent);
if (!mfd) {
goto err;
}
- if (mfd->chip.chip_type >= ROHM_CHIP_TYPE_AMOUNT ||
- !pmic_regulators[mfd->chip.chip_type].r_datas) {
+ switch (mfd->chip.chip_type) {
+ case ROHM_CHIP_TYPE_BD71837:
+ reg_data = bd71837_regulators;
+ num_reg_data = ARRAY_SIZE(bd71837_regulators);
+ break;
+ case ROHM_CHIP_TYPE_BD71847:
+ reg_data = bd71847_regulators;
+ num_reg_data = ARRAY_SIZE(bd71847_regulators);
+ break;
+ default:
dev_err(&pdev->dev, "Unsupported chip type\n");
err = -EINVAL;
goto err;
}
}
- for (i = 0; i < pmic_regulators[mfd->chip.chip_type].r_amount; i++) {
+ for (i = 0; i < num_reg_data; i++) {
const struct regulator_desc *desc;
struct regulator_dev *rdev;
const struct bd718xx_regulator_data *r;
- r = &pmic_regulators[mfd->chip.chip_type].r_datas[i];
+ r = &reg_data[i];
desc = &r->desc;
config.dev = pdev->dev.parent;
out:
return ret;
}
+EXPORT_SYMBOL_GPL(regulator_set_voltage_rdev);
static int regulator_limit_voltage_step(struct regulator_dev *rdev,
int *current_uV, int *min_uV)
return ret;
return ret - rdev->constraints->uV_offset;
}
+EXPORT_SYMBOL_GPL(regulator_get_voltage_rdev);
/**
* regulator_get_voltage - get regulator output voltage
};
MODULE_DEVICE_TABLE(of, da9210_dt_ids);
-static int da9210_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int da9210_i2c_probe(struct i2c_client *i2c)
{
struct da9210 *chip;
struct device *dev = &i2c->dev;
.name = "da9210",
.of_match_table = of_match_ptr(da9210_dt_ids),
},
- .probe = da9210_i2c_probe,
+ .probe_new = da9210_i2c_probe,
.id_table = da9210_i2c_id,
};
/*
* I2C driver interface functions
*/
-static int da9211_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int da9211_i2c_probe(struct i2c_client *i2c)
{
struct da9211 *chip;
int error, ret;
.name = "da9211",
.of_match_table = of_match_ptr(da9211_dt_ids),
},
- .probe = da9211_i2c_probe,
+ .probe_new = da9211_i2c_probe,
.id_table = da9211_i2c_id,
};
#include <linux/regulator/driver.h>
#include <linux/module.h>
+#include "internal.h"
+
/**
* regulator_is_enabled_regmap - standard is_enabled() for regmap users
*
consumers[i].supply = supply_names[i];
}
EXPORT_SYMBOL_GPL(regulator_bulk_set_supply_names);
+
+/**
+ * regulator_is_equal - test whether two regulators are the same
+ *
+ * @reg1: first regulator to operate on
+ * @reg2: second regulator to operate on
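+ *
+ * Return: true if the two consumers reference the same underlying
+ * regulator device.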
+ */
+bool regulator_is_equal(struct regulator *reg1, struct regulator *reg2)
+{
+ return reg1->rdev == reg2->rdev;
+}
+EXPORT_SYMBOL_GPL(regulator_is_equal);
.cache_type = REGCACHE_RBTREE,
};
-static int isl9305_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int isl9305_i2c_probe(struct i2c_client *i2c)
{
struct regulator_config config = { };
struct isl9305_pdata *pdata = i2c->dev.platform_data;
.name = "isl9305",
.of_match_table = of_match_ptr(isl9305_dt_ids),
},
- .probe = isl9305_i2c_probe,
+ .probe_new = isl9305_i2c_probe,
.id_table = isl9305_i2c_id,
};
return 0;
}
-static int lp3971_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int lp3971_i2c_probe(struct i2c_client *i2c)
{
struct lp3971 *lp3971;
struct lp3971_platform_data *pdata = dev_get_platdata(&i2c->dev);
.driver = {
.name = "LP3971",
},
- .probe = lp3971_i2c_probe,
+ .probe_new = lp3971_i2c_probe,
.id_table = lp3971_i2c_id,
};
return IRQ_HANDLED;
}
-static int ltc3676_regulator_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ltc3676_regulator_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct regulator_init_data *init_data = dev_get_platdata(dev);
.name = DRIVER_NAME,
.of_match_table = of_match_ptr(ltc3676_of_match),
},
- .probe = ltc3676_regulator_probe,
+ .probe_new = ltc3676_regulator_probe,
.id_table = ltc3676_i2c_id,
};
module_i2c_driver(ltc3676_driver);
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2019 five technologies GmbH
+// Author: Markus Reichl <m.reichl@fivetechno.de>
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/of.h>
+#include <linux/regulator/driver.h>
+#include <linux/regmap.h>
+
+
+#define VOL_MIN_IDX 0x00
+#define VOL_MAX_IDX 0x7ff
+
+/* Register definitions */
+#define MP8859_VOUT_L_REG 0 // low 3 bits
+#define MP8859_VOUT_H_REG 1 // high 8 bits
+#define MP8859_VOUT_GO_REG 2
+#define MP8859_IOUT_LIM_REG 3
+#define MP8859_CTL1_REG 4
+#define MP8859_CTL2_REG 5
+#define MP8859_RESERVED1_REG 6
+#define MP8859_RESERVED2_REG 7
+#define MP8859_RESERVED3_REG 8
+#define MP8859_STATUS_REG 9
+#define MP8859_INTERRUPT_REG 0x0A
+#define MP8859_MASK_REG 0x0B
+#define MP8859_ID1_REG 0x0C
+#define MP8859_MFR_ID_REG 0x27
+#define MP8859_DEV_ID_REG 0x28
+#define MP8859_IC_REV_REG 0x29
+
+#define MP8859_MAX_REG 0x29
+
+#define MP8859_GO_BIT 0x01
+
+
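+/*
+ * The 11-bit voltage selector (0x000..0x7ff) is split across two
+ * registers: VOUT_L holds bits [2:0] and VOUT_H holds bits [10:3].
+ * A new setting only takes effect once the GO bit is written.
+ */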
+static int mp8859_set_voltage_sel(struct regulator_dev *rdev, unsigned int sel)
+{
+ int ret;
+
+ ret = regmap_write(rdev->regmap, MP8859_VOUT_L_REG, sel & 0x7);
+
+ if (ret)
+ return ret;
+ ret = regmap_write(rdev->regmap, MP8859_VOUT_H_REG, sel >> 3);
+
+ if (ret)
+ return ret;
+ ret = regmap_update_bits(rdev->regmap, MP8859_VOUT_GO_REG,
+ MP8859_GO_BIT, 1);
+ return ret;
+}
+
+static int mp8859_get_voltage_sel(struct regulator_dev *rdev)
+{
+ unsigned int val_tmp;
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(rdev->regmap, MP8859_VOUT_H_REG, &val_tmp);
+
+ if (ret)
+ return ret;
+ val = val_tmp << 3;
+
+ ret = regmap_read(rdev->regmap, MP8859_VOUT_L_REG, &val_tmp);
+
+ if (ret)
+ return ret;
+ val |= val_tmp & 0x07;
+ return val;
+}
+
+static const struct regulator_linear_range mp8859_dcdc_ranges[] = {
+ REGULATOR_LINEAR_RANGE(0, VOL_MIN_IDX, VOL_MAX_IDX, 10000),
+};
+
+static const struct regmap_config mp8859_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = MP8859_MAX_REG,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static const struct regulator_ops mp8859_ops = {
+ .set_voltage_sel = mp8859_set_voltage_sel,
+ .get_voltage_sel = mp8859_get_voltage_sel,
+ .list_voltage = regulator_list_voltage_linear_range,
+};
+
+static const struct regulator_desc mp8859_regulators[] = {
+ {
+ .id = 0,
+ .type = REGULATOR_VOLTAGE,
+ .name = "mp8859_dcdc",
+ .of_match = of_match_ptr("mp8859_dcdc"),
+ .n_voltages = VOL_MAX_IDX + 1,
+ .linear_ranges = mp8859_dcdc_ranges,
+ .n_linear_ranges = 1,
+ .ops = &mp8859_ops,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int mp8859_i2c_probe(struct i2c_client *i2c)
+{
+ int ret;
+ struct regulator_config config = {.dev = &i2c->dev};
+ struct regmap *regmap = devm_regmap_init_i2c(i2c, &mp8859_regmap);
+ struct regulator_dev *rdev;
+
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ dev_err(&i2c->dev, "regmap init failed: %d\n", ret);
+ return ret;
+ }
+ rdev = devm_regulator_register(&i2c->dev, &mp8859_regulators[0],
+ &config);
+
+ if (IS_ERR(rdev)) {
+ ret = PTR_ERR(rdev);
+ dev_err(&i2c->dev, "failed to register %s: %d\n",
+ mp8859_regulators[0].name, ret);
+ return ret;
+ }
+ return 0;
+}
+
+static const struct of_device_id mp8859_dt_id[] = {
+ {.compatible = "mps,mp8859"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, mp8859_dt_id);
+
+static const struct i2c_device_id mp8859_i2c_id[] = {
+ { "mp8859", },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, mp8859_i2c_id);
+
+static struct i2c_driver mp8859_regulator_driver = {
+ .driver = {
+ .name = "mp8859",
+ .of_match_table = of_match_ptr(mp8859_dt_id),
+ },
+ .probe_new = mp8859_i2c_probe,
+ .id_table = mp8859_i2c_id,
+};
+
+module_i2c_driver(mp8859_regulator_driver);
+
+MODULE_DESCRIPTION("Monolithic Power Systems MP8859 voltage regulator driver");
+MODULE_AUTHOR("Markus Reichl <m.reichl@fivetechno.de>");
+MODULE_LICENSE("GPL v2");
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0+
+//
+// mpq7920.c - regulator driver for mps mpq7920
+//
+// Copyright 2019 Monolithic Power Systems, Inc
+//
+// Author: Saravanan Sekar <sravanhome@gmail.com>
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include "mpq7920.h"
+
+#define MPQ7920_BUCK_VOLT_RANGE \
+ ((MPQ7920_VOLT_MAX - MPQ7920_BUCK_VOLT_MIN)/MPQ7920_VOLT_STEP + 1)
+#define MPQ7920_LDO_VOLT_RANGE \
+ ((MPQ7920_VOLT_MAX - MPQ7920_LDO_VOLT_MIN)/MPQ7920_VOLT_STEP + 1)
+
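+/*
+ * Descriptor generator macros. Enable bits for all rails share
+ * MPQ7920_REG_REGULATOR_EN and are packed from the MSB down, indexed
+ * by the regulator's position in enum mpq7920_regulators (the
+ * always-on LDORTC has no enable bit).
+ */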
+#define MPQ7920BUCK(_name, _id, _ilim) \
+ [MPQ7920_BUCK ## _id] = { \
+ .id = MPQ7920_BUCK ## _id, \
+ .name = _name, \
+ .of_match = _name, \
+ .regulators_node = "regulators", \
+ .of_parse_cb = mpq7920_parse_cb, \
+ .ops = &mpq7920_buck_ops, \
+ .min_uV = MPQ7920_BUCK_VOLT_MIN, \
+ .uV_step = MPQ7920_VOLT_STEP, \
+ .n_voltages = MPQ7920_BUCK_VOLT_RANGE, \
+ .curr_table = _ilim, \
+ .n_current_limits = ARRAY_SIZE(_ilim), \
+ .csel_reg = MPQ7920_BUCK ##_id## _REG_C, \
+ .csel_mask = MPQ7920_MASK_BUCK_ILIM, \
+ .enable_reg = MPQ7920_REG_REGULATOR_EN, \
+ .enable_mask = BIT(MPQ7920_REGULATOR_EN_OFFSET - \
+ MPQ7920_BUCK ## _id), \
+ .vsel_reg = MPQ7920_BUCK ##_id## _REG_A, \
+ .vsel_mask = MPQ7920_MASK_VREF, \
+ .active_discharge_on = MPQ7920_DISCHARGE_ON, \
+ .active_discharge_reg = MPQ7920_BUCK ##_id## _REG_B, \
+ .active_discharge_mask = MPQ7920_MASK_DISCHARGE, \
+ .soft_start_reg = MPQ7920_BUCK ##_id## _REG_C, \
+ .soft_start_mask = MPQ7920_MASK_SOFTSTART, \
+ .owner = THIS_MODULE, \
+ }
+
+#define MPQ7920LDO(_name, _id, _ops, _ilim, _ilim_sz, _creg, _cmask) \
+ [MPQ7920_LDO ## _id] = { \
+ .id = MPQ7920_LDO ## _id, \
+ .name = _name, \
+ .of_match = _name, \
+ .regulators_node = "regulators", \
+ .ops = _ops, \
+ .min_uV = MPQ7920_LDO_VOLT_MIN, \
+ .uV_step = MPQ7920_VOLT_STEP, \
+ .n_voltages = MPQ7920_LDO_VOLT_RANGE, \
+ .vsel_reg = MPQ7920_LDO ##_id## _REG_A, \
+ .vsel_mask = MPQ7920_MASK_VREF, \
+ .curr_table = _ilim, \
+ .n_current_limits = _ilim_sz, \
+ .csel_reg = _creg, \
+ .csel_mask = _cmask, \
+ .enable_reg = (_id == 1) ? 0 : MPQ7920_REG_REGULATOR_EN,\
+ .enable_mask = BIT(MPQ7920_REGULATOR_EN_OFFSET - \
+ MPQ7920_LDO ##_id + 1), \
+ .active_discharge_on = MPQ7920_DISCHARGE_ON, \
+ .active_discharge_mask = MPQ7920_MASK_DISCHARGE, \
+ .active_discharge_reg = MPQ7920_LDO ##_id## _REG_B, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ }
+
+enum mpq7920_regulators {
+ MPQ7920_BUCK1,
+ MPQ7920_BUCK2,
+ MPQ7920_BUCK3,
+ MPQ7920_BUCK4,
+ MPQ7920_LDO1, /* LDORTC */
+ MPQ7920_LDO2,
+ MPQ7920_LDO3,
+ MPQ7920_LDO4,
+ MPQ7920_LDO5,
+ MPQ7920_MAX_REGULATORS,
+};
+
+struct mpq7920_regulator_info {
+ struct regmap *regmap;
+ struct regulator_desc *rdesc;
+};
+
+static const struct regmap_config mpq7920_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x25,
+};
+
+/*
+ * Current limit tables (in uA)
+ * ILIM1 & ILIM3
+ */
+static const unsigned int mpq7920_I_limits1[] = {
+ 4600000, 6600000, 7600000, 9300000
+};
+
+/* ILIM2 & ILIM4 */
+static const unsigned int mpq7920_I_limits2[] = {
+ 2700000, 3900000, 5100000, 6100000
+};
+
+/* LDO4 & LDO5 */
+static const unsigned int mpq7920_I_limits3[] = {
+ 300000, 700000
+};
+
+static int mpq7920_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay);
+static int mpq7920_parse_cb(struct device_node *np,
+ const struct regulator_desc *rdesc,
+ struct regulator_config *config);
+
+/* RTCLDO not controllable, always ON */
+static const struct regulator_ops mpq7920_ldortc_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+};
+
+static const struct regulator_ops mpq7920_ldo_wo_current_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_active_discharge = regulator_set_active_discharge_regmap,
+};
+
+static const struct regulator_ops mpq7920_ldo_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_active_discharge = regulator_set_active_discharge_regmap,
+ .get_current_limit = regulator_get_current_limit_regmap,
+ .set_current_limit = regulator_set_current_limit_regmap,
+};
+
+static const struct regulator_ops mpq7920_buck_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_active_discharge = regulator_set_active_discharge_regmap,
+ .set_soft_start = regulator_set_soft_start_regmap,
+ .set_ramp_delay = mpq7920_set_ramp_delay,
+};
+
+static struct regulator_desc mpq7920_regulators_desc[MPQ7920_MAX_REGULATORS] = {
+ MPQ7920BUCK("buck1", 1, mpq7920_I_limits1),
+ MPQ7920BUCK("buck2", 2, mpq7920_I_limits2),
+ MPQ7920BUCK("buck3", 3, mpq7920_I_limits1),
+ MPQ7920BUCK("buck4", 4, mpq7920_I_limits2),
+ MPQ7920LDO("ldortc", 1, &mpq7920_ldortc_ops, NULL, 0, 0, 0),
+ MPQ7920LDO("ldo2", 2, &mpq7920_ldo_wo_current_ops, NULL, 0, 0, 0),
+ MPQ7920LDO("ldo3", 3, &mpq7920_ldo_wo_current_ops, NULL, 0, 0, 0),
+ MPQ7920LDO("ldo4", 4, &mpq7920_ldo_ops, mpq7920_I_limits3,
+ ARRAY_SIZE(mpq7920_I_limits3), MPQ7920_LDO4_REG_B,
+ MPQ7920_MASK_LDO_ILIM),
+ MPQ7920LDO("ldo5", 5, &mpq7920_ldo_ops, mpq7920_I_limits3,
+ ARRAY_SIZE(mpq7920_I_limits3), MPQ7920_LDO5_REG_B,
+ MPQ7920_MASK_LDO_ILIM),
+};
+
+/*
+ * DVS ramp rate BUCK1 to BUCK4
+ * 00-01: Reserved
+ * 10: 8mV/us
+ * 11: 4mV/us
+ */
+static int mpq7920_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
+{
+ unsigned int ramp_val;
+
+ if (ramp_delay > 8000 || ramp_delay < 0)
+ return -EINVAL;
+
+ if (ramp_delay <= 4000)
+ ramp_val = 3;
+ else
+ ramp_val = 2;
+
+ return regmap_update_bits(rdev->regmap, MPQ7920_REG_CTL0,
+ MPQ7920_MASK_DVS_SLEWRATE, ramp_val << 6);
+}
+
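+/*
+ * Each buck has a bank of four registers (REG_A..REG_D), so
+ * buck-specific fields live at the BUCK1 register plus id * 4.
+ */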
+static int mpq7920_parse_cb(struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regulator_config *config)
+{
+ uint8_t val;
+ int ret;
+ struct mpq7920_regulator_info *info = config->driver_data;
+ struct regulator_desc *rdesc = &info->rdesc[desc->id];
+
+ if (of_property_read_bool(np, "mps,buck-ovp-disable")) {
+ regmap_update_bits(config->regmap,
+ MPQ7920_BUCK1_REG_B + (rdesc->id * 4),
+ MPQ7920_MASK_OVP, MPQ7920_OVP_DISABLE);
+ }
+
+ ret = of_property_read_u8(np, "mps,buck-phase-delay", &val);
+ if (!ret) {
+ regmap_update_bits(config->regmap,
+ MPQ7920_BUCK1_REG_C + (rdesc->id * 4),
+ MPQ7920_MASK_BUCK_PHASE_DELAY,
+ (val & 3) << 4);
+ }
+
+ ret = of_property_read_u8(np, "mps,buck-softstart", &val);
+ if (!ret)
+ rdesc->soft_start_val_on = (val & 3) << 2;
+
+ return 0;
+}
+
+static void mpq7920_parse_dt(struct device *dev,
+ struct mpq7920_regulator_info *info)
+{
+ int ret;
+ struct device_node *np = dev->of_node;
+ uint8_t freq;
+
+ np = of_get_child_by_name(np, "regulators");
+ if (!np) {
+ dev_err(dev, "missing 'regulators' subnode in DT\n");
+ return;
+ }
+
+ ret = of_property_read_u8(np, "mps,switch-freq", &freq);
+ if (!ret) {
+ regmap_update_bits(info->regmap, MPQ7920_REG_CTL0,
+ MPQ7920_MASK_SWITCH_FREQ,
+ (freq & 3) << 4);
+ }
+
+ of_node_put(np);
+}
+
+static int mpq7920_i2c_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct mpq7920_regulator_info *info;
+ struct regulator_config config = { NULL, };
+ struct regulator_dev *rdev;
+ struct regmap *regmap;
+ int i;
+
+ info = devm_kzalloc(dev, sizeof(struct mpq7920_regulator_info),
+ GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->rdesc = mpq7920_regulators_desc;
+ regmap = devm_regmap_init_i2c(client, &mpq7920_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to allocate regmap!\n");
+ return PTR_ERR(regmap);
+ }
+
+ i2c_set_clientdata(client, info);
+ info->regmap = regmap;
+ if (client->dev.of_node)
+ mpq7920_parse_dt(&client->dev, info);
+
+ config.dev = dev;
+ config.regmap = regmap;
+ config.driver_data = info;
+
+ for (i = 0; i < MPQ7920_MAX_REGULATORS; i++) {
+ rdev = devm_regulator_register(dev,
+ &mpq7920_regulators_desc[i],
+ &config);
+ if (IS_ERR(rdev)) {
+ dev_err(dev, "Failed to register regulator!\n");
+ return PTR_ERR(rdev);
+ }
+ }
+
+ return 0;
+}
+
+static const struct of_device_id mpq7920_of_match[] = {
+ { .compatible = "mps,mpq7920"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, mpq7920_of_match);
+
+static const struct i2c_device_id mpq7920_id[] = {
+ { "mpq7920", },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, mpq7920_id);
+
+static struct i2c_driver mpq7920_regulator_driver = {
+ .driver = {
+ .name = "mpq7920",
+ .of_match_table = of_match_ptr(mpq7920_of_match),
+ },
+ .probe_new = mpq7920_i2c_probe,
+ .id_table = mpq7920_id,
+};
+module_i2c_driver(mpq7920_regulator_driver);
+
+MODULE_AUTHOR("Saravanan Sekar <sravanhome@gmail.com>");
+MODULE_DESCRIPTION("MPQ7920 PMIC regulator driver");
+MODULE_LICENSE("GPL");
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * mpq7920.h - Regulator definitions for mpq7920
+ *
+ * Copyright 2019 Monolithic Power Systems, Inc
+ *
+ */
+
+#ifndef __MPQ7920_H__
+#define __MPQ7920_H__
+
+#define MPQ7920_REG_CTL0 0x00
+#define MPQ7920_REG_CTL1 0x01
+#define MPQ7920_REG_CTL2 0x02
+#define MPQ7920_BUCK1_REG_A 0x03
+#define MPQ7920_BUCK1_REG_B 0x04
+#define MPQ7920_BUCK1_REG_C 0x05
+#define MPQ7920_BUCK1_REG_D 0x06
+#define MPQ7920_BUCK2_REG_A 0x07
+#define MPQ7920_BUCK2_REG_B 0x08
+#define MPQ7920_BUCK2_REG_C 0x09
+#define MPQ7920_BUCK2_REG_D 0x0a
+#define MPQ7920_BUCK3_REG_A 0x0b
+#define MPQ7920_BUCK3_REG_B 0x0c
+#define MPQ7920_BUCK3_REG_C 0x0d
+#define MPQ7920_BUCK3_REG_D 0x0e
+#define MPQ7920_BUCK4_REG_A 0x0f
+#define MPQ7920_BUCK4_REG_B 0x10
+#define MPQ7920_BUCK4_REG_C 0x11
+#define MPQ7920_BUCK4_REG_D 0x12
+#define MPQ7920_LDO1_REG_A 0x13
+#define MPQ7920_LDO1_REG_B 0x0
+#define MPQ7920_LDO2_REG_A 0x14
+#define MPQ7920_LDO2_REG_B 0x15
+#define MPQ7920_LDO2_REG_C 0x16
+#define MPQ7920_LDO3_REG_A 0x17
+#define MPQ7920_LDO3_REG_B 0x18
+#define MPQ7920_LDO3_REG_C 0x19
+#define MPQ7920_LDO4_REG_A 0x1a
+#define MPQ7920_LDO4_REG_B 0x1b
+#define MPQ7920_LDO4_REG_C 0x1c
+#define MPQ7920_LDO5_REG_A 0x1d
+#define MPQ7920_LDO5_REG_B 0x1e
+#define MPQ7920_LDO5_REG_C 0x1f
+#define MPQ7920_REG_MODE 0x20
+#define MPQ7920_REG_REGULATOR_EN 0x22
+
+#define MPQ7920_MASK_VREF 0x7f
+#define MPQ7920_MASK_BUCK_ILIM 0xc0
+#define MPQ7920_MASK_LDO_ILIM BIT(6)
+#define MPQ7920_MASK_DISCHARGE BIT(5)
+#define MPQ7920_MASK_MODE 0xc0
+#define MPQ7920_MASK_SOFTSTART 0x0c
+#define MPQ7920_MASK_SWITCH_FREQ 0x30
+#define MPQ7920_MASK_BUCK_PHASE_DELAY 0x30
+#define MPQ7920_MASK_DVS_SLEWRATE 0xc0
+#define MPQ7920_MASK_OVP 0x40
+#define MPQ7920_OVP_DISABLE ~(0x40)
+#define MPQ7920_DISCHARGE_ON BIT(5)
+
+#define MPQ7920_REGULATOR_EN_OFFSET 7
+
+/* values in uV */
+#define MPQ7920_BUCK_VOLT_MIN 400000
+#define MPQ7920_LDO_VOLT_MIN 650000
+#define MPQ7920_VOLT_MAX 3587500
+#define MPQ7920_VOLT_STEP 12500
+
+#endif /* __MPQ7920_H__ */
/*
* I2C driver interface functions
*/
-static int mt6311_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int mt6311_i2c_probe(struct i2c_client *i2c)
{
struct regulator_config config = { };
struct regulator_dev *rdev;
.name = "mt6311",
.of_match_table = of_match_ptr(mt6311_dt_ids),
},
- .probe = mt6311_i2c_probe,
+ .probe_new = mt6311_i2c_probe,
.id_table = mt6311_i2c_id,
};
/*
* I2C driver interface functions
*/
-static int pv88060_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int pv88060_i2c_probe(struct i2c_client *i2c)
{
struct regulator_init_data *init_data = dev_get_platdata(&i2c->dev);
struct pv88060 *chip;
.name = "pv88060",
.of_match_table = of_match_ptr(pv88060_dt_ids),
},
- .probe = pv88060_i2c_probe,
+ .probe_new = pv88060_i2c_probe,
.id_table = pv88060_i2c_id,
};
/*
* I2C driver interface functions
*/
-static int pv88090_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int pv88090_i2c_probe(struct i2c_client *i2c)
{
struct regulator_init_data *init_data = dev_get_platdata(&i2c->dev);
struct pv88090 *chip;
.name = "pv88090",
.of_match_table = of_match_ptr(pv88090_dt_ids),
},
- .probe = pv88090_i2c_probe,
+ .probe_new = pv88090_i2c_probe,
.id_table = pv88090_i2c_id,
};
}
if (!pdata->dvs_gpio[i]) {
- dev_warn(dev, "there is no dvs%d gpio\n", i);
+ dev_info(dev, "there is no dvs%d gpio\n", i);
continue;
}
/* Module information */
MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
MODULE_AUTHOR("Sachin Kamat <sachin.kamat@samsung.com>");
-MODULE_DESCRIPTION("SAMSUNG S2MPA01 Regulator Driver");
+MODULE_DESCRIPTION("Samsung S2MPA01 Regulator Driver");
MODULE_LICENSE("GPL");
/* Module information */
MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
-MODULE_DESCRIPTION("SAMSUNG S2MPS11/S2MPS14/S2MPS15/S2MPU02 Regulator Driver");
+MODULE_DESCRIPTION("Samsung S2MPS11/S2MPS14/S2MPS15/S2MPU02 Regulator Driver");
MODULE_LICENSE("GPL");
/* Module information */
MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
-MODULE_DESCRIPTION("SAMSUNG S5M8767 Regulator Driver");
+MODULE_DESCRIPTION("Samsung S5M8767 Regulator Driver");
MODULE_LICENSE("GPL");
dev_dbg(chip->dev, "Fault log: FLT_POR\n");
}
-static int slg51000_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int slg51000_i2c_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct slg51000 *chip;
.driver = {
.name = "slg51000-regulator",
},
- .probe = slg51000_i2c_probe,
+ .probe_new = slg51000_i2c_probe,
.id_table = slg51000_i2c_id,
};
/*
* I2C driver interface functions
*/
-static int sy8106a_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int sy8106a_i2c_probe(struct i2c_client *i2c)
{
struct device *dev = &i2c->dev;
struct regulator_dev *rdev;
.name = "sy8106a",
.of_match_table = of_match_ptr(sy8106a_i2c_of_match),
},
- .probe = sy8106a_i2c_probe,
+ .probe_new = sy8106a_i2c_probe,
.id_table = sy8106a_i2c_id,
};
.val_bits = 8,
};
-static int sy8824_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int sy8824_i2c_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device_node *np = dev->of_node;
.name = "sy8824-regulator",
.of_match_table = of_match_ptr(sy8824_dt_ids),
},
- .probe = sy8824_i2c_probe,
+ .probe_new = sy8824_i2c_probe,
.id_table = sy8824_id,
};
module_i2c_driver(sy8824_regulator_driver);
.wr_table = &tps65132_no_reg_table,
};
-static int tps65132_probe(struct i2c_client *client,
- const struct i2c_device_id *client_id)
+static int tps65132_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct tps65132_regulator *tps;
.driver = {
.name = "tps65132",
},
- .probe = tps65132_probe,
+ .probe_new = tps65132_probe,
.id_table = tps65132_id,
};
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/regulator/coupler.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
#include <linux/sort.h>
+#include "internal.h"
+
struct vctrl_voltage_range {
int min_uV;
int max_uV;
static int vctrl_get_voltage(struct regulator_dev *rdev)
{
struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
- int ctrl_uV = regulator_get_voltage(vctrl->ctrl_reg);
+ int ctrl_uV = regulator_get_voltage_rdev(vctrl->ctrl_reg->rdev);
return vctrl_calc_output_voltage(vctrl, ctrl_uV);
}
{
struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
struct regulator *ctrl_reg = vctrl->ctrl_reg;
- int orig_ctrl_uV = regulator_get_voltage(ctrl_reg);
+ int orig_ctrl_uV = regulator_get_voltage_rdev(ctrl_reg->rdev);
int uV = vctrl_calc_output_voltage(vctrl, orig_ctrl_uV);
int ret;
if (req_min_uV >= uV || !vctrl->ovp_threshold)
/* voltage rising or no OVP */
- return regulator_set_voltage(
- ctrl_reg,
+ return regulator_set_voltage_rdev(ctrl_reg->rdev,
vctrl_calc_ctrl_voltage(vctrl, req_min_uV),
- vctrl_calc_ctrl_voltage(vctrl, req_max_uV));
+ vctrl_calc_ctrl_voltage(vctrl, req_max_uV),
+ PM_SUSPEND_ON);
while (uV > req_min_uV) {
int max_drop_uV = (uV * vctrl->ovp_threshold) / 100;
next_uV = max_t(int, req_min_uV, uV - max_drop_uV);
next_ctrl_uV = vctrl_calc_ctrl_voltage(vctrl, next_uV);
- ret = regulator_set_voltage(ctrl_reg,
+ ret = regulator_set_voltage_rdev(ctrl_reg->rdev,
+ next_ctrl_uV,
next_ctrl_uV,
- next_ctrl_uV);
+ PM_SUSPEND_ON);
if (ret)
goto err;
err:
/* Try to go back to original voltage */
- regulator_set_voltage(ctrl_reg, orig_ctrl_uV, orig_ctrl_uV);
+ regulator_set_voltage_rdev(ctrl_reg->rdev, orig_ctrl_uV, orig_ctrl_uV,
+ PM_SUSPEND_ON);
return ret;
}
if (selector >= vctrl->sel || !vctrl->ovp_threshold) {
/* voltage rising or no OVP */
- ret = regulator_set_voltage(ctrl_reg,
+ ret = regulator_set_voltage_rdev(ctrl_reg->rdev,
+ vctrl->vtable[selector].ctrl,
vctrl->vtable[selector].ctrl,
- vctrl->vtable[selector].ctrl);
+ PM_SUSPEND_ON);
if (!ret)
vctrl->sel = selector;
else
next_sel = vctrl->vtable[vctrl->sel].ovp_min_sel;
- ret = regulator_set_voltage(ctrl_reg,
+ ret = regulator_set_voltage_rdev(ctrl_reg->rdev,
vctrl->vtable[next_sel].ctrl,
- vctrl->vtable[next_sel].ctrl);
+ vctrl->vtable[next_sel].ctrl,
+ PM_SUSPEND_ON);
if (ret) {
dev_err(&rdev->dev,
"failed to set control voltage to %duV\n",
err:
if (vctrl->sel != orig_sel) {
/* Try to go back to original voltage */
- if (!regulator_set_voltage(ctrl_reg,
+ if (!regulator_set_voltage_rdev(ctrl_reg->rdev,
+ vctrl->vtable[orig_sel].ctrl,
vctrl->vtable[orig_sel].ctrl,
- vctrl->vtable[orig_sel].ctrl))
+ PM_SUSPEND_ON))
vctrl->sel = orig_sel;
else
dev_warn(&rdev->dev,
if (ret)
return ret;
- ctrl_uV = regulator_get_voltage(vctrl->ctrl_reg);
+ ctrl_uV = regulator_get_voltage_rdev(vctrl->ctrl_reg->rdev);
if (ctrl_uV < 0) {
dev_err(&pdev->dev, "failed to get control voltage\n");
return ctrl_uV;
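The vctrl hunk above consistently replaces consumer-level regulator calls with the rdev-level helpers declared in the newly included coupler.h, reaching the rdev through the struct regulator definition pulled in from "internal.h". The rdev-level setter also takes an explicit suspend state. A minimal sketch of the signature difference (hypothetical wrapper, illustration only):

/* consumer API: regulator_set_voltage(struct regulator *, min_uV, max_uV)
 * rdev API:     regulator_set_voltage_rdev(struct regulator_dev *,
 *                                          min_uV, max_uV, suspend state)
 */
static int example_set_runtime_voltage(struct regulator_dev *rdev,
				       int min_uV, int max_uV)
{
	/* PM_SUSPEND_ON selects the normal run-time voltage constraint */
	return regulator_set_voltage_rdev(rdev, min_uV, max_uV,
					  PM_SUSPEND_ON);
}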
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright (c) 2019 Mantas Pucka <mantas@8devices.com>
+// Copyright (c) 2019 Robert Marko <robert.marko@sartura.hr>
+//
+// Driver for IPQ4019 SD/MMC controller's I/O LDO voltage regulator
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+
+static const unsigned int ipq4019_vmmc_voltages[] = {
+ 1500000, 1800000, 2500000, 3000000,
+};
+
+static const struct regulator_ops ipq4019_regulator_voltage_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_ascend,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+};
+
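+/*
+ * Selector n taken from the low two bits of the (regmap-relative)
+ * register 0 picks ipq4019_vmmc_voltages[n]; the generic regmap
+ * helpers above handle the read-modify-write.
+ */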
+static const struct regulator_desc vmmc_regulator = {
+ .name = "vmmcq",
+ .ops = &ipq4019_regulator_voltage_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .volt_table = ipq4019_vmmc_voltages,
+ .n_voltages = ARRAY_SIZE(ipq4019_vmmc_voltages),
+ .vsel_reg = 0,
+ .vsel_mask = 0x3,
+};
+
+static const struct regmap_config ipq4019_vmmcq_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+};
+
+static int ipq4019_regulator_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct regulator_init_data *init_data;
+ struct regulator_config cfg = {};
+ struct regulator_dev *rdev;
+ struct resource *res;
+ struct regmap *rmap;
+ void __iomem *base;
+
+ init_data = of_get_regulator_init_data(dev, dev->of_node,
+ &vmmc_regulator);
+ if (!init_data)
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ rmap = devm_regmap_init_mmio(dev, base, &ipq4019_vmmcq_regmap_config);
+ if (IS_ERR(rmap))
+ return PTR_ERR(rmap);
+
+ cfg.dev = dev;
+ cfg.init_data = init_data;
+ cfg.of_node = dev->of_node;
+ cfg.regmap = rmap;
+
+ rdev = devm_regulator_register(dev, &vmmc_regulator, &cfg);
+ if (IS_ERR(rdev)) {
+ dev_err(dev, "Failed to register regulator: %ld\n",
+ PTR_ERR(rdev));
+ return PTR_ERR(rdev);
+ }
+ platform_set_drvdata(pdev, rdev);
+
+ return 0;
+}
+
+static const struct of_device_id regulator_ipq4019_of_match[] = {
+ { .compatible = "qcom,vqmmc-ipq4019-regulator", },
+ {},
+};
+
+static struct platform_driver ipq4019_regulator_driver = {
+ .probe = ipq4019_regulator_probe,
+ .driver = {
+ .name = "vqmmc-ipq4019-regulator",
+ .of_match_table = of_match_ptr(regulator_ipq4019_of_match),
+ },
+};
+module_platform_driver(ipq4019_regulator_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mantas Pucka <mantas@8devices.com>");
+MODULE_DESCRIPTION("IPQ4019 VQMMC voltage regulator");
save_control = CMOS_READ(RTC_CONTROL);
CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
-
-#ifdef CONFIG_X86
- if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
- boot_cpu_data.x86 == 0x17) ||
- boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
- CMOS_WRITE((save_freq_select & (~RTC_DIV_RESET2)),
- RTC_FREQ_SELECT);
- save_freq_select &= ~RTC_DIV_RESET2;
- } else
- CMOS_WRITE((save_freq_select | RTC_DIV_RESET2),
- RTC_FREQ_SELECT);
-#else
- CMOS_WRITE((save_freq_select | RTC_DIV_RESET2), RTC_FREQ_SELECT);
-#endif
+ CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
#ifdef CONFIG_MACH_DECSTATION
CMOS_WRITE(real_yrs, RTC_DEC_YEAR);
irqen = irqsta & ~RTC_IRQ_EN_AL;
mutex_lock(&rtc->lock);
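+ /* Commit the new enable mask to the hardware only if the register
+ * write itself succeeded. */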
if (regmap_write(rtc->regmap, rtc->addr_base + RTC_IRQ_EN,
- irqen) < 0)
+ irqen) == 0)
mtk_rtc_write_trigger(rtc);
mutex_unlock(&rtc->lock);
alm->pending = !!(pdn2 & RTC_PDN2_PWRON_ALARM);
mutex_unlock(&rtc->lock);
- tm->tm_sec = data[RTC_OFFSET_SEC];
- tm->tm_min = data[RTC_OFFSET_MIN];
- tm->tm_hour = data[RTC_OFFSET_HOUR];
- tm->tm_mday = data[RTC_OFFSET_DOM];
- tm->tm_mon = data[RTC_OFFSET_MTH];
- tm->tm_year = data[RTC_OFFSET_YEAR];
+ tm->tm_sec = data[RTC_OFFSET_SEC] & RTC_AL_SEC_MASK;
+ tm->tm_min = data[RTC_OFFSET_MIN] & RTC_AL_MIN_MASK;
+ tm->tm_hour = data[RTC_OFFSET_HOUR] & RTC_AL_HOU_MASK;
+ tm->tm_mday = data[RTC_OFFSET_DOM] & RTC_AL_DOM_MASK;
+ tm->tm_mon = data[RTC_OFFSET_MTH] & RTC_AL_MTH_MASK;
+ tm->tm_year = data[RTC_OFFSET_YEAR] & RTC_AL_YEA_MASK;
tm->tm_year += RTC_MIN_YEAR_OFFSET;
tm->tm_mon--;
tm->tm_year -= RTC_MIN_YEAR_OFFSET;
tm->tm_mon++;
- data[RTC_OFFSET_SEC] = tm->tm_sec;
- data[RTC_OFFSET_MIN] = tm->tm_min;
- data[RTC_OFFSET_HOUR] = tm->tm_hour;
- data[RTC_OFFSET_DOM] = tm->tm_mday;
- data[RTC_OFFSET_MTH] = tm->tm_mon;
- data[RTC_OFFSET_YEAR] = tm->tm_year;
-
mutex_lock(&rtc->lock);
+ ret = regmap_bulk_read(rtc->regmap, rtc->addr_base + RTC_AL_SEC,
+ data, RTC_OFFSET_COUNT);
+ if (ret < 0)
+ goto exit;
+
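+ /* Merge the new time fields under their masks so that the other
+ * control bits held in these alarm registers are preserved. */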
+ data[RTC_OFFSET_SEC] = ((data[RTC_OFFSET_SEC] & ~(RTC_AL_SEC_MASK)) |
+ (tm->tm_sec & RTC_AL_SEC_MASK));
+ data[RTC_OFFSET_MIN] = ((data[RTC_OFFSET_MIN] & ~(RTC_AL_MIN_MASK)) |
+ (tm->tm_min & RTC_AL_MIN_MASK));
+ data[RTC_OFFSET_HOUR] = ((data[RTC_OFFSET_HOUR] & ~(RTC_AL_HOU_MASK)) |
+ (tm->tm_hour & RTC_AL_HOU_MASK));
+ data[RTC_OFFSET_DOM] = ((data[RTC_OFFSET_DOM] & ~(RTC_AL_DOM_MASK)) |
+ (tm->tm_mday & RTC_AL_DOM_MASK));
+ data[RTC_OFFSET_MTH] = ((data[RTC_OFFSET_MTH] & ~(RTC_AL_MTH_MASK)) |
+ (tm->tm_mon & RTC_AL_MTH_MASK));
+ data[RTC_OFFSET_YEAR] = ((data[RTC_OFFSET_YEAR] & ~(RTC_AL_YEA_MASK)) |
+ (tm->tm_year & RTC_AL_YEA_MASK));
+
if (alm->enabled) {
ret = regmap_bulk_write(rtc->regmap,
rtc->addr_base + RTC_AL_SEC,
CLK_OF_DECLARE_DRIVER(sun50i_h6_rtc_clk, "allwinner,sun50i-h6-rtc",
sun50i_h6_rtc_clk_init);
+/*
+ * The R40 user manual is self-conflicting on whether the prescaler is
+ * fixed or configurable. The clock diagram shows it as fixed, but there
+ * is also a configurable divider in the RTC block.
+ */
+static const struct sun6i_rtc_clk_data sun8i_r40_rtc_data = {
+ .rc_osc_rate = 16000000,
+ .fixed_prescaler = 512,
+};
+
+static void __init sun8i_r40_rtc_clk_init(struct device_node *node)
+{
+ sun6i_rtc_clk_init(node, &sun8i_r40_rtc_data);
+}
+CLK_OF_DECLARE_DRIVER(sun8i_r40_rtc_clk, "allwinner,sun8i-r40-rtc",
+ sun8i_r40_rtc_clk_init);
+
static const struct sun6i_rtc_clk_data sun8i_v3_rtc_data = {
.rc_osc_rate = 32000,
.has_out_clk = 1,
drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT;
if (!!devres != !!drvres)
return -ENODEV;
- /* (re-)init queue's state machine */
- ap_queue_reinit_state(to_ap_queue(dev));
}
/* Add queue/card to list of active queues/cards */
void ap_queue_remove(struct ap_queue *aq);
void ap_queue_suspend(struct ap_device *ap_dev);
void ap_queue_resume(struct ap_device *ap_dev);
-void ap_queue_reinit_state(struct ap_queue *aq);
+void ap_queue_init_state(struct ap_queue *aq);
struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type,
int comp_device_type, unsigned int functions);
aq->ap_dev.device.type = &ap_queue_type;
aq->ap_dev.device_type = device_type;
aq->qid = qid;
- aq->state = AP_STATE_RESET_START;
+ aq->state = AP_STATE_UNBOUND;
aq->interrupt = AP_INTR_DISABLED;
spin_lock_init(&aq->lock);
INIT_LIST_HEAD(&aq->list);
spin_unlock_bh(&aq->lock);
}
-void ap_queue_reinit_state(struct ap_queue *aq)
+void ap_queue_init_state(struct ap_queue *aq)
{
spin_lock_bh(&aq->lock);
aq->state = AP_STATE_RESET_START;
ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
spin_unlock_bh(&aq->lock);
}
+EXPORT_SYMBOL(ap_queue_init_state);
prepparm = (struct iprepparm *) prepcblk->rpl_parmb;
/* do some plausibility checks on the key block */
- if (prepparm->kb.len < 120 + 5 * sizeof(uint16_t) ||
- prepparm->kb.len > 136 + 5 * sizeof(uint16_t)) {
+ if (prepparm->kb.len < 120 + 3 * sizeof(uint16_t) ||
+ prepparm->kb.len > 136 + 3 * sizeof(uint16_t)) {
DEBUG_ERR("%s reply with invalid or unknown key block\n",
__func__);
rc = -EIO;
zq->queue = aq;
zq->online = 1;
atomic_set(&zq->load, 0);
+ ap_queue_init_state(aq);
ap_queue_init_reply(aq, &zq->reply);
aq->request_timeout = CEX2A_CLEANUP_TIME,
aq->private = zq;
zq->queue = aq;
zq->online = 1;
atomic_set(&zq->load, 0);
+ ap_rapq(aq->qid);
rc = zcrypt_cex2c_rng_supported(aq);
if (rc < 0) {
zcrypt_queue_free(zq);
else
zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
MSGTYPE06_VARIANT_NORNG);
+ ap_queue_init_state(aq);
ap_queue_init_reply(aq, &zq->reply);
aq->request_timeout = CEX2C_CLEANUP_TIME;
aq->private = zq;
zq->queue = aq;
zq->online = 1;
atomic_set(&zq->load, 0);
+ ap_queue_init_state(aq);
ap_queue_init_reply(aq, &zq->reply);
aq->request_timeout = CEX4_CLEANUP_TIME,
aq->private = zq;
config BLK_DEV_SD
tristate "SCSI disk support"
depends on SCSI
+ select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY
---help---
If you want to use SCSI hard disks, Fibre Channel disks,
Serial ATA (SATA) or Parallel ATA (PATA) hard disks,
if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
return SCSI_MLQUEUE_HOST_BUSY;
+ if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET)))
+ return SCSI_MLQUEUE_HOST_BUSY;
+
rport = starget_to_rport(scsi_target(sc->device));
if (!rport) {
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
int vnic_dev_hang_notify(struct vnic_dev *vdev)
{
- u64 a0, a1;
+ u64 a0 = 0, a1 = 0;
int wait = 1000;
return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
}
int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
{
- u64 a0, a1;
+ u64 a[2] = {};
int wait = 1000;
int err, i;
for (i = 0; i < ETH_ALEN; i++)
mac_addr[i] = 0;
- err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
+ err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a[0], &a[1], wait);
if (err)
return err;
for (i = 0; i < ETH_ALEN; i++)
- mac_addr[i] = ((u8 *)&a0)[i];
+ mac_addr[i] = ((u8 *)&a)[i];
return 0;
}
void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
{
- u64 a0 = 0, a1 = 0;
+ u64 a[2] = {};
int wait = 1000;
int err;
int i;
for (i = 0; i < ETH_ALEN; i++)
- ((u8 *)&a0)[i] = addr[i];
+ ((u8 *)&a)[i] = addr[i];
- err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
+ err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a[0], &a[1], wait);
if (err)
pr_err("Can't add addr [%pM], %d\n", addr, err);
}
void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
{
- u64 a0 = 0, a1 = 0;
+ u64 a[2] = {};
int wait = 1000;
int err;
int i;
for (i = 0; i < ETH_ALEN; i++)
- ((u8 *)&a0)[i] = addr[i];
+ ((u8 *)&a)[i] = addr[i];
- err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
+ err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a[0], &a[1], wait);
if (err)
pr_err("Can't del addr [%pM], %d\n", addr, err);
}
u8 type;
int ret = 0;
- if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0)
+ if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) {
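+ /* Clear the cached type in case protection was disabled since the
+ * previous revalidation. */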
+ sdkp->protection_type = 0;
return ret;
+ }
type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */
q->limits.zoned = BLK_ZONED_HM;
} else {
sdkp->zoned = (buffer[8] >> 4) & 3;
- if (sdkp->zoned == 1)
+ if (sdkp->zoned == 1 && !disk_has_partitions(sdkp->disk)) {
/* Host-aware */
q->limits.zoned = BLK_ZONED_HA;
- else
+ } else {
/*
- * Treat drive-managed devices as
- * regular block devices.
+ * Treat drive-managed devices and host-aware devices
+ * with partitions as regular block devices.
*/
q->limits.zoned = BLK_ZONED_NONE;
+ }
}
if (blk_queue_is_zoned(q) && sdkp->first_scan)
sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n",
*/
host->sg_tablesize = (stor_device->max_transfer_bytes >> PAGE_SHIFT);
/*
+ * For non-IDE disks, the host supports multiple channels.
* Set the number of HW queues we are supporting.
*/
- host->nr_hw_queues = num_present_cpus();
+ if (!dev_is_ide)
+ host->nr_hw_queues = num_present_cpus();
/*
* Set the error handler work queue.
struct meson_ee_pwrc *pwrc,
struct meson_ee_pwrc_domain *dom)
{
+ int ret;
+
dom->pwrc = pwrc;
dom->num_rstc = dom->desc.reset_names_count;
dom->num_clks = dom->desc.clk_names_count;
* prepare/enable counters won't be in sync.
*/
if (dom->num_clks && dom->desc.get_power && !dom->desc.get_power(dom)) {
- int ret = clk_bulk_prepare_enable(dom->num_clks, dom->clks);
+ ret = clk_bulk_prepare_enable(dom->num_clks, dom->clks);
if (ret)
return ret;
- pm_genpd_init(&dom->base, &pm_domain_always_on_gov, false);
- } else
- pm_genpd_init(&dom->base, NULL,
- (dom->desc.get_power ?
- dom->desc.get_power(dom) : true));
+ ret = pm_genpd_init(&dom->base, &pm_domain_always_on_gov,
+ false);
+ if (ret)
+ return ret;
+ } else {
+ ret = pm_genpd_init(&dom->base, NULL,
+ (dom->desc.get_power ?
+ dom->desc.get_power(dom) : true));
+ if (ret)
+ return ret;
+ }
return 0;
}
pwrc->xlate.domains[i] = &dom->base;
}
- of_genpd_add_provider_onecell(pdev->dev.of_node, &pwrc->xlate);
-
- return 0;
+ return of_genpd_add_provider_onecell(pdev->dev.of_node, &pwrc->xlate);
}
static void meson_ee_pwrc_shutdown(struct platform_device *pdev)
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
-#include <asm/sifive_l2_cache.h>
+#include <soc/sifive/sifive_l2_cache.h>
#define SIFIVE_L2_DIRECCFIX_LOW 0x100
#define SIFIVE_L2_DIRECCFIX_HIGH 0x104
called ti_sci_pm_domains. Note this is needed early in boot before
rootfs may be available.
+config TI_K3_RINGACC
+ bool "K3 Ring accelerator Sub System"
+ depends on ARCH_K3 || COMPILE_TEST
+ depends on TI_SCI_INTA_IRQCHIP
+ help
+ Say y here to support the K3 Ring accelerator module.
+ The Ring Accelerator (RINGACC or RA) provides hardware acceleration
+ to enable straightforward passing of work between a producer
+ and a consumer. There is one RINGACC module per NAVSS on TI AM65x SoCs.
+ If unsure, say N.
+
endif # SOC_TI
config TI_SCI_INTA_MSI_DOMAIN
obj-$(CONFIG_WKUP_M3_IPC) += wkup_m3_ipc.o
obj-$(CONFIG_TI_SCI_PM_DOMAINS) += ti_sci_pm_domains.o
obj-$(CONFIG_TI_SCI_INTA_MSI_DOMAIN) += ti_sci_inta_msi.o
+obj-$(CONFIG_TI_K3_RINGACC) += k3-ringacc.o
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TI K3 NAVSS Ring Accelerator subsystem driver
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/soc/ti/k3-ringacc.h>
+#include <linux/soc/ti/ti_sci_protocol.h>
+#include <linux/soc/ti/ti_sci_inta_msi.h>
+#include <linux/of_irq.h>
+#include <linux/irqdomain.h>
+
+static LIST_HEAD(k3_ringacc_list);
+static DEFINE_MUTEX(k3_ringacc_list_lock);
+
+#define K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK GENMASK(19, 0)
+
+/**
+ * struct k3_ring_rt_regs - The RA realtime Control/Status Registers region
+ *
+ * @resv_16: Reserved
+ * @db: Ring Doorbell Register
+ * @resv_4: Reserved
+ * @occ: Ring Occupancy Register
+ * @indx: Ring Current Index Register
+ * @hwocc: Ring Hardware Occupancy Register
+ * @hwindx: Ring Hardware Current Index Register
+ */
+struct k3_ring_rt_regs {
+ u32 resv_16[4];
+ u32 db;
+ u32 resv_4[1];
+ u32 occ;
+ u32 indx;
+ u32 hwocc;
+ u32 hwindx;
+};
+
+#define K3_RINGACC_RT_REGS_STEP 0x1000
+
+/**
+ * struct k3_ring_fifo_regs - The Ring Accelerator Queues Registers region
+ *
+ * @head_data: Ring Head Entry Data Registers
+ * @tail_data: Ring Tail Entry Data Registers
+ * @peek_head_data: Ring Peek Head Entry Data Regs
+ * @peek_tail_data: Ring Peek Tail Entry Data Regs
+ */
+struct k3_ring_fifo_regs {
+ u32 head_data[128];
+ u32 tail_data[128];
+ u32 peek_head_data[128];
+ u32 peek_tail_data[128];
+};
+
+/**
+ * struct k3_ringacc_proxy_gcfg_regs - RA Proxy Global Config MMIO Region
+ *
+ * @revision: Revision Register
+ * @config: Config Register
+ */
+struct k3_ringacc_proxy_gcfg_regs {
+ u32 revision;
+ u32 config;
+};
+
+#define K3_RINGACC_PROXY_CFG_THREADS_MASK GENMASK(15, 0)
+
+/**
+ * struct k3_ringacc_proxy_target_regs - Proxy Datapath MMIO Region
+ *
+ * @control: Proxy Control Register
+ * @status: Proxy Status Register
+ * @resv_512: Reserved
+ * @data: Proxy Data Register
+ */
+struct k3_ringacc_proxy_target_regs {
+ u32 control;
+ u32 status;
+ u8 resv_512[504];
+ u32 data[128];
+};
+
+#define K3_RINGACC_PROXY_TARGET_STEP 0x1000
+#define K3_RINGACC_PROXY_NOT_USED (-1)
+
+enum k3_ringacc_proxy_access_mode {
+ PROXY_ACCESS_MODE_HEAD = 0,
+ PROXY_ACCESS_MODE_TAIL = 1,
+ PROXY_ACCESS_MODE_PEEK_HEAD = 2,
+ PROXY_ACCESS_MODE_PEEK_TAIL = 3,
+};
+
+#define K3_RINGACC_FIFO_WINDOW_SIZE_BYTES (512U)
+#define K3_RINGACC_FIFO_REGS_STEP 0x1000
+#define K3_RINGACC_MAX_DB_RING_CNT (127U)
+
+struct k3_ring_ops {
+ int (*push_tail)(struct k3_ring *ring, void *elm);
+ int (*push_head)(struct k3_ring *ring, void *elm);
+ int (*pop_tail)(struct k3_ring *ring, void *elm);
+ int (*pop_head)(struct k3_ring *ring, void *elm);
+};
+
+/**
+ * struct k3_ring - RA Ring descriptor
+ *
+ * @rt: Ring control/status registers
+ * @fifos: Ring queues registers
+ * @proxy: Ring Proxy Datapath registers
+ * @ring_mem_dma: Ring buffer dma address
+ * @ring_mem_virt: Ring buffer virt address
+ * @ops: Ring operations
+ * @size: Ring size in elements
+ * @elm_size: Size of the ring element
+ * @mode: Ring mode
+ * @flags: flags
+ * @free: Number of free elements
+ * @occ: Ring occupancy
+ * @windex: Write index (only for @K3_RINGACC_RING_MODE_RING)
+ * @rindex: Read index (only for @K3_RINGACC_RING_MODE_RING)
+ * @ring_id: Ring Id
+ * @parent: Pointer to struct @k3_ringacc
+ * @use_count: Use count for shared rings
+ * @proxy_id: RA Ring Proxy Id (only if @K3_RINGACC_RING_USE_PROXY)
+ */
+struct k3_ring {
+ struct k3_ring_rt_regs __iomem *rt;
+ struct k3_ring_fifo_regs __iomem *fifos;
+ struct k3_ringacc_proxy_target_regs __iomem *proxy;
+ dma_addr_t ring_mem_dma;
+ void *ring_mem_virt;
+ struct k3_ring_ops *ops;
+ u32 size;
+ enum k3_ring_size elm_size;
+ enum k3_ring_mode mode;
+ u32 flags;
+#define K3_RING_FLAG_BUSY BIT(1)
+#define K3_RING_FLAG_SHARED BIT(2)
+ u32 free;
+ u32 occ;
+ u32 windex;
+ u32 rindex;
+ u32 ring_id;
+ struct k3_ringacc *parent;
+ u32 use_count;
+ int proxy_id;
+};
+
+/**
+ * struct k3_ringacc - Rings accelerator descriptor
+ *
+ * @dev: pointer to the RA device
+ * @proxy_gcfg: RA proxy global config registers
+ * @proxy_target_base: RA proxy datapath region
+ * @num_rings: number of rings in RA
+ * @rings_inuse: bitfield for ring usage tracking
+ * @rm_gp_range: general purpose rings range from tisci
+ * @dma_ring_reset_quirk: DMA ring reset workaround enable
+ * @num_proxies: number of RA proxies
+ * @proxy_inuse: bitfield for proxy usage tracking
+ * @rings: array of ring descriptors (struct @k3_ring)
+ * @list: list of RAs in the system
+ * @req_lock: protects ring allocation
+ * @tisci: pointer to the ti-sci handle
+ * @tisci_ring_ops: ti-sci ring ops
+ * @tisci_dev_id: ti-sci device id
+ */
+struct k3_ringacc {
+ struct device *dev;
+ struct k3_ringacc_proxy_gcfg_regs __iomem *proxy_gcfg;
+ void __iomem *proxy_target_base;
+ u32 num_rings; /* number of rings in Ringacc module */
+ unsigned long *rings_inuse;
+ struct ti_sci_resource *rm_gp_range;
+
+ bool dma_ring_reset_quirk;
+ u32 num_proxies;
+ unsigned long *proxy_inuse;
+
+ struct k3_ring *rings;
+ struct list_head list;
+ struct mutex req_lock; /* protect rings allocation */
+
+ const struct ti_sci_handle *tisci;
+ const struct ti_sci_rm_ringacc_ops *tisci_ring_ops;
+ u32 tisci_dev_id;
+};
+
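+/*
+ * Element data is accessed at the end of the 512-byte FIFO window, so
+ * the access offset is the window size minus one element
+ * ((4 << elm_size) bytes).
+ */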
+static long k3_ringacc_ring_get_fifo_pos(struct k3_ring *ring)
+{
+ return K3_RINGACC_FIFO_WINDOW_SIZE_BYTES -
+ (4 << ring->elm_size);
+}
+
+static void *k3_ringacc_get_elm_addr(struct k3_ring *ring, u32 idx)
+{
+ return (ring->ring_mem_virt + idx * (4 << ring->elm_size));
+}
+
+static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem);
+
+static struct k3_ring_ops k3_ring_mode_ring_ops = {
+ .push_tail = k3_ringacc_ring_push_mem,
+ .pop_head = k3_ringacc_ring_pop_mem,
+};
+
+static int k3_ringacc_ring_push_io(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem);
+
+static struct k3_ring_ops k3_ring_mode_msg_ops = {
+ .push_tail = k3_ringacc_ring_push_io,
+ .push_head = k3_ringacc_ring_push_head_io,
+ .pop_tail = k3_ringacc_ring_pop_tail_io,
+ .pop_head = k3_ringacc_ring_pop_io,
+};
+
+static int k3_ringacc_ring_push_head_proxy(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_push_tail_proxy(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_pop_head_proxy(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_pop_tail_proxy(struct k3_ring *ring, void *elem);
+
+static struct k3_ring_ops k3_ring_mode_proxy_ops = {
+ .push_tail = k3_ringacc_ring_push_tail_proxy,
+ .push_head = k3_ringacc_ring_push_head_proxy,
+ .pop_tail = k3_ringacc_ring_pop_tail_proxy,
+ .pop_head = k3_ringacc_ring_pop_head_proxy,
+};
+
+static void k3_ringacc_ring_dump(struct k3_ring *ring)
+{
+ struct device *dev = ring->parent->dev;
+
+ dev_dbg(dev, "dump ring: %d\n", ring->ring_id);
+ dev_dbg(dev, "dump mem virt %p, dma %pad\n", ring->ring_mem_virt,
+ &ring->ring_mem_dma);
+ dev_dbg(dev, "dump elmsize %d, size %d, mode %d, proxy_id %d\n",
+ ring->elm_size, ring->size, ring->mode, ring->proxy_id);
+
+ dev_dbg(dev, "dump ring_rt_regs: db%08x\n", readl(&ring->rt->db));
+ dev_dbg(dev, "dump occ%08x\n", readl(&ring->rt->occ));
+ dev_dbg(dev, "dump indx%08x\n", readl(&ring->rt->indx));
+ dev_dbg(dev, "dump hwocc%08x\n", readl(&ring->rt->hwocc));
+ dev_dbg(dev, "dump hwindx%08x\n", readl(&ring->rt->hwindx));
+
+ if (ring->ring_mem_virt)
+ print_hex_dump_debug("dump ring_mem_virt ", DUMP_PREFIX_NONE,
+ 16, 1, ring->ring_mem_virt, 16 * 8, false);
+}
+
+struct k3_ring *k3_ringacc_request_ring(struct k3_ringacc *ringacc,
+ int id, u32 flags)
+{
+ int proxy_id = K3_RINGACC_PROXY_NOT_USED;
+
+ mutex_lock(&ringacc->req_lock);
+
+ if (id == K3_RINGACC_RING_ID_ANY) {
+ /* Request for any general purpose ring */
+ struct ti_sci_resource_desc *gp_rings =
+ &ringacc->rm_gp_range->desc[0];
+ unsigned long size;
+
+ size = gp_rings->start + gp_rings->num;
+ id = find_next_zero_bit(ringacc->rings_inuse, size,
+ gp_rings->start);
+ if (id == size)
+ goto error;
+ } else if (id < 0) {
+ goto error;
+ }
+
+ if (test_bit(id, ringacc->rings_inuse) &&
+ !(ringacc->rings[id].flags & K3_RING_FLAG_SHARED))
+ goto error;
+ else if (ringacc->rings[id].flags & K3_RING_FLAG_SHARED)
+ goto out;
+
+ if (flags & K3_RINGACC_RING_USE_PROXY) {
+ proxy_id = find_next_zero_bit(ringacc->proxy_inuse,
+ ringacc->num_proxies, 0);
+ if (proxy_id == ringacc->num_proxies)
+ goto error;
+ }
+
+ if (proxy_id != K3_RINGACC_PROXY_NOT_USED) {
+ set_bit(proxy_id, ringacc->proxy_inuse);
+ ringacc->rings[id].proxy_id = proxy_id;
+ dev_dbg(ringacc->dev, "Giving ring#%d proxy#%d\n", id,
+ proxy_id);
+ } else {
+ dev_dbg(ringacc->dev, "Giving ring#%d\n", id);
+ }
+
+ set_bit(id, ringacc->rings_inuse);
+out:
+ ringacc->rings[id].use_count++;
+ mutex_unlock(&ringacc->req_lock);
+ return &ringacc->rings[id];
+
+error:
+ mutex_unlock(&ringacc->req_lock);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_request_ring);
+
+static void k3_ringacc_ring_reset_sci(struct k3_ring *ring)
+{
+ struct k3_ringacc *ringacc = ring->parent;
+ int ret;
+
+ ret = ringacc->tisci_ring_ops->config(
+ ringacc->tisci,
+ TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID,
+ ringacc->tisci_dev_id,
+ ring->ring_id,
+ 0,
+ 0,
+ ring->size,
+ 0,
+ 0,
+ 0);
+ if (ret)
+ dev_err(ringacc->dev, "TISCI reset ring fail (%d) ring_idx %d\n",
+ ret, ring->ring_id);
+}
+
+void k3_ringacc_ring_reset(struct k3_ring *ring)
+{
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return;
+
+ ring->occ = 0;
+ ring->free = 0;
+ ring->rindex = 0;
+ ring->windex = 0;
+
+ k3_ringacc_ring_reset_sci(ring);
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset);
+
+static void k3_ringacc_ring_reconfig_qmode_sci(struct k3_ring *ring,
+ enum k3_ring_mode mode)
+{
+ struct k3_ringacc *ringacc = ring->parent;
+ int ret;
+
+ ret = ringacc->tisci_ring_ops->config(
+ ringacc->tisci,
+ TI_SCI_MSG_VALUE_RM_RING_MODE_VALID,
+ ringacc->tisci_dev_id,
+ ring->ring_id,
+ 0,
+ 0,
+ 0,
+ mode,
+ 0,
+ 0);
+ if (ret)
+ dev_err(ringacc->dev, "TISCI reconf qmode fail (%d) ring_idx %d\n",
+ ret, ring->ring_id);
+}
+
+void k3_ringacc_ring_reset_dma(struct k3_ring *ring, u32 occ)
+{
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return;
+
+ if (!ring->parent->dma_ring_reset_quirk)
+ goto reset;
+
+ if (!occ)
+ occ = readl(&ring->rt->occ);
+
+ if (occ) {
+ u32 db_ring_cnt, db_ring_cnt_cur;
+
+ dev_dbg(ring->parent->dev, "%s %u occ: %u\n", __func__,
+ ring->ring_id, occ);
+ /* TI-SCI ring reset */
+ k3_ringacc_ring_reset_sci(ring);
+
+ /*
+ * Setup the ring in ring/doorbell mode (if not already in this
+ * mode)
+ */
+ if (ring->mode != K3_RINGACC_RING_MODE_RING)
+ k3_ringacc_ring_reconfig_qmode_sci(
+ ring, K3_RINGACC_RING_MODE_RING);
+ /*
+ * Ring the doorbell 2**22 - ringOcc times.
+ * This will wrap the internal UDMAP ring state occupancy
+ * counter (which is 21-bits wide) to 0.
+ */
+ db_ring_cnt = (1U << 22) - occ;
+
+ while (db_ring_cnt != 0) {
+ /*
+ * Ring the doorbell with the maximum count each
+ * iteration if possible to minimize the total
+ * number of writes.
+ */
+ if (db_ring_cnt > K3_RINGACC_MAX_DB_RING_CNT)
+ db_ring_cnt_cur = K3_RINGACC_MAX_DB_RING_CNT;
+ else
+ db_ring_cnt_cur = db_ring_cnt;
+
+ writel(db_ring_cnt_cur, &ring->rt->db);
+ db_ring_cnt -= db_ring_cnt_cur;
+ }
+
+ /* Restore the original ring mode (if not ring mode) */
+ if (ring->mode != K3_RINGACC_RING_MODE_RING)
+ k3_ringacc_ring_reconfig_qmode_sci(ring, ring->mode);
+ }
+
+reset:
+ /* Reset the ring */
+ k3_ringacc_ring_reset(ring);
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset_dma);
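To make the wrap arithmetic above concrete: ringing the doorbell (1 << 22) - occ times advances the occupancy counter from occ to exactly 2**22, i.e. 0 modulo the wrap point the comment describes. A minimal sketch (hypothetical helper, mirroring the loop above):

static u32 example_db_count(u32 occ)
{
	/* occ + ((1U << 22) - occ) == 1U << 22 == 0 modulo 2**22 */
	return (1U << 22) - occ;
}

For occ = 5 this is 4194299 doorbell writes, issued in chunks of at most K3_RINGACC_MAX_DB_RING_CNT (127) per register write.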
+
+static void k3_ringacc_ring_free_sci(struct k3_ring *ring)
+{
+ struct k3_ringacc *ringacc = ring->parent;
+ int ret;
+
+ ret = ringacc->tisci_ring_ops->config(
+ ringacc->tisci,
+ TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER,
+ ringacc->tisci_dev_id,
+ ring->ring_id,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0);
+ if (ret)
+ dev_err(ringacc->dev, "TISCI ring free fail (%d) ring_idx %d\n",
+ ret, ring->ring_id);
+}
+
+int k3_ringacc_ring_free(struct k3_ring *ring)
+{
+ struct k3_ringacc *ringacc;
+
+ if (!ring)
+ return -EINVAL;
+
+ ringacc = ring->parent;
+
+ dev_dbg(ring->parent->dev, "flags: 0x%08x\n", ring->flags);
+
+ if (!test_bit(ring->ring_id, ringacc->rings_inuse))
+ return -EINVAL;
+
+ mutex_lock(&ringacc->req_lock);
+
+ if (--ring->use_count)
+ goto out;
+
+ if (!(ring->flags & K3_RING_FLAG_BUSY))
+ goto no_init;
+
+ k3_ringacc_ring_free_sci(ring);
+
+ dma_free_coherent(ringacc->dev,
+ ring->size * (4 << ring->elm_size),
+ ring->ring_mem_virt, ring->ring_mem_dma);
+ ring->flags = 0;
+ ring->ops = NULL;
+ if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED) {
+ clear_bit(ring->proxy_id, ringacc->proxy_inuse);
+ ring->proxy = NULL;
+ ring->proxy_id = K3_RINGACC_PROXY_NOT_USED;
+ }
+
+no_init:
+ clear_bit(ring->ring_id, ringacc->rings_inuse);
+
+out:
+ mutex_unlock(&ringacc->req_lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_free);
+
+u32 k3_ringacc_get_ring_id(struct k3_ring *ring)
+{
+ if (!ring)
+ return -EINVAL;
+
+ return ring->ring_id;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_get_ring_id);
+
+u32 k3_ringacc_get_tisci_dev_id(struct k3_ring *ring)
+{
+ if (!ring)
+ return -EINVAL;
+
+ return ring->parent->tisci_dev_id;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_get_tisci_dev_id);
+
+int k3_ringacc_get_ring_irq_num(struct k3_ring *ring)
+{
+ int irq_num;
+
+ if (!ring)
+ return -EINVAL;
+
+ irq_num = ti_sci_inta_msi_get_virq(ring->parent->dev, ring->ring_id);
+ if (irq_num <= 0)
+ irq_num = -EINVAL;
+ return irq_num;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_get_ring_irq_num);
+
+static int k3_ringacc_ring_cfg_sci(struct k3_ring *ring)
+{
+ struct k3_ringacc *ringacc = ring->parent;
+ u32 ring_idx;
+ int ret;
+
+ if (!ringacc->tisci)
+ return -EINVAL;
+
+ ring_idx = ring->ring_id;
+ ret = ringacc->tisci_ring_ops->config(
+ ringacc->tisci,
+ TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER,
+ ringacc->tisci_dev_id,
+ ring_idx,
+ lower_32_bits(ring->ring_mem_dma),
+ upper_32_bits(ring->ring_mem_dma),
+ ring->size,
+ ring->mode,
+ ring->elm_size,
+ 0);
+ if (ret)
+ dev_err(ringacc->dev, "TISCI config ring fail (%d) ring_idx %d\n",
+ ret, ring_idx);
+
+ return ret;
+}
+
+int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
+{
+ struct k3_ringacc *ringacc = ring->parent;
+ int ret = 0;
+
+ if (!ring || !cfg)
+ return -EINVAL;
+ if (cfg->elm_size > K3_RINGACC_RING_ELSIZE_256 ||
+ cfg->mode >= K3_RINGACC_RING_MODE_INVALID ||
+ cfg->size & ~K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK ||
+ !test_bit(ring->ring_id, ringacc->rings_inuse))
+ return -EINVAL;
+
+ if (cfg->mode == K3_RINGACC_RING_MODE_MESSAGE &&
+ ring->proxy_id == K3_RINGACC_PROXY_NOT_USED &&
+ cfg->elm_size > K3_RINGACC_RING_ELSIZE_8) {
+ dev_err(ringacc->dev,
+ "Message mode must use proxy for %u element size\n",
+ 4 << ring->elm_size);
+ return -EINVAL;
+ }
+
+ /*
+ * In case of shared ring only the first user (master user) can
+ * configure the ring. The sequence should be by the client:
+ * ring = k3_ringacc_request_ring(ringacc, ring_id, 0); # master user
+ * k3_ringacc_ring_cfg(ring, cfg); # master configuration
+ * k3_ringacc_request_ring(ringacc, ring_id, K3_RING_FLAG_SHARED);
+ * k3_ringacc_request_ring(ringacc, ring_id, K3_RING_FLAG_SHARED);
+ */
+ if (ring->use_count != 1)
+ return 0;
+
+ ring->size = cfg->size;
+ ring->elm_size = cfg->elm_size;
+ ring->mode = cfg->mode;
+ ring->occ = 0;
+ ring->free = 0;
+ ring->rindex = 0;
+ ring->windex = 0;
+
+ if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED)
+ ring->proxy = ringacc->proxy_target_base +
+ ring->proxy_id * K3_RINGACC_PROXY_TARGET_STEP;
+
+ switch (ring->mode) {
+ case K3_RINGACC_RING_MODE_RING:
+ ring->ops = &k3_ring_mode_ring_ops;
+ break;
+ case K3_RINGACC_RING_MODE_MESSAGE:
+ if (ring->proxy)
+ ring->ops = &k3_ring_mode_proxy_ops;
+ else
+ ring->ops = &k3_ring_mode_msg_ops;
+ break;
+ default:
+ ring->ops = NULL;
+ ret = -EINVAL;
+ goto err_free_proxy;
+ }
+
+ ring->ring_mem_virt = dma_alloc_coherent(ringacc->dev,
+ ring->size * (4 << ring->elm_size),
+ &ring->ring_mem_dma, GFP_KERNEL);
+ if (!ring->ring_mem_virt) {
+ dev_err(ringacc->dev, "Failed to alloc ring mem\n");
+ ret = -ENOMEM;
+ goto err_free_ops;
+ }
+
+ ret = k3_ringacc_ring_cfg_sci(ring);
+
+ if (ret)
+ goto err_free_mem;
+
+ ring->flags |= K3_RING_FLAG_BUSY;
+ ring->flags |= (cfg->flags & K3_RINGACC_RING_SHARED) ?
+ K3_RING_FLAG_SHARED : 0;
+
+ k3_ringacc_ring_dump(ring);
+
+ return 0;
+
+err_free_mem:
+ dma_free_coherent(ringacc->dev,
+ ring->size * (4 << ring->elm_size),
+ ring->ring_mem_virt,
+ ring->ring_mem_dma);
+err_free_ops:
+ ring->ops = NULL;
+err_free_proxy:
+ ring->proxy = NULL;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_cfg);
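The shared-ring comment above already prescribes the client call sequence; a minimal consumer sketch under the same assumptions (hypothetical function, error paths compressed; the enum and field names are taken from their uses elsewhere in this file):

static int example_use_ring(struct k3_ringacc *ringacc)
{
	struct k3_ring_cfg cfg = {
		.size = 128,
		.elm_size = K3_RINGACC_RING_ELSIZE_8,	/* 8-byte elements */
		.mode = K3_RINGACC_RING_MODE_RING,
	};
	struct k3_ring *ring;
	u64 elem = 0xdeadbeef;
	int ret;

	ring = k3_ringacc_request_ring(ringacc, K3_RINGACC_RING_ID_ANY, 0);
	if (!ring)
		return -ENODEV;

	ret = k3_ringacc_ring_cfg(ring, &cfg);
	if (ret)
		goto out_free;

	ret = k3_ringacc_ring_push(ring, &elem);	/* tail push */
	if (!ret)
		ret = k3_ringacc_ring_pop(ring, &elem);	/* head pop */

out_free:
	k3_ringacc_ring_free(ring);
	return ret;
}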
+
+u32 k3_ringacc_ring_get_size(struct k3_ring *ring)
+{
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ return ring->size;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_size);
+
+u32 k3_ringacc_ring_get_free(struct k3_ring *ring)
+{
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ if (!ring->free)
+ ring->free = ring->size - readl(&ring->rt->occ);
+
+ return ring->free;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_free);
+
+u32 k3_ringacc_ring_get_occ(struct k3_ring *ring)
+{
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ return readl(&ring->rt->occ);
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_occ);
+
+u32 k3_ringacc_ring_is_full(struct k3_ring *ring)
+{
+ return !k3_ringacc_ring_get_free(ring);
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_is_full);
+
+enum k3_ringacc_access_mode {
+ K3_RINGACC_ACCESS_MODE_PUSH_HEAD,
+ K3_RINGACC_ACCESS_MODE_POP_HEAD,
+ K3_RINGACC_ACCESS_MODE_PUSH_TAIL,
+ K3_RINGACC_ACCESS_MODE_POP_TAIL,
+ K3_RINGACC_ACCESS_MODE_PEEK_HEAD,
+ K3_RINGACC_ACCESS_MODE_PEEK_TAIL,
+};
+
+#define K3_RINGACC_PROXY_MODE(x) (((x) & 0x3) << 16)
+#define K3_RINGACC_PROXY_ELSIZE(x) (((x) & 0x7) << 24)
+static int k3_ringacc_ring_cfg_proxy(struct k3_ring *ring,
+ enum k3_ringacc_proxy_access_mode mode)
+{
+ u32 val;
+
+ val = ring->ring_id;
+ val |= K3_RINGACC_PROXY_MODE(mode);
+ val |= K3_RINGACC_PROXY_ELSIZE(ring->elm_size);
+ writel(val, &ring->proxy->control);
+ return 0;
+}
+
+static int k3_ringacc_ring_access_proxy(struct k3_ring *ring, void *elem,
+ enum k3_ringacc_access_mode access_mode)
+{
+ void __iomem *ptr;
+
+ ptr = (void __iomem *)&ring->proxy->data;
+
+ switch (access_mode) {
+ case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
+ case K3_RINGACC_ACCESS_MODE_POP_HEAD:
+ k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_HEAD);
+ break;
+ case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
+ case K3_RINGACC_ACCESS_MODE_POP_TAIL:
+ k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_TAIL);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ptr += k3_ringacc_ring_get_fifo_pos(ring);
+
+ switch (access_mode) {
+ case K3_RINGACC_ACCESS_MODE_POP_HEAD:
+ case K3_RINGACC_ACCESS_MODE_POP_TAIL:
+ dev_dbg(ring->parent->dev,
+ "proxy:memcpy_fromio(x): --> ptr(%p), mode:%d\n", ptr,
+ access_mode);
+ memcpy_fromio(elem, ptr, (4 << ring->elm_size));
+ ring->occ--;
+ break;
+ case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
+ case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
+ dev_dbg(ring->parent->dev,
+ "proxy:memcpy_toio(x): --> ptr(%p), mode:%d\n", ptr,
+ access_mode);
+ memcpy_toio(ptr, elem, (4 << ring->elm_size));
+ ring->free--;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dev_dbg(ring->parent->dev, "proxy: free%d occ%d\n", ring->free,
+ ring->occ);
+ return 0;
+}
+
+static int k3_ringacc_ring_push_head_proxy(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_proxy(ring, elem,
+ K3_RINGACC_ACCESS_MODE_PUSH_HEAD);
+}
+
+static int k3_ringacc_ring_push_tail_proxy(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_proxy(ring, elem,
+ K3_RINGACC_ACCESS_MODE_PUSH_TAIL);
+}
+
+static int k3_ringacc_ring_pop_head_proxy(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_proxy(ring, elem,
+ K3_RINGACC_ACCESS_MODE_POP_HEAD);
+}
+
+static int k3_ringacc_ring_pop_tail_proxy(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_proxy(ring, elem,
+ K3_RINGACC_ACCESS_MODE_POP_TAIL);
+}
+
+static int k3_ringacc_ring_access_io(struct k3_ring *ring, void *elem,
+ enum k3_ringacc_access_mode access_mode)
+{
+ void __iomem *ptr;
+
+ switch (access_mode) {
+ case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
+ case K3_RINGACC_ACCESS_MODE_POP_HEAD:
+ ptr = (void __iomem *)&ring->fifos->head_data;
+ break;
+ case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
+ case K3_RINGACC_ACCESS_MODE_POP_TAIL:
+ ptr = (void __iomem *)&ring->fifos->tail_data;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ptr += k3_ringacc_ring_get_fifo_pos(ring);
+
+ switch (access_mode) {
+ case K3_RINGACC_ACCESS_MODE_POP_HEAD:
+ case K3_RINGACC_ACCESS_MODE_POP_TAIL:
+ dev_dbg(ring->parent->dev,
+ "memcpy_fromio(x): --> ptr(%p), mode:%d\n", ptr,
+ access_mode);
+ memcpy_fromio(elem, ptr, (4 << ring->elm_size));
+ ring->occ--;
+ break;
+ case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
+ case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
+ dev_dbg(ring->parent->dev,
+ "memcpy_toio(x): --> ptr(%p), mode:%d\n", ptr,
+ access_mode);
+ memcpy_toio(ptr, elem, (4 << ring->elm_size));
+ ring->free--;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dev_dbg(ring->parent->dev, "free%d index%d occ%d index%d\n", ring->free,
+ ring->windex, ring->occ, ring->rindex);
+ return 0;
+}
+
+static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_io(ring, elem,
+ K3_RINGACC_ACCESS_MODE_PUSH_HEAD);
+}
+
+static int k3_ringacc_ring_push_io(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_io(ring, elem,
+ K3_RINGACC_ACCESS_MODE_PUSH_TAIL);
+}
+
+static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_io(ring, elem,
+ K3_RINGACC_ACCESS_MODE_POP_HEAD);
+}
+
+static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_io(ring, elem,
+ K3_RINGACC_ACCESS_MODE_POP_TAIL);
+}
+
+static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem)
+{
+ void *elem_ptr;
+
+ elem_ptr = k3_ringacc_get_elm_addr(ring, ring->windex);
+
+ memcpy(elem_ptr, elem, (4 << ring->elm_size));
+
+ ring->windex = (ring->windex + 1) % ring->size;
+ ring->free--;
+ writel(1, &ring->rt->db);
+
+ dev_dbg(ring->parent->dev, "ring_push_mem: free%d index%d\n",
+ ring->free, ring->windex);
+
+ return 0;
+}
+
+static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem)
+{
+ void *elem_ptr;
+
+ elem_ptr = k3_ringacc_get_elm_addr(ring, ring->rindex);
+
+ memcpy(elem, elem_ptr, (4 << ring->elm_size));
+
+ ring->rindex = (ring->rindex + 1) % ring->size;
+ ring->occ--;
+ writel(-1, &ring->rt->db);
+
+ dev_dbg(ring->parent->dev, "ring_pop_mem: occ%d index%d pos_ptr%p\n",
+ ring->occ, ring->rindex, elem_ptr);
+ return 0;
+}
+
+int k3_ringacc_ring_push(struct k3_ring *ring, void *elem)
+{
+ int ret = -EOPNOTSUPP;
+
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ dev_dbg(ring->parent->dev, "ring_push: free%d index%d\n", ring->free,
+ ring->windex);
+
+ if (k3_ringacc_ring_is_full(ring))
+ return -ENOMEM;
+
+ if (ring->ops && ring->ops->push_tail)
+ ret = ring->ops->push_tail(ring, elem);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_push);
+
+int k3_ringacc_ring_push_head(struct k3_ring *ring, void *elem)
+{
+ int ret = -EOPNOTSUPP;
+
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ dev_dbg(ring->parent->dev, "ring_push_head: free%d index%d\n",
+ ring->free, ring->windex);
+
+ if (k3_ringacc_ring_is_full(ring))
+ return -ENOMEM;
+
+ if (ring->ops && ring->ops->push_head)
+ ret = ring->ops->push_head(ring, elem);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_push_head);
+
+int k3_ringacc_ring_pop(struct k3_ring *ring, void *elem)
+{
+ int ret = -EOPNOTSUPP;
+
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ if (!ring->occ)
+ ring->occ = k3_ringacc_ring_get_occ(ring);
+
+ dev_dbg(ring->parent->dev, "ring_pop: occ%d index%d\n", ring->occ,
+ ring->rindex);
+
+ if (!ring->occ)
+ return -ENODATA;
+
+ if (ring->ops && ring->ops->pop_head)
+ ret = ring->ops->pop_head(ring, elem);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_pop);
+
+int k3_ringacc_ring_pop_tail(struct k3_ring *ring, void *elem)
+{
+ int ret = -EOPNOTSUPP;
+
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ if (!ring->occ)
+ ring->occ = k3_ringacc_ring_get_occ(ring);
+
+ dev_dbg(ring->parent->dev, "ring_pop_tail: occ%d index%d\n", ring->occ,
+ ring->rindex);
+
+ if (!ring->occ)
+ return -ENODATA;
+
+ if (ring->ops && ring->ops->pop_tail)
+ ret = ring->ops->pop_tail(ring, elem);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_pop_tail);
+
+struct k3_ringacc *of_k3_ringacc_get_by_phandle(struct device_node *np,
+ const char *property)
+{
+ struct device_node *ringacc_np;
+ struct k3_ringacc *ringacc = ERR_PTR(-EPROBE_DEFER);
+ struct k3_ringacc *entry;
+
+ ringacc_np = of_parse_phandle(np, property, 0);
+ if (!ringacc_np)
+ return ERR_PTR(-ENODEV);
+
+ mutex_lock(&k3_ringacc_list_lock);
+ list_for_each_entry(entry, &k3_ringacc_list, list)
+ if (entry->dev->of_node == ringacc_np) {
+ ringacc = entry;
+ break;
+ }
+ mutex_unlock(&k3_ringacc_list_lock);
+ of_node_put(ringacc_np);
+
+ return ringacc;
+}
+EXPORT_SYMBOL_GPL(of_k3_ringacc_get_by_phandle);
+
+static int k3_ringacc_probe_dt(struct k3_ringacc *ringacc)
+{
+ struct device_node *node = ringacc->dev->of_node;
+ struct device *dev = ringacc->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ int ret;
+
+ if (!node) {
+ dev_err(dev, "device tree info unavailable\n");
+ return -ENODEV;
+ }
+
+ ret = of_property_read_u32(node, "ti,num-rings", &ringacc->num_rings);
+ if (ret) {
+ dev_err(dev, "ti,num-rings read failure %d\n", ret);
+ return ret;
+ }
+
+ ringacc->dma_ring_reset_quirk =
+ of_property_read_bool(node, "ti,dma-ring-reset-quirk");
+
+ ringacc->tisci = ti_sci_get_by_phandle(node, "ti,sci");
+ if (IS_ERR(ringacc->tisci)) {
+ ret = PTR_ERR(ringacc->tisci);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "ti,sci read fail %d\n", ret);
+ ringacc->tisci = NULL;
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "ti,sci-dev-id",
+ &ringacc->tisci_dev_id);
+ if (ret) {
+ dev_err(dev, "ti,sci-dev-id read fail %d\n", ret);
+ return ret;
+ }
+
+ pdev->id = ringacc->tisci_dev_id;
+
+ ringacc->rm_gp_range = devm_ti_sci_get_of_resource(ringacc->tisci, dev,
+ ringacc->tisci_dev_id,
+ "ti,sci-rm-range-gp-rings");
+ if (IS_ERR(ringacc->rm_gp_range)) {
+ dev_err(dev, "Failed to allocate MSI interrupts\n");
+ return PTR_ERR(ringacc->rm_gp_range);
+ }
+
+ return ti_sci_inta_msi_domain_alloc_irqs(ringacc->dev,
+ ringacc->rm_gp_range);
+}
+
+static int k3_ringacc_probe(struct platform_device *pdev)
+{
+ struct k3_ringacc *ringacc;
+ void __iomem *base_fifo, *base_rt;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int ret, i;
+
+ ringacc = devm_kzalloc(dev, sizeof(*ringacc), GFP_KERNEL);
+ if (!ringacc)
+ return -ENOMEM;
+
+ ringacc->dev = dev;
+ mutex_init(&ringacc->req_lock);
+
+ dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
+ DOMAIN_BUS_TI_SCI_INTA_MSI);
+ if (!dev->msi_domain) {
+ dev_err(dev, "Failed to get MSI domain\n");
+ return -EPROBE_DEFER;
+ }
+
+ ret = k3_ringacc_probe_dt(ringacc);
+ if (ret)
+ return ret;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rt");
+ base_rt = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base_rt))
+ return PTR_ERR(base_rt);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fifos");
+ base_fifo = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base_fifo))
+ return PTR_ERR(base_fifo);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "proxy_gcfg");
+ ringacc->proxy_gcfg = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ringacc->proxy_gcfg))
+ return PTR_ERR(ringacc->proxy_gcfg);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "proxy_target");
+ ringacc->proxy_target_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ringacc->proxy_target_base))
+ return PTR_ERR(ringacc->proxy_target_base);
+
+ ringacc->num_proxies = readl(&ringacc->proxy_gcfg->config) &
+ K3_RINGACC_PROXY_CFG_THREADS_MASK;
+
+ ringacc->rings = devm_kcalloc(dev, ringacc->num_rings,
+ sizeof(*ringacc->rings),
+ GFP_KERNEL);
+ ringacc->rings_inuse = devm_kcalloc(dev,
+ BITS_TO_LONGS(ringacc->num_rings),
+ sizeof(unsigned long), GFP_KERNEL);
+ ringacc->proxy_inuse = devm_kcalloc(dev,
+ BITS_TO_LONGS(ringacc->num_proxies),
+ sizeof(unsigned long), GFP_KERNEL);
+
+ if (!ringacc->rings || !ringacc->rings_inuse || !ringacc->proxy_inuse)
+ return -ENOMEM;
+
+ for (i = 0; i < ringacc->num_rings; i++) {
+ ringacc->rings[i].rt = base_rt +
+ K3_RINGACC_RT_REGS_STEP * i;
+ ringacc->rings[i].fifos = base_fifo +
+ K3_RINGACC_FIFO_REGS_STEP * i;
+ ringacc->rings[i].parent = ringacc;
+ ringacc->rings[i].ring_id = i;
+ ringacc->rings[i].proxy_id = K3_RINGACC_PROXY_NOT_USED;
+ }
+ dev_set_drvdata(dev, ringacc);
+
+ ringacc->tisci_ring_ops = &ringacc->tisci->ops.rm_ring_ops;
+
+ mutex_lock(&k3_ringacc_list_lock);
+ list_add_tail(&ringacc->list, &k3_ringacc_list);
+ mutex_unlock(&k3_ringacc_list_lock);
+
+ dev_info(dev, "Ring Accelerator probed rings:%u, gp-rings[%u,%u] sci-dev-id:%u\n",
+ ringacc->num_rings,
+ ringacc->rm_gp_range->desc[0].start,
+ ringacc->rm_gp_range->desc[0].num,
+ ringacc->tisci_dev_id);
+ dev_info(dev, "dma-ring-reset-quirk: %s\n",
+ ringacc->dma_ring_reset_quirk ? "enabled" : "disabled");
+ dev_info(dev, "RA Proxy rev. %08x, num_proxies:%u\n",
+ readl(&ringacc->proxy_gcfg->revision), ringacc->num_proxies);
+ return 0;
+}
+
+/* Match table for of_platform binding */
+static const struct of_device_id k3_ringacc_of_match[] = {
+ { .compatible = "ti,am654-navss-ringacc", },
+ {},
+};
+
+static struct platform_driver k3_ringacc_driver = {
+ .probe = k3_ringacc_probe,
+ .driver = {
+ .name = "k3-ringacc",
+ .of_match_table = k3_ringacc_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(k3_ringacc_driver);
ret = rproc_boot(m3_ipc->rproc);
if (ret)
dev_err(dev, "rproc_boot failed\n");
+ else
+ m3_ipc_state = m3_ipc;
do_exit(0);
}
goto err_put_rproc;
}
- m3_ipc_state = m3_ipc;
-
return 0;
err_put_rproc:
This controller does not support generic SPI messages. It only
supports the high-level SPI memory interface.
+config SPI_HISI_SFC_V3XX
+ tristate "HiSilicon SPI-NOR Flash Controller for Hi16XX chipsets"
+ depends on (ARM64 && ACPI) || COMPILE_TEST
+ depends on HAS_IOMEM
+ select MTD_SPI_NOR
+ help
+ This enables support for the HiSilicon v3xx SPI-NOR flash controller
+ found in hi16xx chipsets.
+
config SPI_NXP_FLEXSPI
tristate "NXP Flex SPI controller"
depends on ARCH_LAYERSCAPE || HAS_IOMEM
obj-$(CONFIG_SPI_FSL_QUADSPI) += spi-fsl-qspi.o
obj-$(CONFIG_SPI_FSL_SPI) += spi-fsl-spi.o
obj-$(CONFIG_SPI_GPIO) += spi-gpio.o
+obj-$(CONFIG_SPI_HISI_SFC_V3XX) += spi-hisi-sfc-v3xx.o
obj-$(CONFIG_SPI_IMG_SPFI) += spi-img-spfi.o
obj-$(CONFIG_SPI_IMX) += spi-imx.o
obj-$(CONFIG_SPI_LANTIQ_SSC) += spi-lantiq-ssc.o
master->dma_tx = dma_request_chan(dev, "tx");
if (IS_ERR(master->dma_tx)) {
err = PTR_ERR(master->dma_tx);
- if (err == -EPROBE_DEFER) {
- dev_warn(dev, "no DMA channel available at the moment\n");
- goto error_clear;
- }
- dev_err(dev,
- "DMA TX channel not available, SPI unable to use DMA\n");
- err = -EBUSY;
+ if (err != -EPROBE_DEFER)
+ dev_err(dev, "No TX DMA channel, DMA is disabled\n");
goto error_clear;
}
- /*
- * No reason to check EPROBE_DEFER here since we have already requested
- * tx channel. If it fails here, it's for another reason.
- */
- master->dma_rx = dma_request_slave_channel(dev, "rx");
-
- if (!master->dma_rx) {
- dev_err(dev,
- "DMA RX channel not available, SPI unable to use DMA\n");
- err = -EBUSY;
+ master->dma_rx = dma_request_chan(dev, "rx");
+ if (IS_ERR(master->dma_rx)) {
+ err = PTR_ERR(master->dma_rx);
+ /*
+ * No reason to check EPROBE_DEFER here since we have already
+ * requested tx channel.
+ */
+ dev_err(dev, "No RX DMA channel, DMA is disabled\n");
goto error;
}
return 0;
error:
- if (master->dma_rx)
+ if (!IS_ERR(master->dma_rx))
dma_release_channel(master->dma_rx);
if (!IS_ERR(master->dma_tx))
dma_release_channel(master->dma_tx);
name = qspi_irq_tab[val].irq_name;
if (qspi_irq_tab[val].irq_source == SINGLE_L2) {
/* get the l2 interrupts */
- irq = platform_get_irq_byname(pdev, name);
+ irq = platform_get_irq_byname_optional(pdev, name);
} else if (!num_ints && soc_intc) {
/* all mspi, bspi intrs muxed to one L1 intr */
irq = platform_get_irq(pdev, 0);
#define BCM2835_SPI_FIFO_SIZE 64
#define BCM2835_SPI_FIFO_SIZE_3_4 48
#define BCM2835_SPI_DMA_MIN_LENGTH 96
-#define BCM2835_SPI_NUM_CS 3 /* raise as necessary */
+#define BCM2835_SPI_NUM_CS 4 /* raise as necessary */
#define BCM2835_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
| SPI_NO_CS | SPI_3WIRE)
}
}
-static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
- struct bcm2835_spi *bs)
+static int bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
+ struct bcm2835_spi *bs)
{
struct dma_slave_config slave_config;
const __be32 *addr;
addr = of_get_address(ctlr->dev.of_node, 0, NULL, NULL);
if (!addr) {
dev_err(dev, "could not get DMA-register address - not using dma mode\n");
- goto err;
+ /* Fall back to interrupt mode */
+ return 0;
}
dma_reg_base = be32_to_cpup(addr);
/* get tx/rx dma */
- ctlr->dma_tx = dma_request_slave_channel(dev, "tx");
- if (!ctlr->dma_tx) {
+ ctlr->dma_tx = dma_request_chan(dev, "tx");
+ if (IS_ERR(ctlr->dma_tx)) {
dev_err(dev, "no tx-dma configuration found - not using dma mode\n");
+ ret = PTR_ERR(ctlr->dma_tx);
+ ctlr->dma_tx = NULL;
goto err;
}
- ctlr->dma_rx = dma_request_slave_channel(dev, "rx");
- if (!ctlr->dma_rx) {
+ ctlr->dma_rx = dma_request_chan(dev, "rx");
+ if (IS_ERR(ctlr->dma_rx)) {
dev_err(dev, "no rx-dma configuration found - not using dma mode\n");
+ ret = PTR_ERR(ctlr->dma_rx);
+ ctlr->dma_rx = NULL;
goto err_release;
}
/* all went well, so set can_dma */
ctlr->can_dma = bcm2835_spi_can_dma;
- return;
+ return 0;
err_config:
dev_err(dev, "issue configuring dma: %d - not using DMA mode\n",
err_release:
bcm2835_dma_release(ctlr, bs);
err:
- return;
+ /*
+ * Only report error for deferred probing, otherwise fall back to
+ * interrupt mode
+ */
+ if (ret != -EPROBE_DEFER)
+ ret = 0;
+
+ return ret;
}
static int bcm2835_spi_transfer_one_poll(struct spi_controller *ctlr,
bs->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(bs->clk)) {
err = PTR_ERR(bs->clk);
- dev_err(&pdev->dev, "could not get clk: %d\n", err);
+ if (err == -EPROBE_DEFER)
+ dev_dbg(&pdev->dev, "could not get clk: %d\n", err);
+ else
+ dev_err(&pdev->dev, "could not get clk: %d\n", err);
goto out_controller_put;
}
clk_prepare_enable(bs->clk);
- bcm2835_dma_init(ctlr, &pdev->dev, bs);
+ err = bcm2835_dma_init(ctlr, &pdev->dev, bs);
+ if (err)
+ goto out_clk_disable;
/* initialise the hardware with the default polarities */
bcm2835_wr(bs, BCM2835_SPI_CS,
dev_name(&pdev->dev), ctlr);
if (err) {
dev_err(&pdev->dev, "could not request IRQ: %d\n", err);
- goto out_clk_disable;
+ goto out_dma_release;
}
err = devm_spi_register_controller(&pdev->dev, ctlr);
if (err) {
dev_err(&pdev->dev, "could not register SPI controller: %d\n",
err);
- goto out_clk_disable;
+ goto out_dma_release;
}
bcm2835_debugfs_create(bs, dev_name(&pdev->dev));
return 0;
+out_dma_release:
+ bcm2835_dma_release(ctlr, bs);
out_clk_disable:
clk_disable_unprepare(bs->clk);
out_controller_put:
int spi_bitbang_init(struct spi_bitbang *bitbang)
{
struct spi_master *master = bitbang->master;
+ bool custom_cs;
- if (!master || !bitbang->chipselect)
+ if (!master)
+ return -EINVAL;
+ /*
+ * We only need the chipselect callback if we are actually using it.
+ * If we just use GPIO descriptors, it is surplus. If the
+ * SPI_MASTER_GPIO_SS flag is set, we always need to call the
+ * driver-specific chipselect routine.
+ */
+ custom_cs = (!master->use_gpio_descriptors ||
+ (master->flags & SPI_MASTER_GPIO_SS));
+
+ if (custom_cs && !bitbang->chipselect)
return -EINVAL;
mutex_init(&bitbang->lock);
master->prepare_transfer_hardware = spi_bitbang_prepare_hardware;
master->unprepare_transfer_hardware = spi_bitbang_unprepare_hardware;
master->transfer_one = spi_bitbang_transfer_one;
- master->set_cs = spi_bitbang_set_cs;
+ /*
+ * When using GPIO descriptors, the ->set_cs() callback doesn't even
+ * get called unless SPI_MASTER_GPIO_SS is set.
+ */
+ if (custom_cs)
+ master->set_cs = spi_bitbang_set_cs;
if (!bitbang->txrx_bufs) {
bitbang->use_dma = 0;
static void dw_writer(struct dw_spi *dws)
{
- u32 max = tx_max(dws);
+ u32 max;
u16 txw = 0;
+ spin_lock(&dws->buf_lock);
+ max = tx_max(dws);
while (max--) {
/* Set the tx word if the transfer's original "tx" is not null */
if (dws->tx_end - dws->len) {
dw_write_io_reg(dws, DW_SPI_DR, txw);
dws->tx += dws->n_bytes;
}
+ spin_unlock(&dws->buf_lock);
}
static void dw_reader(struct dw_spi *dws)
{
- u32 max = rx_max(dws);
+ u32 max;
u16 rxw;
+ spin_lock(&dws->buf_lock);
+ max = rx_max(dws);
while (max--) {
rxw = dw_read_io_reg(dws, DW_SPI_DR);
/* Care rx only if the transfer's original "rx" is not null */
}
dws->rx += dws->n_bytes;
}
+ spin_unlock(&dws->buf_lock);
}
static void int_error_stop(struct dw_spi *dws, const char *msg)
{
struct dw_spi *dws = spi_controller_get_devdata(master);
struct chip_data *chip = spi_get_ctldata(spi);
+ unsigned long flags;
u8 imask = 0;
u16 txlevel = 0;
u32 cr0;
int ret;
dws->dma_mapped = 0;
-
+ spin_lock_irqsave(&dws->buf_lock, flags);
dws->tx = (void *)transfer->tx_buf;
dws->tx_end = dws->tx + transfer->len;
dws->rx = transfer->rx_buf;
dws->rx_end = dws->rx + transfer->len;
dws->len = transfer->len;
+ spin_unlock_irqrestore(&dws->buf_lock, flags);
+
+ /* Ensure dws->rx and dws->rx_end are visible */
+ smp_mb();
spi_enable_chip(dws, 0);
struct spi_controller *master;
int ret;
- BUG_ON(dws == NULL);
+ if (!dws)
+ return -EINVAL;
master = spi_alloc_master(dev, 0);
if (!master)
dws->type = SSI_MOTO_SPI;
dws->dma_inited = 0;
dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR);
+ spin_lock_init(&dws->buf_lock);
spi_controller_set_devdata(master, dws);
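The buf_lock introduced above serializes the FIFO reader/writer against transfer setup. A minimal sketch of the locking discipline, assuming a hypothetical example_spi structure:

struct example_spi {
	spinlock_t buf_lock;	/* guards the four pointers below */
	const void *tx, *tx_end;
	void *rx, *rx_end;
};

static void example_setup_buffers(struct example_spi *es,
				  const void *tx, void *rx, size_t len)
{
	unsigned long flags;

	/* Setup can race with the IRQ-driven reader/writer, so mask
	 * interrupts while the shared pointers are rewritten.
	 */
	spin_lock_irqsave(&es->buf_lock, flags);
	es->tx = tx;
	es->tx_end = tx + len;
	es->rx = rx;
	es->rx_end = rx + len;
	spin_unlock_irqrestore(&es->buf_lock, flags);
}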
size_t len;
void *tx;
void *tx_end;
+ spinlock_t buf_lock;
void *rx;
void *rx_end;
int dma_mapped;
struct spi_transfer *cur_transfer;
struct spi_message *cur_msg;
struct chip_data *cur_chip;
+ size_t progress;
size_t len;
const void *tx;
void *rx;
if (!dma)
return -ENOMEM;
- dma->chan_rx = dma_request_slave_channel(dev, "rx");
- if (!dma->chan_rx) {
+ dma->chan_rx = dma_request_chan(dev, "rx");
+ if (IS_ERR(dma->chan_rx)) {
dev_err(dev, "rx dma channel not available\n");
- ret = -ENODEV;
+ ret = PTR_ERR(dma->chan_rx);
return ret;
}
- dma->chan_tx = dma_request_slave_channel(dev, "tx");
- if (!dma->chan_tx) {
+ dma->chan_tx = dma_request_chan(dev, "tx");
+ if (IS_ERR(dma->chan_tx)) {
dev_err(dev, "tx dma channel not available\n");
- ret = -ENODEV;
+ ret = PTR_ERR(dma->chan_tx);
goto err_tx_channel;
}
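dma_request_chan(), used above, reports failure as an ERR_PTR() code (including -EPROBE_DEFER), whereas the older dma_request_slave_channel() only ever returned NULL. A sketch of the resulting error handling, with a hypothetical example_dma holder:

static int example_request_dma(struct device *dev, struct example_dma *dma)
{
	dma->chan_rx = dma_request_chan(dev, "rx");
	if (IS_ERR(dma->chan_rx))
		return PTR_ERR(dma->chan_rx);	/* may be -EPROBE_DEFER */

	dma->chan_tx = dma_request_chan(dev, "tx");
	if (IS_ERR(dma->chan_tx)) {
		dma_release_channel(dma->chan_rx);
		return PTR_ERR(dma->chan_tx);
	}

	return 0;
}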
dspi->tx_cmd |= SPI_PUSHR_CMD_CTCNT;
if (dspi->devtype_data->xspi_mode && dspi->bits_per_word > 16) {
- /* Write two TX FIFO entries first, and then the corresponding
- * CMD FIFO entry.
+ /* Write the CMD FIFO entry first, and then the two
+ * corresponding TX FIFO entries.
*/
u32 data = dspi_pop_tx(dspi);
- if (dspi->cur_chip->ctar_val & SPI_CTAR_LSBFE) {
- /* LSB */
- tx_fifo_write(dspi, data & 0xFFFF);
- tx_fifo_write(dspi, data >> 16);
- } else {
- /* MSB */
- tx_fifo_write(dspi, data >> 16);
- tx_fifo_write(dspi, data & 0xFFFF);
- }
cmd_fifo_write(dspi);
+ tx_fifo_write(dspi, data & 0xFFFF);
+ tx_fifo_write(dspi, data >> 16);
} else {
/* Write one entry to both TX FIFO and CMD FIFO
* simultaneously.
u32 spi_tcr;
spi_take_timestamp_post(dspi->ctlr, dspi->cur_transfer,
- dspi->tx - dspi->bytes_per_word, !dspi->irq);
+ dspi->progress, !dspi->irq);
/* Get transfer counter (in number of SPI transfers). It was
* reset to 0 when transfer(s) were started.
spi_tcnt = SPI_TCR_GET_TCNT(spi_tcr);
/* Update total number of bytes that were transferred */
msg->actual_length += spi_tcnt * dspi->bytes_per_word;
+ dspi->progress += spi_tcnt;
trans_mode = dspi->devtype_data->trans_mode;
if (trans_mode == DSPI_EOQ_MODE)
return 0;
spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer,
- dspi->tx, !dspi->irq);
+ dspi->progress, !dspi->irq);
if (trans_mode == DSPI_EOQ_MODE)
dspi_eoq_write(dspi);
dspi->rx = transfer->rx_buf;
dspi->rx_end = dspi->rx + transfer->len;
dspi->len = transfer->len;
+ dspi->progress = 0;
/* Validated transfer specific frame size (defaults applied) */
dspi->bits_per_word = transfer->bits_per_word;
if (transfer->bits_per_word <= 8)
SPI_CTARE_DTCP(1));
spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer,
- dspi->tx, !dspi->irq);
+ dspi->progress, !dspi->irq);
trans_mode = dspi->devtype_data->trans_mode;
switch (trans_mode) {
fsl_lpspi->watermark = fsl_lpspi->txfifosize;
if (fsl_lpspi_can_dma(controller, spi, t))
- fsl_lpspi->usedma = 1;
+ fsl_lpspi->usedma = true;
else
- fsl_lpspi->usedma = 0;
+ fsl_lpspi->usedma = false;
return fsl_lpspi_config(fsl_lpspi);
}
fsl_lpspi->dev = &pdev->dev;
fsl_lpspi->is_slave = is_slave;
+ controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
+ controller->transfer_one = fsl_lpspi_transfer_one;
+ controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware;
+ controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware;
+ controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ controller->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
+ controller->dev.of_node = pdev->dev.of_node;
+ controller->bus_num = pdev->id;
+ controller->slave_abort = fsl_lpspi_slave_abort;
+
+ ret = devm_spi_register_controller(&pdev->dev, controller);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "spi_register_controller error.\n");
+ goto out_controller_put;
+ }
+
if (!fsl_lpspi->is_slave) {
for (i = 0; i < controller->num_chipselect; i++) {
int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);
controller->prepare_message = fsl_lpspi_prepare_message;
}
- controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
- controller->transfer_one = fsl_lpspi_transfer_one;
- controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware;
- controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware;
- controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
- controller->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
- controller->dev.of_node = pdev->dev.of_node;
- controller->bus_num = pdev->id;
- controller->slave_abort = fsl_lpspi_slave_abort;
-
init_completion(&fsl_lpspi->xfer_done);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (ret < 0)
dev_err(&pdev->dev, "dma setup error %d, use pio\n", ret);
- ret = devm_spi_register_controller(&pdev->dev, controller);
- if (ret < 0) {
- dev_err(&pdev->dev, "spi_register_controller error.\n");
- goto out_controller_put;
- }
-
return 0;
out_controller_put:
op->data.nbytes > q->devtype_data->txfifo)
return false;
- return true;
+ return spi_mem_default_supports_op(mem, op);
}
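Ending supports_op() with spi_mem_default_supports_op(), as above, lets the core also reject bus widths and modes the controller never advertised. A sketch of the pattern (EXAMPLE_TXFIFO_SIZE is a made-up limit):

static bool example_supports_op(struct spi_mem *mem,
				const struct spi_mem_op *op)
{
	/* controller-specific constraint first... */
	if (op->data.nbytes > EXAMPLE_TXFIFO_SIZE)	/* hypothetical */
		return false;

	/* ...then the generic bus-width and mode checks */
	return spi_mem_default_supports_op(mem, op);
}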
static void fsl_qspi_prepare_lut(struct fsl_qspi *q,
struct device_node *np = ofdev->dev.of_node;
struct spi_master *master;
struct resource mem;
- int irq = 0, type;
- int ret = -ENOMEM;
+ int irq, type;
+ int ret;
ret = of_mpc8xxx_spi_probe(ofdev);
if (ret)
if (spisel_boot) {
pinfo->immr_spi_cs = ioremap(get_immrbase() + IMMR_SPI_CS_OFFSET, 4);
- if (!pinfo->immr_spi_cs) {
- ret = -ENOMEM;
- goto err;
- }
+ if (!pinfo->immr_spi_cs)
+ return -ENOMEM;
}
#endif
/*
ret = of_address_to_resource(np, 0, &mem);
if (ret)
- goto err;
+ return ret;
irq = platform_get_irq(ofdev, 0);
- if (irq < 0) {
- ret = irq;
- goto err;
- }
+ if (irq < 0)
+ return irq;
master = fsl_spi_probe(dev, &mem, irq);
- if (IS_ERR(master)) {
- ret = PTR_ERR(master);
- goto err;
- }
-
- return 0;
-err:
- return ret;
+ return PTR_ERR_OR_ZERO(master);
}
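PTR_ERR_OR_ZERO(), used above, folds the usual ERR_PTR tail into one return. A small sketch, with devm_clk_get() standing in for any ERR_PTR-returning constructor:

static int example_probe_tail(struct device *dev)
{
	struct clk *clk = devm_clk_get(dev, NULL);

	/* PTR_ERR(clk) if IS_ERR(clk), otherwise 0 */
	return PTR_ERR_OR_ZERO(clk);
}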
static int of_fsl_spi_remove(struct platform_device *ofdev)
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// HiSilicon SPI NOR V3XX Flash Controller Driver for hi16xx chipsets
+//
+// Copyright (c) 2019 HiSilicon Technologies Co., Ltd.
+// Author: John Garry <john.garry@huawei.com>
+
+#include <linux/acpi.h>
+#include <linux/bitops.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+
+#define HISI_SFC_V3XX_VERSION (0x1f8)
+
+#define HISI_SFC_V3XX_CMD_CFG (0x300)
+#define HISI_SFC_V3XX_CMD_CFG_DATA_CNT_OFF 9
+#define HISI_SFC_V3XX_CMD_CFG_RW_MSK BIT(8)
+#define HISI_SFC_V3XX_CMD_CFG_DATA_EN_MSK BIT(7)
+#define HISI_SFC_V3XX_CMD_CFG_DUMMY_CNT_OFF 4
+#define HISI_SFC_V3XX_CMD_CFG_ADDR_EN_MSK BIT(3)
+#define HISI_SFC_V3XX_CMD_CFG_CS_SEL_OFF 1
+#define HISI_SFC_V3XX_CMD_CFG_START_MSK BIT(0)
+#define HISI_SFC_V3XX_CMD_INS (0x308)
+#define HISI_SFC_V3XX_CMD_ADDR (0x30c)
+#define HISI_SFC_V3XX_CMD_DATABUF0 (0x400)
+
+struct hisi_sfc_v3xx_host {
+ struct device *dev;
+ void __iomem *regbase;
+ int max_cmd_dword;
+};
+
+#define HISI_SFC_V3XX_WAIT_TIMEOUT_US 1000000
+#define HISI_SFC_V3XX_WAIT_POLL_INTERVAL_US 10
+
+static int hisi_sfc_v3xx_wait_cmd_idle(struct hisi_sfc_v3xx_host *host)
+{
+ u32 reg;
+
+ return readl_poll_timeout(host->regbase + HISI_SFC_V3XX_CMD_CFG, reg,
+ !(reg & HISI_SFC_V3XX_CMD_CFG_START_MSK),
+ HISI_SFC_V3XX_WAIT_POLL_INTERVAL_US,
+ HISI_SFC_V3XX_WAIT_TIMEOUT_US);
+}
+
+static int hisi_sfc_v3xx_adjust_op_size(struct spi_mem *mem,
+ struct spi_mem_op *op)
+{
+ struct spi_device *spi = mem->spi;
+ struct hisi_sfc_v3xx_host *host;
+ uintptr_t addr = (uintptr_t)op->data.buf.in;
+ int max_byte_count;
+
+ host = spi_controller_get_devdata(spi->master);
+
+ max_byte_count = host->max_cmd_dword * 4;
+
+ if (!IS_ALIGNED(addr, 4) && op->data.nbytes >= 4)
+ op->data.nbytes = 4 - (addr % 4);
+ else if (op->data.nbytes > max_byte_count)
+ op->data.nbytes = max_byte_count;
+
+ return 0;
+}
+
+/*
+ * memcpy_{to,from}io doesn't guarantee 32b accesses - which we require for
+ * the DATABUF registers - so use __io{read,write}32_copy when possible. For
+ * trailing bytes, copy them byte-by-byte from the DATABUF register, as we
+ * can't clobber outside the source/dest buffer.
+ *
+ * For efficient data read/write, we try to put any leading 32b-unaligned data
+ * into a separate transaction in hisi_sfc_v3xx_adjust_op_size().
+ */
+static void hisi_sfc_v3xx_read_databuf(struct hisi_sfc_v3xx_host *host,
+ u8 *to, unsigned int len)
+{
+ void __iomem *from;
+ int i;
+
+ from = host->regbase + HISI_SFC_V3XX_CMD_DATABUF0;
+
+ if (IS_ALIGNED((uintptr_t)to, 4)) {
+ int words = len / 4;
+
+ __ioread32_copy(to, from, words);
+
+ len -= words * 4;
+ if (len) {
+ u32 val;
+
+ to += words * 4;
+ from += words * 4;
+
+ val = __raw_readl(from);
+
+ for (i = 0; i < len; i++, val >>= 8, to++)
+ *to = (u8)val;
+ }
+ } else {
+ for (i = 0; i < DIV_ROUND_UP(len, 4); i++, from += 4) {
+ u32 val = __raw_readl(from);
+ int j;
+
+ for (j = 0; j < 4 && (j + (i * 4) < len);
+ to++, val >>= 8, j++)
+ *to = (u8)val;
+ }
+ }
+}
+
+static void hisi_sfc_v3xx_write_databuf(struct hisi_sfc_v3xx_host *host,
+ const u8 *from, unsigned int len)
+{
+ void __iomem *to;
+ int i;
+
+ to = host->regbase + HISI_SFC_V3XX_CMD_DATABUF0;
+
+ if (IS_ALIGNED((uintptr_t)from, 4)) {
+ int words = len / 4;
+
+ __iowrite32_copy(to, from, words);
+
+ len -= words * 4;
+ if (len) {
+ u32 val = 0;
+
+ to += words * 4;
+ from += words * 4;
+
+ for (i = 0; i < len; i++, from++)
+ val |= *from << i * 8;
+ __raw_writel(val, to);
+ }
+
+ } else {
+ for (i = 0; i < DIV_ROUND_UP(len, 4); i++, to += 4) {
+ u32 val = 0;
+ int j;
+
+ for (j = 0; j < 4 && (j + (i * 4) < len);
+ from++, j++)
+ val |= *from << j * 8;
+ __raw_writel(val, to);
+ }
+ }
+}
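The trailing-byte paths above pack bytes into one 32-bit word with byte 0 in bits 7:0, byte 1 in bits 15:8, and so on, so a single register access preserves byte order. A standalone sketch of that packing (illustrative helper, not part of the driver):

static u32 example_pack_trailing(const u8 *from, unsigned int len)
{
	u32 val = 0;
	unsigned int i;

	for (i = 0; i < len && i < 4; i++)
		val |= (u32)from[i] << (i * 8);

	return val;
}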
+
+static int hisi_sfc_v3xx_generic_exec_op(struct hisi_sfc_v3xx_host *host,
+ const struct spi_mem_op *op,
+ u8 chip_select)
+{
+ int ret, len = op->data.nbytes;
+ u32 config = 0;
+
+ if (op->addr.nbytes)
+ config |= HISI_SFC_V3XX_CMD_CFG_ADDR_EN_MSK;
+
+ if (op->data.dir != SPI_MEM_NO_DATA) {
+ config |= (len - 1) << HISI_SFC_V3XX_CMD_CFG_DATA_CNT_OFF;
+ config |= HISI_SFC_V3XX_CMD_CFG_DATA_EN_MSK;
+ }
+
+ if (op->data.dir == SPI_MEM_DATA_OUT)
+ hisi_sfc_v3xx_write_databuf(host, op->data.buf.out, len);
+ else if (op->data.dir == SPI_MEM_DATA_IN)
+ config |= HISI_SFC_V3XX_CMD_CFG_RW_MSK;
+
+ config |= op->dummy.nbytes << HISI_SFC_V3XX_CMD_CFG_DUMMY_CNT_OFF |
+ chip_select << HISI_SFC_V3XX_CMD_CFG_CS_SEL_OFF |
+ HISI_SFC_V3XX_CMD_CFG_START_MSK;
+
+ writel(op->addr.val, host->regbase + HISI_SFC_V3XX_CMD_ADDR);
+ writel(op->cmd.opcode, host->regbase + HISI_SFC_V3XX_CMD_INS);
+
+ writel(config, host->regbase + HISI_SFC_V3XX_CMD_CFG);
+
+ ret = hisi_sfc_v3xx_wait_cmd_idle(host);
+ if (ret)
+ return ret;
+
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ hisi_sfc_v3xx_read_databuf(host, op->data.buf.in, len);
+
+ return 0;
+}
+
+static int hisi_sfc_v3xx_exec_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct hisi_sfc_v3xx_host *host;
+ struct spi_device *spi = mem->spi;
+ u8 chip_select = spi->chip_select;
+
+ host = spi_controller_get_devdata(spi->master);
+
+ return hisi_sfc_v3xx_generic_exec_op(host, op, chip_select);
+}
+
+static const struct spi_controller_mem_ops hisi_sfc_v3xx_mem_ops = {
+ .adjust_op_size = hisi_sfc_v3xx_adjust_op_size,
+ .exec_op = hisi_sfc_v3xx_exec_op,
+};
+
+static int hisi_sfc_v3xx_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct hisi_sfc_v3xx_host *host;
+ struct spi_controller *ctlr;
+ u32 version;
+ int ret;
+
+ ctlr = spi_alloc_master(&pdev->dev, sizeof(*host));
+ if (!ctlr)
+ return -ENOMEM;
+
+ ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD |
+ SPI_TX_DUAL | SPI_TX_QUAD;
+
+ host = spi_controller_get_devdata(ctlr);
+ host->dev = dev;
+
+ platform_set_drvdata(pdev, host);
+
+ host->regbase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(host->regbase)) {
+ ret = PTR_ERR(host->regbase);
+ goto err_put_master;
+ }
+
+ ctlr->bus_num = -1;
+ ctlr->num_chipselect = 1;
+ ctlr->mem_ops = &hisi_sfc_v3xx_mem_ops;
+
+ version = readl(host->regbase + HISI_SFC_V3XX_VERSION);
+
+ switch (version) {
+ case 0x351:
+ host->max_cmd_dword = 64;
+ break;
+ default:
+ host->max_cmd_dword = 16;
+ break;
+ }
+
+ ret = devm_spi_register_controller(dev, ctlr);
+ if (ret)
+ goto err_put_master;
+
+ dev_info(&pdev->dev, "hw version 0x%x\n", version);
+
+ return 0;
+
+err_put_master:
+ spi_master_put(ctlr);
+ return ret;
+}
+
+#if IS_ENABLED(CONFIG_ACPI)
+static const struct acpi_device_id hisi_sfc_v3xx_acpi_ids[] = {
+ {"HISI0341", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, hisi_sfc_v3xx_acpi_ids);
+#endif
+
+static struct platform_driver hisi_sfc_v3xx_spi_driver = {
+ .driver = {
+ .name = "hisi-sfc-v3xx",
+ .acpi_match_table = ACPI_PTR(hisi_sfc_v3xx_acpi_ids),
+ },
+ .probe = hisi_sfc_v3xx_probe,
+};
+
+module_platform_driver(hisi_sfc_v3xx_spi_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
+MODULE_DESCRIPTION("HiSilicon SPI NOR V3XX Flash Controller Driver for hi16xx chipsets");
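As a worked example of hisi_sfc_v3xx_generic_exec_op() above: a 4-byte read with an address phase on chip select 0 would assemble its CMD_CFG word from the macros defined in this driver (CS 0 contributes zero bits, so the CS field is omitted):

static u32 example_read4_cs0_cfg(void)
{
	return (4 - 1) << HISI_SFC_V3XX_CMD_CFG_DATA_CNT_OFF |	/* data count */
	       HISI_SFC_V3XX_CMD_CFG_RW_MSK |		/* read direction */
	       HISI_SFC_V3XX_CMD_CFG_DATA_EN_MSK |	/* data phase */
	       HISI_SFC_V3XX_CMD_CFG_ADDR_EN_MSK |	/* address phase */
	       HISI_SFC_V3XX_CMD_CFG_START_MSK;		/* kick off */
}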
master->unprepare_message = img_spfi_unprepare;
master->handle_err = img_spfi_handle_err;
- spfi->tx_ch = dma_request_slave_channel(spfi->dev, "tx");
- spfi->rx_ch = dma_request_slave_channel(spfi->dev, "rx");
+ spfi->tx_ch = dma_request_chan(spfi->dev, "tx");
+ if (IS_ERR(spfi->tx_ch)) {
+ ret = PTR_ERR(spfi->tx_ch);
+ spfi->tx_ch = NULL;
+ if (ret == -EPROBE_DEFER)
+ goto disable_pm;
+ }
+
+ spfi->rx_ch = dma_request_chan(spfi->dev, "rx");
+ if (IS_ERR(spfi->rx_ch)) {
+ ret = PTR_ERR(spfi->rx_ch);
+ spfi->rx_ch = NULL;
+ if (ret == -EPROBE_DEFER)
+ goto disable_pm;
+ }
+
if (!spfi->tx_ch || !spfi->rx_ch) {
if (spfi->tx_ch)
dma_release_channel(spfi->tx_ch);
}
if (spi_imx_can_dma(spi_imx->bitbang.master, spi, t))
- spi_imx->usedma = 1;
+ spi_imx->usedma = true;
else
- spi_imx->usedma = 0;
+ spi_imx->usedma = false;
if (is_imx53_ecspi(spi_imx) && spi_imx->slave_mode) {
spi_imx->rx = mx53_ecspi_rx_slave;
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/reset.h>
-#include <linux/gpio.h>
/*
* The Meson SPICC controller could support DMA based transfers, but is not
static int meson_spicc_setup(struct spi_device *spi)
{
- int ret = 0;
-
if (!spi->controller_state)
spi->controller_state = spi_master_get_devdata(spi->master);
- else if (gpio_is_valid(spi->cs_gpio))
- goto out_gpio;
- else if (spi->cs_gpio == -ENOENT)
- return 0;
-
- if (gpio_is_valid(spi->cs_gpio)) {
- ret = gpio_request(spi->cs_gpio, dev_name(&spi->dev));
- if (ret) {
- dev_err(&spi->dev, "failed to request cs gpio\n");
- return ret;
- }
- }
-
-out_gpio:
- ret = gpio_direction_output(spi->cs_gpio,
- !(spi->mode & SPI_CS_HIGH));
- return ret;
+ return 0;
}
static void meson_spicc_cleanup(struct spi_device *spi)
{
- if (gpio_is_valid(spi->cs_gpio))
- gpio_free(spi->cs_gpio);
-
spi->controller_state = NULL;
}
master->prepare_message = meson_spicc_prepare_message;
master->unprepare_transfer_hardware = meson_spicc_unprepare_transfer;
master->transfer_one = meson_spicc_transfer_one;
+ master->use_gpio_descriptors = true;
/* Setup max rate according to the Meson GX datasheet */
if ((rate >> 2) > SPICC_MAX_FREQ)
if (ret)
goto out_master_free;
- ssp->dmach = dma_request_slave_channel(&pdev->dev, "rx-tx");
- if (!ssp->dmach) {
+ ssp->dmach = dma_request_chan(&pdev->dev, "rx-tx");
+ if (IS_ERR(ssp->dmach)) {
dev_err(ssp->dev, "Failed to request DMA\n");
- ret = -ENODEV;
+ ret = PTR_ERR(ssp->dmach);
goto out_master_free;
}
#include <linux/spi/spi.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
+#include <linux/reset.h>
#include <asm/unaligned.h>
struct npcm_pspi {
struct completion xfer_done;
- struct regmap *rst_regmap;
+ struct reset_control *reset;
struct spi_master *master;
unsigned int tx_bytes;
unsigned int rx_bytes;
#define NPCM_PSPI_MIN_CLK_DIVIDER 4
#define NPCM_PSPI_DEFAULT_CLK 25000000
-/* reset register */
-#define NPCM7XX_IPSRST2_OFFSET 0x24
-
-#define NPCM7XX_PSPI1_RESET BIT(22)
-#define NPCM7XX_PSPI2_RESET BIT(23)
-
static inline unsigned int bytes_per_word(unsigned int bits)
{
return bits <= 8 ? 1 : 2;
priv->mode = spi->mode;
}
+ /*
+ * If the transfer length is even and the transfer uses 8 bits per
+ * word, run it as a 16 bits-per-word transfer instead.
+ */
+ if (priv->bits_per_word == 8 && !(t->len & 0x1))
+ t->bits_per_word = 16;
+
if (!priv->is_save_param || priv->bits_per_word != t->bits_per_word) {
npcm_pspi_set_transfer_size(priv, t->bits_per_word);
priv->bits_per_word = t->bits_per_word;
static void npcm_pspi_send(struct npcm_pspi *priv)
{
int wsize;
+ u16 val;
wsize = min(bytes_per_word(priv->bits_per_word), priv->tx_bytes);
priv->tx_bytes -= wsize;
switch (wsize) {
case 1:
- iowrite8(*priv->tx_buf, NPCM_PSPI_DATA + priv->base);
+ val = *priv->tx_buf++;
+ iowrite8(val, NPCM_PSPI_DATA + priv->base);
break;
case 2:
- iowrite16(*priv->tx_buf, NPCM_PSPI_DATA + priv->base);
+ val = *priv->tx_buf++;
+ val = *priv->tx_buf++ | (val << 8);
+ iowrite16(val, NPCM_PSPI_DATA + priv->base);
break;
default:
WARN_ON_ONCE(1);
return;
}
-
- priv->tx_buf += wsize;
}
static void npcm_pspi_recv(struct npcm_pspi *priv)
switch (rsize) {
case 1:
- val = ioread8(priv->base + NPCM_PSPI_DATA);
+ *priv->rx_buf++ = ioread8(priv->base + NPCM_PSPI_DATA);
break;
case 2:
val = ioread16(priv->base + NPCM_PSPI_DATA);
+ *priv->rx_buf++ = (val >> 8);
+ *priv->rx_buf++ = val & 0xff;
break;
default:
WARN_ON_ONCE(1);
return;
}
-
- *priv->rx_buf = val;
- priv->rx_buf += rsize;
}
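The send/recv paths above agree on the same 16-bit packing: the first buffer byte travels in bits 15:8, the second in bits 7:0. A round-trip sketch with illustrative helpers:

static u16 example_pack16(const u8 *buf)
{
	return (buf[0] << 8) | buf[1];
}

static void example_unpack16(u16 val, u8 *buf)
{
	buf[0] = val >> 8;
	buf[1] = val & 0xff;
}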
static int npcm_pspi_transfer_one(struct spi_master *master,
static void npcm_pspi_reset_hw(struct npcm_pspi *priv)
{
- regmap_write(priv->rst_regmap, NPCM7XX_IPSRST2_OFFSET,
- NPCM7XX_PSPI1_RESET << priv->id);
- regmap_write(priv->rst_regmap, NPCM7XX_IPSRST2_OFFSET, 0x0);
+ reset_control_assert(priv->reset);
+ udelay(5);
+ reset_control_deassert(priv->reset);
}
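The reset framework conversion above reduces the consumer side to a handle plus an assert/deassert pair. The same sequence in isolation, as a hedged sketch (the 5 us hold time is copied from the function above):

static int example_block_reset(struct device *dev)
{
	struct reset_control *rst = devm_reset_control_get(dev, NULL);

	if (IS_ERR(rst))
		return PTR_ERR(rst);

	reset_control_assert(rst);
	udelay(5);
	reset_control_deassert(rst);

	return 0;
}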
static irqreturn_t npcm_pspi_handler(int irq, void *dev_id)
if (num_cs < 0)
return num_cs;
- pdev->id = of_alias_get_id(np, "spi");
- if (pdev->id < 0)
- pdev->id = 0;
-
master = spi_alloc_master(&pdev->dev, sizeof(*priv));
if (!master)
return -ENOMEM;
priv = spi_master_get_devdata(master);
priv->master = master;
priv->is_save_param = false;
- priv->id = pdev->id;
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
goto out_disable_clk;
}
- priv->rst_regmap =
- syscon_regmap_lookup_by_compatible("nuvoton,npcm750-rst");
- if (IS_ERR(priv->rst_regmap)) {
- dev_err(&pdev->dev, "failed to find nuvoton,npcm750-rst\n");
- return PTR_ERR(priv->rst_regmap);
+ priv->reset = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->reset)) {
+ ret = PTR_ERR(priv->reset);
+ goto out_disable_clk;
}
/* reset SPI-HW block */
master->min_speed_hz = DIV_ROUND_UP(clk_hz, NPCM_PSPI_MAX_CLK_DIVIDER);
master->mode_bits = SPI_CPHA | SPI_CPOL;
master->dev.of_node = pdev->dev.of_node;
- master->bus_num = pdev->id;
+ master->bus_num = -1;
master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
master->transfer_one = npcm_pspi_transfer_one;
master->prepare_transfer_hardware =
if (ret)
goto out_disable_clk;
- pr_info("NPCM Peripheral SPI %d probed\n", pdev->id);
+ pr_info("NPCM Peripheral SPI %d probed\n", master->bus_num);
return 0;
#include <linux/spi/spi_bitbang.h>
#include <linux/spi/spi_oc_tiny.h>
#include <linux/io.h>
-#include <linux/gpio.h>
#include <linux/of.h>
#define DRV_NAME "spi_oc_tiny"
unsigned int txc, rxc;
const u8 *txp;
u8 *rxp;
- int gpio_cs_count;
- int *gpio_cs;
};
static inline struct tiny_spi *tiny_spi_to_hw(struct spi_device *sdev)
return min(DIV_ROUND_UP(hw->freq, hz * 2), (1U << hw->baudwidth)) - 1;
}
-static void tiny_spi_chipselect(struct spi_device *spi, int is_active)
-{
- struct tiny_spi *hw = tiny_spi_to_hw(spi);
-
- if (hw->gpio_cs_count > 0) {
- gpio_set_value(hw->gpio_cs[spi->chip_select],
- (spi->mode & SPI_CS_HIGH) ? is_active : !is_active);
- }
-}
-
static int tiny_spi_setup_transfer(struct spi_device *spi,
struct spi_transfer *t)
{
{
struct tiny_spi *hw = platform_get_drvdata(pdev);
struct device_node *np = pdev->dev.of_node;
- unsigned int i;
u32 val;
if (!np)
return 0;
- hw->gpio_cs_count = of_gpio_count(np);
- if (hw->gpio_cs_count > 0) {
- hw->gpio_cs = devm_kcalloc(&pdev->dev,
- hw->gpio_cs_count, sizeof(unsigned int),
- GFP_KERNEL);
- if (!hw->gpio_cs)
- return -ENOMEM;
- }
- for (i = 0; i < hw->gpio_cs_count; i++) {
- hw->gpio_cs[i] = of_get_gpio_flags(np, i, NULL);
- if (hw->gpio_cs[i] < 0)
- return -ENODEV;
- }
hw->bitbang.master->dev.of_node = pdev->dev.of_node;
if (!of_property_read_u32(np, "clock-frequency", &val))
hw->freq = val;
struct tiny_spi_platform_data *platp = dev_get_platdata(&pdev->dev);
struct tiny_spi *hw;
struct spi_master *master;
- unsigned int i;
int err = -ENODEV;
master = spi_alloc_master(&pdev->dev, sizeof(struct tiny_spi));
/* setup the master state. */
master->bus_num = pdev->id;
- master->num_chipselect = 255;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->setup = tiny_spi_setup;
+ master->use_gpio_descriptors = true;
hw = spi_master_get_devdata(master);
platform_set_drvdata(pdev, hw);
/* setup the state for the bitbang driver */
hw->bitbang.master = master;
hw->bitbang.setup_transfer = tiny_spi_setup_transfer;
- hw->bitbang.chipselect = tiny_spi_chipselect;
hw->bitbang.txrx_bufs = tiny_spi_txrx_bufs;
/* find and map our resources */
}
/* find platform data */
if (platp) {
- hw->gpio_cs_count = platp->gpio_cs_count;
- hw->gpio_cs = platp->gpio_cs;
- if (platp->gpio_cs_count && !platp->gpio_cs) {
- err = -EBUSY;
- goto exit;
- }
hw->freq = platp->freq;
hw->baudwidth = platp->baudwidth;
} else {
if (err)
goto exit;
}
- for (i = 0; i < hw->gpio_cs_count; i++) {
- err = gpio_request(hw->gpio_cs[i], dev_name(&pdev->dev));
- if (err)
- goto exit_gpio;
- gpio_direction_output(hw->gpio_cs[i], 1);
- }
- hw->bitbang.master->num_chipselect = max(1, hw->gpio_cs_count);
/* register our spi controller */
err = spi_bitbang_start(&hw->bitbang);
return 0;
-exit_gpio:
- while (i-- > 0)
- gpio_free(hw->gpio_cs[i]);
exit:
spi_master_put(master);
return err;
{
struct tiny_spi *hw = platform_get_drvdata(pdev);
struct spi_master *master = hw->bitbang.master;
- unsigned int i;
spi_bitbang_stop(&hw->bitbang);
- for (i = 0; i < hw->gpio_cs_count; i++)
- gpio_free(hw->gpio_cs[i]);
spi_master_put(master);
return 0;
}
return limit;
}
+static void pxa2xx_spi_off(struct driver_data *drv_data)
+{
+ /* On MMP, disabling SSE seems to corrupt the rx fifo */
+ if (drv_data->ssp_type == MMP2_SSP)
+ return;
+
+ pxa2xx_spi_write(drv_data, SSCR0,
+ pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
+}
+
static int null_writer(struct driver_data *drv_data)
{
u8 n_bytes = drv_data->n_bytes;
if (!pxa25x_ssp_comp(drv_data))
pxa2xx_spi_write(drv_data, SSTO, 0);
pxa2xx_spi_flush(drv_data);
- pxa2xx_spi_write(drv_data, SSCR0,
- pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
+ pxa2xx_spi_off(drv_data);
dev_err(&drv_data->pdev->dev, "%s\n", msg);
static void handle_bad_msg(struct driver_data *drv_data)
{
- pxa2xx_spi_write(drv_data, SSCR0,
- pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
+ pxa2xx_spi_off(drv_data);
pxa2xx_spi_write(drv_data, SSCR1,
pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1);
if (!pxa25x_ssp_comp(drv_data))
|| (pxa2xx_spi_read(drv_data, SSCR1) & change_mask)
!= (cr1 & change_mask)) {
/* stop the SSP, and update the other bits */
- pxa2xx_spi_write(drv_data, SSCR0, cr0 & ~SSCR0_SSE);
+ if (drv_data->ssp_type != MMP2_SSP)
+ pxa2xx_spi_write(drv_data, SSCR0, cr0 & ~SSCR0_SSE);
if (!pxa25x_ssp_comp(drv_data))
pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
/* first set CR1 without interrupt and service enables */
if (!pxa25x_ssp_comp(drv_data))
pxa2xx_spi_write(drv_data, SSTO, 0);
pxa2xx_spi_flush(drv_data);
- pxa2xx_spi_write(drv_data, SSCR0,
- pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
+ pxa2xx_spi_off(drv_data);
dev_dbg(&drv_data->pdev->dev, "transfer aborted\n");
struct driver_data *drv_data = spi_controller_get_devdata(controller);
/* Disable the SSP */
- pxa2xx_spi_write(drv_data, SSCR0,
- pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
+ pxa2xx_spi_off(drv_data);
/* Clear and disable interrupts and service requests */
write_SSSR_CS(drv_data, drv_data->clear_sr);
pxa2xx_spi_write(drv_data, SSCR1,
struct driver_data *drv_data = spi_controller_get_devdata(controller);
/* Disable the SSP now */
- pxa2xx_spi_write(drv_data, SSCR0,
- pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
+ pxa2xx_spi_off(drv_data);
return 0;
}
/* KBL-H */
{ PCI_VDEVICE(INTEL, 0xa2a9), LPSS_SPT_SSP },
{ PCI_VDEVICE(INTEL, 0xa2aa), LPSS_SPT_SSP },
+ /* CML-V */
+ { PCI_VDEVICE(INTEL, 0xa3a9), LPSS_SPT_SSP },
+ { PCI_VDEVICE(INTEL, 0xa3aa), LPSS_SPT_SSP },
/* BXT A-Step */
{ PCI_VDEVICE(INTEL, 0x0ac2), LPSS_BXT_SSP },
{ PCI_VDEVICE(INTEL, 0x0ac4), LPSS_BXT_SSP },
struct qcom_qspi {
void __iomem *base;
struct device *dev;
- struct clk_bulk_data clks[QSPI_NUM_CLKS];
+ struct clk_bulk_data *clks;
struct qspi_xfer xfer;
/* Lock to protect xfer and IRQ accessed registers */
spinlock_t lock;
goto exit_probe_master_put;
}
+ ctrl->clks = devm_kcalloc(dev, QSPI_NUM_CLKS,
+ sizeof(*ctrl->clks), GFP_KERNEL);
+ if (!ctrl->clks) {
+ ret = -ENOMEM;
+ goto exit_probe_master_put;
+ }
+
ctrl->clks[QSPI_CLK_CORE].id = "core";
ctrl->clks[QSPI_CLK_IFACE].id = "iface";
ret = devm_clk_bulk_get(dev, QSPI_NUM_CLKS, ctrl->clks);
#define SPCMD_SPIMOD_DUAL SPCMD_SPIMOD0
#define SPCMD_SPIMOD_QUAD SPCMD_SPIMOD1
#define SPCMD_SPRW 0x0010 /* SPI Read/Write Access (Dual/Quad) */
-#define SPCMD_SSLA_MASK 0x0030 /* SSL Assert Signal Setting (RSPI) */
+#define SPCMD_SSLA(i) ((i) << 4) /* SSL Assert Signal Setting */
#define SPCMD_BRDV_MASK 0x000c /* Bit Rate Division Setting */
#define SPCMD_CPOL 0x0002 /* Clock Polarity Setting */
#define SPCMD_CPHA 0x0001 /* Clock Phase Setting */
u16 mode_bits;
u16 flags;
u16 fifo_size;
+ u8 num_hw_ss;
};
/*
return n;
}
-#define set_config_register(spi, n) spi->ops->set_config_register(spi, n)
-
static void rspi_enable_irq(const struct rspi_data *rspi, u8 enable)
{
rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | enable, RSPI_SPCR);
dmaengine_terminate_all(rspi->ctlr->dma_rx);
no_dma_rx:
if (ret == -EAGAIN) {
- pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
- dev_driver_string(&rspi->ctlr->dev),
- dev_name(&rspi->ctlr->dev));
+ dev_warn_once(&rspi->ctlr->dev,
+ "DMA not available, falling back to PIO\n");
}
return ret;
}
if (spi->mode & SPI_CPHA)
rspi->spcmd |= SPCMD_CPHA;
+ /* Configure slave signal to assert */
+ rspi->spcmd |= SPCMD_SSLA(spi->cs_gpiod ? rspi->ctlr->unused_native_cs
+ : spi->chip_select);
+
/* CMOS output mode and MOSI signal from previous transfer */
rspi->sppcr = 0;
if (spi->mode & SPI_LOOP)
rspi->sppcr |= SPPCR_SPLP;
- set_config_register(rspi, 8);
+ rspi->ops->set_config_register(rspi, 8);
if (msg->spi->mode &
(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)) {
.mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP,
.flags = SPI_CONTROLLER_MUST_TX,
.fifo_size = 8,
+ .num_hw_ss = 2,
};
static const struct spi_ops rspi_rz_ops = {
.mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP,
.flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX,
.fifo_size = 8, /* 8 for TX, 32 for RX */
+ .num_hw_ss = 1,
};
static const struct spi_ops qspi_ops = {
SPI_RX_DUAL | SPI_RX_QUAD,
.flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX,
.fifo_size = 32,
+ .num_hw_ss = 1,
};
#ifdef CONFIG_OF
ctlr->mode_bits = ops->mode_bits;
ctlr->flags = ops->flags;
ctlr->dev.of_node = pdev->dev.of_node;
+ ctlr->use_gpio_descriptors = true;
+ ctlr->max_native_cs = rspi->ops->num_hw_ss;
ret = platform_get_irq_byname_optional(pdev, "rx");
if (ret < 0) {
static const struct platform_device_id spi_driver_ids[] = {
{ "rspi", (kernel_ulong_t)&rspi_ops },
- { "rspi-rz", (kernel_ulong_t)&rspi_rz_ops },
- { "qspi", (kernel_ulong_t)&qspi_ops },
{},
};
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
-#include <linux/gpio.h>
-#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
void *rx_dma_page;
dma_addr_t tx_dma_addr;
dma_addr_t rx_dma_addr;
- unsigned short unused_ss;
bool native_cs_inited;
bool native_cs_high;
bool slave_aborted;
#define MAX_SS 3 /* Maximum number of native chip selects */
-#define TMDR1 0x00 /* Transmit Mode Register 1 */
-#define TMDR2 0x04 /* Transmit Mode Register 2 */
-#define TMDR3 0x08 /* Transmit Mode Register 3 */
-#define RMDR1 0x10 /* Receive Mode Register 1 */
-#define RMDR2 0x14 /* Receive Mode Register 2 */
-#define RMDR3 0x18 /* Receive Mode Register 3 */
-#define TSCR 0x20 /* Transmit Clock Select Register */
-#define RSCR 0x22 /* Receive Clock Select Register (SH, A1, APE6) */
-#define CTR 0x28 /* Control Register */
-#define FCTR 0x30 /* FIFO Control Register */
-#define STR 0x40 /* Status Register */
-#define IER 0x44 /* Interrupt Enable Register */
-#define TDR1 0x48 /* Transmit Control Data Register 1 (SH, A1) */
-#define TDR2 0x4c /* Transmit Control Data Register 2 (SH, A1) */
-#define TFDR 0x50 /* Transmit FIFO Data Register */
-#define RDR1 0x58 /* Receive Control Data Register 1 (SH, A1) */
-#define RDR2 0x5c /* Receive Control Data Register 2 (SH, A1) */
-#define RFDR 0x60 /* Receive FIFO Data Register */
-
-/* TMDR1 and RMDR1 */
-#define MDR1_TRMD BIT(31) /* Transfer Mode (1 = Master mode) */
-#define MDR1_SYNCMD_MASK GENMASK(29, 28) /* SYNC Mode */
-#define MDR1_SYNCMD_SPI (2 << 28)/* Level mode/SPI */
-#define MDR1_SYNCMD_LR (3 << 28)/* L/R mode */
-#define MDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */
-#define MDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */
-#define MDR1_DTDL_SHIFT 20 /* Data Pin Bit Delay for MSIOF_SYNC */
-#define MDR1_SYNCDL_SHIFT 16 /* Frame Sync Signal Timing Delay */
-#define MDR1_FLD_MASK GENMASK(3, 2) /* Frame Sync Signal Interval (0-3) */
-#define MDR1_FLD_SHIFT 2
-#define MDR1_XXSTP BIT(0) /* Transmission/Reception Stop on FIFO */
-/* TMDR1 */
-#define TMDR1_PCON BIT(30) /* Transfer Signal Connection */
-#define TMDR1_SYNCCH_MASK GENMASK(27, 26) /* Sync Signal Channel Select */
-#define TMDR1_SYNCCH_SHIFT 26 /* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */
-
-/* TMDR2 and RMDR2 */
-#define MDR2_BITLEN1(i) (((i) - 1) << 24) /* Data Size (8-32 bits) */
-#define MDR2_WDLEN1(i) (((i) - 1) << 16) /* Word Count (1-64/256 (SH, A1))) */
-#define MDR2_GRPMASK1 BIT(0) /* Group Output Mask 1 (SH, A1) */
-
-/* TSCR and RSCR */
-#define SCR_BRPS_MASK GENMASK(12, 8) /* Prescaler Setting (1-32) */
-#define SCR_BRPS(i) (((i) - 1) << 8)
-#define SCR_BRDV_MASK GENMASK(2, 0) /* Baud Rate Generator's Division Ratio */
-#define SCR_BRDV_DIV_2 0
-#define SCR_BRDV_DIV_4 1
-#define SCR_BRDV_DIV_8 2
-#define SCR_BRDV_DIV_16 3
-#define SCR_BRDV_DIV_32 4
-#define SCR_BRDV_DIV_1 7
-
-/* CTR */
-#define CTR_TSCKIZ_MASK GENMASK(31, 30) /* Transmit Clock I/O Polarity Select */
-#define CTR_TSCKIZ_SCK BIT(31) /* Disable SCK when TX disabled */
-#define CTR_TSCKIZ_POL_SHIFT 30 /* Transmit Clock Polarity */
-#define CTR_RSCKIZ_MASK GENMASK(29, 28) /* Receive Clock Polarity Select */
-#define CTR_RSCKIZ_SCK BIT(29) /* Must match CTR_TSCKIZ_SCK */
-#define CTR_RSCKIZ_POL_SHIFT 28 /* Receive Clock Polarity */
-#define CTR_TEDG_SHIFT 27 /* Transmit Timing (1 = falling edge) */
-#define CTR_REDG_SHIFT 26 /* Receive Timing (1 = falling edge) */
-#define CTR_TXDIZ_MASK GENMASK(23, 22) /* Pin Output When TX is Disabled */
-#define CTR_TXDIZ_LOW (0 << 22) /* 0 */
-#define CTR_TXDIZ_HIGH (1 << 22) /* 1 */
-#define CTR_TXDIZ_HIZ (2 << 22) /* High-impedance */
-#define CTR_TSCKE BIT(15) /* Transmit Serial Clock Output Enable */
-#define CTR_TFSE BIT(14) /* Transmit Frame Sync Signal Output Enable */
-#define CTR_TXE BIT(9) /* Transmit Enable */
-#define CTR_RXE BIT(8) /* Receive Enable */
-#define CTR_TXRST BIT(1) /* Transmit Reset */
-#define CTR_RXRST BIT(0) /* Receive Reset */
-
-/* FCTR */
-#define FCTR_TFWM_MASK GENMASK(31, 29) /* Transmit FIFO Watermark */
-#define FCTR_TFWM_64 (0 << 29) /* Transfer Request when 64 empty stages */
-#define FCTR_TFWM_32 (1 << 29) /* Transfer Request when 32 empty stages */
-#define FCTR_TFWM_24 (2 << 29) /* Transfer Request when 24 empty stages */
-#define FCTR_TFWM_16 (3 << 29) /* Transfer Request when 16 empty stages */
-#define FCTR_TFWM_12 (4 << 29) /* Transfer Request when 12 empty stages */
-#define FCTR_TFWM_8 (5 << 29) /* Transfer Request when 8 empty stages */
-#define FCTR_TFWM_4 (6 << 29) /* Transfer Request when 4 empty stages */
-#define FCTR_TFWM_1 (7 << 29) /* Transfer Request when 1 empty stage */
-#define FCTR_TFUA_MASK GENMASK(26, 20) /* Transmit FIFO Usable Area */
-#define FCTR_TFUA_SHIFT 20
-#define FCTR_TFUA(i) ((i) << FCTR_TFUA_SHIFT)
-#define FCTR_RFWM_MASK GENMASK(15, 13) /* Receive FIFO Watermark */
-#define FCTR_RFWM_1 (0 << 13) /* Transfer Request when 1 valid stages */
-#define FCTR_RFWM_4 (1 << 13) /* Transfer Request when 4 valid stages */
-#define FCTR_RFWM_8 (2 << 13) /* Transfer Request when 8 valid stages */
-#define FCTR_RFWM_16 (3 << 13) /* Transfer Request when 16 valid stages */
-#define FCTR_RFWM_32 (4 << 13) /* Transfer Request when 32 valid stages */
-#define FCTR_RFWM_64 (5 << 13) /* Transfer Request when 64 valid stages */
-#define FCTR_RFWM_128 (6 << 13) /* Transfer Request when 128 valid stages */
-#define FCTR_RFWM_256 (7 << 13) /* Transfer Request when 256 valid stages */
-#define FCTR_RFUA_MASK GENMASK(12, 4) /* Receive FIFO Usable Area (0x40 = full) */
-#define FCTR_RFUA_SHIFT 4
-#define FCTR_RFUA(i) ((i) << FCTR_RFUA_SHIFT)
-
-/* STR */
-#define STR_TFEMP BIT(29) /* Transmit FIFO Empty */
-#define STR_TDREQ BIT(28) /* Transmit Data Transfer Request */
-#define STR_TEOF BIT(23) /* Frame Transmission End */
-#define STR_TFSERR BIT(21) /* Transmit Frame Synchronization Error */
-#define STR_TFOVF BIT(20) /* Transmit FIFO Overflow */
-#define STR_TFUDF BIT(19) /* Transmit FIFO Underflow */
-#define STR_RFFUL BIT(13) /* Receive FIFO Full */
-#define STR_RDREQ BIT(12) /* Receive Data Transfer Request */
-#define STR_REOF BIT(7) /* Frame Reception End */
-#define STR_RFSERR BIT(5) /* Receive Frame Synchronization Error */
-#define STR_RFUDF BIT(4) /* Receive FIFO Underflow */
-#define STR_RFOVF BIT(3) /* Receive FIFO Overflow */
-
-/* IER */
-#define IER_TDMAE BIT(31) /* Transmit Data DMA Transfer Req. Enable */
-#define IER_TFEMPE BIT(29) /* Transmit FIFO Empty Enable */
-#define IER_TDREQE BIT(28) /* Transmit Data Transfer Request Enable */
-#define IER_TEOFE BIT(23) /* Frame Transmission End Enable */
-#define IER_TFSERRE BIT(21) /* Transmit Frame Sync Error Enable */
-#define IER_TFOVFE BIT(20) /* Transmit FIFO Overflow Enable */
-#define IER_TFUDFE BIT(19) /* Transmit FIFO Underflow Enable */
-#define IER_RDMAE BIT(15) /* Receive Data DMA Transfer Req. Enable */
-#define IER_RFFULE BIT(13) /* Receive FIFO Full Enable */
-#define IER_RDREQE BIT(12) /* Receive Data Transfer Request Enable */
-#define IER_REOFE BIT(7) /* Frame Reception End Enable */
-#define IER_RFSERRE BIT(5) /* Receive Frame Sync Error Enable */
-#define IER_RFUDFE BIT(4) /* Receive FIFO Underflow Enable */
-#define IER_RFOVFE BIT(3) /* Receive FIFO Overflow Enable */
+#define SITMDR1 0x00 /* Transmit Mode Register 1 */
+#define SITMDR2 0x04 /* Transmit Mode Register 2 */
+#define SITMDR3 0x08 /* Transmit Mode Register 3 */
+#define SIRMDR1 0x10 /* Receive Mode Register 1 */
+#define SIRMDR2 0x14 /* Receive Mode Register 2 */
+#define SIRMDR3 0x18 /* Receive Mode Register 3 */
+#define SITSCR 0x20 /* Transmit Clock Select Register */
+#define SIRSCR 0x22 /* Receive Clock Select Register (SH, A1, APE6) */
+#define SICTR 0x28 /* Control Register */
+#define SIFCTR 0x30 /* FIFO Control Register */
+#define SISTR 0x40 /* Status Register */
+#define SIIER 0x44 /* Interrupt Enable Register */
+#define SITDR1 0x48 /* Transmit Control Data Register 1 (SH, A1) */
+#define SITDR2 0x4c /* Transmit Control Data Register 2 (SH, A1) */
+#define SITFDR 0x50 /* Transmit FIFO Data Register */
+#define SIRDR1 0x58 /* Receive Control Data Register 1 (SH, A1) */
+#define SIRDR2 0x5c /* Receive Control Data Register 2 (SH, A1) */
+#define SIRFDR 0x60 /* Receive FIFO Data Register */
+
+/* SITMDR1 and SIRMDR1 */
+#define SIMDR1_TRMD BIT(31) /* Transfer Mode (1 = Master mode) */
+#define SIMDR1_SYNCMD_MASK GENMASK(29, 28) /* SYNC Mode */
+#define SIMDR1_SYNCMD_SPI (2 << 28) /* Level mode/SPI */
+#define SIMDR1_SYNCMD_LR (3 << 28) /* L/R mode */
+#define SIMDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */
+#define SIMDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */
+#define SIMDR1_DTDL_SHIFT 20 /* Data Pin Bit Delay for MSIOF_SYNC */
+#define SIMDR1_SYNCDL_SHIFT 16 /* Frame Sync Signal Timing Delay */
+#define SIMDR1_FLD_MASK GENMASK(3, 2) /* Frame Sync Signal Interval (0-3) */
+#define SIMDR1_FLD_SHIFT 2
+#define SIMDR1_XXSTP BIT(0) /* Transmission/Reception Stop on FIFO */
+/* SITMDR1 */
+#define SITMDR1_PCON BIT(30) /* Transfer Signal Connection */
+#define SITMDR1_SYNCCH_MASK GENMASK(27, 26) /* Sync Signal Channel Select */
+#define SITMDR1_SYNCCH_SHIFT 26 /* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */
+
+/* SITMDR2 and SIRMDR2 */
+#define SIMDR2_BITLEN1(i) (((i) - 1) << 24) /* Data Size (8-32 bits) */
+#define SIMDR2_WDLEN1(i) (((i) - 1) << 16) /* Word Count (1-64/256 (SH, A1)) */
+#define SIMDR2_GRPMASK1 BIT(0) /* Group Output Mask 1 (SH, A1) */
+
+/* SITSCR and SIRSCR */
+#define SISCR_BRPS_MASK GENMASK(12, 8) /* Prescaler Setting (1-32) */
+#define SISCR_BRPS(i) (((i) - 1) << 8)
+#define SISCR_BRDV_MASK GENMASK(2, 0) /* Baud Rate Generator's Division Ratio */
+#define SISCR_BRDV_DIV_2 0
+#define SISCR_BRDV_DIV_4 1
+#define SISCR_BRDV_DIV_8 2
+#define SISCR_BRDV_DIV_16 3
+#define SISCR_BRDV_DIV_32 4
+#define SISCR_BRDV_DIV_1 7
+
+/* SICTR */
+#define SICTR_TSCKIZ_MASK GENMASK(31, 30) /* Transmit Clock I/O Polarity Select */
+#define SICTR_TSCKIZ_SCK BIT(31) /* Disable SCK when TX disabled */
+#define SICTR_TSCKIZ_POL_SHIFT 30 /* Transmit Clock Polarity */
+#define SICTR_RSCKIZ_MASK GENMASK(29, 28) /* Receive Clock Polarity Select */
+#define SICTR_RSCKIZ_SCK BIT(29) /* Must match SICTR_TSCKIZ_SCK */
+#define SICTR_RSCKIZ_POL_SHIFT 28 /* Receive Clock Polarity */
+#define SICTR_TEDG_SHIFT 27 /* Transmit Timing (1 = falling edge) */
+#define SICTR_REDG_SHIFT 26 /* Receive Timing (1 = falling edge) */
+#define SICTR_TXDIZ_MASK GENMASK(23, 22) /* Pin Output When TX is Disabled */
+#define SICTR_TXDIZ_LOW (0 << 22) /* 0 */
+#define SICTR_TXDIZ_HIGH (1 << 22) /* 1 */
+#define SICTR_TXDIZ_HIZ (2 << 22) /* High-impedance */
+#define SICTR_TSCKE BIT(15) /* Transmit Serial Clock Output Enable */
+#define SICTR_TFSE BIT(14) /* Transmit Frame Sync Signal Output Enable */
+#define SICTR_TXE BIT(9) /* Transmit Enable */
+#define SICTR_RXE BIT(8) /* Receive Enable */
+#define SICTR_TXRST BIT(1) /* Transmit Reset */
+#define SICTR_RXRST BIT(0) /* Receive Reset */
+
+/* SIFCTR */
+#define SIFCTR_TFWM_MASK GENMASK(31, 29) /* Transmit FIFO Watermark */
+#define SIFCTR_TFWM_64 (0 << 29) /* Transfer Request when 64 empty stages */
+#define SIFCTR_TFWM_32 (1 << 29) /* Transfer Request when 32 empty stages */
+#define SIFCTR_TFWM_24 (2 << 29) /* Transfer Request when 24 empty stages */
+#define SIFCTR_TFWM_16 (3 << 29) /* Transfer Request when 16 empty stages */
+#define SIFCTR_TFWM_12 (4 << 29) /* Transfer Request when 12 empty stages */
+#define SIFCTR_TFWM_8 (5 << 29) /* Transfer Request when 8 empty stages */
+#define SIFCTR_TFWM_4 (6 << 29) /* Transfer Request when 4 empty stages */
+#define SIFCTR_TFWM_1 (7 << 29) /* Transfer Request when 1 empty stage */
+#define SIFCTR_TFUA_MASK GENMASK(26, 20) /* Transmit FIFO Usable Area */
+#define SIFCTR_TFUA_SHIFT 20
+#define SIFCTR_TFUA(i) ((i) << SIFCTR_TFUA_SHIFT)
+#define SIFCTR_RFWM_MASK GENMASK(15, 13) /* Receive FIFO Watermark */
+#define SIFCTR_RFWM_1 (0 << 13) /* Transfer Request when 1 valid stage */
+#define SIFCTR_RFWM_4 (1 << 13) /* Transfer Request when 4 valid stages */
+#define SIFCTR_RFWM_8 (2 << 13) /* Transfer Request when 8 valid stages */
+#define SIFCTR_RFWM_16 (3 << 13) /* Transfer Request when 16 valid stages */
+#define SIFCTR_RFWM_32 (4 << 13) /* Transfer Request when 32 valid stages */
+#define SIFCTR_RFWM_64 (5 << 13) /* Transfer Request when 64 valid stages */
+#define SIFCTR_RFWM_128 (6 << 13) /* Transfer Request when 128 valid stages */
+#define SIFCTR_RFWM_256 (7 << 13) /* Transfer Request when 256 valid stages */
+#define SIFCTR_RFUA_MASK GENMASK(12, 4) /* Receive FIFO Usable Area (0x40 = full) */
+#define SIFCTR_RFUA_SHIFT 4
+#define SIFCTR_RFUA(i) ((i) << SIFCTR_RFUA_SHIFT)
+
+/* SISTR */
+#define SISTR_TFEMP BIT(29) /* Transmit FIFO Empty */
+#define SISTR_TDREQ BIT(28) /* Transmit Data Transfer Request */
+#define SISTR_TEOF BIT(23) /* Frame Transmission End */
+#define SISTR_TFSERR BIT(21) /* Transmit Frame Synchronization Error */
+#define SISTR_TFOVF BIT(20) /* Transmit FIFO Overflow */
+#define SISTR_TFUDF BIT(19) /* Transmit FIFO Underflow */
+#define SISTR_RFFUL BIT(13) /* Receive FIFO Full */
+#define SISTR_RDREQ BIT(12) /* Receive Data Transfer Request */
+#define SISTR_REOF BIT(7) /* Frame Reception End */
+#define SISTR_RFSERR BIT(5) /* Receive Frame Synchronization Error */
+#define SISTR_RFUDF BIT(4) /* Receive FIFO Underflow */
+#define SISTR_RFOVF BIT(3) /* Receive FIFO Overflow */
+
+/* SIIER */
+#define SIIER_TDMAE BIT(31) /* Transmit Data DMA Transfer Req. Enable */
+#define SIIER_TFEMPE BIT(29) /* Transmit FIFO Empty Enable */
+#define SIIER_TDREQE BIT(28) /* Transmit Data Transfer Request Enable */
+#define SIIER_TEOFE BIT(23) /* Frame Transmission End Enable */
+#define SIIER_TFSERRE BIT(21) /* Transmit Frame Sync Error Enable */
+#define SIIER_TFOVFE BIT(20) /* Transmit FIFO Overflow Enable */
+#define SIIER_TFUDFE BIT(19) /* Transmit FIFO Underflow Enable */
+#define SIIER_RDMAE BIT(15) /* Receive Data DMA Transfer Req. Enable */
+#define SIIER_RFFULE BIT(13) /* Receive FIFO Full Enable */
+#define SIIER_RDREQE BIT(12) /* Receive Data Transfer Request Enable */
+#define SIIER_REOFE BIT(7) /* Frame Reception End Enable */
+#define SIIER_RFSERRE BIT(5) /* Receive Frame Sync Error Enable */
+#define SIIER_RFUDFE BIT(4) /* Receive FIFO Underflow Enable */
+#define SIIER_RFOVFE BIT(3) /* Receive FIFO Overflow Enable */
static u32 sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs)
{
switch (reg_offs) {
- case TSCR:
- case RSCR:
+ case SITSCR:
+ case SIRSCR:
return ioread16(p->mapbase + reg_offs);
default:
return ioread32(p->mapbase + reg_offs);
u32 value)
{
switch (reg_offs) {
- case TSCR:
- case RSCR:
+ case SITSCR:
+ case SIRSCR:
iowrite16(value, p->mapbase + reg_offs);
break;
default:
u32 mask = clr | set;
u32 data;
- data = sh_msiof_read(p, CTR);
+ data = sh_msiof_read(p, SICTR);
data &= ~clr;
data |= set;
- sh_msiof_write(p, CTR, data);
+ sh_msiof_write(p, SICTR, data);
- return readl_poll_timeout_atomic(p->mapbase + CTR, data,
+ return readl_poll_timeout_atomic(p->mapbase + SICTR, data,
(data & mask) == set, 1, 100);
}
struct sh_msiof_spi_priv *p = data;
/* just disable the interrupt and wake up */
- sh_msiof_write(p, IER, 0);
+ sh_msiof_write(p, SIIER, 0);
complete(&p->done);
return IRQ_HANDLED;
static void sh_msiof_spi_reset_regs(struct sh_msiof_spi_priv *p)
{
- u32 mask = CTR_TXRST | CTR_RXRST;
+ u32 mask = SICTR_TXRST | SICTR_RXRST;
u32 data;
- data = sh_msiof_read(p, CTR);
+ data = sh_msiof_read(p, SICTR);
data |= mask;
- sh_msiof_write(p, CTR, data);
+ sh_msiof_write(p, SICTR, data);
- readl_poll_timeout_atomic(p->mapbase + CTR, data, !(data & mask), 1,
+ readl_poll_timeout_atomic(p->mapbase + SICTR, data, !(data & mask), 1,
100);
}
static const u32 sh_msiof_spi_div_array[] = {
- SCR_BRDV_DIV_1, SCR_BRDV_DIV_2, SCR_BRDV_DIV_4,
- SCR_BRDV_DIV_8, SCR_BRDV_DIV_16, SCR_BRDV_DIV_32,
+ SISCR_BRDV_DIV_1, SISCR_BRDV_DIV_2, SISCR_BRDV_DIV_4,
+ SISCR_BRDV_DIV_8, SISCR_BRDV_DIV_16, SISCR_BRDV_DIV_32,
};
static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
div = DIV_ROUND_UP(parent_rate, spi_hz);
if (div <= 1024) {
- /* SCR_BRDV_DIV_1 is valid only if BRPS is x 1/1 or x 1/2 */
+ /* SISCR_BRDV_DIV_1 is valid only if BRPS is x 1/1 or x 1/2 */
if (!div_pow && div <= 32 && div > 2)
div_pow = 1;
brps = 32;
}
- scr = sh_msiof_spi_div_array[div_pow] | SCR_BRPS(brps);
- sh_msiof_write(p, TSCR, scr);
+ scr = sh_msiof_spi_div_array[div_pow] | SISCR_BRPS(brps);
+ sh_msiof_write(p, SITSCR, scr);
if (!(p->ctlr->flags & SPI_CONTROLLER_MUST_TX))
- sh_msiof_write(p, RSCR, scr);
+ sh_msiof_write(p, SIRSCR, scr);
}
static u32 sh_msiof_get_delay_bit(u32 dtdl_or_syncdl)
return 0;
}
- val = sh_msiof_get_delay_bit(p->info->dtdl) << MDR1_DTDL_SHIFT;
- val |= sh_msiof_get_delay_bit(p->info->syncdl) << MDR1_SYNCDL_SHIFT;
+ val = sh_msiof_get_delay_bit(p->info->dtdl) << SIMDR1_DTDL_SHIFT;
+ val |= sh_msiof_get_delay_bit(p->info->syncdl) << SIMDR1_SYNCDL_SHIFT;
return val;
}
* 1 0 11 11 0 0
* 1 1 11 11 1 1
*/
- tmp = MDR1_SYNCMD_SPI | 1 << MDR1_FLD_SHIFT | MDR1_XXSTP;
- tmp |= !cs_high << MDR1_SYNCAC_SHIFT;
- tmp |= lsb_first << MDR1_BITLSB_SHIFT;
+ tmp = SIMDR1_SYNCMD_SPI | 1 << SIMDR1_FLD_SHIFT | SIMDR1_XXSTP;
+ tmp |= !cs_high << SIMDR1_SYNCAC_SHIFT;
+ tmp |= lsb_first << SIMDR1_BITLSB_SHIFT;
tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p);
if (spi_controller_is_slave(p->ctlr)) {
- sh_msiof_write(p, TMDR1, tmp | TMDR1_PCON);
+ sh_msiof_write(p, SITMDR1, tmp | SITMDR1_PCON);
} else {
- sh_msiof_write(p, TMDR1,
- tmp | MDR1_TRMD | TMDR1_PCON |
- (ss < MAX_SS ? ss : 0) << TMDR1_SYNCCH_SHIFT);
+ sh_msiof_write(p, SITMDR1,
+ tmp | SIMDR1_TRMD | SITMDR1_PCON |
+ (ss < MAX_SS ? ss : 0) << SITMDR1_SYNCCH_SHIFT);
}
if (p->ctlr->flags & SPI_CONTROLLER_MUST_TX) {
/* These bits are reserved if RX needs TX */
tmp &= ~0x0000ffff;
}
- sh_msiof_write(p, RMDR1, tmp);
+ sh_msiof_write(p, SIRMDR1, tmp);
tmp = 0;
- tmp |= CTR_TSCKIZ_SCK | cpol << CTR_TSCKIZ_POL_SHIFT;
- tmp |= CTR_RSCKIZ_SCK | cpol << CTR_RSCKIZ_POL_SHIFT;
+ tmp |= SICTR_TSCKIZ_SCK | cpol << SICTR_TSCKIZ_POL_SHIFT;
+ tmp |= SICTR_RSCKIZ_SCK | cpol << SICTR_RSCKIZ_POL_SHIFT;
edge = cpol ^ !cpha;
- tmp |= edge << CTR_TEDG_SHIFT;
- tmp |= edge << CTR_REDG_SHIFT;
- tmp |= tx_hi_z ? CTR_TXDIZ_HIZ : CTR_TXDIZ_LOW;
- sh_msiof_write(p, CTR, tmp);
+ tmp |= edge << SICTR_TEDG_SHIFT;
+ tmp |= edge << SICTR_REDG_SHIFT;
+ tmp |= tx_hi_z ? SICTR_TXDIZ_HIZ : SICTR_TXDIZ_LOW;
+ sh_msiof_write(p, SICTR, tmp);
}
static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p,
const void *tx_buf, void *rx_buf,
u32 bits, u32 words)
{
- u32 dr2 = MDR2_BITLEN1(bits) | MDR2_WDLEN1(words);
+ u32 dr2 = SIMDR2_BITLEN1(bits) | SIMDR2_WDLEN1(words);
if (tx_buf || (p->ctlr->flags & SPI_CONTROLLER_MUST_TX))
- sh_msiof_write(p, TMDR2, dr2);
+ sh_msiof_write(p, SITMDR2, dr2);
else
- sh_msiof_write(p, TMDR2, dr2 | MDR2_GRPMASK1);
+ sh_msiof_write(p, SITMDR2, dr2 | SIMDR2_GRPMASK1);
if (rx_buf)
- sh_msiof_write(p, RMDR2, dr2);
+ sh_msiof_write(p, SIRMDR2, dr2);
}
static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p)
{
- sh_msiof_write(p, STR,
- sh_msiof_read(p, STR) & ~(STR_TDREQ | STR_RDREQ));
+ sh_msiof_write(p, SISTR,
+ sh_msiof_read(p, SISTR) & ~(SISTR_TDREQ | SISTR_RDREQ));
}
static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- sh_msiof_write(p, TFDR, buf_8[k] << fs);
+ sh_msiof_write(p, SITFDR, buf_8[k] << fs);
}
static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- sh_msiof_write(p, TFDR, buf_16[k] << fs);
+ sh_msiof_write(p, SITFDR, buf_16[k] << fs);
}
static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- sh_msiof_write(p, TFDR, get_unaligned(&buf_16[k]) << fs);
+ sh_msiof_write(p, SITFDR, get_unaligned(&buf_16[k]) << fs);
}
static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- sh_msiof_write(p, TFDR, buf_32[k] << fs);
+ sh_msiof_write(p, SITFDR, buf_32[k] << fs);
}
static void sh_msiof_spi_write_fifo_32u(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- sh_msiof_write(p, TFDR, get_unaligned(&buf_32[k]) << fs);
+ sh_msiof_write(p, SITFDR, get_unaligned(&buf_32[k]) << fs);
}
static void sh_msiof_spi_write_fifo_s32(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- sh_msiof_write(p, TFDR, swab32(buf_32[k] << fs));
+ sh_msiof_write(p, SITFDR, swab32(buf_32[k] << fs));
}
static void sh_msiof_spi_write_fifo_s32u(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- sh_msiof_write(p, TFDR, swab32(get_unaligned(&buf_32[k]) << fs));
+ sh_msiof_write(p, SITFDR, swab32(get_unaligned(&buf_32[k]) << fs));
}
static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- buf_8[k] = sh_msiof_read(p, RFDR) >> fs;
+ buf_8[k] = sh_msiof_read(p, SIRFDR) >> fs;
}
static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- buf_16[k] = sh_msiof_read(p, RFDR) >> fs;
+ buf_16[k] = sh_msiof_read(p, SIRFDR) >> fs;
}
static void sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_16[k]);
+ put_unaligned(sh_msiof_read(p, SIRFDR) >> fs, &buf_16[k]);
}
static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- buf_32[k] = sh_msiof_read(p, RFDR) >> fs;
+ buf_32[k] = sh_msiof_read(p, SIRFDR) >> fs;
}
static void sh_msiof_spi_read_fifo_32u(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_32[k]);
+ put_unaligned(sh_msiof_read(p, SIRFDR) >> fs, &buf_32[k]);
}
static void sh_msiof_spi_read_fifo_s32(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- buf_32[k] = swab32(sh_msiof_read(p, RFDR) >> fs);
+ buf_32[k] = swab32(sh_msiof_read(p, SIRFDR) >> fs);
}
static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p,
int k;
for (k = 0; k < words; k++)
- put_unaligned(swab32(sh_msiof_read(p, RFDR) >> fs), &buf_32[k]);
+ put_unaligned(swab32(sh_msiof_read(p, SIRFDR) >> fs), &buf_32[k]);
}
static int sh_msiof_spi_setup(struct spi_device *spi)
return 0;
/* Configure native chip select mode/polarity early */
- clr = MDR1_SYNCMD_MASK;
- set = MDR1_SYNCMD_SPI;
+ clr = SIMDR1_SYNCMD_MASK;
+ set = SIMDR1_SYNCMD_SPI;
if (spi->mode & SPI_CS_HIGH)
- clr |= BIT(MDR1_SYNCAC_SHIFT);
+ clr |= BIT(SIMDR1_SYNCAC_SHIFT);
else
- set |= BIT(MDR1_SYNCAC_SHIFT);
+ set |= BIT(SIMDR1_SYNCAC_SHIFT);
pm_runtime_get_sync(&p->pdev->dev);
- tmp = sh_msiof_read(p, TMDR1) & ~clr;
- sh_msiof_write(p, TMDR1, tmp | set | MDR1_TRMD | TMDR1_PCON);
- tmp = sh_msiof_read(p, RMDR1) & ~clr;
- sh_msiof_write(p, RMDR1, tmp | set);
+ tmp = sh_msiof_read(p, SITMDR1) & ~clr;
+ sh_msiof_write(p, SITMDR1, tmp | set | SIMDR1_TRMD | SITMDR1_PCON);
+ tmp = sh_msiof_read(p, SIRMDR1) & ~clr;
+ sh_msiof_write(p, SIRMDR1, tmp | set);
pm_runtime_put(&p->pdev->dev);
p->native_cs_high = spi->mode & SPI_CS_HIGH;
p->native_cs_inited = true;
/* Configure pins before asserting CS */
if (spi->cs_gpiod) {
- ss = p->unused_ss;
+ ss = ctlr->unused_native_cs;
cs_high = p->native_cs_high;
} else {
ss = spi->chip_select;
/* setup clock and rx/tx signals */
if (!slave)
- ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TSCKE);
+ ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TSCKE);
if (rx_buf && !ret)
- ret = sh_msiof_modify_ctr_wait(p, 0, CTR_RXE);
+ ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_RXE);
if (!ret)
- ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TXE);
+ ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TXE);
/* start by setting frame bit */
if (!ret && !slave)
- ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TFSE);
+ ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TFSE);
return ret;
}
/* shut down frame, rx/tx and clock signals */
if (!slave)
- ret = sh_msiof_modify_ctr_wait(p, CTR_TFSE, 0);
+ ret = sh_msiof_modify_ctr_wait(p, SICTR_TFSE, 0);
if (!ret)
- ret = sh_msiof_modify_ctr_wait(p, CTR_TXE, 0);
+ ret = sh_msiof_modify_ctr_wait(p, SICTR_TXE, 0);
if (rx_buf && !ret)
- ret = sh_msiof_modify_ctr_wait(p, CTR_RXE, 0);
+ ret = sh_msiof_modify_ctr_wait(p, SICTR_RXE, 0);
if (!ret && !slave)
- ret = sh_msiof_modify_ctr_wait(p, CTR_TSCKE, 0);
+ ret = sh_msiof_modify_ctr_wait(p, SICTR_TSCKE, 0);
return ret;
}
fifo_shift = 32 - bits;
/* default FIFO watermarks for PIO */
- sh_msiof_write(p, FCTR, 0);
+ sh_msiof_write(p, SIFCTR, 0);
/* setup msiof transfer mode registers */
sh_msiof_spi_set_mode_regs(p, tx_buf, rx_buf, bits, words);
- sh_msiof_write(p, IER, IER_TEOFE | IER_REOFE);
+ sh_msiof_write(p, SIIER, SIIER_TEOFE | SIIER_REOFE);
/* write tx fifo */
if (tx_buf)
sh_msiof_reset_str(p);
sh_msiof_spi_stop(p, rx_buf);
stop_ier:
- sh_msiof_write(p, IER, 0);
+ sh_msiof_write(p, SIIER, 0);
return ret;
}
/* First prepare and submit the DMA request(s), as this may fail */
if (rx) {
- ier_bits |= IER_RDREQE | IER_RDMAE;
+ ier_bits |= SIIER_RDREQE | SIIER_RDMAE;
desc_rx = dmaengine_prep_slave_single(p->ctlr->dma_rx,
p->rx_dma_addr, len, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}
if (tx) {
- ier_bits |= IER_TDREQE | IER_TDMAE;
+ ier_bits |= SIIER_TDREQE | SIIER_TDMAE;
dma_sync_single_for_device(p->ctlr->dma_tx->device->dev,
p->tx_dma_addr, len, DMA_TO_DEVICE);
desc_tx = dmaengine_prep_slave_single(p->ctlr->dma_tx,
}
/* 1 stage FIFO watermarks for DMA */
- sh_msiof_write(p, FCTR, FCTR_TFWM_1 | FCTR_RFWM_1);
+ sh_msiof_write(p, SIFCTR, SIFCTR_TFWM_1 | SIFCTR_RFWM_1);
/* setup msiof transfer mode registers (32-bit words) */
sh_msiof_spi_set_mode_regs(p, tx, rx, 32, len / 4);
- sh_msiof_write(p, IER, ier_bits);
+ sh_msiof_write(p, SIIER, ier_bits);
reinit_completion(&p->done);
if (tx)
if (ret)
goto stop_reset;
- sh_msiof_write(p, IER, 0);
+ sh_msiof_write(p, SIIER, 0);
} else {
/* wait for tx fifo to be emptied */
- sh_msiof_write(p, IER, IER_TEOFE);
+ sh_msiof_write(p, SIIER, SIIER_TEOFE);
ret = sh_msiof_wait_for_completion(p, &p->done);
if (ret)
goto stop_reset;
no_dma_tx:
if (rx)
dmaengine_terminate_all(p->ctlr->dma_rx);
- sh_msiof_write(p, IER, 0);
+ sh_msiof_write(p, SIIER, 0);
return ret;
}
}
#endif
-static int sh_msiof_get_cs_gpios(struct sh_msiof_spi_priv *p)
-{
- struct device *dev = &p->pdev->dev;
- unsigned int used_ss_mask = 0;
- unsigned int cs_gpios = 0;
- unsigned int num_cs, i;
- int ret;
-
- ret = gpiod_count(dev, "cs");
- if (ret <= 0)
- return 0;
-
- num_cs = max_t(unsigned int, ret, p->ctlr->num_chipselect);
- for (i = 0; i < num_cs; i++) {
- struct gpio_desc *gpiod;
-
- gpiod = devm_gpiod_get_index(dev, "cs", i, GPIOD_ASIS);
- if (!IS_ERR(gpiod)) {
- devm_gpiod_put(dev, gpiod);
- cs_gpios++;
- continue;
- }
-
- if (PTR_ERR(gpiod) != -ENOENT)
- return PTR_ERR(gpiod);
-
- if (i >= MAX_SS) {
- dev_err(dev, "Invalid native chip select %d\n", i);
- return -EINVAL;
- }
- used_ss_mask |= BIT(i);
- }
- p->unused_ss = ffz(used_ss_mask);
- if (cs_gpios && p->unused_ss >= MAX_SS) {
- dev_err(dev, "No unused native chip select available\n");
- return -EINVAL;
- }
- return 0;
-}
-
static struct dma_chan *sh_msiof_request_dma_chan(struct device *dev,
enum dma_transfer_direction dir, unsigned int id, dma_addr_t port_addr)
{
ctlr = p->ctlr;
ctlr->dma_tx = sh_msiof_request_dma_chan(dev, DMA_MEM_TO_DEV,
- dma_tx_id, res->start + TFDR);
+ dma_tx_id, res->start + SITFDR);
if (!ctlr->dma_tx)
return -ENODEV;
ctlr->dma_rx = sh_msiof_request_dma_chan(dev, DMA_DEV_TO_MEM,
- dma_rx_id, res->start + RFDR);
+ dma_rx_id, res->start + SIRFDR);
if (!ctlr->dma_rx)
goto free_tx_chan;
if (p->info->rx_fifo_override)
p->rx_fifo_size = p->info->rx_fifo_override;
- /* Setup GPIO chip selects */
- ctlr->num_chipselect = p->info->num_chipselect;
- ret = sh_msiof_get_cs_gpios(p);
- if (ret)
- goto err1;
-
/* init controller code */
ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
ctlr->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE;
ctlr->flags = chipdata->ctlr_flags;
ctlr->bus_num = pdev->id;
+ ctlr->num_chipselect = p->info->num_chipselect;
ctlr->dev.of_node = pdev->dev.of_node;
ctlr->setup = sh_msiof_spi_setup;
ctlr->prepare_message = sh_msiof_prepare_message;
ctlr->auto_runtime_pm = true;
ctlr->transfer_one = sh_msiof_transfer_one;
ctlr->use_gpio_descriptors = true;
+ ctlr->max_native_cs = MAX_SS;
ret = sh_msiof_request_dma(p);
if (ret < 0)
sspi->bitbang.master->dev.of_node = pdev->dev.of_node;
/* request DMA channels */
- sspi->rx_chan = dma_request_slave_channel(&pdev->dev, "rx");
- if (!sspi->rx_chan) {
+ sspi->rx_chan = dma_request_chan(&pdev->dev, "rx");
+ if (IS_ERR(sspi->rx_chan)) {
dev_err(&pdev->dev, "can not allocate rx dma channel\n");
- ret = -ENODEV;
+ ret = PTR_ERR(sspi->rx_chan);
goto free_master;
}
- sspi->tx_chan = dma_request_slave_channel(&pdev->dev, "tx");
- if (!sspi->tx_chan) {
+ sspi->tx_chan = dma_request_chan(&pdev->dev, "tx");
+ if (IS_ERR(sspi->tx_chan)) {
dev_err(&pdev->dev, "can not allocate tx dma channel\n");
- ret = -ENODEV;
+ ret = PTR_ERR(sspi->tx_chan);
goto free_rx_dma;
}
return 0;
}
-static void stm32_qspi_dma_setup(struct stm32_qspi *qspi)
+static int stm32_qspi_dma_setup(struct stm32_qspi *qspi)
{
struct dma_slave_config dma_cfg;
struct device *dev = qspi->dev;
+ int ret = 0;
memset(&dma_cfg, 0, sizeof(dma_cfg));
dma_cfg.src_maxburst = 4;
dma_cfg.dst_maxburst = 4;
- qspi->dma_chrx = dma_request_slave_channel(dev, "rx");
- if (qspi->dma_chrx) {
+ qspi->dma_chrx = dma_request_chan(dev, "rx");
+ if (IS_ERR(qspi->dma_chrx)) {
+ ret = PTR_ERR(qspi->dma_chrx);
+ qspi->dma_chrx = NULL;
+ if (ret == -EPROBE_DEFER)
+ goto out;
+ } else {
if (dmaengine_slave_config(qspi->dma_chrx, &dma_cfg)) {
dev_err(dev, "dma rx config failed\n");
dma_release_channel(qspi->dma_chrx);
}
}
- qspi->dma_chtx = dma_request_slave_channel(dev, "tx");
- if (qspi->dma_chtx) {
+ qspi->dma_chtx = dma_request_chan(dev, "tx");
+ if (IS_ERR(qspi->dma_chtx)) {
+ ret = PTR_ERR(qspi->dma_chtx);
+ qspi->dma_chtx = NULL;
+ } else {
if (dmaengine_slave_config(qspi->dma_chtx, &dma_cfg)) {
dev_err(dev, "dma tx config failed\n");
dma_release_channel(qspi->dma_chtx);
}
}
+out:
init_completion(&qspi->dma_completion);
+
+ if (ret != -EPROBE_DEFER)
+ ret = 0;
+
+ return ret;
}
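The dma_request_chan() conversion above treats DMA as optional: any failure other than -EPROBE_DEFER falls back to PIO, while -EPROBE_DEFER is propagated so probe can be retried once the DMA controller appears. A minimal sketch of that pattern under a hypothetical driver context (request_optional_dma is illustrative, not part of the driver):

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/err.h>

/* Request an optional DMA channel: only -EPROBE_DEFER is fatal,
 * any other failure means "fall back to PIO". */
static int request_optional_dma(struct device *dev, const char *name,
				struct dma_chan **out)
{
	struct dma_chan *ch = dma_request_chan(dev, name);

	if (IS_ERR(ch)) {
		int ret = PTR_ERR(ch);

		*out = NULL;
		if (ret == -EPROBE_DEFER)
			return ret;	/* let the core retry probe */
		dev_warn(dev, "no %s DMA channel, using PIO\n", name);
		return 0;
	}

	*out = ch;
	return 0;
}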
static void stm32_qspi_dma_free(struct stm32_qspi *qspi)
qspi->dev = dev;
platform_set_drvdata(pdev, qspi);
- stm32_qspi_dma_setup(qspi);
+ ret = stm32_qspi_dma_setup(qspi);
+ if (ret)
+ goto err;
+
mutex_init(&qspi->lock);
ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
-#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/module.h>
return IRQ_HANDLED;
}
-/**
- * stm32_spi_setup - setup device chip select
- */
-static int stm32_spi_setup(struct spi_device *spi_dev)
-{
- int ret = 0;
-
- if (!gpio_is_valid(spi_dev->cs_gpio)) {
- dev_err(&spi_dev->dev, "%d is not a valid gpio\n",
- spi_dev->cs_gpio);
- return -EINVAL;
- }
-
- dev_dbg(&spi_dev->dev, "%s: set gpio%d output %s\n", __func__,
- spi_dev->cs_gpio,
- (spi_dev->mode & SPI_CS_HIGH) ? "low" : "high");
-
- ret = gpio_direction_output(spi_dev->cs_gpio,
- !(spi_dev->mode & SPI_CS_HIGH));
-
- return ret;
-}
-
/**
* stm32_spi_prepare_msg - set up the controller to transfer a single message
*/
struct spi_master *master;
struct stm32_spi *spi;
struct resource *res;
- int i, ret;
+ int ret;
master = spi_alloc_master(&pdev->dev, sizeof(struct stm32_spi));
if (!master) {
master->bits_per_word_mask = spi->cfg->get_bpw_mask(spi);
master->max_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_min;
master->min_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_max;
- master->setup = stm32_spi_setup;
+ master->use_gpio_descriptors = true;
master->prepare_message = stm32_spi_prepare_msg;
master->transfer_one = stm32_spi_transfer_one;
master->unprepare_message = stm32_spi_unprepare_msg;
- spi->dma_tx = dma_request_slave_channel(spi->dev, "tx");
- if (!spi->dma_tx)
+ spi->dma_tx = dma_request_chan(spi->dev, "tx");
+ if (IS_ERR(spi->dma_tx)) {
+ ret = PTR_ERR(spi->dma_tx);
+ spi->dma_tx = NULL;
+ if (ret == -EPROBE_DEFER)
+ goto err_clk_disable;
+
dev_warn(&pdev->dev, "failed to request tx dma channel\n");
- else
+ } else {
master->dma_tx = spi->dma_tx;
+ }
+
+ spi->dma_rx = dma_request_chan(spi->dev, "rx");
+ if (IS_ERR(spi->dma_rx)) {
+ ret = PTR_ERR(spi->dma_rx);
+ spi->dma_rx = NULL;
+ if (ret == -EPROBE_DEFER)
+ goto err_dma_release;
- spi->dma_rx = dma_request_slave_channel(spi->dev, "rx");
- if (!spi->dma_rx)
dev_warn(&pdev->dev, "failed to request rx dma channel\n");
- else
+ } else {
master->dma_rx = spi->dma_rx;
+ }
if (spi->dma_tx || spi->dma_rx)
master->can_dma = stm32_spi_can_dma;
if (ret) {
dev_err(&pdev->dev, "spi master registration failed: %d\n",
ret);
- goto err_dma_release;
+ goto err_pm_disable;
}
- if (!master->cs_gpios) {
+ if (!master->cs_gpiods) {
dev_err(&pdev->dev, "no CS gpios available\n");
ret = -EINVAL;
- goto err_dma_release;
- }
-
- for (i = 0; i < master->num_chipselect; i++) {
- if (!gpio_is_valid(master->cs_gpios[i])) {
- dev_err(&pdev->dev, "%i is not a valid gpio\n",
- master->cs_gpios[i]);
- ret = -EINVAL;
- goto err_dma_release;
- }
-
- ret = devm_gpio_request(&pdev->dev, master->cs_gpios[i],
- DRIVER_NAME);
- if (ret) {
- dev_err(&pdev->dev, "can't get CS gpio %i\n",
- master->cs_gpios[i]);
- goto err_dma_release;
- }
+ goto err_pm_disable;
}
dev_info(&pdev->dev, "driver initialized\n");
return 0;
+err_pm_disable:
+ pm_runtime_disable(&pdev->dev);
err_dma_release:
if (spi->dma_tx)
dma_release_channel(spi->dma_tx);
if (spi->dma_rx)
dma_release_channel(spi->dma_rx);
-
- pm_runtime_disable(&pdev->dev);
err_clk_disable:
clk_disable_unprepare(spi->clk);
err_master_put:
if ((bits_per_word == 8 || bits_per_word == 16 ||
bits_per_word == 32) && t->len > 3) {
- tspi->is_packed = 1;
+ tspi->is_packed = true;
tspi->words_per_32bit = 32/bits_per_word;
} else {
- tspi->is_packed = 0;
+ tspi->is_packed = false;
tspi->words_per_32bit = 1;
}
#define QSPI_COMPLETION_TIMEOUT msecs_to_jiffies(2000)
-#define QSPI_FCLK 192000000
-
/* Clock Control */
#define QSPI_CLK_EN (1 << 31)
#define QSPI_CLK_DIV_MAX 0xffff
{
int wlen;
unsigned int cmd;
+ u32 rx;
+ u8 rxlen, rx_wlen;
u8 *rxbuf;
rxbuf = t->rx_buf;
break;
}
wlen = t->bits_per_word >> 3; /* in bytes */
+ rx_wlen = wlen;
while (count) {
dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc);
if (qspi_is_busy(qspi))
return -EBUSY;
+ switch (wlen) {
+ case 1:
+ /*
+ * Optimize 8-bit word transfers, as used by
+ * SPI flash devices.
+ */
+ if (count >= QSPI_WLEN_MAX_BYTES) {
+ rxlen = QSPI_WLEN_MAX_BYTES;
+ } else {
+ rxlen = min(count, 4);
+ }
+ rx_wlen = rxlen << 3;
+ cmd &= ~QSPI_WLEN_MASK;
+ cmd |= QSPI_WLEN(rx_wlen);
+ break;
+ default:
+ rxlen = wlen;
+ break;
+ }
+
ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
if (ti_qspi_poll_wc(qspi)) {
dev_err(qspi->dev, "read timed out\n");
return -ETIMEDOUT;
}
+
switch (wlen) {
case 1:
- *rxbuf = readb(qspi->base + QSPI_SPI_DATA_REG);
+ /*
+ * Optimize 8-bit word transfers, as used by
+ * SPI flash devices.
+ */
+ if (count >= QSPI_WLEN_MAX_BYTES) {
+ u32 *rxp = (u32 *) rxbuf;
+ rx = readl(qspi->base + QSPI_SPI_DATA_REG_3);
+ *rxp++ = be32_to_cpu(rx);
+ rx = readl(qspi->base + QSPI_SPI_DATA_REG_2);
+ *rxp++ = be32_to_cpu(rx);
+ rx = readl(qspi->base + QSPI_SPI_DATA_REG_1);
+ *rxp++ = be32_to_cpu(rx);
+ rx = readl(qspi->base + QSPI_SPI_DATA_REG);
+ *rxp++ = be32_to_cpu(rx);
+ } else {
+ u8 *rxp = rxbuf;
+ rx = readl(qspi->base + QSPI_SPI_DATA_REG);
+ if (rx_wlen >= 8)
+ *rxp++ = rx >> (rx_wlen - 8);
+ if (rx_wlen >= 16)
+ *rxp++ = rx >> (rx_wlen - 16);
+ if (rx_wlen >= 24)
+ *rxp++ = rx >> (rx_wlen - 24);
+ if (rx_wlen >= 32)
+ *rxp++ = rx;
+ }
break;
case 2:
*((u16 *)rxbuf) = readw(qspi->base + QSPI_SPI_DATA_REG);
*((u32 *)rxbuf) = readl(qspi->base + QSPI_SPI_DATA_REG);
break;
}
- rxbuf += wlen;
- count -= wlen;
+ rxbuf += rxlen;
+ count -= rxlen;
}
return 0;
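The 8-bit fast path above packs up to four bytes into each 32-bit data-register read and peels them off MSB-first; the four QSPI_SPI_DATA_REG_* reads handle the 16-byte burst case. A standalone sketch of the sub-word unpacking, with the register read replaced by a function parameter:

#include <stdint.h>

/* Unpack 'nbits' (8/16/24/32) of an MSB-first RX word into bytes,
 * mirroring the shift cascade in the read loop above. */
static void unpack_rx_word(uint32_t rx, unsigned int nbits, uint8_t *out)
{
	if (nbits >= 8)
		*out++ = rx >> (nbits - 8);
	if (nbits >= 16)
		*out++ = rx >> (nbits - 16);
	if (nbits >= 24)
		*out++ = rx >> (nbits - 24);
	if (nbits >= 32)
		*out++ = rx;
}

For example, with rx = 0x00AABBCC and nbits = 24, the bytes 0xAA, 0xBB and 0xCC come out in order; each store truncates the shifted word to its low byte.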
QSPI_SPI_SETUP_REG(spi->chip_select));
}
+static int ti_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
+{
+ struct ti_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
+ size_t max_len;
+
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ if (op->addr.val < qspi->mmap_size) {
+ /* Limit MMIO to the mmapped region */
+ if (op->addr.val + op->data.nbytes > qspi->mmap_size) {
+ max_len = qspi->mmap_size - op->addr.val;
+ op->data.nbytes = min((size_t) op->data.nbytes,
+ max_len);
+ }
+ } else {
+ /*
+ * Use fallback mode (SW-generated transfers) above the
+ * mmapped region.
+ * Adjust the size to comply with the QSPI max frame length.
+ */
+ max_len = QSPI_FRAME;
+ max_len -= 1 + op->addr.nbytes + op->dummy.nbytes;
+ op->data.nbytes = min((size_t) op->data.nbytes,
+ max_len);
+ }
+ }
+
+ return 0;
+}
+
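ti_qspi_adjust_op_size() clamps a read either to the memory-mapped window or to what fits in one QSPI frame. A hypothetical stand-alone version of the same arithmetic, where 'frame' and 'cmd_bytes' stand in for QSPI_FRAME and 1 + addr.nbytes + dummy.nbytes:

#include <stddef.h>

static size_t clamp_read_len(size_t addr, size_t len, size_t mmap_size,
			     size_t frame, size_t cmd_bytes)
{
	if (addr < mmap_size) {
		/* MMIO path: stay inside the mapped window */
		if (addr + len > mmap_size)
			len = mmap_size - addr;
	} else {
		/* SW transfer path: respect the max frame length */
		size_t max_len = frame - cmd_bytes;

		if (len > max_len)
			len = max_len;
	}
	return len;
}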
static int ti_qspi_exec_mem_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
static const struct spi_controller_mem_ops ti_qspi_mem_ops = {
.exec_op = ti_qspi_exec_mem_op,
+ .adjust_op_size = ti_qspi_adjust_op_size,
};
static int ti_qspi_start_transfer_one(struct spi_master *master,
/* Set Tx DMA */
param = &dma->param_tx;
param->dma_dev = &dma_dev->dev;
- param->chan_id = data->ch * 2; /* Tx = 0, 2 */;
+ param->chan_id = data->ch * 2; /* Tx = 0, 2 */
param->tx_reg = data->io_base_addr + PCH_SPDWR;
param->width = width;
chan = dma_request_channel(mask, pch_spi_filter, param);
/* Set Rx DMA */
param = &dma->param_rx;
param->dma_dev = &dma_dev->dev;
- param->chan_id = data->ch * 2 + 1; /* Rx = Tx + 1 */;
+ param->chan_id = data->ch * 2 + 1; /* Rx = Tx + 1 */
param->rx_reg = data->io_base_addr + PCH_SPDRR;
param->width = width;
chan = dma_request_channel(mask, pch_spi_filter, param);
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
struct uniphier_spi_priv {
void __iomem *base;
+ dma_addr_t base_dma_addr;
struct clk *clk;
struct spi_master *master;
struct completion xfer_done;
unsigned int rx_bytes;
const u8 *tx_buf;
u8 *rx_buf;
+ atomic_t dma_busy;
bool is_save_param;
u8 bits_per_word;
#define SSI_FPS_FSTRT BIT(14)
#define SSI_SR 0x14
+#define SSI_SR_BUSY BIT(7)
#define SSI_SR_RNE BIT(0)
#define SSI_IE 0x18
+#define SSI_IE_TCIE BIT(4)
#define SSI_IE_RCIE BIT(3)
+#define SSI_IE_TXRE BIT(2)
+#define SSI_IE_RXRE BIT(1)
#define SSI_IE_RORIE BIT(0)
+#define SSI_IE_ALL_MASK GENMASK(4, 0)
#define SSI_IS 0x1c
#define SSI_IS_RXRS BIT(9)
#define SSI_RXDR 0x24
#define SSI_FIFO_DEPTH 8U
+#define SSI_FIFO_BURST_NUM 1
+
+#define SSI_DMA_RX_BUSY BIT(1)
+#define SSI_DMA_TX_BUSY BIT(0)
static inline unsigned int bytes_per_word(unsigned int bits)
{
return bits <= 8 ? 1 : (bits <= 16 ? 2 : 4);
}
-static inline void uniphier_spi_irq_enable(struct spi_device *spi, u32 mask)
+static inline void uniphier_spi_irq_enable(struct uniphier_spi_priv *priv,
+ u32 mask)
{
- struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
u32 val;
val = readl(priv->base + SSI_IE);
writel(val, priv->base + SSI_IE);
}
-static inline void uniphier_spi_irq_disable(struct spi_device *spi, u32 mask)
+static inline void uniphier_spi_irq_disable(struct uniphier_spi_priv *priv,
+ u32 mask)
{
- struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
u32 val;
val = readl(priv->base + SSI_IE);
}
}
-static void uniphier_spi_fill_tx_fifo(struct uniphier_spi_priv *priv)
+static void uniphier_spi_set_fifo_threshold(struct uniphier_spi_priv *priv,
+ unsigned int threshold)
{
- unsigned int fifo_threshold, fill_bytes;
u32 val;
- fifo_threshold = DIV_ROUND_UP(priv->rx_bytes,
- bytes_per_word(priv->bits_per_word));
- fifo_threshold = min(fifo_threshold, SSI_FIFO_DEPTH);
-
- fill_bytes = fifo_threshold - (priv->rx_bytes - priv->tx_bytes);
-
- /* set fifo threshold */
val = readl(priv->base + SSI_FC);
val &= ~(SSI_FC_TXFTH_MASK | SSI_FC_RXFTH_MASK);
- val |= FIELD_PREP(SSI_FC_TXFTH_MASK, fifo_threshold);
- val |= FIELD_PREP(SSI_FC_RXFTH_MASK, fifo_threshold);
+ val |= FIELD_PREP(SSI_FC_TXFTH_MASK, SSI_FIFO_DEPTH - threshold);
+ val |= FIELD_PREP(SSI_FC_RXFTH_MASK, threshold);
writel(val, priv->base + SSI_FC);
+}
+
+static void uniphier_spi_fill_tx_fifo(struct uniphier_spi_priv *priv)
+{
+ unsigned int fifo_threshold, fill_words;
+ unsigned int bpw = bytes_per_word(priv->bits_per_word);
+
+ fifo_threshold = DIV_ROUND_UP(priv->rx_bytes, bpw);
+ fifo_threshold = min(fifo_threshold, SSI_FIFO_DEPTH);
+
+ uniphier_spi_set_fifo_threshold(priv, fifo_threshold);
+
+ fill_words = fifo_threshold -
+ DIV_ROUND_UP(priv->rx_bytes - priv->tx_bytes, bpw);
- while (fill_bytes--)
+ while (fill_words--)
uniphier_spi_send(priv);
}
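As a worked example of the refill arithmetic: assume 16-bit words (bpw = 2) with rx_bytes = 20 still to receive and tx_bytes = 12 still to send. Then fifo_threshold = min(DIV_ROUND_UP(20, 2), 8) = 8 words, DIV_ROUND_UP(20 - 12, 2) = 4 words are already in flight, so fill_words = 4 and only the free FIFO slots are topped up.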
writel(val, priv->base + SSI_FPS);
}
+static bool uniphier_spi_can_dma(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ unsigned int bpw = bytes_per_word(priv->bits_per_word);
+
+ if ((!master->dma_tx && !master->dma_rx)
+ || (!master->dma_tx && t->tx_buf)
+ || (!master->dma_rx && t->rx_buf))
+ return false;
+
+ return DIV_ROUND_UP(t->len, bpw) > SSI_FIFO_DEPTH;
+}
+
+static void uniphier_spi_dma_rxcb(void *data)
+{
+ struct spi_master *master = data;
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ int state = atomic_fetch_andnot(SSI_DMA_RX_BUSY, &priv->dma_busy);
+
+ uniphier_spi_irq_disable(priv, SSI_IE_RXRE);
+
+ if (!(state & SSI_DMA_TX_BUSY))
+ spi_finalize_current_transfer(master);
+}
+
+static void uniphier_spi_dma_txcb(void *data)
+{
+ struct spi_master *master = data;
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ int state = atomic_fetch_andnot(SSI_DMA_TX_BUSY, &priv->dma_busy);
+
+ uniphier_spi_irq_disable(priv, SSI_IE_TXRE);
+
+ if (!(state & SSI_DMA_RX_BUSY))
+ spi_finalize_current_transfer(master);
+}
+
+static int uniphier_spi_transfer_one_dma(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ struct dma_async_tx_descriptor *rxdesc = NULL, *txdesc = NULL;
+ int buswidth;
+
+ atomic_set(&priv->dma_busy, 0);
+
+ uniphier_spi_set_fifo_threshold(priv, SSI_FIFO_BURST_NUM);
+
+ if (priv->bits_per_word <= 8)
+ buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ else if (priv->bits_per_word <= 16)
+ buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ else
+ buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
+
+ if (priv->rx_buf) {
+ struct dma_slave_config rxconf = {
+ .direction = DMA_DEV_TO_MEM,
+ .src_addr = priv->base_dma_addr + SSI_RXDR,
+ .src_addr_width = buswidth,
+ .src_maxburst = SSI_FIFO_BURST_NUM,
+ };
+
+ dmaengine_slave_config(master->dma_rx, &rxconf);
+
+ rxdesc = dmaengine_prep_slave_sg(
+ master->dma_rx,
+ t->rx_sg.sgl, t->rx_sg.nents,
+ DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!rxdesc)
+ goto out_err_prep;
+
+ rxdesc->callback = uniphier_spi_dma_rxcb;
+ rxdesc->callback_param = master;
+
+ uniphier_spi_irq_enable(priv, SSI_IE_RXRE);
+ atomic_or(SSI_DMA_RX_BUSY, &priv->dma_busy);
+
+ dmaengine_submit(rxdesc);
+ dma_async_issue_pending(master->dma_rx);
+ }
+
+ if (priv->tx_buf) {
+ struct dma_slave_config txconf = {
+ .direction = DMA_MEM_TO_DEV,
+ .dst_addr = priv->base_dma_addr + SSI_TXDR,
+ .dst_addr_width = buswidth,
+ .dst_maxburst = SSI_FIFO_BURST_NUM,
+ };
+
+ dmaengine_slave_config(master->dma_tx, &txconf);
+
+ txdesc = dmaengine_prep_slave_sg(
+ master->dma_tx,
+ t->tx_sg.sgl, t->tx_sg.nents,
+ DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!txdesc)
+ goto out_err_prep;
+
+ txdesc->callback = uniphier_spi_dma_txcb;
+ txdesc->callback_param = master;
+
+ uniphier_spi_irq_enable(priv, SSI_IE_TXRE);
+ atomic_or(SSI_DMA_TX_BUSY, &priv->dma_busy);
+
+ dmaengine_submit(txdesc);
+ dma_async_issue_pending(master->dma_tx);
+ }
+
+ /* signal that we need to wait for completion */
+ return (priv->tx_buf || priv->rx_buf);
+
+out_err_prep:
+ if (rxdesc)
+ dmaengine_terminate_sync(master->dma_rx);
+
+ return -EINVAL;
+}
+
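TX and RX DMA complete independently, so each callback clears its own busy bit and only the direction that finishes last calls spi_finalize_current_transfer(). A minimal sketch of that handshake, assuming kernel atomics (the helper name is illustrative):

#include <linux/atomic.h>
#include <linux/bits.h>

#define DMA_TX_BUSY	BIT(0)
#define DMA_RX_BUSY	BIT(1)

/* Clear this direction's busy bit; return true if the other
 * direction had already finished (i.e. we are last). */
static bool dma_dir_done(atomic_t *busy, int my_bit, int other_bit)
{
	/* atomic_fetch_andnot() returns the value before clearing */
	int old = atomic_fetch_andnot(my_bit, busy);

	return !(old & other_bit);
}

The RX callback would call dma_dir_done(&priv->dma_busy, DMA_RX_BUSY, DMA_TX_BUSY) and finalize the transfer when it returns true; the TX callback mirrors it with the bits swapped.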
static int uniphier_spi_transfer_one_irq(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *t)
uniphier_spi_fill_tx_fifo(priv);
- uniphier_spi_irq_enable(spi, SSI_IE_RCIE | SSI_IE_RORIE);
+ uniphier_spi_irq_enable(priv, SSI_IE_RCIE | SSI_IE_RORIE);
time_left = wait_for_completion_timeout(&priv->xfer_done,
msecs_to_jiffies(SSI_TIMEOUT_MS));
- uniphier_spi_irq_disable(spi, SSI_IE_RCIE | SSI_IE_RORIE);
+ uniphier_spi_irq_disable(priv, SSI_IE_RCIE | SSI_IE_RORIE);
if (!time_left) {
dev_err(dev, "transfer timeout.\n");
{
struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
unsigned long threshold;
+ bool use_dma;
/* Terminate and return success for 0 byte length transfer */
if (!t->len)
uniphier_spi_setup_transfer(spi, t);
+ use_dma = master->can_dma ? master->can_dma(master, spi, t) : false;
+ if (use_dma)
+ return uniphier_spi_transfer_one_dma(master, spi, t);
+
/*
* If the transfer operation will take longer than
* SSI_POLL_TIMEOUT_US, it should use irq.
return 0;
}
+static void uniphier_spi_handle_err(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ u32 val;
+
+ /* stop running spi transfer */
+ writel(0, priv->base + SSI_CTL);
+
+ /* reset FIFOs */
+ val = SSI_FC_TXFFL | SSI_FC_RXFFL;
+ writel(val, priv->base + SSI_FC);
+
+ uniphier_spi_irq_disable(priv, SSI_IE_ALL_MASK);
+
+ if (atomic_read(&priv->dma_busy) & SSI_DMA_TX_BUSY) {
+ dmaengine_terminate_async(master->dma_tx);
+ atomic_andnot(SSI_DMA_TX_BUSY, &priv->dma_busy);
+ }
+
+ if (atomic_read(&priv->dma_busy) & SSI_DMA_RX_BUSY) {
+ dmaengine_terminate_async(master->dma_rx);
+ atomic_andnot(SSI_DMA_RX_BUSY, &priv->dma_busy);
+ }
+}
+
static irqreturn_t uniphier_spi_handler(int irq, void *dev_id)
{
struct uniphier_spi_priv *priv = dev_id;
{
struct uniphier_spi_priv *priv;
struct spi_master *master;
+ struct resource *res;
+ struct dma_slave_caps caps;
+ u32 dma_tx_burst = 0, dma_rx_burst = 0;
unsigned long clk_rate;
int irq;
int ret;
priv->master = master;
priv->is_save_param = false;
- priv->base = devm_platform_ioremap_resource(pdev, 0);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(priv->base)) {
ret = PTR_ERR(priv->base);
goto out_master_put;
}
+ priv->base_dma_addr = res->start;
priv->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(priv->clk)) {
= uniphier_spi_prepare_transfer_hardware;
master->unprepare_transfer_hardware
= uniphier_spi_unprepare_transfer_hardware;
+ master->handle_err = uniphier_spi_handle_err;
+ master->can_dma = uniphier_spi_can_dma;
+
master->num_chipselect = 1;
+ master->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
+
+ master->dma_tx = dma_request_chan(&pdev->dev, "tx");
+ if (IS_ERR_OR_NULL(master->dma_tx)) {
+ if (PTR_ERR(master->dma_tx) == -EPROBE_DEFER)
+ goto out_disable_clk;
+ master->dma_tx = NULL;
+ dma_tx_burst = INT_MAX;
+ } else {
+ ret = dma_get_slave_caps(master->dma_tx, &caps);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get TX DMA capacities: %d\n",
+ ret);
+ goto out_disable_clk;
+ }
+ dma_tx_burst = caps.max_burst;
+ }
+
+ master->dma_rx = dma_request_chan(&pdev->dev, "rx");
+ if (IS_ERR_OR_NULL(master->dma_rx)) {
+ if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER)
+ goto out_disable_clk;
+ master->dma_rx = NULL;
+ dma_rx_burst = INT_MAX;
+ } else {
+ ret = dma_get_slave_caps(master->dma_rx, &caps);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get RX DMA capacities: %d\n",
+ ret);
+ goto out_disable_clk;
+ }
+ dma_rx_burst = caps.max_burst;
+ }
+
+ master->max_dma_len = min(dma_tx_burst, dma_rx_burst);
ret = devm_spi_register_master(&pdev->dev, master);
if (ret)
{
struct uniphier_spi_priv *priv = platform_get_drvdata(pdev);
+ if (priv->master->dma_tx)
+ dma_release_channel(priv->master->dma_tx);
+ if (priv->master->dma_rx)
+ dma_release_channel(priv->master->dma_rx);
+
clk_disable_unprepare(priv->clk);
return 0;
* advances its @tx buffer pointer monotonically.
* @ctlr: Pointer to the spi_controller structure of the driver
* @xfer: Pointer to the transfer being timestamped
- * @tx: Pointer to the current word within the xfer->tx_buf that the driver is
- * preparing to transmit right now.
+ * @progress: How many words (not bytes) have been transferred so far
* @irqs_off: If true, will disable IRQs and preemption for the duration of the
* transfer, for less jitter in time measurement. Only compatible
* with PIO drivers. If true, must follow up with
*/
void spi_take_timestamp_pre(struct spi_controller *ctlr,
struct spi_transfer *xfer,
- const void *tx, bool irqs_off)
+ size_t progress, bool irqs_off)
{
- u8 bytes_per_word = DIV_ROUND_UP(xfer->bits_per_word, 8);
-
if (!xfer->ptp_sts)
return;
if (xfer->timestamped_pre)
return;
- if (tx < (xfer->tx_buf + xfer->ptp_sts_word_pre * bytes_per_word))
+ if (progress < xfer->ptp_sts_word_pre)
return;
/* Capture the resolution of the timestamp */
- xfer->ptp_sts_word_pre = (tx - xfer->tx_buf) / bytes_per_word;
+ xfer->ptp_sts_word_pre = progress;
xfer->timestamped_pre = true;
* timestamped.
* @ctlr: Pointer to the spi_controller structure of the driver
* @xfer: Pointer to the transfer being timestamped
- * @tx: Pointer to the current word within the xfer->tx_buf that the driver has
- * just transmitted.
+ * @progress: How many words (not bytes) have been transferred so far
* @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
*/
void spi_take_timestamp_post(struct spi_controller *ctlr,
struct spi_transfer *xfer,
- const void *tx, bool irqs_off)
+ size_t progress, bool irqs_off)
{
- u8 bytes_per_word = DIV_ROUND_UP(xfer->bits_per_word, 8);
-
if (!xfer->ptp_sts)
return;
if (xfer->timestamped_post)
return;
- if (tx < (xfer->tx_buf + xfer->ptp_sts_word_post * bytes_per_word))
+ if (progress < xfer->ptp_sts_word_post)
return;
ptp_read_system_postts(xfer->ptp_sts);
}
/* Capture the resolution of the timestamp */
- xfer->ptp_sts_word_post = (tx - xfer->tx_buf) / bytes_per_word;
+ xfer->ptp_sts_word_post = progress;
xfer->timestamped_post = true;
}
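With the signature change, PIO drivers report progress as a word count rather than a TX buffer pointer, which also works for RX-only transfers that have no tx_buf. A hypothetical per-word PIO loop using the new calls (write_one_word() is illustrative, not a real helper):

#include <linux/spi/spi.h>

static void pio_xfer_words(struct spi_controller *ctlr,
			   struct spi_transfer *xfer,
			   unsigned int nwords, bool irqs_off)
{
	unsigned int i;

	for (i = 0; i < nwords; i++) {
		spi_take_timestamp_pre(ctlr, xfer, i, irqs_off);
		write_one_word(xfer, i);	/* driver-specific, assumed */
		spi_take_timestamp_post(ctlr, xfer, i + 1, irqs_off);
	}
}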
}
}
+ if (unlikely(ctlr->ptp_sts_supported)) {
+ list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
+ WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped_pre);
+ WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped_post);
+ }
+ }
+
spi_unmap_msg(ctlr, mesg);
if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
int nb, i;
struct gpio_desc **cs;
struct device *dev = &ctlr->dev;
+ unsigned long native_cs_mask = 0;
+ unsigned int num_cs_gpios = 0;
nb = gpiod_count(dev, "cs");
ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
if (!gpioname)
return -ENOMEM;
gpiod_set_consumer_name(cs[i], gpioname);
+ num_cs_gpios++;
+ continue;
}
+
+ if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
+ dev_err(dev, "Invalid native chip select %d\n", i);
+ return -EINVAL;
+ }
+ native_cs_mask |= BIT(i);
+ }
+
+ ctlr->unused_native_cs = ffz(native_cs_mask);
+ if (num_cs_gpios && ctlr->max_native_cs &&
+ ctlr->unused_native_cs >= ctlr->max_native_cs) {
+ dev_err(dev, "No unused native chip select available\n");
+ return -EINVAL;
}
return 0;
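ffz() yields the index of the lowest clear bit in the native-CS usage mask, i.e. the first native chip select no device claims; that line is then programmed whenever a GPIO actually drives the slave, so no real device sees an assertion. A small sketch of the same search outside the kernel, assuming the mask has at least one zero bit:

#include <stdint.h>

/* ffz() equivalent: index of the lowest zero bit in the mask */
static unsigned int first_zero_bit(uint32_t mask)
{
	return __builtin_ctz(~mask);
}

For example, with native chip selects 0 and 1 in use the mask is 0x3 and first_zero_bit(0x3) == 2, so native CS 2 becomes the harmless placeholder.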
#define PCI171X_RANGE_UNI BIT(4)
#define PCI171X_RANGE_GAIN(x) (((x) & 0x7) << 0)
#define PCI171X_MUX_REG 0x04 /* W: A/D multiplexor control */
-#define PCI171X_MUX_CHANH(x) (((x) & 0xf) << 8)
-#define PCI171X_MUX_CHANL(x) (((x) & 0xf) << 0)
+#define PCI171X_MUX_CHANH(x) (((x) & 0xff) << 8)
+#define PCI171X_MUX_CHANL(x) (((x) & 0xff) << 0)
#define PCI171X_MUX_CHAN(x) (PCI171X_MUX_CHANH(x) | PCI171X_MUX_CHANL(x))
#define PCI171X_STATUS_REG 0x06 /* R: status register */
#define PCI171X_STATUS_IRQ BIT(11) /* 1=IRQ occurred */
}
}
- if (!rv)
- return -ENODATA;
-
/* Second, find the set of routes valid for this device. */
for (i = 0; ni_device_routes_list[i]; ++i) {
if (memcmp(ni_device_routes_list[i]->device, board_name,
}
}
- if (!dr)
- return -ENODATA;
-
tables->route_values = rv;
tables->valid_routes = dr;
+ if (!rv || !dr)
+ return -ENODATA;
+
return 0;
}
{
int src;
+ if (!tables->route_values)
+ return -EINVAL;
+
dest = B(dest); /* subtract NI names offset */
/* ensure we are not going to under/over run the route value table */
if (dest < 0 || dest >= NI_NUM_NAMES)
{USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
{USB_DEVICE(0x2001, 0x331B)}, /* D-Link DWA-121 rev B1 */
{USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
+ {USB_DEVICE(0x2357, 0x0111)}, /* TP-Link TL-WN727N v5.21 */
{USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
{USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */
{} /* Terminating entry */
memcpy(array, addr, length);
- ret = vnt_control_out(priv, MESSAGE_TYPE_WRITE, 0,
- MESSAGE_REQUEST_BBREG, length, array);
+ ret = vnt_control_out_blocks(priv, VNT_REG_BLOCK_SIZE,
+ MESSAGE_REQUEST_BBREG, length, array);
if (ret)
goto end;
*/
int vnt_radio_power_on(struct vnt_private *priv)
{
- int ret = true;
+ int ret = 0;
vnt_exit_deep_sleep(priv);
u8 mac_hw;
/* netdev */
struct usb_device *usb;
+ struct usb_interface *intf;
u64 tsf_time;
u8 rx_rate;
int vnt_init(struct vnt_private *priv)
{
- if (!(vnt_init_registers(priv)))
+ if (vnt_init_registers(priv))
return -EAGAIN;
SET_IEEE80211_PERM_ADDR(priv->hw, priv->permanent_net_addr);
priv = hw->priv;
priv->hw = hw;
priv->usb = udev;
+ priv->intf = intf;
vnt_set_options(priv);
kfree(usb_buffer);
- if (ret >= 0 && ret < (int)length)
+ if (ret == (int)length)
+ ret = 0;
+ else
ret = -EIO;
end_unlock:
reg_off, reg, sizeof(u8), &data);
}
+int vnt_control_out_blocks(struct vnt_private *priv,
+ u16 block, u8 reg, u16 length, u8 *data)
+{
+ int ret = 0, i;
+
+ for (i = 0; i < length; i += block) {
+ u16 len = min_t(int, length - i, block);
+
+ ret = vnt_control_out(priv, MESSAGE_TYPE_WRITE,
+ i, reg, len, data + i);
+ if (ret)
+ goto end;
+ }
+end:
+ return ret;
+}
+
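As a worked example of the chunking above: with length = 150 and block = VNT_REG_BLOCK_SIZE (64), the loop issues three control transfers of 64, 64 and 22 bytes, with the running offset i (0, 64, 128) doubling as the wValue index and the position within the source buffer.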
int vnt_control_in(struct vnt_private *priv, u8 request, u16 value,
u16 index, u16 length, u8 *buffer)
{
kfree(usb_buffer);
- if (ret >= 0 && ret < (int)length)
+ if (ret == (int)length)
+ ret = 0;
+ else
ret = -EIO;
end_unlock:
#include "device.h"
+#define VNT_REG_BLOCK_SIZE 64
+
int vnt_control_out(struct vnt_private *priv, u8 request, u16 value,
u16 index, u16 length, u8 *buffer);
int vnt_control_in(struct vnt_private *priv, u8 request, u16 value,
int vnt_control_out_u8(struct vnt_private *priv, u8 reg, u8 ref_off, u8 data);
int vnt_control_in_u8(struct vnt_private *priv, u8 reg, u8 reg_off, u8 *data);
+int vnt_control_out_blocks(struct vnt_private *priv,
+ u16 block, u8 reg, u16 len, u8 *data);
+
int vnt_start_interrupt_urb(struct vnt_private *priv);
int vnt_submit_rx_urb(struct vnt_private *priv, struct vnt_rcb *rcb);
int vnt_tx_context(struct vnt_private *priv,
if (vnt_init(priv)) {
/* If this fails, everything ends here; TODO: retry */
dev_err(&priv->usb->dev, "failed to start\n");
+ usb_set_intfdata(priv->intf, NULL);
ieee80211_free_hw(priv->hw);
return;
}
iscsit_stop_nopin_response_timer(conn);
iscsit_stop_nopin_timer(conn);
- if (conn->conn_transport->iscsit_wait_conn)
- conn->conn_transport->iscsit_wait_conn(conn);
-
/*
* During Connection recovery drop unacknowledged out of order
* commands for this connection, and prepare the other commands
target_sess_cmd_list_set_waiting(sess->se_sess);
target_wait_for_sess_cmds(sess->se_sess);
+ if (conn->conn_transport->iscsit_wait_conn)
+ conn->conn_transport->iscsit_wait_conn(conn);
+
ahash_request_free(conn->conn_tx_hash);
if (conn->conn_rx_hash) {
struct crypto_ahash *tfm;
config OPTEE
tristate "OP-TEE"
depends on HAVE_ARM_SMCCC
+ depends on MMU
help
This implements the OP-TEE Trusted Execution Environment (TEE)
driver.
shm->size = PAGE_SIZE << order;
if (shm->flags & TEE_SHM_DMA_BUF) {
+ unsigned int nr_pages = 1 << order, i;
+ struct page **pages;
+
+ pages = kcalloc(nr_pages, sizeof(pages), GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
+ for (i = 0; i < nr_pages; i++) {
+ pages[i] = page;
+ page++;
+ }
+
shm->flags |= TEE_SHM_REGISTER;
- rc = optee_shm_register(shm->ctx, shm, &page, 1 << order,
+ rc = optee_shm_register(shm->ctx, shm, pages, nr_pages,
(unsigned long)shm->kaddr);
+ kfree(pages);
}
return rc;
}
static const struct acpi_device_id int3400_thermal_match[] = {
+ {"INT1040", 0},
{"INT3400", 0},
{}
};
}
static const struct acpi_device_id int3403_device_ids[] = {
+ {"INT1043", 0},
{"INT3403", 0},
{"", 0},
};
irq = platform_get_irq_byname(pdev, "uplow");
if (irq < 0) {
ret = irq;
+ /* For old DTs with no IRQ defined */
+ if (irq == -ENXIO)
+ ret = 0;
goto err_put_device;
}
return AE_OK;
}
+static const struct acpi_device_id serdev_acpi_devices_blacklist[] = {
+ { "INT3511", 0 },
+ { "INT3512", 0 },
+ { },
+};
+
static acpi_status acpi_serdev_add_device(acpi_handle handle, u32 level,
void *data, void **return_value)
{
if (acpi_device_enumerated(adev))
return AE_OK;
+ /* Skip if blacklisted */
+ if (!acpi_match_device_ids(adev, serdev_acpi_devices_blacklist))
+ return AE_OK;
+
if (acpi_serdev_check_resources(ctrl, adev))
return AE_OK;
{
if (WARN_ON(index >= driver->num))
return;
- if (!driver->ports[index])
- driver->ports[index] = port;
+ driver->ports[index] = port;
}
EXPORT_SYMBOL_GPL(tty_port_link_device);
*/
static irqreturn_t cdns3_device_irq_handler(int irq, void *data)
{
- struct cdns3_device *priv_dev;
- struct cdns3 *cdns = data;
+ struct cdns3_device *priv_dev = data;
irqreturn_t ret = IRQ_NONE;
u32 reg;
- priv_dev = cdns->gadget_dev;
-
/* check USB device interrupt */
reg = readl(&priv_dev->regs->usb_ists);
if (reg) {
*/
static irqreturn_t cdns3_device_thread_irq_handler(int irq, void *data)
{
- struct cdns3_device *priv_dev;
- struct cdns3 *cdns = data;
+ struct cdns3_device *priv_dev = data;
irqreturn_t ret = IRQ_NONE;
unsigned long flags;
int bit;
u32 reg;
- priv_dev = cdns->gadget_dev;
spin_lock_irqsave(&priv_dev->lock, flags);
reg = readl(&priv_dev->regs->usb_ists);
priv_dev = cdns->gadget_dev;
- devm_free_irq(cdns->dev, cdns->dev_irq, cdns);
+ devm_free_irq(cdns->dev, cdns->dev_irq, priv_dev);
pm_runtime_mark_last_busy(cdns->dev);
pm_runtime_put_autosuspend(cdns->dev);
ret = devm_request_threaded_irq(cdns->dev, cdns->dev_irq,
cdns3_device_irq_handler,
cdns3_device_thread_irq_handler,
- IRQF_SHARED, dev_name(cdns->dev), cdns);
+ IRQF_SHARED, dev_name(cdns->dev),
+ cdns->gadget_dev);
if (ret)
goto err0;
struct ehci_ci_priv {
struct regulator *reg_vbus;
+ bool enabled;
};
static int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable)
int ret = 0;
int port = HCS_N_PORTS(ehci->hcs_params);
- if (priv->reg_vbus) {
+ if (priv->reg_vbus && enable != priv->enabled) {
if (port > 1) {
dev_warn(dev,
"Not support multi-port regulator control\n");
enable ? "enable" : "disable", ret);
return ret;
}
+ priv->enabled = enable;
}
if (enable && (ci->platdata->phy_mode == USBPHY_INTERFACE_MODE_HSIC)) {
[USB_ENDPOINT_XFER_INT] = 1024,
};
-static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
- int asnum, struct usb_host_interface *ifp, int num_ep,
- unsigned char *buffer, int size)
+static bool endpoint_is_duplicate(struct usb_endpoint_descriptor *e1,
+ struct usb_endpoint_descriptor *e2)
+{
+ if (e1->bEndpointAddress == e2->bEndpointAddress)
+ return true;
+
+ if (usb_endpoint_xfer_control(e1) || usb_endpoint_xfer_control(e2)) {
+ if (usb_endpoint_num(e1) == usb_endpoint_num(e2))
+ return true;
+ }
+
+ return false;
+}
+
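For example, a control endpoint with bEndpointAddress 0x01 also conflicts with 0x81: control endpoints are bidirectional, so only the endpoint number is significant, whereas for bulk, interrupt and isochronous endpoints the direction bit makes 0x01 and 0x81 distinct addresses.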
+/*
+ * Check for duplicate endpoint addresses in other interfaces and in the
+ * altsetting currently being parsed.
+ */
+static bool config_endpoint_is_duplicate(struct usb_host_config *config,
+ int inum, int asnum, struct usb_endpoint_descriptor *d)
+{
+ struct usb_endpoint_descriptor *epd;
+ struct usb_interface_cache *intfc;
+ struct usb_host_interface *alt;
+ int i, j, k;
+
+ for (i = 0; i < config->desc.bNumInterfaces; ++i) {
+ intfc = config->intf_cache[i];
+
+ for (j = 0; j < intfc->num_altsetting; ++j) {
+ alt = &intfc->altsetting[j];
+
+ if (alt->desc.bInterfaceNumber == inum &&
+ alt->desc.bAlternateSetting != asnum)
+ continue;
+
+ for (k = 0; k < alt->desc.bNumEndpoints; ++k) {
+ epd = &alt->endpoint[k].desc;
+
+ if (endpoint_is_duplicate(epd, d))
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+static int usb_parse_endpoint(struct device *ddev, int cfgno,
+ struct usb_host_config *config, int inum, int asnum,
+ struct usb_host_interface *ifp, int num_ep,
+ unsigned char *buffer, int size)
{
unsigned char *buffer0 = buffer;
struct usb_endpoint_descriptor *d;
goto skip_to_next_endpoint_or_interface_descriptor;
/* Check for duplicate endpoint addresses */
- for (i = 0; i < ifp->desc.bNumEndpoints; ++i) {
- if (ifp->endpoint[i].desc.bEndpointAddress ==
- d->bEndpointAddress) {
- dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n",
- cfgno, inum, asnum, d->bEndpointAddress);
- goto skip_to_next_endpoint_or_interface_descriptor;
- }
+ if (config_endpoint_is_duplicate(config, inum, asnum, d)) {
+ dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n",
+ cfgno, inum, asnum, d->bEndpointAddress);
+ goto skip_to_next_endpoint_or_interface_descriptor;
}
endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints];
endpoint->desc.wMaxPacketSize = cpu_to_le16(8);
}
- /* Validate the wMaxPacketSize field */
+ /*
+ * Validate the wMaxPacketSize field.
+ * Some devices have isochronous endpoints in altsetting 0;
+ * the USB-2 spec requires such endpoints to have wMaxPacketSize = 0
+ * (see the end of section 5.6.3), so don't warn about them.
+ */
maxp = usb_endpoint_maxp(&endpoint->desc);
- if (maxp == 0) {
- dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has wMaxPacketSize 0, skipping\n",
+ if (maxp == 0 && !(usb_endpoint_xfer_isoc(d) && asnum == 0)) {
+ dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid wMaxPacketSize 0\n",
cfgno, inum, asnum, d->bEndpointAddress);
- goto skip_to_next_endpoint_or_interface_descriptor;
}
/* Find the highest legal maxpacket size for this endpoint */
if (((struct usb_descriptor_header *) buffer)->bDescriptorType
== USB_DT_INTERFACE)
break;
- retval = usb_parse_endpoint(ddev, cfgno, inum, asnum, alt,
- num_ep, buffer, size);
+ retval = usb_parse_endpoint(ddev, cfgno, config, inum, asnum,
+ alt, num_ep, buffer, size);
if (retval < 0)
return retval;
++n;
* PORT_OVER_CURRENT is not. So check for any of them.
*/
if (udev || (portstatus & USB_PORT_STAT_CONNECTION) ||
+ (portchange & USB_PORT_STAT_C_CONNECTION) ||
(portstatus & USB_PORT_STAT_OVERCURRENT) ||
(portchange & USB_PORT_STAT_C_OVERCURRENT))
set_bit(port1, hub->change_bits);
#define SET_ADDRESS_TRIES 2
#define GET_DESCRIPTOR_TRIES 2
#define SET_CONFIG_TRIES (2 * (use_both_schemes + 1))
-#define USE_NEW_SCHEME(i, scheme) ((i) / 2 == (int)scheme)
+#define USE_NEW_SCHEME(i, scheme) ((i) / 2 == (int)(scheme))
#define HUB_ROOT_RESET_TIME 60 /* times are in msec */
#define HUB_SHORT_RESET_TIME 10
static bool dwc3_gadget_ep_request_completed(struct dwc3_request *req)
{
+ /*
+ * For OUT direction, host may send less than the setup
+ * length. Return true for all OUT requests.
+ */
+ if (!req->direction)
+ return true;
+
return req->request.actual == req->request.length;
}
memset(props, 0, sizeof(struct property_entry) * ARRAY_SIZE(props));
if (dwc->usb3_lpm_capable)
- props[prop_idx++].name = "usb3-lpm-capable";
+ props[prop_idx++] = PROPERTY_ENTRY_BOOL("usb3-lpm-capable");
if (dwc->usb2_lpm_disable)
- props[prop_idx++].name = "usb2-lpm-disable";
+ props[prop_idx++] = PROPERTY_ENTRY_BOOL("usb2-lpm-disable");
/**
* WORKAROUND: dwc3 revisions <=3.00a have a limitation
* This following flag tells XHCI to do just that.
*/
if (dwc->revision <= DWC3_REVISION_300A)
- props[prop_idx++].name = "quirk-broken-port-ped";
+ props[prop_idx++] = PROPERTY_ENTRY_BOOL("quirk-broken-port-ped");
if (prop_idx) {
ret = platform_device_add_properties(xhci, props);
tristate "NVIDIA Tegra Superspeed USB 3.0 Device Controller"
depends on ARCH_TEGRA || COMPILE_TEST
depends on PHY_TEGRA_XUSB
+ select USB_ROLE_SWITCH
help
Enables NVIDIA Tegra USB 3.0 device mode controller driver.
}
da8xx_ohci->oc_gpio = devm_gpiod_get_optional(dev, "oc", GPIOD_IN);
- if (IS_ERR(da8xx_ohci->oc_gpio))
+ if (IS_ERR(da8xx_ohci->oc_gpio)) {
+ error = PTR_ERR(da8xx_ohci->oc_gpio);
goto err;
+ }
if (da8xx_ohci->oc_gpio) {
oc_irq = gpiod_to_irq(da8xx_ohci->oc_gpio);
- if (oc_irq < 0)
+ if (oc_irq < 0) {
+ error = oc_irq;
goto err;
+ }
error = devm_request_threaded_irq(dev, oc_irq, NULL,
ohci_da8xx_oc_thread, IRQF_TRIGGER_RISING |
static int jz4740_musb_init(struct musb *musb)
{
struct device *dev = musb->controller->parent;
+ int err;
if (dev->of_node)
musb->xceiv = devm_usb_get_phy_by_phandle(dev, "phys", 0);
else
musb->xceiv = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
if (IS_ERR(musb->xceiv)) {
- dev_err(dev, "No transceiver configured\n");
- return PTR_ERR(musb->xceiv);
+ err = PTR_ERR(musb->xceiv);
+ if (err != -EPROBE_DEFER)
+ dev_err(dev, "No transceiver configured: %d", err);
+ return err;
}
/* Silicon does not implement ConfigData register.
#define MUSB_QUIRK_B_INVALID_VBUS_91 (MUSB_DEVCTL_BDEVICE | \
(2 << MUSB_DEVCTL_VBUS_SHIFT) | \
MUSB_DEVCTL_SESSION)
+#define MUSB_QUIRK_B_DISCONNECT_99 (MUSB_DEVCTL_BDEVICE | \
+ (3 << MUSB_DEVCTL_VBUS_SHIFT) | \
+ MUSB_DEVCTL_SESSION)
#define MUSB_QUIRK_A_DISCONNECT_19 ((3 << MUSB_DEVCTL_VBUS_SHIFT) | \
MUSB_DEVCTL_SESSION)
s = MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV |
MUSB_DEVCTL_HR;
switch (devctl & ~s) {
+ case MUSB_QUIRK_B_DISCONNECT_99:
+ musb_dbg(musb, "Poll devctl in case of suspend after disconnect\n");
+ schedule_delayed_work(&musb->irq_work,
+ msecs_to_jiffies(1000));
+ break;
case MUSB_QUIRK_B_INVALID_VBUS_91:
if (musb->quirk_retries && !musb->flush_irq_work) {
musb_dbg(musb,
musb_disable_interrupts(musb);
musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+ /* MUSB_POWER_SOFTCONN might be already set, JZ4740 does this. */
+ musb_writeb(musb->mregs, MUSB_POWER, 0);
+
/* Init IRQ workqueue before request_irq */
INIT_DELAYED_WORK(&musb->irq_work, musb_irq_work);
INIT_DELAYED_WORK(&musb->deassert_reset_work, musb_deassert_reset);
controller->controller.channel_abort = dma_channel_abort;
if (request_irq(irq, dma_controller_irq, 0,
- dev_name(musb->controller), &controller->controller)) {
+ dev_name(musb->controller), controller)) {
dev_err(dev, "request_irq %d failed!\n", irq);
musb_dma_controller_destroy(&controller->controller);
static int ch341_reset_resume(struct usb_serial *serial)
{
struct usb_serial_port *port = serial->port[0];
- struct ch341_private *priv = usb_get_serial_port_data(port);
+ struct ch341_private *priv;
int ret;
+ priv = usb_get_serial_port_data(port);
+ if (!priv)
+ return 0;
+
/* reconfigure ch341 serial port after bus-reset */
ch341_configure(serial->dev, priv);
if (txCredits) {
port = edge_serial->serial->port[portNumber];
edge_port = usb_get_serial_port_data(port);
- if (edge_port->open) {
+ if (edge_port && edge_port->open) {
spin_lock_irqsave(&edge_port->ep_lock,
flags);
edge_port->txCredits += txCredits;
static void process_rcvd_data(struct edgeport_serial *edge_serial,
unsigned char *buffer, __u16 bufferLength)
{
- struct device *dev = &edge_serial->serial->dev->dev;
+ struct usb_serial *serial = edge_serial->serial;
+ struct device *dev = &serial->dev->dev;
struct usb_serial_port *port;
struct edgeport_port *edge_port;
__u16 lastBufferLength;
/* spit this data back into the tty driver if this
port is open */
- if (rxLen) {
- port = edge_serial->serial->port[
- edge_serial->rxPort];
+ if (rxLen && edge_serial->rxPort < serial->num_ports) {
+ port = serial->port[edge_serial->rxPort];
edge_port = usb_get_serial_port_data(port);
- if (edge_port->open) {
+ if (edge_port && edge_port->open) {
dev_dbg(dev, "%s - Sending %d bytes to TTY for port %d\n",
__func__, rxLen,
edge_serial->rxPort);
rxLen);
edge_port->port->icount.rx += rxLen;
}
- buffer += rxLen;
}
+ buffer += rxLen;
break;
case EXPECT_HDR3: /* Expect 3rd byte of status header */
__u8 code = edge_serial->rxStatusCode;
/* switch the port pointer to the one being currently talked about */
+ if (edge_serial->rxPort >= edge_serial->serial->num_ports)
+ return;
port = edge_serial->serial->port[edge_serial->rxPort];
edge_port = usb_get_serial_port_data(port);
if (edge_port == NULL) {
for (i = 0; i < serial->num_ports; ++i) {
port = serial->port[i];
p_priv = usb_get_serial_port_data(port);
+ if (!p_priv)
+ continue;
if (p_priv->resend_cont) {
dev_dbg(&port->dev, "%s - sending setup\n", __func__);
for (i = 0; i < serial->num_ports; ++i) {
port = serial->port[i];
p_priv = usb_get_serial_port_data(port);
+ if (!p_priv)
+ continue;
if (p_priv->resend_cont) {
dev_dbg(&port->dev, "%s - sending setup\n", __func__);
retval = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
requesttype,
USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
- 0, 0, buffer, 1, 0);
+ 0, 0, buffer, 1, USB_CTRL_SET_TIMEOUT);
kfree(buffer);
if (retval < 0)
#define QUECTEL_PRODUCT_BG96 0x0296
#define QUECTEL_PRODUCT_EP06 0x0306
#define QUECTEL_PRODUCT_EM12 0x0512
+#define QUECTEL_PRODUCT_RM500Q 0x0800
#define CMOTECH_VENDOR_ID 0x16d8
#define CMOTECH_PRODUCT_6001 0x6001
/* Interface must have two endpoints */
#define NUMEP2 BIT(16)
+/* Device needs ZLP */
+#define ZLP BIT(17)
+
static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
+ .driver_info = ZLP },
+
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
.driver_info = NCTRL(0) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1102, 0xff), /* Telit ME910 (ECM) */
.driver_info = NCTRL(0) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x110a, 0xff), /* Telit ME910G1 */
+ .driver_info = NCTRL(0) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff), /* Telit LN940 (MBIM) */
.driver_info = NCTRL(0) },
+ { USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */
+ .driver_info = NCTRL(0) | ZLP },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) },
if (!(device_flags & NCTRL(iface_desc->bInterfaceNumber)))
data->use_send_setup = 1;
+ if (device_flags & ZLP)
+ data->use_zlp = 1;
+
spin_lock_init(&data->susp_lock);
usb_set_serial_data(serial, data);
u8 newMSR = (u8) *ch;
unsigned long flags;
+ /* May be called from qt2_process_read_urb() for an unbound port. */
port_priv = usb_get_serial_port_data(port);
+ if (!port_priv)
+ return;
spin_lock_irqsave(&port_priv->lock, flags);
port_priv->shadowMSR = newMSR;
unsigned long flags;
u8 newLSR = (u8) *ch;
+ /* May be called from qt2_process_read_urb() for an unbound port. */
port_priv = usb_get_serial_port_data(port);
+ if (!port_priv)
+ return;
if (newLSR & UART_LSR_BI)
newLSR &= (u8) (UART_LSR_OE | UART_LSR_BI);
#define MOTOROLA_TETRA_IDS() \
{ USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \
{ USB_DEVICE(0x0cad, 0x9012) }, /* MTP6550 */ \
+ { USB_DEVICE(0x0cad, 0x9013) }, /* MTP3xxx */ \
+ { USB_DEVICE(0x0cad, 0x9015) }, /* MTP85xx */ \
{ USB_DEVICE(0x0cad, 0x9016) } /* TPG2200 */
DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
return -EINVAL;
}
+ /* Prevent individual ports from being unbound. */
+ driver->driver.suppress_bind_attrs = true;
+
usb_serial_operations_init(driver);
/* Add this device to our list of devices */
spinlock_t susp_lock;
unsigned int suspended:1;
unsigned int use_send_setup:1;
+ unsigned int use_zlp:1;
int in_flight;
unsigned int open_ports;
void *private;
void (*callback) (struct urb *))
{
struct usb_serial *serial = port->serial;
+ struct usb_wwan_intf_private *intfdata = usb_get_serial_data(serial);
struct urb *urb;
urb = usb_alloc_urb(0, GFP_KERNEL); /* No ISO */
usb_sndbulkpipe(serial->dev, endpoint) | dir,
buf, len, callback, ctx);
+ if (intfdata->use_zlp && dir == USB_DIR_OUT)
+ urb->transfer_flags |= URB_ZERO_PACKET;
+
return urb;
}
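URB_ZERO_PACKET makes the host controller terminate an OUT transfer whose length is an exact multiple of the endpoint's wMaxPacketSize with an explicit zero-length packet; devices flagged with ZLP rely on that terminating packet to recognize the end of a transfer.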
if (status & TCPC_ALERT_RX_STATUS) {
struct pd_message msg;
- unsigned int cnt;
+ unsigned int cnt, payload_cnt;
u16 header;
regmap_read(tcpci->regmap, TCPC_RX_BYTE_CNT, &cnt);
+ /*
+ * 'cnt' corresponds to READABLE_BYTE_COUNT in section 4.4.14
+ * of the TCPCI spec [Rev 2.0 Ver 1.0 October 2017] and is
+ * defined in table 4-36 as one greater than the number of
+ * bytes received. And that number includes the header. So:
+ */
+ if (cnt > 3)
+ payload_cnt = cnt - (1 + sizeof(msg.header));
+ else
+ payload_cnt = 0;
tcpci_read16(tcpci, TCPC_RX_HDR, &header);
msg.header = cpu_to_le16(header);
- if (WARN_ON(cnt > sizeof(msg.payload)))
- cnt = sizeof(msg.payload);
+ if (WARN_ON(payload_cnt > sizeof(msg.payload)))
+ payload_cnt = sizeof(msg.payload);
- if (cnt > 0)
+ if (payload_cnt > 0)
regmap_raw_read(tcpci->regmap, TCPC_RX_DATA,
- &msg.payload, cnt);
+ &msg.payload, payload_cnt);
/* Read complete, clear RX status alert bit */
tcpci_write16(tcpci, TCPC_ALERT, TCPC_ALERT_RX_STATUS);
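As a worked example: a message with the 2-byte header plus 4 payload bytes means 6 bytes received, so READABLE_BYTE_COUNT reads 7 and payload_cnt = 7 - (1 + sizeof(msg.header)) = 4; any count of 3 or less covers at most the header, hence payload_cnt = 0.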
#define UCSI_ENABLE_NTFY_CMD_COMPLETE BIT(16)
#define UCSI_ENABLE_NTFY_EXT_PWR_SRC_CHANGE BIT(17)
#define UCSI_ENABLE_NTFY_PWR_OPMODE_CHANGE BIT(18)
-#define UCSI_ENABLE_NTFY_CAP_CHANGE BIT(19)
-#define UCSI_ENABLE_NTFY_PWR_LEVEL_CHANGE BIT(20)
-#define UCSI_ENABLE_NTFY_PD_RESET_COMPLETE BIT(21)
-#define UCSI_ENABLE_NTFY_CAM_CHANGE BIT(22)
-#define UCSI_ENABLE_NTFY_BAT_STATUS_CHANGE BIT(23)
-#define UCSI_ENABLE_NTFY_PARTNER_CHANGE BIT(24)
-#define UCSI_ENABLE_NTFY_PWR_DIR_CHANGE BIT(25)
-#define UCSI_ENABLE_NTFY_CONNECTOR_CHANGE BIT(26)
-#define UCSI_ENABLE_NTFY_ERROR BIT(27)
+#define UCSI_ENABLE_NTFY_CAP_CHANGE BIT(21)
+#define UCSI_ENABLE_NTFY_PWR_LEVEL_CHANGE BIT(22)
+#define UCSI_ENABLE_NTFY_PD_RESET_COMPLETE BIT(23)
+#define UCSI_ENABLE_NTFY_CAM_CHANGE BIT(24)
+#define UCSI_ENABLE_NTFY_BAT_STATUS_CHANGE BIT(25)
+#define UCSI_ENABLE_NTFY_PARTNER_CHANGE BIT(27)
+#define UCSI_ENABLE_NTFY_PWR_DIR_CHANGE BIT(28)
+#define UCSI_ENABLE_NTFY_CONNECTOR_CHANGE BIT(30)
+#define UCSI_ENABLE_NTFY_ERROR BIT(31)
#define UCSI_ENABLE_NTFY_ALL 0xdbe70000
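The renumbered bits stay consistent with the unchanged UCSI_ENABLE_NTFY_ALL mask: 0xdbe70000 has exactly bits 16-18, 21-25, 27, 28, 30 and 31 set, matching the definitions above. A compile-time cross-check sketch (not part of the driver):

#include <linux/build_bug.h>

static_assert((UCSI_ENABLE_NTFY_CMD_COMPLETE |
	       UCSI_ENABLE_NTFY_EXT_PWR_SRC_CHANGE |
	       UCSI_ENABLE_NTFY_PWR_OPMODE_CHANGE |
	       UCSI_ENABLE_NTFY_CAP_CHANGE |
	       UCSI_ENABLE_NTFY_PWR_LEVEL_CHANGE |
	       UCSI_ENABLE_NTFY_PD_RESET_COMPLETE |
	       UCSI_ENABLE_NTFY_CAM_CHANGE |
	       UCSI_ENABLE_NTFY_BAT_STATUS_CHANGE |
	       UCSI_ENABLE_NTFY_PARTNER_CHANGE |
	       UCSI_ENABLE_NTFY_PWR_DIR_CHANGE |
	       UCSI_ENABLE_NTFY_CONNECTOR_CHANGE |
	       UCSI_ENABLE_NTFY_ERROR) == UCSI_ENABLE_NTFY_ALL,
	      "notification bits must add up to UCSI_ENABLE_NTFY_ALL");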
/* SET_UOR command bits */
config MAX77620_WATCHDOG
tristate "Maxim Max77620 Watchdog Timer"
depends on MFD_MAX77620 || COMPILE_TEST
+ select WATCHDOG_CORE
help
This is the driver for the Max77620 watchdog timer.
Say 'Y' here to enable the watchdog timer support for
config TQMX86_WDT
tristate "TQ-Systems TQMX86 Watchdog Timer"
depends on X86
+ select WATCHDOG_CORE
help
This is the driver for the hardware watchdog timer in the TQMX86 IO
controller found on some of their ComExpress Modules.
{
struct imx7ulp_wdt_device *wdt = watchdog_get_drvdata(wdog);
- imx7ulp_wdt_enable(wdt->base, true);
+ imx7ulp_wdt_enable(wdog, true);
imx7ulp_wdt_set_timeout(&wdt->wdd, 1);
/* wait for wdog to fire */
set_bit(WDOG_HW_RUNNING, &dev->wdt.status);
/* Request the IRQ only after the watchdog is disabled */
- irq = platform_get_irq(pdev, 0);
+ irq = platform_get_irq_optional(pdev, 0);
if (irq > 0) {
/*
* Not all supported platforms specify an interrupt for the
}
/* Optional 2nd interrupt for pretimeout */
- irq = platform_get_irq(pdev, 1);
+ irq = platform_get_irq_optional(pdev, 1);
if (irq > 0) {
orion_wdt_info.options |= WDIOF_PRETIMEOUT;
ret = devm_request_irq(&pdev->dev, irq, orion_wdt_pre_irq,
module_platform_driver(rn5t618_wdt_driver);
+MODULE_ALIAS("platform:rn5t618-wdt");
MODULE_AUTHOR("Beniamino Galvani <b.galvani@gmail.com>");
MODULE_DESCRIPTION("RN5T618 watchdog driver");
MODULE_LICENSE("GPL v2");
cr_wdt_csr = NCT6102D_WDT_CSR;
break;
case NCT6116_ID:
- ret = nct6102;
+ ret = nct6116;
cr_wdt_timeout = NCT6102D_WDT_TIMEOUT;
cr_wdt_control = NCT6102D_WDT_CONTROL;
cr_wdt_csr = NCT6102D_WDT_CSR;
_leave(" = -ENAMETOOLONG");
return ERR_PTR(-ENAMETOOLONG);
}
- if (namelen == 5 && memcmp(name, "@cell", 5) == 0)
+
+ /* Prohibit cell names that contain unprintable chars, '/' and '@', or
+ * that begin with a dot. This also precludes "@cell".
+ */
+ if (name[0] == '.')
return ERR_PTR(-EINVAL);
+ for (i = 0; i < namelen; i++) {
+ char ch = name[i];
+ if (!isprint(ch) || ch == '/' || ch == '@')
+ return ERR_PTR(-EINVAL);
+ }
_enter("%*.*s,%s", namelen, namelen, name, addresses);
unsigned int flags)
{
struct afs_vnode *dvnode = AFS_FS_I(dir);
+ struct afs_fid fid = {};
struct inode *inode;
struct dentry *d;
struct key *key;
afs_stat_v(dvnode, n_lookup);
inode = afs_do_lookup(dir, dentry, key);
key_put(key);
- if (inode == ERR_PTR(-ENOENT)) {
+ if (inode == ERR_PTR(-ENOENT))
inode = afs_try_auto_mntpt(dentry, dir);
- } else {
- dentry->d_fsdata =
- (void *)(unsigned long)dvnode->status.data_version;
- }
+
+ if (!IS_ERR_OR_NULL(inode))
+ fid = AFS_FS_I(inode)->fid;
+
d = d_splice_alias(inode, dentry);
if (!IS_ERR_OR_NULL(d)) {
d->d_fsdata = dentry->d_fsdata;
- trace_afs_lookup(dvnode, &d->d_name,
- inode ? AFS_FS_I(inode) : NULL);
+ trace_afs_lookup(dvnode, &d->d_name, &fid);
} else {
- trace_afs_lookup(dvnode, &dentry->d_name,
- IS_ERR_OR_NULL(inode) ? NULL
- : AFS_FS_I(inode));
+ trace_afs_lookup(dvnode, &dentry->d_name, &fid);
}
return d;
}
&dev_replace->scrub_progress, 0, 1);
ret = btrfs_dev_replace_finishing(fs_info, ret);
- if (ret == -EINPROGRESS) {
+ if (ret == -EINPROGRESS)
ret = BTRFS_IOCTL_DEV_REPLACE_RESULT_SCRUB_INPROGRESS;
- } else if (ret != -ECANCELED) {
- WARN_ON(ret);
- }
return ret;
}
static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
- struct inode *dir, u64 objectid,
- const char *name, int name_len)
+ struct inode *dir, struct dentry *dentry)
{
struct btrfs_root *root = BTRFS_I(dir)->root;
+ struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
struct btrfs_path *path;
struct extent_buffer *leaf;
struct btrfs_dir_item *di;
struct btrfs_key key;
+ const char *name = dentry->d_name.name;
+ int name_len = dentry->d_name.len;
u64 index;
int ret;
+ u64 objectid;
u64 dir_ino = btrfs_ino(BTRFS_I(dir));
+ if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) {
+ objectid = inode->root->root_key.objectid;
+ } else if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
+ objectid = inode->location.objectid;
+ } else {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
}
btrfs_release_path(path);
- ret = btrfs_del_root_ref(trans, objectid, root->root_key.objectid,
- dir_ino, &index, name, name_len);
- if (ret < 0) {
- if (ret != -ENOENT) {
- btrfs_abort_transaction(trans, ret);
- goto out;
- }
+ /*
+ * This is a placeholder inode for a subvolume we didn't have a
+ * reference to at the time of the snapshot creation. In the meantime
+ * we could have renamed the real subvol link into our snapshot, so
+ * depending on btrfs_del_root_ref to return -ENOENT here is incorrect.
+ * Instead, simply look up the dir_index_item for this entry so we can
+ * remove it. Otherwise we know we have a ref to the root and we can
+ * call btrfs_del_root_ref, and it _shouldn't_ fail.
+ */
+ if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
di = btrfs_search_dir_index_item(root, path, dir_ino,
name, name_len);
if (IS_ERR_OR_NULL(di)) {
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
index = key.offset;
+ btrfs_release_path(path);
+ } else {
+ ret = btrfs_del_root_ref(trans, objectid,
+ root->root_key.objectid, dir_ino,
+ &index, name, name_len);
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ goto out;
+ }
}
- btrfs_release_path(path);
ret = btrfs_delete_delayed_dir_index(trans, BTRFS_I(dir), index);
if (ret) {
btrfs_record_snapshot_destroy(trans, BTRFS_I(dir));
- ret = btrfs_unlink_subvol(trans, dir, dest->root_key.objectid,
- dentry->d_name.name, dentry->d_name.len);
+ ret = btrfs_unlink_subvol(trans, dir, dentry);
if (ret) {
err = ret;
btrfs_abort_transaction(trans, ret);
return PTR_ERR(trans);
if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
- err = btrfs_unlink_subvol(trans, dir,
- BTRFS_I(inode)->location.objectid,
- dentry->d_name.name,
- dentry->d_name.len);
+ err = btrfs_unlink_subvol(trans, dir, dentry);
goto out;
}
u64 new_ino = btrfs_ino(BTRFS_I(new_inode));
u64 old_idx = 0;
u64 new_idx = 0;
- u64 root_objectid;
int ret;
bool root_log_pinned = false;
bool dest_log_pinned = false;
/* src is a subvolume */
if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
- root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
- ret = btrfs_unlink_subvol(trans, old_dir, root_objectid,
- old_dentry->d_name.name,
- old_dentry->d_name.len);
+ ret = btrfs_unlink_subvol(trans, old_dir, old_dentry);
} else { /* src is an inode */
ret = __btrfs_unlink_inode(trans, root, BTRFS_I(old_dir),
BTRFS_I(old_dentry->d_inode),
/* dest is a subvolume */
if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
- root_objectid = BTRFS_I(new_inode)->root->root_key.objectid;
- ret = btrfs_unlink_subvol(trans, new_dir, root_objectid,
- new_dentry->d_name.name,
- new_dentry->d_name.len);
+ ret = btrfs_unlink_subvol(trans, new_dir, new_dentry);
} else { /* dest is an inode */
ret = __btrfs_unlink_inode(trans, dest, BTRFS_I(new_dir),
BTRFS_I(new_dentry->d_inode),
struct inode *new_inode = d_inode(new_dentry);
struct inode *old_inode = d_inode(old_dentry);
u64 index = 0;
- u64 root_objectid;
int ret;
u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
bool log_pinned = false;
BTRFS_I(old_inode), 1);
if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
- root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
- ret = btrfs_unlink_subvol(trans, old_dir, root_objectid,
- old_dentry->d_name.name,
- old_dentry->d_name.len);
+ ret = btrfs_unlink_subvol(trans, old_dir, old_dentry);
} else {
ret = __btrfs_unlink_inode(trans, root, BTRFS_I(old_dir),
BTRFS_I(d_inode(old_dentry)),
new_inode->i_ctime = current_time(new_inode);
if (unlikely(btrfs_ino(BTRFS_I(new_inode)) ==
BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
- root_objectid = BTRFS_I(new_inode)->location.objectid;
- ret = btrfs_unlink_subvol(trans, new_dir, root_objectid,
- new_dentry->d_name.name,
- new_dentry->d_name.len);
+ ret = btrfs_unlink_subvol(trans, new_dir, new_dentry);
BUG_ON(new_inode->i_nlink == 0);
} else {
ret = btrfs_unlink_inode(trans, dest, BTRFS_I(new_dir),
&sa->progress, sa->flags & BTRFS_SCRUB_READONLY,
0);
- if (ret == 0 && copy_to_user(arg, sa, sizeof(*sa)))
+ /*
+ * Copy scrub args to user space even if btrfs_scrub_dev() returned an
+ * error. This is important as it allows user space to know how much
+ * progress scrub has done. For example, if scrub is canceled we get
+ * -ECANCELED from btrfs_scrub_dev() and return that error back to user
+ * space. Later user space can inspect the progress from the structure
+ * btrfs_ioctl_scrub_args and resume scrub from where it left off
+ * previously (btrfs-progs does this).
+ * If we fail to copy the btrfs_ioctl_scrub_args structure to user space
+ * then return -EFAULT to signal that the structure was not copied, or
+ * that it may be corrupt and unreliable due to a partial copy.
+ */
+ if (copy_to_user(arg, sa, sizeof(*sa)))
ret = -EFAULT;
if (!(sa->flags & BTRFS_SCRUB_READONLY))
u64 nr_old_roots = 0;
int ret = 0;
+ /*
+ * If quotas get disabled meanwhile, the resources need to be freed and
+ * we can't just exit here.
+ */
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
- return 0;
+ goto out_free;
if (new_roots) {
if (!maybe_fs_roots(new_roots))
return 1;
}
+static bool reloc_root_is_dead(struct btrfs_root *root)
+{
+ /*
+ * Pair with set_bit/clear_bit in clean_dirty_subvols and
+ * btrfs_update_reloc_root. We need to see the updated bit before
+ * trying to access reloc_root
+ */
+ smp_rmb();
+ if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state))
+ return true;
+ return false;
+}
+
+/*
+ * Check if this subvolume tree has valid reloc tree.
+ *
+ * Reloc tree after swap is considered dead, thus not considered as valid.
+ * This is enough for most callers, as they don't distinguish dead reloc root
+ * from no reloc root. But should_ignore_root() below is a special case.
+ */
+static bool have_reloc_root(struct btrfs_root *root)
+{
+ if (reloc_root_is_dead(root))
+ return false;
+ if (!root->reloc_root)
+ return false;
+ return true;
+}
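The dead-flag protocol is a publish/observe pairing: the writer marks the root dead before tearing down the pointer, and readers observe the flag before deciding whether reloc_root may be touched. A minimal sketch under assumed names (not the btrfs code itself):

#include <linux/bitops.h>

#define DEAD_FLAG	0

struct root_sketch {
	unsigned long state;
	void *reloc_root;
};

/* Writer: publish "dead" before the pointer goes away. */
static void kill_root(struct root_sketch *r)
{
	set_bit(DEAD_FLAG, &r->state);
	smp_wmb();		/* pairs with smp_rmb() in root_is_dead() */
	r->reloc_root = NULL;
}

/* Reader: see the up-to-date flag before looking at reloc_root. */
static bool root_is_dead(struct root_sketch *r)
{
	smp_rmb();		/* pairs with smp_wmb() in kill_root() */
	return test_bit(DEAD_FLAG, &r->state);
}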
static int should_ignore_root(struct btrfs_root *root)
{
if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
return 0;
+ /* This root has been merged with its reloc tree, we can ignore it */
+ if (reloc_root_is_dead(root))
+ return 1;
+
reloc_root = root->reloc_root;
if (!reloc_root)
return 0;
* The subvolume has reloc tree but the swap is finished, no need to
* create/update the dead reloc tree
*/
- if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state))
+ if (reloc_root_is_dead(root))
return 0;
if (root->reloc_root) {
struct btrfs_root_item *root_item;
int ret;
- if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state) ||
- !root->reloc_root)
+ if (!have_reloc_root(root))
goto out;
reloc_root = root->reloc_root;
if (fs_info->reloc_ctl->merge_reloc_tree &&
btrfs_root_refs(root_item) == 0) {
set_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
+ /*
+ * Mark the tree as dead before we change reloc_root so
+ * have_reloc_root will not touch it from now on.
+ */
+ smp_wmb();
__del_reloc_root(reloc_root);
}
if (ret2 < 0 && !ret)
ret = ret2;
}
+ /*
+ * Need barrier to ensure clear_bit() only happens after
+ * root->reloc_root = NULL. Pairs with have_reloc_root.
+ */
+ smp_wmb();
clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
btrfs_put_fs_root(root);
} else {
struct btrfs_root *root = pending->root;
struct reloc_control *rc = root->fs_info->reloc_ctl;
- if (!root->reloc_root || !rc)
+ if (!rc || !have_reloc_root(root))
return;
if (!rc->merge_reloc_tree)
struct reloc_control *rc = root->fs_info->reloc_ctl;
int ret;
- if (!root->reloc_root || !rc)
+ if (!rc || !have_reloc_root(root))
return 0;
rc = root->fs_info->reloc_ctl;
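The smp_wmb()/smp_rmb() pairing in the hunks above follows a common publish-then-observe pattern: the writer sets the dead bit and issues a write barrier before retiring the pointer, and the reader issues a read barrier and checks the bit before dereferencing. A self-contained C11-atomics analogue (not kernel code; the kernel barriers above are the real mechanism, and the names here are invented):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct obj {
	atomic_bool dead;
	void *_Atomic reloc;
};

/* Writer: mark dead first, then retire the pointer. */
static void retire(struct obj *o)
{
	atomic_store_explicit(&o->dead, true, memory_order_release);
	atomic_store_explicit(&o->reloc, NULL, memory_order_release);
}

/* Reader: observe the flag before touching the pointer. */
static void *get_live(struct obj *o)
{
	if (atomic_load_explicit(&o->dead, memory_order_acquire))
		return NULL;
	return atomic_load_explicit(&o->reloc, memory_order_acquire);
}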
leaf = path->nodes[0];
ref = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_root_ref);
-
- WARN_ON(btrfs_root_ref_dirid(leaf, ref) != dirid);
- WARN_ON(btrfs_root_ref_name_len(leaf, ref) != name_len);
ptr = (unsigned long)(ref + 1);
- WARN_ON(memcmp_extent_buffer(leaf, name, ptr, name_len));
+ if ((btrfs_root_ref_dirid(leaf, ref) != dirid) ||
+ (btrfs_root_ref_name_len(leaf, ref) != name_len) ||
+ memcmp_extent_buffer(leaf, name, ptr, name_len)) {
+ err = -ENOENT;
+ goto out;
+ }
*sequence = btrfs_root_ref_sequence(leaf, ref);
ret = btrfs_del_item(trans, tree_root, path);
* This can easily boost the amount of SYSTEM chunks if cleaner
* thread can't be triggered fast enough, and use up all space
* of btrfs_super_block::sys_chunk_array
+ *
+ * While for dev replace, we need to try our best to mark block
+ * group RO, to prevent race between:
+ * - Write duplication
+ * Contains latest data
+ * - Scrub copy
+ * Contains data from commit tree
+ *
+ * If target block group is not marked RO, nocow writes can
+ * be overwritten by scrub copy, causing data corruption.
+ * So for dev-replace, it's not allowed to continue if a block
+ * group is not RO.
*/
- ret = btrfs_inc_block_group_ro(cache, false);
- scrub_pause_off(fs_info);
-
+ ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace);
if (ret == 0) {
ro_set = 1;
- } else if (ret == -ENOSPC) {
+ } else if (ret == -ENOSPC && !sctx->is_dev_replace) {
/*
* btrfs_inc_block_group_ro return -ENOSPC when it
* failed in creating new chunk for metadata.
- * It is not a problem for scrub/replace, because
+ * It is not a problem for scrub, because
* metadata are always cowed, and our scrub paused
* commit_transactions.
*/
btrfs_warn(fs_info,
"failed setting block group ro: %d", ret);
btrfs_put_block_group(cache);
+ scrub_pause_off(fs_info);
break;
}
+ /*
+ * Now the target block is marked RO, wait for nocow writes to
+ * finish before dev-replace.
+ * COW is fine, as COW never overwrites extents in commit tree.
+ */
+ if (sctx->is_dev_replace) {
+ btrfs_wait_nocow_writers(cache);
+ btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start,
+ cache->length);
+ }
+
+ scrub_pause_off(fs_info);
down_write(&dev_replace->rwsem);
dev_replace->cursor_right = found_key.offset + length;
dev_replace->cursor_left = found_key.offset;
}
}
- num_devices = btrfs_num_devices(fs_info);
+ /*
+ * rw_devices will not change at the moment, device add/delete/replace
+ * are excluded by EXCL_OP
+ */
+ num_devices = fs_info->fs_devices->rw_devices;
/*
* SINGLE profile on-disk has no profile bit, but in-memory we have a
* errors, this only handles the "we need to be able to
* do IO at the final sector" case.
*/
-void guard_bio_eod(int op, struct bio *bio)
+void guard_bio_eod(struct bio *bio)
{
sector_t maxsector;
struct hd_struct *part;
bio->bi_end_io = end_bio_bh_io_sync;
bio->bi_private = bh;
- /* Take care of bh's that straddle the end of the device */
- guard_bio_eod(op, bio);
-
if (buffer_meta(bh))
op_flags |= REQ_META;
if (buffer_prio(bh))
op_flags |= REQ_PRIO;
bio_set_op_attrs(bio, op, op_flags);
+ /* Take care of bh's that straddle the end of the device */
+ guard_bio_eod(bio);
+
if (wbc) {
wbc_init_bio(wbc, bio);
wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size);
/* avoid calling iput_final() in mds dispatch threads */
ceph_async_iput(req->r_inode);
}
- if (req->r_parent)
+ if (req->r_parent) {
ceph_put_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
+ ceph_async_iput(req->r_parent);
+ }
ceph_async_iput(req->r_target_inode);
if (req->r_dentry)
dput(req->r_dentry);
/* take CAP_PIN refs for r_inode, r_parent, r_old_dentry */
if (req->r_inode)
ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
- if (req->r_parent)
+ if (req->r_parent) {
ceph_get_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
+ ihold(req->r_parent);
+ }
if (req->r_old_dentry_dir)
ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
CEPH_CAP_PIN);
if (owner && !try_module_get(owner))
return NULL;
- kobj = kobject_get(&p->kobj);
+ kobj = kobject_get_unless_zero(&p->kobj);
if (!kobj)
module_put(owner);
return kobj;
struct fuse_args_pages *ap = &ia->ap;
loff_t pos = page_offset(ap->pages[0]);
size_t count = ap->num_pages << PAGE_SHIFT;
+ ssize_t res;
int err;
ap->args.out_pages = true;
if (!err)
return;
} else {
- err = fuse_simple_request(fc, &ap->args);
+ res = fuse_simple_request(fc, &ap->args);
+ err = res < 0 ? res : 0;
}
fuse_readpages_end(fc, &ap->args, err);
}
/*
* buffer.c
*/
-extern void guard_bio_eod(int rw, struct bio *bio);
+extern void guard_bio_eod(struct bio *bio);
extern int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
get_block_t *get_block, struct iomap *iomap);
task_unlock(current);
}
if ((work->flags & IO_WQ_WORK_NEEDS_USER) && !worker->mm &&
- wq->mm && mmget_not_zero(wq->mm)) {
- use_mm(wq->mm);
- set_fs(USER_DS);
- worker->mm = wq->mm;
+ wq->mm) {
+ if (mmget_not_zero(wq->mm)) {
+ use_mm(wq->mm);
+ set_fs(USER_DS);
+ worker->mm = wq->mm;
+ } else {
+ work->flags |= IO_WQ_WORK_CANCEL;
+ }
}
if (!worker->creds)
worker->creds = override_creds(wq->creds);
struct iovec *iovec, struct iovec *fast_iov,
struct iov_iter *iter)
{
+ if (req->opcode == IORING_OP_READ_FIXED ||
+ req->opcode == IORING_OP_WRITE_FIXED)
+ return 0;
if (!req->io && io_alloc_async_ctx(req))
return -ENOMEM;
if (!force_nonblock)
req->rw.kiocb.ki_flags &= ~IOCB_NOWAIT;
+ req->result = 0;
io_size = ret;
if (req->flags & REQ_F_LINK)
req->result = io_size;
else
ret2 = loop_rw_iter(READ, req->file, kiocb, &iter);
- /*
- * In case of a short read, punt to async. This can happen
- * if we have data partially cached. Alternatively we can
- * return the short read, in which case the application will
- * need to issue another SQE and wait for it. That SQE will
- * need async punt anyway, so it's more efficient to do it
- * here.
- */
- if (force_nonblock && !(req->flags & REQ_F_NOWAIT) &&
- (req->flags & REQ_F_ISREG) &&
- ret2 > 0 && ret2 < io_size)
- ret2 = -EAGAIN;
/* Catch -EAGAIN return for forced non-blocking submission */
if (!force_nonblock || ret2 != -EAGAIN) {
kiocb_done(kiocb, ret2, nxt, req->in_async);
if (!force_nonblock)
req->rw.kiocb.ki_flags &= ~IOCB_NOWAIT;
+ req->result = 0;
io_size = ret;
if (req->flags & REQ_F_LINK)
req->result = io_size;
return false;
}
+static void io_link_work_cb(struct io_wq_work **workptr)
+{
+ struct io_wq_work *work = *workptr;
+ struct io_kiocb *link = work->data;
+
+ io_queue_linked_timeout(link);
+ work->func = io_wq_submit_work;
+}
+
+static void io_wq_assign_next(struct io_wq_work **workptr, struct io_kiocb *nxt)
+{
+ struct io_kiocb *link;
+
+ io_prep_async_work(nxt, &link);
+ *workptr = &nxt->work;
+ if (link) {
+ nxt->work.flags |= IO_WQ_WORK_CB;
+ nxt->work.func = io_link_work_cb;
+ nxt->work.data = link;
+ }
+}
+
static void io_fsync_finish(struct io_wq_work **workptr)
{
struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
io_cqring_add_event(req, ret);
io_put_req_find_next(req, &nxt);
if (nxt)
- *workptr = &nxt->work;
+ io_wq_assign_next(workptr, nxt);
}
static int io_fsync(struct io_kiocb *req, struct io_kiocb **nxt,
io_cqring_add_event(req, ret);
io_put_req_find_next(req, &nxt);
if (nxt)
- *workptr = &nxt->work;
+ io_wq_assign_next(workptr, nxt);
}
static int io_sync_file_range(struct io_kiocb *req, struct io_kiocb **nxt,
return;
__io_accept(req, &nxt, false);
if (nxt)
- *workptr = &nxt->work;
+ io_wq_assign_next(workptr, nxt);
}
#endif
req_set_fail_links(req);
io_put_req_find_next(req, &nxt);
if (nxt)
- *workptr = &nxt->work;
+ io_wq_assign_next(workptr, nxt);
}
static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
return ret;
if (ctx->flags & IORING_SETUP_IOPOLL) {
+ const bool in_async = io_wq_current_is_worker();
+
if (req->result == -EAGAIN)
return -EAGAIN;
+ /* workqueue context doesn't hold uring_lock, grab it now */
+ if (in_async)
+ mutex_lock(&ctx->uring_lock);
+
io_iopoll_req_issued(req);
+
+ if (in_async)
+ mutex_unlock(&ctx->uring_lock);
}
return 0;
}
-static void io_link_work_cb(struct io_wq_work **workptr)
-{
- struct io_wq_work *work = *workptr;
- struct io_kiocb *link = work->data;
-
- io_queue_linked_timeout(link);
- work->func = io_wq_submit_work;
-}
-
static void io_wq_submit_work(struct io_wq_work **workptr)
{
struct io_wq_work *work = *workptr;
}
/* if a dependent link is ready, pass it back */
- if (!ret && nxt) {
- struct io_kiocb *link;
-
- io_prep_async_work(nxt, &link);
- *workptr = &nxt->work;
- if (link) {
- nxt->work.flags |= IO_WQ_WORK_CB;
- nxt->work.func = io_link_work_cb;
- nxt->work.data = link;
- }
- }
+ if (!ret && nxt)
+ io_wq_assign_next(workptr, nxt);
}
static bool io_req_op_valid(int op)
return -EINVAL;
if (copy_from_user(&up, arg, sizeof(up)))
return -EFAULT;
+ if (up.resv)
+ return -EINVAL;
if (check_add_overflow(up.offset, nr_args, &done))
return -EOVERFLOW;
if (done > ctx->nr_user_files)
return -EINVAL;
done = 0;
- fds = (__s32 __user *) up.fds;
+ fds = u64_to_user_ptr(up.fds);
while (nr_args) {
struct fixed_file_table *table;
unsigned index;
struct io_ring_ctx *ctx = file->private_data;
io_uring_cancel_files(ctx, data);
- if (fatal_signal_pending(current) || (current->flags & PF_EXITING)) {
- io_cqring_overflow_flush(ctx, true);
- io_wq_cancel_all(ctx->io_wq);
- }
return 0;
}
{
bio->bi_end_io = mpage_end_io;
bio_set_op_attrs(bio, op, op_flags);
- guard_bio_eod(op, bio);
+ guard_bio_eod(bio);
submit_bio(bio);
return NULL;
}
* may_create_in_sticky - Check whether an O_CREAT open in a sticky directory
* should be allowed, or not, on files that already
* exist.
- * @dir: the sticky parent directory
+ * @dir_mode: mode bits of directory
+ * @dir_uid: owner of directory
* @inode: the inode of the file to open
*
* Block an O_CREAT open of a FIFO (or a regular file) when:
*
* Returns 0 if the open is allowed, -ve on error.
*/
-static int may_create_in_sticky(struct dentry * const dir,
+static int may_create_in_sticky(umode_t dir_mode, kuid_t dir_uid,
struct inode * const inode)
{
if ((!sysctl_protected_fifos && S_ISFIFO(inode->i_mode)) ||
(!sysctl_protected_regular && S_ISREG(inode->i_mode)) ||
- likely(!(dir->d_inode->i_mode & S_ISVTX)) ||
- uid_eq(inode->i_uid, dir->d_inode->i_uid) ||
+ likely(!(dir_mode & S_ISVTX)) ||
+ uid_eq(inode->i_uid, dir_uid) ||
uid_eq(current_fsuid(), inode->i_uid))
return 0;
- if (likely(dir->d_inode->i_mode & 0002) ||
- (dir->d_inode->i_mode & 0020 &&
+ if (likely(dir_mode & 0002) ||
+ (dir_mode & 0020 &&
((sysctl_protected_fifos >= 2 && S_ISFIFO(inode->i_mode)) ||
(sysctl_protected_regular >= 2 && S_ISREG(inode->i_mode))))) {
const char *operation = S_ISFIFO(inode->i_mode) ?
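For context, the effect of the sysctls referenced above is observable from user space. A hypothetical demo (path and ownership invented; it assumes fs.protected_fifos >= 1 and a FIFO owned by another user sitting in a world-writable sticky directory such as /tmp):

#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>

int main(void)
{
	/* /tmp is typically mode 1777: sticky and world-writable */
	int fd = open("/tmp/other-users-fifo", O_CREAT | O_WRONLY, 0600);

	if (fd < 0 && errno == EACCES)
		printf("blocked by protected_fifos: %s\n", strerror(errno));
	return 0;
}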
BUG_ON(!path->dentry->d_op);
BUG_ON(!path->dentry->d_op->d_manage);
ret = path->dentry->d_op->d_manage(path, false);
+ flags = smp_load_acquire(&path->dentry->d_flags);
if (ret < 0)
break;
}
if (IS_ERR(dentry))
return dentry;
if (unlikely(!d_in_lookup(dentry))) {
- if (!(flags & LOOKUP_NO_REVAL)) {
- int error = d_revalidate(dentry, flags);
- if (unlikely(error <= 0)) {
- if (!error) {
- d_invalidate(dentry);
- dput(dentry);
- goto again;
- }
+ int error = d_revalidate(dentry, flags);
+ if (unlikely(error <= 0)) {
+ if (!error) {
+ d_invalidate(dentry);
dput(dentry);
- dentry = ERR_PTR(error);
+ goto again;
}
+ dput(dentry);
+ dentry = ERR_PTR(error);
}
} else {
old = inode->i_op->lookup(inode, dentry, flags);
}
EXPORT_SYMBOL(user_path_at_empty);
-/**
- * mountpoint_last - look up last component for umount
- * @nd: pathwalk nameidata - currently pointing at parent directory of "last"
- *
- * This is a special lookup_last function just for umount. In this case, we
- * need to resolve the path without doing any revalidation.
- *
- * The nameidata should be the result of doing a LOOKUP_PARENT pathwalk. Since
- * mountpoints are always pinned in the dcache, their ancestors are too. Thus,
- * in almost all cases, this lookup will be served out of the dcache. The only
- * cases where it won't are if nd->last refers to a symlink or the path is
- * bogus and it doesn't exist.
- *
- * Returns:
- * -error: if there was an error during lookup. This includes -ENOENT if the
- * lookup found a negative dentry.
- *
- * 0: if we successfully resolved nd->last and found it to not to be a
- * symlink that needs to be followed.
- *
- * 1: if we successfully resolved nd->last and found it to be a symlink
- * that needs to be followed.
- */
-static int
-mountpoint_last(struct nameidata *nd)
-{
- int error = 0;
- struct dentry *dir = nd->path.dentry;
- struct path path;
-
- /* If we're in rcuwalk, drop out of it to handle last component */
- if (nd->flags & LOOKUP_RCU) {
- if (unlazy_walk(nd))
- return -ECHILD;
- }
-
- nd->flags &= ~LOOKUP_PARENT;
-
- if (unlikely(nd->last_type != LAST_NORM)) {
- error = handle_dots(nd, nd->last_type);
- if (error)
- return error;
- path.dentry = dget(nd->path.dentry);
- } else {
- path.dentry = d_lookup(dir, &nd->last);
- if (!path.dentry) {
- /*
- * No cached dentry. Mounted dentries are pinned in the
- * cache, so that means that this dentry is probably
- * a symlink or the path doesn't actually point
- * to a mounted dentry.
- */
- path.dentry = lookup_slow(&nd->last, dir,
- nd->flags | LOOKUP_NO_REVAL);
- if (IS_ERR(path.dentry))
- return PTR_ERR(path.dentry);
- }
- }
- if (d_flags_negative(smp_load_acquire(&path.dentry->d_flags))) {
- dput(path.dentry);
- return -ENOENT;
- }
- path.mnt = nd->path.mnt;
- return step_into(nd, &path, 0, d_backing_inode(path.dentry), 0);
-}
-
/**
* path_mountpoint - look up a path to be umounted
* @nd: lookup context
int err;
while (!(err = link_path_walk(s, nd)) &&
- (err = mountpoint_last(nd)) > 0) {
+ (err = lookup_last(nd)) > 0) {
s = trailing_symlink(nd);
}
+ if (!err && (nd->flags & LOOKUP_RCU))
+ err = unlazy_walk(nd);
+ if (!err)
+ err = handle_lookup_down(nd);
if (!err) {
*path = nd->path;
nd->path.mnt = NULL;
nd->path.dentry = NULL;
- follow_mount(path);
}
terminate_walk(nd);
return err;
struct file *file, const struct open_flags *op)
{
struct dentry *dir = nd->path.dentry;
+ kuid_t dir_uid = dir->d_inode->i_uid;
+ umode_t dir_mode = dir->d_inode->i_mode;
int open_flag = op->open_flag;
bool will_truncate = (open_flag & O_TRUNC) != 0;
bool got_write = false;
error = -EISDIR;
if (d_is_dir(nd->path.dentry))
goto out;
- error = may_create_in_sticky(dir,
+ error = may_create_in_sticky(dir_mode, dir_uid,
d_backing_inode(nd->path.dentry));
if (unlikely(error))
goto out;
TRACE_DEFINE_ENUM(LOOKUP_PARENT);
TRACE_DEFINE_ENUM(LOOKUP_REVAL);
TRACE_DEFINE_ENUM(LOOKUP_RCU);
-TRACE_DEFINE_ENUM(LOOKUP_NO_REVAL);
TRACE_DEFINE_ENUM(LOOKUP_OPEN);
TRACE_DEFINE_ENUM(LOOKUP_CREATE);
TRACE_DEFINE_ENUM(LOOKUP_EXCL);
{ LOOKUP_PARENT, "PARENT" }, \
{ LOOKUP_REVAL, "REVAL" }, \
{ LOOKUP_RCU, "RCU" }, \
- { LOOKUP_NO_REVAL, "NO_REVAL" }, \
{ LOOKUP_OPEN, "OPEN" }, \
{ LOOKUP_CREATE, "CREATE" }, \
{ LOOKUP_EXCL, "EXCL" }, \
prz_ar[i] = persistent_ram_new(*paddr, zone_sz, sig,
&cxt->ecc_info,
cxt->memtype, flags, label);
+ kfree(label);
if (IS_ERR(prz_ar[i])) {
err = PTR_ERR(prz_ar[i]);
dev_err(dev, "failed to request %s mem region (0x%zx@0x%llx): %d\n",
name, record_size,
(unsigned long long)*paddr, err);
- kfree(label);
while (i > 0) {
i--;
label = kasprintf(GFP_KERNEL, "ramoops:%s", name);
*prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info,
cxt->memtype, PRZ_FLAG_ZAP_OLD, label);
+ kfree(label);
if (IS_ERR(*prz)) {
int err = PTR_ERR(*prz);
dev_err(dev, "failed to request %s mem region (0x%zx@0x%llx): %d\n",
name, sz, (unsigned long long)*paddr, err);
- kfree(label);
return err;
}
/* Initialize general buffer state. */
raw_spin_lock_init(&prz->buffer_lock);
prz->flags = flags;
- prz->label = label;
+ prz->label = kstrdup(label, GFP_KERNEL);
ret = persistent_ram_buffer_map(start, size, prz, memtype);
if (ret)
* filename length, and the above "soft error" worry means
* that it's probably better left alone until we have that
* issue clarified.
+ *
+ * Note the PATH_MAX check - it's arbitrary but the real
+ * kernel limit on a possible path component, not NAME_MAX,
+ * which is the technical standard limit.
*/
static int verify_dirent_name(const char *name, int len)
{
- if (!len)
+ if (len <= 0 || len >= PATH_MAX)
return -EIO;
if (memchr(name, '/', len))
return -EIO;
struct getdents_callback {
struct dir_context ctx;
struct linux_dirent __user * current_dir;
- struct linux_dirent __user * previous;
+ int prev_reclen;
int count;
int error;
};
static int filldir(struct dir_context *ctx, const char *name, int namlen,
loff_t offset, u64 ino, unsigned int d_type)
{
- struct linux_dirent __user * dirent;
+ struct linux_dirent __user *dirent, *prev;
struct getdents_callback *buf =
container_of(ctx, struct getdents_callback, ctx);
unsigned long d_ino;
int reclen = ALIGN(offsetof(struct linux_dirent, d_name) + namlen + 2,
sizeof(long));
+ int prev_reclen;
buf->error = verify_dirent_name(name, namlen);
if (unlikely(buf->error))
buf->error = -EOVERFLOW;
return -EOVERFLOW;
}
- dirent = buf->previous;
- if (dirent && signal_pending(current))
+ prev_reclen = buf->prev_reclen;
+ if (prev_reclen && signal_pending(current))
return -EINTR;
-
- /*
- * Note! This range-checks 'previous' (which may be NULL).
- * The real range was checked in getdents
- */
- if (!user_access_begin(dirent, sizeof(*dirent)))
- goto efault;
- if (dirent)
- unsafe_put_user(offset, &dirent->d_off, efault_end);
dirent = buf->current_dir;
+ prev = (void __user *) dirent - prev_reclen;
+ if (!user_access_begin(prev, reclen + prev_reclen))
+ goto efault;
+
+ /* This might be 'dirent->d_off', but if so it will get overwritten */
+ unsafe_put_user(offset, &prev->d_off, efault_end);
unsafe_put_user(d_ino, &dirent->d_ino, efault_end);
unsafe_put_user(reclen, &dirent->d_reclen, efault_end);
unsafe_put_user(d_type, (char __user *) dirent + reclen - 1, efault_end);
unsafe_copy_dirent_name(dirent->d_name, name, namlen, efault_end);
user_access_end();
- buf->previous = dirent;
- dirent = (void __user *)dirent + reclen;
- buf->current_dir = dirent;
+ buf->current_dir = (void __user *)dirent + reclen;
+ buf->prev_reclen = reclen;
buf->count -= reclen;
return 0;
efault_end:
struct linux_dirent __user *, dirent, unsigned int, count)
{
struct fd f;
- struct linux_dirent __user * lastdirent;
struct getdents_callback buf = {
.ctx.actor = filldir,
.count = count,
error = iterate_dir(f.file, &buf.ctx);
if (error >= 0)
error = buf.error;
- lastdirent = buf.previous;
- if (lastdirent) {
+ if (buf.prev_reclen) {
+ struct linux_dirent __user * lastdirent;
+ lastdirent = (void __user *)buf.current_dir - buf.prev_reclen;
+
if (put_user(buf.ctx.pos, &lastdirent->d_off))
error = -EFAULT;
else
struct getdents_callback64 {
struct dir_context ctx;
struct linux_dirent64 __user * current_dir;
- struct linux_dirent64 __user * previous;
+ int prev_reclen;
int count;
int error;
};
static int filldir64(struct dir_context *ctx, const char *name, int namlen,
loff_t offset, u64 ino, unsigned int d_type)
{
- struct linux_dirent64 __user *dirent;
+ struct linux_dirent64 __user *dirent, *prev;
struct getdents_callback64 *buf =
container_of(ctx, struct getdents_callback64, ctx);
int reclen = ALIGN(offsetof(struct linux_dirent64, d_name) + namlen + 1,
sizeof(u64));
+ int prev_reclen;
buf->error = verify_dirent_name(name, namlen);
if (unlikely(buf->error))
buf->error = -EINVAL; /* only used if we fail.. */
if (reclen > buf->count)
return -EINVAL;
- dirent = buf->previous;
- if (dirent && signal_pending(current))
+ prev_reclen = buf->prev_reclen;
+ if (prev_reclen && signal_pending(current))
return -EINTR;
-
- /*
- * Note! This range-checks 'previous' (which may be NULL).
- * The real range was checked in getdents
- */
- if (!user_access_begin(dirent, sizeof(*dirent)))
- goto efault;
- if (dirent)
- unsafe_put_user(offset, &dirent->d_off, efault_end);
dirent = buf->current_dir;
+ prev = (void __user *)dirent - prev_reclen;
+ if (!user_access_begin(prev, reclen + prev_reclen))
+ goto efault;
+
+ /* This might be 'dirent->d_off', but if so it will get overwritten */
+ unsafe_put_user(offset, &prev->d_off, efault_end);
unsafe_put_user(ino, &dirent->d_ino, efault_end);
unsafe_put_user(reclen, &dirent->d_reclen, efault_end);
unsafe_put_user(d_type, &dirent->d_type, efault_end);
unsafe_copy_dirent_name(dirent->d_name, name, namlen, efault_end);
user_access_end();
- buf->previous = dirent;
- dirent = (void __user *)dirent + reclen;
- buf->current_dir = dirent;
+ buf->prev_reclen = reclen;
+ buf->current_dir = (void __user *)dirent + reclen;
buf->count -= reclen;
return 0;
+
efault_end:
user_access_end();
efault:
unsigned int count)
{
struct fd f;
- struct linux_dirent64 __user * lastdirent;
struct getdents_callback64 buf = {
.ctx.actor = filldir64,
.count = count,
error = iterate_dir(f.file, &buf.ctx);
if (error >= 0)
error = buf.error;
- lastdirent = buf.previous;
- if (lastdirent) {
+ if (buf.prev_reclen) {
+ struct linux_dirent64 __user * lastdirent;
typeof(lastdirent->d_off) d_off = buf.ctx.pos;
+
+ lastdirent = (void __user *) buf.current_dir - buf.prev_reclen;
if (__put_user(d_off, &lastdirent->d_off))
error = -EFAULT;
else
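The prev_reclen bookkeeping above produces the packed record stream user space walks via d_reclen. A minimal consumer sketch (glibc's struct dirent64 via _GNU_SOURCE is assumed; error checking abbreviated):

#define _GNU_SOURCE
#include <dirent.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	char buf[4096];
	int fd = open(".", O_RDONLY | O_DIRECTORY);
	long n = syscall(SYS_getdents64, fd, buf, sizeof(buf));

	/* each record is d_reclen bytes, exactly as filldir64() wrote it */
	for (long off = 0; off < n; ) {
		struct dirent64 *d = (struct dirent64 *)(buf + off);

		printf("%s\n", d->d_name);
		off += d->d_reclen;
	}
	close(fd);
	return 0;
}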
out_dir:
dput(dir);
out:
- /* -ENODATA isn't an error */
- if (err == -ENODATA)
+ /*
+ * -ENODATA: this object doesn't have any xattrs
+ * -EOPNOTSUPP: this file system doesn't have xattrs enabled on disk.
+ * Neither is an error
+ */
+ if (err == -ENODATA || err == -EOPNOTSUPP)
err = 0;
return err;
}
*
* Name: acbuffer.h - Support for buffers returned by ACPI predefined names
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Name: acconfig.h - Global configuration constants
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Name: acexcep.h - Exception codes returned by the ACPI subsystem
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Name: acnames.h - Global names and strings
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Name: acoutput.h -- debug output
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Name: acpi.h - Master public include file used to interface to ACPICA
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
* interfaces must be implemented by OSL to interface the
* ACPI components to the host operating system.
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Name: acpixf.h - External interfaces to the ACPI subsystem
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20191018
+#define ACPI_CA_VERSION 0x20200110
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
*
* Name: acrestyp.h - Defines, types, and structures for resource descriptors
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Name: actbl.h - Basic ACPI Table Definitions
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Name: actbl1.h - Additional ACPI table definitions
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Name: actbl2.h - ACPI Table Definitions (tables not in ACPI spec)
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Name: actbl3.h - ACPI Table Definitions
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Name: actypes.h - Common data types for the entire ACPI subsystem
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Name: acuuid.h - ACPI-related UUID/GUID definitions
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Name: acenv.h - Host and compiler configuration
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
#endif
+/*
+ * acpisrc CR/LF support
+ * Unix file line endings do not include the carriage return.
+ * If the acpisrc utility is built with a Microsoft compiler, it will be
+ * running on a Windows machine, so the output is expected to have CR/LF
+ * newlines. If the acpisrc utility is built with anything else, it will
+ * likely run on a system with LF newlines. This flag tells the acpisrc
+ * utility that newlines will be in the LF format.
+ */
+#define ACPI_SRC_OS_LF_ONLY 0
+
/*! [Begin] no source code translation */
/******************************************************************************
*
* Name: acenvex.h - Extra host and compiler configuration
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Name: acgcc.h - GCC specific defines, etc.
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Name: acgccex.h - Extra GCC specific defines, etc.
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Name: acintel.h - VC specific defines, etc.
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Name: aclinux.h - OS specific defines, etc. for Linux
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Name: aclinuxex.h - Extra OS specific defines, etc. for Linux
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
 * The cache doesn't need to be flushed when TLB entries change if
 * the cache is mapped to physical memory rather than virtual memory.
*/
+#ifndef flush_cache_all
static inline void flush_cache_all(void)
{
}
+#endif
+#ifndef flush_cache_mm
static inline void flush_cache_mm(struct mm_struct *mm)
{
}
+#endif
+#ifndef flush_cache_dup_mm
static inline void flush_cache_dup_mm(struct mm_struct *mm)
{
}
+#endif
+#ifndef flush_cache_range
static inline void flush_cache_range(struct vm_area_struct *vma,
unsigned long start,
unsigned long end)
{
}
+#endif
+#ifndef flush_cache_page
static inline void flush_cache_page(struct vm_area_struct *vma,
unsigned long vmaddr,
unsigned long pfn)
{
}
+#endif
+#ifndef flush_dcache_page
static inline void flush_dcache_page(struct page *page)
{
}
+#endif
+#ifndef flush_dcache_mmap_lock
static inline void flush_dcache_mmap_lock(struct address_space *mapping)
{
}
+#endif
+#ifndef flush_dcache_mmap_unlock
static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
{
}
+#endif
+#ifndef flush_icache_range
static inline void flush_icache_range(unsigned long start, unsigned long end)
{
}
+#endif
+#ifndef flush_icache_page
static inline void flush_icache_page(struct vm_area_struct *vma,
struct page *page)
{
}
+#endif
+#ifndef flush_icache_user_range
static inline void flush_icache_user_range(struct vm_area_struct *vma,
struct page *page,
unsigned long addr, int len)
{
}
+#endif
+#ifndef flush_cache_vmap
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
}
+#endif
+#ifndef flush_cache_vunmap
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}
+#endif
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+#ifndef copy_to_user_page
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
memcpy(dst, src, len); \
flush_icache_user_range(vma, page, vaddr, len); \
} while (0)
+#endif
+
+#ifndef copy_from_user_page
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
memcpy(dst, src, len)
+#endif
#endif /* __ASM_CACHEFLUSH_H */
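The new #ifndef guards let an architecture provide only the hooks it actually needs and inherit the no-op defaults for everything else. A hypothetical arch header (all names invented) might look like:

/* arch/foo/include/asm/cacheflush.h -- illustrative only */
#ifndef _ASM_FOO_CACHEFLUSH_H
#define _ASM_FOO_CACHEFLUSH_H

struct page;
void foo_flush_dcache_page(struct page *page);

static inline void flush_dcache_page(struct page *page)
{
	foo_flush_dcache_page(page);
}
#define flush_dcache_page flush_dcache_page	/* suppress the generic stub */

#include <asm-generic/cacheflush.h>

#endif /* _ASM_FOO_CACHEFLUSH_H */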
* &drm_dp_sideband_msg_tx.state once they are queued
*/
struct mutex qlock;
+
+ /**
+ * @is_waiting_for_dwn_reply: indicates whether we are waiting for a down reply
+ */
+ bool is_waiting_for_dwn_reply;
+
/**
* @tx_msg_downq: List of pending down replies.
*/
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * This header provides macros for X1830 DMA bindings.
+ *
+ * Copyright (c) 2019 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
+ */
+
+#ifndef __DT_BINDINGS_DMA_X1830_DMA_H__
+#define __DT_BINDINGS_DMA_X1830_DMA_H__
+
+/*
+ * Request type numbers for the X1830 DMA controller (written to the DRTn
+ * register for the channel).
+ */
+#define X1830_DMA_I2S0_TX 0x6
+#define X1830_DMA_I2S0_RX 0x7
+#define X1830_DMA_AUTO 0x8
+#define X1830_DMA_SADC_RX 0x9
+#define X1830_DMA_UART1_TX 0x12
+#define X1830_DMA_UART1_RX 0x13
+#define X1830_DMA_UART0_TX 0x14
+#define X1830_DMA_UART0_RX 0x15
+#define X1830_DMA_SSI0_TX 0x16
+#define X1830_DMA_SSI0_RX 0x17
+#define X1830_DMA_SSI1_TX 0x18
+#define X1830_DMA_SSI1_RX 0x19
+#define X1830_DMA_MSC0_TX 0x1a
+#define X1830_DMA_MSC0_RX 0x1b
+#define X1830_DMA_MSC1_TX 0x1c
+#define X1830_DMA_MSC1_RX 0x1d
+#define X1830_DMA_DMIC_RX 0x21
+#define X1830_DMA_SMB0_TX 0x24
+#define X1830_DMA_SMB0_RX 0x25
+#define X1830_DMA_SMB1_TX 0x26
+#define X1830_DMA_SMB1_RX 0x27
+#define X1830_DMA_DES_TX 0x2e
+#define X1830_DMA_DES_RX 0x2f
+
+#endif /* __DT_BINDINGS_DMA_X1830_DMA_H__ */
#define RESET_VD_RMEM 64
#define RESET_AUDIN 65
#define RESET_DBLK 66
-#define RESET_PIC_DC 66
-#define RESET_PSC 66
-#define RESET_NAND 66
+#define RESET_PIC_DC 67
+#define RESET_PSC 68
+#define RESET_NAND 69
#define RESET_GE2D 70
#define RESET_PARSER_REG 71
#define RESET_PARSER_FETCH 72
/* Validate the processor object's proc_id */
bool acpi_duplicate_processor_id(int proc_id);
+/* Processor _CTS control */
+struct acpi_processor_power;
+
+#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
+bool acpi_processor_claim_cst_control(void);
+int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu,
+ struct acpi_processor_power *info);
+#else
+static inline bool acpi_processor_claim_cst_control(void) { return false; }
+static inline int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu,
+ struct acpi_processor_power *info)
+{
+ return -ENODEV;
+}
+#endif
#ifdef CONFIG_ACPI_HOTPLUG_CPU
/* Arch dependent functions for cpu hotplug support */
unsigned int max_sectors;
unsigned int max_segment_size;
unsigned int physical_block_size;
+ unsigned int logical_block_size;
unsigned int alignment_offset;
unsigned int io_min;
unsigned int io_opt;
unsigned int discard_granularity;
unsigned int discard_alignment;
- unsigned short logical_block_size;
unsigned short max_segments;
unsigned short max_integrity_segments;
unsigned short max_discard_segments;
unsigned int max_write_same_sectors);
extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
unsigned int max_write_same_sectors);
-extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
+extern void blk_queue_logical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_alignment_offset(struct request_queue *q,
unsigned int alignment);
return q->limits.max_segment_size;
}
-static inline unsigned short queue_logical_block_size(const struct request_queue *q)
+static inline unsigned queue_logical_block_size(const struct request_queue *q)
{
int retval = 512;
return retval;
}
-static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
+static inline unsigned int bdev_logical_block_size(struct block_device *bdev)
{
return queue_logical_block_size(bdev_get_queue(bdev));
}
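With the limit widened to unsigned int, a driver can advertise logical block sizes that would previously have truncated to 0 in a u16. A hedged sketch of a hypothetical driver's queue setup (function name invented):

#include <linux/blkdev.h>

static void foo_setup_queue(struct request_queue *q)
{
	/* 64 KiB logical blocks: would have wrapped to 0 as a u16 */
	blk_queue_logical_block_size(q, 65536);
	blk_queue_physical_block_size(q, 65536);
}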
}
}
-/*
- * Get the last single-page segment from the multi-page bvec and store it
- * in @seg
- */
-static inline void mp_bvec_last_segment(const struct bio_vec *bvec,
- struct bio_vec *seg)
-{
- unsigned total = bvec->bv_offset + bvec->bv_len;
- unsigned last_page = (total - 1) / PAGE_SIZE;
-
- seg->bv_page = bvec->bv_page + last_page;
-
- /* the whole segment is inside the last page */
- if (bvec->bv_offset >= last_page * PAGE_SIZE) {
- seg->bv_offset = bvec->bv_offset % PAGE_SIZE;
- seg->bv_len = bvec->bv_len;
- } else {
- seg->bv_offset = 0;
- seg->bv_len = total - last_page * PAGE_SIZE;
- }
-}
-
#endif /* __LINUX_BVEC_ITER_H */
#include <linux/can/error.h>
#include <linux/can/led.h>
#include <linux/can/netlink.h>
+#include <linux/can/skb.h>
#include <linux/netdevice.h>
/*
#define get_can_dlc(i) (min_t(__u8, (i), CAN_MAX_DLC))
#define get_canfd_dlc(i) (min_t(__u8, (i), CANFD_MAX_DLC))
+/* Check for outgoing skbs that have not been created by the CAN subsystem */
+static inline bool can_skb_headroom_valid(struct net_device *dev,
+ struct sk_buff *skb)
+{
+ /* af_packet creates a headroom of HH_DATA_MOD bytes which is fine */
+ if (WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct can_skb_priv)))
+ return false;
+
+ /* af_packet does not apply CAN skb specific settings */
+ if (skb->ip_summed == CHECKSUM_NONE) {
+ /* init headroom */
+ can_skb_prv(skb)->ifindex = dev->ifindex;
+ can_skb_prv(skb)->skbcnt = 0;
+
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* perform proper loopback on capable devices */
+ if (dev->flags & IFF_ECHO)
+ skb->pkt_type = PACKET_LOOPBACK;
+ else
+ skb->pkt_type = PACKET_HOST;
+
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+ }
+
+ return true;
+}
+
/* Drop a given socketbuffer if it does not contain a valid CAN frame. */
static inline bool can_dropped_invalid_skb(struct net_device *dev,
struct sk_buff *skb)
} else
goto inval_skb;
+ if (!can_skb_headroom_valid(dev, skb))
+ goto inval_skb;
+
return false;
inval_skb:
#define CPUIDLE_FLAG_COUPLED BIT(1) /* state applies to multiple cpus */
#define CPUIDLE_FLAG_TIMER_STOP BIT(2) /* timer is stopped on this state */
#define CPUIDLE_FLAG_UNUSABLE BIT(3) /* avoid using this state */
+#define CPUIDLE_FLAG_OFF BIT(4) /* disable this state by default */
struct cpuidle_device_kobj;
struct cpuidle_state_kobj;
struct cpuidle_driver {
const char *name;
struct module *owner;
- int refcnt;
/* used by the cpuidle framework to setup the broadcast timer */
unsigned int bctimer:1;
extern int cpuidle_register_driver(struct cpuidle_driver *drv);
extern struct cpuidle_driver *cpuidle_get_driver(void);
-extern struct cpuidle_driver *cpuidle_driver_ref(void);
-extern void cpuidle_driver_unref(void);
extern void cpuidle_driver_state_disabled(struct cpuidle_driver *drv, int idx,
bool disable);
extern void cpuidle_unregister_driver(struct cpuidle_driver *drv);
static inline int cpuidle_register_driver(struct cpuidle_driver *drv)
{return -ENODEV; }
static inline struct cpuidle_driver *cpuidle_get_driver(void) {return NULL; }
-static inline struct cpuidle_driver *cpuidle_driver_ref(void) {return NULL; }
-static inline void cpuidle_driver_unref(void) {}
static inline void cpuidle_driver_state_disabled(struct cpuidle_driver *drv,
int idx, bool disable) { }
static inline void cpuidle_unregister_driver(struct cpuidle_driver *drv) { }
unsigned int max_state;
};
+/**
+ * struct devfreq_stats - Statistics of devfreq device behavior
+ * @total_trans: Number of devfreq transitions.
+ * @trans_table: Statistics of devfreq transitions.
+ * @time_in_state: Statistics of devfreq states.
+ * @last_update: The last time stats were updated.
+ */
+struct devfreq_stats {
+ unsigned int total_trans;
+ unsigned int *trans_table;
+ u64 *time_in_state;
+ u64 last_update;
+};
+
/**
* struct devfreq - Device devfreq structure
* @node: list node - contains the devices with devfreq that have been
* devfreq.nb to the corresponding register notifier call chain.
* @work: delayed work for load monitoring.
* @previous_freq: previously configured frequency value.
+ * @last_status: devfreq user device info, performance statistics
* @data: Private data of the governor. The devfreq framework does not
* touch this.
* @user_min_freq_req: PM QoS minimum frequency request from user (via sysfs)
* @suspend_freq: frequency of a device set during suspend phase.
* @resume_freq: frequency of a device set in resume phase.
* @suspend_count: suspend requests counter for a device.
- * @total_trans: Number of devfreq transitions
- * @trans_table: Statistics of devfreq transitions
- * @time_in_state: Statistics of devfreq states
- * @last_stat_updated: The last time stat updated
+ * @stats: Statistics of devfreq device behavior
* @transition_notifier_list: list head of DEVFREQ_TRANSITION_NOTIFIER notifier
* @nb_min: Notifier block for DEV_PM_QOS_MIN_FREQUENCY
* @nb_max: Notifier block for DEV_PM_QOS_MAX_FREQUENCY
*
- * This structure stores the devfreq information for a give device.
+ * This structure stores the devfreq information for a given device.
*
* Note that when a governor accesses entries in struct devfreq in its
* functions except for the context of callbacks defined in struct
unsigned long resume_freq;
atomic_t suspend_count;
- /* information for device frequency transition */
- unsigned int total_trans;
- unsigned int *trans_table;
- unsigned long *time_in_state;
- unsigned long last_stat_updated;
+ /* information for device frequency transitions */
+ struct devfreq_stats stats;
struct srcu_notifier_head transition_notifier_list;
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#ifndef K3_PSIL_H_
+#define K3_PSIL_H_
+
+#include <linux/types.h>
+
+#define K3_PSIL_DST_THREAD_ID_OFFSET 0x8000
+
+struct device;
+
+/**
+ * enum udma_tp_level - Channel Throughput Levels
+ * @UDMA_TP_NORMAL: Normal channel
+ * @UDMA_TP_HIGH: High Throughput channel
+ * @UDMA_TP_ULTRAHIGH: Ultra High Throughput channel
+ * @UDMA_TP_LAST: Number of throughput levels (sentinel, not a real level)
+ */
+enum udma_tp_level {
+ UDMA_TP_NORMAL = 0,
+ UDMA_TP_HIGH,
+ UDMA_TP_ULTRAHIGH,
+ UDMA_TP_LAST,
+};
+
+/**
+ * enum psil_endpoint_type - PSI-L Endpoint type
+ * @PSIL_EP_NATIVE: Normal channel
+ * @PSIL_EP_PDMA_XY: XY mode PDMA
+ * @PSIL_EP_PDMA_MCAN: MCAN mode PDMA
+ * @PSIL_EP_PDMA_AASRC: AASRC mode PDMA
+ */
+enum psil_endpoint_type {
+ PSIL_EP_NATIVE = 0,
+ PSIL_EP_PDMA_XY,
+ PSIL_EP_PDMA_MCAN,
+ PSIL_EP_PDMA_AASRC,
+};
+
+/**
+ * struct psil_endpoint_config - PSI-L Endpoint configuration
+ * @ep_type: PSI-L endpoint type
+ * @pkt_mode: If set, the channel must be in Packet mode, otherwise in
+ * TR mode
+ * @notdpkt: TDCM must be suppressed on the TX channel
+ * @needs_epib: Endpoint needs EPIB
+ * @psd_size: If set, PSdata is used by the endpoint
+ * @channel_tpl: Desired throughput level for the channel
+ * @pdma_acc32: ACC32 must be enabled on the PDMA side
+ * @pdma_burst: BURST must be enabled on the PDMA side
+ */
+struct psil_endpoint_config {
+ enum psil_endpoint_type ep_type;
+
+ unsigned pkt_mode:1;
+ unsigned notdpkt:1;
+ unsigned needs_epib:1;
+ u32 psd_size;
+ enum udma_tp_level channel_tpl;
+
+ /* PDMA properties, valid for PSIL_EP_PDMA_* */
+ unsigned pdma_acc32:1;
+ unsigned pdma_burst:1;
+};
+
+int psil_set_new_ep_config(struct device *dev, const char *name,
+ struct psil_endpoint_config *ep_config);
+
+#endif /* K3_PSIL_H_ */
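A hedged usage sketch for psil_set_new_ep_config(): the thread name, sizes, and the calling driver are invented; whether the config must outlive the call is an assumption, so it is made static here:

#include <linux/dma/k3-psil.h>

/* Hedged sketch: override a PSI-L endpoint config before channel setup. */
static int foo_fixup_psil(struct device *dev)
{
	static struct psil_endpoint_config cfg = {
		.ep_type = PSIL_EP_PDMA_XY,
		.pkt_mode = 1,
		.needs_epib = 1,
		.psd_size = 16,
		.channel_tpl = UDMA_TP_NORMAL,
	};

	/* "foo0-tx" is an invented PSI-L thread name */
	return psil_set_new_ep_config(dev, "foo0-tx", &cfg);
}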
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#ifndef K3_UDMA_GLUE_H_
+#define K3_UDMA_GLUE_H_
+
+#include <linux/types.h>
+#include <linux/soc/ti/k3-ringacc.h>
+#include <linux/dma/ti-cppi5.h>
+
+struct k3_udma_glue_tx_channel_cfg {
+ struct k3_ring_cfg tx_cfg;
+ struct k3_ring_cfg txcq_cfg;
+
+ bool tx_pause_on_err;
+ bool tx_filt_einfo;
+ bool tx_filt_pswords;
+ bool tx_supr_tdpkt;
+ u32 swdata_size;
+};
+
+struct k3_udma_glue_tx_channel;
+
+struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
+ const char *name, struct k3_udma_glue_tx_channel_cfg *cfg);
+
+void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn);
+int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+ struct cppi5_host_desc_t *desc_tx,
+ dma_addr_t desc_dma);
+int k3_udma_glue_pop_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+ dma_addr_t *desc_dma);
+int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn);
+void k3_udma_glue_disable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn);
+void k3_udma_glue_tdown_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+ bool sync);
+void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+ void *data, void (*cleanup)(void *data, dma_addr_t desc_dma));
+u32 k3_udma_glue_tx_get_hdesc_size(struct k3_udma_glue_tx_channel *tx_chn);
+u32 k3_udma_glue_tx_get_txcq_id(struct k3_udma_glue_tx_channel *tx_chn);
+int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn);
+
+enum {
+ K3_UDMA_GLUE_SRC_TAG_LO_KEEP = 0,
+ K3_UDMA_GLUE_SRC_TAG_LO_USE_FLOW_REG = 1,
+ K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_FLOW_ID = 2,
+ K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG = 4,
+};
+
+/**
+ * k3_udma_glue_rx_flow_cfg - UDMA RX flow cfg
+ *
+ * @rx_cfg: RX ring configuration
+ * @rxfdq_cfg: RX free Host PD ring configuration
+ * @ring_rxq_id: RX ring id (or -1 for any)
+ * @ring_rxfdq0_id: RX free Host PD ring (FDQ) id (or -1 for any)
+ * @rx_error_handling: Rx Error Handling Mode (0 - drop, 1 - re-try)
+ * @src_tag_lo_sel: Rx Source Tag Low Byte Selector in Host PD
+ */
+struct k3_udma_glue_rx_flow_cfg {
+ struct k3_ring_cfg rx_cfg;
+ struct k3_ring_cfg rxfdq_cfg;
+ int ring_rxq_id;
+ int ring_rxfdq0_id;
+ bool rx_error_handling;
+ int src_tag_lo_sel;
+};
+
+/**
+ * k3_udma_glue_rx_channel_cfg - UDMA RX channel cfg
+ *
+ * @swdata_size: SW Data is present in Host PD of @swdata_size bytes
+ * @flow_id_base: first flow_id used by channel.
+ * if @flow_id_base = -1 - range of GP rflows will be
+ * allocated dynamically.
+ * @flow_id_num: number of RX flows used by channel
+ * @flow_id_use_rxchan_id: use RX channel id as flow id,
+ * used only if @flow_id_num = 1
+ * @remote: indication that the RX channel is remote - some remote CPU
+ * core owns and controls the RX channel. The Linux host is
+ * only allowed to attach and configure RX flows within the
+ * RX channel. If set, no RX channel operations will be
+ * performed by the K3 NAVSS DMA glue interface.
+ * @def_flow_cfg: default RX flow configuration,
+ * used only if @flow_id_num = 1
+ */
+struct k3_udma_glue_rx_channel_cfg {
+ u32 swdata_size;
+ int flow_id_base;
+ int flow_id_num;
+ bool flow_id_use_rxchan_id;
+ bool remote;
+
+ struct k3_udma_glue_rx_flow_cfg *def_flow_cfg;
+};
+
+struct k3_udma_glue_rx_channel;
+
+struct k3_udma_glue_rx_channel *k3_udma_glue_request_rx_chn(
+ struct device *dev,
+ const char *name,
+ struct k3_udma_glue_rx_channel_cfg *cfg);
+
+void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn);
+int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn);
+void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn);
+void k3_udma_glue_tdown_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+ bool sync);
+int k3_udma_glue_push_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_num, struct cppi5_host_desc_t *desc_tx,
+ dma_addr_t desc_dma);
+int k3_udma_glue_pop_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_num, dma_addr_t *desc_dma);
+int k3_udma_glue_rx_flow_init(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_idx, struct k3_udma_glue_rx_flow_cfg *flow_cfg);
+u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_idx);
+u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn);
+int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_num);
+void k3_udma_glue_rx_put_irq(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_num);
+void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_num, void *data,
+ void (*cleanup)(void *data, dma_addr_t desc_dma),
+ bool skip_fdq);
+int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_idx);
+int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_idx);
+
+#endif /* K3_UDMA_GLUE_H_ */
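A hedged sketch of the TX-channel flow declared above. The channel name and ring sizes are invented, and the k3_ring_cfg field and enum names are assumptions based on <linux/soc/ti/k3-ringacc.h>:

#include <linux/err.h>
#include <linux/dma/k3-udma-glue.h>

/* Hedged sketch: request and enable a TX channel via the glue layer. */
static struct k3_udma_glue_tx_channel *foo_init_tx(struct device *dev)
{
	struct k3_udma_glue_tx_channel_cfg cfg = {
		.swdata_size = 16,
		.tx_cfg = {
			.size = 128,
			.elm_size = K3_RINGACC_RING_ELSIZE_8,
			.mode = K3_RINGACC_RING_MODE_RING,
		},
		.txcq_cfg = {
			.size = 128,
			.elm_size = K3_RINGACC_RING_ELSIZE_8,
			.mode = K3_RINGACC_RING_MODE_RING,
		},
	};
	struct k3_udma_glue_tx_channel *tx;

	tx = k3_udma_glue_request_tx_chn(dev, "tx0", &cfg);
	if (IS_ERR(tx))
		return tx;

	if (k3_udma_glue_enable_tx_chn(tx)) {
		k3_udma_glue_release_tx_chn(tx);
		return ERR_PTR(-ENODEV);
	}
	return tx;
}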
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CPPI5 descriptors interface
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#ifndef __TI_CPPI5_H__
+#define __TI_CPPI5_H__
+
+#include <linux/bitops.h>
+#include <linux/printk.h>
+#include <linux/bug.h>
+
+/**
+ * struct cppi5_desc_hdr_t - Descriptor header, present in all types of
+ * descriptors
+ * @pkt_info0: Packet info word 0 (n/a in Buffer desc)
+ * @pkt_info1: Packet info word 1 (n/a in Buffer desc)
+ * @pkt_info2: Packet info word 2 (n/a in Buffer desc)
+ * @src_dst_tag: Packet info word 3 (n/a in Buffer desc)
+ */
+struct cppi5_desc_hdr_t {
+ u32 pkt_info0;
+ u32 pkt_info1;
+ u32 pkt_info2;
+ u32 src_dst_tag;
+} __packed;
+
+/**
+ * struct cppi5_host_desc_t - Host-mode packet and buffer descriptor definition
+ * @hdr: Descriptor header
+ * @next_desc: word 4/5: Linking word
+ * @buf_ptr: word 6/7: Buffer pointer
+ * @buf_info1: word 8: Buffer valid data length
+ * @org_buf_len: word 9: Original buffer length
+ * @org_buf_ptr: word 10/11: Original buffer pointer
+ * @epib[0]: Extended Packet Info Data (optional, 4 words), and/or
+ * Protocol Specific Data (optional, 0-128 bytes in
+ * multiples of 4), and/or
+ * Other Software Data (0-N bytes, optional)
+ */
+struct cppi5_host_desc_t {
+ struct cppi5_desc_hdr_t hdr;
+ u64 next_desc;
+ u64 buf_ptr;
+ u32 buf_info1;
+ u32 org_buf_len;
+ u64 org_buf_ptr;
+ u32 epib[0];
+} __packed;
+
+#define CPPI5_DESC_MIN_ALIGN (16U)
+
+#define CPPI5_INFO0_HDESC_EPIB_SIZE (16U)
+#define CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE (128U)
+
+#define CPPI5_INFO0_HDESC_TYPE_SHIFT (30U)
+#define CPPI5_INFO0_HDESC_TYPE_MASK GENMASK(31, 30)
+#define CPPI5_INFO0_DESC_TYPE_VAL_HOST (1U)
+#define CPPI5_INFO0_DESC_TYPE_VAL_MONO (2U)
+#define CPPI5_INFO0_DESC_TYPE_VAL_TR (3U)
+#define CPPI5_INFO0_HDESC_EPIB_PRESENT BIT(29)
+/*
+ * Protocol Specific Words location:
+ * 0 - located in the descriptor,
+ * 0 = located in the descriptor,
+ * 1 = located in the SOP Buffer immediately prior to the data.
+#define CPPI5_INFO0_HDESC_PSINFO_LOCATION BIT(28)
+#define CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT (22U)
+#define CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK GENMASK(27, 22)
+#define CPPI5_INFO0_HDESC_PKTLEN_SHIFT (0)
+#define CPPI5_INFO0_HDESC_PKTLEN_MASK GENMASK(21, 0)
+
+#define CPPI5_INFO1_DESC_PKTERROR_SHIFT (28U)
+#define CPPI5_INFO1_DESC_PKTERROR_MASK GENMASK(31, 28)
+#define CPPI5_INFO1_HDESC_PSFLGS_SHIFT (24U)
+#define CPPI5_INFO1_HDESC_PSFLGS_MASK GENMASK(27, 24)
+#define CPPI5_INFO1_DESC_PKTID_SHIFT (14U)
+#define CPPI5_INFO1_DESC_PKTID_MASK GENMASK(23, 14)
+#define CPPI5_INFO1_DESC_FLOWID_SHIFT (0)
+#define CPPI5_INFO1_DESC_FLOWID_MASK GENMASK(13, 0)
+#define CPPI5_INFO1_DESC_FLOWID_DEFAULT CPPI5_INFO1_DESC_FLOWID_MASK
+
+#define CPPI5_INFO2_HDESC_PKTTYPE_SHIFT (27U)
+#define CPPI5_INFO2_HDESC_PKTTYPE_MASK GENMASK(31, 27)
+/* Return Policy: 0 - Entire packet 1 - Each buffer */
+#define CPPI5_INFO2_HDESC_RETPOLICY BIT(18)
+/*
+ * Early Return:
+ * 0 = desc pointers should be returned after all reads have been completed
+ * 1 = desc pointers should be returned immediately upon fetching
+ * the descriptor and beginning to transfer data.
+ */
+#define CPPI5_INFO2_HDESC_EARLYRET BIT(17)
+/*
+ * Return Push Policy:
+ * 0 = Descriptor must be returned to tail of queue
+ * 1 = Descriptor must be returned to head of queue
+ */
+#define CPPI5_INFO2_DESC_RETPUSHPOLICY BIT(16)
+#define CPPI5_INFO2_DESC_RETP_MASK GENMASK(18, 16)
+
+#define CPPI5_INFO2_DESC_RETQ_SHIFT (0)
+#define CPPI5_INFO2_DESC_RETQ_MASK GENMASK(15, 0)
+
+#define CPPI5_INFO3_DESC_SRCTAG_SHIFT (16U)
+#define CPPI5_INFO3_DESC_SRCTAG_MASK GENMASK(31, 16)
+#define CPPI5_INFO3_DESC_DSTTAG_SHIFT (0)
+#define CPPI5_INFO3_DESC_DSTTAG_MASK GENMASK(15, 0)
+
+#define CPPI5_BUFINFO1_HDESC_DATA_LEN_SHIFT (0)
+#define CPPI5_BUFINFO1_HDESC_DATA_LEN_MASK GENMASK(27, 0)
+
+#define CPPI5_OBUFINFO0_HDESC_BUF_LEN_SHIFT (0)
+#define CPPI5_OBUFINFO0_HDESC_BUF_LEN_MASK GENMASK(27, 0)
+
+/**
+ * struct cppi5_desc_epib_t - Host Packet Descriptor Extended Packet Info Block
+ * @timestamp: word 0: application specific timestamp
+ * @sw_info0: word 1: Software Info 0
+ * @sw_info1: word 2: Software Info 1
+ * @sw_info2: word 3: Software Info 2
+ */
+struct cppi5_desc_epib_t {
+ u32 timestamp; /* w0: application specific timestamp */
+ u32 sw_info0; /* w1: Software Info 0 */
+ u32 sw_info1; /* w2: Software Info 1 */
+ u32 sw_info2; /* w3: Software Info 2 */
+};
+
+/**
+ * struct cppi5_monolithic_desc_t - Monolithic-mode packet descriptor
+ * @hdr: Descriptor header
+ * @epib[0]: Extended Packet Info Data (optional, 4 words), and/or
+ * Protocol Specific Data (optional, 0-128 bytes in
+ * multiples of 4), and/or
+ * Other Software Data (0-N bytes, optional)
+ */
+struct cppi5_monolithic_desc_t {
+ struct cppi5_desc_hdr_t hdr;
+ u32 epib[0];
+};
+
+#define CPPI5_INFO2_MDESC_DATA_OFFSET_SHIFT (18U)
+#define CPPI5_INFO2_MDESC_DATA_OFFSET_MASK GENMASK(26, 18)
+
+/*
+ * Reload Count:
+ * 0 = Finish the packet and place the descriptor back on the return queue
+ * 1-0x1ff = Vector to the Reload Index and resume processing
+ * 0x1ff indicates perpetual loop, infinite reload until the channel is stopped
+ */
+#define CPPI5_INFO0_TRDESC_RLDCNT_SHIFT (20U)
+#define CPPI5_INFO0_TRDESC_RLDCNT_MASK GENMASK(28, 20)
+#define CPPI5_INFO0_TRDESC_RLDCNT_MAX (0x1ff)
+#define CPPI5_INFO0_TRDESC_RLDCNT_INFINITE CPPI5_INFO0_TRDESC_RLDCNT_MAX
+#define CPPI5_INFO0_TRDESC_RLDIDX_SHIFT (14U)
+#define CPPI5_INFO0_TRDESC_RLDIDX_MASK GENMASK(19, 14)
+#define CPPI5_INFO0_TRDESC_RLDIDX_MAX (0x3f)
+#define CPPI5_INFO0_TRDESC_LASTIDX_SHIFT (0)
+#define CPPI5_INFO0_TRDESC_LASTIDX_MASK GENMASK(13, 0)
+
+#define CPPI5_INFO1_TRDESC_RECSIZE_SHIFT (24U)
+#define CPPI5_INFO1_TRDESC_RECSIZE_MASK GENMASK(26, 24)
+#define CPPI5_INFO1_TRDESC_RECSIZE_VAL_16B (0)
+#define CPPI5_INFO1_TRDESC_RECSIZE_VAL_32B (1U)
+#define CPPI5_INFO1_TRDESC_RECSIZE_VAL_64B (2U)
+#define CPPI5_INFO1_TRDESC_RECSIZE_VAL_128B (3U)
+
+static inline void cppi5_desc_dump(void *desc, u32 size)
+{
+ print_hex_dump(KERN_ERR, "dump udmap_desc: ", DUMP_PREFIX_NONE,
+ 32, 4, desc, size, false);
+}
+
+#define CPPI5_TDCM_MARKER (0x1)
+/**
+ * cppi5_desc_is_tdcm - check if the paddr indicates Teardown Complete Message
+ * @paddr: Physical address of the packet popped from the ring
+ *
+ * Returns true if the address indicates TDCM
+ */
+static inline bool cppi5_desc_is_tdcm(dma_addr_t paddr)
+{
+ return (paddr & CPPI5_TDCM_MARKER) ? true : false;
+}
+
+/**
+ * cppi5_desc_get_type - get descriptor type
+ * @desc_hdr: packet descriptor/TR header
+ *
+ * Returns descriptor type:
+ * CPPI5_INFO0_DESC_TYPE_VAL_HOST
+ * CPPI5_INFO0_DESC_TYPE_VAL_MONO
+ * CPPI5_INFO0_DESC_TYPE_VAL_TR
+ */
+static inline u32 cppi5_desc_get_type(struct cppi5_desc_hdr_t *desc_hdr)
+{
+ return (desc_hdr->pkt_info0 & CPPI5_INFO0_HDESC_TYPE_MASK) >>
+ CPPI5_INFO0_HDESC_TYPE_SHIFT;
+}
+
+/**
+ * cppi5_desc_get_errflags - get Error Flags from Desc
+ * @desc_hdr: packet/TR descriptor header
+ *
+ * Returns Error Flags from Packet/TR Descriptor
+ */
+static inline u32 cppi5_desc_get_errflags(struct cppi5_desc_hdr_t *desc_hdr)
+{
+ return (desc_hdr->pkt_info1 & CPPI5_INFO1_DESC_PKTERROR_MASK) >>
+ CPPI5_INFO1_DESC_PKTERROR_SHIFT;
+}
+
+/**
+ * cppi5_desc_get_pktids - get Packet and Flow ids from Desc
+ * @desc_hdr: packet/TR descriptor header
+ * @pkt_id: Packet ID
+ * @flow_id: Flow ID
+ *
+ * Returns Packet and Flow ids from packet/TR descriptor
+ */
+static inline void cppi5_desc_get_pktids(struct cppi5_desc_hdr_t *desc_hdr,
+ u32 *pkt_id, u32 *flow_id)
+{
+ *pkt_id = (desc_hdr->pkt_info1 & CPPI5_INFO1_DESC_PKTID_MASK) >>
+ CPPI5_INFO1_DESC_PKTID_SHIFT;
+ *flow_id = (desc_hdr->pkt_info1 & CPPI5_INFO1_DESC_FLOWID_MASK) >>
+ CPPI5_INFO1_DESC_FLOWID_SHIFT;
+}
+
+/**
+ * cppi5_desc_set_pktids - set Packet and Flow ids in Desc
+ * @desc_hdr: packet/TR descriptor header
+ * @pkt_id: Packet ID
+ * @flow_id: Flow ID
+ */
+static inline void cppi5_desc_set_pktids(struct cppi5_desc_hdr_t *desc_hdr,
+ u32 pkt_id, u32 flow_id)
+{
+ desc_hdr->pkt_info1 &= ~(CPPI5_INFO1_DESC_PKTID_MASK |
+ CPPI5_INFO1_DESC_FLOWID_MASK);
+ desc_hdr->pkt_info1 |= (pkt_id << CPPI5_INFO1_DESC_PKTID_SHIFT) &
+ CPPI5_INFO1_DESC_PKTID_MASK;
+ desc_hdr->pkt_info1 |= (flow_id << CPPI5_INFO1_DESC_FLOWID_SHIFT) &
+ CPPI5_INFO1_DESC_FLOWID_MASK;
+}
+
+/**
+ * cppi5_desc_set_retpolicy - set Packet Return Policy in Desc
+ * @desc_hdr: packet/TR descriptor header
+ * @flags: flags, supported values
+ * CPPI5_INFO2_HDESC_RETPOLICY
+ * CPPI5_INFO2_HDESC_EARLYRET
+ * CPPI5_INFO2_DESC_RETPUSHPOLICY
+ * @return_ring_id: Packet Return Queue/Ring id, value 0xFFFF reserved
+ */
+static inline void cppi5_desc_set_retpolicy(struct cppi5_desc_hdr_t *desc_hdr,
+ u32 flags, u32 return_ring_id)
+{
+ desc_hdr->pkt_info2 &= ~(CPPI5_INFO2_DESC_RETP_MASK |
+ CPPI5_INFO2_DESC_RETQ_MASK);
+ desc_hdr->pkt_info2 |= flags & CPPI5_INFO2_DESC_RETP_MASK;
+ desc_hdr->pkt_info2 |= return_ring_id & CPPI5_INFO2_DESC_RETQ_MASK;
+}
+
+/**
+ * cppi5_desc_get_tags_ids - get Packet Src/Dst Tags from Desc
+ * @desc_hdr: packet/TR descriptor header
+ * @src_tag_id: Source Tag
+ * @dst_tag_id: Dest Tag
+ *
+ * Returns Packet Src/Dst Tags from packet/TR descriptor
+ */
+static inline void cppi5_desc_get_tags_ids(struct cppi5_desc_hdr_t *desc_hdr,
+ u32 *src_tag_id, u32 *dst_tag_id)
+{
+ if (src_tag_id)
+ *src_tag_id = (desc_hdr->src_dst_tag &
+ CPPI5_INFO3_DESC_SRCTAG_MASK) >>
+ CPPI5_INFO3_DESC_SRCTAG_SHIFT;
+ if (dst_tag_id)
+ *dst_tag_id = desc_hdr->src_dst_tag &
+ CPPI5_INFO3_DESC_DSTTAG_MASK;
+}
+
+/**
+ * cppi5_desc_set_tags_ids - set Packet Src/Dst Tags in HDesc
+ * @desc_hdr: packet/TR descriptor header
+ * @src_tag_id: Source Tag
+ * @dst_tag_id: Dest Tag
+ *
+ * Sets Packet Src/Dst Tags in the packet/TR descriptor
+ */
+static inline void cppi5_desc_set_tags_ids(struct cppi5_desc_hdr_t *desc_hdr,
+ u32 src_tag_id, u32 dst_tag_id)
+{
+ desc_hdr->src_dst_tag = (src_tag_id << CPPI5_INFO3_DESC_SRCTAG_SHIFT) &
+ CPPI5_INFO3_DESC_SRCTAG_MASK;
+ desc_hdr->src_dst_tag |= dst_tag_id & CPPI5_INFO3_DESC_DSTTAG_MASK;
+}
+
+/**
+ * cppi5_hdesc_calc_size - Calculate Host Packet Descriptor size
+ * @epib: is EPIB present
+ * @psdata_size: PSDATA size
+ * @sw_data_size: SWDATA size
+ *
+ * Returns required Host Packet Descriptor size
+ * 0 - if PSDATA > CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE
+ */
+static inline u32 cppi5_hdesc_calc_size(bool epib, u32 psdata_size,
+ u32 sw_data_size)
+{
+ u32 desc_size;
+
+ if (psdata_size > CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE)
+ return 0;
+
+ desc_size = sizeof(struct cppi5_host_desc_t) + psdata_size +
+ sw_data_size;
+
+ if (epib)
+ desc_size += CPPI5_INFO0_HDESC_EPIB_SIZE;
+
+ return ALIGN(desc_size, CPPI5_DESC_MIN_ALIGN);
+}
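+
+/*
+ * Sizing example (a minimal sketch with illustrative values): with EPIB
+ * present, 16 bytes of PSDATA and 8 bytes of SWDATA, the result is
+ * sizeof(struct cppi5_host_desc_t) + 16 + 8 + CPPI5_INFO0_HDESC_EPIB_SIZE,
+ * rounded up to CPPI5_DESC_MIN_ALIGN:
+ *
+ *	u32 hdesc_size = cppi5_hdesc_calc_size(true, 16, 8);
+ */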
+
+/**
+ * cppi5_hdesc_init - Init Host Packet Descriptor
+ * @desc: Host packet descriptor
+ * @flags: supported values
+ * CPPI5_INFO0_HDESC_EPIB_PRESENT
+ * CPPI5_INFO0_HDESC_PSINFO_LOCATION
+ * @psdata_size: PSDATA size
+ *
+ * Initializes the descriptor header fields (type, flags, PSDATA size) and
+ * clears next_desc.
+ */
+static inline void cppi5_hdesc_init(struct cppi5_host_desc_t *desc, u32 flags,
+ u32 psdata_size)
+{
+ desc->hdr.pkt_info0 = (CPPI5_INFO0_DESC_TYPE_VAL_HOST <<
+ CPPI5_INFO0_HDESC_TYPE_SHIFT) | (flags);
+ desc->hdr.pkt_info0 |= ((psdata_size >> 2) <<
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT) &
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK;
+ desc->next_desc = 0;
+}
+
+/**
+ * cppi5_hdesc_update_flags - Replace descriptor flags
+ * @desc: Host packet descriptor
+ * @flags: supported values
+ * CPPI5_INFO0_HDESC_EPIB_PRESENT
+ * CPPI5_INFO0_HDESC_PSINFO_LOCATION
+ */
+static inline void cppi5_hdesc_update_flags(struct cppi5_host_desc_t *desc,
+ u32 flags)
+{
+ desc->hdr.pkt_info0 &= ~(CPPI5_INFO0_HDESC_EPIB_PRESENT |
+ CPPI5_INFO0_HDESC_PSINFO_LOCATION);
+ desc->hdr.pkt_info0 |= flags;
+}
+
+/**
+ * cppi5_hdesc_update_psdata_size - Replace PSdata size
+ * @desc: Host packet descriptor
+ * @psdata_size: PSDATA size
+ */
+static inline void
+cppi5_hdesc_update_psdata_size(struct cppi5_host_desc_t *desc, u32 psdata_size)
+{
+ desc->hdr.pkt_info0 &= ~CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK;
+ desc->hdr.pkt_info0 |= ((psdata_size >> 2) <<
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT) &
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK;
+}
+
+/**
+ * cppi5_hdesc_get_psdata_size - get PSdata size in bytes
+ * @desc: Host packet descriptor
+ */
+static inline u32 cppi5_hdesc_get_psdata_size(struct cppi5_host_desc_t *desc)
+{
+ u32 psdata_size = 0;
+
+ if (!(desc->hdr.pkt_info0 & CPPI5_INFO0_HDESC_PSINFO_LOCATION))
+ psdata_size = (desc->hdr.pkt_info0 &
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK) >>
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT;
+
+ return (psdata_size << 2);
+}
+
+/**
+ * cppi5_hdesc_get_pktlen - get Packet Length from HDesc
+ * @desc: Host packet descriptor
+ *
+ * Returns Packet Length from Host Packet Descriptor
+ */
+static inline u32 cppi5_hdesc_get_pktlen(struct cppi5_host_desc_t *desc)
+{
+ return (desc->hdr.pkt_info0 & CPPI5_INFO0_HDESC_PKTLEN_MASK);
+}
+
+/**
+ * cppi5_hdesc_set_pktlen - set Packet Length in HDesc
+ * @desc: Host packet descriptor
+ */
+static inline void cppi5_hdesc_set_pktlen(struct cppi5_host_desc_t *desc,
+ u32 pkt_len)
+{
+ desc->hdr.pkt_info0 &= ~CPPI5_INFO0_HDESC_PKTLEN_MASK;
+ desc->hdr.pkt_info0 |= (pkt_len & CPPI5_INFO0_HDESC_PKTLEN_MASK);
+}
+
+/**
+ * cppi5_hdesc_get_psflags - get Protocol Specific Flags from HDesc
+ * @desc: Host packet descriptor
+ *
+ * Returns Protocol Specific Flags from Host Packet Descriptor
+ */
+static inline u32 cppi5_hdesc_get_psflags(struct cppi5_host_desc_t *desc)
+{
+ return (desc->hdr.pkt_info1 & CPPI5_INFO1_HDESC_PSFLGS_MASK) >>
+ CPPI5_INFO1_HDESC_PSFLGS_SHIFT;
+}
+
+/**
+ * cppi5_hdesc_set_psflags - set Protocol Specific Flags in HDesc
+ * @desc: Host packet descriptor
+ */
+static inline void cppi5_hdesc_set_psflags(struct cppi5_host_desc_t *desc,
+ u32 ps_flags)
+{
+ desc->hdr.pkt_info1 &= ~CPPI5_INFO1_HDESC_PSFLGS_MASK;
+ desc->hdr.pkt_info1 |= (ps_flags <<
+ CPPI5_INFO1_HDESC_PSFLGS_SHIFT) &
+ CPPI5_INFO1_HDESC_PSFLGS_MASK;
+}
+
+/**
+ * cppi5_hdesc_get_pkttype - get Packet Type from HDesc
+ * @desc: Host packet descriptor
+ */
+static inline u32 cppi5_hdesc_get_pkttype(struct cppi5_host_desc_t *desc)
+{
+ return (desc->hdr.pkt_info2 & CPPI5_INFO2_HDESC_PKTTYPE_MASK) >>
+ CPPI5_INFO2_HDESC_PKTTYPE_SHIFT;
+}
+
+/**
+ * cppi5_hdesc_set_pkttype - set Packet Type in HDesc
+ * @desc: Host packet descriptor
+ * @pkt_type: Packet Type
+ */
+static inline void cppi5_hdesc_set_pkttype(struct cppi5_host_desc_t *desc,
+ u32 pkt_type)
+{
+ desc->hdr.pkt_info2 &= ~CPPI5_INFO2_HDESC_PKTTYPE_MASK;
+ desc->hdr.pkt_info2 |=
+ (pkt_type << CPPI5_INFO2_HDESC_PKTTYPE_SHIFT) &
+ CPPI5_INFO2_HDESC_PKTTYPE_MASK;
+}
+
+/**
+ * cppi5_hdesc_attach_buf - attach buffer to HDesc
+ * @desc: Host packet descriptor
+ * @buf: Buffer physical address
+ * @buf_data_len: Buffer length
+ * @obuf: Original Buffer physical address
+ * @obuf_len: Original Buffer length
+ *
+ * Attaches buffer to Host Packet Descriptor
+ */
+static inline void cppi5_hdesc_attach_buf(struct cppi5_host_desc_t *desc,
+ dma_addr_t buf, u32 buf_data_len,
+ dma_addr_t obuf, u32 obuf_len)
+{
+ desc->buf_ptr = buf;
+ desc->buf_info1 = buf_data_len & CPPI5_BUFINFO1_HDESC_DATA_LEN_MASK;
+ desc->org_buf_ptr = obuf;
+ desc->org_buf_len = obuf_len & CPPI5_OBUFINFO0_HDESC_BUF_LEN_MASK;
+}
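+
+/*
+ * Example: a minimal TX descriptor setup sketch; buf_dma and pkt_len are
+ * hypothetical values provided by the caller's DMA mapping:
+ *
+ *	cppi5_hdesc_init(desc, 0, 0);
+ *	cppi5_hdesc_set_pktlen(desc, pkt_len);
+ *	cppi5_hdesc_attach_buf(desc, buf_dma, pkt_len, buf_dma, pkt_len);
+ */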
+
+static inline void cppi5_hdesc_get_obuf(struct cppi5_host_desc_t *desc,
+ dma_addr_t *obuf, u32 *obuf_len)
+{
+ *obuf = desc->org_buf_ptr;
+ *obuf_len = desc->org_buf_len & CPPI5_OBUFINFO0_HDESC_BUF_LEN_MASK;
+}
+
+static inline void cppi5_hdesc_reset_to_original(struct cppi5_host_desc_t *desc)
+{
+ desc->buf_ptr = desc->org_buf_ptr;
+ desc->buf_info1 = desc->org_buf_len;
+}
+
+/**
+ * cppi5_hdesc_link_hbdesc - link Host Buffer Descriptor to HDesc
+ * @desc: Host Packet Descriptor
+ * @hbuf_desc: Host Buffer Descriptor physical address
+ *
+ * Adds and links a Host Buffer Descriptor to the HDesc
+ */
+static inline void cppi5_hdesc_link_hbdesc(struct cppi5_host_desc_t *desc,
+ dma_addr_t hbuf_desc)
+{
+ desc->next_desc = hbuf_desc;
+}
+
+static inline dma_addr_t
+cppi5_hdesc_get_next_hbdesc(struct cppi5_host_desc_t *desc)
+{
+ return (dma_addr_t)desc->next_desc;
+}
+
+static inline void cppi5_hdesc_reset_hbdesc(struct cppi5_host_desc_t *desc)
+{
+ desc->hdr = (struct cppi5_desc_hdr_t) { 0 };
+ desc->next_desc = 0;
+}
+
+/**
+ * cppi5_hdesc_epib_present - check if EPIB present
+ * @desc_hdr: packet descriptor/TR header
+ *
+ * Returns true if EPIB present in the packet
+ */
+static inline bool cppi5_hdesc_epib_present(struct cppi5_desc_hdr_t *desc_hdr)
+{
+ return !!(desc_hdr->pkt_info0 & CPPI5_INFO0_HDESC_EPIB_PRESENT);
+}
+
+/**
+ * cppi5_hdesc_get_psdata - Get pointer on PSDATA
+ * @desc: Host packet descriptor
+ *
+ * Returns a pointer to PSDATA in the HDesc, or
+ * NULL if psdata is placed at the start of the data buffer.
+ */
+static inline void *cppi5_hdesc_get_psdata(struct cppi5_host_desc_t *desc)
+{
+ u32 psdata_size;
+ void *psdata;
+
+ if (desc->hdr.pkt_info0 & CPPI5_INFO0_HDESC_PSINFO_LOCATION)
+ return NULL;
+
+ psdata_size = (desc->hdr.pkt_info0 &
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK) >>
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT;
+
+ if (!psdata_size)
+ return NULL;
+
+ psdata = &desc->epib;
+
+ if (cppi5_hdesc_epib_present(&desc->hdr))
+ psdata += CPPI5_INFO0_HDESC_EPIB_SIZE;
+
+ return psdata;
+}
+
+/**
+ * cppi5_hdesc_get_swdata - Get pointer on swdata
+ * @desc: Host packet descriptor
+ *
+ * Returns a pointer to SWDATA in the HDesc.
+ * NOTE: it is the caller's responsibility to ensure the hdesc actually has swdata.
+ */
+static inline void *cppi5_hdesc_get_swdata(struct cppi5_host_desc_t *desc)
+{
+ u32 psdata_size = 0;
+ void *swdata;
+
+ if (!(desc->hdr.pkt_info0 & CPPI5_INFO0_HDESC_PSINFO_LOCATION))
+ psdata_size = (desc->hdr.pkt_info0 &
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK) >>
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT;
+
+ swdata = &desc->epib;
+
+ if (cppi5_hdesc_epib_present(&desc->hdr))
+ swdata += CPPI5_INFO0_HDESC_EPIB_SIZE;
+
+ swdata += (psdata_size << 2);
+
+ return swdata;
+}
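+
+/*
+ * Resulting Host descriptor memory layout sketch (presence and sizes
+ * depend on the EPIB_PRESENT flag and the configured PSDATA size):
+ *
+ *	struct cppi5_host_desc_t
+ *	EPIB (CPPI5_INFO0_HDESC_EPIB_SIZE bytes, if EPIB_PRESENT is set)
+ *	PSDATA (psdata_size bytes, if located in the descriptor)
+ *	SWDATA (remaining space)
+ */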
+
+/* ================================== TR ================================== */
+
+#define CPPI5_TR_TYPE_SHIFT (0U)
+#define CPPI5_TR_TYPE_MASK GENMASK(3, 0)
+#define CPPI5_TR_STATIC BIT(4)
+#define CPPI5_TR_WAIT BIT(5)
+#define CPPI5_TR_EVENT_SIZE_SHIFT (6U)
+#define CPPI5_TR_EVENT_SIZE_MASK GENMASK(7, 6)
+#define CPPI5_TR_TRIGGER0_SHIFT (8U)
+#define CPPI5_TR_TRIGGER0_MASK GENMASK(9, 8)
+#define CPPI5_TR_TRIGGER0_TYPE_SHIFT (10U)
+#define CPPI5_TR_TRIGGER0_TYPE_MASK GENMASK(11, 10)
+#define CPPI5_TR_TRIGGER1_SHIFT (12U)
+#define CPPI5_TR_TRIGGER1_MASK GENMASK(13, 12)
+#define CPPI5_TR_TRIGGER1_TYPE_SHIFT (14U)
+#define CPPI5_TR_TRIGGER1_TYPE_MASK GENMASK(15, 14)
+#define CPPI5_TR_CMD_ID_SHIFT (16U)
+#define CPPI5_TR_CMD_ID_MASK GENMASK(23, 16)
+#define CPPI5_TR_CSF_FLAGS_SHIFT (24U)
+#define CPPI5_TR_CSF_FLAGS_MASK GENMASK(31, 24)
+#define CPPI5_TR_CSF_SA_INDIRECT BIT(0)
+#define CPPI5_TR_CSF_DA_INDIRECT BIT(1)
+#define CPPI5_TR_CSF_SUPR_EVT BIT(2)
+#define CPPI5_TR_CSF_EOL_ADV_SHIFT (4U)
+#define CPPI5_TR_CSF_EOL_ADV_MASK GENMASK(6, 4)
+#define CPPI5_TR_CSF_EOP BIT(7)
+
+/**
+ * enum cppi5_tr_types - TR types
+ * @CPPI5_TR_TYPE0: One dimensional data move
+ * @CPPI5_TR_TYPE1: Two dimensional data move
+ * @CPPI5_TR_TYPE2: Three dimensional data move
+ * @CPPI5_TR_TYPE3: Four dimensional data move
+ * @CPPI5_TR_TYPE4: Four dimensional data move with data formatting
+ * @CPPI5_TR_TYPE5: Four dimensional Cache Warm
+ * @CPPI5_TR_TYPE8: Four Dimensional Block Move
+ * @CPPI5_TR_TYPE9: Four Dimensional Block Move with Repacking
+ * @CPPI5_TR_TYPE10: Two Dimensional Block Move
+ * @CPPI5_TR_TYPE11: Two Dimensional Block Move with Repacking
+ * @CPPI5_TR_TYPE15: Four Dimensional Block Move with Repacking and
+ * Indirection
+ */
+enum cppi5_tr_types {
+ CPPI5_TR_TYPE0 = 0,
+ CPPI5_TR_TYPE1,
+ CPPI5_TR_TYPE2,
+ CPPI5_TR_TYPE3,
+ CPPI5_TR_TYPE4,
+ CPPI5_TR_TYPE5,
+ /* type6-7: Reserved */
+ CPPI5_TR_TYPE8 = 8,
+ CPPI5_TR_TYPE9,
+ CPPI5_TR_TYPE10,
+ CPPI5_TR_TYPE11,
+ /* type12-14: Reserved */
+ CPPI5_TR_TYPE15 = 15,
+ CPPI5_TR_TYPE_MAX
+};
+
+/**
+ * enum cppi5_tr_event_size - TR Flags EVENT_SIZE field specifies when an event
+ * is generated for each TR.
+ * @CPPI5_TR_EVENT_SIZE_COMPLETION: When TR is complete and all status for
+ * the TR has been received
+ * @CPPI5_TR_EVENT_SIZE_ICNT1_DEC: Type 0: when the last data transaction
+ * is sent for the TR
+ * Type 1-11: when ICNT1 is decremented
+ * @CPPI5_TR_EVENT_SIZE_ICNT2_DEC: Type 0-1,10-11: when the last data
+ * transaction is sent for the TR
+ * All other types: when ICNT2 is
+ * decremented
+ * @CPPI5_TR_EVENT_SIZE_ICNT3_DEC: Type 0-2,10-11: when the last data
+ * transaction is sent for the TR
+ * All other types: when ICNT3 is
+ * decremented
+ */
+enum cppi5_tr_event_size {
+ CPPI5_TR_EVENT_SIZE_COMPLETION,
+ CPPI5_TR_EVENT_SIZE_ICNT1_DEC,
+ CPPI5_TR_EVENT_SIZE_ICNT2_DEC,
+ CPPI5_TR_EVENT_SIZE_ICNT3_DEC,
+ CPPI5_TR_EVENT_SIZE_MAX
+};
+
+/**
+ * enum cppi5_tr_trigger - TR Flags TRIGGERx field specifies the type of trigger
+ * used to enable the TR to transfer data as specified
+ * by TRIGGERx_TYPE field.
+ * @CPPI5_TR_TRIGGER_NONE: No trigger
+ * @CPPI5_TR_TRIGGER_GLOBAL0: Global trigger 0
+ * @CPPI5_TR_TRIGGER_GLOBAL1: Global trigger 1
+ * @CPPI5_TR_TRIGGER_LOCAL_EVENT: Local Event
+ */
+enum cppi5_tr_trigger {
+ CPPI5_TR_TRIGGER_NONE,
+ CPPI5_TR_TRIGGER_GLOBAL0,
+ CPPI5_TR_TRIGGER_GLOBAL1,
+ CPPI5_TR_TRIGGER_LOCAL_EVENT,
+ CPPI5_TR_TRIGGER_MAX
+};
+
+/**
+ * enum cppi5_tr_trigger_type - TR Flags TRIGGERx_TYPE field specifies the type
+ * of data transfer that will be enabled by
+ * receiving a trigger as specified by TRIGGERx.
+ * @CPPI5_TR_TRIGGER_TYPE_ICNT1_DEC: The second inner most loop (ICNT1) will
+ * be decremented by 1
+ * @CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC: The third inner most loop (ICNT2) will
+ * be decremented by 1
+ * @CPPI5_TR_TRIGGER_TYPE_ICNT3_DEC: The outer most loop (ICNT3) will be
+ * decremented by 1
+ * @CPPI5_TR_TRIGGER_TYPE_ALL: The entire TR will be allowed to
+ * complete
+ */
+enum cppi5_tr_trigger_type {
+ CPPI5_TR_TRIGGER_TYPE_ICNT1_DEC,
+ CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC,
+ CPPI5_TR_TRIGGER_TYPE_ICNT3_DEC,
+ CPPI5_TR_TRIGGER_TYPE_ALL,
+ CPPI5_TR_TRIGGER_TYPE_MAX
+};
+
+typedef u32 cppi5_tr_flags_t;
+
+/**
+ * struct cppi5_tr_type0_t - Type 0 (One dimensional data move) TR (16 byte)
+ * @flags: TR flags (type, triggers, event, configuration)
+ * @icnt0: Total loop iteration count for level 0 (innermost)
+ * @_reserved: Not used
+ * @addr: Starting address for the source data or destination data
+ */
+struct cppi5_tr_type0_t {
+ cppi5_tr_flags_t flags;
+ u16 icnt0;
+ u16 _reserved;
+ u64 addr;
+} __aligned(16) __packed;
+
+/**
+ * struct cppi5_tr_type1_t - Type 1 (Two dimensional data move) TR (32 byte)
+ * @flags: TR flags (type, triggers, event, configuration)
+ * @icnt0: Total loop iteration count for level 0 (innermost)
+ * @icnt1: Total loop iteration count for level 1
+ * @addr: Starting address for the source data or destination data
+ * @dim1: Signed dimension for loop level 1
+ */
+struct cppi5_tr_type1_t {
+ cppi5_tr_flags_t flags;
+ u16 icnt0;
+ u16 icnt1;
+ u64 addr;
+ s32 dim1;
+} __aligned(32) __packed;
+
+/**
+ * struct cppi5_tr_type2_t - Type 2 (Three dimensional data move) TR (32 byte)
+ * @flags: TR flags (type, triggers, event, configuration)
+ * @icnt0: Total loop iteration count for level 0 (innermost)
+ * @icnt1: Total loop iteration count for level 1
+ * @addr: Starting address for the source data or destination data
+ * @dim1: Signed dimension for loop level 1
+ * @icnt2: Total loop iteration count for level 2
+ * @_reserved: Not used
+ * @dim2: Signed dimension for loop level 2
+ */
+struct cppi5_tr_type2_t {
+ cppi5_tr_flags_t flags;
+ u16 icnt0;
+ u16 icnt1;
+ u64 addr;
+ s32 dim1;
+ u16 icnt2;
+ u16 _reserved;
+ s32 dim2;
+} __aligned(32) __packed;
+
+/**
+ * struct cppi5_tr_type3_t - Type 3 (Four dimensional data move) TR (32 byte)
+ * @flags: TR flags (type, triggers, event, configuration)
+ * @icnt0: Total loop iteration count for level 0 (innermost)
+ * @icnt1: Total loop iteration count for level 1
+ * @addr: Starting address for the source data or destination data
+ * @dim1: Signed dimension for loop level 1
+ * @icnt2: Total loop iteration count for level 2
+ * @icnt3: Total loop iteration count for level 3 (outermost)
+ * @dim2: Signed dimension for loop level 2
+ * @dim3: Signed dimension for loop level 3
+ */
+struct cppi5_tr_type3_t {
+ cppi5_tr_flags_t flags;
+ u16 icnt0;
+ u16 icnt1;
+ u64 addr;
+ s32 dim1;
+ u16 icnt2;
+ u16 icnt3;
+ s32 dim2;
+ s32 dim3;
+} __aligned(32) __packed;
+
+/**
+ * struct cppi5_tr_type15_t - Type 15 (Four Dimensional Block Copy with
+ * Repacking and Indirection Support) TR (64 byte)
+ * @flags: TR flags (type, triggers, event, configuration)
+ * @icnt0: Total loop iteration count for level 0 (innermost) for
+ * source
+ * @icnt1: Total loop iteration count for level 1 for source
+ * @addr: Starting address for the source data
+ * @dim1: Signed dimension for loop level 1 for source
+ * @icnt2: Total loop iteration count for level 2 for source
+ * @icnt3: Total loop iteration count for level 3 (outermost) for
+ * source
+ * @dim2: Signed dimension for loop level 2 for source
+ * @dim3: Signed dimension for loop level 3 for source
+ * @_reserved: Not used
+ * @ddim1: Signed dimension for loop level 1 for destination
+ * @daddr: Starting address for the destination data
+ * @ddim2: Signed dimension for loop level 2 for destination
+ * @ddim3: Signed dimension for loop level 3 for destination
+ * @dicnt0: Total loop iteration count for level 0 (innermost) for
+ * destination
+ * @dicnt1: Total loop iteration count for level 1 for destination
+ * @dicnt2: Total loop iteration count for level 2 for destination
+ * @dicnt3: Total loop iteration count for level 3 (outermost) for
+ * destination
+ */
+struct cppi5_tr_type15_t {
+ cppi5_tr_flags_t flags;
+ u16 icnt0;
+ u16 icnt1;
+ u64 addr;
+ s32 dim1;
+ u16 icnt2;
+ u16 icnt3;
+ s32 dim2;
+ s32 dim3;
+ u32 _reserved;
+ s32 ddim1;
+ u64 daddr;
+ s32 ddim2;
+ s32 ddim3;
+ u16 dicnt0;
+ u16 dicnt1;
+ u16 dicnt2;
+ u16 dicnt3;
+} __aligned(64) __packed;
+
+/**
+ * struct cppi5_tr_resp_t - TR response record
+ * @status: Status type and info
+ * @_reserved: Not used
+ * @cmd_id: Command ID for the TR for TR identification
+ * @flags: Configuration Specific Flags
+ */
+struct cppi5_tr_resp_t {
+ u8 status;
+ u8 _reserved;
+ u8 cmd_id;
+ u8 flags;
+} __packed;
+
+#define CPPI5_TR_RESPONSE_STATUS_TYPE_SHIFT (0U)
+#define CPPI5_TR_RESPONSE_STATUS_TYPE_MASK GENMASK(3, 0)
+#define CPPI5_TR_RESPONSE_STATUS_INFO_SHIFT (4U)
+#define CPPI5_TR_RESPONSE_STATUS_INFO_MASK GENMASK(7, 4)
+#define CPPI5_TR_RESPONSE_CMDID_SHIFT (16U)
+#define CPPI5_TR_RESPONSE_CMDID_MASK GENMASK(23, 16)
+#define CPPI5_TR_RESPONSE_CFG_SPECIFIC_SHIFT (24U)
+#define CPPI5_TR_RESPONSE_CFG_SPECIFIC_MASK GENMASK(31, 24)
+
+/**
+ * enum cppi5_tr_resp_status_type - TR Response Status Type field is used to
+ * determine what type of status is being
+ * returned.
+ * @CPPI5_TR_RESPONSE_STATUS_NONE: No error, completion: completed
+ * @CPPI5_TR_RESPONSE_STATUS_TRANSFER_ERR: Transfer Error, completion: none
+ * or partially completed
+ * @CPPI5_TR_RESPONSE_STATUS_ABORTED_ERR: Aborted Error, completion: none
+ * or partially completed
+ * @CPPI5_TR_RESPONSE_STATUS_SUBMISSION_ERR: Submission Error, completion:
+ * none
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_ERR: Unsupported Error, completion:
+ * none
+ * @CPPI5_TR_RESPONSE_STATUS_TRANSFER_EXCEPTION: Transfer Exception, completion:
+ * partially completed
+ * @CPPI5_TR_RESPONSE_STATUS__TEARDOWN_FLUSH: Teardown Flush, completion: none
+ */
+enum cppi5_tr_resp_status_type {
+ CPPI5_TR_RESPONSE_STATUS_NONE,
+ CPPI5_TR_RESPONSE_STATUS_TRANSFER_ERR,
+ CPPI5_TR_RESPONSE_STATUS_ABORTED_ERR,
+ CPPI5_TR_RESPONSE_STATUS_SUBMISSION_ERR,
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_ERR,
+ CPPI5_TR_RESPONSE_STATUS_TRANSFER_EXCEPTION,
+ CPPI5_TR_RESPONSE_STATUS__TEARDOWN_FLUSH,
+ CPPI5_TR_RESPONSE_STATUS_MAX
+};
+
+/**
+ * enum cppi5_tr_resp_status_submission - TR Response Status field values which
+ * correspond to a Submission Error
+ * @CPPI5_TR_RESPONSE_STATUS_SUBMISSION_ICNT0: ICNT0 was 0
+ * @CPPI5_TR_RESPONSE_STATUS_SUBMISSION_FIFO_FULL: Channel FIFO was full when TR
+ * received
+ * @CPPI5_TR_RESPONSE_STATUS_SUBMISSION_OWN: Channel is not owned by the
+ * submitter
+ */
+enum cppi5_tr_resp_status_submission {
+ CPPI5_TR_RESPONSE_STATUS_SUBMISSION_ICNT0,
+ CPPI5_TR_RESPONSE_STATUS_SUBMISSION_FIFO_FULL,
+ CPPI5_TR_RESPONSE_STATUS_SUBMISSION_OWN,
+ CPPI5_TR_RESPONSE_STATUS_SUBMISSION_MAX
+};
+
+/**
+ * enum cppi5_tr_resp_status_unsupported - TR Response Status field values which
+ * correspond to an Unsupported Error
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_TR_TYPE: TR Type not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_STATIC: STATIC not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_EOL: EOL not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_CFG_SPECIFIC: CONFIGURATION SPECIFIC
+ * not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_AMODE: AMODE not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_ELTYPE: ELTYPE not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_DFMT: DFMT not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_SECTR: SECTR not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_AMODE_SPECIFIC: AMODE SPECIFIC field
+ * not supported
+ */
+enum cppi5_tr_resp_status_unsupported {
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_TR_TYPE,
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_STATIC,
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_EOL,
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_CFG_SPECIFIC,
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_AMODE,
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_ELTYPE,
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_DFMT,
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_SECTR,
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_AMODE_SPECIFIC,
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_MAX
+};
+
+/**
+ * cppi5_trdesc_calc_size - Calculate TR Descriptor size
+ * @tr_count: number of TR records
+ * @tr_size: Nominal size of TR record (max) [16, 32, 64, 128]
+ *
+ * Returns required TR Descriptor size
+ */
+static inline size_t cppi5_trdesc_calc_size(u32 tr_count, u32 tr_size)
+{
+ /*
+ * The Size of a TR descriptor is:
+ * 1 x tr_size : the first 16 bytes is used by the packet info block +
+ * tr_count x tr_size : Transfer Request Records +
+ * tr_count x sizeof(struct cppi5_tr_resp_t) : Transfer Response Records
+ */
+ return tr_size * (tr_count + 1) +
+ sizeof(struct cppi5_tr_resp_t) * tr_count;
+}
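+
+/*
+ * Worked example (illustrative numbers): for tr_count = 2 and tr_size = 32
+ * the descriptor needs 32 * (2 + 1) + sizeof(struct cppi5_tr_resp_t) * 2 =
+ * 96 + 8 = 104 bytes:
+ *
+ *	size_t trdesc_size = cppi5_trdesc_calc_size(2, 32);
+ */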
+
+/**
+ * cppi5_trdesc_init - Init TR Descriptor
+ * @desc_hdr: TR Descriptor header
+ * @tr_count: number of TR records
+ * @tr_size: Nominal size of TR record (max) [16, 32, 64, 128]
+ * @reload_idx: Absolute index to jump to on the 2nd and following passes
+ * through the TR packet.
+ * @reload_count: Number of times to jump from last entry to reload_idx. 0x1ff
+ * indicates infinite looping.
+ *
+ * Init TR Descriptor
+ */
+static inline void cppi5_trdesc_init(struct cppi5_desc_hdr_t *desc_hdr,
+ u32 tr_count, u32 tr_size, u32 reload_idx,
+ u32 reload_count)
+{
+ desc_hdr->pkt_info0 = CPPI5_INFO0_DESC_TYPE_VAL_TR <<
+ CPPI5_INFO0_HDESC_TYPE_SHIFT;
+ desc_hdr->pkt_info0 |=
+ (reload_count << CPPI5_INFO0_TRDESC_RLDCNT_SHIFT) &
+ CPPI5_INFO0_TRDESC_RLDCNT_MASK;
+ desc_hdr->pkt_info0 |=
+ (reload_idx << CPPI5_INFO0_TRDESC_RLDIDX_SHIFT) &
+ CPPI5_INFO0_TRDESC_RLDIDX_MASK;
+ desc_hdr->pkt_info0 |= (tr_count - 1) & CPPI5_INFO0_TRDESC_LASTIDX_MASK;
+
+ desc_hdr->pkt_info1 |= ((ffs(tr_size >> 4) - 1) <<
+ CPPI5_INFO1_TRDESC_RECSIZE_SHIFT) &
+ CPPI5_INFO1_TRDESC_RECSIZE_MASK;
+}
+
+/**
+ * cppi5_tr_init - Init TR record
+ * @flags: Pointer to the TR's flags
+ * @type: TR type
+ * @static_tr: TR is static
+ * @wait: Wait for TR completion before allowing the next TR to start
+ * @event_size: output event generation cfg
+ * @cmd_id: TR identifier (application specifics)
+ *
+ * Init TR record
+ */
+static inline void cppi5_tr_init(cppi5_tr_flags_t *flags,
+ enum cppi5_tr_types type, bool static_tr,
+ bool wait, enum cppi5_tr_event_size event_size,
+ u32 cmd_id)
+{
+ *flags = type;
+ *flags |= (event_size << CPPI5_TR_EVENT_SIZE_SHIFT) &
+ CPPI5_TR_EVENT_SIZE_MASK;
+
+ *flags |= (cmd_id << CPPI5_TR_CMD_ID_SHIFT) &
+ CPPI5_TR_CMD_ID_MASK;
+
+ if (static_tr && (type == CPPI5_TR_TYPE8 || type == CPPI5_TR_TYPE9))
+ *flags |= CPPI5_TR_STATIC;
+
+ if (wait)
+ *flags |= CPPI5_TR_WAIT;
+}
+
+/**
+ * cppi5_tr_set_trigger - Configure trigger0/1 and trigger0/1_type
+ * @flags: Pointer to the TR's flags
+ * @trigger0: trigger0 selection
+ * @trigger0_type: type of data transfer that will be enabled by trigger0
+ * @trigger1: trigger1 selection
+ * @trigger1_type: type of data transfer that will be enabled by trigger1
+ *
+ * Configure the triggers for the TR
+ */
+static inline void cppi5_tr_set_trigger(cppi5_tr_flags_t *flags,
+ enum cppi5_tr_trigger trigger0,
+ enum cppi5_tr_trigger_type trigger0_type,
+ enum cppi5_tr_trigger trigger1,
+ enum cppi5_tr_trigger_type trigger1_type)
+{
+ *flags &= ~(CPPI5_TR_TRIGGER0_MASK | CPPI5_TR_TRIGGER0_TYPE_MASK |
+ CPPI5_TR_TRIGGER1_MASK | CPPI5_TR_TRIGGER1_TYPE_MASK);
+ *flags |= (trigger0 << CPPI5_TR_TRIGGER0_SHIFT) &
+ CPPI5_TR_TRIGGER0_MASK;
+ *flags |= (trigger0_type << CPPI5_TR_TRIGGER0_TYPE_SHIFT) &
+ CPPI5_TR_TRIGGER0_TYPE_MASK;
+
+ *flags |= (trigger1 << CPPI5_TR_TRIGGER1_SHIFT) &
+ CPPI5_TR_TRIGGER1_MASK;
+ *flags |= (trigger1_type << CPPI5_TR_TRIGGER1_TYPE_SHIFT) &
+ CPPI5_TR_TRIGGER1_TYPE_MASK;
+}
+
+/**
+ * cppi5_tr_csf_set - Update the Configuration Specific Flags
+ * @flags: Pointer to the TR's flags
+ * @csf: Configuration specific flags
+ *
+ * Replaces the Configuration Specific Flags section of the TR flags.
+ */
+static inline void cppi5_tr_csf_set(cppi5_tr_flags_t *flags, u32 csf)
+{
+ *flags &= ~CPPI5_TR_CSF_FLAGS_MASK;
+ *flags |= (csf << CPPI5_TR_CSF_FLAGS_SHIFT) &
+ CPPI5_TR_CSF_FLAGS_MASK;
+}
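+
+/*
+ * Example: a minimal Type 1 TR setup sketch; desc_hdr and tr are
+ * hypothetical pointers into a TR descriptor sized with
+ * cppi5_trdesc_calc_size() and zeroed by the caller:
+ *
+ *	cppi5_trdesc_init(desc_hdr, 1, 32, 0, 0);
+ *	cppi5_tr_init(&tr->flags, CPPI5_TR_TYPE1, false, false,
+ *		      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+ *	cppi5_tr_csf_set(&tr->flags, CPPI5_TR_CSF_SUPR_EVT);
+ */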
+
+#endif /* __TI_CPPI5_H__ */
* @bytes_transferred: byte counter
*/
+/**
+ * enum dma_desc_metadata_mode - per descriptor metadata mode types supported
+ * @DESC_METADATA_CLIENT - the metadata buffer is allocated/provided by the
+ * client driver and it is attached (via the dmaengine_desc_attach_metadata()
+ * helper) to the descriptor.
+ *
+ * Client drivers interested in using this mode can follow:
+ * - DMA_MEM_TO_DEV / DMA_MEM_TO_MEM:
+ * 1. prepare the descriptor (dmaengine_prep_*)
+ * construct the metadata in the client's buffer
+ * 2. use dmaengine_desc_attach_metadata() to attach the buffer to the
+ * descriptor
+ * 3. submit the transfer
+ * - DMA_DEV_TO_MEM:
+ * 1. prepare the descriptor (dmaengine_prep_*)
+ * 2. use dmaengine_desc_attach_metadata() to attach the buffer to the
+ * descriptor
+ * 3. submit the transfer
+ * 4. when the transfer is completed, the metadata should be available in the
+ * attached buffer
+ *
+ * @DESC_METADATA_ENGINE - the metadata buffer is allocated/managed by the DMA
+ * driver. The client driver can ask for the pointer, maximum size and the
+ * currently used size of the metadata and can directly update or read it.
+ * dmaengine_desc_get_metadata_ptr() and dmaengine_desc_set_metadata_len() are
+ * provided as helper functions.
+ *
+ * Note: the metadata area for the descriptor is no longer valid after the
+ * transfer has been completed (if a completion callback is used, it is
+ * valid up to the point when the callback returns).
+ *
+ * Client drivers interested in using this mode can follow:
+ * - DMA_MEM_TO_DEV / DMA_MEM_TO_MEM:
+ * 1. prepare the descriptor (dmaengine_prep_*)
+ * 2. use dmaengine_desc_get_metadata_ptr() to get the pointer to the engine's
+ * metadata area
+ * 3. update the metadata at the pointer
+ * 4. use dmaengine_desc_set_metadata_len() to tell the DMA engine the amount
+ * of data the client has placed into the metadata buffer
+ * 5. submit the transfer
+ * - DMA_DEV_TO_MEM:
+ * 1. prepare the descriptor (dmaengine_prep_*)
+ * 2. submit the transfer
+ * 3. on transfer completion, use dmaengine_desc_get_metadata_ptr() to get the
+ * pointer to the engine's metadata area
+ * 4. Read out the metadata from the pointer
+ *
+ * Note: the two modes are not compatible and clients must use one mode per
+ * descriptor.
+ */
+enum dma_desc_metadata_mode {
+ DESC_METADATA_NONE = 0,
+ DESC_METADATA_CLIENT = BIT(0),
+ DESC_METADATA_ENGINE = BIT(1),
+};
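+
+/*
+ * A minimal DESC_METADATA_CLIENT sketch for DMA_MEM_TO_DEV; chan, buf, len,
+ * md_buf and md_len are hypothetical, prepared by the client driver:
+ *
+ *	struct dma_async_tx_descriptor *desc;
+ *
+ *	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
+ *					   DMA_PREP_INTERRUPT);
+ *	dmaengine_desc_attach_metadata(desc, md_buf, md_len);
+ *	dmaengine_submit(desc);
+ *	dma_async_issue_pending(chan);
+ */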
+
struct dma_chan_percpu {
/* stats */
unsigned long memcpy_count;
/**
* struct dma_chan - devices supply DMA channels, clients use them
* @device: ptr to the dma device who supplies this channel, always !%NULL
+ * @slave: ptr to the device using this channel
* @cookie: last cookie value returned to client
* @completed_cookie: last completed cookie for this channel
* @chan_id: channel ID for sysfs
* @dev: class device for sysfs
+ * @name: backlink name for sysfs
* @device_node: used to add this to the device chan list
* @local: per-cpu pointer to a struct dma_chan_percpu
* @client_count: how many clients are using this channel
*/
struct dma_chan {
struct dma_device *device;
+ struct device *slave;
dma_cookie_t cookie;
dma_cookie_t completed_cookie;
/* sysfs */
int chan_id;
struct dma_chan_dev *dev;
+ const char *name;
struct list_head device_node;
struct dma_chan_percpu __percpu *local;
dma_addr_t addr[0];
};
+struct dma_async_tx_descriptor;
+
+struct dma_descriptor_metadata_ops {
+ int (*attach)(struct dma_async_tx_descriptor *desc, void *data,
+ size_t len);
+
+ void *(*get_ptr)(struct dma_async_tx_descriptor *desc,
+ size_t *payload_len, size_t *max_len);
+ int (*set_len)(struct dma_async_tx_descriptor *desc,
+ size_t payload_len);
+};
+
/**
* struct dma_async_tx_descriptor - async transaction descriptor
* ---dma generic offload fields---
* @cookie: tracking cookie for this transaction, set to -EBUSY if
* this tx is sitting on a dependency list
* @flags: flags to augment operation preparation, control completion, and
- * communicate status
+ * communicate status
* @phys: physical address of the descriptor
* @chan: target channel for this operation
* @tx_submit: accept the descriptor, assign ordered cookie and mark the
* descriptor pending. To be pushed on .issue_pending() call
* @callback: routine to call after this operation is complete
* @callback_param: general parameter to pass to the callback routine
+ * @desc_metadata_mode: core managed metadata mode to protect mixed use of
+ * DESC_METADATA_CLIENT or DESC_METADATA_ENGINE. Otherwise
+ * DESC_METADATA_NONE
+ * @metadata_ops: DMA driver provided metadata mode ops, need to be set by the
+ * DMA driver if metadata mode is supported with the descriptor
* ---async_tx api specific fields---
* @next: at completion submit this descriptor
* @parent: pointer to the next level up in the dependency chain
dma_async_tx_callback_result callback_result;
void *callback_param;
struct dmaengine_unmap_data *unmap;
+ enum dma_desc_metadata_mode desc_metadata_mode;
+ struct dma_descriptor_metadata_ops *metadata_ops;
#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
struct dma_async_tx_descriptor *next;
struct dma_async_tx_descriptor *parent;
* @residue: the remaining number of bytes left to transmit
* on the selected transfer for states DMA_IN_PROGRESS and
* DMA_PAUSED if this is implemented in the driver, else 0
+ * @in_flight_bytes: amount of data in bytes cached by the DMA.
*/
struct dma_tx_state {
dma_cookie_t last;
dma_cookie_t used;
u32 residue;
+ u32 in_flight_bytes;
};
/**
* @global_node: list_head for global dma_device_list
* @filter: information for device/slave to filter function/param mapping
* @cap_mask: one or more dma_capability flags
+ * @desc_metadata_modes: supported metadata modes by the DMA device
* @max_xor: maximum number of xor sources, 0 if no capability
* @max_pq: maximum number of PQ sources and PQ-continue capability
* @copy_align: alignment shift for memcpy operations
* @fill_align: alignment shift for memset operations
* @dev_id: unique device ID
* @dev: struct device reference for dma mapping api
+ * @owner: owner module (automatically set based on the provided dev)
* @src_addr_widths: bit mask of src addr widths the device supports
* Width is specified in bytes, e.g. for a device supporting
* a width of 4 the mask should have BIT(4) set.
* will just return a simple status code
* @device_issue_pending: push pending transactions to hardware
* @descriptor_reuse: a submitted transfer can be resubmitted after completion
+ * @device_release: called sometime after dma_async_device_unregister() is
+ *	called and there are no further references to this structure. This
+ *	must be implemented to free resources; however, many existing drivers
+ *	do not, and are therefore not safe to unbind while in use.
+ *
*/
struct dma_device {
-
+ struct kref ref;
unsigned int chancnt;
unsigned int privatecnt;
struct list_head channels;
struct list_head global_node;
struct dma_filter filter;
dma_cap_mask_t cap_mask;
+ enum dma_desc_metadata_mode desc_metadata_modes;
unsigned short max_xor;
unsigned short max_pq;
enum dmaengine_alignment copy_align;
int dev_id;
struct device *dev;
+ struct module *owner;
u32 src_addr_widths;
u32 dst_addr_widths;
dma_cookie_t cookie,
struct dma_tx_state *txstate);
void (*device_issue_pending)(struct dma_chan *chan);
+ void (*device_release)(struct dma_device *dev);
};
static inline int dmaengine_slave_config(struct dma_chan *chan,
len, flags);
}
+static inline bool dmaengine_is_metadata_mode_supported(struct dma_chan *chan,
+ enum dma_desc_metadata_mode mode)
+{
+ if (!chan)
+ return false;
+
+ return !!(chan->device->desc_metadata_modes & mode);
+}
+
+#ifdef CONFIG_DMA_ENGINE
+int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc,
+ void *data, size_t len);
+void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
+ size_t *payload_len, size_t *max_len);
+int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
+ size_t payload_len);
+#else /* CONFIG_DMA_ENGINE */
+static inline int dmaengine_desc_attach_metadata(
+ struct dma_async_tx_descriptor *desc, void *data, size_t len)
+{
+ return -EINVAL;
+}
+static inline void *dmaengine_desc_get_metadata_ptr(
+ struct dma_async_tx_descriptor *desc, size_t *payload_len,
+ size_t *max_len)
+{
+ return NULL;
+}
+static inline int dmaengine_desc_set_metadata_len(
+ struct dma_async_tx_descriptor *desc, size_t payload_len)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_DMA_ENGINE */
+
/**
* dmaengine_terminate_all() - Terminate all active DMA transfers
* @chan: The channel for which to terminate the transfers
int dma_async_device_register(struct dma_device *device);
int dmaenginem_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);
+int dma_async_device_channel_register(struct dma_device *device,
+ struct dma_chan *chan);
+void dma_async_device_channel_unregister(struct dma_device *device,
+ struct dma_chan *chan);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
-struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
-struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
#define dma_request_channel(mask, x, y) \
__dma_request_channel(&(mask), x, y, NULL)
-#define dma_request_slave_channel_compat(mask, x, y, dev, name) \
- __dma_request_slave_channel_compat(&(mask), x, y, dev, name)
static inline struct dma_chan
-*__dma_request_slave_channel_compat(const dma_cap_mask_t *mask,
+*dma_request_slave_channel_compat(const dma_cap_mask_t mask,
dma_filter_fn fn, void *fn_param,
struct device *dev, const char *name)
{
if (!fn || !fn_param)
return NULL;
- return __dma_request_channel(mask, fn, fn_param, NULL);
+ return __dma_request_channel(&mask, fn, fn_param, NULL);
+}
+
+static inline char *
+dmaengine_get_direction_text(enum dma_transfer_direction dir)
+{
+ switch (dir) {
+ case DMA_DEV_TO_MEM:
+ return "DEV_TO_MEM";
+ case DMA_MEM_TO_DEV:
+ return "MEM_TO_DEV";
+ case DMA_MEM_TO_MEM:
+ return "MEM_TO_MEM";
+ case DMA_DEV_TO_DEV:
+ return "DEV_TO_DEV";
+ default:
+ break;
+ }
+
+ return "invalid";
}
#endif /* DMAENGINE_H */
!(disk->flags & GENHD_FL_NO_PART_SCAN);
}
+static inline bool disk_has_partitions(struct gendisk *disk)
+{
+ bool ret = false;
+
+ rcu_read_lock();
+ if (rcu_dereference(disk->part_tbl)->len > 1)
+ ret = true;
+ rcu_read_unlock();
+
+ return ret;
+}
+
static inline dev_t disk_devt(struct gendisk *disk)
{
return MKDEV(disk->major, disk->first_minor);
int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
int gpiod_set_transitory(struct gpio_desc *desc, bool transitory);
+void gpiod_toggle_active_low(struct gpio_desc *desc);
int gpiod_is_active_low(const struct gpio_desc *desc);
int gpiod_cansleep(const struct gpio_desc *desc);
return -ENOSYS;
}
+static inline void gpiod_toggle_active_low(struct gpio_desc *desc)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(desc);
+}
+
static inline int gpiod_is_active_low(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
hwmon_humidity,
hwmon_fan,
hwmon_pwm,
+ hwmon_intrusion,
hwmon_max,
};
#define HWMON_C_TEMP_SAMPLES BIT(hwmon_chip_temp_samples)
enum hwmon_temp_attributes {
- hwmon_temp_input = 0,
+ hwmon_temp_enable,
+ hwmon_temp_input,
hwmon_temp_type,
hwmon_temp_lcrit,
hwmon_temp_lcrit_hyst,
hwmon_temp_reset_history,
};
+#define HWMON_T_ENABLE BIT(hwmon_temp_enable)
#define HWMON_T_INPUT BIT(hwmon_temp_input)
#define HWMON_T_TYPE BIT(hwmon_temp_type)
#define HWMON_T_LCRIT BIT(hwmon_temp_lcrit)
#define HWMON_T_RESET_HISTORY BIT(hwmon_temp_reset_history)
enum hwmon_in_attributes {
+ hwmon_in_enable,
hwmon_in_input,
hwmon_in_min,
hwmon_in_max,
hwmon_in_max_alarm,
hwmon_in_lcrit_alarm,
hwmon_in_crit_alarm,
- hwmon_in_enable,
};
+#define HWMON_I_ENABLE BIT(hwmon_in_enable)
#define HWMON_I_INPUT BIT(hwmon_in_input)
#define HWMON_I_MIN BIT(hwmon_in_min)
#define HWMON_I_MAX BIT(hwmon_in_max)
#define HWMON_I_MAX_ALARM BIT(hwmon_in_max_alarm)
#define HWMON_I_LCRIT_ALARM BIT(hwmon_in_lcrit_alarm)
#define HWMON_I_CRIT_ALARM BIT(hwmon_in_crit_alarm)
-#define HWMON_I_ENABLE BIT(hwmon_in_enable)
enum hwmon_curr_attributes {
+ hwmon_curr_enable,
hwmon_curr_input,
hwmon_curr_min,
hwmon_curr_max,
hwmon_curr_crit_alarm,
};
+#define HWMON_C_ENABLE BIT(hwmon_curr_enable)
#define HWMON_C_INPUT BIT(hwmon_curr_input)
#define HWMON_C_MIN BIT(hwmon_curr_min)
#define HWMON_C_MAX BIT(hwmon_curr_max)
#define HWMON_C_CRIT_ALARM BIT(hwmon_curr_crit_alarm)
enum hwmon_power_attributes {
+ hwmon_power_enable,
hwmon_power_average,
hwmon_power_average_interval,
hwmon_power_average_interval_max,
hwmon_power_crit_alarm,
};
+#define HWMON_P_ENABLE BIT(hwmon_power_enable)
#define HWMON_P_AVERAGE BIT(hwmon_power_average)
#define HWMON_P_AVERAGE_INTERVAL BIT(hwmon_power_average_interval)
#define HWMON_P_AVERAGE_INTERVAL_MAX BIT(hwmon_power_average_interval_max)
#define HWMON_P_CRIT_ALARM BIT(hwmon_power_crit_alarm)
enum hwmon_energy_attributes {
+ hwmon_energy_enable,
hwmon_energy_input,
hwmon_energy_label,
};
+#define HWMON_E_ENABLE BIT(hwmon_energy_enable)
#define HWMON_E_INPUT BIT(hwmon_energy_input)
#define HWMON_E_LABEL BIT(hwmon_energy_label)
enum hwmon_humidity_attributes {
+ hwmon_humidity_enable,
hwmon_humidity_input,
hwmon_humidity_label,
hwmon_humidity_min,
hwmon_humidity_fault,
};
+#define HWMON_H_ENABLE BIT(hwmon_humidity_enable)
#define HWMON_H_INPUT BIT(hwmon_humidity_input)
#define HWMON_H_LABEL BIT(hwmon_humidity_label)
#define HWMON_H_MIN BIT(hwmon_humidity_min)
#define HWMON_H_FAULT BIT(hwmon_humidity_fault)
enum hwmon_fan_attributes {
+ hwmon_fan_enable,
hwmon_fan_input,
hwmon_fan_label,
hwmon_fan_min,
hwmon_fan_fault,
};
+#define HWMON_F_ENABLE BIT(hwmon_fan_enable)
#define HWMON_F_INPUT BIT(hwmon_fan_input)
#define HWMON_F_LABEL BIT(hwmon_fan_label)
#define HWMON_F_MIN BIT(hwmon_fan_min)
#define HWMON_PWM_MODE BIT(hwmon_pwm_mode)
#define HWMON_PWM_FREQ BIT(hwmon_pwm_freq)
+enum hwmon_intrusion_attributes {
+ hwmon_intrusion_alarm,
+ hwmon_intrusion_beep,
+};
+#define HWMON_INTRUSION_ALARM BIT(hwmon_intrusion_alarm)
+#define HWMON_INTRUSION_BEEP BIT(hwmon_intrusion_beep)
+
/**
* struct hwmon_ops - hwmon device operations
* @is_visible: Callback to return attribute visibility. Mandatory.
return (struct ethhdr *)skb_mac_header(skb);
}
+/* Prefer this version in TX path, instead of
+ * skb_reset_mac_header() + eth_hdr()
+ */
+static inline struct ethhdr *skb_eth_hdr(const struct sk_buff *skb)
+{
+ return (struct ethhdr *)skb->data;
+}
+
static inline struct ethhdr *inner_eth_hdr(const struct sk_buff *skb)
{
return (struct ethhdr *)skb_inner_mac_header(skb);
#define list_for_each(pos, head) \
for (pos = (head)->next; pos != (head); pos = pos->next)
+/**
+ * list_for_each_continue - continue iteration over a list
+ * @pos: the &struct list_head to use as a loop cursor.
+ * @head: the head for your list.
+ *
+ * Continue to iterate over a list, continuing after the current position.
+ */
+#define list_for_each_continue(pos, head) \
+ for (pos = pos->next; pos != (head); pos = pos->next)
+
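+/*
+ * Example sketch: resume iteration after the current position, where pos
+ * already points at a valid entry of the list headed by head (match() is
+ * hypothetical):
+ *
+ *	list_for_each_continue(pos, head) {
+ *		if (match(pos))
+ *			break;
+ *	}
+ */
+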
/**
* list_for_each_prev - iterate over a list backwards
* @pos: the &struct list_head to use as a loop cursor.
#define RTC_AL_SEC 0x0018
+#define RTC_AL_SEC_MASK 0x003f
+#define RTC_AL_MIN_MASK 0x003f
+#define RTC_AL_HOU_MASK 0x001f
+#define RTC_AL_DOM_MASK 0x001f
+#define RTC_AL_DOW_MASK 0x0007
+#define RTC_AL_MTH_MASK 0x000f
+#define RTC_AL_YEA_MASK 0x007f
+
#define RTC_PDN2 0x002e
#define RTC_PDN2_PWRON_ALARM BIT(4)
/* Some controllers have a CBSY bit */
#define TMIO_MMC_HAVE_CBSY BIT(11)
-/* Some controllers that support HS400 use 4 taps while others use 8. */
-#define TMIO_MMC_HAVE_4TAP_HS400 BIT(13)
-
int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base);
int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base);
void tmio_core_mmc_pwr(void __iomem *cnf, int shift, int state);
!page_poisoning_enabled();
}
-#ifdef CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT
-DECLARE_STATIC_KEY_TRUE(_debug_pagealloc_enabled);
+#ifdef CONFIG_DEBUG_PAGEALLOC
+extern void init_debug_pagealloc(void);
#else
-DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
+static inline void init_debug_pagealloc(void) {}
#endif
+extern bool _debug_pagealloc_enabled_early;
+DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
static inline bool debug_pagealloc_enabled(void)
+{
+ return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
+ _debug_pagealloc_enabled_early;
+}
+
+/*
+ * For use in fast paths after init_debug_pagealloc() has run, or when a
+ * false negative result is not harmful when called too early.
+ */
+static inline bool debug_pagealloc_enabled_static(void)
{
if (!IS_ENABLED(CONFIG_DEBUG_PAGEALLOC))
return false;
int mmc_gpio_get_cd(struct mmc_host *host);
int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
unsigned int idx, bool override_active_level,
- unsigned int debounce, bool *gpio_invert);
+ unsigned int debounce);
int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
- unsigned int idx,
- unsigned int debounce, bool *gpio_invert);
+ unsigned int idx, unsigned int debounce);
void mmc_gpio_set_cd_isr(struct mmc_host *host,
irqreturn_t (*isr)(int irq, void *dev_id));
int mmc_gpio_set_cd_wake(struct mmc_host *host, bool on);
NR_INACTIVE_FILE, /* " " " " " */
NR_ACTIVE_FILE, /* " " " " " */
NR_UNEVICTABLE, /* " " " " " */
- NR_SLAB_RECLAIMABLE, /* Please do not reorder this item */
- NR_SLAB_UNRECLAIMABLE, /* and this one without looking at
- * memcg_flush_percpu_vmstats() first. */
+ NR_SLAB_RECLAIMABLE,
+ NR_SLAB_UNRECLAIMABLE,
NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */
NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */
WORKINGSET_NODES,
FL_READING,
FL_CACHEDPRG,
/* These 4 come from onenand_state_t, which has been unified here */
- FL_RESETING,
+ FL_RESETTING,
FL_OTPING,
FL_PREPARING_ERASE,
FL_VERIFYING_ERASE,
/* internal use only */
#define LOOKUP_PARENT 0x0010
-#define LOOKUP_NO_REVAL 0x0080
#define LOOKUP_JUMPED 0x1000
#define LOOKUP_ROOT 0x2000
#define LOOKUP_ROOT_GRABBED 0x0008
int dev_get_alias(const struct net_device *, char *, size_t);
int dev_change_net_namespace(struct net_device *, struct net *, const char *);
int __dev_set_mtu(struct net_device *, int);
+int dev_validate_mtu(struct net_device *dev, int mtu,
+ struct netlink_ext_ack *extack);
int dev_set_mtu_ext(struct net_device *dev, int mtu,
struct netlink_ext_ack *extack);
int dev_set_mtu(struct net_device *, int);
sizeof(*addr));
}
-/* Calculate the bytes required to store the inclusive range of a-b */
-static inline int
-bitmap_bytes(u32 a, u32 b)
-{
- return 4 * ((((b - a + 8) / 8) + 3) / 4);
-}
-
/* How often should the gc be run by default */
#define IPSET_GC_TIME (3 * 60)
const struct nfnl_callback *cb; /* callback for individual types */
struct module *owner;
int (*commit)(struct net *net, struct sk_buff *skb);
- int (*abort)(struct net *net, struct sk_buff *skb);
+ int (*abort)(struct net *net, struct sk_buff *skb, bool autoload);
void (*cleanup)(struct net *net);
bool (*valid_genid)(struct net *net, u32 genid);
};
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F3 0x1493
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F3 0x1443
+#define PCI_DEVICE_ID_AMD_19H_DF_F3 0x1653
#define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703
#define PCI_DEVICE_ID_AMD_LANCE 0x2000
#define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001
extern struct pinctrl * __must_check devm_pinctrl_get(struct device *dev);
extern void devm_pinctrl_put(struct pinctrl *p);
+extern int pinctrl_select_default_state(struct device *dev);
#ifdef CONFIG_PM
extern int pinctrl_pm_select_default_state(struct device *dev);
{
}
+static inline int pinctrl_select_default_state(struct device *dev)
+{
+ return 0;
+}
+
static inline int pinctrl_pm_select_default_state(struct device *dev)
{
return 0;
* @aggr_mask: group aggregation mask;
* @reg: group interrupt status register;
* @mask: group interrupt mask;
+ * @capability: group capability register;
* @cache: last status value for elements fro the same group;
* @count: number of available elements in the group;
* @ind: element's index inside the group;
u32 aggr_mask;
u32 reg;
u32 mask;
+ u32 capability;
u32 cache;
u8 count;
u8 ind;
#define ASUS_WMI_DEVID_LIGHT_SENSOR 0x00050022 /* ?? */
#define ASUS_WMI_DEVID_LIGHTBAR 0x00050025
#define ASUS_WMI_DEVID_FAN_BOOST_MODE 0x00110018
+#define ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY 0x00120075
/* Misc */
#define ASUS_WMI_DEVID_CAMERA 0x00060013
#ifndef _PMBUS_H_
#define _PMBUS_H_
+#include <linux/bits.h>
+
/* flags */
/*
* communication errors for no explicable reason. For such chips, checking
* the status register must be disabled.
*/
-#define PMBUS_SKIP_STATUS_CHECK (1 << 0)
+#define PMBUS_SKIP_STATUS_CHECK BIT(0)
+
+/*
+ * PMBUS_WRITE_PROTECTED
+ * Set if the chip is write protected and write protection is not determined
+ * by the standard WRITE_PROTECT command.
+ */
+#define PMBUS_WRITE_PROTECTED BIT(1)
struct pmbus_platform_data {
u32 flags; /* Device specific flags */
DEV_PROP_U32,
DEV_PROP_U64,
DEV_PROP_STRING,
+ DEV_PROP_REF,
};
enum dev_dma_attr {
return fwnode_property_read_u64_array(fwnode, propname, NULL, 0);
}
+struct software_node;
+
+/**
+ * struct software_node_ref_args - Reference property with additional arguments
+ * @node: Reference to a software node
+ * @nargs: Number of elements in @args array
+ * @args: Integer arguments
+ */
+struct software_node_ref_args {
+ const struct software_node *node;
+ unsigned int nargs;
+ u64 args[NR_FWNODE_REFERENCE_ARGS];
+};
+
/**
* struct property_entry - "Built-in" device property representation.
* @name: Name of the property.
* @length: Length of data making up the value.
- * @is_array: True when the property is an array.
+ * @is_inline: True when the property value is stored inline.
* @type: Type of the data in unions.
- * @pointer: Pointer to the property (an array of items of the given type).
- * @value: Value of the property (when it is a single item of the given type).
+ * @pointer: Pointer to the property when it is not stored inline.
+ * @value: Value of the property when it is stored inline.
*/
struct property_entry {
const char *name;
size_t length;
- bool is_array;
+ bool is_inline;
enum dev_prop_type type;
union {
const void *pointer;
union {
- u8 u8_data;
- u16 u16_data;
- u32 u32_data;
- u64 u64_data;
- const char *str;
+ u8 u8_data[sizeof(u64) / sizeof(u8)];
+ u16 u16_data[sizeof(u64) / sizeof(u16)];
+ u32 u32_data[sizeof(u64) / sizeof(u32)];
+ u64 u64_data[sizeof(u64) / sizeof(u64)];
+ const char *str[sizeof(u64) / sizeof(char *)];
} value;
};
};
*/
#define __PROPERTY_ENTRY_ELEMENT_SIZE(_elem_) \
- sizeof(((struct property_entry *)NULL)->value._elem_)
+ sizeof(((struct property_entry *)NULL)->value._elem_[0])
-#define __PROPERTY_ENTRY_ARRAY_LEN(_name_, _elem_, _Type_, _val_, _len_)\
+#define __PROPERTY_ENTRY_ARRAY_ELSIZE_LEN(_name_, _elsize_, _Type_, \
+ _val_, _len_) \
(struct property_entry) { \
.name = _name_, \
- .length = (_len_) * __PROPERTY_ENTRY_ELEMENT_SIZE(_elem_), \
- .is_array = true, \
+ .length = (_len_) * (_elsize_), \
.type = DEV_PROP_##_Type_, \
{ .pointer = _val_ }, \
}
+#define __PROPERTY_ENTRY_ARRAY_LEN(_name_, _elem_, _Type_, _val_, _len_)\
+ __PROPERTY_ENTRY_ARRAY_ELSIZE_LEN(_name_, \
+ __PROPERTY_ENTRY_ELEMENT_SIZE(_elem_), \
+ _Type_, _val_, _len_)
+
#define PROPERTY_ENTRY_U8_ARRAY_LEN(_name_, _val_, _len_) \
__PROPERTY_ENTRY_ARRAY_LEN(_name_, u8_data, U8, _val_, _len_)
#define PROPERTY_ENTRY_U16_ARRAY_LEN(_name_, _val_, _len_) \
__PROPERTY_ENTRY_ARRAY_LEN(_name_, u64_data, U64, _val_, _len_)
#define PROPERTY_ENTRY_STRING_ARRAY_LEN(_name_, _val_, _len_) \
__PROPERTY_ENTRY_ARRAY_LEN(_name_, str, STRING, _val_, _len_)
+#define PROPERTY_ENTRY_REF_ARRAY_LEN(_name_, _val_, _len_) \
+ __PROPERTY_ENTRY_ARRAY_ELSIZE_LEN(_name_, \
+ sizeof(struct software_node_ref_args), \
+ REF, _val_, _len_)
#define PROPERTY_ENTRY_U8_ARRAY(_name_, _val_) \
PROPERTY_ENTRY_U8_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_))
PROPERTY_ENTRY_U64_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_))
#define PROPERTY_ENTRY_STRING_ARRAY(_name_, _val_) \
PROPERTY_ENTRY_STRING_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_))
+#define PROPERTY_ENTRY_REF_ARRAY(_name_, _val_) \
+ PROPERTY_ENTRY_REF_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_))
#define __PROPERTY_ENTRY_ELEMENT(_name_, _elem_, _Type_, _val_) \
(struct property_entry) { \
.name = _name_, \
.length = __PROPERTY_ENTRY_ELEMENT_SIZE(_elem_), \
+ .is_inline = true, \
.type = DEV_PROP_##_Type_, \
- { .value = { ._elem_ = _val_ } }, \
+ { .value = { ._elem_[0] = _val_ } }, \
}
#define PROPERTY_ENTRY_U8(_name_, _val_) \
#define PROPERTY_ENTRY_BOOL(_name_) \
(struct property_entry) { \
.name = _name_, \
+ .is_inline = true, \
+}
+
+#define PROPERTY_ENTRY_REF(_name_, _ref_, ...) \
+(struct property_entry) { \
+ .name = _name_, \
+ .length = sizeof(struct software_node_ref_args), \
+ .type = DEV_PROP_REF, \
+ { .pointer = &(const struct software_node_ref_args) { \
+ .node = _ref_, \
+ .nargs = ARRAY_SIZE(((u64[]){ 0, ##__VA_ARGS__ })) - 1, \
+ .args = { __VA_ARGS__ }, \
+ } }, \
}
struct property_entry *
/* -------------------------------------------------------------------------- */
/* Software fwnode support - when HW description is incomplete or missing */
-struct software_node;
-
-/**
- * struct software_node_ref_args - Reference with additional arguments
- * @node: Reference to a software node
- * @nargs: Number of elements in @args array
- * @args: Integer arguments
- */
-struct software_node_ref_args {
- const struct software_node *node;
- unsigned int nargs;
- u64 args[NR_FWNODE_REFERENCE_ARGS];
-};
-
-/**
- * struct software_node_reference - Named software node reference property
- * @name: Name of the property
- * @nrefs: Number of elements in @refs array
- * @refs: Array of references with optional arguments
- */
-struct software_node_reference {
- const char *name;
- unsigned int nrefs;
- const struct software_node_ref_args *refs;
-};
-
/**
* struct software_node - Software node description
* @name: Name of the software node
* @parent: Parent of the software node
* @properties: Array of device properties
- * @references: Array of software node reference properties
*/
struct software_node {
const char *name;
const struct software_node *parent;
const struct property_entry *properties;
- const struct software_node_reference *references;
};
bool is_software_node(const struct fwnode_handle *fwnode);
#include <errno.h>
#include <inttypes.h>
-#include <limits.h>
#include <stddef.h>
+#include <string.h>
#include <sys/mman.h>
#include <sys/time.h>
#include <sys/types.h>
#ifndef PAGE_SIZE
# define PAGE_SIZE 4096
#endif
+#ifndef PAGE_SHIFT
+# define PAGE_SHIFT 12
+#endif
extern const char raid6_empty_zero_page[PAGE_SIZE];
#define __init
#define enable_kernel_altivec()
#define disable_kernel_altivec()
+#undef EXPORT_SYMBOL
#define EXPORT_SYMBOL(sym)
+#undef EXPORT_SYMBOL_GPL
#define EXPORT_SYMBOL_GPL(sym)
#define MODULE_LICENSE(licence)
#define MODULE_DESCRIPTION(desc)
__ret ?: ((cond) ? 0 : -ETIMEDOUT); \
})
+/**
+ * regmap_read_poll_timeout_atomic - Poll until a condition is met or a timeout occurs
+ *
+ * @map: Regmap to read from
+ * @addr: Address to poll
+ * @val: Unsigned integer variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @delay_us: Time to udelay between reads in us (0 tight-loops).
+ * Should be less than ~10us since udelay is used
+ * (see Documentation/timers/timers-howto.rst).
+ * @timeout_us: Timeout in us, 0 means never timeout
+ *
+ * Returns 0 on success, -ETIMEDOUT upon a timeout, or the regmap_read
+ * error return value in case of a read error. In the two former cases,
+ * the last read value at @addr is stored in @val.
+ *
+ * This is modelled after the readx_poll_timeout_atomic macros in linux/iopoll.h.
+ *
+ * Note: In general regmap cannot be used in atomic context. If you want to use
+ * this macro then first set up your regmap for atomic use (flat or no cache
+ * and MMIO regmap).
+ */
+#define regmap_read_poll_timeout_atomic(map, addr, val, cond, delay_us, timeout_us) \
+({ \
+ u64 __timeout_us = (timeout_us); \
+ unsigned long __delay_us = (delay_us); \
+ ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \
+ int __ret; \
+ for (;;) { \
+ __ret = regmap_read((map), (addr), &(val)); \
+ if (__ret) \
+ break; \
+ if (cond) \
+ break; \
+ if ((__timeout_us) && \
+ ktime_compare(ktime_get(), __timeout) > 0) { \
+ __ret = regmap_read((map), (addr), &(val)); \
+ break; \
+ } \
+ if (__delay_us) \
+ udelay(__delay_us); \
+ } \
+ __ret ?: ((cond) ? 0 : -ETIMEDOUT); \
+})
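+
+/*
+ * Usage sketch (REG_STATUS and STATUS_READY are hypothetical): poll until
+ * the ready bit is set, checking every 5 us, for at most 1000 us:
+ *
+ *	err = regmap_read_poll_timeout_atomic(map, REG_STATUS, val,
+ *					      val & STATUS_READY, 5, 1000);
+ */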
+
/**
* regmap_field_read_poll_timeout - Poll until a condition is met or timeout
*
const char *const *supply_names,
unsigned int num_supplies);
+bool regulator_is_equal(struct regulator *reg1, struct regulator *reg2);
+
#else
/*
{
}
+static inline bool
+regulator_is_equal(struct regulator *reg1, struct regulator *reg2)
+{
+ return false;
+}
#endif
static inline int regulator_set_voltage_triplet(struct regulator *regulator,
/*
* If parent process has a registered restartable sequences area, the
- * child inherits. Only applies when forking a process, not a thread.
+ * child inherits. Unregister rseq for a clone with CLONE_VM set.
*/
static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
{
- if (clone_flags & CLONE_THREAD) {
+ if (clone_flags & CLONE_VM) {
t->rseq = NULL;
t->rseq_sig = 0;
t->rseq_event_mask = 0;
static inline void sk_psock_restore_proto(struct sock *sk,
struct sk_psock *psock)
{
- sk->sk_write_space = psock->saved_write_space;
+ sk->sk_prot->unhash = psock->saved_unhash;
if (psock->sk_proto) {
struct inet_connection_sock *icsk = inet_csk(sk);
bool has_ulp = !!icsk->icsk_ulp_data;
- if (has_ulp)
- tcp_update_ulp(sk, psock->sk_proto);
- else
+ if (has_ulp) {
+ tcp_update_ulp(sk, psock->sk_proto,
+ psock->saved_write_space);
+ } else {
sk->sk_prot = psock->sk_proto;
+ sk->sk_write_space = psock->saved_write_space;
+ }
psock->sk_proto = NULL;
+ } else {
+ sk->sk_write_space = psock->saved_write_space;
}
}
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * K3 Ring Accelerator (RA) subsystem interface
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#ifndef __SOC_TI_K3_RINGACC_API_H_
+#define __SOC_TI_K3_RINGACC_API_H_
+
+#include <linux/types.h>
+
+struct device_node;
+
+/**
+ * enum k3_ring_mode - &struct k3_ring_cfg mode
+ *
+ * RA ring operational modes
+ *
+ * @K3_RINGACC_RING_MODE_RING: Exposed Ring mode for SW direct access
+ * @K3_RINGACC_RING_MODE_MESSAGE: Messaging mode. Messaging mode requires
+ * that all accesses to the queue go through this IP so that all
+ * accesses to the memory are controlled and ordered. This IP then
+ * controls the entire state of the queue, and SW has no direct control,
+ * for example through doorbells, and cannot access the storage memory
+ * directly. This is particularly useful when more than one SW or HW
+ * entity can be the producer and/or consumer at the same time
+ * @K3_RINGACC_RING_MODE_CREDENTIALS: Credentials mode is message mode plus
+ * stores credentials with each message, requiring the element size to be
+ * doubled to fit the credentials. Any exposed memory should be protected
+ * by a firewall from unwanted access
+ */
+enum k3_ring_mode {
+ K3_RINGACC_RING_MODE_RING = 0,
+ K3_RINGACC_RING_MODE_MESSAGE,
+ K3_RINGACC_RING_MODE_CREDENTIALS,
+ K3_RINGACC_RING_MODE_INVALID
+};
+
+/**
+ * enum k3_ring_size - &struct k3_ring_cfg elm_size
+ *
+ * RA ring element's sizes in bytes.
+ */
+enum k3_ring_size {
+ K3_RINGACC_RING_ELSIZE_4 = 0,
+ K3_RINGACC_RING_ELSIZE_8,
+ K3_RINGACC_RING_ELSIZE_16,
+ K3_RINGACC_RING_ELSIZE_32,
+ K3_RINGACC_RING_ELSIZE_64,
+ K3_RINGACC_RING_ELSIZE_128,
+ K3_RINGACC_RING_ELSIZE_256,
+ K3_RINGACC_RING_ELSIZE_INVALID
+};
+
+struct k3_ringacc;
+struct k3_ring;
+
+/**
+ * struct k3_ring_cfg - RA ring configuration structure
+ *
+ * @size: Ring size, number of elements
+ * @elm_size: Ring element size
+ * @mode: Ring operational mode
+ * @flags: Ring configuration flags. Possible values:
+ * @K3_RINGACC_RING_SHARED: when set allows to request the same ring
+ * few times. It's usable when the same ring is used as Free Host PD ring
+ * for different flows, for example.
+ * Note: Locking should be done by consumer if required
+ */
+struct k3_ring_cfg {
+ u32 size;
+ enum k3_ring_size elm_size;
+ enum k3_ring_mode mode;
+#define K3_RINGACC_RING_SHARED BIT(1)
+ u32 flags;
+};
+
+#define K3_RINGACC_RING_ID_ANY (-1)
+
+/**
+ * of_k3_ringacc_get_by_phandle - find a RA by phandle property
+ * @np: device node
+ * @property: property name containing phandle on RA node
+ *
+ * Returns a pointer to the RA - struct k3_ringacc,
+ * or -ENODEV if not found,
+ * or -EPROBE_DEFER if not yet registered
+ */
+struct k3_ringacc *of_k3_ringacc_get_by_phandle(struct device_node *np,
+ const char *property);
+
+#define K3_RINGACC_RING_USE_PROXY BIT(1)
+
+/**
+ * k3_ringacc_request_ring - request ring from ringacc
+ * @ringacc: pointer on ringacc
+ * @id: ring id or K3_RINGACC_RING_ID_ANY for any general purpose ring
+ * @flags:
+ * @K3_RINGACC_RING_USE_PROXY: if set - proxy will be allocated and
+ * used to access ring memory. Supported only for rings in
+ * Message/Credentials/Queue mode.
+ *
+ * Returns a pointer to the ring (struct k3_ring)
+ * or NULL in case of failure.
+ */
+struct k3_ring *k3_ringacc_request_ring(struct k3_ringacc *ringacc,
+ int id, u32 flags);
+
+/**
+ * k3_ringacc_ring_reset - ring reset
+ * @ring: pointer to the ring
+ *
+ * Resets ring internal state ((hw)occ, (hw)idx).
+ */
+void k3_ringacc_ring_reset(struct k3_ring *ring);
+/**
+ * k3_ringacc_ring_reset_dma - ring reset for DMA rings
+ * @ring: pointer to the ring
+ * @occ: ring occupancy to set on reset
+ *
+ * Resets ring internal state ((hw)occ, (hw)idx). Should be used for rings
+ * which are read by K3 UDMA, like TX or Free Host PD rings.
+ */
+void k3_ringacc_ring_reset_dma(struct k3_ring *ring, u32 occ);
+
+/**
+ * k3_ringacc_ring_free - ring free
+ * @ring: pointer to the ring
+ *
+ * Resets the ring and frees all allocated resources.
+ */
+int k3_ringacc_ring_free(struct k3_ring *ring);
+
+/**
+ * k3_ringacc_get_ring_id - Get the Ring ID
+ * @ring: pointer to the ring
+ *
+ * Returns the Ring ID
+ */
+u32 k3_ringacc_get_ring_id(struct k3_ring *ring);
+
+/**
+ * k3_ringacc_get_ring_irq_num - Get the irq number for the ring
+ * @ring: pointer to the ring
+ *
+ * Returns the interrupt number which can be used to request the interrupt
+ */
+int k3_ringacc_get_ring_irq_num(struct k3_ring *ring);
+
+/**
+ * k3_ringacc_ring_cfg - ring configure
+ * @ring: pointer to the ring
+ * @cfg: Ring configuration parameters (see &struct k3_ring_cfg)
+ *
+ * Configures ring, including ring memory allocation.
+ * Returns 0 on success, errno otherwise.
+ */
+int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg);
+
+/**
+ * k3_ringacc_ring_get_size - get ring size
+ * @ring: pointer to the ring
+ *
+ * Returns ring size in number of elements.
+ */
+u32 k3_ringacc_ring_get_size(struct k3_ring *ring);
+
+/**
+ * k3_ringacc_ring_get_free - get free elements
+ * @ring: pointer to the ring
+ *
+ * Returns number of free elements in the ring.
+ */
+u32 k3_ringacc_ring_get_free(struct k3_ring *ring);
+
+/**
+ * k3_ringacc_ring_get_occ - get ring occupancy
+ * @ring: pointer to the ring
+ *
+ * Returns total number of valid entries on the ring
+ */
+u32 k3_ringacc_ring_get_occ(struct k3_ring *ring);
+
+/**
+ * k3_ringacc_ring_is_full - checks if ring is full
+ * @ring: pointer to the ring
+ *
+ * Returns true if the ring is full
+ */
+u32 k3_ringacc_ring_is_full(struct k3_ring *ring);
+
+/**
+ * k3_ringacc_ring_push - push element to the ring tail
+ * @ring: pointer to the ring
+ * @elem: pointer to the ring element buffer
+ *
+ * Push one ring element to the ring tail. Size of the ring element is
+ * determined by ring configuration &struct k3_ring_cfg elm_size.
+ *
+ * Returns 0 on success, errno otherwise.
+ */
+int k3_ringacc_ring_push(struct k3_ring *ring, void *elem);
+
+/**
+ * k3_ringacc_ring_pop - pop element from the ring head
+ * @ring: pointer to the ring
+ * @elem: pointer to the ring element buffer
+ *
+ * Pop one ring element from the ring head. Size of the ring element is
+ * determined by ring configuration &struct k3_ring_cfg elm_size.
+ *
+ * Returns 0 on success, errno otherwise.
+ */
+int k3_ringacc_ring_pop(struct k3_ring *ring, void *elem);
+
+/**
+ * k3_ringacc_ring_push_head - push element to the ring head
+ * @ring: pointer to the ring
+ * @elem: pointer to the ring element buffer
+ *
+ * Push one ring element to the ring head. Size of the ring element is
+ * determined by ring configuration &struct k3_ring_cfg elm_size.
+ *
+ * Returns 0 on success, errno otherwise.
+ * Not Supported by ring modes: K3_RINGACC_RING_MODE_RING
+ */
+int k3_ringacc_ring_push_head(struct k3_ring *ring, void *elem);
+
+/**
+ * k3_ringacc_ring_pop_tail - pop element from the ring tail
+ * @ring: pointer to the ring
+ * @elem: pointer to the ring element buffer
+ *
+ * Pop one ring element from the ring tail. Size of the ring element is
+ * determined by ring configuration &struct k3_ring_cfg elm_size.
+ *
+ * Returns 0 on success, errno otherwise.
+ * Not Supported by ring modes: K3_RINGACC_RING_MODE_RING
+ */
+int k3_ringacc_ring_pop_tail(struct k3_ring *ring, void *elem);
+
+u32 k3_ringacc_get_tisci_dev_id(struct k3_ring *ring);
+
+#endif /* __SOC_TI_K3_RINGACC_API_H_ */
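For orientation, here is a hedged consumer sketch of the API above (not part of the patch); the "ti,ringacc" devicetree property name and the 8-byte element layout are illustrative assumptions.

#include <linux/err.h>
#include <linux/of.h>
#include <linux/soc/ti/k3-ringacc.h>

static int example_ringacc_use(struct device_node *np)
{
	struct k3_ring_cfg cfg = {
		.size		= 128,
		.elm_size	= K3_RINGACC_RING_ELSIZE_8,
		.mode		= K3_RINGACC_RING_MODE_RING,
	};
	struct k3_ringacc *ra;
	struct k3_ring *ring;
	u64 elem = 0;
	int ret;

	ra = of_k3_ringacc_get_by_phandle(np, "ti,ringacc");
	if (IS_ERR(ra))
		return PTR_ERR(ra);	/* -ENODEV or -EPROBE_DEFER */

	ring = k3_ringacc_request_ring(ra, K3_RINGACC_RING_ID_ANY, 0);
	if (!ring)
		return -ENODEV;

	ret = k3_ringacc_ring_cfg(ring, &cfg);	/* allocates ring memory */
	if (ret)
		goto out_free;

	ret = k3_ringacc_ring_push(ring, &elem);	/* one 8-byte element */
	if (!ret)
		ret = k3_ringacc_ring_pop(ring, &elem);

out_free:
	k3_ringacc_ring_free(ring);
	return ret;
}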
* GPIO descriptors rather than using global GPIO numbers grabbed by the
* driver. This will fill in @cs_gpiods and @cs_gpios should not be used,
* and SPI devices will have the cs_gpiod assigned rather than cs_gpio.
+ * @unused_native_cs: When cs_gpiods is used, spi_register_controller() will
+ * fill in this field with the first unused native CS, to be used by SPI
+ * controller drivers that need to drive a native CS when using GPIO CS.
+ * @max_native_cs: When cs_gpiods is used, and this field is filled in,
+ * spi_register_controller() will validate all native CS (including the
+ * unused native CS) against this value.
* @statistics: statistics for the spi_controller
* @dma_tx: DMA transmit channel
* @dma_rx: DMA receive channel
int *cs_gpios;
struct gpio_desc **cs_gpiods;
bool use_gpio_descriptors;
+ u8 unused_native_cs;
+ u8 max_native_cs;
/* statistics */
struct spi_statistics statistics;
/* Helper calls for driver to timestamp transfer */
void spi_take_timestamp_pre(struct spi_controller *ctlr,
struct spi_transfer *xfer,
- const void *tx, bool irqs_off);
+ size_t progress, bool irqs_off);
void spi_take_timestamp_post(struct spi_controller *ctlr,
struct spi_transfer *xfer,
- const void *tx, bool irqs_off);
+ size_t progress, bool irqs_off);
/* the spi driver core manages memory for the spi_controller classdev */
extern struct spi_controller *__spi_alloc_controller(struct device *host,
* struct tiny_spi_platform_data - platform data of the OpenCores tiny SPI
* @freq: input clock freq to the core.
* @baudwidth: baud rate divider width of the core.
- * @gpio_cs_count: number of gpio pins used for chipselect.
- * @gpio_cs: array of gpio pins used for chipselect.
*
* freq and baudwidth are used only if the divider is programmable.
*/
struct tiny_spi_platform_data {
unsigned int freq;
unsigned int baudwidth;
- unsigned int gpio_cs_count;
- int *gpio_cs;
};
#endif /* _LINUX_SPI_SPI_OC_TINY_H */
extern void arch_suspend_enable_irqs(void);
extern int pm_suspend(suspend_state_t state);
+extern bool sync_on_suspend_enabled;
#else /* !CONFIG_SUSPEND */
#define suspend_valid_only_mem NULL
static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
+static inline bool sync_on_suspend_enabled(void) { return true; }
static inline bool idle_should_enter_s2idle(void) { return false; }
static inline void __init pm_states_init(void) {}
static inline void s2idle_set_ops(const struct platform_s2idle_ops *ops) {}
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * 10G controller driver for Samsung EXYNOS SoCs
+ * 10G controller driver for Samsung Exynos SoCs
*
* Copyright (C) 2013 Samsung Electronics Co., Ltd.
* http://www.samsung.com
/* Shift (rsh) a tnum right (by a fixed shift) */
struct tnum tnum_rshift(struct tnum a, u8 shift);
/* Shift (arsh) a tnum right (by a fixed min_shift) */
-struct tnum tnum_arshift(struct tnum a, u8 min_shift);
+struct tnum tnum_arshift(struct tnum a, u8 min_shift, u8 insn_bitness);
/* Add two tnums, return @a + @b */
struct tnum tnum_add(struct tnum a, struct tnum b);
/* Subtract two tnums, return @a - @b */
return xa->xa_flags & XA_FLAGS_MARK(mark);
}
+/**
+ * xa_for_each_range() - Iterate over a portion of an XArray.
+ * @xa: XArray.
+ * @index: Index of @entry.
+ * @entry: Entry retrieved from array.
+ * @start: First index to retrieve from array.
+ * @last: Last index to retrieve from array.
+ *
+ * During the iteration, @entry will have the value of the entry stored
+ * in @xa at @index. You may modify @index during the iteration if you
+ * want to skip or reprocess indices. It is safe to modify the array
+ * during the iteration. At the end of the iteration, @entry will be set
+ * to NULL and @index will have a value less than or equal to @last.
+ *
+ * xa_for_each_range() is O(n.log(n)) while xas_for_each() is O(n). You have
+ * to handle your own locking with xas_for_each(), and if you have to unlock
+ * after each iteration, it will also end up being O(n.log(n)).
+ * xa_for_each_range() will spin if it hits a retry entry; if you intend to
+ * see retry entries, you should use the xas_for_each() iterator instead.
+ * The xas_for_each() iterator will expand into more inline code than
+ * xa_for_each_range().
+ *
+ * Context: Any context. Takes and releases the RCU lock.
+ */
+#define xa_for_each_range(xa, index, entry, start, last) \
+ for (index = start, \
+ entry = xa_find(xa, &index, last, XA_PRESENT); \
+ entry; \
+ entry = xa_find_after(xa, &index, last, XA_PRESENT))
+
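As a hedged usage sketch (assuming an already-populated XArray), counting the present entries in a sub-range looks like this:

#include <linux/xarray.h>

static unsigned int example_count_range(struct xarray *xa)
{
	unsigned long index;
	unsigned int count = 0;
	void *entry;

	/* Visits only present entries with indices 16..31 inclusive. */
	xa_for_each_range(xa, index, entry, 16, 31)
		count++;

	return count;
}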
/**
* xa_for_each_start() - Iterate over a portion of an XArray.
* @xa: XArray.
*
* Context: Any context. Takes and releases the RCU lock.
*/
-#define xa_for_each_start(xa, index, entry, start) \
- for (index = start, \
- entry = xa_find(xa, &index, ULONG_MAX, XA_PRESENT); \
- entry; \
- entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT))
+#define xa_for_each_start(xa, index, entry, start) \
+ xa_for_each_range(xa, index, entry, start, ULONG_MAX)
/**
* xa_for_each() - Iterate over present entries in an XArray.
spin_lock_irqsave(&(xa)->xa_lock, flags)
#define xa_unlock_irqrestore(xa, flags) \
spin_unlock_irqrestore(&(xa)->xa_lock, flags)
+#define xa_lock_nested(xa, subclass) \
+ spin_lock_nested(&(xa)->xa_lock, subclass)
+#define xa_lock_bh_nested(xa, subclass) \
+ spin_lock_bh_nested(&(xa)->xa_lock, subclass)
+#define xa_lock_irq_nested(xa, subclass) \
+ spin_lock_irq_nested(&(xa)->xa_lock, subclass)
+#define xa_lock_irqsave_nested(xa, flags, subclass) \
+ spin_lock_irqsave_nested(&(xa)->xa_lock, flags, subclass)
/*
* Versions of the normal API which require the caller to hold the
*
* @start_radar_detection: Start radar detection in the driver.
*
+ * @end_cac: End running CAC, probably because a related CAC
+ * was finished on another phy.
+ *
* @update_ft_ies: Provide updated Fast BSS Transition information to the
* driver. If the SME is in the driver/firmware, this information can be
* used in building Authentication and Reassociation Request frames.
struct net_device *dev,
struct cfg80211_chan_def *chandef,
u32 cac_time_ms);
+ void (*end_cac)(struct wiphy *wiphy,
+ struct net_device *dev);
int (*update_ft_ies)(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_update_ft_ies_params *ftie);
int (*crit_proto_start)(struct wiphy *wiphy,
u32 region_max_snapshots,
u64 region_size);
void devlink_region_destroy(struct devlink_region *region);
-u32 devlink_region_shapshot_id_get(struct devlink *devlink);
+u32 devlink_region_snapshot_id_get(struct devlink *devlink);
int devlink_region_snapshot_create(struct devlink_region *region,
u8 *data, u32 snapshot_id,
devlink_snapshot_data_dest_t *data_destructor);
};
#define NF_FLOW_TIMEOUT (30 * HZ)
+#define nf_flowtable_time_stamp (u32)jiffies
+
+static inline __s32 nf_flow_timeout_delta(unsigned int timeout)
+{
+ return (__s32)(timeout - nf_flowtable_time_stamp);
+}
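A hedged sketch of how a caller can use the signed delta (a non-positive value means the timeout stamp has passed, with jiffies wraparound handled by the subtraction above):

static inline bool example_flow_timed_out(u32 flow_timeout)
{
	return nf_flow_timeout_delta(flow_timeout) <= 0;
}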
struct nf_flow_route {
struct {
struct netns_nftables {
struct list_head tables;
struct list_head commit_list;
+ struct list_head module_list;
struct mutex commit_mutex;
unsigned int base_seq;
u8 gencursor;
/* initialize ulp */
int (*init)(struct sock *sk);
/* update ulp */
- void (*update)(struct sock *sk, struct proto *p);
+ void (*update)(struct sock *sk, struct proto *p,
+ void (*write_space)(struct sock *sk));
/* cleanup ulp */
void (*release)(struct sock *sk);
/* diagnostic */
int tcp_set_ulp(struct sock *sk, const char *name);
void tcp_get_available_ulp(char *buf, size_t len);
void tcp_cleanup_ulp(struct sock *sk);
-void tcp_update_ulp(struct sock *sk, struct proto *p);
+void tcp_update_ulp(struct sock *sk, struct proto *p,
+ void (*write_space)(struct sock *sk));
#define MODULE_ALIAS_TCP_ULP(name) \
__MODULE_INFO(alias, alias_userspace, name); \
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * SiFive L2 Cache Controller header file
+ *
+ */
+
+#ifndef __SOC_SIFIVE_L2_CACHE_H
+#define __SOC_SIFIVE_L2_CACHE_H
+
+extern int register_sifive_l2_error_notifier(struct notifier_block *nb);
+extern int unregister_sifive_l2_error_notifier(struct notifier_block *nb);
+
+#define SIFIVE_L2_ERR_TYPE_CE 0
+#define SIFIVE_L2_ERR_TYPE_UE 1
+
+#endif /* __SOC_SIFIVE_L2_CACHE_H */
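A hedged consumer sketch (assumed names; the callback follows the generic notifier_block API, and the event code is one of the SIFIVE_L2_ERR_TYPE_* values above):

#include <linux/kernel.h>
#include <linux/notifier.h>

static int example_l2_err_event(struct notifier_block *nb,
				unsigned long event, void *data)
{
	if (event == SIFIVE_L2_ERR_TYPE_UE)
		pr_err("sifive-l2: uncorrectable error reported\n");
	return NOTIFY_OK;
}

static struct notifier_block example_l2_nb = {
	.notifier_call = example_l2_err_event,
};

/* ... register_sifive_l2_error_notifier(&example_l2_nb); ... */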
TRACE_EVENT(afs_lookup,
TP_PROTO(struct afs_vnode *dvnode, const struct qstr *name,
- struct afs_vnode *vnode),
+ struct afs_fid *fid),
- TP_ARGS(dvnode, name, vnode),
+ TP_ARGS(dvnode, name, fid),
TP_STRUCT__entry(
__field_struct(struct afs_fid, dfid )
TP_fast_assign(
int __len = min_t(int, name->len, 23);
__entry->dfid = dvnode->fid;
- if (vnode) {
- __entry->fid = vnode->fid;
- } else {
- __entry->fid.vid = 0;
- __entry->fid.vnode = 0;
- __entry->fid.unique = 0;
- }
+ __entry->fid = *fid;
memcpy(__entry->name, name->name, __len);
__entry->name[__len] = 0;
),
__entry->keys = b->keys.set[b->keys.nsets].data->keys;
),
- TP_printk("bucket %zu", __entry->bucket)
+ TP_printk("bucket %zu written block %u + %u",
+ __entry->bucket, __entry->block, __entry->keys)
);
DEFINE_EVENT(btree_node, bcache_btree_node_alloc,
EM( SCAN_ALLOC_HUGE_PAGE_FAIL, "alloc_huge_page_failed") \
EM( SCAN_CGROUP_CHARGE_FAIL, "ccgroup_charge_failed") \
EM( SCAN_EXCEED_SWAP_PTE, "exceed_swap_pte") \
- EMe(SCAN_TRUNCATED, "truncated") \
+ EM( SCAN_TRUNCATED, "truncated") \
+ EMe(SCAN_PAGE_HAS_PRIVATE, "page_has_private") \
#undef EM
#undef EMe
TP_ARGS(ip, parent_ip),
TP_STRUCT__entry(
- __field(u32, caller_offs)
- __field(u32, parent_offs)
+ __field(s32, caller_offs)
+ __field(s32, parent_offs)
),
TP_fast_assign(
- __entry->caller_offs = (u32)(ip - (unsigned long)_stext);
- __entry->parent_offs = (u32)(parent_ip - (unsigned long)_stext);
+ __entry->caller_offs = (s32)(ip - (unsigned long)_stext);
+ __entry->parent_offs = (s32)(parent_ip - (unsigned long)_stext);
),
TP_printk("caller=%pS parent=%pS",
TP_ARGS(dev, flags)
);
+DEFINE_EVENT(rpm_internal, rpm_usage,
+
+ TP_PROTO(struct device *dev, int flags),
+
+ TP_ARGS(dev, flags)
+);
TRACE_EVENT(rpm_return_int,
TP_PROTO(struct device *dev, unsigned long ip, int ret),
TP_PROTO(xen_mc_callback_fn_t fn, void *data),
TP_ARGS(fn, data),
TP_STRUCT__entry(
- __field(xen_mc_callback_fn_t, fn)
+ /*
+ * Use field_struct to avoid is_signed_type()
+ * comparison of a function pointer.
+ */
+ __field_struct(xen_mc_callback_fn_t, fn)
__field(void *, data)
),
TP_fast_assign(
#define PROT_WRITE 0x2 /* page can be written */
#define PROT_EXEC 0x4 /* page can be executed */
#define PROT_SEM 0x8 /* page may be used for atomic ops */
+/* 0x10 reserved for arch-specific use */
+/* 0x20 reserved for arch-specific use */
#define PROT_NONE 0x0 /* page can not be accessed */
#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
#define BCACHE_SB_MAX_VERSION 4
#define SB_SECTOR 8
+#define SB_OFFSET (SB_SECTOR << SECTOR_SHIFT)
#define SB_SIZE 4096
#define SB_LABEL_SIZE 32
#define SB_JOURNAL_BUCKETS 256U
#define BDEV_DATA_START_DEFAULT 16 /* sectors */
+struct cache_sb_disk {
+ __le64 csum;
+ __le64 offset; /* sector where this sb was written */
+ __le64 version;
+
+ __u8 magic[16];
+
+ __u8 uuid[16];
+ union {
+ __u8 set_uuid[16];
+ __le64 set_magic;
+ };
+ __u8 label[SB_LABEL_SIZE];
+
+ __le64 flags;
+ __le64 seq;
+ __le64 pad[8];
+
+ union {
+ struct {
+ /* Cache devices */
+ __le64 nbuckets; /* device size */
+
+ __le16 block_size; /* sectors */
+ __le16 bucket_size; /* sectors */
+
+ __le16 nr_in_set;
+ __le16 nr_this_dev;
+ };
+ struct {
+ /* Backing devices */
+ __le64 data_offset;
+
+ /*
+ * block_size from the cache device section is still used by
+ * backing devices, so don't add anything here until we fix
+ * things to not need it for backing devices anymore
+ */
+ };
+ };
+
+ __le32 last_mount; /* time overflow in y2106 */
+
+ __le16 first_bucket;
+ union {
+ __le16 njournal_buckets;
+ __le16 keys;
+ };
+ __le64 d[SB_JOURNAL_BUCKETS]; /* journal buckets */
+};
+
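Because the new on-disk structure uses fixed little-endian types, a kernel-side reader converts fields explicitly; a hedged sketch over a subset of the fields:

static void example_sb_from_disk(struct cache_sb *sb,
				 const struct cache_sb_disk *s)
{
	sb->offset	= le64_to_cpu(s->offset);
	sb->version	= le64_to_cpu(s->version);
	sb->seq		= le64_to_cpu(s->seq);
	sb->block_size	= le16_to_cpu(s->block_size);
	sb->bucket_size	= le16_to_cpu(s->bucket_size);
}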
struct cache_sb {
__u64 csum;
__u64 offset; /* sector where this sb was written */
/* The first byte of SFEATURE and GFEATURE is the report number */
#define HIDIOCSFEATURE(len) _IOC(_IOC_WRITE|_IOC_READ, 'H', 0x06, len)
#define HIDIOCGFEATURE(len) _IOC(_IOC_WRITE|_IOC_READ, 'H', 0x07, len)
+#define HIDIOCGRAWUNIQ(len) _IOC(_IOC_READ, 'H', 0x08, len)
#define HIDRAW_FIRST_MINOR 0
#define HIDRAW_MAX_DEVICES 64
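A hedged user-space sketch of the new ioctl (the device path is an example):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/hidraw.h>

int main(void)
{
	char uniq[64] = "";
	int fd = open("/dev/hidraw0", O_RDONLY);

	if (fd < 0)
		return 1;
	/* Fetch the device's unique identifier string, if any. */
	if (ioctl(fd, HIDIOCGRAWUNIQ(sizeof(uniq)), uniq) >= 0)
		printf("uniq: %s\n", uniq);
	close(fd);
	return 0;
}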
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#ifndef _USR_IDXD_H_
+#define _USR_IDXD_H_
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#else
+#include <stdint.h>
+#endif
+
+/* Descriptor flags */
+#define IDXD_OP_FLAG_FENCE 0x0001
+#define IDXD_OP_FLAG_BOF 0x0002
+#define IDXD_OP_FLAG_CRAV 0x0004
+#define IDXD_OP_FLAG_RCR 0x0008
+#define IDXD_OP_FLAG_RCI 0x0010
+#define IDXD_OP_FLAG_CRSTS 0x0020
+#define IDXD_OP_FLAG_CR 0x0080
+#define IDXD_OP_FLAG_CC 0x0100
+#define IDXD_OP_FLAG_ADDR1_TCS 0x0200
+#define IDXD_OP_FLAG_ADDR2_TCS 0x0400
+#define IDXD_OP_FLAG_ADDR3_TCS 0x0800
+#define IDXD_OP_FLAG_CR_TCS 0x1000
+#define IDXD_OP_FLAG_STORD 0x2000
+#define IDXD_OP_FLAG_DRDBK 0x4000
+#define IDXD_OP_FLAG_DSTS 0x8000
+
+/* Opcode */
+enum dsa_opcode {
+ DSA_OPCODE_NOOP = 0,
+ DSA_OPCODE_BATCH,
+ DSA_OPCODE_DRAIN,
+ DSA_OPCODE_MEMMOVE,
+ DSA_OPCODE_MEMFILL,
+ DSA_OPCODE_COMPARE,
+ DSA_OPCODE_COMPVAL,
+ DSA_OPCODE_CR_DELTA,
+ DSA_OPCODE_AP_DELTA,
+ DSA_OPCODE_DUALCAST,
+ DSA_OPCODE_CRCGEN = 0x10,
+ DSA_OPCODE_COPY_CRC,
+ DSA_OPCODE_DIF_CHECK,
+ DSA_OPCODE_DIF_INS,
+ DSA_OPCODE_DIF_STRP,
+ DSA_OPCODE_DIF_UPDT,
+ DSA_OPCODE_CFLUSH = 0x20,
+};
+
+/* Completion record status */
+enum dsa_completion_status {
+ DSA_COMP_NONE = 0,
+ DSA_COMP_SUCCESS,
+ DSA_COMP_SUCCESS_PRED,
+ DSA_COMP_PAGE_FAULT_NOBOF,
+ DSA_COMP_PAGE_FAULT_IR,
+ DSA_COMP_BATCH_FAIL,
+ DSA_COMP_BATCH_PAGE_FAULT,
+ DSA_COMP_DR_OFFSET_NOINC,
+ DSA_COMP_DR_OFFSET_ERANGE,
+ DSA_COMP_DIF_ERR,
+ DSA_COMP_BAD_OPCODE = 0x10,
+ DSA_COMP_INVALID_FLAGS,
+ DSA_COMP_NOZERO_RESERVE,
+ DSA_COMP_XFER_ERANGE,
+ DSA_COMP_DESC_CNT_ERANGE,
+ DSA_COMP_DR_ERANGE,
+ DSA_COMP_OVERLAP_BUFFERS,
+ DSA_COMP_DCAST_ERR,
+ DSA_COMP_DESCLIST_ALIGN,
+ DSA_COMP_INT_HANDLE_INVAL,
+ DSA_COMP_CRA_XLAT,
+ DSA_COMP_CRA_ALIGN,
+ DSA_COMP_ADDR_ALIGN,
+ DSA_COMP_PRIV_BAD,
+ DSA_COMP_TRAFFIC_CLASS_CONF,
+ DSA_COMP_PFAULT_RDBA,
+ DSA_COMP_HW_ERR1,
+ DSA_COMP_HW_ERR_DRB,
+ DSA_COMP_TRANSLATION_FAIL,
+};
+
+#define DSA_COMP_STATUS_MASK 0x7f
+#define DSA_COMP_STATUS_WRITE 0x80
+
+struct dsa_batch_desc {
+ uint32_t pasid:20;
+ uint32_t rsvd:11;
+ uint32_t priv:1;
+ uint32_t flags:24;
+ uint32_t opcode:8;
+ uint64_t completion_addr;
+ uint64_t desc_list_addr;
+ uint64_t rsvd1;
+ uint32_t desc_count;
+ uint16_t interrupt_handle;
+ uint16_t rsvd2;
+ uint8_t rsvd3[24];
+} __attribute__((packed));
+
+struct dsa_hw_desc {
+ uint32_t pasid:20;
+ uint32_t rsvd:11;
+ uint32_t priv:1;
+ uint32_t flags:24;
+ uint32_t opcode:8;
+ uint64_t completion_addr;
+ union {
+ uint64_t src_addr;
+ uint64_t rdback_addr;
+ uint64_t pattern;
+ };
+ union {
+ uint64_t dst_addr;
+ uint64_t rdback_addr2;
+ uint64_t src2_addr;
+ uint64_t comp_pattern;
+ };
+ uint32_t xfer_size;
+ uint16_t int_handle;
+ uint16_t rsvd1;
+ union {
+ uint8_t expected_res;
+ struct {
+ uint64_t delta_addr;
+ uint32_t max_delta_size;
+ };
+ uint32_t delta_rec_size;
+ uint64_t dest2;
+ /* CRC */
+ struct {
+ uint32_t crc_seed;
+ uint32_t crc_rsvd;
+ uint64_t seed_addr;
+ };
+ /* DIF check or strip */
+ struct {
+ uint8_t src_dif_flags;
+ uint8_t dif_chk_res;
+ uint8_t dif_chk_flags;
+ uint8_t dif_chk_res2[5];
+ uint32_t chk_ref_tag_seed;
+ uint16_t chk_app_tag_mask;
+ uint16_t chk_app_tag_seed;
+ };
+ /* DIF insert */
+ struct {
+ uint8_t dif_ins_res;
+ uint8_t dest_dif_flag;
+ uint8_t dif_ins_flags;
+ uint8_t dif_ins_res2[13];
+ uint32_t ins_ref_tag_seed;
+ uint16_t ins_app_tag_mask;
+ uint16_t ins_app_tag_seed;
+ };
+ /* DIF update */
+ struct {
+ uint8_t src_upd_flags;
+ uint8_t upd_dest_flags;
+ uint8_t dif_upd_flags;
+ uint8_t dif_upd_res[5];
+ uint32_t src_ref_tag_seed;
+ uint16_t src_app_tag_mask;
+ uint16_t src_app_tag_seed;
+ uint32_t dest_ref_tag_seed;
+ uint16_t dest_app_tag_mask;
+ uint16_t dest_app_tag_seed;
+ };
+
+ uint8_t op_specific[24];
+ };
+} __attribute__((packed));
+
+struct dsa_raw_desc {
+ uint64_t field[8];
+} __attribute__((packed));
+
+/*
+ * The status field will be modified by hardware, therefore it should be
+ * volatile to prevent the compiler from optimizing out reads.
+ */
+struct dsa_completion_record {
+ volatile uint8_t status;
+ union {
+ uint8_t result;
+ uint8_t dif_status;
+ };
+ uint16_t rsvd;
+ uint32_t bytes_completed;
+ uint64_t fault_addr;
+ union {
+ uint16_t delta_rec_size;
+ uint16_t crc_val;
+
+ /* DIF check & strip */
+ struct {
+ uint32_t dif_chk_ref_tag;
+ uint16_t dif_chk_app_tag_mask;
+ uint16_t dif_chk_app_tag;
+ };
+
+ /* DIF insert */
+ struct {
+ uint64_t dif_ins_res;
+ uint32_t dif_ins_ref_tag;
+ uint16_t dif_ins_app_tag_mask;
+ uint16_t dif_ins_app_tag;
+ };
+
+ /* DIF update */
+ struct {
+ uint32_t dif_upd_src_ref_tag;
+ uint16_t dif_upd_src_app_tag_mask;
+ uint16_t dif_upd_src_app_tag;
+ uint32_t dif_upd_dest_ref_tag;
+ uint16_t dif_upd_dest_app_tag_mask;
+ uint16_t dif_upd_dest_app_tag;
+ };
+
+ uint8_t op_specific[16];
+ };
+} __attribute__((packed));
+
+struct dsa_raw_completion_record {
+ uint64_t field[4];
+} __attribute__((packed));
+
+#endif
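A hedged sketch of filling one hardware descriptor for a memory move, requesting a completion record (the helper name and the policy of always requesting completions are assumptions):

static void example_fill_memmove(struct dsa_hw_desc *hw, uint64_t src,
				 uint64_t dst, uint32_t len,
				 uint64_t compl_addr)
{
	hw->opcode = DSA_OPCODE_MEMMOVE;
	/* Request a completion record and write it on completion. */
	hw->flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR;
	hw->completion_addr = compl_addr;
	hw->src_addr = src;
	hw->dst_addr = dst;
	hw->xfer_size = len;
}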
__kernel_ulong_t __sec;
#if defined(__sparc__) && defined(__arch64__)
unsigned int __usec;
+ unsigned int __pad;
#else
__kernel_ulong_t __usec;
#endif
struct io_uring_files_update {
__u32 offset;
- __s32 *fds;
+ __u32 resv;
+ __aligned_u64 /* __s32 * */ fds;
};
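With fds now a 64-bit field, user space packs the pointer by value instead of storing it directly; a hedged sketch:

static void example_prepare_files_update(struct io_uring_files_update *up,
					 __s32 *fds)
{
	up->offset = 0;
	up->resv = 0;			/* must be zero */
	up->fds = (unsigned long)fds;	/* pointer carried in __aligned_u64 */
}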
#endif
* bigger than MAX_ORDER unless SPARSEMEM.
*/
page_ext_init_flatmem();
+ init_debug_pagealloc();
report_meminit();
mem_init();
kmem_cache_init();
*/
static void cgroup_bpf_release(struct work_struct *work)
{
- struct cgroup *cgrp = container_of(work, struct cgroup,
- bpf.release_work);
+ struct cgroup *p, *cgrp = container_of(work, struct cgroup,
+ bpf.release_work);
enum bpf_cgroup_storage_type stype;
struct bpf_prog_array *old_array;
unsigned int type;
mutex_unlock(&cgroup_mutex);
+ for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
+ cgroup_bpf_put(p);
+
percpu_ref_exit(&cgrp->bpf.refcnt);
cgroup_put(cgrp);
}
*/
#define NR ARRAY_SIZE(cgrp->bpf.effective)
struct bpf_prog_array *arrays[NR] = {};
+ struct cgroup *p;
int ret, i;
ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0,
if (ret)
return ret;
+ for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
+ cgroup_bpf_get(p);
+
for (i = 0; i < NR; i++)
INIT_LIST_HEAD(&cgrp->bpf.progs[i]);
return TNUM(a.value >> shift, a.mask >> shift);
}
-struct tnum tnum_arshift(struct tnum a, u8 min_shift)
+struct tnum tnum_arshift(struct tnum a, u8 min_shift, u8 insn_bitness)
{
/* if a.value is negative, arithmetic shifting by minimum shift
* will have larger negative offset compared to more shifting.
* If a.value is nonnegative, arithmetic shifting by minimum shift
 * will have larger positive offset compared to more shifting.
*/
- return TNUM((s64)a.value >> min_shift, (s64)a.mask >> min_shift);
+ if (insn_bitness == 32)
+ return TNUM((u32)(((s32)a.value) >> min_shift),
+ (u32)(((s32)a.mask) >> min_shift));
+ else
+ return TNUM((s64)a.value >> min_shift,
+ (s64)a.mask >> min_shift);
}
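A worked illustration of the new bitness handling (hand-checked values, using the TNUM(value, mask) notation from above):

/*
 * tnum_arshift(TNUM(0x80000000, 0), 1, 32) yields value 0xc0000000:
 * the low word's sign bit is replicated via the (s32) cast.
 * tnum_arshift(TNUM(0x80000000, 0), 1, 64) yields value 0x40000000:
 * as an s64 the input is positive, so no sign bits are shifted in.
 */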
struct tnum tnum_add(struct tnum a, struct tnum b)
/* Upon reaching here, src_known is true and
* umax_val is equal to umin_val.
*/
- dst_reg->smin_value >>= umin_val;
- dst_reg->smax_value >>= umin_val;
- dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val);
+ if (insn_bitness == 32) {
+ dst_reg->smin_value = (u32)(((s32)dst_reg->smin_value) >> umin_val);
+ dst_reg->smax_value = (u32)(((s32)dst_reg->smax_value) >> umin_val);
+ } else {
+ dst_reg->smin_value >>= umin_val;
+ dst_reg->smax_value >>= umin_val;
+ }
+
+ dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val,
+ insn_bitness);
/* blow away the dst_reg umin_value/umax_value and rely on
* dst_reg var_off to refine the result.
static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
struct bpf_reg_state *regs = cur_regs(env);
+ static const int ctx_reg = BPF_REG_6;
u8 mode = BPF_MODE(insn->code);
int i, err;
}
/* check whether implicit source operand (register R6) is readable */
- err = check_reg_arg(env, BPF_REG_6, SRC_OP);
+ err = check_reg_arg(env, ctx_reg, SRC_OP);
if (err)
return err;
return -EINVAL;
}
- if (regs[BPF_REG_6].type != PTR_TO_CTX) {
+ if (regs[ctx_reg].type != PTR_TO_CTX) {
verbose(env,
"at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
return -EINVAL;
return err;
}
+	err = check_ctx_reg(env, &regs[ctx_reg], ctx_reg);
+ if (err < 0)
+ return err;
+
/* reset caller saved regs to unreadable */
for (i = 0; i < CALLER_SAVED_REGS; i++) {
mark_reg_not_init(env, regs, caller_saved[i]);
}
EXPORT_SYMBOL(__cpuhp_remove_state);
+#ifdef CONFIG_HOTPLUG_SMT
+static void cpuhp_offline_cpu_device(unsigned int cpu)
+{
+ struct device *dev = get_cpu_device(cpu);
+
+ dev->offline = true;
+ /* Tell user space about the state change */
+ kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
+}
+
+static void cpuhp_online_cpu_device(unsigned int cpu)
+{
+ struct device *dev = get_cpu_device(cpu);
+
+ dev->offline = false;
+ /* Tell user space about the state change */
+ kobject_uevent(&dev->kobj, KOBJ_ONLINE);
+}
+
+int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
+{
+ int cpu, ret = 0;
+
+ cpu_maps_update_begin();
+ for_each_online_cpu(cpu) {
+ if (topology_is_primary_thread(cpu))
+ continue;
+ ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
+ if (ret)
+ break;
+ /*
+ * As this needs to hold the cpu maps lock it's impossible
+ * to call device_offline() because that ends up calling
+ * cpu_down() which takes cpu maps lock. cpu maps lock
+ * needs to be held as this might race against in kernel
+ * abusers of the hotplug machinery (thermal management).
+ *
+ * So nothing would update device:offline state. That would
+ * leave the sysfs entry stale and prevent onlining after
+ * smt control has been changed to 'off' again. This is
+ * called under the sysfs hotplug lock, so it is properly
+ * serialized against the regular offline usage.
+ */
+ cpuhp_offline_cpu_device(cpu);
+ }
+ if (!ret)
+ cpu_smt_control = ctrlval;
+ cpu_maps_update_done();
+ return ret;
+}
+
+int cpuhp_smt_enable(void)
+{
+ int cpu, ret = 0;
+
+ cpu_maps_update_begin();
+ cpu_smt_control = CPU_SMT_ENABLED;
+ for_each_present_cpu(cpu) {
+ /* Skip online CPUs and CPUs on offline nodes */
+ if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
+ continue;
+ ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
+ if (ret)
+ break;
+ /* See comment in cpuhp_smt_disable() */
+ cpuhp_online_cpu_device(cpu);
+ }
+ cpu_maps_update_done();
+ return ret;
+}
+#endif
+
#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
static ssize_t show_cpuhp_state(struct device *dev,
struct device_attribute *attr, char *buf)
#ifdef CONFIG_HOTPLUG_SMT
-static void cpuhp_offline_cpu_device(unsigned int cpu)
-{
- struct device *dev = get_cpu_device(cpu);
-
- dev->offline = true;
- /* Tell user space about the state change */
- kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
-}
-
-static void cpuhp_online_cpu_device(unsigned int cpu)
-{
- struct device *dev = get_cpu_device(cpu);
-
- dev->offline = false;
- /* Tell user space about the state change */
- kobject_uevent(&dev->kobj, KOBJ_ONLINE);
-}
-
-int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
-{
- int cpu, ret = 0;
-
- cpu_maps_update_begin();
- for_each_online_cpu(cpu) {
- if (topology_is_primary_thread(cpu))
- continue;
- ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
- if (ret)
- break;
- /*
- * As this needs to hold the cpu maps lock it's impossible
- * to call device_offline() because that ends up calling
- * cpu_down() which takes cpu maps lock. cpu maps lock
- * needs to be held as this might race against in kernel
- * abusers of the hotplug machinery (thermal management).
- *
- * So nothing would update device:offline state. That would
- * leave the sysfs entry stale and prevent onlining after
- * smt control has been changed to 'off' again. This is
- * called under the sysfs hotplug lock, so it is properly
- * serialized against the regular offline usage.
- */
- cpuhp_offline_cpu_device(cpu);
- }
- if (!ret)
- cpu_smt_control = ctrlval;
- cpu_maps_update_done();
- return ret;
-}
-
-int cpuhp_smt_enable(void)
-{
- int cpu, ret = 0;
-
- cpu_maps_update_begin();
- cpu_smt_control = CPU_SMT_ENABLED;
- for_each_present_cpu(cpu) {
- /* Skip online CPUs and CPUs on offline nodes */
- if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
- continue;
- ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
- if (ret)
- break;
- /* See comment in cpuhp_smt_disable() */
- cpuhp_online_cpu_device(cpu);
- }
- cpu_maps_update_done();
- return ret;
-}
-
-
static ssize_t
__store_smt_control(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
put_cred(cred);
#ifdef CONFIG_KEYS_REQUEST_CACHE
- key_put(current->cached_requested_key);
- current->cached_requested_key = NULL;
+ key_put(tsk->cached_requested_key);
+ tsk->cached_requested_key = NULL;
#endif
}
}
}
- if (perf_need_aux_event(event) && !perf_get_aux_event(event, group_leader))
+ if (perf_need_aux_event(event) && !perf_get_aux_event(event, group_leader)) {
+ err = -EINVAL;
goto err_locked;
+ }
/*
* Must be under the same ctx::mutex as perf_install_in_context(),
#endif
#ifdef __ARCH_WANT_SYS_CLONE3
+
+/*
+ * copy_thread implementations handle CLONE_SETTLS by reading the TLS value from
+ * the registers containing the syscall arguments for clone. This doesn't work
+ * with clone3 since the TLS value is passed in clone_args instead.
+ */
+#ifndef CONFIG_HAVE_COPY_THREAD_TLS
+#error clone3 requires copy_thread_tls support in arch
+#endif
+
noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs,
struct clone_args __user *uargs,
size_t usize)
/**
* wait_for_owner_exiting - Block until the owner has exited
+ * @ret: owner's current futex lock status
* @exiting: Pointer to the exiting task
*
* Caller must hold a refcount on @exiting.
kimage_terminate(image);
+ ret = machine_kexec_post_load(image);
+ if (ret)
+ goto out;
+
/* Install the new kernel and uninstall the old */
image = xchg(dest_image, image);
kimage_free_page_list(&image->unusable_pages);
}
+
+int __weak machine_kexec_post_load(struct kimage *image)
+{
+ return 0;
+}
+
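An architecture override of the __weak stub above might look like this hedged sketch (the fixup itself is arch-specific and only indicated by a comment):

int machine_kexec_post_load(struct kimage *image)
{
	/*
	 * e.g. clean the copied kernel segments to the point of coherency
	 * before machine_kexec() jumps to them.
	 */
	return 0;
}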
void kimage_terminate(struct kimage *image)
{
if (*image->entry != 0)
* CPU hotplug again; so re-enable it here.
*/
cpu_hotplug_enable();
- pr_emerg("Starting new kernel\n");
+ pr_notice("Starting new kernel\n");
machine_shutdown();
}
kimage_terminate(image);
+ ret = machine_kexec_post_load(image);
+ if (ret)
+ goto out;
+
/*
* Free up any temporary buffers allocated which are not needed
* after image has been loaded
int kimage_is_destination_range(struct kimage *image,
unsigned long start, unsigned long end);
+int machine_kexec_post_load(struct kimage *image);
+
extern struct mutex kexec_mutex;
#ifdef CONFIG_KEXEC_FILE
struct lock_trace *trace, *t2;
struct hlist_head *hash_head;
u32 hash;
- unsigned int max_entries;
+ int max_entries;
BUILD_BUG_ON_NOT_POWER_OF_2(STACK_TRACE_HASH_SIZE);
BUILD_BUG_ON(LOCK_TRACE_SIZE_IN_LONGS >= MAX_STACK_TRACE_ENTRIES);
trace = (struct lock_trace *)(stack_trace + nr_stack_trace_entries);
max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries -
LOCK_TRACE_SIZE_IN_LONGS;
- trace->nr_entries = stack_trace_save(trace->entries, max_entries, 3);
- if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES -
- LOCK_TRACE_SIZE_IN_LONGS - 1) {
+ if (max_entries <= 0) {
if (!debug_locks_off_graph_unlock())
return NULL;
return NULL;
}
+ trace->nr_entries = stack_trace_save(trace->entries, max_entries, 3);
hash = jhash(trace->entries, trace->nr_entries *
sizeof(trace->entries[0]), 0);
* In this case, we attempt to acquire the lock again
* without sleeping.
*/
- if ((wstate == WRITER_HANDOFF) &&
- (rwsem_spin_on_owner(sem, 0) == OWNER_NULL))
+ if (wstate == WRITER_HANDOFF &&
+ rwsem_spin_on_owner(sem, RWSEM_NONSPINNABLE) == OWNER_NULL)
goto trylock_again;
/* Block until there are no active lockers. */
Skip the kernel sys_sync() before freezing user processes.
Some systems prefer not to pay this cost on every invocation
of suspend, or they are content with invoking sync() from
- user-space before invoking suspend. Say Y if that's your case.
+ user-space before invoking suspend. There's a run-time switch
+ at '/sys/power/sync_on_suspend' to configure this behaviour.
+	  This setting changes the default for the run-time switch. Say Y
+ to change the default to disable the kernel sys_sync().
config HIBERNATE_CALLBACKS
bool
* Copyright (C) 2012 Bojan Smojver <bojan@rexursive.com>
*/
-#define pr_fmt(fmt) "PM: " fmt
+#define pr_fmt(fmt) "PM: hibernation: " fmt
#include <linux/export.h>
#include <linux/suspend.h>
#ifdef CONFIG_PM_DEBUG
static void hibernation_debug_sleep(void)
{
- pr_info("hibernation debug: Waiting for 5 seconds.\n");
+ pr_info("debug: Waiting for 5 seconds.\n");
mdelay(5000);
}
error = dpm_suspend_end(PMSG_FREEZE);
if (error) {
- pr_err("Some devices failed to power down, aborting hibernation\n");
+ pr_err("Some devices failed to power down, aborting\n");
return error;
}
error = syscore_suspend();
if (error) {
- pr_err("Some system devices failed to power down, aborting hibernation\n");
+ pr_err("Some system devices failed to power down, aborting\n");
goto Enable_irqs;
}
restore_processor_state();
trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, false);
if (error)
- pr_err("Error %d creating hibernation image\n", error);
+ pr_err("Error %d creating image\n", error);
if (!in_suspend) {
events_check_enabled = false;
if (!error)
hibernation_restore(flags & SF_PLATFORM_MODE);
- pr_err("Failed to load hibernation image, recovering.\n");
+ pr_err("Failed to load image, recovering.\n");
swsusp_free();
free_basic_memory_bitmaps();
Unlock:
else
flags |= SF_CRC32_MODE;
- pm_pr_dbg("Writing image.\n");
+ pm_pr_dbg("Writing hibernation image.\n");
error = swsusp_write(flags);
swsusp_free();
if (!error) {
in_suspend = 0;
pm_restore_gfp_mask();
} else {
- pm_pr_dbg("Image restored successfully.\n");
+ pm_pr_dbg("Hibernation image restored successfully.\n");
}
Free_bitmaps:
goto Close_Finish;
}
- pm_pr_dbg("Preparing processes for restore.\n");
+ pm_pr_dbg("Preparing processes for hibernation restore.\n");
error = freeze_processes();
if (error)
goto Close_Finish;
Finish:
__pm_notifier_call_chain(PM_POST_RESTORE, nr_calls, NULL);
pm_restore_console();
- pr_info("resume from hibernation failed (%d)\n", error);
+ pr_info("resume failed (%d)\n", error);
atomic_inc(&snapshot_device_available);
/* For success case, the suspend path will release the lock */
Unlock:
lock_system_sleep();
swsusp_resume_device = res;
unlock_system_sleep();
- pm_pr_dbg("Configured resume from disk to %u\n", swsusp_resume_device);
+ pm_pr_dbg("Configured hibernation resume from disk to %u\n",
+ swsusp_resume_device);
noresume = 0;
software_resume();
return n;
}
power_attr(mem_sleep);
+
+/*
+ * sync_on_suspend: invoke ksys_sync_helper() before suspend.
+ *
+ * show() returns whether ksys_sync_helper() is invoked before suspend.
+ * store() accepts 0 or 1. 0 disables ksys_sync_helper() and 1 enables it.
+ */
+bool sync_on_suspend_enabled = !IS_ENABLED(CONFIG_SUSPEND_SKIP_SYNC);
+
+static ssize_t sync_on_suspend_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", sync_on_suspend_enabled);
+}
+
+static ssize_t sync_on_suspend_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned long val;
+
+ if (kstrtoul(buf, 10, &val))
+ return -EINVAL;
+
+ if (val > 1)
+ return -EINVAL;
+
+ sync_on_suspend_enabled = !!val;
+ return n;
+}
+
+power_attr(sync_on_suspend);
#endif /* CONFIG_SUSPEND */
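A hedged user-space sketch of flipping the new run-time switch (path taken from the attribute above):

#include <fcntl.h>
#include <unistd.h>

static int example_disable_sync_on_suspend(void)
{
	int fd = open("/sys/power/sync_on_suspend", O_WRONLY);
	int ret = 0;

	if (fd < 0)
		return -1;
	if (write(fd, "0", 1) != 1)	/* writing "1" re-enables the sync */
		ret = -1;
	close(fd);
	return ret;
}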
#ifdef CONFIG_PM_SLEEP_DEBUG
&wakeup_count_attr.attr,
#ifdef CONFIG_SUSPEND
&mem_sleep_attr.attr,
+ &sync_on_suspend_attr.attr,
#endif
#ifdef CONFIG_PM_AUTOSLEEP
&autosleep_attr.attr,
* Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
*/
-#define pr_fmt(fmt) "PM: " fmt
+#define pr_fmt(fmt) "PM: hibernation: " fmt
#include <linux/version.h>
#include <linux/module.h>
void clear_free_pages(void)
{
-#ifdef CONFIG_PAGE_POISONING_ZERO
struct memory_bitmap *bm = free_pages_map;
unsigned long pfn;
if (WARN_ON(!(free_pages_map)))
return;
- memory_bm_position_reset(bm);
- pfn = memory_bm_next_pfn(bm);
- while (pfn != BM_END_OF_MAP) {
- if (pfn_valid(pfn))
- clear_highpage(pfn_to_page(pfn));
-
+ if (IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) || want_init_on_free()) {
+ memory_bm_position_reset(bm);
pfn = memory_bm_next_pfn(bm);
+ while (pfn != BM_END_OF_MAP) {
+ if (pfn_valid(pfn))
+ clear_highpage(pfn_to_page(pfn));
+
+ pfn = memory_bm_next_pfn(bm);
+ }
+ memory_bm_position_reset(bm);
+ pr_info("free pages cleared after restore\n");
}
- memory_bm_position_reset(bm);
- pr_info("free pages cleared after restore\n");
-#endif /* PAGE_POISONING_ZERO */
}
/**
*/
static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
{
- x *= multiplier;
- do_div(x, base);
- return (unsigned long)x;
+ return div64_u64(x * multiplier, base);
}
static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
ktime_t start, stop;
int error;
- pr_info("Preallocating image memory... ");
+ pr_info("Preallocating image memory\n");
start = ktime_get();
error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
- if (error)
+ if (error) {
+ pr_err("Cannot allocate original bitmap\n");
goto err_out;
+ }
error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
- if (error)
+ if (error) {
+ pr_err("Cannot allocate copy bitmap\n");
goto err_out;
+ }
alloc_normal = 0;
alloc_highmem = 0;
alloc -= pages;
pages += pages_highmem;
pages_highmem = preallocate_image_highmem(alloc);
- if (pages_highmem < alloc)
+ if (pages_highmem < alloc) {
+ pr_err("Image allocation is %lu pages short\n",
+ alloc - pages_highmem);
goto err_out;
+ }
pages += pages_highmem;
/*
* size is the desired number of saveable pages to leave in
out:
stop = ktime_get();
- pr_cont("done (allocated %lu pages)\n", pages);
+ pr_info("Allocated %lu pages for snapshot\n", pages);
swsusp_show_speed(start, stop, pages, "Allocated");
return 0;
err_out:
- pr_cont("\n");
swsusp_free();
return -ENOMEM;
}
{
unsigned int nr_pages, nr_highmem;
- pr_info("Creating hibernation image:\n");
+ pr_info("Creating image:\n");
drain_local_pages(NULL);
nr_pages = count_data_pages();
nr_copy_pages = nr_pages;
nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
- pr_info("Hibernation image created (%d pages copied)\n", nr_pages);
+ pr_info("Image created (%d pages copied)\n", nr_pages);
return 0;
}
if (state == PM_SUSPEND_TO_IDLE)
s2idle_begin();
- if (!IS_ENABLED(CONFIG_SUSPEND_SKIP_SYNC)) {
+ if (sync_on_suspend_enabled) {
trace_suspend_resume(TPS("sync_filesystems"), 0, true);
ksys_sync_helper();
trace_suspend_resume(TPS("sync_filesystems"), 0, false);
static char info_test[] __initdata =
KERN_INFO "PM: test RTC wakeup from '%s' suspend\n";
- unsigned long now;
+ time64_t now;
struct rtc_wkalrm alm;
int status;
printk(err_readtime, dev_name(&rtc->dev), status);
return;
}
- rtc_tm_to_time(&alm.time, &now);
+ now = rtc_tm_to_time64(&alm.time);
memset(&alm, 0, sizeof alm);
- rtc_time_to_tm(now + TEST_SUSPEND_SECONDS, &alm.time);
+ rtc_time64_to_tm(now + TEST_SUSPEND_SECONDS, &alm.time);
alm.enabled = true;
status = rtc_set_alarm(rtc, &alm);
return ret;
}
-static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
+static bool ptrace_has_cap(const struct cred *cred, struct user_namespace *ns,
+ unsigned int mode)
{
+ int ret;
+
if (mode & PTRACE_MODE_NOAUDIT)
- return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE);
+ ret = security_capable(cred, ns, CAP_SYS_PTRACE, CAP_OPT_NOAUDIT);
else
- return has_ns_capability(current, ns, CAP_SYS_PTRACE);
+ ret = security_capable(cred, ns, CAP_SYS_PTRACE, CAP_OPT_NONE);
+
+ return ret == 0;
}
/* Returns 0 on success, -errno on denial. */
gid_eq(caller_gid, tcred->sgid) &&
gid_eq(caller_gid, tcred->gid))
goto ok;
- if (ptrace_has_cap(tcred->user_ns, mode))
+ if (ptrace_has_cap(cred, tcred->user_ns, mode))
goto ok;
rcu_read_unlock();
return -EPERM;
mm = task->mm;
if (mm &&
((get_dumpable(mm) != SUID_DUMP_USER) &&
- !ptrace_has_cap(mm->user_ns, mode)))
+ !ptrace_has_cap(cred, mm->user_ns, mode)))
return -EPERM;
return security_ptrace_access_check(task, mode);
int ret;
if (flags & RSEQ_FLAG_UNREGISTER) {
+ if (flags & ~RSEQ_FLAG_UNREGISTER)
+ return -EINVAL;
/* Unregister rseq for current thread. */
if (current->rseq != rseq || !current->rseq)
return -EINVAL;
#ifdef CONFIG_COMPAT
COMPAT_SYS_NI(timer_create);
+#endif
+
+#if defined(CONFIG_COMPAT) || defined(CONFIG_ALPHA)
COMPAT_SYS_NI(getitimer);
COMPAT_SYS_NI(setitimer);
#endif
/*
* Do a quick check without holding jiffies_lock:
+ * The READ_ONCE() pairs with two updates done later in this function.
*/
- delta = ktime_sub(now, last_jiffies_update);
+ delta = ktime_sub(now, READ_ONCE(last_jiffies_update));
if (delta < tick_period)
return;
if (delta >= tick_period) {
delta = ktime_sub(delta, tick_period);
- last_jiffies_update = ktime_add(last_jiffies_update,
- tick_period);
+ /* Pairs with the lockless read in this function. */
+ WRITE_ONCE(last_jiffies_update,
+ ktime_add(last_jiffies_update, tick_period));
/* Slow path for long timeouts */
if (unlikely(delta >= tick_period)) {
ticks = ktime_divns(delta, incr);
- last_jiffies_update = ktime_add_ns(last_jiffies_update,
- incr * ticks);
+ /* Pairs with the lockless read in this function. */
+ WRITE_ONCE(last_jiffies_update,
+ ktime_add_ns(last_jiffies_update,
+ incr * ticks));
}
do_timer(++ticks);
return 0;
}
+/*
+ * Not all archs define MCOUNT_INSN_SIZE which is used to look for direct
+ * functions. But those archs currently don't support direct functions
+ * anyway, and ftrace_find_rec_direct() is just a stub for them.
+ * Define MCOUNT_INSN_SIZE to keep those archs compiling.
+ */
+#ifndef MCOUNT_INSN_SIZE
+/* Make sure this only works without direct calls */
+# ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+# error MCOUNT_INSN_SIZE not defined with direct calls enabled
+# endif
+# define MCOUNT_INSN_SIZE 0
+#endif
+
int function_graph_enter(unsigned long ret, unsigned long func,
unsigned long frame_pointer, unsigned long *retp)
{
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- avg = rec->time;
- do_div(avg, rec->counter);
+ avg = div64_ul(rec->time, rec->counter);
if (tracing_thresh && (avg < tracing_thresh))
goto out;
#endif
* Divide only 1000 for ns^2 -> us^2 conversion.
* trace_print_graph_duration will divide 1000 again.
*/
- do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
+ stddev = div64_ul(stddev,
+ rec->counter * (rec->counter - 1) * 1000);
}
trace_seq_init(&s);
{
/* sched_clock_stable() is determined in late_initcall */
if (!trace_boot_clock && !sched_clock_stable()) {
+ if (security_locked_down(LOCKDOWN_TRACEFS)) {
+ pr_warn("Can not set tracing clock due to lockdown\n");
+ return -EPERM;
+ }
+
printk(KERN_WARNING
"Unstable clock detected, switching default tracing clock to \"global\"\n"
"If you want to keep using the local clock, then add:\n"
struct ftrace_event_field *field;
unsigned long flags;
hist_field_fn_t fn;
+ unsigned int ref;
unsigned int size;
unsigned int offset;
unsigned int is_signed;
struct event_trigger_data *test;
struct hist_field *hist_field;
+ lockdep_assert_held(&event_mutex);
+
hist_field = find_var_field(hist_data, var_name);
if (hist_field)
return hist_field;
- list_for_each_entry_rcu(test, &file->triggers, list) {
+ list_for_each_entry(test, &file->triggers, list) {
if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
test_data = test->private_data;
hist_field = find_var_field(test_data, var_name);
struct event_trigger_data *test;
struct hist_field *hist_field;
- list_for_each_entry_rcu(test, &file->triggers, list) {
+ lockdep_assert_held(&event_mutex);
+
+ list_for_each_entry(test, &file->triggers, list) {
if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
test_data = test->private_data;
hist_field = find_var_field(test_data, var_name);
return field_op;
}
+static void get_hist_field(struct hist_field *hist_field)
+{
+ hist_field->ref++;
+}
+
static void __destroy_hist_field(struct hist_field *hist_field)
{
+ if (--hist_field->ref > 1)
+ return;
+
kfree(hist_field->var.name);
kfree(hist_field->name);
kfree(hist_field->type);
if (!hist_field)
return NULL;
+ hist_field->ref = 1;
+
hist_field->hist_data = hist_data;
if (flags & HIST_FIELD_FL_EXPR || flags & HIST_FIELD_FL_ALIAS)
{
unsigned long flags = HIST_FIELD_FL_VAR_REF;
struct hist_field *ref_field;
+ int i;
+
+ /* Check if the variable already exists */
+ for (i = 0; i < hist_data->n_var_refs; i++) {
+ ref_field = hist_data->var_refs[i];
+ if (ref_field->var.idx == var_field->var.idx &&
+ ref_field->var.hist_data == var_field->hist_data) {
+ get_hist_field(ref_field);
+ return ref_field;
+ }
+ }
ref_field = create_hist_field(var_field->hist_data, NULL, flags, NULL);
if (ref_field) {
{
struct event_trigger_data *test;
- list_for_each_entry_rcu(test, &file->triggers, list) {
+ lockdep_assert_held(&event_mutex);
+
+ list_for_each_entry(test, &file->triggers, list) {
if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
if (test->private_data == hist_data)
return test->filter_str;
struct event_trigger_data *test;
unsigned int n_keys;
+ lockdep_assert_held(&event_mutex);
+
n_keys = target_hist_data->n_fields - target_hist_data->n_vals;
- list_for_each_entry_rcu(test, &file->triggers, list) {
+ list_for_each_entry(test, &file->triggers, list) {
if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
hist_data = test->private_data;
goto out_unlock;
}
- list_for_each_entry_rcu(data, &event_file->triggers, list) {
+ list_for_each_entry(data, &event_file->triggers, list) {
if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
hist_trigger_show(m, data, n++);
}
if (hist_data->attrs->name && !named_data)
goto new;
- list_for_each_entry_rcu(test, &file->triggers, list) {
+ lockdep_assert_held(&event_mutex);
+
+ list_for_each_entry(test, &file->triggers, list) {
if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
if (!hist_trigger_match(data, test, named_data, false))
continue;
struct event_trigger_data *test, *named_data = NULL;
bool match = false;
+ lockdep_assert_held(&event_mutex);
+
if (hist_data->attrs->name)
named_data = find_named_trigger(hist_data->attrs->name);
- list_for_each_entry_rcu(test, &file->triggers, list) {
+ list_for_each_entry(test, &file->triggers, list) {
if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
if (hist_trigger_match(data, test, named_data, false)) {
match = true;
struct hist_trigger_data *hist_data = data->private_data;
struct event_trigger_data *test, *named_data = NULL;
+ lockdep_assert_held(&event_mutex);
+
if (hist_data->attrs->name)
named_data = find_named_trigger(hist_data->attrs->name);
- list_for_each_entry_rcu(test, &file->triggers, list) {
+ list_for_each_entry(test, &file->triggers, list) {
if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
if (!hist_trigger_match(data, test, named_data, false))
continue;
struct event_trigger_data *test, *named_data = NULL;
bool unregistered = false;
+ lockdep_assert_held(&event_mutex);
+
if (hist_data->attrs->name)
named_data = find_named_trigger(hist_data->attrs->name);
- list_for_each_entry_rcu(test, &file->triggers, list) {
+ list_for_each_entry(test, &file->triggers, list) {
if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
if (!hist_trigger_match(data, test, named_data, false))
continue;
struct hist_trigger_data *hist_data;
struct event_trigger_data *test;
- list_for_each_entry_rcu(test, &file->triggers, list) {
+ lockdep_assert_held(&event_mutex);
+
+ list_for_each_entry(test, &file->triggers, list) {
if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
hist_data = test->private_data;
if (check_var_refs(hist_data))
struct enable_trigger_data *enable_data = data->private_data;
struct event_trigger_data *test;
- list_for_each_entry_rcu(test, &enable_data->file->triggers, list) {
+ list_for_each_entry_rcu(test, &enable_data->file->triggers, list,
+ lockdep_is_held(&event_mutex)) {
if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
if (enable_data->enable)
test->paused = false;
unsigned long irq_flags;
void *entry = NULL;
int entry_size;
- u64 val;
+ u64 val = 0;
int len;
entry = trace_alloc_entry(call, &entry_size);
struct event_trigger_data *data;
bool set_cond = false;
- list_for_each_entry_rcu(data, &file->triggers, list) {
+ lockdep_assert_held(&event_mutex);
+
+ list_for_each_entry(data, &file->triggers, list) {
if (data->filter || event_command_post_trigger(data->cmd_ops) ||
event_command_needs_rec(data->cmd_ops)) {
set_cond = true;
struct event_trigger_data *test;
int ret = 0;
- list_for_each_entry_rcu(test, &file->triggers, list) {
+ lockdep_assert_held(&event_mutex);
+
+ list_for_each_entry(test, &file->triggers, list) {
if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
ret = -EEXIST;
goto out;
struct event_trigger_data *data;
bool unregistered = false;
- list_for_each_entry_rcu(data, &file->triggers, list) {
+ lockdep_assert_held(&event_mutex);
+
+ list_for_each_entry(data, &file->triggers, list) {
if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
unregistered = true;
list_del_rcu(&data->list);
struct event_trigger_data *test;
int ret = 0;
- list_for_each_entry_rcu(test, &file->triggers, list) {
+ lockdep_assert_held(&event_mutex);
+
+ list_for_each_entry(test, &file->triggers, list) {
test_enable_data = test->private_data;
if (test_enable_data &&
(test->cmd_ops->trigger_type ==
struct event_trigger_data *data;
bool unregistered = false;
- list_for_each_entry_rcu(data, &file->triggers, list) {
+ lockdep_assert_held(&event_mutex);
+
+ list_for_each_entry(data, &file->triggers, list) {
enable_data = data->private_data;
if (enable_data &&
(data->cmd_ops->trigger_type ==
INIT_HLIST_NODE(&tk->rp.kp.hlist);
INIT_LIST_HEAD(&tk->rp.kp.list);
- ret = trace_probe_init(&tk->tp, event, group);
+ ret = trace_probe_init(&tk->tp, event, group, false);
if (ret < 0)
goto error;
}
int trace_probe_init(struct trace_probe *tp, const char *event,
- const char *group)
+ const char *group, bool alloc_filter)
{
struct trace_event_call *call;
+ size_t size = sizeof(struct trace_probe_event);
int ret = 0;
if (!event || !group)
return -EINVAL;
- tp->event = kzalloc(sizeof(struct trace_probe_event), GFP_KERNEL);
+ if (alloc_filter)
+ size += sizeof(struct trace_uprobe_filter);
+
+ tp->event = kzalloc(size, GFP_KERNEL);
if (!tp->event)
return -ENOMEM;
const struct fetch_type *type; /* Type of this argument */
};
+struct trace_uprobe_filter {
+ rwlock_t rwlock;
+ int nr_systemwide;
+ struct list_head perf_events;
+};
+
/* Event call and class holder */
struct trace_probe_event {
unsigned int flags; /* For TP_FLAG_* */
struct trace_event_call call;
struct list_head files;
struct list_head probes;
+ struct trace_uprobe_filter filter[0];
};
struct trace_probe {
}
int trace_probe_init(struct trace_probe *tp, const char *event,
- const char *group);
+ const char *group, bool alloc_filter);
void trace_probe_cleanup(struct trace_probe *tp);
int trace_probe_append(struct trace_probe *tp, struct trace_probe *to);
void trace_probe_unlink(struct trace_probe *tp);
if (ret) {
pr_info("wakeup trace: Couldn't activate tracepoint"
" probe to kernel_sched_migrate_task\n");
- return;
+ goto fail_deprobe_sched_switch;
}
wakeup_reset(tr);
printk(KERN_ERR "failed to start wakeup tracer\n");
return;
+fail_deprobe_sched_switch:
+ unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
fail_deprobe_wake_new:
unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
fail_deprobe:
int prefix_type, int rowsize, int groupsize,
const void *buf, size_t len, bool ascii)
{
- unsigned int save_len = s->seq.len;
+ unsigned int save_len = s->seq.len;
if (s->full)
return 0;
local_irq_restore(flags);
}
+/* Some archs may not define MCOUNT_INSN_SIZE */
+#ifndef MCOUNT_INSN_SIZE
+# define MCOUNT_INSN_SIZE 0
+#endif
+
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *pt_regs)
#define DATAOF_TRACE_ENTRY(entry, is_return) \
((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
-struct trace_uprobe_filter {
- rwlock_t rwlock;
- int nr_systemwide;
- struct list_head perf_events;
-};
-
static int trace_uprobe_create(int argc, const char **argv);
static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
static int trace_uprobe_release(struct dyn_event *ev);
*/
struct trace_uprobe {
struct dyn_event devent;
- struct trace_uprobe_filter filter;
struct uprobe_consumer consumer;
struct path path;
struct inode *inode;
if (!tu)
return ERR_PTR(-ENOMEM);
- ret = trace_probe_init(&tu->tp, event, group);
+ ret = trace_probe_init(&tu->tp, event, group, true);
if (ret < 0)
goto error;
tu->consumer.handler = uprobe_dispatcher;
if (is_ret)
tu->consumer.ret_handler = uretprobe_dispatcher;
- init_trace_uprobe_filter(&tu->filter);
+ init_trace_uprobe_filter(tu->tp.event->filter);
return tu;
error:
struct trace_probe *pos;
struct trace_uprobe *tu;
+ tu = container_of(tp, struct trace_uprobe, tp);
+ WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));
+
list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
tu = container_of(pos, struct trace_uprobe, tp);
if (!tu->inode)
continue;
- WARN_ON(!uprobe_filter_is_empty(&tu->filter));
-
uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
tu->inode = NULL;
}
}
tu = container_of(tp, struct trace_uprobe, tp);
- WARN_ON(!uprobe_filter_is_empty(&tu->filter));
+ WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));
if (enabled)
return 0;
}
static inline bool
-uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
+trace_uprobe_filter_event(struct trace_uprobe_filter *filter,
+ struct perf_event *event)
{
- return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
+ return __uprobe_perf_filter(filter, event->hw.target->mm);
}
-static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
+static bool trace_uprobe_filter_remove(struct trace_uprobe_filter *filter,
+ struct perf_event *event)
{
bool done;
- write_lock(&tu->filter.rwlock);
+ write_lock(&filter->rwlock);
if (event->hw.target) {
list_del(&event->hw.tp_list);
- done = tu->filter.nr_systemwide ||
+ done = filter->nr_systemwide ||
(event->hw.target->flags & PF_EXITING) ||
- uprobe_filter_event(tu, event);
+ trace_uprobe_filter_event(filter, event);
} else {
- tu->filter.nr_systemwide--;
- done = tu->filter.nr_systemwide;
+ filter->nr_systemwide--;
+ done = filter->nr_systemwide;
}
- write_unlock(&tu->filter.rwlock);
+ write_unlock(&filter->rwlock);
- if (!done)
- return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
-
- return 0;
+ return done;
}
-static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
+/* This returns true if the filter always covers target mm */
+static bool trace_uprobe_filter_add(struct trace_uprobe_filter *filter,
+ struct perf_event *event)
{
bool done;
- int err;
- write_lock(&tu->filter.rwlock);
+ write_lock(&filter->rwlock);
if (event->hw.target) {
/*
* event->parent != NULL means copy_process(), we can avoid
* attr.enable_on_exec means that exec/mmap will install the
* breakpoints we need.
*/
- done = tu->filter.nr_systemwide ||
+ done = filter->nr_systemwide ||
event->parent || event->attr.enable_on_exec ||
- uprobe_filter_event(tu, event);
- list_add(&event->hw.tp_list, &tu->filter.perf_events);
+ trace_uprobe_filter_event(filter, event);
+ list_add(&event->hw.tp_list, &filter->perf_events);
} else {
- done = tu->filter.nr_systemwide;
- tu->filter.nr_systemwide++;
+ done = filter->nr_systemwide;
+ filter->nr_systemwide++;
}
- write_unlock(&tu->filter.rwlock);
+ write_unlock(&filter->rwlock);
- err = 0;
- if (!done) {
- err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
- if (err)
- uprobe_perf_close(tu, event);
- }
- return err;
+ return done;
}
-static int uprobe_perf_multi_call(struct trace_event_call *call,
- struct perf_event *event,
- int (*op)(struct trace_uprobe *tu, struct perf_event *event))
+static int uprobe_perf_close(struct trace_event_call *call,
+ struct perf_event *event)
{
struct trace_probe *pos, *tp;
struct trace_uprobe *tu;
if (WARN_ON_ONCE(!tp))
return -ENODEV;
+ tu = container_of(tp, struct trace_uprobe, tp);
+ if (trace_uprobe_filter_remove(tu->tp.event->filter, event))
+ return 0;
+
list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
tu = container_of(pos, struct trace_uprobe, tp);
- ret = op(tu, event);
+ ret = uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
if (ret)
break;
}
return ret;
}
+
+static int uprobe_perf_open(struct trace_event_call *call,
+ struct perf_event *event)
+{
+ struct trace_probe *pos, *tp;
+ struct trace_uprobe *tu;
+ int err = 0;
+
+ tp = trace_probe_primary_from_call(call);
+ if (WARN_ON_ONCE(!tp))
+ return -ENODEV;
+
+ tu = container_of(tp, struct trace_uprobe, tp);
+ if (trace_uprobe_filter_add(tu->tp.event->filter, event))
+ return 0;
+
+ list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
+ err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
+ if (err) {
+ uprobe_perf_close(call, event);
+ break;
+ }
+ }
+
+ return err;
+}
+
static bool uprobe_perf_filter(struct uprobe_consumer *uc,
enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
+ struct trace_uprobe_filter *filter;
struct trace_uprobe *tu;
int ret;
tu = container_of(uc, struct trace_uprobe, consumer);
- read_lock(&tu->filter.rwlock);
- ret = __uprobe_perf_filter(&tu->filter, mm);
- read_unlock(&tu->filter.rwlock);
+ filter = tu->tp.event->filter;
+
+ read_lock(&filter->rwlock);
+ ret = __uprobe_perf_filter(filter, mm);
+ read_unlock(&filter->rwlock);
return ret;
}
return 0;
case TRACE_REG_PERF_OPEN:
- return uprobe_perf_multi_call(event, data, uprobe_perf_open);
+ return uprobe_perf_open(event, data);
case TRACE_REG_PERF_CLOSE:
- return uprobe_perf_multi_call(event, data, uprobe_perf_close);
+ return uprobe_perf_close(event, data);
#endif
default:
KCOV_INSTRUMENT_stackdepot.o := n
libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \
- fdt_empty_tree.o
+ fdt_empty_tree.o fdt_addresses.o
$(foreach file, $(libfdt_files), \
$(eval CFLAGS_$(file) = -I $(srctree)/scripts/dtc/libfdt))
lib-$(CONFIG_LIBFDT) += $(libfdt_files)
#include <linux/module.h>
#include <linux/types.h>
+#include <linux/crc64.h>
#include "crc64table.h"
MODULE_DESCRIPTION("CRC64 calculations");
--- /dev/null
+#include <linux/libfdt_env.h>
+#include "../scripts/dtc/libfdt/fdt_addresses.c"
*/
static void *shadow_get(void *obj, unsigned long id)
{
- void *ret = klp_shadow_get(obj, id);
+ int **sv;
+ sv = klp_shadow_get(obj, id);
pr_info("klp_%s(obj=PTR%d, id=0x%lx) = PTR%d\n",
- __func__, ptr_id(obj), id, ptr_id(ret));
+ __func__, ptr_id(obj), id, ptr_id(sv));
- return ret;
+ return sv;
}
static void *shadow_alloc(void *obj, unsigned long id, size_t size,
gfp_t gfp_flags, klp_shadow_ctor_t ctor,
void *ctor_data)
{
- void *ret = klp_shadow_alloc(obj, id, size, gfp_flags, ctor,
- ctor_data);
+ int **var = ctor_data;
+ int **sv;
+
+ sv = klp_shadow_alloc(obj, id, size, gfp_flags, ctor, var);
pr_info("klp_%s(obj=PTR%d, id=0x%lx, size=%zx, gfp_flags=%pGg), ctor=PTR%d, ctor_data=PTR%d = PTR%d\n",
__func__, ptr_id(obj), id, size, &gfp_flags, ptr_id(ctor),
- ptr_id(ctor_data), ptr_id(ret));
- return ret;
+ ptr_id(*var), ptr_id(sv));
+
+ return sv;
}
static void *shadow_get_or_alloc(void *obj, unsigned long id, size_t size,
gfp_t gfp_flags, klp_shadow_ctor_t ctor,
void *ctor_data)
{
- void *ret = klp_shadow_get_or_alloc(obj, id, size, gfp_flags, ctor,
- ctor_data);
+ int **var = ctor_data;
+ int **sv;
+
+ sv = klp_shadow_get_or_alloc(obj, id, size, gfp_flags, ctor, var);
pr_info("klp_%s(obj=PTR%d, id=0x%lx, size=%zx, gfp_flags=%pGg), ctor=PTR%d, ctor_data=PTR%d = PTR%d\n",
__func__, ptr_id(obj), id, size, &gfp_flags, ptr_id(ctor),
- ptr_id(ctor_data), ptr_id(ret));
- return ret;
+ ptr_id(*var), ptr_id(sv));
+
+ return sv;
}
static void shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor)
/* Shadow variable constructor - remember simple pointer data */
static int shadow_ctor(void *obj, void *shadow_data, void *ctor_data)
{
- int **shadow_int = shadow_data;
- *shadow_int = ctor_data;
+ int **sv = shadow_data;
+ int **var = ctor_data;
+
+ if (!var)
+ return -EINVAL;
+
+ *sv = *var;
pr_info("%s: PTR%d -> PTR%d\n",
- __func__, ptr_id(shadow_int), ptr_id(ctor_data));
+ __func__, ptr_id(sv), ptr_id(*var));
return 0;
}
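/*
 * Illustrative userspace sketch (not kernel code) of the constructor
 * contract above: ctor_data is now a pointer to a pointer, the ctor
 * copies the pointed-to value into the shadow storage, and a NULL
 * ctor_data is rejected. All names below are hypothetical.
 */
#include <errno.h>
#include <stdio.h>

static int demo_ctor(void *shadow_data, void *ctor_data)
{
	int **sv = shadow_data;
	int **var = ctor_data;

	if (!var)
		return -EINVAL;
	*sv = *var;	/* the shadow now points at the same int as *var */
	return 0;
}

int main(void)
{
	int value = 42;
	int *pv = &value;	/* analogous to pv1..pv4 in the test above */
	int *shadow;

	if (demo_ctor(&shadow, &pv) == 0)
		printf("shadow -> %d\n", *shadow);	/* prints 42 */
	return 0;
}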
static void shadow_dtor(void *obj, void *shadow_data)
{
+ int **sv = shadow_data;
+
pr_info("%s(obj=PTR%d, shadow_data=PTR%d)\n",
- __func__, ptr_id(obj), ptr_id(shadow_data));
+ __func__, ptr_id(obj), ptr_id(sv));
}
static int test_klp_shadow_vars_init(void)
{
void *obj = THIS_MODULE;
int id = 0x1234;
- size_t size = sizeof(int *);
gfp_t gfp_flags = GFP_KERNEL;
int var1, var2, var3, var4;
+ int *pv1, *pv2, *pv3, *pv4;
int **sv1, **sv2, **sv3, **sv4;
- void *ret;
+ int **sv;
+
+ pv1 = &var1;
+ pv2 = &var2;
+ pv3 = &var3;
+ pv4 = &var4;
ptr_id(NULL);
- ptr_id(&var1);
- ptr_id(&var2);
- ptr_id(&var3);
- ptr_id(&var4);
+ ptr_id(pv1);
+ ptr_id(pv2);
+ ptr_id(pv3);
+ ptr_id(pv4);
/*
* With an empty shadow variable hash table, expect not to find
* any matches.
*/
- ret = shadow_get(obj, id);
- if (!ret)
+ sv = shadow_get(obj, id);
+ if (!sv)
pr_info(" got expected NULL result\n");
/*
* Allocate a few shadow variables with different <obj> and <id>.
*/
- sv1 = shadow_alloc(obj, id, size, gfp_flags, shadow_ctor, &var1);
+ sv1 = shadow_alloc(obj, id, sizeof(pv1), gfp_flags, shadow_ctor, &pv1);
if (!sv1)
return -ENOMEM;
- sv2 = shadow_alloc(obj + 1, id, size, gfp_flags, shadow_ctor, &var2);
+ sv2 = shadow_alloc(obj + 1, id, sizeof(pv2), gfp_flags, shadow_ctor, &pv2);
if (!sv2)
return -ENOMEM;
- sv3 = shadow_alloc(obj, id + 1, size, gfp_flags, shadow_ctor, &var3);
+ sv3 = shadow_alloc(obj, id + 1, sizeof(pv3), gfp_flags, shadow_ctor, &pv3);
if (!sv3)
return -ENOMEM;
* Verify we can find our new shadow variables and that they point
* to expected data.
*/
- ret = shadow_get(obj, id);
- if (!ret)
+ sv = shadow_get(obj, id);
+ if (!sv)
return -EINVAL;
- if (ret == sv1 && *sv1 == &var1)
+ if (sv == sv1 && *sv1 == pv1)
pr_info(" got expected PTR%d -> PTR%d result\n",
ptr_id(sv1), ptr_id(*sv1));
- ret = shadow_get(obj + 1, id);
- if (!ret)
+ sv = shadow_get(obj + 1, id);
+ if (!sv)
return -EINVAL;
- if (ret == sv2 && *sv2 == &var2)
+ if (sv == sv2 && *sv2 == pv2)
pr_info(" got expected PTR%d -> PTR%d result\n",
ptr_id(sv2), ptr_id(*sv2));
- ret = shadow_get(obj, id + 1);
- if (!ret)
+ sv = shadow_get(obj, id + 1);
+ if (!sv)
return -EINVAL;
- if (ret == sv3 && *sv3 == &var3)
+ if (sv == sv3 && *sv3 == pv3)
pr_info(" got expected PTR%d -> PTR%d result\n",
ptr_id(sv3), ptr_id(*sv3));
* Allocate or get a few more, this time with the same <obj>, <id>.
* The second invocation should return the same shadow var.
*/
- sv4 = shadow_get_or_alloc(obj + 2, id, size, gfp_flags, shadow_ctor, &var4);
+ sv4 = shadow_get_or_alloc(obj + 2, id, sizeof(pv4), gfp_flags, shadow_ctor, &pv4);
if (!sv4)
return -ENOMEM;
- ret = shadow_get_or_alloc(obj + 2, id, size, gfp_flags, shadow_ctor, &var4);
- if (!ret)
+ sv = shadow_get_or_alloc(obj + 2, id, sizeof(pv4), gfp_flags, shadow_ctor, &pv4);
+ if (!sv)
return -EINVAL;
- if (ret == sv4 && *sv4 == &var4)
+ if (sv == sv4 && *sv4 == pv4)
pr_info(" got expected PTR%d -> PTR%d result\n",
ptr_id(sv4), ptr_id(*sv4));
* longer find them.
*/
shadow_free(obj, id, shadow_dtor); /* sv1 */
- ret = shadow_get(obj, id);
- if (!ret)
+ sv = shadow_get(obj, id);
+ if (!sv)
pr_info(" got expected NULL result\n");
shadow_free(obj + 1, id, shadow_dtor); /* sv2 */
- ret = shadow_get(obj + 1, id);
- if (!ret)
+ sv = shadow_get(obj + 1, id);
+ if (!sv)
pr_info(" got expected NULL result\n");
shadow_free(obj + 2, id, shadow_dtor); /* sv4 */
- ret = shadow_get(obj + 2, id);
- if (!ret)
+ sv = shadow_get(obj + 2, id);
+ if (!sv)
pr_info(" got expected NULL result\n");
/*
* We should still find an <id+1> variable.
*/
- ret = shadow_get(obj, id + 1);
- if (!ret)
+ sv = shadow_get(obj, id + 1);
+ if (!sv)
return -EINVAL;
- if (ret == sv3 && *sv3 == &var3)
+ if (sv == sv3 && *sv3 == pv3)
pr_info(" got expected PTR%d -> PTR%d result\n",
ptr_id(sv3), ptr_id(*sv3));
* Free all the <id+1> variables, too.
*/
shadow_free_all(id + 1, shadow_dtor); /* sv3 */
- ret = shadow_get(obj, id);
- if (!ret)
+ sv = shadow_get(obj, id);
+ if (!sv)
pr_info(" shadow_get() got expected NULL result\n");
#define time_before(x, y) ((x) < (y))
#endif
+#define RAID6_TEST_DISKS 8
+#define RAID6_TEST_DISKS_ORDER 3
+
static inline const struct raid6_recov_calls *raid6_choose_recov(void)
{
const struct raid6_recov_calls *const *algo;
}
static inline const struct raid6_calls *raid6_choose_gen(
- void *(*const dptrs)[(65536/PAGE_SIZE)+2], const int disks)
+ void *(*const dptrs)[RAID6_TEST_DISKS], const int disks)
{
unsigned long perf, bestgenperf, bestxorperf, j0, j1;
int start = (disks>>1)-1, stop = disks-3; /* work on the second half of the disks */
best = *algo;
}
pr_info("raid6: %-8s gen() %5ld MB/s\n", (*algo)->name,
- (perf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2));
+ (perf * HZ * (disks-2)) >>
+ (20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2));
if (!(*algo)->xor_syndrome)
continue;
bestxorperf = perf;
pr_info("raid6: %-8s xor() %5ld MB/s\n", (*algo)->name,
- (perf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2+1));
+ (perf * HZ * (disks-2)) >>
+ (20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2 + 1));
}
}
if (best) {
- pr_info("raid6: using algorithm %s gen() %ld MB/s\n",
- best->name,
- (bestgenperf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2));
- if (best->xor_syndrome)
- pr_info("raid6: .... xor() %ld MB/s, rmw enabled\n",
- (bestxorperf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2+1));
+ if (IS_ENABLED(CONFIG_RAID6_PQ_BENCHMARK)) {
+ pr_info("raid6: using algorithm %s gen() %ld MB/s\n",
+ best->name,
+ (bestgenperf * HZ * (disks-2)) >>
+ (20 - PAGE_SHIFT+RAID6_TIME_JIFFIES_LG2));
+ if (best->xor_syndrome)
+ pr_info("raid6: .... xor() %ld MB/s, rmw enabled\n",
+ (bestxorperf * HZ * (disks-2)) >>
+ (20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2 + 1));
+ } else
+ pr_info("raid6: skip pq benchmark and using algorithm %s\n",
+ best->name);
raid6_call = *best;
} else
pr_err("raid6: Yikes! No algorithm found!\n");
int __init raid6_select_algo(void)
{
- const int disks = (65536/PAGE_SIZE)+2;
+ const int disks = RAID6_TEST_DISKS;
const struct raid6_calls *gen_best;
const struct raid6_recov_calls *rec_best;
- char *syndromes;
- void *dptrs[(65536/PAGE_SIZE)+2];
- int i;
-
- for (i = 0; i < disks-2; i++)
- dptrs[i] = ((char *)raid6_gfmul) + PAGE_SIZE*i;
-
- /* Normal code - use a 2-page allocation to avoid D$ conflict */
- syndromes = (void *) __get_free_pages(GFP_KERNEL, 1);
+ char *disk_ptr, *p;
+ void *dptrs[RAID6_TEST_DISKS];
+ int i, cycle;
- if (!syndromes) {
+ /* prepare the buffer and fill it circularly with gfmul table */
+ disk_ptr = (char *)__get_free_pages(GFP_KERNEL, RAID6_TEST_DISKS_ORDER);
+ if (!disk_ptr) {
pr_err("raid6: Yikes! No memory available.\n");
return -ENOMEM;
}
- dptrs[disks-2] = syndromes;
- dptrs[disks-1] = syndromes + PAGE_SIZE;
+ p = disk_ptr;
+ for (i = 0; i < disks; i++)
+ dptrs[i] = p + PAGE_SIZE * i;
+
+ cycle = ((disks - 2) * PAGE_SIZE) / 65536;
+ for (i = 0; i < cycle; i++) {
+ memcpy(p, raid6_gfmul, 65536);
+ p += 65536;
+ }
+
+ if ((disks - 2) * PAGE_SIZE % 65536)
+ memcpy(p, raid6_gfmul, (disks - 2) * PAGE_SIZE % 65536);
/* select raid gen_syndrome function */
gen_best = raid6_choose_gen(&dptrs, disks);
/* select raid recover functions */
rec_best = raid6_choose_recov();
- free_pages((unsigned long)syndromes, 1);
+ free_pages((unsigned long)disk_ptr, RAID6_TEST_DISKS_ORDER);
return gen_best && rec_best ? 0 : -EINVAL;
}
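/*
 * Illustrative userspace sketch of the circular fill above: copy a
 * 64 KiB source table repeatedly across the (disks - 2) data pages and
 * finish with a partial tail copy. The constants are assumptions for
 * this demo, not the kernel's.
 */
#include <string.h>

#define DEMO_PAGE_SIZE	4096
#define DEMO_TABLE_SIZE	65536
#define DEMO_DISKS	8

void demo_fill(char *buf, const char *table)
{
	size_t data_bytes = (DEMO_DISKS - 2) * DEMO_PAGE_SIZE;
	size_t cycle = data_bytes / DEMO_TABLE_SIZE;
	char *p = buf;
	size_t i;

	for (i = 0; i < cycle; i++) {
		memcpy(p, table, DEMO_TABLE_SIZE);
		p += DEMO_TABLE_SIZE;
	}
	if (data_bytes % DEMO_TABLE_SIZE)
		memcpy(p, table, data_bytes % DEMO_TABLE_SIZE);
}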
uint8_t v;
uint8_t exptbl[256], invtbl[256];
- printf("#include <linux/raid/pq.h>\n");
printf("#include <linux/export.h>\n");
+ printf("#include <linux/raid/pq.h>\n");
/* Compute multiplication table */
printf("\nconst u8 __attribute__((aligned(256)))\n"
const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
unsigned long res = 0;
- /*
- * Truncate 'max' to the user-specified limit, so that
- * we only have one limit we need to check in the loop
- */
- if (max > count)
- max = count;
-
if (IS_UNALIGNED(src, dst))
goto byte_at_a_time;
unsigned long max = max_addr - src_addr;
long retval;
+ /*
+ * Truncate 'max' to the user-specified limit, so that
+ * we only have one limit we need to check in the loop
+ */
+ if (max > count)
+ max = count;
+
kasan_check_write(dst, count);
check_object_size(dst, count, false);
if (user_access_begin(src, max)) {
unsigned long align, res = 0;
unsigned long c;
- /*
- * Truncate 'max' to the user-specified limit, so that
- * we only have one limit we need to check in the loop
- */
- if (max > count)
- max = count;
-
/*
* Do everything aligned. But that means that we
* need to also expand the maximum..
unsigned long max = max_addr - src_addr;
long retval;
+ /*
+ * Truncate 'max' to the user-specified limit, so that
+ * we only have one limit we need to check in the loop
+ */
+ if (max > count)
+ max = count;
+
if (user_access_begin(str, max)) {
retval = do_strnlen_user(str, count, max);
user_access_end();
/*
* test_xarray.c: Test the XArray API
* Copyright (c) 2017-2018 Microsoft Corporation
+ * Copyright (c) 2019-2020 Oracle
* Author: Matthew Wilcox <willy@infradead.org>
*/
XA_BUG_ON(xa, !xa_empty(xa));
}
-static noinline void check_multi_find(struct xarray *xa)
+static noinline void check_multi_find_1(struct xarray *xa, unsigned order)
{
#ifdef CONFIG_XARRAY_MULTI
+ unsigned long multi = 3 << order;
+ unsigned long next = 4 << order;
unsigned long index;
- xa_store_order(xa, 12, 2, xa_mk_value(12), GFP_KERNEL);
- XA_BUG_ON(xa, xa_store_index(xa, 16, GFP_KERNEL) != NULL);
+ xa_store_order(xa, multi, order, xa_mk_value(multi), GFP_KERNEL);
+ XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL) != NULL);
+ XA_BUG_ON(xa, xa_store_index(xa, next + 1, GFP_KERNEL) != NULL);
index = 0;
XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
- xa_mk_value(12));
- XA_BUG_ON(xa, index != 12);
- index = 13;
+ xa_mk_value(multi));
+ XA_BUG_ON(xa, index != multi);
+ index = multi + 1;
XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
- xa_mk_value(12));
- XA_BUG_ON(xa, (index < 12) || (index >= 16));
+ xa_mk_value(multi));
+ XA_BUG_ON(xa, (index < multi) || (index >= next));
XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT) !=
- xa_mk_value(16));
- XA_BUG_ON(xa, index != 16);
-
- xa_erase_index(xa, 12);
- xa_erase_index(xa, 16);
+ xa_mk_value(next));
+ XA_BUG_ON(xa, index != next);
+ XA_BUG_ON(xa, xa_find_after(xa, &index, next, XA_PRESENT) != NULL);
+ XA_BUG_ON(xa, index != next);
+
+ xa_erase_index(xa, multi);
+ xa_erase_index(xa, next);
+ xa_erase_index(xa, next + 1);
XA_BUG_ON(xa, !xa_empty(xa));
#endif
}
xa_destroy(xa);
}
+static noinline void check_find_4(struct xarray *xa)
+{
+ unsigned long index = 0;
+ void *entry;
+
+ xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
+
+ entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
+ XA_BUG_ON(xa, entry != xa_mk_index(ULONG_MAX));
+
+ entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
+ XA_BUG_ON(xa, entry);
+
+ xa_erase_index(xa, ULONG_MAX);
+}
+
static noinline void check_find(struct xarray *xa)
{
+ unsigned i;
+
check_find_1(xa);
check_find_2(xa);
check_find_3(xa);
- check_multi_find(xa);
+ check_find_4(xa);
+
+ for (i = 2; i < 10; i++)
+ check_multi_find_1(xa, i);
check_multi_find_2(xa);
}
XA_BUG_ON(xa, !xa_empty(xa));
}
+static noinline void check_move_max(struct xarray *xa)
+{
+ XA_STATE(xas, xa, 0);
+
+ xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
+ rcu_read_lock();
+ XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX));
+ XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL);
+ rcu_read_unlock();
+
+ xas_set(&xas, 0);
+ rcu_read_lock();
+ XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX));
+ xas_pause(&xas);
+ XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL);
+ rcu_read_unlock();
+
+ xa_erase_index(xa, ULONG_MAX);
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
static noinline void check_move_small(struct xarray *xa, unsigned long idx)
{
XA_STATE(xas, xa, 0);
xa_destroy(xa);
check_move_tiny(xa);
+ check_move_max(xa);
for (i = 0; i < 16; i++)
check_move_small(xa, 1UL << i);
return 0;
}
+static __maybe_unused
int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
{
int ret = __cvdso_clock_getres_common(clock, res);
// SPDX-License-Identifier: GPL-2.0+
/*
* XArray implementation
- * Copyright (c) 2017 Microsoft Corporation
+ * Copyright (c) 2017-2018 Microsoft Corporation
+ * Copyright (c) 2018-2020 Oracle
* Author: Matthew Wilcox <willy@infradead.org>
*/
if (xas_invalid(xas))
return;
+ xas->xa_node = XAS_RESTART;
if (node) {
unsigned int offset = xas->xa_offset;
while (++offset < XA_CHUNK_SIZE) {
break;
}
xas->xa_index += (offset - xas->xa_offset) << node->shift;
+ if (xas->xa_index == 0)
+ xas->xa_node = XAS_BOUNDS;
} else {
xas->xa_index++;
}
- xas->xa_node = XAS_RESTART;
}
EXPORT_SYMBOL_GPL(xas_pause);
{
void *entry;
- if (xas_error(xas))
+ if (xas_error(xas) || xas->xa_node == XAS_BOUNDS)
return NULL;
+ if (xas->xa_index > max)
+ return set_bounds(xas);
if (!xas->xa_node) {
xas->xa_index = 1;
return set_bounds(xas);
- } else if (xas_top(xas->xa_node)) {
+ } else if (xas->xa_node == XAS_RESTART) {
entry = xas_load(xas);
if (entry || xas_not_node(xas->xa_node))
return entry;
if (xas_error(xas))
return NULL;
+ if (xas->xa_index > max)
+ goto max;
if (!xas->xa_node) {
xas->xa_index = 1;
}
EXPORT_SYMBOL(xa_find);
+static bool xas_sibling(struct xa_state *xas)
+{
+ struct xa_node *node = xas->xa_node;
+ unsigned long mask;
+
+ if (!node)
+ return false;
+ mask = (XA_CHUNK_SIZE << node->shift) - 1;
+ return (xas->xa_index & mask) > (xas->xa_offset << node->shift);
+}
+
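/*
 * Illustrative sketch of the sibling check above, in userspace C with a
 * hypothetical chunk size: an index refers to a sibling slot of a
 * multi-order entry when its offset within the node's range lies past
 * the canonical slot's starting offset.
 */
#include <stdbool.h>

#define DEMO_CHUNK_SIZE 64UL	/* XA_CHUNK_SIZE on typical configs */

bool demo_is_sibling(unsigned long index, unsigned int offset,
		     unsigned int shift)
{
	unsigned long mask = (DEMO_CHUNK_SIZE << shift) - 1;

	return (index & mask) > ((unsigned long)offset << shift);
}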
/**
* xa_find_after() - Search the XArray for a present entry.
* @xa: XArray.
XA_STATE(xas, xa, *indexp + 1);
void *entry;
+ if (xas.xa_index == 0)
+ return NULL;
+
rcu_read_lock();
for (;;) {
if ((__force unsigned int)filter < XA_MAX_MARKS)
entry = xas_find_marked(&xas, max, filter);
else
entry = xas_find(&xas, max);
- if (xas.xa_node == XAS_BOUNDS)
+
+ if (xas_invalid(&xas))
break;
- if (xas.xa_shift) {
- if (xas.xa_index & ((1UL << xas.xa_shift) - 1))
- continue;
- } else {
- if (xas.xa_offset < (xas.xa_index & XA_CHUNK_MASK))
- continue;
- }
+ if (xas_sibling(&xas))
+ continue;
if (!xas_retry(&xas, entry))
break;
}
set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
}
-static unsigned long __thp_get_unmapped_area(struct file *filp, unsigned long len,
+static unsigned long __thp_get_unmapped_area(struct file *filp,
+ unsigned long addr, unsigned long len,
loff_t off, unsigned long flags, unsigned long size)
{
- unsigned long addr;
loff_t off_end = off + len;
loff_t off_align = round_up(off, size);
- unsigned long len_pad;
+ unsigned long len_pad, ret;
if (off_end <= off_align || (off_end - off_align) < size)
return 0;
if (len_pad < len || (off + len_pad) < off)
return 0;
- addr = current->mm->get_unmapped_area(filp, 0, len_pad,
+ ret = current->mm->get_unmapped_area(filp, addr, len_pad,
off >> PAGE_SHIFT, flags);
- if (IS_ERR_VALUE(addr))
+
+ /*
+ * The failure might be due to length padding. The caller will retry
+ * without the padding.
+ */
+ if (IS_ERR_VALUE(ret))
return 0;
- addr += (off - addr) & (size - 1);
- return addr;
+ /*
+ * Do not try to align to THP boundary if allocation at the address
+ * hint succeeds.
+ */
+ if (ret == addr)
+ return addr;
+
+ ret += (off - ret) & (size - 1);
+ return ret;
}
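/*
 * Illustrative sketch of the final alignment step above: shift the
 * returned address so that it is congruent to the file offset modulo
 * 'size' (a power of two), which makes huge-page-sized file extents
 * land on huge-page-aligned virtual addresses. Sample values are
 * arbitrary.
 */
#include <stdio.h>

int main(void)
{
	unsigned long size = 1UL << 21;		/* e.g. a 2 MiB PMD size */
	unsigned long off = 3UL << 21;		/* size-aligned file offset */
	unsigned long ret = 0x7f0000123000UL;	/* hypothetical mmap result */

	ret += (off - ret) & (size - 1);
	/* both remainders below are now equal (here: 0) */
	printf("ret %% size = %lu, off %% size = %lu\n",
	       ret & (size - 1), off & (size - 1));
	return 0;
}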
unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
+ unsigned long ret;
loff_t off = (loff_t)pgoff << PAGE_SHIFT;
- if (addr)
- goto out;
if (!IS_DAX(filp->f_mapping->host) || !IS_ENABLED(CONFIG_FS_DAX_PMD))
goto out;
- addr = __thp_get_unmapped_area(filp, len, off, flags, PMD_SIZE);
- if (addr)
- return addr;
-
- out:
+ ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE);
+ if (ret)
+ return ret;
+out:
return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}
EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
}
}
-static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg, bool slab_only)
+static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg)
{
- unsigned long stat[MEMCG_NR_STAT];
+ unsigned long stat[MEMCG_NR_STAT] = {0};
struct mem_cgroup *mi;
int node, cpu, i;
- int min_idx, max_idx;
-
- if (slab_only) {
- min_idx = NR_SLAB_RECLAIMABLE;
- max_idx = NR_SLAB_UNRECLAIMABLE;
- } else {
- min_idx = 0;
- max_idx = MEMCG_NR_STAT;
- }
-
- for (i = min_idx; i < max_idx; i++)
- stat[i] = 0;
for_each_online_cpu(cpu)
- for (i = min_idx; i < max_idx; i++)
+ for (i = 0; i < MEMCG_NR_STAT; i++)
stat[i] += per_cpu(memcg->vmstats_percpu->stat[i], cpu);
for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
- for (i = min_idx; i < max_idx; i++)
+ for (i = 0; i < MEMCG_NR_STAT; i++)
atomic_long_add(stat[i], &mi->vmstats[i]);
- if (!slab_only)
- max_idx = NR_VM_NODE_STAT_ITEMS;
-
for_each_node(node) {
struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
struct mem_cgroup_per_node *pi;
- for (i = min_idx; i < max_idx; i++)
+ for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
stat[i] = 0;
for_each_online_cpu(cpu)
- for (i = min_idx; i < max_idx; i++)
+ for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
stat[i] += per_cpu(
pn->lruvec_stat_cpu->count[i], cpu);
for (pi = pn; pi; pi = parent_nodeinfo(pi, node))
- for (i = min_idx; i < max_idx; i++)
+ for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
atomic_long_add(stat[i], &pi->lruvec_stat[i]);
}
}
parent = root_mem_cgroup;
/*
- * Deactivate and reparent kmem_caches. Then flush percpu
- * slab statistics to have precise values at the parent and
- * all ancestor levels. It's required to keep slab stats
- * accurate after the reparenting of kmem_caches.
+ * Deactivate and reparent kmem_caches.
*/
memcg_deactivate_kmem_caches(memcg, parent);
- memcg_flush_percpu_vmstats(memcg, true);
kmemcg_id = memcg->kmemcg_id;
BUG_ON(kmemcg_id < 0);
* Flush percpu vmstats and vmevents to guarantee the value correctness
* on parent's and all ancestor levels.
*/
- memcg_flush_percpu_vmstats(memcg, false);
+ memcg_flush_percpu_vmstats(memcg);
memcg_flush_percpu_vmevents(memcg);
__mem_cgroup_free(memcg);
}
nmask = policy_nodemask(gfp, pol);
if (!nmask || node_isset(hpage_node, *nmask)) {
mpol_cond_put(pol);
+ /*
+ * First, try to allocate THP only on local node, but
+ * don't reclaim unnecessarily, just compact.
+ */
page = __alloc_pages_node(hpage_node,
- gfp | __GFP_THISNODE, order);
+ gfp | __GFP_THISNODE | __GFP_NORETRY, order);
/*
* If hugepage allocations are configured to always
* synchronous compact or the vma has been madvised
* to prefer hugepage backing, retry allowing remote
- * memory as well.
+ * memory with both reclaim and compact as well.
*/
if (!page && (gfp & __GFP_DIRECT_RECLAIM))
page = __alloc_pages_node(hpage_node,
- gfp | __GFP_NORETRY, order);
+ gfp, order);
goto out;
}
* MAP_PRIVATE r: (no) no r: (yes) yes r: (no) yes r: (no) yes
* w: (no) no w: (no) no w: (copy) copy w: (no) no
* x: (no) no x: (no) yes x: (no) yes x: (yes) yes
- *
- * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and
- * MAP_PRIVATE:
- * r: (no) no
- * w: (no) no
- * x: (yes) yes
*/
pgprot_t protection_map[16] __ro_after_init = {
__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
if (this_bw < tot_bw) {
if (min) {
min *= this_bw;
- do_div(min, tot_bw);
+ min = div64_ul(min, tot_bw);
}
if (max < 100) {
max *= this_bw;
- do_div(max, tot_bw);
+ max = div64_ul(max, tot_bw);
}
}
struct wb_domain *dom = dtc_dom(dtc);
unsigned long thresh = dtc->thresh;
u64 wb_thresh;
- long numerator, denominator;
+ unsigned long numerator, denominator;
unsigned long wb_min_ratio, wb_max_ratio;
/*
wb_thresh = (thresh * (100 - bdi_min_ratio)) / 100;
wb_thresh *= numerator;
- do_div(wb_thresh, denominator);
+ wb_thresh = div64_ul(wb_thresh, denominator);
wb_min_max_ratio(dtc->wb, &wb_min_ratio, &wb_max_ratio);
bw = written - min(written, wb->written_stamp);
bw *= HZ;
if (unlikely(elapsed > period)) {
- do_div(bw, elapsed);
+ bw = div64_ul(bw, elapsed);
avg = bw;
goto out;
}
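/*
 * Illustrative sketch of why div64_ul() replaces do_div() above:
 * do_div() takes a 32-bit divisor, while elapsed/tot_bw/denominator are
 * unsigned long and can exceed 32 bits on 64-bit kernels. Userspace
 * equivalent of the bandwidth average with a >32-bit divisor:
 */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t bw = 12345678900ULL;		/* written bytes * HZ, made up */
	unsigned long elapsed = 5000000001UL;	/* needs > 32 bits on LP64 */

	bw /= elapsed;		/* full 64-by-64 division, as div64_ul() does */
	printf("avg = %" PRIu64 "\n", bw);	/* prints 2 */
	return 0;
}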
#ifdef CONFIG_DEBUG_PAGEALLOC
unsigned int _debug_guardpage_minorder;
-#ifdef CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT
-DEFINE_STATIC_KEY_TRUE(_debug_pagealloc_enabled);
-#else
+bool _debug_pagealloc_enabled_early __read_mostly
+ = IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
+EXPORT_SYMBOL(_debug_pagealloc_enabled_early);
DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
-#endif
EXPORT_SYMBOL(_debug_pagealloc_enabled);
DEFINE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
static int __init early_debug_pagealloc(char *buf)
{
- bool enable = false;
-
- if (kstrtobool(buf, &enable))
- return -EINVAL;
-
- if (enable)
- static_branch_enable(&_debug_pagealloc_enabled);
-
- return 0;
+ return kstrtobool(buf, &_debug_pagealloc_enabled_early);
}
early_param("debug_pagealloc", early_debug_pagealloc);
-static void init_debug_guardpage(void)
+void init_debug_pagealloc(void)
{
if (!debug_pagealloc_enabled())
return;
+ static_branch_enable(&_debug_pagealloc_enabled);
+
if (!debug_guardpage_minorder())
return;
*/
arch_free_page(page, order);
- if (debug_pagealloc_enabled())
+ if (debug_pagealloc_enabled_static())
kernel_map_pages(page, 1 << order, 0);
kasan_free_nondeferred_pages(page, order);
static bool bulkfree_pcp_prepare(struct page *page)
{
- if (debug_pagealloc_enabled())
+ if (debug_pagealloc_enabled_static())
return free_pages_check(page);
else
return false;
*/
static bool free_pcp_prepare(struct page *page)
{
- if (debug_pagealloc_enabled())
+ if (debug_pagealloc_enabled_static())
return free_pages_prepare(page, 0, true);
else
return free_pages_prepare(page, 0, false);
for_each_populated_zone(zone)
set_zone_contiguous(zone);
-
-#ifdef CONFIG_DEBUG_PAGEALLOC
- init_debug_guardpage();
-#endif
}
#ifdef CONFIG_CMA
*/
static inline bool check_pcp_refill(struct page *page)
{
- if (debug_pagealloc_enabled())
+ if (debug_pagealloc_enabled_static())
return check_new_page(page);
else
return false;
}
static inline bool check_new_pcp(struct page *page)
{
- if (debug_pagealloc_enabled())
+ if (debug_pagealloc_enabled_static())
return check_new_page(page);
else
return false;
set_page_refcounted(page);
arch_alloc_page(page, order);
- if (debug_pagealloc_enabled())
+ if (debug_pagealloc_enabled_static())
kernel_map_pages(page, 1 << order, 1);
kasan_alloc_pages(page, order);
kernel_poison_pages(page, 1 << order, 1);
if (page)
goto got_pg;
- if (order >= pageblock_order && (gfp_mask & __GFP_IO) &&
- !(gfp_mask & __GFP_RETRY_MAYFAIL)) {
+ /*
+ * Checks for costly allocations with __GFP_NORETRY, which
+ * includes some THP page fault allocations
+ */
+ if (costly_order && (gfp_mask & __GFP_NORETRY)) {
/*
* If allocating entire pageblock(s) and compaction
* failed because all zones are below low watermarks
if (compact_result == COMPACT_SKIPPED ||
compact_result == COMPACT_DEFERRED)
goto nopage;
- }
-
- /*
- * Checks for costly allocations with __GFP_NORETRY, which
- * includes THP page fault allocations
- */
- if (costly_order && (gfp_mask & __GFP_NORETRY)) {
- /*
- * If compaction is deferred for high-order allocations,
- * it is because sync compaction recently failed. If
- * this is the case and the caller requested a THP
- * allocation, we do not want to heavily disrupt the
- * system, so we fail the allocation instead of entering
- * direct reclaim.
- */
- if (compact_result == COMPACT_DEFERRED)
- goto nopage;
/*
* Looks like reclaim/compaction is worth trying, but
/*
* Our priority is to support MAP_SHARED mapped hugely;
* and support MAP_PRIVATE mapped hugely too, until it is COWed.
- * But if caller specified an address hint, respect that as before.
+ * But if caller specified an address hint and we allocated the area there
+ * successfully, respect that as before.
*/
- if (uaddr)
+ if (uaddr == addr)
return addr;
if (shmem_huge != SHMEM_HUGE_FORCE) {
if (inflated_len < len)
return addr;
- inflated_addr = get_area(NULL, 0, inflated_len, 0, flags);
+ inflated_addr = get_area(NULL, uaddr, inflated_len, 0, flags);
if (IS_ERR_VALUE(inflated_addr))
return addr;
if (inflated_addr & ~PAGE_MASK)
#if DEBUG
static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
{
- if (debug_pagealloc_enabled() && OFF_SLAB(cachep) &&
+ if (debug_pagealloc_enabled_static() && OFF_SLAB(cachep) &&
(cachep->size % PAGE_SIZE) == 0)
return true;
* to check size >= 256. It guarantees that all necessary small
* sized slab is initialized in current slab initialization sequence.
*/
- if (debug_pagealloc_enabled() && (flags & SLAB_POISON) &&
+ if (debug_pagealloc_enabled_static() && (flags & SLAB_POISON) &&
size >= 256 && cachep->object_size > cache_line_size()) {
if (size < PAGE_SIZE || size % PAGE_SIZE == 0) {
size_t tmp_size = ALIGN(size, PAGE_SIZE);
* deactivates the memcg kmem_caches through workqueue. Make sure all
* previous workitems on workqueue are processed.
*/
- flush_workqueue(memcg_kmem_cache_wq);
+ if (likely(memcg_kmem_cache_wq))
+ flush_workqueue(memcg_kmem_cache_wq);
/*
* If we're racing with children kmem_cache deactivation, it might
unsigned long freepointer_addr;
void *p;
- if (!debug_pagealloc_enabled())
+ if (!debug_pagealloc_enabled_static())
return get_freepointer(s, object);
freepointer_addr = (unsigned long)object + s->offset;
if (bitmap_empty(subsection_map, SUBSECTIONS_PER_SECTION)) {
unsigned long section_nr = pfn_to_section_nr(pfn);
- if (!section_is_early) {
+ /*
+ * When removing an early section, the usage map is kept (as the
+ * usage maps of other sections fall into the same page). It
+ * will be re-used when re-adding the section - which is then no
+ * longer an early section. If the usage map is PageReserved, it
+ * was allocated during boot.
+ */
+ if (!PageReserved(virt_to_page(ms->usage))) {
kfree(ms->usage);
ms->usage = NULL;
}
{
flush_cache_vunmap(va->va_start, va->va_end);
unmap_vmap_area(va);
- if (debug_pagealloc_enabled())
+ if (debug_pagealloc_enabled_static())
flush_tlb_kernel_range(va->va_start, va->va_end);
free_vmap_area_noflush(va);
vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);
- if (debug_pagealloc_enabled())
+ if (debug_pagealloc_enabled_static())
flush_tlb_kernel_range((unsigned long)addr,
(unsigned long)addr + size);
void vlan_setup(struct net_device *dev);
int register_vlan_dev(struct net_device *dev, struct netlink_ext_ack *extack);
void unregister_vlan_dev(struct net_device *dev, struct list_head *head);
+void vlan_dev_uninit(struct net_device *dev);
bool vlan_dev_inherit_address(struct net_device *dev,
struct net_device *real_dev);
return 0;
}
-static void vlan_dev_uninit(struct net_device *dev)
+/* Note: this function might be called multiple times for the same device. */
+void vlan_dev_uninit(struct net_device *dev)
{
struct vlan_priority_tci_mapping *pm;
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
struct ifla_vlan_flags *flags;
struct ifla_vlan_qos_mapping *m;
struct nlattr *attr;
- int rem;
+ int rem, err;
if (data[IFLA_VLAN_FLAGS]) {
flags = nla_data(data[IFLA_VLAN_FLAGS]);
- vlan_dev_change_flags(dev, flags->flags, flags->mask);
+ err = vlan_dev_change_flags(dev, flags->flags, flags->mask);
+ if (err)
+ return err;
}
if (data[IFLA_VLAN_INGRESS_QOS]) {
nla_for_each_nested(attr, data[IFLA_VLAN_INGRESS_QOS], rem) {
if (data[IFLA_VLAN_EGRESS_QOS]) {
nla_for_each_nested(attr, data[IFLA_VLAN_EGRESS_QOS], rem) {
m = nla_data(attr);
- vlan_dev_set_egress_priority(dev, m->from, m->to);
+ err = vlan_dev_set_egress_priority(dev, m->from, m->to);
+ if (err)
+ return err;
}
}
return 0;
return -EINVAL;
err = vlan_changelink(dev, tb, data, extack);
- if (err < 0)
- return err;
-
- return register_vlan_dev(dev, extack);
+ if (!err)
+ err = register_vlan_dev(dev, extack);
+ if (err)
+ vlan_dev_uninit(dev);
+ return err;
}
static inline size_t vlan_qos_map_size(unsigned int n)
static void *vcc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
v = vcc_walk(seq, 1);
- if (v)
- (*pos)++;
+ (*pos)++;
return v;
}
u32 hash = 0;
const struct batadv_dat_entry *dat = data;
const unsigned char *key;
+ __be16 vid;
u32 i;
key = (const unsigned char *)&dat->ip;
hash ^= (hash >> 6);
}
- key = (const unsigned char *)&dat->vid;
+ vid = htons(dat->vid);
+ key = (__force const unsigned char *)&vid;
for (i = 0; i < sizeof(dat->vid); i++) {
hash += key[i];
hash += (hash << 10);
hpad = (info->hdr_len + CFUSB_PAD_DESCR_SZ) & (CFUSB_ALIGNMENT - 1);
if (skb_headroom(skb) < ETH_HLEN + CFUSB_PAD_DESCR_SZ + hpad) {
- pr_warn("Headroom to small\n");
+ pr_warn("Headroom too small\n");
kfree_skb(skb);
return -EIO;
}
put_online_cpus();
}
+/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
+static void gro_normal_list(struct napi_struct *napi)
+{
+ if (!napi->rx_count)
+ return;
+ netif_receive_skb_list_internal(&napi->rx_list);
+ INIT_LIST_HEAD(&napi->rx_list);
+ napi->rx_count = 0;
+}
+
+/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
+ * pass the whole batch up to the stack.
+ */
+static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb)
+{
+ list_add_tail(&skb->list, &napi->rx_list);
+ if (++napi->rx_count >= gro_normal_batch)
+ gro_normal_list(napi);
+}
+
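/*
 * Illustrative userspace sketch of the batching pattern above: queue
 * items until the batch threshold is hit, then hand the whole batch off
 * at once; a final flush drains any partial tail. Threshold and item
 * type are hypothetical.
 */
#include <stdio.h>

#define DEMO_BATCH 8

static int batch[DEMO_BATCH];
static int batch_count;

static void flush_batch(void)	/* gro_normal_list() analogue */
{
	int i;

	if (!batch_count)
		return;
	for (i = 0; i < batch_count; i++)
		printf("deliver %d\n", batch[i]);
	batch_count = 0;
}

static void queue_one(int item)	/* gro_normal_one() analogue */
{
	batch[batch_count++] = item;
	if (batch_count >= DEMO_BATCH)
		flush_batch();
}

int main(void)
{
	int i;

	for (i = 0; i < 20; i++)
		queue_one(i);
	flush_batch();	/* like the flush at napi completion */
	return 0;
}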
INDIRECT_CALLABLE_DECLARE(int inet_gro_complete(struct sk_buff *, int));
INDIRECT_CALLABLE_DECLARE(int ipv6_gro_complete(struct sk_buff *, int));
-static int napi_gro_complete(struct sk_buff *skb)
+static int napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
{
struct packet_offload *ptype;
__be16 type = skb->protocol;
}
out:
- return netif_receive_skb_internal(skb);
+ gro_normal_one(napi, skb);
+ return NET_RX_SUCCESS;
}
static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
return;
skb_list_del_init(skb);
- napi_gro_complete(skb);
+ napi_gro_complete(napi, skb);
napi->gro_hash[index].count--;
}
}
}
-static void gro_flush_oldest(struct list_head *head)
+static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
{
struct sk_buff *oldest;
* SKB to the chain.
*/
skb_list_del_init(oldest);
- napi_gro_complete(oldest);
+ napi_gro_complete(napi, oldest);
}
INDIRECT_CALLABLE_DECLARE(struct sk_buff *inet_gro_receive(struct list_head *,
if (pp) {
skb_list_del_init(pp);
- napi_gro_complete(pp);
+ napi_gro_complete(napi, pp);
napi->gro_hash[hash].count--;
}
goto normal;
if (unlikely(napi->gro_hash[hash].count >= MAX_GRO_SKBS)) {
- gro_flush_oldest(gro_head);
+ gro_flush_oldest(napi, gro_head);
} else {
napi->gro_hash[hash].count++;
}
}
EXPORT_SYMBOL(gro_find_complete_by_type);
-/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
-static void gro_normal_list(struct napi_struct *napi)
-{
- if (!napi->rx_count)
- return;
- netif_receive_skb_list_internal(&napi->rx_list);
- INIT_LIST_HEAD(&napi->rx_list);
- napi->rx_count = 0;
-}
-
-/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
- * pass the whole batch up to the stack.
- */
-static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb)
-{
- list_add_tail(&skb->list, &napi->rx_list);
- if (++napi->rx_count >= gro_normal_batch)
- gro_normal_list(napi);
-}
-
static void napi_skb_free_stolen_head(struct sk_buff *skb)
{
skb_dst_drop(skb);
NAPIF_STATE_IN_BUSY_POLL)))
return false;
- gro_normal_list(n);
-
if (n->gro_bitmask) {
unsigned long timeout = 0;
hrtimer_start(&n->timer, ns_to_ktime(timeout),
HRTIMER_MODE_REL_PINNED);
}
+
+ gro_normal_list(n);
+
if (unlikely(!list_empty(&n->poll_list))) {
/* If n->poll_list is not empty, we need to mask irqs */
local_irq_save(flags);
goto out_unlock;
}
- gro_normal_list(n);
-
if (n->gro_bitmask) {
/* flush too old packets
* If HZ < 1000, flush all packets.
napi_gro_flush(n, HZ >= 1000);
}
+ gro_normal_list(n);
+
/* Some drivers may have called napi_schedule
* prior to exhausting their budget.
*/
}
EXPORT_SYMBOL(__dev_set_mtu);
+int dev_validate_mtu(struct net_device *dev, int new_mtu,
+ struct netlink_ext_ack *extack)
+{
+ /* MTU must be positive, and in range */
+ if (new_mtu < 0 || new_mtu < dev->min_mtu) {
+ NL_SET_ERR_MSG(extack, "mtu less than device minimum");
+ return -EINVAL;
+ }
+
+ if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
+ NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
+ return -EINVAL;
+ }
+ return 0;
+}
+
/**
* dev_set_mtu_ext - Change maximum transfer unit
* @dev: device
if (new_mtu == dev->mtu)
return 0;
- /* MTU must be positive, and in range */
- if (new_mtu < 0 || new_mtu < dev->min_mtu) {
- NL_SET_ERR_MSG(extack, "mtu less than device minimum");
- return -EINVAL;
- }
-
- if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
- NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
- return -EINVAL;
- }
+ err = dev_validate_mtu(dev, new_mtu, extack);
+ if (err)
+ return err;
if (!netif_device_present(dev))
return -ENODEV;
void netdev_update_lockdep_key(struct net_device *dev)
{
- struct netdev_queue *queue;
- int i;
-
- lockdep_unregister_key(&dev->qdisc_xmit_lock_key);
lockdep_unregister_key(&dev->addr_list_lock_key);
-
- lockdep_register_key(&dev->qdisc_xmit_lock_key);
lockdep_register_key(&dev->addr_list_lock_key);
lockdep_set_class(&dev->addr_list_lock, &dev->addr_list_lock_key);
- for (i = 0; i < dev->num_tx_queues; i++) {
- queue = netdev_get_tx_queue(dev, i);
-
- lockdep_set_class(&queue->_xmit_lock,
- &dev->qdisc_xmit_lock_key);
- }
}
EXPORT_SYMBOL(netdev_update_lockdep_key);
goto err_uninit;
ret = netdev_register_kobject(dev);
- if (ret)
+ if (ret) {
+ dev->reg_state = NETREG_UNREGISTERED;
goto err_uninit;
+ }
dev->reg_state = NETREG_REGISTERED;
__netdev_update_features(dev);
devlink_port->attrs.flavour != DEVLINK_PORT_FLAVOUR_DSA;
}
-#define DEVLINK_PORT_TYPE_WARN_TIMEOUT (HZ * 30)
+#define DEVLINK_PORT_TYPE_WARN_TIMEOUT (HZ * 3600)
static void devlink_port_type_warn_schedule(struct devlink_port *devlink_port)
{
EXPORT_SYMBOL_GPL(devlink_region_destroy);
/**
- * devlink_region_shapshot_id_get - get snapshot ID
+ * devlink_region_snapshot_id_get - get snapshot ID
*
* This callback should be called when adding a new snapshot,
* Driver should use the same id for multiple snapshots taken
*
* @devlink: devlink
*/
-u32 devlink_region_shapshot_id_get(struct devlink *devlink)
+u32 devlink_region_snapshot_id_get(struct devlink *devlink)
{
u32 id;
return id;
}
-EXPORT_SYMBOL_GPL(devlink_region_shapshot_id_get);
+EXPORT_SYMBOL_GPL(devlink_region_snapshot_id_get);
/**
* devlink_region_snapshot_create - create a new snapshot
/* First find the starting scatterlist element */
i = msg->sg.start;
do {
+ offset += len;
len = sk_msg_elem(msg, i)->length;
if (start < offset + len)
break;
- offset += len;
sk_msg_iter_var_next(i);
} while (i != msg->sg.end);
u32, len, u64, flags)
{
struct scatterlist sge, nsge, nnsge, rsge = {0}, *psge;
- u32 new, i = 0, l, space, copy = 0, offset = 0;
+ u32 new, i = 0, l = 0, space, copy = 0, offset = 0;
u8 *raw, *to, *from;
struct page *page;
/* First find the starting scatterlist element */
i = msg->sg.start;
do {
+ offset += l;
l = sk_msg_elem(msg, i)->length;
if (start < offset + l)
break;
- offset += l;
sk_msg_iter_var_next(i);
} while (i != msg->sg.end);
sk_msg_iter_var_next(i);
sg_unmark_end(psge);
+ sg_unmark_end(&rsge);
sk_msg_iter_next(msg, end);
}
BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
u32, len, u64, flags)
{
- u32 i = 0, l, space, offset = 0;
+ u32 i = 0, l = 0, space, offset = 0;
u64 last = start + len;
int pop;
/* First find the starting scatterlist element */
i = msg->sg.start;
do {
+ offset += l;
l = sk_msg_elem(msg, i)->length;
if (start < offset + l)
break;
- offset += l;
sk_msg_iter_var_next(i);
} while (i != msg->sg.end);
if (sk) {
sk = sk_to_full_sk(sk);
if (!sk_fullsock(sk)) {
- if (!sock_flag(sk, SOCK_RCU_FREE))
- sock_gen_put(sk);
+ sock_gen_put(sk);
return NULL;
}
}
if (sk) {
sk = sk_to_full_sk(sk);
if (!sk_fullsock(sk)) {
- if (!sock_flag(sk, SOCK_RCU_FREE))
- sock_gen_put(sk);
+ sock_gen_put(sk);
return NULL;
}
}
BPF_CALL_1(bpf_sk_release, struct sock *, sk)
{
- if (!sock_flag(sk, SOCK_RCU_FREE))
+ /* Only full sockets have sk->sk_flags. */
+ if (!sk_fullsock(sk) || !sock_flag(sk, SOCK_RCU_FREE))
sock_gen_put(sk);
return 0;
}
*pos = cpu+1;
return per_cpu_ptr(tbl->stats, cpu);
}
+ (*pos)++;
return NULL;
}
dev->rtnl_link_ops = ops;
dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
- if (tb[IFLA_MTU])
- dev->mtu = nla_get_u32(tb[IFLA_MTU]);
+ if (tb[IFLA_MTU]) {
+ u32 mtu = nla_get_u32(tb[IFLA_MTU]);
+ int err;
+
+ err = dev_validate_mtu(dev, mtu, extack);
+ if (err) {
+ free_netdev(dev);
+ return ERR_PTR(err);
+ }
+ dev->mtu = mtu;
+ }
if (tb[IFLA_ADDRESS]) {
memcpy(dev->dev_addr, nla_data(tb[IFLA_ADDRESS]),
nla_len(tb[IFLA_ADDRESS]));
struct sock *sk;
sk = xchg(psk, NULL);
- if (sk)
+ if (sk) {
+ lock_sock(sk);
sock_map_unref(sk, psk);
+ release_sock(sk);
+ }
}
raw_spin_unlock_bh(&stab->lock);
rcu_read_unlock();
raw_spin_lock_bh(&bucket->lock);
hlist_for_each_entry_safe(elem, node, &bucket->head, node) {
hlist_del_rcu(&elem->node);
+ lock_sock(elem->sk);
sock_map_unref(elem->sk, elem);
+ release_sock(elem->sk);
}
raw_spin_unlock_bh(&bucket->lock);
}
}
EXPORT_SYMBOL(inet_proto_csum_replace4);
+/**
+ * inet_proto_csum_replace16 - update layer 4 header checksum field
+ * @sum: Layer 4 header checksum field
+ * @skb: sk_buff for the packet
+ * @from: old IPv6 address
+ * @to: new IPv6 address
+ * @pseudohdr: True if layer 4 header checksum includes pseudoheader
+ *
+ * Update layer 4 header as per the update in IPv6 src/dst address.
+ *
+ * There is no need to update skb->csum in this function, because the updates
+ * to the two fields a) IPv6 src/dst address and b) L4 header checksum cancel
+ * each other out in the skb->csum calculation. inet_proto_csum_replace4(), by
+ * contrast, does need to update skb->csum, because the updates to its three
+ * fields a) IPv4 src/dst address, b) IPv4 header checksum and c) L4 header
+ * checksum add up to the same diff as the L4 header checksum alone.
+ */
void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
const __be32 *from, const __be32 *to,
bool pseudohdr)
if (skb->ip_summed != CHECKSUM_PARTIAL) {
*sum = csum_fold(csum_partial(diff, sizeof(diff),
~csum_unfold(*sum)));
- if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr)
- skb->csum = ~csum_partial(diff, sizeof(diff),
- ~skb->csum);
} else if (pseudohdr)
*sum = ~csum_fold(csum_partial(diff, sizeof(diff),
csum_unfold(*sum)));
}
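/*
 * Illustrative userspace sketch of the incremental one's-complement
 * update these helpers perform on the L4 checksum field (cf. RFC 1624:
 * HC' = ~(~HC + ~m + m')), reduced to a single 16-bit word for brevity.
 * The sample values are arbitrary.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t fold32(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);	/* end-around carry */
	return (uint16_t)sum;
}

static uint16_t csum_update(uint16_t check, uint16_t from, uint16_t to)
{
	uint32_t sum = (uint16_t)~check;

	sum += (uint16_t)~from;
	sum += to;
	return (uint16_t)~fold32(sum);
}

int main(void)
{
	/* changing a header word from 0xc0a8 to 0xc0a9 adjusts the check */
	printf("%#x\n", csum_update(0x1c46, 0xc0a8, 0xc0a9));
	return 0;
}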
static const struct dsa_device_ops gswip_netdev_ops = {
- .name = "gwsip",
+ .name = "gswip",
.proto = DSA_TAG_PROTO_GSWIP,
.xmit = gswip_tag_xmit,
.rcv = gswip_tag_rcv,
struct dsa_port *dp = dsa_slave_to_port(dev);
u16 *phdr, hdr;
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
-
if (skb_cow_head(skb, 0) < 0)
return NULL;
void hsr_debugfs_create_root(void);
void hsr_debugfs_remove_root(void);
#else
-static inline void void hsr_debugfs_rename(struct net_device *dev)
+static inline void hsr_debugfs_rename(struct net_device *dev)
{
}
static inline void hsr_debugfs_init(struct hsr_priv *priv,
if (!x)
goto out_reset;
+ skb->mark = xfrm_smark_get(skb->mark, x);
+
sp->xvec[sp->len++] = x;
sp->olen++;
int count = cb->args[2];
t_key key = cb->args[3];
+ /* First time here, count and key are both always 0. Count > 0
+ * and key == 0 means the dump has wrapped around and we are done.
+ */
+ if (count && !key)
+ return skb->len;
+
while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
int err;
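/*
 * Illustrative sketch of the wrap detection above (userspace,
 * hypothetical cursor layout): a resumable dump stores (count, key)
 * between invocations; once at least one entry has been emitted, a key
 * of 0 can only mean the iterator wrapped past the end of the keyspace,
 * so the dump is complete.
 */
#include <stdbool.h>

struct demo_cursor {
	unsigned int count;	/* entries emitted so far */
	unsigned int key;	/* next key to visit; 0 after a wrap */
};

bool demo_dump_done(const struct demo_cursor *c)
{
	return c->count && !c->key;
}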
[FOU_ATTR_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG, },
[FOU_ATTR_LOCAL_V4] = { .type = NLA_U32, },
[FOU_ATTR_PEER_V4] = { .type = NLA_U32, },
- [FOU_ATTR_LOCAL_V6] = { .type = sizeof(struct in6_addr), },
- [FOU_ATTR_PEER_V6] = { .type = sizeof(struct in6_addr), },
+ [FOU_ATTR_LOCAL_V6] = { .len = sizeof(struct in6_addr), },
+ [FOU_ATTR_PEER_V6] = { .len = sizeof(struct in6_addr), },
[FOU_ATTR_PEER_PORT] = { .type = NLA_U16, },
[FOU_ATTR_IFINDEX] = { .type = NLA_S32, },
};
iph->version = 4;
iph->ihl = 5;
- if (tunnel->collect_md) {
- dev->features |= NETIF_F_NETNS_LOCAL;
+ if (tunnel->collect_md)
netif_keep_dst(dev);
- }
return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_init);
int mtu;
if (!dst) {
- dev->stats.tx_carrier_errors++;
- goto tx_error_icmp;
+ struct rtable *rt;
+
+ fl->u.ip4.flowi4_oif = dev->ifindex;
+ fl->u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
+ rt = __ip_route_output_key(dev_net(dev), &fl->u.ip4);
+ if (IS_ERR(rt)) {
+ dev->stats.tx_carrier_errors++;
+ goto tx_error_icmp;
+ }
+ dst = &rt->dst;
+ skb_dst_set(skb, dst);
}
dst_hold(dst);
return 1;
}
-static inline int check_target(struct arpt_entry *e, const char *name)
+static int check_target(struct arpt_entry *e, struct net *net, const char *name)
{
struct xt_entry_target *t = arpt_get_target(e);
struct xt_tgchk_param par = {
+ .net = net,
.table = name,
.entryinfo = e,
.target = t->u.kernel.target,
return xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false);
}
-static inline int
-find_check_entry(struct arpt_entry *e, const char *name, unsigned int size,
+static int
+find_check_entry(struct arpt_entry *e, struct net *net, const char *name,
+ unsigned int size,
struct xt_percpu_counter_alloc_state *alloc_state)
{
struct xt_entry_target *t;
}
t->u.kernel.target = target;
- ret = check_target(e, name);
+ ret = check_target(e, net, name);
if (ret)
goto err;
return 0;
return 0;
}
-static inline void cleanup_entry(struct arpt_entry *e)
+static void cleanup_entry(struct arpt_entry *e, struct net *net)
{
struct xt_tgdtor_param par;
struct xt_entry_target *t;
t = arpt_get_target(e);
+ par.net = net;
par.target = t->u.kernel.target;
par.targinfo = t->data;
par.family = NFPROTO_ARP;
/* Checks and translates the user-supplied table segment (held in
* newinfo).
*/
-static int translate_table(struct xt_table_info *newinfo, void *entry0,
+static int translate_table(struct net *net,
+ struct xt_table_info *newinfo,
+ void *entry0,
const struct arpt_replace *repl)
{
struct xt_percpu_counter_alloc_state alloc_state = { 0 };
/* Finally, each sanity check must pass */
i = 0;
xt_entry_foreach(iter, entry0, newinfo->size) {
- ret = find_check_entry(iter, repl->name, repl->size,
+ ret = find_check_entry(iter, net, repl->name, repl->size,
&alloc_state);
if (ret != 0)
break;
xt_entry_foreach(iter, entry0, newinfo->size) {
if (i-- == 0)
break;
- cleanup_entry(iter);
+ cleanup_entry(iter, net);
}
return ret;
}
/* Decrease module usage counts and free resource */
loc_cpu_old_entry = oldinfo->entries;
xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
- cleanup_entry(iter);
+ cleanup_entry(iter, net);
xt_free_table_info(oldinfo);
if (copy_to_user(counters_ptr, counters,
goto free_newinfo;
}
- ret = translate_table(newinfo, loc_cpu_entry, &tmp);
+ ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
if (ret != 0)
goto free_newinfo;
free_newinfo_untrans:
xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
- cleanup_entry(iter);
+ cleanup_entry(iter, net);
free_newinfo:
xt_free_table_info(newinfo);
return ret;
}
}
-static int translate_compat_table(struct xt_table_info **pinfo,
+static int translate_compat_table(struct net *net,
+ struct xt_table_info **pinfo,
void **pentry0,
const struct compat_arpt_replace *compatr)
{
repl.num_counters = 0;
repl.counters = NULL;
repl.size = newinfo->size;
- ret = translate_table(newinfo, entry1, &repl);
+ ret = translate_table(net, newinfo, entry1, &repl);
if (ret)
goto free_newinfo;
goto free_newinfo;
}
- ret = translate_compat_table(&newinfo, &loc_cpu_entry, &tmp);
+ ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
if (ret != 0)
goto free_newinfo;
free_newinfo_untrans:
xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
- cleanup_entry(iter);
+ cleanup_entry(iter, net);
free_newinfo:
xt_free_table_info(newinfo);
return ret;
return ret;
}
-static void __arpt_unregister_table(struct xt_table *table)
+static void __arpt_unregister_table(struct net *net, struct xt_table *table)
{
struct xt_table_info *private;
void *loc_cpu_entry;
/* Decrease module usage counts and free resources */
loc_cpu_entry = private->entries;
xt_entry_foreach(iter, loc_cpu_entry, private->size)
- cleanup_entry(iter);
+ cleanup_entry(iter, net);
if (private->number > private->initial_entries)
module_put(table_owner);
xt_free_table_info(private);
loc_cpu_entry = newinfo->entries;
memcpy(loc_cpu_entry, repl->entries, repl->size);
- ret = translate_table(newinfo, loc_cpu_entry, repl);
+ ret = translate_table(net, newinfo, loc_cpu_entry, repl);
if (ret != 0)
goto out_free;
ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks));
if (ret != 0) {
- __arpt_unregister_table(new_table);
+ __arpt_unregister_table(net, new_table);
*res = NULL;
}
const struct nf_hook_ops *ops)
{
nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
- __arpt_unregister_table(table);
+ __arpt_unregister_table(net, table);
}
/* The built-in targets: standard (NULL) and error. */
*pos = cpu+1;
return &per_cpu(rt_cache_stat, cpu);
}
+ (*pos)++;
return NULL;
}
{
struct rb_node *p = rb_first(&sk->tcp_rtx_queue);
+ tcp_sk(sk)->highest_sack = NULL;
while (p) {
struct sk_buff *skb = rb_to_skb(p);
WRITE_ONCE(tp->write_seq, seq);
icsk->icsk_backoff = 0;
- tp->snd_cwnd = 2;
icsk->icsk_probes_out = 0;
icsk->icsk_rto = TCP_TIMEOUT_INIT;
tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
* bandwidth sample. Delivered is in packets and interval_us in uS and
* ratio will be <<1 for most connections. So delivered is first scaled.
*/
- bw = (u64)rs->delivered * BW_UNIT;
- do_div(bw, rs->interval_us);
+ bw = div64_long((u64)rs->delivered * BW_UNIT, rs->interval_us);
/* If this sample is application-limited, it is likely to have a very
* low delivered count that represents application behavior rather than
struct sk_psock *psock;
int copied, ret;
- if (unlikely(flags & MSG_ERRQUEUE))
- return inet_recv_error(sk, msg, len, addr_len);
- if (!skb_queue_empty(&sk->sk_receive_queue))
- return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
-
psock = sk_psock_get(sk);
if (unlikely(!psock))
return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
+ if (unlikely(flags & MSG_ERRQUEUE))
+ return inet_recv_error(sk, msg, len, addr_len);
+ if (!skb_queue_empty(&sk->sk_receive_queue) &&
+ sk_psock_queue_empty(psock))
+ return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
lock_sock(sk);
msg_bytes_ready:
copied = __tcp_bpf_recvmsg(sk, psock, msg, len, flags);
timeo = sock_rcvtimeo(sk, nonblock);
data = tcp_bpf_wait_data(sk, psock, flags, timeo, &err);
if (data) {
- if (skb_queue_empty(&sk->sk_receive_queue))
+ if (!sk_psock_queue_empty(psock))
goto msg_bytes_ready;
release_sock(sk);
sk_psock_put(sk, psock);
*/
delta = msg->sg.size;
psock->eval = sk_psock_msg_verdict(sk, psock, msg);
- if (msg->sg.size < delta)
- delta -= msg->sg.size;
- else
- delta = 0;
+ delta -= msg->sg.size;
}
if (msg->cork_bytes &&
/* This must be called before lost_out is incremented */
static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
{
- if (!tp->retransmit_skb_hint ||
- before(TCP_SKB_CB(skb)->seq,
- TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
+ if ((!tp->retransmit_skb_hint && tp->retrans_out >= tp->lost_out) ||
+ (tp->retransmit_skb_hint &&
+ before(TCP_SKB_CB(skb)->seq,
+ TCP_SKB_CB(tp->retransmit_skb_hint)->seq)))
tp->retransmit_skb_hint = skb;
}
}
/* Ignore very old stuff early */
- if (!after(sp[used_sacks].end_seq, prior_snd_una))
+ if (!after(sp[used_sacks].end_seq, prior_snd_una)) {
+ if (i == 0)
+ first_sack_index = -1;
continue;
+ }
used_sacks++;
}
tp->retransmit_skb_hint = NULL;
if (unlikely(skb == tp->lost_skb_hint))
tp->lost_skb_hint = NULL;
+ tcp_highest_sack_replace(sk, skb, next);
tcp_rtx_queue_unlink_and_free(skb, sk);
}
if (!nskb)
return -ENOMEM;
INIT_LIST_HEAD(&nskb->tcp_tsorted_anchor);
+ tcp_highest_sack_replace(sk, skb, nskb);
tcp_rtx_queue_unlink_and_free(skb, sk);
__skb_header_release(nskb);
tcp_rbtree_insert(&sk->tcp_rtx_queue, nskb);
rcu_read_unlock();
}
-void tcp_update_ulp(struct sock *sk, struct proto *proto)
+void tcp_update_ulp(struct sock *sk, struct proto *proto,
+ void (*write_space)(struct sock *sk))
{
struct inet_connection_sock *icsk = inet_csk(sk);
if (!icsk->icsk_ulp_ops) {
+ sk->sk_write_space = write_space;
sk->sk_prot = proto;
return;
}
if (icsk->icsk_ulp_ops->update)
- icsk->icsk_ulp_ops->update(sk, proto);
+ icsk->icsk_ulp_ops->update(sk, proto, write_space);
}
void tcp_cleanup_ulp(struct sock *sk)
if (likely(partial)) {
up->forward_deficit += size;
size = up->forward_deficit;
- if (size < (sk->sk_rcvbuf >> 2))
+ if (size < (sk->sk_rcvbuf >> 2) &&
+ !skb_queue_empty(&up->reader_queue))
return;
} else {
size += up->forward_deficit;
if (!x)
goto out_reset;
+ skb->mark = xfrm_smark_get(skb->mark, x);
+
sp->xvec[sp->len++] = x;
sp->olen++;
struct net *net = seq_file_net(seq);
struct ipv6_route_iter *iter = seq->private;
+ ++(*pos);
if (!v)
goto iter_table;
n = rcu_dereference_bh(((struct fib6_info *)v)->fib6_next);
- if (n) {
- ++*pos;
+ if (n)
return n;
- }
iter_table:
ipv6_route_check_sernum(iter);
r = fib6_walk_continue(&iter->w);
spin_unlock_bh(&iter->tbl->tb6_lock);
if (r > 0) {
- if (v)
- ++*pos;
return iter->w.leaf;
} else if (r < 0) {
fib6_walker_unlink(net, &iter->w);
dev->mtu -= 8;
if (tunnel->parms.collect_md) {
- dev->features |= NETIF_F_NETNS_LOCAL;
netif_keep_dst(dev);
}
ip6gre_tnl_init_features(dev);
dev->needs_free_netdev = true;
dev->priv_destructor = ip6gre_dev_free;
- dev->features |= NETIF_F_NETNS_LOCAL;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
netif_keep_dst(dev);
dev->needs_free_netdev = true;
dev->priv_destructor = ip6gre_dev_free;
- dev->features |= NETIF_F_NETNS_LOCAL;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
netif_keep_dst(dev);
if (err)
return err;
ip6_tnl_link_config(t);
- if (t->parms.collect_md) {
- dev->features |= NETIF_F_NETNS_LOCAL;
+ if (t->parms.collect_md)
netif_keep_dst(dev);
- }
return 0;
}
int err = -1;
int mtu;
- if (!dst)
- goto tx_err_link_failure;
+ if (!dst) {
+ fl->u.ip6.flowi6_oif = dev->ifindex;
+ fl->u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC;
+ dst = ip6_route_output(dev_net(dev), NULL, &fl->u.ip6);
+ if (dst->error) {
+ dst_release(dst);
+ dst = NULL;
+ goto tx_err_link_failure;
+ }
+ skb_dst_set(skb, dst);
+ }
dst_hold(dst);
dst = xfrm_lookup(t->net, dst, fl, NULL, 0);
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/dst_cache.h>
+#include <net/ip_tunnels.h>
#ifdef CONFIG_IPV6_SEG6_HMAC
#include <net/seg6_hmac.h>
#endif
skb_reset_network_header(skb);
skb_reset_transport_header(skb);
- skb->encapsulation = 0;
+ if (iptunnel_pull_offloads(skb))
+ return false;
return true;
}
return err;
}
+static void ieee80211_end_cac(struct wiphy *wiphy,
+ struct net_device *dev)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_local *local = sdata->local;
+
+ mutex_lock(&local->mtx);
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ /* it might be waiting for the local->mtx, but then
+ * by the time it gets it, sdata->wdev.cac_started
+ * will no longer be true
+ */
+ cancel_delayed_work(&sdata->dfs_cac_timer_work);
+
+ if (sdata->wdev.cac_started) {
+ ieee80211_vif_release_channel(sdata);
+ sdata->wdev.cac_started = false;
+ }
+ }
+ mutex_unlock(&local->mtx);
+}
+
static struct cfg80211_beacon_data *
cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon)
{
#endif
.get_channel = ieee80211_cfg_get_channel,
.start_radar_detection = ieee80211_start_radar_detection,
+ .end_cac = ieee80211_end_cac,
.channel_switch = ieee80211_channel_switch,
.set_qos_map = ieee80211_set_qos_map,
.set_ap_chanwidth = ieee80211_set_ap_chanwidth,
unsigned long fail_avg =
ewma_mesh_fail_avg_read(&sta->mesh->fail_avg);
+ if (sta->mesh->plink_state != NL80211_PLINK_ESTAB)
+ return MAX_METRIC;
+
/* Try to get rate based on HW/SW RC algorithm.
* Rate is returned in units of Kbps, correct this
* to comply with airtime calculation units
if ((keyid >> 6) != key->conf.keyidx)
return TKIP_DECRYPT_INVALID_KEYIDX;
- if (rx_ctx->ctx.state != TKIP_STATE_NOT_INIT &&
- (iv32 < rx_ctx->iv32 ||
- (iv32 == rx_ctx->iv32 && iv16 <= rx_ctx->iv16)))
+ /* Reject replays if the received TSC is smaller than or equal to the
+ * last received value in a valid message, but with an exception for
+ * the case where a new key has been set and no valid frame using that
+ * key has yet been received and the local RSC was initialized to 0. This
+ * exception allows the very first frame sent by the transmitter to be
+ * accepted even if that transmitter were to use TSC 0 (IEEE 802.11
+ * specifies that the TSC is initialized to 1 whenever a new key is taken into
+ * use).
+ */
+ if (iv32 < rx_ctx->iv32 ||
+ (iv32 == rx_ctx->iv32 &&
+ (iv16 < rx_ctx->iv16 ||
+ (iv16 == rx_ctx->iv16 &&
+ (rx_ctx->iv32 || rx_ctx->iv16 ||
+ rx_ctx->ctx.state != TKIP_STATE_NOT_INIT)))))
return TKIP_DECRYPT_REPLAY;
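/*
 * Illustrative userspace sketch of the replay rule above (hypothetical
 * types): reject any TSC lower than or equal to the stored RSC, except
 * that an equal, all-zero TSC is accepted while the stored RSC is still
 * the never-used (0, 0) state, i.e. no valid frame has yet been
 * received under this key.
 */
#include <stdbool.h>
#include <stdint.h>

struct demo_rx_ctx {
	uint32_t iv32;
	uint16_t iv16;
	bool seen_valid;	/* stands in for state != TKIP_STATE_NOT_INIT */
};

bool demo_is_replay(const struct demo_rx_ctx *rx, uint32_t iv32, uint16_t iv16)
{
	if (iv32 < rx->iv32)
		return true;
	if (iv32 != rx->iv32)
		return false;
	if (iv16 < rx->iv16)
		return true;
	return iv16 == rx->iv16 &&
	       (rx->iv32 || rx->iv16 || rx->seen_valid);
}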
if (only_iv) {
if (SET_WITH_TIMEOUT(set))
del_timer_sync(&map->gc);
- ip_set_free(map->members);
if (set->dsize && set->extensions & IPSET_EXT_DESTROY)
mtype_ext_cleanup(set);
+ ip_set_free(map->members);
ip_set_free(map);
set->data = NULL;
if (set->extensions & IPSET_EXT_DESTROY)
mtype_ext_cleanup(set);
- memset(map->members, 0, map->memsize);
+ bitmap_zero(map->members, map->elements);
set->elements = 0;
set->ext_size = 0;
}
/* Type structure */
struct bitmap_ip {
- void *members; /* the set members */
+ unsigned long *members; /* the set members */
u32 first_ip; /* host byte order, included in range */
u32 last_ip; /* host byte order, included in range */
u32 elements; /* number of max elements in the set */
u32 first_ip, u32 last_ip,
u32 elements, u32 hosts, u8 netmask)
{
- map->members = ip_set_alloc(map->memsize);
+ map->members = bitmap_zalloc(elements, GFP_KERNEL | __GFP_NOWARN);
if (!map->members)
return false;
map->first_ip = first_ip;
if (!map)
return -ENOMEM;
- map->memsize = bitmap_bytes(0, elements - 1);
+ map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
set->variant = &bitmap_ip;
if (!init_map_ip(set, map, first_ip, last_ip,
elements, hosts, netmask)) {
/* Type structure */
struct bitmap_ipmac {
- void *members; /* the set members */
+ unsigned long *members; /* the set members */
u32 first_ip; /* host byte order, included in range */
u32 last_ip; /* host byte order, included in range */
u32 elements; /* number of max elements in the set */
init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map,
u32 first_ip, u32 last_ip, u32 elements)
{
- map->members = ip_set_alloc(map->memsize);
+ map->members = bitmap_zalloc(elements, GFP_KERNEL | __GFP_NOWARN);
if (!map->members)
return false;
map->first_ip = first_ip;
if (!map)
return -ENOMEM;
- map->memsize = bitmap_bytes(0, elements - 1);
+ map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
set->variant = &bitmap_ipmac;
if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) {
kfree(map);
/* Type structure */
struct bitmap_port {
- void *members; /* the set members */
+ unsigned long *members; /* the set members */
u16 first_port; /* host byte order, included in range */
u16 last_port; /* host byte order, included in range */
u32 elements; /* number of max elements in the set */
init_map_port(struct ip_set *set, struct bitmap_port *map,
u16 first_port, u16 last_port)
{
- map->members = ip_set_alloc(map->memsize);
+ map->members = bitmap_zalloc(map->elements, GFP_KERNEL | __GFP_NOWARN);
if (!map->members)
return false;
map->first_port = first_port;
return -ENOMEM;
map->elements = elements;
- map->memsize = bitmap_bytes(0, map->elements);
+ map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
set->variant = &bitmap_port;
if (!init_map_port(set, map, first_port, last_port)) {
kfree(map);
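The bitmap conversion in the hunks above replaces an opaque byte buffer with the kernel's bitmap API; the sizing identity it relies on is that a bitmap of N bits occupies BITS_TO_LONGS(N) longs. A minimal sketch of the pattern (the helper name is hypothetical):

#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Allocate a zeroed bitmap of 'elements' bits and report the byte
 * size the rest of the code should account for. On a 64-bit kernel,
 * e.g. elements = 100 gives BITS_TO_LONGS(100) = 2 longs = 16 bytes.
 */
static unsigned long *example_bitmap_create(u32 elements, size_t *memsize)
{
	unsigned long *members = bitmap_zalloc(elements, GFP_KERNEL);

	if (!members)
		return NULL;

	*memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
	return members;
}

Clearing later uses bitmap_zero(members, elements), matching the flush-path change above.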
struct ip_set *set;
struct nlattr *tb[IPSET_ATTR_ADT_MAX + 1] = {};
int ret = 0;
+ u32 lineno;
if (unlikely(protocol_min_failed(attr) ||
!attr[IPSET_ATTR_SETNAME] ||
return -IPSET_ERR_PROTOCOL;
rcu_read_lock_bh();
- ret = set->variant->uadt(set, tb, IPSET_TEST, NULL, 0, 0);
+ ret = set->variant->uadt(set, tb, IPSET_TEST, &lineno, 0, 0);
rcu_read_unlock_bh();
/* Userspace can't trigger element to be re-added */
if (ret == -EAGAIN)
p = msg_end;
if (p + sizeof(s->v4) > buffer+buflen) {
- IP_VS_ERR_RL("BACKUP, Dropping buffer, to small\n");
+ IP_VS_ERR_RL("BACKUP, Dropping buffer, too small\n");
return;
}
s = (union ip_vs_sync_conn *)p;
unsigned int *timeouts = data;
int i;
+ if (!timeouts)
+ timeouts = dn->dccp_timeout;
+
/* set default DCCP timeouts. */
for (i=0; i<CT_DCCP_MAX; i++)
timeouts[i] = dn->dccp_timeout[i];
{
/* ORIGINAL */
/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA */
-/* init */ {sCW, sCW, sCW, sCE, sES, sSS, sSR, sSA, sCW, sHA},
+/* init */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCW, sHA},
/* init_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA},
/* abort */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
/* shutdown */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA, sCL, sSS},
/* REPLY */
/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA */
/* init */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},/* INIT in sCL Big TODO */
-/* init_ack */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},
+/* init_ack */ {sIV, sCW, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},
/* abort */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV, sCL},
/* shutdown */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA, sIV, sSR},
/* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA, sIV, sHA},
ct->proto.sctp.vtag[IP_CT_DIR_REPLY] = sh->vtag;
}
- ct->proto.sctp.state = new_state;
+ ct->proto.sctp.state = SCTP_CONNTRACK_NONE;
}
return true;
struct nf_sctp_net *sn = nf_sctp_pernet(net);
int i;
+ if (!timeouts)
+ timeouts = sn->timeouts;
+
/* set default SCTP timeouts. */
for (i=0; i<SCTP_CONNTRACK_MAX; i++)
timeouts[i] = sn->timeouts[i];
#define NF_FLOWTABLE_TCP_PICKUP_TIMEOUT (120 * HZ)
#define NF_FLOWTABLE_UDP_PICKUP_TIMEOUT (30 * HZ)
-static inline __s32 nf_flow_timeout_delta(unsigned int timeout)
-{
- return (__s32)(timeout - (u32)jiffies);
-}
-
static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
{
const struct nf_conntrack_l4proto *l4proto;
{
int err;
- flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
+ flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT;
err = rhashtable_insert_fast(&flow_table->rhashtable,
&flow->tuplehash[0].node,
if (nf_flow_nat_ip(flow, skb, thoff, dir) < 0)
return NF_DROP;
- flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
+ flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT;
iph = ip_hdr(skb);
ip_decrease_ttl(iph);
skb->tstamp = 0;
if (nf_flow_nat_ipv6(flow, skb, dir) < 0)
return NF_DROP;
- flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
+ flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT;
ip6h = ipv6_hdr(skb);
ip6h->hop_limit--;
skb->tstamp = 0;
enum flow_offload_tuple_dir dir,
struct nf_flow_rule *flow_rule)
{
- const struct flow_offload_tuple *tuple = &flow->tuplehash[dir].tuple;
struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule);
struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule);
+ const void *daddr = &flow->tuplehash[!dir].tuple.src_v4;
+ const struct dst_entry *dst_cache;
+ unsigned char ha[ETH_ALEN];
struct neighbour *n;
u32 mask, val;
+ u8 nud_state;
u16 val16;
- n = dst_neigh_lookup(tuple->dst_cache, &tuple->dst_v4);
+ dst_cache = flow->tuplehash[dir].tuple.dst_cache;
+ n = dst_neigh_lookup(dst_cache, daddr);
if (!n)
return -ENOENT;
+ read_lock_bh(&n->lock);
+ nud_state = n->nud_state;
+ ether_addr_copy(ha, n->ha);
+ read_unlock_bh(&n->lock);
+
+ if (!(nud_state & NUD_VALID)) {
+ neigh_release(n);
+ return -ENOENT;
+ }
+
mask = ~0xffffffff;
- memcpy(&val, n->ha, 4);
+ memcpy(&val, ha, 4);
flow_offload_mangle(entry0, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 0,
&val, &mask);
mask = ~0x0000ffff;
- memcpy(&val16, n->ha + 4, 2);
+ memcpy(&val16, ha + 4, 2);
val = val16;
flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4,
&val, &mask);
struct nf_flow_rule *flow_rule)
{
struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
- u32 mask = ~htonl(0xffff0000), port;
+ u32 mask, port;
u32 offset;
switch (dir) {
case FLOW_OFFLOAD_DIR_ORIGINAL:
port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port);
offset = 0; /* offsetof(struct tcphdr, source); */
+ port = htonl(port << 16);
+ mask = ~htonl(0xffff0000);
break;
case FLOW_OFFLOAD_DIR_REPLY:
port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port);
offset = 0; /* offsetof(struct tcphdr, dest); */
+ port = htonl(port);
+ mask = ~htonl(0xffff);
break;
default:
return;
}
- port = htonl(port << 16);
+
flow_offload_mangle(entry, flow_offload_l4proto(flow), offset,
&port, &mask);
}
struct nf_flow_rule *flow_rule)
{
struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
- u32 mask = ~htonl(0xffff), port;
+ u32 mask, port;
u32 offset;
switch (dir) {
case FLOW_OFFLOAD_DIR_ORIGINAL:
- port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port);
- offset = 0; /* offsetof(struct tcphdr, source); */
+ port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port);
+ offset = 0; /* offsetof(struct tcphdr, dest); */
+ port = htonl(port);
+ mask = ~htonl(0xffff);
break;
case FLOW_OFFLOAD_DIR_REPLY:
- port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port);
- offset = 0; /* offsetof(struct tcphdr, dest); */
+ port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port);
+ offset = 0; /* offsetof(struct tcphdr, source); */
+ port = htonl(port << 16);
+ mask = ~htonl(0xffff0000);
break;
default:
return;
}
- port = htonl(port);
+
flow_offload_mangle(entry, flow_offload_l4proto(flow), offset,
&port, &mask);
}
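The reworked helpers above exist because both 16-bit TCP/UDP ports share the first 32-bit word of the transport header, so an offset-0 write must place the new port in the correct half and mask off only that half; the old code computed one value/mask pair for both directions. A small userspace illustration (not kernel code) of the packing:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t sport = 8080;	/* 0x1f90: source port, upper half at offset 0 */
	uint32_t val_src = htonl((uint32_t)sport << 16);
	uint32_t mask_src = ~htonl(0xffff0000);	/* zeroes the rewritten half */

	uint16_t dport = 443;	/* 0x01bb: destination port, lower half */
	uint32_t val_dst = htonl(dport);
	uint32_t mask_dst = ~htonl(0xffff);	/* zeroes the rewritten half */

	printf("src: val=%08x mask=%08x\n", (unsigned)val_src, (unsigned)mask_src);
	printf("dst: val=%08x mask=%08x\n", (unsigned)val_dst, (unsigned)mask_dst);
	return 0;
}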
struct flow_offload *flow)
{
struct flow_offload_work *offload;
- s64 delta;
+ __s32 delta;
- delta = flow->timeout - jiffies;
+ delta = nf_flow_timeout_delta(flow->timeout);
if ((delta >= (9 * NF_FLOW_TIMEOUT) / 10) ||
flow->flags & FLOW_OFFLOAD_HW_DYING)
return;
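nf_flow_timeout_delta() (now shared, as its removal from this file above implies) is the standard wrap-safe jiffies comparison: timestamps are stored as (u32)jiffies + NF_FLOW_TIMEOUT and compared via signed 32-bit subtraction. A minimal standalone sketch, with 'now' standing in for (u32)jiffies:

#include <stdbool.h>
#include <stdint.h>

static inline int32_t flow_timeout_delta(uint32_t timeout, uint32_t now)
{
	/* Signed subtraction stays correct across u32 wraparound as
	 * long as the two values are within 2^31 ticks of each other.
	 */
	return (int32_t)(timeout - now);
}

static inline bool flow_is_expiring(uint32_t timeout, uint32_t now)
{
	return flow_timeout_delta(timeout, now) <= 0;
}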
return false;
hdr = (struct icmphdr *)(skb->data + hdroff);
+ switch (hdr->type) {
+ case ICMP_ECHO:
+ case ICMP_ECHOREPLY:
+ case ICMP_TIMESTAMP:
+ case ICMP_TIMESTAMPREPLY:
+ case ICMP_INFO_REQUEST:
+ case ICMP_INFO_REPLY:
+ case ICMP_ADDRESS:
+ case ICMP_ADDRESSREPLY:
+ break;
+ default:
+ return true;
+ }
inet_proto_csum_replace2(&hdr->checksum, skb,
hdr->un.echo.id, tuple->src.u.icmp.id, false);
hdr->un.echo.id = tuple->src.u.icmp.id;
#include <net/net_namespace.h>
#include <net/sock.h>
+#define NFT_MODULE_AUTOLOAD_LIMIT (MODULE_NAME_LEN - sizeof("nft-expr-255-"))
+
static LIST_HEAD(nf_tables_expressions);
static LIST_HEAD(nf_tables_objects);
static LIST_HEAD(nf_tables_flowtables);
static const struct nft_chain_type *chain_type[NFPROTO_NUMPROTO][NFT_CHAIN_T_MAX];
+static const struct nft_chain_type *
+__nft_chain_type_get(u8 family, enum nft_chain_types type)
+{
+ if (family >= NFPROTO_NUMPROTO ||
+ type >= NFT_CHAIN_T_MAX)
+ return NULL;
+
+ return chain_type[family][type];
+}
+
static const struct nft_chain_type *
__nf_tables_chain_type_lookup(const struct nlattr *nla, u8 family)
{
+ const struct nft_chain_type *type;
int i;
for (i = 0; i < NFT_CHAIN_T_MAX; i++) {
- if (chain_type[family][i] != NULL &&
- !nla_strcmp(nla, chain_type[family][i]->name))
- return chain_type[family][i];
+ type = __nft_chain_type_get(family, i);
+ if (!type)
+ continue;
+ if (!nla_strcmp(nla, type->name))
+ return type;
}
return NULL;
}
-/*
- * Loading a module requires dropping mutex that guards the
- * transaction.
- * We first need to abort any pending transactions as once
- * mutex is unlocked a different client could start a new
- * transaction. It must not see any 'future generation'
- * changes * as these changes will never happen.
- */
-#ifdef CONFIG_MODULES
-static int __nf_tables_abort(struct net *net);
+struct nft_module_request {
+ struct list_head list;
+ char module[MODULE_NAME_LEN];
+ bool done;
+};
-static void nft_request_module(struct net *net, const char *fmt, ...)
+#ifdef CONFIG_MODULES
+static int nft_request_module(struct net *net, const char *fmt, ...)
{
char module_name[MODULE_NAME_LEN];
+ struct nft_module_request *req;
va_list args;
int ret;
- __nf_tables_abort(net);
-
va_start(args, fmt);
ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
va_end(args);
- if (WARN(ret >= MODULE_NAME_LEN, "truncated: '%s' (len %d)", module_name, ret))
- return;
+ if (ret >= MODULE_NAME_LEN)
+ return 0;
- mutex_unlock(&net->nft.commit_mutex);
- request_module("%s", module_name);
- mutex_lock(&net->nft.commit_mutex);
+ list_for_each_entry(req, &net->nft.module_list, list) {
+ if (!strcmp(req->module, module_name)) {
+ if (req->done)
+ return 0;
+
+ /* A request to load this module already exists. */
+ return -EAGAIN;
+ }
+ }
+
+ req = kmalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ req->done = false;
+ strlcpy(req->module, module_name, MODULE_NAME_LEN);
+ list_add_tail(&req->list, &net->nft.module_list);
+
+ return -EAGAIN;
}
#endif
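The new nft_request_module() defers module loading instead of dropping the transaction mutex mid-transaction: the first call queues the name and returns -EAGAIN so the batch is aborted and replayed; the abort path then loads the queued modules with the mutex released and marks them done, so the replayed batch finds the request satisfied. A minimal sketch of that protocol (simplified, not the nf_tables API):

#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>

struct mod_request {
	struct list_head list;
	char name[MODULE_NAME_LEN];
	bool done;
};

/* Returns 0 if the module was already requested and loaded,
 * -EAGAIN if the caller must abort and replay the transaction,
 * -ENOMEM on allocation failure.
 */
static int request_module_deferred(struct list_head *reqs, const char *name)
{
	struct mod_request *req;

	list_for_each_entry(req, reqs, list) {
		if (!strcmp(req->name, name))
			return req->done ? 0 : -EAGAIN;
	}

	req = kmalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->done = false;
	strscpy(req->name, name, sizeof(req->name));
	list_add_tail(&req->list, reqs);
	return -EAGAIN;
}

On abort, the list is walked with the mutex dropped, request_module() is issued for each pending entry, and done is set before the batch is replayed (see nf_tables_module_autoload() further down).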
lockdep_nfnl_nft_mutex_not_held();
#ifdef CONFIG_MODULES
if (autoload) {
- nft_request_module(net, "nft-chain-%u-%.*s", family,
- nla_len(nla), (const char *)nla_data(nla));
- type = __nf_tables_chain_type_lookup(nla, family);
- if (type != NULL)
+ if (nft_request_module(net, "nft-chain-%u-%.*s", family,
+ nla_len(nla),
+ (const char *)nla_data(nla)) == -EAGAIN)
return ERR_PTR(-EAGAIN);
}
#endif
}
list_for_each_entry_safe(flowtable, nft, &ctx->table->flowtables, list) {
+ if (!nft_is_active_next(ctx->net, flowtable))
+ continue;
+
err = nft_delflowtable(ctx, flowtable);
if (err < 0)
goto out;
}
list_for_each_entry_safe(obj, ne, &ctx->table->objects, list) {
+ if (!nft_is_active_next(ctx->net, obj))
+ continue;
+
err = nft_delobj(ctx, obj);
if (err < 0)
goto out;
void nft_register_chain_type(const struct nft_chain_type *ctype)
{
- if (WARN_ON(ctype->family >= NFPROTO_NUMPROTO))
- return;
-
nfnl_lock(NFNL_SUBSYS_NFTABLES);
- if (WARN_ON(chain_type[ctype->family][ctype->type] != NULL)) {
+ if (WARN_ON(__nft_chain_type_get(ctype->family, ctype->type))) {
nfnl_unlock(NFNL_SUBSYS_NFTABLES);
return;
}
.len = NFT_CHAIN_MAXNAMELEN - 1 },
[NFTA_CHAIN_HOOK] = { .type = NLA_NESTED },
[NFTA_CHAIN_POLICY] = { .type = NLA_U32 },
- [NFTA_CHAIN_TYPE] = { .type = NLA_STRING },
+ [NFTA_CHAIN_TYPE] = { .type = NLA_STRING,
+ .len = NFT_MODULE_AUTOLOAD_LIMIT },
[NFTA_CHAIN_COUNTERS] = { .type = NLA_NESTED },
[NFTA_CHAIN_FLAGS] = { .type = NLA_U32 },
};
goto err_hook;
}
if (nft_hook_list_find(hook_list, hook)) {
+ kfree(hook);
err = -EEXIST;
goto err_hook;
}
hook->num = ntohl(nla_get_be32(ha[NFTA_HOOK_HOOKNUM]));
hook->priority = ntohl(nla_get_be32(ha[NFTA_HOOK_PRIORITY]));
- type = chain_type[family][NFT_CHAIN_T_DEFAULT];
+ type = __nft_chain_type_get(family, NFT_CHAIN_T_DEFAULT);
+ if (!type)
+ return -EOPNOTSUPP;
+
if (nla[NFTA_CHAIN_TYPE]) {
type = nf_tables_chain_type_lookup(net, nla[NFTA_CHAIN_TYPE],
family, autoload);
static int nft_expr_type_request_module(struct net *net, u8 family,
struct nlattr *nla)
{
- nft_request_module(net, "nft-expr-%u-%.*s", family,
- nla_len(nla), (char *)nla_data(nla));
- if (__nft_expr_type_get(family, nla))
+ if (nft_request_module(net, "nft-expr-%u-%.*s", family,
+ nla_len(nla), (char *)nla_data(nla)) == -EAGAIN)
return -EAGAIN;
return 0;
if (nft_expr_type_request_module(net, family, nla) == -EAGAIN)
return ERR_PTR(-EAGAIN);
- nft_request_module(net, "nft-expr-%.*s",
- nla_len(nla), (char *)nla_data(nla));
- if (__nft_expr_type_get(family, nla))
+ if (nft_request_module(net, "nft-expr-%.*s",
+ nla_len(nla),
+ (char *)nla_data(nla)) == -EAGAIN)
return ERR_PTR(-EAGAIN);
}
#endif
}
static const struct nla_policy nft_expr_policy[NFTA_EXPR_MAX + 1] = {
- [NFTA_EXPR_NAME] = { .type = NLA_STRING },
+ [NFTA_EXPR_NAME] = { .type = NLA_STRING,
+ .len = NFT_MODULE_AUTOLOAD_LIMIT },
[NFTA_EXPR_DATA] = { .type = NLA_NESTED },
};
err = PTR_ERR(ops);
#ifdef CONFIG_MODULES
if (err == -EAGAIN)
- nft_expr_type_request_module(ctx->net,
- ctx->family,
- tb[NFTA_EXPR_NAME]);
+ if (nft_expr_type_request_module(ctx->net,
+ ctx->family,
+ tb[NFTA_EXPR_NAME]) != -EAGAIN)
+ err = -ENOENT;
#endif
goto err1;
}
lockdep_nfnl_nft_mutex_not_held();
#ifdef CONFIG_MODULES
if (list_empty(&nf_tables_set_types)) {
- nft_request_module(ctx->net, "nft-set");
- if (!list_empty(&nf_tables_set_types))
+ if (nft_request_module(ctx->net, "nft-set") == -EAGAIN)
return ERR_PTR(-EAGAIN);
}
#endif
[NFTA_SET_ELEM_USERDATA] = { .type = NLA_BINARY,
.len = NFT_USERDATA_MAXLEN },
[NFTA_SET_ELEM_EXPR] = { .type = NLA_NESTED },
- [NFTA_SET_ELEM_OBJREF] = { .type = NLA_STRING },
+ [NFTA_SET_ELEM_OBJREF] = { .type = NLA_STRING,
+ .len = NFT_OBJ_MAXNAMELEN - 1 },
};
static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX + 1] = {
lockdep_nfnl_nft_mutex_not_held();
#ifdef CONFIG_MODULES
if (type == NULL) {
- nft_request_module(net, "nft-obj-%u", objtype);
- if (__nft_obj_type_get(objtype))
+ if (nft_request_module(net, "nft-obj-%u", objtype) == -EAGAIN)
return ERR_PTR(-EAGAIN);
}
#endif
lockdep_nfnl_nft_mutex_not_held();
#ifdef CONFIG_MODULES
if (type == NULL) {
- nft_request_module(net, "nf-flowtable-%u", family);
- if (__nft_flowtable_type_get(family))
+ if (nft_request_module(net, "nf-flowtable-%u", family) == -EAGAIN)
return ERR_PTR(-EAGAIN);
}
#endif
return ERR_PTR(-ENOENT);
}
+/* Only called from error and netdev event paths. */
static void nft_unregister_flowtable_hook(struct net *net,
struct nft_flowtable *flowtable,
struct nft_hook *hook)
struct nft_hook *hook;
list_for_each_entry(hook, &flowtable->hook_list, list)
- nft_unregister_flowtable_hook(net, flowtable, hook);
+ nf_unregister_net_hook(net, &hook->ops);
}
static int nft_register_flowtable_net_hooks(struct net *net,
{
struct nft_hook *hook, *next;
+ flowtable->data.type->free(&flowtable->data);
list_for_each_entry_safe(hook, next, &flowtable->hook_list, list) {
+ flowtable->data.type->setup(&flowtable->data, hook->ops.dev,
+ FLOW_BLOCK_UNBIND);
list_del_rcu(&hook->list);
kfree(hook);
}
kfree(flowtable->name);
- flowtable->data.type->free(&flowtable->data);
module_put(flowtable->data.type->owner);
kfree(flowtable);
}
if (hook->ops.dev != dev)
continue;
+ /* flow_offload_netdev_event() cleans up entries for us. */
nft_unregister_flowtable_hook(dev_net(dev), flowtable, hook);
list_del_rcu(&hook->list);
kfree_rcu(hook, rcu);
list_del_rcu(&chain->list);
}
+static void nf_tables_module_autoload_cleanup(struct net *net)
+{
+ struct nft_module_request *req, *next;
+
+ WARN_ON_ONCE(!list_empty(&net->nft.commit_list));
+ list_for_each_entry_safe(req, next, &net->nft.module_list, list) {
+ WARN_ON_ONCE(!req->done);
+ list_del(&req->list);
+ kfree(req);
+ }
+}
+
static void nf_tables_commit_release(struct net *net)
{
struct nft_trans *trans;
* to prevent expensive synchronize_rcu() in commit phase.
*/
if (list_empty(&net->nft.commit_list)) {
+ nf_tables_module_autoload_cleanup(net);
mutex_unlock(&net->nft.commit_mutex);
return;
}
list_splice_tail_init(&net->nft.commit_list, &nf_tables_destroy_list);
spin_unlock(&nf_tables_destroy_list_lock);
+ nf_tables_module_autoload_cleanup(net);
mutex_unlock(&net->nft.commit_mutex);
schedule_work(&trans_destroy_work);
return 0;
}
+static void nf_tables_module_autoload(struct net *net)
+{
+ struct nft_module_request *req, *next;
+ LIST_HEAD(module_list);
+
+ list_splice_init(&net->nft.module_list, &module_list);
+ mutex_unlock(&net->nft.commit_mutex);
+ list_for_each_entry_safe(req, next, &module_list, list) {
+ if (req->done) {
+ list_del(&req->list);
+ kfree(req);
+ } else {
+ request_module("%s", req->module);
+ req->done = true;
+ }
+ }
+ mutex_lock(&net->nft.commit_mutex);
+ list_splice(&module_list, &net->nft.module_list);
+}
+
static void nf_tables_abort_release(struct nft_trans *trans)
{
switch (trans->msg_type) {
kfree(trans);
}
-static int __nf_tables_abort(struct net *net)
+static int __nf_tables_abort(struct net *net, bool autoload)
{
struct nft_trans *trans, *next;
struct nft_trans_elem *te;
nf_tables_abort_release(trans);
}
+ if (autoload)
+ nf_tables_module_autoload(net);
+ else
+ nf_tables_module_autoload_cleanup(net);
+
return 0;
}
nft_validate_state_update(net, NFT_VALIDATE_SKIP);
}
-static int nf_tables_abort(struct net *net, struct sk_buff *skb)
+static int nf_tables_abort(struct net *net, struct sk_buff *skb, bool autoload)
{
- int ret = __nf_tables_abort(net);
+ int ret = __nf_tables_abort(net, autoload);
mutex_unlock(&net->nft.commit_mutex);
{
INIT_LIST_HEAD(&net->nft.tables);
INIT_LIST_HEAD(&net->nft.commit_list);
+ INIT_LIST_HEAD(&net->nft.module_list);
mutex_init(&net->nft.commit_mutex);
net->nft.base_seq = 1;
net->nft.validate_state = NFT_VALIDATE_SKIP;
{
mutex_lock(&net->nft.commit_mutex);
if (!list_empty(&net->nft.commit_list))
- __nf_tables_abort(net);
+ __nf_tables_abort(net, false);
__nft_release_tables(net);
mutex_unlock(&net->nft.commit_mutex);
WARN_ON_ONCE(!list_empty(&net->nft.tables));
mutex_lock(&net->nft.commit_mutex);
chain = __nft_offload_get_chain(dev);
- if (chain) {
+ if (chain && chain->flags & NFT_CHAIN_HW_OFFLOAD) {
struct nft_base_chain *basechain;
basechain = nft_base_chain(chain);
}
done:
if (status & NFNL_BATCH_REPLAY) {
- ss->abort(net, oskb);
+ ss->abort(net, oskb, true);
nfnl_err_reset(&err_list);
kfree_skb(skb);
module_put(ss->owner);
status |= NFNL_BATCH_REPLAY;
goto done;
} else if (err) {
- ss->abort(net, oskb);
+ ss->abort(net, oskb, false);
netlink_ack(oskb, nlmsg_hdr(oskb), err, NULL);
}
} else {
- ss->abort(net, oskb);
+ ss->abort(net, oskb, false);
}
if (ss->cleanup)
ss->cleanup(net);
static void nft_flow_offload_destroy(const struct nft_ctx *ctx,
const struct nft_expr *expr)
{
- struct nft_flow_offload *priv = nft_expr_priv(expr);
-
- priv->flowtable->use--;
nf_ct_netns_put(ctx->net, ctx->family);
}
int err;
u8 ttl;
+ if (!tb[NFTA_OSF_DREG])
+ return -EINVAL;
+
if (tb[NFTA_OSF_TTL]) {
ttl = nla_get_u8(tb[NFTA_OSF_TTL]);
if (ttl > 2)
struct nft_tunnel *priv = nft_expr_priv(expr);
u32 len;
- if (!tb[NFTA_TUNNEL_KEY] &&
+ if (!tb[NFTA_TUNNEL_KEY] ||
!tb[NFTA_TUNNEL_DREG])
return -EINVAL;
if (err < 0)
return err;
+ if (!tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION])
+ return -EINVAL;
+
version = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION]));
switch (version) {
case ERSPAN_VERSION:
hdr->size = cpu_to_le32(len);
hdr->confirm_rx = 0;
- skb_put_padto(skb, ALIGN(len, 4));
+ skb_put_padto(skb, ALIGN(len, 4) + sizeof(*hdr));
mutex_lock(&node->ep_lock);
if (node->ep)
int rc;
if (rose_ndevs > 0x7FFFFFFF/sizeof(struct net_device *)) {
- printk(KERN_ERR "ROSE: rose_proto_init - rose_ndevs parameter to large\n");
+ printk(KERN_ERR "ROSE: rose_proto_init - rose_ndevs parameter too large\n");
rc = -EINVAL;
goto out;
}
return tcf_idr_search(tn, a, index);
}
+static void tcf_ctinfo_cleanup(struct tc_action *a)
+{
+ struct tcf_ctinfo *ci = to_ctinfo(a);
+ struct tcf_ctinfo_params *cp;
+
+ cp = rcu_dereference_protected(ci->params, 1);
+ if (cp)
+ kfree_rcu(cp, rcu);
+}
+
static struct tc_action_ops act_ctinfo_ops = {
.kind = "ctinfo",
.id = TCA_ID_CTINFO,
.act = tcf_ctinfo_act,
.dump = tcf_ctinfo_dump,
.init = tcf_ctinfo_init,
+ .cleanup= tcf_ctinfo_cleanup,
.walk = tcf_ctinfo_walker,
.lookup = tcf_ctinfo_search,
.size = sizeof(struct tcf_ctinfo),
}
ife = to_ife(*a);
+ if (ret == ACT_P_CREATED)
+ INIT_LIST_HEAD(&ife->metalist);
+
err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
if (err < 0)
goto release_idr;
p->eth_type = ife_type;
}
-
- if (ret == ACT_P_CREATED)
- INIT_LIST_HEAD(&ife->metalist);
-
if (tb[TCA_IFE_METALST]) {
err = nla_parse_nested_deprecated(tb2, IFE_META_MAX,
tb[TCA_IFE_METALST], NULL,
&chain_info));
mutex_unlock(&chain->filter_chain_lock);
- tp_new = tcf_proto_create(nla_data(tca[TCA_KIND]),
- protocol, prio, chain, rtnl_held,
- extack);
+ tp_new = tcf_proto_create(name, protocol, prio, chain,
+ rtnl_held, extack);
if (IS_ERR(tp_new)) {
err = PTR_ERR(tp_new);
goto errout_tp;
}
em->data = (unsigned long) v;
}
+ em->datalen = data_len;
}
}
em->matchid = em_hdr->matchid;
em->flags = em_hdr->flags;
- em->datalen = data_len;
em->net = net;
err = 0;
q->avg_window_begin));
u64 b = q->avg_window_bytes * (u64)NSEC_PER_SEC;
- do_div(b, window_interval);
+ b = div64_u64(b, window_interval);
q->avg_peak_bandwidth =
cake_ewma(q->avg_peak_bandwidth, b,
b > q->avg_peak_bandwidth ? 2 : 8);
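do_div() takes a 32-bit divisor, so passing the 64-bit window_interval would silently truncate it once the interval exceeds 2^32 nanoseconds (about 4.29 seconds); div64_u64() performs a full 64-by-64-bit division. A kernel-style sketch of the corrected computation (helper name hypothetical):

#include <linux/math64.h>
#include <linux/time64.h>

static u64 avg_bytes_per_sec(u64 bytes, u64 interval_ns)
{
	u64 b = bytes * NSEC_PER_SEC;	/* can overflow for very large
					 * byte counts, as in the original
					 */

	return div64_u64(b, interval_ns);	/* 64-bit / 64-bit */
}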
if (tb[TCA_FQ_QUANTUM]) {
u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);
- if (quantum > 0)
+ if (quantum > 0 && quantum <= (1 << 20)) {
q->quantum = quantum;
- else
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "invalid quantum");
err = -EINVAL;
+ }
}
if (tb[TCA_FQ_INITIAL_QUANTUM])
struct tc_prio_qopt_offload graft_offload;
unsigned long band = arg - 1;
- if (new == NULL)
- new = &noop_qdisc;
+ if (!new) {
+ new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+ TC_H_MAKE(sch->handle, arg), extack);
+ if (!new)
+ new = &noop_qdisc;
+ else
+ qdisc_hash_add(new, true);
+ }
*old = qdisc_replace(sch, new, &q->queues[band]);
/* Generate an INIT ACK chunk. */
new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC,
0);
- if (!new_obj)
- goto nomem;
+ if (!new_obj) {
+ error = -ENOMEM;
+ break;
+ }
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(new_obj));
if (!new_obj) {
if (cmd->obj.chunk)
sctp_chunk_free(cmd->obj.chunk);
- goto nomem;
+ error = -ENOMEM;
+ break;
}
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(new_obj));
/* Generate a SHUTDOWN chunk. */
new_obj = sctp_make_shutdown(asoc, chunk);
- if (!new_obj)
- goto nomem;
+ if (!new_obj) {
+ error = -ENOMEM;
+ break;
+ }
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(new_obj));
break;
break;
}
- if (error)
+ if (error) {
+ cmd = sctp_next_cmd(commands);
+ while (cmd) {
+ if (cmd->verb == SCTP_CMD_REPLY)
+ sctp_chunk_free(cmd->obj.chunk);
+ cmd = sctp_next_cmd(commands);
+ }
break;
+ }
}
-out:
/* If this is in response to a received chunk, wait until
* we are done with the packet to open the queue so that we don't
* send multiple packets in response to a single request.
sp->data_ready_signalled = 0;
return error;
-nomem:
- error = -ENOMEM;
- goto out;
}
static void rpcrdma_sendctx_put_locked(struct rpcrdma_xprt *r_xprt,
struct rpcrdma_sendctx *sc);
static void rpcrdma_reqs_reset(struct rpcrdma_xprt *r_xprt);
-static void rpcrdma_reps_destroy(struct rpcrdma_buffer *buf);
+static void rpcrdma_reps_unmap(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_mrs_destroy(struct rpcrdma_xprt *r_xprt);
static struct rpcrdma_regbuf *
ia->ri_id->device->name,
rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt));
#endif
+ init_completion(&ia->ri_remove_done);
set_bit(RPCRDMA_IAF_REMOVING, &ia->ri_flags);
ep->rep_connected = -ENODEV;
xprt_force_disconnect(xprt);
int rc;
init_completion(&ia->ri_done);
- init_completion(&ia->ri_remove_done);
id = rdma_create_id(xprt->rx_xprt.xprt_net, rpcrdma_cm_event_handler,
xprt, RDMA_PS_TCP, IB_QPT_RC);
/* The ULP is responsible for ensuring all DMA
* mappings and MRs are gone.
*/
- rpcrdma_reps_destroy(buf);
+ rpcrdma_reps_unmap(r_xprt);
list_for_each_entry(req, &buf->rb_allreqs, rl_all) {
rpcrdma_regbuf_dma_unmap(req->rl_rdmabuf);
rpcrdma_regbuf_dma_unmap(req->rl_sendbuf);
struct ib_qp_init_attr *qp_init_attr)
{
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
+ struct rpcrdma_ep *ep = &r_xprt->rx_ep;
int rc, err;
trace_xprtrdma_reinsert(r_xprt);
pr_err("rpcrdma: rpcrdma_ep_create returned %d\n", err);
goto out2;
}
+ memcpy(qp_init_attr, &ep->rep_attr, sizeof(*qp_init_attr));
rc = -ENETUNREACH;
err = rdma_create_qp(ia->ri_id, ia->ri_pd, qp_init_attr);
rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
rep->rr_recv_wr.num_sge = 1;
rep->rr_temp = temp;
+ list_add(&rep->rr_all, &r_xprt->rx_buf.rb_all_reps);
return rep;
out_free:
static void rpcrdma_rep_destroy(struct rpcrdma_rep *rep)
{
+ list_del(&rep->rr_all);
rpcrdma_regbuf_free(rep->rr_rdmabuf);
kfree(rep);
}
static void rpcrdma_rep_put(struct rpcrdma_buffer *buf,
struct rpcrdma_rep *rep)
{
- if (!rep->rr_temp)
- llist_add(&rep->rr_node, &buf->rb_free_reps);
- else
- rpcrdma_rep_destroy(rep);
+ llist_add(&rep->rr_node, &buf->rb_free_reps);
+}
+
+static void rpcrdma_reps_unmap(struct rpcrdma_xprt *r_xprt)
+{
+ struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
+ struct rpcrdma_rep *rep;
+
+ list_for_each_entry(rep, &buf->rb_all_reps, rr_all)
+ rpcrdma_regbuf_dma_unmap(rep->rr_rdmabuf);
}
static void rpcrdma_reps_destroy(struct rpcrdma_buffer *buf)
INIT_LIST_HEAD(&buf->rb_send_bufs);
INIT_LIST_HEAD(&buf->rb_allreqs);
+ INIT_LIST_HEAD(&buf->rb_all_reps);
rc = -ENOMEM;
for (i = 0; i < buf->rb_max_requests; i++) {
wr = NULL;
while (needed) {
rep = rpcrdma_rep_get_locked(buf);
+ if (rep && rep->rr_temp) {
+ rpcrdma_rep_destroy(rep);
+ continue;
+ }
if (!rep)
rep = rpcrdma_rep_create(r_xprt, temp);
if (!rep)
struct xdr_stream rr_stream;
struct llist_node rr_node;
struct ib_recv_wr rr_recv_wr;
+ struct list_head rr_all;
};
/* To reduce the rate at which a transport invokes ib_post_recv
struct list_head rb_allreqs;
struct list_head rb_all_mrs;
+ struct list_head rb_all_reps;
struct llist_head rb_free_reps;
core.o link.o discover.o msg.o \
name_distr.o subscr.o monitor.o name_table.o net.o \
netlink.o netlink_compat.o node.o socket.o eth_media.o \
- topsrv.o socket.o group.o trace.o
+ topsrv.o group.o trace.o
CFLAGS_trace.o += -I$(src)
obj-$(CONFIG_TIPC_DIAG) += diag.o
-
-tipc_diag-y := diag.o
return -ENOMEM;
}
- attrbuf = kmalloc_array(tipc_genl_family.maxattr + 1,
- sizeof(struct nlattr *), GFP_KERNEL);
+ attrbuf = kcalloc(tipc_genl_family.maxattr + 1,
+ sizeof(struct nlattr *), GFP_KERNEL);
if (!attrbuf) {
err = -ENOMEM;
goto err_out;
*
* Caller must hold socket lock
*/
-static void tsk_rej_rx_queue(struct sock *sk)
+static void tsk_rej_rx_queue(struct sock *sk, int error)
{
struct sk_buff *skb;
while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
- tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
+ tipc_sk_respond(sk, skb, error);
}
static bool tipc_sk_connected(struct sock *sk)
/* Remove pending SYN */
__skb_queue_purge(&sk->sk_write_queue);
- /* Reject all unreceived messages, except on an active connection
- * (which disconnects locally & sends a 'FIN+' to peer).
- */
- while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
- if (TIPC_SKB_CB(skb)->bytes_read) {
- kfree_skb(skb);
- continue;
- }
- if (!tipc_sk_type_connectionless(sk) &&
- sk->sk_state != TIPC_DISCONNECTING) {
- tipc_set_sk_state(sk, TIPC_DISCONNECTING);
- tipc_node_remove_conn(net, dnode, tsk->portid);
- }
- tipc_sk_respond(sk, skb, error);
+ /* Remove partially received buffer if any */
+ skb = skb_peek(&sk->sk_receive_queue);
+ if (skb && TIPC_SKB_CB(skb)->bytes_read) {
+ __skb_unlink(skb, &sk->sk_receive_queue);
+ kfree_skb(skb);
}
- if (tipc_sk_type_connectionless(sk))
+ /* Reject all unreceived messages if connectionless */
+ if (tipc_sk_type_connectionless(sk)) {
+ tsk_rej_rx_queue(sk, error);
return;
+ }
- if (sk->sk_state != TIPC_DISCONNECTING) {
+ switch (sk->sk_state) {
+ case TIPC_CONNECTING:
+ case TIPC_ESTABLISHED:
+ tipc_set_sk_state(sk, TIPC_DISCONNECTING);
+ tipc_node_remove_conn(net, dnode, tsk->portid);
+ /* Send a FIN+/- to its peer */
+ skb = __skb_dequeue(&sk->sk_receive_queue);
+ if (skb) {
+ __skb_queue_purge(&sk->sk_receive_queue);
+ tipc_sk_respond(sk, skb, error);
+ break;
+ }
skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
tsk_own_node(tsk), tsk_peer_port(tsk),
tsk->portid, error);
if (skb)
tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
- tipc_node_remove_conn(net, dnode, tsk->portid);
- tipc_set_sk_state(sk, TIPC_DISCONNECTING);
+ break;
+ case TIPC_LISTEN:
+ /* Reject all SYN messages */
+ tsk_rej_rx_queue(sk, error);
+ break;
+ default:
+ __skb_queue_purge(&sk->sk_receive_queue);
+ break;
}
}
return sock_intr_errno(*timeo_p);
add_wait_queue(sk_sleep(sk), &wait);
- done = sk_wait_event(sk, timeo_p,
- sk->sk_state != TIPC_CONNECTING, &wait);
+ done = sk_wait_event(sk, timeo_p, tipc_sk_connected(sk),
+ &wait);
remove_wait_queue(sk_sleep(sk), &wait);
} while (!done);
return 0;
* Reject any stray messages received by new socket
* before the socket lock was taken (very, very unlikely)
*/
- tsk_rej_rx_queue(new_sk);
+ tsk_rej_rx_queue(new_sk, TIPC_ERR_NO_PORT);
/* Connect new socket to it's peer */
tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));
return rc;
}
-static void tls_update(struct sock *sk, struct proto *p)
+static void tls_update(struct sock *sk, struct proto *p,
+ void (*write_space)(struct sock *sk))
{
struct tls_context *ctx;
ctx = tls_get_ctx(sk);
- if (likely(ctx))
+ if (likely(ctx)) {
+ ctx->sk_write_space = write_space;
ctx->sk_proto = p;
- else
+ } else {
sk->sk_prot = p;
+ sk->sk_write_space = write_space;
+ }
}
static int tls_get_info(const struct sock *sk, struct sk_buff *skb)
return ret;
ret = crypto_wait_req(ret, &ctx->async_wait);
- } else if (ret == -EBADMSG) {
- TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
}
if (async)
split_point = msg_pl->apply_bytes;
split = split_point && split_point < msg_pl->sg.size;
+ if (unlikely((!split &&
+ msg_pl->sg.size +
+ prot->overhead_size > msg_en->sg.size) ||
+ (split &&
+ split_point +
+ prot->overhead_size > msg_en->sg.size))) {
+ split = true;
+ split_point = msg_en->sg.size;
+ }
if (split) {
rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
split_point, prot->overhead_size,
&orig_end);
if (rc < 0)
return rc;
+ /* This can happen if the tls_split_open_record() call above
+ * allocated a single large encryption buffer instead of two
+ * smaller ones. In this case adjust the pointers and continue
+ * without the split.
+ */
+ if (!msg_pl->sg.size) {
+ tls_merge_open_record(sk, rec, tmp, orig_end);
+ msg_pl = &rec->msg_plaintext;
+ msg_en = &rec->msg_encrypted;
+ split = false;
+ }
sk_msg_trim(sk, msg_en, msg_pl->sg.size +
prot->overhead_size);
}
sg_mark_end(sk_msg_elem(msg_pl, i));
}
+ if (msg_pl->sg.end < msg_pl->sg.start) {
+ sg_chain(&msg_pl->sg.data[msg_pl->sg.start],
+ MAX_SKB_FRAGS - msg_pl->sg.start + 1,
+ msg_pl->sg.data);
+ }
+
i = msg_pl->sg.start;
sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]);
psock = sk_psock_get(sk);
if (!psock || !policy) {
err = tls_push_record(sk, flags, record_type);
- if (err) {
+ if (err && err != -EINPROGRESS) {
*copied -= sk_msg_free(sk, msg);
tls_free_open_rec(sk);
}
if (psock->eval == __SK_NONE) {
delta = msg->sg.size;
psock->eval = sk_psock_msg_verdict(sk, psock, msg);
- if (delta < msg->sg.size)
- delta -= msg->sg.size;
- else
- delta = 0;
+ delta -= msg->sg.size;
}
if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
!enospc && !full_record) {
switch (psock->eval) {
case __SK_PASS:
err = tls_push_record(sk, flags, record_type);
- if (err < 0) {
+ if (err && err != -EINPROGRESS) {
*copied -= sk_msg_free(sk, msg);
tls_free_open_rec(sk);
goto out_err;
if (err == -EINPROGRESS)
tls_advance_record_sn(sk, prot,
&tls_ctx->rx);
-
+ else if (err == -EBADMSG)
+ TLS_INC_STATS(sock_net(sk),
+ LINUX_MIB_TLSDECRYPTERROR);
return err;
}
} else {
****************************************************************************
* The only valid Service GUIDs, from the perspectives of both the host and *
* Linux VM, that can be connected by the other end, must conform to this *
- * format: <port>-facb-11e6-bd58-64006a7986d3, and the "port" must be in *
- * this range [0, 0x7FFFFFFF]. *
+ * format: <port>-facb-11e6-bd58-64006a7986d3. *
****************************************************************************
*
* When we write apps on the host to connect(), the GUID ServiceID is used.
* When we write apps in Linux VM to connect(), we only need to specify the
* port and the driver will form the GUID and use that to request the host.
*
- * From the perspective of Linux VM:
- * 1. the local ephemeral port (i.e. the local auto-bound port when we call
- * connect() without explicit bind()) is generated by __vsock_bind_stream(),
- * and the range is [1024, 0xFFFFFFFF).
- * 2. the remote ephemeral port (i.e. the auto-generated remote port for
- * a connect request initiated by the host's connect()) is generated by
- * hvs_remote_addr_init() and the range is [0x80000000, 0xFFFFFFFF).
*/
-#define MAX_LISTEN_PORT ((u32)0x7FFFFFFF)
-#define MAX_VM_LISTEN_PORT MAX_LISTEN_PORT
-#define MAX_HOST_LISTEN_PORT MAX_LISTEN_PORT
-#define MIN_HOST_EPHEMERAL_PORT (MAX_HOST_LISTEN_PORT + 1)
-
/* 00000000-facb-11e6-bd58-64006a7986d3 */
static const guid_t srv_id_template =
GUID_INIT(0x00000000, 0xfacb, 0x11e6, 0xbd, 0x58,
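As the comment block above explains, a service is identified by a GUID whose first 32-bit field is the port, with the remainder fixed by the template. A minimal sketch of that mapping (helper name hypothetical):

#include <linux/uuid.h>

/* Build the <port>-facb-11e6-bd58-64006a7986d3 service GUID for a
 * given port by substituting the port into the template's first field.
 */
static guid_t example_srv_id_from_port(u32 port)
{
	return GUID_INIT(port, 0xfacb, 0x11e6, 0xbd, 0x58,
			 0x64, 0x00, 0x6a, 0x79, 0x86, 0xd3);
}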
vsock_addr_init(addr, VMADDR_CID_ANY, port);
}
-static void hvs_remote_addr_init(struct sockaddr_vm *remote,
- struct sockaddr_vm *local)
-{
- static u32 host_ephemeral_port = MIN_HOST_EPHEMERAL_PORT;
- struct sock *sk;
-
- /* Remote peer is always the host */
- vsock_addr_init(remote, VMADDR_CID_HOST, VMADDR_PORT_ANY);
-
- while (1) {
- /* Wrap around ? */
- if (host_ephemeral_port < MIN_HOST_EPHEMERAL_PORT ||
- host_ephemeral_port == VMADDR_PORT_ANY)
- host_ephemeral_port = MIN_HOST_EPHEMERAL_PORT;
-
- remote->svm_port = host_ephemeral_port++;
-
- sk = vsock_find_connected_socket(remote, local);
- if (!sk) {
- /* Found an available ephemeral port */
- return;
- }
-
- /* Release refcnt got in vsock_find_connected_socket */
- sock_put(sk);
- }
-}
-
static void hvs_set_channel_pending_send_size(struct vmbus_channel *chan)
{
set_channel_pending_send_size(chan,
if_type = &chan->offermsg.offer.if_type;
if_instance = &chan->offermsg.offer.if_instance;
conn_from_host = chan->offermsg.offer.u.pipe.user_def[0];
-
- /* The host or the VM should only listen on a port in
- * [0, MAX_LISTEN_PORT]
- */
- if (!is_valid_srv_id(if_type) ||
- get_port_by_srv_id(if_type) > MAX_LISTEN_PORT)
+ if (!is_valid_srv_id(if_type))
return;
hvs_addr_init(&addr, conn_from_host ? if_type : if_instance);
vnew = vsock_sk(new);
hvs_addr_init(&vnew->local_addr, if_type);
- hvs_remote_addr_init(&vnew->remote_addr, &vnew->local_addr);
+ /* Remote peer is always the host */
+ vsock_addr_init(&vnew->remote_addr,
+ VMADDR_CID_HOST, VMADDR_PORT_ANY);
+ vnew->remote_addr.svm_port = get_port_by_srv_id(if_instance);
ret = vsock_assign_transport(vnew, vsock_sk(sk));
/* Transport assigned (looking at remote_addr) must be the
* same where we received the request.
static bool hvs_stream_allow(u32 cid, u32 port)
{
- /* The host's port range [MIN_HOST_EPHEMERAL_PORT, 0xFFFFFFFF) is
- * reserved as ephemeral ports, which are used as the host's ports
- * when the host initiates connections.
- *
- * Perform this check in the guest so an immediate error is produced
- * instead of a timeout.
- */
- if (port > MAX_HOST_LISTEN_PORT)
- return false;
-
if (cid == VMADDR_CID_HOST)
return true;
if (err)
return err;
+ cfg80211_sinfo_release_content(&sinfo);
if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG))
wdev->cqm_config->last_rssi_event_value =
(s8) sinfo.rx_beacon_signal_avg;
if (err)
return err;
+ cfg80211_sinfo_release_content(&sinfo);
+
return rdev_probe_mesh_link(rdev, dev, dest, buf, len);
}
rdev_set_wiphy_params(struct cfg80211_registered_device *rdev, u32 changed)
{
int ret;
+
+ if (!rdev->ops->set_wiphy_params)
+ return -EOPNOTSUPP;
+
trace_rdev_set_wiphy_params(&rdev->wiphy, changed);
ret = rdev->ops->set_wiphy_params(&rdev->wiphy, changed);
trace_rdev_return_int(&rdev->wiphy, ret);
return ret;
}
+static inline void
+rdev_end_cac(struct cfg80211_registered_device *rdev,
+ struct net_device *dev)
+{
+ trace_rdev_end_cac(&rdev->wiphy, dev);
+ if (rdev->ops->end_cac)
+ rdev->ops->end_cac(&rdev->wiphy, dev);
+ trace_rdev_return_void(&rdev->wiphy);
+}
+
static inline int
rdev_set_mcast_rate(struct cfg80211_registered_device *rdev,
struct net_device *dev,
static void handle_channel_custom(struct wiphy *wiphy,
struct ieee80211_channel *chan,
- const struct ieee80211_regdomain *regd)
+ const struct ieee80211_regdomain *regd,
+ u32 min_bw)
{
u32 bw_flags = 0;
const struct ieee80211_reg_rule *reg_rule = NULL;
const struct ieee80211_power_rule *power_rule = NULL;
u32 bw;
- for (bw = MHZ_TO_KHZ(20); bw >= MHZ_TO_KHZ(5); bw = bw / 2) {
+ for (bw = MHZ_TO_KHZ(20); bw >= min_bw; bw = bw / 2) {
reg_rule = freq_reg_info_regd(MHZ_TO_KHZ(chan->center_freq),
regd, bw);
if (!IS_ERR(reg_rule))
if (!sband)
return;
+ /*
+ * We currently assume that you always want at least 20 MHz,
+ * otherwise channel 12 might get enabled if this rule is
+ * compatible with the US rules, which permit 2402 - 2472 MHz.
+ */
for (i = 0; i < sband->n_channels; i++)
- handle_channel_custom(wiphy, &sband->channels[i], regd);
+ handle_channel_custom(wiphy, &sband->channels[i], regd,
+ MHZ_TO_KHZ(20));
}
/* Used by drivers prior to wiphy registration */
}
EXPORT_SYMBOL(regulatory_pre_cac_allowed);
+static void cfg80211_check_and_end_cac(struct cfg80211_registered_device *rdev)
+{
+ struct wireless_dev *wdev;
+ /* If we finished CAC or received radar, we should end any
+ * CAC running on the same channels.
+ * the check !cfg80211_chandef_dfs_usable contain 2 options:
+ * either all channels are available - those the CAC_FINISHED
+ * event has effected another wdev state, or there is a channel
+ * in unavailable state in wdev chandef - those the RADAR_DETECTED
+ * event has effected another wdev state.
+ * In both cases we should end the CAC on the wdev.
+ */
+ list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
+ if (wdev->cac_started &&
+ !cfg80211_chandef_dfs_usable(&rdev->wiphy, &wdev->chandef))
+ rdev_end_cac(rdev, wdev->netdev);
+ }
+}
+
void regulatory_propagate_dfs_state(struct wiphy *wiphy,
struct cfg80211_chan_def *chandef,
enum nl80211_dfs_state dfs_state,
cfg80211_set_dfs_state(&rdev->wiphy, chandef, dfs_state);
if (event == NL80211_RADAR_DETECTED ||
- event == NL80211_RADAR_CAC_FINISHED)
+ event == NL80211_RADAR_CAC_FINISHED) {
cfg80211_sched_dfs_chan_update(rdev);
+ cfg80211_check_and_end_cac(rdev);
+ }
nl80211_radar_notify(rdev, chandef, event, NULL, GFP_KERNEL);
}
if (wdev->conn_owner_nlportid) {
switch (wdev->iftype) {
case NL80211_IFTYPE_ADHOC:
- cfg80211_leave_ibss(rdev, wdev->netdev, false);
+ __cfg80211_leave_ibss(rdev, wdev->netdev, false);
break;
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_P2P_GO:
- cfg80211_stop_ap(rdev, wdev->netdev, false);
+ __cfg80211_stop_ap(rdev, wdev->netdev, false);
break;
case NL80211_IFTYPE_MESH_POINT:
- cfg80211_leave_mesh(rdev, wdev->netdev);
+ __cfg80211_leave_mesh(rdev, wdev->netdev);
break;
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
TP_ARGS(wiphy, netdev)
);
+DEFINE_EVENT(wiphy_netdev_evt, rdev_end_cac,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev),
+ TP_ARGS(wiphy, netdev)
+);
+
DECLARE_EVENT_CLASS(station_add_change,
TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 *mac,
struct station_parameters *params),
struct skb_shared_info *sh = skb_shinfo(skb);
int page_offset;
- page_ref_inc(page);
+ get_page(page);
page_offset = ptr - page_address(page);
skb_add_rx_frag(skb, sh->nr_frags, page, page_offset, len, size);
}
return NULL;
}
-static int iw_handler_get_iwstats(struct net_device * dev,
+/* noinline to avoid a bogus warning with -O3 */
+static noinline int iw_handler_get_iwstats(struct net_device * dev,
struct iw_request_info * info,
union iwreq_data * wrqu,
char * extra)
if (sk->sk_state == TCP_ESTABLISHED)
goto out;
+ rc = -EALREADY; /* Do nothing if call is already in progress */
+ if (sk->sk_state == TCP_SYN_SENT)
+ goto out;
+
sk->sk_state = TCP_CLOSE;
sock->state = SS_UNCONNECTED;
/* Now the loop */
rc = -EINPROGRESS;
if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
- goto out_put_neigh;
+ goto out;
rc = x25_wait_for_connection_establishment(sk);
if (rc)
int err = -1;
int mtu;
- if (!dst)
- goto tx_err_link_failure;
-
dst_hold(dst);
dst = xfrm_lookup_with_ifid(xi->net, dst, fl, NULL, 0, xi->p.if_id);
if (IS_ERR(dst)) {
mtu = dst_mtu(dst);
if (!skb->ignore_df && skb->len > mtu) {
- skb_dst_update_pmtu(skb, mtu);
+ skb_dst_update_pmtu_no_confirm(skb, mtu);
if (skb->protocol == htons(ETH_P_IPV6)) {
if (mtu < IPV6_MIN_MTU)
{
struct xfrm_if *xi = netdev_priv(dev);
struct net_device_stats *stats = &xi->dev->stats;
+ struct dst_entry *dst = skb_dst(skb);
struct flowi fl;
int ret;
case htons(ETH_P_IPV6):
xfrm_decode_session(skb, &fl, AF_INET6);
memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+ if (!dst) {
+ fl.u.ip6.flowi6_oif = dev->ifindex;
+ fl.u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC;
+ dst = ip6_route_output(dev_net(dev), NULL, &fl.u.ip6);
+ if (dst->error) {
+ dst_release(dst);
+ stats->tx_carrier_errors++;
+ goto tx_err;
+ }
+ skb_dst_set(skb, dst);
+ }
break;
case htons(ETH_P_IP):
xfrm_decode_session(skb, &fl, AF_INET);
memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+ if (!dst) {
+ struct rtable *rt;
+
+ fl.u.ip4.flowi4_oif = dev->ifindex;
+ fl.u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
+ rt = __ip_route_output_key(dev_net(dev), &fl.u.ip4);
+ if (IS_ERR(rt)) {
+ stats->tx_carrier_errors++;
+ goto tx_err;
+ }
+ skb_dst_set(skb, &rt->dst);
+ }
break;
default:
goto tx_err;
{
dev->netdev_ops = &xfrmi_netdev_ops;
dev->type = ARPHRD_NONE;
- dev->hard_header_len = ETH_HLEN;
- dev->min_header_len = ETH_HLEN;
dev->mtu = ETH_DATA_LEN;
dev->min_mtu = ETH_MIN_MTU;
- dev->max_mtu = ETH_DATA_LEN;
- dev->addr_len = ETH_ALEN;
+ dev->max_mtu = IP_MAX_MTU;
dev->flags = IFF_NOARP;
dev->needs_free_netdev = true;
dev->priv_destructor = xfrmi_dev_free;
*/
static int shadow_leak_ctor(void *obj, void *shadow_data, void *ctor_data)
{
- void **shadow_leak = shadow_data;
- void *leak = ctor_data;
+ int **shadow_leak = shadow_data;
+ int **leak = ctor_data;
- *shadow_leak = leak;
+ if (!ctor_data)
+ return -EINVAL;
+
+ *shadow_leak = *leak;
return 0;
}
static struct dummy *livepatch_fix1_dummy_alloc(void)
{
struct dummy *d;
- void *leak;
+ int *leak;
+ int **shadow_leak;
d = kzalloc(sizeof(*d), GFP_KERNEL);
if (!d)
* variable. A patched dummy_free routine can later fetch this
* pointer to handle resource release.
*/
- leak = kzalloc(sizeof(int), GFP_KERNEL);
- if (!leak) {
- kfree(d);
- return NULL;
+ leak = kzalloc(sizeof(*leak), GFP_KERNEL);
+ if (!leak)
+ goto err_leak;
+
+ shadow_leak = klp_shadow_alloc(d, SV_LEAK, sizeof(leak), GFP_KERNEL,
+ shadow_leak_ctor, &leak);
+ if (!shadow_leak) {
+ pr_err("%s: failed to allocate shadow variable for the leaking pointer: dummy @ %p, leak @ %p\n",
+ __func__, d, leak);
+ goto err_shadow;
}
- klp_shadow_alloc(d, SV_LEAK, sizeof(leak), GFP_KERNEL,
- shadow_leak_ctor, leak);
-
pr_info("%s: dummy @ %p, expires @ %lx\n",
__func__, d, d->jiffies_expire);
return d;
+
+err_shadow:
+ kfree(leak);
+err_leak:
+ kfree(d);
+ return NULL;
}
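The double-pointer handling above follows the klp_shadow_alloc() ctor contract: shadow_data points at the freshly allocated shadow storage (sized by the sizeof(leak) argument), while ctor_data is forwarded verbatim from the caller, which here passes &leak. A compact restatement of that contract (illustrative only):

#include <linux/errno.h>

/* obj:         the parent object the shadow variable is attached to
 * shadow_data: points at the new shadow area, here room for one pointer
 * ctor_data:   the caller's &leak, i.e. the address of the pointer value
 */
static int copy_ptr_ctor(void *obj, void *shadow_data, void *ctor_data)
{
	int **dst = shadow_data;
	int **src = ctor_data;

	if (!ctor_data)
		return -EINVAL;

	*dst = *src;	/* store the pointer value, not its address */
	return 0;
}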
static void livepatch_fix1_dummy_leak_dtor(void *obj, void *shadow_data)
{
void *d = obj;
- void **shadow_leak = shadow_data;
+ int **shadow_leak = shadow_data;
kfree(*shadow_leak);
pr_info("%s: dummy @ %p, prevented leak @ %p\n",
static void livepatch_fix1_dummy_free(struct dummy *d)
{
- void **shadow_leak;
+ int **shadow_leak;
/*
* Patch: fetch the saved SV_LEAK shadow variable, detach and
static void livepatch_fix2_dummy_leak_dtor(void *obj, void *shadow_data)
{
void *d = obj;
- void **shadow_leak = shadow_data;
+ int **shadow_leak = shadow_data;
kfree(*shadow_leak);
pr_info("%s: dummy @ %p, prevented leak @ %p\n",
static void livepatch_fix2_dummy_free(struct dummy *d)
{
- void **shadow_leak;
+ int **shadow_leak;
int *shadow_count;
/* Patch: copy the memory leak patch from the fix1 module. */
static __used noinline struct dummy *dummy_alloc(void)
{
struct dummy *d;
- void *leak;
+ int *leak;
d = kzalloc(sizeof(*d), GFP_KERNEL);
if (!d)
msecs_to_jiffies(1000 * EXPIRE_PERIOD);
/* Oops, forgot to save leak! */
- leak = kzalloc(sizeof(int), GFP_KERNEL);
+ leak = kzalloc(sizeof(*leak), GFP_KERNEL);
if (!leak) {
kfree(d);
return NULL;
# Return y if the linker supports <flag>, n otherwise
ld-option = $(success,$(LD) -v $(1))
+# $(as-instr,<instr>)
+# Return y if the assembler supports <instr>, n otherwise
+as-instr = $(success,printf "%b\n" "$(1)" | $(CC) $(CLANG_FLAGS) -c -x assembler -o /dev/null -)
+
# check if $(CC) and $(LD) exist
$(error-if,$(failure,command -v $(CC)),compiler '$(CC)' not found)
$(error-if,$(failure,command -v $(LD)),linker '$(LD)' not found)
#define R_AARCH64_ABS64 257
#endif
+#define R_ARM_PC24 1
+#define R_ARM_THM_CALL 10
+#define R_ARM_CALL 28
+
static int fd_map; /* File descriptor for file being modified. */
static int mmap_failed; /* Boolean flag. */
static char gpfx; /* prefix for global symbol name (sometimes '_') */
#define RECORD_MCOUNT_64
#include "recordmcount.h"
+static int arm_is_fake_mcount(Elf32_Rel const *rp)
+{
+ switch (ELF32_R_TYPE(w(rp->r_info))) {
+ case R_ARM_THM_CALL:
+ case R_ARM_CALL:
+ case R_ARM_PC24:
+ return 0;
+ }
+
+ return 1;
+}
+
/* 64-bit EM_MIPS has weird ELF64_Rela.r_info.
* http://techpubs.sgi.com/library/manuals/4000/007-4658-001/pdf/007-4658-001.pdf
* We interpret Table 29 Relocation Operation (Elf64_Rel, Elf64_Rela) [p.40]
altmcount = "__gnu_mcount_nc";
make_nop = make_nop_arm;
rel_type_nop = R_ARM_NONE;
+ is_fake_mcount32 = arm_is_fake_mcount;
gpfx = 0;
break;
case EM_AARCH64:
q = queueptr(idx);
if (q == NULL)
continue;
- if ((tmr = q->timer) == NULL ||
- (ti = tmr->timeri) == NULL) {
- queuefree(q);
- continue;
- }
+ mutex_lock(&q->timer_mutex);
+ tmr = q->timer;
+ if (!tmr)
+ goto unlock;
+ ti = tmr->timeri;
+ if (!ti)
+ goto unlock;
snd_iprintf(buffer, "Timer for queue %i : %s\n", q->queue, ti->timer->name);
resolution = snd_timer_resolution(ti) * tmr->ticks;
snd_iprintf(buffer, " Period time : %lu.%09lu\n", resolution / 1000000000, resolution % 1000000000);
snd_iprintf(buffer, " Skew : %u / %u\n", tmr->skew, tmr->skew_base);
+unlock:
+ mutex_unlock(&q->timer_mutex);
queuefree(q);
}
}
int j;
for (j = i + 1; j < 9; ++j) {
- if (pointers[i * 2] == pointers[j * 2])
+ if (pointers[i * 2] == pointers[j * 2]) {
+ // Fallback to limited functionality.
+ err = -ENXIO;
goto end;
+ }
}
}
if ((before ^ after) & mask) {
struct snd_firewire_tascam_change *entry =
&tscm->queue[tscm->push_pos];
+ unsigned long flag;
- spin_lock_irq(&tscm->lock);
+ spin_lock_irqsave(&tscm->lock, flag);
entry->index = index;
entry->before = before;
entry->after = after;
if (++tscm->push_pos >= SND_TSCM_QUEUE_COUNT)
tscm->push_pos = 0;
- spin_unlock_irq(&tscm->lock);
+ spin_unlock_irqrestore(&tscm->lock, flag);
wake_up(&tscm->hwdep_wait);
}
.reg_write = hda_reg_write,
.use_single_read = true,
.use_single_write = true,
- .disable_locking = true,
};
/**
static bool beep_mode[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)] =
CONFIG_SND_HDA_INPUT_BEEP_MODE};
#endif
-static bool dsp_driver = 1;
+static bool dmic_detect = 1;
module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for Intel HD audio interface.");
MODULE_PARM_DESC(beep_mode, "Select HDA Beep registration mode "
"(0=off, 1=on) (default=1).");
#endif
-module_param(dsp_driver, bool, 0444);
-MODULE_PARM_DESC(dsp_driver, "Allow DSP driver selection (bypass this driver) "
- "(0=off, 1=on) (default=1)");
+module_param(dmic_detect, bool, 0444);
+MODULE_PARM_DESC(dmic_detect, "Allow DSP driver selection (bypass this driver) "
+ "(0=off, 1=on) (default=1); "
+ "deprecated, use snd-intel-dspcfg.dsp_driver option instead");
#ifdef CONFIG_PM
static int param_set_xint(const char *val, const struct kernel_param *kp);
/*
* stop probe if another Intel's DSP driver should be activated
*/
- if (dsp_driver) {
+ if (dmic_detect) {
err = snd_intel_dsp_driver_probe(pci);
if (err != SND_INTEL_DSP_DRIVER_ANY &&
err != SND_INTEL_DSP_DRIVER_LEGACY)
return -ENODEV;
+ } else {
+ dev_warn(&pci->dev, "dmic_detect option is deprecated, pass snd-intel-dspcfg.dsp_driver=1 option instead\n");
}
err = snd_card_new(&pci->dev, index[dev], id[dev], THIS_MODULE,
case 0x10ec0672:
alc_update_coef_idx(codec, 0xd, 0, 1<<14); /* EAPD Ctrl */
break;
+ case 0x10ec0222:
case 0x10ec0623:
alc_update_coef_idx(codec, 0x19, 1<<13, 0);
break;
break;
case 0x10ec0899:
case 0x10ec0900:
+ case 0x10ec0b00:
case 0x10ec1168:
case 0x10ec1220:
alc_update_coef_idx(codec, 0x7, 1<<1, 0);
case 0x10ec0882:
case 0x10ec0885:
case 0x10ec0900:
+ case 0x10ec0b00:
case 0x10ec1220:
break;
default:
SND_PCI_QUIRK(0x17aa, 0x224c, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
SND_PCI_QUIRK(0x17aa, 0x224d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
SND_PCI_QUIRK(0x17aa, 0x225d, "Thinkpad T480", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x17aa, 0x2292, "Thinkpad X1 Yoga 7th", ALC285_FIXUP_SPEAKER2_TO_DAC1),
SND_PCI_QUIRK(0x17aa, 0x2293, "Thinkpad X1 Carbon 7th", ALC285_FIXUP_SPEAKER2_TO_DAC1),
SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
HDA_CODEC_ENTRY(0x10ec0892, "ALC892", patch_alc662),
HDA_CODEC_ENTRY(0x10ec0899, "ALC898", patch_alc882),
HDA_CODEC_ENTRY(0x10ec0900, "ALC1150", patch_alc882),
+ HDA_CODEC_ENTRY(0x10ec0b00, "ALCS1200A", patch_alc882),
HDA_CODEC_ENTRY(0x10ec1168, "ALC1220", patch_alc882),
HDA_CODEC_ENTRY(0x10ec1220, "ALC1220", patch_alc882),
{} /* terminator */
#include <crypto/hash.h>
#include <crypto/sha.h>
+#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/io.h>
MODULE_DEVICE_TABLE(of, cros_ec_codec_of_match);
#endif
+static const struct acpi_device_id cros_ec_codec_acpi_id[] = {
+ { "GOOG0013", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, cros_ec_codec_acpi_id);
+
static struct platform_driver cros_ec_codec_platform_driver = {
.driver = {
.name = "cros-ec-codec",
.of_match_table = of_match_ptr(cros_ec_codec_of_match),
+ .acpi_match_table = ACPI_PTR(cros_ec_codec_acpi_id),
},
.probe = cros_ec_codec_platform_probe,
};
struct hdac_hda_priv *hda_pvt;
hda_pvt = dev_get_drvdata(&hdev->dev);
- cancel_delayed_work_sync(&hda_pvt->codec.jackpoll_work);
+ if (hda_pvt && hda_pvt->codec.registered)
+ cancel_delayed_work_sync(&hda_pvt->codec.jackpoll_work);
+
return 0;
}
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
- snd_soc_component_update_bits(component, CDC_A_MICB_1_INT_RBIAS,
- MICB_1_INT_TX2_INT_RBIAS_EN_MASK,
- MICB_1_INT_TX2_INT_RBIAS_EN_ENABLE);
snd_soc_component_update_bits(component, reg, MICB_1_EN_PULL_DOWN_EN_MASK, 0);
snd_soc_component_update_bits(component, CDC_A_MICB_1_EN,
MICB_1_EN_OPA_STG2_TAIL_CURR_MASK,
struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
struct pm8916_wcd_analog_priv *wcd = snd_soc_component_get_drvdata(component);
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ snd_soc_component_update_bits(component, CDC_A_MICB_1_INT_RBIAS,
+ MICB_1_INT_TX1_INT_RBIAS_EN_MASK,
+ MICB_1_INT_TX1_INT_RBIAS_EN_ENABLE);
+ break;
+ }
+
return pm8916_wcd_analog_enable_micbias_int(component, event, w->reg,
wcd->micbias1_cap_mode);
}
struct pm8916_wcd_analog_priv *wcd = snd_soc_component_get_drvdata(component);
switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ snd_soc_component_update_bits(component, CDC_A_MICB_1_INT_RBIAS,
+ MICB_1_INT_TX2_INT_RBIAS_EN_MASK,
+ MICB_1_INT_TX2_INT_RBIAS_EN_ENABLE);
+ break;
case SND_SOC_DAPM_POST_PMU:
pm8916_mbhc_configure_bias(wcd, true);
break;
SND_SOC_DAPM_SUPPLY("MIC BIAS External1", CDC_A_MICB_1_EN, 7, 0,
pm8916_wcd_analog_enable_micbias_ext1,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_SUPPLY("MIC BIAS External2", CDC_A_MICB_2_EN, 7, 0,
pm8916_wcd_analog_enable_micbias_ext2,
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_ADC_E("ADC1", NULL, CDC_A_TX_1_EN, 7, 0,
pm8916_wcd_analog_enable_adc,
snd_soc_component_write(component, rx_gain_reg[w->shift],
snd_soc_component_read32(component, rx_gain_reg[w->shift]));
break;
+ case SND_SOC_DAPM_POST_PMD:
+ snd_soc_component_update_bits(component, LPASS_CDC_CLK_RX_RESET_CTL,
+ 1 << w->shift, 1 << w->shift);
+ snd_soc_component_update_bits(component, LPASS_CDC_CLK_RX_RESET_CTL,
+ 1 << w->shift, 0x0);
+ break;
}
return 0;
}
{
struct rt5640_priv *rt5640 = snd_soc_component_get_drvdata(component);
+ /*
+ * soc_remove_component() force-disables the jack, so rt5640->jack
+ * can be NULL by the time the driver module is unloaded.
+ */
+ if (!rt5640->jack)
+ return;
+
disable_irq(rt5640->irq);
rt5640_cancel_work(rt5640);
ARRAY_SIZE(fsl_audmix_dai));
if (ret) {
dev_err(dev, "failed to register ASoC DAI\n");
- return ret;
+ goto err_disable_pm;
}
priv->pdev = platform_device_register_data(dev, mdrv, 0, NULL, 0);
if (IS_ERR(priv->pdev)) {
ret = PTR_ERR(priv->pdev);
dev_err(dev, "failed to register platform %s: %d\n", mdrv, ret);
+ goto err_disable_pm;
}
+ return 0;
+
+err_disable_pm:
+ pm_runtime_disable(dev);
return ret;
}
{
struct fsl_audmix *priv = dev_get_drvdata(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
if (priv->pdev)
platform_device_unregister(priv->pdev);
DMI_MATCH(DMI_SYS_VENDOR, "IRBIS"),
DMI_MATCH(DMI_PRODUCT_NAME, "NB41"),
},
- .driver_data = (void *)(BYT_CHT_ES8316_INTMIC_IN2_MAP
+ .driver_data = (void *)(BYT_CHT_ES8316_SSP0
+ | BYT_CHT_ES8316_INTMIC_IN2_MAP
| BYT_CHT_ES8316_JD_INVERTED),
},
{ /* Teclast X98 Plus II */
#include <linux/clk.h>
#include <linux/dmi.h>
#include <linux/slab.h>
-#include <asm/cpu_device_id.h>
#include <linux/acpi.h>
#include <sound/core.h>
#include <sound/jack.h>
struct snd_soc_rtdcom_list *rtdcom;
struct snd_soc_component *component;
+ if (!rtd->pcm)
+ return;
+
for_each_rtd_components(rtd, rtdcom, component)
if (component->driver->pcm_destruct)
component->driver->pcm_destruct(component, rtd->pcm);
goto free_rtd;
rtd->dev = dev;
+ INIT_LIST_HEAD(&rtd->list);
+ INIT_LIST_HEAD(&rtd->component_list);
+ INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_PLAYBACK].be_clients);
+ INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_CAPTURE].be_clients);
+ INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_PLAYBACK].fe_clients);
+ INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_CAPTURE].fe_clients);
dev_set_drvdata(dev, rtd);
INIT_DELAYED_WORK(&rtd->delayed_work, close_delayed_work);
/*
* rtd remaining settings
*/
- INIT_LIST_HEAD(&rtd->component_list);
- INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_PLAYBACK].be_clients);
- INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_CAPTURE].be_clients);
- INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_PLAYBACK].fe_clients);
- INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_CAPTURE].fe_clients);
-
rtd->card = card;
rtd->dai_link = dai_link;
if (!rtd->dai_link->ops)
/* convert non BE into BE */
dai_link->no_pcm = 1;
+ dai_link->dpcm_playback = 1;
+ dai_link->dpcm_capture = 1;
/* override any BE fixups */
dai_link->be_hw_params_fixup =
if (dobj->ops && dobj->ops->link_unload)
dobj->ops->link_unload(comp, dobj);
+ list_del(&dobj->list);
+ snd_soc_remove_dai_link(comp->card, link);
+
kfree(link->name);
kfree(link->stream_name);
kfree(link->cpus->dai_name);
-
- list_del(&dobj->list);
- snd_soc_remove_dai_link(comp->card, link);
kfree(link);
}
priv->pd_dev = devm_kmalloc_array(&pdev->dev, priv->num_domains,
sizeof(*priv->pd_dev), GFP_KERNEL);
- if (!priv)
+ if (!priv->pd_dev)
return -ENOMEM;
priv->link = devm_kmalloc_array(&pdev->dev, priv->num_domains,
}
sdev->mailbox_bar = SOF_FW_BLK_TYPE_SRAM;
+ /* set default mailbox offset for FW ready message */
+ sdev->dsp_box.offset = MBOX_OFFSET;
+
return 0;
exit_pdev_unregister:
#define IDISP_VID_INTEL 0x80860000
/* load the legacy HDA codec driver */
-#ifdef MODULE
-static void hda_codec_load_module(struct hda_codec *codec)
+static int hda_codec_load_module(struct hda_codec *codec)
{
+#ifdef MODULE
char alias[MODULE_NAME_LEN];
const char *module = alias;
snd_hdac_codec_modalias(&codec->core, alias, sizeof(alias));
dev_dbg(&codec->core.dev, "loading codec module: %s\n", module);
request_module(module);
-}
-#else
-static void hda_codec_load_module(struct hda_codec *codec) {}
#endif
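+ /* device_attach() returns > 0 if a driver was bound, 0 if none matched */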
+ return device_attach(hda_codec_dev(codec));
+}
/* enable controller wake up event for all codecs with jack connectors */
void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev)
if ((mach_params && mach_params->common_hdmi_codec_drv) ||
(resp & 0xFFFF0000) != IDISP_VID_INTEL) {
hdev->type = HDA_DEV_LEGACY;
- hda_codec_load_module(&hda_priv->codec);
+ ret = hda_codec_load_module(&hda_priv->codec);
+ /*
+ * treat ret == 0 (no driver bound) as an error; pass other
+ * return codes through unmodified
+ */
+ if (ret == 0)
+ ret = -ENOENT;
}
- return 0;
+ return ret;
#else
hdev = devm_kzalloc(sdev->dev, sizeof(*hdev), GFP_KERNEL);
if (!hdev)
link_dev = hda_link_stream_assign(bus, substream);
if (!link_dev)
return -EBUSY;
+
+ snd_soc_dai_set_dma_data(dai, substream, (void *)link_dev);
}
stream_tag = hdac_stream(link_dev)->stream_tag;
if (ret < 0)
return ret;
- snd_soc_dai_set_dma_data(dai, substream, (void *)link_dev);
-
link = snd_hdac_ext_bus_get_link(bus, codec_dai->component->name);
if (!link)
return -EINVAL;
bus = hstream->bus;
rtd = snd_pcm_substream_chip(substream);
link_dev = snd_soc_dai_get_dma_data(dai, substream);
+
+ if (!link_dev) {
+ dev_dbg(dai->dev,
+ "%s: link_dev is not assigned\n", __func__);
+ return -EINVAL;
+ }
+
hda_stream = hstream_to_sof_hda_stream(link_dev);
/* free the link DMA channel in the FW */
if (!ret)
break;
- dev_err(sdev->dev, "error: Error code=0x%x: FW status=0x%x\n",
+ dev_dbg(sdev->dev, "iteration %d of Core En/ROM load failed: %d\n",
+ i, ret);
+ dev_dbg(sdev->dev, "Error code=0x%x: FW status=0x%x\n",
snd_sof_dsp_read(sdev, HDA_DSP_BAR,
HDA_DSP_SRAM_REG_ROM_ERROR),
snd_sof_dsp_read(sdev, HDA_DSP_BAR,
HDA_DSP_SRAM_REG_ROM_STATUS));
- dev_err(sdev->dev, "error: iteration %d of Core En/ROM load failed: %d\n",
- i, ret);
}
if (i == HDA_FW_BOOT_ATTEMPTS) {
{
struct snd_sof_ipc *ipc = sdev->ipc;
+ if (!ipc)
+ return;
+
/* disable sending of ipc's */
mutex_lock(&ipc->tx_mutex);
ipc->disable_ipc_tx = true;
* sampling frequency. If no sample rate is already specified, then
* set one.
*/
- mutex_lock(&player->ctrl_lock);
if (runtime) {
switch (runtime->rate) {
case 22050:
player->stream_settings.iec958.status[3 + (n * 4)] << 24;
SET_UNIPERIF_CHANNEL_STA_REGN(player, n, status);
}
- mutex_unlock(&player->ctrl_lock);
/* Update the channel status */
if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
SET_UNIPERIF_CTRL_ZERO_STUFF_HW(player);
+ mutex_lock(&player->ctrl_lock);
/* Update the channel status */
uni_player_set_channel_status(player, runtime);
+ mutex_unlock(&player->ctrl_lock);
/* Clear the user validity user bits */
SET_UNIPERIF_USER_VALIDITY_VALIDITY_LR(player, 0);
iec958->status[1] = ucontrol->value.iec958.status[1];
iec958->status[2] = ucontrol->value.iec958.status[2];
iec958->status[3] = ucontrol->value.iec958.status[3];
- mutex_unlock(&player->ctrl_lock);
spin_lock_irqsave(&player->irq_lock, flags);
if (player->substream && player->substream->runtime)
uni_player_set_channel_status(player, NULL);
spin_unlock_irqrestore(&player->irq_lock, flags);
+ mutex_unlock(&player->ctrl_lock);
+
return 0;
}
.name = "stm32_dfsdm_audio",
};
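+/*
+ * Each loop iteration below consumes one 32-bit source word and stores
+ * one 16-bit sample (two bytes of output), hence the n >> 1 iteration
+ * count.
+ */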
-static void memcpy_32to16(void *dest, const void *src, size_t n)
+static void stm32_memcpy_32to16(void *dest, const void *src, size_t n)
{
unsigned int i = 0;
u16 *d = (u16 *)dest, *s = (u16 *)src;
s++;
- for (i = n; i > 0; i--) {
+ for (i = n >> 1; i > 0; i--) {
*d++ = *s++;
s++;
}
if ((priv->pos + src_size) > buff_size) {
if (format == SNDRV_PCM_FORMAT_S16_LE)
- memcpy_32to16(&pcm_buff[priv->pos], src_buff,
- buff_size - priv->pos);
+ stm32_memcpy_32to16(&pcm_buff[priv->pos], src_buff,
+ buff_size - priv->pos);
else
memcpy(&pcm_buff[priv->pos], src_buff,
buff_size - priv->pos);
}
if (format == SNDRV_PCM_FORMAT_S16_LE)
- memcpy_32to16(&pcm_buff[priv->pos],
- &src_buff[src_size - cur_size], cur_size);
+ stm32_memcpy_32to16(&pcm_buff[priv->pos],
+ &src_buff[src_size - cur_size], cur_size);
else
memcpy(&pcm_buff[priv->pos], &src_buff[src_size - cur_size],
cur_size);
}
}
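+/*
+ * Register access helpers. The peripheral clock is not managed by the
+ * regmap framework here (see the probe comment on circular locking), so
+ * each access enables pclk for the duration of the read/write/update.
+ */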
+static int stm32_sai_sub_reg_up(struct stm32_sai_sub_data *sai,
+ unsigned int reg, unsigned int mask,
+ unsigned int val)
+{
+ int ret;
+
+ ret = clk_enable(sai->pdata->pclk);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(sai->regmap, reg, mask, val);
+
+ clk_disable(sai->pdata->pclk);
+
+ return ret;
+}
+
+static int stm32_sai_sub_reg_wr(struct stm32_sai_sub_data *sai,
+ unsigned int reg, unsigned int mask,
+ unsigned int val)
+{
+ int ret;
+
+ ret = clk_enable(sai->pdata->pclk);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write_bits(sai->regmap, reg, mask, val);
+
+ clk_disable(sai->pdata->pclk);
+
+ return ret;
+}
+
+static int stm32_sai_sub_reg_rd(struct stm32_sai_sub_data *sai,
+ unsigned int reg, unsigned int *val)
+{
+ int ret;
+
+ ret = clk_enable(sai->pdata->pclk);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(sai->regmap, reg, val);
+
+ clk_disable(sai->pdata->pclk);
+
+ return ret;
+}
+
static const struct regmap_config stm32_sai_sub_regmap_config_f4 = {
.reg_bits = 32,
.reg_stride = 4,
mask = SAI_XCR1_MCKDIV_MASK(SAI_XCR1_MCKDIV_WIDTH(version));
cr1 = SAI_XCR1_MCKDIV_SET(div);
- ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, mask, cr1);
+ ret = stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX, mask, cr1);
if (ret < 0)
dev_err(&sai->pdev->dev, "Failed to update CR1 register\n");
dev_dbg(&sai->pdev->dev, "Enable master clock\n");
- return regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX,
- SAI_XCR1_MCKEN, SAI_XCR1_MCKEN);
+ return stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX,
+ SAI_XCR1_MCKEN, SAI_XCR1_MCKEN);
}
static void stm32_sai_mclk_disable(struct clk_hw *hw)
dev_dbg(&sai->pdev->dev, "Disable master clock\n");
- regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, SAI_XCR1_MCKEN, 0);
+ stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX, SAI_XCR1_MCKEN, 0);
}
static const struct clk_ops mclk_ops = {
unsigned int sr, imr, flags;
snd_pcm_state_t status = SNDRV_PCM_STATE_RUNNING;
- regmap_read(sai->regmap, STM_SAI_IMR_REGX, &imr);
- regmap_read(sai->regmap, STM_SAI_SR_REGX, &sr);
+ stm32_sai_sub_reg_rd(sai, STM_SAI_IMR_REGX, &imr);
+ stm32_sai_sub_reg_rd(sai, STM_SAI_SR_REGX, &sr);
flags = sr & imr;
if (!flags)
return IRQ_NONE;
- regmap_write_bits(sai->regmap, STM_SAI_CLRFR_REGX, SAI_XCLRFR_MASK,
- SAI_XCLRFR_MASK);
+ stm32_sai_sub_reg_wr(sai, STM_SAI_CLRFR_REGX, SAI_XCLRFR_MASK,
+ SAI_XCLRFR_MASK);
if (!sai->substream) {
dev_err(&pdev->dev, "Device stopped. Spurious IRQ 0x%x\n", sr);
int ret;
if (dir == SND_SOC_CLOCK_OUT && sai->sai_mclk) {
- ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX,
- SAI_XCR1_NODIV,
+ ret = stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX,
+ SAI_XCR1_NODIV,
freq ? 0 : SAI_XCR1_NODIV);
if (ret < 0)
return ret;
slotr_mask |= SAI_XSLOTR_SLOTEN_MASK;
- regmap_update_bits(sai->regmap, STM_SAI_SLOTR_REGX, slotr_mask, slotr);
+ stm32_sai_sub_reg_up(sai, STM_SAI_SLOTR_REGX, slotr_mask, slotr);
sai->slot_width = slot_width;
sai->slots = slots;
cr1_mask |= SAI_XCR1_CKSTR;
frcr_mask |= SAI_XFRCR_FSPOL;
- regmap_update_bits(sai->regmap, STM_SAI_FRCR_REGX, frcr_mask, frcr);
+ stm32_sai_sub_reg_up(sai, STM_SAI_FRCR_REGX, frcr_mask, frcr);
/* DAI clock master masks */
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
cr1_mask |= SAI_XCR1_SLAVE;
conf_update:
- ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, cr1_mask, cr1);
+ ret = stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX, cr1_mask, cr1);
if (ret < 0) {
dev_err(cpu_dai->dev, "Failed to update CR1 register\n");
return ret;
}
/* Enable ITs */
- regmap_write_bits(sai->regmap, STM_SAI_CLRFR_REGX,
- SAI_XCLRFR_MASK, SAI_XCLRFR_MASK);
+ stm32_sai_sub_reg_wr(sai, STM_SAI_CLRFR_REGX,
+ SAI_XCLRFR_MASK, SAI_XCLRFR_MASK);
imr = SAI_XIMR_OVRUDRIE;
if (STM_SAI_IS_CAPTURE(sai)) {
- regmap_read(sai->regmap, STM_SAI_CR2_REGX, &cr2);
+ stm32_sai_sub_reg_rd(sai, STM_SAI_CR2_REGX, &cr2);
if (cr2 & SAI_XCR2_MUTECNT_MASK)
imr |= SAI_XIMR_MUTEDETIE;
}
else
imr |= SAI_XIMR_AFSDETIE | SAI_XIMR_LFSDETIE;
- regmap_update_bits(sai->regmap, STM_SAI_IMR_REGX,
- SAI_XIMR_MASK, imr);
+ stm32_sai_sub_reg_up(sai, STM_SAI_IMR_REGX,
+ SAI_XIMR_MASK, imr);
return 0;
}
* SAI fifo threshold is set to half fifo, to keep enough space
* for DMA incoming bursts.
*/
- regmap_write_bits(sai->regmap, STM_SAI_CR2_REGX,
- SAI_XCR2_FFLUSH | SAI_XCR2_FTH_MASK,
- SAI_XCR2_FFLUSH |
- SAI_XCR2_FTH_SET(STM_SAI_FIFO_TH_HALF));
+ stm32_sai_sub_reg_wr(sai, STM_SAI_CR2_REGX,
+ SAI_XCR2_FFLUSH | SAI_XCR2_FTH_MASK,
+ SAI_XCR2_FFLUSH |
+ SAI_XCR2_FTH_SET(STM_SAI_FIFO_TH_HALF));
/* DS bits in CR1 not set for SPDIF (size forced to 24 bits). */
if (STM_SAI_PROTOCOL_IS_SPDIF(sai)) {
if ((sai->slots == 2) && (params_channels(params) == 1))
cr1 |= SAI_XCR1_MONO;
- ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, cr1_mask, cr1);
+ ret = stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX, cr1_mask, cr1);
if (ret < 0) {
dev_err(cpu_dai->dev, "Failed to update CR1 register\n");
return ret;
struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai);
int slotr, slot_sz;
- regmap_read(sai->regmap, STM_SAI_SLOTR_REGX, &slotr);
+ stm32_sai_sub_reg_rd(sai, STM_SAI_SLOTR_REGX, &slotr);
/*
* If SLOTSZ is set to auto in SLOTR, align slot width on data size
sai->slots = 2;
/* The number of slots in the audio frame is equal to NBSLOT[3:0] + 1 */
- regmap_update_bits(sai->regmap, STM_SAI_SLOTR_REGX,
- SAI_XSLOTR_NBSLOT_MASK,
- SAI_XSLOTR_NBSLOT_SET((sai->slots - 1)));
+ stm32_sai_sub_reg_up(sai, STM_SAI_SLOTR_REGX,
+ SAI_XSLOTR_NBSLOT_MASK,
+ SAI_XSLOTR_NBSLOT_SET((sai->slots - 1)));
/* Set default slots mask if not already set from DT */
if (!(slotr & SAI_XSLOTR_SLOTEN_MASK)) {
sai->slot_mask = (1 << sai->slots) - 1;
- regmap_update_bits(sai->regmap,
- STM_SAI_SLOTR_REGX, SAI_XSLOTR_SLOTEN_MASK,
- SAI_XSLOTR_SLOTEN_SET(sai->slot_mask));
+ stm32_sai_sub_reg_up(sai,
+ STM_SAI_SLOTR_REGX, SAI_XSLOTR_SLOTEN_MASK,
+ SAI_XSLOTR_SLOTEN_SET(sai->slot_mask));
}
dev_dbg(cpu_dai->dev, "Slots %d, slot width %d\n",
dev_dbg(cpu_dai->dev, "Frame length %d, frame active %d\n",
sai->fs_length, fs_active);
- regmap_update_bits(sai->regmap, STM_SAI_FRCR_REGX, frcr_mask, frcr);
+ stm32_sai_sub_reg_up(sai, STM_SAI_FRCR_REGX, frcr_mask, frcr);
if ((sai->fmt & SND_SOC_DAIFMT_FORMAT_MASK) == SND_SOC_DAIFMT_LSB) {
offset = sai->slot_width - sai->data_size;
- regmap_update_bits(sai->regmap, STM_SAI_SLOTR_REGX,
- SAI_XSLOTR_FBOFF_MASK,
- SAI_XSLOTR_FBOFF_SET(offset));
+ stm32_sai_sub_reg_up(sai, STM_SAI_SLOTR_REGX,
+ SAI_XSLOTR_FBOFF_MASK,
+ SAI_XSLOTR_FBOFF_SET(offset));
}
}
return -EINVAL;
}
- regmap_update_bits(sai->regmap,
- STM_SAI_CR1_REGX,
- SAI_XCR1_OSR, cr1);
+ stm32_sai_sub_reg_up(sai,
+ STM_SAI_CR1_REGX,
+ SAI_XCR1_OSR, cr1);
div = stm32_sai_get_clk_div(sai, sai_clk_rate,
sai->mclk_rate);
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
dev_dbg(cpu_dai->dev, "Enable DMA and SAI\n");
- regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX,
- SAI_XCR1_DMAEN, SAI_XCR1_DMAEN);
+ stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX,
+ SAI_XCR1_DMAEN, SAI_XCR1_DMAEN);
/* Enable SAI */
- ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX,
- SAI_XCR1_SAIEN, SAI_XCR1_SAIEN);
+ ret = stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX,
+ SAI_XCR1_SAIEN, SAI_XCR1_SAIEN);
if (ret < 0)
dev_err(cpu_dai->dev, "Failed to update CR1 register\n");
break;
case SNDRV_PCM_TRIGGER_STOP:
dev_dbg(cpu_dai->dev, "Disable DMA and SAI\n");
- regmap_update_bits(sai->regmap, STM_SAI_IMR_REGX,
- SAI_XIMR_MASK, 0);
+ stm32_sai_sub_reg_up(sai, STM_SAI_IMR_REGX,
+ SAI_XIMR_MASK, 0);
- regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX,
- SAI_XCR1_SAIEN,
- (unsigned int)~SAI_XCR1_SAIEN);
+ stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX,
+ SAI_XCR1_SAIEN,
+ (unsigned int)~SAI_XCR1_SAIEN);
- ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX,
- SAI_XCR1_DMAEN,
- (unsigned int)~SAI_XCR1_DMAEN);
+ ret = stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX,
+ SAI_XCR1_DMAEN,
+ (unsigned int)~SAI_XCR1_DMAEN);
if (ret < 0)
dev_err(cpu_dai->dev, "Failed to update CR1 register\n");
struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai);
unsigned long flags;
- regmap_update_bits(sai->regmap, STM_SAI_IMR_REGX, SAI_XIMR_MASK, 0);
+ stm32_sai_sub_reg_up(sai, STM_SAI_IMR_REGX, SAI_XIMR_MASK, 0);
clk_disable_unprepare(sai->sai_ck);
cr1_mask |= SAI_XCR1_SYNCEN_MASK;
cr1 |= SAI_XCR1_SYNCEN_SET(sai->sync);
- return regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, cr1_mask, cr1);
+ return stm32_sai_sub_reg_up(sai, STM_SAI_CR1_REGX, cr1_mask, cr1);
}
static const struct snd_soc_dai_ops stm32_sai_pcm_dai_ops = {
if (STM_SAI_HAS_PDM(sai) && STM_SAI_IS_SUB_A(sai))
sai->regmap_config = &stm32_sai_sub_regmap_config_h7;
- sai->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "sai_ck",
- base, sai->regmap_config);
+ /*
+ * Do not manage the peripheral clock through the regmap framework,
+ * as this can lead to a circular locking issue with the SAI master
+ * clock provider. Manage the peripheral clock directly in the
+ * driver instead.
+ */
+ sai->regmap = devm_regmap_init_mmio(&pdev->dev, base,
+ sai->regmap_config);
if (IS_ERR(sai->regmap)) {
dev_err(&pdev->dev, "Failed to initialize MMIO\n");
return PTR_ERR(sai->regmap);
return PTR_ERR(sai->sai_ck);
}
+ ret = clk_prepare(sai->pdata->pclk);
+ if (ret < 0)
+ return ret;
+
if (STM_SAI_IS_F4(sai->pdata))
return 0;
return 0;
}
+static int stm32_sai_sub_remove(struct platform_device *pdev)
+{
+ struct stm32_sai_sub_data *sai = dev_get_drvdata(&pdev->dev);
+
+ clk_unprepare(sai->pdata->pclk);
+
+ return 0;
+}
+
#ifdef CONFIG_PM_SLEEP
static int stm32_sai_sub_suspend(struct device *dev)
{
struct stm32_sai_sub_data *sai = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_enable(sai->pdata->pclk);
+ if (ret < 0)
+ return ret;
regcache_cache_only(sai->regmap, true);
regcache_mark_dirty(sai->regmap);
+
+ clk_disable(sai->pdata->pclk);
+
return 0;
}
static int stm32_sai_sub_resume(struct device *dev)
{
struct stm32_sai_sub_data *sai = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_enable(sai->pdata->pclk);
+ if (ret < 0)
+ return ret;
regcache_cache_only(sai->regmap, false);
- return regcache_sync(sai->regmap);
+ ret = regcache_sync(sai->regmap);
+
+ clk_disable(sai->pdata->pclk);
+
+ return ret;
}
#endif /* CONFIG_PM_SLEEP */
.pm = &stm32_sai_sub_pm_ops,
},
.probe = stm32_sai_sub_probe,
+ .remove = stm32_sai_sub_remove,
};
module_platform_driver(stm32_sai_sub_driver);
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of_platform.h>
-#include <linux/pinctrl/consumer.h>
#include <linux/regmap.h>
#include <linux/reset.h>
* @slave_config: dma slave channel runtime config pointer
* @phys_addr: SPDIFRX registers physical base address
* @lock: synchronization enabling lock
+ * @irq_lock: prevent race condition with IRQ on stream state
* @cs: channel status buffer
* @ub: user data buffer
* @irq: SPDIFRX interrupt line
struct dma_slave_config slave_config;
dma_addr_t phys_addr;
spinlock_t lock; /* Sync enabling lock */
+ spinlock_t irq_lock; /* Prevent race condition on stream state */
unsigned char cs[SPDIFRX_CS_BYTES_NB];
unsigned char ub[SPDIFRX_UB_BYTES_NB];
int irq;
static int stm32_spdifrx_start_sync(struct stm32_spdifrx_data *spdifrx)
{
int cr, cr_mask, imr, ret;
+ unsigned long flags;
/* Enable IRQs */
imr = SPDIFRX_IMR_IFEIE | SPDIFRX_IMR_SYNCDIE | SPDIFRX_IMR_PERRIE;
if (ret)
return ret;
- spin_lock(&spdifrx->lock);
+ spin_lock_irqsave(&spdifrx->lock, flags);
spdifrx->refcount++;
"Failed to start synchronization\n");
}
- spin_unlock(&spdifrx->lock);
+ spin_unlock_irqrestore(&spdifrx->lock, flags);
return ret;
}
static void stm32_spdifrx_stop(struct stm32_spdifrx_data *spdifrx)
{
int cr, cr_mask, reg;
+ unsigned long flags;
- spin_lock(&spdifrx->lock);
+ spin_lock_irqsave(&spdifrx->lock, flags);
if (--spdifrx->refcount) {
- spin_unlock(&spdifrx->lock);
+ spin_unlock_irqrestore(&spdifrx->lock, flags);
return;
}
regmap_read(spdifrx->regmap, STM32_SPDIFRX_DR, ®);
regmap_read(spdifrx->regmap, STM32_SPDIFRX_CSR, ®);
- spin_unlock(&spdifrx->lock);
+ spin_unlock_irqrestore(&spdifrx->lock, flags);
}
static int stm32_spdifrx_dma_ctrl_register(struct device *dev,
memset(spdifrx->cs, 0, SPDIFRX_CS_BYTES_NB);
memset(spdifrx->ub, 0, SPDIFRX_UB_BYTES_NB);
- pinctrl_pm_select_default_state(&spdifrx->pdev->dev);
-
ret = stm32_spdifrx_dma_ctrl_start(spdifrx);
if (ret < 0)
return ret;
end:
clk_disable_unprepare(spdifrx->kclk);
- pinctrl_pm_select_sleep_state(&spdifrx->pdev->dev);
return ret;
}
static irqreturn_t stm32_spdifrx_isr(int irq, void *devid)
{
struct stm32_spdifrx_data *spdifrx = (struct stm32_spdifrx_data *)devid;
- struct snd_pcm_substream *substream = spdifrx->substream;
struct platform_device *pdev = spdifrx->pdev;
unsigned int cr, mask, sr, imr;
unsigned int flags, sync_state;
return IRQ_HANDLED;
}
- if (substream)
- snd_pcm_stop(substream, SNDRV_PCM_STATE_DISCONNECTED);
+ spin_lock(&spdifrx->irq_lock);
+ if (spdifrx->substream)
+ snd_pcm_stop(spdifrx->substream,
+ SNDRV_PCM_STATE_DISCONNECTED);
+ spin_unlock(&spdifrx->irq_lock);
return IRQ_HANDLED;
}
- if (err_xrun && substream)
- snd_pcm_stop_xrun(substream);
+ spin_lock(&spdifrx->irq_lock);
+ if (err_xrun && spdifrx->substream)
+ snd_pcm_stop_xrun(spdifrx->substream);
+ spin_unlock(&spdifrx->irq_lock);
return IRQ_HANDLED;
}
struct snd_soc_dai *cpu_dai)
{
struct stm32_spdifrx_data *spdifrx = snd_soc_dai_get_drvdata(cpu_dai);
+ unsigned long flags;
int ret;
+ spin_lock_irqsave(&spdifrx->irq_lock, flags);
spdifrx->substream = substream;
+ spin_unlock_irqrestore(&spdifrx->irq_lock, flags);
ret = clk_prepare_enable(spdifrx->kclk);
if (ret)
struct snd_soc_dai *cpu_dai)
{
struct stm32_spdifrx_data *spdifrx = snd_soc_dai_get_drvdata(cpu_dai);
+ unsigned long flags;
+ spin_lock_irqsave(&spdifrx->irq_lock, flags);
spdifrx->substream = NULL;
+ spin_unlock_irqrestore(&spdifrx->irq_lock, flags);
+
clk_disable_unprepare(spdifrx->kclk);
}
spdifrx->pdev = pdev;
init_completion(&spdifrx->cs_completion);
spin_lock_init(&spdifrx->lock);
+ spin_lock_init(&spdifrx->irq_lock);
platform_set_drvdata(pdev, spdifrx);
add_sync_ep_from_ifnum:
iface = usb_ifnum_to_if(dev, ifnum);
- if (!iface || iface->num_altsetting == 0)
+ if (!iface || iface->num_altsetting < 2)
return -EINVAL;
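+ /* altsetting[1] is dereferenced below, hence the check for at least two */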
alts = &iface->altsetting[1];
case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
case USB_ID(0x05A3, 0x9420): /* ELP HD USB Camera */
+ case USB_ID(0x05a7, 0x1020): /* Bose Companion 5 */
case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
case USB_ID(0x1395, 0x740a): /* Sennheiser DECT */
case USB_ID(0x1901, 0x0191): /* GE B850V3 CP2114 audio interface */
bool is_plain_text)
{
if (is_plain_text)
- jsonw_printf(jw, "%p", data);
+ jsonw_printf(jw, "%p", *(void **)data);
else
jsonw_printf(jw, "%lu", *(unsigned long *)data);
}
}
filter_type = add_filter_type(filter, event->id);
- if (filter_type == NULL)
+ if (filter_type == NULL) {
+ free_arg(arg);
return TEP_ERRNO__MEM_ALLOC_FAILED;
+ }
if (filter_type->filter)
free_arg(filter_type->filter);
struct stat st;
bool has_br_stack = false;
int branch_mode = -1;
+ int last_key = 0;
bool branch_call_mode = false;
#define CALLCHAIN_DEFAULT_OPT "graph,0.5,caller,function,percent"
static const char report_callchain_help[] = "Display call graph (stack chain/backtrace):\n\n"
sort_order = sort_tmp;
}
- if (setup_sorting(session->evlist) < 0) {
+ if ((last_key != K_SWITCH_INPUT_DATA) &&
+ (setup_sorting(session->evlist) < 0)) {
if (sort_order)
parse_options_usage(report_usage, options, "s", 1);
if (field_order)
ret = __cmd_report(&report);
if (ret == K_SWITCH_INPUT_DATA) {
perf_session__delete(session);
+ last_key = K_SWITCH_INPUT_DATA;
goto repeat;
} else
ret = 0;
list_for_each_entry_safe(format, tmp, &(_list)->sorts, sort_list)
#define hists__for_each_format(hists, format) \
- perf_hpp_list__for_each_format((hists)->hpp_list, fmt)
+ perf_hpp_list__for_each_format((hists)->hpp_list, format)
#define hists__for_each_sort_list(hists, format) \
- perf_hpp_list__for_each_sort_list((hists)->hpp_list, fmt)
+ perf_hpp_list__for_each_sort_list((hists)->hpp_list, format)
extern struct perf_hpp_fmt perf_hpp__format[];
if (curr_map == NULL)
return -1;
+ if (curr_dso->kernel)
+ map__kmap(curr_map)->kmaps = kmaps;
+
if (adjust_kernel_syms) {
curr_map->start = shdr->sh_addr + ref_reloc(kmap);
curr_map->end = curr_map->start + shdr->sh_size;
OUTPUT=$(srctree)/
ifeq ("$(origin O)", "command line")
- OUTPUT := $(O)/power/acpi/
+ OUTPUT := $(O)/tools/power/acpi/
endif
#$(info Determined 'OUTPUT' to be $(OUTPUT))
*
* Module Name: cfsize - Common get file size function
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: getopt
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: oslinuxtbl - Linux OSL for obtaining ACPI tables
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: osunixdir - Unix directory access interfaces
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: osunixmap - Unix OSL for file mappings
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: osunixxf - UNIX OSL interfaces
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: acpidump.h - Include file for acpi_dump utility
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: apdump - Dump routines for ACPI tables (acpidump)
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: apfiles - File-related functions for acpidump utility
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
*
* Module Name: apmain - Main module for the acpidump utility
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
int arg;
};
-static const char *version_str = "v1.1";
+static const char *version_str = "v1.2";
static const int supported_api_ver = 1;
static struct isst_if_platform_info isst_platform_info;
static char *progname;
goto disp_result;
}
- if (auto_mode) {
- if (status) {
- ret = set_pbf_core_power(cpu);
- if (ret)
- goto disp_result;
- } else {
- isst_pm_qos_config(cpu, 0, 0);
- }
+ if (auto_mode && status) {
+ ret = set_pbf_core_power(cpu);
+ if (ret)
+ goto disp_result;
}
ret = isst_set_pbf_fact_status(cpu, 1, status);
}
}
+ if (auto_mode && !status)
+ isst_pm_qos_config(cpu, 0, 0);
+
disp_result:
if (status)
isst_display_result(cpu, outf, "base-freq", "enable",
int ret;
int status = *(int *)arg4;
- if (auto_mode) {
- if (status) {
- ret = isst_pm_qos_config(cpu, 1, 1);
- if (ret)
- goto disp_results;
- } else {
- isst_pm_qos_config(cpu, 0, 0);
- }
+ if (auto_mode && status) {
+ ret = isst_pm_qos_config(cpu, 1, 1);
+ if (ret)
+ goto disp_results;
}
ret = isst_set_pbf_fact_status(cpu, 0, status);
ret = isst_set_trl(cpu, fact_trl);
if (ret && auto_mode)
isst_pm_qos_config(cpu, 0, 0);
+ } else {
+ if (auto_mode)
+ isst_pm_qos_config(cpu, 0, 0);
}
disp_results:
if (ret)
goto error_disp;
}
- isst_display_result(i, outf, "turbo-freq --auto", "enable", 0);
+ isst_display_result(-1, outf, "turbo-freq --auto", "enable", 0);
}
return;
#include "isst.h"
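+/*
+ * SST-CP (core power) PM config mailbox helpers: the enable state is
+ * carried in bit 16 of the request/response payload, and bit 0 of the
+ * READ_PM_CONFIG response reports whether the feature is supported.
+ */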
+int isst_write_pm_config(int cpu, int cp_state)
+{
+ unsigned int req, resp;
+ int ret;
+
+ if (cp_state)
+ req = BIT(16);
+ else
+ req = 0;
+
+ ret = isst_send_mbox_command(cpu, WRITE_PM_CONFIG, PM_FEATURE, 0, req,
+ &resp);
+ if (ret)
+ return ret;
+
+ debug_printf("cpu:%d WRITE_PM_CONFIG resp:%x\n", cpu, resp);
+
+ return 0;
+}
+
+int isst_read_pm_config(int cpu, int *cp_state, int *cp_cap)
+{
+ unsigned int resp;
+ int ret;
+
+ ret = isst_send_mbox_command(cpu, READ_PM_CONFIG, PM_FEATURE, 0, 0,
+ &resp);
+ if (ret)
+ return ret;
+
+ debug_printf("cpu:%d READ_PM_CONFIG resp:%x\n", cpu, resp);
+
+ *cp_state = resp & BIT(16);
+ *cp_cap = resp & BIT(0) ? 1 : 0;
+
+ return 0;
+}
+
int isst_get_ctdp_levels(int cpu, struct isst_pkg_ctdp *pkg_dev)
{
unsigned int resp;
int isst_get_ctdp_control(int cpu, int config_index,
struct isst_pkg_ctdp_level_info *ctdp_level)
{
+ int cp_state, cp_cap;
unsigned int resp;
int ret;
ctdp_level->fact_enabled = !!(resp & BIT(16));
ctdp_level->pbf_enabled = !!(resp & BIT(17));
+ ret = isst_read_pm_config(cpu, &cp_state, &cp_cap);
+ if (ret) {
+ debug_printf("cpu:%d pm_config is not supported\n", cpu);
+ } else {
+ debug_printf("cpu:%d pm_config SST-CP state:%d cap:%d\n", cpu, cp_state, cp_cap);
+ ctdp_level->sst_cp_support = cp_cap;
+ ctdp_level->sst_cp_enabled = cp_state;
+ }
+
debug_printf(
"cpu:%d CONFIG_TDP_GET_TDP_CONTROL resp:%x fact_support:%d pbf_support: %d fact_enabled:%d pbf_enabled:%d\n",
cpu, resp, ctdp_level->fact_support, ctdp_level->pbf_support,
debug_printf("Turbo-freq feature must be disabled first\n");
return -EINVAL;
}
+ ret = isst_write_pm_config(cpu, 0);
+ if (ret)
+ perror("isst_write_pm_config");
+ } else {
+ ret = isst_write_pm_config(cpu, 1);
+ if (ret)
+ perror("isst_write_pm_config");
}
ret = isst_send_mbox_command(cpu, CONFIG_CLOS, CLOS_PM_QOS_CONFIG, 0, 0,
snprintf(value, sizeof(value), "unsupported");
format_and_print(outf, base_level + 4, header, value);
+ snprintf(header, sizeof(header),
+ "speed-select-core-power");
+ if (ctdp_level->sst_cp_support) {
+ if (ctdp_level->sst_cp_enabled)
+ snprintf(value, sizeof(value), "enabled");
+ else
+ snprintf(value, sizeof(value), "disabled");
+ } else
+ snprintf(value, sizeof(value), "unsupported");
+ format_and_print(outf, base_level + 4, header, value);
+
if (is_clx_n_platform()) {
if (ctdp_level->pbf_support)
_isst_pbf_display_information(cpu, outf,
char header[256];
char value[256];
- snprintf(header, sizeof(header), "package-%d",
- get_physical_package_id(cpu));
- format_and_print(outf, 1, header, NULL);
- snprintf(header, sizeof(header), "die-%d", get_physical_die_id(cpu));
- format_and_print(outf, 2, header, NULL);
- snprintf(header, sizeof(header), "cpu-%d", cpu);
- format_and_print(outf, 3, header, NULL);
+ if (cpu >= 0) {
+ snprintf(header, sizeof(header), "package-%d",
+ get_physical_package_id(cpu));
+ format_and_print(outf, 1, header, NULL);
+ snprintf(header, sizeof(header), "die-%d", get_physical_die_id(cpu));
+ format_and_print(outf, 2, header, NULL);
+ snprintf(header, sizeof(header), "cpu-%d", cpu);
+ format_and_print(outf, 3, header, NULL);
+ }
snprintf(header, sizeof(header), "%s", feature);
format_and_print(outf, 4, header, NULL);
snprintf(header, sizeof(header), "%s", cmd);
#define PM_CLOS_OFFSET 0x08
#define PQR_ASSOC_OFFSET 0x20
+#define READ_PM_CONFIG 0x94
+#define WRITE_PM_CONFIG 0x95
+#define PM_FEATURE 0x03
+
#define DISP_FREQ_MULTIPLIER 100
struct isst_clos_config {
int pbf_support;
int fact_enabled;
int pbf_enabled;
+ int sst_cp_support;
+ int sst_cp_enabled;
int tdp_ratio;
int active;
int tdp_control;
stop_traffic
local ucth1=${uc_rate[1]}
- start_traffic $h1 own bc bc
+ start_traffic $h1 192.0.2.65 bc bc
local d0=$(date +%s)
local t0=$(ethtool_stats_get $h3 rx_octets_prio_0)
ret = 100 * ($ucth1 - $ucth2) / $ucth1
if (ret > 0) { ret } else { 0 }
")
- check_err $(bc <<< "$deg > 25")
+
+ # A minimum shaper of 200Mbps on the MC TCs should cause about 20%
+ # degradation on a 1Gbps link.
+ check_err $(bc <<< "$deg < 15") "Minimum shaper not in effect"
+ check_err $(bc <<< "$deg > 25") "MC traffic degrades UC performance too much"
local interval=$((d1 - d0))
local mc_ir=$(rate $u0 $u1 $interval)
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
ALL_TESTS="loopback_test"
NUM_NETIFS=2
source tc_common.sh
h1_create
h2_create
+
+ if ethtool -k $h1 | grep loopback | grep -q fixed; then
+ log_test "SKIP: dev $h1 does not support loopback feature"
+ exit $ksft_skip
+ fi
}
cleanup()