// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Quadrature Encoder Peripheral driver
 *
 * Copyright (C) 2019-2021 Intel Corporation
 *
 * Author: Felipe Balbi (Intel)
 * Author: Jarkko Nikula <jarkko.nikula@linux.intel.com>
 * Author: Raymond Tan <raymond.tan@intel.com>
 */
11 #include <linux/counter.h>
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/mutex.h>
15 #include <linux/pci.h>
16 #include <linux/pm_runtime.h>
/* Register offsets from the MMIO base (BAR 0) */
#define INTEL_QEPCON 0x00
#define INTEL_QEPFLT 0x04
#define INTEL_QEPCOUNT 0x08
#define INTEL_QEPMAX 0x0c
#define INTEL_QEPWDT 0x10
#define INTEL_QEPCAPDIV 0x14
#define INTEL_QEPCNTR 0x18
#define INTEL_QEPCAPBUF 0x1c
#define INTEL_QEPINT_STAT 0x20
#define INTEL_QEPINT_MASK 0x24

/* QEPCON (control register) bits */
#define INTEL_QEPCON_EN BIT(0)
#define INTEL_QEPCON_FLT_EN BIT(1)
#define INTEL_QEPCON_EDGE_A BIT(2)
#define INTEL_QEPCON_EDGE_B BIT(3)
#define INTEL_QEPCON_EDGE_INDX BIT(4)
#define INTEL_QEPCON_SWPAB BIT(5)
#define INTEL_QEPCON_OP_MODE BIT(6)
#define INTEL_QEPCON_PH_ERR BIT(7)
#define INTEL_QEPCON_COUNT_RST_MODE BIT(8)
#define INTEL_QEPCON_INDX_GATING_MASK GENMASK(10, 9)
#define INTEL_QEPCON_INDX_GATING(n) (((n) & 3) << 9)
#define INTEL_QEPCON_INDX_PAL_PBL INTEL_QEPCON_INDX_GATING(0)
#define INTEL_QEPCON_INDX_PAL_PBH INTEL_QEPCON_INDX_GATING(1)
#define INTEL_QEPCON_INDX_PAH_PBL INTEL_QEPCON_INDX_GATING(2)
#define INTEL_QEPCON_INDX_PAH_PBH INTEL_QEPCON_INDX_GATING(3)
#define INTEL_QEPCON_CAP_MODE BIT(11)
#define INTEL_QEPCON_FIFO_THRE_MASK GENMASK(14, 12)
#define INTEL_QEPCON_FIFO_THRE(n) ((((n) - 1) & 7) << 12)
#define INTEL_QEPCON_FIFO_EMPTY BIT(15)

/* QEPFLT (filter register): max-count field occupies bits [20:0] */
#define INTEL_QEPFLT_MAX_COUNT(n) ((n) & 0x1fffff)

/* QEPINT_STAT / QEPINT_MASK (interrupt status and mask) bits */
#define INTEL_QEPINT_FIFOCRIT BIT(5)
#define INTEL_QEPINT_FIFOENTRY BIT(4)
#define INTEL_QEPINT_QEPDIR BIT(3)
#define INTEL_QEPINT_QEPRST_UP BIT(2)
#define INTEL_QEPINT_QEPRST_DOWN BIT(1)
#define INTEL_QEPINT_WDT BIT(0)

/* All six interrupt sources above */
#define INTEL_QEPINT_MASK_ALL GENMASK(5, 0)

/* One QEP clock period in nanoseconds (used for filter length maths) */
#define INTEL_QEP_CLK_PERIOD_NS 10
66 struct counter_device counter;
71 /* Context save registers */
77 static inline u32 intel_qep_readl(struct intel_qep *qep, u32 offset)
79 return readl(qep->regs + offset);
82 static inline void intel_qep_writel(struct intel_qep *qep,
83 u32 offset, u32 value)
85 writel(value, qep->regs + offset);
88 static void intel_qep_init(struct intel_qep *qep)
92 reg = intel_qep_readl(qep, INTEL_QEPCON);
93 reg &= ~INTEL_QEPCON_EN;
94 intel_qep_writel(qep, INTEL_QEPCON, reg);
97 * Make sure peripheral is disabled by flushing the write with
100 reg = intel_qep_readl(qep, INTEL_QEPCON);
102 reg &= ~(INTEL_QEPCON_OP_MODE | INTEL_QEPCON_FLT_EN);
103 reg |= INTEL_QEPCON_EDGE_A | INTEL_QEPCON_EDGE_B |
104 INTEL_QEPCON_EDGE_INDX | INTEL_QEPCON_COUNT_RST_MODE;
105 intel_qep_writel(qep, INTEL_QEPCON, reg);
106 intel_qep_writel(qep, INTEL_QEPINT_MASK, INTEL_QEPINT_MASK_ALL);
109 static int intel_qep_count_read(struct counter_device *counter,
110 struct counter_count *count, u64 *val)
112 struct intel_qep *const qep = counter->priv;
114 pm_runtime_get_sync(qep->dev);
115 *val = intel_qep_readl(qep, INTEL_QEPCOUNT);
116 pm_runtime_put(qep->dev);
121 static const enum counter_function intel_qep_count_functions[] = {
122 COUNTER_FUNCTION_QUADRATURE_X4,
125 static int intel_qep_function_read(struct counter_device *counter,
126 struct counter_count *count,
127 enum counter_function *function)
129 *function = COUNTER_FUNCTION_QUADRATURE_X4;
134 static const enum counter_synapse_action intel_qep_synapse_actions[] = {
135 COUNTER_SYNAPSE_ACTION_BOTH_EDGES,
138 static int intel_qep_action_read(struct counter_device *counter,
139 struct counter_count *count,
140 struct counter_synapse *synapse,
141 enum counter_synapse_action *action)
143 *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
147 static const struct counter_ops intel_qep_counter_ops = {
148 .count_read = intel_qep_count_read,
149 .function_read = intel_qep_function_read,
150 .action_read = intel_qep_action_read,
153 #define INTEL_QEP_SIGNAL(_id, _name) { \
158 static struct counter_signal intel_qep_signals[] = {
159 INTEL_QEP_SIGNAL(0, "Phase A"),
160 INTEL_QEP_SIGNAL(1, "Phase B"),
161 INTEL_QEP_SIGNAL(2, "Index"),
/* Initializer tying signal @_signal_id to the fixed both-edges action. */
#define INTEL_QEP_SYNAPSE(_signal_id) {				\
	.actions_list = intel_qep_synapse_actions,		\
	.num_actions = ARRAY_SIZE(intel_qep_synapse_actions),	\
	.signal = &intel_qep_signals[(_signal_id)],		\
}
170 static struct counter_synapse intel_qep_count_synapses[] = {
171 INTEL_QEP_SYNAPSE(0),
172 INTEL_QEP_SYNAPSE(1),
173 INTEL_QEP_SYNAPSE(2),
176 static int intel_qep_ceiling_read(struct counter_device *counter,
177 struct counter_count *count, u64 *ceiling)
179 struct intel_qep *qep = counter->priv;
181 pm_runtime_get_sync(qep->dev);
182 *ceiling = intel_qep_readl(qep, INTEL_QEPMAX);
183 pm_runtime_put(qep->dev);
188 static int intel_qep_ceiling_write(struct counter_device *counter,
189 struct counter_count *count, u64 max)
191 struct intel_qep *qep = counter->priv;
194 /* Intel QEP ceiling configuration only supports 32-bit values */
198 mutex_lock(&qep->lock);
204 pm_runtime_get_sync(qep->dev);
205 intel_qep_writel(qep, INTEL_QEPMAX, max);
206 pm_runtime_put(qep->dev);
209 mutex_unlock(&qep->lock);
213 static int intel_qep_enable_read(struct counter_device *counter,
214 struct counter_count *count, u8 *enable)
216 struct intel_qep *qep = counter->priv;
218 *enable = qep->enabled;
223 static int intel_qep_enable_write(struct counter_device *counter,
224 struct counter_count *count, u8 val)
226 struct intel_qep *qep = counter->priv;
230 mutex_lock(&qep->lock);
231 changed = val ^ qep->enabled;
235 pm_runtime_get_sync(qep->dev);
236 reg = intel_qep_readl(qep, INTEL_QEPCON);
238 /* Enable peripheral and keep runtime PM always on */
239 reg |= INTEL_QEPCON_EN;
240 pm_runtime_get_noresume(qep->dev);
242 /* Let runtime PM be idle and disable peripheral */
243 pm_runtime_put_noidle(qep->dev);
244 reg &= ~INTEL_QEPCON_EN;
246 intel_qep_writel(qep, INTEL_QEPCON, reg);
247 pm_runtime_put(qep->dev);
251 mutex_unlock(&qep->lock);
255 static int intel_qep_spike_filter_ns_read(struct counter_device *counter,
256 struct counter_count *count,
259 struct intel_qep *qep = counter->priv;
262 pm_runtime_get_sync(qep->dev);
263 reg = intel_qep_readl(qep, INTEL_QEPCON);
264 if (!(reg & INTEL_QEPCON_FLT_EN)) {
265 pm_runtime_put(qep->dev);
268 reg = INTEL_QEPFLT_MAX_COUNT(intel_qep_readl(qep, INTEL_QEPFLT));
269 pm_runtime_put(qep->dev);
271 *length = (reg + 2) * INTEL_QEP_CLK_PERIOD_NS;
276 static int intel_qep_spike_filter_ns_write(struct counter_device *counter,
277 struct counter_count *count,
280 struct intel_qep *qep = counter->priv;
286 * Spike filter length is (MAX_COUNT + 2) clock periods.
287 * Disable filter when userspace writes 0, enable for valid
288 * nanoseconds values and error out otherwise.
290 do_div(length, INTEL_QEP_CLK_PERIOD_NS);
294 } else if (length >= 2) {
301 if (length > INTEL_QEPFLT_MAX_COUNT(length))
304 mutex_lock(&qep->lock);
310 pm_runtime_get_sync(qep->dev);
311 reg = intel_qep_readl(qep, INTEL_QEPCON);
313 reg |= INTEL_QEPCON_FLT_EN;
315 reg &= ~INTEL_QEPCON_FLT_EN;
316 intel_qep_writel(qep, INTEL_QEPFLT, length);
317 intel_qep_writel(qep, INTEL_QEPCON, reg);
318 pm_runtime_put(qep->dev);
321 mutex_unlock(&qep->lock);
325 static int intel_qep_preset_enable_read(struct counter_device *counter,
326 struct counter_count *count,
329 struct intel_qep *qep = counter->priv;
332 pm_runtime_get_sync(qep->dev);
333 reg = intel_qep_readl(qep, INTEL_QEPCON);
334 pm_runtime_put(qep->dev);
336 *preset_enable = !(reg & INTEL_QEPCON_COUNT_RST_MODE);
341 static int intel_qep_preset_enable_write(struct counter_device *counter,
342 struct counter_count *count, u8 val)
344 struct intel_qep *qep = counter->priv;
348 mutex_lock(&qep->lock);
354 pm_runtime_get_sync(qep->dev);
355 reg = intel_qep_readl(qep, INTEL_QEPCON);
357 reg &= ~INTEL_QEPCON_COUNT_RST_MODE;
359 reg |= INTEL_QEPCON_COUNT_RST_MODE;
361 intel_qep_writel(qep, INTEL_QEPCON, reg);
362 pm_runtime_put(qep->dev);
365 mutex_unlock(&qep->lock);
370 static struct counter_comp intel_qep_count_ext[] = {
371 COUNTER_COMP_ENABLE(intel_qep_enable_read, intel_qep_enable_write),
372 COUNTER_COMP_CEILING(intel_qep_ceiling_read, intel_qep_ceiling_write),
373 COUNTER_COMP_PRESET_ENABLE(intel_qep_preset_enable_read,
374 intel_qep_preset_enable_write),
375 COUNTER_COMP_COUNT_U64("spike_filter_ns",
376 intel_qep_spike_filter_ns_read,
377 intel_qep_spike_filter_ns_write),
380 static struct counter_count intel_qep_counter_count[] = {
383 .name = "Channel 1 Count",
384 .functions_list = intel_qep_count_functions,
385 .num_functions = ARRAY_SIZE(intel_qep_count_functions),
386 .synapses = intel_qep_count_synapses,
387 .num_synapses = ARRAY_SIZE(intel_qep_count_synapses),
388 .ext = intel_qep_count_ext,
389 .num_ext = ARRAY_SIZE(intel_qep_count_ext),
393 static int intel_qep_probe(struct pci_dev *pci, const struct pci_device_id *id)
395 struct intel_qep *qep;
396 struct device *dev = &pci->dev;
400 qep = devm_kzalloc(dev, sizeof(*qep), GFP_KERNEL);
404 ret = pcim_enable_device(pci);
410 ret = pcim_iomap_regions(pci, BIT(0), pci_name(pci));
414 regs = pcim_iomap_table(pci)[0];
420 mutex_init(&qep->lock);
423 pci_set_drvdata(pci, qep);
425 qep->counter.name = pci_name(pci);
426 qep->counter.parent = dev;
427 qep->counter.ops = &intel_qep_counter_ops;
428 qep->counter.counts = intel_qep_counter_count;
429 qep->counter.num_counts = ARRAY_SIZE(intel_qep_counter_count);
430 qep->counter.signals = intel_qep_signals;
431 qep->counter.num_signals = ARRAY_SIZE(intel_qep_signals);
432 qep->counter.priv = qep;
433 qep->enabled = false;
436 pm_runtime_allow(dev);
438 return devm_counter_register(&pci->dev, &qep->counter);
441 static void intel_qep_remove(struct pci_dev *pci)
443 struct intel_qep *qep = pci_get_drvdata(pci);
444 struct device *dev = &pci->dev;
446 pm_runtime_forbid(dev);
450 intel_qep_writel(qep, INTEL_QEPCON, 0);
453 static int __maybe_unused intel_qep_suspend(struct device *dev)
455 struct pci_dev *pdev = to_pci_dev(dev);
456 struct intel_qep *qep = pci_get_drvdata(pdev);
458 qep->qepcon = intel_qep_readl(qep, INTEL_QEPCON);
459 qep->qepflt = intel_qep_readl(qep, INTEL_QEPFLT);
460 qep->qepmax = intel_qep_readl(qep, INTEL_QEPMAX);
465 static int __maybe_unused intel_qep_resume(struct device *dev)
467 struct pci_dev *pdev = to_pci_dev(dev);
468 struct intel_qep *qep = pci_get_drvdata(pdev);
471 * Make sure peripheral is disabled when restoring registers and
472 * control register bits that are writable only when the peripheral
475 intel_qep_writel(qep, INTEL_QEPCON, 0);
476 intel_qep_readl(qep, INTEL_QEPCON);
478 intel_qep_writel(qep, INTEL_QEPFLT, qep->qepflt);
479 intel_qep_writel(qep, INTEL_QEPMAX, qep->qepmax);
480 intel_qep_writel(qep, INTEL_QEPINT_MASK, INTEL_QEPINT_MASK_ALL);
482 /* Restore all other control register bits except enable status */
483 intel_qep_writel(qep, INTEL_QEPCON, qep->qepcon & ~INTEL_QEPCON_EN);
484 intel_qep_readl(qep, INTEL_QEPCON);
486 /* Restore enable status */
487 intel_qep_writel(qep, INTEL_QEPCON, qep->qepcon);
/* Same suspend/resume callbacks serve system sleep and runtime PM */
static UNIVERSAL_DEV_PM_OPS(intel_qep_pm_ops,
			    intel_qep_suspend, intel_qep_resume, NULL);
495 static const struct pci_device_id intel_qep_id_table[] = {
497 { PCI_VDEVICE(INTEL, 0x4bc3), },
498 { PCI_VDEVICE(INTEL, 0x4b81), },
499 { PCI_VDEVICE(INTEL, 0x4b82), },
500 { PCI_VDEVICE(INTEL, 0x4b83), },
501 { } /* Terminating Entry */
503 MODULE_DEVICE_TABLE(pci, intel_qep_id_table);
505 static struct pci_driver intel_qep_driver = {
507 .id_table = intel_qep_id_table,
508 .probe = intel_qep_probe,
509 .remove = intel_qep_remove,
511 .pm = &intel_qep_pm_ops,
/* Generates module init/exit that register/unregister the PCI driver */
module_pci_driver(intel_qep_driver);

MODULE_AUTHOR("Felipe Balbi (Intel)");
MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@linux.intel.com>");
MODULE_AUTHOR("Raymond Tan <raymond.tan@intel.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Intel Quadrature Encoder Peripheral driver");