/*
 * Copyright (c) 2011-2015 Xilinx Inc.
 * Copyright (c) 2015, National Instruments Corp.
 *
 * FPGA Manager Driver for Xilinx Zynq, heavily based on xdevcfg driver
 * in their vendor tree.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/fpga/fpga-mgr.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/pm.h>
#include <linux/regmap.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
/* Offsets into SLCR regmap */

/* FPGA Software Reset Control */
#define SLCR_FPGA_RST_CTRL_OFFSET 0x240
/* Level Shifters Enable */
#define SLCR_LVL_SHFTR_EN_OFFSET 0x900
/* Constant Definitions */

/* Control Register */
#define CTRL_OFFSET 0x00
/* Lock Register */
#define LOCK_OFFSET 0x04
/* Interrupt Status Register */
#define INT_STS_OFFSET 0x0c
/* Interrupt Mask Register */
#define INT_MASK_OFFSET 0x10
/* Status Register */
#define STATUS_OFFSET 0x14
/* DMA Source Address Register */
#define DMA_SRC_ADDR_OFFSET 0x18
/* DMA Destination Address Register */
#define DMA_DST_ADDR_OFFSET 0x1c
/* DMA Source Transfer Length Register */
#define DMA_SRC_LEN_OFFSET 0x20
/* DMA Destination Transfer Length Register */
#define DMA_DEST_LEN_OFFSET 0x24
/* Unlock Register */
#define UNLOCK_OFFSET 0x34
/* Misc. Control Register */
#define MCTRL_OFFSET 0x80
/* Control Register Bit definitions */

/* Signal to reset FPGA */
#define CTRL_PCFG_PROG_B_MASK BIT(30)
/* Enable PCAP for partial reconfiguration */
#define CTRL_PCAP_PR_MASK BIT(27)
/* Enable PCAP */
#define CTRL_PCAP_MODE_MASK BIT(26)
/* Lower rate to allow decrypt on the fly */
#define CTRL_PCAP_RATE_EN_MASK BIT(25)
/* System booted in secure mode */
#define CTRL_SEC_EN_MASK BIT(7)
/* Miscellaneous Control Register bit definitions */
/* Internal PCAP loopback */
#define MCTRL_PCAP_LPBK_MASK BIT(4)

/* Status register bit definitions */

/* DMA command queue full / empty */
#define STATUS_DMA_Q_F BIT(31)
#define STATUS_DMA_Q_E BIT(30)
/* FPGA init status */
#define STATUS_PCFG_INIT_MASK BIT(4)
/* Interrupt Status/Mask Register Bit definitions */

/* DMA command done */
#define IXR_DMA_DONE_MASK BIT(13)
/* DMA and PCAP cmd done */
#define IXR_D_P_DONE_MASK BIT(12)
/* FPGA programmed */
#define IXR_PCFG_DONE_MASK BIT(2)
#define IXR_ERROR_FLAGS_MASK 0x00F0C860
#define IXR_ALL_MASK 0xF8F7F87F
/* Miscellaneous constant values */

/* Invalid DMA addr */
#define DMA_INVALID_ADDRESS GENMASK(31, 0)
/* Used to unlock the dev */
#define UNLOCK_MASK 0x757bdf0d
/* Timeout for polling reset bits */
#define INIT_POLL_TIMEOUT 2500000
/* Delay for polling reset bits */
#define INIT_POLL_DELAY 20
/* Signal this is the last DMA transfer, wait for the AXI and PCAP before
 * interrupting
 */
#define DMA_SRC_LAST_TRANSFER 1
/* Timeout for DMA completion */
#define DMA_TIMEOUT_MS 5000
/* Masks for controlling the level shifters and resets in the SLCR */

/* Disable all Level shifters */
#define LVL_SHFTR_DISABLE_ALL_MASK 0x0
/* Enable Level shifters from PS to PL */
#define LVL_SHFTR_ENABLE_PS_TO_PL 0xa
/* Enable Level shifters from PL to PS */
#define LVL_SHFTR_ENABLE_PL_TO_PS 0xf
/* Enable global resets */
#define FPGA_RST_ALL_MASK 0xf
/* Disable global resets */
#define FPGA_RST_NONE_MASK 0x0
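
/*
 * Per-device driver state, as used below: the SLCR regmap drives the level
 * shifters and PL resets, dma_lock protects the scatterlist walk shared with
 * the ISR, and dma_done signals completion of the final DMA/PCAP transfer.
 */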
struct zynq_fpga_priv {
        int irq;
        struct clk *clk;

        void __iomem *io_base;
        struct regmap *slcr;

        spinlock_t dma_lock;
        unsigned int dma_elm;
        unsigned int dma_nelms;
        struct scatterlist *cur_sg;

        struct completion dma_done;
};
static inline void zynq_fpga_write(struct zynq_fpga_priv *priv, u32 offset,
                                   u32 val)
{
        writel(val, priv->io_base + offset);
}

static inline u32 zynq_fpga_read(const struct zynq_fpga_priv *priv,
                                 u32 offset)
{
        return readl(priv->io_base + offset);
}
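
/*
 * Poll a devcfg register until @cond becomes true or @timeout_us expires;
 * a thin wrapper around readl_poll_timeout() for this register space.
 */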
#define zynq_fpga_poll_timeout(priv, addr, val, cond, sleep_us, timeout_us) \
        readl_poll_timeout(priv->io_base + addr, val, cond, sleep_us, \
                           timeout_us)

/* Cause the specified irq mask bits to generate IRQs */
static inline void zynq_fpga_set_irq(struct zynq_fpga_priv *priv, u32 enable)
{
        zynq_fpga_write(priv, INT_MASK_OFFSET, ~enable);
}

/* Must be called with dma_lock held */
static void zynq_step_dma(struct zynq_fpga_priv *priv)
{
        u32 addr;
        u32 len;
        bool first;

        first = priv->dma_elm == 0;
        while (priv->cur_sg) {
                /* Feed the DMA queue until it is full. */
                if (zynq_fpga_read(priv, STATUS_OFFSET) & STATUS_DMA_Q_F)
                        break;

                addr = sg_dma_address(priv->cur_sg);
                len = sg_dma_len(priv->cur_sg);
                if (priv->dma_elm + 1 == priv->dma_nelms) {
                        /* The last transfer waits for the PCAP to finish too,
                         * notice this also changes the irq_mask to ignore
                         * IXR_DMA_DONE_MASK which ensures we do not trigger
                         * the completion too early.
                         */
                        addr |= DMA_SRC_LAST_TRANSFER;
                        priv->cur_sg = NULL;
                } else {
                        priv->cur_sg = sg_next(priv->cur_sg);
                        priv->dma_elm++;
                }

                zynq_fpga_write(priv, DMA_SRC_ADDR_OFFSET, addr);
                zynq_fpga_write(priv, DMA_DST_ADDR_OFFSET, DMA_INVALID_ADDRESS);
                zynq_fpga_write(priv, DMA_SRC_LEN_OFFSET, len / 4);
                zynq_fpga_write(priv, DMA_DEST_LEN_OFFSET, 0);
        }

        /* Once the first transfer is queued we can turn on the ISR, future
         * calls to zynq_step_dma will happen from the ISR context. The
         * dma_lock spinlock guarantees this handover is done coherently, the
         * ISR enable is put at the end to avoid another CPU spinning in the
         * ISR on this lock.
         */
        if (first && priv->cur_sg) {
                zynq_fpga_set_irq(priv,
                                  IXR_DMA_DONE_MASK | IXR_ERROR_FLAGS_MASK);
        } else if (!priv->cur_sg) {
                /* The last transfer changes to DMA & PCAP mode since we do
                 * not want to continue until everything has been flushed into
                 * the PCAP.
                 */
                zynq_fpga_set_irq(priv,
                                  IXR_D_P_DONE_MASK | IXR_ERROR_FLAGS_MASK);
        }
}
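
/*
 * Interrupt handler for the devcfg block. On a plain DMA-done interrupt it
 * queues the next scatterlist chunk; on an error, or once the final DMA+PCAP
 * transfer completes, it masks the IRQ and wakes the waiting writer.
 */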
static irqreturn_t zynq_fpga_isr(int irq, void *data)
{
        struct zynq_fpga_priv *priv = data;
        u32 intr_status;

        /* If anything other than DMA completion is reported stop and hand
         * control back to zynq_fpga_ops_write, something went wrong,
         * otherwise progress the DMA.
         */
        spin_lock(&priv->dma_lock);
        intr_status = zynq_fpga_read(priv, INT_STS_OFFSET);
        if (!(intr_status & IXR_ERROR_FLAGS_MASK) &&
            (intr_status & IXR_DMA_DONE_MASK) && priv->cur_sg) {
                zynq_fpga_write(priv, INT_STS_OFFSET, IXR_DMA_DONE_MASK);
                zynq_step_dma(priv);
                spin_unlock(&priv->dma_lock);
                return IRQ_HANDLED;
        }
        spin_unlock(&priv->dma_lock);

        zynq_fpga_set_irq(priv, 0);
        complete(&priv->dma_done);

        return IRQ_HANDLED;
}

/* Sanity check the proposed bitstream. It must start with the sync word in
 * the correct byte order, and be dword aligned. The input is a Xilinx .bin
 * file with every 32 bit quantity swapped.
 */
static bool zynq_fpga_has_sync(const u8 *buf, size_t count)
{
        for (; count >= 4; buf += 4, count -= 4)
                if (buf[0] == 0x66 && buf[1] == 0x55 && buf[2] == 0x99 &&
                    buf[3] == 0xaa)
                        return true;
        return false;
}
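
/*
 * Prepare the PCAP interface for programming. For a full reconfiguration this
 * toggles PROG_B to clear the PL (per the UG585 sequence referenced below);
 * for partial reconfiguration the global resets and level shifters are left
 * untouched.
 */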
static int zynq_fpga_ops_write_init(struct fpga_manager *mgr,
                                    struct fpga_image_info *info,
                                    const char *buf, size_t count)
{
        struct zynq_fpga_priv *priv;
        u32 ctrl, status;
        int err;

        priv = mgr->priv;

        err = clk_enable(priv->clk);
        if (err)
                return err;

        /* check if the bitstream is encrypted and the system is still secure */
        if (info->flags & FPGA_MGR_ENCRYPTED_BITSTREAM) {
                ctrl = zynq_fpga_read(priv, CTRL_OFFSET);
                if (!(ctrl & CTRL_SEC_EN_MASK)) {
                        dev_err(&mgr->dev,
                                "System not secure, can't use encrypted bitstreams\n");
                        err = -EINVAL;
                        goto out_err;
                }
        }

        /* don't globally reset PL if we're doing partial reconfig */
        if (!(info->flags & FPGA_MGR_PARTIAL_RECONFIG)) {
                if (!zynq_fpga_has_sync(buf, count)) {
                        dev_err(&mgr->dev,
                                "Invalid bitstream, could not find a sync word. Bitstream must be a byte swapped .bin file\n");
                        err = -EINVAL;
                        goto out_err;
                }

                /* assert AXI interface resets */
                regmap_write(priv->slcr, SLCR_FPGA_RST_CTRL_OFFSET,
                             FPGA_RST_ALL_MASK);

                /* disable all level shifters */
                regmap_write(priv->slcr, SLCR_LVL_SHFTR_EN_OFFSET,
                             LVL_SHFTR_DISABLE_ALL_MASK);
                /* enable level shifters from PS to PL */
                regmap_write(priv->slcr, SLCR_LVL_SHFTR_EN_OFFSET,
                             LVL_SHFTR_ENABLE_PS_TO_PL);

                /* create a rising edge on PCFG_INIT. PCFG_INIT follows
                 * PCFG_PROG_B, so we need to poll it after setting PCFG_PROG_B
                 * to make sure the rising edge actually happens.
                 * Note: PCFG_PROG_B is low active, sequence as described in
                 * UG585 v1.10 page 211
                 */
                ctrl = zynq_fpga_read(priv, CTRL_OFFSET);
                ctrl |= CTRL_PCFG_PROG_B_MASK;
                zynq_fpga_write(priv, CTRL_OFFSET, ctrl);

                err = zynq_fpga_poll_timeout(priv, STATUS_OFFSET, status,
                                             status & STATUS_PCFG_INIT_MASK,
                                             INIT_POLL_DELAY,
                                             INIT_POLL_TIMEOUT);
                if (err) {
                        dev_err(&mgr->dev, "Timeout waiting for PCFG_INIT\n");
                        goto out_err;
                }

                ctrl = zynq_fpga_read(priv, CTRL_OFFSET);
                ctrl &= ~CTRL_PCFG_PROG_B_MASK;
                zynq_fpga_write(priv, CTRL_OFFSET, ctrl);

                err = zynq_fpga_poll_timeout(priv, STATUS_OFFSET, status,
                                             !(status & STATUS_PCFG_INIT_MASK),
                                             INIT_POLL_DELAY,
                                             INIT_POLL_TIMEOUT);
                if (err) {
                        dev_err(&mgr->dev, "Timeout waiting for !PCFG_INIT\n");
                        goto out_err;
                }

                ctrl = zynq_fpga_read(priv, CTRL_OFFSET);
                ctrl |= CTRL_PCFG_PROG_B_MASK;
                zynq_fpga_write(priv, CTRL_OFFSET, ctrl);

                err = zynq_fpga_poll_timeout(priv, STATUS_OFFSET, status,
                                             status & STATUS_PCFG_INIT_MASK,
                                             INIT_POLL_DELAY,
                                             INIT_POLL_TIMEOUT);
                if (err) {
                        dev_err(&mgr->dev, "Timeout waiting for PCFG_INIT\n");
                        goto out_err;
                }
        }

        /* set configuration register with following options:
         * - enable PCAP interface
         * - set throughput for maximum speed (if bitstream not encrypted)
         * - set CPU in user mode
         */
        ctrl = zynq_fpga_read(priv, CTRL_OFFSET);
        if (info->flags & FPGA_MGR_ENCRYPTED_BITSTREAM)
                zynq_fpga_write(priv, CTRL_OFFSET,
                                (CTRL_PCAP_PR_MASK | CTRL_PCAP_MODE_MASK
                                 | CTRL_PCAP_RATE_EN_MASK | ctrl));
        else
                zynq_fpga_write(priv, CTRL_OFFSET,
                                (CTRL_PCAP_PR_MASK | CTRL_PCAP_MODE_MASK
                                 | ctrl));

        /* We expect that the command queue is empty right now. */
        status = zynq_fpga_read(priv, STATUS_OFFSET);
        if ((status & STATUS_DMA_Q_F) ||
            (status & STATUS_DMA_Q_E) != STATUS_DMA_Q_E) {
                dev_err(&mgr->dev, "DMA command queue not right\n");
                err = -EBUSY;
                goto out_err;
        }

        /* ensure internal PCAP loopback is disabled */
        ctrl = zynq_fpga_read(priv, MCTRL_OFFSET);
        zynq_fpga_write(priv, MCTRL_OFFSET, (~MCTRL_PCAP_LPBK_MASK & ctrl));

        clk_disable(priv->clk);

        return 0;

out_err:
        clk_disable(priv->clk);

        return err;
}
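
/*
 * Stream the bitstream into the PCAP via the devcfg DMA engine. The
 * scatterlist is DMA-mapped, fed to the hardware chunk by chunk from the ISR,
 * and this routine sleeps until the final DMA+PCAP transfer completes or
 * times out.
 */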
static int zynq_fpga_ops_write(struct fpga_manager *mgr, struct sg_table *sgt)
{
        struct zynq_fpga_priv *priv;
        const char *why;
        int err;
        u32 intr_status;
        unsigned long timeout;
        unsigned long flags;
        struct scatterlist *sg;
        int i;

        priv = mgr->priv;

        /* The hardware can only DMA multiples of 4 bytes, and it requires the
         * starting addresses to be aligned to 64 bits (UG585 pg 212).
         */
        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                if ((sg->offset % 8) || (sg->length % 4)) {
                        dev_err(&mgr->dev,
                                "Invalid bitstream, chunks must be aligned\n");
                        return -EINVAL;
                }
        }

        priv->dma_nelms =
            dma_map_sg(mgr->dev.parent, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
        if (priv->dma_nelms == 0) {
                dev_err(&mgr->dev, "Unable to DMA map (TO_DEVICE)\n");
                return -ENOMEM;
        }

        err = clk_enable(priv->clk);
        if (err)
                goto out_free;

        zynq_fpga_write(priv, INT_STS_OFFSET, IXR_ALL_MASK);
        reinit_completion(&priv->dma_done);

        /* zynq_step_dma will turn on interrupts */
        spin_lock_irqsave(&priv->dma_lock, flags);
        priv->dma_elm = 0;
        priv->cur_sg = sgt->sgl;
        zynq_step_dma(priv);
        spin_unlock_irqrestore(&priv->dma_lock, flags);

        timeout = wait_for_completion_timeout(&priv->dma_done,
                                              msecs_to_jiffies(DMA_TIMEOUT_MS));

        spin_lock_irqsave(&priv->dma_lock, flags);
        zynq_fpga_set_irq(priv, 0);
        spin_unlock_irqrestore(&priv->dma_lock, flags);

        intr_status = zynq_fpga_read(priv, INT_STS_OFFSET);
        zynq_fpga_write(priv, INT_STS_OFFSET, IXR_ALL_MASK);

        /* There doesn't seem to be a way to force cancel any DMA, so if
         * something went wrong we are relying on the hardware to have halted
         * the DMA before we get here; if cancellation were possible we could
         * use wait_for_completion_interruptible too.
         */
        if (intr_status & IXR_ERROR_FLAGS_MASK) {
                why = "DMA reported error";
                err = -EIO;
                goto out_report;
        }

        if (priv->cur_sg ||
            !((intr_status & IXR_D_P_DONE_MASK) == IXR_D_P_DONE_MASK)) {
                if (timeout == 0)
                        why = "DMA timed out";
                else
                        why = "DMA did not complete";
                err = -EIO;
                goto out_report;
        }

        err = 0;
        goto out_clk;

out_report:
        dev_err(&mgr->dev,
                "%s: INT_STS:0x%x CTRL:0x%x LOCK:0x%x INT_MASK:0x%x STATUS:0x%x MCTRL:0x%x\n",
                why,
                intr_status,
                zynq_fpga_read(priv, CTRL_OFFSET),
                zynq_fpga_read(priv, LOCK_OFFSET),
                zynq_fpga_read(priv, INT_MASK_OFFSET),
                zynq_fpga_read(priv, STATUS_OFFSET),
                zynq_fpga_read(priv, MCTRL_OFFSET));

out_clk:
        clk_disable(priv->clk);

out_free:
        dma_unmap_sg(mgr->dev.parent, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
        return err;
}
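
/*
 * Called by the FPGA manager core after the last write. Waits for the
 * PCFG_DONE interrupt bit and, for a full reconfiguration, re-enables the
 * PL-to-PS level shifters and releases the AXI interface resets.
 */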
static int zynq_fpga_ops_write_complete(struct fpga_manager *mgr,
                                        struct fpga_image_info *info)
{
        struct zynq_fpga_priv *priv = mgr->priv;
        int err;
        u32 intr_status;

        err = clk_enable(priv->clk);
        if (err)
                return err;

        err = zynq_fpga_poll_timeout(priv, INT_STS_OFFSET, intr_status,
                                     intr_status & IXR_PCFG_DONE_MASK,
                                     INIT_POLL_DELAY,
                                     INIT_POLL_TIMEOUT);

        clk_disable(priv->clk);

        if (err)
                return err;

        /* for the partial reconfig case we didn't touch the level shifters */
        if (!(info->flags & FPGA_MGR_PARTIAL_RECONFIG)) {
                /* enable level shifters from PL to PS */
                regmap_write(priv->slcr, SLCR_LVL_SHFTR_EN_OFFSET,
                             LVL_SHFTR_ENABLE_PL_TO_PS);

                /* deassert AXI interface resets */
                regmap_write(priv->slcr, SLCR_FPGA_RST_CTRL_OFFSET,
                             FPGA_RST_NONE_MASK);
        }

        return 0;
}
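
/* Report FPGA_MGR_STATE_OPERATING once the hardware signals PCFG_DONE. */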
static enum fpga_mgr_states zynq_fpga_ops_state(struct fpga_manager *mgr)
{
        int err;
        u32 intr_status;
        struct zynq_fpga_priv *priv;

        priv = mgr->priv;

        err = clk_enable(priv->clk);
        if (err)
                return FPGA_MGR_STATE_UNKNOWN;

        intr_status = zynq_fpga_read(priv, INT_STS_OFFSET);
        clk_disable(priv->clk);

        if (intr_status & IXR_PCFG_DONE_MASK)
                return FPGA_MGR_STATE_OPERATING;

        return FPGA_MGR_STATE_UNKNOWN;
}
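
/*
 * initial_header_size asks the core to pass at least 128 bytes of the image
 * to write_init(), which should be enough for the sync-word check above.
 */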
static const struct fpga_manager_ops zynq_fpga_ops = {
        .initial_header_size = 128,
        .state = zynq_fpga_ops_state,
        .write_init = zynq_fpga_ops_write_init,
        .write_sg = zynq_fpga_ops_write,
        .write_complete = zynq_fpga_ops_write_complete,
};
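
/*
 * Probe: map the devcfg registers, look up the SLCR syscon, claim the IRQ and
 * reference clock, unlock the device, and register with the FPGA manager
 * framework.
 */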
static int zynq_fpga_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct zynq_fpga_priv *priv;
        struct resource *res;
        int err;

        priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;
        spin_lock_init(&priv->dma_lock);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        priv->io_base = devm_ioremap_resource(dev, res);
        if (IS_ERR(priv->io_base))
                return PTR_ERR(priv->io_base);

        priv->slcr = syscon_regmap_lookup_by_phandle(dev->of_node,
                                                     "syscon");
        if (IS_ERR(priv->slcr)) {
                dev_err(dev, "unable to get zynq-slcr regmap\n");
                return PTR_ERR(priv->slcr);
        }

        init_completion(&priv->dma_done);

        priv->irq = platform_get_irq(pdev, 0);
        if (priv->irq < 0) {
                dev_err(dev, "No IRQ available\n");
                return priv->irq;
        }

        priv->clk = devm_clk_get(dev, "ref_clk");
        if (IS_ERR(priv->clk)) {
                dev_err(dev, "input clock not found\n");
                return PTR_ERR(priv->clk);
        }

        err = clk_prepare_enable(priv->clk);
        if (err) {
                dev_err(dev, "unable to enable clock\n");
                return err;
        }

        /* unlock the device */
        zynq_fpga_write(priv, UNLOCK_OFFSET, UNLOCK_MASK);

        zynq_fpga_set_irq(priv, 0);
        zynq_fpga_write(priv, INT_STS_OFFSET, IXR_ALL_MASK);
        err = devm_request_irq(dev, priv->irq, zynq_fpga_isr, 0, dev_name(dev),
                               priv);
        if (err) {
                dev_err(dev, "unable to request IRQ\n");
                clk_disable_unprepare(priv->clk);
                return err;
        }

        clk_disable(priv->clk);

        err = fpga_mgr_register(dev, "Xilinx Zynq FPGA Manager",
                                &zynq_fpga_ops, priv);
        if (err) {
                dev_err(dev, "unable to register FPGA manager\n");
                clk_unprepare(priv->clk);
                return err;
        }

        return 0;
}
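
/* Tear down: unregister the manager and release the prepared clock. */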
static int zynq_fpga_remove(struct platform_device *pdev)
{
        struct zynq_fpga_priv *priv;
        struct fpga_manager *mgr;

        mgr = platform_get_drvdata(pdev);
        priv = mgr->priv;

        fpga_mgr_unregister(&pdev->dev);

        clk_unprepare(priv->clk);

        return 0;
}

static const struct of_device_id zynq_fpga_of_match[] = {
        { .compatible = "xlnx,zynq-devcfg-1.0", },
        {},
};

MODULE_DEVICE_TABLE(of, zynq_fpga_of_match);

static struct platform_driver zynq_fpga_driver = {
        .probe = zynq_fpga_probe,
        .remove = zynq_fpga_remove,
        .driver = {
                .name = "zynq_fpga_manager",
                .of_match_table = of_match_ptr(zynq_fpga_of_match),
        },
};

module_platform_driver(zynq_fpga_driver);

MODULE_AUTHOR("Moritz Fischer <moritz.fischer@ettus.com>");
MODULE_AUTHOR("Michal Simek <michal.simek@xilinx.com>");
MODULE_DESCRIPTION("Xilinx Zynq FPGA Manager");
MODULE_LICENSE("GPL v2");