// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Exceet Electronics GmbH
 * Copyright (C) 2018 Bootlin
 *
 * Author: Boris Brezillon <boris.brezillon@bootlin.com>
 */
#include <linux/dmaengine.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

#include "internals.h"

#define SPI_MEM_MAX_BUSWIDTH		8
/**
 * spi_controller_dma_map_mem_op_data() - DMA-map the buffer attached to a
 *					  memory operation
 * @ctlr: the SPI controller requesting this dma_map()
 * @op: the memory operation containing the buffer to map
 * @sgt: a pointer to a non-initialized sg_table that will be filled by this
 *	 function
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares everything for you and provides a ready-to-use
 * sg_table. This function is not intended to be called from SPI drivers.
 * Only SPI controller drivers should use it.
 * Note that the caller must ensure the memory region pointed by
 * op->data.buf.{in,out} is DMA-able before calling this function.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
				       const struct spi_mem_op *op,
				       struct sg_table *sgt)
{
	struct device *dmadev;

	if (!op->data.nbytes)
		return -EINVAL;

	if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
		dmadev = ctlr->dma_tx->device->dev;
	else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
		dmadev = ctlr->dma_rx->device->dev;
	else
		dmadev = ctlr->dev.parent;

	if (!dmadev)
		return -EINVAL;

	return spi_map_buf(ctlr, dmadev, sgt, op->data.buf.in, op->data.nbytes,
			   op->data.dir == SPI_MEM_DATA_IN ?
			   DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_map_mem_op_data);
/**
 * spi_controller_dma_unmap_mem_op_data() - DMA-unmap the buffer attached to a
 *					    memory operation
 * @ctlr: the SPI controller requesting this dma_unmap()
 * @op: the memory operation containing the buffer to unmap
 * @sgt: a pointer to an sg_table previously initialized by
 *	 spi_controller_dma_map_mem_op_data()
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares things so that the CPU can access the
 * op->data.buf.{in,out} buffer again.
 *
 * This function is not intended to be called from SPI drivers. Only SPI
 * controller drivers should use it.
 *
 * This function should be called after the DMA operation has finished and is
 * only valid if the previous spi_controller_dma_map_mem_op_data() call
 * returned 0.
 */
void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
					  const struct spi_mem_op *op,
					  struct sg_table *sgt)
{
	struct device *dmadev;

	if (!op->data.nbytes)
		return;

	if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
		dmadev = ctlr->dma_tx->device->dev;
	else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
		dmadev = ctlr->dma_rx->device->dev;
	else
		dmadev = ctlr->dev.parent;

	spi_unmap_buf(ctlr, dmadev, sgt,
		      op->data.dir == SPI_MEM_DATA_IN ?
		      DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_unmap_mem_op_data);
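/*
 * Usage sketch (illustrative only, not part of this file): a hypothetical
 * controller driver's exec_op() callback could wrap a DMA transfer of
 * op->data with the two helpers above. The foo_*() names and the DMA
 * submission step are assumptions standing in for real driver code.
 *
 *	static int foo_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
 *	{
 *		struct spi_controller *ctlr = mem->spi->controller;
 *		struct sg_table sgt;
 *		int ret;
 *
 *		ret = spi_controller_dma_map_mem_op_data(ctlr, op, &sgt);
 *		if (ret)
 *			return ret;
 *
 *		ret = foo_issue_dma(ctlr, op, &sgt);
 *
 *		spi_controller_dma_unmap_mem_op_data(ctlr, op, &sgt);
 *
 *		return ret;
 *	}
 */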
static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx)
{
	u32 mode = mem->spi->mode;

	switch (buswidth) {
	case 1:
		return 0;
	case 2:
		if ((tx &&
		     (mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL))) ||
		    (!tx &&
		     (mode & (SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL))))
			return 0;
		break;
	case 4:
		if ((tx && (mode & (SPI_TX_QUAD | SPI_TX_OCTAL))) ||
		    (!tx && (mode & (SPI_RX_QUAD | SPI_RX_OCTAL))))
			return 0;
		break;
	case 8:
		if ((tx && (mode & SPI_TX_OCTAL)) ||
		    (!tx && (mode & SPI_RX_OCTAL)))
			return 0;
		break;
	default:
		break;
	}

	return -ENOTSUPP;
}
static bool spi_mem_check_buswidth(struct spi_mem *mem,
				   const struct spi_mem_op *op)
{
	if (spi_check_buswidth_req(mem, op->cmd.buswidth, true))
		return false;

	if (op->addr.nbytes &&
	    spi_check_buswidth_req(mem, op->addr.buswidth, true))
		return false;

	if (op->dummy.nbytes &&
	    spi_check_buswidth_req(mem, op->dummy.buswidth, true))
		return false;

	if (op->data.dir != SPI_MEM_NO_DATA &&
	    spi_check_buswidth_req(mem, op->data.buswidth,
				   op->data.dir == SPI_MEM_DATA_OUT))
		return false;

	return true;
}
bool spi_mem_dtr_supports_op(struct spi_mem *mem,
			     const struct spi_mem_op *op)
{
	if (op->cmd.nbytes != 2)
		return false;

	return spi_mem_check_buswidth(mem, op);
}
EXPORT_SYMBOL_GPL(spi_mem_dtr_supports_op);
bool spi_mem_default_supports_op(struct spi_mem *mem,
				 const struct spi_mem_op *op)
{
	if (op->cmd.dtr || op->addr.dtr || op->dummy.dtr || op->data.dtr)
		return false;

	if (op->cmd.nbytes != 1)
		return false;

	return spi_mem_check_buswidth(mem, op);
}
EXPORT_SYMBOL_GPL(spi_mem_default_supports_op);
static bool spi_mem_buswidth_is_valid(u8 buswidth)
{
	if (hweight8(buswidth) > 1 || buswidth > SPI_MEM_MAX_BUSWIDTH)
		return false;

	return true;
}
static int spi_mem_check_op(const struct spi_mem_op *op)
{
	if (!op->cmd.buswidth || !op->cmd.nbytes)
		return -EINVAL;

	if ((op->addr.nbytes && !op->addr.buswidth) ||
	    (op->dummy.nbytes && !op->dummy.buswidth) ||
	    (op->data.nbytes && !op->data.buswidth))
		return -EINVAL;

	if (!spi_mem_buswidth_is_valid(op->cmd.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->addr.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->dummy.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->data.buswidth))
		return -EINVAL;

	return 0;
}
static bool spi_mem_internal_supports_op(struct spi_mem *mem,
					 const struct spi_mem_op *op)
{
	struct spi_controller *ctlr = mem->spi->controller;

	if (ctlr->mem_ops && ctlr->mem_ops->supports_op)
		return ctlr->mem_ops->supports_op(mem, op);

	return spi_mem_default_supports_op(mem, op);
}
/**
 * spi_mem_supports_op() - Check if a memory device and the controller it is
 *			   connected to support a specific memory operation
 * @mem: the SPI memory
 * @op: the memory operation to check
 *
 * Some controllers only support Single or Dual IOs, others might only
 * support specific opcodes, or it can even be that the controller and device
 * both support Quad IOs but the hardware prevents you from using them because
 * only 2 IO lines are connected.
 *
 * This function checks whether a specific operation is supported.
 *
 * Return: true if @op is supported, false otherwise.
 */
bool spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	if (spi_mem_check_op(op))
		return false;

	return spi_mem_internal_supports_op(mem, op);
}
EXPORT_SYMBOL_GPL(spi_mem_supports_op);
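/*
 * Usage sketch (illustrative only): a SPI mem user can describe an operation
 * with the SPI_MEM_OP() helpers from <linux/spi/spi-mem.h> and probe support
 * before relying on it. The 0x6b opcode, dummy length, @mem and @buf below
 * are example assumptions, not values mandated by this file.
 *
 *	struct spi_mem_op op =
 *		SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),
 *			   SPI_MEM_OP_ADDR(3, 0, 1),
 *			   SPI_MEM_OP_DUMMY(1, 1),
 *			   SPI_MEM_OP_DATA_IN(256, buf, 4));
 *
 *	if (!spi_mem_supports_op(mem, &op))
 *		return -EOPNOTSUPP;
 */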
static int spi_mem_access_start(struct spi_mem *mem)
{
	struct spi_controller *ctlr = mem->spi->controller;

	/*
	 * Flush the message queue before executing our SPI memory
	 * operation to prevent preemption of regular SPI transfers.
	 */
	spi_flush_queue(ctlr);

	if (ctlr->auto_runtime_pm) {
		int ret;

		ret = pm_runtime_get_sync(ctlr->dev.parent);
		if (ret < 0) {
			pm_runtime_put_noidle(ctlr->dev.parent);
			dev_err(&ctlr->dev, "Failed to power device: %d\n",
				ret);
			return ret;
		}
	}

	mutex_lock(&ctlr->bus_lock_mutex);
	mutex_lock(&ctlr->io_mutex);

	return 0;
}
static void spi_mem_access_end(struct spi_mem *mem)
{
	struct spi_controller *ctlr = mem->spi->controller;

	mutex_unlock(&ctlr->io_mutex);
	mutex_unlock(&ctlr->bus_lock_mutex);

	if (ctlr->auto_runtime_pm)
		pm_runtime_put(ctlr->dev.parent);
}
/**
 * spi_mem_exec_op() - Execute a memory operation
 * @mem: the SPI memory
 * @op: the memory operation to execute
 *
 * Executes a memory operation.
 *
 * This function first checks that @op is supported and then tries to execute
 * it.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	unsigned int tmpbufsize, xferpos = 0, totalxferlen = 0;
	struct spi_controller *ctlr = mem->spi->controller;
	struct spi_transfer xfers[4] = { };
	struct spi_message msg;
	u8 *tmpbuf;
	int ret;

	ret = spi_mem_check_op(op);
	if (ret)
		return ret;

	if (!spi_mem_internal_supports_op(mem, op))
		return -ENOTSUPP;

	if (ctlr->mem_ops && !mem->spi->cs_gpiod) {
		ret = spi_mem_access_start(mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->exec_op(mem, op);

		spi_mem_access_end(mem);

		/*
		 * Some controllers only optimize specific paths (typically the
		 * read path) and expect the core to use the regular SPI
		 * interface in other cases.
		 */
		if (!ret || ret != -ENOTSUPP)
			return ret;
	}
	tmpbufsize = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;

	/*
	 * Allocate a buffer to transmit the CMD, ADDR cycles with kmalloc() so
	 * we're guaranteed that this buffer is DMA-able, as required by the
	 * SPI layer.
	 */
	tmpbuf = kzalloc(tmpbufsize, GFP_KERNEL | GFP_DMA);
	if (!tmpbuf)
		return -ENOMEM;

	spi_message_init(&msg);

	tmpbuf[0] = op->cmd.opcode;
	xfers[xferpos].tx_buf = tmpbuf;
	xfers[xferpos].len = op->cmd.nbytes;
	xfers[xferpos].tx_nbits = op->cmd.buswidth;
	spi_message_add_tail(&xfers[xferpos], &msg);
	xferpos++;
	totalxferlen++;
	if (op->addr.nbytes) {
		int i;

		for (i = 0; i < op->addr.nbytes; i++)
			tmpbuf[i + 1] = op->addr.val >>
					(8 * (op->addr.nbytes - i - 1));

		xfers[xferpos].tx_buf = tmpbuf + 1;
		xfers[xferpos].len = op->addr.nbytes;
		xfers[xferpos].tx_nbits = op->addr.buswidth;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->addr.nbytes;
	}
	if (op->dummy.nbytes) {
		memset(tmpbuf + op->addr.nbytes + 1, 0xff, op->dummy.nbytes);
		xfers[xferpos].tx_buf = tmpbuf + op->addr.nbytes + 1;
		xfers[xferpos].len = op->dummy.nbytes;
		xfers[xferpos].tx_nbits = op->dummy.buswidth;
		xfers[xferpos].dummy_data = 1;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->dummy.nbytes;
	}
	if (op->data.nbytes) {
		if (op->data.dir == SPI_MEM_DATA_IN) {
			xfers[xferpos].rx_buf = op->data.buf.in;
			xfers[xferpos].rx_nbits = op->data.buswidth;
		} else {
			xfers[xferpos].tx_buf = op->data.buf.out;
			xfers[xferpos].tx_nbits = op->data.buswidth;
		}

		xfers[xferpos].len = op->data.nbytes;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->data.nbytes;
	}

	ret = spi_sync(mem->spi, &msg);

	kfree(tmpbuf);

	if (ret)
		return ret;

	if (msg.actual_length != totalxferlen)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_exec_op);
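/*
 * Usage sketch (illustrative only): reading a JEDEC ID with opcode 0x9f
 * through spi_mem_exec_op(). The id[] buffer, @mem pointer and error
 * handling are assumptions; real callers typically do this from probe().
 *
 *	u8 id[3];
 *	struct spi_mem_op op =
 *		SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
 *			   SPI_MEM_OP_NO_ADDR,
 *			   SPI_MEM_OP_NO_DUMMY,
 *			   SPI_MEM_OP_DATA_IN(sizeof(id), id, 1));
 *	int ret = spi_mem_exec_op(mem, &op);
 */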
/**
 * spi_mem_get_name() - Return the SPI mem device name to be used by the
 *			upper layer if necessary
 * @mem: the SPI memory
 *
 * This function allows SPI mem users to retrieve the SPI mem device name.
 * It is useful if the upper layer needs to expose a custom name for
 * compatibility reasons.
 *
 * Return: a string containing the name of the memory device to be used
 *	   by the SPI mem user
 */
const char *spi_mem_get_name(struct spi_mem *mem)
{
	return mem->name;
}
EXPORT_SYMBOL_GPL(spi_mem_get_name);
/**
 * spi_mem_adjust_op_size() - Adjust the data size of a SPI mem operation to
 *			      match controller limitations
 * @mem: the SPI memory
 * @op: the operation to adjust
 *
 * Some controllers have FIFO limitations and must split a data transfer
 * operation into multiple ones, others require a specific alignment for
 * optimized accesses. This function allows SPI mem drivers to split a single
 * operation into multiple sub-operations when required.
 *
 * Return: a negative error code if the controller can't properly adjust @op,
 *	   0 otherwise. Note that @op->data.nbytes will be updated if @op
 *	   can't be handled in a single step.
 */
int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	struct spi_controller *ctlr = mem->spi->controller;
	size_t len;

	if (ctlr->mem_ops && ctlr->mem_ops->adjust_op_size)
		return ctlr->mem_ops->adjust_op_size(mem, op);

	if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
		len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;

		if (len > spi_max_transfer_size(mem->spi))
			return -EINVAL;

		op->data.nbytes = min3((size_t)op->data.nbytes,
				       spi_max_transfer_size(mem->spi),
				       spi_max_message_size(mem->spi) -
				       len);
		if (!op->data.nbytes)
			return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);
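/*
 * Usage sketch (illustrative only): a caller reading a large region can let
 * spi_mem_adjust_op_size() clamp op.data.nbytes and loop until everything
 * has been transferred. The base/done/remaining/buf names are assumptions.
 *
 *	while (remaining) {
 *		op.addr.val = base + done;
 *		op.data.buf.in = buf + done;
 *		op.data.nbytes = remaining;
 *
 *		ret = spi_mem_adjust_op_size(mem, &op);
 *		if (ret)
 *			return ret;
 *
 *		ret = spi_mem_exec_op(mem, &op);
 *		if (ret)
 *			return ret;
 *
 *		done += op.data.nbytes;
 *		remaining -= op.data.nbytes;
 *	}
 */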
static ssize_t spi_mem_no_dirmap_read(struct spi_mem_dirmap_desc *desc,
				      u64 offs, size_t len, void *buf)
{
	struct spi_mem_op op = desc->info.op_tmpl;
	int ret;

	op.addr.val = desc->info.offset + offs;
	op.data.buf.in = buf;
	op.data.nbytes = len;
	ret = spi_mem_adjust_op_size(desc->mem, &op);
	if (ret)
		return ret;

	ret = spi_mem_exec_op(desc->mem, &op);
	if (ret)
		return ret;

	return op.data.nbytes;
}
static ssize_t spi_mem_no_dirmap_write(struct spi_mem_dirmap_desc *desc,
				       u64 offs, size_t len, const void *buf)
{
	struct spi_mem_op op = desc->info.op_tmpl;
	int ret;

	op.addr.val = desc->info.offset + offs;
	op.data.buf.out = buf;
	op.data.nbytes = len;
	ret = spi_mem_adjust_op_size(desc->mem, &op);
	if (ret)
		return ret;

	ret = spi_mem_exec_op(desc->mem, &op);
	if (ret)
		return ret;

	return op.data.nbytes;
}
/**
 * spi_mem_dirmap_create() - Create a direct mapping descriptor
 * @mem: SPI mem device this direct mapping should be created for
 * @info: direct mapping information
 *
 * This function creates a direct mapping descriptor which can then be used
 * to access the memory using spi_mem_dirmap_read() or spi_mem_dirmap_write().
 * If the SPI controller driver does not support direct mapping, this function
 * falls back to an implementation using spi_mem_exec_op(), so that the caller
 * doesn't have to bother implementing a fallback on their own.
 *
 * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
 */
struct spi_mem_dirmap_desc *
spi_mem_dirmap_create(struct spi_mem *mem,
		      const struct spi_mem_dirmap_info *info)
{
	struct spi_controller *ctlr = mem->spi->controller;
	struct spi_mem_dirmap_desc *desc;
	int ret = -ENOTSUPP;

	/* Make sure the number of address cycles is between 1 and 8 bytes. */
	if (!info->op_tmpl.addr.nbytes || info->op_tmpl.addr.nbytes > 8)
		return ERR_PTR(-EINVAL);

	/* data.dir should either be SPI_MEM_DATA_IN or SPI_MEM_DATA_OUT. */
	if (info->op_tmpl.data.dir == SPI_MEM_NO_DATA)
		return ERR_PTR(-EINVAL);

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return ERR_PTR(-ENOMEM);

	desc->mem = mem;
	desc->info = *info;
	if (ctlr->mem_ops && ctlr->mem_ops->dirmap_create)
		ret = ctlr->mem_ops->dirmap_create(desc);

	if (ret) {
		desc->nodirmap = true;
		if (!spi_mem_supports_op(desc->mem, &desc->info.op_tmpl))
			ret = -ENOTSUPP;
		else
			ret = 0;
	}

	if (ret) {
		kfree(desc);
		return ERR_PTR(ret);
	}

	return desc;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_create);
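/*
 * Usage sketch (illustrative only): creating a read direct mapping covering
 * the first 16 MiB of a hypothetical flash, using an example 1-1-4 read
 * operation as the template. Opcode, dummy length and @mem are assumptions.
 *
 *	struct spi_mem_dirmap_info info = {
 *		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),
 *				      SPI_MEM_OP_ADDR(3, 0, 1),
 *				      SPI_MEM_OP_DUMMY(1, 1),
 *				      SPI_MEM_OP_DATA_IN(0, NULL, 4)),
 *		.offset = 0,
 *		.length = SZ_16M,
 *	};
 *	struct spi_mem_dirmap_desc *desc;
 *
 *	desc = spi_mem_dirmap_create(mem, &info);
 *	if (IS_ERR(desc))
 *		return PTR_ERR(desc);
 */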
/**
 * spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor
 * @desc: the direct mapping descriptor to destroy
 *
 * This function destroys a direct mapping descriptor previously created by
 * spi_mem_dirmap_create().
 */
void spi_mem_dirmap_destroy(struct spi_mem_dirmap_desc *desc)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;

	if (!desc->nodirmap && ctlr->mem_ops && ctlr->mem_ops->dirmap_destroy)
		ctlr->mem_ops->dirmap_destroy(desc);

	kfree(desc);
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_destroy);
static void devm_spi_mem_dirmap_release(struct device *dev, void *res)
{
	struct spi_mem_dirmap_desc *desc = *(struct spi_mem_dirmap_desc **)res;

	spi_mem_dirmap_destroy(desc);
}
/**
 * devm_spi_mem_dirmap_create() - Create a direct mapping descriptor and attach
 *				  it to a device
 * @dev: device the dirmap desc will be attached to
 * @mem: SPI mem device this direct mapping should be created for
 * @info: direct mapping information
 *
 * devm_ variant of the spi_mem_dirmap_create() function. See
 * spi_mem_dirmap_create() for more details.
 *
 * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
 */
struct spi_mem_dirmap_desc *
devm_spi_mem_dirmap_create(struct device *dev, struct spi_mem *mem,
			   const struct spi_mem_dirmap_info *info)
{
	struct spi_mem_dirmap_desc **ptr, *desc;

	ptr = devres_alloc(devm_spi_mem_dirmap_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	desc = spi_mem_dirmap_create(mem, info);
	if (IS_ERR(desc)) {
		devres_free(ptr);
	} else {
		*ptr = desc;
		devres_add(dev, ptr);
	}

	return desc;
}
EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_create);
static int devm_spi_mem_dirmap_match(struct device *dev, void *res, void *data)
{
	struct spi_mem_dirmap_desc **ptr = res;

	if (WARN_ON(!ptr || !*ptr))
		return 0;

	return *ptr == data;
}
/**
 * devm_spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor attached
 *				   to a device
 * @dev: device the dirmap desc is attached to
 * @desc: the direct mapping descriptor to destroy
 *
 * devm_ variant of the spi_mem_dirmap_destroy() function. See
 * spi_mem_dirmap_destroy() for more details.
 */
void devm_spi_mem_dirmap_destroy(struct device *dev,
				 struct spi_mem_dirmap_desc *desc)
{
	devres_release(dev, devm_spi_mem_dirmap_release,
		       devm_spi_mem_dirmap_match, desc);
}
EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_destroy);
/**
 * spi_mem_dirmap_read() - Read data through a direct mapping
 * @desc: direct mapping descriptor
 * @offs: offset to start reading from. Note that this is not an absolute
 *	  offset, but the offset within the direct mapping which already has
 *	  its own offset
 * @len: length in bytes
 * @buf: destination buffer. This buffer must be DMA-able
 *
 * This function reads data from a memory device using a direct mapping
 * previously instantiated with spi_mem_dirmap_create().
 *
 * Return: the amount of data read from the memory device or a negative error
 * code. Note that the returned size might be smaller than @len, and the caller
 * is responsible for calling spi_mem_dirmap_read() again when that happens.
 */
ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
			    u64 offs, size_t len, void *buf)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;
	ssize_t ret;

	if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN)
		return -EINVAL;

	if (!len)
		return 0;

	if (desc->nodirmap) {
		ret = spi_mem_no_dirmap_read(desc, offs, len, buf);
	} else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_read) {
		ret = spi_mem_access_start(desc->mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->dirmap_read(desc, offs, len, buf);

		spi_mem_access_end(desc->mem);
	} else {
		ret = -ENOTSUPP;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_read);
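/*
 * Usage sketch (illustrative only): since spi_mem_dirmap_read() may return a
 * short count, callers typically loop until @len bytes have been read. The
 * done/offs/len/buf names are assumptions.
 *
 *	size_t done = 0;
 *
 *	while (done < len) {
 *		ssize_t ret = spi_mem_dirmap_read(desc, offs + done,
 *						  len - done, buf + done);
 *		if (ret < 0)
 *			return ret;
 *		if (!ret)
 *			break;
 *		done += ret;
 *	}
 */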
/**
 * spi_mem_dirmap_write() - Write data through a direct mapping
 * @desc: direct mapping descriptor
 * @offs: offset to start writing from. Note that this is not an absolute
 *	  offset, but the offset within the direct mapping which already has
 *	  its own offset
 * @len: length in bytes
 * @buf: source buffer. This buffer must be DMA-able
 *
 * This function writes data to a memory device using a direct mapping
 * previously instantiated with spi_mem_dirmap_create().
 *
 * Return: the amount of data written to the memory device or a negative error
 * code. Note that the returned size might be smaller than @len, and the caller
 * is responsible for calling spi_mem_dirmap_write() again when that happens.
 */
ssize_t spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc,
			     u64 offs, size_t len, const void *buf)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;
	ssize_t ret;

	if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_OUT)
		return -EINVAL;

	if (!len)
		return 0;

	if (desc->nodirmap) {
		ret = spi_mem_no_dirmap_write(desc, offs, len, buf);
	} else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_write) {
		ret = spi_mem_access_start(desc->mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->dirmap_write(desc, offs, len, buf);

		spi_mem_access_end(desc->mem);
	} else {
		ret = -ENOTSUPP;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_write);
static inline struct spi_mem_driver *to_spi_mem_drv(struct device_driver *drv)
{
	return container_of(drv, struct spi_mem_driver, spidrv.driver);
}
static int spi_mem_probe(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_controller *ctlr = spi->controller;
	struct spi_mem *mem;

	mem = devm_kzalloc(&spi->dev, sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mem->spi = spi;

	if (ctlr->mem_ops && ctlr->mem_ops->get_name)
		mem->name = ctlr->mem_ops->get_name(mem);
	else
		mem->name = dev_name(&spi->dev);

	if (IS_ERR_OR_NULL(mem->name))
		return PTR_ERR_OR_ZERO(mem->name);

	spi_set_drvdata(spi, mem);

	return memdrv->probe(mem);
}
static int spi_mem_remove(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem = spi_get_drvdata(spi);

	if (memdrv->remove)
		return memdrv->remove(mem);

	return 0;
}
static void spi_mem_shutdown(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem = spi_get_drvdata(spi);

	if (memdrv->shutdown)
		memdrv->shutdown(mem);
}
/**
 * spi_mem_driver_register_with_owner() - Register a SPI memory driver
 * @memdrv: the SPI memory driver to register
 * @owner: the owner of this driver
 *
 * Registers a SPI memory driver.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_driver_register_with_owner(struct spi_mem_driver *memdrv,
				       struct module *owner)
{
	memdrv->spidrv.probe = spi_mem_probe;
	memdrv->spidrv.remove = spi_mem_remove;
	memdrv->spidrv.shutdown = spi_mem_shutdown;

	return __spi_register_driver(owner, &memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_register_with_owner);
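/*
 * Usage sketch (illustrative only): a minimal SPI mem driver skeleton
 * registered through the module_spi_mem_driver() helper from
 * <linux/spi/spi-mem.h>. The foo_*() callbacks and the compatible string
 * are assumptions standing in for a real driver.
 *
 *	static const struct of_device_id foo_of_ids[] = {
 *		{ .compatible = "vendor,foo-flash" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(of, foo_of_ids);
 *
 *	static struct spi_mem_driver foo_mem_driver = {
 *		.spidrv = {
 *			.driver = {
 *				.name = "foo-flash",
 *				.of_match_table = foo_of_ids,
 *			},
 *		},
 *		.probe = foo_probe,
 *		.remove = foo_remove,
 *	};
 *	module_spi_mem_driver(foo_mem_driver);
 */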
/**
 * spi_mem_driver_unregister() - Unregister a SPI memory driver
 * @memdrv: the SPI memory driver to unregister
 *
 * Unregisters a SPI memory driver.
 */
void spi_mem_driver_unregister(struct spi_mem_driver *memdrv)
{
	spi_unregister_driver(&memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_unregister);