1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2020 Intel Corporation. All rights reserved. */
3 #include <uapi/linux/cxl_mem.h>
4 #include <linux/security.h>
5 #include <linux/debugfs.h>
6 #include <linux/module.h>
7 #include <linux/sizes.h>
8 #include <linux/mutex.h>
9 #include <linux/cdev.h>
10 #include <linux/idr.h>
11 #include <linux/pci.h>
13 #include <linux/io-64-nonatomic-lo-hi.h>
21 * This implements the PCI exclusive functionality for a CXL device as it is
22 * defined by the Compute Express Link specification. CXL devices may surface
 * certain memory functionality even if the device isn't CXL enabled.
25 * The driver has several responsibilities, mainly:
26 * - Create the memX device and register on the CXL bus.
27 * - Enumerate device's register interface and map them.
28 * - Probe the device attributes to establish sysfs interface.
29 * - Provide an IOCTL interface to userspace to communicate with the device for
30 * things like firmware update.
33 #define cxl_doorbell_busy(cxlm) \
34 (readl((cxlm)->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET) & \
35 CXLDEV_MBOX_CTRL_DOORBELL)
37 /* CXL 2.0 - 8.2.8.4 */
#define CXL_MAILBOX_TIMEOUT_MS (2 * HZ) /* 2 seconds; the value is in jiffies despite the _MS suffix */
41 CXL_MBOX_OP_INVALID = 0x0000,
42 CXL_MBOX_OP_RAW = CXL_MBOX_OP_INVALID,
43 CXL_MBOX_OP_GET_FW_INFO = 0x0200,
44 CXL_MBOX_OP_ACTIVATE_FW = 0x0202,
45 CXL_MBOX_OP_GET_SUPPORTED_LOGS = 0x0400,
46 CXL_MBOX_OP_GET_LOG = 0x0401,
47 CXL_MBOX_OP_IDENTIFY = 0x4000,
48 CXL_MBOX_OP_GET_PARTITION_INFO = 0x4100,
49 CXL_MBOX_OP_SET_PARTITION_INFO = 0x4101,
50 CXL_MBOX_OP_GET_LSA = 0x4102,
51 CXL_MBOX_OP_SET_LSA = 0x4103,
52 CXL_MBOX_OP_GET_HEALTH_INFO = 0x4200,
53 CXL_MBOX_OP_SET_SHUTDOWN_STATE = 0x4204,
54 CXL_MBOX_OP_SCAN_MEDIA = 0x4304,
55 CXL_MBOX_OP_GET_SCAN_MEDIA = 0x4305,
56 CXL_MBOX_OP_MAX = 0x10000
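/*
 * Illustrative note (not part of the original listing): mailbox opcodes encode
 * the command set in the high byte and the command within that set in the low
 * byte, which is why cxl_is_security_command() below can classify an opcode
 * with a simple "opcode >> 8" comparison. For example:
 *
 *	CXL_MBOX_OP_GET_LOG = 0x0401  ->  command set 0x04, command 0x01
 *	CXL_MBOX_OP_GET_LSA = 0x4102  ->  command set 0x41, command 0x02
 */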
60 * struct mbox_cmd - A command to be submitted to hardware.
61 * @opcode: (input) The command set and command submitted to hardware.
62 * @payload_in: (input) Pointer to the input payload.
 * @payload_out: (output) Pointer to the output payload. Must be allocated by
 *               the caller.
65 * @size_in: (input) Number of bytes to load from @payload_in.
66 * @size_out: (input) Max number of bytes loaded into @payload_out.
 *            (output) Number of bytes generated by the device. For fixed size
 *            output commands this is always expected to be deterministic. For
 *            variable sized output commands, it tells the exact number of bytes
 *            written.
71 * @return_code: (output) Error code returned from hardware.
73 * This is the primary mechanism used to send commands to the hardware.
74 * All the fields except @payload_* correspond exactly to the fields described in
 * the Command Register section of the CXL 2.0 spec, 8.2.8.4.5. @payload_in and
76 * @payload_out are written to, and read from the Command Payload Registers
77 * defined in CXL 2.0 8.2.8.4.8.
struct mbox_cmd {
	u16 opcode;
	void *payload_in;
	void *payload_out;
	size_t size_in;
	size_t size_out;
	u16 return_code;
#define CXL_MBOX_SUCCESS 0
};
89 static int cxl_mem_major;
90 static DEFINE_IDA(cxl_memdev_ida);
91 static DECLARE_RWSEM(cxl_memdev_rwsem);
92 static struct dentry *cxl_debugfs;
93 static bool cxl_raw_allow_all;
100 /* See CXL 2.0 Table 170. Get Log Input Payload */
101 static const uuid_t log_uuid[] = {
102 [CEL_UUID] = UUID_INIT(0xda9c0b5, 0xbf41, 0x4b78, 0x8f, 0x79, 0x96,
103 0xb1, 0x62, 0x3b, 0x3f, 0x17),
104 [VENDOR_DEBUG_UUID] = UUID_INIT(0xe1819d9, 0x11a9, 0x400c, 0x81, 0x1f,
105 0xd6, 0x07, 0x19, 0x40, 0x3d, 0x86),
109 * struct cxl_mem_command - Driver representation of a memory device command
110 * @info: Command information as it exists for the UAPI
111 * @opcode: The actual bits used for the mailbox protocol
 * @flags: Set of flags affecting driver behavior.
114 * * %CXL_CMD_FLAG_FORCE_ENABLE: In cases of error, commands with this flag
 *   will be enabled by the driver regardless of what hardware may have
 *   advertised.
118 * The cxl_mem_command is the driver's internal representation of commands that
119 * are supported by the driver. Some of these commands may not be supported by
120 * the hardware. The driver will use @info to validate the fields passed in by
121 * the user then submit the @opcode to the hardware.
123 * See struct cxl_command_info.
125 struct cxl_mem_command {
126 struct cxl_command_info info;
129 #define CXL_CMD_FLAG_NONE 0
130 #define CXL_CMD_FLAG_FORCE_ENABLE BIT(0)
133 #define CXL_CMD(_id, sin, sout, _flags) \
134 [CXL_MEM_COMMAND_ID_##_id] = { \
136 .id = CXL_MEM_COMMAND_ID_##_id, \
140 .opcode = CXL_MBOX_OP_##_id, \
 * This table defines the supported mailbox commands for the driver. Each entry
 * is built from the UAPI structure, and non-negative values as parameters in the
147 * table will be validated against the user's input. For example, if size_in is
148 * 0, and the user passed in 1, it is an error.
150 static struct cxl_mem_command mem_commands[CXL_MEM_COMMAND_ID_MAX] = {
151 CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE),
152 #ifdef CONFIG_CXL_MEM_RAW_COMMANDS
	CXL_CMD(RAW, ~0, ~0, 0),
#endif
155 CXL_CMD(GET_SUPPORTED_LOGS, 0, ~0, CXL_CMD_FLAG_FORCE_ENABLE),
156 CXL_CMD(GET_FW_INFO, 0, 0x50, 0),
157 CXL_CMD(GET_PARTITION_INFO, 0, 0x20, 0),
158 CXL_CMD(GET_LSA, 0x8, ~0, 0),
159 CXL_CMD(GET_HEALTH_INFO, 0, 0x12, 0),
160 CXL_CMD(GET_LOG, 0x18, ~0, CXL_CMD_FLAG_FORCE_ENABLE),
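/*
 * Illustrative sketch of what a single CXL_CMD() entry above roughly expands
 * to (field names follow struct cxl_mem_command / struct cxl_command_info;
 * this is a reading aid, not the literal preprocessor output):
 *
 *	[CXL_MEM_COMMAND_ID_GET_FW_INFO] = {
 *		.info = {
 *			.id = CXL_MEM_COMMAND_ID_GET_FW_INFO,
 *			.size_in = 0,
 *			.size_out = 0x50,
 *		},
 *		.opcode = CXL_MBOX_OP_GET_FW_INFO,
 *		.flags = 0,
 *	},
 *
 * A size of ~0 marks a variable-sized payload that is validated at runtime
 * rather than against a fixed length.
 */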
164 * Commands that RAW doesn't permit. The rationale for each:
166 * CXL_MBOX_OP_ACTIVATE_FW: Firmware activation requires adjustment /
167 * coordination of transaction timeout values at the root bridge level.
169 * CXL_MBOX_OP_SET_PARTITION_INFO: The device memory map may change live
170 * and needs to be coordinated with HDM updates.
172 * CXL_MBOX_OP_SET_LSA: The label storage area may be cached by the
 * driver and any writes from userspace invalidate those contents.
175 * CXL_MBOX_OP_SET_SHUTDOWN_STATE: Set shutdown state assumes no writes
 * to the device after it is marked clean; userspace cannot make that
 * assurance.
179 * CXL_MBOX_OP_[GET_]SCAN_MEDIA: The kernel provides a native error list that
180 * is kept up to date with patrol notifications and error management.
182 static u16 cxl_disabled_raw_commands[] = {
183 CXL_MBOX_OP_ACTIVATE_FW,
184 CXL_MBOX_OP_SET_PARTITION_INFO,
	CXL_MBOX_OP_SET_LSA,
	CXL_MBOX_OP_SET_SHUTDOWN_STATE,
187 CXL_MBOX_OP_SCAN_MEDIA,
188 CXL_MBOX_OP_GET_SCAN_MEDIA,
192 * Command sets that RAW doesn't permit. All opcodes in this set are
193 * disabled because they pass plain text security payloads over the
194 * user/kernel boundary. This functionality is intended to be wrapped
195 * behind the keys ABI which allows for encrypted payloads in the UAPI
197 static u8 security_command_sets[] = {
	0x44, /* Sanitize */
	0x45, /* Persistent Memory Data-at-rest Security */
200 0x46, /* Security Passthrough */
203 #define cxl_for_each_cmd(cmd) \
204 for ((cmd) = &mem_commands[0]; \
205 ((cmd) - mem_commands) < ARRAY_SIZE(mem_commands); (cmd)++)
207 #define cxl_cmd_count ARRAY_SIZE(mem_commands)
209 static int cxl_mem_wait_for_doorbell(struct cxl_mem *cxlm)
211 const unsigned long start = jiffies;
212 unsigned long end = start;
214 while (cxl_doorbell_busy(cxlm)) {
217 if (time_after(end, start + CXL_MAILBOX_TIMEOUT_MS)) {
218 /* Check again in case preempted before timeout test */
219 if (!cxl_doorbell_busy(cxlm))
226 dev_dbg(&cxlm->pdev->dev, "Doorbell wait took %dms",
227 jiffies_to_msecs(end) - jiffies_to_msecs(start));
231 static bool cxl_is_security_command(u16 opcode)
235 for (i = 0; i < ARRAY_SIZE(security_command_sets); i++)
236 if (security_command_sets[i] == (opcode >> 8))
241 static void cxl_mem_mbox_timeout(struct cxl_mem *cxlm,
242 struct mbox_cmd *mbox_cmd)
244 struct device *dev = &cxlm->pdev->dev;
246 dev_dbg(dev, "Mailbox command (opcode: %#x size: %zub) timed out\n",
247 mbox_cmd->opcode, mbox_cmd->size_in);
251 * __cxl_mem_mbox_send_cmd() - Execute a mailbox command
252 * @cxlm: The CXL memory device to communicate with.
253 * @mbox_cmd: Command to send to the memory device.
255 * Context: Any context. Expects mbox_mutex to be held.
256 * Return: -ETIMEDOUT if timeout occurred waiting for completion. 0 on success.
 *         Caller should check the return code in @mbox_cmd to make sure it
 *         succeeded.
 * This is a generic form of the CXL mailbox send command, and it only uses the
261 * registers defined by the mailbox capability ID - CXL 2.0 8.2.8.4. Memory
262 * devices, and perhaps other types of CXL devices may have further information
263 * available upon error conditions. Driver facilities wishing to send mailbox
264 * commands should use the wrapper command.
266 * The CXL spec allows for up to two mailboxes. The intention is for the primary
267 * mailbox to be OS controlled and the secondary mailbox to be used by system
268 * firmware. This allows the OS and firmware to communicate with the device and
 * not need to coordinate with each other. The driver only uses the primary
 * mailbox.
272 static int __cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm,
273 struct mbox_cmd *mbox_cmd)
275 void __iomem *payload = cxlm->regs.mbox + CXLDEV_MBOX_PAYLOAD_OFFSET;
276 u64 cmd_reg, status_reg;
280 lockdep_assert_held(&cxlm->mbox_mutex);
283 * Here are the steps from 8.2.8.4 of the CXL 2.0 spec.
284 * 1. Caller reads MB Control Register to verify doorbell is clear
285 * 2. Caller writes Command Register
286 * 3. Caller writes Command Payload Registers if input payload is non-empty
287 * 4. Caller writes MB Control Register to set doorbell
288 * 5. Caller either polls for doorbell to be clear or waits for interrupt if configured
289 * 6. Caller reads MB Status Register to fetch Return code
290 * 7. If command successful, Caller reads Command Register to get Payload Length
291 * 8. If output payload is non-empty, host reads Command Payload Registers
293 * Hardware is free to do whatever it wants before the doorbell is rung,
294 * and isn't allowed to change anything after it clears the doorbell. As
295 * such, steps 2 and 3 can happen in any order, and steps 6, 7, 8 can
296 * also happen in any order (though some orders might not make sense).
300 if (cxl_doorbell_busy(cxlm)) {
301 dev_err_ratelimited(&cxlm->pdev->dev,
302 "Mailbox re-busy after acquiring\n");
	cmd_reg = FIELD_PREP(CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK,
			     mbox_cmd->opcode);
308 if (mbox_cmd->size_in) {
		if (WARN_ON(!mbox_cmd->payload_in))
			return -EINVAL;

		cmd_reg |= FIELD_PREP(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK,
				      mbox_cmd->size_in);
314 memcpy_toio(payload, mbox_cmd->payload_in, mbox_cmd->size_in);
318 writeq(cmd_reg, cxlm->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);
321 dev_dbg(&cxlm->pdev->dev, "Sending command\n");
322 writel(CXLDEV_MBOX_CTRL_DOORBELL,
323 cxlm->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
326 rc = cxl_mem_wait_for_doorbell(cxlm);
327 if (rc == -ETIMEDOUT) {
328 cxl_mem_mbox_timeout(cxlm, mbox_cmd);
333 status_reg = readq(cxlm->regs.mbox + CXLDEV_MBOX_STATUS_OFFSET);
334 mbox_cmd->return_code =
335 FIELD_GET(CXLDEV_MBOX_STATUS_RET_CODE_MASK, status_reg);
337 if (mbox_cmd->return_code != 0) {
		dev_dbg(&cxlm->pdev->dev, "Mailbox operation had an error\n");
		/* A device command error is reported via @return_code, not the call's rc */
		return 0;
	}
343 cmd_reg = readq(cxlm->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);
344 out_len = FIELD_GET(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK, cmd_reg);
347 if (out_len && mbox_cmd->payload_out) {
349 * Sanitize the copy. If hardware misbehaves, out_len per the
350 * spec can actually be greater than the max allowed size (21
351 * bits available but spec defined 1M max). The caller also may
	 * have requested less data than the hardware supplied even within spec.
355 size_t n = min3(mbox_cmd->size_out, cxlm->payload_size, out_len);
357 memcpy_fromio(mbox_cmd->payload_out, payload, n);
358 mbox_cmd->size_out = n;
360 mbox_cmd->size_out = 0;
367 * cxl_mem_mbox_get() - Acquire exclusive access to the mailbox.
368 * @cxlm: The memory device to gain access to.
370 * Context: Any context. Takes the mbox_mutex.
371 * Return: 0 if exclusive access was acquired.
373 static int cxl_mem_mbox_get(struct cxl_mem *cxlm)
375 struct device *dev = &cxlm->pdev->dev;
379 mutex_lock_io(&cxlm->mbox_mutex);
382 * XXX: There is some amount of ambiguity in the 2.0 version of the spec
383 * around the mailbox interface ready (8.2.8.5.1.1). The purpose of the
384 * bit is to allow firmware running on the device to notify the driver
385 * that it's ready to receive commands. It is unclear if the bit needs
 * to be read before each mailbox transaction, i.e. whether the firmware can switch
387 * it on and off as needed. Second, there is no defined timeout for
388 * mailbox ready, like there is for the doorbell interface.
 * Assumptions:
 * 1. The firmware might toggle the Mailbox Interface Ready bit, check
392 * it for every command.
394 * 2. If the doorbell is clear, the firmware should have first set the
395 * Mailbox Interface Ready bit. Therefore, waiting for the doorbell
396 * to be ready is sufficient.
398 rc = cxl_mem_wait_for_doorbell(cxlm);
	if (rc) {
		dev_warn(dev, "Mailbox interface not ready\n");
		goto out;
	}
404 md_status = readq(cxlm->regs.memdev + CXLMDEV_STATUS_OFFSET);
405 if (!(md_status & CXLMDEV_MBOX_IF_READY && CXLMDEV_READY(md_status))) {
406 dev_err(dev, "mbox: reported doorbell ready, but not mbox ready\n");
412 * Hardware shouldn't allow a ready status but also have failure bits
 * set. Spit out an error; this should be reported as a bug.
416 if (md_status & CXLMDEV_DEV_FATAL) {
417 dev_err(dev, "mbox: reported ready, but fatal\n");
420 if (md_status & CXLMDEV_FW_HALT) {
421 dev_err(dev, "mbox: reported ready, but halted\n");
424 if (CXLMDEV_RESET_NEEDED(md_status)) {
425 dev_err(dev, "mbox: reported ready, but reset needed\n");
433 mutex_unlock(&cxlm->mbox_mutex);
438 * cxl_mem_mbox_put() - Release exclusive access to the mailbox.
439 * @cxlm: The CXL memory device to communicate with.
441 * Context: Any context. Expects mbox_mutex to be held.
443 static void cxl_mem_mbox_put(struct cxl_mem *cxlm)
445 mutex_unlock(&cxlm->mbox_mutex);
449 * handle_mailbox_cmd_from_user() - Dispatch a mailbox command for userspace.
450 * @cxlm: The CXL memory device to communicate with.
451 * @cmd: The validated command.
452 * @in_payload: Pointer to userspace's input payload.
453 * @out_payload: Pointer to userspace's output payload.
454 * @size_out: (Input) Max payload size to copy out.
455 * (Output) Payload size hardware generated.
456 * @retval: Hardware generated return code from the operation.
459 * * %0 - Mailbox transaction succeeded. This implies the mailbox
 *   protocol completed successfully, not that the operation itself
 *   was successful.
462 * * %-ENOMEM - Couldn't allocate a bounce buffer.
463 * * %-EFAULT - Something happened with copy_to/from_user.
464 * * %-EINTR - Mailbox acquisition interrupted.
465 * * %-EXXX - Transaction level failures.
467 * Creates the appropriate mailbox command and dispatches it on behalf of a
468 * userspace request. The input and output payloads are copied between
 * userspace and the kernel.
 *
 * See cxl_send_cmd().
473 static int handle_mailbox_cmd_from_user(struct cxl_mem *cxlm,
474 const struct cxl_mem_command *cmd,
475 u64 in_payload, u64 out_payload,
476 s32 *size_out, u32 *retval)
478 struct device *dev = &cxlm->pdev->dev;
479 struct mbox_cmd mbox_cmd = {
480 .opcode = cmd->opcode,
481 .size_in = cmd->info.size_in,
482 .size_out = cmd->info.size_out,
486 if (cmd->info.size_out) {
487 mbox_cmd.payload_out = kvzalloc(cmd->info.size_out, GFP_KERNEL);
488 if (!mbox_cmd.payload_out)
492 if (cmd->info.size_in) {
493 mbox_cmd.payload_in = vmemdup_user(u64_to_user_ptr(in_payload),
495 if (IS_ERR(mbox_cmd.payload_in)) {
496 kvfree(mbox_cmd.payload_out);
497 return PTR_ERR(mbox_cmd.payload_in);
501 rc = cxl_mem_mbox_get(cxlm);
506 "Submitting %s command for user\n"
509 cxl_command_names[cmd->info.id].name, mbox_cmd.opcode,
512 dev_WARN_ONCE(dev, cmd->info.id == CXL_MEM_COMMAND_ID_RAW,
513 "raw command path used\n");
515 rc = __cxl_mem_mbox_send_cmd(cxlm, &mbox_cmd);
516 cxl_mem_mbox_put(cxlm);
521 * @size_out contains the max size that's allowed to be written back out
	 * to userspace. While the device may have written more output than this,
	 * the excess has to be ignored.
525 if (mbox_cmd.size_out) {
526 dev_WARN_ONCE(dev, mbox_cmd.size_out > *size_out,
527 "Invalid return size\n");
528 if (copy_to_user(u64_to_user_ptr(out_payload),
529 mbox_cmd.payload_out, mbox_cmd.size_out)) {
535 *size_out = mbox_cmd.size_out;
536 *retval = mbox_cmd.return_code;
539 kvfree(mbox_cmd.payload_in);
540 kvfree(mbox_cmd.payload_out);
544 static bool cxl_mem_raw_command_allowed(u16 opcode)
548 if (!IS_ENABLED(CONFIG_CXL_MEM_RAW_COMMANDS))
	if (security_locked_down(LOCKDOWN_PCI_ACCESS))
		return false;
554 if (cxl_raw_allow_all)
557 if (cxl_is_security_command(opcode))
560 for (i = 0; i < ARRAY_SIZE(cxl_disabled_raw_commands); i++)
561 if (cxl_disabled_raw_commands[i] == opcode)
568 * cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND.
569 * @cxlm: &struct cxl_mem device whose mailbox will be used.
570 * @send_cmd: &struct cxl_send_command copied in from userspace.
571 * @out_cmd: Sanitized and populated &struct cxl_mem_command.
574 * * %0 - @out_cmd is ready to send.
575 * * %-ENOTTY - Invalid command specified.
576 * * %-EINVAL - Reserved fields or invalid values were used.
577 * * %-ENOMEM - Input or output buffer wasn't sized properly.
578 * * %-EPERM - Attempted to use a protected command.
580 * The result of this command is a fully validated command in @out_cmd that is
581 * safe to send to the hardware.
583 * See handle_mailbox_cmd_from_user()
585 static int cxl_validate_cmd_from_user(struct cxl_mem *cxlm,
586 const struct cxl_send_command *send_cmd,
587 struct cxl_mem_command *out_cmd)
589 const struct cxl_command_info *info;
590 struct cxl_mem_command *c;
592 if (send_cmd->id == 0 || send_cmd->id >= CXL_MEM_COMMAND_ID_MAX)
596 * The user can never specify an input payload larger than what hardware
597 * supports, but output can be arbitrarily large (simply write out as
598 * much data as the hardware provides).
600 if (send_cmd->in.size > cxlm->payload_size)
604 * Checks are bypassed for raw commands but a WARN/taint will occur
605 * later in the callchain
607 if (send_cmd->id == CXL_MEM_COMMAND_ID_RAW) {
608 const struct cxl_mem_command temp = {
610 .id = CXL_MEM_COMMAND_ID_RAW,
612 .size_in = send_cmd->in.size,
613 .size_out = send_cmd->out.size,
615 .opcode = send_cmd->raw.opcode
618 if (send_cmd->raw.rsvd)
622 * Unlike supported commands, the output size of RAW commands
		 * gets passed along without further checking, so it must be
		 * validated here.
		 */
626 if (send_cmd->out.size > cxlm->payload_size)
629 if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode))
632 memcpy(out_cmd, &temp, sizeof(temp));
637 if (send_cmd->flags & ~CXL_MEM_COMMAND_FLAG_MASK)
643 if (send_cmd->in.rsvd || send_cmd->out.rsvd)
646 /* Convert user's command into the internal representation */
647 c = &mem_commands[send_cmd->id];
650 /* Check that the command is enabled for hardware */
651 if (!test_bit(info->id, cxlm->enabled_cmds))
654 /* Check the input buffer is the expected size */
655 if (info->size_in >= 0 && info->size_in != send_cmd->in.size)
658 /* Check the output buffer is at least large enough */
659 if (info->size_out >= 0 && send_cmd->out.size < info->size_out)
662 memcpy(out_cmd, c, sizeof(*c));
663 out_cmd->info.size_in = send_cmd->in.size;
665 * XXX: out_cmd->info.size_out will be controlled by the driver, and the
	 * specified number of bytes @send_cmd->out.size will be copied back out
	 * to userspace.
	 */
673 static int cxl_query_cmd(struct cxl_memdev *cxlmd,
674 struct cxl_mem_query_commands __user *q)
676 struct device *dev = &cxlmd->dev;
677 struct cxl_mem_command *cmd;
681 dev_dbg(dev, "Query IOCTL\n");
683 if (get_user(n_commands, &q->n_commands))
686 /* returns the total number if 0 elements are requested. */
	if (n_commands == 0)
		return put_user(cxl_cmd_count, &q->n_commands);
	/*
	 * otherwise, return min(n_commands, total commands) cxl_command_info
	 * structures.
	 */
694 cxl_for_each_cmd(cmd) {
695 const struct cxl_command_info *info = &cmd->info;
697 if (copy_to_user(&q->commands[j++], info, sizeof(*info)))
707 static int cxl_send_cmd(struct cxl_memdev *cxlmd,
708 struct cxl_send_command __user *s)
710 struct cxl_mem *cxlm = cxlmd->cxlm;
711 struct device *dev = &cxlmd->dev;
712 struct cxl_send_command send;
713 struct cxl_mem_command c;
716 dev_dbg(dev, "Send IOCTL\n");
718 if (copy_from_user(&send, s, sizeof(send)))
721 rc = cxl_validate_cmd_from_user(cxlmd->cxlm, &send, &c);
725 /* Prepare to handle a full payload for variable sized output */
726 if (c.info.size_out < 0)
727 c.info.size_out = cxlm->payload_size;
729 rc = handle_mailbox_cmd_from_user(cxlm, &c, send.in.payload,
730 send.out.payload, &send.out.size,
735 if (copy_to_user(s, &send, sizeof(send)))
741 static long __cxl_memdev_ioctl(struct cxl_memdev *cxlmd, unsigned int cmd,
745 case CXL_MEM_QUERY_COMMANDS:
746 return cxl_query_cmd(cxlmd, (void __user *)arg);
747 case CXL_MEM_SEND_COMMAND:
748 return cxl_send_cmd(cxlmd, (void __user *)arg);
754 static long cxl_memdev_ioctl(struct file *file, unsigned int cmd,
757 struct cxl_memdev *cxlmd = file->private_data;
760 down_read(&cxl_memdev_rwsem);
	if (cxlmd->cxlm)
		rc = __cxl_memdev_ioctl(cxlmd, cmd, arg);
763 up_read(&cxl_memdev_rwsem);
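/*
 * Hedged userspace sketch (not part of the driver): the two ioctls dispatched
 * above are typically paired; query with n_commands == 0 to learn the count,
 * query again with an allocated array, then send a command. Struct and ioctl
 * names come from <uapi/linux/cxl_mem.h>; "id_buf" is a hypothetical caller
 * buffer and error handling is omitted.
 *
 *	struct cxl_mem_query_commands probe = { .n_commands = 0 };
 *	struct cxl_mem_query_commands *q;
 *	__u8 id_buf[0x43];
 *	struct cxl_send_command send = {
 *		.id = CXL_MEM_COMMAND_ID_IDENTIFY,
 *		.out.size = sizeof(id_buf),
 *		.out.payload = (__u64)(uintptr_t)id_buf,
 *	};
 *	int fd = open("/dev/cxl/mem0", O_RDWR);
 *
 *	ioctl(fd, CXL_MEM_QUERY_COMMANDS, &probe);	// reports total command count
 *	q = calloc(1, sizeof(*q) + probe.n_commands * sizeof(q->commands[0]));
 *	q->n_commands = probe.n_commands;
 *	ioctl(fd, CXL_MEM_QUERY_COMMANDS, q);		// fills q->commands[]
 *	ioctl(fd, CXL_MEM_SEND_COMMAND, &send);		// send.retval / send.out.size updated
 */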
768 static int cxl_memdev_open(struct inode *inode, struct file *file)
770 struct cxl_memdev *cxlmd =
771 container_of(inode->i_cdev, typeof(*cxlmd), cdev);
773 get_device(&cxlmd->dev);
774 file->private_data = cxlmd;
779 static int cxl_memdev_release_file(struct inode *inode, struct file *file)
781 struct cxl_memdev *cxlmd =
782 container_of(inode->i_cdev, typeof(*cxlmd), cdev);
784 put_device(&cxlmd->dev);
789 static const struct file_operations cxl_memdev_fops = {
790 .owner = THIS_MODULE,
791 .unlocked_ioctl = cxl_memdev_ioctl,
792 .open = cxl_memdev_open,
793 .release = cxl_memdev_release_file,
794 .compat_ioctl = compat_ptr_ioctl,
795 .llseek = noop_llseek,
798 static inline struct cxl_mem_command *cxl_mem_find_command(u16 opcode)
800 struct cxl_mem_command *c;
803 if (c->opcode == opcode)
810 * cxl_mem_mbox_send_cmd() - Send a mailbox command to a memory device.
811 * @cxlm: The CXL memory device to communicate with.
812 * @opcode: Opcode for the mailbox command.
813 * @in: The input payload for the mailbox command.
814 * @in_size: The length of the input payload
815 * @out: Caller allocated buffer for the output.
816 * @out_size: Expected size of output.
818 * Context: Any context. Will acquire and release mbox_mutex.
820 * * %>=0 - Number of bytes returned in @out.
821 * * %-E2BIG - Payload is too large for hardware.
822 * * %-EBUSY - Couldn't acquire exclusive mailbox access.
823 * * %-EFAULT - Hardware error occurred.
824 * * %-ENXIO - Command completed, but device reported an error.
825 * * %-EIO - Unexpected output size.
827 * Mailbox commands may execute successfully yet the device itself reported an
828 * error. While this distinction can be useful for commands from userspace, the
829 * kernel will only be able to use results when both are successful.
831 * See __cxl_mem_mbox_send_cmd()
833 static int cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, u16 opcode,
834 void *in, size_t in_size,
835 void *out, size_t out_size)
837 const struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);
838 struct mbox_cmd mbox_cmd = {
842 .size_out = out_size,
847 if (out_size > cxlm->payload_size)
850 rc = cxl_mem_mbox_get(cxlm);
854 rc = __cxl_mem_mbox_send_cmd(cxlm, &mbox_cmd);
855 cxl_mem_mbox_put(cxlm);
859 /* TODO: Map return code to proper kernel style errno */
	if (mbox_cmd.return_code != CXL_MBOX_SUCCESS)
		return -ENXIO;
864 * Variable sized commands can't be validated and so it's up to the
865 * caller to do that if they wish.
	if (cmd->info.size_out >= 0 && mbox_cmd.size_out != out_size)
		return -EIO;
874 * cxl_mem_setup_regs() - Setup necessary MMIO.
875 * @cxlm: The CXL memory device to communicate with.
877 * Return: 0 if all necessary registers mapped.
879 * A memory device is required by spec to implement a certain set of MMIO
 * regions. The purpose of this function is to enumerate and map those
 * registers.
883 static int cxl_mem_setup_regs(struct cxl_mem *cxlm)
885 struct device *dev = &cxlm->pdev->dev;
886 struct cxl_regs *regs = &cxlm->regs;
	cxl_setup_device_regs(dev, cxlm->base, &regs->device_regs);
890 if (!regs->status || !regs->mbox || !regs->memdev) {
891 dev_err(dev, "registers not found: %s%s%s\n",
892 !regs->status ? "status " : "",
893 !regs->mbox ? "mbox " : "",
894 !regs->memdev ? "memdev" : "");
901 static int cxl_mem_setup_mailbox(struct cxl_mem *cxlm)
903 const int cap = readl(cxlm->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET);
	cxlm->payload_size =
		1 << FIELD_GET(CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK, cap);
909 * CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register
911 * If the size is too small, mandatory commands will not work and so
912 * there's no point in going forward. If the size is too large, there's
	 * no harm in soft limiting it.
915 cxlm->payload_size = min_t(size_t, cxlm->payload_size, SZ_1M);
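	/*
	 * Worked example (illustrative): the capability register's Payload Size
	 * field is a power-of-2 exponent, so a field value of 8 yields
	 * 1 << 8 = 256 bytes, the minimum accepted below, while a value of 20
	 * yields 1 MiB, the ceiling enforced by the min_t(..., SZ_1M) clamp
	 * above.
	 */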
916 if (cxlm->payload_size < 256) {
		dev_err(&cxlm->pdev->dev, "Mailbox is too small (%zub)",
			cxlm->payload_size);
		return -ENXIO;
	}

	dev_dbg(&cxlm->pdev->dev, "Mailbox payload sized %zu",
		cxlm->payload_size);
928 static struct cxl_mem *cxl_mem_create(struct pci_dev *pdev)
930 struct device *dev = &pdev->dev;
931 struct cxl_mem *cxlm;
933 cxlm = devm_kzalloc(dev, sizeof(*cxlm), GFP_KERNEL);
935 dev_err(dev, "No memory available\n");
936 return ERR_PTR(-ENOMEM);
939 mutex_init(&cxlm->mbox_mutex);
942 devm_kmalloc_array(dev, BITS_TO_LONGS(cxl_cmd_count),
943 sizeof(unsigned long),
944 GFP_KERNEL | __GFP_ZERO);
945 if (!cxlm->enabled_cmds) {
946 dev_err(dev, "No memory available for bitmap\n");
947 return ERR_PTR(-ENOMEM);
953 static int cxl_mem_map_regblock(struct cxl_mem *cxlm, u32 reg_lo, u32 reg_hi)
955 struct pci_dev *pdev = cxlm->pdev;
956 struct device *dev = &pdev->dev;
962 offset = ((u64)reg_hi << 32) | (reg_lo & CXL_REGLOC_ADDR_MASK);
963 bar = FIELD_GET(CXL_REGLOC_BIR_MASK, reg_lo);
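	/*
	 * Illustrative decode, assuming the CXL_REGLOC_* masks follow the
	 * CXL 2.0 Register Locator DVSEC layout (BIR in the low bits, block
	 * identifier in bits 15:8, 64K-aligned offset in the upper bits):
	 *
	 *	reg_lo = 0x00010302, reg_hi = 0
	 *	  -> bar    = 2
	 *	  -> offset = 0x10000 within BAR2
	 *
	 * Bits 15:8 carry the register block type, which the caller decodes
	 * separately via CXL_REGLOC_RBI_MASK.
	 */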
965 /* Basic sanity check that BAR is big enough */
966 if (pci_resource_len(pdev, bar) < offset) {
967 dev_err(dev, "BAR%d: %pr: too small (offset: %#llx)\n", bar,
968 &pdev->resource[bar], (unsigned long long)offset);
972 rc = pcim_iomap_regions(pdev, BIT(bar), pci_name(pdev));
974 dev_err(dev, "failed to map registers\n");
977 regs = pcim_iomap_table(pdev)[bar];
979 cxlm->base = regs + offset;
981 dev_dbg(dev, "Mapped CXL Memory Device resource\n");
985 static int cxl_mem_dvsec(struct pci_dev *pdev, int dvsec)
989 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DVSEC);
996 pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER1, &vendor);
997 pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER2, &id);
998 if (vendor == PCI_DVSEC_VENDOR_ID_CXL && dvsec == id)
1001 pos = pci_find_next_ext_capability(pdev, pos,
1002 PCI_EXT_CAP_ID_DVSEC);
1008 static struct cxl_memdev *to_cxl_memdev(struct device *dev)
1010 return container_of(dev, struct cxl_memdev, dev);
1013 static void cxl_memdev_release(struct device *dev)
1015 struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
1017 ida_free(&cxl_memdev_ida, cxlmd->id);
static char *cxl_memdev_devnode(struct device *dev, umode_t *mode, kuid_t *uid,
				kgid_t *gid)
1024 return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev));
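/*
 * Illustrative note: with the "cxl/%s" devnode format above and the "mem%d"
 * device names set in cxl_mem_add_memdev(), the resulting character device
 * nodes appear as e.g. /dev/cxl/mem0.
 */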
1027 static ssize_t firmware_version_show(struct device *dev,
1028 struct device_attribute *attr, char *buf)
1030 struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
1031 struct cxl_mem *cxlm = cxlmd->cxlm;
1033 return sysfs_emit(buf, "%.16s\n", cxlm->firmware_version);
1035 static DEVICE_ATTR_RO(firmware_version);
1037 static ssize_t payload_max_show(struct device *dev,
1038 struct device_attribute *attr, char *buf)
1040 struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
1041 struct cxl_mem *cxlm = cxlmd->cxlm;
1043 return sysfs_emit(buf, "%zu\n", cxlm->payload_size);
1045 static DEVICE_ATTR_RO(payload_max);
1047 static ssize_t label_storage_size_show(struct device *dev,
1048 struct device_attribute *attr, char *buf)
1050 struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
1051 struct cxl_mem *cxlm = cxlmd->cxlm;
1053 return sysfs_emit(buf, "%zu\n", cxlm->lsa_size);
1055 static DEVICE_ATTR_RO(label_storage_size);
1057 static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr,
1060 struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
1061 struct cxl_mem *cxlm = cxlmd->cxlm;
1062 unsigned long long len = range_len(&cxlm->ram_range);
1064 return sysfs_emit(buf, "%#llx\n", len);
1067 static struct device_attribute dev_attr_ram_size =
1068 __ATTR(size, 0444, ram_size_show, NULL);
1070 static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr,
1073 struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
1074 struct cxl_mem *cxlm = cxlmd->cxlm;
1075 unsigned long long len = range_len(&cxlm->pmem_range);
1077 return sysfs_emit(buf, "%#llx\n", len);
1080 static struct device_attribute dev_attr_pmem_size =
1081 __ATTR(size, 0444, pmem_size_show, NULL);
1083 static struct attribute *cxl_memdev_attributes[] = {
1084 &dev_attr_firmware_version.attr,
1085 &dev_attr_payload_max.attr,
1086 &dev_attr_label_storage_size.attr,
1090 static struct attribute *cxl_memdev_pmem_attributes[] = {
1091 &dev_attr_pmem_size.attr,
1095 static struct attribute *cxl_memdev_ram_attributes[] = {
1096 &dev_attr_ram_size.attr,
1100 static struct attribute_group cxl_memdev_attribute_group = {
1101 .attrs = cxl_memdev_attributes,
1104 static struct attribute_group cxl_memdev_ram_attribute_group = {
1106 .attrs = cxl_memdev_ram_attributes,
1109 static struct attribute_group cxl_memdev_pmem_attribute_group = {
1111 .attrs = cxl_memdev_pmem_attributes,
1114 static const struct attribute_group *cxl_memdev_attribute_groups[] = {
1115 &cxl_memdev_attribute_group,
1116 &cxl_memdev_ram_attribute_group,
1117 &cxl_memdev_pmem_attribute_group,
1121 static const struct device_type cxl_memdev_type = {
1122 .name = "cxl_memdev",
1123 .release = cxl_memdev_release,
1124 .devnode = cxl_memdev_devnode,
1125 .groups = cxl_memdev_attribute_groups,
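/*
 * Resulting sysfs layout for a memdev (assuming the ram and pmem attribute
 * groups above carry .name fields of "ram" and "pmem", which are not shown
 * in this listing):
 *
 *	/sys/bus/cxl/devices/mem0/firmware_version
 *	/sys/bus/cxl/devices/mem0/payload_max
 *	/sys/bus/cxl/devices/mem0/label_storage_size
 *	/sys/bus/cxl/devices/mem0/ram/size
 *	/sys/bus/cxl/devices/mem0/pmem/size
 */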
1128 static void cxl_memdev_shutdown(struct cxl_memdev *cxlmd)
1130 down_write(&cxl_memdev_rwsem);
	cxlmd->cxlm = NULL;
	up_write(&cxl_memdev_rwsem);
1135 static void cxl_memdev_unregister(void *_cxlmd)
1137 struct cxl_memdev *cxlmd = _cxlmd;
1138 struct device *dev = &cxlmd->dev;
1140 cdev_device_del(&cxlmd->cdev, dev);
1141 cxl_memdev_shutdown(cxlmd);
1145 static struct cxl_memdev *cxl_memdev_alloc(struct cxl_mem *cxlm)
1147 struct pci_dev *pdev = cxlm->pdev;
1148 struct cxl_memdev *cxlmd;
1153 cxlmd = kzalloc(sizeof(*cxlmd), GFP_KERNEL);
1155 return ERR_PTR(-ENOMEM);
1157 rc = ida_alloc_range(&cxl_memdev_ida, 0, CXL_MEM_MAX_DEVS, GFP_KERNEL);
1163 device_initialize(dev);
1164 dev->parent = &pdev->dev;
1165 dev->bus = &cxl_bus_type;
1166 dev->devt = MKDEV(cxl_mem_major, cxlmd->id);
1167 dev->type = &cxl_memdev_type;
1168 device_set_pm_not_required(dev);
1170 cdev = &cxlmd->cdev;
1171 cdev_init(cdev, &cxl_memdev_fops);
1179 static int cxl_mem_add_memdev(struct cxl_mem *cxlm)
1181 struct cxl_memdev *cxlmd;
1186 cxlmd = cxl_memdev_alloc(cxlm);
1188 return PTR_ERR(cxlmd);
1191 rc = dev_set_name(dev, "mem%d", cxlmd->id);
1196 * Activate ioctl operations, no cxl_memdev_rwsem manipulation
1197 * needed as this is ordered with cdev_add() publishing the device.
1201 cdev = &cxlmd->cdev;
1202 rc = cdev_device_add(cdev, dev);
1206 return devm_add_action_or_reset(dev->parent, cxl_memdev_unregister,
	 * The cdev was briefly live, shutdown any ioctl operations that
	 * saw that state.
	 */
1214 cxl_memdev_shutdown(cxlmd);
1219 static int cxl_xfer_log(struct cxl_mem *cxlm, uuid_t *uuid, u32 size, u8 *out)
1221 u32 remaining = size;
1225 u32 xfer_size = min_t(u32, remaining, cxlm->payload_size);
		struct cxl_mbox_get_log {
			uuid_t uuid;
			__le32 offset;
			__le32 length;
		} __packed log = {
			.uuid = *uuid,
			.offset = cpu_to_le32(offset),
			.length = cpu_to_le32(xfer_size)
		};
1237 rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_LOG, &log,
1238 sizeof(log), out, xfer_size);
1243 remaining -= xfer_size;
1244 offset += xfer_size;
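		/*
		 * Illustrative numbers: with a 1 MiB payload_size and a 3 MiB
		 * log, the loop above issues three GET_LOG commands at offsets
		 * 0, 1 MiB and 2 MiB, each transferring
		 * min(remaining, payload_size) bytes.
		 */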
1251 * cxl_walk_cel() - Walk through the Command Effects Log.
 * @cxlm: Device.
 * @size: Length of the Command Effects Log.
 * @cel: CEL buffer.
1256 * Iterate over each entry in the CEL and determine if the driver supports the
1257 * command. If so, the command is enabled for the device and can be used later.
1259 static void cxl_walk_cel(struct cxl_mem *cxlm, size_t size, u8 *cel)
	struct cel_entry {
		__le16 opcode;
		__le32 effect;
	} __packed * cel_entry;
1265 const int cel_entries = size / sizeof(*cel_entry);
1268 cel_entry = (struct cel_entry *)cel;
1270 for (i = 0; i < cel_entries; i++) {
1271 u16 opcode = le16_to_cpu(cel_entry[i].opcode);
1272 struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);
1275 dev_dbg(&cxlm->pdev->dev,
1276 "Opcode 0x%04x unsupported by driver", opcode);
1280 set_bit(cmd->info.id, cxlm->enabled_cmds);
struct cxl_mbox_get_supported_logs {
	__le16 entries;
	u8 rsvd[6];
	struct gsl_entry {
		uuid_t uuid;
		__le32 size;
	} __packed entry[];
} __packed;
1293 static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_mem *cxlm)
1295 struct cxl_mbox_get_supported_logs *ret;
1298 ret = kvmalloc(cxlm->payload_size, GFP_KERNEL);
1300 return ERR_PTR(-ENOMEM);
1302 rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_SUPPORTED_LOGS, NULL,
1303 0, ret, cxlm->payload_size);
1313 * cxl_mem_enumerate_cmds() - Enumerate commands for a device.
1314 * @cxlm: The device.
1316 * Returns 0 if enumerate completed successfully.
1318 * CXL devices have optional support for certain commands. This function will
1319 * determine the set of supported commands for the hardware and update the
1320 * enabled_cmds bitmap in the @cxlm.
1322 static int cxl_mem_enumerate_cmds(struct cxl_mem *cxlm)
1324 struct cxl_mbox_get_supported_logs *gsl;
1325 struct device *dev = &cxlm->pdev->dev;
1326 struct cxl_mem_command *cmd;
1329 gsl = cxl_get_gsl(cxlm);
1331 return PTR_ERR(gsl);
1334 for (i = 0; i < le16_to_cpu(gsl->entries); i++) {
1335 u32 size = le32_to_cpu(gsl->entry[i].size);
1336 uuid_t uuid = gsl->entry[i].uuid;
1339 dev_dbg(dev, "Found LOG type %pU of size %d", &uuid, size);
1341 if (!uuid_equal(&uuid, &log_uuid[CEL_UUID]))
1344 log = kvmalloc(size, GFP_KERNEL);
1350 rc = cxl_xfer_log(cxlm, &uuid, size, log);
1356 cxl_walk_cel(cxlm, size, log);
1359 /* In case CEL was bogus, enable some default commands. */
1360 cxl_for_each_cmd(cmd)
1361 if (cmd->flags & CXL_CMD_FLAG_FORCE_ENABLE)
1362 set_bit(cmd->info.id, cxlm->enabled_cmds);
1364 /* Found the required CEL */
1374 * cxl_mem_identify() - Send the IDENTIFY command to the device.
1375 * @cxlm: The device to identify.
1377 * Return: 0 if identify was executed successfully.
1379 * This will dispatch the identify command to the device and on success populate
1380 * structures to be exported to sysfs.
1382 static int cxl_mem_identify(struct cxl_mem *cxlm)
1384 /* See CXL 2.0 Table 175 Identify Memory Device Output Payload */
1385 struct cxl_mbox_identify {
1386 char fw_revision[0x10];
1387 __le64 total_capacity;
1388 __le64 volatile_capacity;
1389 __le64 persistent_capacity;
1390 __le64 partition_align;
1391 __le16 info_event_log_size;
1392 __le16 warning_event_log_size;
1393 __le16 failure_event_log_size;
1394 __le16 fatal_event_log_size;
		__le32 lsa_size;
		u8 poison_list_max_mer[3];
1397 __le16 inject_poison_limit;
		u8 qos_telemetry_caps;
	} __packed id;
	int rc;

	rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_IDENTIFY, NULL, 0, &id,
				   sizeof(id));
1409 * TODO: enumerate DPA map, as 'ram' and 'pmem' do not alias.
1410 * For now, only the capacity is exported in sysfs
1412 cxlm->ram_range.start = 0;
1413 cxlm->ram_range.end = le64_to_cpu(id.volatile_capacity) * SZ_256M - 1;
1415 cxlm->pmem_range.start = 0;
1416 cxlm->pmem_range.end =
1417 le64_to_cpu(id.persistent_capacity) * SZ_256M - 1;
1419 cxlm->lsa_size = le32_to_cpu(id.lsa_size);
1420 memcpy(cxlm->firmware_version, id.fw_revision, sizeof(id.fw_revision));
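	/*
	 * Illustrative note: the Identify payload reports capacities in
	 * multiples of 256MB, so a volatile_capacity of 4 becomes a 1GB
	 * ram_range of [0, SZ_1G - 1] after the SZ_256M scaling above.
	 */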
1425 static int cxl_mem_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1427 struct device *dev = &pdev->dev;
1428 u32 regloc_size, regblocks;
1429 struct cxl_mem *cxlm;
1432 rc = pcim_enable_device(pdev);
1436 cxlm = cxl_mem_create(pdev);
1438 return PTR_ERR(cxlm);
1440 regloc = cxl_mem_dvsec(pdev, PCI_DVSEC_ID_CXL_REGLOC_OFFSET);
1442 dev_err(dev, "register location dvsec not found\n");
1446 /* Get the size of the Register Locator DVSEC */
1447 pci_read_config_dword(pdev, regloc + PCI_DVSEC_HEADER1, ®loc_size);
1448 regloc_size = FIELD_GET(PCI_DVSEC_HEADER1_LENGTH_MASK, regloc_size);
1450 regloc += PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET;
1451 regblocks = (regloc_size - PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET) / 8;
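	/*
	 * Worked example (illustrative): each register block entry is two
	 * dwords, so with a DVSEC length of 0x24 and a first-block offset of
	 * 0xC (the value PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET is assumed to
	 * carry), this computes (0x24 - 0xC) / 8 = 3 register blocks.
	 */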
1453 for (i = 0; i < regblocks; i++, regloc += 8) {
1457 /* "register low and high" contain other bits */
1458 pci_read_config_dword(pdev, regloc, ®_lo);
1459 pci_read_config_dword(pdev, regloc + 4, ®_hi);
1461 reg_type = FIELD_GET(CXL_REGLOC_RBI_MASK, reg_lo);
1463 if (reg_type == CXL_REGLOC_RBI_MEMDEV) {
1464 rc = cxl_mem_map_regblock(cxlm, reg_lo, reg_hi);
1471 if (i == regblocks) {
1472 dev_err(dev, "Missing register locator for device registers\n");
1476 rc = cxl_mem_setup_regs(cxlm);
1480 rc = cxl_mem_setup_mailbox(cxlm);
1484 rc = cxl_mem_enumerate_cmds(cxlm);
1488 rc = cxl_mem_identify(cxlm);
1492 return cxl_mem_add_memdev(cxlm);
1495 static const struct pci_device_id cxl_mem_pci_tbl[] = {
1496 /* PCI class code for CXL.mem Type-3 Devices */
1497 { PCI_DEVICE_CLASS((PCI_CLASS_MEMORY_CXL << 8 | CXL_MEMORY_PROGIF), ~0)},
1498 { /* terminate list */ },
1500 MODULE_DEVICE_TABLE(pci, cxl_mem_pci_tbl);
1502 static struct pci_driver cxl_mem_driver = {
1503 .name = KBUILD_MODNAME,
1504 .id_table = cxl_mem_pci_tbl,
1505 .probe = cxl_mem_probe,
1507 .probe_type = PROBE_PREFER_ASYNCHRONOUS,
1511 static __init int cxl_mem_init(void)
1513 struct dentry *mbox_debugfs;
1517 /* Double check the anonymous union trickery in struct cxl_regs */
1518 BUILD_BUG_ON(offsetof(struct cxl_regs, memdev) !=
1519 offsetof(struct cxl_regs, device_regs.memdev));
1521 rc = alloc_chrdev_region(&devt, 0, CXL_MEM_MAX_DEVS, "cxl");
1525 cxl_mem_major = MAJOR(devt);
1527 rc = pci_register_driver(&cxl_mem_driver);
		unregister_chrdev_region(MKDEV(cxl_mem_major, 0),
					 CXL_MEM_MAX_DEVS);
		return rc;
	}
1534 cxl_debugfs = debugfs_create_dir("cxl", NULL);
1535 mbox_debugfs = debugfs_create_dir("mbox", cxl_debugfs);
1536 debugfs_create_bool("raw_allow_all", 0600, mbox_debugfs,
1537 &cxl_raw_allow_all);
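/*
 * The knob created above shows up (with debugfs mounted in the usual place) as
 * /sys/kernel/debug/cxl/mbox/raw_allow_all; setting it to Y bypasses the
 * per-opcode deny list in cxl_mem_raw_command_allowed(), subject to the
 * CONFIG_CXL_MEM_RAW_COMMANDS and lockdown checks there.
 */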
1542 static __exit void cxl_mem_exit(void)
1544 debugfs_remove_recursive(cxl_debugfs);
1545 pci_unregister_driver(&cxl_mem_driver);
1546 unregister_chrdev_region(MKDEV(cxl_mem_major, 0), CXL_MEM_MAX_DEVS);
1549 MODULE_LICENSE("GPL v2");
1550 module_init(cxl_mem_init);
1551 module_exit(cxl_mem_exit);
1552 MODULE_IMPORT_NS(CXL);