// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/security.h>
#include <linux/debugfs.h>
#include <linux/mutex.h>
#include <cxlmem.h>
#include <cxl.h>

#include "core.h"
#include "trace.h"

static bool cxl_raw_allow_all;

/**
 * DOC: cxl mbox
 *
 * Core implementation of the CXL 2.0 Type-3 Memory Device Mailbox. The
 * implementation is used by the cxl_pci driver to initialize the device
 * and implement the cxl_mem.h IOCTL UAPI. It also implements the
 * backend of the cxl_pmem_ctl() transport for LIBNVDIMM.
 */

#define cxl_for_each_cmd(cmd)                                                  \
	for ((cmd) = &cxl_mem_commands[0];                                     \
	     ((cmd) - cxl_mem_commands) < ARRAY_SIZE(cxl_mem_commands); (cmd)++)

#define CXL_CMD(_id, sin, sout, _flags)                                        \
	[CXL_MEM_COMMAND_ID_##_id] = {                                         \
		.info = {                                                      \
			.id = CXL_MEM_COMMAND_ID_##_id,                        \
			.size_in = sin,                                        \
			.size_out = sout,                                      \
		},                                                             \
		.opcode = CXL_MBOX_OP_##_id,                                   \
		.flags = _flags,                                               \
	}
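
/*
 * For illustration (a sketch, given the .info layout above): the table
 * entry CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE) below
 * expands to approximately:
 *
 *	[CXL_MEM_COMMAND_ID_IDENTIFY] = {
 *		.info = {
 *			.id = CXL_MEM_COMMAND_ID_IDENTIFY,
 *			.size_in = 0,
 *			.size_out = 0x43,
 *		},
 *		.opcode = CXL_MBOX_OP_IDENTIFY,
 *		.flags = CXL_CMD_FLAG_FORCE_ENABLE,
 *	}
 */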

#define CXL_VARIABLE_PAYLOAD	~0U
/*
 * This table defines the supported mailbox commands for the driver. This table
 * is made up of a UAPI structure. Non-negative values as parameters in the
 * table will be validated against the user's input. For example, if size_in is
 * 0, and the user passed in 1, it is an error.
 */
static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = {
	CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE),
#ifdef CONFIG_CXL_MEM_RAW_COMMANDS
	CXL_CMD(RAW, CXL_VARIABLE_PAYLOAD, CXL_VARIABLE_PAYLOAD, 0),
#endif
	CXL_CMD(GET_SUPPORTED_LOGS, 0, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE),
	CXL_CMD(GET_FW_INFO, 0, 0x50, 0),
	CXL_CMD(GET_PARTITION_INFO, 0, 0x20, 0),
	CXL_CMD(GET_LSA, 0x8, CXL_VARIABLE_PAYLOAD, 0),
	CXL_CMD(GET_HEALTH_INFO, 0, 0x12, 0),
	CXL_CMD(GET_LOG, 0x18, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE),
	CXL_CMD(SET_PARTITION_INFO, 0x0a, 0, 0),
	CXL_CMD(SET_LSA, CXL_VARIABLE_PAYLOAD, 0, 0),
	CXL_CMD(GET_ALERT_CONFIG, 0, 0x10, 0),
	CXL_CMD(SET_ALERT_CONFIG, 0xc, 0, 0),
	CXL_CMD(GET_SHUTDOWN_STATE, 0, 0x1, 0),
	CXL_CMD(SET_SHUTDOWN_STATE, 0x1, 0, 0),
	CXL_CMD(GET_POISON, 0x10, CXL_VARIABLE_PAYLOAD, 0),
	CXL_CMD(INJECT_POISON, 0x8, 0, 0),
	CXL_CMD(CLEAR_POISON, 0x48, 0, 0),
	CXL_CMD(GET_SCAN_MEDIA_CAPS, 0x10, 0x4, 0),
	CXL_CMD(SCAN_MEDIA, 0x11, 0, 0),
	CXL_CMD(GET_SCAN_MEDIA, 0, CXL_VARIABLE_PAYLOAD, 0),
};

/*
 * Commands that RAW doesn't permit. The rationale for each:
 *
 * CXL_MBOX_OP_ACTIVATE_FW: Firmware activation requires adjustment /
 * coordination of transaction timeout values at the root bridge level.
 *
 * CXL_MBOX_OP_SET_PARTITION_INFO: The device memory map may change live
 * and needs to be coordinated with HDM updates.
 *
 * CXL_MBOX_OP_SET_LSA: The label storage area may be cached by the
 * driver and any writes from userspace invalidate those contents.
 *
 * CXL_MBOX_OP_SET_SHUTDOWN_STATE: Set shutdown state assumes no writes
 * to the device after it is marked clean; userspace cannot make that
 * assertion.
 *
 * CXL_MBOX_OP_[GET_]SCAN_MEDIA: The kernel provides a native error list that
 * is kept up to date with patrol notifications and error management.
 */
static u16 cxl_disabled_raw_commands[] = {
	CXL_MBOX_OP_ACTIVATE_FW,
	CXL_MBOX_OP_SET_PARTITION_INFO,
	CXL_MBOX_OP_SET_LSA,
	CXL_MBOX_OP_SET_SHUTDOWN_STATE,
	CXL_MBOX_OP_SCAN_MEDIA,
	CXL_MBOX_OP_GET_SCAN_MEDIA,
};

/*
 * Command sets that RAW doesn't permit. All opcodes in this set are
 * disabled because they pass plain text security payloads over the
 * user/kernel boundary. This functionality is intended to be wrapped
 * behind the keys ABI which allows for encrypted payloads in the UAPI.
 */
static u8 security_command_sets[] = {
	0x44, /* Sanitize */
	0x45, /* Persistent Memory Data-at-rest Security */
	0x46, /* Security Passthrough */
};

static bool cxl_is_security_command(u16 opcode)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(security_command_sets); i++)
		if (security_command_sets[i] == (opcode >> 8))
			return true;
	return false;
}

static struct cxl_mem_command *cxl_mem_find_command(u16 opcode)
{
	struct cxl_mem_command *c;

	cxl_for_each_cmd(c)
		if (c->opcode == opcode)
			return c;

	return NULL;
}

static const char *cxl_mem_opcode_to_name(u16 opcode)
{
	struct cxl_mem_command *c;

	c = cxl_mem_find_command(opcode);
	if (!c)
		return NULL;

	return cxl_command_names[c->info.id].name;
}

/**
 * cxl_internal_send_cmd() - Kernel internal interface to send a mailbox command
 * @cxlds: The device data for the operation
 * @mbox_cmd: initialized command to execute
 *
 * Context: Any context.
 * Return:
 *  * %>=0	- Number of bytes returned in @out.
 *  * %-E2BIG	- Payload is too large for hardware.
 *  * %-EBUSY	- Couldn't acquire exclusive mailbox access.
 *  * %-EFAULT	- Hardware error occurred.
 *  * %-ENXIO	- Command completed, but device reported an error.
 *  * %-EIO	- Unexpected output size.
 *
 * Mailbox commands may execute successfully at the transport level, yet the
 * device itself may still report an error. While this distinction can be
 * useful for commands from userspace, the kernel will only be able to use
 * results when both are successful.
 */
int cxl_internal_send_cmd(struct cxl_dev_state *cxlds,
			  struct cxl_mbox_cmd *mbox_cmd)
{
	size_t out_size, min_out;
	int rc;

	if (mbox_cmd->size_in > cxlds->payload_size ||
	    mbox_cmd->size_out > cxlds->payload_size)
		return -E2BIG;

	out_size = mbox_cmd->size_out;
	min_out = mbox_cmd->min_out;
	rc = cxlds->mbox_send(cxlds, mbox_cmd);
	if (rc)
		return rc;

	if (mbox_cmd->return_code != CXL_MBOX_CMD_RC_SUCCESS)
		return cxl_mbox_cmd_rc2errno(mbox_cmd);

	if (!out_size)
		return 0;

	/*
	 * Variable sized output needs to at least satisfy the caller's
	 * minimum if not the fully requested size.
	 */
	if (min_out == 0)
		min_out = out_size;

	if (mbox_cmd->size_out < min_out)
		return -EIO;
	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_internal_send_cmd, CXL);
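
/*
 * Illustrative usage (a sketch, not part of the driver): internal callers
 * stack-allocate a struct cxl_mbox_cmd and treat transport and device
 * errors uniformly, mirroring cxl_dev_state_identify() below:
 *
 *	struct cxl_mbox_identify id;
 *	struct cxl_mbox_cmd mbox_cmd = {
 *		.opcode = CXL_MBOX_OP_IDENTIFY,
 *		.size_out = sizeof(id),
 *		.payload_out = &id,
 *	};
 *	int rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
 *	if (rc < 0)
 *		return rc;
 */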

static bool cxl_mem_raw_command_allowed(u16 opcode)
{
	int i;

	if (!IS_ENABLED(CONFIG_CXL_MEM_RAW_COMMANDS))
		return false;

	if (security_locked_down(LOCKDOWN_PCI_ACCESS))
		return false;

	if (cxl_raw_allow_all)
		return true;

	if (cxl_is_security_command(opcode))
		return false;

	for (i = 0; i < ARRAY_SIZE(cxl_disabled_raw_commands); i++)
		if (cxl_disabled_raw_commands[i] == opcode)
			return false;

	return true;
}

/**
 * cxl_payload_from_user_allowed() - Check contents of in_payload.
 * @opcode: The mailbox command opcode.
 * @payload_in: Pointer to the input payload passed in from user space.
 *
 * Return:
 *  * true	- payload_in passes check for @opcode.
 *  * false	- payload_in contains invalid or unsupported values.
 *
 * The driver may inspect payload contents before sending a mailbox
 * command from user space to the device. The intent is to reject
 * commands with input payloads that are known to be unsafe. This
 * check is not intended to replace the user's careful selection of
 * mailbox command parameters and makes no guarantee that the user
 * command will succeed, nor that it is appropriate.
 *
 * The specific checks are determined by the opcode.
 */
static bool cxl_payload_from_user_allowed(u16 opcode, void *payload_in)
{
	switch (opcode) {
	case CXL_MBOX_OP_SET_PARTITION_INFO: {
		struct cxl_mbox_set_partition_info *pi = payload_in;

		if (pi->flags & CXL_SET_PARTITION_IMMEDIATE_FLAG)
			return false;
		break;
	}
	default:
		break;
	}
	return true;
}

static int cxl_mbox_cmd_ctor(struct cxl_mbox_cmd *mbox,
			     struct cxl_dev_state *cxlds, u16 opcode,
			     size_t in_size, size_t out_size, u64 in_payload)
{
	*mbox = (struct cxl_mbox_cmd) {
		.opcode = opcode,
		.size_in = in_size,
	};

	if (in_size) {
		mbox->payload_in = vmemdup_user(u64_to_user_ptr(in_payload),
						in_size);
		if (IS_ERR(mbox->payload_in))
			return PTR_ERR(mbox->payload_in);

		if (!cxl_payload_from_user_allowed(opcode, mbox->payload_in)) {
			dev_dbg(cxlds->dev, "%s: input payload not allowed\n",
				cxl_mem_opcode_to_name(opcode));
			kvfree(mbox->payload_in);
			return -EBUSY;
		}
	}

	/* Prepare to handle a full payload for variable sized output */
	if (out_size == CXL_VARIABLE_PAYLOAD)
		mbox->size_out = cxlds->payload_size;
	else
		mbox->size_out = out_size;

	if (mbox->size_out) {
		mbox->payload_out = kvzalloc(mbox->size_out, GFP_KERNEL);
		if (!mbox->payload_out) {
			kvfree(mbox->payload_in);
			return -ENOMEM;
		}
	}
	return 0;
}

static void cxl_mbox_cmd_dtor(struct cxl_mbox_cmd *mbox)
{
	kvfree(mbox->payload_in);
	kvfree(mbox->payload_out);
}

static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd,
			      const struct cxl_send_command *send_cmd,
			      struct cxl_dev_state *cxlds)
{
	if (send_cmd->raw.rsvd)
		return -EINVAL;

	/*
	 * Unlike supported commands, the output size of RAW commands
	 * gets passed along without further checking, so it must be
	 * validated here.
	 */
	if (send_cmd->out.size > cxlds->payload_size)
		return -EINVAL;

	if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode))
		return -EPERM;

	dev_WARN_ONCE(cxlds->dev, true, "raw command path used\n");

	*mem_cmd = (struct cxl_mem_command) {
		.info = {
			.id = CXL_MEM_COMMAND_ID_RAW,
			.size_in = send_cmd->in.size,
			.size_out = send_cmd->out.size,
		},
		.opcode = send_cmd->raw.opcode
	};

	return 0;
}

static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd,
			  const struct cxl_send_command *send_cmd,
			  struct cxl_dev_state *cxlds)
{
	struct cxl_mem_command *c = &cxl_mem_commands[send_cmd->id];
	const struct cxl_command_info *info = &c->info;

	if (send_cmd->flags & ~CXL_MEM_COMMAND_FLAG_MASK)
		return -EINVAL;

	if (send_cmd->rsvd)
		return -EINVAL;

	if (send_cmd->in.rsvd || send_cmd->out.rsvd)
		return -EINVAL;

	/* Check that the command is enabled for hardware */
	if (!test_bit(info->id, cxlds->enabled_cmds))
		return -ENOTTY;

	/* Check that the command is not claimed for exclusive kernel use */
	if (test_bit(info->id, cxlds->exclusive_cmds))
		return -EBUSY;

	/* Check the input buffer is the expected size */
	if ((info->size_in != CXL_VARIABLE_PAYLOAD) &&
	    (info->size_in != send_cmd->in.size))
		return -ENOMEM;

	/* Check the output buffer is at least large enough */
	if ((info->size_out != CXL_VARIABLE_PAYLOAD) &&
	    (send_cmd->out.size < info->size_out))
		return -ENOMEM;

	*mem_cmd = (struct cxl_mem_command) {
		.info = {
			.id = info->id,
			.flags = info->flags,
			.size_in = send_cmd->in.size,
			.size_out = send_cmd->out.size,
		},
		.opcode = c->opcode
	};

	return 0;
}

/**
 * cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND.
 * @mbox_cmd: Sanitized and populated &struct cxl_mbox_cmd.
 * @cxlds: The device data for the operation
 * @send_cmd: &struct cxl_send_command copied in from userspace.
 *
 * Return:
 *  * %0	- @mbox_cmd is ready to send.
 *  * %-ENOTTY	- Invalid command specified.
 *  * %-EINVAL	- Reserved fields or invalid values were used.
 *  * %-ENOMEM	- Input or output buffer wasn't sized properly.
 *  * %-EPERM	- Attempted to use a protected command.
 *  * %-EBUSY	- Kernel has claimed exclusive access to this opcode.
 *
 * The result of this command is a fully validated command in @mbox_cmd that is
 * safe to send to the hardware.
 */
static int cxl_validate_cmd_from_user(struct cxl_mbox_cmd *mbox_cmd,
				      struct cxl_dev_state *cxlds,
				      const struct cxl_send_command *send_cmd)
{
	struct cxl_mem_command mem_cmd;
	int rc;

	if (send_cmd->id == 0 || send_cmd->id >= CXL_MEM_COMMAND_ID_MAX)
		return -ENOTTY;

	/*
	 * The user can never specify an input payload larger than what hardware
	 * supports, but output can be arbitrarily large (simply write out as
	 * much data as the hardware provides).
	 */
	if (send_cmd->in.size > cxlds->payload_size)
		return -EINVAL;

	/* Sanitize and construct a cxl_mem_command */
	if (send_cmd->id == CXL_MEM_COMMAND_ID_RAW)
		rc = cxl_to_mem_cmd_raw(&mem_cmd, send_cmd, cxlds);
	else
		rc = cxl_to_mem_cmd(&mem_cmd, send_cmd, cxlds);

	if (rc)
		return rc;

	/* Sanitize and construct a cxl_mbox_cmd */
	return cxl_mbox_cmd_ctor(mbox_cmd, cxlds, mem_cmd.opcode,
				 mem_cmd.info.size_in, mem_cmd.info.size_out,
				 send_cmd->in.payload);
}

int cxl_query_cmd(struct cxl_memdev *cxlmd,
		  struct cxl_mem_query_commands __user *q)
{
	struct device *dev = &cxlmd->dev;
	struct cxl_mem_command *cmd;
	u32 n_commands;
	int j = 0;

	dev_dbg(dev, "Query IOCTL\n");

	if (get_user(n_commands, &q->n_commands))
		return -EFAULT;

	/* Returns the total number if 0 elements are requested. */
	if (n_commands == 0)
		return put_user(ARRAY_SIZE(cxl_mem_commands), &q->n_commands);

	/*
	 * Otherwise, return min(n_commands, total commands) cxl_command_info
	 * structures.
	 */
	cxl_for_each_cmd(cmd) {
		const struct cxl_command_info *info = &cmd->info;

		if (copy_to_user(&q->commands[j++], info, sizeof(*info)))
			return -EFAULT;

		if (j == n_commands)
			break;
	}

	return 0;
}
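
/*
 * Illustrative userspace flow for CXL_MEM_QUERY_COMMANDS (a sketch, not
 * part of the driver; fd is assumed to be an open /dev/cxl/memX character
 * device): query once with n_commands == 0 to learn the table size, then
 * again with a buffer sized to hold the cxl_command_info entries:
 *
 *	struct cxl_mem_query_commands probe = { .n_commands = 0 };
 *	struct cxl_mem_query_commands *q;
 *
 *	ioctl(fd, CXL_MEM_QUERY_COMMANDS, &probe);
 *	q = calloc(1, sizeof(*q) +
 *		   probe.n_commands * sizeof(q->commands[0]));
 *	q->n_commands = probe.n_commands;
 *	ioctl(fd, CXL_MEM_QUERY_COMMANDS, q);
 */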

/**
 * handle_mailbox_cmd_from_user() - Dispatch a mailbox command for userspace.
 * @cxlds: The device data for the operation
 * @mbox_cmd: The validated mailbox command.
 * @out_payload: Pointer to userspace's output payload.
 * @size_out: (Input) Max payload size to copy out.
 *            (Output) Payload size hardware generated.
 * @retval: Hardware generated return code from the operation.
 *
 * Return:
 *  * %0	- Mailbox transaction succeeded. This implies the mailbox
 *		  protocol completed successfully, not that the operation itself
 *		  was successful.
 *  * %-ENOMEM	- Couldn't allocate a bounce buffer.
 *  * %-EFAULT	- Something happened with copy_to/from_user.
 *  * %-EINTR	- Mailbox acquisition interrupted.
 *  * %-EXXX	- Transaction level failures.
 *
 * Dispatches a mailbox command on behalf of a userspace request.
 * The output payload is copied to userspace.
 *
 * See cxl_send_cmd().
 */
static int handle_mailbox_cmd_from_user(struct cxl_dev_state *cxlds,
					struct cxl_mbox_cmd *mbox_cmd,
					u64 out_payload, s32 *size_out,
					u32 *retval)
{
	struct device *dev = cxlds->dev;
	int rc;

	dev_dbg(dev,
		"Submitting %s command for user\n"
		"\topcode: %x\n"
		"\tsize: %zx\n",
		cxl_mem_opcode_to_name(mbox_cmd->opcode),
		mbox_cmd->opcode, mbox_cmd->size_in);

	rc = cxlds->mbox_send(cxlds, mbox_cmd);
	if (rc)
		goto out;

	/*
	 * @size_out contains the max size that's allowed to be written back out
	 * to userspace. While the payload may have written more output than
	 * this it will have to be ignored.
	 */
	if (mbox_cmd->size_out) {
		dev_WARN_ONCE(dev, mbox_cmd->size_out > *size_out,
			      "Invalid return size\n");
		if (copy_to_user(u64_to_user_ptr(out_payload),
				 mbox_cmd->payload_out, mbox_cmd->size_out)) {
			rc = -EFAULT;
			goto out;
		}
	}

	*size_out = mbox_cmd->size_out;
	*retval = mbox_cmd->return_code;

out:
	cxl_mbox_cmd_dtor(mbox_cmd);
	return rc;
}

int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s)
{
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct device *dev = &cxlmd->dev;
	struct cxl_send_command send;
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	dev_dbg(dev, "Send IOCTL\n");

	if (copy_from_user(&send, s, sizeof(send)))
		return -EFAULT;

	rc = cxl_validate_cmd_from_user(&mbox_cmd, cxlmd->cxlds, &send);
	if (rc)
		return rc;

	rc = handle_mailbox_cmd_from_user(cxlds, &mbox_cmd, send.out.payload,
					  &send.out.size, &send.retval);
	if (rc)
		return rc;

	if (copy_to_user(s, &send, sizeof(send)))
		return -EFAULT;

	return 0;
}
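
/*
 * Illustrative userspace flow for CXL_MEM_SEND_COMMAND (a sketch, not part
 * of the driver), issuing IDENTIFY with a caller-allocated output buffer;
 * id_buf and fd (an open /dev/cxl/memX) are assumptions of the sketch:
 *
 *	unsigned char id_buf[0x43];
 *	struct cxl_send_command send = {
 *		.id = CXL_MEM_COMMAND_ID_IDENTIFY,
 *		.out.size = sizeof(id_buf),
 *		.out.payload = (__u64)(uintptr_t)id_buf,
 *	};
 *
 *	if (ioctl(fd, CXL_MEM_SEND_COMMAND, &send) == 0)
 *		inspect send.retval for the device-reported status;
 */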

static int cxl_xfer_log(struct cxl_dev_state *cxlds, uuid_t *uuid, u32 size, u8 *out)
{
	u32 remaining = size;
	u32 offset = 0;

	while (remaining) {
		u32 xfer_size = min_t(u32, remaining, cxlds->payload_size);
		struct cxl_mbox_cmd mbox_cmd;
		struct cxl_mbox_get_log log;
		int rc;

		log = (struct cxl_mbox_get_log) {
			.uuid = *uuid,
			.offset = cpu_to_le32(offset),
			.length = cpu_to_le32(xfer_size),
		};

		mbox_cmd = (struct cxl_mbox_cmd) {
			.opcode = CXL_MBOX_OP_GET_LOG,
			.size_in = sizeof(log),
			.payload_in = &log,
			.size_out = xfer_size,
			.payload_out = out,
		};

		rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
		if (rc < 0)
			return rc;

		out += xfer_size;
		remaining -= xfer_size;
		offset += xfer_size;
	}

	return 0;
}
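
/*
 * For example, with cxlds->payload_size == SZ_1M and a 2.5MB log,
 * cxl_xfer_log() issues three GET_LOG commands with (offset, length) of
 * (0, 1MB), (1MB, 1MB), and (2MB, 0.5MB), advancing @out by each
 * transfer size.
 */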

/**
 * cxl_walk_cel() - Walk through the Command Effects Log.
 * @cxlds: The device data for the operation
 * @size: Length of the Command Effects Log.
 * @cel: CEL
 *
 * Iterate over each entry in the CEL and determine if the driver supports the
 * command. If so, the command is enabled for the device and can be used later.
 */
static void cxl_walk_cel(struct cxl_dev_state *cxlds, size_t size, u8 *cel)
{
	struct cxl_cel_entry *cel_entry;
	const int cel_entries = size / sizeof(*cel_entry);
	int i;

	cel_entry = (struct cxl_cel_entry *) cel;

	for (i = 0; i < cel_entries; i++) {
		u16 opcode = le16_to_cpu(cel_entry[i].opcode);
		struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);

		if (!cmd) {
			dev_dbg(cxlds->dev,
				"Opcode 0x%04x unsupported by driver", opcode);
			continue;
		}

		set_bit(cmd->info.id, cxlds->enabled_cmds);
	}
}

static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_dev_state *cxlds)
{
	struct cxl_mbox_get_supported_logs *ret;
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	ret = kvmalloc(cxlds->payload_size, GFP_KERNEL);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_GET_SUPPORTED_LOGS,
		.size_out = cxlds->payload_size,
		.payload_out = ret,
		/* At least the record number field must be valid */
		.min_out = 2,
	};
	rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
	if (rc < 0) {
		kvfree(ret);
		return ERR_PTR(rc);
	}

	return ret;
}

/* See CXL 2.0 Table 170. Get Log Input Payload */
static const uuid_t log_uuid[] = {
	[CEL_UUID] = DEFINE_CXL_CEL_UUID,
	[VENDOR_DEBUG_UUID] = DEFINE_CXL_VENDOR_DEBUG_UUID,
};

/**
 * cxl_enumerate_cmds() - Enumerate commands for a device.
 * @cxlds: The device data for the operation
 *
 * Returns 0 if enumerate completed successfully.
 *
 * CXL devices have optional support for certain commands. This function will
 * determine the set of supported commands for the hardware and update the
 * enabled_cmds bitmap in the @cxlds.
 */
int cxl_enumerate_cmds(struct cxl_dev_state *cxlds)
{
	struct cxl_mbox_get_supported_logs *gsl;
	struct device *dev = cxlds->dev;
	struct cxl_mem_command *cmd;
	int i, rc;

	gsl = cxl_get_gsl(cxlds);
	if (IS_ERR(gsl))
		return PTR_ERR(gsl);

	rc = -ENOENT;
	for (i = 0; i < le16_to_cpu(gsl->entries); i++) {
		u32 size = le32_to_cpu(gsl->entry[i].size);
		uuid_t uuid = gsl->entry[i].uuid;
		u8 *log;

		dev_dbg(dev, "Found LOG type %pU of size %d", &uuid, size);

		if (!uuid_equal(&uuid, &log_uuid[CEL_UUID]))
			continue;

		log = kvmalloc(size, GFP_KERNEL);
		if (!log) {
			rc = -ENOMEM;
			goto out;
		}

		rc = cxl_xfer_log(cxlds, &uuid, size, log);
		if (rc) {
			kvfree(log);
			goto out;
		}

		cxl_walk_cel(cxlds, size, log);
		kvfree(log);

		/* In case CEL was bogus, enable some default commands. */
		cxl_for_each_cmd(cmd)
			if (cmd->flags & CXL_CMD_FLAG_FORCE_ENABLE)
				set_bit(cmd->info.id, cxlds->enabled_cmds);

		/* Found the required CEL */
		rc = 0;
	}
out:
	kvfree(gsl);
	return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, CXL);

static int cxl_clear_event_record(struct cxl_dev_state *cxlds,
				  enum cxl_event_log_type log,
				  struct cxl_get_event_payload *get_pl)
{
	struct cxl_mbox_clear_event_payload *payload;
	u16 total = le16_to_cpu(get_pl->record_count);
	u8 max_handles = CXL_CLEAR_EVENT_MAX_HANDLES;
	size_t pl_size = struct_size(payload, handles, max_handles);
	struct cxl_mbox_cmd mbox_cmd;
	u16 cnt;
	int rc = 0;
	int i;

	/* Payload size may limit the max handles */
	if (pl_size > cxlds->payload_size) {
		max_handles = (cxlds->payload_size - sizeof(*payload)) /
			      sizeof(__le16);
		pl_size = struct_size(payload, handles, max_handles);
	}

	payload = kvzalloc(pl_size, GFP_KERNEL);
	if (!payload)
		return -ENOMEM;

	*payload = (struct cxl_mbox_clear_event_payload) {
		.event_log = log,
	};

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_CLEAR_EVENT_RECORD,
		.payload_in = payload,
		.size_in = pl_size,
	};

	/*
	 * Clear Event Records uses u8 for the handle cnt while Get Event
	 * Record can return up to 0xffff records.
	 */
	i = 0;
	for (cnt = 0; cnt < total; cnt++) {
		payload->handles[i++] = get_pl->records[cnt].hdr.handle;
		dev_dbg(cxlds->dev, "Event log '%d': Clearing %u\n",
			log, le16_to_cpu(payload->handles[i - 1]));

		if (i == max_handles) {
			payload->nr_recs = i;
			rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
			if (rc)
				goto free_pl;
			i = 0;
		}
	}

	/* Clear what is left if any */
	if (i) {
		payload->nr_recs = i;
		mbox_cmd.size_in = struct_size(payload, handles, i);
		rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
		if (rc)
			goto free_pl;
	}

free_pl:
	kvfree(payload);
	return rc;
}

static void cxl_mem_get_records_log(struct cxl_dev_state *cxlds,
				    enum cxl_event_log_type type)
{
	struct cxl_get_event_payload *payload;
	struct cxl_mbox_cmd mbox_cmd;
	u8 log_type = type;
	u16 nr_rec;

	mutex_lock(&cxlds->event.log_lock);
	payload = cxlds->event.buf;

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_GET_EVENT_RECORD,
		.payload_in = &log_type,
		.size_in = sizeof(log_type),
		.payload_out = payload,
		.size_out = cxlds->payload_size,
		.min_out = struct_size(payload, records, 0),
	};

	do {
		int rc, i;

		rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
		if (rc) {
			dev_err_ratelimited(cxlds->dev,
				"Event log '%d': Failed to query event records : %d",
				type, rc);
			break;
		}

		nr_rec = le16_to_cpu(payload->record_count);
		if (!nr_rec)
			break;

		for (i = 0; i < nr_rec; i++)
			trace_cxl_generic_event(cxlds->dev, type,
						&payload->records[i]);

		if (payload->flags & CXL_GET_EVENT_FLAG_OVERFLOW)
			trace_cxl_overflow(cxlds->dev, type, payload);

		rc = cxl_clear_event_record(cxlds, type, payload);
		if (rc) {
			dev_err_ratelimited(cxlds->dev,
				"Event log '%d': Failed to clear events : %d",
				type, rc);
			break;
		}
	} while (nr_rec);

	mutex_unlock(&cxlds->event.log_lock);
}

/**
 * cxl_mem_get_event_records - Get Event Records from the device
 * @cxlds: The device data for the operation
 * @status: Event Status register value identifying which events are available.
 *
 * Retrieve all event records available on the device, report them as trace
 * events, and clear them.
 *
 * See CXL rev 3.0 @8.2.9.2.2 Get Event Records
 * See CXL rev 3.0 @8.2.9.2.3 Clear Event Records
 */
void cxl_mem_get_event_records(struct cxl_dev_state *cxlds, u32 status)
{
	dev_dbg(cxlds->dev, "Reading event logs: %x\n", status);

	if (status & CXLDEV_EVENT_STATUS_FATAL)
		cxl_mem_get_records_log(cxlds, CXL_EVENT_TYPE_FATAL);
	if (status & CXLDEV_EVENT_STATUS_FAIL)
		cxl_mem_get_records_log(cxlds, CXL_EVENT_TYPE_FAIL);
	if (status & CXLDEV_EVENT_STATUS_WARN)
		cxl_mem_get_records_log(cxlds, CXL_EVENT_TYPE_WARN);
	if (status & CXLDEV_EVENT_STATUS_INFO)
		cxl_mem_get_records_log(cxlds, CXL_EVENT_TYPE_INFO);
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_get_event_records, CXL);

/**
 * cxl_mem_get_partition_info - Get partition info
 * @cxlds: The device data for the operation
 *
 * Retrieve the current partition info for the device specified. The active
 * values are the current capacity in bytes. If not 0, the 'next' values are
 * the pending values, in bytes, which take effect on next cold reset.
 *
 * Return: 0 if no error; or the result of the mailbox command.
 *
 * See CXL @8.2.9.5.2.1 Get Partition Info
 */
static int cxl_mem_get_partition_info(struct cxl_dev_state *cxlds)
{
	struct cxl_mbox_get_partition_info pi;
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_GET_PARTITION_INFO,
		.size_out = sizeof(pi),
		.payload_out = &pi,
	};
	rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
	if (rc)
		return rc;

	cxlds->active_volatile_bytes =
		le64_to_cpu(pi.active_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
	cxlds->active_persistent_bytes =
		le64_to_cpu(pi.active_persistent_cap) * CXL_CAPACITY_MULTIPLIER;
	cxlds->next_volatile_bytes =
		le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
	cxlds->next_persistent_bytes =
		le64_to_cpu(pi.next_persistent_cap) * CXL_CAPACITY_MULTIPLIER;

	return 0;
}
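
/*
 * Capacity fields in the Get Partition Info payload are in units of
 * CXL_CAPACITY_MULTIPLIER (SZ_256M). For example, an active_volatile_cap
 * of 0x40 (64) corresponds to 64 * 256MB = 16GB of volatile capacity.
 */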

/**
 * cxl_dev_state_identify() - Send the IDENTIFY command to the device.
 * @cxlds: The device data for the operation
 *
 * Return: 0 if identify was executed successfully.
 *
 * This will dispatch the identify command to the device and on success populate
 * structures to be exported to sysfs.
 */
int cxl_dev_state_identify(struct cxl_dev_state *cxlds)
{
	/* See CXL 2.0 Table 175 Identify Memory Device Output Payload */
	struct cxl_mbox_identify id;
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_IDENTIFY,
		.size_out = sizeof(id),
		.payload_out = &id,
	};
	rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
	if (rc < 0)
		return rc;

	cxlds->total_bytes =
		le64_to_cpu(id.total_capacity) * CXL_CAPACITY_MULTIPLIER;
	cxlds->volatile_only_bytes =
		le64_to_cpu(id.volatile_capacity) * CXL_CAPACITY_MULTIPLIER;
	cxlds->persistent_only_bytes =
		le64_to_cpu(id.persistent_capacity) * CXL_CAPACITY_MULTIPLIER;
	cxlds->partition_align_bytes =
		le64_to_cpu(id.partition_align) * CXL_CAPACITY_MULTIPLIER;

	cxlds->lsa_size = le32_to_cpu(id.lsa_size);
	memcpy(cxlds->firmware_version, id.fw_revision, sizeof(id.fw_revision));

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, CXL);

static int add_dpa_res(struct device *dev, struct resource *parent,
		       struct resource *res, resource_size_t start,
		       resource_size_t size, const char *type)
{
	int rc;

	res->name = type;
	res->start = start;
	res->end = start + size - 1;
	res->flags = IORESOURCE_MEM;
	if (resource_size(res) == 0) {
		dev_dbg(dev, "DPA(%s): no capacity\n", res->name);
		return 0;
	}
	rc = request_resource(parent, res);
	if (rc) {
		dev_err(dev, "DPA(%s): failed to track %pr (%d)\n", res->name,
			res, rc);
		return rc;
	}

	dev_dbg(dev, "DPA(%s): %pr\n", res->name, res);

	return 0;
}

int cxl_mem_create_range_info(struct cxl_dev_state *cxlds)
{
	struct device *dev = cxlds->dev;
	int rc;

	cxlds->dpa_res =
		(struct resource)DEFINE_RES_MEM(0, cxlds->total_bytes);

	if (cxlds->partition_align_bytes == 0) {
		rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
				 cxlds->volatile_only_bytes, "ram");
		if (rc)
			return rc;
		return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res,
				   cxlds->volatile_only_bytes,
				   cxlds->persistent_only_bytes, "pmem");
	}

	rc = cxl_mem_get_partition_info(cxlds);
	if (rc) {
		dev_err(dev, "Failed to query partition information\n");
		return rc;
	}

	rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
			 cxlds->active_volatile_bytes, "ram");
	if (rc)
		return rc;
	return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res,
			   cxlds->active_volatile_bytes,
			   cxlds->active_persistent_bytes, "pmem");
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_create_range_info, CXL);
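
/*
 * For example (a sketch, not actual driver output): a device with 16GB of
 * volatile and 16GB of persistent capacity ends up with a DPA resource
 * tree like:
 *
 *	dpa_res:  [mem 0x000000000-0x7ffffffff]
 *	ram_res:  [mem 0x000000000-0x3ffffffff] "ram"
 *	pmem_res: [mem 0x400000000-0x7ffffffff] "pmem"
 *
 * where pmem starts where ram ends, per the add_dpa_res() calls above.
 */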

struct cxl_dev_state *cxl_dev_state_create(struct device *dev)
{
	struct cxl_dev_state *cxlds;

	cxlds = devm_kzalloc(dev, sizeof(*cxlds), GFP_KERNEL);
	if (!cxlds) {
		dev_err(dev, "No memory available\n");
		return ERR_PTR(-ENOMEM);
	}

	mutex_init(&cxlds->mbox_mutex);
	mutex_init(&cxlds->event.log_lock);
	cxlds->dev = dev;

	return cxlds;
}
EXPORT_SYMBOL_NS_GPL(cxl_dev_state_create, CXL);

void __init cxl_mbox_init(void)
{
	struct dentry *mbox_debugfs;

	mbox_debugfs = cxl_debugfs_create_dir("mbox");
	debugfs_create_bool("raw_allow_all", 0600, mbox_debugfs,
			    &cxl_raw_allow_all);
}
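
/*
 * With CONFIG_CXL_MEM_RAW_COMMANDS=y, the knob above is expected to land
 * at /sys/kernel/debug/cxl/mbox/raw_allow_all (assuming the default
 * debugfs mount point); writing 'Y' bypasses the RAW opcode deny-list
 * enforced by cxl_mem_raw_command_allowed():
 *
 *	echo Y > /sys/kernel/debug/cxl/mbox/raw_allow_all
 */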