// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/security.h>
#include <linux/debugfs.h>
#include <linux/mutex.h>
#include <cxlmem.h>
#include <cxl.h>

#include "core.h"
#include "trace.h"

static bool cxl_raw_allow_all;

/**
 * DOC: cxl mbox
 *
 * Core implementation of the CXL 2.0 Type-3 Memory Device Mailbox. The
 * implementation is used by the cxl_pci driver to initialize the device
 * and implement the cxl_mem.h IOCTL UAPI. It also implements the
 * backend of the cxl_pmem_ctl() transport for LIBNVDIMM.
 */

#define cxl_for_each_cmd(cmd)                                                  \
        for ((cmd) = &cxl_mem_commands[0];                                     \
             ((cmd) - cxl_mem_commands) < ARRAY_SIZE(cxl_mem_commands); (cmd)++)

#define CXL_CMD(_id, sin, sout, _flags)                                        \
        [CXL_MEM_COMMAND_ID_##_id] = {                                         \
        .info = {                                                              \
                        .id = CXL_MEM_COMMAND_ID_##_id,                        \
                        .size_in = sin,                                        \
                        .size_out = sout,                                      \
                },                                                             \
        .opcode = CXL_MBOX_OP_##_id,                                           \
        .flags = _flags,                                                       \
        }

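/*
 * For illustration only: CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE)
 * from the table below expands to roughly:
 *
 *	[CXL_MEM_COMMAND_ID_IDENTIFY] = {
 *		.info = {
 *			.id = CXL_MEM_COMMAND_ID_IDENTIFY,
 *			.size_in = 0,
 *			.size_out = 0x43,
 *		},
 *		.opcode = CXL_MBOX_OP_IDENTIFY,
 *		.flags = CXL_CMD_FLAG_FORCE_ENABLE,
 *	}
 */
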
#define CXL_VARIABLE_PAYLOAD	~0U
/*
 * This table defines the supported mailbox commands for the driver. Each
 * entry embeds a UAPI &struct cxl_command_info. Any size in the table other
 * than CXL_VARIABLE_PAYLOAD is validated against the user's input. For
 * example, if size_in is 0 and the user passed in 1, it is an error.
 */
static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = {
	CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE),
#ifdef CONFIG_CXL_MEM_RAW_COMMANDS
	CXL_CMD(RAW, CXL_VARIABLE_PAYLOAD, CXL_VARIABLE_PAYLOAD, 0),
#endif
	CXL_CMD(GET_SUPPORTED_LOGS, 0, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE),
	CXL_CMD(GET_FW_INFO, 0, 0x50, 0),
	CXL_CMD(GET_PARTITION_INFO, 0, 0x20, 0),
	CXL_CMD(GET_LSA, 0x8, CXL_VARIABLE_PAYLOAD, 0),
	CXL_CMD(GET_HEALTH_INFO, 0, 0x12, 0),
	CXL_CMD(GET_LOG, 0x18, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE),
	CXL_CMD(SET_PARTITION_INFO, 0x0a, 0, 0),
	CXL_CMD(SET_LSA, CXL_VARIABLE_PAYLOAD, 0, 0),
	CXL_CMD(GET_ALERT_CONFIG, 0, 0x10, 0),
	CXL_CMD(SET_ALERT_CONFIG, 0xc, 0, 0),
	CXL_CMD(GET_SHUTDOWN_STATE, 0, 0x1, 0),
	CXL_CMD(SET_SHUTDOWN_STATE, 0x1, 0, 0),
	CXL_CMD(GET_POISON, 0x10, CXL_VARIABLE_PAYLOAD, 0),
	CXL_CMD(INJECT_POISON, 0x8, 0, 0),
	CXL_CMD(CLEAR_POISON, 0x48, 0, 0),
	CXL_CMD(GET_SCAN_MEDIA_CAPS, 0x10, 0x4, 0),
	CXL_CMD(SCAN_MEDIA, 0x11, 0, 0),
	CXL_CMD(GET_SCAN_MEDIA, 0, CXL_VARIABLE_PAYLOAD, 0),
};

/*
 * Commands that RAW doesn't permit. The rationale for each:
 *
 * CXL_MBOX_OP_ACTIVATE_FW: Firmware activation requires adjustment /
 * coordination of transaction timeout values at the root bridge level.
 *
 * CXL_MBOX_OP_SET_PARTITION_INFO: The device memory map may change live
 * and needs to be coordinated with HDM updates.
 *
 * CXL_MBOX_OP_SET_LSA: The label storage area may be cached by the
 * driver and any writes from userspace invalidate those contents.
 *
 * CXL_MBOX_OP_SET_SHUTDOWN_STATE: Set shutdown state assumes no writes
 * to the device after it is marked clean, an assertion userspace cannot
 * make.
 *
 * CXL_MBOX_OP_[GET_]SCAN_MEDIA: The kernel provides a native error list that
 * is kept up to date with patrol notifications and error management.
 */
static u16 cxl_disabled_raw_commands[] = {
	CXL_MBOX_OP_ACTIVATE_FW,
	CXL_MBOX_OP_SET_PARTITION_INFO,
	CXL_MBOX_OP_SET_LSA,
	CXL_MBOX_OP_SET_SHUTDOWN_STATE,
	CXL_MBOX_OP_SCAN_MEDIA,
	CXL_MBOX_OP_GET_SCAN_MEDIA,
};

/*
 * Command sets that RAW doesn't permit. An opcode's command set is its high
 * byte, so e.g. any 0x45xx opcode falls in set 0x45. All opcodes in these
 * sets are disabled because they pass plain text security payloads over the
 * user/kernel boundary. This functionality is intended to be wrapped behind
 * the keys ABI which allows for encrypted payloads in the UAPI.
 */
static u8 security_command_sets[] = {
	0x44, /* Sanitize */
	0x45, /* Persistent Memory Data-at-rest Security */
	0x46, /* Security Passthrough */
};

static bool cxl_is_security_command(u16 opcode)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(security_command_sets); i++)
		if (security_command_sets[i] == (opcode >> 8))
			return true;
	return false;
}

static struct cxl_mem_command *cxl_mem_find_command(u16 opcode)
{
	struct cxl_mem_command *c;

	cxl_for_each_cmd(c)
		if (c->opcode == opcode)
			return c;

	return NULL;
}

static const char *cxl_mem_opcode_to_name(u16 opcode)
{
	struct cxl_mem_command *c;

	c = cxl_mem_find_command(opcode);
	if (!c)
		return NULL;

	return cxl_command_names[c->info.id].name;
}

/**
 * cxl_internal_send_cmd() - Kernel internal interface to send a mailbox command
 * @cxlds: The device data for the operation
 * @mbox_cmd: initialized command to execute
 *
 * Context: Any context.
 * Return:
 *  * %0        - Success; @mbox_cmd->size_out holds the returned byte count.
 *  * %-E2BIG   - Payload is too large for hardware.
 *  * %-EBUSY   - Couldn't acquire exclusive mailbox access.
 *  * %-EFAULT  - Hardware error occurred.
 *  * %-ENXIO   - Command completed, but device reported an error.
 *  * %-EIO     - Unexpected output size.
 *
 * Mailbox commands may execute successfully at the transport level yet the
 * device itself may still report an error. While this distinction can be
 * useful for commands from userspace, the kernel will only be able to use
 * results when both are successful.
 */
int cxl_internal_send_cmd(struct cxl_dev_state *cxlds,
			  struct cxl_mbox_cmd *mbox_cmd)
{
	size_t out_size, min_out;
	int rc;

	if (mbox_cmd->size_in > cxlds->payload_size ||
	    mbox_cmd->size_out > cxlds->payload_size)
		return -E2BIG;

	out_size = mbox_cmd->size_out;
	min_out = mbox_cmd->min_out;
	rc = cxlds->mbox_send(cxlds, mbox_cmd);
	if (rc)
		return rc;

	if (mbox_cmd->return_code != CXL_MBOX_CMD_RC_SUCCESS)
		return cxl_mbox_cmd_rc2errno(mbox_cmd);

	if (!out_size)
		return 0;

	/*
	 * Variable sized output needs to at least satisfy the caller's
	 * minimum if not the fully requested size.
	 */
	if (min_out == 0)
		min_out = out_size;

	if (mbox_cmd->size_out < min_out)
		return -EIO;
	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_internal_send_cmd, CXL);
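
/*
 * Example (illustrative sketch, not additional driver code): a kernel
 * internal caller typically points a stack-allocated &struct cxl_mbox_cmd
 * at a kernel buffer, mirroring cxl_mem_get_partition_info() below:
 *
 *	struct cxl_mbox_get_partition_info pi;
 *	struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd) {
 *		.opcode = CXL_MBOX_OP_GET_PARTITION_INFO,
 *		.size_out = sizeof(pi),
 *		.payload_out = &pi,
 *	};
 *	rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
 */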

static bool cxl_mem_raw_command_allowed(u16 opcode)
{
	int i;

	if (!IS_ENABLED(CONFIG_CXL_MEM_RAW_COMMANDS))
		return false;

	if (security_locked_down(LOCKDOWN_PCI_ACCESS))
		return false;

	if (cxl_raw_allow_all)
		return true;

	if (cxl_is_security_command(opcode))
		return false;

	for (i = 0; i < ARRAY_SIZE(cxl_disabled_raw_commands); i++)
		if (cxl_disabled_raw_commands[i] == opcode)
			return false;

	return true;
}

/**
 * cxl_payload_from_user_allowed() - Check contents of in_payload.
 * @opcode: The mailbox command opcode.
 * @payload_in: Pointer to the input payload passed in from user space.
 *
 * Return:
 *  * true      - payload_in passes check for @opcode.
 *  * false     - payload_in contains invalid or unsupported values.
 *
 * The driver may inspect payload contents before sending a mailbox
 * command from user space to the device. The intent is to reject
 * commands with input payloads that are known to be unsafe. This
 * check is not intended to replace the user's careful selection of
 * mailbox command parameters and makes no guarantee that the user
 * command will succeed, nor that it is appropriate.
 *
 * The specific checks are determined by the opcode.
 */
static bool cxl_payload_from_user_allowed(u16 opcode, void *payload_in)
{
	switch (opcode) {
	case CXL_MBOX_OP_SET_PARTITION_INFO: {
		struct cxl_mbox_set_partition_info *pi = payload_in;

		if (pi->flags & CXL_SET_PARTITION_IMMEDIATE_FLAG)
			return false;
		break;
	}
	default:
		break;
	}
	return true;
}

static int cxl_mbox_cmd_ctor(struct cxl_mbox_cmd *mbox,
			     struct cxl_dev_state *cxlds, u16 opcode,
			     size_t in_size, size_t out_size, u64 in_payload)
{
	*mbox = (struct cxl_mbox_cmd) {
		.opcode = opcode,
		.size_in = in_size,
	};

	if (in_size) {
		mbox->payload_in = vmemdup_user(u64_to_user_ptr(in_payload),
						in_size);
		if (IS_ERR(mbox->payload_in))
			return PTR_ERR(mbox->payload_in);

		if (!cxl_payload_from_user_allowed(opcode, mbox->payload_in)) {
			dev_dbg(cxlds->dev, "%s: input payload not allowed\n",
				cxl_mem_opcode_to_name(opcode));
			kvfree(mbox->payload_in);
			return -EBUSY;
		}
	}

	/* Prepare to handle a full payload for variable sized output */
	if (out_size == CXL_VARIABLE_PAYLOAD)
		mbox->size_out = cxlds->payload_size;
	else
		mbox->size_out = out_size;

	if (mbox->size_out) {
		mbox->payload_out = kvzalloc(mbox->size_out, GFP_KERNEL);
		if (!mbox->payload_out) {
			kvfree(mbox->payload_in);
			return -ENOMEM;
		}
	}
	return 0;
}

static void cxl_mbox_cmd_dtor(struct cxl_mbox_cmd *mbox)
{
	kvfree(mbox->payload_in);
	kvfree(mbox->payload_out);
}

static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd,
			      const struct cxl_send_command *send_cmd,
			      struct cxl_dev_state *cxlds)
{
	if (send_cmd->raw.rsvd)
		return -EINVAL;

	/*
	 * Unlike supported commands, the output size of RAW commands
	 * gets passed along without further checking, so it must be
	 * validated here.
	 */
	if (send_cmd->out.size > cxlds->payload_size)
		return -EINVAL;

	if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode))
		return -EPERM;

	dev_WARN_ONCE(cxlds->dev, true, "raw command path used\n");

	*mem_cmd = (struct cxl_mem_command) {
		.info = {
			.id = CXL_MEM_COMMAND_ID_RAW,
			.size_in = send_cmd->in.size,
			.size_out = send_cmd->out.size,
		},
		.opcode = send_cmd->raw.opcode
	};

	return 0;
}

static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd,
			  const struct cxl_send_command *send_cmd,
			  struct cxl_dev_state *cxlds)
{
	struct cxl_mem_command *c = &cxl_mem_commands[send_cmd->id];
	const struct cxl_command_info *info = &c->info;

	if (send_cmd->flags & ~CXL_MEM_COMMAND_FLAG_MASK)
		return -EINVAL;

	if (send_cmd->rsvd)
		return -EINVAL;

	if (send_cmd->in.rsvd || send_cmd->out.rsvd)
		return -EINVAL;

	/* Check that the command is enabled for hardware */
	if (!test_bit(info->id, cxlds->enabled_cmds))
		return -ENOTTY;

	/* Check that the command is not claimed for exclusive kernel use */
	if (test_bit(info->id, cxlds->exclusive_cmds))
		return -EBUSY;

	/* Check the input buffer is the expected size */
	if ((info->size_in != CXL_VARIABLE_PAYLOAD) &&
	    (info->size_in != send_cmd->in.size))
		return -ENOMEM;

	/* Check the output buffer is at least large enough */
	if ((info->size_out != CXL_VARIABLE_PAYLOAD) &&
	    (send_cmd->out.size < info->size_out))
		return -ENOMEM;

	*mem_cmd = (struct cxl_mem_command) {
		.info = {
			.id = info->id,
			.flags = info->flags,
			.size_in = send_cmd->in.size,
			.size_out = send_cmd->out.size,
		},
		.opcode = c->opcode
	};

	return 0;
}

/**
 * cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND.
 * @mbox_cmd: Sanitized and populated &struct cxl_mbox_cmd.
 * @cxlds: The device data for the operation
 * @send_cmd: &struct cxl_send_command copied in from userspace.
 *
 * Return:
 *  * %0        - @mbox_cmd is ready to send.
 *  * %-ENOTTY  - Invalid command specified.
 *  * %-EINVAL  - Reserved fields or invalid values were used.
 *  * %-ENOMEM  - Input or output buffer wasn't sized properly.
 *  * %-EPERM   - Attempted to use a protected command.
 *  * %-EBUSY   - Kernel has claimed exclusive access to this opcode.
 *
 * The result of this command is a fully validated command in @mbox_cmd that is
 * safe to send to the hardware.
 */
static int cxl_validate_cmd_from_user(struct cxl_mbox_cmd *mbox_cmd,
				      struct cxl_dev_state *cxlds,
				      const struct cxl_send_command *send_cmd)
{
	struct cxl_mem_command mem_cmd;
	int rc;

	if (send_cmd->id == 0 || send_cmd->id >= CXL_MEM_COMMAND_ID_MAX)
		return -ENOTTY;

	/*
	 * The user can never specify an input payload larger than what hardware
	 * supports, but output can be arbitrarily large (simply write out as
	 * much data as the hardware provides).
	 */
	if (send_cmd->in.size > cxlds->payload_size)
		return -EINVAL;

	/* Sanitize and construct a cxl_mem_command */
	if (send_cmd->id == CXL_MEM_COMMAND_ID_RAW)
		rc = cxl_to_mem_cmd_raw(&mem_cmd, send_cmd, cxlds);
	else
		rc = cxl_to_mem_cmd(&mem_cmd, send_cmd, cxlds);

	if (rc)
		return rc;

	/* Sanitize and construct a cxl_mbox_cmd */
	return cxl_mbox_cmd_ctor(mbox_cmd, cxlds, mem_cmd.opcode,
				 mem_cmd.info.size_in, mem_cmd.info.size_out,
				 send_cmd->in.payload);
}

int cxl_query_cmd(struct cxl_memdev *cxlmd,
		  struct cxl_mem_query_commands __user *q)
{
	struct device *dev = &cxlmd->dev;
	struct cxl_mem_command *cmd;
	u32 n_commands;
	int j = 0;

	dev_dbg(dev, "Query IOCTL\n");

	if (get_user(n_commands, &q->n_commands))
		return -EFAULT;

	/* returns the total number if 0 elements are requested. */
	if (n_commands == 0)
		return put_user(ARRAY_SIZE(cxl_mem_commands), &q->n_commands);

	/*
	 * otherwise, return min(n_commands, total commands) cxl_command_info
	 * structures.
	 */
	cxl_for_each_cmd(cmd) {
		const struct cxl_command_info *info = &cmd->info;

		if (copy_to_user(&q->commands[j++], info, sizeof(*info)))
			return -EFAULT;

		if (j == n_commands)
			break;
	}

	return 0;
}
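
/*
 * Example (illustrative userspace sketch, error handling omitted): the query
 * ioctl is typically issued twice, once with n_commands == 0 to learn the
 * total and once with a suitably sized allocation:
 *
 *	struct cxl_mem_query_commands q = { .n_commands = 0 }, *qp;
 *
 *	ioctl(fd, CXL_MEM_QUERY_COMMANDS, &q);
 *	qp = calloc(1, sizeof(*qp) + q.n_commands * sizeof(qp->commands[0]));
 *	qp->n_commands = q.n_commands;
 *	ioctl(fd, CXL_MEM_QUERY_COMMANDS, qp);
 */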

/**
 * handle_mailbox_cmd_from_user() - Dispatch a mailbox command for userspace.
 * @cxlds: The device data for the operation
 * @mbox_cmd: The validated mailbox command.
 * @out_payload: Pointer to userspace's output payload.
 * @size_out: (Input) Max payload size to copy out.
 *            (Output) Payload size hardware generated.
 * @retval: Hardware generated return code from the operation.
 *
 * Return:
 *  * %0        - Mailbox transaction succeeded. This implies the mailbox
 *                protocol completed successfully, not that the operation
 *                itself was successful.
 *  * %-ENOMEM  - Couldn't allocate a bounce buffer.
 *  * %-EFAULT  - Something happened with copy_to/from_user.
 *  * %-EINTR   - Mailbox acquisition interrupted.
 *  * %-EXXX    - Transaction level failures.
 *
 * Dispatches a mailbox command on behalf of a userspace request.
 * The output payload is copied to userspace.
 *
 * See cxl_send_cmd().
 */
static int handle_mailbox_cmd_from_user(struct cxl_dev_state *cxlds,
					struct cxl_mbox_cmd *mbox_cmd,
					u64 out_payload, s32 *size_out,
					u32 *retval)
{
	struct device *dev = cxlds->dev;
	int rc;

	dev_dbg(dev,
		"Submitting %s command for user\n"
		"\topcode: %x\n"
		"\tsize: %zx\n",
		cxl_mem_opcode_to_name(mbox_cmd->opcode),
		mbox_cmd->opcode, mbox_cmd->size_in);

	rc = cxlds->mbox_send(cxlds, mbox_cmd);
	if (rc)
		goto out;

	/*
	 * @size_out contains the max size that's allowed to be written back out
	 * to userspace. While the hardware may have produced more output than
	 * this, the excess has to be ignored.
	 */
	if (mbox_cmd->size_out) {
		dev_WARN_ONCE(dev, mbox_cmd->size_out > *size_out,
			      "Invalid return size\n");
		if (copy_to_user(u64_to_user_ptr(out_payload),
				 mbox_cmd->payload_out, mbox_cmd->size_out)) {
			rc = -EFAULT;
			goto out;
		}
	}

	*size_out = mbox_cmd->size_out;
	*retval = mbox_cmd->return_code;

out:
	cxl_mbox_cmd_dtor(mbox_cmd);
	return rc;
}

int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s)
{
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct device *dev = &cxlmd->dev;
	struct cxl_send_command send;
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	dev_dbg(dev, "Send IOCTL\n");

	if (copy_from_user(&send, s, sizeof(send)))
		return -EFAULT;

	rc = cxl_validate_cmd_from_user(&mbox_cmd, cxlds, &send);
	if (rc)
		return rc;

	rc = handle_mailbox_cmd_from_user(cxlds, &mbox_cmd, send.out.payload,
					  &send.out.size, &send.retval);
	if (rc)
		return rc;

	if (copy_to_user(s, &send, sizeof(send)))
		return -EFAULT;

	return 0;
}
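
/*
 * Example (illustrative userspace sketch, error handling omitted): issuing a
 * supported command, here GET_HEALTH_INFO, with a caller-provided output
 * buffer:
 *
 *	char buf[0x12];
 *	struct cxl_send_command send = {
 *		.id = CXL_MEM_COMMAND_ID_GET_HEALTH_INFO,
 *		.out.size = sizeof(buf),
 *		.out.payload = (__u64)(uintptr_t)buf,
 *	};
 *
 *	ioctl(fd, CXL_MEM_SEND_COMMAND, &send);
 */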
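/*
 * Read a log in cxlds->payload_size chunks. For example (sketch): with a 1MB
 * mailbox payload and a 2.5MB log, three Get Log commands are issued with
 * (offset, length) of (0, 1M), (1M, 1M), and (2M, 0.5M).
 */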
static int cxl_xfer_log(struct cxl_dev_state *cxlds, uuid_t *uuid, u32 size, u8 *out)
{
	u32 remaining = size;
	u32 offset = 0;

	while (remaining) {
		u32 xfer_size = min_t(u32, remaining, cxlds->payload_size);
		struct cxl_mbox_cmd mbox_cmd;
		struct cxl_mbox_get_log log;
		int rc;

		log = (struct cxl_mbox_get_log) {
			.uuid = *uuid,
			.offset = cpu_to_le32(offset),
			.length = cpu_to_le32(xfer_size),
		};

		mbox_cmd = (struct cxl_mbox_cmd) {
			.opcode = CXL_MBOX_OP_GET_LOG,
			.size_in = sizeof(log),
			.payload_in = &log,
			.size_out = xfer_size,
			.payload_out = out,
		};

		rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
		if (rc < 0)
			return rc;

		out += xfer_size;
		remaining -= xfer_size;
		offset += xfer_size;
	}

	return 0;
}

/**
 * cxl_walk_cel() - Walk through the Command Effects Log.
 * @cxlds: The device data for the operation
 * @size: Length of the Command Effects Log.
 * @cel: Buffer containing the Command Effects Log entries.
 *
 * Iterate over each entry in the CEL and determine if the driver supports the
 * command. If so, the command is enabled for the device and can be used later.
 */
static void cxl_walk_cel(struct cxl_dev_state *cxlds, size_t size, u8 *cel)
{
	struct cxl_cel_entry *cel_entry;
	const int cel_entries = size / sizeof(*cel_entry);
	int i;

	cel_entry = (struct cxl_cel_entry *) cel;

	for (i = 0; i < cel_entries; i++) {
		u16 opcode = le16_to_cpu(cel_entry[i].opcode);
		struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);

		if (!cmd) {
			dev_dbg(cxlds->dev,
				"Opcode 0x%04x unsupported by driver", opcode);
			continue;
		}

		set_bit(cmd->info.id, cxlds->enabled_cmds);
	}
}

static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_dev_state *cxlds)
{
	struct cxl_mbox_get_supported_logs *ret;
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	ret = kvmalloc(cxlds->payload_size, GFP_KERNEL);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_GET_SUPPORTED_LOGS,
		.size_out = cxlds->payload_size,
		.payload_out = ret,
		/* At least the record number field must be valid */
		.min_out = 2,
	};
	rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
	if (rc < 0) {
		kvfree(ret);
		return ERR_PTR(rc);
	}

	return ret;
}

enum {
	CEL_UUID,
	VENDOR_DEBUG_UUID,
};

/* See CXL 2.0 Table 170. Get Log Input Payload */
static const uuid_t log_uuid[] = {
	[CEL_UUID] = DEFINE_CXL_CEL_UUID,
	[VENDOR_DEBUG_UUID] = DEFINE_CXL_VENDOR_DEBUG_UUID,
};

/**
 * cxl_enumerate_cmds() - Enumerate commands for a device.
 * @cxlds: The device data for the operation
 *
 * Return: 0 if enumeration completed successfully.
 *
 * CXL devices have optional support for certain commands. This function will
 * determine the set of supported commands for the hardware and update the
 * enabled_cmds bitmap in the @cxlds.
 */
int cxl_enumerate_cmds(struct cxl_dev_state *cxlds)
{
	struct cxl_mbox_get_supported_logs *gsl;
	struct device *dev = cxlds->dev;
	struct cxl_mem_command *cmd;
	int i, rc;

	gsl = cxl_get_gsl(cxlds);
	if (IS_ERR(gsl))
		return PTR_ERR(gsl);

	rc = -ENOENT;
	for (i = 0; i < le16_to_cpu(gsl->entries); i++) {
		u32 size = le32_to_cpu(gsl->entry[i].size);
		uuid_t uuid = gsl->entry[i].uuid;
		u8 *log;

		dev_dbg(dev, "Found LOG type %pU of size %d", &uuid, size);

		if (!uuid_equal(&uuid, &log_uuid[CEL_UUID]))
			continue;

		log = kvmalloc(size, GFP_KERNEL);
		if (!log) {
			rc = -ENOMEM;
			goto out;
		}

		rc = cxl_xfer_log(cxlds, &uuid, size, log);
		if (rc) {
			kvfree(log);
			goto out;
		}

		cxl_walk_cel(cxlds, size, log);
		kvfree(log);

		/* In case CEL was bogus, enable some default commands. */
		cxl_for_each_cmd(cmd)
			if (cmd->flags & CXL_CMD_FLAG_FORCE_ENABLE)
				set_bit(cmd->info.id, cxlds->enabled_cmds);

		/* Found the required CEL */
		rc = 0;
	}
out:
	kvfree(gsl);
	return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, CXL);

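/*
 * Batch event handles from @get_pl into one or more Clear Event Records
 * commands. For example (sketch): with max_handles == 0xff and 300 records
 * returned, two commands are sent, clearing 255 handles and then 45.
 */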
static int cxl_clear_event_record(struct cxl_dev_state *cxlds,
				  enum cxl_event_log_type log,
				  struct cxl_get_event_payload *get_pl)
{
	struct cxl_mbox_clear_event_payload *payload;
	u16 total = le16_to_cpu(get_pl->record_count);
	u8 max_handles = CXL_CLEAR_EVENT_MAX_HANDLES;
	size_t pl_size = struct_size(payload, handles, max_handles);
	struct cxl_mbox_cmd mbox_cmd;
	u16 cnt;
	int rc = 0;
	int i;

	/* Payload size may limit the max handles */
	if (pl_size > cxlds->payload_size) {
		max_handles = (cxlds->payload_size - sizeof(*payload)) /
				sizeof(__le16);
		pl_size = struct_size(payload, handles, max_handles);
	}

	payload = kvzalloc(pl_size, GFP_KERNEL);
	if (!payload)
		return -ENOMEM;

	*payload = (struct cxl_mbox_clear_event_payload) {
		.event_log = log,
	};

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_CLEAR_EVENT_RECORD,
		.payload_in = payload,
		.size_in = pl_size,
	};

	/*
	 * Clear Event Records uses u8 for the handle cnt while Get Event
	 * Record can return up to 0xffff records.
	 */
	i = 0;
	for (cnt = 0; cnt < total; cnt++) {
		payload->handles[i++] = get_pl->records[cnt].hdr.handle;
		dev_dbg(cxlds->dev, "Event log '%d': Clearing %u\n", log,
			le16_to_cpu(payload->handles[i - 1]));

		if (i == max_handles) {
			payload->nr_recs = i;
			rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
			if (rc)
				goto free_pl;
			i = 0;
		}
	}

	/* Clear what is left if any */
	if (i) {
		payload->nr_recs = i;
		mbox_cmd.size_in = struct_size(payload, handles, i);
		rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
		if (rc)
			goto free_pl;
	}

free_pl:
	kvfree(payload);
	return rc;
}

static void cxl_mem_get_records_log(struct cxl_dev_state *cxlds,
				    enum cxl_event_log_type type)
{
	struct cxl_get_event_payload *payload;
	struct cxl_mbox_cmd mbox_cmd;
	u8 log_type = type;
	u16 nr_rec;

	mutex_lock(&cxlds->event.log_lock);
	payload = cxlds->event.buf;

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_GET_EVENT_RECORD,
		.payload_in = &log_type,
		.size_in = sizeof(log_type),
		.payload_out = payload,
		.size_out = cxlds->payload_size,
		.min_out = struct_size(payload, records, 0),
	};

	do {
		int rc, i;

		rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
		if (rc) {
			dev_err_ratelimited(cxlds->dev,
				"Event log '%d': Failed to query event records : %d",
				type, rc);
			break;
		}

		nr_rec = le16_to_cpu(payload->record_count);
		if (!nr_rec)
			break;

		for (i = 0; i < nr_rec; i++)
			trace_cxl_generic_event(cxlds->dev, type,
						&payload->records[i]);

		if (payload->flags & CXL_GET_EVENT_FLAG_OVERFLOW)
			trace_cxl_overflow(cxlds->dev, type, payload);

		rc = cxl_clear_event_record(cxlds, type, payload);
		if (rc) {
			dev_err_ratelimited(cxlds->dev,
				"Event log '%d': Failed to clear events : %d",
				type, rc);
			break;
		}
	} while (nr_rec);

	mutex_unlock(&cxlds->event.log_lock);
}

/**
 * cxl_mem_get_event_records - Get Event Records from the device
 * @cxlds: The device data for the operation
 * @status: Event status register value (mask of CXLDEV_EVENT_STATUS_* flags)
 *
 * Retrieve all event records available on the device, report them as trace
 * events, and clear them.
 *
 * See CXL rev 3.0 @8.2.9.2.2 Get Event Records
 * See CXL rev 3.0 @8.2.9.2.3 Clear Event Records
 */
void cxl_mem_get_event_records(struct cxl_dev_state *cxlds, u32 status)
{
	dev_dbg(cxlds->dev, "Reading event logs: %x\n", status);

	if (status & CXLDEV_EVENT_STATUS_FATAL)
		cxl_mem_get_records_log(cxlds, CXL_EVENT_TYPE_FATAL);
	if (status & CXLDEV_EVENT_STATUS_FAIL)
		cxl_mem_get_records_log(cxlds, CXL_EVENT_TYPE_FAIL);
	if (status & CXLDEV_EVENT_STATUS_WARN)
		cxl_mem_get_records_log(cxlds, CXL_EVENT_TYPE_WARN);
	if (status & CXLDEV_EVENT_STATUS_INFO)
		cxl_mem_get_records_log(cxlds, CXL_EVENT_TYPE_INFO);
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_get_event_records, CXL);

/**
 * cxl_mem_get_partition_info - Get partition info
 * @cxlds: The device data for the operation
 *
 * Retrieve the current partition info for the device specified.  The active
 * values are the current capacity in bytes.  If not 0, the 'next' values are
 * the pending values, in bytes, which take effect on next cold reset.
 *
 * Return: 0 if no error; otherwise the result of the mailbox command.
 *
 * See CXL @8.2.9.5.2.1 Get Partition Info
 */
static int cxl_mem_get_partition_info(struct cxl_dev_state *cxlds)
{
	struct cxl_mbox_get_partition_info pi;
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_GET_PARTITION_INFO,
		.size_out = sizeof(pi),
		.payload_out = &pi,
	};
	rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
	if (rc)
		return rc;

	cxlds->active_volatile_bytes =
		le64_to_cpu(pi.active_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
	cxlds->active_persistent_bytes =
		le64_to_cpu(pi.active_persistent_cap) * CXL_CAPACITY_MULTIPLIER;
	cxlds->next_volatile_bytes =
		le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
	cxlds->next_persistent_bytes =
		le64_to_cpu(pi.next_persistent_cap) * CXL_CAPACITY_MULTIPLIER;

	return 0;
}

/**
 * cxl_dev_state_identify() - Send the IDENTIFY command to the device.
 * @cxlds: The device data for the operation
 *
 * Return: 0 if identify was executed successfully.
 *
 * This will dispatch the identify command to the device and on success populate
 * structures to be exported to sysfs.
 */
int cxl_dev_state_identify(struct cxl_dev_state *cxlds)
{
	/* See CXL 2.0 Table 175 Identify Memory Device Output Payload */
	struct cxl_mbox_identify id;
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_IDENTIFY,
		.size_out = sizeof(id),
		.payload_out = &id,
	};
	rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
	if (rc < 0)
		return rc;

	cxlds->total_bytes =
		le64_to_cpu(id.total_capacity) * CXL_CAPACITY_MULTIPLIER;
	cxlds->volatile_only_bytes =
		le64_to_cpu(id.volatile_capacity) * CXL_CAPACITY_MULTIPLIER;
	cxlds->persistent_only_bytes =
		le64_to_cpu(id.persistent_capacity) * CXL_CAPACITY_MULTIPLIER;
	cxlds->partition_align_bytes =
		le64_to_cpu(id.partition_align) * CXL_CAPACITY_MULTIPLIER;

	cxlds->lsa_size = le32_to_cpu(id.lsa_size);
	memcpy(cxlds->firmware_version, id.fw_revision, sizeof(id.fw_revision));

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, CXL);

static int add_dpa_res(struct device *dev, struct resource *parent,
		       struct resource *res, resource_size_t start,
		       resource_size_t size, const char *type)
{
	int rc;

	res->name = type;
	res->start = start;
	res->end = start + size - 1;
	res->flags = IORESOURCE_MEM;
	if (resource_size(res) == 0) {
		dev_dbg(dev, "DPA(%s): no capacity\n", res->name);
		return 0;
	}
	rc = request_resource(parent, res);
	if (rc) {
		dev_err(dev, "DPA(%s): failed to track %pr (%d)\n", res->name,
			res, rc);
		return rc;
	}

	dev_dbg(dev, "DPA(%s): %pr\n", res->name, res);

	return 0;
}

int cxl_mem_create_range_info(struct cxl_dev_state *cxlds)
{
	struct device *dev = cxlds->dev;
	int rc;

	cxlds->dpa_res =
		(struct resource)DEFINE_RES_MEM(0, cxlds->total_bytes);

	if (cxlds->partition_align_bytes == 0) {
		rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
				 cxlds->volatile_only_bytes, "ram");
		if (rc)
			return rc;
		return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res,
				   cxlds->volatile_only_bytes,
				   cxlds->persistent_only_bytes, "pmem");
	}

	rc = cxl_mem_get_partition_info(cxlds);
	if (rc) {
		dev_err(dev, "Failed to query partition information\n");
		return rc;
	}

	rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
			 cxlds->active_volatile_bytes, "ram");
	if (rc)
		return rc;
	return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res,
			   cxlds->active_volatile_bytes,
			   cxlds->active_persistent_bytes, "pmem");
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_create_range_info, CXL);
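
/*
 * Illustrative result (sketch): for a device with 256MB of volatile-only and
 * 256MB of persistent-only capacity and no partitioning support, the DPA
 * resource tree built above looks like:
 *
 *	dpa:  [mem 0x00000000-0x1fffffff]
 *	ram:  [mem 0x00000000-0x0fffffff]
 *	pmem: [mem 0x10000000-0x1fffffff]
 */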

struct cxl_dev_state *cxl_dev_state_create(struct device *dev)
{
	struct cxl_dev_state *cxlds;

	cxlds = devm_kzalloc(dev, sizeof(*cxlds), GFP_KERNEL);
	if (!cxlds) {
		dev_err(dev, "No memory available\n");
		return ERR_PTR(-ENOMEM);
	}

	mutex_init(&cxlds->mbox_mutex);
	mutex_init(&cxlds->event.log_lock);
	cxlds->dev = dev;

	return cxlds;
}
EXPORT_SYMBOL_NS_GPL(cxl_dev_state_create, CXL);

void __init cxl_mbox_init(void)
{
	struct dentry *mbox_debugfs;

	mbox_debugfs = cxl_debugfs_create_dir("mbox");
	debugfs_create_bool("raw_allow_all", 0600, mbox_debugfs,
			    &cxl_raw_allow_all);
}