Merge branch 'for-6.3/cxl-events' into cxl/next
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index 202d49d..03909b6 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -3,11 +3,13 @@
 #include <linux/io-64-nonatomic-lo-hi.h>
 #include <linux/security.h>
 #include <linux/debugfs.h>
+#include <linux/ktime.h>
 #include <linux/mutex.h>
 #include <cxlmem.h>
 #include <cxl.h>
 
 #include "core.h"
+#include "trace.h"
 
 static bool cxl_raw_allow_all;
 
@@ -737,6 +739,203 @@ out:
 }
 EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, CXL);
 
+/*
+ * General Media Event Record
+ * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43
+ */
+static const uuid_t gen_media_event_uuid =
+       UUID_INIT(0xfbcd0a77, 0xc260, 0x417f,
+                 0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6);
+
+/*
+ * DRAM Event Record
+ * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44
+ */
+static const uuid_t dram_event_uuid =
+       UUID_INIT(0x601dcbb3, 0x9c06, 0x4eab,
+                 0xb8, 0xaf, 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24);
+
+/*
+ * Memory Module Event Record
+ * CXL rev 3.0 section 8.2.9.2.1.3; Table 8-45
+ */
+static const uuid_t mem_mod_event_uuid =
+       UUID_INIT(0xfe927475, 0xdd59, 0x4339,
+                 0xa5, 0x86, 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74);
+
+static void cxl_event_trace_record(const struct device *dev,
+                                  enum cxl_event_log_type type,
+                                  struct cxl_event_record_raw *record)
+{
+       uuid_t *id = &record->hdr.id;
+
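+       /* Dispatch to a typed trace event based on the record UUID */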
+       if (uuid_equal(id, &gen_media_event_uuid)) {
+               struct cxl_event_gen_media *rec =
+                               (struct cxl_event_gen_media *)record;
+
+               trace_cxl_general_media(dev, type, rec);
+       } else if (uuid_equal(id, &dram_event_uuid)) {
+               struct cxl_event_dram *rec = (struct cxl_event_dram *)record;
+
+               trace_cxl_dram(dev, type, rec);
+       } else if (uuid_equal(id, &mem_mod_event_uuid)) {
+               struct cxl_event_mem_module *rec =
+                               (struct cxl_event_mem_module *)record;
+
+               trace_cxl_memory_module(dev, type, rec);
+       } else {
+               /* For unknown record types print just the header */
+               trace_cxl_generic_event(dev, type, record);
+       }
+}
+
+static int cxl_clear_event_record(struct cxl_dev_state *cxlds,
+                                 enum cxl_event_log_type log,
+                                 struct cxl_get_event_payload *get_pl)
+{
+       struct cxl_mbox_clear_event_payload *payload;
+       u16 total = le16_to_cpu(get_pl->record_count);
+       u8 max_handles = CXL_CLEAR_EVENT_MAX_HANDLES;
+       size_t pl_size = struct_size(payload, handles, max_handles);
+       struct cxl_mbox_cmd mbox_cmd;
+       u16 cnt;
+       int rc = 0;
+       int i;
+
+       /* Payload size may limit the max handles */
+       if (pl_size > cxlds->payload_size) {
+               max_handles = (cxlds->payload_size - sizeof(*payload)) /
+                               sizeof(__le16);
+               pl_size = struct_size(payload, handles, max_handles);
+       }
+
+       payload = kvzalloc(pl_size, GFP_KERNEL);
+       if (!payload)
+               return -ENOMEM;
+
+       *payload = (struct cxl_mbox_clear_event_payload) {
+               .event_log = log,
+       };
+
+       mbox_cmd = (struct cxl_mbox_cmd) {
+               .opcode = CXL_MBOX_OP_CLEAR_EVENT_RECORD,
+               .payload_in = payload,
+               .size_in = pl_size,
+       };
+
+       /*
+        * Clear Event Records takes a u8 handle count while Get Event
+        * Records can return up to 0xffff records; clear in batches.
+        */
+       i = 0;
+       for (cnt = 0; cnt < total; cnt++) {
+               payload->handles[i++] = get_pl->records[cnt].hdr.handle;
+               dev_dbg(cxlds->dev, "Event log '%d': Clearing %u\n",
+                       log, le16_to_cpu(payload->handles[i - 1]));
+
+               if (i == max_handles) {
+                       payload->nr_recs = i;
+                       rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+                       if (rc)
+                               goto free_pl;
+                       i = 0;
+               }
+       }
+
+       /* Clear what is left if any */
+       if (i) {
+               payload->nr_recs = i;
+               mbox_cmd.size_in = struct_size(payload, handles, i);
+               rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+               if (rc)
+                       goto free_pl;
+       }
+
+free_pl:
+       kvfree(payload);
+       return rc;
+}
+
+static void cxl_mem_get_records_log(struct cxl_dev_state *cxlds,
+                                   enum cxl_event_log_type type)
+{
+       struct cxl_get_event_payload *payload;
+       struct cxl_mbox_cmd mbox_cmd;
+       u8 log_type = type;
+       u16 nr_rec;
+
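+       /* Serialize use of the shared event buffer with event.log_lock */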
+       mutex_lock(&cxlds->event.log_lock);
+       payload = cxlds->event.buf;
+
+       mbox_cmd = (struct cxl_mbox_cmd) {
+               .opcode = CXL_MBOX_OP_GET_EVENT_RECORD,
+               .payload_in = &log_type,
+               .size_in = sizeof(log_type),
+               .payload_out = payload,
+               .size_out = cxlds->payload_size,
+               .min_out = struct_size(payload, records, 0),
+       };
+
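+       /* Read, trace, and clear records in batches until the log drains */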
+       do {
+               int rc, i;
+
+               rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+               if (rc) {
+                       dev_err_ratelimited(cxlds->dev,
+                               "Event log '%d': Failed to query event records: %d",
+                               type, rc);
+                       break;
+               }
+
+               nr_rec = le16_to_cpu(payload->record_count);
+               if (!nr_rec)
+                       break;
+
+               for (i = 0; i < nr_rec; i++)
+                       cxl_event_trace_record(cxlds->dev, type,
+                                              &payload->records[i]);
+
+               if (payload->flags & CXL_GET_EVENT_FLAG_OVERFLOW)
+                       trace_cxl_overflow(cxlds->dev, type, payload);
+
+               rc = cxl_clear_event_record(cxlds, type, payload);
+               if (rc) {
+                       dev_err_ratelimited(cxlds->dev,
+                               "Event log '%d': Failed to clear events: %d",
+                               type, rc);
+                       break;
+               }
+       } while (nr_rec);
+
+       mutex_unlock(&cxlds->event.log_lock);
+}
+
+/**
+ * cxl_mem_get_event_records - Get Event Records from the device
+ * @cxlds: The device data for the operation
+ * @status: Event Status register value identifying which events are available.
+ *
+ * Retrieve all event records available on the device, report them as trace
+ * events, and clear them.
+ *
+ * See CXL rev 3.0 @8.2.9.2.2 Get Event Records
+ * See CXL rev 3.0 @8.2.9.2.3 Clear Event Records
+ */
+void cxl_mem_get_event_records(struct cxl_dev_state *cxlds, u32 status)
+{
+       dev_dbg(cxlds->dev, "Reading event logs: %x\n", status);
+
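+       /* Process logs from most severe to least severe */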
+       if (status & CXLDEV_EVENT_STATUS_FATAL)
+               cxl_mem_get_records_log(cxlds, CXL_EVENT_TYPE_FATAL);
+       if (status & CXLDEV_EVENT_STATUS_FAIL)
+               cxl_mem_get_records_log(cxlds, CXL_EVENT_TYPE_FAIL);
+       if (status & CXLDEV_EVENT_STATUS_WARN)
+               cxl_mem_get_records_log(cxlds, CXL_EVENT_TYPE_WARN);
+       if (status & CXLDEV_EVENT_STATUS_INFO)
+               cxl_mem_get_records_log(cxlds, CXL_EVENT_TYPE_INFO);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_mem_get_event_records, CXL);
+
 /**
  * cxl_mem_get_partition_info - Get partition info
  * @cxlds: The device data for the operation
@@ -877,6 +1076,32 @@ int cxl_mem_create_range_info(struct cxl_dev_state *cxlds)
 }
 EXPORT_SYMBOL_NS_GPL(cxl_mem_create_range_info, CXL);
 
+int cxl_set_timestamp(struct cxl_dev_state *cxlds)
+{
+       struct cxl_mbox_cmd mbox_cmd;
+       struct cxl_mbox_set_timestamp_in pi;
+       int rc;
+
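+
+       /* Set the device timestamp to the host wall-clock time in ns */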
+       pi.timestamp = cpu_to_le64(ktime_get_real_ns());
+       mbox_cmd = (struct cxl_mbox_cmd) {
+               .opcode = CXL_MBOX_OP_SET_TIMESTAMP,
+               .size_in = sizeof(pi),
+               .payload_in = &pi,
+       };
+
+       rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+       /*
+        * Command is optional. Devices may have another way of providing
+        * a timestamp, or may return all 0s in timestamp fields.
+        * Don't report an error if this command isn't supported.
+        */
+       if (rc && (mbox_cmd.return_code != CXL_MBOX_CMD_RC_UNSUPPORTED))
+               return rc;
+
+       return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_set_timestamp, CXL);
+
 struct cxl_dev_state *cxl_dev_state_create(struct device *dev)
 {
        struct cxl_dev_state *cxlds;
@@ -888,6 +1113,7 @@ struct cxl_dev_state *cxl_dev_state_create(struct device *dev)
        }
 
        mutex_init(&cxlds->mbox_mutex);
+       mutex_init(&cxlds->event.log_lock);
        cxlds->dev = dev;
 
        return cxlds;