Merge tag 'cxl-for-5.15' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl
author    Linus Torvalds <torvalds@linux-foundation.org>
          Thu, 9 Sep 2021 18:48:27 +0000 (11:48 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Thu, 9 Sep 2021 18:48:27 +0000 (11:48 -0700)
Pull CXL (Compute Express Link) updates from Dan Williams:

 - Fix detection of CXL host bridges to filter out disabled ACPI0016
   devices in the ACPI DSDT.

 - Fix kernel lockdown integration to disable raw commands when raw PCI
   access is disabled (a reduced sketch of the check follows the commit
   list below).

 - Fix a broken debug message.

 - Add support for "Get Partition Info", i.e. enumerate the split
   between volatile and persistent capacity on bi-modal CXL memory
   expanders (see the decoding sketch after this list).

 - Refactor the core by subject area. This is a work in progress.

 - Prepare libnvdimm to understand CXL labels in addition to EFI labels.
   This is a work in progress.
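
As a point of reference for the "Get Partition Info" item above, here is a
minimal decoding sketch. It assumes the CXL 2.0 output payload layout (four
little-endian 64-bit capacity fields, each expressed in multiples of 256MB)
and that the caller has already converted the fields from le64; the struct
and helper names here are illustrative, not the driver's:

#include <stdint.h>

/* Capacities in the payload are expressed in 256MB units */
#define CXL_CAPACITY_MULTIPLIER	(256ULL << 20)

/* Assumed layout of the Get Partition Info output payload (CXL 2.0) */
struct cxl_get_partition_info_out {
	uint64_t active_volatile_cap;
	uint64_t active_persistent_cap;
	uint64_t next_volatile_cap;	/* staged, takes effect on reset */
	uint64_t next_persistent_cap;
};

/* Convert the payload into the byte counts backing the ram/pmem ranges */
static inline void cxl_decode_partition_info(
		const struct cxl_get_partition_info_out *pi,
		uint64_t *volatile_bytes, uint64_t *persistent_bytes)
{
	*volatile_bytes = pi->active_volatile_cap * CXL_CAPACITY_MULTIPLIER;
	*persistent_bytes = pi->active_persistent_cap * CXL_CAPACITY_MULTIPLIER;
}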

* tag 'cxl-for-5.15' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl: (25 commits)
  cxl/registers: Fix Documentation warning
  cxl/pmem: Fix Documentation warning
  cxl/uapi: Fix defined but not used warnings
  cxl/pci: Fix debug message in cxl_probe_regs()
  cxl/pci: Fix lockdown level
  cxl/acpi: Do not add DSDT disabled ACPI0016 host bridge ports
  libnvdimm/labels: Add claim class helpers
  libnvdimm/labels: Add type-guid helpers
  libnvdimm/labels: Add blk special cases for nlabel and position helpers
  libnvdimm/labels: Add blk isetcookie set / validation helpers
  libnvdimm/labels: Add a checksum calculation helper
  libnvdimm/labels: Introduce label setter helpers
  libnvdimm/labels: Add isetcookie validation helper
  libnvdimm/labels: Introduce getters for namespace label fields
  cxl/mem: Adjust ram/pmem range to represent DPA ranges
  cxl/mem: Account for partitionable space in ram/pmem ranges
  cxl/pci: Store memory capacity values
  cxl/pci: Simplify register setup
  cxl/pci: Ignore unknown register block types
  cxl/core: Move memdev management to core
  ...
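
For the "cxl/pci: Fix lockdown level" entry above, the substance of the fix
is gating raw mailbox commands on the same lockdown reason as raw PCI
access. A reduced sketch, assuming the driver's helper name; the per-opcode
validation that follows in the real function is elided:

#include <linux/types.h>
#include <linux/kconfig.h>
#include <linux/security.h>

static bool cxl_mem_raw_command_allowed(u16 opcode)
{
	if (!IS_ENABLED(CONFIG_CXL_MEM_RAW_COMMANDS))
		return false;

	/*
	 * The bug was checking security_locked_down(LOCKDOWN_NONE), which
	 * never reports the kernel as locked down; LOCKDOWN_PCI_ACCESS is
	 * the reason that governs raw PCI access.
	 */
	if (security_locked_down(LOCKDOWN_PCI_ACCESS))
		return false;

	/* ... further per-opcode validation elided ... */
	return true;
}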

21 files changed:
Documentation/driver-api/cxl/memory-devices.rst
drivers/cxl/Makefile
drivers/cxl/acpi.c
drivers/cxl/core.c [deleted file]
drivers/cxl/core/Makefile [new file with mode: 0644]
drivers/cxl/core/bus.c [new file with mode: 0644]
drivers/cxl/core/core.h [new file with mode: 0644]
drivers/cxl/core/memdev.c [new file with mode: 0644]
drivers/cxl/core/pmem.c [new file with mode: 0644]
drivers/cxl/core/regs.c [new file with mode: 0644]
drivers/cxl/cxl.h
drivers/cxl/cxlmem.h [new file with mode: 0644]
drivers/cxl/mem.h [deleted file]
drivers/cxl/pci.c
drivers/cxl/pci.h
drivers/cxl/pmem.c
drivers/nvdimm/label.c
drivers/nvdimm/label.h
drivers/nvdimm/namespace_devs.c
drivers/nvdimm/nd.h
include/uapi/linux/cxl_mem.h

diff --git a/Documentation/driver-api/cxl/memory-devices.rst b/Documentation/driver-api/cxl/memory-devices.rst
index 487ce4f..50ebcda 100644
--- a/Documentation/driver-api/cxl/memory-devices.rst
+++ b/Documentation/driver-api/cxl/memory-devices.rst
@@ -36,9 +36,15 @@ CXL Core
 .. kernel-doc:: drivers/cxl/cxl.h
    :internal:
 
-.. kernel-doc:: drivers/cxl/core.c
+.. kernel-doc:: drivers/cxl/core/bus.c
    :doc: cxl core
 
+.. kernel-doc:: drivers/cxl/core/pmem.c
+   :doc: cxl pmem
+
+.. kernel-doc:: drivers/cxl/core/regs.c
+   :doc: cxl registers
+
 External Interfaces
 ===================
 
diff --git a/drivers/cxl/Makefile b/drivers/cxl/Makefile
index 3295405..d1aaabc 100644
--- a/drivers/cxl/Makefile
+++ b/drivers/cxl/Makefile
@@ -1,11 +1,9 @@
 # SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_CXL_BUS) += cxl_core.o
+obj-$(CONFIG_CXL_BUS) += core/
 obj-$(CONFIG_CXL_MEM) += cxl_pci.o
 obj-$(CONFIG_CXL_ACPI) += cxl_acpi.o
 obj-$(CONFIG_CXL_PMEM) += cxl_pmem.o
 
-ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=CXL
-cxl_core-y := core.o
 cxl_pci-y := pci.o
 cxl_acpi-y := acpi.o
 cxl_pmem-y := pmem.o
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
index 8ae8927..54e9d4d 100644
--- a/drivers/cxl/acpi.c
+++ b/drivers/cxl/acpi.c
@@ -243,6 +243,9 @@ static struct acpi_device *to_cxl_host_bridge(struct device *dev)
 {
        struct acpi_device *adev = to_acpi_device(dev);
 
+       if (!acpi_pci_find_root(adev->handle))
+               return NULL;
+
        if (strcmp(acpi_device_hid(adev), "ACPI0016") == 0)
                return adev;
        return NULL;
@@ -266,10 +269,6 @@ static int add_host_bridge_uport(struct device *match, void *arg)
        if (!bridge)
                return 0;
 
-       pci_root = acpi_pci_find_root(bridge->handle);
-       if (!pci_root)
-               return -ENXIO;
-
        dport = find_dport_by_dev(root_port, match);
        if (!dport) {
                dev_dbg(host, "host bridge expected and not found\n");
@@ -282,6 +281,11 @@ static int add_host_bridge_uport(struct device *match, void *arg)
                return PTR_ERR(port);
        dev_dbg(host, "%s: add: %s\n", dev_name(match), dev_name(&port->dev));
 
+       /*
+        * Note that this lookup already succeeded in
+        * to_cxl_host_bridge(), so no need to check for failure here
+        */
+       pci_root = acpi_pci_find_root(bridge->handle);
        ctx = (struct cxl_walk_context){
                .dev = host,
                .root = pci_root->bus,
diff --git a/drivers/cxl/core.c b/drivers/cxl/core.c
deleted file mode 100644
index 2b90b7c..0000000
--- a/drivers/cxl/core.c
+++ /dev/null
@@ -1,1066 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
-#include <linux/io-64-nonatomic-lo-hi.h>
-#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-#include <linux/idr.h>
-#include "cxl.h"
-#include "mem.h"
-
-/**
- * DOC: cxl core
- *
- * The CXL core provides a sysfs hierarchy for control devices and a rendezvous
- * point for cross-device interleave coordination through cxl ports.
- */
-
-static DEFINE_IDA(cxl_port_ida);
-
-static ssize_t devtype_show(struct device *dev, struct device_attribute *attr,
-                           char *buf)
-{
-       return sysfs_emit(buf, "%s\n", dev->type->name);
-}
-static DEVICE_ATTR_RO(devtype);
-
-static struct attribute *cxl_base_attributes[] = {
-       &dev_attr_devtype.attr,
-       NULL,
-};
-
-static struct attribute_group cxl_base_attribute_group = {
-       .attrs = cxl_base_attributes,
-};
-
-static ssize_t start_show(struct device *dev, struct device_attribute *attr,
-                         char *buf)
-{
-       struct cxl_decoder *cxld = to_cxl_decoder(dev);
-
-       return sysfs_emit(buf, "%#llx\n", cxld->range.start);
-}
-static DEVICE_ATTR_RO(start);
-
-static ssize_t size_show(struct device *dev, struct device_attribute *attr,
-                       char *buf)
-{
-       struct cxl_decoder *cxld = to_cxl_decoder(dev);
-
-       return sysfs_emit(buf, "%#llx\n", range_len(&cxld->range));
-}
-static DEVICE_ATTR_RO(size);
-
-#define CXL_DECODER_FLAG_ATTR(name, flag)                            \
-static ssize_t name##_show(struct device *dev,                       \
-                          struct device_attribute *attr, char *buf) \
-{                                                                    \
-       struct cxl_decoder *cxld = to_cxl_decoder(dev);              \
-                                                                     \
-       return sysfs_emit(buf, "%s\n",                               \
-                         (cxld->flags & (flag)) ? "1" : "0");       \
-}                                                                    \
-static DEVICE_ATTR_RO(name)
-
-CXL_DECODER_FLAG_ATTR(cap_pmem, CXL_DECODER_F_PMEM);
-CXL_DECODER_FLAG_ATTR(cap_ram, CXL_DECODER_F_RAM);
-CXL_DECODER_FLAG_ATTR(cap_type2, CXL_DECODER_F_TYPE2);
-CXL_DECODER_FLAG_ATTR(cap_type3, CXL_DECODER_F_TYPE3);
-CXL_DECODER_FLAG_ATTR(locked, CXL_DECODER_F_LOCK);
-
-static ssize_t target_type_show(struct device *dev,
-                               struct device_attribute *attr, char *buf)
-{
-       struct cxl_decoder *cxld = to_cxl_decoder(dev);
-
-       switch (cxld->target_type) {
-       case CXL_DECODER_ACCELERATOR:
-               return sysfs_emit(buf, "accelerator\n");
-       case CXL_DECODER_EXPANDER:
-               return sysfs_emit(buf, "expander\n");
-       }
-       return -ENXIO;
-}
-static DEVICE_ATTR_RO(target_type);
-
-static ssize_t target_list_show(struct device *dev,
-                              struct device_attribute *attr, char *buf)
-{
-       struct cxl_decoder *cxld = to_cxl_decoder(dev);
-       ssize_t offset = 0;
-       int i, rc = 0;
-
-       device_lock(dev);
-       for (i = 0; i < cxld->interleave_ways; i++) {
-               struct cxl_dport *dport = cxld->target[i];
-               struct cxl_dport *next = NULL;
-
-               if (!dport)
-                       break;
-
-               if (i + 1 < cxld->interleave_ways)
-                       next = cxld->target[i + 1];
-               rc = sysfs_emit_at(buf, offset, "%d%s", dport->port_id,
-                                  next ? "," : "");
-               if (rc < 0)
-                       break;
-               offset += rc;
-       }
-       device_unlock(dev);
-
-       if (rc < 0)
-               return rc;
-
-       rc = sysfs_emit_at(buf, offset, "\n");
-       if (rc < 0)
-               return rc;
-
-       return offset + rc;
-}
-static DEVICE_ATTR_RO(target_list);
-
-static struct attribute *cxl_decoder_base_attrs[] = {
-       &dev_attr_start.attr,
-       &dev_attr_size.attr,
-       &dev_attr_locked.attr,
-       &dev_attr_target_list.attr,
-       NULL,
-};
-
-static struct attribute_group cxl_decoder_base_attribute_group = {
-       .attrs = cxl_decoder_base_attrs,
-};
-
-static struct attribute *cxl_decoder_root_attrs[] = {
-       &dev_attr_cap_pmem.attr,
-       &dev_attr_cap_ram.attr,
-       &dev_attr_cap_type2.attr,
-       &dev_attr_cap_type3.attr,
-       NULL,
-};
-
-static struct attribute_group cxl_decoder_root_attribute_group = {
-       .attrs = cxl_decoder_root_attrs,
-};
-
-static const struct attribute_group *cxl_decoder_root_attribute_groups[] = {
-       &cxl_decoder_root_attribute_group,
-       &cxl_decoder_base_attribute_group,
-       &cxl_base_attribute_group,
-       NULL,
-};
-
-static struct attribute *cxl_decoder_switch_attrs[] = {
-       &dev_attr_target_type.attr,
-       NULL,
-};
-
-static struct attribute_group cxl_decoder_switch_attribute_group = {
-       .attrs = cxl_decoder_switch_attrs,
-};
-
-static const struct attribute_group *cxl_decoder_switch_attribute_groups[] = {
-       &cxl_decoder_switch_attribute_group,
-       &cxl_decoder_base_attribute_group,
-       &cxl_base_attribute_group,
-       NULL,
-};
-
-static void cxl_decoder_release(struct device *dev)
-{
-       struct cxl_decoder *cxld = to_cxl_decoder(dev);
-       struct cxl_port *port = to_cxl_port(dev->parent);
-
-       ida_free(&port->decoder_ida, cxld->id);
-       kfree(cxld);
-}
-
-static const struct device_type cxl_decoder_switch_type = {
-       .name = "cxl_decoder_switch",
-       .release = cxl_decoder_release,
-       .groups = cxl_decoder_switch_attribute_groups,
-};
-
-static const struct device_type cxl_decoder_root_type = {
-       .name = "cxl_decoder_root",
-       .release = cxl_decoder_release,
-       .groups = cxl_decoder_root_attribute_groups,
-};
-
-bool is_root_decoder(struct device *dev)
-{
-       return dev->type == &cxl_decoder_root_type;
-}
-EXPORT_SYMBOL_GPL(is_root_decoder);
-
-struct cxl_decoder *to_cxl_decoder(struct device *dev)
-{
-       if (dev_WARN_ONCE(dev, dev->type->release != cxl_decoder_release,
-                         "not a cxl_decoder device\n"))
-               return NULL;
-       return container_of(dev, struct cxl_decoder, dev);
-}
-EXPORT_SYMBOL_GPL(to_cxl_decoder);
-
-static void cxl_dport_release(struct cxl_dport *dport)
-{
-       list_del(&dport->list);
-       put_device(dport->dport);
-       kfree(dport);
-}
-
-static void cxl_port_release(struct device *dev)
-{
-       struct cxl_port *port = to_cxl_port(dev);
-       struct cxl_dport *dport, *_d;
-
-       device_lock(dev);
-       list_for_each_entry_safe(dport, _d, &port->dports, list)
-               cxl_dport_release(dport);
-       device_unlock(dev);
-       ida_free(&cxl_port_ida, port->id);
-       kfree(port);
-}
-
-static const struct attribute_group *cxl_port_attribute_groups[] = {
-       &cxl_base_attribute_group,
-       NULL,
-};
-
-static const struct device_type cxl_port_type = {
-       .name = "cxl_port",
-       .release = cxl_port_release,
-       .groups = cxl_port_attribute_groups,
-};
-
-struct cxl_port *to_cxl_port(struct device *dev)
-{
-       if (dev_WARN_ONCE(dev, dev->type != &cxl_port_type,
-                         "not a cxl_port device\n"))
-               return NULL;
-       return container_of(dev, struct cxl_port, dev);
-}
-
-static void unregister_port(void *_port)
-{
-       struct cxl_port *port = _port;
-       struct cxl_dport *dport;
-
-       device_lock(&port->dev);
-       list_for_each_entry(dport, &port->dports, list) {
-               char link_name[CXL_TARGET_STRLEN];
-
-               if (snprintf(link_name, CXL_TARGET_STRLEN, "dport%d",
-                            dport->port_id) >= CXL_TARGET_STRLEN)
-                       continue;
-               sysfs_remove_link(&port->dev.kobj, link_name);
-       }
-       device_unlock(&port->dev);
-       device_unregister(&port->dev);
-}
-
-static void cxl_unlink_uport(void *_port)
-{
-       struct cxl_port *port = _port;
-
-       sysfs_remove_link(&port->dev.kobj, "uport");
-}
-
-static int devm_cxl_link_uport(struct device *host, struct cxl_port *port)
-{
-       int rc;
-
-       rc = sysfs_create_link(&port->dev.kobj, &port->uport->kobj, "uport");
-       if (rc)
-               return rc;
-       return devm_add_action_or_reset(host, cxl_unlink_uport, port);
-}
-
-static struct cxl_port *cxl_port_alloc(struct device *uport,
-                                      resource_size_t component_reg_phys,
-                                      struct cxl_port *parent_port)
-{
-       struct cxl_port *port;
-       struct device *dev;
-       int rc;
-
-       port = kzalloc(sizeof(*port), GFP_KERNEL);
-       if (!port)
-               return ERR_PTR(-ENOMEM);
-
-       rc = ida_alloc(&cxl_port_ida, GFP_KERNEL);
-       if (rc < 0)
-               goto err;
-       port->id = rc;
-
-       /*
-        * The top-level cxl_port "cxl_root" does not have a cxl_port as
-        * its parent and it does not have any corresponding component
-        * registers as its decode is described by a fixed platform
-        * description.
-        */
-       dev = &port->dev;
-       if (parent_port)
-               dev->parent = &parent_port->dev;
-       else
-               dev->parent = uport;
-
-       port->uport = uport;
-       port->component_reg_phys = component_reg_phys;
-       ida_init(&port->decoder_ida);
-       INIT_LIST_HEAD(&port->dports);
-
-       device_initialize(dev);
-       device_set_pm_not_required(dev);
-       dev->bus = &cxl_bus_type;
-       dev->type = &cxl_port_type;
-
-       return port;
-
-err:
-       kfree(port);
-       return ERR_PTR(rc);
-}
-
-/**
- * devm_cxl_add_port - register a cxl_port in CXL memory decode hierarchy
- * @host: host device for devm operations
- * @uport: "physical" device implementing this upstream port
- * @component_reg_phys: (optional) for configurable cxl_port instances
- * @parent_port: next hop up in the CXL memory decode hierarchy
- */
-struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
-                                  resource_size_t component_reg_phys,
-                                  struct cxl_port *parent_port)
-{
-       struct cxl_port *port;
-       struct device *dev;
-       int rc;
-
-       port = cxl_port_alloc(uport, component_reg_phys, parent_port);
-       if (IS_ERR(port))
-               return port;
-
-       dev = &port->dev;
-       if (parent_port)
-               rc = dev_set_name(dev, "port%d", port->id);
-       else
-               rc = dev_set_name(dev, "root%d", port->id);
-       if (rc)
-               goto err;
-
-       rc = device_add(dev);
-       if (rc)
-               goto err;
-
-       rc = devm_add_action_or_reset(host, unregister_port, port);
-       if (rc)
-               return ERR_PTR(rc);
-
-       rc = devm_cxl_link_uport(host, port);
-       if (rc)
-               return ERR_PTR(rc);
-
-       return port;
-
-err:
-       put_device(dev);
-       return ERR_PTR(rc);
-}
-EXPORT_SYMBOL_GPL(devm_cxl_add_port);
-
-static struct cxl_dport *find_dport(struct cxl_port *port, int id)
-{
-       struct cxl_dport *dport;
-
-       device_lock_assert(&port->dev);
-       list_for_each_entry (dport, &port->dports, list)
-               if (dport->port_id == id)
-                       return dport;
-       return NULL;
-}
-
-static int add_dport(struct cxl_port *port, struct cxl_dport *new)
-{
-       struct cxl_dport *dup;
-
-       device_lock(&port->dev);
-       dup = find_dport(port, new->port_id);
-       if (dup)
-               dev_err(&port->dev,
-                       "unable to add dport%d-%s non-unique port id (%s)\n",
-                       new->port_id, dev_name(new->dport),
-                       dev_name(dup->dport));
-       else
-               list_add_tail(&new->list, &port->dports);
-       device_unlock(&port->dev);
-
-       return dup ? -EEXIST : 0;
-}
-
-/**
- * cxl_add_dport - append downstream port data to a cxl_port
- * @port: the cxl_port that references this dport
- * @dport_dev: firmware or PCI device representing the dport
- * @port_id: identifier for this dport in a decoder's target list
- * @component_reg_phys: optional location of CXL component registers
- *
- * Note that all allocations and links are undone by cxl_port deletion
- * and release.
- */
-int cxl_add_dport(struct cxl_port *port, struct device *dport_dev, int port_id,
-                 resource_size_t component_reg_phys)
-{
-       char link_name[CXL_TARGET_STRLEN];
-       struct cxl_dport *dport;
-       int rc;
-
-       if (snprintf(link_name, CXL_TARGET_STRLEN, "dport%d", port_id) >=
-           CXL_TARGET_STRLEN)
-               return -EINVAL;
-
-       dport = kzalloc(sizeof(*dport), GFP_KERNEL);
-       if (!dport)
-               return -ENOMEM;
-
-       INIT_LIST_HEAD(&dport->list);
-       dport->dport = get_device(dport_dev);
-       dport->port_id = port_id;
-       dport->component_reg_phys = component_reg_phys;
-       dport->port = port;
-
-       rc = add_dport(port, dport);
-       if (rc)
-               goto err;
-
-       rc = sysfs_create_link(&port->dev.kobj, &dport_dev->kobj, link_name);
-       if (rc)
-               goto err;
-
-       return 0;
-err:
-       cxl_dport_release(dport);
-       return rc;
-}
-EXPORT_SYMBOL_GPL(cxl_add_dport);
-
-static struct cxl_decoder *
-cxl_decoder_alloc(struct cxl_port *port, int nr_targets, resource_size_t base,
-                 resource_size_t len, int interleave_ways,
-                 int interleave_granularity, enum cxl_decoder_type type,
-                 unsigned long flags)
-{
-       struct cxl_decoder *cxld;
-       struct device *dev;
-       int rc = 0;
-
-       if (interleave_ways < 1)
-               return ERR_PTR(-EINVAL);
-
-       device_lock(&port->dev);
-       if (list_empty(&port->dports))
-               rc = -EINVAL;
-       device_unlock(&port->dev);
-       if (rc)
-               return ERR_PTR(rc);
-
-       cxld = kzalloc(struct_size(cxld, target, nr_targets), GFP_KERNEL);
-       if (!cxld)
-               return ERR_PTR(-ENOMEM);
-
-       rc = ida_alloc(&port->decoder_ida, GFP_KERNEL);
-       if (rc < 0)
-               goto err;
-
-       *cxld = (struct cxl_decoder) {
-               .id = rc,
-               .range = {
-                       .start = base,
-                       .end = base + len - 1,
-               },
-               .flags = flags,
-               .interleave_ways = interleave_ways,
-               .interleave_granularity = interleave_granularity,
-               .target_type = type,
-       };
-
-       /* handle implied target_list */
-       if (interleave_ways == 1)
-               cxld->target[0] =
-                       list_first_entry(&port->dports, struct cxl_dport, list);
-       dev = &cxld->dev;
-       device_initialize(dev);
-       device_set_pm_not_required(dev);
-       dev->parent = &port->dev;
-       dev->bus = &cxl_bus_type;
-
-       /* root ports do not have a cxl_port_type parent */
-       if (port->dev.parent->type == &cxl_port_type)
-               dev->type = &cxl_decoder_switch_type;
-       else
-               dev->type = &cxl_decoder_root_type;
-
-       return cxld;
-err:
-       kfree(cxld);
-       return ERR_PTR(rc);
-}
-
-static void unregister_dev(void *dev)
-{
-       device_unregister(dev);
-}
-
-struct cxl_decoder *
-devm_cxl_add_decoder(struct device *host, struct cxl_port *port, int nr_targets,
-                    resource_size_t base, resource_size_t len,
-                    int interleave_ways, int interleave_granularity,
-                    enum cxl_decoder_type type, unsigned long flags)
-{
-       struct cxl_decoder *cxld;
-       struct device *dev;
-       int rc;
-
-       cxld = cxl_decoder_alloc(port, nr_targets, base, len, interleave_ways,
-                                interleave_granularity, type, flags);
-       if (IS_ERR(cxld))
-               return cxld;
-
-       dev = &cxld->dev;
-       rc = dev_set_name(dev, "decoder%d.%d", port->id, cxld->id);
-       if (rc)
-               goto err;
-
-       rc = device_add(dev);
-       if (rc)
-               goto err;
-
-       rc = devm_add_action_or_reset(host, unregister_dev, dev);
-       if (rc)
-               return ERR_PTR(rc);
-       return cxld;
-
-err:
-       put_device(dev);
-       return ERR_PTR(rc);
-}
-EXPORT_SYMBOL_GPL(devm_cxl_add_decoder);
-
-/**
- * cxl_probe_component_regs() - Detect CXL Component register blocks
- * @dev: Host device of the @base mapping
- * @base: Mapping containing the HDM Decoder Capability Header
- * @map: Map object describing the register block information found
- *
- * See CXL 2.0 8.2.4 Component Register Layout and Definition
- * See CXL 2.0 8.2.5.5 CXL Device Register Interface
- *
- * Probe for component register information and return it in map object.
- */
-void cxl_probe_component_regs(struct device *dev, void __iomem *base,
-                             struct cxl_component_reg_map *map)
-{
-       int cap, cap_count;
-       u64 cap_array;
-
-       *map = (struct cxl_component_reg_map) { 0 };
-
-       /*
-        * CXL.cache and CXL.mem registers are at offset 0x1000 as defined in
-        * CXL 2.0 8.2.4 Table 141.
-        */
-       base += CXL_CM_OFFSET;
-
-       cap_array = readq(base + CXL_CM_CAP_HDR_OFFSET);
-
-       if (FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, cap_array) != CM_CAP_HDR_CAP_ID) {
-               dev_err(dev,
-                       "Couldn't locate the CXL.cache and CXL.mem capability array header./n");
-               return;
-       }
-
-       /* It's assumed that future versions will be backward compatible */
-       cap_count = FIELD_GET(CXL_CM_CAP_HDR_ARRAY_SIZE_MASK, cap_array);
-
-       for (cap = 1; cap <= cap_count; cap++) {
-               void __iomem *register_block;
-               u32 hdr;
-               int decoder_cnt;
-               u16 cap_id, offset;
-               u32 length;
-
-               hdr = readl(base + cap * 0x4);
-
-               cap_id = FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, hdr);
-               offset = FIELD_GET(CXL_CM_CAP_PTR_MASK, hdr);
-               register_block = base + offset;
-
-               switch (cap_id) {
-               case CXL_CM_CAP_CAP_ID_HDM:
-                       dev_dbg(dev, "found HDM decoder capability (0x%x)\n",
-                               offset);
-
-                       hdr = readl(register_block);
-
-                       decoder_cnt = cxl_hdm_decoder_count(hdr);
-                       length = 0x20 * decoder_cnt + 0x10;
-
-                       map->hdm_decoder.valid = true;
-                       map->hdm_decoder.offset = CXL_CM_OFFSET + offset;
-                       map->hdm_decoder.size = length;
-                       break;
-               default:
-                       dev_dbg(dev, "Unknown CM cap ID: %d (0x%x)\n", cap_id,
-                               offset);
-                       break;
-               }
-       }
-}
-EXPORT_SYMBOL_GPL(cxl_probe_component_regs);
-
-static void cxl_nvdimm_bridge_release(struct device *dev)
-{
-       struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);
-
-       kfree(cxl_nvb);
-}
-
-static const struct attribute_group *cxl_nvdimm_bridge_attribute_groups[] = {
-       &cxl_base_attribute_group,
-       NULL,
-};
-
-static const struct device_type cxl_nvdimm_bridge_type = {
-       .name = "cxl_nvdimm_bridge",
-       .release = cxl_nvdimm_bridge_release,
-       .groups = cxl_nvdimm_bridge_attribute_groups,
-};
-
-struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev)
-{
-       if (dev_WARN_ONCE(dev, dev->type != &cxl_nvdimm_bridge_type,
-                         "not a cxl_nvdimm_bridge device\n"))
-               return NULL;
-       return container_of(dev, struct cxl_nvdimm_bridge, dev);
-}
-EXPORT_SYMBOL_GPL(to_cxl_nvdimm_bridge);
-
-static struct cxl_nvdimm_bridge *
-cxl_nvdimm_bridge_alloc(struct cxl_port *port)
-{
-       struct cxl_nvdimm_bridge *cxl_nvb;
-       struct device *dev;
-
-       cxl_nvb = kzalloc(sizeof(*cxl_nvb), GFP_KERNEL);
-       if (!cxl_nvb)
-               return ERR_PTR(-ENOMEM);
-
-       dev = &cxl_nvb->dev;
-       cxl_nvb->port = port;
-       cxl_nvb->state = CXL_NVB_NEW;
-       device_initialize(dev);
-       device_set_pm_not_required(dev);
-       dev->parent = &port->dev;
-       dev->bus = &cxl_bus_type;
-       dev->type = &cxl_nvdimm_bridge_type;
-
-       return cxl_nvb;
-}
-
-static void unregister_nvb(void *_cxl_nvb)
-{
-       struct cxl_nvdimm_bridge *cxl_nvb = _cxl_nvb;
-       bool flush;
-
-       /*
-        * If the bridge was ever activated then there might be in-flight state
-        * work to flush. Once the state has been changed to 'dead' then no new
-        * work can be queued by user-triggered bind.
-        */
-       device_lock(&cxl_nvb->dev);
-       flush = cxl_nvb->state != CXL_NVB_NEW;
-       cxl_nvb->state = CXL_NVB_DEAD;
-       device_unlock(&cxl_nvb->dev);
-
-       /*
-        * Even though the device core will trigger device_release_driver()
-        * before the unregister, it does not know about the fact that
-        * cxl_nvdimm_bridge_driver defers ->remove() work. So, do the driver
-        * release not and flush it before tearing down the nvdimm device
-        * hierarchy.
-        */
-       device_release_driver(&cxl_nvb->dev);
-       if (flush)
-               flush_work(&cxl_nvb->state_work);
-       device_unregister(&cxl_nvb->dev);
-}
-
-struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
-                                                    struct cxl_port *port)
-{
-       struct cxl_nvdimm_bridge *cxl_nvb;
-       struct device *dev;
-       int rc;
-
-       if (!IS_ENABLED(CONFIG_CXL_PMEM))
-               return ERR_PTR(-ENXIO);
-
-       cxl_nvb = cxl_nvdimm_bridge_alloc(port);
-       if (IS_ERR(cxl_nvb))
-               return cxl_nvb;
-
-       dev = &cxl_nvb->dev;
-       rc = dev_set_name(dev, "nvdimm-bridge");
-       if (rc)
-               goto err;
-
-       rc = device_add(dev);
-       if (rc)
-               goto err;
-
-       rc = devm_add_action_or_reset(host, unregister_nvb, cxl_nvb);
-       if (rc)
-               return ERR_PTR(rc);
-
-       return cxl_nvb;
-
-err:
-       put_device(dev);
-       return ERR_PTR(rc);
-}
-EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm_bridge);
-
-static void cxl_nvdimm_release(struct device *dev)
-{
-       struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
-
-       kfree(cxl_nvd);
-}
-
-static const struct attribute_group *cxl_nvdimm_attribute_groups[] = {
-       &cxl_base_attribute_group,
-       NULL,
-};
-
-static const struct device_type cxl_nvdimm_type = {
-       .name = "cxl_nvdimm",
-       .release = cxl_nvdimm_release,
-       .groups = cxl_nvdimm_attribute_groups,
-};
-
-bool is_cxl_nvdimm(struct device *dev)
-{
-       return dev->type == &cxl_nvdimm_type;
-}
-EXPORT_SYMBOL_GPL(is_cxl_nvdimm);
-
-struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev)
-{
-       if (dev_WARN_ONCE(dev, !is_cxl_nvdimm(dev),
-                         "not a cxl_nvdimm device\n"))
-               return NULL;
-       return container_of(dev, struct cxl_nvdimm, dev);
-}
-EXPORT_SYMBOL_GPL(to_cxl_nvdimm);
-
-static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd)
-{
-       struct cxl_nvdimm *cxl_nvd;
-       struct device *dev;
-
-       cxl_nvd = kzalloc(sizeof(*cxl_nvd), GFP_KERNEL);
-       if (!cxl_nvd)
-               return ERR_PTR(-ENOMEM);
-
-       dev = &cxl_nvd->dev;
-       cxl_nvd->cxlmd = cxlmd;
-       device_initialize(dev);
-       device_set_pm_not_required(dev);
-       dev->parent = &cxlmd->dev;
-       dev->bus = &cxl_bus_type;
-       dev->type = &cxl_nvdimm_type;
-
-       return cxl_nvd;
-}
-
-int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd)
-{
-       struct cxl_nvdimm *cxl_nvd;
-       struct device *dev;
-       int rc;
-
-       cxl_nvd = cxl_nvdimm_alloc(cxlmd);
-       if (IS_ERR(cxl_nvd))
-               return PTR_ERR(cxl_nvd);
-
-       dev = &cxl_nvd->dev;
-       rc = dev_set_name(dev, "pmem%d", cxlmd->id);
-       if (rc)
-               goto err;
-
-       rc = device_add(dev);
-       if (rc)
-               goto err;
-
-       dev_dbg(host, "%s: register %s\n", dev_name(dev->parent),
-               dev_name(dev));
-
-       return devm_add_action_or_reset(host, unregister_dev, dev);
-
-err:
-       put_device(dev);
-       return rc;
-}
-EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm);
-
-/**
- * cxl_probe_device_regs() - Detect CXL Device register blocks
- * @dev: Host device of the @base mapping
- * @base: Mapping of CXL 2.0 8.2.8 CXL Device Register Interface
- * @map: Map object describing the register block information found
- *
- * Probe for device register information and return it in map object.
- */
-void cxl_probe_device_regs(struct device *dev, void __iomem *base,
-                          struct cxl_device_reg_map *map)
-{
-       int cap, cap_count;
-       u64 cap_array;
-
-       *map = (struct cxl_device_reg_map){ 0 };
-
-       cap_array = readq(base + CXLDEV_CAP_ARRAY_OFFSET);
-       if (FIELD_GET(CXLDEV_CAP_ARRAY_ID_MASK, cap_array) !=
-           CXLDEV_CAP_ARRAY_CAP_ID)
-               return;
-
-       cap_count = FIELD_GET(CXLDEV_CAP_ARRAY_COUNT_MASK, cap_array);
-
-       for (cap = 1; cap <= cap_count; cap++) {
-               u32 offset, length;
-               u16 cap_id;
-
-               cap_id = FIELD_GET(CXLDEV_CAP_HDR_CAP_ID_MASK,
-                                  readl(base + cap * 0x10));
-               offset = readl(base + cap * 0x10 + 0x4);
-               length = readl(base + cap * 0x10 + 0x8);
-
-               switch (cap_id) {
-               case CXLDEV_CAP_CAP_ID_DEVICE_STATUS:
-                       dev_dbg(dev, "found Status capability (0x%x)\n", offset);
-
-                       map->status.valid = true;
-                       map->status.offset = offset;
-                       map->status.size = length;
-                       break;
-               case CXLDEV_CAP_CAP_ID_PRIMARY_MAILBOX:
-                       dev_dbg(dev, "found Mailbox capability (0x%x)\n", offset);
-                       map->mbox.valid = true;
-                       map->mbox.offset = offset;
-                       map->mbox.size = length;
-                       break;
-               case CXLDEV_CAP_CAP_ID_SECONDARY_MAILBOX:
-                       dev_dbg(dev, "found Secondary Mailbox capability (0x%x)\n", offset);
-                       break;
-               case CXLDEV_CAP_CAP_ID_MEMDEV:
-                       dev_dbg(dev, "found Memory Device capability (0x%x)\n", offset);
-                       map->memdev.valid = true;
-                       map->memdev.offset = offset;
-                       map->memdev.size = length;
-                       break;
-               default:
-                       if (cap_id >= 0x8000)
-                               dev_dbg(dev, "Vendor cap ID: %#x offset: %#x\n", cap_id, offset);
-                       else
-                               dev_dbg(dev, "Unknown cap ID: %#x offset: %#x\n", cap_id, offset);
-                       break;
-               }
-       }
-}
-EXPORT_SYMBOL_GPL(cxl_probe_device_regs);
-
-static void __iomem *devm_cxl_iomap_block(struct device *dev,
-                                         resource_size_t addr,
-                                         resource_size_t length)
-{
-       void __iomem *ret_val;
-       struct resource *res;
-
-       res = devm_request_mem_region(dev, addr, length, dev_name(dev));
-       if (!res) {
-               resource_size_t end = addr + length - 1;
-
-               dev_err(dev, "Failed to request region %pa-%pa\n", &addr, &end);
-               return NULL;
-       }
-
-       ret_val = devm_ioremap(dev, addr, length);
-       if (!ret_val)
-               dev_err(dev, "Failed to map region %pr\n", res);
-
-       return ret_val;
-}
-
-int cxl_map_component_regs(struct pci_dev *pdev,
-                          struct cxl_component_regs *regs,
-                          struct cxl_register_map *map)
-{
-       struct device *dev = &pdev->dev;
-       resource_size_t phys_addr;
-       resource_size_t length;
-
-       phys_addr = pci_resource_start(pdev, map->barno);
-       phys_addr += map->block_offset;
-
-       phys_addr += map->component_map.hdm_decoder.offset;
-       length = map->component_map.hdm_decoder.size;
-       regs->hdm_decoder = devm_cxl_iomap_block(dev, phys_addr, length);
-       if (!regs->hdm_decoder)
-               return -ENOMEM;
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(cxl_map_component_regs);
-
-int cxl_map_device_regs(struct pci_dev *pdev,
-                       struct cxl_device_regs *regs,
-                       struct cxl_register_map *map)
-{
-       struct device *dev = &pdev->dev;
-       resource_size_t phys_addr;
-
-       phys_addr = pci_resource_start(pdev, map->barno);
-       phys_addr += map->block_offset;
-
-       if (map->device_map.status.valid) {
-               resource_size_t addr;
-               resource_size_t length;
-
-               addr = phys_addr + map->device_map.status.offset;
-               length = map->device_map.status.size;
-               regs->status = devm_cxl_iomap_block(dev, addr, length);
-               if (!regs->status)
-                       return -ENOMEM;
-       }
-
-       if (map->device_map.mbox.valid) {
-               resource_size_t addr;
-               resource_size_t length;
-
-               addr = phys_addr + map->device_map.mbox.offset;
-               length = map->device_map.mbox.size;
-               regs->mbox = devm_cxl_iomap_block(dev, addr, length);
-               if (!regs->mbox)
-                       return -ENOMEM;
-       }
-
-       if (map->device_map.memdev.valid) {
-               resource_size_t addr;
-               resource_size_t length;
-
-               addr = phys_addr + map->device_map.memdev.offset;
-               length = map->device_map.memdev.size;
-               regs->memdev = devm_cxl_iomap_block(dev, addr, length);
-               if (!regs->memdev)
-                       return -ENOMEM;
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(cxl_map_device_regs);
-
-/**
- * __cxl_driver_register - register a driver for the cxl bus
- * @cxl_drv: cxl driver structure to attach
- * @owner: owning module/driver
- * @modname: KBUILD_MODNAME for parent driver
- */
-int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner,
-                         const char *modname)
-{
-       if (!cxl_drv->probe) {
-               pr_debug("%s ->probe() must be specified\n", modname);
-               return -EINVAL;
-       }
-
-       if (!cxl_drv->name) {
-               pr_debug("%s ->name must be specified\n", modname);
-               return -EINVAL;
-       }
-
-       if (!cxl_drv->id) {
-               pr_debug("%s ->id must be specified\n", modname);
-               return -EINVAL;
-       }
-
-       cxl_drv->drv.bus = &cxl_bus_type;
-       cxl_drv->drv.owner = owner;
-       cxl_drv->drv.mod_name = modname;
-       cxl_drv->drv.name = cxl_drv->name;
-
-       return driver_register(&cxl_drv->drv);
-}
-EXPORT_SYMBOL_GPL(__cxl_driver_register);
-
-void cxl_driver_unregister(struct cxl_driver *cxl_drv)
-{
-       driver_unregister(&cxl_drv->drv);
-}
-EXPORT_SYMBOL_GPL(cxl_driver_unregister);
-
-static int cxl_device_id(struct device *dev)
-{
-       if (dev->type == &cxl_nvdimm_bridge_type)
-               return CXL_DEVICE_NVDIMM_BRIDGE;
-       if (dev->type == &cxl_nvdimm_type)
-               return CXL_DEVICE_NVDIMM;
-       return 0;
-}
-
-static int cxl_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
-{
-       return add_uevent_var(env, "MODALIAS=" CXL_MODALIAS_FMT,
-                             cxl_device_id(dev));
-}
-
-static int cxl_bus_match(struct device *dev, struct device_driver *drv)
-{
-       return cxl_device_id(dev) == to_cxl_drv(drv)->id;
-}
-
-static int cxl_bus_probe(struct device *dev)
-{
-       return to_cxl_drv(dev->driver)->probe(dev);
-}
-
-static void cxl_bus_remove(struct device *dev)
-{
-       struct cxl_driver *cxl_drv = to_cxl_drv(dev->driver);
-
-       if (cxl_drv->remove)
-               cxl_drv->remove(dev);
-}
-
-struct bus_type cxl_bus_type = {
-       .name = "cxl",
-       .uevent = cxl_bus_uevent,
-       .match = cxl_bus_match,
-       .probe = cxl_bus_probe,
-       .remove = cxl_bus_remove,
-};
-EXPORT_SYMBOL_GPL(cxl_bus_type);
-
-static __init int cxl_core_init(void)
-{
-       return bus_register(&cxl_bus_type);
-}
-
-static void cxl_core_exit(void)
-{
-       bus_unregister(&cxl_bus_type);
-}
-
-module_init(cxl_core_init);
-module_exit(cxl_core_exit);
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/cxl/core/Makefile b/drivers/cxl/core/Makefile
new file mode 100644
index 0000000..0fdbf3c
--- /dev/null
+++ b/drivers/cxl/core/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CXL_BUS) += cxl_core.o
+
+ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=CXL -I$(srctree)/drivers/cxl
+cxl_core-y := bus.o
+cxl_core-y += pmem.o
+cxl_core-y += regs.o
+cxl_core-y += memdev.o
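
Note that -DDEFAULT_SYMBOL_NAMESPACE=CXL now applies only to objects built
under core/. The effect (standard kernel behavior, shown here for
illustration) is that every plain export in the core lands in the CXL
symbol namespace, and consumer modules must import it:

/* Inside core/, these two exports are equivalent, so only the first
 * (implicitly namespaced) form appears in the sources: */
EXPORT_SYMBOL_GPL(cxl_bus_type);
/* EXPORT_SYMBOL_NS_GPL(cxl_bus_type, CXL); */

/* In a consumer module such as cxl_pci: */
MODULE_IMPORT_NS(CXL);
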
diff --git a/drivers/cxl/core/bus.c b/drivers/cxl/core/bus.c
new file mode 100644
index 0000000..267d804
--- /dev/null
+++ b/drivers/cxl/core/bus.c
@@ -0,0 +1,660 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/idr.h>
+#include <cxlmem.h>
+#include <cxl.h>
+#include "core.h"
+
+/**
+ * DOC: cxl core
+ *
+ * The CXL core provides a set of interfaces that can be consumed by CXL aware
+ * drivers. The interfaces allow for creation, modification, and destruction of
+ * regions, memory devices, ports, and decoders. CXL aware drivers must register
+ * with the CXL core via these interfaces in order to be able to participate in
+ * cross-device interleave coordination. The CXL core also establishes and
+ * maintains the bridge to the nvdimm subsystem.
+ *
+ * CXL core introduces sysfs hierarchy to control the devices that are
+ * instantiated by the core.
+ */
+
+static DEFINE_IDA(cxl_port_ida);
+
+static ssize_t devtype_show(struct device *dev, struct device_attribute *attr,
+                           char *buf)
+{
+       return sysfs_emit(buf, "%s\n", dev->type->name);
+}
+static DEVICE_ATTR_RO(devtype);
+
+static struct attribute *cxl_base_attributes[] = {
+       &dev_attr_devtype.attr,
+       NULL,
+};
+
+struct attribute_group cxl_base_attribute_group = {
+       .attrs = cxl_base_attributes,
+};
+
+static ssize_t start_show(struct device *dev, struct device_attribute *attr,
+                         char *buf)
+{
+       struct cxl_decoder *cxld = to_cxl_decoder(dev);
+
+       return sysfs_emit(buf, "%#llx\n", cxld->range.start);
+}
+static DEVICE_ATTR_RO(start);
+
+static ssize_t size_show(struct device *dev, struct device_attribute *attr,
+                       char *buf)
+{
+       struct cxl_decoder *cxld = to_cxl_decoder(dev);
+
+       return sysfs_emit(buf, "%#llx\n", range_len(&cxld->range));
+}
+static DEVICE_ATTR_RO(size);
+
+#define CXL_DECODER_FLAG_ATTR(name, flag)                            \
+static ssize_t name##_show(struct device *dev,                       \
+                          struct device_attribute *attr, char *buf) \
+{                                                                    \
+       struct cxl_decoder *cxld = to_cxl_decoder(dev);              \
+                                                                     \
+       return sysfs_emit(buf, "%s\n",                               \
+                         (cxld->flags & (flag)) ? "1" : "0");       \
+}                                                                    \
+static DEVICE_ATTR_RO(name)
+
+CXL_DECODER_FLAG_ATTR(cap_pmem, CXL_DECODER_F_PMEM);
+CXL_DECODER_FLAG_ATTR(cap_ram, CXL_DECODER_F_RAM);
+CXL_DECODER_FLAG_ATTR(cap_type2, CXL_DECODER_F_TYPE2);
+CXL_DECODER_FLAG_ATTR(cap_type3, CXL_DECODER_F_TYPE3);
+CXL_DECODER_FLAG_ATTR(locked, CXL_DECODER_F_LOCK);
+
+static ssize_t target_type_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct cxl_decoder *cxld = to_cxl_decoder(dev);
+
+       switch (cxld->target_type) {
+       case CXL_DECODER_ACCELERATOR:
+               return sysfs_emit(buf, "accelerator\n");
+       case CXL_DECODER_EXPANDER:
+               return sysfs_emit(buf, "expander\n");
+       }
+       return -ENXIO;
+}
+static DEVICE_ATTR_RO(target_type);
+
+static ssize_t target_list_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
+{
+       struct cxl_decoder *cxld = to_cxl_decoder(dev);
+       ssize_t offset = 0;
+       int i, rc = 0;
+
+       device_lock(dev);
+       for (i = 0; i < cxld->interleave_ways; i++) {
+               struct cxl_dport *dport = cxld->target[i];
+               struct cxl_dport *next = NULL;
+
+               if (!dport)
+                       break;
+
+               if (i + 1 < cxld->interleave_ways)
+                       next = cxld->target[i + 1];
+               rc = sysfs_emit_at(buf, offset, "%d%s", dport->port_id,
+                                  next ? "," : "");
+               if (rc < 0)
+                       break;
+               offset += rc;
+       }
+       device_unlock(dev);
+
+       if (rc < 0)
+               return rc;
+
+       rc = sysfs_emit_at(buf, offset, "\n");
+       if (rc < 0)
+               return rc;
+
+       return offset + rc;
+}
+static DEVICE_ATTR_RO(target_list);
+
+static struct attribute *cxl_decoder_base_attrs[] = {
+       &dev_attr_start.attr,
+       &dev_attr_size.attr,
+       &dev_attr_locked.attr,
+       &dev_attr_target_list.attr,
+       NULL,
+};
+
+static struct attribute_group cxl_decoder_base_attribute_group = {
+       .attrs = cxl_decoder_base_attrs,
+};
+
+static struct attribute *cxl_decoder_root_attrs[] = {
+       &dev_attr_cap_pmem.attr,
+       &dev_attr_cap_ram.attr,
+       &dev_attr_cap_type2.attr,
+       &dev_attr_cap_type3.attr,
+       NULL,
+};
+
+static struct attribute_group cxl_decoder_root_attribute_group = {
+       .attrs = cxl_decoder_root_attrs,
+};
+
+static const struct attribute_group *cxl_decoder_root_attribute_groups[] = {
+       &cxl_decoder_root_attribute_group,
+       &cxl_decoder_base_attribute_group,
+       &cxl_base_attribute_group,
+       NULL,
+};
+
+static struct attribute *cxl_decoder_switch_attrs[] = {
+       &dev_attr_target_type.attr,
+       NULL,
+};
+
+static struct attribute_group cxl_decoder_switch_attribute_group = {
+       .attrs = cxl_decoder_switch_attrs,
+};
+
+static const struct attribute_group *cxl_decoder_switch_attribute_groups[] = {
+       &cxl_decoder_switch_attribute_group,
+       &cxl_decoder_base_attribute_group,
+       &cxl_base_attribute_group,
+       NULL,
+};
+
+static void cxl_decoder_release(struct device *dev)
+{
+       struct cxl_decoder *cxld = to_cxl_decoder(dev);
+       struct cxl_port *port = to_cxl_port(dev->parent);
+
+       ida_free(&port->decoder_ida, cxld->id);
+       kfree(cxld);
+}
+
+static const struct device_type cxl_decoder_switch_type = {
+       .name = "cxl_decoder_switch",
+       .release = cxl_decoder_release,
+       .groups = cxl_decoder_switch_attribute_groups,
+};
+
+static const struct device_type cxl_decoder_root_type = {
+       .name = "cxl_decoder_root",
+       .release = cxl_decoder_release,
+       .groups = cxl_decoder_root_attribute_groups,
+};
+
+bool is_root_decoder(struct device *dev)
+{
+       return dev->type == &cxl_decoder_root_type;
+}
+EXPORT_SYMBOL_GPL(is_root_decoder);
+
+struct cxl_decoder *to_cxl_decoder(struct device *dev)
+{
+       if (dev_WARN_ONCE(dev, dev->type->release != cxl_decoder_release,
+                         "not a cxl_decoder device\n"))
+               return NULL;
+       return container_of(dev, struct cxl_decoder, dev);
+}
+EXPORT_SYMBOL_GPL(to_cxl_decoder);
+
+static void cxl_dport_release(struct cxl_dport *dport)
+{
+       list_del(&dport->list);
+       put_device(dport->dport);
+       kfree(dport);
+}
+
+static void cxl_port_release(struct device *dev)
+{
+       struct cxl_port *port = to_cxl_port(dev);
+       struct cxl_dport *dport, *_d;
+
+       device_lock(dev);
+       list_for_each_entry_safe(dport, _d, &port->dports, list)
+               cxl_dport_release(dport);
+       device_unlock(dev);
+       ida_free(&cxl_port_ida, port->id);
+       kfree(port);
+}
+
+static const struct attribute_group *cxl_port_attribute_groups[] = {
+       &cxl_base_attribute_group,
+       NULL,
+};
+
+static const struct device_type cxl_port_type = {
+       .name = "cxl_port",
+       .release = cxl_port_release,
+       .groups = cxl_port_attribute_groups,
+};
+
+struct cxl_port *to_cxl_port(struct device *dev)
+{
+       if (dev_WARN_ONCE(dev, dev->type != &cxl_port_type,
+                         "not a cxl_port device\n"))
+               return NULL;
+       return container_of(dev, struct cxl_port, dev);
+}
+
+static void unregister_port(void *_port)
+{
+       struct cxl_port *port = _port;
+       struct cxl_dport *dport;
+
+       device_lock(&port->dev);
+       list_for_each_entry(dport, &port->dports, list) {
+               char link_name[CXL_TARGET_STRLEN];
+
+               if (snprintf(link_name, CXL_TARGET_STRLEN, "dport%d",
+                            dport->port_id) >= CXL_TARGET_STRLEN)
+                       continue;
+               sysfs_remove_link(&port->dev.kobj, link_name);
+       }
+       device_unlock(&port->dev);
+       device_unregister(&port->dev);
+}
+
+static void cxl_unlink_uport(void *_port)
+{
+       struct cxl_port *port = _port;
+
+       sysfs_remove_link(&port->dev.kobj, "uport");
+}
+
+static int devm_cxl_link_uport(struct device *host, struct cxl_port *port)
+{
+       int rc;
+
+       rc = sysfs_create_link(&port->dev.kobj, &port->uport->kobj, "uport");
+       if (rc)
+               return rc;
+       return devm_add_action_or_reset(host, cxl_unlink_uport, port);
+}
+
+static struct cxl_port *cxl_port_alloc(struct device *uport,
+                                      resource_size_t component_reg_phys,
+                                      struct cxl_port *parent_port)
+{
+       struct cxl_port *port;
+       struct device *dev;
+       int rc;
+
+       port = kzalloc(sizeof(*port), GFP_KERNEL);
+       if (!port)
+               return ERR_PTR(-ENOMEM);
+
+       rc = ida_alloc(&cxl_port_ida, GFP_KERNEL);
+       if (rc < 0)
+               goto err;
+       port->id = rc;
+
+       /*
+        * The top-level cxl_port "cxl_root" does not have a cxl_port as
+        * its parent and it does not have any corresponding component
+        * registers as its decode is described by a fixed platform
+        * description.
+        */
+       dev = &port->dev;
+       if (parent_port)
+               dev->parent = &parent_port->dev;
+       else
+               dev->parent = uport;
+
+       port->uport = uport;
+       port->component_reg_phys = component_reg_phys;
+       ida_init(&port->decoder_ida);
+       INIT_LIST_HEAD(&port->dports);
+
+       device_initialize(dev);
+       device_set_pm_not_required(dev);
+       dev->bus = &cxl_bus_type;
+       dev->type = &cxl_port_type;
+
+       return port;
+
+err:
+       kfree(port);
+       return ERR_PTR(rc);
+}
+
+/**
+ * devm_cxl_add_port - register a cxl_port in CXL memory decode hierarchy
+ * @host: host device for devm operations
+ * @uport: "physical" device implementing this upstream port
+ * @component_reg_phys: (optional) for configurable cxl_port instances
+ * @parent_port: next hop up in the CXL memory decode hierarchy
+ */
+struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
+                                  resource_size_t component_reg_phys,
+                                  struct cxl_port *parent_port)
+{
+       struct cxl_port *port;
+       struct device *dev;
+       int rc;
+
+       port = cxl_port_alloc(uport, component_reg_phys, parent_port);
+       if (IS_ERR(port))
+               return port;
+
+       dev = &port->dev;
+       if (parent_port)
+               rc = dev_set_name(dev, "port%d", port->id);
+       else
+               rc = dev_set_name(dev, "root%d", port->id);
+       if (rc)
+               goto err;
+
+       rc = device_add(dev);
+       if (rc)
+               goto err;
+
+       rc = devm_add_action_or_reset(host, unregister_port, port);
+       if (rc)
+               return ERR_PTR(rc);
+
+       rc = devm_cxl_link_uport(host, port);
+       if (rc)
+               return ERR_PTR(rc);
+
+       return port;
+
+err:
+       put_device(dev);
+       return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_GPL(devm_cxl_add_port);
+
+static struct cxl_dport *find_dport(struct cxl_port *port, int id)
+{
+       struct cxl_dport *dport;
+
+       device_lock_assert(&port->dev);
+       list_for_each_entry (dport, &port->dports, list)
+               if (dport->port_id == id)
+                       return dport;
+       return NULL;
+}
+
+static int add_dport(struct cxl_port *port, struct cxl_dport *new)
+{
+       struct cxl_dport *dup;
+
+       device_lock(&port->dev);
+       dup = find_dport(port, new->port_id);
+       if (dup)
+               dev_err(&port->dev,
+                       "unable to add dport%d-%s non-unique port id (%s)\n",
+                       new->port_id, dev_name(new->dport),
+                       dev_name(dup->dport));
+       else
+               list_add_tail(&new->list, &port->dports);
+       device_unlock(&port->dev);
+
+       return dup ? -EEXIST : 0;
+}
+
+/**
+ * cxl_add_dport - append downstream port data to a cxl_port
+ * @port: the cxl_port that references this dport
+ * @dport_dev: firmware or PCI device representing the dport
+ * @port_id: identifier for this dport in a decoder's target list
+ * @component_reg_phys: optional location of CXL component registers
+ *
+ * Note that all allocations and links are undone by cxl_port deletion
+ * and release.
+ */
+int cxl_add_dport(struct cxl_port *port, struct device *dport_dev, int port_id,
+                 resource_size_t component_reg_phys)
+{
+       char link_name[CXL_TARGET_STRLEN];
+       struct cxl_dport *dport;
+       int rc;
+
+       if (snprintf(link_name, CXL_TARGET_STRLEN, "dport%d", port_id) >=
+           CXL_TARGET_STRLEN)
+               return -EINVAL;
+
+       dport = kzalloc(sizeof(*dport), GFP_KERNEL);
+       if (!dport)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&dport->list);
+       dport->dport = get_device(dport_dev);
+       dport->port_id = port_id;
+       dport->component_reg_phys = component_reg_phys;
+       dport->port = port;
+
+       rc = add_dport(port, dport);
+       if (rc)
+               goto err;
+
+       rc = sysfs_create_link(&port->dev.kobj, &dport_dev->kobj, link_name);
+       if (rc)
+               goto err;
+
+       return 0;
+err:
+       cxl_dport_release(dport);
+       return rc;
+}
+EXPORT_SYMBOL_GPL(cxl_add_dport);
+
+static struct cxl_decoder *
+cxl_decoder_alloc(struct cxl_port *port, int nr_targets, resource_size_t base,
+                 resource_size_t len, int interleave_ways,
+                 int interleave_granularity, enum cxl_decoder_type type,
+                 unsigned long flags)
+{
+       struct cxl_decoder *cxld;
+       struct device *dev;
+       int rc = 0;
+
+       if (interleave_ways < 1)
+               return ERR_PTR(-EINVAL);
+
+       device_lock(&port->dev);
+       if (list_empty(&port->dports))
+               rc = -EINVAL;
+       device_unlock(&port->dev);
+       if (rc)
+               return ERR_PTR(rc);
+
+       cxld = kzalloc(struct_size(cxld, target, nr_targets), GFP_KERNEL);
+       if (!cxld)
+               return ERR_PTR(-ENOMEM);
+
+       rc = ida_alloc(&port->decoder_ida, GFP_KERNEL);
+       if (rc < 0)
+               goto err;
+
+       *cxld = (struct cxl_decoder) {
+               .id = rc,
+               .range = {
+                       .start = base,
+                       .end = base + len - 1,
+               },
+               .flags = flags,
+               .interleave_ways = interleave_ways,
+               .interleave_granularity = interleave_granularity,
+               .target_type = type,
+       };
+
+       /* handle implied target_list */
+       if (interleave_ways == 1)
+               cxld->target[0] =
+                       list_first_entry(&port->dports, struct cxl_dport, list);
+       dev = &cxld->dev;
+       device_initialize(dev);
+       device_set_pm_not_required(dev);
+       dev->parent = &port->dev;
+       dev->bus = &cxl_bus_type;
+
+       /* root ports do not have a cxl_port_type parent */
+       if (port->dev.parent->type == &cxl_port_type)
+               dev->type = &cxl_decoder_switch_type;
+       else
+               dev->type = &cxl_decoder_root_type;
+
+       return cxld;
+err:
+       kfree(cxld);
+       return ERR_PTR(rc);
+}
+
+struct cxl_decoder *
+devm_cxl_add_decoder(struct device *host, struct cxl_port *port, int nr_targets,
+                    resource_size_t base, resource_size_t len,
+                    int interleave_ways, int interleave_granularity,
+                    enum cxl_decoder_type type, unsigned long flags)
+{
+       struct cxl_decoder *cxld;
+       struct device *dev;
+       int rc;
+
+       cxld = cxl_decoder_alloc(port, nr_targets, base, len, interleave_ways,
+                                interleave_granularity, type, flags);
+       if (IS_ERR(cxld))
+               return cxld;
+
+       dev = &cxld->dev;
+       rc = dev_set_name(dev, "decoder%d.%d", port->id, cxld->id);
+       if (rc)
+               goto err;
+
+       rc = device_add(dev);
+       if (rc)
+               goto err;
+
+       rc = devm_add_action_or_reset(host, unregister_cxl_dev, dev);
+       if (rc)
+               return ERR_PTR(rc);
+       return cxld;
+
+err:
+       put_device(dev);
+       return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_GPL(devm_cxl_add_decoder);
+
+/**
+ * __cxl_driver_register - register a driver for the cxl bus
+ * @cxl_drv: cxl driver structure to attach
+ * @owner: owning module/driver
+ * @modname: KBUILD_MODNAME for parent driver
+ */
+int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner,
+                         const char *modname)
+{
+       if (!cxl_drv->probe) {
+               pr_debug("%s ->probe() must be specified\n", modname);
+               return -EINVAL;
+       }
+
+       if (!cxl_drv->name) {
+               pr_debug("%s ->name must be specified\n", modname);
+               return -EINVAL;
+       }
+
+       if (!cxl_drv->id) {
+               pr_debug("%s ->id must be specified\n", modname);
+               return -EINVAL;
+       }
+
+       cxl_drv->drv.bus = &cxl_bus_type;
+       cxl_drv->drv.owner = owner;
+       cxl_drv->drv.mod_name = modname;
+       cxl_drv->drv.name = cxl_drv->name;
+
+       return driver_register(&cxl_drv->drv);
+}
+EXPORT_SYMBOL_GPL(__cxl_driver_register);
+
+void cxl_driver_unregister(struct cxl_driver *cxl_drv)
+{
+       driver_unregister(&cxl_drv->drv);
+}
+EXPORT_SYMBOL_GPL(cxl_driver_unregister);
+
+static int cxl_device_id(struct device *dev)
+{
+       if (dev->type == &cxl_nvdimm_bridge_type)
+               return CXL_DEVICE_NVDIMM_BRIDGE;
+       if (dev->type == &cxl_nvdimm_type)
+               return CXL_DEVICE_NVDIMM;
+       return 0;
+}
+
+static int cxl_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+       return add_uevent_var(env, "MODALIAS=" CXL_MODALIAS_FMT,
+                             cxl_device_id(dev));
+}
+
+static int cxl_bus_match(struct device *dev, struct device_driver *drv)
+{
+       return cxl_device_id(dev) == to_cxl_drv(drv)->id;
+}
+
+static int cxl_bus_probe(struct device *dev)
+{
+       return to_cxl_drv(dev->driver)->probe(dev);
+}
+
+static void cxl_bus_remove(struct device *dev)
+{
+       struct cxl_driver *cxl_drv = to_cxl_drv(dev->driver);
+
+       if (cxl_drv->remove)
+               cxl_drv->remove(dev);
+}
+
+struct bus_type cxl_bus_type = {
+       .name = "cxl",
+       .uevent = cxl_bus_uevent,
+       .match = cxl_bus_match,
+       .probe = cxl_bus_probe,
+       .remove = cxl_bus_remove,
+};
+EXPORT_SYMBOL_GPL(cxl_bus_type);
+
+static __init int cxl_core_init(void)
+{
+       int rc;
+
+       rc = cxl_memdev_init();
+       if (rc)
+               return rc;
+
+       rc = bus_register(&cxl_bus_type);
+       if (rc)
+               goto err;
+       return 0;
+
+err:
+       cxl_memdev_exit();
+       return rc;
+}
+
+static void cxl_core_exit(void)
+{
+       bus_unregister(&cxl_bus_type);
+       cxl_memdev_exit();
+}
+
+module_init(cxl_core_init);
+module_exit(cxl_core_exit);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
new file mode 100644 (file)
index 0000000..036a3c8
--- /dev/null
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2020 Intel Corporation. */
+
+#ifndef __CXL_CORE_H__
+#define __CXL_CORE_H__
+
+extern const struct device_type cxl_nvdimm_bridge_type;
+extern const struct device_type cxl_nvdimm_type;
+
+extern struct attribute_group cxl_base_attribute_group;
+
+static inline void unregister_cxl_dev(void *dev)
+{
+       device_unregister(dev);
+}
+
+int cxl_memdev_init(void);
+void cxl_memdev_exit(void);
+
+#endif /* __CXL_CORE_H__ */
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
new file mode 100644 (file)
index 0000000..a9c317e
--- /dev/null
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2020 Intel Corporation. */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/idr.h>
+#include <linux/pci.h>
+#include <cxlmem.h>
+#include "core.h"
+
+/*
+ * An entire PCI topology full of devices should be enough for any
+ * config
+ */
+#define CXL_MEM_MAX_DEVS 65536
+
+static int cxl_mem_major;
+static DEFINE_IDA(cxl_memdev_ida);
+
+static void cxl_memdev_release(struct device *dev)
+{
+       struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+
+       ida_free(&cxl_memdev_ida, cxlmd->id);
+       kfree(cxlmd);
+}
+
+static char *cxl_memdev_devnode(struct device *dev, umode_t *mode, kuid_t *uid,
+                               kgid_t *gid)
+{
+       return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev));
+}
+
+static ssize_t firmware_version_show(struct device *dev,
+                                    struct device_attribute *attr, char *buf)
+{
+       struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+       struct cxl_mem *cxlm = cxlmd->cxlm;
+
+       return sysfs_emit(buf, "%.16s\n", cxlm->firmware_version);
+}
+static DEVICE_ATTR_RO(firmware_version);
+
+static ssize_t payload_max_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+       struct cxl_mem *cxlm = cxlmd->cxlm;
+
+       return sysfs_emit(buf, "%zu\n", cxlm->payload_size);
+}
+static DEVICE_ATTR_RO(payload_max);
+
+static ssize_t label_storage_size_show(struct device *dev,
+                                      struct device_attribute *attr, char *buf)
+{
+       struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+       struct cxl_mem *cxlm = cxlmd->cxlm;
+
+       return sysfs_emit(buf, "%zu\n", cxlm->lsa_size);
+}
+static DEVICE_ATTR_RO(label_storage_size);
+
+static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr,
+                            char *buf)
+{
+       struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+       struct cxl_mem *cxlm = cxlmd->cxlm;
+       unsigned long long len = range_len(&cxlm->ram_range);
+
+       return sysfs_emit(buf, "%#llx\n", len);
+}
+
+static struct device_attribute dev_attr_ram_size =
+       __ATTR(size, 0444, ram_size_show, NULL);
+
+static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr,
+                             char *buf)
+{
+       struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+       struct cxl_mem *cxlm = cxlmd->cxlm;
+       unsigned long long len = range_len(&cxlm->pmem_range);
+
+       return sysfs_emit(buf, "%#llx\n", len);
+}
+
+static struct device_attribute dev_attr_pmem_size =
+       __ATTR(size, 0444, pmem_size_show, NULL);
+
+static struct attribute *cxl_memdev_attributes[] = {
+       &dev_attr_firmware_version.attr,
+       &dev_attr_payload_max.attr,
+       &dev_attr_label_storage_size.attr,
+       NULL,
+};
+
+static struct attribute *cxl_memdev_pmem_attributes[] = {
+       &dev_attr_pmem_size.attr,
+       NULL,
+};
+
+static struct attribute *cxl_memdev_ram_attributes[] = {
+       &dev_attr_ram_size.attr,
+       NULL,
+};
+
+static struct attribute_group cxl_memdev_attribute_group = {
+       .attrs = cxl_memdev_attributes,
+};
+
+static struct attribute_group cxl_memdev_ram_attribute_group = {
+       .name = "ram",
+       .attrs = cxl_memdev_ram_attributes,
+};
+
+static struct attribute_group cxl_memdev_pmem_attribute_group = {
+       .name = "pmem",
+       .attrs = cxl_memdev_pmem_attributes,
+};
+
+static const struct attribute_group *cxl_memdev_attribute_groups[] = {
+       &cxl_memdev_attribute_group,
+       &cxl_memdev_ram_attribute_group,
+       &cxl_memdev_pmem_attribute_group,
+       NULL,
+};
+
+static const struct device_type cxl_memdev_type = {
+       .name = "cxl_memdev",
+       .release = cxl_memdev_release,
+       .devnode = cxl_memdev_devnode,
+       .groups = cxl_memdev_attribute_groups,
+};
+
+static void cxl_memdev_unregister(void *_cxlmd)
+{
+       struct cxl_memdev *cxlmd = _cxlmd;
+       struct device *dev = &cxlmd->dev;
+       struct cdev *cdev = &cxlmd->cdev;
+       const struct cdevm_file_operations *cdevm_fops;
+
+       cdevm_fops = container_of(cdev->ops, typeof(*cdevm_fops), fops);
+       cdevm_fops->shutdown(dev);
+
+       cdev_device_del(&cxlmd->cdev, dev);
+       put_device(dev);
+}
+
+static struct cxl_memdev *cxl_memdev_alloc(struct cxl_mem *cxlm,
+                                          const struct file_operations *fops)
+{
+       struct pci_dev *pdev = cxlm->pdev;
+       struct cxl_memdev *cxlmd;
+       struct device *dev;
+       struct cdev *cdev;
+       int rc;
+
+       cxlmd = kzalloc(sizeof(*cxlmd), GFP_KERNEL);
+       if (!cxlmd)
+               return ERR_PTR(-ENOMEM);
+
+       rc = ida_alloc_range(&cxl_memdev_ida, 0, CXL_MEM_MAX_DEVS - 1, GFP_KERNEL);
+       if (rc < 0)
+               goto err;
+       cxlmd->id = rc;
+
+       dev = &cxlmd->dev;
+       device_initialize(dev);
+       dev->parent = &pdev->dev;
+       dev->bus = &cxl_bus_type;
+       dev->devt = MKDEV(cxl_mem_major, cxlmd->id);
+       dev->type = &cxl_memdev_type;
+       device_set_pm_not_required(dev);
+
+       cdev = &cxlmd->cdev;
+       cdev_init(cdev, fops);
+       return cxlmd;
+
+err:
+       kfree(cxlmd);
+       return ERR_PTR(rc);
+}
+
+struct cxl_memdev *
+devm_cxl_add_memdev(struct device *host, struct cxl_mem *cxlm,
+                   const struct cdevm_file_operations *cdevm_fops)
+{
+       struct cxl_memdev *cxlmd;
+       struct device *dev;
+       struct cdev *cdev;
+       int rc;
+
+       cxlmd = cxl_memdev_alloc(cxlm, &cdevm_fops->fops);
+       if (IS_ERR(cxlmd))
+               return cxlmd;
+
+       dev = &cxlmd->dev;
+       rc = dev_set_name(dev, "mem%d", cxlmd->id);
+       if (rc)
+               goto err;
+
+       /*
+        * Activate ioctl operations, no cxl_memdev_rwsem manipulation
+        * needed as this is ordered with cdev_add() publishing the device.
+        */
+       cxlmd->cxlm = cxlm;
+
+       cdev = &cxlmd->cdev;
+       rc = cdev_device_add(cdev, dev);
+       if (rc)
+               goto err;
+
+       rc = devm_add_action_or_reset(host, cxl_memdev_unregister, cxlmd);
+       if (rc)
+               return ERR_PTR(rc);
+       return cxlmd;
+
+err:
+       /*
+        * The cdev was briefly live; shut down any ioctl operations that
+        * saw that state.
+        */
+       cdevm_fops->shutdown(dev);
+       put_device(dev);
+       return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_GPL(devm_cxl_add_memdev);
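As the cxl_pci conversion later in this patch shows, the expected call
pattern is:

        cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlm, &cxl_memdev_fops);
        if (IS_ERR(cxlmd))
                return PTR_ERR(cxlmd);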
+
+__init int cxl_memdev_init(void)
+{
+       dev_t devt;
+       int rc;
+
+       rc = alloc_chrdev_region(&devt, 0, CXL_MEM_MAX_DEVS, "cxl");
+       if (rc)
+               return rc;
+
+       cxl_mem_major = MAJOR(devt);
+
+       return 0;
+}
+
+void cxl_memdev_exit(void)
+{
+       unregister_chrdev_region(MKDEV(cxl_mem_major, 0), CXL_MEM_MAX_DEVS);
+}
diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c
new file mode 100644 (file)
index 0000000..d24570f
--- /dev/null
@@ -0,0 +1,230 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2020 Intel Corporation. */
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <cxlmem.h>
+#include <cxl.h>
+#include "core.h"
+
+/**
+ * DOC: cxl pmem
+ *
+ * The core CXL PMEM infrastructure supports persistent memory
+ * provisioning and serves as a bridge to the LIBNVDIMM subsystem. A CXL
+ * 'bridge' device is added at the root of a CXL device topology if
+ * platform firmware advertises at least one persistent memory capable
+ * CXL window. That root-level bridge corresponds to a LIBNVDIMM 'bus'
+ * device. Then for each cxl_memdev in the CXL device topology a bridge
+ * device is added to host a LIBNVDIMM dimm object. When these bridges
+ * are registered native LIBNVDIMM uapis are translated to CXL
+ * operations, for example, namespace label access commands.
+ */
+
+static void cxl_nvdimm_bridge_release(struct device *dev)
+{
+       struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);
+
+       kfree(cxl_nvb);
+}
+
+static const struct attribute_group *cxl_nvdimm_bridge_attribute_groups[] = {
+       &cxl_base_attribute_group,
+       NULL,
+};
+
+const struct device_type cxl_nvdimm_bridge_type = {
+       .name = "cxl_nvdimm_bridge",
+       .release = cxl_nvdimm_bridge_release,
+       .groups = cxl_nvdimm_bridge_attribute_groups,
+};
+
+struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev)
+{
+       if (dev_WARN_ONCE(dev, dev->type != &cxl_nvdimm_bridge_type,
+                         "not a cxl_nvdimm_bridge device\n"))
+               return NULL;
+       return container_of(dev, struct cxl_nvdimm_bridge, dev);
+}
+EXPORT_SYMBOL_GPL(to_cxl_nvdimm_bridge);
+
+static struct cxl_nvdimm_bridge *
+cxl_nvdimm_bridge_alloc(struct cxl_port *port)
+{
+       struct cxl_nvdimm_bridge *cxl_nvb;
+       struct device *dev;
+
+       cxl_nvb = kzalloc(sizeof(*cxl_nvb), GFP_KERNEL);
+       if (!cxl_nvb)
+               return ERR_PTR(-ENOMEM);
+
+       dev = &cxl_nvb->dev;
+       cxl_nvb->port = port;
+       cxl_nvb->state = CXL_NVB_NEW;
+       device_initialize(dev);
+       device_set_pm_not_required(dev);
+       dev->parent = &port->dev;
+       dev->bus = &cxl_bus_type;
+       dev->type = &cxl_nvdimm_bridge_type;
+
+       return cxl_nvb;
+}
+
+static void unregister_nvb(void *_cxl_nvb)
+{
+       struct cxl_nvdimm_bridge *cxl_nvb = _cxl_nvb;
+       bool flush;
+
+       /*
+        * If the bridge was ever activated then there might be in-flight state
+        * work to flush. Once the state has been changed to 'dead' then no new
+        * work can be queued by user-triggered bind.
+        */
+       device_lock(&cxl_nvb->dev);
+       flush = cxl_nvb->state != CXL_NVB_NEW;
+       cxl_nvb->state = CXL_NVB_DEAD;
+       device_unlock(&cxl_nvb->dev);
+
+       /*
+        * Even though the device core will trigger device_release_driver()
+        * before the unregister, it does not know about the fact that
+        * cxl_nvdimm_bridge_driver defers ->remove() work. So, do the driver
+        * release now and flush it before tearing down the nvdimm device
+        * hierarchy.
+        */
+       device_release_driver(&cxl_nvb->dev);
+       if (flush)
+               flush_work(&cxl_nvb->state_work);
+       device_unregister(&cxl_nvb->dev);
+}
+
+/**
+ * devm_cxl_add_nvdimm_bridge() - add the root of a LIBNVDIMM topology
+ * @host: platform firmware root device
+ * @port: CXL port at the root of a CXL topology
+ *
+ * Return: bridge device that can host cxl_nvdimm objects
+ */
+struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
+                                                    struct cxl_port *port)
+{
+       struct cxl_nvdimm_bridge *cxl_nvb;
+       struct device *dev;
+       int rc;
+
+       if (!IS_ENABLED(CONFIG_CXL_PMEM))
+               return ERR_PTR(-ENXIO);
+
+       cxl_nvb = cxl_nvdimm_bridge_alloc(port);
+       if (IS_ERR(cxl_nvb))
+               return cxl_nvb;
+
+       dev = &cxl_nvb->dev;
+       rc = dev_set_name(dev, "nvdimm-bridge");
+       if (rc)
+               goto err;
+
+       rc = device_add(dev);
+       if (rc)
+               goto err;
+
+       rc = devm_add_action_or_reset(host, unregister_nvb, cxl_nvb);
+       if (rc)
+               return ERR_PTR(rc);
+
+       return cxl_nvb;
+
+err:
+       put_device(dev);
+       return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm_bridge);
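For illustration, a root driver (cxl_acpi in this series) adds the bridge
once per topology root; a short sketch with hypothetical variable names:

        struct cxl_nvdimm_bridge *cxl_nvb;

        cxl_nvb = devm_cxl_add_nvdimm_bridge(host, root_port);
        if (IS_ERR(cxl_nvb))
                return PTR_ERR(cxl_nvb);
        /* unregister_nvb() runs when @host unbinds, flushing deferred work */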
+
+static void cxl_nvdimm_release(struct device *dev)
+{
+       struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
+
+       kfree(cxl_nvd);
+}
+
+static const struct attribute_group *cxl_nvdimm_attribute_groups[] = {
+       &cxl_base_attribute_group,
+       NULL,
+};
+
+const struct device_type cxl_nvdimm_type = {
+       .name = "cxl_nvdimm",
+       .release = cxl_nvdimm_release,
+       .groups = cxl_nvdimm_attribute_groups,
+};
+
+bool is_cxl_nvdimm(struct device *dev)
+{
+       return dev->type == &cxl_nvdimm_type;
+}
+EXPORT_SYMBOL_GPL(is_cxl_nvdimm);
+
+struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev)
+{
+       if (dev_WARN_ONCE(dev, !is_cxl_nvdimm(dev),
+                         "not a cxl_nvdimm device\n"))
+               return NULL;
+       return container_of(dev, struct cxl_nvdimm, dev);
+}
+EXPORT_SYMBOL_GPL(to_cxl_nvdimm);
+
+static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd)
+{
+       struct cxl_nvdimm *cxl_nvd;
+       struct device *dev;
+
+       cxl_nvd = kzalloc(sizeof(*cxl_nvd), GFP_KERNEL);
+       if (!cxl_nvd)
+               return ERR_PTR(-ENOMEM);
+
+       dev = &cxl_nvd->dev;
+       cxl_nvd->cxlmd = cxlmd;
+       device_initialize(dev);
+       device_set_pm_not_required(dev);
+       dev->parent = &cxlmd->dev;
+       dev->bus = &cxl_bus_type;
+       dev->type = &cxl_nvdimm_type;
+
+       return cxl_nvd;
+}
+
+/**
+ * devm_cxl_add_nvdimm() - add a bridge between a cxl_memdev and an nvdimm
+ * @host: same host as @cxlmd
+ * @cxlmd: cxl_memdev instance that will perform LIBNVDIMM operations
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd)
+{
+       struct cxl_nvdimm *cxl_nvd;
+       struct device *dev;
+       int rc;
+
+       cxl_nvd = cxl_nvdimm_alloc(cxlmd);
+       if (IS_ERR(cxl_nvd))
+               return PTR_ERR(cxl_nvd);
+
+       dev = &cxl_nvd->dev;
+       rc = dev_set_name(dev, "pmem%d", cxlmd->id);
+       if (rc)
+               goto err;
+
+       rc = device_add(dev);
+       if (rc)
+               goto err;
+
+       dev_dbg(host, "%s: register %s\n", dev_name(dev->parent),
+               dev_name(dev));
+
+       return devm_add_action_or_reset(host, unregister_cxl_dev, dev);
+
+err:
+       put_device(dev);
+       return rc;
+}
+EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm);
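And the memdev-facing half, as called from a driver that owns a cxl_memdev
(a sketch; the surrounding pdev/cxlmd context is hypothetical):

        rc = devm_cxl_add_nvdimm(&pdev->dev, cxlmd);
        if (rc)
                return rc;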
diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c
new file mode 100644 (file)
index 0000000..41de4a1
--- /dev/null
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2020 Intel Corporation. */
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <cxlmem.h>
+
+/**
+ * DOC: cxl registers
+ *
+ * CXL device capabilities are enumerated by PCI DVSEC (Designated
+ * Vendor-Specific Extended Capability) and/or descriptors provided by
+ * platform firmware.
+ * They can be defined as a set like the device and component registers
+ * mandated by CXL Section 8.1.12.2 Memory Device PCIe Capabilities and
+ * Extended Capabilities, or they can be individual capabilities
+ * appended to bridge and endpoint devices.
+ *
+ * Provide common infrastructure for enumerating and mapping these
+ * discrete capabilities.
+ */
+
+/**
+ * cxl_probe_component_regs() - Detect CXL Component register blocks
+ * @dev: Host device of the @base mapping
+ * @base: Mapping containing the HDM Decoder Capability Header
+ * @map: Map object describing the register block information found
+ *
+ * See CXL 2.0 8.2.4 Component Register Layout and Definition
+ * See CXL 2.0 8.2.5.5 CXL Device Register Interface
+ *
+ * Probe for component register information and return it in the map object.
+ */
+void cxl_probe_component_regs(struct device *dev, void __iomem *base,
+                             struct cxl_component_reg_map *map)
+{
+       int cap, cap_count;
+       u64 cap_array;
+
+       *map = (struct cxl_component_reg_map) { 0 };
+
+       /*
+        * CXL.cache and CXL.mem registers are at offset 0x1000 as defined in
+        * CXL 2.0 8.2.4 Table 141.
+        */
+       base += CXL_CM_OFFSET;
+
+       cap_array = readq(base + CXL_CM_CAP_HDR_OFFSET);
+
+       if (FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, cap_array) != CM_CAP_HDR_CAP_ID) {
+               dev_err(dev,
+                       "Couldn't locate the CXL.cache and CXL.mem capability array header./n");
+               return;
+       }
+
+       /* It's assumed that future versions will be backward compatible */
+       cap_count = FIELD_GET(CXL_CM_CAP_HDR_ARRAY_SIZE_MASK, cap_array);
+
+       for (cap = 1; cap <= cap_count; cap++) {
+               void __iomem *register_block;
+               u32 hdr;
+               int decoder_cnt;
+               u16 cap_id, offset;
+               u32 length;
+
+               hdr = readl(base + cap * 0x4);
+
+               cap_id = FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, hdr);
+               offset = FIELD_GET(CXL_CM_CAP_PTR_MASK, hdr);
+               register_block = base + offset;
+
+               switch (cap_id) {
+               case CXL_CM_CAP_CAP_ID_HDM:
+                       dev_dbg(dev, "found HDM decoder capability (0x%x)\n",
+                               offset);
+
+                       hdr = readl(register_block);
+
+                       decoder_cnt = cxl_hdm_decoder_count(hdr);
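+                       /* 0x10 of global registers + 0x20 per decoder */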
+                       length = 0x20 * decoder_cnt + 0x10;
+
+                       map->hdm_decoder.valid = true;
+                       map->hdm_decoder.offset = CXL_CM_OFFSET + offset;
+                       map->hdm_decoder.size = length;
+                       break;
+               default:
+                       dev_dbg(dev, "Unknown CM cap ID: %d (0x%x)\n", cap_id,
+                               offset);
+                       break;
+               }
+       }
+}
+EXPORT_SYMBOL_GPL(cxl_probe_component_regs);
+
+/**
+ * cxl_probe_device_regs() - Detect CXL Device register blocks
+ * @dev: Host device of the @base mapping
+ * @base: Mapping of CXL 2.0 8.2.8 CXL Device Register Interface
+ * @map: Map object describing the register block information found
+ *
+ * Probe for device register information and return it in the map object.
+ */
+void cxl_probe_device_regs(struct device *dev, void __iomem *base,
+                          struct cxl_device_reg_map *map)
+{
+       int cap, cap_count;
+       u64 cap_array;
+
+       *map = (struct cxl_device_reg_map){ 0 };
+
+       cap_array = readq(base + CXLDEV_CAP_ARRAY_OFFSET);
+       if (FIELD_GET(CXLDEV_CAP_ARRAY_ID_MASK, cap_array) !=
+           CXLDEV_CAP_ARRAY_CAP_ID)
+               return;
+
+       cap_count = FIELD_GET(CXLDEV_CAP_ARRAY_COUNT_MASK, cap_array);
+
+       for (cap = 1; cap <= cap_count; cap++) {
+               u32 offset, length;
+               u16 cap_id;
+
+               cap_id = FIELD_GET(CXLDEV_CAP_HDR_CAP_ID_MASK,
+                                  readl(base + cap * 0x10));
+               offset = readl(base + cap * 0x10 + 0x4);
+               length = readl(base + cap * 0x10 + 0x8);
+
+               switch (cap_id) {
+               case CXLDEV_CAP_CAP_ID_DEVICE_STATUS:
+                       dev_dbg(dev, "found Status capability (0x%x)\n", offset);
+
+                       map->status.valid = true;
+                       map->status.offset = offset;
+                       map->status.size = length;
+                       break;
+               case CXLDEV_CAP_CAP_ID_PRIMARY_MAILBOX:
+                       dev_dbg(dev, "found Mailbox capability (0x%x)\n", offset);
+                       map->mbox.valid = true;
+                       map->mbox.offset = offset;
+                       map->mbox.size = length;
+                       break;
+               case CXLDEV_CAP_CAP_ID_SECONDARY_MAILBOX:
+                       dev_dbg(dev, "found Secondary Mailbox capability (0x%x)\n", offset);
+                       break;
+               case CXLDEV_CAP_CAP_ID_MEMDEV:
+                       dev_dbg(dev, "found Memory Device capability (0x%x)\n", offset);
+                       map->memdev.valid = true;
+                       map->memdev.offset = offset;
+                       map->memdev.size = length;
+                       break;
+               default:
+                       if (cap_id >= 0x8000)
+                               dev_dbg(dev, "Vendor cap ID: %#x offset: %#x\n", cap_id, offset);
+                       else
+                               dev_dbg(dev, "Unknown cap ID: %#x offset: %#x\n", cap_id, offset);
+                       break;
+               }
+       }
+}
+EXPORT_SYMBOL_GPL(cxl_probe_device_regs);
+
+static void __iomem *devm_cxl_iomap_block(struct device *dev,
+                                         resource_size_t addr,
+                                         resource_size_t length)
+{
+       void __iomem *ret_val;
+       struct resource *res;
+
+       res = devm_request_mem_region(dev, addr, length, dev_name(dev));
+       if (!res) {
+               resource_size_t end = addr + length - 1;
+
+               dev_err(dev, "Failed to request region %pa-%pa\n", &addr, &end);
+               return NULL;
+       }
+
+       ret_val = devm_ioremap(dev, addr, length);
+       if (!ret_val)
+               dev_err(dev, "Failed to map region %pr\n", res);
+
+       return ret_val;
+}
+
+int cxl_map_component_regs(struct pci_dev *pdev,
+                          struct cxl_component_regs *regs,
+                          struct cxl_register_map *map)
+{
+       struct device *dev = &pdev->dev;
+       resource_size_t phys_addr;
+       resource_size_t length;
+
+       phys_addr = pci_resource_start(pdev, map->barno);
+       phys_addr += map->block_offset;
+
+       phys_addr += map->component_map.hdm_decoder.offset;
+       length = map->component_map.hdm_decoder.size;
+       regs->hdm_decoder = devm_cxl_iomap_block(dev, phys_addr, length);
+       if (!regs->hdm_decoder)
+               return -ENOMEM;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(cxl_map_component_regs);
+
+int cxl_map_device_regs(struct pci_dev *pdev,
+                       struct cxl_device_regs *regs,
+                       struct cxl_register_map *map)
+{
+       struct device *dev = &pdev->dev;
+       resource_size_t phys_addr;
+
+       phys_addr = pci_resource_start(pdev, map->barno);
+       phys_addr += map->block_offset;
+
+       if (map->device_map.status.valid) {
+               resource_size_t addr;
+               resource_size_t length;
+
+               addr = phys_addr + map->device_map.status.offset;
+               length = map->device_map.status.size;
+               regs->status = devm_cxl_iomap_block(dev, addr, length);
+               if (!regs->status)
+                       return -ENOMEM;
+       }
+
+       if (map->device_map.mbox.valid) {
+               resource_size_t addr;
+               resource_size_t length;
+
+               addr = phys_addr + map->device_map.mbox.offset;
+               length = map->device_map.mbox.size;
+               regs->mbox = devm_cxl_iomap_block(dev, addr, length);
+               if (!regs->mbox)
+                       return -ENOMEM;
+       }
+
+       if (map->device_map.memdev.valid) {
+               resource_size_t addr;
+               resource_size_t length;
+
+               addr = phys_addr + map->device_map.memdev.offset;
+               length = map->device_map.memdev.size;
+               regs->memdev = devm_cxl_iomap_block(dev, addr, length);
+               if (!regs->memdev)
+                       return -ENOMEM;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(cxl_map_device_regs);
index b6bda39..53927f9 100644 (file)
@@ -140,7 +140,6 @@ struct cxl_device_reg_map {
 };
 
 struct cxl_register_map {
-       struct list_head list;
        u64 block_offset;
        u8 reg_type;
        u8 barno;
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
new file mode 100644 (file)
index 0000000..6c0b1e2
--- /dev/null
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2020-2021 Intel Corporation. */
+#ifndef __CXL_MEM_H__
+#define __CXL_MEM_H__
+#include <linux/cdev.h>
+#include "cxl.h"
+
+/* CXL 2.0 8.2.8.5.1.1 Memory Device Status Register */
+#define CXLMDEV_STATUS_OFFSET 0x0
+#define   CXLMDEV_DEV_FATAL BIT(0)
+#define   CXLMDEV_FW_HALT BIT(1)
+#define   CXLMDEV_STATUS_MEDIA_STATUS_MASK GENMASK(3, 2)
+#define     CXLMDEV_MS_NOT_READY 0
+#define     CXLMDEV_MS_READY 1
+#define     CXLMDEV_MS_ERROR 2
+#define     CXLMDEV_MS_DISABLED 3
+#define CXLMDEV_READY(status)                                                  \
+       (FIELD_GET(CXLMDEV_STATUS_MEDIA_STATUS_MASK, status) ==                \
+        CXLMDEV_MS_READY)
+#define   CXLMDEV_MBOX_IF_READY BIT(4)
+#define   CXLMDEV_RESET_NEEDED_MASK GENMASK(7, 5)
+#define     CXLMDEV_RESET_NEEDED_NOT 0
+#define     CXLMDEV_RESET_NEEDED_COLD 1
+#define     CXLMDEV_RESET_NEEDED_WARM 2
+#define     CXLMDEV_RESET_NEEDED_HOT 3
+#define     CXLMDEV_RESET_NEEDED_CXL 4
+#define CXLMDEV_RESET_NEEDED(status)                                           \
+       (FIELD_GET(CXLMDEV_RESET_NEEDED_MASK, status) !=                       \
+        CXLMDEV_RESET_NEEDED_NOT)
+
+/**
+ * struct cdevm_file_operations - devm coordinated cdev file operations
+ * @fops: file operations that are synchronized against @shutdown
+ * @shutdown: disconnect driver data
+ *
+ * @shutdown is invoked in the devres release path to disconnect any
+ * driver instance data from @dev. It assumes synchronization with any
+ * fops operation that requires driver data. After @shutdown an
+ * operation may only reference device data.
+ */
+struct cdevm_file_operations {
+       struct file_operations fops;
+       void (*shutdown)(struct device *dev);
+};
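A concrete instance appears in the cxl_pci changes later in this patch,
where the existing file_operations are wrapped in a cxl_memdev_fops
instance whose ->shutdown() clears cxlmd->cxlm under cxl_memdev_rwsem.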
+
+/**
+ * struct cxl_memdev - CXL bus object representing a Type-3 Memory Device
+ * @dev: driver core device object
+ * @cdev: char dev core object for ioctl operations
+ * @cxlm: pointer to the parent device driver data
+ * @id: id number of this memdev instance.
+ */
+struct cxl_memdev {
+       struct device dev;
+       struct cdev cdev;
+       struct cxl_mem *cxlm;
+       int id;
+};
+
+static inline struct cxl_memdev *to_cxl_memdev(struct device *dev)
+{
+       return container_of(dev, struct cxl_memdev, dev);
+}
+
+struct cxl_memdev *
+devm_cxl_add_memdev(struct device *host, struct cxl_mem *cxlm,
+                   const struct cdevm_file_operations *cdevm_fops);
+
+/**
+ * struct cxl_mem - A CXL memory device
+ * @pdev: The PCI device associated with this CXL device.
+ * @cxlmd: Logical memory device chardev / interface
+ * @regs: Parsed register blocks
+ * @payload_size: Size of space for payload
+ *                (CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register)
+ * @lsa_size: Size of Label Storage Area
+ *                (CXL 2.0 8.2.9.5.1.1 Identify Memory Device)
+ * @mbox_mutex: Mutex to synchronize mailbox access.
+ * @firmware_version: Firmware version for the memory device.
+ * @enabled_cmds: Hardware commands found enabled in CEL.
+ * @pmem_range: Persistent memory capacity information.
+ * @ram_range: Volatile memory capacity information.
+ */
+struct cxl_mem {
+       struct pci_dev *pdev;
+       struct cxl_memdev *cxlmd;
+
+       struct cxl_regs regs;
+
+       size_t payload_size;
+       size_t lsa_size;
+       struct mutex mbox_mutex; /* Protects device mailbox and firmware */
+       char firmware_version[0x10];
+       unsigned long *enabled_cmds;
+
+       struct range pmem_range;
+       struct range ram_range;
+       u64 total_bytes;
+       u64 volatile_only_bytes;
+       u64 persistent_only_bytes;
+       u64 partition_align_bytes;
+
+       u64 active_volatile_bytes;
+       u64 active_persistent_bytes;
+       u64 next_volatile_bytes;
+       u64 next_persistent_bytes;
+};
+#endif /* __CXL_MEM_H__ */
diff --git a/drivers/cxl/mem.h b/drivers/cxl/mem.h
deleted file mode 100644 (file)
index 8f02d02..0000000
+++ /dev/null
@@ -1,81 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright(c) 2020-2021 Intel Corporation. */
-#ifndef __CXL_MEM_H__
-#define __CXL_MEM_H__
-#include <linux/cdev.h>
-#include "cxl.h"
-
-/* CXL 2.0 8.2.8.5.1.1 Memory Device Status Register */
-#define CXLMDEV_STATUS_OFFSET 0x0
-#define   CXLMDEV_DEV_FATAL BIT(0)
-#define   CXLMDEV_FW_HALT BIT(1)
-#define   CXLMDEV_STATUS_MEDIA_STATUS_MASK GENMASK(3, 2)
-#define     CXLMDEV_MS_NOT_READY 0
-#define     CXLMDEV_MS_READY 1
-#define     CXLMDEV_MS_ERROR 2
-#define     CXLMDEV_MS_DISABLED 3
-#define CXLMDEV_READY(status)                                                  \
-       (FIELD_GET(CXLMDEV_STATUS_MEDIA_STATUS_MASK, status) ==                \
-        CXLMDEV_MS_READY)
-#define   CXLMDEV_MBOX_IF_READY BIT(4)
-#define   CXLMDEV_RESET_NEEDED_MASK GENMASK(7, 5)
-#define     CXLMDEV_RESET_NEEDED_NOT 0
-#define     CXLMDEV_RESET_NEEDED_COLD 1
-#define     CXLMDEV_RESET_NEEDED_WARM 2
-#define     CXLMDEV_RESET_NEEDED_HOT 3
-#define     CXLMDEV_RESET_NEEDED_CXL 4
-#define CXLMDEV_RESET_NEEDED(status)                                           \
-       (FIELD_GET(CXLMDEV_RESET_NEEDED_MASK, status) !=                       \
-        CXLMDEV_RESET_NEEDED_NOT)
-
-/*
- * An entire PCI topology full of devices should be enough for any
- * config
- */
-#define CXL_MEM_MAX_DEVS 65536
-
-/**
- * struct cxl_memdev - CXL bus object representing a Type-3 Memory Device
- * @dev: driver core device object
- * @cdev: char dev core object for ioctl operations
- * @cxlm: pointer to the parent device driver data
- * @id: id number of this memdev instance.
- */
-struct cxl_memdev {
-       struct device dev;
-       struct cdev cdev;
-       struct cxl_mem *cxlm;
-       int id;
-};
-
-/**
- * struct cxl_mem - A CXL memory device
- * @pdev: The PCI device associated with this CXL device.
- * @cxlmd: Logical memory device chardev / interface
- * @regs: Parsed register blocks
- * @payload_size: Size of space for payload
- *                (CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register)
- * @lsa_size: Size of Label Storage Area
- *                (CXL 2.0 8.2.9.5.1.1 Identify Memory Device)
- * @mbox_mutex: Mutex to synchronize mailbox access.
- * @firmware_version: Firmware version for the memory device.
- * @enabled_cmds: Hardware commands found enabled in CEL.
- * @pmem_range: Persistent memory capacity information.
- * @ram_range: Volatile memory capacity information.
- */
-struct cxl_mem {
-       struct pci_dev *pdev;
-       struct cxl_memdev *cxlmd;
-
-       struct cxl_regs regs;
-
-       size_t payload_size;
-       size_t lsa_size;
-       struct mutex mbox_mutex; /* Protects device mailbox and firmware */
-       char firmware_version[0x10];
-       unsigned long *enabled_cmds;
-
-       struct range pmem_range;
-       struct range ram_range;
-};
-#endif /* __CXL_MEM_H__ */
index 4cf351a..8e45aa0 100644 (file)
@@ -12,9 +12,9 @@
 #include <linux/pci.h>
 #include <linux/io.h>
 #include <linux/io-64-nonatomic-lo-hi.h>
+#include "cxlmem.h"
 #include "pci.h"
 #include "cxl.h"
-#include "mem.h"
 
 /**
  * DOC: cxl pci
@@ -64,6 +64,15 @@ enum opcode {
        CXL_MBOX_OP_MAX                 = 0x10000
 };
 
+/*
+ * CXL 2.0 - Memory capacity multiplier
+ * See Section 8.2.9.5
+ *
+ * Volatile, Persistent, and Partition capacities are specified to be in
+ * multiples of 256MB - define a multiplier to convert to/from bytes.
+ */
+#define CXL_CAPACITY_MULTIPLIER SZ_256M
+
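For example, a raw capacity field of 4 denotes 4 * 256MB = 1GB; a sketch of
the conversion as used throughout this patch:

        u64 raw = le64_to_cpu(id.total_capacity);  /* in 256MB units */
        u64 bytes = raw * CXL_CAPACITY_MULTIPLIER; /* 4 -> 0x40000000 */
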
 /**
  * struct mbox_cmd - A command to be submitted to hardware.
  * @opcode: (input) The command set and command submitted to hardware.
@@ -94,8 +103,6 @@ struct mbox_cmd {
 #define CXL_MBOX_SUCCESS 0
 };
 
-static int cxl_mem_major;
-static DEFINE_IDA(cxl_memdev_ida);
 static DECLARE_RWSEM(cxl_memdev_rwsem);
 static struct dentry *cxl_debugfs;
 static bool cxl_raw_allow_all;
@@ -568,7 +575,7 @@ static bool cxl_mem_raw_command_allowed(u16 opcode)
        if (!IS_ENABLED(CONFIG_CXL_MEM_RAW_COMMANDS))
                return false;
 
-       if (security_locked_down(LOCKDOWN_NONE))
+       if (security_locked_down(LOCKDOWN_PCI_ACCESS))
                return false;
 
        if (cxl_raw_allow_all)
@@ -806,13 +813,25 @@ static int cxl_memdev_release_file(struct inode *inode, struct file *file)
        return 0;
 }
 
-static const struct file_operations cxl_memdev_fops = {
-       .owner = THIS_MODULE,
-       .unlocked_ioctl = cxl_memdev_ioctl,
-       .open = cxl_memdev_open,
-       .release = cxl_memdev_release_file,
-       .compat_ioctl = compat_ptr_ioctl,
-       .llseek = noop_llseek,
+static void cxl_memdev_shutdown(struct device *dev)
+{
+       struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+
+       down_write(&cxl_memdev_rwsem);
+       cxlmd->cxlm = NULL;
+       up_write(&cxl_memdev_rwsem);
+}
+
+static const struct cdevm_file_operations cxl_memdev_fops = {
+       .fops = {
+               .owner = THIS_MODULE,
+               .unlocked_ioctl = cxl_memdev_ioctl,
+               .open = cxl_memdev_open,
+               .release = cxl_memdev_release_file,
+               .compat_ioctl = compat_ptr_ioctl,
+               .llseek = noop_llseek,
+       },
+       .shutdown = cxl_memdev_shutdown,
 };
 
 static inline struct cxl_mem_command *cxl_mem_find_command(u16 opcode)
@@ -1022,8 +1041,8 @@ static int cxl_probe_regs(struct cxl_mem *cxlm, void __iomem *base,
                    !dev_map->memdev.valid) {
                        dev_err(dev, "registers not found: %s%s%s\n",
                                !dev_map->status.valid ? "status " : "",
-                               !dev_map->mbox.valid ? "status " : "",
-                               !dev_map->memdev.valid ? "status " : "");
+                               !dev_map->mbox.valid ? "mbox " : "",
+                               !dev_map->memdev.valid ? "memdev " : "");
                        return -ENXIO;
                }
 
@@ -1081,9 +1100,8 @@ static int cxl_mem_setup_regs(struct cxl_mem *cxlm)
        struct device *dev = &pdev->dev;
        u32 regloc_size, regblocks;
        void __iomem *base;
-       int regloc, i;
-       struct cxl_register_map *map, *n;
-       LIST_HEAD(register_maps);
+       int regloc, i, n_maps;
+       struct cxl_register_map *map, maps[CXL_REGLOC_RBI_TYPES];
        int ret = 0;
 
        regloc = cxl_mem_dvsec(pdev, PCI_DVSEC_ID_CXL_REGLOC_DVSEC_ID);
@@ -1102,20 +1120,12 @@ static int cxl_mem_setup_regs(struct cxl_mem *cxlm)
        regloc += PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET;
        regblocks = (regloc_size - PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET) / 8;
 
-       for (i = 0; i < regblocks; i++, regloc += 8) {
+       for (i = 0, n_maps = 0; i < regblocks; i++, regloc += 8) {
                u32 reg_lo, reg_hi;
                u8 reg_type;
                u64 offset;
                u8 bar;
 
-               map = kzalloc(sizeof(*map), GFP_KERNEL);
-               if (!map) {
-                       ret = -ENOMEM;
-                       goto free_maps;
-               }
-
-               list_add(&map->list, &register_maps);
-
                pci_read_config_dword(pdev, regloc, &reg_lo);
                pci_read_config_dword(pdev, regloc + 4, &reg_hi);
 
@@ -1125,12 +1135,15 @@ static int cxl_mem_setup_regs(struct cxl_mem *cxlm)
                dev_dbg(dev, "Found register block in bar %u @ 0x%llx of type %u\n",
                        bar, offset, reg_type);
 
+               /* Ignore unknown register block types */
+               if (reg_type > CXL_REGLOC_RBI_MEMDEV)
+                       continue;
+
                base = cxl_mem_map_regblock(cxlm, bar, offset);
-               if (!base) {
-                       ret = -ENOMEM;
-                       goto free_maps;
-               }
+               if (!base)
+                       return -ENOMEM;
 
+               map = &maps[n_maps];
                map->barno = bar;
                map->block_offset = offset;
                map->reg_type = reg_type;
@@ -1141,240 +1154,22 @@ static int cxl_mem_setup_regs(struct cxl_mem *cxlm)
                cxl_mem_unmap_regblock(cxlm, base);
 
                if (ret)
-                       goto free_maps;
+                       return ret;
+
+               n_maps++;
        }
 
        pci_release_mem_regions(pdev);
 
-       list_for_each_entry(map, &register_maps, list) {
-               ret = cxl_map_regs(cxlm, map);
+       for (i = 0; i < n_maps; i++) {
+               ret = cxl_map_regs(cxlm, &maps[i]);
                if (ret)
-                       goto free_maps;
-       }
-
-free_maps:
-       list_for_each_entry_safe(map, n, &register_maps, list) {
-               list_del(&map->list);
-               kfree(map);
+                       break;
        }
 
        return ret;
 }
 
-static struct cxl_memdev *to_cxl_memdev(struct device *dev)
-{
-       return container_of(dev, struct cxl_memdev, dev);
-}
-
-static void cxl_memdev_release(struct device *dev)
-{
-       struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
-
-       ida_free(&cxl_memdev_ida, cxlmd->id);
-       kfree(cxlmd);
-}
-
-static char *cxl_memdev_devnode(struct device *dev, umode_t *mode, kuid_t *uid,
-                               kgid_t *gid)
-{
-       return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev));
-}
-
-static ssize_t firmware_version_show(struct device *dev,
-                                    struct device_attribute *attr, char *buf)
-{
-       struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
-       struct cxl_mem *cxlm = cxlmd->cxlm;
-
-       return sysfs_emit(buf, "%.16s\n", cxlm->firmware_version);
-}
-static DEVICE_ATTR_RO(firmware_version);
-
-static ssize_t payload_max_show(struct device *dev,
-                               struct device_attribute *attr, char *buf)
-{
-       struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
-       struct cxl_mem *cxlm = cxlmd->cxlm;
-
-       return sysfs_emit(buf, "%zu\n", cxlm->payload_size);
-}
-static DEVICE_ATTR_RO(payload_max);
-
-static ssize_t label_storage_size_show(struct device *dev,
-                               struct device_attribute *attr, char *buf)
-{
-       struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
-       struct cxl_mem *cxlm = cxlmd->cxlm;
-
-       return sysfs_emit(buf, "%zu\n", cxlm->lsa_size);
-}
-static DEVICE_ATTR_RO(label_storage_size);
-
-static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr,
-                            char *buf)
-{
-       struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
-       struct cxl_mem *cxlm = cxlmd->cxlm;
-       unsigned long long len = range_len(&cxlm->ram_range);
-
-       return sysfs_emit(buf, "%#llx\n", len);
-}
-
-static struct device_attribute dev_attr_ram_size =
-       __ATTR(size, 0444, ram_size_show, NULL);
-
-static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr,
-                             char *buf)
-{
-       struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
-       struct cxl_mem *cxlm = cxlmd->cxlm;
-       unsigned long long len = range_len(&cxlm->pmem_range);
-
-       return sysfs_emit(buf, "%#llx\n", len);
-}
-
-static struct device_attribute dev_attr_pmem_size =
-       __ATTR(size, 0444, pmem_size_show, NULL);
-
-static struct attribute *cxl_memdev_attributes[] = {
-       &dev_attr_firmware_version.attr,
-       &dev_attr_payload_max.attr,
-       &dev_attr_label_storage_size.attr,
-       NULL,
-};
-
-static struct attribute *cxl_memdev_pmem_attributes[] = {
-       &dev_attr_pmem_size.attr,
-       NULL,
-};
-
-static struct attribute *cxl_memdev_ram_attributes[] = {
-       &dev_attr_ram_size.attr,
-       NULL,
-};
-
-static struct attribute_group cxl_memdev_attribute_group = {
-       .attrs = cxl_memdev_attributes,
-};
-
-static struct attribute_group cxl_memdev_ram_attribute_group = {
-       .name = "ram",
-       .attrs = cxl_memdev_ram_attributes,
-};
-
-static struct attribute_group cxl_memdev_pmem_attribute_group = {
-       .name = "pmem",
-       .attrs = cxl_memdev_pmem_attributes,
-};
-
-static const struct attribute_group *cxl_memdev_attribute_groups[] = {
-       &cxl_memdev_attribute_group,
-       &cxl_memdev_ram_attribute_group,
-       &cxl_memdev_pmem_attribute_group,
-       NULL,
-};
-
-static const struct device_type cxl_memdev_type = {
-       .name = "cxl_memdev",
-       .release = cxl_memdev_release,
-       .devnode = cxl_memdev_devnode,
-       .groups = cxl_memdev_attribute_groups,
-};
-
-static void cxl_memdev_shutdown(struct cxl_memdev *cxlmd)
-{
-       down_write(&cxl_memdev_rwsem);
-       cxlmd->cxlm = NULL;
-       up_write(&cxl_memdev_rwsem);
-}
-
-static void cxl_memdev_unregister(void *_cxlmd)
-{
-       struct cxl_memdev *cxlmd = _cxlmd;
-       struct device *dev = &cxlmd->dev;
-
-       cdev_device_del(&cxlmd->cdev, dev);
-       cxl_memdev_shutdown(cxlmd);
-       put_device(dev);
-}
-
-static struct cxl_memdev *cxl_memdev_alloc(struct cxl_mem *cxlm)
-{
-       struct pci_dev *pdev = cxlm->pdev;
-       struct cxl_memdev *cxlmd;
-       struct device *dev;
-       struct cdev *cdev;
-       int rc;
-
-       cxlmd = kzalloc(sizeof(*cxlmd), GFP_KERNEL);
-       if (!cxlmd)
-               return ERR_PTR(-ENOMEM);
-
-       rc = ida_alloc_range(&cxl_memdev_ida, 0, CXL_MEM_MAX_DEVS, GFP_KERNEL);
-       if (rc < 0)
-               goto err;
-       cxlmd->id = rc;
-
-       dev = &cxlmd->dev;
-       device_initialize(dev);
-       dev->parent = &pdev->dev;
-       dev->bus = &cxl_bus_type;
-       dev->devt = MKDEV(cxl_mem_major, cxlmd->id);
-       dev->type = &cxl_memdev_type;
-       device_set_pm_not_required(dev);
-
-       cdev = &cxlmd->cdev;
-       cdev_init(cdev, &cxl_memdev_fops);
-       return cxlmd;
-
-err:
-       kfree(cxlmd);
-       return ERR_PTR(rc);
-}
-
-static struct cxl_memdev *devm_cxl_add_memdev(struct device *host,
-                                             struct cxl_mem *cxlm)
-{
-       struct cxl_memdev *cxlmd;
-       struct device *dev;
-       struct cdev *cdev;
-       int rc;
-
-       cxlmd = cxl_memdev_alloc(cxlm);
-       if (IS_ERR(cxlmd))
-               return cxlmd;
-
-       dev = &cxlmd->dev;
-       rc = dev_set_name(dev, "mem%d", cxlmd->id);
-       if (rc)
-               goto err;
-
-       /*
-        * Activate ioctl operations, no cxl_memdev_rwsem manipulation
-        * needed as this is ordered with cdev_add() publishing the device.
-        */
-       cxlmd->cxlm = cxlm;
-
-       cdev = &cxlmd->cdev;
-       rc = cdev_device_add(cdev, dev);
-       if (rc)
-               goto err;
-
-       rc = devm_add_action_or_reset(host, cxl_memdev_unregister, cxlmd);
-       if (rc)
-               return ERR_PTR(rc);
-       return cxlmd;
-
-err:
-       /*
-        * The cdev was briefly live, shutdown any ioctl operations that
-        * saw that state.
-        */
-       cxl_memdev_shutdown(cxlmd);
-       put_device(dev);
-       return ERR_PTR(rc);
-}
-
 static int cxl_xfer_log(struct cxl_mem *cxlm, uuid_t *uuid, u32 size, u8 *out)
 {
        u32 remaining = size;
@@ -1468,6 +1263,53 @@ static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_mem *cxlm)
        return ret;
 }
 
+/**
+ * cxl_mem_get_partition_info - Get partition info
+ * @cxlm: The device to act on
+ * @active_volatile_bytes: returned active volatile capacity
+ * @active_persistent_bytes: returned active persistent capacity
+ * @next_volatile_bytes: returned next volatile capacity
+ * @next_persistent_bytes: returned next persistent capacity
+ *
+ * Retrieve the current partition info for the specified device. If not 0, the
+ * 'next' values are pending and take effect on the next cold reset.
+ *
+ * Return: 0 if no error, otherwise the result of the mailbox command.
+ *
+ * See CXL 2.0 8.2.9.5.2.1 Get Partition Info
+ */
+static int cxl_mem_get_partition_info(struct cxl_mem *cxlm,
+                                     u64 *active_volatile_bytes,
+                                     u64 *active_persistent_bytes,
+                                     u64 *next_volatile_bytes,
+                                     u64 *next_persistent_bytes)
+{
+       struct cxl_mbox_get_partition_info {
+               __le64 active_volatile_cap;
+               __le64 active_persistent_cap;
+               __le64 next_volatile_cap;
+               __le64 next_persistent_cap;
+       } __packed pi;
+       int rc;
+
+       rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_PARTITION_INFO,
+                                  NULL, 0, &pi, sizeof(pi));
+       if (rc)
+               return rc;
+
+       *active_volatile_bytes = le64_to_cpu(pi.active_volatile_cap);
+       *active_persistent_bytes = le64_to_cpu(pi.active_persistent_cap);
+       *next_volatile_bytes = le64_to_cpu(pi.next_volatile_cap);
+       *next_persistent_bytes = le64_to_cpu(pi.next_persistent_cap);
+
+       *active_volatile_bytes *= CXL_CAPACITY_MULTIPLIER;
+       *active_persistent_bytes *= CXL_CAPACITY_MULTIPLIER;
+       *next_volatile_bytes *= CXL_CAPACITY_MULTIPLIER;
+       *next_persistent_bytes *= CXL_CAPACITY_MULTIPLIER;
+
+       return 0;
+}
+
 /**
  * cxl_mem_enumerate_cmds() - Enumerate commands for a device.
  * @cxlm: The device.
@@ -1564,16 +1406,27 @@ static int cxl_mem_identify(struct cxl_mem *cxlm)
        if (rc < 0)
                return rc;
 
-       /*
-        * TODO: enumerate DPA map, as 'ram' and 'pmem' do not alias.
-        * For now, only the capacity is exported in sysfs
-        */
-       cxlm->ram_range.start = 0;
-       cxlm->ram_range.end = le64_to_cpu(id.volatile_capacity) * SZ_256M - 1;
+       cxlm->total_bytes = le64_to_cpu(id.total_capacity);
+       cxlm->total_bytes *= CXL_CAPACITY_MULTIPLIER;
+
+       cxlm->volatile_only_bytes = le64_to_cpu(id.volatile_capacity);
+       cxlm->volatile_only_bytes *= CXL_CAPACITY_MULTIPLIER;
 
-       cxlm->pmem_range.start = 0;
-       cxlm->pmem_range.end =
-               le64_to_cpu(id.persistent_capacity) * SZ_256M - 1;
+       cxlm->persistent_only_bytes = le64_to_cpu(id.persistent_capacity);
+       cxlm->persistent_only_bytes *= CXL_CAPACITY_MULTIPLIER;
+
+       cxlm->partition_align_bytes = le64_to_cpu(id.partition_align);
+       cxlm->partition_align_bytes *= CXL_CAPACITY_MULTIPLIER;
+
+       dev_dbg(&cxlm->pdev->dev, "Identify Memory Device\n"
+               "     total_bytes = %#llx\n"
+               "     volatile_only_bytes = %#llx\n"
+               "     persistent_only_bytes = %#llx\n"
+               "     partition_align_bytes = %#llx\n",
+                       cxlm->total_bytes,
+                       cxlm->volatile_only_bytes,
+                       cxlm->persistent_only_bytes,
+                       cxlm->partition_align_bytes);
 
        cxlm->lsa_size = le32_to_cpu(id.lsa_size);
        memcpy(cxlm->firmware_version, id.fw_revision, sizeof(id.fw_revision));
@@ -1581,6 +1434,49 @@ static int cxl_mem_identify(struct cxl_mem *cxlm)
        return 0;
 }
 
+static int cxl_mem_create_range_info(struct cxl_mem *cxlm)
+{
+       int rc;
+
+       if (cxlm->partition_align_bytes == 0) {
+               cxlm->ram_range.start = 0;
+               cxlm->ram_range.end = cxlm->volatile_only_bytes - 1;
+               cxlm->pmem_range.start = cxlm->volatile_only_bytes;
+               cxlm->pmem_range.end = cxlm->volatile_only_bytes +
+                                       cxlm->persistent_only_bytes - 1;
+               return 0;
+       }
+
+       rc = cxl_mem_get_partition_info(cxlm,
+                                       &cxlm->active_volatile_bytes,
+                                       &cxlm->active_persistent_bytes,
+                                       &cxlm->next_volatile_bytes,
+                                       &cxlm->next_persistent_bytes);
+       if (rc < 0) {
+               dev_err(&cxlm->pdev->dev, "Failed to query partition information\n");
+               return rc;
+       }
+
+       dev_dbg(&cxlm->pdev->dev, "Get Partition Info\n"
+               "     active_volatile_bytes = %#llx\n"
+               "     active_persistent_bytes = %#llx\n"
+               "     next_volatile_bytes = %#llx\n"
+               "     next_persistent_bytes = %#llx\n",
+                       cxlm->active_volatile_bytes,
+                       cxlm->active_persistent_bytes,
+                       cxlm->next_volatile_bytes,
+                       cxlm->next_persistent_bytes);
+
+       cxlm->ram_range.start = 0;
+       cxlm->ram_range.end = cxlm->active_volatile_bytes - 1;
+
+       cxlm->pmem_range.start = cxlm->active_volatile_bytes;
+       cxlm->pmem_range.end = cxlm->active_volatile_bytes +
+                               cxlm->active_persistent_bytes - 1;
+
+       return 0;
+}
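Worked example: a partitionable device reporting active_volatile_bytes =
0x80000000 (2GB) and active_persistent_bytes = 0x40000000 (1GB) yields
ram_range = [0x0, 0x7fffffff] and pmem_range = [0x80000000, 0xbfffffff],
i.e. the two DPA ranges no longer alias at offset 0.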
+
 static int cxl_mem_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
        struct cxl_memdev *cxlmd;
@@ -1611,7 +1507,11 @@ static int cxl_mem_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        if (rc)
                return rc;
 
-       cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlm);
+       rc = cxl_mem_create_range_info(cxlm);
+       if (rc)
+               return rc;
+
+       cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlm, &cxl_memdev_fops);
        if (IS_ERR(cxlmd))
                return PTR_ERR(cxlmd);
 
@@ -1640,25 +1540,15 @@ static struct pci_driver cxl_mem_driver = {
 static __init int cxl_mem_init(void)
 {
        struct dentry *mbox_debugfs;
-       dev_t devt;
        int rc;
 
        /* Double check the anonymous union trickery in struct cxl_regs */
        BUILD_BUG_ON(offsetof(struct cxl_regs, memdev) !=
                     offsetof(struct cxl_regs, device_regs.memdev));
 
-       rc = alloc_chrdev_region(&devt, 0, CXL_MEM_MAX_DEVS, "cxl");
-       if (rc)
-               return rc;
-
-       cxl_mem_major = MAJOR(devt);
-
        rc = pci_register_driver(&cxl_mem_driver);
-       if (rc) {
-               unregister_chrdev_region(MKDEV(cxl_mem_major, 0),
-                                        CXL_MEM_MAX_DEVS);
+       if (rc)
                return rc;
-       }
 
        cxl_debugfs = debugfs_create_dir("cxl", NULL);
        mbox_debugfs = debugfs_create_dir("mbox", cxl_debugfs);
@@ -1672,7 +1562,6 @@ static __exit void cxl_mem_exit(void)
 {
        debugfs_remove_recursive(cxl_debugfs);
        pci_unregister_driver(&cxl_mem_driver);
-       unregister_chrdev_region(MKDEV(cxl_mem_major, 0), CXL_MEM_MAX_DEVS);
 }
 
 MODULE_LICENSE("GPL v2");
index dad7a83..8c1a588 100644 (file)
@@ -25,6 +25,7 @@
 #define CXL_REGLOC_RBI_COMPONENT 1
 #define CXL_REGLOC_RBI_VIRT 2
 #define CXL_REGLOC_RBI_MEMDEV 3
+#define CXL_REGLOC_RBI_TYPES (CXL_REGLOC_RBI_MEMDEV + 1)
 
 #define CXL_REGLOC_ADDR_MASK GENMASK(31, 16)
 
index 0088e41..9652c3e 100644 (file)
@@ -6,7 +6,7 @@
 #include <linux/ndctl.h>
 #include <linux/async.h>
 #include <linux/slab.h>
-#include "mem.h"
+#include "cxlmem.h"
 #include "cxl.h"
 
 /*
index 9251441..7f473f9 100644 (file)
@@ -346,29 +346,45 @@ static bool preamble_next(struct nvdimm_drvdata *ndd,
                        free, nslot);
 }
 
+static bool nsl_validate_checksum(struct nvdimm_drvdata *ndd,
+                                 struct nd_namespace_label *nd_label)
+{
+       u64 sum, sum_save;
+
+       if (!namespace_label_has(ndd, checksum))
+               return true;
+
+       sum_save = nsl_get_checksum(ndd, nd_label);
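+       /* the stored sum is computed with the checksum field zeroed */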
+       nsl_set_checksum(ndd, nd_label, 0);
+       sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
+       nsl_set_checksum(ndd, nd_label, sum_save);
+       return sum == sum_save;
+}
+
+static void nsl_calculate_checksum(struct nvdimm_drvdata *ndd,
+                                  struct nd_namespace_label *nd_label)
+{
+       u64 sum;
+
+       if (!namespace_label_has(ndd, checksum))
+               return;
+       nsl_set_checksum(ndd, nd_label, 0);
+       sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
+       nsl_set_checksum(ndd, nd_label, sum);
+}
+
 static bool slot_valid(struct nvdimm_drvdata *ndd,
                struct nd_namespace_label *nd_label, u32 slot)
 {
+       bool valid;
+
        /* check that we are written where we expect to be written */
-       if (slot != __le32_to_cpu(nd_label->slot))
+       if (slot != nsl_get_slot(ndd, nd_label))
                return false;
-
-       /* check checksum */
-       if (namespace_label_has(ndd, checksum)) {
-               u64 sum, sum_save;
-
-               sum_save = __le64_to_cpu(nd_label->checksum);
-               nd_label->checksum = __cpu_to_le64(0);
-               sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
-               nd_label->checksum = __cpu_to_le64(sum_save);
-               if (sum != sum_save) {
-                       dev_dbg(ndd->dev, "fail checksum. slot: %d expect: %#llx\n",
-                               slot, sum);
-                       return false;
-               }
-       }
-
-       return true;
+       valid = nsl_validate_checksum(ndd, nd_label);
+       if (!valid)
+               dev_dbg(ndd->dev, "fail checksum. slot: %d\n", slot);
+       return valid;
 }
 
 int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
@@ -395,13 +411,13 @@ int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
                        continue;
 
                memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
-               flags = __le32_to_cpu(nd_label->flags);
+               flags = nsl_get_flags(ndd, nd_label);
                if (test_bit(NDD_NOBLK, &nvdimm->flags))
                        flags &= ~NSLABEL_FLAG_LOCAL;
                nd_label_gen_id(&label_id, label_uuid, flags);
                res = nvdimm_allocate_dpa(ndd, &label_id,
-                               __le64_to_cpu(nd_label->dpa),
-                               __le64_to_cpu(nd_label->rawsize));
+                                         nsl_get_dpa(ndd, nd_label),
+                                         nsl_get_rawsize(ndd, nd_label));
                nd_dbg_dpa(nd_region, ndd, res, "reserve\n");
                if (!res)
                        return -EBUSY;
@@ -548,9 +564,9 @@ int nd_label_active_count(struct nvdimm_drvdata *ndd)
                nd_label = to_label(ndd, slot);
 
                if (!slot_valid(ndd, nd_label, slot)) {
-                       u32 label_slot = __le32_to_cpu(nd_label->slot);
-                       u64 size = __le64_to_cpu(nd_label->rawsize);
-                       u64 dpa = __le64_to_cpu(nd_label->dpa);
+                       u32 label_slot = nsl_get_slot(ndd, nd_label);
+                       u64 size = nsl_get_rawsize(ndd, nd_label);
+                       u64 dpa = nsl_get_dpa(ndd, nd_label);
 
                        dev_dbg(ndd->dev,
                                "slot%d invalid slot: %d dpa: %llx size: %llx\n",
@@ -708,7 +724,7 @@ static unsigned long nd_label_offset(struct nvdimm_drvdata *ndd,
                - (unsigned long) to_namespace_index(ndd, 0);
 }
 
-enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid)
+static enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid)
 {
        if (guid_equal(guid, &nvdimm_btt_guid))
                return NVDIMM_CCLASS_BTT;
@@ -756,6 +772,45 @@ static void reap_victim(struct nd_mapping *nd_mapping,
        victim->label = NULL;
 }
 
+static void nsl_set_type_guid(struct nvdimm_drvdata *ndd,
+                             struct nd_namespace_label *nd_label, guid_t *guid)
+{
+       if (namespace_label_has(ndd, type_guid))
+               guid_copy(&nd_label->type_guid, guid);
+}
+
+bool nsl_validate_type_guid(struct nvdimm_drvdata *ndd,
+                           struct nd_namespace_label *nd_label, guid_t *guid)
+{
+       if (!namespace_label_has(ndd, type_guid))
+               return true;
+       if (!guid_equal(&nd_label->type_guid, guid)) {
+               dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n", guid,
+                       &nd_label->type_guid);
+               return false;
+       }
+       return true;
+}
+
+static void nsl_set_claim_class(struct nvdimm_drvdata *ndd,
+                               struct nd_namespace_label *nd_label,
+                               enum nvdimm_claim_class claim_class)
+{
+       if (!namespace_label_has(ndd, abstraction_guid))
+               return;
+       guid_copy(&nd_label->abstraction_guid,
+                 to_abstraction_guid(claim_class,
+                                     &nd_label->abstraction_guid));
+}
+
+enum nvdimm_claim_class nsl_get_claim_class(struct nvdimm_drvdata *ndd,
+                                           struct nd_namespace_label *nd_label)
+{
+       if (!namespace_label_has(ndd, abstraction_guid))
+               return NVDIMM_CCLASS_NONE;
+       return to_nvdimm_cclass(&nd_label->abstraction_guid);
+}
+
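+/*
+ * The label-update paths below funnel every field store through the
+ * new nsl_*() setters, so the little-endian conversion and the
+ * version-dependent fields live in one place.
+ */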
 static int __pmem_label_update(struct nd_region *nd_region,
                struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
                int pos, unsigned long flags)
@@ -797,29 +852,18 @@ static int __pmem_label_update(struct nd_region *nd_region,
        nd_label = to_label(ndd, slot);
        memset(nd_label, 0, sizeof_namespace_label(ndd));
        memcpy(nd_label->uuid, nspm->uuid, NSLABEL_UUID_LEN);
-       if (nspm->alt_name)
-               memcpy(nd_label->name, nspm->alt_name, NSLABEL_NAME_LEN);
-       nd_label->flags = __cpu_to_le32(flags);
-       nd_label->nlabel = __cpu_to_le16(nd_region->ndr_mappings);
-       nd_label->position = __cpu_to_le16(pos);
-       nd_label->isetcookie = __cpu_to_le64(cookie);
-       nd_label->rawsize = __cpu_to_le64(resource_size(res));
-       nd_label->lbasize = __cpu_to_le64(nspm->lbasize);
-       nd_label->dpa = __cpu_to_le64(res->start);
-       nd_label->slot = __cpu_to_le32(slot);
-       if (namespace_label_has(ndd, type_guid))
-               guid_copy(&nd_label->type_guid, &nd_set->type_guid);
-       if (namespace_label_has(ndd, abstraction_guid))
-               guid_copy(&nd_label->abstraction_guid,
-                               to_abstraction_guid(ndns->claim_class,
-                                       &nd_label->abstraction_guid));
-       if (namespace_label_has(ndd, checksum)) {
-               u64 sum;
-
-               nd_label->checksum = __cpu_to_le64(0);
-               sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
-               nd_label->checksum = __cpu_to_le64(sum);
-       }
+       nsl_set_name(ndd, nd_label, nspm->alt_name);
+       nsl_set_flags(ndd, nd_label, flags);
+       nsl_set_nlabel(ndd, nd_label, nd_region->ndr_mappings);
+       nsl_set_position(ndd, nd_label, pos);
+       nsl_set_isetcookie(ndd, nd_label, cookie);
+       nsl_set_rawsize(ndd, nd_label, resource_size(res));
+       nsl_set_lbasize(ndd, nd_label, nspm->lbasize);
+       nsl_set_dpa(ndd, nd_label, res->start);
+       nsl_set_slot(ndd, nd_label, slot);
+       nsl_set_type_guid(ndd, nd_label, &nd_set->type_guid);
+       nsl_set_claim_class(ndd, nd_label, ndns->claim_class);
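+       /* the checksum covers the entire label, so it must be computed last */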
+       nsl_calculate_checksum(ndd, nd_label);
        nd_dbg_dpa(nd_region, ndd, res, "\n");
 
        /* update label */
@@ -879,9 +923,9 @@ static struct resource *to_resource(struct nvdimm_drvdata *ndd,
        struct resource *res;
 
        for_each_dpa_resource(ndd, res) {
-               if (res->start != __le64_to_cpu(nd_label->dpa))
+               if (res->start != nsl_get_dpa(ndd, nd_label))
                        continue;
-               if (resource_size(res) != __le64_to_cpu(nd_label->rawsize))
+               if (resource_size(res) != nsl_get_rawsize(ndd, nd_label))
                        continue;
                return res;
        }
@@ -889,6 +933,59 @@ static struct resource *to_resource(struct nvdimm_drvdata *ndd,
        return NULL;
 }
 
+/*
+ * Use the presence of the type_guid as a flag to determine isetcookie
+ * usage and nlabel + position policy for blk-aperture namespaces.
+ */
+static void nsl_set_blk_isetcookie(struct nvdimm_drvdata *ndd,
+                                  struct nd_namespace_label *nd_label,
+                                  u64 isetcookie)
+{
+       if (namespace_label_has(ndd, type_guid)) {
+               nsl_set_isetcookie(ndd, nd_label, isetcookie);
+               return;
+       }
+       nsl_set_isetcookie(ndd, nd_label, 0); /* N/A */
+}
+
+bool nsl_validate_blk_isetcookie(struct nvdimm_drvdata *ndd,
+                                struct nd_namespace_label *nd_label,
+                                u64 isetcookie)
+{
+       if (!namespace_label_has(ndd, type_guid))
+               return true;
+
+       if (nsl_get_isetcookie(ndd, nd_label) != isetcookie) {
+               dev_dbg(ndd->dev, "expect cookie %#llx got %#llx\n", isetcookie,
+                       nsl_get_isetcookie(ndd, nd_label));
+               return false;
+       }
+
+       return true;
+}
+
+static void nsl_set_blk_nlabel(struct nvdimm_drvdata *ndd,
+                              struct nd_namespace_label *nd_label, int nlabel,
+                              bool first)
+{
+       if (!namespace_label_has(ndd, type_guid)) {
+               nsl_set_nlabel(ndd, nd_label, 0); /* N/A */
+               return;
+       }
+       nsl_set_nlabel(ndd, nd_label, first ? nlabel : 0xffff);
+}
+
+static void nsl_set_blk_position(struct nvdimm_drvdata *ndd,
+                                struct nd_namespace_label *nd_label,
+                                bool first)
+{
+       if (!namespace_label_has(ndd, type_guid)) {
+       nsl_set_position(ndd, nd_label, 0); /* N/A */
+               return;
+       }
+       nsl_set_position(ndd, nd_label, first ? 0 : 0xffff);
+}
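+
+/*
+ * Net effect of the three helpers above: with a type_guid present, the
+ * first (lowest-DPA) label carries the real nlabel and position 0 while
+ * the remaining labels use the 0xffff sentinel; without a type_guid,
+ * nlabel, position and isetcookie are all recorded as 0 (not applicable).
+ */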
+
 /*
  * 1/ Account all the labels that can be freed after this update
  * 2/ Allocate and write the label to the staging (next) index
@@ -1017,50 +1114,21 @@ static int __blk_label_update(struct nd_region *nd_region,
                nd_label = to_label(ndd, slot);
                memset(nd_label, 0, sizeof_namespace_label(ndd));
                memcpy(nd_label->uuid, nsblk->uuid, NSLABEL_UUID_LEN);
-               if (nsblk->alt_name)
-                       memcpy(nd_label->name, nsblk->alt_name,
-                                       NSLABEL_NAME_LEN);
-               nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_LOCAL);
-
-               /*
-                * Use the presence of the type_guid as a flag to
-                * determine isetcookie usage and nlabel + position
-                * policy for blk-aperture namespaces.
-                */
-               if (namespace_label_has(ndd, type_guid)) {
-                       if (i == min_dpa_idx) {
-                               nd_label->nlabel = __cpu_to_le16(nsblk->num_resources);
-                               nd_label->position = __cpu_to_le16(0);
-                       } else {
-                               nd_label->nlabel = __cpu_to_le16(0xffff);
-                               nd_label->position = __cpu_to_le16(0xffff);
-                       }
-                       nd_label->isetcookie = __cpu_to_le64(nd_set->cookie2);
-               } else {
-                       nd_label->nlabel = __cpu_to_le16(0); /* N/A */
-                       nd_label->position = __cpu_to_le16(0); /* N/A */
-                       nd_label->isetcookie = __cpu_to_le64(0); /* N/A */
-               }
-
-               nd_label->dpa = __cpu_to_le64(res->start);
-               nd_label->rawsize = __cpu_to_le64(resource_size(res));
-               nd_label->lbasize = __cpu_to_le64(nsblk->lbasize);
-               nd_label->slot = __cpu_to_le32(slot);
-               if (namespace_label_has(ndd, type_guid))
-                       guid_copy(&nd_label->type_guid, &nd_set->type_guid);
-               if (namespace_label_has(ndd, abstraction_guid))
-                       guid_copy(&nd_label->abstraction_guid,
-                                       to_abstraction_guid(ndns->claim_class,
-                                               &nd_label->abstraction_guid));
-
-               if (namespace_label_has(ndd, checksum)) {
-                       u64 sum;
-
-                       nd_label->checksum = __cpu_to_le64(0);
-                       sum = nd_fletcher64(nd_label,
-                                       sizeof_namespace_label(ndd), 1);
-                       nd_label->checksum = __cpu_to_le64(sum);
-               }
+               nsl_set_name(ndd, nd_label, nsblk->alt_name);
+               nsl_set_flags(ndd, nd_label, NSLABEL_FLAG_LOCAL);
+
+               nsl_set_blk_nlabel(ndd, nd_label, nsblk->num_resources,
+                                  i == min_dpa_idx);
+               nsl_set_blk_position(ndd, nd_label, i == min_dpa_idx);
+               nsl_set_blk_isetcookie(ndd, nd_label, nd_set->cookie2);
+
+               nsl_set_dpa(ndd, nd_label, res->start);
+               nsl_set_rawsize(ndd, nd_label, resource_size(res));
+               nsl_set_lbasize(ndd, nd_label, nsblk->lbasize);
+               nsl_set_slot(ndd, nd_label, slot);
+               nsl_set_type_guid(ndd, nd_label, &nd_set->type_guid);
+               nsl_set_claim_class(ndd, nd_label, ndns->claim_class);
+               nsl_calculate_checksum(ndd, nd_label);
 
                /* update label */
                offset = nd_label_offset(ndd, nd_label);
index 956b6d1..31f94fa 100644 (file)
@@ -135,7 +135,6 @@ struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n);
 u32 nd_label_alloc_slot(struct nvdimm_drvdata *ndd);
 bool nd_label_free_slot(struct nvdimm_drvdata *ndd, u32 slot);
 u32 nd_label_nfree(struct nvdimm_drvdata *ndd);
-enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid);
 struct nd_region;
 struct nd_namespace_pmem;
 struct nd_namespace_blk;
index 7454782..4cec171 100644 (file)
@@ -1235,7 +1235,7 @@ static int namespace_update_uuid(struct nd_region *nd_region,
                        if (!nd_label)
                                continue;
                        nd_label_gen_id(&label_id, nd_label->uuid,
-                                       __le32_to_cpu(nd_label->flags));
+                                       nsl_get_flags(ndd, nd_label));
                        if (strcmp(old_label_id.id, label_id.id) == 0)
                                set_bit(ND_LABEL_REAP, &label_ent->flags);
                }
@@ -1847,28 +1847,21 @@ static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid,
                list_for_each_entry(label_ent, &nd_mapping->labels, list) {
                        struct nd_namespace_label *nd_label = label_ent->label;
                        u16 position, nlabel;
-                       u64 isetcookie;
 
                        if (!nd_label)
                                continue;
-                       isetcookie = __le64_to_cpu(nd_label->isetcookie);
-                       position = __le16_to_cpu(nd_label->position);
-                       nlabel = __le16_to_cpu(nd_label->nlabel);
+                       position = nsl_get_position(ndd, nd_label);
+                       nlabel = nsl_get_nlabel(ndd, nd_label);
 
-                       if (isetcookie != cookie)
+                       if (!nsl_validate_isetcookie(ndd, nd_label, cookie))
                                continue;
 
                        if (memcmp(nd_label->uuid, uuid, NSLABEL_UUID_LEN) != 0)
                                continue;
 
-                       if (namespace_label_has(ndd, type_guid)
-                                       && !guid_equal(&nd_set->type_guid,
-                                               &nd_label->type_guid)) {
-                               dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n",
-                                               &nd_set->type_guid,
-                                               &nd_label->type_guid);
+                       if (!nsl_validate_type_guid(ndd, nd_label,
+                                                   &nd_set->type_guid))
                                continue;
-                       }
 
                        if (found_uuid) {
                                dev_dbg(ndd->dev, "duplicate entry for uuid\n");
@@ -1923,8 +1916,8 @@ static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
                 */
                hw_start = nd_mapping->start;
                hw_end = hw_start + nd_mapping->size;
-               pmem_start = __le64_to_cpu(nd_label->dpa);
-               pmem_end = pmem_start + __le64_to_cpu(nd_label->rawsize);
+               pmem_start = nsl_get_dpa(ndd, nd_label);
+               pmem_end = pmem_start + nsl_get_rawsize(ndd, nd_label);
                if (pmem_start >= hw_start && pmem_start < hw_end
                                && pmem_end <= hw_end && pmem_end > hw_start)
                        /* pass */;
@@ -1947,14 +1940,16 @@ static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
  * @nd_label: target pmem namespace label to evaluate
  */
 static struct device *create_namespace_pmem(struct nd_region *nd_region,
-               struct nd_namespace_index *nsindex,
-               struct nd_namespace_label *nd_label)
+                                           struct nd_mapping *nd_mapping,
+                                           struct nd_namespace_label *nd_label)
 {
+       struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
+       struct nd_namespace_index *nsindex =
+               to_namespace_index(ndd, ndd->ns_current);
        u64 cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
        u64 altcookie = nd_region_interleave_set_altcookie(nd_region);
        struct nd_label_ent *label_ent;
        struct nd_namespace_pmem *nspm;
-       struct nd_mapping *nd_mapping;
        resource_size_t size = 0;
        struct resource *res;
        struct device *dev;
@@ -1966,10 +1961,10 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
                return ERR_PTR(-ENXIO);
        }
 
-       if (__le64_to_cpu(nd_label->isetcookie) != cookie) {
+       if (!nsl_validate_isetcookie(ndd, nd_label, cookie)) {
                dev_dbg(&nd_region->dev, "invalid cookie in label: %pUb\n",
                                nd_label->uuid);
-               if (__le64_to_cpu(nd_label->isetcookie) != altcookie)
+               if (!nsl_validate_isetcookie(ndd, nd_label, altcookie))
                        return ERR_PTR(-EAGAIN);
 
                dev_dbg(&nd_region->dev, "valid altcookie in label: %pUb\n",
@@ -2037,20 +2032,18 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
                        continue;
                }
 
-               size += __le64_to_cpu(label0->rawsize);
-               if (__le16_to_cpu(label0->position) != 0)
+               ndd = to_ndd(nd_mapping);
+               size += nsl_get_rawsize(ndd, label0);
+               if (nsl_get_position(ndd, label0) != 0)
                        continue;
                WARN_ON(nspm->alt_name || nspm->uuid);
-               nspm->alt_name = kmemdup((void __force *) label0->name,
-                               NSLABEL_NAME_LEN, GFP_KERNEL);
+               nspm->alt_name = kmemdup(nsl_ref_name(ndd, label0),
+                                        NSLABEL_NAME_LEN, GFP_KERNEL);
                nspm->uuid = kmemdup((void __force *) label0->uuid,
                                NSLABEL_UUID_LEN, GFP_KERNEL);
-               nspm->lbasize = __le64_to_cpu(label0->lbasize);
-               ndd = to_ndd(nd_mapping);
-               if (namespace_label_has(ndd, abstraction_guid))
-                       nspm->nsio.common.claim_class
-                               = to_nvdimm_cclass(&label0->abstraction_guid);
-
+               nspm->lbasize = nsl_get_lbasize(ndd, label0);
+               nspm->nsio.common.claim_class =
+                       nsl_get_claim_class(ndd, label0);
        }
 
        if (!nspm->alt_name || !nspm->uuid) {
@@ -2237,7 +2230,7 @@ static int add_namespace_resource(struct nd_region *nd_region,
                if (is_namespace_blk(devs[i])) {
                        res = nsblk_add_resource(nd_region, ndd,
                                        to_nd_namespace_blk(devs[i]),
-                                       __le64_to_cpu(nd_label->dpa));
+                                       nsl_get_dpa(ndd, nd_label));
                        if (!res)
                                return -ENXIO;
                        nd_dbg_dpa(nd_region, ndd, res, "%d assign\n", count);
@@ -2265,21 +2258,10 @@ static struct device *create_namespace_blk(struct nd_region *nd_region,
        struct device *dev = NULL;
        struct resource *res;
 
-       if (namespace_label_has(ndd, type_guid)) {
-               if (!guid_equal(&nd_set->type_guid, &nd_label->type_guid)) {
-                       dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n",
-                                       &nd_set->type_guid,
-                                       &nd_label->type_guid);
-                       return ERR_PTR(-EAGAIN);
-               }
-
-               if (nd_label->isetcookie != __cpu_to_le64(nd_set->cookie2)) {
-                       dev_dbg(ndd->dev, "expect cookie %#llx got %#llx\n",
-                                       nd_set->cookie2,
-                                       __le64_to_cpu(nd_label->isetcookie));
-                       return ERR_PTR(-EAGAIN);
-               }
-       }
+       if (!nsl_validate_type_guid(ndd, nd_label, &nd_set->type_guid))
+               return ERR_PTR(-EAGAIN);
+       if (!nsl_validate_blk_isetcookie(ndd, nd_label, nd_set->cookie2))
+               return ERR_PTR(-EAGAIN);
 
        nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
        if (!nsblk)
@@ -2288,23 +2270,19 @@ static struct device *create_namespace_blk(struct nd_region *nd_region,
        dev->type = &namespace_blk_device_type;
        dev->parent = &nd_region->dev;
        nsblk->id = -1;
-       nsblk->lbasize = __le64_to_cpu(nd_label->lbasize);
-       nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN,
-                       GFP_KERNEL);
-       if (namespace_label_has(ndd, abstraction_guid))
-               nsblk->common.claim_class
-                       = to_nvdimm_cclass(&nd_label->abstraction_guid);
+       nsblk->lbasize = nsl_get_lbasize(ndd, nd_label);
+       nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN, GFP_KERNEL);
+       nsblk->common.claim_class = nsl_get_claim_class(ndd, nd_label);
        if (!nsblk->uuid)
                goto blk_err;
-       memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
+       nsl_get_name(ndd, nd_label, name);
        if (name[0]) {
-               nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN,
-                               GFP_KERNEL);
+               nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN, GFP_KERNEL);
                if (!nsblk->alt_name)
                        goto blk_err;
        }
        res = nsblk_add_resource(nd_region, ndd, nsblk,
-                       __le64_to_cpu(nd_label->dpa));
+                       nsl_get_dpa(ndd, nd_label));
        if (!res)
                goto blk_err;
        nd_dbg_dpa(nd_region, ndd, res, "%d: assign\n", count);
@@ -2345,6 +2323,7 @@ static struct device **scan_labels(struct nd_region *nd_region)
        struct device *dev, **devs = NULL;
        struct nd_label_ent *label_ent, *e;
        struct nd_mapping *nd_mapping = &nd_region->mapping[0];
+       struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        resource_size_t map_end = nd_mapping->start + nd_mapping->size - 1;
 
        /* "safe" because create_namespace_pmem() might list_move() label_ent */
@@ -2355,7 +2334,7 @@ static struct device **scan_labels(struct nd_region *nd_region)
 
                if (!nd_label)
                        continue;
-               flags = __le32_to_cpu(nd_label->flags);
+               flags = nsl_get_flags(ndd, nd_label);
                if (is_nd_blk(&nd_region->dev)
                                == !!(flags & NSLABEL_FLAG_LOCAL))
                        /* pass, region matches label type */;
@@ -2363,9 +2342,9 @@ static struct device **scan_labels(struct nd_region *nd_region)
                        continue;
 
                /* skip labels that describe extents outside of the region */
-               if (__le64_to_cpu(nd_label->dpa) < nd_mapping->start ||
-                   __le64_to_cpu(nd_label->dpa) > map_end)
-                               continue;
+               if (nsl_get_dpa(ndd, nd_label) < nd_mapping->start ||
+                   nsl_get_dpa(ndd, nd_label) > map_end)
+                       continue;
 
                i = add_namespace_resource(nd_region, nd_label, devs, count);
                if (i < 0)
@@ -2381,13 +2360,9 @@ static struct device **scan_labels(struct nd_region *nd_region)
 
                if (is_nd_blk(&nd_region->dev))
                        dev = create_namespace_blk(nd_region, nd_label, count);
-               else {
-                       struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
-                       struct nd_namespace_index *nsindex;
-
-                       nsindex = to_namespace_index(ndd, ndd->ns_current);
-                       dev = create_namespace_pmem(nd_region, nsindex, nd_label);
-               }
+               else
+                       dev = create_namespace_pmem(nd_region, nd_mapping,
+                                                   nd_label);
 
                if (IS_ERR(dev)) {
                        switch (PTR_ERR(dev)) {
@@ -2571,10 +2546,10 @@ static int init_active_labels(struct nd_region *nd_region)
                                break;
                        label = nd_label_active(ndd, j);
                        if (test_bit(NDD_NOBLK, &nvdimm->flags)) {
-                               u32 flags = __le32_to_cpu(label->flags);
+                               u32 flags = nsl_get_flags(ndd, label);
 
                                flags &= ~NSLABEL_FLAG_LOCAL;
-                               label->flags = __cpu_to_le32(flags);
+                               nsl_set_flags(ndd, label, flags);
                        }
                        label_ent->label = label;
 
index 696b555..5467ebb 100644 (file)
@@ -35,6 +35,156 @@ struct nvdimm_drvdata {
        struct kref kref;
 };
 
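+/*
+ * Accessors for namespace-label fields. Each helper takes @ndd even
+ * where it is currently unused; a plausible reading is that this keeps
+ * call sites ready for per-DIMM dispatch should another label format
+ * (e.g. CXL labels) be wired up later.
+ */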
+static inline const u8 *nsl_ref_name(struct nvdimm_drvdata *ndd,
+                                    struct nd_namespace_label *nd_label)
+{
+       return nd_label->name;
+}
+
+static inline u8 *nsl_get_name(struct nvdimm_drvdata *ndd,
+                              struct nd_namespace_label *nd_label, u8 *name)
+{
+       return memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
+}
+
+static inline u8 *nsl_set_name(struct nvdimm_drvdata *ndd,
+                              struct nd_namespace_label *nd_label, u8 *name)
+{
+       if (!name)
+               return NULL;
+       return memcpy(nd_label->name, name, NSLABEL_NAME_LEN);
+}
+
+static inline u32 nsl_get_slot(struct nvdimm_drvdata *ndd,
+                              struct nd_namespace_label *nd_label)
+{
+       return __le32_to_cpu(nd_label->slot);
+}
+
+static inline void nsl_set_slot(struct nvdimm_drvdata *ndd,
+                               struct nd_namespace_label *nd_label, u32 slot)
+{
+       nd_label->slot = __cpu_to_le32(slot);
+}
+
+static inline u64 nsl_get_checksum(struct nvdimm_drvdata *ndd,
+                                  struct nd_namespace_label *nd_label)
+{
+       return __le64_to_cpu(nd_label->checksum);
+}
+
+static inline void nsl_set_checksum(struct nvdimm_drvdata *ndd,
+                                   struct nd_namespace_label *nd_label,
+                                   u64 checksum)
+{
+       nd_label->checksum = __cpu_to_le64(checksum);
+}
+
+static inline u32 nsl_get_flags(struct nvdimm_drvdata *ndd,
+                               struct nd_namespace_label *nd_label)
+{
+       return __le32_to_cpu(nd_label->flags);
+}
+
+static inline void nsl_set_flags(struct nvdimm_drvdata *ndd,
+                                struct nd_namespace_label *nd_label, u32 flags)
+{
+       nd_label->flags = __cpu_to_le32(flags);
+}
+
+static inline u64 nsl_get_dpa(struct nvdimm_drvdata *ndd,
+                             struct nd_namespace_label *nd_label)
+{
+       return __le64_to_cpu(nd_label->dpa);
+}
+
+static inline void nsl_set_dpa(struct nvdimm_drvdata *ndd,
+                              struct nd_namespace_label *nd_label, u64 dpa)
+{
+       nd_label->dpa = __cpu_to_le64(dpa);
+}
+
+static inline u64 nsl_get_rawsize(struct nvdimm_drvdata *ndd,
+                                 struct nd_namespace_label *nd_label)
+{
+       return __le64_to_cpu(nd_label->rawsize);
+}
+
+static inline void nsl_set_rawsize(struct nvdimm_drvdata *ndd,
+                                  struct nd_namespace_label *nd_label,
+                                  u64 rawsize)
+{
+       nd_label->rawsize = __cpu_to_le64(rawsize);
+}
+
+static inline u64 nsl_get_isetcookie(struct nvdimm_drvdata *ndd,
+                                    struct nd_namespace_label *nd_label)
+{
+       return __le64_to_cpu(nd_label->isetcookie);
+}
+
+static inline void nsl_set_isetcookie(struct nvdimm_drvdata *ndd,
+                                     struct nd_namespace_label *nd_label,
+                                     u64 isetcookie)
+{
+       nd_label->isetcookie = __cpu_to_le64(isetcookie);
+}
+
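+/*
+ * create_namespace_pmem() checks the label against the region's
+ * interleave-set cookie first, then against the altcookie, before
+ * rejecting the label.
+ */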
+static inline bool nsl_validate_isetcookie(struct nvdimm_drvdata *ndd,
+                                          struct nd_namespace_label *nd_label,
+                                          u64 cookie)
+{
+       return cookie == __le64_to_cpu(nd_label->isetcookie);
+}
+
+static inline u16 nsl_get_position(struct nvdimm_drvdata *ndd,
+                                  struct nd_namespace_label *nd_label)
+{
+       return __le16_to_cpu(nd_label->position);
+}
+
+static inline void nsl_set_position(struct nvdimm_drvdata *ndd,
+                                   struct nd_namespace_label *nd_label,
+                                   u16 position)
+{
+       nd_label->position = __cpu_to_le16(position);
+}
+
+static inline u16 nsl_get_nlabel(struct nvdimm_drvdata *ndd,
+                                struct nd_namespace_label *nd_label)
+{
+       return __le16_to_cpu(nd_label->nlabel);
+}
+
+static inline void nsl_set_nlabel(struct nvdimm_drvdata *ndd,
+                                 struct nd_namespace_label *nd_label,
+                                 u16 nlabel)
+{
+       nd_label->nlabel = __cpu_to_le16(nlabel);
+}
+
+static inline u64 nsl_get_lbasize(struct nvdimm_drvdata *ndd,
+                                 struct nd_namespace_label *nd_label)
+{
+       return __le64_to_cpu(nd_label->lbasize);
+}
+
+static inline void nsl_set_lbasize(struct nvdimm_drvdata *ndd,
+                                  struct nd_namespace_label *nd_label,
+                                  u64 lbasize)
+{
+       nd_label->lbasize = __cpu_to_le64(lbasize);
+}
+
+bool nsl_validate_blk_isetcookie(struct nvdimm_drvdata *ndd,
+                                struct nd_namespace_label *nd_label,
+                                u64 isetcookie);
+bool nsl_validate_type_guid(struct nvdimm_drvdata *ndd,
+                           struct nd_namespace_label *nd_label, guid_t *guid);
+enum nvdimm_claim_class nsl_get_claim_class(struct nvdimm_drvdata *ndd,
+                                           struct nd_namespace_label *nd_label);
+
 struct nd_region_data {
        int ns_count;
        int ns_active;
index f6e8a00..8d206f2 100644 (file)
@@ -50,7 +50,7 @@ enum { CXL_CMDS };
 #define ___C(a, b) { b }
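+/*
+ * cxl_command_names is defined in a header, so translation units that
+ * include it without referencing the table would otherwise trip
+ * defined-but-not-used warnings; __unused__ silences them.
+ */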
 static const struct {
        const char *name;
-} cxl_command_names[] = { CXL_CMDS };
+} cxl_command_names[] __attribute__((__unused__)) = { CXL_CMDS };
 
 /*
  * Here's how this actually breaks out: