return ret;
}
-static void setup_dma_device(struct ib_device *device)
+static void setup_dma_device(struct ib_device *device,
+ struct device *dma_device)
{
- struct device *parent = device->dev.parent;
-
- WARN_ON_ONCE(device->dma_device);
-
-#ifdef CONFIG_DMA_OPS
- if (device->dev.dma_ops) {
- /*
- * The caller provided custom DMA operations. Copy the
- * DMA-related fields that are used by e.g. dma_alloc_coherent()
- * into device->dev.
- */
- device->dma_device = &device->dev;
- if (!device->dev.dma_mask) {
- if (parent)
- device->dev.dma_mask = parent->dma_mask;
- else
- WARN_ON_ONCE(true);
- }
- if (!device->dev.coherent_dma_mask) {
- if (parent)
- device->dev.coherent_dma_mask =
- parent->coherent_dma_mask;
- else
- WARN_ON_ONCE(true);
- }
- } else
-#endif /* CONFIG_DMA_OPS */
- {
- /*
- * The caller did not provide custom DMA operations. Use the
- * DMA mapping operations of the parent device.
- */
- WARN_ON_ONCE(!parent);
- device->dma_device = parent;
- }
-
- if (!device->dev.dma_parms) {
- if (parent) {
- /*
- * The caller did not provide DMA parameters, so
- * 'parent' probably represents a PCI device. The PCI
- * core sets the maximum segment size to 64
- * KB. Increase this parameter to 2 GB.
- */
- device->dev.dma_parms = parent->dma_parms;
- dma_set_max_seg_size(device->dma_device, SZ_2G);
- } else {
- WARN_ON_ONCE(true);
- }
+ /*
+ * If the caller does not provide a DMA-capable device then the IB
+ * device will be used. In this case the caller should fully set up
+ * the ibdev for DMA. This usually means using dma_virt_ops.
+ */
+#ifdef CONFIG_DMA_VIRT_OPS
+ if (!dma_device) {
+ device->dev.dma_ops = &dma_virt_ops;
+ dma_device = &device->dev;
}
+#endif
+ WARN_ON(!dma_device);
+ device->dma_device = dma_device;
+ WARN_ON(!device->dma_device->dma_parms);
}
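The call sites below show the two patterns this change creates: hardware drivers pass their DMA-capable parent (typically the PCI device) explicitly, while software drivers pass NULL and let setup_dma_device() fall back to dma_virt_ops. A minimal sketch of both patterns; the "foo"/"soft" driver names are hypothetical:

	/* Hardware driver: DMA is performed by the PCI device. */
	dma_set_max_seg_size(&pdev->dev, UINT_MAX);
	ret = ib_register_device(ibdev, "foo%d", &pdev->dev);

	/* Software driver: no DMA-capable device, so setup_dma_device()
	 * installs dma_virt_ops on &ibdev->dev (requires CONFIG_DMA_VIRT_OPS).
	 */
	ret = ib_register_device(ibdev, "soft%d", NULL);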
/*
struct ib_udata uhw = {.outlen = 0, .inlen = 0};
int ret;
- setup_dma_device(device);
ib_device_check_mandatory(device);
ret = setup_port_data(device);
* ib_register_device - Register an IB device with IB core
* @device: Device to register
* @name: unique string device name. This may include a '%' which will
- * cause a unique index to be added to the passed device name.
+ * cause a unique index to be added to the passed device name.
+ * @dma_device: pointer to a DMA-capable device. If %NULL, then the IB
+ * device will be used. In this case the caller should fully
+ * set up the ibdev for DMA. This usually means using dma_virt_ops.
*
* Low-level drivers use ib_register_device() to register their
* devices with the IB core. All registered clients will receive a
* asynchronously then the device pointer may become freed as soon as this
* function returns.
*/
-int ib_register_device(struct ib_device *device, const char *name)
+int ib_register_device(struct ib_device *device, const char *name,
+ struct device *dma_device)
{
int ret;
if (ret)
return ret;
+ setup_dma_device(device, dma_device);
ret = setup_device(device);
if (ret)
return ret;
if (ret)
return ret;
- return ib_register_device(ibdev, "bnxt_re%d");
+ dma_set_max_seg_size(&rdev->en_dev->pdev->dev, UINT_MAX);
+ return ib_register_device(ibdev, "bnxt_re%d", &rdev->en_dev->pdev->dev);
}
static void bnxt_re_dev_remove(struct bnxt_re_dev *rdev)
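Each hardware driver now declares its own maximum DMA segment size before registering, replacing the SZ_2G default the removed core code applied. Note that dma_set_max_seg_size() only succeeds when dev->dma_parms is already allocated, which the PCI core guarantees for PCI devices; simplified from the generic helper:

	static inline int dma_set_max_seg_size(struct device *dev,
					       unsigned int size)
	{
		if (dev->dma_parms) {
			dev->dma_parms->max_segment_size = size;
			return 0;
		}
		return -EIO;	/* no dma_parms storage on this device */
	}

Drivers whose struct device is not managed by a bus that allocates dma_parms must supply their own storage first, as rxe and siw do further down.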
ret = set_netdevs(&dev->ibdev, &dev->rdev);
if (ret)
goto err_dealloc_ctx;
- ret = ib_register_device(&dev->ibdev, "cxgb4_%d");
+ dma_set_max_seg_size(&dev->rdev.lldi.pdev->dev, UINT_MAX);
+ ret = ib_register_device(&dev->ibdev, "cxgb4_%d",
+ &dev->rdev.lldi.pdev->dev);
if (ret)
goto err_dealloc_ctx;
return;
ib_set_device_ops(&dev->ibdev, &efa_dev_ops);
- err = ib_register_device(&dev->ibdev, "efa_%d");
+ err = ib_register_device(&dev->ibdev, "efa_%d", &pdev->dev);
if (err)
goto err_release_doorbell_bar;
err);
return err;
}
-
+ dma_set_max_seg_size(&pdev->dev, UINT_MAX);
return 0;
}
if (ret)
return ret;
}
- ret = ib_register_device(ib_dev, "hns_%d");
+ dma_set_max_seg_size(dev, UINT_MAX);
+ ret = ib_register_device(ib_dev, "hns_%d", dev);
if (ret) {
dev_err(dev, "ib_register_device failed!\n");
return ret;
if (ret)
goto error;
- ret = ib_register_device(&iwibdev->ibdev, "i40iw%d");
+ dma_set_max_seg_size(&iwdev->hw.pcidev->dev, UINT_MAX);
+ ret = ib_register_device(&iwibdev->ibdev, "i40iw%d",
+ &iwdev->hw.pcidev->dev);
if (ret)
goto error;
goto err_steer_free_bitmap;
rdma_set_device_sysfs_group(&ibdev->ib_dev, &mlx4_attr_group);
- if (ib_register_device(&ibdev->ib_dev, "mlx4_%d"))
+ if (ib_register_device(&ibdev->ib_dev, "mlx4_%d",
+ &dev->persist->pdev->dev))
goto err_diag_counters;
if (mlx4_ib_mad_init(ibdev))
name = "mlx5_%d";
else
name = "mlx5_bond_%d";
- return ib_register_device(&dev->ib_dev, name);
+ return ib_register_device(&dev->ib_dev, name, &dev->mdev->pdev->dev);
}
static void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
mutex_init(&dev->cap_mask_mutex);
rdma_set_device_sysfs_group(&dev->ib_dev, &mthca_attr_group);
- ret = ib_register_device(&dev->ib_dev, "mthca%d");
+ ret = ib_register_device(&dev->ib_dev, "mthca%d", &dev->pdev->dev);
if (ret)
return ret;
if (ret)
return ret;
- return ib_register_device(&dev->ibdev, "ocrdma%d");
+ dma_set_max_seg_size(&dev->nic_info.pdev->dev, UINT_MAX);
+ return ib_register_device(&dev->ibdev, "ocrdma%d",
+ &dev->nic_info.pdev->dev);
}
static int ocrdma_alloc_resources(struct ocrdma_dev *dev)
if (rc)
return rc;
- return ib_register_device(&dev->ibdev, "qedr%d");
+ dma_set_max_seg_size(&dev->pdev->dev, UINT_MAX);
+ return ib_register_device(&dev->ibdev, "qedr%d", &dev->pdev->dev);
}
/* This function allocates fast-path status block memory */
if (ret)
goto err_fwd_dealloc;
- if (ib_register_device(&us_ibdev->ib_dev, "usnic_%d"))
+ dma_set_max_seg_size(&dev->dev, SZ_2G);
+ if (ib_register_device(&us_ibdev->ib_dev, "usnic_%d", &dev->dev))
goto err_fwd_dealloc;
usnic_fwd_set_mtu(us_ibdev->ufdev, us_ibdev->netdev->mtu);
spin_lock_init(&dev->srq_tbl_lock);
rdma_set_device_sysfs_group(&dev->ib_dev, &pvrdma_attr_group);
- ret = ib_register_device(&dev->ib_dev, "vmw_pvrdma%d");
+ ret = ib_register_device(&dev->ib_dev, "vmw_pvrdma%d", &dev->pdev->dev);
if (ret)
goto err_srq_free;
goto err_free_resource;
}
}
-
+ dma_set_max_seg_size(&pdev->dev, UINT_MAX);
pci_set_master(pdev);
/* Map register space */
spin_lock_init(&rdi->n_cqs_lock);
/* DMA Operations */
- rdi->ibdev.dev.dma_ops = rdi->ibdev.dev.dma_ops ? : &dma_virt_ops;
+ rdi->ibdev.dev.dma_parms = rdi->ibdev.dev.parent->dma_parms;
+ dma_set_coherent_mask(&rdi->ibdev.dev,
+ rdi->ibdev.dev.parent->coherent_dma_mask);
/* Protection Domain */
spin_lock_init(&rdi->n_pds_lock);
rdi->ibdev.num_comp_vectors = 1;
/* We are now good to announce we exist */
- ret = ib_register_device(&rdi->ibdev, dev_name(&rdi->ibdev.dev));
+ ret = ib_register_device(&rdi->ibdev, dev_name(&rdi->ibdev.dev), NULL);
if (ret) {
rvt_pr_err(rdi, "Failed to register driver with ib core.\n");
goto bail_wss;
dev->local_dma_lkey = 0;
addrconf_addr_eui48((unsigned char *)&dev->node_guid,
rxe->ndev->dev_addr);
- dev->dev.dma_ops = &dma_virt_ops;
dev->dev.dma_parms = &rxe->dma_parms;
- rxe->dma_parms = (struct device_dma_parameters)
- { .max_segment_size = SZ_2G };
- dma_coerce_mask_and_coherent(&dev->dev,
- dma_get_required_mask(&dev->dev));
+ dma_set_max_seg_size(&dev->dev, UINT_MAX);
+ dma_set_coherent_mask(&dev->dev, dma_get_required_mask(&dev->dev));
dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
| BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
rxe->tfm = tfm;
rdma_set_device_sysfs_group(dev, &rxe_attr_group);
- err = ib_register_device(dev, ibdev_name);
+ err = ib_register_device(dev, ibdev_name, NULL);
if (err)
pr_warn("%s failed with error %d\n", __func__, err);
sdev->vendor_part_id = dev_id++;
- rv = ib_register_device(base_dev, name);
+ rv = ib_register_device(base_dev, name, NULL);
if (rv) {
pr_warn("siw: device registration error %d\n", rv);
return rv;
*/
base_dev->phys_port_cnt = 1;
base_dev->dev.parent = parent;
- base_dev->dev.dma_ops = &dma_virt_ops;
base_dev->dev.dma_parms = &sdev->dma_parms;
- sdev->dma_parms = (struct device_dma_parameters)
- { .max_segment_size = SZ_2G };
+ dma_set_max_seg_size(&base_dev->dev, UINT_MAX);
+ dma_set_coherent_mask(&base_dev->dev,
+ dma_get_required_mask(&base_dev->dev));
base_dev->num_comp_vectors = num_possible_cpus();
xa_init_flags(&sdev->qp_xa, XA_FLAGS_ALLOC1);
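The software drivers (rxe above, siw here, and rvt, which borrows its parent's dma_parms) register with dma_device == NULL, so each must attach a struct device_dma_parameters to its own struct device beforehand or the WARN_ON(!device->dma_device->dma_parms) in setup_dma_device() fires. Condensed from the siw hunk above, assuming the driver-owned sdev->dma_parms field:

	/* Provide dma_parms storage so dma_set_max_seg_size() succeeds
	 * and the dma_parms WARN_ON in setup_dma_device() stays quiet.
	 */
	base_dev->dev.dma_parms = &sdev->dma_parms;
	dma_set_max_seg_size(&base_dev->dev, UINT_MAX);
	dma_set_coherent_mask(&base_dev->dev,
			      dma_get_required_mask(&base_dev->dev));

	/* NULL dma_device: the core installs dma_virt_ops on &base_dev->dev */
	rv = ib_register_device(base_dev, name, NULL);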
void ib_get_device_fw_str(struct ib_device *device, char *str);
-int ib_register_device(struct ib_device *device, const char *name);
+int ib_register_device(struct ib_device *device, const char *name,
+ struct device *dma_device);
void ib_unregister_device(struct ib_device *device);
void ib_unregister_driver(enum rdma_driver_id driver_id);
void ib_unregister_device_and_put(struct ib_device *device);