RDMA/uverbs: Check ODP in ib_check_mr_access() as well
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 9bf6c31..4fcbc6d 100644
@@ -1234,6 +1234,8 @@ enum ib_qp_attr_mask {
        IB_QP_RESERVED3                 = (1<<23),
        IB_QP_RESERVED4                 = (1<<24),
        IB_QP_RATE_LIMIT                = (1<<25),
+
+       IB_QP_ATTR_STANDARD_BITS = GENMASK(20, 0),
 };
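
The new IB_QP_ATTR_STANDARD_BITS mask covers attribute bits 0-20, i.e. everything below the reserved bits and IB_QP_RATE_LIMIT. A minimal, hypothetical sketch of how a driver's modify_qp handler could use it to reject mask bits it does not understand (foo_modify_qp and its arguments are illustrative, not part of this patch):

        static int foo_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                                 int attr_mask, struct ib_udata *udata)
        {
                /* Refuse anything outside the well-known attribute bits. */
                if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
                        return -EOPNOTSUPP;

                /* ... apply the supported attributes to the hardware QP ... */
                return 0;
        }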
 
 enum ib_qp_state {
@@ -1469,6 +1471,8 @@ enum rdma_remove_reason {
        RDMA_REMOVE_DRIVER_REMOVE,
        /* uobj is being cleaned-up before being committed */
        RDMA_REMOVE_ABORT,
+       /* The driver failed to destroy the uobject and is being disconnected */
+       RDMA_REMOVE_DRIVER_FAILURE,
 };
 
 struct ib_rdmacg_object {
@@ -1481,8 +1485,6 @@ struct ib_ucontext {
        struct ib_device       *device;
        struct ib_uverbs_file  *ufile;
 
-       bool cleanup_retryable;
-
        struct ib_rdmacg_object cg_obj;
        /*
         * Implementation details of the RDMA core, don't use in drivers:
@@ -2401,6 +2403,8 @@ struct ib_device_ops {
        int (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
        int (*create_ah)(struct ib_ah *ah, struct rdma_ah_init_attr *attr,
                         struct ib_udata *udata);
+       int (*create_user_ah)(struct ib_ah *ah, struct rdma_ah_init_attr *attr,
+                             struct ib_udata *udata);
        int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
        int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
        int (*destroy_ah)(struct ib_ah *ah, u32 flags);
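
Splitting create_user_ah out of create_ah lets a driver handle the uverbs path (where udata is valid) separately from kernel callers. A hedged sketch of how a driver might wire the new op (the foo_* names are placeholders):

        static const struct ib_device_ops foo_dev_ops = {
                .create_ah      = foo_create_ah,      /* kernel consumers, no udata */
                .create_user_ah = foo_create_user_ah, /* uverbs path, may parse udata */
                .destroy_ah     = foo_destroy_ah,
        };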
@@ -2665,7 +2669,6 @@ struct ib_device {
        const struct attribute_group    *groups[3];
 
        u64                          uverbs_cmd_mask;
-       u64                          uverbs_ex_cmd_mask;
 
        char                         node_desc[IB_DEVICE_NODE_DESC_MAX];
        __be64                       node_guid;
@@ -2900,46 +2903,6 @@ static inline bool ib_is_udata_cleared(struct ib_udata *udata,
        return ib_is_buffer_cleared(udata->inbuf + offset, len);
 }
 
-/**
- * ib_is_destroy_retryable - Check whether the uobject destruction
- * is retryable.
- * @ret: The initial destruction return code
- * @why: remove reason
- * @uobj: The uobject that is destroyed
- *
- * This function is a helper function that IB layer and low-level drivers
- * can use to consider whether the destruction of the given uobject is
- * retry-able.
- * It checks the original return code, if it wasn't success the destruction
- * is retryable according to the ucontext state (i.e. cleanup_retryable) and
- * the remove reason. (i.e. why).
- * Must be called with the object locked for destroy.
- */
-static inline bool ib_is_destroy_retryable(int ret, enum rdma_remove_reason why,
-                                          struct ib_uobject *uobj)
-{
-       return ret && (why == RDMA_REMOVE_DESTROY ||
-                      uobj->context->cleanup_retryable);
-}
-
-/**
- * ib_destroy_usecnt - Called during destruction to check the usecnt
- * @usecnt: The usecnt atomic
- * @why: remove reason
- * @uobj: The uobject that is destroyed
- *
- * Non-zero usecnts will block destruction unless destruction was triggered by
- * a ucontext cleanup.
- */
-static inline int ib_destroy_usecnt(atomic_t *usecnt,
-                                   enum rdma_remove_reason why,
-                                   struct ib_uobject *uobj)
-{
-       if (atomic_read(usecnt) && ib_is_destroy_retryable(-EBUSY, why, uobj))
-               return -EBUSY;
-       return 0;
-}
-
 /**
  * ib_modify_qp_is_ok - Check that the supplied attribute mask
  * contains all required attributes and no attributes not allowed for
@@ -3655,8 +3618,14 @@ static inline int ib_post_srq_recv(struct ib_srq *srq,
                                              bad_recv_wr ? : &dummy);
 }
 
-struct ib_qp *ib_create_qp(struct ib_pd *pd,
-                          struct ib_qp_init_attr *qp_init_attr);
+struct ib_qp *ib_create_named_qp(struct ib_pd *pd,
+                                struct ib_qp_init_attr *qp_init_attr,
+                                const char *caller);
+static inline struct ib_qp *ib_create_qp(struct ib_pd *pd,
+                                        struct ib_qp_init_attr *init_attr)
+{
+       return ib_create_named_qp(pd, init_attr, KBUILD_MODNAME);
+}
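
Existing ULPs keep calling ib_create_qp(); the inline wrapper records the caller's module name at compile time through KBUILD_MODNAME so the core can attribute the QP for resource tracking. Illustrative caller:

        /* In a ULP built as "foo_ulp.ko", this expands to
         * ib_create_named_qp(pd, &init_attr, "foo_ulp").
         */
        qp = ib_create_qp(pd, &init_attr);
        if (IS_ERR(qp))
                return PTR_ERR(qp);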
 
 /**
  * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
@@ -3943,6 +3912,16 @@ static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
                -ENOSYS;
 }
 
+/*
+ * Drivers that don't need a DMA mapping at the RDMA layer set dma_device to
+ * NULL. This causes the ib_dma* helpers to just stash the kernel virtual
+ * address into the dma address.
+ */
+static inline bool ib_uses_virt_dma(struct ib_device *dev)
+{
+       return IS_ENABLED(CONFIG_INFINIBAND_VIRT_DMA) && !dev->dma_device;
+}
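
For software RDMA devices built with CONFIG_INFINIBAND_VIRT_DMA that register with dma_device == NULL, the ib_dma_* wrappers below degenerate to identity mappings. Roughly, under that assumption:

        /* dev->dma_device == NULL and CONFIG_INFINIBAND_VIRT_DMA=y */
        u64 dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        /* dma_addr == (uintptr_t)buf: the "DMA address" is simply the kernel
         * virtual address, ib_dma_mapping_error() always reports success, and
         * the unmap/sync helpers below become no-ops for such a device.
         */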
+
 /**
  * ib_dma_mapping_error - check a DMA addr for error
  * @dev: The device for which the dma_addr was created
@@ -3950,6 +3929,8 @@ static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
  */
 static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
 {
+       if (ib_uses_virt_dma(dev))
+               return 0;
        return dma_mapping_error(dev->dma_device, dma_addr);
 }
 
@@ -3964,6 +3945,8 @@ static inline u64 ib_dma_map_single(struct ib_device *dev,
                                    void *cpu_addr, size_t size,
                                    enum dma_data_direction direction)
 {
+       if (ib_uses_virt_dma(dev))
+               return (uintptr_t)cpu_addr;
        return dma_map_single(dev->dma_device, cpu_addr, size, direction);
 }
 
@@ -3978,7 +3961,8 @@ static inline void ib_dma_unmap_single(struct ib_device *dev,
                                       u64 addr, size_t size,
                                       enum dma_data_direction direction)
 {
-       dma_unmap_single(dev->dma_device, addr, size, direction);
+       if (!ib_uses_virt_dma(dev))
+               dma_unmap_single(dev->dma_device, addr, size, direction);
 }
 
 /**
@@ -3995,6 +3979,8 @@ static inline u64 ib_dma_map_page(struct ib_device *dev,
                                  size_t size,
                                         enum dma_data_direction direction)
 {
+       if (ib_uses_virt_dma(dev))
+               return (uintptr_t)(page_address(page) + offset);
        return dma_map_page(dev->dma_device, page, offset, size, direction);
 }
 
@@ -4009,7 +3995,30 @@ static inline void ib_dma_unmap_page(struct ib_device *dev,
                                     u64 addr, size_t size,
                                     enum dma_data_direction direction)
 {
-       dma_unmap_page(dev->dma_device, addr, size, direction);
+       if (!ib_uses_virt_dma(dev))
+               dma_unmap_page(dev->dma_device, addr, size, direction);
+}
+
+int ib_dma_virt_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents);
+static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
+                                     struct scatterlist *sg, int nents,
+                                     enum dma_data_direction direction,
+                                     unsigned long dma_attrs)
+{
+       if (ib_uses_virt_dma(dev))
+               return ib_dma_virt_map_sg(dev, sg, nents);
+       return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
+                               dma_attrs);
+}
+
+static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
+                                        struct scatterlist *sg, int nents,
+                                        enum dma_data_direction direction,
+                                        unsigned long dma_attrs)
+{
+       if (!ib_uses_virt_dma(dev))
+               dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction,
+                                  dma_attrs);
 }
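
ib_dma_map_sg()/ib_dma_unmap_sg() below are now thin wrappers around these *_attrs variants, and the virt-DMA case is handed to ib_dma_virt_map_sg() in the core. The body sketched here is an assumption based on the ib_uses_virt_dma() comment above, not code from this patch:

        /* Sketch: describe each segment by its kernel virtual address. */
        int ib_dma_virt_map_sg(struct ib_device *dev, struct scatterlist *sg,
                               int nents)
        {
                struct scatterlist *s;
                int i;

                for_each_sg(sg, s, nents, i) {
                        sg_dma_address(s) = (uintptr_t)sg_virt(s);
                        sg_dma_len(s) = s->length;
                }
                return nents;
        }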
 
 /**
@@ -4023,7 +4032,7 @@ static inline int ib_dma_map_sg(struct ib_device *dev,
                                struct scatterlist *sg, int nents,
                                enum dma_data_direction direction)
 {
-       return dma_map_sg(dev->dma_device, sg, nents, direction);
+       return ib_dma_map_sg_attrs(dev, sg, nents, direction, 0);
 }
 
 /**
@@ -4037,24 +4046,7 @@ static inline void ib_dma_unmap_sg(struct ib_device *dev,
                                   struct scatterlist *sg, int nents,
                                   enum dma_data_direction direction)
 {
-       dma_unmap_sg(dev->dma_device, sg, nents, direction);
-}
-
-static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
-                                     struct scatterlist *sg, int nents,
-                                     enum dma_data_direction direction,
-                                     unsigned long dma_attrs)
-{
-       return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
-                               dma_attrs);
-}
-
-static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
-                                        struct scatterlist *sg, int nents,
-                                        enum dma_data_direction direction,
-                                        unsigned long dma_attrs)
-{
-       dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
+       ib_dma_unmap_sg_attrs(dev, sg, nents, direction, 0);
 }
 
 /**
@@ -4065,6 +4057,8 @@ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
  */
 static inline unsigned int ib_dma_max_seg_size(struct ib_device *dev)
 {
+       if (ib_uses_virt_dma(dev))
+               return UINT_MAX;
        return dma_get_max_seg_size(dev->dma_device);
 }
 
@@ -4080,7 +4074,8 @@ static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
                                              size_t size,
                                              enum dma_data_direction dir)
 {
-       dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
+       if (!ib_uses_virt_dma(dev))
+               dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
 }
 
 /**
@@ -4095,36 +4090,8 @@ static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
                                                 size_t size,
                                                 enum dma_data_direction dir)
 {
-       dma_sync_single_for_device(dev->dma_device, addr, size, dir);
-}
-
-/**
- * ib_dma_alloc_coherent - Allocate memory and map it for DMA
- * @dev: The device for which the DMA address is requested
- * @size: The size of the region to allocate in bytes
- * @dma_handle: A pointer for returning the DMA address of the region
- * @flag: memory allocator flags
- */
-static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
-                                          size_t size,
-                                          dma_addr_t *dma_handle,
-                                          gfp_t flag)
-{
-       return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
-}
-
-/**
- * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
- * @dev: The device for which the DMA addresses were allocated
- * @size: The size of the region
- * @cpu_addr: the address returned by ib_dma_alloc_coherent()
- * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
- */
-static inline void ib_dma_free_coherent(struct ib_device *dev,
-                                       size_t size, void *cpu_addr,
-                                       dma_addr_t dma_handle)
-{
-       dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
+       if (!ib_uses_virt_dma(dev))
+               dma_sync_single_for_device(dev->dma_device, addr, size, dir);
 }
 
 /* ib_reg_user_mr - register a memory region for virtual addresses from kernel
@@ -4216,7 +4183,8 @@ struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device,
                                   struct inode *inode, struct ib_udata *udata);
 int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata);
 
-static inline int ib_check_mr_access(int flags)
+static inline int ib_check_mr_access(struct ib_device *ib_dev,
+                                    unsigned int flags)
 {
        /*
         * Local write permission is required if remote write or
@@ -4229,6 +4197,9 @@ static inline int ib_check_mr_access(int flags)
        if (flags & ~IB_ACCESS_SUPPORTED)
                return -EINVAL;
 
+       if (flags & IB_ACCESS_ON_DEMAND &&
+           !(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
+               return -EINVAL;
        return 0;
 }
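
Passing the device lets the helper itself reject IB_ACCESS_ON_DEMAND when the HCA does not advertise IB_DEVICE_ON_DEMAND_PAGING, instead of every driver open-coding the check. Typical (sketched) use early in a driver's reg_user_mr handler:

        int err = ib_check_mr_access(pd->device, access_flags);

        if (err)        /* also covers ODP requested on a non-ODP device */
                return ERR_PTR(err);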
 
@@ -4615,6 +4586,19 @@ static inline struct ib_device *rdma_device_to_ibdev(struct device *device)
        return coredev->owner;
 }
 
+/**
+ * ibdev_to_node - return the NUMA node for a given ib_device
+ * @dev:       device to get the NUMA node for.
+ */
+static inline int ibdev_to_node(struct ib_device *ibdev)
+{
+       struct device *parent = ibdev->dev.parent;
+
+       if (!parent)
+               return NUMA_NO_NODE;
+       return dev_to_node(parent);
+}
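
ibdev_to_node() gives ULPs a simple way to place allocations near the HCA; devices without a parent (e.g. software devices) fall back to NUMA_NO_NODE. Illustrative use:

        /* Allocate per-connection state on the device's NUMA node (sketch). */
        ctx = kzalloc_node(sizeof(*ctx), GFP_KERNEL, ibdev_to_node(ibdev));
        if (!ctx)
                return -ENOMEM;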
+
 /**
  * rdma_device_to_drv_device - Helper macro to reach back to driver's
  *                            ib_device holder structure from device pointer.