IB/mlx4: Support memory window binding
author     Shani Michaeli <shanim@mellanox.com>
           Wed, 6 Feb 2013 16:19:15 +0000 (16:19 +0000)
committer  Roland Dreier <roland@purestorage.com>
           Mon, 25 Feb 2013 18:44:32 +0000 (10:44 -0800)
* Implement memory window binding in mlx4_ib_post_send.

* Implement mlx4_ib_bind_mw by deferring to mlx4_ib_post_send (see the
  caller-side sketch after the Signed-off-by lines below).

* Rename the remote-access MLX4_WQE_FMR_PERM_* flags to
  MLX4_WQE_FMR_AND_BIND_PERM_*, indicating that they are used both for
  fast registration work requests and for memory window bind work
  requests (the local-access flags keep their FMR-only names).

Signed-off-by: Haggai Eran <haggaie@mellanox.com>
Signed-off-by: Shani Michaeli <shanim@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
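
For context, a hedged caller-side sketch (not part of this patch) of how a
kernel consumer might exercise the new verb once this driver support is in
place. It relies only on the ib_mw_bind / ib_mw_bind_info fields used by the
mr.c hunk below plus the core MW verbs of this kernel generation; the helper
name example_bind_window and the chosen access flags are purely illustrative.
Type 2 windows would typically be bound by posting an IB_WR_BIND_MW work
request directly, which the qp.c hunk below also enables.

/*
 * Hypothetical sketch: bind a type 1 memory window over an existing MR
 * through ib_bind_mw(), which mlx4 now services via mlx4_ib_bind_mw().
 * The pd, qp and mr objects are assumed to be set up by the caller.
 */
#include <rdma/ib_verbs.h>

static int example_bind_window(struct ib_pd *pd, struct ib_qp *qp,
			       struct ib_mr *mr, u64 addr, u64 length)
{
	struct ib_mw_bind mw_bind;
	struct ib_mw *mw;
	int ret;

	mw = ib_alloc_mw(pd, IB_MW_TYPE_1);	/* reaches mlx4_ib_alloc_mw() */
	if (IS_ERR(mw))
		return PTR_ERR(mw);

	memset(&mw_bind, 0, sizeof(mw_bind));
	mw_bind.wr_id                     = 0;			/* caller's cookie */
	mw_bind.send_flags                = IB_SEND_SIGNALED;
	mw_bind.bind_info.mr              = mr;
	mw_bind.bind_info.addr            = addr;
	mw_bind.bind_info.length          = length;
	mw_bind.bind_info.mw_access_flags = IB_ACCESS_REMOTE_WRITE;

	/* Dispatches to mlx4_ib_bind_mw(), which posts an IB_WR_BIND_MW WR. */
	ret = ib_bind_mw(qp, mw, &mw_bind);
	if (ret)
		ib_dealloc_mw(mw);

	return ret;
}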
drivers/infiniband/hw/mlx4/mlx4_ib.h
drivers/infiniband/hw/mlx4/mr.c
drivers/infiniband/hw/mlx4/qp.c
include/linux/mlx4/qp.h

diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 9ba0aaf3a58e52e9e16eda357ff4b28749fc35be..f61ec26500c4919628abf3eb5ee570e878a43155 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -592,6 +592,8 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                  struct ib_udata *udata);
 int mlx4_ib_dereg_mr(struct ib_mr *mr);
 struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
+int mlx4_ib_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
+                   struct ib_mw_bind *mw_bind);
 int mlx4_ib_dealloc_mw(struct ib_mw *mw);
 struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd,
                                        int max_page_list_len);
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 5adf4c47ee1866c923f63316466bb25b5d43eefc..e471f089ff00ef3028bd3137184e9ed09e23c097 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -231,6 +231,28 @@ err_free:
        return ERR_PTR(err);
 }
 
+int mlx4_ib_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
+                   struct ib_mw_bind *mw_bind)
+{
+       struct ib_send_wr  wr;
+       struct ib_send_wr *bad_wr;
+       int ret;
+
+       memset(&wr, 0, sizeof(wr));
+       wr.opcode               = IB_WR_BIND_MW;
+       wr.wr_id                = mw_bind->wr_id;
+       wr.send_flags           = mw_bind->send_flags;
+       wr.wr.bind_mw.mw        = mw;
+       wr.wr.bind_mw.bind_info = mw_bind->bind_info;
+       wr.wr.bind_mw.rkey      = ib_inc_rkey(mw->rkey);
+
+       ret = mlx4_ib_post_send(qp, &wr, &bad_wr);
+       if (!ret)
+               mw->rkey = wr.wr.bind_mw.rkey;
+
+       return ret;
+}
+
 int mlx4_ib_dealloc_mw(struct ib_mw *ibmw)
 {
        struct mlx4_ib_mw *mw = to_mmw(ibmw);
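
As an aside (an assumption about the rest of this series, not visible in this
diff): for ib_bind_mw() to reach the new handler, the verb presumably gets
wired into the mlx4 ib_device in main.c along these lines:

	/* Sketch only -- the actual hookup is done in main.c elsewhere in
	 * this patch series, alongside the existing MW alloc/dealloc verbs. */
	ibdev->ib_dev.bind_mw = mlx4_ib_bind_mw;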
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index ebadebe83b11dfaa6182c949a42203ba37fed8b4..35cced2a4da835b1254f7e7a76bc1d6b84752057 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -104,6 +104,7 @@ static const __be32 mlx4_ib_opcode[] = {
        [IB_WR_FAST_REG_MR]                     = cpu_to_be32(MLX4_OPCODE_FMR),
        [IB_WR_MASKED_ATOMIC_CMP_AND_SWP]       = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_CS),
        [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]     = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_FA),
+       [IB_WR_BIND_MW]                         = cpu_to_be32(MLX4_OPCODE_BIND_MW),
 };
 
 static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
@@ -1953,9 +1954,12 @@ static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq
 
 static __be32 convert_access(int acc)
 {
-       return (acc & IB_ACCESS_REMOTE_ATOMIC ? cpu_to_be32(MLX4_WQE_FMR_PERM_ATOMIC)       : 0) |
-              (acc & IB_ACCESS_REMOTE_WRITE  ? cpu_to_be32(MLX4_WQE_FMR_PERM_REMOTE_WRITE) : 0) |
-              (acc & IB_ACCESS_REMOTE_READ   ? cpu_to_be32(MLX4_WQE_FMR_PERM_REMOTE_READ)  : 0) |
+       return (acc & IB_ACCESS_REMOTE_ATOMIC ?
+               cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC)       : 0) |
+              (acc & IB_ACCESS_REMOTE_WRITE  ?
+               cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE) : 0) |
+              (acc & IB_ACCESS_REMOTE_READ   ?
+               cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ)  : 0) |
               (acc & IB_ACCESS_LOCAL_WRITE   ? cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_WRITE)  : 0) |
                cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_READ);
 }
@@ -1981,6 +1985,24 @@ static void set_fmr_seg(struct mlx4_wqe_fmr_seg *fseg, struct ib_send_wr *wr)
        fseg->reserved[1]       = 0;
 }
 
+static void set_bind_seg(struct mlx4_wqe_bind_seg *bseg, struct ib_send_wr *wr)
+{
+       bseg->flags1 =
+               convert_access(wr->wr.bind_mw.bind_info.mw_access_flags) &
+               cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ  |
+                           MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE |
+                           MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC);
+       bseg->flags2 = 0;
+       if (wr->wr.bind_mw.mw->type == IB_MW_TYPE_2)
+               bseg->flags2 |= cpu_to_be32(MLX4_WQE_BIND_TYPE_2);
+       if (wr->wr.bind_mw.bind_info.mw_access_flags & IB_ZERO_BASED)
+               bseg->flags2 |= cpu_to_be32(MLX4_WQE_BIND_ZERO_BASED);
+       bseg->new_rkey = cpu_to_be32(wr->wr.bind_mw.rkey);
+       bseg->lkey = cpu_to_be32(wr->wr.bind_mw.bind_info.mr->lkey);
+       bseg->addr = cpu_to_be64(wr->wr.bind_mw.bind_info.addr);
+       bseg->length = cpu_to_be64(wr->wr.bind_mw.bind_info.length);
+}
+
 static void set_local_inv_seg(struct mlx4_wqe_local_inval_seg *iseg, u32 rkey)
 {
        memset(iseg, 0, sizeof(*iseg));
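
For a concrete picture of the flags2 word built by set_bind_seg() (values from
the new enum in the qp.h hunk below), a type 2 window bound with IB_ZERO_BASED
access ends up with both high bits set:

	/* Illustration: type 2, zero-based bind */
	/* bseg->flags2 == cpu_to_be32(MLX4_WQE_BIND_TYPE_2 | MLX4_WQE_BIND_ZERO_BASED)
	 *              == cpu_to_be32((1u << 31) | (1u << 30)) */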
@@ -2289,6 +2311,13 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                                size += sizeof (struct mlx4_wqe_fmr_seg) / 16;
                                break;
 
+                       case IB_WR_BIND_MW:
+                               ctrl->srcrb_flags |=
+                                       cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
+                               set_bind_seg(wqe, wr);
+                               wqe  += sizeof(struct mlx4_wqe_bind_seg);
+                               size += sizeof(struct mlx4_wqe_bind_seg) / 16;
+                               break;
                        default:
                                /* No extra segments required for sends */
                                break;
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 6c8a68c602be10a254963bea4a82245082eb7c0d..67f46ad6920a0bffeefb2efe0c437a7a2f86a883 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -265,6 +265,11 @@ struct mlx4_wqe_lso_seg {
        __be32                  header[0];
 };
 
+enum mlx4_wqe_bind_seg_flags2 {
+       MLX4_WQE_BIND_ZERO_BASED = (1 << 30),
+       MLX4_WQE_BIND_TYPE_2     = (1 << 31),
+};
+
 struct mlx4_wqe_bind_seg {
        __be32                  flags1;
        __be32                  flags2;
@@ -277,9 +282,9 @@ struct mlx4_wqe_bind_seg {
 enum {
        MLX4_WQE_FMR_PERM_LOCAL_READ    = 1 << 27,
        MLX4_WQE_FMR_PERM_LOCAL_WRITE   = 1 << 28,
-       MLX4_WQE_FMR_PERM_REMOTE_READ   = 1 << 29,
-       MLX4_WQE_FMR_PERM_REMOTE_WRITE  = 1 << 30,
-       MLX4_WQE_FMR_PERM_ATOMIC        = 1 << 31
+       MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ  = 1 << 29,
+       MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE = 1 << 30,
+       MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC       = 1 << 31
 };
 
 struct mlx4_wqe_fmr_seg {