Skip to content

Commit

Permalink
RDMA/irdma: Split MEM handler into irdma_reg_user_mr_type_mem
Browse files Browse the repository at this point in the history
The source code related to IRDMA_MEMREG_TYPE_MEM is split out
into a new function, irdma_reg_user_mr_type_mem.

Reviewed-by: Shiraz Saleem <[email protected]>
Signed-off-by: Zhu Yanjun <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Leon Romanovsky <[email protected]>
  • Loading branch information
zhuyj authored and rleon committed Jan 26, 2023
1 parent ed73a50 commit 01798df
Showing 1 changed file with 50 additions and 32 deletions.
82 changes: 50 additions & 32 deletions drivers/infiniband/hw/irdma/verbs.c
Original file line number Diff line number Diff line change
Expand Up @@ -2745,6 +2745,54 @@ static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
return ret;
}

/**
 * irdma_reg_user_mr_type_mem - register an IRDMA_MEMREG_TYPE_MEM region
 * @iwmr: in-progress memory region descriptor
 * @access: access rights requested by the caller
 *
 * Sets up the PBLE backing for the region, allocates an STag, and
 * registers the MR with hardware.  If the region turns out to be
 * physically contiguous-check fails, the PBLE allocation is dropped and
 * registration proceeds without it.  All partially acquired resources
 * are released on failure.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int irdma_reg_user_mr_type_mem(struct irdma_mr *iwmr, int access)
{
	struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
	bool use_pbles = iwmr->page_cnt != 1;
	u32 stag;
	int err;

	err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles, false);
	if (err)
		return err;

	/* Non-contiguous pages: release the PBLE backing and register the
	 * MR without it; the contiguity-check error itself is not fatal.
	 */
	if (use_pbles &&
	    irdma_check_mr_contiguous(&iwpbl->pble_alloc, iwmr->page_size)) {
		irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc);
		iwpbl->pbl_allocated = false;
	}

	stag = irdma_create_stag(iwdev);
	if (!stag) {
		err = -ENOMEM;
		goto free_pble;
	}

	iwmr->stag = stag;
	iwmr->ibmr.rkey = stag;
	iwmr->ibmr.lkey = stag;
	err = irdma_hwreg_mr(iwdev, iwmr, access);
	if (!err)
		return 0;

	/* HW registration failed: give back the STag, then the PBLEs. */
	irdma_free_stag(iwdev, stag);

free_pble:
	if (iwpbl->pble_alloc.level != PBLE_LEVEL_0 && iwpbl->pbl_allocated)
		irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc);

	return err;
}

/**
* irdma_reg_user_mr - Register a user memory region
* @pd: ptr of pd
Expand All @@ -2761,12 +2809,11 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
#define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages)
struct irdma_device *iwdev = to_iwdev(pd->device);
struct irdma_ucontext *ucontext;
struct irdma_pble_alloc *palloc;
struct irdma_pbl *iwpbl;
struct irdma_mr *iwmr;
struct ib_umem *region;
struct irdma_mem_reg_req req;
u32 total, stag = 0;
u32 total;
u8 shadow_pgcnt = 1;
bool use_pbles = false;
unsigned long flags;
Expand Down Expand Up @@ -2817,7 +2864,6 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
}
iwmr->len = region->length;
iwpbl->user_base = virt;
palloc = &iwpbl->pble_alloc;
iwmr->type = req.reg_type;
iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);

Expand Down Expand Up @@ -2863,36 +2909,10 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
break;
case IRDMA_MEMREG_TYPE_MEM:
use_pbles = (iwmr->page_cnt != 1);

err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles, false);
err = irdma_reg_user_mr_type_mem(iwmr, access);
if (err)
goto error;

if (use_pbles) {
err = irdma_check_mr_contiguous(palloc,
iwmr->page_size);
if (err) {
irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
iwpbl->pbl_allocated = false;
}
}

stag = irdma_create_stag(iwdev);
if (!stag) {
err = -ENOMEM;
goto error;
}

iwmr->stag = stag;
iwmr->ibmr.rkey = stag;
iwmr->ibmr.lkey = stag;
err = irdma_hwreg_mr(iwdev, iwmr, access);
if (err) {
irdma_free_stag(iwdev, stag);
goto error;
}

break;
default:
goto error;
Expand All @@ -2903,8 +2923,6 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
return &iwmr->ibmr;

error:
if (palloc->level != PBLE_LEVEL_0 && iwpbl->pbl_allocated)
irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
ib_umem_release(region);
kfree(iwmr);

Expand Down

0 comments on commit 01798df

Please sign in to comment.