--- zzzz-none-000/linux-3.10.107/drivers/infiniband/ulp/iser/iser_verbs.c 2017-06-27 09:49:32.000000000 +0000 +++ scorpion-7490-727/linux-3.10.107/drivers/infiniband/ulp/iser/iser_verbs.c 2021-02-04 17:41:59.000000000 +0000 @@ -1,7 +1,7 @@ /* * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved. * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved. - * Copyright (c) 2013 Mellanox Technologies. All rights reserved. + * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -39,27 +39,34 @@ #include "iscsi_iser.h" #define ISCSI_ISER_MAX_CONN 8 -#define ISER_MAX_RX_CQ_LEN (ISER_QP_MAX_RECV_DTOS * ISCSI_ISER_MAX_CONN) -#define ISER_MAX_TX_CQ_LEN (ISER_QP_MAX_REQ_DTOS * ISCSI_ISER_MAX_CONN) +#define ISER_MAX_RX_LEN (ISER_QP_MAX_RECV_DTOS * ISCSI_ISER_MAX_CONN) +#define ISER_MAX_TX_LEN (ISER_QP_MAX_REQ_DTOS * ISCSI_ISER_MAX_CONN) +#define ISER_MAX_CQ_LEN (ISER_MAX_RX_LEN + ISER_MAX_TX_LEN + \ + ISCSI_ISER_MAX_CONN) + +static int iser_cq_poll_limit = 512; static void iser_cq_tasklet_fn(unsigned long data); static void iser_cq_callback(struct ib_cq *cq, void *cq_context); static void iser_cq_event_callback(struct ib_event *cause, void *context) { - iser_err("got cq event %d \n", cause->event); + iser_err("cq event %s (%d)\n", + ib_event_msg(cause->event), cause->event); } static void iser_qp_event_callback(struct ib_event *cause, void *context) { - iser_err("got qp event %d\n",cause->event); + iser_err("qp event %s (%d)\n", + ib_event_msg(cause->event), cause->event); } static void iser_event_handler(struct ib_event_handler *handler, struct ib_event *event) { - iser_err("async event %d on device %s port %d\n", event->event, - event->device->name, event->element.port_num); + iser_err("async event %s (%d) on device %s port %d\n", + ib_event_msg(event->event), event->event, + event->device->name, event->element.port_num); } /** @@ -71,57 +78,70 @@ */ static int iser_create_device_ib_res(struct iser_device *device) { - int i, j; - struct iser_cq_desc *cq_desc; + struct ib_device_attr *dev_attr = &device->dev_attr; + int ret, i, max_cqe; + + ret = ib_query_device(device->ib_device, dev_attr); + if (ret) { + pr_warn("Query device failed for %s\n", device->ib_device->name); + return ret; + } + + ret = iser_assign_reg_ops(device); + if (ret) + return ret; + + device->comps_used = min_t(int, num_online_cpus(), + device->ib_device->num_comp_vectors); + + device->comps = kcalloc(device->comps_used, sizeof(*device->comps), + GFP_KERNEL); + if (!device->comps) + goto comps_err; + + max_cqe = min(ISER_MAX_CQ_LEN, dev_attr->max_cqe); - device->cqs_used = min(ISER_MAX_CQ, device->ib_device->num_comp_vectors); - iser_info("using %d CQs, device %s supports %d vectors\n", - device->cqs_used, device->ib_device->name, - device->ib_device->num_comp_vectors); - - device->cq_desc = kmalloc(sizeof(struct iser_cq_desc) * device->cqs_used, - GFP_KERNEL); - if (device->cq_desc == NULL) - goto cq_desc_err; - cq_desc = device->cq_desc; + iser_info("using %d CQs, device %s supports %d vectors max_cqe %d\n", + device->comps_used, device->ib_device->name, + device->ib_device->num_comp_vectors, max_cqe); device->pd = ib_alloc_pd(device->ib_device); if (IS_ERR(device->pd)) goto pd_err; - for (i = 0; i < device->cqs_used; i++) { - cq_desc[i].device = device; - cq_desc[i].cq_index = i; - - device->rx_cq[i] = ib_create_cq(device->ib_device, - iser_cq_callback, - 
iser_cq_event_callback, - (void *)&cq_desc[i], - ISER_MAX_RX_CQ_LEN, i); - if (IS_ERR(device->rx_cq[i])) - goto cq_err; - - device->tx_cq[i] = ib_create_cq(device->ib_device, - NULL, iser_cq_event_callback, - (void *)&cq_desc[i], - ISER_MAX_TX_CQ_LEN, i); - - if (IS_ERR(device->tx_cq[i])) + for (i = 0; i < device->comps_used; i++) { + struct ib_cq_init_attr cq_attr = {}; + struct iser_comp *comp = &device->comps[i]; + + comp->device = device; + cq_attr.cqe = max_cqe; + cq_attr.comp_vector = i; + comp->cq = ib_create_cq(device->ib_device, + iser_cq_callback, + iser_cq_event_callback, + (void *)comp, + &cq_attr); + if (IS_ERR(comp->cq)) { + comp->cq = NULL; goto cq_err; + } - if (ib_req_notify_cq(device->rx_cq[i], IB_CQ_NEXT_COMP)) + if (ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP)) goto cq_err; - tasklet_init(&device->cq_tasklet[i], - iser_cq_tasklet_fn, - (unsigned long)&cq_desc[i]); + tasklet_init(&comp->tasklet, iser_cq_tasklet_fn, + (unsigned long)comp); } - device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE | - IB_ACCESS_REMOTE_WRITE | - IB_ACCESS_REMOTE_READ); - if (IS_ERR(device->mr)) - goto dma_mr_err; + if (!iser_always_reg) { + int access = IB_ACCESS_LOCAL_WRITE | + IB_ACCESS_REMOTE_WRITE | + IB_ACCESS_REMOTE_READ; + + device->mr = ib_get_dma_mr(device->pd, access); + if (IS_ERR(device->mr)) + goto dma_mr_err; + } INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device, iser_event_handler); @@ -131,21 +151,22 @@ return 0; handler_err: - ib_dereg_mr(device->mr); + if (device->mr) + ib_dereg_mr(device->mr); dma_mr_err: - for (j = 0; j < device->cqs_used; j++) - tasklet_kill(&device->cq_tasklet[j]); + for (i = 0; i < device->comps_used; i++) + tasklet_kill(&device->comps[i].tasklet); cq_err: - for (j = 0; j < i; j++) { - if (device->tx_cq[j]) - ib_destroy_cq(device->tx_cq[j]); - if (device->rx_cq[j]) - ib_destroy_cq(device->rx_cq[j]); + for (i = 0; i < device->comps_used; i++) { + struct iser_comp *comp = &device->comps[i]; + + if (comp->cq) + ib_destroy_cq(comp->cq); } ib_dealloc_pd(device->pd); pd_err: - kfree(device->cq_desc); -cq_desc_err: + kfree(device->comps); +comps_err: iser_err("failed to allocate an IB resource\n"); return -1; } @@ -157,182 +178,361 @@ static void iser_free_device_ib_res(struct iser_device *device) { int i; - BUG_ON(device->mr == NULL); - for (i = 0; i < device->cqs_used; i++) { - tasklet_kill(&device->cq_tasklet[i]); - (void)ib_destroy_cq(device->tx_cq[i]); - (void)ib_destroy_cq(device->rx_cq[i]); - device->tx_cq[i] = NULL; - device->rx_cq[i] = NULL; + for (i = 0; i < device->comps_used; i++) { + struct iser_comp *comp = &device->comps[i]; + + tasklet_kill(&comp->tasklet); + ib_destroy_cq(comp->cq); + comp->cq = NULL; } (void)ib_unregister_event_handler(&device->event_handler); - (void)ib_dereg_mr(device->mr); - (void)ib_dealloc_pd(device->pd); + if (device->mr) + (void)ib_dereg_mr(device->mr); + ib_dealloc_pd(device->pd); - kfree(device->cq_desc); + kfree(device->comps); + device->comps = NULL; device->mr = NULL; device->pd = NULL; } /** - * iser_create_ib_conn_res - Creates FMR pool and Queue-Pair (QP) + * iser_alloc_fmr_pool - Creates FMR pool and page_vector * - * returns 0 on success, -1 on failure + * returns 0 on success, or errno code on failure */ -static int iser_create_ib_conn_res(struct iser_conn *ib_conn) -{ - struct iser_device *device; - struct ib_qp_init_attr init_attr; - int req_err, resp_err, ret = -ENOMEM; +int iser_alloc_fmr_pool(struct ib_conn *ib_conn, + unsigned cmds_max, + unsigned int size) +{ + struct iser_device 
*device = ib_conn->device; + struct iser_fr_pool *fr_pool = &ib_conn->fr_pool; + struct iser_page_vec *page_vec; + struct iser_fr_desc *desc; + struct ib_fmr_pool *fmr_pool; struct ib_fmr_pool_param params; - int index, min_index = 0; - - BUG_ON(ib_conn->device == NULL); + int ret; - device = ib_conn->device; + INIT_LIST_HEAD(&fr_pool->list); + spin_lock_init(&fr_pool->lock); - ib_conn->login_buf = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN + - ISER_RX_LOGIN_SIZE, GFP_KERNEL); - if (!ib_conn->login_buf) - goto out_err; + desc = kzalloc(sizeof(*desc), GFP_KERNEL); + if (!desc) + return -ENOMEM; - ib_conn->login_req_buf = ib_conn->login_buf; - ib_conn->login_resp_buf = ib_conn->login_buf + ISCSI_DEF_MAX_RECV_SEG_LEN; - - ib_conn->login_req_dma = ib_dma_map_single(ib_conn->device->ib_device, - (void *)ib_conn->login_req_buf, - ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE); - - ib_conn->login_resp_dma = ib_dma_map_single(ib_conn->device->ib_device, - (void *)ib_conn->login_resp_buf, - ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE); - - req_err = ib_dma_mapping_error(device->ib_device, ib_conn->login_req_dma); - resp_err = ib_dma_mapping_error(device->ib_device, ib_conn->login_resp_dma); - - if (req_err || resp_err) { - if (req_err) - ib_conn->login_req_dma = 0; - if (resp_err) - ib_conn->login_resp_dma = 0; - goto out_err; + page_vec = kmalloc(sizeof(*page_vec) + (sizeof(u64) * size), + GFP_KERNEL); + if (!page_vec) { + ret = -ENOMEM; + goto err_frpl; } - ib_conn->page_vec = kmalloc(sizeof(struct iser_page_vec) + - (sizeof(u64) * (ISCSI_ISER_SG_TABLESIZE +1)), - GFP_KERNEL); - if (!ib_conn->page_vec) - goto out_err; - - ib_conn->page_vec->pages = (u64 *) (ib_conn->page_vec + 1); + page_vec->pages = (u64 *)(page_vec + 1); params.page_shift = SHIFT_4K; - /* when the first/last SG element are not start/end * - * page aligned, the map whould be of N+1 pages */ - params.max_pages_per_fmr = ISCSI_ISER_SG_TABLESIZE + 1; + params.max_pages_per_fmr = size; /* make the pool size twice the max number of SCSI commands * * the ML is expected to queue, watermark for unmap at 50% */ - params.pool_size = ISCSI_DEF_XMIT_CMDS_MAX * 2; - params.dirty_watermark = ISCSI_DEF_XMIT_CMDS_MAX; + params.pool_size = cmds_max * 2; + params.dirty_watermark = cmds_max; params.cache = 0; params.flush_function = NULL; params.access = (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ); - ib_conn->fmr_pool = ib_create_fmr_pool(device->pd, &params); - ret = PTR_ERR(ib_conn->fmr_pool); - if (IS_ERR(ib_conn->fmr_pool) && ret != -ENOSYS) { - ib_conn->fmr_pool = NULL; - goto out_err; - } else if (ret == -ENOSYS) { - ib_conn->fmr_pool = NULL; - iser_warn("FMRs are not supported, using unaligned mode\n"); - ret = 0; + fmr_pool = ib_create_fmr_pool(device->pd, &params); + if (IS_ERR(fmr_pool)) { + ret = PTR_ERR(fmr_pool); + iser_err("FMR allocation failed, err %d\n", ret); + goto err_fmr; + } + + desc->rsc.page_vec = page_vec; + desc->rsc.fmr_pool = fmr_pool; + list_add(&desc->list, &fr_pool->list); + + return 0; + +err_fmr: + kfree(page_vec); +err_frpl: + kfree(desc); + + return ret; +} + +/** + * iser_free_fmr_pool - releases the FMR pool and page vec + */ +void iser_free_fmr_pool(struct ib_conn *ib_conn) +{ + struct iser_fr_pool *fr_pool = &ib_conn->fr_pool; + struct iser_fr_desc *desc; + + desc = list_first_entry(&fr_pool->list, + struct iser_fr_desc, list); + list_del(&desc->list); + + iser_info("freeing conn %p fmr pool %p\n", + ib_conn, desc->rsc.fmr_pool); + + ib_destroy_fmr_pool(desc->rsc.fmr_pool); + kfree(desc->rsc.page_vec); +
kfree(desc); +} + +static int +iser_alloc_reg_res(struct ib_device *ib_device, + struct ib_pd *pd, + struct iser_reg_resources *res, + unsigned int size) +{ + int ret; + + res->mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, size); + if (IS_ERR(res->mr)) { + ret = PTR_ERR(res->mr); + iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret); + return ret; + } + res->mr_valid = 1; + + return 0; +} + +static void +iser_free_reg_res(struct iser_reg_resources *rsc) +{ + ib_dereg_mr(rsc->mr); +} + +static int +iser_alloc_pi_ctx(struct ib_device *ib_device, + struct ib_pd *pd, + struct iser_fr_desc *desc, + unsigned int size) +{ + struct iser_pi_context *pi_ctx = NULL; + int ret; + + desc->pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL); + if (!desc->pi_ctx) + return -ENOMEM; + + pi_ctx = desc->pi_ctx; + + ret = iser_alloc_reg_res(ib_device, pd, &pi_ctx->rsc, size); + if (ret) { + iser_err("failed to allocate reg_resources\n"); + goto alloc_reg_res_err; + } + + pi_ctx->sig_mr = ib_alloc_mr(pd, IB_MR_TYPE_SIGNATURE, 2); + if (IS_ERR(pi_ctx->sig_mr)) { + ret = PTR_ERR(pi_ctx->sig_mr); + goto sig_mr_failure; + } + pi_ctx->sig_mr_valid = 1; + desc->pi_ctx->sig_protected = 0; + + return 0; + +sig_mr_failure: + iser_free_reg_res(&pi_ctx->rsc); +alloc_reg_res_err: + kfree(desc->pi_ctx); + + return ret; +} + +static void +iser_free_pi_ctx(struct iser_pi_context *pi_ctx) +{ + iser_free_reg_res(&pi_ctx->rsc); + ib_dereg_mr(pi_ctx->sig_mr); + kfree(pi_ctx); +} + +static struct iser_fr_desc * +iser_create_fastreg_desc(struct ib_device *ib_device, + struct ib_pd *pd, + bool pi_enable, + unsigned int size) +{ + struct iser_fr_desc *desc; + int ret; + + desc = kzalloc(sizeof(*desc), GFP_KERNEL); + if (!desc) + return ERR_PTR(-ENOMEM); + + ret = iser_alloc_reg_res(ib_device, pd, &desc->rsc, size); + if (ret) + goto reg_res_alloc_failure; + + if (pi_enable) { + ret = iser_alloc_pi_ctx(ib_device, pd, desc, size); + if (ret) + goto pi_ctx_alloc_failure; + } + + return desc; + +pi_ctx_alloc_failure: + iser_free_reg_res(&desc->rsc); +reg_res_alloc_failure: + kfree(desc); + + return ERR_PTR(ret); +} + +/** + * iser_alloc_fastreg_pool - Creates pool of fast_reg descriptors + * for fast registration work requests. 
+ * returns 0 on success, or errno code on failure + */ +int iser_alloc_fastreg_pool(struct ib_conn *ib_conn, + unsigned cmds_max, + unsigned int size) +{ + struct iser_device *device = ib_conn->device; + struct iser_fr_pool *fr_pool = &ib_conn->fr_pool; + struct iser_fr_desc *desc; + int i, ret; + + INIT_LIST_HEAD(&fr_pool->list); + spin_lock_init(&fr_pool->lock); + fr_pool->size = 0; + for (i = 0; i < cmds_max; i++) { + desc = iser_create_fastreg_desc(device->ib_device, device->pd, + ib_conn->pi_support, size); + if (IS_ERR(desc)) { + ret = PTR_ERR(desc); + goto err; + } + + list_add_tail(&desc->list, &fr_pool->list); + fr_pool->size++; + } + + return 0; + +err: + iser_free_fastreg_pool(ib_conn); + return ret; +} + +/** + * iser_free_fastreg_pool - releases the pool of fast_reg descriptors + */ +void iser_free_fastreg_pool(struct ib_conn *ib_conn) +{ + struct iser_fr_pool *fr_pool = &ib_conn->fr_pool; + struct iser_fr_desc *desc, *tmp; + int i = 0; + + if (list_empty(&fr_pool->list)) + return; + + iser_info("freeing conn %p fr pool\n", ib_conn); + + list_for_each_entry_safe(desc, tmp, &fr_pool->list, list) { + list_del(&desc->list); + iser_free_reg_res(&desc->rsc); + if (desc->pi_ctx) + iser_free_pi_ctx(desc->pi_ctx); + kfree(desc); + ++i; } + if (i < fr_pool->size) + iser_warn("pool still has %d regions registered\n", + fr_pool->size - i); +} + +/** + * iser_create_ib_conn_res - Queue-Pair (QP) + * + * returns 0 on success, -1 on failure + */ +static int iser_create_ib_conn_res(struct ib_conn *ib_conn) +{ + struct iser_conn *iser_conn = container_of(ib_conn, struct iser_conn, + ib_conn); + struct iser_device *device; + struct ib_device_attr *dev_attr; + struct ib_qp_init_attr init_attr; + int ret = -ENOMEM; + int index, min_index = 0; + + BUG_ON(ib_conn->device == NULL); + + device = ib_conn->device; + dev_attr = &device->dev_attr; + memset(&init_attr, 0, sizeof init_attr); mutex_lock(&ig.connlist_mutex); /* select the CQ with the minimal number of usages */ - for (index = 0; index < device->cqs_used; index++) - if (device->cq_active_qps[index] < - device->cq_active_qps[min_index]) + for (index = 0; index < device->comps_used; index++) { + if (device->comps[index].active_qps < + device->comps[min_index].active_qps) min_index = index; - device->cq_active_qps[min_index]++; + } + ib_conn->comp = &device->comps[min_index]; + ib_conn->comp->active_qps++; mutex_unlock(&ig.connlist_mutex); iser_info("cq index %d used for ib_conn %p\n", min_index, ib_conn); init_attr.event_handler = iser_qp_event_callback; init_attr.qp_context = (void *)ib_conn; - init_attr.send_cq = device->tx_cq[min_index]; - init_attr.recv_cq = device->rx_cq[min_index]; - init_attr.cap.max_send_wr = ISER_QP_MAX_REQ_DTOS; + init_attr.send_cq = ib_conn->comp->cq; + init_attr.recv_cq = ib_conn->comp->cq; init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS; init_attr.cap.max_send_sge = 2; init_attr.cap.max_recv_sge = 1; init_attr.sq_sig_type = IB_SIGNAL_REQ_WR; init_attr.qp_type = IB_QPT_RC; + if (ib_conn->pi_support) { + init_attr.cap.max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS + 1; + init_attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN; + iser_conn->max_cmds = + ISER_GET_MAX_XMIT_CMDS(ISER_QP_SIG_MAX_REQ_DTOS); + } else { + if (dev_attr->max_qp_wr > ISER_QP_MAX_REQ_DTOS) { + init_attr.cap.max_send_wr = ISER_QP_MAX_REQ_DTOS + 1; + iser_conn->max_cmds = + ISER_GET_MAX_XMIT_CMDS(ISER_QP_MAX_REQ_DTOS); + } else { + init_attr.cap.max_send_wr = dev_attr->max_qp_wr; + iser_conn->max_cmds = + ISER_GET_MAX_XMIT_CMDS(dev_attr->max_qp_wr); + 
iser_dbg("device %s supports max_send_wr %d\n", + device->ib_device->name, dev_attr->max_qp_wr); + } + } ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr); if (ret) goto out_err; ib_conn->qp = ib_conn->cma_id->qp; - iser_info("setting conn %p cma_id %p: fmr_pool %p qp %p\n", + iser_info("setting conn %p cma_id %p qp %p\n", ib_conn, ib_conn->cma_id, - ib_conn->fmr_pool, ib_conn->cma_id->qp); + ib_conn->cma_id->qp); return ret; out_err: + mutex_lock(&ig.connlist_mutex); + ib_conn->comp->active_qps--; + mutex_unlock(&ig.connlist_mutex); iser_err("unable to alloc mem or create resource, err %d\n", ret); - return ret; -} - -/** - * releases the FMR pool and QP objects, returns 0 on success, - * -1 on failure - */ -static int iser_free_ib_conn_res(struct iser_conn *ib_conn) -{ - int cq_index; - BUG_ON(ib_conn == NULL); - - iser_info("freeing conn %p cma_id %p fmr pool %p qp %p\n", - ib_conn, ib_conn->cma_id, - ib_conn->fmr_pool, ib_conn->qp); - - /* qp is created only once both addr & route are resolved */ - if (ib_conn->fmr_pool != NULL) - ib_destroy_fmr_pool(ib_conn->fmr_pool); - - if (ib_conn->qp != NULL) { - cq_index = ((struct iser_cq_desc *)ib_conn->qp->recv_cq->cq_context)->cq_index; - ib_conn->device->cq_active_qps[cq_index]--; - rdma_destroy_qp(ib_conn->cma_id); - } - - ib_conn->fmr_pool = NULL; - ib_conn->qp = NULL; - kfree(ib_conn->page_vec); - - if (ib_conn->login_buf) { - if (ib_conn->login_req_dma) - ib_dma_unmap_single(ib_conn->device->ib_device, - ib_conn->login_req_dma, - ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE); - if (ib_conn->login_resp_dma) - ib_dma_unmap_single(ib_conn->device->ib_device, - ib_conn->login_resp_dma, - ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE); - kfree(ib_conn->login_buf); - } - - return 0; + return ret; } /** @@ -386,129 +586,262 @@ mutex_unlock(&ig.device_list_mutex); } -static int iser_conn_state_comp_exch(struct iser_conn *ib_conn, - enum iser_ib_conn_state comp, - enum iser_ib_conn_state exch) +/** + * Called with state mutex held + **/ +static int iser_conn_state_comp_exch(struct iser_conn *iser_conn, + enum iser_conn_state comp, + enum iser_conn_state exch) { int ret; - spin_lock_bh(&ib_conn->lock); - if ((ret = (ib_conn->state == comp))) - ib_conn->state = exch; - spin_unlock_bh(&ib_conn->lock); + ret = (iser_conn->state == comp); + if (ret) + iser_conn->state = exch; + return ret; } +void iser_release_work(struct work_struct *work) +{ + struct iser_conn *iser_conn; + + iser_conn = container_of(work, struct iser_conn, release_work); + + /* Wait for conn_stop to complete */ + wait_for_completion(&iser_conn->stop_completion); + /* Wait for IB resouces cleanup to complete */ + wait_for_completion(&iser_conn->ib_completion); + + mutex_lock(&iser_conn->state_mutex); + iser_conn->state = ISER_CONN_DOWN; + mutex_unlock(&iser_conn->state_mutex); + + iser_conn_release(iser_conn); +} + /** - * Frees all conn objects and deallocs conn descriptor + * iser_free_ib_conn_res - release IB related resources + * @iser_conn: iser connection struct + * @destroy: indicator if we need to try to release the + * iser device and memory regoins pool (only iscsi + * shutdown and DEVICE_REMOVAL will use this). + * + * This routine is called with the iser state mutex held + * so the cm_id removal is out of here. It is Safe to + * be invoked multiple times. 
+ /** - * Frees all conn objects and deallocs conn descriptor + * iser_free_ib_conn_res - release IB related resources + * @iser_conn: iser connection struct + * @destroy: indicator if we need to try to release the + * iser device and memory regions pool (only iscsi + * shutdown and DEVICE_REMOVAL will use this). + * + * This routine is called with the iser state mutex held + * so the cm_id removal is out of here. It is safe to + * be invoked multiple times. */ -static void iser_conn_release(struct iser_conn *ib_conn, int can_destroy_id) +static void iser_free_ib_conn_res(struct iser_conn *iser_conn, + bool destroy) { - struct iser_device *device = ib_conn->device; + struct ib_conn *ib_conn = &iser_conn->ib_conn; + struct iser_device *device = ib_conn->device; - BUG_ON(ib_conn->state != ISER_CONN_DOWN); + iser_info("freeing conn %p cma_id %p qp %p\n", + iser_conn, ib_conn->cma_id, ib_conn->qp); - mutex_lock(&ig.connlist_mutex); - list_del(&ib_conn->conn_list); - mutex_unlock(&ig.connlist_mutex); - iser_free_rx_descriptors(ib_conn); - iser_free_ib_conn_res(ib_conn); - ib_conn->device = NULL; - /* on EVENT_ADDR_ERROR there's no device yet for this conn */ - if (device != NULL) - iser_device_try_release(device); - /* if cma handler context, the caller actually destroy the id */ - if (ib_conn->cma_id != NULL && can_destroy_id) { - rdma_destroy_id(ib_conn->cma_id); - ib_conn->cma_id = NULL; + if (ib_conn->qp != NULL) { + ib_conn->comp->active_qps--; + rdma_destroy_qp(ib_conn->cma_id); + ib_conn->qp = NULL; } - iscsi_destroy_endpoint(ib_conn->ep); -} -void iser_conn_get(struct iser_conn *ib_conn) -{ - atomic_inc(&ib_conn->refcount); + if (destroy) { + if (iser_conn->rx_descs) + iser_free_rx_descriptors(iser_conn); + + if (device != NULL) { + iser_device_try_release(device); + ib_conn->device = NULL; + } + } } -int iser_conn_put(struct iser_conn *ib_conn, int can_destroy_id) +/** + * Frees all conn objects and deallocs conn descriptor + */ +void iser_conn_release(struct iser_conn *iser_conn) { - if (atomic_dec_and_test(&ib_conn->refcount)) { - iser_conn_release(ib_conn, can_destroy_id); - return 1; + struct ib_conn *ib_conn = &iser_conn->ib_conn; + + mutex_lock(&ig.connlist_mutex); + list_del(&iser_conn->conn_list); + mutex_unlock(&ig.connlist_mutex); + + mutex_lock(&iser_conn->state_mutex); + /* In case we end up here without ep_disconnect being invoked. */ + if (iser_conn->state != ISER_CONN_DOWN) { + iser_warn("iser conn %p state %d, expected state down.\n", + iser_conn, iser_conn->state); + iscsi_destroy_endpoint(iser_conn->ep); + iser_conn->state = ISER_CONN_DOWN; + } + /* + * In case we never got to bind stage, we still need to + * release IB resources (which is safe to call more than once). + */ + iser_free_ib_conn_res(iser_conn, true); + mutex_unlock(&iser_conn->state_mutex); + + if (ib_conn->cma_id != NULL) { + rdma_destroy_id(ib_conn->cma_id); + ib_conn->cma_id = NULL; } - return 0; + + kfree(iser_conn); } /** * triggers start of the disconnect procedures and wait for them to be done + * Called with state mutex held */ -void iser_conn_terminate(struct iser_conn *ib_conn) +int iser_conn_terminate(struct iser_conn *iser_conn) { + struct ib_conn *ib_conn = &iser_conn->ib_conn; + struct ib_send_wr *bad_wr; int err = 0; - /* change the ib conn state only if the conn is UP, however always call - * rdma_disconnect since this is the only way to cause the CMA to change - * the QP state to ERROR + /* terminate the iser conn only if the conn state is UP */ + if (!iser_conn_state_comp_exch(iser_conn, ISER_CONN_UP, + ISER_CONN_TERMINATING)) + return 0; + + iser_info("iser_conn %p state %d\n", iser_conn, iser_conn->state); + + /* suspend queuing of new iscsi commands */ + if (iser_conn->iscsi_conn) + iscsi_suspend_queue(iser_conn->iscsi_conn); + + /* + * In case we didn't already clean up the cma_id (peer initiated + * a disconnection), we need to cause the CMA to change the QP + * state to ERROR. */ + if (ib_conn->cma_id) { + err = rdma_disconnect(ib_conn->cma_id); + if (err) + iser_err("Failed to disconnect, conn: 0x%p err %d\n", + iser_conn, err); + + /* post an indication that all flush errors were consumed */ + err = ib_post_send(ib_conn->qp, &ib_conn->beacon, &bad_wr); + if (err) { + iser_err("conn %p failed to post beacon\n", ib_conn); + return 1; + } - iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP, ISER_CONN_TERMINATING); - err = rdma_disconnect(ib_conn->cma_id); - if (err) - iser_err("Failed to disconnect, conn: 0x%p err %d\n", - ib_conn,err); + wait_for_completion(&ib_conn->flush_comp); + } - wait_event_interruptible(ib_conn->wait, - ib_conn->state == ISER_CONN_DOWN); + return 1; +} - iser_conn_put(ib_conn, 1); /* deref ib conn deallocate */
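The beacon used by iser_conn_terminate() above is an ordinary SEND work request carrying the reserved wr_id ISER_BEACON_WRID (set up in iser_connect() further down). A QP in the error state flushes posted work requests in order, so the beacon's flush completion proves every earlier WR has already been flushed; iser_handle_wc() below completes flush_comp when it sees that wr_id. A condensed sketch of the draining half, using only names from this patch:

/* Sketch: drain a QP that is already in the error state via a sentinel WR. */
static void example_drain_qp(struct ib_conn *ib_conn)
{
        struct ib_send_wr *bad_wr;

        ib_conn->beacon.wr_id  = ISER_BEACON_WRID;
        ib_conn->beacon.opcode = IB_WR_SEND;
        /* completes with IB_WC_WR_FLUSH_ERR only after all prior WRs */
        if (!ib_post_send(ib_conn->qp, &ib_conn->beacon, &bad_wr))
                wait_for_completion(&ib_conn->flush_comp);
}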
+/** + * Called with state mutex held + **/ +static void iser_connect_error(struct rdma_cm_id *cma_id) +{ + struct iser_conn *iser_conn; + + iser_conn = (struct iser_conn *)cma_id->context; + iser_conn->state = ISER_CONN_TERMINATING; } -static int iser_connect_error(struct rdma_cm_id *cma_id) +static void +iser_calc_scsi_params(struct iser_conn *iser_conn, + unsigned int max_sectors) { - struct iser_conn *ib_conn; - ib_conn = (struct iser_conn *)cma_id->context; + struct iser_device *device = iser_conn->ib_conn.device; + unsigned short sg_tablesize, sup_sg_tablesize; + + sg_tablesize = DIV_ROUND_UP(max_sectors * 512, SIZE_4K); + sup_sg_tablesize = min_t(unsigned, ISCSI_ISER_MAX_SG_TABLESIZE, + device->dev_attr.max_fast_reg_page_list_len); + + if (sg_tablesize > sup_sg_tablesize) { + sg_tablesize = sup_sg_tablesize; + iser_conn->scsi_max_sectors = sg_tablesize * SIZE_4K / 512; + } else { + iser_conn->scsi_max_sectors = max_sectors; + } - ib_conn->state = ISER_CONN_DOWN; - wake_up_interruptible(&ib_conn->wait); - return iser_conn_put(ib_conn, 0); /* deref ib conn's cma id */ + iser_conn->scsi_sg_tablesize = sg_tablesize; + + iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n", + iser_conn, iser_conn->scsi_sg_tablesize, + iser_conn->scsi_max_sectors); } -static int iser_addr_handler(struct rdma_cm_id *cma_id) +/** + * Called with state mutex held + **/ +static void iser_addr_handler(struct rdma_cm_id *cma_id) { struct iser_device *device; - struct iser_conn *ib_conn; + struct iser_conn *iser_conn; + struct ib_conn *ib_conn; int ret; + iser_conn = (struct iser_conn *)cma_id->context; + if (iser_conn->state != ISER_CONN_PENDING) + /* bailout */ + return; + + ib_conn = &iser_conn->ib_conn; device = iser_device_find_by_ib_device(cma_id); if (!device) { iser_err("device lookup/creation failed\n"); - return iser_connect_error(cma_id); + iser_connect_error(cma_id); + return; } - ib_conn = (struct iser_conn *)cma_id->context; ib_conn->device = device; + /* connection T10-PI support */ + if (iser_pi_enable) { + if (!(device->dev_attr.device_cap_flags & + IB_DEVICE_SIGNATURE_HANDOVER)) { + iser_warn("T10-PI requested but not supported on %s, " + "continue without T10-PI\n", + ib_conn->device->ib_device->name); + ib_conn->pi_support = false; + } else { + ib_conn->pi_support = true; + } + } + + iser_calc_scsi_params(iser_conn, iser_max_sectors); + ret = rdma_resolve_route(cma_id, 1000); if (ret) { iser_err("resolve route failed: %d\n", ret); - return iser_connect_error(cma_id); + iser_connect_error(cma_id); + return; } - - return 0; }
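Worked numbers for iser_calc_scsi_params() above, assuming the driver default iser_max_sectors = 1024: 1024 sectors * 512 bytes rounds up to 128 4K pages, so any device whose max_fast_reg_page_list_len is at least 128 keeps scsi_max_sectors at 1024; on a more limited device the table size is clamped and max_sectors is recomputed from it. The same arithmetic as a self-contained sketch (not part of the patch):

/* Sketch of the clamping rule, assuming 512-byte sectors and 4K pages. */
static unsigned int example_scsi_max_sectors(unsigned int max_sectors,
                                             unsigned int sup_sg_tablesize)
{
        unsigned int sg_tablesize = DIV_ROUND_UP(max_sectors * 512, 4096);

        if (sg_tablesize > sup_sg_tablesize)    /* device limit wins */
                return sup_sg_tablesize * 4096 / 512;
        return max_sectors;                     /* e.g. 1024 -> 128 pages */
}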
-static int iser_route_handler(struct rdma_cm_id *cma_id) +/** + * Called with state mutex held + **/ +static void iser_route_handler(struct rdma_cm_id *cma_id) { struct rdma_conn_param conn_param; int ret; struct iser_cm_hdr req_hdr; + struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context; + struct ib_conn *ib_conn = &iser_conn->ib_conn; + struct iser_device *device = ib_conn->device; + + if (iser_conn->state != ISER_CONN_PENDING) + /* bailout */ + return; - ret = iser_create_ib_conn_res((struct iser_conn *)cma_id->context); + ret = iser_create_ib_conn_res(ib_conn); if (ret) goto failure; memset(&conn_param, 0, sizeof conn_param); - conn_param.responder_resources = 4; + conn_param.responder_resources = device->dev_attr.max_qp_rd_atom; conn_param.initiator_depth = 1; conn_param.retry_count = 7; conn_param.rnr_retry_count = 6; @@ -525,58 +858,74 @@ goto failure; } - return 0; + return; failure: - return iser_connect_error(cma_id); + iser_connect_error(cma_id); } static void iser_connected_handler(struct rdma_cm_id *cma_id) { - struct iser_conn *ib_conn; + struct iser_conn *iser_conn; + struct ib_qp_attr attr; + struct ib_qp_init_attr init_attr; + + iser_conn = (struct iser_conn *)cma_id->context; + if (iser_conn->state != ISER_CONN_PENDING) + /* bailout */ + return; - ib_conn = (struct iser_conn *)cma_id->context; - ib_conn->state = ISER_CONN_UP; - wake_up_interruptible(&ib_conn->wait); + (void)ib_query_qp(cma_id->qp, &attr, ~0, &init_attr); + iser_info("remote qpn:%x my qpn:%x\n", attr.dest_qp_num, cma_id->qp->qp_num); + + iser_conn->state = ISER_CONN_UP; + complete(&iser_conn->up_completion); } -static int iser_disconnected_handler(struct rdma_cm_id *cma_id) +static void iser_disconnected_handler(struct rdma_cm_id *cma_id) { - struct iser_conn *ib_conn; - int ret; + struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context; - ib_conn = (struct iser_conn *)cma_id->context; - - /* getting here when the state is UP means that the conn is being * - * terminated asynchronously from the iSCSI layer's perspective. */ - if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP, - ISER_CONN_TERMINATING)) - iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn, - ISCSI_ERR_CONN_FAILED); - - /* Complete the termination process if no posts are pending */ - if (ib_conn->post_recv_buf_count == 0 && - (atomic_read(&ib_conn->post_send_buf_count) == 0)) { - ib_conn->state = ISER_CONN_DOWN; - wake_up_interruptible(&ib_conn->wait); + if (iser_conn_terminate(iser_conn)) { + if (iser_conn->iscsi_conn) + iscsi_conn_failure(iser_conn->iscsi_conn, + ISCSI_ERR_CONN_FAILED); + else + iser_err("iscsi_iser connection isn't bound\n"); } - - ret = iser_conn_put(ib_conn, 0); /* deref ib conn's cma id */ - return ret; } +static void iser_cleanup_handler(struct rdma_cm_id *cma_id, + bool destroy) +{ + struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context; + + /* + * We are not guaranteed that we visited disconnected_handler + * by now, call it here to be safe that we handle CM DREP + * and flush errors. 
+ */ + iser_disconnected_handler(cma_id); + iser_free_ib_conn_res(iser_conn, destroy); + complete(&iser_conn->ib_completion); +}; + static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) { + struct iser_conn *iser_conn; int ret = 0; - iser_info("event %d status %d conn %p id %p\n", - event->event, event->status, cma_id->context, cma_id); + iser_conn = (struct iser_conn *)cma_id->context; + iser_info("%s (%d): status %d conn %p id %p\n", + rdma_event_msg(event->event), event->event, + event->status, cma_id->context, cma_id); + mutex_lock(&iser_conn->state_mutex); switch (event->event) { case RDMA_CM_EVENT_ADDR_RESOLVED: - ret = iser_addr_handler(cma_id); + iser_addr_handler(cma_id); break; case RDMA_CM_EVENT_ROUTE_RESOLVED: - ret = iser_route_handler(cma_id); + iser_route_handler(cma_id); break; case RDMA_CM_EVENT_ESTABLISHED: iser_connected_handler(cma_id); @@ -586,173 +935,126 @@ case RDMA_CM_EVENT_CONNECT_ERROR: case RDMA_CM_EVENT_UNREACHABLE: case RDMA_CM_EVENT_REJECTED: - ret = iser_connect_error(cma_id); + iser_connect_error(cma_id); break; case RDMA_CM_EVENT_DISCONNECTED: - case RDMA_CM_EVENT_DEVICE_REMOVAL: case RDMA_CM_EVENT_ADDR_CHANGE: - ret = iser_disconnected_handler(cma_id); + case RDMA_CM_EVENT_TIMEWAIT_EXIT: + iser_cleanup_handler(cma_id, false); + break; + case RDMA_CM_EVENT_DEVICE_REMOVAL: + /* + * we *must* destroy the device as we cannot rely + * on iscsid to be around to initiate error handling. + * also if we are not in state DOWN implicitly destroy + * the cma_id. + */ + iser_cleanup_handler(cma_id, true); + if (iser_conn->state != ISER_CONN_DOWN) { + iser_conn->ib_conn.cma_id = NULL; + ret = 1; + } break; default: - iser_err("Unexpected RDMA CM event (%d)\n", event->event); + iser_err("Unexpected RDMA CM event: %s (%d)\n", + rdma_event_msg(event->event), event->event); break; } + mutex_unlock(&iser_conn->state_mutex); + return ret; } -void iser_conn_init(struct iser_conn *ib_conn) +void iser_conn_init(struct iser_conn *iser_conn) { - ib_conn->state = ISER_CONN_INIT; - init_waitqueue_head(&ib_conn->wait); - ib_conn->post_recv_buf_count = 0; - atomic_set(&ib_conn->post_send_buf_count, 0); - atomic_set(&ib_conn->refcount, 1); /* ref ib conn allocation */ - INIT_LIST_HEAD(&ib_conn->conn_list); - spin_lock_init(&ib_conn->lock); + iser_conn->state = ISER_CONN_INIT; + iser_conn->ib_conn.post_recv_buf_count = 0; + init_completion(&iser_conn->ib_conn.flush_comp); + init_completion(&iser_conn->stop_completion); + init_completion(&iser_conn->ib_completion); + init_completion(&iser_conn->up_completion); + INIT_LIST_HEAD(&iser_conn->conn_list); + mutex_init(&iser_conn->state_mutex); } /** * starts the process of connecting to the target * sleeps until the connection is established or rejected */ -int iser_connect(struct iser_conn *ib_conn, - struct sockaddr_in *src_addr, - struct sockaddr_in *dst_addr, +int iser_connect(struct iser_conn *iser_conn, + struct sockaddr *src_addr, + struct sockaddr *dst_addr, int non_blocking) { - struct sockaddr *src, *dst; + struct ib_conn *ib_conn = &iser_conn->ib_conn; int err = 0; - sprintf(ib_conn->name, "%pI4:%d", - &dst_addr->sin_addr.s_addr, dst_addr->sin_port); + mutex_lock(&iser_conn->state_mutex); + + sprintf(iser_conn->name, "%pISp", dst_addr); + + iser_info("connecting to: %s\n", iser_conn->name); /* the device is known only --after-- address resolution */ ib_conn->device = NULL; - iser_info("connecting to: %pI4, port 0x%x\n", - &dst_addr->sin_addr, dst_addr->sin_port); + iser_conn->state = 
ISER_CONN_PENDING; - ib_conn->state = ISER_CONN_PENDING; + ib_conn->beacon.wr_id = ISER_BEACON_WRID; + ib_conn->beacon.opcode = IB_WR_SEND; - iser_conn_get(ib_conn); /* ref ib conn's cma id */ - ib_conn->cma_id = rdma_create_id(iser_cma_handler, - (void *)ib_conn, - RDMA_PS_TCP, IB_QPT_RC); + ib_conn->cma_id = rdma_create_id(&init_net, iser_cma_handler, + (void *)iser_conn, + RDMA_PS_TCP, IB_QPT_RC); if (IS_ERR(ib_conn->cma_id)) { err = PTR_ERR(ib_conn->cma_id); iser_err("rdma_create_id failed: %d\n", err); goto id_failure; } - src = (struct sockaddr *)src_addr; - dst = (struct sockaddr *)dst_addr; - err = rdma_resolve_addr(ib_conn->cma_id, src, dst, 1000); + err = rdma_resolve_addr(ib_conn->cma_id, src_addr, dst_addr, 1000); if (err) { iser_err("rdma_resolve_addr failed: %d\n", err); goto addr_failure; } if (!non_blocking) { - wait_event_interruptible(ib_conn->wait, - (ib_conn->state != ISER_CONN_PENDING)); + wait_for_completion_interruptible(&iser_conn->up_completion); - if (ib_conn->state != ISER_CONN_UP) { + if (iser_conn->state != ISER_CONN_UP) { err = -EIO; goto connect_failure; } } + mutex_unlock(&iser_conn->state_mutex); mutex_lock(&ig.connlist_mutex); - list_add(&ib_conn->conn_list, &ig.connlist); + list_add(&iser_conn->conn_list, &ig.connlist); mutex_unlock(&ig.connlist_mutex); return 0; id_failure: ib_conn->cma_id = NULL; addr_failure: - ib_conn->state = ISER_CONN_DOWN; - iser_conn_put(ib_conn, 1); /* deref ib conn's cma id */ + iser_conn->state = ISER_CONN_DOWN; connect_failure: - iser_conn_put(ib_conn, 1); /* deref ib conn deallocate */ + mutex_unlock(&iser_conn->state_mutex); + iser_conn_release(iser_conn); return err; } -/** - * iser_reg_page_vec - Register physical memory - * - * returns: 0 on success, errno code on failure - */ -int iser_reg_page_vec(struct iser_conn *ib_conn, - struct iser_page_vec *page_vec, - struct iser_mem_reg *mem_reg) -{ - struct ib_pool_fmr *mem; - u64 io_addr; - u64 *page_list; - int status; - - page_list = page_vec->pages; - io_addr = page_list[0]; - - mem = ib_fmr_pool_map_phys(ib_conn->fmr_pool, - page_list, - page_vec->length, - io_addr); - - if (IS_ERR(mem)) { - status = (int)PTR_ERR(mem); - iser_err("ib_fmr_pool_map_phys failed: %d\n", status); - return status; - } - - mem_reg->lkey = mem->fmr->lkey; - mem_reg->rkey = mem->fmr->rkey; - mem_reg->len = page_vec->length * SIZE_4K; - mem_reg->va = io_addr; - mem_reg->is_fmr = 1; - mem_reg->mem_h = (void *)mem; - - mem_reg->va += page_vec->offset; - mem_reg->len = page_vec->data_size; - - iser_dbg("PHYSICAL Mem.register, [PHYS p_array: 0x%p, sz: %d, " - "entry[0]: (0x%08lx,%ld)] -> " - "[lkey: 0x%08X mem_h: 0x%p va: 0x%08lX sz: %ld]\n", - page_vec, page_vec->length, - (unsigned long)page_vec->pages[0], - (unsigned long)page_vec->data_size, - (unsigned int)mem_reg->lkey, mem_reg->mem_h, - (unsigned long)mem_reg->va, (unsigned long)mem_reg->len); - return 0; -} - -/** - * Unregister (previosuly registered) memory. 
- */ -void iser_unreg_mem(struct iser_mem_reg *reg) -{ - int ret; - - iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n",reg->mem_h); - - ret = ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h); - if (ret) - iser_err("ib_fmr_pool_unmap failed %d\n", ret); - - reg->mem_h = NULL; -} - -int iser_post_recvl(struct iser_conn *ib_conn) +int iser_post_recvl(struct iser_conn *iser_conn) { struct ib_recv_wr rx_wr, *rx_wr_failed; + struct ib_conn *ib_conn = &iser_conn->ib_conn; struct ib_sge sge; int ib_ret; - sge.addr = ib_conn->login_resp_dma; + sge.addr = iser_conn->login_resp_dma; sge.length = ISER_RX_LOGIN_SIZE; - sge.lkey = ib_conn->device->mr->lkey; + sge.lkey = ib_conn->device->pd->local_dma_lkey; - rx_wr.wr_id = (unsigned long)ib_conn->login_resp_buf; + rx_wr.wr_id = (uintptr_t)iser_conn->login_resp_buf; rx_wr.sg_list = &sge; rx_wr.num_sge = 1; rx_wr.next = NULL; @@ -766,20 +1068,21 @@ return ib_ret; } -int iser_post_recvm(struct iser_conn *ib_conn, int count) +int iser_post_recvm(struct iser_conn *iser_conn, int count) { struct ib_recv_wr *rx_wr, *rx_wr_failed; int i, ib_ret; - unsigned int my_rx_head = ib_conn->rx_desc_head; + struct ib_conn *ib_conn = &iser_conn->ib_conn; + unsigned int my_rx_head = iser_conn->rx_desc_head; struct iser_rx_desc *rx_desc; for (rx_wr = ib_conn->rx_wr, i = 0; i < count; i++, rx_wr++) { - rx_desc = &ib_conn->rx_descs[my_rx_head]; - rx_wr->wr_id = (unsigned long)rx_desc; + rx_desc = &iser_conn->rx_descs[my_rx_head]; + rx_wr->wr_id = (uintptr_t)rx_desc; rx_wr->sg_list = &rx_desc->rx_sg; rx_wr->num_sge = 1; rx_wr->next = rx_wr + 1; - my_rx_head = (my_rx_head + 1) & (ISER_QP_MAX_RECV_DTOS - 1); + my_rx_head = (my_rx_head + 1) & iser_conn->qp_max_recv_dtos_mask; } rx_wr--; @@ -791,7 +1094,7 @@ iser_err("ib_post_recv failed ret=%d\n", ib_ret); ib_conn->post_recv_buf_count -= count; } else - ib_conn->rx_desc_head = my_rx_head; + iser_conn->rx_desc_head = my_rx_head; return ib_ret; } @@ -801,131 +1104,218 @@ * * returns 0 on success, -1 on failure */ -int iser_post_send(struct iser_conn *ib_conn, struct iser_tx_desc *tx_desc) +int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc, + bool signal) { - int ib_ret; - struct ib_send_wr send_wr, *send_wr_failed; + struct ib_send_wr *bad_wr, *wr = iser_tx_next_wr(tx_desc); + int ib_ret; ib_dma_sync_single_for_device(ib_conn->device->ib_device, - tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE); - - send_wr.next = NULL; - send_wr.wr_id = (unsigned long)tx_desc; - send_wr.sg_list = tx_desc->tx_sg; - send_wr.num_sge = tx_desc->num_sge; - send_wr.opcode = IB_WR_SEND; - send_wr.send_flags = IB_SEND_SIGNALED; + tx_desc->dma_addr, ISER_HEADERS_LEN, + DMA_TO_DEVICE); - atomic_inc(&ib_conn->post_send_buf_count); + wr->next = NULL; + wr->wr_id = (uintptr_t)tx_desc; + wr->sg_list = tx_desc->tx_sg; + wr->num_sge = tx_desc->num_sge; + wr->opcode = IB_WR_SEND; + wr->send_flags = signal ? IB_SEND_SIGNALED : 0; + + ib_ret = ib_post_send(ib_conn->qp, &tx_desc->wrs[0].send, &bad_wr); + if (ib_ret) + iser_err("ib_post_send failed, ret:%d opcode:%d\n", + ib_ret, bad_wr->opcode); - ib_ret = ib_post_send(ib_conn->qp, &send_wr, &send_wr_failed); - if (ib_ret) { - iser_err("ib_post_send failed, ret:%d\n", ib_ret); - atomic_dec(&ib_conn->post_send_buf_count); - } return ib_ret; } -static void iser_handle_comp_error(struct iser_tx_desc *desc, - struct iser_conn *ib_conn) +/** + * is_iser_tx_desc - Indicate if the completion wr_id + * is a TX descriptor or not. 
+ * @iser_conn: iser connection + * @wr_id: completion WR identifier + * + * Since we cannot rely on wc opcode in FLUSH errors + * we must work around it by checking if the wr_id address + * falls in the iser connection rx_descs buffer. If so + * it is an RX descriptor, otherwise it is a TX. + */ +static inline bool +is_iser_tx_desc(struct iser_conn *iser_conn, void *wr_id) { - if (desc && desc->type == ISCSI_TX_DATAOUT) - kmem_cache_free(ig.desc_cache, desc); + void *start = iser_conn->rx_descs; + int len = iser_conn->num_rx_descs * sizeof(*iser_conn->rx_descs); + + if (wr_id >= start && wr_id < start + len) + return false; - if (ib_conn->post_recv_buf_count == 0 && - atomic_read(&ib_conn->post_send_buf_count) == 0) { - /* getting here when the state is UP means that the conn is * - * being terminated asynchronously from the iSCSI layer's * - * perspective. */ - if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP, - ISER_CONN_TERMINATING)) - iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn, + return true; +} + +/** + * iser_handle_comp_error() - Handle error completion + * @ib_conn: connection RDMA resources + * @wc: work completion + * + * Notes: We may handle a FLUSH error completion and in this case + * we only cleanup in case TX type was DATAOUT. For non-FLUSH + * error completion we should also notify iscsi layer that + * connection is failed (in case we passed bind stage). + */ +static void +iser_handle_comp_error(struct ib_conn *ib_conn, + struct ib_wc *wc) +{ + void *wr_id = (void *)(uintptr_t)wc->wr_id; + struct iser_conn *iser_conn = container_of(ib_conn, struct iser_conn, + ib_conn); + + if (wc->status != IB_WC_WR_FLUSH_ERR) + if (iser_conn->iscsi_conn) + iscsi_conn_failure(iser_conn->iscsi_conn, ISCSI_ERR_CONN_FAILED); - /* no more non completed posts to the QP, complete the - * termination process w.o worrying on disconnect event */ - ib_conn->state = ISER_CONN_DOWN; - wake_up_interruptible(&ib_conn->wait); + if (wc->wr_id == ISER_FASTREG_LI_WRID) + return; + + if (is_iser_tx_desc(iser_conn, wr_id)) { + struct iser_tx_desc *desc = wr_id; + + if (desc->type == ISCSI_TX_DATAOUT) + kmem_cache_free(ig.desc_cache, desc); + } else { + ib_conn->post_recv_buf_count--; + } }
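One nuance in is_iser_tx_desc() above: it compares a completion's wr_id against the bounds of the contiguous rx_descs array, and relational comparison of pointers into unrelated allocations is formally undefined in C (though ubiquitous and safe in kernel practice). An equivalent sketch written over uintptr_t, which sidesteps that wrinkle; the helper name is hypothetical:

/* Sketch: the same range test on integers rather than void pointers. */
static inline bool example_is_rx_desc(struct iser_conn *iser_conn, u64 wr_id)
{
        uintptr_t start = (uintptr_t)iser_conn->rx_descs;
        uintptr_t len = iser_conn->num_rx_descs *
                        sizeof(*iser_conn->rx_descs);

        return wr_id >= start && wr_id < start + len;
}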
-static int iser_drain_tx_cq(struct iser_device *device, int cq_index) +/** + * iser_handle_wc - handle a single work completion + * @wc: work completion + * + * Soft-IRQ context, work completion can be either + * SEND or RECV, and can turn out successful or + * with error (or flush error). + */ +static void iser_handle_wc(struct ib_wc *wc) { - struct ib_cq *cq = device->tx_cq[cq_index]; - struct ib_wc wc; + struct ib_conn *ib_conn; struct iser_tx_desc *tx_desc; - struct iser_conn *ib_conn; - int completed_tx = 0; + struct iser_rx_desc *rx_desc; - while (ib_poll_cq(cq, 1, &wc) == 1) { - tx_desc = (struct iser_tx_desc *) (unsigned long) wc.wr_id; - ib_conn = wc.qp->qp_context; - if (wc.status == IB_WC_SUCCESS) { - if (wc.opcode == IB_WC_SEND) - iser_snd_completion(tx_desc, ib_conn); - else - iser_err("expected opcode %d got %d\n", - IB_WC_SEND, wc.opcode); + ib_conn = wc->qp->qp_context; + if (likely(wc->status == IB_WC_SUCCESS)) { + if (wc->opcode == IB_WC_RECV) { + rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id; + iser_rcv_completion(rx_desc, wc->byte_len, + ib_conn); + } else + if (wc->opcode == IB_WC_SEND) { + tx_desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id; + iser_snd_completion(tx_desc, ib_conn); } else { - iser_err("tx id %llx status %d vend_err %x\n", - wc.wr_id, wc.status, wc.vendor_err); - atomic_dec(&ib_conn->post_send_buf_count); - iser_handle_comp_error(tx_desc, ib_conn); + iser_err("Unknown wc opcode %d\n", wc->opcode); } - completed_tx++; + } else { + if (wc->status != IB_WC_WR_FLUSH_ERR) + iser_err("%s (%d): wr id %llx vend_err %x\n", + ib_wc_status_msg(wc->status), wc->status, + wc->wr_id, wc->vendor_err); + else + iser_dbg("%s (%d): wr id %llx\n", + ib_wc_status_msg(wc->status), wc->status, + wc->wr_id); + + if (wc->wr_id == ISER_BEACON_WRID) + /* all flush errors were consumed */ + complete(&ib_conn->flush_comp); + else + iser_handle_comp_error(ib_conn, wc); } - return completed_tx; } - +/** + * iser_cq_tasklet_fn - iSER completion polling loop + * @data: iSER completion context + * + * Soft-IRQ context, polling connection CQ until + * either CQ was empty or we exhausted polling budget + */ static void iser_cq_tasklet_fn(unsigned long data) { - struct iser_cq_desc *cq_desc = (struct iser_cq_desc *)data; - struct iser_device *device = cq_desc->device; - int cq_index = cq_desc->cq_index; - struct ib_cq *cq = device->rx_cq[cq_index]; - struct ib_wc wc; - struct iser_rx_desc *desc; - unsigned long xfer_len; - struct iser_conn *ib_conn; - int completed_tx, completed_rx; - completed_tx = completed_rx = 0; - - while (ib_poll_cq(cq, 1, &wc) == 1) { - desc = (struct iser_rx_desc *) (unsigned long) wc.wr_id; - BUG_ON(desc == NULL); - ib_conn = wc.qp->qp_context; - if (wc.status == IB_WC_SUCCESS) { - if (wc.opcode == IB_WC_RECV) { - xfer_len = (unsigned long)wc.byte_len; - iser_rcv_completion(desc, xfer_len, ib_conn); - } else - iser_err("expected opcode %d got %d\n", - IB_WC_RECV, wc.opcode); - } else { - if (wc.status != IB_WC_WR_FLUSH_ERR) - iser_err("rx id %llx status %d vend_err %x\n", - wc.wr_id, wc.status, wc.vendor_err); - ib_conn->post_recv_buf_count--; - iser_handle_comp_error(NULL, ib_conn); - } - completed_rx++; - if (!(completed_rx & 63)) - completed_tx += iser_drain_tx_cq(device, cq_index); - } - /* #warning "it is assumed here that arming CQ only once its empty" * - * " would not cause interrupts to be missed" */ + struct iser_comp *comp = (struct iser_comp *)data; + struct ib_cq *cq = comp->cq; + struct ib_wc *const wcs = comp->wcs; + int i, n, completed = 0; + + while ((n = ib_poll_cq(cq, ARRAY_SIZE(comp->wcs), wcs)) > 0) { + for (i = 0; i < n; i++) + iser_handle_wc(&wcs[i]); + + completed += n; + if (completed >= iser_cq_poll_limit) + break; + } + + /* + * It is assumed here that arming CQ only once it is empty + * would not cause interrupts to be missed. */ ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); - completed_tx += iser_drain_tx_cq(device, cq_index); - iser_dbg("got %d rx %d tx completions\n", completed_rx, completed_tx); + iser_dbg("got %d completions\n", completed); }
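The re-arm in iser_cq_tasklet_fn() above assumes no completion slips in between the final ib_poll_cq() and ib_req_notify_cq(); a CQE that lands in that window before the CQ is armed raises no event. The standard defence is the IB_CQ_REPORT_MISSED_EVENTS flag, which makes the arm call report possibly-missed completions so the caller can poll again — a sketch, not what this patch does:

/* Sketch: race-free re-arm using IB_CQ_REPORT_MISSED_EVENTS. */
static void example_rearm_and_repoll(struct iser_comp *comp)
{
        while (ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP |
                                IB_CQ_REPORT_MISSED_EVENTS) > 0) {
                struct ib_wc wc;

                while (ib_poll_cq(comp->cq, 1, &wc) > 0)
                        iser_handle_wc(&wc);
        }
}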
static void iser_cq_callback(struct ib_cq *cq, void *cq_context) { - struct iser_cq_desc *cq_desc = (struct iser_cq_desc *)cq_context; - struct iser_device *device = cq_desc->device; - int cq_index = cq_desc->cq_index; + struct iser_comp *comp = cq_context; + + tasklet_schedule(&comp->tasklet); +} + +u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task, + enum iser_data_dir cmd_dir, sector_t *sector) +{ + struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir]; + struct iser_fr_desc *desc = reg->mem_h; + unsigned long sector_size = iser_task->sc->device->sector_size; + struct ib_mr_status mr_status; + int ret; - tasklet_schedule(&device->cq_tasklet[cq_index]); + if (desc && desc->pi_ctx->sig_protected) { + desc->pi_ctx->sig_protected = 0; + ret = ib_check_mr_status(desc->pi_ctx->sig_mr, + IB_MR_CHECK_SIG_STATUS, &mr_status); + if (ret) { + pr_err("ib_check_mr_status failed, ret %d\n", ret); + goto err; + } + + if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) { + sector_t sector_off = mr_status.sig_err.sig_err_offset; + + sector_div(sector_off, sector_size + 8); + *sector = scsi_get_lba(iser_task->sc) + sector_off; + + pr_err("PI error found type %d at sector %llx " + "expected %x vs actual %x\n", + mr_status.sig_err.err_type, + (unsigned long long)*sector, + mr_status.sig_err.expected, + mr_status.sig_err.actual); + + switch (mr_status.sig_err.err_type) { + case IB_SIG_BAD_GUARD: + return 0x1; + case IB_SIG_BAD_REFTAG: + return 0x3; + case IB_SIG_BAD_APPTAG: + return 0x2; + } + } + } + + return 0; +err: + /* Not a lot we can do here, return ambiguous guard error */ + return 0x1; }
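The 0x1/0x2/0x3 values returned by iser_check_task_pi_status() line up with the T10 DIF additional sense code qualifiers under ASC 0x10: LOGICAL BLOCK GUARD CHECK FAILED (0x1), APPLICATION TAG CHECK FAILED (0x2) and REFERENCE TAG CHECK FAILED (0x3). A hypothetical caller could fold the result into CHECK CONDITION sense data along these lines (libiscsi does something very similar where check_protection is wired up; the function below is illustrative only and assumes scsi/scsi_eh.h):

/* Sketch: report a PI failure as ILLEGAL REQUEST with ASC 0x10/ascq. */
static void example_report_pi_error(struct scsi_cmnd *sc, u8 ascq)
{
        if (ascq) {
                sc->result = DRIVER_SENSE << 24 | SAM_STAT_CHECK_CONDITION;
                scsi_build_sense_buffer(0, sc->sense_buffer,
                                        ILLEGAL_REQUEST, 0x10, ascq);
        }
}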