--- zzzz-none-000/linux-3.10.107/drivers/scsi/qla2xxx/qla_iocb.c	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/drivers/scsi/qla2xxx/qla_iocb.c	2021-02-04 17:41:59.000000000 +0000
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2013 QLogic Corporation
+ * Copyright (c)  2003-2014 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -32,9 +32,11 @@
 	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
 		cflags = CF_WRITE;
 		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
+		vha->qla_stats.output_requests++;
 	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
 		cflags = CF_READ;
 		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
+		vha->qla_stats.input_requests++;
 	}
 	return (cflags);
 }
@@ -106,8 +108,7 @@
 	cont_pkt = (cont_entry_t *)req->ring_ptr;
 
 	/* Load packet defaults. */
-	*((uint32_t *)(&cont_pkt->entry_type)) =
-	    __constant_cpu_to_le32(CONTINUE_TYPE);
+	*((uint32_t *)(&cont_pkt->entry_type)) = cpu_to_le32(CONTINUE_TYPE);
 
 	return (cont_pkt);
 }
@@ -136,8 +137,8 @@
 
 	/* Load packet defaults. */
 	*((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
-	    __constant_cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
-	    __constant_cpu_to_le32(CONTINUE_A64_TYPE);
+	    cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
+	    cpu_to_le32(CONTINUE_A64_TYPE);
 
 	return (cont_pkt);
 }
@@ -202,11 +203,11 @@
 
 	/* Update entry type to indicate Command Type 2 IOCB */
 	*((uint32_t *)(&cmd_pkt->entry_type)) =
-	    __constant_cpu_to_le32(COMMAND_TYPE);
+	    cpu_to_le32(COMMAND_TYPE);
 
 	/* No data transfer */
 	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
-		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
+		cmd_pkt->byte_count = cpu_to_le32(0);
 		return;
 	}
@@ -259,12 +260,11 @@
 	cmd = GET_CMD_SP(sp);
 
 	/* Update entry type to indicate Command Type 3 IOCB */
-	*((uint32_t *)(&cmd_pkt->entry_type)) =
-	    __constant_cpu_to_le32(COMMAND_A64_TYPE);
+	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_A64_TYPE);
 
 	/* No data transfer */
 	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
-		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
+		cmd_pkt->byte_count = cpu_to_le32(0);
 		return;
 	}
@@ -308,7 +308,7 @@
 int
 qla2x00_start_scsi(srb_t *sp)
 {
-	int		ret, nseg;
+	int		nseg;
 	unsigned long   flags;
 	scsi_qla_host_t	*vha;
 	struct scsi_cmnd *cmd;
@@ -323,10 +323,8 @@
 	struct qla_hw_data *ha;
 	struct req_que *req;
 	struct rsp_que *rsp;
-	char		tag[2];
 
 	/* Setup device pointers. */
-	ret = 0;
 	vha = sp->fcport->vha;
 	ha = vha->hw;
 	reg = &ha->iobase->isp;
@@ -402,26 +400,7 @@
 	/* Set target ID and LUN number*/
 	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
 	cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
-
-	/* Update tagged queuing modifier */
-	if (scsi_populate_tag_msg(cmd, tag)) {
-		switch (tag[0]) {
-		case HEAD_OF_QUEUE_TAG:
-			cmd_pkt->control_flags =
-			    __constant_cpu_to_le16(CF_HEAD_TAG);
-			break;
-		case ORDERED_QUEUE_TAG:
-			cmd_pkt->control_flags =
-			    __constant_cpu_to_le16(CF_ORDERED_TAG);
-			break;
-		default:
-			cmd_pkt->control_flags =
-			    __constant_cpu_to_le16(CF_SIMPLE_TAG);
-			break;
-		}
-	} else {
-		cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
-	}
+	cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);
 
 	/* Load SCSI command packet. */
 	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
@@ -472,9 +451,9 @@
 qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
 {
 	struct qla_hw_data *ha = vha->hw;
-	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
+	device_reg_t *reg = ISP_QUE_REG(ha, req->id);
 
-	if (IS_QLA82XX(ha)) {
+	if (IS_P3P_TYPE(ha)) {
 		qla82xx_start_iocbs(vha);
 	} else {
 		/* Adjust ring index. */
@@ -486,7 +465,7 @@
 			req->ring_ptr++;
 
 		/* Set chip new ring index. */
-		if (ha->mqenable || IS_QLA83XX(ha)) {
+		if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
 			WRT_REG_DWORD(req->req_q_in, req->ring_index);
 			RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
 		} else if (IS_QLAFX00(ha)) {
@@ -518,11 +497,10 @@
 static int
 __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
 			struct rsp_que *rsp, uint16_t loop_id,
-			uint16_t lun, uint8_t type)
+			uint64_t lun, uint8_t type)
 {
 	mrk_entry_t *mrk;
 	struct mrk_entry_24xx *mrk24 = NULL;
-	struct mrk_entry_fx00 *mrkfx = NULL;
 	struct qla_hw_data *ha = vha->hw;
 	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
@@ -539,25 +517,16 @@
 	mrk->entry_type = MARKER_TYPE;
 	mrk->modifier = type;
 	if (type != MK_SYNC_ALL) {
-		if (IS_QLAFX00(ha)) {
-			mrkfx = (struct mrk_entry_fx00 *) mrk;
-			mrkfx->handle = MAKE_HANDLE(req->id, mrkfx->handle);
-			mrkfx->handle_hi = 0;
-			mrkfx->tgt_id = cpu_to_le16(loop_id);
-			mrkfx->lun[1] = LSB(lun);
-			mrkfx->lun[2] = MSB(lun);
-			host_to_fcp_swap(mrkfx->lun, sizeof(mrkfx->lun));
-		} else if (IS_FWI2_CAPABLE(ha)) {
+		if (IS_FWI2_CAPABLE(ha)) {
 			mrk24 = (struct mrk_entry_24xx *) mrk;
 			mrk24->nport_handle = cpu_to_le16(loop_id);
-			mrk24->lun[1] = LSB(lun);
-			mrk24->lun[2] = MSB(lun);
+			int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
 			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
 			mrk24->vp_index = vha->vp_idx;
 			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
 		} else {
 			SET_TARGET_ID(ha, mrk->target, loop_id);
-			mrk->lun = cpu_to_le16(lun);
+			mrk->lun = cpu_to_le16((uint16_t)lun);
 		}
 	}
 	wmb();
@@ -569,7 +538,7 @@
 
 int
 qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
-		struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
+		struct rsp_que *rsp, uint16_t loop_id, uint64_t lun,
 		uint8_t type)
 {
 	int ret;
@@ -625,12 +594,11 @@
 	cmd = GET_CMD_SP(sp);
 
 	/* Update entry type to indicate Command Type 3 IOCB */
-	*((uint32_t *)(&cmd_pkt->entry_type)) =
-	    __constant_cpu_to_le32(COMMAND_TYPE_6);
+	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_6);
 
 	/* No data transfer */
 	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
-		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
+		cmd_pkt->byte_count = cpu_to_le32(0);
 		return 0;
 	}
@@ -639,13 +607,13 @@
 
 	/* Set transfer direction */
 	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
-		cmd_pkt->control_flags =
-		    __constant_cpu_to_le16(CF_WRITE_DATA);
+		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
 		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
+		vha->qla_stats.output_requests++;
 	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
-		cmd_pkt->control_flags =
-		    __constant_cpu_to_le16(CF_READ_DATA);
+		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
 		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
+		vha->qla_stats.input_requests++;
 	}
 
 	cur_seg = scsi_sglist(cmd);
@@ -706,7 +674,7 @@
  *
  * Returns the number of dsd list needed to store @dsds.
  */
-inline uint16_t
+static inline uint16_t
 qla24xx_calc_dsd_lists(uint16_t dsds)
 {
 	uint16_t dsd_lists = 0;
@@ -726,7 +694,7 @@
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
-inline void
+static inline void
 qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
 	uint16_t tot_dsds)
 {
@@ -736,32 +704,29 @@
 	struct scsi_cmnd *cmd;
 	struct scatterlist *sg;
 	int i;
-	struct req_que *req;
 
 	cmd = GET_CMD_SP(sp);
 
 	/* Update entry type to indicate Command Type 3 IOCB */
-	*((uint32_t *)(&cmd_pkt->entry_type)) =
-	    __constant_cpu_to_le32(COMMAND_TYPE_7);
+	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_7);
 
 	/* No data transfer */
 	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
-		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
+		cmd_pkt->byte_count = cpu_to_le32(0);
 		return;
 	}
 
 	vha = sp->fcport->vha;
-	req = vha->req;
 
 	/* Set transfer direction */
 	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
-		cmd_pkt->task_mgmt_flags =
-		    __constant_cpu_to_le16(TMF_WRITE_DATA);
+		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
 		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
+		vha->qla_stats.output_requests++;
 	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
-		cmd_pkt->task_mgmt_flags =
-		    __constant_cpu_to_le16(TMF_READ_DATA);
+		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
 		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
+		vha->qla_stats.input_requests++;
 	}
 
 	/* One DSD is available in the Command Type 3 IOCB */
@@ -833,7 +798,7 @@
 	 * match LBA in CDB + N
 	 */
 	case SCSI_PROT_DIF_TYPE2:
-		pkt->app_tag = __constant_cpu_to_le16(0);
+		pkt->app_tag = cpu_to_le16(0);
 		pkt->app_tag_mask[0] = 0x0;
 		pkt->app_tag_mask[1] = 0x0;
 
@@ -864,7 +829,7 @@
 	case SCSI_PROT_DIF_TYPE1:
 		pkt->ref_tag = cpu_to_le32((uint32_t)
 		    (0xffffffff & scsi_get_lba(cmd)));
-		pkt->app_tag = __constant_cpu_to_le16(0);
+		pkt->app_tag = cpu_to_le16(0);
 		pkt->app_tag_mask[0] = 0x0;
 		pkt->app_tag_mask[1] = 0x0;
@@ -939,9 +904,9 @@
 	return 1;
 }
 
-static int
+int
 qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
-	uint32_t *dsd, uint16_t tot_dsds)
+	uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
 {
 	void *next_dsd;
 	uint8_t avail_dsds = 0;
@@ -951,21 +916,32 @@
 	uint32_t *cur_dsd = dsd;
 	uint16_t used_dsds = tot_dsds;
 
-	uint32_t prot_int;
+	uint32_t prot_int; /* protection interval */
 	uint32_t partial;
 	struct qla2_sgx sgx;
 	dma_addr_t sle_dma;
 	uint32_t sle_dma_len, tot_prot_dma_len = 0;
-	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
-
-	prot_int = cmd->device->sector_size;
+	struct scsi_cmnd *cmd;
 
 	memset(&sgx, 0, sizeof(struct qla2_sgx));
-	sgx.tot_bytes = scsi_bufflen(cmd);
-	sgx.cur_sg = scsi_sglist(cmd);
-	sgx.sp = sp;
-
-	sg_prot = scsi_prot_sglist(cmd);
+	if (sp) {
+		cmd = GET_CMD_SP(sp);
+		prot_int = cmd->device->sector_size;
+
+		sgx.tot_bytes = scsi_bufflen(cmd);
+		sgx.cur_sg = scsi_sglist(cmd);
+		sgx.sp = sp;
+
+		sg_prot = scsi_prot_sglist(cmd);
+	} else if (tc) {
+		prot_int = tc->blk_sz;
+		sgx.tot_bytes = tc->bufflen;
+		sgx.cur_sg = tc->sg;
+		sg_prot = tc->prot_sg;
+	} else {
+		BUG();
+		return 1;
+	}
 
 	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
@@ -998,10 +974,18 @@
 				return 1;
 			}
 
-			list_add_tail(&dsd_ptr->list,
-			    &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
+			if (sp) {
+				list_add_tail(&dsd_ptr->list,
+				    &((struct crc_context *)
+					sp->u.scmd.ctx)->dsd_list);
+
+				sp->flags |= SRB_CRC_CTX_DSD_VALID;
+			} else {
+				list_add_tail(&dsd_ptr->list,
+				    &(tc->ctx->dsd_list));
+				tc->ctx_dsd_alloced = 1;
+			}
 
-			sp->flags |= SRB_CRC_CTX_DSD_VALID;
 
 			/* add new list to cmd iocb or last list */
 			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
@@ -1036,21 +1020,32 @@
 	return 0;
 }
 
-static int
+int
 qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
-	uint16_t tot_dsds)
+	uint16_t tot_dsds, struct qla_tgt_cmd *tc)
 {
 	void *next_dsd;
 	uint8_t avail_dsds = 0;
 	uint32_t dsd_list_len;
 	struct dsd_dma *dsd_ptr;
-	struct scatterlist *sg;
+	struct scatterlist *sg, *sgl;
 	uint32_t *cur_dsd = dsd;
 	int i;
 	uint16_t used_dsds = tot_dsds;
-	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+	struct scsi_cmnd *cmd;
 
-	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
+	if (sp) {
+		cmd = GET_CMD_SP(sp);
+		sgl = scsi_sglist(cmd);
+	} else if (tc) {
+		sgl = tc->sg;
+	} else {
+		BUG();
+		return 1;
+	}
+
+
+	for_each_sg(sgl, sg, tot_dsds, i) {
 		dma_addr_t sle_dma;
 
 		/* Allocate additional continuation packets? */
@@ -1079,10 +1074,17 @@
 				return 1;
 			}
 
-			list_add_tail(&dsd_ptr->list,
-			    &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
-
-			sp->flags |= SRB_CRC_CTX_DSD_VALID;
+			if (sp) {
+				list_add_tail(&dsd_ptr->list,
+				    &((struct crc_context *)
+					sp->u.scmd.ctx)->dsd_list);
+
+				sp->flags |= SRB_CRC_CTX_DSD_VALID;
+			} else {
+				list_add_tail(&dsd_ptr->list,
+				    &(tc->ctx->dsd_list));
+				tc->ctx_dsd_alloced = 1;
+			}
 
 			/* add new list to cmd iocb or last list */
 			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
@@ -1105,23 +1107,37 @@
 	return 0;
 }
 
-static int
+int
 qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
-	uint32_t *dsd,
-	uint16_t tot_dsds)
+	uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
 {
 	void *next_dsd;
 	uint8_t avail_dsds = 0;
 	uint32_t dsd_list_len;
 	struct dsd_dma *dsd_ptr;
-	struct scatterlist *sg;
+	struct scatterlist *sg, *sgl;
 	int i;
 	struct scsi_cmnd *cmd;
 	uint32_t *cur_dsd = dsd;
-	uint16_t	used_dsds = tot_dsds;
+	uint16_t used_dsds = tot_dsds;
+	struct scsi_qla_host *vha;
 
-	cmd = GET_CMD_SP(sp);
-	scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
+	if (sp) {
+		cmd = GET_CMD_SP(sp);
+		sgl = scsi_prot_sglist(cmd);
+		vha = sp->fcport->vha;
+	} else if (tc) {
+		vha = tc->vha;
+		sgl = tc->prot_sg;
+	} else {
+		BUG();
+		return 1;
+	}
+
+	ql_dbg(ql_dbg_tgt, vha, 0xe021,
+		"%s: enter\n", __func__);
+
+	for_each_sg(sgl, sg, tot_dsds, i) {
 		dma_addr_t sle_dma;
 
 		/* Allocate additional continuation packets? */
@@ -1150,10 +1166,17 @@
 				return 1;
 			}
 
-			list_add_tail(&dsd_ptr->list,
-			    &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
-
-			sp->flags |= SRB_CRC_CTX_DSD_VALID;
+			if (sp) {
+				list_add_tail(&dsd_ptr->list,
+				    &((struct crc_context *)
+					sp->u.scmd.ctx)->dsd_list);
+
+				sp->flags |= SRB_CRC_CTX_DSD_VALID;
+			} else {
+				list_add_tail(&dsd_ptr->list,
+				    &(tc->ctx->dsd_list));
+				tc->ctx_dsd_alloced = 1;
+			}
 
 			/* add new list to cmd iocb or last list */
 			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
@@ -1191,8 +1214,6 @@
 	uint32_t		*cur_dsd, *fcp_dl;
 	scsi_qla_host_t		*vha;
 	struct scsi_cmnd	*cmd;
-	struct scatterlist	*cur_seg;
-	int			sgc;
 	uint32_t		total_bytes = 0;
 	uint32_t		data_bytes;
 	uint32_t		dif_bytes;
@@ -1205,14 +1226,11 @@
 	uint16_t		fcp_cmnd_len;
 	struct fcp_cmnd		*fcp_cmnd;
 	dma_addr_t		crc_ctx_dma;
-	char			tag[2];
 
 	cmd = GET_CMD_SP(sp);
 
-	sgc = 0;
 	/* Update entry type to indicate Command Type CRC_2 IOCB */
-	*((uint32_t *)(&cmd_pkt->entry_type)) =
-	    __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);
+	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_CRC_2);
 
 	vha = sp->fcport->vha;
 	ha = vha->hw;
@@ -1220,7 +1238,7 @@
 	/* No data transfer */
 	data_bytes = scsi_bufflen(cmd);
 	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
-		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
+		cmd_pkt->byte_count = cpu_to_le32(0);
 		return QLA_SUCCESS;
 	}
@@ -1229,10 +1247,10 @@
 	/* Set transfer direction */
 	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
 		cmd_pkt->control_flags =
-		    __constant_cpu_to_le16(CF_WRITE_DATA);
+		    cpu_to_le16(CF_WRITE_DATA);
 	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
 		cmd_pkt->control_flags =
-		    __constant_cpu_to_le16(CF_READ_DATA);
+		    cpu_to_le16(CF_READ_DATA);
 	}
 
 	if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
@@ -1297,25 +1315,7 @@
 	cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
 	    MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
 	fcp_cmnd->task_management = 0;
-
-	/*
-	 * Update tagged queuing modifier if using command tag queuing
-	 */
-	if (scsi_populate_tag_msg(cmd, tag)) {
-		switch (tag[0]) {
-		case HEAD_OF_QUEUE_TAG:
-			fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE;
-			break;
-		case ORDERED_QUEUE_TAG:
-			fcp_cmnd->task_attribute = TSK_ORDERED;
-			break;
-		default:
-			fcp_cmnd->task_attribute = TSK_SIMPLE;
-			break;
-		}
-	} else {
-		fcp_cmnd->task_attribute = TSK_SIMPLE;
-	}
+	fcp_cmnd->task_attribute = TSK_SIMPLE;
 
 	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
@@ -1372,7 +1372,7 @@
 	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
 	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
 	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
-	crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
+	crc_ctx_pkt->guard_seed = cpu_to_le16(0);
 	/* Fibre channel byte count */
 	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
 	fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
@@ -1380,30 +1380,27 @@
 	*fcp_dl = htonl(total_bytes);
 
 	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
-		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
+		cmd_pkt->byte_count = cpu_to_le32(0);
 		return QLA_SUCCESS;
 	}
 	/* Walks data segments */
-	cmd_pkt->control_flags |=
-	    __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
+	cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
 
 	if (!bundling && tot_prot_dsds) {
 		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
-		    cur_dsd, tot_dsds))
+			cur_dsd, tot_dsds, NULL))
 			goto crc_queuing_error;
 	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
-	    (tot_dsds - tot_prot_dsds)))
+			(tot_dsds - tot_prot_dsds), NULL))
 		goto crc_queuing_error;
 
 	if (bundling && tot_prot_dsds) {
 		/* Walks dif segments */
-		cur_seg = scsi_prot_sglist(cmd);
-		cmd_pkt->control_flags |=
-			__constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
+		cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
 		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
 		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
-		    tot_prot_dsds))
+				tot_prot_dsds, NULL))
 			goto crc_queuing_error;
 	}
 	return QLA_SUCCESS;
@@ -1423,7 +1420,7 @@
 int
 qla24xx_start_scsi(srb_t *sp)
 {
-	int		ret, nseg;
+	int		nseg;
 	unsigned long   flags;
 	uint32_t	*clr_ptr;
 	uint32_t	index;
@@ -1437,11 +1434,8 @@
 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
 	struct scsi_qla_host *vha = sp->fcport->vha;
 	struct qla_hw_data *ha = vha->hw;
-	char		tag[2];
 
 	/* Setup device pointers. */
-	ret = 0;
-
 	qla25xx_set_que(sp, &rsp);
 	req = vha->req;
@@ -1483,8 +1477,8 @@
 	tot_dsds = nseg;
 	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
 	if (req->cnt < (req_cnt + 2)) {
-		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
-
+		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
+		    RD_REG_DWORD_RELAXED(req->req_q_out);
 		if (req->ring_index < cnt)
 			req->cnt = cnt - req->ring_index;
 		else
@@ -1520,22 +1514,7 @@
 	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
 	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
 
-	/* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
-	if (scsi_populate_tag_msg(cmd, tag)) {
-		switch (tag[0]) {
-		case HEAD_OF_QUEUE_TAG:
-			cmd_pkt->task = TSK_HEAD_OF_QUEUE;
-			break;
-		case ORDERED_QUEUE_TAG:
-			cmd_pkt->task = TSK_ORDERED;
-			break;
-		default:
-			cmd_pkt->task = TSK_SIMPLE;
-			break;
-		}
-	} else {
-		cmd_pkt->task = TSK_SIMPLE;
-	}
+	cmd_pkt->task = TSK_SIMPLE;
 
 	/* Load SCSI command packet. */
 	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
@@ -1702,8 +1681,8 @@
 	tot_prot_dsds = nseg;
 	tot_dsds += nseg;
 	if (req->cnt < (req_cnt + 2)) {
-		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
-
+		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
+		    RD_REG_DWORD_RELAXED(req->req_q_out);
 		if (req->ring_index < cnt)
 			req->cnt = cnt - req->ring_index;
 		else
@@ -1750,7 +1729,7 @@
 	cmd_pkt->entry_count = (uint8_t)req_cnt;
 	/* Specify response queue number where completion should happen */
 	cmd_pkt->entry_status = (uint8_t) rsp->id;
-	cmd_pkt->timeout = __constant_cpu_to_le16(0);
+	cmd_pkt->timeout = cpu_to_le16(0);
 	wmb();
 
 	/* Adjust ring index. */
@@ -1800,12 +1779,23 @@
 }
 
 /* Generic Control-SRB manipulation functions. */
+
+/* hardware_lock assumed to be held. */
+void *
+qla2x00_alloc_iocbs_ready(scsi_qla_host_t *vha, srb_t *sp)
+{
+	if (qla2x00_reset_active(vha))
+		return NULL;
+
+	return qla2x00_alloc_iocbs(vha, sp);
+}
+
 void *
 qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
 {
 	struct qla_hw_data *ha = vha->hw;
 	struct req_que *req = ha->req_q_map[0];
-	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
+	device_reg_t *reg = ISP_QUE_REG(ha, req->id);
 	uint32_t index, handle;
 	request_t *pkt;
 	uint16_t cnt, req_cnt;
@@ -1819,7 +1809,7 @@
 
 	/* Check for room in outstanding command list. */
 	handle = req->current_outstanding_cmd;
-	for (index = 1; req->num_outstanding_cmds; index++) {
+	for (index = 1; index < req->num_outstanding_cmds; index++) {
 		handle++;
 		if (handle == req->num_outstanding_cmds)
 			handle = 1;
@@ -1843,10 +1833,10 @@
 
 skip_cmd_array:
 	/* Check for room on request queue. */
-	if (req->cnt < req_cnt) {
-		if (ha->mqenable || IS_QLA83XX(ha))
+	if (req->cnt < req_cnt + 2) {
+		if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
 			cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
-		else if (IS_QLA82XX(ha))
+		else if (IS_P3P_TYPE(ha))
 			cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
 		else if (IS_FWI2_CAPABLE(ha))
 			cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
@@ -1862,7 +1852,7 @@
 			req->cnt = req->length -
 			    (req->ring_index - cnt);
 	}
-	if (req->cnt < req_cnt)
+	if (req->cnt < req_cnt + 2)
 		goto queuing_error;
 
 	/* Prep packet */
@@ -1870,8 +1860,8 @@
 	pkt = req->ring_ptr;
 	memset(pkt, 0, REQUEST_ENTRY_SIZE);
 	if (IS_QLAFX00(ha)) {
-		WRT_REG_BYTE(&pkt->entry_count, req_cnt);
-		WRT_REG_WORD(&pkt->handle, handle);
+		WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
+		WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
 	} else {
 		pkt->entry_count = req_cnt;
 		pkt->handle = handle;
	}
@@ -1929,6 +1919,9 @@
 	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
 	logio->control_flags =
 	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
+	if (!sp->fcport->tgt_session ||
+	    !sp->fcport->tgt_session->keep_nport_handle)
+		logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
 	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
 	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
 	logio->port_id[1] = sp->fcport->d_id.b.area;
@@ -1988,7 +1981,7 @@
 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
 {
 	uint32_t flags;
-	unsigned int lun;
+	uint64_t lun;
 	struct fc_port *fcport = sp->fcport;
 	scsi_qla_host_t *vha = fcport->vha;
 	struct qla_hw_data *ha = vha->hw;
@@ -2027,10 +2020,10 @@
 	els_iocb->entry_status = 0;
 	els_iocb->handle = sp->handle;
 	els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
-	els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
+	els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
 	els_iocb->vp_index = sp->fcport->vha->vp_idx;
 	els_iocb->sof_type = EST_SOFI3;
-	els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
+	els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
 
 	els_iocb->opcode =
 	    sp->type == SRB_ELS_CMD_RPT ?
@@ -2058,6 +2051,8 @@
 	    (bsg_job->reply_payload.sg_list)));
 	els_iocb->rx_len = cpu_to_le32(sg_dma_len
 	    (bsg_job->reply_payload.sg_list));
+
+	sp->fcport->vha->qla_stats.control_requests++;
 }
 
 static void
@@ -2072,7 +2067,6 @@
 	struct qla_hw_data *ha = vha->hw;
 	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
 	int loop_iterartion = 0;
-	int cont_iocb_prsnt = 0;
 	int entry_count = 1;
 
 	memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
@@ -2080,13 +2074,13 @@
 	ct_iocb->entry_status = 0;
 	ct_iocb->handle1 = sp->handle;
 	SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
-	ct_iocb->status = __constant_cpu_to_le16(0);
-	ct_iocb->control_flags = __constant_cpu_to_le16(0);
+	ct_iocb->status = cpu_to_le16(0);
+	ct_iocb->control_flags = cpu_to_le16(0);
 	ct_iocb->timeout = 0;
 	ct_iocb->cmd_dsd_count =
-	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
+	    cpu_to_le16(bsg_job->request_payload.sg_cnt);
 	ct_iocb->total_dsd_count =
-	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
+	    cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
 	ct_iocb->req_bytecount =
 	    cpu_to_le32(bsg_job->request_payload.payload_len);
 	ct_iocb->rsp_bytecount =
@@ -2123,7 +2117,6 @@
 			    vha->hw->req_q_map[0]);
 			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
 			avail_dsds = 5;
-			cont_iocb_prsnt = 1;
 			entry_count++;
 		}
 
@@ -2135,6 +2128,8 @@
 		avail_dsds--;
 	}
 	ct_iocb->entry_count = entry_count;
+
+	sp->fcport->vha->qla_stats.control_requests++;
 }
 
 static void
@@ -2149,7 +2144,6 @@
 	struct qla_hw_data *ha = vha->hw;
 	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
 	int loop_iterartion = 0;
-	int cont_iocb_prsnt = 0;
 	int entry_count = 1;
 
 	ct_iocb->entry_type = CT_IOCB_TYPE;
@@ -2159,13 +2153,13 @@
 	ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
 	ct_iocb->vp_index = sp->fcport->vha->vp_idx;
 
-	ct_iocb->comp_status = __constant_cpu_to_le16(0);
+	ct_iocb->comp_status = cpu_to_le16(0);
 
 	ct_iocb->cmd_dsd_count =
-	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
+	    cpu_to_le16(bsg_job->request_payload.sg_cnt);
 	ct_iocb->timeout = 0;
 	ct_iocb->rsp_dsd_count =
-	    __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
+	    cpu_to_le16(bsg_job->reply_payload.sg_cnt);
 	ct_iocb->rsp_byte_count =
 	    cpu_to_le32(bsg_job->reply_payload.payload_len);
 	ct_iocb->cmd_byte_count =
@@ -2196,7 +2190,6 @@
 			    ha->req_q_map[0]);
 			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
 			avail_dsds = 5;
-			cont_iocb_prsnt = 1;
 			entry_count++;
 		}
 
@@ -2219,7 +2212,7 @@
 int
 qla82xx_start_scsi(srb_t *sp)
 {
-	int		ret, nseg;
+	int		nseg;
 	unsigned long   flags;
 	struct scsi_cmnd *cmd;
 	uint32_t	*clr_ptr;
@@ -2237,10 +2230,8 @@
 	struct qla_hw_data *ha = vha->hw;
 	struct req_que *req = NULL;
 	struct rsp_que *rsp = NULL;
-	char		tag[2];
 
 	/* Setup device pointers. */
-	ret = 0;
 	reg = &ha->iobase->isp82;
 	cmd = GET_CMD_SP(sp);
 	req = vha->req;
@@ -2416,22 +2407,6 @@
 		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
 			ctx->fcp_cmnd->additional_cdb_len |= 2;
 
-		/*
-		 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
-		 */
-		if (scsi_populate_tag_msg(cmd, tag)) {
-			switch (tag[0]) {
-			case HEAD_OF_QUEUE_TAG:
-				ctx->fcp_cmnd->task_attribute =
-				    TSK_HEAD_OF_QUEUE;
-				break;
-			case ORDERED_QUEUE_TAG:
-				ctx->fcp_cmnd->task_attribute =
-				    TSK_ORDERED;
-				break;
-			}
-		}
-
 		/* Populate the FCP_PRIO. */
 		if (ha->flags.fcp_prio_enabled)
 			ctx->fcp_cmnd->task_attribute |=
@@ -2492,20 +2467,6 @@
 		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
 		    sizeof(cmd_pkt->lun));
 
-		/*
-		 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
-		 */
-		if (scsi_populate_tag_msg(cmd, tag)) {
-			switch (tag[0]) {
-			case HEAD_OF_QUEUE_TAG:
-				cmd_pkt->task = TSK_HEAD_OF_QUEUE;
-				break;
-			case ORDERED_QUEUE_TAG:
-				cmd_pkt->task = TSK_ORDERED;
-				break;
-			}
-		}
-
 		/* Populate the FCP_PRIO. */
 		if (ha->flags.fcp_prio_enabled)
 			cmd_pkt->task |= sp->fcport->fcp_prio << 3;
@@ -2549,16 +2510,12 @@
 	/* write, read and verify logic */
 	dbval = dbval | (req->id << 8) | (req->ring_index << 16);
 	if (ql2xdbwr)
-		qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
+		qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
 	else {
-		WRT_REG_DWORD(
-			(unsigned long __iomem *)ha->nxdb_wr_ptr,
-			dbval);
+		WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
 		wmb();
-		while (RD_REG_DWORD((void __iomem *)ha->nxdb_rd_ptr) != dbval) {
-			WRT_REG_DWORD(
-				(unsigned long __iomem *)ha->nxdb_wr_ptr,
-				dbval);
+		while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
+			WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
 			wmb();
 		}
 	}
@@ -2586,6 +2543,29 @@
 	return QLA_FUNCTION_FAILED;
 }
 
+static void
+qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
+{
+	struct srb_iocb *aio = &sp->u.iocb_cmd;
+	scsi_qla_host_t *vha = sp->fcport->vha;
+	struct req_que *req = vha->req;
+
+	memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
+	abt_iocb->entry_type = ABORT_IOCB_TYPE;
+	abt_iocb->entry_count = 1;
+	abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
+	abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+	abt_iocb->handle_to_abort =
+	    cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl));
+	abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
+	abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
+	abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
+	abt_iocb->vp_index = vha->vp_idx;
+	abt_iocb->req_que_no = cpu_to_le16(req->id);
+	/* Send the command to the firmware */
+	wmb();
+}
+
 int
 qla2x00_start_sp(srb_t *sp)
 {
@@ -2639,7 +2619,9 @@
 		qlafx00_fxdisc_iocb(sp, pkt);
 		break;
 	case SRB_ABT_CMD:
-		qlafx00_abort_iocb(sp, pkt);
+		IS_QLAFX00(ha) ?
+		qlafx00_abort_iocb(sp, pkt) :
+		qla24xx_abort_iocb(sp, pkt);
 		break;
 	default:
 		break;
	}
@@ -2667,7 +2649,7 @@
 
 	/*Update entry type to indicate bidir command */
 	*((uint32_t *)(&cmd_pkt->entry_type)) =
-	    __constant_cpu_to_le32(COMMAND_BIDIRECTIONAL);
+	    cpu_to_le32(COMMAND_BIDIRECTIONAL);
 
 	/* Set the transfer direction, in this set both flags
 	 * Also set the BD_WRAP_BACK flag, firmware will take care
@@ -2675,8 +2657,7 @@
 	 */
 	cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
 	cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
-	cmd_pkt->control_flags =
-	    __constant_cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
+	cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
 	    BD_WRAP_BACK);
 
 	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
@@ -2687,6 +2668,9 @@
 	vha->bidi_stats.transfer_bytes += req_data_len;
 	vha->bidi_stats.io_count++;
 
+	vha->qla_stats.output_bytes += req_data_len;
+	vha->qla_stats.output_requests++;
+
 	/* Only one dsd is available for bidirectional IOCB, remaining dsds
 	 * are bundled in continuation iocb */
@@ -2782,10 +2766,10 @@
 	handle = req->current_outstanding_cmd;
 	for (index = 1; index < req->num_outstanding_cmds; index++) {
 		handle++;
-	if (handle == req->num_outstanding_cmds)
-		handle = 1;
-	if (!req->outstanding_cmds[handle])
-		break;
+		if (handle == req->num_outstanding_cmds)
+			handle = 1;
+		if (!req->outstanding_cmds[handle])
+			break;
 	}
 
 	if (index == req->num_outstanding_cmds) {
@@ -2798,8 +2782,8 @@
 
 	/* Check for room on request queue. */
 	if (req->cnt < req_cnt + 2) {
-		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
-
+		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
+		    RD_REG_DWORD_RELAXED(req->req_q_out);
 		if (req->ring_index < cnt)
 			req->cnt = cnt - req->ring_index;
 		else