--- zzzz-none-000/linux-3.10.107/drivers/net/wireless/iwlwifi/pcie/tx.c	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/drivers/net/wireless/iwlwifi/pcie/tx.c	2021-02-04 17:41:59.000000000 +0000
@@ -1,6 +1,7 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
@@ -34,6 +35,7 @@
 #include "iwl-csr.h"
 #include "iwl-prph.h"
 #include "iwl-io.h"
+#include "iwl-scd.h"
 #include "iwl-op-mode.h"
 #include "internal.h"
 /* FIXME: need to abstract out TX command (once we know what it looks like) */
@@ -65,34 +67,40 @@
  ***************************************************/
 static int iwl_queue_space(const struct iwl_queue *q)
 {
-	int s = q->read_ptr - q->write_ptr;
+	unsigned int max;
+	unsigned int used;
 
-	if (q->read_ptr > q->write_ptr)
-		s -= q->n_bd;
+	/*
+	 * To avoid ambiguity between empty and completely full queues, there
+	 * should always be less than TFD_QUEUE_SIZE_MAX elements in the queue.
+	 * If q->n_window is smaller than TFD_QUEUE_SIZE_MAX, there is no need
+	 * to reserve any queue entries for this purpose.
+	 */
+	if (q->n_window < TFD_QUEUE_SIZE_MAX)
+		max = q->n_window;
+	else
+		max = TFD_QUEUE_SIZE_MAX - 1;
+
+	/*
+	 * TFD_QUEUE_SIZE_MAX is a power of 2, so the following is equivalent to
+	 * modulo by TFD_QUEUE_SIZE_MAX and is well defined.
+	 */
+	used = (q->write_ptr - q->read_ptr) & (TFD_QUEUE_SIZE_MAX - 1);
+
+	if (WARN_ON(used > max))
+		return 0;
 
-	if (s <= 0)
-		s += q->n_window;
-	/* keep some reserve to not confuse empty and full situations */
-	s -= 2;
-	if (s < 0)
-		s = 0;
-	return s;
+	return max - used;
 }
 
 /*
  * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
  */
-static int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
+static int iwl_queue_init(struct iwl_queue *q, int slots_num, u32 id)
 {
-	q->n_bd = count;
 	q->n_window = slots_num;
 	q->id = id;
 
-	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
-	 * and iwl_queue_dec_wrap are broken. */
-	if (WARN_ON(!is_power_of_2(count)))
-		return -EINVAL;
-
 	/* slots_num must be power-of-two size, otherwise
 	 * get_cmd_index is broken. */
 	if (WARN_ON(!is_power_of_2(slots_num)))
@@ -139,7 +147,6 @@
 static void iwl_pcie_txq_stuck_timer(unsigned long data)
 {
 	struct iwl_txq *txq = (void *)data;
-	struct iwl_queue *q = &txq->q;
 	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
 	struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
 	u32 scd_sram_addr = trans_pcie->scd_base_addr +
@@ -156,7 +163,7 @@
 	spin_unlock(&txq->lock);
 
 	IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
-		jiffies_to_msecs(trans_pcie->wd_timeout));
+		jiffies_to_msecs(txq->wd_timeout));
 	IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
 		txq->q.read_ptr, txq->q.write_ptr);
 
@@ -185,17 +192,12 @@
 		IWL_ERR(trans,
 			"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
 			i, active ? "" : "in", fifo, tbl_dw,
-			iwl_read_prph(trans,
-				      SCD_QUEUE_RDPTR(i)) & (txq->q.n_bd - 1),
+			iwl_read_prph(trans, SCD_QUEUE_RDPTR(i)) &
+				(TFD_QUEUE_SIZE_MAX - 1),
 			iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
 	}
 
-	for (i = q->read_ptr; i != q->write_ptr;
-	     i = iwl_queue_inc_wrap(i, q->n_bd))
-		IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
-			le32_to_cpu(txq->scratchbufs[i].scratch));
-
-	iwl_op_mode_nic_error(trans->op_mode);
+	iwl_force_nmi(trans);
 }
 
 /*
@@ -217,26 +219,27 @@
 
 	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
 
-	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
-
 	sta_id = tx_cmd->sta_id;
 	sec_ctl = tx_cmd->sec_ctl;
 
 	switch (sec_ctl & TX_CMD_SEC_MSK) {
 	case TX_CMD_SEC_CCM:
-		len += CCMP_MIC_LEN;
+		len += IEEE80211_CCMP_MIC_LEN;
 		break;
 	case TX_CMD_SEC_TKIP:
-		len += TKIP_ICV_LEN;
+		len += IEEE80211_TKIP_ICV_LEN;
 		break;
 	case TX_CMD_SEC_WEP:
-		len += WEP_IV_LEN + WEP_ICV_LEN;
+		len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
 		break;
 	}
 
 	if (trans_pcie->bc_table_dword)
 		len = DIV_ROUND_UP(len, 4);
 
+	if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
+		return;
+
 	bc_ent = cpu_to_le16(len | (sta_id << 12));
 
 	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
@@ -275,53 +278,64 @@
 /*
  * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
  */
-void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
+static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
+				    struct iwl_txq *txq)
 {
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	u32 reg = 0;
 	int txq_id = txq->q.id;
 
-	if (txq->need_update == 0)
-		return;
+	lockdep_assert_held(&txq->lock);
 
-	if (trans->cfg->base_params->shadow_reg_enable) {
-		/* shadow register enabled */
-		iwl_write32(trans, HBUS_TARG_WRPTR,
-			    txq->q.write_ptr | (txq_id << 8));
-	} else {
-		struct iwl_trans_pcie *trans_pcie =
-			IWL_TRANS_GET_PCIE_TRANS(trans);
-		/* if we're trying to save power */
-		if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
-			/* wake up nic if it's powered down ...
-			 * uCode will wake up, and interrupt us again, so next
-			 * time we'll skip this part. */
-			reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
-
-			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
-				IWL_DEBUG_INFO(trans,
-					"Tx queue %d requesting wakeup,"
-					" GP1 = 0x%x\n", txq_id, reg);
-				iwl_set_bit(trans, CSR_GP_CNTRL,
-					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-				return;
-			}
+	/*
+	 * explicitly wake up the NIC if:
+	 * 1. shadow registers aren't enabled
	 * 2. NIC is woken up for CMD regardless of shadow outside this function
+	 * 3. there is a chance that the NIC is asleep
+	 */
+	if (!trans->cfg->base_params->shadow_reg_enable &&
+	    txq_id != trans_pcie->cmd_queue &&
+	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
+		/*
+		 * wake up nic if it's powered down ...
+		 * uCode will wake up, and interrupt us again, so next
+		 * time we'll skip this part.
+		 */
+		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
 
-			IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id,
-				     txq->q.write_ptr);
+		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+			IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
+				       txq_id, reg);
+			iwl_set_bit(trans, CSR_GP_CNTRL,
+				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+			txq->need_update = true;
+			return;
+		}
+	}
 
-			iwl_write_direct32(trans, HBUS_TARG_WRPTR,
-					   txq->q.write_ptr | (txq_id << 8));
+	/*
+	 * if not in power-save mode, uCode will never sleep when we're
+	 * trying to tx (during RFKILL, we're not trying to tx).
+ */ + IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->q.write_ptr); + iwl_write32(trans, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8)); +} - /* - * else not in power-save mode, - * uCode will never sleep when we're - * trying to tx (during RFKILL, we're not trying to tx). - */ - } else - iwl_write32(trans, HBUS_TARG_WRPTR, - txq->q.write_ptr | (txq_id << 8)); +void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int i; + + for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) { + struct iwl_txq *txq = &trans_pcie->txq[i]; + + spin_lock_bh(&txq->lock); + if (trans_pcie->txq[i].need_update) { + iwl_pcie_txq_inc_wr_ptr(trans, txq); + trans_pcie->txq[i].need_update = false; + } + spin_unlock_bh(&txq->lock); } - txq->need_update = 0; } static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx) @@ -336,13 +350,6 @@ return addr; } -static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx) -{ - struct iwl_tfd_tb *tb = &tfd->tbs[idx]; - - return le16_to_cpu(tb->hi_n_len) >> 4; -} - static inline void iwl_pcie_tfd_set_tb(struct iwl_tfd *tfd, u8 idx, dma_addr_t addr, u16 len) { @@ -381,11 +388,18 @@ /* first TB is never freed - it's the scratchbuf data */ - for (i = 1; i < num_tbs; i++) - dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i), - iwl_pcie_tfd_tb_get_len(tfd, i), - DMA_TO_DEVICE); - + for (i = 1; i < num_tbs; i++) { + if (meta->flags & BIT(i + CMD_TB_BITMAP_POS)) + dma_unmap_page(trans->dev, + iwl_pcie_tfd_tb_get_addr(tfd, i), + iwl_pcie_tfd_tb_get_len(tfd, i), + DMA_TO_DEVICE); + else + dma_unmap_single(trans->dev, + iwl_pcie_tfd_tb_get_addr(tfd, i), + iwl_pcie_tfd_tb_get_len(tfd, i), + DMA_TO_DEVICE); + } tfd->num_tbs = 0; } @@ -402,13 +416,17 @@ { struct iwl_tfd *tfd_tmp = txq->tfds; - /* rd_ptr is bounded by n_bd and idx is bounded by n_window */ + /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and + * idx is bounded by n_window + */ int rd_ptr = txq->q.read_ptr; int idx = get_cmd_index(&txq->q, rd_ptr); lockdep_assert_held(&txq->lock); - /* We have only q->n_window txq->entries, but we use q->n_bd tfds */ + /* We have only q->n_window txq->entries, but we use + * TFD_QUEUE_SIZE_MAX tfds + */ iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]); /* free SKB */ @@ -429,7 +447,7 @@ } static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq, - dma_addr_t addr, u16 len, u8 reset) + dma_addr_t addr, u16 len, bool reset) { struct iwl_queue *q; struct iwl_tfd *tfd, *tfd_tmp; @@ -451,16 +469,13 @@ return -EINVAL; } - if (WARN_ON(addr & ~DMA_BIT_MASK(36))) + if (WARN(addr & ~IWL_TX_DMA_MASK, + "Unaligned address = %llx\n", (unsigned long long)addr)) return -EINVAL; - if (unlikely(addr & ~IWL_TX_DMA_MASK)) - IWL_ERR(trans, "Unaligned address = %llx\n", - (unsigned long long)addr); - iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len); - return 0; + return num_tbs; } static int iwl_pcie_txq_alloc(struct iwl_trans *trans, @@ -538,15 +553,14 @@ { int ret; - txq->need_update = 0; + txq->need_update = false; /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. 
*/ BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); /* Initialize queue's high/low-water marks, and head/tail indexes */ - ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num, - txq_id); + ret = iwl_queue_init(&txq->q, slots_num, txq_id); if (ret) return ret; @@ -571,15 +585,12 @@ struct iwl_txq *txq = &trans_pcie->txq[txq_id]; struct iwl_queue *q = &txq->q; - if (!q->n_bd) - return; - spin_lock_bh(&txq->lock); while (q->write_ptr != q->read_ptr) { IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", txq_id, q->read_ptr); iwl_pcie_txq_free_tfd(trans, txq); - q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd); + q->read_ptr = iwl_queue_inc_wrap(q->read_ptr); } txq->active = false; spin_unlock_bh(&txq->lock); @@ -611,15 +622,17 @@ /* De-alloc array of command/tx buffers */ if (txq_id == trans_pcie->cmd_queue) for (i = 0; i < txq->q.n_window; i++) { - kfree(txq->entries[i].cmd); - kfree(txq->entries[i].free_buf); + kzfree(txq->entries[i].cmd); + kzfree(txq->entries[i].free_buf); } /* De-alloc circular buffer of TFDs */ - if (txq->q.n_bd) { - dma_free_coherent(dev, sizeof(struct iwl_tfd) * - txq->q.n_bd, txq->tfds, txq->q.dma_addr); + if (txq->tfds) { + dma_free_coherent(dev, + sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX, + txq->tfds, txq->q.dma_addr); txq->q.dma_addr = 0; + txq->tfds = NULL; dma_free_coherent(dev, sizeof(*txq->scratchbufs) * txq->q.n_window, @@ -635,17 +648,6 @@ memset(txq, 0, sizeof(*txq)); } -/* - * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask - */ -static void iwl_pcie_txq_set_sched(struct iwl_trans *trans, u32 mask) -{ - struct iwl_trans_pcie __maybe_unused *trans_pcie = - IWL_TRANS_GET_PCIE_TRANS(trans); - - iwl_write_prph(trans, SCD_TXFACT, mask); -} - void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -676,13 +678,15 @@ /* The chain extension of the SCD doesn't work well. This feature is * enabled by default by the HW, so we need to disable it manually. 
*/ - iwl_write_prph(trans, SCD_CHAINEXT_EN, 0); + if (trans->cfg->base_params->scd_chain_ext_wa) + iwl_write_prph(trans, SCD_CHAINEXT_EN, 0); iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue, - trans_pcie->cmd_fifo); + trans_pcie->cmd_fifo, + trans_pcie->cmd_q_wdg_timeout); /* Activate all Tx DMA/FIFO channels */ - iwl_pcie_txq_set_sched(trans, IWL_MASK(0, 7)); + iwl_scd_activate_fifos(trans); /* Enable DMA channel */ for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++) @@ -696,8 +700,9 @@ reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN); /* Enable L1-Active */ - iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG, - APMG_PCIDEV_STT_VAL_L1_ACT_DIS); + if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) + iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG, + APMG_PCIDEV_STT_VAL_L1_ACT_DIS); } void iwl_trans_pcie_tx_reset(struct iwl_trans *trans) @@ -728,40 +733,62 @@ iwl_pcie_tx_start(trans, 0); } +static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + unsigned long flags; + int ch, ret; + u32 mask = 0; + + spin_lock(&trans_pcie->irq_lock); + + if (!iwl_trans_grab_nic_access(trans, false, &flags)) + goto out; + + /* Stop each Tx DMA channel */ + for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) { + iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0); + mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch); + } + + /* Wait for DMA channels to be idle */ + ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000); + if (ret < 0) + IWL_ERR(trans, + "Failing on timeout while stopping DMA channel %d [0x%08x]\n", + ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG)); + + iwl_trans_release_nic_access(trans, &flags); + +out: + spin_unlock(&trans_pcie->irq_lock); +} + /* * iwl_pcie_tx_stop - Stop all Tx DMA channels */ int iwl_pcie_tx_stop(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - int ch, txq_id, ret; - unsigned long flags; + int txq_id; /* Turn off all Tx DMA fifos */ - spin_lock_irqsave(&trans_pcie->irq_lock, flags); + iwl_scd_deactivate_fifos(trans); - iwl_pcie_txq_set_sched(trans, 0); + /* Turn off all Tx DMA channels */ + iwl_pcie_tx_stop_fh(trans); - /* Stop each Tx DMA channel, and wait for it to be idle */ - for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) { - iwl_write_direct32(trans, - FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0); - ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG, - FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000); - if (ret < 0) - IWL_ERR(trans, - "Failing on timeout while stopping DMA channel %d [0x%08x]\n", - ch, - iwl_read_direct32(trans, - FH_TSSR_TX_STATUS_REG)); - } - spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); + /* + * This function can be called before the op_mode disabled the + * queues. This happens when we have an rfkill interrupt. + * Since we stop Tx altogether - mark the queues as stopped. 
+ */ + memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped)); + memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); - if (!trans_pcie->txq) { - IWL_WARN(trans, - "Stopping tx queues that aren't allocated...\n"); + /* This can happen: start_hw, stop_device */ + if (!trans_pcie->txq) return 0; - } /* Unmap DMA from host system and free skb's */ for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; @@ -834,7 +861,7 @@ sizeof(struct iwl_txq), GFP_KERNEL); if (!trans_pcie->txq) { IWL_ERR(trans, "Not enough memory for txq\n"); - ret = ENOMEM; + ret = -ENOMEM; goto error; } @@ -863,7 +890,6 @@ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int ret; int txq_id, slots_num; - unsigned long flags; bool alloc = false; if (!trans_pcie->txq) { @@ -873,16 +899,16 @@ alloc = true; } - spin_lock_irqsave(&trans_pcie->irq_lock, flags); + spin_lock(&trans_pcie->irq_lock); /* Turn off all Tx DMA fifos */ - iwl_write_prph(trans, SCD_TXFACT, 0); + iwl_scd_deactivate_fifos(trans); /* Tell NIC where to find the "keep warm" buffer */ iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG, trans_pcie->kw.dma >> 4); - spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); + spin_unlock(&trans_pcie->irq_lock); /* Alloc and init all Tx queues, including the command queue (#4/#9) */ for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; @@ -897,6 +923,11 @@ } } + iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE); + if (trans->cfg->base_params->num_of_queues > 20) + iwl_set_bits_prph(trans, SCD_GP_CTRL, + SCD_GP_CTRL_ENABLE_31_QUEUES); + return 0; error: /*Upon error, free only if we allocated something */ @@ -905,10 +936,18 @@ return ret; } -static inline void iwl_pcie_txq_progress(struct iwl_trans_pcie *trans_pcie, - struct iwl_txq *txq) +static inline void iwl_pcie_txq_progress(struct iwl_txq *txq) { - if (!trans_pcie->wd_timeout) + lockdep_assert_held(&txq->lock); + + if (!txq->wd_timeout) + return; + + /* + * station is asleep and we send data - that must + * be uAPSD or PS-Poll. Don't rearm the timer. + */ + if (txq->frozen) return; /* @@ -918,7 +957,7 @@ if (txq->q.read_ptr == txq->q.write_ptr) del_timer(&txq->stuck_timer); else - mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout); + mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); } /* Frees buffers until index _not_ inclusive */ @@ -927,8 +966,7 @@ { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq = &trans_pcie->txq[txq_id]; - /* n_bd is usually 256 => n_bd - 1 = 0xff */ - int tfd_num = ssn & (txq->q.n_bd - 1); + int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1); struct iwl_queue *q = &txq->q; int last_to_free; @@ -952,12 +990,12 @@ /*Since we free until index _not_ inclusive, the one before index is * the last we will free. 
This one must be used */ - last_to_free = iwl_queue_dec_wrap(tfd_num, q->n_bd); + last_to_free = iwl_queue_dec_wrap(tfd_num); if (!iwl_queue_used(q, last_to_free)) { IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n", - __func__, txq_id, last_to_free, q->n_bd, + __func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX, q->write_ptr, q->read_ptr); goto out; } @@ -967,7 +1005,7 @@ for (; q->read_ptr != tfd_num; - q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { + q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) { if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL)) continue; @@ -981,14 +1019,86 @@ iwl_pcie_txq_free_tfd(trans, txq); } - iwl_pcie_txq_progress(trans_pcie, txq); + iwl_pcie_txq_progress(txq); if (iwl_queue_space(&txq->q) > txq->q.low_mark) iwl_wake_queue(trans, txq); + + if (q->read_ptr == q->write_ptr) { + IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", q->id); + iwl_trans_pcie_unref(trans); + } + out: spin_unlock_bh(&txq->lock); } +static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, + const struct iwl_host_cmd *cmd) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int ret; + + lockdep_assert_held(&trans_pcie->reg_lock); + + if (!(cmd->flags & CMD_SEND_IN_IDLE) && + !trans_pcie->ref_cmd_in_flight) { + trans_pcie->ref_cmd_in_flight = true; + IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n"); + iwl_trans_pcie_ref(trans); + } + + /* + * wake up the NIC to make sure that the firmware will see the host + * command - we will let the NIC sleep once all the host commands + * returned. This needs to be done only on NICs that have + * apmg_wake_up_wa set. + */ + if (trans->cfg->base_params->apmg_wake_up_wa && + !trans_pcie->cmd_hold_nic_awake) { + __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + + ret = iwl_poll_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, + (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | + CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), + 15000); + if (ret < 0) { + __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + IWL_ERR(trans, "Failed to wake NIC for hcmd\n"); + return -EIO; + } + trans_pcie->cmd_hold_nic_awake = true; + } + + return 0; +} + +static int iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + lockdep_assert_held(&trans_pcie->reg_lock); + + if (trans_pcie->ref_cmd_in_flight) { + trans_pcie->ref_cmd_in_flight = false; + IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight - unref\n"); + iwl_trans_pcie_unref(trans); + } + + if (trans->cfg->base_params->apmg_wake_up_wa) { + if (WARN_ON(!trans_pcie->cmd_hold_nic_awake)) + return 0; + + trans_pcie->cmd_hold_nic_awake = false; + __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + } + return 0; +} + /* * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd * @@ -1001,29 +1111,36 @@ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq = &trans_pcie->txq[txq_id]; struct iwl_queue *q = &txq->q; + unsigned long flags; int nfreed = 0; lockdep_assert_held(&txq->lock); - if ((idx >= q->n_bd) || (!iwl_queue_used(q, idx))) { + if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(q, idx))) { IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n", - __func__, txq_id, idx, q->n_bd, + __func__, txq_id, idx, TFD_QUEUE_SIZE_MAX, 
q->write_ptr, q->read_ptr); return; } - for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx; - q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { + for (idx = iwl_queue_inc_wrap(idx); q->read_ptr != idx; + q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) { if (nfreed++ > 0) { IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", idx, q->write_ptr, q->read_ptr); - iwl_op_mode_nic_error(trans->op_mode); + iwl_force_nmi(trans); } } - iwl_pcie_txq_progress(trans_pcie, txq); + if (q->read_ptr == q->write_ptr) { + spin_lock_irqsave(&trans_pcie->reg_lock, flags); + iwl_pcie_clear_cmd_in_flight(trans); + spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); + } + + iwl_pcie_txq_progress(txq); } static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid, @@ -1051,98 +1168,138 @@ return 0; } -static inline void iwl_pcie_txq_set_inactive(struct iwl_trans *trans, - u16 txq_id) -{ - /* Simply stop the queue, but don't change any configuration; - * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */ - iwl_write_prph(trans, - SCD_QUEUE_STATUS_BITS(txq_id), - (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)| - (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); -} - -void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo, - int sta_id, int tid, int frame_limit, u16 ssn) +/* Receiver address (actually, Rx station's index into station table), + * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */ +#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid)) + +void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, + const struct iwl_trans_txq_scd_cfg *cfg, + unsigned int wdg_timeout) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *txq = &trans_pcie->txq[txq_id]; + int fifo = -1; if (test_and_set_bit(txq_id, trans_pcie->queue_used)) WARN_ONCE(1, "queue %d already used - expect issues", txq_id); - /* Stop this Tx queue before configuring it */ - iwl_pcie_txq_set_inactive(trans, txq_id); + txq->wd_timeout = msecs_to_jiffies(wdg_timeout); - /* Set this queue as a chain-building queue unless it is CMD queue */ - if (txq_id != trans_pcie->cmd_queue) - iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id)); + if (cfg) { + fifo = cfg->fifo; - /* If this queue is mapped to a certain station: it is an AGG queue */ - if (sta_id >= 0) { - u16 ra_tid = BUILD_RAxTID(sta_id, tid); + /* Disable the scheduler prior configuring the cmd queue */ + if (txq_id == trans_pcie->cmd_queue && + trans_pcie->scd_set_active) + iwl_scd_enable_set_active(trans, 0); - /* Map receiver-address / traffic-ID to this queue */ - iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id); + /* Stop this Tx queue before configuring it */ + iwl_scd_txq_set_inactive(trans, txq_id); - /* enable aggregations for the queue */ - iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id)); - } else { - /* - * disable aggregations for the queue, this will also make the - * ra_tid mapping configuration irrelevant since it is now a - * non-AGG queue. 
- */ - iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id)); + /* Set this queue as a chain-building queue unless it is CMD */ + if (txq_id != trans_pcie->cmd_queue) + iwl_scd_txq_set_chain(trans, txq_id); + + if (cfg->aggregate) { + u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid); + + /* Map receiver-address / traffic-ID to this queue */ + iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id); + + /* enable aggregations for the queue */ + iwl_scd_txq_enable_agg(trans, txq_id); + txq->ampdu = true; + } else { + /* + * disable aggregations for the queue, this will also + * make the ra_tid mapping configuration irrelevant + * since it is now a non-AGG queue. + */ + iwl_scd_txq_disable_agg(trans, txq_id); + + ssn = txq->q.read_ptr; + } } /* Place first TFD at index corresponding to start sequence number. * Assumes that ssn_idx is valid (!= 0xFFF) */ - trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff); - trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff); - + txq->q.read_ptr = (ssn & 0xff); + txq->q.write_ptr = (ssn & 0xff); iwl_write_direct32(trans, HBUS_TARG_WRPTR, (ssn & 0xff) | (txq_id << 8)); - iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn); - /* Set up Tx window size and frame limit for this queue */ - iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr + - SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0); - iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr + + if (cfg) { + u8 frame_limit = cfg->frame_limit; + + iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn); + + /* Set up Tx window size and frame limit for this queue */ + iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr + + SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0); + iwl_trans_write_mem32(trans, + trans_pcie->scd_base_addr + SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32), ((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) & - SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) | + SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) | ((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & - SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK)); + SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK)); + + /* Set up status area in SRAM, map to Tx DMA/FIFO, activate */ + iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id), + (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) | + (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) | + (1 << SCD_QUEUE_STTS_REG_POS_WSL) | + SCD_QUEUE_STTS_REG_MSK); + + /* enable the scheduler for this queue (only) */ + if (txq_id == trans_pcie->cmd_queue && + trans_pcie->scd_set_active) + iwl_scd_enable_set_active(trans, BIT(txq_id)); + + IWL_DEBUG_TX_QUEUES(trans, + "Activate queue %d on FIFO %d WrPtr: %d\n", + txq_id, fifo, ssn & 0xff); + } else { + IWL_DEBUG_TX_QUEUES(trans, + "Activate queue %d WrPtr: %d\n", + txq_id, ssn & 0xff); + } - /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */ - iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id), - (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) | - (fifo << SCD_QUEUE_STTS_REG_POS_TXF) | - (1 << SCD_QUEUE_STTS_REG_POS_WSL) | - SCD_QUEUE_STTS_REG_MSK); - trans_pcie->txq[txq_id].active = true; - IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d WrPtr: %d\n", - txq_id, fifo, ssn & 0xff); + txq->active = true; } -void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id) +void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id, + bool configure_scd) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 stts_addr = trans_pcie->scd_base_addr + SCD_TX_STTS_QUEUE_OFFSET(txq_id); static const u32 zero_val[4] = {}; + trans_pcie->txq[txq_id].frozen_expiry_remainder = 0; + trans_pcie->txq[txq_id].frozen 
= false; + + /* + * Upon HW Rfkill - we stop the device, and then stop the queues + * in the op_mode. Just for the sake of the simplicity of the op_mode, + * allow the op_mode to call txq_disable after it already called + * stop_device. + */ if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) { - WARN_ONCE(1, "queue %d not used", txq_id); + WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status), + "queue %d not used", txq_id); return; } - iwl_pcie_txq_set_inactive(trans, txq_id); + if (configure_scd) { + iwl_scd_txq_set_inactive(trans, txq_id); - iwl_trans_write_mem(trans, stts_addr, (void *)zero_val, - ARRAY_SIZE(zero_val)); + iwl_trans_write_mem(trans, stts_addr, (void *)zero_val, + ARRAY_SIZE(zero_val)); + } iwl_pcie_txq_unmap(trans, txq_id); + trans_pcie->txq[txq_id].ampdu = false; IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id); } @@ -1152,10 +1309,10 @@ /* * iwl_pcie_enqueue_hcmd - enqueue a uCode command * @priv: device private data point - * @cmd: a point to the ucode command structure + * @cmd: a pointer to the ucode command structure * - * The function returns < 0 values to indicate the operation is - * failed. On success, it turns the index (> 0) of command in the + * The function returns < 0 values to indicate the operation + * failed. On success, it returns the index (>= 0) of command in the * command queue. */ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, @@ -1166,18 +1323,30 @@ struct iwl_queue *q = &txq->q; struct iwl_device_cmd *out_cmd; struct iwl_cmd_meta *out_meta; + unsigned long flags; void *dup_buf = NULL; dma_addr_t phys_addr; int idx; u16 copy_size, cmd_size, scratch_size; bool had_nocopy = false; - int i; + u8 group_id = iwl_cmd_groupid(cmd->id); + int i, ret; u32 cmd_pos; const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD]; u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD]; - copy_size = sizeof(out_cmd->hdr); - cmd_size = sizeof(out_cmd->hdr); + if (WARN(!trans_pcie->wide_cmd_header && + group_id > IWL_ALWAYS_LONG_GROUP, + "unsupported wide command %#x\n", cmd->id)) + return -EINVAL; + + if (group_id != 0) { + copy_size = sizeof(struct iwl_cmd_header_wide); + cmd_size = sizeof(struct iwl_cmd_header_wide); + } else { + copy_size = sizeof(struct iwl_cmd_header); + cmd_size = sizeof(struct iwl_cmd_header); + } /* need one for the header if the first is NOCOPY */ BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1); @@ -1267,46 +1436,74 @@ out_meta->source = cmd; /* set up the header */ + if (group_id != 0) { + out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id); + out_cmd->hdr_wide.group_id = group_id; + out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id); + out_cmd->hdr_wide.length = + cpu_to_le16(cmd_size - + sizeof(struct iwl_cmd_header_wide)); + out_cmd->hdr_wide.reserved = 0; + out_cmd->hdr_wide.sequence = + cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) | + INDEX_TO_SEQ(q->write_ptr)); - out_cmd->hdr.cmd = cmd->id; - out_cmd->hdr.flags = 0; - out_cmd->hdr.sequence = - cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) | - INDEX_TO_SEQ(q->write_ptr)); + cmd_pos = sizeof(struct iwl_cmd_header_wide); + copy_size = sizeof(struct iwl_cmd_header_wide); + } else { + out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id); + out_cmd->hdr.sequence = + cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) | + INDEX_TO_SEQ(q->write_ptr)); + out_cmd->hdr.group_id = 0; + + cmd_pos = sizeof(struct iwl_cmd_header); + copy_size = sizeof(struct iwl_cmd_header); + } /* and copy the data that needs to be copied */ - cmd_pos = offsetof(struct iwl_device_cmd, payload); - copy_size = 
sizeof(out_cmd->hdr); for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { - int copy = 0; + int copy; if (!cmd->len[i]) continue; - /* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */ - if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) { - copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size; - - if (copy > cmd->len[i]) - copy = cmd->len[i]; - } - /* copy everything if not nocopy/dup */ if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | - IWL_HCMD_DFL_DUP))) + IWL_HCMD_DFL_DUP))) { copy = cmd->len[i]; - if (copy) { memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy); cmd_pos += copy; copy_size += copy; + continue; + } + + /* + * Otherwise we need at least IWL_HCMD_SCRATCHBUF_SIZE copied + * in total (for the scratchbuf handling), but copy up to what + * we can fit into the payload for debug dump purposes. + */ + copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]); + + memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy); + cmd_pos += copy; + + /* However, treat copy_size the proper way, we need it below */ + if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) { + copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size; + + if (copy > cmd->len[i]) + copy = cmd->len[i]; + copy_size += copy; } } IWL_DEBUG_HC(trans, - "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", + "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", get_cmd_string(trans_pcie, out_cmd->hdr.cmd), - out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), + group_id, out_cmd->hdr.cmd, + le16_to_cpu(out_cmd->hdr.sequence), cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue); /* start the TFD with the scratchbuf */ @@ -1314,7 +1511,7 @@ memcpy(&txq->scratchbufs[idx], &out_cmd->hdr, scratch_size); iwl_pcie_txq_build_tfd(trans, txq, iwl_pcie_get_scratchbuf_dma(txq, idx), - scratch_size, 1); + scratch_size, true); /* map first command fragment, if any remains */ if (copy_size > scratch_size) { @@ -1330,7 +1527,7 @@ } iwl_pcie_txq_build_tfd(trans, txq, phys_addr, - copy_size - scratch_size, 0); + copy_size - scratch_size, false); } /* map the remaining (adjusted) nocopy/dup fragments */ @@ -1353,26 +1550,36 @@ goto out; } - iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], 0); + iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false); } + BUILD_BUG_ON(IWL_NUM_OF_TBS + CMD_TB_BITMAP_POS > + sizeof(out_meta->flags) * BITS_PER_BYTE); out_meta->flags = cmd->flags; if (WARN_ON_ONCE(txq->entries[idx].free_buf)) - kfree(txq->entries[idx].free_buf); + kzfree(txq->entries[idx].free_buf); txq->entries[idx].free_buf = dup_buf; - txq->need_update = 1; - - trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr); + trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide); /* start timer if queue currently empty */ - if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout) - mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout); + if (q->read_ptr == q->write_ptr && txq->wd_timeout) + mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); + + spin_lock_irqsave(&trans_pcie->reg_lock, flags); + ret = iwl_pcie_set_cmd_in_flight(trans, cmd); + if (ret < 0) { + idx = ret; + spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); + goto out; + } /* Increment and update queue's write index */ - q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); + q->write_ptr = iwl_queue_inc_wrap(q->write_ptr); iwl_pcie_txq_inc_wr_ptr(trans, txq); + spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); + out: spin_unlock_bh(&txq->lock); free_dup_buf: @@ -1384,15 +1591,13 @@ /* * iwl_pcie_hcmd_complete - Pull unused 
buffers off the queue and reclaim them * @rxb: Rx buffer to reclaim - * @handler_status: return value of the handler of the command - * (put in setup_rx_handlers) * * If an Rx buffer has an async callback associated with it the callback * will be executed. The attached skb (if present) will only be freed * if the callback returns 1 */ void iwl_pcie_hcmd_complete(struct iwl_trans *trans, - struct iwl_rx_cmd_buffer *rxb, int handler_status) + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); u16 sequence = le16_to_cpu(pkt->hdr.sequence); @@ -1431,18 +1636,17 @@ meta->source->resp_pkt = pkt; meta->source->_rx_page_addr = (unsigned long)page_address(p); meta->source->_rx_page_order = trans_pcie->rx_page_order; - meta->source->handler_status = handler_status; } iwl_pcie_cmdq_reclaim(trans, txq_id, index); if (!(meta->flags & CMD_ASYNC)) { - if (!test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) { + if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) { IWL_WARN(trans, "HCMD_ACTIVE already clear for command %s\n", get_cmd_string(trans_pcie, cmd->hdr.cmd)); } - clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); + clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", get_cmd_string(trans_pcie, cmd->hdr.cmd)); wake_up(&trans_pcie->wait_command_queue); @@ -1453,7 +1657,7 @@ spin_unlock_bh(&txq->lock); } -#define HOST_COMPLETE_TIMEOUT (2 * HZ) +#define HOST_COMPLETE_TIMEOUT (2 * HZ) static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd) @@ -1485,12 +1689,11 @@ IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", get_cmd_string(trans_pcie, cmd->id)); - if (WARN_ON(test_and_set_bit(STATUS_HCMD_ACTIVE, - &trans_pcie->status))) { - IWL_ERR(trans, "Command %s: a command is already active!\n", - get_cmd_string(trans_pcie, cmd->id)); + if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE, + &trans->status), + "Command %s: a command is already active!\n", + get_cmd_string(trans_pcie, cmd->id))) return -EIO; - } IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", get_cmd_string(trans_pcie, cmd->id)); @@ -1498,7 +1701,7 @@ cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd); if (cmd_idx < 0) { ret = cmd_idx; - clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); + clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n", get_cmd_string(trans_pcie, cmd->id), ret); @@ -1506,41 +1709,41 @@ } ret = wait_event_timeout(trans_pcie->wait_command_queue, - !test_bit(STATUS_HCMD_ACTIVE, - &trans_pcie->status), + !test_bit(STATUS_SYNC_HCMD_ACTIVE, + &trans->status), HOST_COMPLETE_TIMEOUT); if (!ret) { - if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) { - struct iwl_txq *txq = - &trans_pcie->txq[trans_pcie->cmd_queue]; - struct iwl_queue *q = &txq->q; - - IWL_ERR(trans, - "Error sending %s: time out after %dms.\n", - get_cmd_string(trans_pcie, cmd->id), - jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); - - IWL_ERR(trans, - "Current CMD queue read_ptr %d write_ptr %d\n", - q->read_ptr, q->write_ptr); - - clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); - IWL_DEBUG_INFO(trans, - "Clearing HCMD_ACTIVE for command %s\n", - get_cmd_string(trans_pcie, cmd->id)); - ret = -ETIMEDOUT; - goto cancel; - } + struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; + struct iwl_queue *q = &txq->q; + + IWL_ERR(trans, "Error sending %s: time out after %dms.\n", + get_cmd_string(trans_pcie, cmd->id), + jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); + 
+ IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n", + q->read_ptr, q->write_ptr); + + clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); + IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", + get_cmd_string(trans_pcie, cmd->id)); + ret = -ETIMEDOUT; + + iwl_force_nmi(trans); + iwl_trans_fw_error(trans); + + goto cancel; } - if (test_bit(STATUS_FW_ERROR, &trans_pcie->status)) { + if (test_bit(STATUS_FW_ERROR, &trans->status)) { IWL_ERR(trans, "FW error in SYNC CMD %s\n", get_cmd_string(trans_pcie, cmd->id)); + dump_stack(); ret = -EIO; goto cancel; } - if (test_bit(STATUS_RFKILL, &trans_pcie->status)) { + if (!(cmd->flags & CMD_SEND_IN_RFKILL) && + test_bit(STATUS_RFKILL, &trans->status)) { IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n"); ret = -ERFKILL; goto cancel; @@ -1577,12 +1780,8 @@ int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - - if (test_bit(STATUS_FW_ERROR, &trans_pcie->status)) - return -EIO; - - if (test_bit(STATUS_RFKILL, &trans_pcie->status)) { + if (!(cmd->flags & CMD_SEND_IN_RFKILL) && + test_bit(STATUS_RFKILL, &trans->status)) { IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n", cmd->id); return -ERFKILL; @@ -1599,7 +1798,7 @@ struct iwl_device_cmd *dev_cmd, int txq_id) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_hdr *hdr; struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; struct iwl_cmd_meta *out_meta; struct iwl_txq *txq; @@ -1607,18 +1806,30 @@ dma_addr_t tb0_phys, tb1_phys, scratch_phys; void *tb1_addr; u16 len, tb1_len, tb2_len; - u8 wait_write_ptr = 0; - __le16 fc = hdr->frame_control; - u8 hdr_len = ieee80211_hdrlen(fc); - u16 __maybe_unused wifi_seq; + bool wait_write_ptr; + __le16 fc; + u8 hdr_len; + u16 wifi_seq; + int i; txq = &trans_pcie->txq[txq_id]; q = &txq->q; - if (unlikely(!test_bit(txq_id, trans_pcie->queue_used))) { - WARN_ON_ONCE(1); + if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used), + "TX on unused queue %d\n", txq_id)) return -EINVAL; - } + + if (skb_is_nonlinear(skb) && + skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS && + __skb_linearize(skb)) + return -ENOMEM; + + /* mac80211 always puts the full header into the SKB's head, + * so there's no need to check if it's readable there + */ + hdr = (struct ieee80211_hdr *)skb->data; + fc = hdr->frame_control; + hdr_len = ieee80211_hdrlen(fc); spin_lock(&txq->lock); @@ -1627,19 +1838,16 @@ * the BA. * Check here that the packets are in the right place on the ring. 
*/ -#ifdef CONFIG_IWLWIFI_DEBUG wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); - WARN_ONCE((iwl_read_prph(trans, SCD_AGGR_SEL) & BIT(txq_id)) && - ((wifi_seq & 0xff) != q->write_ptr), + WARN_ONCE(txq->ampdu && + (wifi_seq & 0xff) != q->write_ptr, "Q: %d WiFi Seq %d tfdNum %d", txq_id, wifi_seq, q->write_ptr); -#endif /* Set up driver data for this TFD */ txq->entries[q->write_ptr].skb = skb; txq->entries[q->write_ptr].cmd = dev_cmd; - dev_cmd->hdr.cmd = REPLY_TX; dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | INDEX_TO_SEQ(q->write_ptr))); @@ -1653,6 +1861,7 @@ /* Set up first empty entry in queue's array of Tx/cmd buffers */ out_meta = &txq->entries[q->write_ptr].meta; + out_meta->flags = 0; /* * The second TB (tb1) points to the remainder of the TX command @@ -1662,7 +1871,7 @@ */ len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) + hdr_len - IWL_HCMD_SCRATCHBUF_SIZE; - tb1_len = (len + 3) & ~3; + tb1_len = ALIGN(len, 4); /* Tell NIC about any 2-byte padding after MAC header */ if (tb1_len != len) @@ -1672,7 +1881,7 @@ memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE); iwl_pcie_txq_build_tfd(trans, txq, tb0_phys, - IWL_HCMD_SCRATCHBUF_SIZE, 1); + IWL_HCMD_SCRATCHBUF_SIZE, true); /* there must be data left over for TB1 or this code must be changed */ BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_HCMD_SCRATCHBUF_SIZE); @@ -1682,13 +1891,13 @@ tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(trans->dev, tb1_phys))) goto out_err; - iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, 0); + iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false); /* * Set up TFD's third entry to point directly to remainder - * of skb, if any (802.11 null frames have no payload). 
+	 * of skb's head, if any
 	 */
-	tb2_len = skb->len - hdr_len;
+	tb2_len = skb_headlen(skb) - hdr_len;
 	if (tb2_len > 0) {
 		dma_addr_t tb2_phys = dma_map_single(trans->dev,
 						     skb->data + hdr_len,
@@ -1698,7 +1907,30 @@
 					   &txq->tfds[q->write_ptr]);
 			goto out_err;
 		}
-		iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, 0);
+		iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
+	}
+
+	/* set up the remaining entries to point to the data */
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+		dma_addr_t tb_phys;
+		int tb_idx;
+
+		if (!skb_frag_size(frag))
+			continue;
+
+		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
+					   skb_frag_size(frag), DMA_TO_DEVICE);
+
+		if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
+			iwl_pcie_tfd_unmap(trans, out_meta,
+					   &txq->tfds[q->write_ptr]);
+			goto out_err;
+		}
+		tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
+						skb_frag_size(frag), false);
+
+		out_meta->flags |= BIT(tb_idx + CMD_TB_BITMAP_POS);
 	}
 
 	/* Set up entry for this TFD in Tx byte-count array */
@@ -1710,37 +1942,43 @@
 			     &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len,
 			     skb->data + hdr_len, tb2_len);
 	trace_iwlwifi_dev_tx_data(trans->dev, skb,
-				  skb->data + hdr_len, tb2_len);
+				  hdr_len, skb->len - hdr_len);
 
-	if (!ieee80211_has_morefrags(fc)) {
-		txq->need_update = 1;
-	} else {
-		wait_write_ptr = 1;
-		txq->need_update = 0;
-	}
+	wait_write_ptr = ieee80211_has_morefrags(fc);
 
 	/* start timer if queue currently empty */
-	if (txq->need_update && q->read_ptr == q->write_ptr &&
-	    trans_pcie->wd_timeout)
-		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
+	if (q->read_ptr == q->write_ptr) {
+		if (txq->wd_timeout) {
+			/*
+			 * If the TXQ is active, then set the timer, if not,
+			 * set the timer in remainder so that the timer will
+			 * be armed with the right value when the station will
+			 * wake up.
+			 */
+			if (!txq->frozen)
+				mod_timer(&txq->stuck_timer,
+					  jiffies + txq->wd_timeout);
+			else
+				txq->frozen_expiry_remainder = txq->wd_timeout;
+		}
+		IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id);
+		iwl_trans_pcie_ref(trans);
+	}
 
 	/* Tell device the write index *just past* this latest filled TFD */
-	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
-	iwl_pcie_txq_inc_wr_ptr(trans, txq);
+	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
+	if (!wait_write_ptr)
+		iwl_pcie_txq_inc_wr_ptr(trans, txq);
 
 	/*
 	 * At this point the frame is "transmitted" successfully
-	 * and we will get a TX status notification eventually,
-	 * regardless of the value of ret. "ret" only indicates
-	 * whether or not we should update the write pointer.
+	 * and we will get a TX status notification eventually.
 	 */
 	if (iwl_queue_space(q) < q->high_mark) {
-		if (wait_write_ptr) {
-			txq->need_update = 1;
+		if (wait_write_ptr)
 			iwl_pcie_txq_inc_wr_ptr(trans, txq);
-		} else {
+		else
 			iwl_stop_queue(trans, txq);
-		}
 	}
 	spin_unlock(&txq->lock);
	return 0;
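The reworked iwl_queue_space() near the top of this patch relies on TFD_QUEUE_SIZE_MAX being a power of two: masking the unsigned difference write_ptr - read_ptr with (TFD_QUEUE_SIZE_MAX - 1) yields the number of in-flight entries even after the indexes wrap, and one slot (or the smaller n_window) is held back so a completely full ring is never mistaken for an empty one. The following is a minimal standalone sketch of that arithmetic, not part of the patch; the ring struct and the queue_used()/queue_space() helpers are made up for illustration, and only the masking scheme mirrors the driver.

#include <assert.h>

#define TFD_QUEUE_SIZE_MAX 256			/* must be a power of two */

struct ring {
	unsigned int read_ptr;			/* next entry to reclaim */
	unsigned int write_ptr;			/* next entry to fill */
};

/* Hypothetical helper mirroring the masking in iwl_queue_space(). */
static unsigned int queue_used(const struct ring *r)
{
	/* Unsigned subtraction is well defined even when write_ptr < read_ptr,
	 * so this is (write_ptr - read_ptr) mod TFD_QUEUE_SIZE_MAX. */
	return (r->write_ptr - r->read_ptr) & (TFD_QUEUE_SIZE_MAX - 1);
}

static unsigned int queue_space(const struct ring *r)
{
	/* Keep one entry in reserve so "full" never looks like "empty". */
	return (TFD_QUEUE_SIZE_MAX - 1) - queue_used(r);
}

int main(void)
{
	struct ring r = { .read_ptr = 250, .write_ptr = 4 };	/* wrapped */

	assert(queue_used(&r) == 10);		/* 256 - 250 + 4 */
	assert(queue_space(&r) == 245);
	return 0;
}

In the driver the reserve is additionally capped by q->n_window when the window is smaller than the ring, which is exactly the "max" computed at the top of the new iwl_queue_space().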