--- zzzz-none-000/linux-3.10.107/drivers/infiniband/hw/cxgb4/t4.h	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/drivers/infiniband/hw/cxgb4/t4.h	2021-02-04 17:41:59.000000000 +0000
@@ -33,26 +33,16 @@
 
 #include "t4_hw.h"
 #include "t4_regs.h"
+#include "t4_values.h"
 #include "t4_msg.h"
 #include "t4fw_ri_api.h"
 
-#define T4_MAX_NUM_QP (1<<16)
-#define T4_MAX_NUM_CQ (1<<15)
-#define T4_MAX_NUM_PD (1<<15)
-#define T4_EQ_STATUS_ENTRIES (L1_CACHE_BYTES > 64 ? 2 : 1)
-#define T4_MAX_EQ_SIZE (65520 - T4_EQ_STATUS_ENTRIES)
-#define T4_MAX_IQ_SIZE (65520 - 1)
-#define T4_MAX_RQ_SIZE (8192 - T4_EQ_STATUS_ENTRIES)
-#define T4_MAX_SQ_SIZE (T4_MAX_EQ_SIZE - 1)
-#define T4_MAX_QP_DEPTH (T4_MAX_RQ_SIZE - 1)
-#define T4_MAX_CQ_DEPTH (T4_MAX_IQ_SIZE - 1)
-#define T4_MAX_NUM_STAG (1<<15)
-#define T4_MAX_MR_SIZE (~0ULL - 1)
+#define T4_MAX_NUM_PD 65536
+#define T4_MAX_MR_SIZE (~0ULL)
 #define T4_PAGESIZE_MASK 0xffff000  /* 4KB-128MB */
 #define T4_STAG_UNSET 0xffffffff
 #define T4_FW_MAJ 0
-#define T4_EQ_STATUS_ENTRIES (L1_CACHE_BYTES > 64 ? 2 : 1)
-#define A_PCIE_MA_SYNC 0x30b4
+#define PCIE_MA_SYNC_A 0x30b4
 
 struct t4_status_page {
 	__be32 rsvd1;	/* flit 0 - hw owns */
@@ -84,7 +74,14 @@
 			sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
 #define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - \
 			sizeof(struct fw_ri_immd)) & ~31UL)
-#define T4_MAX_FR_DEPTH (1024 / sizeof(u64))
+#define T4_MAX_FR_IMMD_DEPTH (T4_MAX_FR_IMMD / sizeof(u64))
+#define T4_MAX_FR_DSGL 1024
+#define T4_MAX_FR_DSGL_DEPTH (T4_MAX_FR_DSGL / sizeof(u64))
+
+static inline int t4_max_fr_depth(int use_dsgl)
+{
+	return use_dsgl ? T4_MAX_FR_DSGL_DEPTH : T4_MAX_FR_IMMD_DEPTH;
+}
 
 #define T4_RQ_NUM_SLOTS 2
 #define T4_RQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_RQ_NUM_SLOTS)
@@ -188,44 +185,44 @@
 };
 
 /* macros for flit 0 of the cqe */
-#define S_CQE_QPID        12
-#define M_CQE_QPID        0xFFFFF
-#define G_CQE_QPID(x)     ((((x) >> S_CQE_QPID)) & M_CQE_QPID)
-#define V_CQE_QPID(x)     ((x)<<S_CQE_QPID)
-
-#define S_CQE_SWCQE       24
-#define M_CQE_SWCQE       0x1
-#define G_CQE_SWCQE(x)    ((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE)
-#define V_CQE_SWCQE(x)    ((x)<<S_CQE_SWCQE)
-
-#define S_CQE_STATUS      5
-#define M_CQE_STATUS      0x1F
-#define G_CQE_STATUS(x)   ((((x) >> S_CQE_STATUS)) & M_CQE_STATUS)
-#define V_CQE_STATUS(x)   ((x)<<S_CQE_STATUS)
-
-#define S_CQE_TYPE        4
-#define M_CQE_TYPE        0x1
-#define G_CQE_TYPE(x)     ((((x) >> S_CQE_TYPE)) & M_CQE_TYPE)
-#define V_CQE_TYPE(x)     ((x)<<S_CQE_TYPE)
-
-#define S_CQE_OPCODE      0
-#define M_CQE_OPCODE      0xF
-#define G_CQE_OPCODE(x)   ((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE)
-#define V_CQE_OPCODE(x)   ((x)<<S_CQE_OPCODE)
-
-#define SW_CQE(x)         (G_CQE_SWCQE(be32_to_cpu((x)->header)))
-#define CQE_QPID(x)       (G_CQE_QPID(be32_to_cpu((x)->header)))
-#define CQE_TYPE(x)       (G_CQE_TYPE(be32_to_cpu((x)->header)))
+#define CQE_QPID_S        12
+#define CQE_QPID_M        0xFFFFF
+#define CQE_QPID_G(x)     ((((x) >> CQE_QPID_S)) & CQE_QPID_M)
+#define CQE_QPID_V(x)     ((x)<<CQE_QPID_S)
+
+#define CQE_SWCQE_S       24
+#define CQE_SWCQE_M       0x1
+#define CQE_SWCQE_G(x)    ((((x) >> CQE_SWCQE_S)) & CQE_SWCQE_M)
+#define CQE_SWCQE_V(x)    ((x)<<CQE_SWCQE_S)
+
+#define CQE_STATUS_S      5
+#define CQE_STATUS_M      0x1F
+#define CQE_STATUS_G(x)   ((((x) >> CQE_STATUS_S)) & CQE_STATUS_M)
+#define CQE_STATUS_V(x)   ((x)<<CQE_STATUS_S)
+
+#define CQE_TYPE_S        4
+#define CQE_TYPE_M        0x1
+#define CQE_TYPE_G(x)     ((((x) >> CQE_TYPE_S)) & CQE_TYPE_M)
+#define CQE_TYPE_V(x)     ((x)<<CQE_TYPE_S)
+
+#define CQE_OPCODE_S      0
+#define CQE_OPCODE_M      0xF
+#define CQE_OPCODE_G(x)   ((((x) >> CQE_OPCODE_S)) & CQE_OPCODE_M)
+#define CQE_OPCODE_V(x)   ((x)<<CQE_OPCODE_S)
+
+#define SW_CQE(x)         (CQE_SWCQE_G(be32_to_cpu((x)->header)))
+#define CQE_QPID(x)       (CQE_QPID_G(be32_to_cpu((x)->header)))
+#define CQE_TYPE(x)       (CQE_TYPE_G(be32_to_cpu((x)->header)))
 #define SQ_TYPE(x)	  (CQE_TYPE((x)))
 #define RQ_TYPE(x)	  (!CQE_TYPE((x)))
-#define CQE_STATUS(x)     (G_CQE_STATUS(be32_to_cpu((x)->header)))
-#define CQE_OPCODE(x)     (G_CQE_OPCODE(be32_to_cpu((x)->header)))
+#define CQE_STATUS(x)     (CQE_STATUS_G(be32_to_cpu((x)->header)))
+#define CQE_OPCODE(x)     (CQE_OPCODE_G(be32_to_cpu((x)->header)))
 
 #define CQE_SEND_OPCODE(x)( \
-	(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND) || \
-	(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE) || \
-	(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_INV) || \
-	(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE_INV))
+	(CQE_OPCODE_G(be32_to_cpu((x)->header)) == FW_RI_SEND) || \
+	(CQE_OPCODE_G(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE) || \
+	(CQE_OPCODE_G(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_INV) || \
+	(CQE_OPCODE_G(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE_INV))
 
 #define CQE_LEN(x)        (be32_to_cpu((x)->len))
 
@@ -237,29 +234,29 @@
 #define CQE_WRID_SQ_IDX(x)	((x)->u.scqe.cidx)
 
 /* generic accessor macros */
-#define CQE_WRID_HI(x)		((x)->u.gen.wrid_hi)
-#define CQE_WRID_LOW(x)		((x)->u.gen.wrid_low)
+#define CQE_WRID_HI(x)		(be32_to_cpu((x)->u.gen.wrid_hi))
+#define CQE_WRID_LOW(x)		(be32_to_cpu((x)->u.gen.wrid_low))
 
 /* macros for flit 3 of the cqe */
-#define S_CQE_GENBIT	63
-#define M_CQE_GENBIT	0x1
-#define G_CQE_GENBIT(x)	(((x) >> S_CQE_GENBIT) & M_CQE_GENBIT)
-#define V_CQE_GENBIT(x) ((x)<<S_CQE_GENBIT)
-
-#define S_CQE_OVFBIT	62
-#define M_CQE_OVFBIT	0x1
-#define G_CQE_OVFBIT(x)	((((x) >> S_CQE_OVFBIT)) & M_CQE_OVFBIT)
-
-#define S_CQE_IQTYPE	60
-#define M_CQE_IQTYPE	0x3
-#define G_CQE_IQTYPE(x)	((((x) >> S_CQE_IQTYPE)) & M_CQE_IQTYPE)
-
-#define M_CQE_TS	0x0fffffffffffffffULL
-#define G_CQE_TS(x)	((x) & M_CQE_TS)
-
-#define CQE_OVFBIT(x)	((unsigned)G_CQE_OVFBIT(be64_to_cpu((x)->bits_type_ts)))
-#define CQE_GENBIT(x)	((unsigned)G_CQE_GENBIT(be64_to_cpu((x)->bits_type_ts)))
-#define CQE_TS(x)	(G_CQE_TS(be64_to_cpu((x)->bits_type_ts)))
+#define CQE_GENBIT_S	63
+#define CQE_GENBIT_M	0x1
+#define CQE_GENBIT_G(x)	(((x) >> CQE_GENBIT_S) & CQE_GENBIT_M)
+#define CQE_GENBIT_V(x) ((x)<<CQE_GENBIT_S)
+
+#define CQE_OVFBIT_S	62
+#define CQE_OVFBIT_M	0x1
+#define CQE_OVFBIT_G(x)	((((x) >> CQE_OVFBIT_S)) & CQE_OVFBIT_M)
+
+#define CQE_IQTYPE_S	60
+#define CQE_IQTYPE_M	0x3
+#define CQE_IQTYPE_G(x)	((((x) >> CQE_IQTYPE_S)) & CQE_IQTYPE_M)
+
+#define CQE_TS_M	0x0fffffffffffffffULL
+#define CQE_TS_G(x)	((x) & CQE_TS_M)
+
+#define CQE_OVFBIT(x)	((unsigned)CQE_OVFBIT_G(be64_to_cpu((x)->bits_type_ts)))
+#define CQE_GENBIT(x)	((unsigned)CQE_GENBIT_G(be64_to_cpu((x)->bits_type_ts)))
+#define CQE_TS(x)	(CQE_TS_G(be64_to_cpu((x)->bits_type_ts)))
 
 struct t4_swsqe {
 	u64			wr_id;
@@ -269,6 +266,9 @@
 	int			complete;
 	int			signaled;
 	u16			idx;
+	int			flushed;
+	struct timespec		host_ts;
+	u64			sge_ts;
 };
 
 static inline pgprot_t t4_pgprot_wc(pgprot_t prot)
@@ -291,19 +291,25 @@
 	unsigned long phys_addr;
 	struct t4_swsqe *sw_sq;
 	struct t4_swsqe *oldest_read;
-	u64 udb;
+	void __iomem *bar2_va;
+	u64 bar2_pa;
 	size_t memsize;
+	u32 bar2_qid;
 	u32 qid;
 	u16 in_use;
 	u16 size;
 	u16 cidx;
 	u16 pidx;
 	u16 wq_pidx;
+	u16 wq_pidx_inc;
 	u16 flags;
+	short flush_cidx;
 };
 
 struct t4_swrqe {
 	u64 wr_id;
+	struct timespec host_ts;
+	u64 sge_ts;
 };
 
 struct t4_rq {
@@ -311,8 +317,10 @@
 	dma_addr_t dma_addr;
 	DEFINE_DMA_UNMAP_ADDR(mapping);
 	struct t4_swrqe *sw_rq;
-	u64 udb;
+	void __iomem *bar2_va;
+	u64 bar2_pa;
 	size_t memsize;
+	u32 bar2_qid;
 	u32 qid;
 	u32 msn;
 	u32 rqt_hwaddr;
@@ -322,14 +330,15 @@
 	u16 cidx;
 	u16 pidx;
 	u16 wq_pidx;
+	u16 wq_pidx_inc;
 };
 
 struct t4_wq {
 	struct t4_sq sq;
 	struct t4_rq rq;
 	void __iomem *db;
-	void __iomem *gts;
 	struct c4iw_rdev *rdev;
+	int flushed;
 };
 
 static inline int t4_rqes_posted(struct t4_wq *wq)
@@ -412,6 +421,9 @@
 
 static inline void t4_sq_consume(struct t4_wq *wq)
 {
+	BUG_ON(wq->sq.in_use < 1);
+	if (wq->sq.cidx == wq->sq.flush_cidx)
+		wq->sq.flush_cidx = -1;
 	wq->sq.in_use--;
 	if (++wq->sq.cidx == wq->sq.size)
 		wq->sq.cidx = 0;
@@ -427,16 +439,73 @@
 	return wq->sq.size * T4_SQ_NUM_SLOTS;
 }
 
-static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc)
+/* This function copies 64 byte coalesced work request to memory
+ * mapped BAR2 space. For coalesced WRs, the SGE fetches data
+ * from the FIFO instead of from Host.
+ */
+static inline void pio_copy(u64 __iomem *dst, u64 *src)
+{
+	int count = 8;
+
+	while (count) {
+		writeq(*src, dst);
+		src++;
+		dst++;
+		count--;
+	}
+}
+
+static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, union t4_wr *wqe)
 {
+
+	/* Flush host queue memory writes. */
 	wmb();
-	writel(QID(wq->sq.qid) | PIDX(inc), wq->db);
+	if (wq->sq.bar2_va) {
+		if (inc == 1 && wq->sq.bar2_qid == 0 && wqe) {
+			PDBG("%s: WC wq->sq.pidx = %d\n",
+			     __func__, wq->sq.pidx);
+			pio_copy((u64 __iomem *)
+				 (wq->sq.bar2_va + SGE_UDB_WCDOORBELL),
+				 (u64 *)wqe);
+		} else {
+			PDBG("%s: DB wq->sq.pidx = %d\n",
+			     __func__, wq->sq.pidx);
+			writel(PIDX_T5_V(inc) | QID_V(wq->sq.bar2_qid),
+			       wq->sq.bar2_va + SGE_UDB_KDOORBELL);
+		}
+
+		/* Flush user doorbell area writes. */
+		wmb();
+		return;
+	}
+	writel(QID_V(wq->sq.qid) | PIDX_V(inc), wq->db);
 }
 
-static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc)
+static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc,
+				 union t4_recv_wr *wqe)
 {
+
+	/* Flush host queue memory writes. */
 	wmb();
-	writel(QID(wq->rq.qid) | PIDX(inc), wq->db);
+	if (wq->rq.bar2_va) {
+		if (inc == 1 && wq->rq.bar2_qid == 0 && wqe) {
+			PDBG("%s: WC wq->rq.pidx = %d\n",
+			     __func__, wq->rq.pidx);
+			pio_copy((u64 __iomem *)
+				 (wq->rq.bar2_va + SGE_UDB_WCDOORBELL),
+				 (void *)wqe);
+		} else {
+			PDBG("%s: DB wq->rq.pidx = %d\n",
+			     __func__, wq->rq.pidx);
+			writel(PIDX_T5_V(inc) | QID_V(wq->rq.bar2_qid),
+			       wq->rq.bar2_va + SGE_UDB_KDOORBELL);
+		}
+
+		/* Flush user doorbell area writes. */
+		wmb();
+		return;
+	}
+	writel(QID_V(wq->rq.qid) | PIDX_V(inc), wq->db);
 }
 
 static inline int t4_wq_in_error(struct t4_wq *wq)
@@ -464,17 +533,25 @@
 	return !wq->rq.queue[wq->rq.size].status.db_off;
 }
 
+enum t4_cq_flags {
+	CQ_ARMED	= 1,
+};
+
 struct t4_cq {
 	struct t4_cqe *queue;
 	dma_addr_t dma_addr;
 	DEFINE_DMA_UNMAP_ADDR(mapping);
 	struct t4_cqe *sw_queue;
 	void __iomem *gts;
+	void __iomem *bar2_va;
+	u64 bar2_pa;
+	u32 bar2_qid;
 	struct c4iw_rdev *rdev;
-	u64 ugts;
 	size_t memsize;
 	__be64 bits_type_ts;
 	u32 cqid;
+	u32 qid_mask;
+	int vector;
 	u16 size; /* including status page */
 	u16 cidx;
 	u16 sw_pidx;
@@ -483,21 +560,35 @@
 	u16 cidx_inc;
 	u8 gen;
 	u8 error;
+	unsigned long flags;
 };
 
+static inline void write_gts(struct t4_cq *cq, u32 val)
+{
+	if (cq->bar2_va)
+		writel(val | INGRESSQID_V(cq->bar2_qid),
+		       cq->bar2_va + SGE_UDB_GTS);
+	else
+		writel(val | INGRESSQID_V(cq->cqid), cq->gts);
+}
+
+static inline int t4_clear_cq_armed(struct t4_cq *cq)
+{
+	return test_and_clear_bit(CQ_ARMED, &cq->flags);
+}
+
 static inline int t4_arm_cq(struct t4_cq *cq, int se)
 {
 	u32 val;
 
-	while (cq->cidx_inc > CIDXINC_MASK) {
-		val = SEINTARM(0) | CIDXINC(CIDXINC_MASK) | TIMERREG(7) |
-		      INGRESSQID(cq->cqid);
-		writel(val, cq->gts);
-		cq->cidx_inc -= CIDXINC_MASK;
+	set_bit(CQ_ARMED, &cq->flags);
+	while (cq->cidx_inc > CIDXINC_M) {
+		val = SEINTARM_V(0) | CIDXINC_V(CIDXINC_M) | TIMERREG_V(7);
+		write_gts(cq, val);
+		cq->cidx_inc -= CIDXINC_M;
 	}
-	val = SEINTARM(se) | CIDXINC(cq->cidx_inc) | TIMERREG(6) |
-	      INGRESSQID(cq->cqid);
-	writel(val, cq->gts);
+	val = SEINTARM_V(se) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(6);
+	write_gts(cq, val);
 	cq->cidx_inc = 0;
 	return 0;
 }
@@ -505,12 +596,18 @@
 static inline void t4_swcq_produce(struct t4_cq *cq)
 {
 	cq->sw_in_use++;
+	if (cq->sw_in_use == cq->size) {
+		PDBG("%s cxgb4 sw cq overflow cqid %u\n", __func__, cq->cqid);
+		cq->error = 1;
+		BUG_ON(1);
+	}
 	if (++cq->sw_pidx == cq->size)
 		cq->sw_pidx = 0;
 }
 
 static inline void t4_swcq_consume(struct t4_cq *cq)
 {
+	BUG_ON(cq->sw_in_use < 1);
 	cq->sw_in_use--;
 	if (++cq->sw_cidx == cq->size)
 		cq->sw_cidx = 0;
@@ -519,12 +616,11 @@
 static inline void t4_hwcq_consume(struct t4_cq *cq)
 {
 	cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts;
-	if (++cq->cidx_inc == (cq->size >> 4)) {
+	if (++cq->cidx_inc == (cq->size >> 4) || cq->cidx_inc == CIDXINC_M) {
 		u32 val;
 
-		val = SEINTARM(0) | CIDXINC(cq->cidx_inc) | TIMERREG(7) |
-		      INGRESSQID(cq->cqid);
-		writel(val, cq->gts);
+		val = SEINTARM_V(0) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(7);
+		write_gts(cq, val);
 		cq->cidx_inc = 0;
 	}
 	if (++cq->cidx == cq->size) {
@@ -552,7 +648,11 @@
 		ret = -EOVERFLOW;
 		cq->error = 1;
 		printk(KERN_ERR MOD "cq overflow cqid %u\n", cq->cqid);
+		BUG_ON(1);
 	} else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) {
+
+		/* Ensure CQE is flushed to memory */
+		rmb();
 		*cqe = &cq->queue[cq->cidx];
 		ret = 0;
 	} else
@@ -562,6 +662,12 @@
 
 static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq)
 {
+	if (cq->sw_in_use == cq->size) {
+		PDBG("%s cxgb4 sw cq overflow cqid %u\n", __func__, cq->cqid);
+		cq->error = 1;
+		BUG_ON(1);
+		return NULL;
+	}
	if (cq->sw_in_use)
 		return &cq->sw_queue[cq->sw_cidx];
 	return NULL;
@@ -590,3 +696,7 @@
 	((struct t4_status_page *)&cq->queue[cq->size])->qp_err = 1;
 }
 #endif
+
+struct t4_dev_status_page {
+	u8 db_off;
+};
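
For reference, the register helpers renamed throughout this patch (S_FOO/M_FOO/G_FOO/V_FOO becoming FOO_S/FOO_M/FOO_G/FOO_V) all follow one pattern: _S is the field's bit offset, _M is the field mask applied after shifting, _G(x) extracts a field from a word, and _V(x) shifts a value into position. The following minimal user-space sketch of that pattern is an illustration only, not part of the patch; the QPID and opcode values are invented, and in the driver the CQE header is big-endian, hence the be32_to_cpu() wrapping in the real accessors.

#include <stdio.h>
#include <stdint.h>

/* Same shift/mask pattern as the CQE flit-0 macros above. */
#define CQE_QPID_S      12
#define CQE_QPID_M      0xFFFFF
#define CQE_QPID_G(x)   ((((x) >> CQE_QPID_S)) & CQE_QPID_M)
#define CQE_QPID_V(x)   ((x) << CQE_QPID_S)

#define CQE_OPCODE_S    0
#define CQE_OPCODE_M    0xF
#define CQE_OPCODE_G(x) ((((x) >> CQE_OPCODE_S)) & CQE_OPCODE_M)
#define CQE_OPCODE_V(x) ((x) << CQE_OPCODE_S)

int main(void)
{
	/* Invented QPID and opcode, packed into a host-order header word. */
	uint32_t header = CQE_QPID_V(0x1234u) | CQE_OPCODE_V(0x8u);

	/* Extract the fields back out; prints qpid=0x1234 opcode=0x8. */
	printf("qpid=0x%x opcode=0x%x\n",
	       CQE_QPID_G(header), CQE_OPCODE_G(header));
	return 0;
}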
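The substantive change in this patch is the doorbell path: when a queue has a BAR2 mapping, a single WR is being posted (inc == 1), and the queue's BAR2 qid is 0, t4_ring_sq_db()/t4_ring_rq_db() push the entire 64-byte request through the write-combined doorbell window via pio_copy() (eight 64-bit stores), so the SGE fetches the WR from its FIFO rather than DMA-reading host memory; otherwise they fall back to a plain producer-index doorbell write. Below is a user-space analog of just the copy loop, with ordinary buffers standing in for the real ioremap_wc() BAR2 window; it is a sketch, not driver code.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Stand-in for pio_copy(): eight 64-bit stores move one 64-byte
 * coalesced WR. In the driver, dst is the write-combined BAR2
 * doorbell window and each store is a writeq().
 */
static void pio_copy_sim(volatile uint64_t *dst, const uint64_t *src)
{
	int count = 8;

	while (count--)
		*dst++ = *src++;
}

int main(void)
{
	uint64_t wqe[8];	/* one 64-byte work request */
	uint64_t window[8];	/* pretend BAR2 WC doorbell window */

	for (int i = 0; i < 8; i++)
		wqe[i] = 0x0101010101010101ULL * (uint64_t)(i + 1);

	pio_copy_sim(window, wqe);
	printf("copied %zu bytes: %s\n", sizeof(wqe),
	       memcmp(window, wqe, sizeof(wqe)) ? "mismatch" : "ok");
	return 0;
}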