--- zzzz-none-000/linux-3.10.107/drivers/infiniband/hw/qib/qib.h 2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/drivers/infiniband/hw/qib/qib.h 2021-02-04 17:41:59.000000000 +0000
@@ -1,7 +1,7 @@
 #ifndef _QIB_KERNEL_H
 #define _QIB_KERNEL_H
 /*
- * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
  * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
@@ -51,6 +51,7 @@
 #include <linux/completion.h>
 #include <linux/kref.h>
 #include <linux/sched.h>
+#include <linux/kthread.h>
 
 #include "qib_common.h"
 #include "qib_verbs.h"
@@ -88,7 +89,6 @@
 
 extern struct qlogic_ib_stats qib_stats;
 extern const struct pci_error_handlers qib_pci_err_handler;
-extern struct pci_driver qib_driver;
 
 #define QIB_CHIP_SWVERSION QIB_CHIP_VERS_MAJ
 /*
@@ -114,6 +114,11 @@
 /*
  * Below contains all data related to a single context (formerly called port).
  */
+
+#ifdef CONFIG_DEBUG_FS
+struct qib_opcode_stats_perctx;
+#endif
+
 struct qib_ctxtdata {
 	void **rcvegrbuf;
 	dma_addr_t *rcvegrbuf_phys;
@@ -154,6 +159,8 @@
 	 */
 	/* instead of calculating it */
 	unsigned ctxt;
+	/* local node of context */
+	int node_id;
 	/* non-zero if ctxt is being shared. */
 	u16 subctxt_cnt;
 	/* non-zero if ctxt is being shared. */
@@ -222,12 +229,15 @@
 	u8 redirect_seq_cnt;
 	/* ctxt rcvhdrq head offset */
 	u32 head;
-	u32 pkt_count;
 	/* lookaside fields */
 	struct qib_qp *lookaside_qp;
 	u32 lookaside_qpn;
 	/* QPs waiting for context processing */
 	struct list_head qp_wait_list;
+#ifdef CONFIG_DEBUG_FS
+	/* verbs stats per CTX */
+	struct qib_opcode_stats_perctx *opstats;
+#endif
 };
 
 struct qib_sge_state;
@@ -428,9 +438,19 @@
 #define ACTIVITY_TIMER 5
 
 #define MAX_NAME_SIZE 64
+
+#ifdef CONFIG_INFINIBAND_QIB_DCA
+struct qib_irq_notify;
+#endif
+
 struct qib_msix_entry {
 	struct msix_entry msix;
 	void *arg;
+#ifdef CONFIG_INFINIBAND_QIB_DCA
+	int dca;
+	int rcv;
+	struct qib_irq_notify *notifier;
+#endif
 	char name[MAX_NAME_SIZE];
 	cpumask_var_t mask;
 };
@@ -555,11 +575,13 @@
 	/* read/write using lock */
 	spinlock_t sdma_lock ____cacheline_aligned_in_smp;
 	struct list_head sdma_activelist;
+	struct list_head sdma_userpending;
 	u64 sdma_descq_added;
 	u64 sdma_descq_removed;
 	u16 sdma_descq_tail;
 	u16 sdma_descq_head;
 	u8 sdma_generation;
+	u8 sdma_intrequest;
 
 	struct tasklet_struct sdma_sw_clean_up_task
 		____cacheline_aligned_in_smp;
@@ -828,6 +850,9 @@
 			struct qib_ctxtdata *);
 	void (*f_writescratch)(struct qib_devdata *, u32);
 	int (*f_tempsense_rd)(struct qib_devdata *, int regnum);
+#ifdef CONFIG_INFINIBAND_QIB_DCA
+	int (*f_notify_dca)(struct qib_devdata *, unsigned long event);
+#endif
 
 	char *boardname; /* human readable board info */
 
@@ -843,8 +868,10 @@
 	/* last buffer for user use */
 	u32 lastctxt_piobuf;
 
-	/* saturating counter of (non-port-specific) device interrupts */
-	u32 int_counter;
+	/* reset value */
+	u64 z_int_counter;
+	/* percpu intcounter */
+	u64 __percpu *int_counter;
 
 	/* pio bufs allocated per ctxt */
 	u32 pbufsctxt;
@@ -876,7 +903,7 @@
 	/* PCI Device ID (here for NodeInfo) */
 	u16 deviceid;
 	/* for write combining settings */
-	unsigned long wc_cookie;
+	int wc_cookie;
 	unsigned long wc_base;
 	unsigned long wc_len;
 
@@ -1069,6 +1096,10 @@
 	u16 psxmitwait_check_rate;
 	/* high volume overflow errors defered to tasklet */
 	struct tasklet_struct error_tasklet;
+	/* per device cq worker */
+	struct kthread_worker *worker;
+
+	int assigned_node_id; /* NUMA node closest to HCA */
 };
 
 /* hol_state values */
@@ -1105,7 +1136,6 @@
 extern u32 qib_cpulist_count;
 extern unsigned long *qib_cpulist;
 
-extern unsigned qib_wc_pat;
 extern unsigned qib_cc_table_size;
 int qib_init(struct qib_devdata *, int);
 int init_chip_wc_pat(struct qib_devdata *dd, u32);
@@ -1148,8 +1178,8 @@
 int qib_setup_eagerbufs(struct qib_ctxtdata *);
 void qib_set_ctxtcnt(struct qib_devdata *);
 int qib_create_ctxts(struct qib_devdata *dd);
-struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *, u32);
-void qib_init_pportdata(struct qib_pportdata *, struct qib_devdata *, u8, u8);
+struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *, u32, int);
+int qib_init_pportdata(struct qib_pportdata *, struct qib_devdata *, u8, u8);
 void qib_free_ctxtdata(struct qib_devdata *, struct qib_ctxtdata *);
 
 u32 qib_kreceive(struct qib_ctxtdata *, u32 *, u32 *);
@@ -1291,6 +1321,8 @@
 void qib_teardown_sdma(struct qib_pportdata *);
 void __qib_sdma_intr(struct qib_pportdata *);
 void qib_sdma_intr(struct qib_pportdata *);
+void qib_user_sdma_send_desc(struct qib_pportdata *dd,
+			struct list_head *pktlist);
 int qib_sdma_verbs_send(struct qib_pportdata *, struct qib_sge_state *,
 			u32, struct qib_verbs_txreq *);
 /* ppd->sdma_lock should be locked before calling this. */
@@ -1313,7 +1345,7 @@
 	return ppd->sdma_state.current_state == qib_sdma_state_s99_running;
 }
 int qib_sdma_running(struct qib_pportdata *);
-
+void dump_sdma_state(struct qib_pportdata *ppd);
 void __qib_sdma_process_event(struct qib_pportdata *, enum qib_sdma_events);
 void qib_sdma_process_event(struct qib_pportdata *, enum qib_sdma_events);
 
@@ -1411,6 +1443,10 @@
 void qib_nomsix(struct qib_devdata *);
 void qib_pcie_getcmd(struct qib_devdata *, u16 *, u8 *, u8 *);
 void qib_pcie_reenable(struct qib_devdata *, u16, u8, u8);
+/* interrupts for device */
+u64 qib_int_counter(struct qib_devdata *);
+/* interrupt for all devices */
+u64 qib_sps_ints(void);
 /*
  * dma_addr wrappers - all 0's invalid for hw
  */
@@ -1423,11 +1459,14 @@
  * Flush write combining store buffers (if present) and perform a write
  * barrier.
  */
+static inline void qib_flush_wc(void)
+{
 #if defined(CONFIG_X86_64)
-#define qib_flush_wc() asm volatile("sfence" : : : "memory")
+	asm volatile("sfence" : : : "memory");
 #else
-#define qib_flush_wc() wmb() /* no reorder around wc flush */
+	wmb(); /* no reorder around wc flush */
 #endif
+}
 
 /* global module parameter variables */
 extern unsigned qib_ibmtu;
@@ -1438,6 +1477,7 @@
 extern unsigned qib_sdma_fetch_arb;
 extern unsigned qib_compat_ddr_negotiate;
 extern int qib_special_trigger;
+extern unsigned qib_numa_aware;
 
 extern struct mutex qib_mutex;
 
@@ -1481,6 +1521,7 @@
 	dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \
 		qib_get_unit_name((dd)->unit), (dd)->unit, (port), \
 		##__VA_ARGS__)
+
 #define qib_devinfo(pcidev, fmt, ...) \
 	dev_info(&(pcidev)->dev, fmt, ##__VA_ARGS__)