--- zzzz-none-000/linux-4.4.271/drivers/net/wireless/ath/wil6210/pmc.c	2021-06-03 06:22:09.000000000 +0000
+++ hawkeye-5590-750/linux-4.4.271/drivers/net/wireless/ath/wil6210/pmc.c	2023-04-19 10:22:29.000000000 +0000
@@ -1,5 +1,6 @@
 /*
- * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2015,2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018,2019 The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -17,6 +18,7 @@
 #include <linux/types.h>
 #include <linux/errno.h>
 #include <linux/fs.h>
+#include <linux/seq_file.h>
 #include "wmi.h"
 #include "wil6210.h"
 #include "txrx.h"
@@ -53,54 +55,93 @@
 	u32 i;
 	struct pmc_ctx *pmc = &wil->pmc;
 	struct device *dev = wil_to_dev(wil);
+	struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
 	struct wmi_pmc_cmd pmc_cmd = {0};
+	int last_cmd_err = -ENOMEM;
 
 	mutex_lock(&pmc->lock);
 
 	if (wil_is_pmc_allocated(pmc)) {
 		/* sanity check */
-		wil_err(wil, "%s: ERROR pmc is already allocated\n", __func__);
+		wil_err(wil, "ERROR pmc is already allocated\n");
+		goto no_release_err;
+	}
+	if ((num_descriptors <= 0) || (descriptor_size <= 0)) {
+		wil_err(wil,
+			"Invalid params num_descriptors(%d), descriptor_size(%d)\n",
+			num_descriptors, descriptor_size);
+		last_cmd_err = -EINVAL;
+		goto no_release_err;
+	}
+
+	if (num_descriptors > (1 << WIL_RING_SIZE_ORDER_MAX)) {
+		wil_err(wil,
+			"num_descriptors(%d) exceeds max ring size %d\n",
+			num_descriptors, 1 << WIL_RING_SIZE_ORDER_MAX);
+		last_cmd_err = -EINVAL;
+		goto no_release_err;
+	}
+
+	if (num_descriptors > INT_MAX / descriptor_size) {
+		wil_err(wil,
+			"Overflow in num_descriptors(%d)*descriptor_size(%d)\n",
+			num_descriptors, descriptor_size);
+		last_cmd_err = -EINVAL;
 		goto no_release_err;
 	}
 
 	pmc->num_descriptors = num_descriptors;
 	pmc->descriptor_size = descriptor_size;
 
-	wil_dbg_misc(wil, "%s: %d descriptors x %d bytes each\n",
-		     __func__, num_descriptors, descriptor_size);
+	wil_dbg_misc(wil, "pmc_alloc: %d descriptors x %d bytes each\n",
+		     num_descriptors, descriptor_size);
 
 	/* allocate descriptors info list in pmc context*/
 	pmc->descriptors = kcalloc(num_descriptors,
 				   sizeof(struct desc_alloc_info),
 				   GFP_KERNEL);
 	if (!pmc->descriptors) {
-		wil_err(wil, "%s: ERROR allocating pmc skb list\n", __func__);
+		wil_err(wil, "ERROR allocating pmc skb list\n");
 		goto no_release_err;
 	}
 
-	wil_dbg_misc(wil,
-		     "%s: allocated descriptors info list %p\n",
-		     __func__, pmc->descriptors);
+	wil_dbg_misc(wil, "pmc_alloc: allocated descriptors info list %p\n",
+		     pmc->descriptors);
 
 	/* Allocate pring buffer and descriptors.
 	 * vring->va should be aligned on its size rounded up to power of 2
-	 * This is granted by the dma_alloc_coherent
+	 * This is granted by the dma_alloc_coherent.
+	 *
+	 * HW has limitation that all vrings addresses must share the same
+	 * upper 16 msb bits part of 48 bits address. To workaround that,
+	 * if we are using more than 32 bit addresses switch to 32 bit
+	 * allocation before allocating vring memory.
+	 *
+	 * There's no check for the return value of dma_set_mask_and_coherent,
+	 * since we assume if we were able to set the mask during
+	 * initialization in this system it will not fail if we set it again
 	 */
+	if (wil->dma_addr_size > 32)
+		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+
 	pmc->pring_va = dma_alloc_coherent(dev,
 			sizeof(struct vring_tx_desc) * num_descriptors,
 			&pmc->pring_pa,
 			GFP_KERNEL);
 
+	if (wil->dma_addr_size > 32)
+		dma_set_mask_and_coherent(dev,
+					  DMA_BIT_MASK(wil->dma_addr_size));
+
 	wil_dbg_misc(wil,
-		     "%s: allocated pring %p => %pad. %zd x %d = total %zd bytes\n",
-		     __func__,
+		     "pmc_alloc: allocated pring %p => %pad. %zd x %d = total %zd bytes\n",
 		     pmc->pring_va, &pmc->pring_pa,
 		     sizeof(struct vring_tx_desc),
 		     num_descriptors,
 		     sizeof(struct vring_tx_desc) * num_descriptors);
 
 	if (!pmc->pring_va) {
-		wil_err(wil, "%s: ERROR allocating pmc pring\n", __func__);
+		wil_err(wil, "ERROR allocating pmc pring\n");
 		goto release_pmc_skb_list;
 	}
 
@@ -119,9 +160,7 @@
 			GFP_KERNEL);
 
 		if (unlikely(!pmc->descriptors[i].va)) {
-			wil_err(wil,
-				"%s: ERROR allocating pmc descriptor %d",
-				__func__, i);
+			wil_err(wil, "ERROR allocating pmc descriptor %d", i);
 			goto release_pmc_skbs;
 		}
 
@@ -141,21 +180,22 @@
 		*_d = *d;
 	}
 
-	wil_dbg_misc(wil, "%s: allocated successfully\n", __func__);
+	wil_dbg_misc(wil, "pmc_alloc: allocated successfully\n");
 
 	pmc_cmd.op = WMI_PMC_ALLOCATE;
 	pmc_cmd.ring_size = cpu_to_le16(pmc->num_descriptors);
 	pmc_cmd.mem_base = cpu_to_le64(pmc->pring_pa);
 
-	wil_dbg_misc(wil, "%s: send WMI_PMC_CMD with ALLOCATE op\n", __func__);
+	wil_dbg_misc(wil, "pmc_alloc: send WMI_PMC_CMD with ALLOCATE op\n");
 	pmc->last_cmd_status = wmi_send(wil,
 					WMI_PMC_CMDID,
+					vif->mid,
 					&pmc_cmd,
 					sizeof(pmc_cmd));
 	if (pmc->last_cmd_status) {
 		wil_err(wil,
-			"%s: WMI_PMC_CMD with ALLOCATE op failed with status %d",
-			__func__, pmc->last_cmd_status);
+			"WMI_PMC_CMD with ALLOCATE op failed with status %d",
+			pmc->last_cmd_status);
 		goto release_pmc_skbs;
 	}
 
@@ -164,8 +204,8 @@
 	return;
 
 release_pmc_skbs:
-	wil_err(wil, "%s: exit on error: Releasing skbs...\n", __func__);
-	for (i = 0; pmc->descriptors[i].va && i < num_descriptors; i++) {
+	wil_err(wil, "exit on error: Releasing skbs...\n");
+	for (i = 0; i < num_descriptors && pmc->descriptors[i].va; i++) {
 		dma_free_coherent(dev,
 				  descriptor_size,
 				  pmc->descriptors[i].va,
@@ -173,7 +213,7 @@
 		pmc->descriptors[i].va = NULL;
 	}
 
-	wil_err(wil, "%s: exit on error: Releasing pring...\n", __func__);
+	wil_err(wil, "exit on error: Releasing pring...\n");
 
 	dma_free_coherent(dev,
 			  sizeof(struct vring_tx_desc) * num_descriptors,
@@ -183,13 +223,12 @@
 	pmc->pring_va = NULL;
 
 release_pmc_skb_list:
-	wil_err(wil, "%s: exit on error: Releasing descriptors info list...\n",
-		__func__);
+	wil_err(wil, "exit on error: Releasing descriptors info list...\n");
 	kfree(pmc->descriptors);
 	pmc->descriptors = NULL;
 
 no_release_err:
-	pmc->last_cmd_status = -ENOMEM;
+	pmc->last_cmd_status = last_cmd_err;
 	mutex_unlock(&pmc->lock);
 }
 
@@ -201,6 +240,7 @@
 {
 	struct pmc_ctx *pmc = &wil->pmc;
 	struct device *dev = wil_to_dev(wil);
+	struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
 	struct wmi_pmc_cmd pmc_cmd = {0};
 
 	mutex_lock(&pmc->lock);
@@ -208,24 +248,23 @@
 	pmc->last_cmd_status = 0;
 
 	if (!wil_is_pmc_allocated(pmc)) {
-		wil_dbg_misc(wil, "%s: Error, can't free - not allocated\n",
-			     __func__);
+		wil_dbg_misc(wil,
+			     "pmc_free: Error, can't free - not allocated\n");
 		pmc->last_cmd_status = -EPERM;
 		mutex_unlock(&pmc->lock);
 		return;
 	}
 
 	if (send_pmc_cmd) {
-		wil_dbg_misc(wil, "%s: send WMI_PMC_CMD with RELEASE op\n",
-			     __func__);
+		wil_dbg_misc(wil, "send WMI_PMC_CMD with RELEASE op\n");
 		pmc_cmd.op = WMI_PMC_RELEASE;
 		pmc->last_cmd_status =
-			wmi_send(wil, WMI_PMC_CMDID, &pmc_cmd,
-				 sizeof(pmc_cmd));
+			wmi_send(wil, WMI_PMC_CMDID, vif->mid,
+				 &pmc_cmd, sizeof(pmc_cmd));
		if (pmc->last_cmd_status) {
 			wil_err(wil,
-				"%s WMI_PMC_CMD with RELEASE op failed, status %d",
-				__func__, pmc->last_cmd_status);
+				"WMI_PMC_CMD with RELEASE op failed, status %d",
+				pmc->last_cmd_status);
 			/* There's nothing we can do with this error.
 			 * Normally, it should never occur.
 			 * Continue to freeing all memory allocated for pmc.
@@ -237,8 +276,8 @@
 		size_t buf_size = sizeof(struct vring_tx_desc) *
 				  pmc->num_descriptors;
 
-		wil_dbg_misc(wil, "%s: free pring va %p\n",
-			     __func__, pmc->pring_va);
+		wil_dbg_misc(wil, "pmc_free: free pring va %p\n",
+			     pmc->pring_va);
 		dma_free_coherent(dev, buf_size, pmc->pring_va, pmc->pring_pa);
 
 		pmc->pring_va = NULL;
@@ -250,18 +289,18 @@
 		int i;
 
 		for (i = 0;
-		     pmc->descriptors[i].va && i < pmc->num_descriptors; i++) {
+		     i < pmc->num_descriptors && pmc->descriptors[i].va; i++) {
 			dma_free_coherent(dev,
 					  pmc->descriptor_size,
 					  pmc->descriptors[i].va,
 					  pmc->descriptors[i].pa);
 			pmc->descriptors[i].va = NULL;
 		}
-		wil_dbg_misc(wil, "%s: free descriptor info %d/%d\n",
-			     __func__, i, pmc->num_descriptors);
+		wil_dbg_misc(wil, "pmc_free: free descriptor info %d/%d\n", i,
+			     pmc->num_descriptors);
 		wil_dbg_misc(wil,
-			     "%s: free pmc descriptors info list %p\n",
-			     __func__, pmc->descriptors);
+			     "pmc_free: free pmc descriptors info list %p\n",
+			     pmc->descriptors);
 		kfree(pmc->descriptors);
 		pmc->descriptors = NULL;
 	} else {
@@ -277,7 +316,7 @@
  */
 int wil_pmc_last_cmd_status(struct wil6210_priv *wil)
 {
-	wil_dbg_misc(wil, "%s: status %d\n", __func__,
+	wil_dbg_misc(wil, "pmc_last_cmd_status: status %d\n",
 		     wil->pmc.last_cmd_status);
 
 	return wil->pmc.last_cmd_status;
@@ -295,20 +334,22 @@
 	size_t retval = 0;
 	unsigned long long idx;
 	loff_t offset;
-	size_t pmc_size = pmc->descriptor_size * pmc->num_descriptors;
+	size_t pmc_size;
 
 	mutex_lock(&pmc->lock);
 
 	if (!wil_is_pmc_allocated(pmc)) {
-		wil_err(wil, "%s: error, pmc is not allocated!\n", __func__);
+		wil_err(wil, "error, pmc is not allocated!\n");
 		pmc->last_cmd_status = -EPERM;
 		mutex_unlock(&pmc->lock);
 		return -EPERM;
 	}
 
+	pmc_size = pmc->descriptor_size * pmc->num_descriptors;
+
 	wil_dbg_misc(wil,
-		     "%s: size %u, pos %lld\n",
-		     __func__, (unsigned)count, *f_pos);
+		     "pmc_read: size %u, pos %lld\n",
+		     (u32)count, *f_pos);
 
 	pmc->last_cmd_status = 0;
 
@@ -317,15 +358,16 @@
 	offset = *f_pos - (idx * pmc->descriptor_size);
 
 	if (*f_pos >= pmc_size) {
-		wil_dbg_misc(wil, "%s: reached end of pmc buf: %lld >= %u\n",
-			     __func__, *f_pos, (unsigned)pmc_size);
+		wil_dbg_misc(wil,
+			     "pmc_read: reached end of pmc buf: %lld >= %u\n",
+			     *f_pos, (u32)pmc_size);
 		pmc->last_cmd_status = -ERANGE;
 		goto out;
 	}
 
 	wil_dbg_misc(wil,
-		     "%s: read from pos %lld (descriptor %llu, offset %llu) %zu bytes\n",
-		     __func__, *f_pos, idx, offset, count);
+		     "pmc_read: read from pos %lld (descriptor %llu, offset %llu) %zu bytes\n",
+		     *f_pos, idx, offset, count);
 
 	/* if no errors, return the copied byte count */
 	retval = simple_read_from_buffer(buf,
@@ -345,7 +387,18 @@
 	loff_t newpos;
 	struct wil6210_priv *wil = filp->private_data;
 	struct pmc_ctx *pmc = &wil->pmc;
-	size_t pmc_size = pmc->descriptor_size * pmc->num_descriptors;
+	size_t pmc_size;
+
+	mutex_lock(&pmc->lock);
+
+	if (!wil_is_pmc_allocated(pmc)) {
+		wil_err(wil, "error, pmc is not allocated!\n");
+		pmc->last_cmd_status = -EPERM;
+		mutex_unlock(&pmc->lock);
+		return -EPERM;
+	}
+
+	pmc_size = pmc->descriptor_size * pmc->num_descriptors;
 
 	switch (whence) {
 	case 0: /* SEEK_SET */
@@ -361,15 +414,46 @@
 		break;
 
 	default: /* can't happen */
-		return -EINVAL;
+		newpos = -EINVAL;
+		goto out;
 	}
 
-	if (newpos < 0)
-		return -EINVAL;
+	if (newpos < 0) {
+		newpos = -EINVAL;
+		goto out;
+	}
 	if (newpos > pmc_size)
 		newpos = pmc_size;
 
 	filp->f_pos = newpos;
 
+out:
+	mutex_unlock(&pmc->lock);
+
 	return newpos;
 }
+
+int wil_pmcring_read(struct seq_file *s, void *data)
+{
+	struct wil6210_priv *wil = s->private;
+	struct pmc_ctx *pmc = &wil->pmc;
+	size_t pmc_ring_size =
+		sizeof(struct vring_rx_desc) * pmc->num_descriptors;
+
+	mutex_lock(&pmc->lock);
+
+	if (!wil_is_pmc_allocated(pmc)) {
+		wil_err(wil, "error, pmc is not allocated!\n");
+		pmc->last_cmd_status = -EPERM;
+		mutex_unlock(&pmc->lock);
+		return -EPERM;
+	}
+
+	wil_dbg_misc(wil, "pmcring_read: size %zu\n", pmc_ring_size);
+
+	seq_write(s, pmc->pring_va, pmc_ring_size);
+
+	mutex_unlock(&pmc->lock);
+
+	return 0;
+}