/*
 * kernel/hw_mutex.c
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2011-2013 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Corporation
 *  2200 Mission College Blvd.
 *  Santa Clara, CA 97052
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/semaphore.h>
#include <linux/mutex.h>
#ifdef CONFIG_X86_PUMA7
#include <linux/netip_subsystem.h>
#endif
#include <linux/hw_mutex.h>
#include "hwmutex.h"

#ifdef TEST_HWMUTEX
void lock_hw_mutex_as_different_master(struct hw_mutex *hmutex);
void unlock_hw_mutex_as_different_master(struct hw_mutex *hmutex);
void dump_hw_mutex_registers(struct hw_mutex *hmutex);
#endif

/** power management callback */
int hw_mutex_handle_suspend_resume(void *args, netss_power_state_t state);

struct hw_master *hw_master_glob = NULL;

#define to_hw_mutex_ops()	(hw_master_glob->ops)
#define to_hw_mutex(mutex)	(&hw_master_glob->hw_mutexes[mutex])
#define to_use_sw_mutex_only()	(hw_master_glob->use_sw_mutex_only)

/** There are irq locks around the HW mutexes which can be used for
 * synchronization between the ISR and the other APIs.
 */
#define HW_MUTEX_TAKE_ALL_IRQ_LOCKS() do { int i = 0; \
	for (i = 0; i < HW_MUTEX_TOTAL; i++) { \
		spin_lock(&hw_master_glob->hw_mutexes[i].irq_lock); \
	} \
} while (0)

#define HW_MUTEX_RELEASE_ALL_IRQ_LOCKS() do { int i = 0; \
	for (i = 0; i < HW_MUTEX_TOTAL; i++) { \
		spin_unlock(&hw_master_glob->hw_mutexes[i].irq_lock); \
	} \
} while (0)

/* hw_mutex_is_locked - check whether the current master owns the mutex or not
 * @mutex: the mutex number to be checked
 *
 * Return 1, if the current master owns the mutex
 * Return 0, if not
 * Return negative for errors
 */
int hw_mutex_is_locked(uint8_t mutex)
{
	struct hw_mutex *hmutex = NULL;
	struct hw_mutex_operations *hmutex_ops = to_hw_mutex_ops();
	bool use_sw_mutex_only = to_use_sw_mutex_only();

	BUG_ON(mutex >= HW_MUTEX_TOTAL);

	DEBUG_PRINTK("func %s mutex number: %x\n", __FUNCTION__, mutex);

	hmutex = to_hw_mutex(mutex);

	return use_sw_mutex_only ? hmutex->lock_count > 0 : hmutex_ops->is_locked(hmutex);
}
EXPORT_SYMBOL(hw_mutex_is_locked);
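/*
 * Illustrative usage sketch (not part of this driver): a caller may verify
 * that it currently owns a mutex before touching a shared resource.
 * MY_SHARED_MUTEX is a placeholder; real IDs are defined in hwmutex.h.
 *
 *	if (hw_mutex_is_locked(MY_SHARED_MUTEX) != 1)
 *		pr_warn("shared resource accessed without holding the HW mutex\n");
 */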
/*
 * hw_mutex_isr - interrupt handler
 *
 * Sequence in case of FIFO interrupt mode:
 * 1, Are we the one requesting it? If not, then go away
 * 2, Check whether we own the MUTEX
 * 3, Is there a valid waiter waiting for the MUTEX? If not, then release the MUTEX
 * 4, Wake up the waiter process
 *
 * Sequence in case of NULL interrupt mode:
 * 1, Are we the one waiting for it? If not, then go away
 * 2, Is there a valid waiter waiting for the MUTEX? If not, then release the MUTEX
 * 3, Try to lock the HW mutex again; on failure, wait for the next interrupt
 */
static irqreturn_t hw_mutex_isr(int irq, void *dev_id)
{
	struct hw_master *pmaster = (struct hw_master *)dev_id;
	struct hw_mutex_operations *hmutex_ops = pmaster->ops;
	struct hw_mutex *hmutex;
	irqreturn_t ret = IRQ_NONE;
	int i;

	switch (pmaster->mode) {
	case HW_MUTEX_FIFO_SCHE:
		/* clear interrupt status */
		hmutex_ops->clr_intr(pmaster);
		ret = IRQ_HANDLED;

		for (i = 0; i < HW_MUTEX_TOTAL; i++) {
			hmutex = &pmaster->hw_mutexes[i];

			spin_lock(&hmutex->irq_lock);
			if (HW_MUTEX_REQUESTING == atomic_read(&hmutex->status)) {
				if (hmutex_ops->is_locked(hmutex)) {
					atomic_set(&hmutex->status, HW_MUTEX_LOCKED);
					if (likely(hw_mutex_get_owner(hmutex))) {
						wake_up_process(hmutex->owner);
					} else {
						/* Nobody needs the MUTEX, just unlock it to avoid deadlock */
						hmutex_ops->unlock(hmutex);
					}
				}
			}
			spin_unlock(&hmutex->irq_lock);
		}
		break;
	case HW_MUTEX_NULL_SCHE:
		for (i = 0; i < HW_MUTEX_TOTAL; i++) {
			hmutex = &pmaster->hw_mutexes[i];

			if (hmutex_ops->is_waiting(hmutex)) {
				/* clear interrupt status */
				hmutex_ops->clr_intr(pmaster);

				spin_lock(&hmutex->irq_lock);
				if (likely(hw_mutex_get_owner(hmutex))) {
					/* Forcibly request it again */
					if (hmutex_ops->lock(hmutex, 1))
						wake_up_process(hmutex->owner);
				}
				spin_unlock(&hmutex->irq_lock);

				ret = IRQ_HANDLED;
			}
		}
		break;
	case HW_MUTEX_POLLING:
		return ret;
	}

	return ret;
}

/*
 * Sleep until the irq indicates the lock was obtained or a timeout occurs.
 *
 * NOTE: Assumes that interrupts have been disabled before calling.
 *	 Exits with interrupts disabled.
 */
static inline long __sched wait_for_interrupt(struct hw_mutex_operations *hmutex_ops,
					      struct hw_mutex *hmutex,
					      long timeout, int state, unsigned long flags)
{
	int ret = 0;
	bool use_sw_mutex_only = to_use_sw_mutex_only();

	WARN_ON(use_sw_mutex_only);

	do {
		/* If we have the lock, we are done */
		if (hmutex_ops->is_locked(hmutex)) {
			break;
		}

		/* Check if a signal has interrupted us */
		if (unlikely(signal_pending_state(state, current))) {
			printk(KERN_ERR "interrupted by signal\n");
			ret = -EINTR;
			goto exit;
		}

		__set_current_state(state);

		/* Enable interrupts and wait to see if we get an interrupt */
		spin_unlock_irqrestore(&hmutex->irq_lock, flags);
		timeout = schedule_timeout(timeout);
		/* timeout == 0: woken up from timer timeout
		 * timeout > 0: woken up by others
		 */
		spin_lock_irqsave(&hmutex->irq_lock, flags);
	} while (timeout);

	if (unlikely(!hmutex_ops->is_locked(hmutex))) {
		printk(KERN_ERR "HW_Mutex-ERROR: timeout while waiting for HW mutex id=%d\n",
		       hmutex->lock_name);
		ret = -EINTR;
		goto exit;
	}

exit:
	return ret;
}
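/*
 * Illustrative sketch of the ops interface this file relies on. Only the
 * member names used here (is_locked, lock, unlock, is_waiting, clr_intr) are
 * taken from this file; the real prototypes live in hwmutex.h and the
 * function names below are placeholders for a backend implementation.
 *
 *	static int  my_is_locked(struct hw_mutex *hmutex);      // 1 if this master owns it
 *	static int  my_lock(struct hw_mutex *hmutex, int force); // non-zero on immediate grant
 *	static void my_unlock(struct hw_mutex *hmutex);
 *	static int  my_is_waiting(struct hw_mutex *hmutex);      // request still pending?
 *	static void my_clr_intr(struct hw_master *pmaster);      // ack the mutex interrupt
 *
 *	static struct hw_mutex_operations my_ops = {
 *		.is_locked  = my_is_locked,
 *		.lock       = my_lock,
 *		.unlock     = my_unlock,
 *		.is_waiting = my_is_waiting,
 *		.clr_intr   = my_clr_intr,
 *	};
 */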
/*
 * hw_mutex_lock - acquire the mutex
 * @mutex: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until we can get it.
 *
 * The function is not interruptible.
 */
void hw_mutex_lock(uint8_t mutex)
{
	struct hw_mutex *hmutex = NULL;
	struct hw_mutex_operations *hmutex_ops = to_hw_mutex_ops();
	bool use_sw_mutex_only = to_use_sw_mutex_only();
	unsigned long flags;
	long timeout;

	BUG_ON(mutex >= HW_MUTEX_TOTAL);
	hmutex = to_hw_mutex(mutex);

	might_sleep();

	/* Resetting this status is not implemented, plus the task that jammed the
	 * lock must not call functions that might sleep --> once entered, this loop
	 * is never left.
	 */
	while (atomic_read(&hmutex->status) == HW_MUTEX_JAMMED) {
		/* We must not return from here. Good night. */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}

	/* Is this the same thread locking again? */
	mutex_lock(&hmutex->data_mutex);
	spin_lock_irqsave(&hmutex->irq_lock, flags);
	if (hmutex->lock_count > 0 && hw_mutex_is_same_owner(hmutex)) {
		hmutex->lock_count++;
		spin_unlock_irqrestore(&hmutex->irq_lock, flags);
		mutex_unlock(&hmutex->data_mutex);
		return;
	}
	spin_unlock_irqrestore(&hmutex->irq_lock, flags);
	mutex_unlock(&hmutex->data_mutex);

	/* This mutex is held between lock/unlock calls, forcing others to sleep here */
	down(&hmutex->soft_lock);

	mutex_lock(&hmutex->data_mutex);
	spin_lock_irqsave(&hmutex->irq_lock, flags);
	hw_mutex_set_owner(hmutex);

	/* If there is no HW support, continue with "lock succeeded" as the SW mutex
	 * is already held.
	 */
	if (use_sw_mutex_only || hmutex_ops->lock(hmutex, 0)) {
		spin_unlock_irqrestore(&hmutex->irq_lock, flags);
		hmutex->lock_count = 1;
		mutex_unlock(&hmutex->data_mutex);
		return;
	}

	timeout = wait_for_interrupt(hmutex_ops, hmutex, MAX_SCHEDULE_TIMEOUT,
				     TASK_UNINTERRUPTIBLE, flags);
	if (timeout == -EINTR) {
		hw_mutex_clear_owner(hmutex);
	}
	spin_unlock_irqrestore(&hmutex->irq_lock, flags);

	if (!timeout)
		hmutex->lock_count = 1;

	mutex_unlock(&hmutex->data_mutex);

	/* Lock failure */
	if (timeout) {
		printk(KERN_ERR "Failed to obtain HW Mutex\n");
		up(&hmutex->soft_lock);
	}
}
EXPORT_SYMBOL(hw_mutex_lock);
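/*
 * Illustrative usage sketch (not part of this driver): code that shares a
 * peripheral with another master takes the HW mutex around the access.
 * MY_SHARED_MUTEX and the register access are placeholders; real mutex IDs
 * come from hwmutex.h. hw_mutex_lock() nests for the same thread, so locking
 * again while already holding the mutex is fine as long as every lock is
 * paired with an unlock.
 *
 *	hw_mutex_lock(MY_SHARED_MUTEX);
 *	writel(val, shared_reg);		// exclusive access across masters
 *	hw_mutex_unlock(MY_SHARED_MUTEX);
 */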
/* AVM */
#if defined(CONFIG_TFFS_PANIC_LOG)
/*
 * For those moments you REALLY need that hw mutex.
 *
 * Note this breaks the delicate locking mechanisms implemented here and spins
 * until the HW mutex is taken by force. Not much sense in ever releasing this
 * again.
 */
void hw_mutex_jam_lock(uint8_t mutex)
{
	struct hw_mutex *hmutex = NULL;
	struct hw_mutex_operations *hmutex_ops = to_hw_mutex_ops();
	bool use_sw_mutex_only = to_use_sw_mutex_only();
	unsigned int timeout = 4000; /* 4s should be enough. */
	unsigned long flags;

	BUG_ON(mutex >= HW_MUTEX_TOTAL);
	hmutex = to_hw_mutex(mutex);

	atomic_set(&hmutex->status, HW_MUTEX_JAMMED);

	spin_lock_irqsave(&hmutex->irq_lock, flags);

	/* Yeah... not really using this. */
	if (use_sw_mutex_only)
		goto out;

	DEBUG_PRINTK("%s: Requesting hw lock %d\n", __func__, mutex);
	if (!hmutex_ops->lock(hmutex, 1))
		DEBUG_PRINTK("%s: Waiting for hw lock %d\n", __func__, mutex);

	/* Make sure we obtained the lock before leaving. */
	while (!hmutex_ops->is_locked(hmutex) && timeout--)
		mdelay(1);

out:
	DEBUG_PRINTK("%s: Got lock %u\n", __func__, mutex);
	hw_mutex_set_owner(hmutex);
	hmutex->lock_count = 1;
	spin_unlock_irqrestore(&hmutex->irq_lock, flags);
}
EXPORT_SYMBOL(hw_mutex_jam_lock);
#endif

#ifdef TEST_HWMUTEX
void hw_mutex_lock_test(uint8_t mutex)
{
	struct hw_mutex *hmutex = NULL;
	struct hw_mutex_operations *hmutex_ops = to_hw_mutex_ops();
	unsigned long flags;
	long timeout;

	BUG_ON(mutex >= HW_MUTEX_TOTAL);
	hmutex = to_hw_mutex(mutex);

	DEBUG_PRINTK("lock hmutex 0x%x, number %d\n",
		     (unsigned int)hmutex, hmutex->lock_name);
	might_sleep();

	down(&hmutex->soft_lock);

	spin_lock_irqsave(&hmutex->irq_lock, flags);
	hw_mutex_set_owner(hmutex);

	lock_hw_mutex_as_different_master(hmutex);
	dump_hw_mutex_registers(hmutex);

	if (hmutex_ops->lock(hmutex, 0)) {
		spin_unlock_irqrestore(&hmutex->irq_lock, flags);
		return;
	}

	unlock_hw_mutex_as_different_master(hmutex);

	timeout = wait_for_interrupt(hmutex_ops, hmutex, MAX_SCHEDULE_TIMEOUT,
				     TASK_UNINTERRUPTIBLE, flags);
	if (timeout == -EINTR)
		hw_mutex_clear_owner(hmutex);

	spin_unlock_irqrestore(&hmutex->irq_lock, flags);

	/* Lock failure */
	if (timeout)
		up(&hmutex->soft_lock);

	printk("mutex status %d\n", atomic_read(&hmutex->status));

	dump_hw_mutex_registers(hmutex);

	return;
}
EXPORT_SYMBOL(hw_mutex_lock_test);
#endif

/*
 * hw_mutex_lock_interruptible - acquire the mutex
 * @mutex: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 * It can be interrupted by a signal, or exit when timed out.
 *
 * Returns 0 on success, negative if interrupted or timed out.
 */
long __sched hw_mutex_lock_interruptible(uint8_t mutex)
{
	struct hw_mutex *hmutex = NULL;
	struct hw_mutex_operations *hmutex_ops = to_hw_mutex_ops();
	bool use_sw_mutex_only = to_use_sw_mutex_only();
	unsigned long flags;
	long timeout;

	BUG_ON(mutex >= HW_MUTEX_TOTAL);
	hmutex = to_hw_mutex(mutex);

	DEBUG_PRINTK("lock hmutex(interruptible) 0x%x, number %d\n",
		     (uint32_t)hmutex, hmutex->lock_name);
	might_sleep();

	/* Is this the same thread locking again? */
	mutex_lock(&hmutex->data_mutex);
	spin_lock_irqsave(&hmutex->irq_lock, flags);
	if (hmutex->lock_count > 0 && hw_mutex_is_same_owner(hmutex)) {
		hmutex->lock_count++;
		spin_unlock_irqrestore(&hmutex->irq_lock, flags);
		mutex_unlock(&hmutex->data_mutex);
		return 0;
	}
	spin_unlock_irqrestore(&hmutex->irq_lock, flags);
	mutex_unlock(&hmutex->data_mutex);

	/* Don't bother waiting if someone jammed the lock. */
	if (atomic_read(&hmutex->status) == HW_MUTEX_JAMMED)
		return -EBUSY;

	/* This mutex is held between lock/unlock calls, forcing others to sleep here */
	if (down_interruptible(&hmutex->soft_lock))
		return -EINTR;

	mutex_lock(&hmutex->data_mutex);
	spin_lock_irqsave(&hmutex->irq_lock, flags);
	hw_mutex_set_owner(hmutex);

	/* If there is no HW support, continue with "lock succeeded" as the SW mutex
	 * is already held.
	 */
	if (use_sw_mutex_only || hmutex_ops->lock(hmutex, 0)) {
		hmutex->lock_count = 1;
		mutex_unlock(&hmutex->data_mutex);
		spin_unlock_irqrestore(&hmutex->irq_lock, flags);
		return 0;
	}

	timeout = wait_for_interrupt(hmutex_ops, hmutex, IRQ_HW_MUTEX_TIME_OUT,
				     TASK_INTERRUPTIBLE, flags);
	if (timeout == -EINTR)
		hw_mutex_clear_owner(hmutex);

	spin_unlock_irqrestore(&hmutex->irq_lock, flags);

	if (!timeout)
		hmutex->lock_count = 1;

	mutex_unlock(&hmutex->data_mutex);

	/* Lock failure */
	if (timeout)
		up(&hmutex->soft_lock);

	return timeout;
}
EXPORT_SYMBOL(hw_mutex_lock_interruptible);
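/*
 * Illustrative usage sketch (not part of this driver): the interruptible
 * variant returns 0 on success and a negative value otherwise (-EBUSY if the
 * mutex was jammed, -EINTR on a signal or timeout), so callers must check it
 * before touching the shared resource. MY_SHARED_MUTEX and
 * access_shared_resource() are placeholders.
 *
 *	long err = hw_mutex_lock_interruptible(MY_SHARED_MUTEX);
 *	if (err)
 *		return err;		// interrupted, timed out or jammed
 *	access_shared_resource();	// the protected work
 *	hw_mutex_unlock(MY_SHARED_MUTEX);
 */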
/*
 * hw_mutex_unlock - release the mutex
 * @mutex: the mutex to be released
 */
void hw_mutex_unlock(uint8_t mutex)
{
	struct hw_mutex *hmutex = NULL;
	struct hw_mutex_operations *hmutex_ops = to_hw_mutex_ops();
	bool use_sw_mutex_only = to_use_sw_mutex_only();
	unsigned long flags;

	BUG_ON(mutex >= HW_MUTEX_TOTAL);
	hmutex = to_hw_mutex(mutex);

	DEBUG_PRINTK("unlock hmutex 0x%x, number %d\n",
		     (uint32_t)hmutex, hmutex->lock_name);

	mutex_lock(&hmutex->data_mutex);
	spin_lock_irqsave(&hmutex->irq_lock, flags);

	if (atomic_read(&hmutex->status) == HW_MUTEX_JAMMED) {
		printk(KERN_ERR "ERROR: hw_mutex is jammed and cannot be unlocked. "
		       "Mutex: 0x%x\n", mutex);
		spin_unlock_irqrestore(&hmutex->irq_lock, flags);
		mutex_unlock(&hmutex->data_mutex);
		return;
	}

	WARN_ON(hmutex->lock_count == 0);

	DEBUG_PRINTK("hmutex 0x%x, number %d, owner 0x%x, released by 0x%x\n",
		     (uint32_t)hmutex, hmutex->lock_name,
		     (uint32_t)hw_mutex_get_owner(hmutex), (uint32_t)current);

	if (hmutex->lock_count >= 2) {
		hmutex->lock_count--;
		spin_unlock_irqrestore(&hmutex->irq_lock, flags);
		mutex_unlock(&hmutex->data_mutex);
		return;
	}

	if (!use_sw_mutex_only)
		hmutex_ops->unlock(hmutex);

	hw_mutex_clear_owner(hmutex);
	hmutex->lock_count = 0;
	spin_unlock_irqrestore(&hmutex->irq_lock, flags);

	mutex_unlock(&hmutex->data_mutex);
	up(&hmutex->soft_lock);
}
EXPORT_SYMBOL(hw_mutex_unlock);

int hw_mutex_register(struct hw_master *pmaster)
{
	if (WARN_ON(pmaster == NULL))
		return -EINVAL;

	hw_master_glob = pmaster;

	if (pmaster->mode != HW_MUTEX_POLLING) {
#ifdef CONFIG_X86_PUMA6
		if (request_irq(pmaster->irq_num, hw_mutex_isr, IRQF_SHARED,
				HW_MUTEX_IRQ_NAME, (void *)pmaster)) {
			printk(KERN_ERR "HW Mutex: Unable to allocate IRQ\n");
			return -ENODEV;
		}
#elif defined(CONFIG_X86_PUMA7)
		netss_power_state_callback_info_t pm_callback_info;
		netss_interrupt_info_t irq_info;

		irq_info.func = &hw_mutex_isr;
		irq_info.args = pmaster;
		netss_interrupt_register(NETSS_INTERUPT_HWMUTEX, -1, &irq_info);

		pm_callback_info.func = hw_mutex_handle_suspend_resume;
		pm_callback_info.args = NULL;
		if (0 != netss_power_state_change_callback_register(NETSS_DEV_HW_MUTEX,
								    &pm_callback_info)) {
			printk(KERN_ERR "HW Mutex: failed to register power management callback with NetSS driver\n");
		}
#endif
	}
	return 0;
}
EXPORT_SYMBOL(hw_mutex_register);

void hw_mutex_unregister(struct hw_master *pmaster)
{
#ifdef CONFIG_X86_PUMA6
	if (WARN_ON(pmaster == NULL))
		return;

	if (pmaster->mode != HW_MUTEX_POLLING) {
		free_irq(pmaster->irq_num, (void *)pmaster);
	}
#elif defined(CONFIG_X86_PUMA7)
	netss_interrupt_info_t irq_info;

	irq_info.func = NULL;
	irq_info.args = NULL;

	if (pmaster->mode != HW_MUTEX_POLLING)
		netss_interrupt_register(NETSS_INTERUPT_HWMUTEX, -1, &irq_info);
#endif
	hw_master_glob = NULL;
}
EXPORT_SYMBOL(hw_mutex_unregister);
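/*
 * Illustrative registration sketch (not part of this driver): a platform
 * backend fills in a struct hw_master and hands it to hw_mutex_register() at
 * probe time. Only the fields used in this file are shown; the full struct
 * and the ops table live in hwmutex.h, and my_ops/MY_HW_MUTEX_IRQ are
 * placeholders.
 *
 *	static struct hw_master my_master = {
 *		.mode    = HW_MUTEX_FIFO_SCHE,
 *		.ops     = &my_ops,		// see the ops sketch above
 *		.irq_num = MY_HW_MUTEX_IRQ,	// unused in HW_MUTEX_POLLING mode
 *	};
 *
 *	ret = hw_mutex_register(&my_master);
 *	// ... on teardown: hw_mutex_unregister(&my_master);
 */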
int hw_mutex_handle_suspend_resume(void *args, netss_power_state_t state)
{
	int i = 0;
	struct hw_mutex_operations *hmutex_ops = to_hw_mutex_ops();

	/* Take all irq locks around all HW mutexes to stop any other accesses */
	HW_MUTEX_TAKE_ALL_IRQ_LOCKS();

	/* If resuming, indicate that the device is on now, so that the mutexes
	 * can be restored at HW level.
	 */
	if (state == NETSS_NETIP_POWER_STATE_ACTIVE) {
		atomic_set(&hw_master_glob->dev_is_off, 0);
	}

	for (i = 0; i < HW_MUTEX_TOTAL; i++) {
		struct hw_mutex *hmutex = to_hw_mutex(i);

		if (atomic_read(&hmutex->status) == HW_MUTEX_LOCKED) {
			if (state == NETSS_NETIP_POWER_STATE_OFF) {
				/* Release the mutex at HW level */
				hmutex_ops->unlock(hmutex);
				/* But maintain the locked status of the mutex in software,
				 * so that the HW level mutex lock can be re-acquired on resume.
				 */
				atomic_set(&hmutex->status, HW_MUTEX_LOCKED);
			} else if (state == NETSS_NETIP_POWER_STATE_ACTIVE) {
				/* Restore the mutex at HW level forcefully */
				if (0 == hmutex_ops->lock(hmutex, 1)) {
					printk("Failed to acquire HW Mutex %d\n", i);
				}
			}
		}
	}

	/* From now on, mutex lock is always successful, and status is maintained
	 * only at software level.
	 */
	if (state == NETSS_NETIP_POWER_STATE_OFF) {
		atomic_set(&hw_master_glob->dev_is_off, 1);
	}

	/* Release all irq locks around all HW mutexes */
	HW_MUTEX_RELEASE_ALL_IRQ_LOCKS();

	return 0;
}