/*
 * Copyright (C) 2015 AVM GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * AVM hardware-watchdog glue for the Broadcom BCM63138 SoC.
 *
 * The watchdog is triggered round-robin on every online CPU; if it ever
 * expires, the (pseudo-)NMI fast-IRQ handler wdt_isr() collects all CPUs,
 * restarts the hardware watchdog once per remaining retry, and finally
 * forces a backtrace/die() so the reset cause can be recorded.
 *
 * NOTE(review): every angle-bracket include name in this file was lost
 * in transit (the "<...>" payloads were stripped, apparently by an
 * HTML-sanitising step).  All <...> names below are best-effort
 * reconstructions from the symbols this file uses — verify each one
 * against the original source before building.
 */

#if defined(CONFIG_AVM_WATCHDOG)

#ifdef CONFIG_AVM_FASTIRQ
#ifdef CONFIG_AVM_FASTIRQ_ARCH_ARM_COMMON
#include <avm/fastirq/avm_fiq.h>	/* TODO confirm — name lost */
#else
#include <mach/avm_fiq.h>		/* TODO confirm — name lost */
#endif
#define CLIENT_FIQ_PRIO FIQ_PRIO_WATCHDOG
#endif

/* TODO confirm the whole block below — original names lost, see NOTE above */
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/notifier.h>
#include <linux/console.h>
#include <linux/reboot.h>
#include <asm/io.h>
#include <asm/irq_regs.h>
#include <asm/cacheflush.h>
#include <asm/system_misc.h>

#ifdef CONFIG_AVM_FASTIRQ
#include "63138_intr.h"
#ifdef CONFIG_AVM_FASTIRQ_ARCH_ARM_COMMON
/* TODO confirm — names lost */
#include <avm/fastirq/avm_fiq_os.h>
#include <avm/fastirq/avm_fiq_arch.h>
#include <avm/fastirq/avm_gic.h>
#include <avm/fastirq/avm_gic_fiq.h>
#else
/* TODO confirm — names lost */
#include <mach/avm_fiq_os.h>
#include <mach/avm_fiq_arch.h>
#include <mach/avm_gic.h>
#include <mach/avm_gic_fiq.h>
#endif
#endif

#include "avm_sammel.h"
/* TODO confirm — names lost */
#include <linux/avm_kernel_config.h>
#include <linux/avm_debug.h>

#if defined(CONFIG_BCM_EXT_TIMER)
#include "bcm_ext_timer.h"
#endif

/* Broadcom watchdog primitives (implemented elsewhere in the BSP) */
void bcm_init_watchdog(void);
void bcm_configure_watchdog(unsigned int enabled, unsigned int timer,
			    unsigned int userMode, unsigned int userThreshold);
void bcm_acknowledge_watchdog(void);
int bcm_suspend_watchdog(void);

static struct timer_list WDTimer[NR_CPUS];	/* one trigger timer per CPU */
static atomic_t wdt_active;			/* watchdog armed? */
static unsigned int act_wdt_cpu;		/* next CPU in the round-robin */
static atomic_t wdt_busy;			/* trigger in flight (cpu+1), 0 = idle */
static spinlock_t wd_lock;			/* master election in wdt_isr() */
static atomic_t wd_sync;			/* slaves checked in at the FIQ rendezvous */
static atomic_t wd_count;			/* retries left before forcing die() */

#define RETRY_COUNT 0
unsigned int wd_retry_count = RETRY_COUNT;

static RAW_NOTIFIER_HEAD(nmi_chain);

/**
 * register_nmi_notifier - register a callback on the (pseudo-)NMI chain
 * @nb: notifier block to add
 *
 * The chain is invoked from wdt_isr() (fastirq context) right before the
 * system dies, with the trapped pt_regs as argument.
 */
int register_nmi_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&nmi_chain, nb);
}

/**
 * wd_timer_function - per-CPU timer callback: feed the hardware watchdog
 *
 * Acknowledges the watchdog, clears the busy marker and refills the
 * retry counter.
 */
static void wd_timer_function(unsigned long context __maybe_unused)
{
	bcm_acknowledge_watchdog();
	atomic_set(&wdt_busy, 0);
	atomic_set(&wd_count, wd_retry_count);
}

unsigned int cancelled;

/**
 * ar7wdt_hw_trigger - feed the watchdog from each CPU in round-robin order
 *
 * Picks the next online CPU starting at act_wdt_cpu.  If that CPU is the
 * current one, the watchdog is fed synchronously; otherwise a one-jiffy
 * timer is queued on the target CPU.  This proves every CPU is still
 * servicing timers.  No-op while the watchdog is inactive or a previous
 * trigger is still in flight.
 */
void ar7wdt_hw_trigger(void)
{
	unsigned int cpu;

	if (atomic_read(&wdt_active) == 0) {
		return;
	}
	if (atomic_read(&wdt_busy)) {
		return;
	}
	/* pr_err("[%s]Trigger cpu=%u\n", __func__, act_wdt_cpu); */
	for (cpu = act_wdt_cpu; cpu < num_possible_cpus(); cpu++) {
		if (!cpu_online(cpu)) {
			continue;
		}
		atomic_set(&wdt_busy, cpu + 1);
		if (cpu == smp_processor_id()) {
			if (!cancelled)
				wd_timer_function(0);
			break;
		}
		WDTimer[cpu].expires = jiffies + 1;
		del_timer(&WDTimer[cpu]);
		add_timer_on(&WDTimer[cpu], cpu);
		break;
	}
	/* advance the round-robin pointer, wrapping at the CPU count */
	if (++cpu >= num_possible_cpus())
		cpu = 0;
	act_wdt_cpu = cpu;
}
EXPORT_SYMBOL(ar7wdt_hw_trigger);

extern unsigned int bcm_gpio_get_data(unsigned int gpio_num);
extern void bcm_gpio_set_data(unsigned int gpio_num, unsigned int data);
void bcm_intclear_watchdog(void);
extern void avm_mask_all_fiqs_down(unsigned int fiq_prio,
				   unsigned long *restore_PMR,
				   unsigned long flags);
extern void avm_unmask_all_fiqs_up(unsigned long restore_PMR,
				   unsigned long flags);

/**
 * wdt_isr - watchdog FIQ handler (fires on all CPUs)
 *
 * The CPU that wins wd_lock becomes the master; everyone else is a slave.
 *
 * Slaves: check in via wd_sync and spin until the master releases them.
 * When the retry budget is exhausted, a slave instead unroutes the
 * watchdog FIQ from itself and parks forever, waiting for the master's
 * backtrace IPI-FIRQ.
 *
 * Master: waits (max ~3 s in cycle counts) for all slaves, restarts the
 * hardware watchdog and clears/EOIs the interrupt.  With retries left it
 * decrements wd_count and releases the slaves; otherwise (or if a slave
 * never arrived) it records RS_NMIWATCHDOG, runs the NMI notifier chain
 * and die()s — the pending hardware watchdog then pulls the reset.
 */
irqreturn_t wdt_isr(int irq __attribute__((unused)),
		    void *arg __attribute__((unused)))
{
	struct pt_regs regs, *pregs;
	unsigned int limit;
	unsigned int tstart;
	unsigned int cpu = raw_smp_processor_id();

	if (!firq_spin_trylock(&wd_lock)) {
		/* --- slave path --- */
		int wd_counter = 0;

		pr_err("FIQ watchdog handling, slave CPU#%d caught!\n", cpu);
		wd_counter = atomic_read(&wd_count);
		/* Sync ... */
		atomic_inc(&wd_sync);
		if (wd_counter == 0) {
			pr_err("FIQ watchdog handling, slave: CPU#%d, waiting for backtrace trigger ...\n", cpu);
			/* disable target routing */
			set_ICDIPTR(AVM_IRQ_WD,
				    get_ICDIPTR(AVM_IRQ_WD, IS_ATOMIC) & ~(1 << cpu),
				    IS_ATOMIC);
			local_fiq_enable();
			/* Park here; the master's IPI-FIRQ trigger will hit us ... */
			while (1) {
			}
		}
		/* wait until the master resets wd_sync */
		while (atomic_read(&wd_sync) != 0) {
		}
		return IRQ_HANDLED;
	} else {
		/* --- master path --- */
		int forced_panic = 0;

		pr_err("FIQ watchdog handling, master: CPU#%d, retry counter=%d\n",
		       cpu, atomic_read(&wd_count));
		/* collect all slaves */
		limit = avm_get_cyclefreq() * 3; /* sec */
		tstart = avm_get_cycles();
		while (atomic_read(&wd_sync) != (num_possible_cpus() - 1)) {
			if ((avm_get_cycles() - tstart) > limit) {
				pr_err("It seems, slave CPU is caught badly while keeping WATCHDOG FIQ masked ...\n");
				pr_err("Sync trigger should come through though, if not, FIQs are erroneously switched off there ...\n");
				forced_panic = 1;
				break;
			}
		}
		/* now service the interrupt controller: late interrupt ack */
		(void)get_ICCIAR(IS_ATOMIC);
		/* stop the watchdog */
		TIMER->WatchDogCtl = 0xEE00;
		TIMER->WatchDogCtl = 0x00EE;
		dmb();
		/* restart the watchdog */
		TIMER->WatchDogCtl = 0xFF00;
		TIMER->WatchDogCtl = 0x00FF;
		dmb();
		/* clear the watchdog interrupt */
		TIMER->TimerInts |= WATCHDOG;
		dmb();
		/* clear interrupt pending */
		set_ICDICPR(AVM_IRQ_WD, 1, IS_ATOMIC);
		/* end of interrupt */
		set_ICCEOIR(AVM_IRQ_WD, IS_ATOMIC);
		/* release the master/slave lock */
		firq_spin_unlock(&wd_lock);

		if ((forced_panic) || (atomic_read(&wd_count) == 0)) {
			if (!firq_is_avm_rte()) {
				pregs = get_irq_regs();
			} else {
				/* avm_tick_jiffies_update(); -- also fix up
				 * jiffies in the FASTIRQ case */
				pregs = get_fiq_regs();
			}
			/* BUGFIX(review): source contained the mojibake
			 * "prepare_register_for_trap(®s, ..." — '&reg'
			 * had been HTML-decoded to '®'; restored to &regs. */
			prepare_register_for_trap(&regs, &pregs);
			avm_set_reset_status(RS_NMIWATCHDOG);
			avm_stack_check(NULL);
			raw_notifier_call_chain(&nmi_chain, 0, pregs);
			console_verbose();
			bust_spinlocks(1);
			flush_cache_all();
			die("HardwareWatchDog - NMI taken", pregs, 0);
			/* just in case: we never return — the watchdog will
			 * pull the reset */
			while (1) {
			}
		}
		/* decrement retry counter, clear sync, then release slaves */
		atomic_dec(&wd_count);
		atomic_set(&wd_sync, 0);
		return IRQ_HANDLED;
	}
}

/**
 * register_wdt_irq - install wdt_isr as a fast IRQ on all CPUs
 * @irq: hardware interrupt number of the watchdog
 */
void register_wdt_irq(int irq)
{
	int ret;

	ret = avm_request_fiq_on(3, irq, wdt_isr, FIQ_CPUMASK, "Watchdog", 0);
	avm_gic_fiq_setup(irq, 3, FIQ_PRIO_WATCHDOG, 1, 0);
	if (!ret) {
		/* BUGFIX(review): was "%u" for the signed int 'irq' */
		pr_err("[%s] Watchdog as fastirq(%d) on all cpus registered\n",
		       __func__, irq);
		return;
	}
	pr_err("[%s] ERROR request_irq(irq(%d)) ret:%d\n", __func__, irq, ret);
}

/**
 * ar7wdt_hw_init - set up per-CPU timers, install the FIQ and arm the watchdog
 */
void ar7wdt_hw_init(void)
{
	int cpu;

	pr_err("[%s]: Setting up watchdog for 32sec (bark) and 64sec (bite) ...\n",
	       __func__);

	for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
		init_timer(&WDTimer[cpu]);
		WDTimer[cpu].data = (unsigned long)&WDTimer[cpu];
		WDTimer[cpu].function = wd_timer_function;
	}
	bcm_init_watchdog();
	firq_spin_lock_init(&wd_lock);
	atomic_set(&wd_sync, 0);
	register_wdt_irq(AVM_IRQ_WD);
	bcm_configure_watchdog(0, 0x3FFFFFF, 0, 0);
	mdelay(100);
	bcm_configure_watchdog(1, 0x3FFFFFF, 0, 0);
	atomic_set(&wd_count, wd_retry_count);
	mdelay(100);
	atomic_set(&wdt_active, 1);
}

/**
 * ar7wdt_hw_is_wdt_running - non-zero while the watchdog is armed
 */
int ar7wdt_hw_is_wdt_running(void)
{
	return atomic_read(&wdt_active);
}

/**
 * ar7wdt_hw_secure_wdt_disable - dummy (no secure-mode disable on this SoC)
 */
void ar7wdt_hw_secure_wdt_disable(void)
{
}

/**
 * ar7wdt_hw_deinit - stop triggering the watchdog
 *
 * NOTE(review): the second bcm_configure_watchdog() call passes enabled=1
 * even though this path claims to switch the watchdog off — mirrors the
 * original code; verify whether enabled=0 was intended.
 */
void ar7wdt_hw_deinit(void)
{
	pr_err("[%s]: Switching off watchdog ...\n", __func__);
	bcm_acknowledge_watchdog();
	mdelay(100);
	bcm_configure_watchdog(0, 0x3FFFFFF, 0, 0);
	mdelay(100);
	bcm_configure_watchdog(1, 0x3FFFFFF, 0, 0);
	atomic_set(&wd_count, 10);
	mdelay(100);
	atomic_set(&wdt_active, 0);
}

/**
 * ar7wdt_hw_reboot - let the hardware watchdog expire quickly to reset the box
 */
void ar7wdt_hw_reboot(void)
{
	pr_err("[%s]: triggered ...\n", __func__);
	bcm_acknowledge_watchdog();
	mdelay(100);
	bcm_configure_watchdog(1, 0x100000, 0, 0);
	/* panic("%s: Watchdog expired\n", __func__); */
}

#endif /*--- #if defined(CONFIG_AVM_WATCHDOG) ---*/