/*
 * puma6_core.c
 * Description:
 * Architecture specific stuff.
 *
 *
 * GPL LICENSE SUMMARY
 *
 *   Copyright(c) 2013 Intel Corporation.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 *   General Public License for more details.
 *
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *   The full GNU General Public License is included in this distribution
 *   in the file called LICENSE.GPL.
 *
 *   Contact Information:
 *   Intel Corporation
 *   2200 Mission College Blvd.
 *   Santa Clara, CA 97052
 */

#include
#include
#include
#ifdef CONFIG_HIGH_RES_TIMERS
#include
#include
#endif
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include  /* For CRU IDs */

/*--- #define PUMA_DEBUG_TIMER ---*/
#ifdef PUMA_DEBUG_TIMER
#define DBGT(args...) printk(KERN_ERR args)
#else
#define DBGT(args...)
#endif

#include

void arch_init_irq(void);
extern void create_mapping(struct map_desc *md);
extern int ti_avalanche_setup(void);

static unsigned int no_linux_mem_size = CONFIG_ARM_AVALANCHE_TOP_MEM_RESERVED;
static unsigned int no_linux_mem_last;
static unsigned int no_linux_mem_start;
static int no_linux_mem_desc_idx = -1;
struct NO_OPERSYS_MEM_DESC_T no_OperSys_memory_desc[eNO_OperSys_END];

/* used to compute VMALLOC_START */
phys_addr_t puma6_memory_start;
phys_addr_t puma6_memory_end;
EXPORT_SYMBOL(puma6_memory_start);
EXPORT_SYMBOL(puma6_memory_end);

#ifdef CONFIG_HIGH_RES_TIMERS
static unsigned int puma_cpu_cycles_factor;
static void config_timer2(u32);
#else
/* For VeryPrecise clock */
static int SC_timer_clk;
unsigned long long sc_calibrate_jf = 0;            /* Jiffies calibration */
unsigned long long frc2correct_load = 0;
static INT32 sc_timer_err_nsec;                    /* HW Timer error */
static void (*late_time_init_chain)(void) = NULL;
/* For Precise clock */
UINT32 sc_count_nsec = 0;
#endif
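/*
 * Static I/O mappings for the SoC, registered via iotable_init() from
 * puma6_map_io() below.  The first entry (only present when
 * CONFIG_ARM_AVALANCHE_TOP_MEM_RESERVED is set) is patched at map_io time to
 * cover the memory reserved at the top of RAM for the DSP, and the last entry
 * is filled in to cover Atom memory that follows the ARM memory.
 */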
static struct map_desc puma6_io_desc[] __initdata = {
#if CONFIG_ARM_AVALANCHE_TOP_MEM_RESERVED
    /* Note: This memory descriptor is set to be the FIRST intentionally !!! */
    /* Do not change its location !!! */
    {
        .virtual = NON_OS_RESERVED_VIRT,
        .pfn     = 0,                   // to be updated later
        .length  = 0,                   // to be updated later
        .type    = MT_MEMORY
    },
#endif
    {
        .virtual = IO_VIRT,
        .pfn     = __phys_to_pfn(IO_PHY),
        .length  = IO_SIZE,
        .type    = MT_DEVICE
    },
    {
        .virtual = MM_SPI_VIRT,
        .pfn     = __phys_to_pfn(MM_SPI_PHY),
        .length  = MM_SPI_SIZE,
        .type    = MT_DEVICE,
    },
    {
        .virtual = SOC_IO_VIRT,
        .pfn     = __phys_to_pfn(SOC_IO_PHY),
        .length  = SOC_IO_SIZE,
        .type    = MT_DEVICE
    },
    {
        .virtual = INTC_VIRT,
        .pfn     = __phys_to_pfn(INTC_PHY),
        .length  = INTC_SIZE,
        .type    = MT_DEVICE_NONSHARED
    },
    {
        .virtual = ATOM_RESERVED_VIRT,
        .pfn     = __phys_to_pfn(ATOM_RESERVED_PHY),
        .length  = ATOM_RESERVED_SIZE,
        .type    = MT_MEMORY
    },
    // will be filled in if Atom has a chunk of memory following ARM memory
    {
        .virtual = 0,
        .pfn     = 0,
        .length  = 0,
        .type    = MT_MEMORY
    },
};

static unsigned int num_io_desc = ARRAY_SIZE(puma6_io_desc) - 1;

static void __init puma6_map_io(void)
{
    unsigned long arm_memsize;

#if CONFIG_ARM_AVALANCHE_TOP_MEM_RESERVED
    no_linux_mem_desc_idx = 0;
    puma6_io_desc[no_linux_mem_desc_idx].pfn    = __phys_to_pfn(no_linux_mem_start);
    puma6_io_desc[no_linux_mem_desc_idx].length = no_linux_mem_size;
#endif

    /* FIXME: we assume a total memory size of 512MB and a 128MB chunk at the start
     * reserved for the Atom. Also see definition of ATOM_RESERVED_* in puma6_hardware.h
     *
     * Rounding start down to the next lower page ensures that the computed
     * ARM mem size can not be smaller than its real mem size.
     */
    arm_memsize = puma6_memory_end - (puma6_memory_start & PAGE_MASK);

    puma6_io_desc[num_io_desc].virtual = ATOM_RESERVED_END + 1;
    puma6_io_desc[num_io_desc].pfn     = __phys_to_pfn(puma6_memory_end);
    // puma6_io_desc[num_io_desc].length = ATOM_RESERVED_SIZE;
    puma6_io_desc[num_io_desc].length  = 0x20000000 - ATOM_RESERVED_SIZE - arm_memsize;
    ++num_io_desc;

    printk("puma6_memory_start: 0x%x, puma6_memory_end: 0x%x arm_memsize: 0x%lx\n",
           puma6_memory_start, puma6_memory_end, arm_memsize);

    iotable_init(puma6_io_desc, num_io_desc);

    printk("Reserved %dk DSP memory starting from Physical 0x%p\n",
           (int)(no_linux_mem_size / 1024), (void *)(no_linux_mem_start));
}

#define TIMER16_CNTRL_PRESCALE_ENABLE 0x8000
#define TIMER16_CNTRL_PRESCALE        0x003C
#define TIMER16_CNTRL_MODE            0x0002
#define TIMER16_MINPRESCALE           2
#define TIMER16_MAXPRESCALE           8192

/*--------------------------------------------------------------------------------*\
 * #define TIMER16_PRESCALE_DEFAULT 0x05
 * Latent bug (as a rule the timer event fires too early):
 * The max timer period of the NO-HZ timer is set up as 160 ms; with a prescale of 5
 * for the HI_RES timer a wraparound would already occur after 37 ms, i.e. the
 * timer_read() function would not detect the overflows.
\*--------------------------------------------------------------------------------*/
#define TIMER16_PRESCALE_DEFAULT      0x08
#define TIMER16_MIN_LOAD_VALUE        1
#define TIMER16_MAX_LOAD_VALUE        0xFFFF
#define MHZ                           1000000

/* Set the min clock divisor to a slightly higher value so that we are not
 * close to the edge, so multiply by a factor of 2. */
#define TIMER16_MAX_CLK_DIVISOR (TIMER16_MAX_LOAD_VALUE * TIMER16_MAXPRESCALE)
#define TIMER16_MIN_CLK_DIVISOR (TIMER16_MIN_LOAD_VALUE * TIMER16_MINPRESCALE * 2)
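/*
 * Rough orientation, assuming the 112.5 MHz timer input clock quoted in
 * puma6_timer_init(): the largest programmable period is about
 * TIMER16_MAX_CLK_DIVISOR / refclk_mhz = 65535 * 8192 / 112 ~= 4.8 million us,
 * i.e. roughly 4.8 s, while the smallest is a few timer clocks and thus well
 * below 1 us.  puma_config_timer() checks requested periods against the upper
 * bound only.
 */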
typedef struct {
    volatile u32 ctrl_reg;   /* Timer Control Register */
    volatile u32 load_reg;   /* Timer Load value register */
    volatile u32 count_reg;  /* Timer count register */
    volatile u32 intr_reg;   /* Timer Interrupt register */
} puma_timer_regs_t;

typedef enum {
    TIMER16_MODE_ONESHOT  = 0,
    TIMER16_MODE_AUTOLOAD = 2
} puma_timer_mode;

typedef enum {
    TIMER16_STATUS_STOP = 0,
    TIMER16_STATUS_START
} puma_timer_status;

extern unsigned int system_rev;

#if defined(PUMA_DEBUG_TIMER)
/*--------------------------------------------------------------------------------*\
 * T = ( 2 ^ (prescale + 1) ) * range / clk
 * return: range in usec
\*--------------------------------------------------------------------------------*/
static void print_timer_value(char *prefix, unsigned int clk, unsigned int prescale, unsigned int range)
{
    unsigned long long T_nsec, tmp;
    unsigned long T_usec, TR_usec;

    T_nsec = (1ULL << (prescale + 1)) * 1000000ULL;
    do_div(T_nsec, clk / 1000);
    tmp = T_nsec;
    do_div(tmp, 1000);
    T_usec = (unsigned long)tmp;
    tmp = T_nsec * range;
    do_div(tmp, 1000);
    TR_usec = (unsigned long)tmp;
    printk(KERN_INFO "%s %lu.%03u us approx. range(%u) %lu.%03lu ms\n",
           prefix, T_usec, do_div(T_nsec, 1000), range, TR_usec / 1000, TR_usec % 1000);
}
#endif /*--- #if defined(PUMA_DEBUG_TIMER) ---*/

#ifdef CONFIG_HIGH_RES_TIMERS
/****************************************************************************
 * FUNCTION: puma_config_timer
 ****************************************************************************
 * Description: The routine is called to configure the timer mode and
 *              time period (in micro seconds).
 * Returns the prescale exponent.
 ***************************************************************************/
int puma_config_timer(u32 base_address, u32 refclk_freq, puma_timer_mode mode, u32 usec)
{
    volatile puma_timer_regs_t *p_timer;
    u32 prescale;
    u32 count;
    u32 ctrl_reg;
    u32 refclk_mhz = (refclk_freq / MHZ);

    DBGT("[%s] base_address=%#x\n", __FUNCTION__, base_address);
    DBGT("[%s] refclk_freq=%dMHZ\n", __FUNCTION__, refclk_mhz);
    DBGT("[%s] usecs=%d\n", __FUNCTION__, usec);

    if ((base_address == 0) || (usec == 0)) {
        printk(KERN_ERR "[%s] base_address or usec is 0", __FUNCTION__);
        return -1;
    }
    if (((enum PAL_SYS_TIMER16_MODE_tag)mode != TIMER16_CNTRL_ONESHOT) &&
        ((enum PAL_SYS_TIMER16_MODE_tag)mode != TIMER16_CNTRL_AUTOLOAD)) {
        printk(KERN_ERR "[%s] Error: mode must be AUTOLOAD or ONESHOT", __FUNCTION__);
        return -1;
    }

    /* The min time period is 1 usec, and since the reference clock frequency is
       always going to be more than the "min" divider value, the minimum is not
       checked.  Check the max time period that can be derived from the timer in
       micro-seconds. */
    if (usec > ((TIMER16_MAX_CLK_DIVISOR) / refclk_mhz)) {
        printk(KERN_ERR "[%s] input argument speed out of range\n", __FUNCTION__);
        return -1;
    }

    p_timer = (puma_timer_regs_t *)(base_address);
    count = refclk_mhz * usec;
    DBGT("[%s] initial_count=%d\n", __FUNCTION__, count);

    /*------------------------------------------------------------------------------------*\
     * T = ( 2 ^ (prescale + 1) ) * COUNT / f_timer_clock
    \*------------------------------------------------------------------------------------*/
    for (prescale = 0; prescale < 12; prescale++) {
        count = count >> 1;
        if (count <= TIMER16_MAX_LOAD_VALUE) {
            break;
        }
    }
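    /*
     * Worked example, assuming a 112.5 MHz reference clock and the 16/HZ max
     * timer period used by puma6_timer_init() (160000 us at HZ=100): count
     * starts at 112 * 160000 = 17920000 and is halved until it fits into
     * 16 bits, giving count = 35000 at prescale = 8, i.e. a timer tick of
     * 2^9 / 112.5 MHz ~= 4.55 us and a period of roughly 159 ms.
     */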
    /* write the load counter value */
    DBGT("[%s] final_count=%d\n", __FUNCTION__, count);
    p_timer->load_reg = count;

    /* write prescaler and mode to control reg */
    ctrl_reg = mode | TIMER16_CNTRL_PRESCALE_ENABLE | (prescale << 2);
    DBGT("[%s] prescale=%d\n", __FUNCTION__, prescale);
    p_timer->ctrl_reg = ctrl_reg;

#if defined(PUMA_DEBUG_TIMER)
    print_timer_value("Timer0-Tick(Irq-MaxTimerPeriod)", refclk_freq, prescale, count);
#endif /*--- #if defined(PUMA_DEBUG_TIMER) ---*/
    return (int)prescale;
}

static void config_timer2(u32 base_address)
{
    volatile puma_timer_regs_t *p_timer;
    u32 ctrl_reg;

    p_timer = (puma_timer_regs_t *)(base_address);

    /* write the load counter value */
    p_timer->load_reg = TIMER16_MAX_LOAD_VALUE;

    /*-----------------------------------------------------------------*\
     * write prescaler and mode to control reg
     * TIMER16_PRESCALE_DEFAULT = 0x08
    \*-----------------------------------------------------------------*/
    ctrl_reg = TIMER16_CNTRL_AUTOLOAD | TIMER16_CNTRL_PRESCALE_ENABLE | (TIMER16_PRESCALE_DEFAULT << 2);
    p_timer->ctrl_reg = ctrl_reg;
}

/****************************************************************************
 * FUNCTION: puma_timer_ctrl
 ****************************************************************************
 * Description: The routine is called to start/stop the timer
 *
 ***************************************************************************/
void puma_timer_ctrl(u32 base_address, puma_timer_status status)
{
    volatile puma_timer_regs_t *p_timer;

    if (base_address) {
        p_timer = (puma_timer_regs_t *)(base_address);
        if ((enum PAL_SYS_TIMER16_CTRL_tag)status == TIMER16_CTRL_START) {
            p_timer->ctrl_reg |= TIMER16_CTRL_START;
        } else {
            p_timer->ctrl_reg &= ~(TIMER16_CTRL_START);
        }
    }
}
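/*
 * Timer 2 is the clocksource: it is reloaded with TIMER16_MAX_LOAD_VALUE in
 * autoload mode, so the 16-bit hardware count wraps roughly every
 * 65535 * 2^9 / 112.5 MHz ~= 298 ms at the default prescale of 8.
 * timer_read() below extends this to the 64-bit cycle value expected by the
 * clocksource core by detecting wraps in software, which only works as long
 * as it is called at least once per wrap period.
 */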
/****************************************************************************
 * FUNCTION: puma_timer_read
 ****************************************************************************
 * Description: The routine is called to read the current value of timer.
 *
 ***************************************************************************/
static cycle_t timer_read(struct clocksource *cs __attribute__((unused)))
{
    static DEFINE_SPINLOCK(tlock);
    static cycle_t overflow_timer_value;
    static unsigned int last_t;
    volatile puma_timer_regs_t *p_timer;
    unsigned long flags = 0;
    cycle_t timer_value;
    register unsigned int t;

    p_timer = (puma_timer_regs_t *)(AVALANCHE_TIMER2_BASE);

    __BUILD_AVM_CONTEXT_FUNC(spin_lock_irqsave(&tlock, flags));
    t = (p_timer->load_reg & 0xffff) - (p_timer->count_reg & 0xffff);
    if (t < last_t)
        overflow_timer_value += 1 << 16;
    last_t = t;
    __BUILD_AVM_CONTEXT_FUNC(spin_unlock_irqrestore(&tlock, flags));

    timer_value = (cycle_t)t | overflow_timer_value;
    /*--- printk("%s: %llu load_reg=%x count_reg=%x ctrl_reg=%x\n", __FUNCTION__, timer_value, p_timer->load_reg, p_timer->count_reg, p_timer->ctrl_reg); ---*/
    return timer_value;
}

/*
 * clocksource
 */
static struct clocksource clocksource_puma = {
    .name   = "timer16bit",
    .rating = 300,
    .read   = timer_read,
    .mask   = CLOCKSOURCE_MASK(64),
    .shift  = 16,
    .flags  = CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_VALID_FOR_HRES
};

static int puma_timer_set_next_event(unsigned long cycles, struct clock_event_device *evt)
{
    volatile puma_timer_regs_t *p_timer;

    p_timer = (puma_timer_regs_t *)(AVALANCHE_TIMER0_BASE);

    /* First stop the timer */
    p_timer->ctrl_reg &= ~(TIMER16_CTRL_START);
    /* Load the value being passed */
    p_timer->load_reg = cycles;
    /* Now start the timer */
    p_timer->ctrl_reg |= TIMER16_CTRL_START;

    return 0;
}

/*--------------------------------------------------------------------------------*\
\*--------------------------------------------------------------------------------*/
static void puma_timer_set_mode(enum clock_event_mode mode, struct clock_event_device *evt)
{
    volatile puma_timer_regs_t *p_timer;

    p_timer = (puma_timer_regs_t *)(AVALANCHE_TIMER0_BASE);

    switch (mode) {
    case CLOCK_EVT_MODE_PERIODIC:
        /* write mode to control reg */
        p_timer->ctrl_reg |= TIMER16_CNTRL_AUTOLOAD;
        break;
    case CLOCK_EVT_MODE_ONESHOT:
        /* write mode to control reg */
        p_timer->ctrl_reg &= ~(TIMER16_CNTRL_AUTOLOAD);
        break;
    case CLOCK_EVT_MODE_SHUTDOWN:
        /* stop the timer */
        p_timer->ctrl_reg &= ~(TIMER16_CTRL_START);
        break;
    case CLOCK_EVT_MODE_UNUSED:
        break;
    case CLOCK_EVT_MODE_RESUME:
        printk(KERN_ERR "%s: CLOCK_EVT_MODE_RESUME\n", __FUNCTION__);
        break;
    }
}

/*
 * clockevent
 */
static struct clock_event_device clockevent_puma = {
    .name           = "timer16bit",
    .features       = CLOCK_EVT_FEAT_ONESHOT,
    .mode           = CLOCK_EVT_MODE_UNUSED,
    .shift          = 32,
    .set_next_event = puma_timer_set_next_event,
    .set_mode       = puma_timer_set_mode,
    .event_handler  = NULL,
};
#endif /*--- #ifdef CONFIG_HIGH_RES_TIMERS ---*/

#if 0
/*--------------------------------------------------------------------------------*\
 * puma6: not usable (any more), because no cycles are counted while idle :-(
\*--------------------------------------------------------------------------------*/
static void puma_cpu_cycle_init(void)
{
    union __performance_monitor_control C;

    write_secure_debug_enable_register(0, 1);
    C.Register = read_p15_performance_monitor_control();
    C.Bits.CycleCounterDivider = 1;  /*--- / 64 ---*/
    C.Bits.CycleCounterReset = 1;
    C.Bits.EnableCounters = 1;
    write_p15_performance_monitor_control(C.Register);
    DBGT(KERN_ERR "%s: enable cycle_count performance-monitor-register: %x\n",
         __func__, read_p15_performance_monitor_control());
}
#endif
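/*
 * get_puma_cpu_cycles() derives a CPU-cycle-like value from the Timer 2
 * clocksource: timer_read() counts are multiplied by puma_cpu_cycles_factor,
 * which puma6_timer_init() computes from the (gcd-reduced) ratio of half the
 * ARM clock to the Timer 2 count rate.
 */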
/*--------------------------------------------------------------------------------*\
\*--------------------------------------------------------------------------------*/
cycles_t get_puma_cpu_cycles(void)
{
    return clocksource_puma.read(&clocksource_puma) * puma_cpu_cycles_factor; /*--- consistent with the MIPS clock ---*/
    /*--- return (cycles_t)(read_p15_cycle_counter() << 5); ---*/
}
EXPORT_SYMBOL(get_puma_cpu_cycles);

#ifdef CONFIG_HIGH_RES_TIMERS
unsigned int timer_irq_count = 0;

static irqreturn_t puma_timer0_interrupt(int irq, void *dev_id)
{
    volatile puma_timer_regs_t *p_timer;

    if (clockevent_puma.event_handler) {
        clockevent_puma.event_handler(&clockevent_puma);
    }

    p_timer = (puma_timer_regs_t *)(AVALANCHE_TIMER0_BASE);
    if (p_timer->ctrl_reg & TIMER16_CNTRL_AUTOLOAD) {
        p_timer->ctrl_reg |= TIMER16_CTRL_START;
    }
    timer_irq_count++;
    return IRQ_HANDLED;
}
#else
static irqreturn_t puma_timer0_interrupt(int irq, void *dev_id)
{
    timer_tick();
    return IRQ_HANDLED;
}
#endif

static struct irqaction puma_timer0_irq = {
    .name    = "Puma6 Timer Tick",
    .flags   = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, /* POLL added (as for omap1) */
    .handler = puma_timer0_interrupt
};

#ifdef CONFIG_HIGH_RES_TIMERS
/*-------------------------------------------------------------------------------------*\
 * Euclid's algorithm
 * If b > a: one extra loop iteration !!!
 * ret = 0 if a or b == 0
\*-------------------------------------------------------------------------------------*/
static unsigned int ggT(unsigned int a, unsigned int b)
{
    unsigned int Rest;

    if (a == 0) {
        return 0;
    }
    Rest = b;
    while (Rest) {
        b = Rest;
        Rest = a % b;
        a = b;
    }
    return b;
}

/*
 * Set up timer interrupt, and return the current time in seconds.
 */
static void __init puma6_timer_init(void)
{
    int timer_clk;
    int scale_timer0;
    int scale_timer2;
    int exp_prescale_timer0;
    int cpu = smp_processor_id();
    unsigned int puma_cpu_clock;
    int MaxTimerPeriod_usec = (int)((16.0 / (float)(HZ)) * 1000000.0);

    system_rev = MACH_TYPE_PUMA6;
    /*--- ti_avalanche_setup(); ---*/
    /*--- puma_cpu_cycle_init(); ---*/

    /*-------------------------------------------------------------------------------------*\
     * get the input clock frequency
     * Input_Clock_Frequency = 112.5 MHz
    \*-------------------------------------------------------------------------------------*/
    timer_clk = PAL_sysClkcGetFreq(PAL_SYS_CLKC_TIMER0);
    DBGT("[%s] vbus_clock=%d \n", __FUNCTION__, timer_clk);
    DBGT("[%s] HZ=%d \n", __FUNCTION__, HZ);

    /*--------------------------------------------------------------------------------------*\
     * Timer 0
     * Compute the prescale exponent needed to set up a MaxTimerPeriod of 16/HZ.
    \*--------------------------------------------------------------------------------------*/
    PAL_sysResetCtrl(AVALANCHE_TIMER0_RESET, OUT_OF_RESET);
    exp_prescale_timer0 = puma_config_timer(AVALANCHE_TIMER0_BASE, timer_clk,
                                            TIMER16_CNTRL_AUTOLOAD, MaxTimerPeriod_usec);
    puma_timer_ctrl(AVALANCHE_TIMER0_BASE, TIMER16_CTRL_START);
    setup_irq(AVALANCHE_TIMER_0_INT, &puma_timer0_irq);
    scale_timer0 = (1 << (exp_prescale_timer0 + 1));

    /*--------------------------------------------------------------------------------------*\
     * Timer 2
    \*--------------------------------------------------------------------------------------*/
    PAL_sysResetCtrl(AVALANCHE_TIMER2_RESET, OUT_OF_RESET);
    config_timer2(AVALANCHE_TIMER2_BASE);
    puma_timer_ctrl(AVALANCHE_TIMER2_BASE, TIMER16_CTRL_START);
    scale_timer2 = ((1 << (TIMER16_PRESCALE_DEFAULT + 1)));
#if defined(PUMA_DEBUG_TIMER)
    print_timer_value("Timer2-Tick", timer_clk, TIMER16_PRESCALE_DEFAULT, TIMER16_MAX_LOAD_VALUE);
#endif /*--- #if defined(PUMA_DEBUG_TIMER) ---*/
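    /*
     * The clocksource core converts cycles to time as ns = (cycles * mult) >> shift.
     * With shift = 16 and a Timer 2 count rate of timer_clk / scale_timer2
     * (about 220 kHz, i.e. ~4.55 us per count, at 112.5 MHz and prescale 8),
     * mult ends up at roughly 4551 << 16.
     */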
    /*--------------------------------------------------------------------------------------*\
     * setup and register clocksource (Timer 2)
    \*--------------------------------------------------------------------------------------*/
    clocksource_puma.mult = clocksource_hz2mult(timer_clk / scale_timer2 /*--- Hz ---*/, clocksource_puma.shift);
    DBGT("Mult = %u\n", clocksource_puma.mult);
    if (clocksource_register(&clocksource_puma)) {
        printk(KERN_ERR "%s: can't register clocksource!\n", clocksource_puma.name);
    }
    DBGT("%s: %d: timer_clk=%d scale2=%d clocksource_puma.mul=%d\n", __FUNCTION__, __LINE__,
         timer_clk, scale_timer2, clocksource_puma.mult);

    puma_cpu_clock = PAL_sysClkcGetFreq(PAL_SYS_CLKC_ARM);
    {
        unsigned int timer2_clk, teiler = ggT(puma_cpu_clock, timer_clk);
        puma_cpu_clock /= teiler, timer2_clk = timer_clk / teiler;
        puma_cpu_cycles_factor = (unsigned int)((puma_cpu_clock / 2 * scale_timer2) / timer2_clk);
    }
    DBGT("[%s] puma_cpu_clock=%d, cycle-factor=%d\n", __FUNCTION__, puma_cpu_clock, puma_cpu_cycles_factor);

    /*--------------------------------------------------------------------------------------*\
     * setup and register clockevent (Timer 0)
    \*--------------------------------------------------------------------------------------*/
    if (MaxTimerPeriod_usec > (scale_timer2 * TIMER16_MAX_LOAD_VALUE) / (timer_clk / 1000 / 1000)) {
        printk(KERN_ERR "[%s] Warning: => NoHz-Timer (%d us) might exceed clocksource-cycle-range (%u us)\n",
               __FUNCTION__, MaxTimerPeriod_usec,
               (scale_timer2 * TIMER16_MAX_LOAD_VALUE) / (timer_clk / 1000 / 1000));
    }
    /*--- static inline unsigned long div_sc(unsigned long ticks, unsigned long nsec, int shift) ---*/
    clockevent_puma.mult = div_sc(timer_clk / scale_timer0, NSEC_PER_SEC, clockevent_puma.shift);
    clockevent_puma.max_delta_ns = clockevent_delta2ns(0xfffe, &clockevent_puma);
    clockevent_puma.min_delta_ns = clockevent_delta2ns(1, &clockevent_puma);
    clockevent_puma.cpumask = &cpumask_of_cpu(cpu);
    /*--- DBGT("%s: current_cpu=%i, cpumask_of_cpu=0x%lx \n", __func__, cpu, clockevent_puma.cpumask); ---*/
    clockevents_register_device(&clockevent_puma);
    DBGT("%s: %d: timer_clk=%d scale0=%d max_ns=%llu min_ns=%llu clockevent_puma.mul=%d\n", __FUNCTION__, __LINE__,
         timer_clk, scale_timer0, clockevent_puma.max_delta_ns, clockevent_puma.min_delta_ns, clockevent_puma.mult);

    printk(KERN_INFO "Puma6 Timer0 & HRES Timer2 initialized\n");
}
#else

/* For VeryPrecise clock */
static int FRC_init = 0;
extern void (*__initdata late_time_init)(void);

static void late_time_init_f(void)
{
    unsigned long long timer0_phase;
    unsigned long flags;
    unsigned long frc_hi;
    unsigned long frc_lo;

    printk("INITIAL_JIFFIES %lu\n", INITIAL_JIFFIES);
    printk("jiffies %lu\n", jiffies);

    /* Sync the tick timer to the FRC */
    local_irq_save(flags);
    do {
        timer0_phase = PAL_sysTimer16GetNsAfterTick(AVALANCHE_TIMER0_BASE, sc_count_nsec);
    } while (timer0_phase > sc_count_nsec);
    /* Exit close to the reload, there is a whole tick to achieve this */
    /* Now timer0 is around the reload area */
    /* Now start FRC */
    /* Reset the free-running clock */
    FREE_RUNNING_COUNTER_ENABLE();
    FREE_RUNNING_COUNTER_RESET();
    frc_hi = FREE_RUNNING_COUNTER_H_GET();
    frc_lo = FREE_RUNNING_COUNTER_L_GET();
    local_irq_restore(flags);
    printk("Sync FRC (hi %lu lo %lu) to timer0 (%llu)\n", frc_hi, frc_lo, timer0_phase);
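    /*
     * frc2correct_load (computed below) is the number of FRC counts after which
     * the accumulated tick error reaches ~100 us; avalanche_clock_vp() refreshes
     * its error correction each time the FRC passes another such interval.
     */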
    /* Calc frc2correct_load: recalc when total err > 100 usec
     *   err per tick * num ticks = 100 usec
     *   nticks = 100 usec / sc_timer_err_nsec
     *   nticks = 100000 / sc_timer_err_nsec
     *   frc    = nticks * 10 msec * frc timer_clk
     *   frc    = 100000 / sc_timer_err_nsec * 0.01 * frc timer_clk
     *   frc    = 100000 / HZ * frc timer_clk / sc_timer_err_nsec
     */
    /* Since we requested a negative err, make the err positive */
    sc_timer_err_nsec = -sc_timer_err_nsec;
    /* Change the timer_clk to frc freq */
    SC_timer_clk *= 4;
    frc2correct_load = 100000 / HZ;
    frc2correct_load *= SC_timer_clk;
    frc2correct_load += sc_timer_err_nsec / 2;
    do_div(frc2correct_load, sc_timer_err_nsec);
    //printk("sc_timer_err_nsec %d, frc2correct_load %llu\n", sc_timer_err_nsec, frc2correct_load);

    FRC_init = 1;

    if (late_time_init_chain != NULL) {
        late_time_init_chain();
    }
}

static void __init puma6_timer_init(void)
{
    int timer_clk;

    system_rev = MACH_TYPE_PUMA6;
    /*--- ti_avalanche_setup(); ---*/

    /* get the input clock frequency */
    timer_clk = PAL_sysClkcGetFreq(PAL_SYS_CLKC_TIMER0);

    /* timer 0 - enable timer and auto load, and go off every 1 ms */
    PAL_sysResetCtrl(AVALANCHE_TIMER0_RESET, OUT_OF_RESET);
    PAL_sysTimer16SetParams(AVALANCHE_TIMER0_BASE, timer_clk, TIMER16_CNTRL_AUTOLOAD,
                            (int)((1.0 / (float)(HZ)) * 1000000.0));
    PAL_sysTimer16Ctrl(AVALANCHE_TIMER0_BASE, TIMER16_CTRL_START);

    /* For VeryPrecise clock */
    /* It is too early to start the FRC, start later */
    late_time_init_chain = late_time_init;
    late_time_init = late_time_init_f;

    /* For Precise clock */
    sc_count_nsec = PAL_sysTimer16GetNSecPerCount(AVALANCHE_TIMER0_BASE, NSEC_PER_SEC / HZ);
    printk("Precise sched_clock: load %d, nsec/count %d\n",
           PAL_sysTimer16GetLoad(AVALANCHE_TIMER0_BASE), sc_count_nsec);

    setup_irq(AVALANCHE_TIMER_0_INT, &puma_timer0_irq);

    printk("Puma6 Timer0 initialized\n");
}
#endif

static struct sys_timer puma6_timer = {
    .init = puma6_timer_init,
};

#ifndef CONFIG_HIGH_RES_TIMERS
/* For VeryPrecise clock */
/* Calc sched_clock from free running 64 bit counter */
unsigned long long notrace avalanche_clock_vp(void)
{
    unsigned long long ns = 0;
    /* Re-Read till hi is equal */
    UINT32 prev_hi = 0;
    UINT32 curr_hi = 0;
    UINT32 curr_lo = 0;
    static unsigned long long frc2correct = 0;
    unsigned long long curr_frc = 0;
    static long long err_crct_ns = 0;

    if (FRC_init == 0) {
        return (unsigned long long)(jiffies - INITIAL_JIFFIES) * (NSEC_PER_SEC / HZ);
    }

    do {
        prev_hi = curr_hi;
        /* Do lo before hi so when hi==0 a single read is enough */
        curr_lo = FREE_RUNNING_COUNTER_L_GET();
        curr_hi = FREE_RUNNING_COUNTER_H_GET();
    } while (prev_hi != curr_hi);

    curr_frc = (unsigned long long)(curr_hi) << 32 | (unsigned long long)(curr_lo);

    /* The base clock is 450MHz, so normalize to nSec */
    /* ns = cnt*tick_ns = cnt*(10^9/freq_hz) = cnt*(10^9 / 450MHz) = cnt*(1000/450) = cnt * 20 / 9 */
    /* Don't worry about wrap/overflow, 64bit * nSec ==~ 6000 years */
    ns = curr_frc;
    ns *= 20;
    ns += 5; /* Round up the division */
    do_div(ns, 9);

    if (frc2correct_load != 0) {
        if (curr_frc > frc2correct) {
            /* Reload */
            frc2correct = curr_frc + frc2correct_load;
            /* Recalc: correction == number of ticks * err per tick
             *   frc    = nticks * 10 msec * frc timer_clk
             *   nticks = frc / 10 msec / frc timer_clk
             *   nticks = frc * HZ / frc timer_clk
             */
            err_crct_ns = curr_frc * HZ + SC_timer_clk / 2;
            do_div(err_crct_ns, SC_timer_clk);
            err_crct_ns *= sc_timer_err_nsec;
            //printk("Recalculated err_crct_ns %lld: curr_frc %lld, next frc %lld\n", err_crct_ns, curr_frc, frc2correct);
        }
        ns += err_crct_ns;
    }

    /* Normalize using calibration value */
    ns -= sc_calibrate_jf;

    return ns;
}
#endif
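/*
 * The "no OperSys" (DSP) region lives at the top of RAM: puma6_fixup() below
 * trims it off the memory bank reported to Linux and records it in
 * no_linux_mem_start/no_linux_mem_size/no_linux_mem_last, from which the
 * allocator that follows hands out chunks (VDSP allocations grow down from the
 * top, VoiceNI allocations grow up from the start of the region).
 */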
/* This API is used to allocate a specified size of memory for the
 * specified module (DSP) from the reserved region.  Currently the size
 * of the reserved region is decided at compile time via a Kconfig variable.
 */
int avalanche_alloc_no_OperSys_memory(AVALANCHE_NO_OPERSYS_MOD_T mod, unsigned int size, unsigned int *phys_start)
{
    unsigned int cookie;
    int ret = 0;

    if (mod >= eNO_OperSys_END)
        return -1;

    PAL_osProtectEntry(PAL_OSPROTECT_INTERRUPT, &cookie);

    /* 32-bit align */
    size = ((size + 0x3) & (~0x3));

    /* we do not have that much reserved memory */
    if (size > no_linux_mem_size) {
        ret = -1;
        goto topmem_done;
    }

    switch (mod) {
    case eNO_OperSys_VDSP:
        /* If the memory was already allocated then simply return it */
        if (no_OperSys_memory_desc[mod].reserved) {
            *phys_start = no_OperSys_memory_desc[mod].phys_start;
            goto topmem_done;
        }
        no_linux_mem_last -= size;
        *phys_start = no_linux_mem_last;
        no_OperSys_memory_desc[mod].reserved   = 1;
        no_OperSys_memory_desc[mod].phys_start = no_linux_mem_last;
        no_linux_mem_size -= size;
        break;

    case eNO_OperSys_VoiceNI:
    {
        unsigned int mask, padding;

        mask    = size - 1;
        padding = no_linux_mem_start & mask;

        if (padding + size > no_linux_mem_size) {
            printk("%s:%d\n", __FUNCTION__, __LINE__);
            ret = -1;
            break;
        }
        if (padding) {
            no_linux_mem_start &= ~mask;
            no_linux_mem_start += size;
            no_linux_mem_size  -= padding;
        }
        *phys_start = no_linux_mem_start;
        no_linux_mem_start += size;
        no_linux_mem_size  -= size;
        break;
    }

    default:
        ret = -1;
    }

topmem_done:
    PAL_osProtectExit(PAL_OSPROTECT_INTERRUPT, cookie);
    return ret;
}
EXPORT_SYMBOL(avalanche_alloc_no_OperSys_memory);

unsigned int avalanche_no_OperSys_memory_phys_to_virt(unsigned int pAddr)
{
    if (-1 == no_linux_mem_desc_idx) {
        printk("%s:%d\n", __FUNCTION__, __LINE__);
        return (unsigned int)-1;
    }
    if ((pAddr < __pfn_to_phys(puma6_io_desc[no_linux_mem_desc_idx].pfn)) ||
        (pAddr > (__pfn_to_phys(puma6_io_desc[no_linux_mem_desc_idx].pfn) +
                  puma6_io_desc[no_linux_mem_desc_idx].length))) {
        printk("%s:%d\n", __FUNCTION__, __LINE__);
        return (unsigned int)-1;
    }
    return (pAddr - __pfn_to_phys(puma6_io_desc[no_linux_mem_desc_idx].pfn) +
            puma6_io_desc[no_linux_mem_desc_idx].virtual);
}
EXPORT_SYMBOL(avalanche_no_OperSys_memory_phys_to_virt);

unsigned int avalanche_no_OperSys_memory_virt_to_phys(unsigned int vAddr)
{
    if (-1 == no_linux_mem_desc_idx) {
        printk("%s:%d\n", __FUNCTION__, __LINE__);
        return (unsigned int)-1;
    }
    if ((vAddr < puma6_io_desc[no_linux_mem_desc_idx].virtual) ||
        (vAddr > (puma6_io_desc[no_linux_mem_desc_idx].virtual) +
                 puma6_io_desc[no_linux_mem_desc_idx].length)) {
        printk("%s:%d\n", __FUNCTION__, __LINE__);
        return (unsigned int)-1;
    }
    return (vAddr - puma6_io_desc[no_linux_mem_desc_idx].virtual +
            __pfn_to_phys(puma6_io_desc[no_linux_mem_desc_idx].pfn));
}
EXPORT_SYMBOL(avalanche_no_OperSys_memory_virt_to_phys);
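/*
 * Typical call sequence (illustrative sketch only; the module ID, size and the
 * consumer of the returned addresses are hypothetical):
 *
 *   unsigned int phys, virt;
 *   if (avalanche_alloc_no_OperSys_memory(eNO_OperSys_VDSP, 0x10000, &phys) == 0) {
 *       virt = avalanche_no_OperSys_memory_phys_to_virt(phys);
 *       // ... hand phys/virt to the DSP firmware loader ...
 *   }
 */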
/* variable used by vlynq */
unsigned int avalanche_mem_size;
EXPORT_SYMBOL(avalanche_mem_size);

static void __init puma6_fixup(struct machine_desc *desc, struct tag *tag, char **cmdline, struct meminfo *mi)
{
    for (; tag->hdr.size; tag = tag_next(tag)) {
        if (tag->hdr.tag == ATAG_MEM) {
            unsigned long size = tag->u.mem.size, start = tag->u.mem.start;

            size -= start & ~PAGE_MASK;
            /* This variable is used by DSP memory allocation API */
            no_linux_mem_last  = start + size;
            puma6_memory_start = start;
            puma6_memory_end   = no_linux_mem_last;

            size -= no_linux_mem_size;
            /* dsp memory starts where normal memory ends */
            no_linux_mem_start  = start + size;
            avalanche_mem_size += size;

            mi->bank[mi->nr_banks].start   = PAGE_ALIGN(start);
            mi->bank[mi->nr_banks].size    = size & PAGE_MASK;
            mi->bank[mi->nr_banks].highmem = PHYS_TO_NID(start);
            mi->nr_banks += 1;
            break;
        }
    }
}

/*
 * if a RAM-image has been loaded, update puma6_memory_end to point at its end
 */
extern struct meminfo meminfo;
static int __init puma6_mtdram_init(char *p)
{
    phys_addr_t start = 0, end = 0;
    char *orig_p = p;

    if (p) {
        start = (phys_addr_t)simple_strtoul(p, NULL, 16);
        p = strchr(p, ',');
        if (p) {
            p++;
            end = (phys_addr_t)simple_strtoul(p, NULL, 16);
        }
        if (start != 0 && end != 0) {
            printk(KERN_INFO "[%s] Reserving RAM-image memory from 0x%08lx to 0x%08lx\n",
                   __func__, (unsigned long)start, (unsigned long)end);
            puma6_memory_end = end;
            return 0;
        }
    }
    printk(KERN_INFO "[%s] Invalid size for RAM-image given: %s\n", __func__, orig_p);
    return 0;
}
early_param("mtdram1", puma6_mtdram_init);

void __init __ti_avalanche_setup(void)
{
    ti_avalanche_setup();
}

/* boot_params:
 * This is the location from where Linux should pick the Atag list
 * created by the bootloader (mostly uboot)
 */
MACHINE_START(PUMA6, "puma6")
    /* .phys_io     = IO_START, */
    /* .io_pg_offst = ((IO_BASE) >> 18) & 0xfffc, */
    .boot_params = CONFIG_ARM_AVALANCHE_KERNEL_PARAMS_ADDRESS,
    .map_io      = puma6_map_io,
    .init_irq    = arch_init_irq,
    .init_early  = __ti_avalanche_setup,
    .timer       = &puma6_timer,
    .fixup       = puma6_fixup,
MACHINE_END