--- zzzz-none-000/linux-2.6.28.10/arch/mips/kernel/vpe.c	2009-05-02 18:54:43.000000000 +0000
+++ puma5-6360-529/linux-2.6.28.10/arch/mips/kernel/vpe.c	2011-01-12 12:04:21.000000000 +0000
@@ -144,14 +144,15 @@
 };
 
 struct {
-        /* Virtual processing elements */
-        struct list_head vpe_list;
-
-        /* Thread contexts */
-        struct list_head tc_list;
+        spinlock_t vpe_list_lock;
+        struct list_head vpe_list;      /* Virtual processing elements */
+        spinlock_t tc_list_lock;
+        struct list_head tc_list;       /* Thread contexts */
 } vpecontrol = {
-        .vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list),
-        .tc_list = LIST_HEAD_INIT(vpecontrol.tc_list)
+        .vpe_list_lock = SPIN_LOCK_UNLOCKED,
+        .vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list),
+        .tc_list_lock = SPIN_LOCK_UNLOCKED,
+        .tc_list = LIST_HEAD_INIT(vpecontrol.tc_list)
 };
 
 static void release_progmem(void *ptr);
@@ -160,30 +161,40 @@
 /* get the vpe associated with this minor */
 struct vpe *get_vpe(int minor)
 {
-        struct vpe *v;
+        struct vpe *res, *v;
 
         if (!cpu_has_mipsmt)
                 return NULL;
 
+        res = NULL;
+        spin_lock(&vpecontrol.vpe_list_lock);
         list_for_each_entry(v, &vpecontrol.vpe_list, list) {
-                if (v->minor == minor)
-                        return v;
+                if (v->minor == minor) {
+                        res = v;
+                        break;
+                }
         }
+        spin_unlock(&vpecontrol.vpe_list_lock);
 
-        return NULL;
+        return res;
 }
 
 /* get the vpe associated with this minor */
 struct tc *get_tc(int index)
 {
-        struct tc *t;
+        struct tc *res, *t;
 
+        res = NULL;
+        spin_lock(&vpecontrol.tc_list_lock);
         list_for_each_entry(t, &vpecontrol.tc_list, list) {
-                if (t->index == index)
-                        return t;
+                if (t->index == index) {
+                        res = t;
+                        break;
+                }
         }
+        spin_unlock(&vpecontrol.tc_list_lock);
 
-        return NULL;
+        return res;
 }
 
 struct tc *get_tc_unused(void)
@@ -208,7 +219,9 @@
         }
 
         INIT_LIST_HEAD(&v->tc);
+        spin_lock(&vpecontrol.vpe_list_lock);
         list_add_tail(&v->list, &vpecontrol.vpe_list);
+        spin_unlock(&vpecontrol.vpe_list_lock);
 
         INIT_LIST_HEAD(&v->notify);
         v->minor = minor;
@@ -225,7 +238,10 @@
 
         INIT_LIST_HEAD(&tc->tc);
         tc->index = index;
+
+        spin_lock(&vpecontrol.tc_list_lock);
         list_add_tail(&tc->list, &vpecontrol.tc_list);
+        spin_unlock(&vpecontrol.tc_list_lock);
 
 out:
         return tc;
@@ -327,7 +343,7 @@
                             || (s->sh_flags & masks[m][1])
                             || s->sh_entsize != ~0UL)
                                 continue;
-                        s->sh_entsize = get_offset(&mod->core_size, s);
+                        s->sh_entsize = get_offset((unsigned long*)&mod->core_size, s);
                 }
 
                 if (m == 0)
@@ -593,8 +609,6 @@
 }
 
 /* end module-elf32.c */
-
-
 /* Change all symbols so that sh_value encodes the pointer directly. */
 static void simplify_symbols(Elf_Shdr * sechdrs,
                             unsigned int symindex,
@@ -733,6 +747,7 @@
                 return -ENOEXEC;
         }
 
+        printk(KERN_WARNING "%s: tc %d start-addr %lx\n", __func__, t->index, v->__start);
         /* Write the address we want it to start running from in the TCPC register. */
         write_tc_c0_tcrestart((unsigned long)v->__start);
         write_tc_c0_tccontext((unsigned long)0);
@@ -744,7 +759,21 @@
         val = read_tc_c0_tcstatus();
         val = (val & ~(TCSTATUS_DA | TCSTATUS_IXMT)) | TCSTATUS_A;
         write_tc_c0_tcstatus(val);
-
+#if 0
+        settc(0);
+        __write_32bit_c0_register($1,7, (1 << 8) | (1 << 0));
+        settc(t->index);
+        __write_32bit_c0_register($1,7, (0xE << 8) | (0xE << 0));
+        set_c0_mvpcontrol(0x1 << 3);
+#endif
+        printk(KERN_WARNING "%s: tc %d tcstatus %08lx tcbind %08lx\n MVPCONTROL %08x vpeopt %08x\n",
+               __func__,
+               t->index,
+               read_tc_c0_tcstatus(),
+               read_tc_c0_tcbind(),
+               read_c0_mvpcontrol(),
+               __read_32bit_c0_register($1,7));
+
         write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H);
 
         /*
@@ -901,7 +930,7 @@
         if (!v->load_addr)
                 return -ENOMEM;
 
-        pr_info("VPE loader: loading to %p\n", v->load_addr);
+        pr_info("VPE loader: loading to %p %s len = %ld\n", v->load_addr, relocate ? "(relocate)" : "(fix at start-addr)", v->plen);
 
         if (relocate) {
                 for (i = 0; i < hdr->e_shnum; i++) {
@@ -949,6 +978,8 @@
                                 return err;
                 }
 
+                /* make sure it's physically written out */
+                flush_icache_range((unsigned long)v->load_addr, (unsigned long)v->load_addr + v->len);
         } else {
                 struct elf_phdr *phdr = (struct elf_phdr *) ((char *)hdr + hdr->e_phoff);
 
@@ -959,6 +990,10 @@
                                        phdr->p_filesz);
                                 memset((void *)phdr->p_paddr + phdr->p_filesz,
                                        0, phdr->p_memsz - phdr->p_filesz);
+                                if(phdr->p_memsz) {
+                                        dma_cache_wback_inv((unsigned long)phdr->p_paddr, phdr->p_memsz);
+                                        flush_icache_range((unsigned long)phdr->p_paddr, (unsigned long)phdr->p_paddr + phdr->p_memsz);
+                                }
                         }
                         phdr++;
                 }
@@ -976,11 +1011,6 @@
                         }
                 }
         }
-
-        /* make sure it's physically written out */
-        flush_icache_range((unsigned long)v->load_addr,
-                           (unsigned long)v->load_addr + v->len);
-
         if ((find_vpe_symbols(v, sechdrs, symindex, strtab, &mod)) < 0) {
                 if (v->__start == 0) {
                         printk(KERN_WARNING "VPE loader: program does not contain "
@@ -1410,6 +1440,85 @@
 
 struct device vpe_device;
 
+/*--------------------------------------------------------------------------------*\
+\*--------------------------------------------------------------------------------*/
+static void __init smvp_tc_init(unsigned int tc, unsigned int mvpconf0) {
+        unsigned long tmp;
+
+        if (!tc) {
+                printk("%s: tc %d tcbind %08lx tcstatus %08lx\n", __func__, tc, read_tc_c0_tcbind(), read_tc_c0_tcstatus());
+                return;
+        }
+
+        /* bind a TC to each VPE, May as well put all excess TC's
+           on the last VPE */
+        if (tc >= (((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)+1))
+                write_tc_c0_tcbind(read_tc_c0_tcbind() | ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT));
+        else {
+                write_tc_c0_tcbind(read_tc_c0_tcbind() | tc);
+
+                /* and set XTC */
+                write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | (tc << VPECONF0_XTC_SHIFT));
+        }
+
+        tmp = read_tc_c0_tcstatus();
+
+        /* mark not allocated and not dynamically allocatable */
+        tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
+        tmp |= TCSTATUS_IXMT;   /* interrupt exempt */
+        write_tc_c0_tcstatus(tmp);
+
+        write_tc_c0_tchalt(TCHALT_H);
+
+        printk("%s: tc %d tcbind %08lx tcstatus %08lx\n", __func__, tc, read_tc_c0_tcbind(), read_tc_c0_tcstatus());
+}
+/*--------------------------------------------------------------------------------*\
+\*--------------------------------------------------------------------------------*/
+static void __init smvp_copy_vpe_config(void) {
+        write_vpe_c0_status(
+                (read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0);
+
+        /* set config to be the same as vpe0, particularly kseg0 coherency alg */
+        write_vpe_c0_config( read_c0_config());
+
+        /* make sure there are no software interrupts pending */
+        write_vpe_c0_cause(0);
+
+        /* Propagate Config7 */
+        write_vpe_c0_config7(read_c0_config7());
+
+        write_vpe_c0_count(read_c0_count());
+}
+/*--------------------------------------------------------------------------------*\
+\*--------------------------------------------------------------------------------*/
+static unsigned int __init smvp_vpe_init(unsigned int tc, unsigned int mvpconf0, unsigned int ncpu) {
+        if (tc > ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT))
+                return ncpu;
+
+        /* Deactivate all but VPE 0 */
+        if (tc != 0) {
+                unsigned long tmp = read_vpe_c0_vpeconf0();
+
+                tmp &= ~VPECONF0_VPA;
+
+                /* master VPE */
+                tmp |= VPECONF0_MVP;
+                write_vpe_c0_vpeconf0(tmp);
+
+                /* Record this as available CPU */
+                /*--- cpu_set(tc, phys_cpu_present_map); ---*/
+                /*--- __cpu_number_map[tc] = ++ncpu; ---*/
+                /*--- __cpu_logical_map[ncpu] = tc; ---*/
+        }
+
+        /* Disable multi-threading with TC's */
+        write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);
+
+        if (tc != 0)
+                smvp_copy_vpe_config();
+        return ncpu;
+}
+
 static int __init vpe_module_init(void)
 {
         unsigned int mtflags, vpflags;
@@ -1424,19 +1533,27 @@
         }
 
         if (vpelimit == 0) {
+#if 0
                 printk(KERN_WARNING "No VPEs reserved for AP/SP, not "
                        "initializing VPE loader.\nPass maxvpes= argument as "
                        "kernel argument\n");
 
                 return -ENODEV;
+#else
+                vpelimit = 1;
+#endif
         }
 
         if (tclimit == 0) {
+#if 0
                 printk(KERN_WARNING "No TCs reserved for AP/SP, not "
                        "initializing VPE loader.\nPass maxtcs= argument as "
                        "kernel argument\n");
 
                 return -ENODEV;
+#else
+                tclimit = 1;
+#endif
         }
 
         major = register_chrdev(0, module_name, &vpe_fops);
@@ -1469,12 +1586,13 @@
         /* Put MVPE's into 'configuration state' */
         set_c0_mvpcontrol(MVPCONTROL_VPC);
 
-        /* dump_mtregs(); */
+        dump_mtregs();
 
         val = read_c0_mvpconf0();
         hw_tcs = (val & MVPCONF0_PTC) + 1;
         hw_vpes = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
 
+#if 0
         for (tc = tclimit; tc < hw_tcs; tc++) {
                 /*
                  * Must re-enable multithreading temporarily or in case we
@@ -1493,8 +1611,12 @@
                 local_irq_save(flags);
                 mtflags = dmt();
                 vpflags = dvpe();
+                /*--------------------------------------------------------------------------------*\
+                 Set to write to configuration register fields which are otherwise read-
+                 only on conventional MIPS32® CPUs. Unset to make the fields in
+                 the ConfigNN registers read-only.
+                \*--------------------------------------------------------------------------------*/
                 set_c0_mvpcontrol(MVPCONTROL_VPC);
-
                 /* VPE's */
                 if (tc < hw_tcs) {
                         settc(tc);
@@ -1511,14 +1633,22 @@
                         list_add(&t->tc, &v->tc);
 
                         /* deactivate all but vpe0 */
-                        if (tc >= tclimit) {
+                        if (tc > tclimit) {
                                 unsigned long tmp = read_vpe_c0_vpeconf0();
-
+                                /*--------------------------------------------------------------------------------*\
+                                 Virtual Processor Activated: If zero, no TCs bound to this VPE will
+                                 run. It is reset to 1 for VPE0, 0 for others.
+                                \*--------------------------------------------------------------------------------*/
                                 tmp &= ~VPECONF0_VPA;
-
-                                /* master VPE */
+                                /*--------------------------------------------------------------------------------*\
+                                 Master Virtual Processor: If set, registers in different VPEs (or in the
+                                 TCs of different VPE affiliation) are made writable. It also controls
+                                 read/write access to MVPControl (see Section 7.2.2 “MVPControl
+                                 Register (CP0 Register 0, Select 1)”). It is reset to 1 for VPE0, 0 for
+                                 others.
+                                \*--------------------------------------------------------------------------------*/
                                 tmp |= VPECONF0_MVP;
-                                write_vpe_c0_vpeconf0(tmp);
+                                write_vpe_c0_vpeconf0(tmp); /*--- mbahr@avm: fix: auskommentiert sonst bleibt kernel haengen ---*/
                         }
 
                         /* disable multi-threading with TC's */
@@ -1532,7 +1662,6 @@
                                 write_vpe_c0_config(read_c0_config());
                         }
                 }
-
                 /* TC's */
                 t->pvpe = v;    /* set the parent vpe */
 
@@ -1570,6 +1699,45 @@
                         write_tc_c0_tcstatus(tmp);
                 }
         }
+#else
+        {
+                unsigned int mvpconf0 = read_c0_mvpconf0();
+                local_irq_save(flags);
+                mtflags = dmt();
+                vpflags = dvpe();
+
+                /*--------------------------------------------------------------------------------*\
+                 Set to write to configuration register fields which are otherwise read-
+                 only on conventional MIPS32® CPUs. Unset to make the fields in
+                 the ConfigNN registers read-only.
+                \*--------------------------------------------------------------------------------*/
+                set_c0_mvpcontrol(MVPCONTROL_VPC);
+                for (tc = 0; tc < hw_tcs; tc++) {
+                        settc(tc);
+
+                        if(tc) {
+                                t = alloc_tc(tc);
+                                if (!t) {
+                                        err = -ENOMEM;
+                                        goto out;
+                                }
+                                if ((v = alloc_vpe(tc)) == NULL) {
+                                        printk(KERN_WARNING "VPE: unable to allocate VPE\n");
+
+                                        goto out_reenable;
+                                }
+                                v->ntcs = hw_tcs - tclimit;
+                                /* add the tc to the list of this vpe's tc's. */
+                                list_add(&t->tc, &v->tc);
+                                /* TC's */
+                                t->pvpe = v;    /* set the parent vpe */
+                        }
+                        smvp_tc_init(tc, mvpconf0);
+                        smvp_vpe_init(tc, mvpconf0, hw_vpes);
+                }
+        }
+#endif
+        dump_mtregs();
 
 out_reenable:
         /* release config state */
@@ -1597,6 +1765,8 @@
 {
         struct vpe *v, *n;
 
+        device_del(&vpe_device);
+        unregister_chrdev(major, module_name);
         list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list) {
                 if (v->state != VPE_STATE_UNUSED) {
                         release_vpe(v);
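
The central change in the hunks above is the lookup pattern in get_vpe()/get_tc(): instead of returning from the middle of the list_for_each_entry() walk, the match is remembered, the walk is broken, and the new vpe_list_lock/tc_list_lock is released on every path before the result is returned. The following is a minimal userspace sketch of that pattern, not kernel code: the struct tc name and the helper names merely mirror the patch for readability, and a pthread mutex stands in for the spinlock.

/*
 * Locked list lookup: remember the hit, break, unlock once, return.
 * Build with: cc demo.c -lpthread
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct tc {
        int index;
        struct tc *next;
};

static struct tc *tc_list;                              /* head of a singly linked list */
static pthread_mutex_t tc_list_lock = PTHREAD_MUTEX_INITIALIZER;

static struct tc *get_tc(int index)
{
        struct tc *t, *res = NULL;

        pthread_mutex_lock(&tc_list_lock);
        for (t = tc_list; t != NULL; t = t->next) {
                if (t->index == index) {
                        res = t;                        /* remember the hit ...          */
                        break;                          /* ... and leave the walk        */
                }
        }
        pthread_mutex_unlock(&tc_list_lock);            /* single unlock on every path   */

        return res;
}

static void add_tc(int index)
{
        struct tc *t = malloc(sizeof(*t));

        if (!t)
                return;
        t->index = index;
        pthread_mutex_lock(&tc_list_lock);
        t->next = tc_list;                              /* push onto the list under the lock */
        tc_list = t;
        pthread_mutex_unlock(&tc_list_lock);
}

int main(void)
{
        add_tc(0);
        add_tc(1);
        printf("tc 1 %sfound\n", get_tc(1) ? "" : "not ");
        return 0;
}

The point of the res/break form is that there is exactly one unlock site, so adding the lock cannot introduce a path that returns with the lock still held.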
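A second theme is cache maintenance after the loader has copied the VPE program into memory (the flush_icache_range() moved into the relocating branch, plus dma_cache_wback_inv()/flush_icache_range() per PT_LOAD segment): on MIPS, instructions written through the D-cache are not automatically visible to instruction fetch. The fragment below is a hypothetical userspace illustration of the same requirement, MIPS32/o32 only: it writes three instructions into an executable buffer and calls the GCC/Clang builtin __builtin___clear_cache(), which libgcc typically backs with the Linux/MIPS cacheflush() syscall, before jumping to the generated code.

/*
 * Illustrative only: copy code into memory, flush caches, then execute it.
 * The instruction words are MIPS32; running this on another architecture
 * will not work.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
        /* addiu $v0, $zero, 42 ; jr $ra ; nop   -- i.e. "return 42" */
        static const uint32_t insns[] = { 0x2402002a, 0x03e00008, 0x00000000 };

        void *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (buf == MAP_FAILED)
                return 1;

        memcpy(buf, insns, sizeof(insns));              /* like the loader's memcpy() of PT_LOAD data */

        /* Userspace counterpart of the kernel's flush_icache_range() for this buffer. */
        __builtin___clear_cache((char *)buf, (char *)buf + sizeof(insns));

        int (*fn)(void) = (int (*)(void))buf;           /* only now is it safe to execute */
        printf("generated code returned %d\n", fn());
        return 0;
}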