--- zzzz-none-000/linux-2.6.32.61/arch/x86/xen/xen-asm_32.S	2013-06-10 09:43:48.000000000 +0000
+++ virian-300e-630/linux-2.6.32.61/arch/x86/xen/xen-asm_32.S	2013-02-11 15:10:47.000000000 +0000
@@ -88,11 +88,11 @@
 	 */
 #ifdef CONFIG_SMP
 	GET_THREAD_INFO(%eax)
-	movl %ss:TI_cpu(%eax), %eax
-	movl %ss:__per_cpu_offset(,%eax,4), %eax
-	mov %ss:per_cpu__xen_vcpu(%eax), %eax
+	movl TI_cpu(%eax), %eax
+	movl __per_cpu_offset(,%eax,4), %eax
+	mov per_cpu__xen_vcpu(%eax), %eax
 #else
-	movl %ss:per_cpu__xen_vcpu, %eax
+	movl per_cpu__xen_vcpu, %eax
 #endif
 
 	/* check IF state we're restoring */
@@ -105,11 +105,11 @@
 	 * resuming the code, so we don't have to be worried about
 	 * being preempted to another CPU.
 	 */
-	setz %ss:XEN_vcpu_info_mask(%eax)
+	setz XEN_vcpu_info_mask(%eax)
 xen_iret_start_crit:
 
 	/* check for unmasked and pending */
-	cmpw $0x0001, %ss:XEN_vcpu_info_pending(%eax)
+	cmpw $0x0001, XEN_vcpu_info_pending(%eax)
 
 	/*
 	 * If there's something pending, mask events again so we can
@@ -117,7 +117,7 @@
 	 * touch XEN_vcpu_info_mask.
 	 */
 	jne 1f
-	movb $1, %ss:XEN_vcpu_info_mask(%eax)
+	movb $1, XEN_vcpu_info_mask(%eax)
 
 1:	popl %eax