--- zzzz-none-000/linux-4.4.271/mm/kasan/kasan.c	2021-06-03 06:22:09.000000000 +0000
+++ hawkeye-5590-750/linux-4.4.271/mm/kasan/kasan.c	2023-04-19 10:22:30.000000000 +0000
@@ -33,6 +33,8 @@
 #include
 #include
 
+#include
+
 #include "kasan.h"
 #include "../slab.h"
 
@@ -233,6 +235,20 @@
 static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
 {
 	if (__builtin_constant_p(size)) {
+		if (IS_ENABLED(CONFIG_ARM)) { /* avoid alignment faults. */
+			switch (size) {
+			case 1:
+			case 2:
+			case 4:
+			case 8:
+				return memory_is_poisoned_1(addr);
+			case 16:
+				return memory_is_poisoned_1(addr)
+					|| memory_is_poisoned_1(addr + 8);
+			default:
+				BUILD_BUG();
+			}
+		}
 		switch (size) {
 		case 1:
 			return memory_is_poisoned_1(addr);
@@ -447,6 +463,18 @@
 	size_t shadow_size;
 	unsigned long shadow_start;
 
+	/* If size < KASAN_SHADOW_SCALE_SIZE, then
+	 * size >> KASAN_SHADOW_SCALE_SHIFT becomes ZERO, and with this
+	 * shadow_size = round_up(0, PAGE_SIZE) gives ZERO as well.
+	 * Hence we need to make sure that size >> KASAN_SHADOW_SCALE_SHIFT
+	 * is a non-zero value: if size is less than
+	 * KASAN_SHADOW_SCALE_SIZE, set size to KASAN_SHADOW_SCALE_SIZE, so
+	 * that round_up(size >> KASAN_SHADOW_SCALE_SHIFT, PAGE_SIZE)
+	 * gives a non-zero value.
+	 */
+	if (size < KASAN_SHADOW_SCALE_SIZE)
+		size = KASAN_SHADOW_SCALE_SIZE;
+
 	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
 	scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
 	shadow_size = round_up(scaled_size, PAGE_SIZE);
@@ -478,6 +506,13 @@
 static void register_global(struct kasan_global *global)
 {
 	size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);
+	/*
+	 * Currently we do not allocate shadow for the vmalloc area.
+	 * Skip globals that live in modules in the vmalloc area.
+	 */
+	if ((unsigned long)global->beg >= VMALLOC_START
+	    && (unsigned long)global->beg < VMALLOC_END)
+		return;
 
 	kasan_unpoison_shadow(global->beg, global->size);
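
Note on the kasan_module_alloc() hunk above: the clamp guarantees that at least one shadow page is requested even when the allocation is smaller than one shadow granule. The stand-alone sketch below mirrors that rounding arithmetic in userspace under assumed values (KASAN_SHADOW_SCALE_SHIFT = 3 as in upstream KASAN, PAGE_SIZE = 4096); round_up_pow2() is a local stand-in for the kernel's round_up() helper, so this is an illustration of the failure mode the patch comment describes, not kernel code.

/* Illustrative only: reproduces the shadow-size rounding described in the
 * kasan_module_alloc() comment.  Macro values are assumptions, not taken
 * from the patched tree.
 */
#include <stdio.h>

#define KASAN_SHADOW_SCALE_SHIFT 3
#define KASAN_SHADOW_SCALE_SIZE  (1UL << KASAN_SHADOW_SCALE_SHIFT) /* 8 */
#define PAGE_SIZE                4096UL

/* Userspace stand-in for the kernel's round_up() for power-of-two steps. */
static unsigned long round_up_pow2(unsigned long x, unsigned long step)
{
	return (x + step - 1) & ~(step - 1);
}

static unsigned long shadow_size_for(unsigned long size)
{
	/* The guard added by the patch: never let the scaled size hit zero. */
	if (size < KASAN_SHADOW_SCALE_SIZE)
		size = KASAN_SHADOW_SCALE_SIZE;

	return round_up_pow2(size >> KASAN_SHADOW_SCALE_SHIFT, PAGE_SIZE);
}

int main(void)
{
	unsigned long size = 4;	/* smaller than one shadow granule (8 bytes) */

	/* Without the clamp: 4 >> 3 == 0, and round_up(0, 4096) == 0,
	 * i.e. no shadow memory would be mapped for the allocation.
	 */
	printf("unclamped: %lu\n",
	       round_up_pow2(size >> KASAN_SHADOW_SCALE_SHIFT, PAGE_SIZE));

	/* With the clamp: 8 >> 3 == 1, and round_up(1, 4096) == 4096,
	 * so at least one shadow page is always requested.
	 */
	printf("clamped:   %lu\n", shadow_size_for(size));
	return 0;
}

Compiled and run, the sketch prints 0 for the unclamped expression and 4096 with the clamp, which is exactly the zero-sized shadow mapping the added check prevents.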