a fast and comprehensive solution for finding use-after-free and out-of-bounds
bugs.
KASan uses compile-time instrumentation for checking every memory access,
-therefore you will need a certain version of GCC >= 4.9.2
+therefore you will need a certain version of GCC > 4.9.2
Currently KASan is supported only for x86_64 architecture and requires that the
kernel be built with the SLUB allocator.
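
For reference, a configuration satisfying these constraints could contain the
fragment below (a sketch; other KASan-related options such as the shadow
offset have Kconfig defaults and are picked up automatically):

  CONFIG_SLUB=y
  CONFIG_SLUB_DEBUG=y
  CONFIG_KASAN=y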
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
+#include <linux/kasan.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/gfp.h>
void *module_alloc(unsigned long size)
{
+ void *p;
+
if (PAGE_ALIGN(size) > MODULES_LEN)
return NULL;
- return __vmalloc_node_range(size, 1,
+
+ p = __vmalloc_node_range(size, MODULE_ALIGN,
MODULES_VADDR + get_module_load_offset(),
MODULES_END, GFP_KERNEL | __GFP_HIGHMEM,
PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
__builtin_return_address(0));
+ if (p && (kasan_module_alloc(p, size) < 0)) {
+ vfree(p);
+ return NULL;
+ }
+
+ return p;
}
#ifdef CONFIG_X86_32
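
Raising the allocation alignment from 1 to MODULE_ALIGN is what keeps the
WARN_ON() in kasan_module_alloc() (shown further below) from firing: with one
shadow byte covering eight bytes of memory, a module base aligned to
PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT maps to a page-aligned shadow address.
A small userspace sketch of that arithmetic (the shadow offset is the x86_64
value; the sample address is illustrative):

#include <assert.h>
#include <stdio.h>

#define KASAN_SHADOW_SCALE_SHIFT 3
#define PAGE_SIZE 4096UL
#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT) /* 32 KB */
#define KASAN_SHADOW_OFFSET 0xdffffc0000000000UL /* x86_64 value */

static unsigned long mem_to_shadow(unsigned long addr)
{
	/* one shadow byte covers 2^KASAN_SHADOW_SCALE_SHIFT == 8 bytes */
	return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
}

int main(void)
{
	unsigned long mod = 0xffffffffa0000000UL; /* MODULE_ALIGN-aligned */

	assert(mod % MODULE_ALIGN == 0);
	/* 32 KB alignment divided by 8 yields 4 KB alignment, so the
	 * shadow start is page-aligned and PAGE_ALIGNED() holds. */
	assert(mem_to_shadow(mod) % PAGE_SIZE == 0);
	printf("shadow of %#lx starts at %#lx\n", mod, mem_to_shadow(mod));
	return 0;
}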
	vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext),
			(unsigned long)kasan_mem_to_shadow(_end),
			NUMA_NO_NODE);
- populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_VADDR),
+ populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
(void *)KASAN_SHADOW_END);
memset(kasan_zero_page, 0, PAGE_SIZE);
#define __HAVE_BUILTIN_BSWAP16__
#endif
#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
+
+#if GCC_VERSION >= 40902
+#define KASAN_ABI_VERSION 3
+#endif
#define __HAVE_BUILTIN_BSWAP64__
#define __HAVE_BUILTIN_BSWAP16__
#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
+
+#define KASAN_ABI_VERSION 4
void kasan_slab_alloc(struct kmem_cache *s, void *object);
void kasan_slab_free(struct kmem_cache *s, void *object);
+#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
+
+int kasan_module_alloc(void *addr, size_t size);
+void kasan_module_free(void *addr);
+
#else /* CONFIG_KASAN */
+#define MODULE_ALIGN 1
+
static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
static inline void kasan_enable_current(void) {}
static inline void kasan_slab_alloc(struct kmem_cache *s, void *object) {}
static inline void kasan_slab_free(struct kmem_cache *s, void *object) {}
+static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
+static inline void kasan_module_free(void *addr) {}
+
#endif /* CONFIG_KASAN */
#endif /* LINUX_KASAN_H */
#include <linux/async.h>
#include <linux/percpu.h>
#include <linux/kmemleak.h>
+#include <linux/kasan.h>
#include <linux/jump_label.h>
#include <linux/pfn.h>
#include <linux/bsearch.h>
void __weak module_memfree(void *module_region)
{
vfree(module_region);
+ kasan_module_free(module_region);
}
void __weak module_arch_cleanup(struct module *mod)
config KASAN
bool "KASan: runtime memory debugger"
depends on SLUB_DEBUG
+ select CONSTRUCTORS
help
Enables kernel address sanitizer - runtime memory debugger,
designed to find out-of-bounds accesses and use-after-free bugs.
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
+#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
		kasan_poison_shadow(page_address(page),
				PAGE_SIZE << order,
				KASAN_FREE_PAGE);
}
+int kasan_module_alloc(void *addr, size_t size)
+{
+ void *ret;
+ size_t shadow_size;
+ unsigned long shadow_start;
+
+ shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
+ shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
+ PAGE_SIZE);
+
+ if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
+ return -EINVAL;
+
+ ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
+ shadow_start + shadow_size,
+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
+ PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
+ __builtin_return_address(0));
+ return ret ? 0 : -ENOMEM;
+}
+
+void kasan_module_free(void *addr)
+{
+ vfree(kasan_mem_to_shadow(addr));
+}
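
Since one shadow byte describes eight bytes of memory, the shadow mapping
costs an eighth of the module's size, rounded up to whole pages. A worked
example of the sizing (userspace sketch, assuming 4 KB pages):

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define KASAN_SHADOW_SCALE_SHIFT 3

/* same computation as the kernel's round_up() */
static unsigned long round_up_to(unsigned long x, unsigned long align)
{
	return (x + align - 1) & ~(align - 1);
}

int main(void)
{
	unsigned long size = 100 * 1024; /* a 100 KB module */
	unsigned long shadow_size =
		round_up_to(size >> KASAN_SHADOW_SCALE_SHIFT, PAGE_SIZE);

	/* 100 KB / 8 = 12.5 KB of shadow, padded to 16 KB (4 pages) */
	printf("%lu bytes of shadow (%lu pages)\n",
	       shadow_size, shadow_size / PAGE_SIZE);
	return 0;
}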
+
+static void register_global(struct kasan_global *global)
+{
+ size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);
+
+ kasan_unpoison_shadow(global->beg, global->size);
+
+ kasan_poison_shadow(global->beg + aligned_size,
+ global->size_with_redzone - aligned_size,
+ KASAN_GLOBAL_REDZONE);
+}
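
To make the poisoning concrete, consider a hypothetical four-byte global
(all sizes below are illustrative; the compiler picks size_with_redzone):

  /* int g;  ->  .size = 4, .size_with_redzone = 64
   *
   * [g+0,  g+4)   accessible; a partial shadow byte records "4 bytes live"
   * [g+4,  g+8)   rejected via the same partial shadow byte
   * [g+8,  g+64)  rejected: shadow filled with KASAN_GLOBAL_REDZONE (0xFA)
   */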
+
+void __asan_register_globals(struct kasan_global *globals, size_t size)
+{
+ int i;
+
+ for (i = 0; i < size; i++)
+ register_global(&globals[i]);
+}
+EXPORT_SYMBOL(__asan_register_globals);
+
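+/*
+ * Intentionally a no-op: a module's globals stop being reachable when
+ * the module is unloaded, and the shadow that backs them is released
+ * wholesale by kasan_module_free().
+ */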
+void __asan_unregister_globals(struct kasan_global *globals, size_t size)
+{
+}
+EXPORT_SYMBOL(__asan_unregister_globals);
+
#define DEFINE_ASAN_LOAD_STORE(size) \
void __asan_load##size(unsigned long addr) \
{ \
#define KASAN_PAGE_REDZONE 0xFE /* redzone for kmalloc_large allocations */
#define KASAN_KMALLOC_REDZONE 0xFC /* redzone inside slub object */
#define KASAN_KMALLOC_FREE 0xFB /* object was freed (kmem_cache_free/kfree) */
+#define KASAN_GLOBAL_REDZONE 0xFA /* redzone for global variable */
/*
 * Stack redzone shadow values
 * (Those are compiler's ABI, don't change them)
 */
#define KASAN_STACK_RIGHT 0xF3
#define KASAN_STACK_PARTIAL 0xF4
+/* Don't break randconfig/all*config builds */
+#ifndef KASAN_ABI_VERSION
+#define KASAN_ABI_VERSION 1
+#endif
struct kasan_access_info {
	const void *access_addr;
	size_t access_size;
	bool is_write;
	unsigned long ip;
};
+/* The layout of this struct is dictated by the compiler */
+struct kasan_source_location {
+ const char *filename;
+ int line_no;
+ int column_no;
+};
+
+/* The layout of this struct is dictated by the compiler */
+struct kasan_global {
+ const void *beg; /* Address of the beginning of the global variable. */
+ size_t size; /* Size of the global variable. */
+ size_t size_with_redzone; /* Size of the variable plus its redzone, 32-byte aligned. */
+ const void *name;
+ const void *module_name; /* Name of the module where the global variable is declared. */
+ unsigned long has_dynamic_init; /* This is needed for C++. */
+#if KASAN_ABI_VERSION >= 4
+ struct kasan_source_location *location;
+#endif
+};
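
The compiler materializes one such record per instrumented global and
registers the whole table from a per-object-file constructor at boot (or at
module load), which is why CONFIG_KASAN selects CONSTRUCTORS in the Kconfig
hunk above. A userspace sketch of what GCC effectively emits (the struct is
mirrored from above for ABI versions < 4; all symbol names here are invented,
the real ones are compiler-internal):

#include <stddef.h>
#include <stdio.h>

struct kasan_global { /* mirror of the kernel struct */
	const void *beg;
	size_t size;
	size_t size_with_redzone;
	const void *name;
	const void *module_name;
	unsigned long has_dynamic_init;
};

static int g; /* the global being instrumented */

/* stand-in for the kernel's __asan_register_globals() */
static void register_globals(struct kasan_global *globals, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		printf("registering %s from %s (%zu/%zu bytes)\n",
		       (const char *)globals[i].name,
		       (const char *)globals[i].module_name,
		       globals[i].size, globals[i].size_with_redzone);
}

/* the table and constructor emitted for this translation unit */
static struct kasan_global tu_globals[] = {
	{ &g, sizeof(g), 64, "g", "example.c", 0 },
};

static void __attribute__((constructor)) asan_ctor(void)
{
	register_globals(tu_globals,
			 sizeof(tu_globals) / sizeof(tu_globals[0]));
}

int main(void)
{
	return g;
}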
+
void kasan_report_error(struct kasan_access_info *info);
void kasan_report_user_access(struct kasan_access_info *info);
#include <linux/types.h>
#include <linux/kasan.h>
+#include <asm/sections.h>
+
#include "kasan.h"
#include "../slab.h"
break;
case KASAN_PAGE_REDZONE:
case KASAN_KMALLOC_REDZONE:
+ case KASAN_GLOBAL_REDZONE:
case 0 ... KASAN_SHADOW_SCALE_SIZE - 1:
bug_type = "out of bounds access";
break;
info->access_size, current->comm, task_pid_nr(current));
}
+static inline bool kernel_or_module_addr(const void *addr)
+{
+ return (addr >= (void *)_stext && addr < (void *)_end)
+ || (addr >= (void *)MODULES_VADDR
+ && addr < (void *)MODULES_END);
+}
+
+static inline bool init_task_stack_addr(const void *addr)
+{
+ return addr >= (void *)&init_thread_union.stack &&
+ (addr <= (void *)&init_thread_union.stack +
+ sizeof(init_thread_union.stack));
+}
+
static void print_address_description(struct kasan_access_info *info)
{
const void *addr = info->access_addr;
dump_page(page, "kasan: bad access detected");
}
+ if (kernel_or_module_addr(addr)) {
+ if (!init_task_stack_addr(addr))
+ pr_err("Address belongs to variable %pS\n", addr);
+ }
+
dump_stack();
}
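
With these checks a report for a bad access to a kernel or module address
gains an attribution line; printed through %pS it comes out in
symbol+offset/size form, e.g. (symbol and numbers invented for illustration):

  Address belongs to variable my_global_array+0x24/0x40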
CFLAGS_KASAN := $(call cc-option, -fsanitize=kernel-address \
-fasan-shadow-offset=$(CONFIG_KASAN_SHADOW_OFFSET) \
- --param asan-stack=1 \
+ --param asan-stack=1 --param asan-globals=1 \
--param asan-instrumentation-with-call-threshold=$(call_threshold))
ifeq ($(call cc-option, $(CFLAGS_KASAN_MINIMAL) -Werror),)