cregit-Linux how code gets into the kernel

Release 4.8 mm/kasan/kasan.c

Directory: mm/kasan
/*
 * This file contains shadow memory manipulation code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <adech.fo@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */


#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DISABLE_BRANCH_PROFILING

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

#include "kasan.h"
#include "../slab.h"

/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */

/*
 * Write @value into every shadow byte covering [address, address + size).
 * The caller must hand in a range aligned to KASAN_SHADOW_SCALE_SIZE so
 * the start and end both map exactly onto shadow byte boundaries.
 */
static void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
	void *start = kasan_mem_to_shadow(address);
	void *end = kasan_mem_to_shadow(address + size);

	memset(start, value, end - start);
}

Contributors

PersonTokensPropCommitsCommitProp
andrey ryabininandrey ryabinin51100.00%1100.00%
Total51100.00%1100.00%


/*
 * Mark [address, address + size) as accessible.  Whole granules get a
 * zero shadow byte; a trailing partial granule stores the count of its
 * accessible bytes in the shadow instead.
 */
void kasan_unpoison_shadow(const void *address, size_t size)
{
	u8 tail = size & KASAN_SHADOW_MASK;

	kasan_poison_shadow(address, size, 0);

	if (tail)
		*(u8 *)kasan_mem_to_shadow(address + size) = tail;
}

Contributors

PersonTokensPropCommitsCommitProp
andrey ryabininandrey ryabinin52100.00%1100.00%
Total52100.00%1100.00%


/* Unpoison the task's stack from its base page up to (not including) @sp. */
static void __kasan_unpoison_stack(struct task_struct *task, void *sp)
{
	void *stack_base = task_stack_page(task);

	kasan_unpoison_shadow(stack_base, sp - stack_base);
}

Contributors

PersonTokensPropCommitsCommitProp
mark rutlandmark rutland38100.00%1100.00%
Total38100.00%1100.00%

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	/* THREAD_SIZE past the stack page base is the end of the stack. */
	void *stack_end = task_stack_page(task) + THREAD_SIZE;

	__kasan_unpoison_stack(task, stack_end);
}

Contributors

PersonTokensPropCommitsCommitProp
mark rutlandmark rutland22100.00%1100.00%
Total22100.00%1100.00%

/* Unpoison the stack for the current task beyond a watermark sp value. */
/* Called from entry assembly, hence asmlinkage; unpoisons up to @sp. */
asmlinkage void kasan_unpoison_remaining_stack(void *sp)
{
	__kasan_unpoison_stack(current, sp);
}

Contributors

PersonTokensPropCommitsCommitProp
mark rutlandmark rutland17100.00%1100.00%
Total17100.00%1100.00%

/*
 * All functions below always inlined so compiler could
 * perform better optimizations in each of __asan_loadX/__asan_storeX
 * depending on memory access size X.
 */
/*
 * Check a 1-byte access.  A zero shadow byte means the whole granule is
 * accessible; otherwise the shadow byte holds the number of accessible
 * leading bytes (or a negative poison value), and the access is bad when
 * the in-granule offset of @addr reaches that count.
 */
static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
	s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);

	if (likely(!shadow_value))
		return false;

	return unlikely((s8)(addr & KASAN_SHADOW_MASK) >= shadow_value);
}

Contributors

PersonTokensPropCommitsCommitProp
andrey ryabininandrey ryabinin55100.00%1100.00%
Total55100.00%1100.00%


/* Check a 2-byte access: both shadow bytes it may touch must be clean. */
static __always_inline bool memory_is_poisoned_2(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (likely(!*shadow_addr))
		return false;

	if (memory_is_poisoned_1(addr + 1))
		return true;

	/*
	 * If single shadow byte covers 2-byte access, we don't
	 * need to do anything more. Otherwise, test the first
	 * shadow byte.
	 */
	if (likely(((addr + 1) & KASAN_SHADOW_MASK) != 0))
		return false;

	return unlikely(*(u8 *)shadow_addr);
}

Contributors

PersonTokensPropCommitsCommitProp
andrey ryabininandrey ryabinin8498.82%150.00%
xishi qiuxishi qiu11.18%150.00%
Total85100.00%2100.00%


/* Check a 4-byte access; mirrors memory_is_poisoned_2() with a 3-byte span. */
static __always_inline bool memory_is_poisoned_4(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (likely(!*shadow_addr))
		return false;

	if (memory_is_poisoned_1(addr + 3))
		return true;

	/*
	 * If single shadow byte covers 4-byte access, we don't
	 * need to do anything more. Otherwise, test the first
	 * shadow byte.
	 */
	if (likely(((addr + 3) & KASAN_SHADOW_MASK) >= 3))
		return false;

	return unlikely(*(u8 *)shadow_addr);
}

Contributors

PersonTokensPropCommitsCommitProp
andrey ryabininandrey ryabinin8498.82%150.00%
xishi qiuxishi qiu11.18%150.00%
Total85100.00%2100.00%


/* Check an 8-byte access. */
static __always_inline bool memory_is_poisoned_8(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (likely(!*shadow_addr))
		return false;

	if (memory_is_poisoned_1(addr + 7))
		return true;

	/*
	 * If single shadow byte covers 8-byte access, we don't
	 * need to do anything more. Otherwise, test the first
	 * shadow byte.
	 */
	if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
		return false;

	return unlikely(*(u8 *)shadow_addr);
}

Contributors

PersonTokensPropCommitsCommitProp
andrey ryabininandrey ryabinin7492.50%150.00%
xishi qiuxishi qiu67.50%150.00%
Total80100.00%2100.00%


/* Check a 16-byte access: up to three shadow bytes may be involved. */
static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
	u32 *shadow_addr = (u32 *)kasan_mem_to_shadow((void *)addr);

	if (likely(!*shadow_addr))
		return false;

	/* The first two shadow bytes must both be clean. */
	if (unlikely(*(u16 *)shadow_addr))
		return true;

	/*
	 * If two shadow bytes covers 16-byte access, we don't
	 * need to do anything more. Otherwise, test the last
	 * shadow byte.
	 */
	if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
		return false;

	return memory_is_poisoned_1(addr + 15);
}

Contributors

PersonTokensPropCommitsCommitProp
andrey ryabininandrey ryabinin7891.76%133.33%
xishi qiuxishi qiu78.24%266.67%
Total85100.00%3100.00%


/*
 * Scan @size bytes from @start; return the address of the first nonzero
 * byte as an unsigned long, or 0 when the whole range is zero.
 */
static __always_inline unsigned long bytes_is_zero(const u8 *start,
					size_t size)
{
	const u8 *p = start;
	const u8 *end = start + size;

	for (; p < end; p++) {
		if (unlikely(*p))
			return (unsigned long)p;
	}

	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
andrey ryabininandrey ryabinin46100.00%1100.00%
Total46100.00%1100.00%


/*
 * Scan [start, end) for a nonzero byte, a u64 word at a time where
 * possible.  Returns the address of the first nonzero byte (located via
 * bytes_is_zero() within the offending word), or 0 if the range is all
 * zero.
 */
static __always_inline unsigned long memory_is_zero(const void *start,
					const void *end)
{
	unsigned int words;
	unsigned long ret;
	unsigned int prefix = (unsigned long)start % 8;

	/* Short ranges are not worth the word-wise setup. */
	if (end - start <= 16)
		return bytes_is_zero(start, end - start);

	/* Byte-check the unaligned prefix so @start becomes 8-aligned. */
	if (prefix) {
		prefix = 8 - prefix;
		ret = bytes_is_zero(start, prefix);
		if (unlikely(ret))
			return ret;
		start += prefix;
	}

	/* Whole words. */
	words = (end - start) / 8;
	while (words) {
		if (unlikely(*(u64 *)start))
			return bytes_is_zero(start, 8);
		start += 8;
		words--;
	}

	/* Trailing bytes that did not fill a whole word. */
	return bytes_is_zero(start, (end - start) % 8);
}

Contributors

PersonTokensPropCommitsCommitProp
andrey ryabininandrey ryabinin148100.00%1100.00%
Total148100.00%1100.00%


/*
 * Check an arbitrary-size access [addr, addr + size).  All shadow bytes
 * for the range must be zero, except possibly the last one, which may
 * legitimately describe a partially accessible granule and is validated
 * separately.
 */
static __always_inline bool memory_is_poisoned_n(unsigned long addr,
						size_t size)
{
	unsigned long ret;

	ret = memory_is_zero(kasan_mem_to_shadow((void *)addr),
			kasan_mem_to_shadow((void *)addr + size - 1) + 1);

	if (unlikely(ret)) {
		unsigned long last_byte = addr + size - 1;
		s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

		/*
		 * Poisoned unless the only nonzero shadow byte is the one
		 * for the final granule and it still covers the last
		 * accessed byte.
		 */
		if (unlikely(ret != (unsigned long)last_shadow ||
			((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
			return true;
	}
	return false;
}

Contributors

PersonTokensPropCommitsCommitProp
andrey ryabininandrey ryabinin11195.69%150.00%
wang longwang long54.31%150.00%
Total116100.00%2100.00%


/*
 * Dispatch a poison check: compile-time-constant sizes get a specialized
 * fast-path checker, everything else goes through the generic scanner.
 */
static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
	if (!__builtin_constant_p(size))
		return memory_is_poisoned_n(addr, size);

	switch (size) {
	case 1:
		return memory_is_poisoned_1(addr);
	case 2:
		return memory_is_poisoned_2(addr);
	case 4:
		return memory_is_poisoned_4(addr);
	case 8:
		return memory_is_poisoned_8(addr);
	case 16:
		return memory_is_poisoned_16(addr);
	default:
		/* Only the sizes above may be emitted by instrumentation. */
		BUILD_BUG();
	}

	return memory_is_poisoned_n(addr, size);
}

Contributors

PersonTokensPropCommitsCommitProp
andrey ryabininandrey ryabinin86100.00%1100.00%
Total86100.00%1100.00%


/*
 * Core access check: report accesses below the shadowed address range or
 * into poisoned memory.  Zero-size accesses are ignored.
 */
static __always_inline void check_memory_region_inline(unsigned long addr,
						size_t size, bool write,
						unsigned long ret_ip)
{
	if (unlikely(size == 0))
		return;

	/* Addresses before the shadowed range have no shadow to consult. */
	if (unlikely((void *)addr <
		kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
		kasan_report(addr, size, write, ret_ip);
		return;
	}

	if (unlikely(memory_is_poisoned(addr, size)))
		kasan_report(addr, size, write, ret_ip);
}

Contributors

PersonTokensPropCommitsCommitProp
andrey ryabininandrey ryabinin8594.44%266.67%
andrey konovalovandrey konovalov55.56%133.33%
Total90100.00%3100.00%


/* Out-of-line wrapper around check_memory_region_inline(). */
static void check_memory_region(unsigned long addr,
				size_t size, bool write,
				unsigned long ret_ip)
{
	check_memory_region_inline(addr, size, write, ret_ip);
}

Contributors

PersonTokensPropCommitsCommitProp
andrey ryabininandrey ryabinin31100.00%2100.00%
Total31100.00%2100.00%


/* Explicitly check a @size-byte read at @p (exported below). */
void kasan_check_read(const void *p, unsigned int size)
{
	check_memory_region((unsigned long)p, size, false, _RET_IP_);
}

Contributors

PersonTokensPropCommitsCommitProp
andrey ryabininandrey ryabinin29100.00%1100.00%
Total29100.00%1100.00%

EXPORT_SYMBOL(kasan_check_read);
/* Explicitly check a @size-byte write at @p (exported below). */
void kasan_check_write(const void *p, unsigned int size)
{
	check_memory_region((unsigned long)p, size, true, _RET_IP_);
}

Contributors

PersonTokensPropCommitsCommitProp
andrey ryabininandrey ryabinin29100.00%1100.00%
Total29100.00%1100.00%

EXPORT_SYMBOL(kasan_check_write); #undef memset
/*
 * Interposed memset(): validate the destination as a @len-byte write,
 * then delegate to the uninstrumented __memset().
 */
void *memset(void *addr, int c, size_t len)
{
	check_memory_region((unsigned long)addr, len, true, _RET_IP_);

	return __memset(addr, c, len);
}

Contributors

PersonTokensPropCommitsCommitProp
andrey ryabininandrey ryabinin41100.00%2100.00%
Total41100.00%2100.00%

#undef memmove
/*
 * Interposed memmove(): validate @src as a read and @dest as a write,
 * then delegate to the uninstrumented __memmove().
 */
void *memmove(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memmove(dest, src, len);
}

Contributors

PersonTokensPropCommitsCommitProp
andrey ryabininandrey ryabinin58100.00%2100.00%
Total58100.00%2100.00%

#undef memcpy
/*
 * Interposed memcpy(): validate @src as a read and @dest as a write,
 * then delegate to the uninstrumented __memcpy().
 */
void *memcpy(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memcpy(dest, src, len);
}

Contributors

PersonTokensPropCommitsCommitProp
andrey ryabininandrey ryabinin58100.00%2100.00%
Total58100.00%2100.00%


/* Page allocation hook: make the freshly allocated pages accessible. */
void kasan_alloc_pages(struct page *page, unsigned int order)
{
	/* Highmem pages are skipped. */
	if (unlikely(PageHighMem(page)))
		return;

	kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

Contributors

PersonTokensPropCommitsCommitProp
andrey ryabininandrey ryabinin37100.00%1100.00%
Total37100.00%1100.00%


/* Page free hook: poison the freed pages with KASAN_FREE_PAGE. */
void kasan_free_pages(struct page *page, unsigned int order)
{
	/* Highmem pages are skipped. */
	if (unlikely(PageHighMem(page)))
		return;

	kasan_poison_shadow(page_address(page), PAGE_SIZE << order,
			KASAN_FREE_PAGE);
}

Contributors

PersonTokensPropCommitsCommitProp
andrey ryabininandrey ryabinin39100.00%1100.00%
Total39100.00%1100.00%

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
/*
 * Map an object size to its redzone size via a threshold table: the
 * first entry whose cap is not exceeded wins; anything larger than the
 * last cap gets the maximum 2048-byte redzone.
 */
static size_t optimal_redzone(size_t object_size)
{
	static const struct {
		size_t cap;
		size_t rz;
	} policy[] = {
		{ 64 - 16,          16 },
		{ 128 - 32,         32 },
		{ 512 - 64,         64 },
		{ 4096 - 128,       128 },
		{ (1 << 14) - 256,  256 },
		{ (1 << 15) - 512,  512 },
		{ (1 << 16) - 1024, 1024 },
	};
	size_t i;

	for (i = 0; i < sizeof(policy) / sizeof(policy[0]); i++) {
		if (object_size <= policy[i].cap)
			return policy[i].rz;
	}

	return 2048;
}

Contributors

PersonTokensPropCommitsCommitProp
alexander potapenkoalexander potapenko85100.00%1100.00%
Total85100.00%1100.00%


/*
 * Grow *size to make room for KASAN metadata (alloc info, optionally
 * free info) plus a size-adaptive redzone, and set SLAB_KASAN in
 * *flags.  If the metadata offsets would not fit within
 * KMALLOC_MAX_SIZE, *size is restored and the cache stays untracked.
 */
void kasan_cache_create(struct kmem_cache *cache, size_t *size,
			unsigned long *flags)
{
	int redzone_adjust;
	int orig_size = *size;

	/* Add alloc meta. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/*
	 * Add free meta.  NOTE(review): presumably the object payload
	 * itself cannot host the free metadata for RCU caches, caches
	 * with constructors, or objects smaller than the record — confirm.
	 */
	if (cache->flags & SLAB_DESTROY_BY_RCU || cache->ctor ||
	    cache->object_size < sizeof(struct kasan_free_meta)) {
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
	}

	/* Top the size up so the full optimal redzone fits. */
	redzone_adjust = optimal_redzone(cache->object_size) -
			(*size - cache->object_size);
	if (redzone_adjust > 0)
		*size += redzone_adjust;
	*size = min(KMALLOC_MAX_SIZE,
		    max(*size, cache->object_size +
				optimal_redzone(cache->object_size)));

	/*
	 * If the metadata doesn't fit, don't enable KASAN at all.
	 */
	if (*size <= cache->kasan_info.alloc_meta_offset ||
	    *size <= cache->kasan_info.free_meta_offset) {
		cache->kasan_info.alloc_meta_offset = 0;
		cache->kasan_info.free_meta_offset = 0;
		*size = orig_size;
		return;
	}

	*flags |= SLAB_KASAN;
}

Contributors

PersonTokensPropCommitsCommitProp
alexander potapenkoalexander potapenko194100.00%2100.00%
Total194100.00%2100.00%


/* Cache shrink hook: evict this cache's objects from the quarantine. */
void kasan_cache_shrink(struct kmem_cache *cache)
{
	quarantine_remove_cache(cache);
}

Contributors

PersonTokensPropCommitsCommitProp
alexander potapenkoalexander potapenko15100.00%1100.00%
Total15100.00%1100.00%


/* Cache destroy hook: evict this cache's objects from the quarantine. */
void kasan_cache_destroy(struct kmem_cache *cache)
{
	quarantine_remove_cache(cache);
}

Contributors

PersonTokensPropCommitsCommitProp
alexander potapenkoalexander potapenko15100.00%1100.00%
Total15100.00%1100.00%


/*
 * Total bytes of KASAN metadata stored per object in @cache: each record
 * counts only when its offset is nonzero (i.e. it was reserved by
 * kasan_cache_create()).
 */
size_t kasan_metadata_size(struct kmem_cache *cache)
{
	size_t total = 0;

	if (cache->kasan_info.alloc_meta_offset)
		total += sizeof(struct kasan_alloc_meta);
	if (cache->kasan_info.free_meta_offset)
		total += sizeof(struct kasan_free_meta);

	return total;
}

Contributors

PersonTokensPropCommitsCommitProp
alexander potapenkoalexander potapenko43100.00%1100.00%
Total43100.00%1100.00%


/* Poison an entire (possibly compound) slab page as redzone. */
void kasan_poison_slab(struct page *page)
{
	unsigned long len = PAGE_SIZE << compound_order(page);

	kasan_poison_shadow(page_address(page), len, KASAN_KMALLOC_REDZONE);
}

Contributors

PersonTokensPropCommitsCommitProp
andrey ryabininandrey ryabinin27100.00%1100.00%
Total27100.00%1100.00%


/* Make a slab object's payload (object_size bytes) accessible. */
void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison_shadow(object, cache->object_size);
}

Contributors

PersonTokensPropCommitsCommitProp
andrey ryabininandrey ryabinin23100.00%1100.00%
Total23100.00%1100.00%


/*
 * Poison a slab object's payload, rounded up to a whole shadow granule,
 * with the redzone pattern.
 */
void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison_shadow(object,
			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
			KASAN_KMALLOC_REDZONE);
}

Contributors

PersonTokensPropCommitsCommitProp
andrey ryabininandrey ryabinin2996.67%150.00%
alexander potapenkoalexander potapenko13.33%150.00%
Total30100.00%2100.00%


/* True when @ptr falls inside the hard- or soft-irq entry text sections. */
static inline int in_irqentry_text(unsigned long ptr)
{
	int in_hard = ptr >= (unsigned long)&__irqentry_text_start &&
		      ptr < (unsigned long)&__irqentry_text_end;
	int in_soft = ptr >= (unsigned long)&__softirqentry_text_start &&
		      ptr < (unsigned long)&__softirqentry_text_end;

	return in_hard || in_soft;
}

Contributors

PersonTokensPropCommitsCommitProp
alexander potapenkoalexander potapenko52100.00%1100.00%
Total52100.00%1100.00%


/*
 * Truncate @trace at the first irq-entry frame (keeping that frame
 * itself), so frames of the interrupted context are dropped.
 */
static inline void filter_irq_stacks(struct stack_trace *trace)
{
	int i;

	for (i = 0; i < trace->nr_entries; i++) {
		if (in_irqentry_text(trace->entries[i])) {
			/* Include the irqentry function into the stack. */
			trace->nr_entries = i + 1;
			break;
		}
	}
}

Contributors

PersonTokensPropCommitsCommitProp
alexander potapenkoalexander potapenko62100.00%1100.00%
Total62100.00%1100.00%


/*
 * Capture the current stack trace, drop frames below irq entry and any
 * trailing ULONG_MAX end marker, then store the trace in the stack
 * depot and return its handle.
 */
static inline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = entries,
		.max_entries = KASAN_STACK_DEPTH,
		.skip = 0
	};

	save_stack_trace(&trace);
	filter_irq_stacks(&trace);
	/* save_stack_trace() may terminate the trace with ULONG_MAX. */
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
		trace.nr_entries--;

	return depot_save_stack(&trace, flags);
}

Contributors

PersonTokensPropCommitsCommitProp
alexander potapenkoalexander potapenko90100.00%1100.00%
Total90100.00%1100.00%


/* Record the current pid and a depot handle for the current stack. */
static inline void set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = save_stack(flags);
}

Contributors

PersonTokensPropCommitsCommitProp
alexander potapenkoalexander potapenko3196.88%266.67%
andrey ryabininandrey ryabinin13.12%133.33%
Total32100.00%3100.00%


/* Return the alloc metadata stored at alloc_meta_offset past @object. */
struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
					const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}

Contributors

PersonTokensPropCommitsCommitProp
alexander potapenkoalexander potapenko41100.00%2100.00%
Total41100.00%2100.00%


/* Return the free metadata stored at free_meta_offset past @object. */
struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
				      const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	return (void *)object + cache->kasan_info.free_meta_offset;
}

Contributors

PersonTokensPropCommitsCommitProp
alexander potapenkoalexander potapenko41100.00%2100.00%
Total41100.00%2100.00%


/* Zero a new object's alloc metadata for SLAB_KASAN caches. */
void kasan_init_slab_obj(struct kmem_cache *cache, const void *object)
{
	/* Caches without KASAN metadata have nothing to initialize. */
	if (!(cache->flags & SLAB_KASAN))
		return;

	/* Metadata, not payload: use the uninstrumented __memset(). */
	__memset(get_alloc_info(cache, object), 0,
		 sizeof(struct kasan_alloc_meta));
}

Contributors

PersonTokensPropCommitsCommitProp
andrey ryabininandrey ryabinin54100.00%1100.00%
Total54100.00%1100.00%


/* Slab allocation hook: treat the object as a kmalloc of object_size. */
void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
{
	kasan_kmalloc(cache, object, cache->object_size, flags);
}

Contributors

PersonTokensPropCommitsCommitProp
andrey ryabininandrey ryabinin2583.33%150.00%
alexander potapenkoalexander potapenko516.67%150.00%
Total30100.00%2100.00%


/* Poison a freed object (whole granules) with the kmalloc-free pattern. */
static void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
{
	unsigned long rounded_up_size =
		round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
		return;

	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
}

Contributors

PersonTokensPropCommitsCommitProp
andrey ryabininandrey ryabinin4580.36%266.67%
alexander potapenkoalexander potapenko1119.64%133.33%
Total56100.00%3100.00%


/*
 * Slab free hook.  Detects invalid/double frees from the object's first
 * shadow byte, poisons the object, and for SLAB_KASAN caches records the
 * free stack and places the object in the quarantine.  Returns true when
 * KASAN handled the free (caller must not release the object now);
 * false when the caller should proceed with the normal free.
 */
bool kasan_slab_free(struct kmem_cache *cache, void *object)
{
	s8 shadow_byte;

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
		return false;

	/*
	 * A live object has shadow 0..KASAN_SHADOW_SCALE_SIZE-1; anything
	 * else means it was already freed or never valid.
	 */
	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
	if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) {
		kasan_report_double_free(cache, object, shadow_byte);
		return true;
	}

	kasan_poison_slab_free(cache, object);

	if (unlikely(!(cache->flags & SLAB_KASAN)))
		return false;

	set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
	quarantine_put(get_free_info(cache, object), cache);
	return true;
}

Contributors

PersonTokensPropCommitsCommitProp
andrey ryabininandrey ryabinin6753.60%450.00%
alexander potapenkoalexander potapenko5846.40%450.00%
Total125100.00%8100.00%


/*
 * kmalloc hook: unpoison [object, object + size) and poison the rest of
 * the slab object up to object_size as a redzone; record the allocation
 * stack for SLAB_KASAN caches.
 */
void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
		   gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	/* Drain the quarantine only from contexts allowed to block. */
	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(object == NULL))
		return;

	redzone_start = round_up((unsigned long)(object + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
				KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(object, size);
	kasan_poison_shadow((void *)redzone_start,
			redzone_end - redzone_start,
			KASAN_KMALLOC_REDZONE);

	if (cache->flags & SLAB_KASAN)
		set_track(&get_alloc_info(cache, object)->alloc_track, flags);
}

Contributors

PersonTokensPropCommitsCommitProp
andrey ryabininandrey ryabinin9876.56%342.86%
alexander potapenkoalexander potapenko3023.44%457.14%
Total128100.00%7100.00%

EXPORT_SYMBOL(kasan_kmalloc);
/*
 * Large-allocation counterpart of kasan_kmalloc(): unpoison the
 * requested size and poison the remainder of the backing compound page
 * with the page-redzone pattern.
 */
void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	struct page *page;
	unsigned long redzone_start;
	unsigned long redzone_end;

	/* Drain the quarantine only from contexts allowed to block. */
	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(ptr == NULL))
		return;

	page = virt_to_page(ptr);
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_SHADOW_SCALE_SIZE);
	/* The redzone runs to the end of the compound page. */
	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));

	kasan_unpoison_shadow(ptr, size);
	kasan_poison_shadow((void *)redzone_start,
			redzone_end - redzone_start,
			KASAN_PAGE_REDZONE);
}

Contributors

PersonTokensPropCommitsCommitProp
andrey ryabininandrey ryabinin10291.07%250.00%
alexander potapenkoalexander potapenko108.93%250.00%
Total112100.00%4100.00%


/*
 * krealloc hook: re-annotate @object for its new @size, dispatching on
 * whether the memory is slab-backed or a large page-backed allocation.
 * ZERO_SIZE_PTR carries no storage and is ignored.
 */
void kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return;

	page = virt_to_head_page(object);
	if (PageSlab(page))
		kasan_kmalloc(page->slab_cache, object, size, flags);
	else
		kasan_kmalloc_large(object, size, flags);
}

Contributors

PersonTokensPropCommitsCommitProp