cregit-Linux: how code gets into the kernel

Release 4.15 kernel/events/internal.h

Directory: kernel/events
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _KERNEL_EVENTS_INTERNAL_H

#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>

/* Buffer handling */


#define RING_BUFFER_WRITABLE		0x01


/*
 * Kernel-internal view of a perf ring buffer: the mmap()ed data pages plus
 * the AUX area and all write/wakeup bookkeeping.  Freed via RCU
 * (see rb_free_rcu()).
 */
struct ring_buffer {
	atomic_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
	int				overwrite;	/* can overwrite itself */
	int				paused;		/* can write into ring buffer */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	local_t				nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */
	long				aux_watermark;
	/* poll crap */
	spinlock_t			event_lock;
	struct list_head		event_list;

	atomic_t			mmap_count;
	unsigned long			mmap_locked;
	struct user_struct		*mmap_user;

	/* AUX area */
	long				aux_head;
	local_t				aux_nest;
	long				aux_wakeup;	/* last aux_watermark boundary crossed by aux_head */
	unsigned long			aux_pgoff;
	int				aux_nr_pages;
	int				aux_overwrite;
	atomic_t			aux_mmap_count;
	unsigned long			aux_mmap_locked;
	void				(*free_aux)(void *);
	atomic_t			aux_refcount;
	void				**aux_pages;
	void				*aux_priv;

	struct perf_event_mmap_page	*user_page;
	/*
	 * C99 flexible array member (preferred over the deprecated GCC
	 * zero-length array data_pages[0]; identical layout): the data page
	 * pointers are allocated together with the struct and must stay last.
	 */
	void				*data_pages[];
};

extern void rb_free(struct ring_buffer *rb);

/*
 * RCU callback: recover the ring_buffer embedding @rcu_head and free it
 * once all RCU read-side critical sections have completed.
 */
static inline void rb_free_rcu(struct rcu_head *rcu_head)
{
	struct ring_buffer *rb = container_of(rcu_head, struct ring_buffer, rcu_head);

	rb_free(rb);
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
Peter Zijlstra | 34 | 100.00% | 1 | 100.00%
Total | 34 | 100.00% | 1 | 100.00%


/*
 * Pause or resume writing into @rb.  A buffer with no data pages can never
 * be unpaused.
 */
static inline void rb_toggle_paused(struct ring_buffer *rb, bool pause)
{
	rb->paused = (pause || !rb->nr_pages) ? 1 : 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Wang Nan37100.00%1100.00%
Total37100.00%1100.00%

extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
extern int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
			pgoff_t pgoff, int nr_pages, long watermark, int flags);
extern void rb_free_aux(struct ring_buffer *rb);
extern struct ring_buffer *ring_buffer_get(struct perf_event *event);
extern void ring_buffer_put(struct ring_buffer *rb);
/* True when an AUX area has been allocated for this ring buffer. */
static inline bool rb_has_aux(struct ring_buffer *rb)
{
	return rb->aux_nr_pages != 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Peter Zijlstra19100.00%1100.00%
Total19100.00%1100.00%

void perf_event_aux_event(struct perf_event *event, unsigned long head, unsigned long size, u64 flags); extern struct page * perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff); #ifdef CONFIG_PERF_USE_VMALLOC /* * Back perf_mmap() with vmalloc memory. * * Required for architectures that have d-cache aliasing issues. */
/* vmalloc-backed buffers: each "data page" is 2^page_order real pages. */
static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}

Contributors

PersonTokensPropCommitsCommitProp
Frédéric Weisbecker17100.00%1100.00%
Total17100.00%1100.00%

#else
/* Without CONFIG_PERF_USE_VMALLOC, every data page is exactly one page. */
static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Frédéric Weisbecker15100.00%1100.00%
Total15100.00%1100.00%

#endif
/* Total size in bytes of the data area. */
static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

Contributors

PersonTokensPropCommitsCommitProp
Frédéric Weisbecker2696.30%150.00%
Borislav Petkov13.70%150.00%
Total27100.00%2100.00%


/* Total size in bytes of the AUX area. */
static inline unsigned long perf_aux_size(struct ring_buffer *rb)
{
	return rb->aux_nr_pages << PAGE_SHIFT;
}

Contributors

PersonTokensPropCommitsCommitProp
Peter Zijlstra20100.00%1100.00%
Total20100.00%1100.00%

/*
 * Shared body for the __output_*() copy helpers: copy up to @len bytes into
 * the ring buffer, advancing the handle across page boundaries.  Returns the
 * number of bytes that could NOT be copied.
 */
#define __DEFINE_OUTPUT_COPY_BODY(advance_buf, memcpy_func, ...)	\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size = min(handle->size, len);				\
		written = memcpy_func(__VA_ARGS__);			\
		written = size - written;				\
									\
		len -= written;						\
		handle->addr += written;				\
		if (advance_buf)					\
			buf += written;					\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct ring_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}

#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned long						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned long len)				\
__DEFINE_OUTPUT_COPY_BODY(true, memcpy_func, handle->addr, buf, size)
/*
 * Like the DEFINE_OUTPUT_COPY() helpers, but with a caller-supplied copy
 * callback; the callback receives the running offset (orig_len - len)
 * instead of an advancing @buf pointer.
 */
static inline unsigned long
__output_custom(struct perf_output_handle *handle, perf_copy_f copy_func,
		const void *buf, unsigned long len)
{
	unsigned long orig_len = len;

	__DEFINE_OUTPUT_COPY_BODY(false, copy_func, handle->addr, buf,
				  orig_len - len, size)
}

Contributors

PersonTokensPropCommitsCommitProp
Daniel Borkmann4191.11%250.00%
Peter Zijlstra24.44%125.00%
Frédéric Weisbecker24.44%125.00%
Total45100.00%4100.00%


/*
 * Plain memcpy() adapted to the DEFINE_OUTPUT_COPY() contract: copy @n
 * bytes and report 0 bytes left un-copied.
 */
static inline unsigned long
memcpy_common(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Frédéric Weisbecker2163.64%240.00%
Daniel Borkmann927.27%240.00%
Peter Zijlstra39.09%120.00%
Total33100.00%5100.00%

/* __output_copy(): copy caller data into the ring buffer. */
DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
/*
 * No-op "copy" for DEFINE_OUTPUT_COPY(): touches nothing and reports all
 * bytes as consumed, so the handle advances without writing data.
 */
static inline unsigned long
memcpy_skip(void *dst, const void *src, unsigned long n)
{
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Peter Zijlstra1770.83%150.00%
Jiri Olsa729.17%150.00%
Total24100.00%2100.00%

DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip) #ifndef arch_perf_out_copy_user #define arch_perf_out_copy_user arch_perf_out_copy_user
/*
 * Default user-memory copy: atomic (non-faulting) copy_from_user.
 * Returns the number of bytes that could not be copied.
 */
static inline unsigned long
arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
{
	unsigned long left;

	pagefault_disable();
	left = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();

	return left;
}

Contributors

PersonTokensPropCommitsCommitProp
Peter Zijlstra45100.00%1100.00%
Total45100.00%1100.00%

#endif DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user) /* Callchain handling */ extern struct perf_callchain_entry * perf_callchain(struct perf_event *event, struct pt_regs *regs);
/*
 * Claim the recursion slot for the current context (0=task, 1=softirq,
 * 2=hardirq, 3=NMI).  Returns the slot index, or -1 if this context is
 * already active (recursion detected).
 */
static inline int get_recursion_context(int *recursion)
{
	int rctx;

	if (unlikely(in_nmi()))
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}

Contributors

PersonTokensPropCommitsCommitProp
Borislav Petkov7195.95%150.00%
Jesper Dangaard Brouer34.05%150.00%
Total74100.00%2100.00%


/* Release a slot previously claimed by get_recursion_context(). */
static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}

Contributors

PersonTokensPropCommitsCommitProp
Borislav Petkov23100.00%1100.00%
Total23100.00%1100.00%

#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
/* CONFIG_HAVE_PERF_USER_STACK_DUMP is set: user stack dumps available. */
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

Contributors

PersonTokensPropCommitsCommitProp
Jiri Olsa12100.00%1100.00%
Total12100.00%1100.00%

#define perf_user_stack_pointer(regs) user_stack_pointer(regs) #else
/* CONFIG_HAVE_PERF_USER_STACK_DUMP is not set: no user stack dumps. */
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

Contributors

PersonTokensPropCommitsCommitProp
Jiri Olsa12100.00%1100.00%
Total12100.00%1100.00%

#define perf_user_stack_pointer(regs) 0 #endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */ #endif /* _KERNEL_EVENTS_INTERNAL_H */

Overall Contributors

PersonTokensPropCommitsCommitProp
Frédéric Weisbecker26231.23%28.33%
Peter Zijlstra23227.65%625.00%
Borislav Petkov11213.35%14.17%
Daniel Borkmann637.51%28.33%
Jiri Olsa586.91%28.33%
Alexander Shishkin576.79%416.67%
Wang Nan414.89%14.17%
Andrey Vagin50.60%14.17%
Will Deacon30.36%28.33%
Jesper Dangaard Brouer30.36%14.17%
Stéphane Eranian20.24%14.17%
Greg Kroah-Hartman10.12%14.17%
Total839100.00%24100.00%
Directory: kernel/events
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.
Created with cregit.