Release 4.11: arch/powerpc/include/asm/bug.h
#ifndef _ASM_POWERPC_BUG_H
#define _ASM_POWERPC_BUG_H
#ifdef __KERNEL__
#include <asm/asm-compat.h>
/*
 * Define an illegal instr to trap on the bug.
 * We don't use 0 because that marks the end of a function
 * in the ELF ABI. That's "Boo Boo" in case you wonder...
 */
#define BUG_OPCODE .long 0x00b00b00		/* For asm */
#define BUG_ILLEGAL_INSTR "0x00b00b00"		/* For BUG macro */
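The trap works because the top six bits of a PowerPC instruction word are the primary opcode field, and primary opcode 0 is reserved, so executing this word raises a program-check interrupt; being nonzero, it also cannot be mistaken for the end-of-function padding the comment mentions. A minimal user-space sketch of that decoding (illustrative only, not part of the kernel):

	#include <stdio.h>

	int main(void)
	{
		unsigned int insn = 0x00b00b00;		/* BUG_OPCODE word */
		unsigned int primary = insn >> 26;	/* bits 0-5: primary opcode */

		/* Primary opcode 0 is reserved, so the CPU traps on this word. */
		printf("primary opcode = %u\n", primary);	/* prints 0 */
		return 0;
	}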
#ifdef CONFIG_BUG
#ifdef __ASSEMBLY__
#include <asm/asm-offsets.h>
#ifdef CONFIG_DEBUG_BUGVERBOSE
.macro EMIT_BUG_ENTRY addr,file,line,flags
	.section __bug_table,"a"
5001:	PPC_LONG \addr, 5002f
	.short \line, \flags
	.org 5001b+BUG_ENTRY_SIZE
	.previous
	.section .rodata,"a"
5002:	.asciz "\file"
	.previous
.endm
#else
.macro EMIT_BUG_ENTRY addr,file,line,flags
	.section __bug_table,"a"
5001:	PPC_LONG \addr
	.short \flags
	.org 5001b+BUG_ENTRY_SIZE
	.previous
.endm
#endif /* verbose */
#else /* !__ASSEMBLY__ */
/*
 * _EMIT_BUG_ENTRY expects args %0,%1,%2,%3 to be FILE, LINE, flags and
 * sizeof(struct bug_entry), respectively.
 */
#ifdef CONFIG_DEBUG_BUGVERBOSE
#define _EMIT_BUG_ENTRY				\
	".section __bug_table,\"a\"\n"		\
	"2:\t" PPC_LONG "1b, %0\n"		\
	"\t.short %1, %2\n"			\
	".org 2b+%3\n"				\
	".previous\n"
#else
#define _EMIT_BUG_ENTRY				\
	".section __bug_table,\"a\"\n"		\
	"2:\t" PPC_LONG "1b\n"			\
	"\t.short %2\n"				\
	".org 2b+%3\n"				\
	".previous\n"
#endif
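Both the assembler macro above and this stringified version emit the same record into the __bug_table section; the `.org 2b+%3` / `.org 5001b+BUG_ENTRY_SIZE` directives pad each record to sizeof(struct bug_entry) so the trap handler can walk the table as an array. For reference, a sketch of the record being filled in, with the include/asm-generic/bug.h conditionals of this era resolved (the code above stores full PPC_LONG pointers, so the relative-pointer variant is not in play):

	/* Sketch: the record each _EMIT_BUG_ENTRY expansion creates. */
	struct bug_entry {
		unsigned long	bug_addr;	/* PPC_LONG "1b": address of the trap insn */
	#ifdef CONFIG_DEBUG_BUGVERBOSE
		const char	*file;		/* %0: __FILE__ */
		unsigned short	line;		/* %1: __LINE__ */
	#endif
		unsigned short	flags;		/* %2: 0 for BUG, BUGFLAG_TAINT(...) for WARN */
	};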
/*
 * BUG_ON() and WARN_ON() do their best to cooperate with compile-time
 * optimisations. However, depending on the complexity of the condition,
 * some compiler versions may not produce optimal results.
 */
#define BUG() do {						\
	__asm__ __volatile__(					\
		"1:	twi 31,0,0\n"				\
		_EMIT_BUG_ENTRY					\
		: : "i" (__FILE__), "i" (__LINE__),		\
		    "i" (0), "i" (sizeof(struct bug_entry)));	\
	unreachable();						\
} while (0)
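`twi 31,0,0` is a trap-word-immediate with all five condition bits set, i.e. an unconditional trap: reaching a BUG() always enters the program-check handler, which looks the trapping address up in __bug_table and oopses with the recorded file and line. A hypothetical caller (the struct and function names are invented for illustration):

	struct widget {			/* hypothetical type */
		int state;
	};

	static void widget_set_state(struct widget *w, int state)
	{
		if (state < 0)
			BUG();		/* unconditional "twi 31,0,0": kernel oops here */
		w->state = state;
	}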
#define BUG_ON(x) do {						\
	if (__builtin_constant_p(x)) {				\
		if (x)						\
			BUG();					\
	} else {						\
		__asm__ __volatile__(				\
		"1:	"PPC_TLNEI"	%4,0\n"			\
		_EMIT_BUG_ENTRY					\
		: : "i" (__FILE__), "i" (__LINE__), "i" (0),	\
		    "i" (sizeof(struct bug_entry)),		\
		    "r" ((__force long)(x)));			\
	}							\
} while (0)
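The __builtin_constant_p() split is what the comment above is describing: a condition the compiler can prove false disappears entirely, one it can prove true degenerates to a plain BUG(), and only a genuinely runtime condition costs a single conditional trap (PPC_TLNEI expands to tdnei on 64-bit or twnei on 32-bit: trap if the register is not equal to the immediate 0). A hypothetical sketch of the cases (types invented for illustration):

	struct pkt_header {		/* hypothetical type */
		unsigned int len;
		unsigned int csum;
	};

	static void pkt_check(int n)
	{
		/* Provably false at compile time: no code is emitted. */
		BUG_ON(sizeof(struct pkt_header) != 8);
		/* Known only at run time: one tdnei/twnei on the register
		 * holding the evaluated (n < 0) flag. */
		BUG_ON(n < 0);
	}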
#define __WARN_TAINT(taint) do {				\
	__asm__ __volatile__(					\
		"1:	twi 31,0,0\n"				\
		_EMIT_BUG_ENTRY					\
		: : "i" (__FILE__), "i" (__LINE__),		\
		    "i" (BUGFLAG_TAINT(taint)),			\
		    "i" (sizeof(struct bug_entry)));		\
} while (0)
#define WARN_ON(x) ({						\
	int __ret_warn_on = !!(x);				\
	if (__builtin_constant_p(__ret_warn_on)) {		\
		if (__ret_warn_on)				\
			__WARN();				\
	} else {						\
		__asm__ __volatile__(				\
		"1:	"PPC_TLNEI"	%4,0\n"			\
		_EMIT_BUG_ENTRY					\
		: : "i" (__FILE__), "i" (__LINE__),		\
		    "i" (BUGFLAG_TAINT(TAINT_WARN)),		\
		    "i" (sizeof(struct bug_entry)),		\
		    "r" (__ret_warn_on));			\
	}							\
	unlikely(__ret_warn_on);				\
})
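Unlike BUG_ON(), WARN_ON() is a statement expression: it yields unlikely(__ret_warn_on), so a caller can report the problem and recover in the same test, with the trap handler printing a backtrace and tainting the kernel rather than oopsing. A hypothetical use (the struct, field, and function are invented for illustration):

	#include <linux/errno.h>

	struct engine {			/* hypothetical type */
		struct clk *clk;
	};

	static int engine_start(struct engine *e)
	{
		if (WARN_ON(!e->clk))	/* backtrace + TAINT_WARN... */
			return -EINVAL;	/* ...then fail gracefully */
		return 0;
	}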
#define HAVE_ARCH_BUG
#define HAVE_ARCH_BUG_ON
#define HAVE_ARCH_WARN_ON
#endif /* __ASSEMBLY__ */
#else /* !CONFIG_BUG */
#ifdef __ASSEMBLY__
.macro EMIT_BUG_ENTRY addr,file,line,flags
.endm
#else /* !__ASSEMBLY__ */
#define _EMIT_BUG_ENTRY
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_BUG */
#include <asm-generic/bug.h>
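The generic header supplies everything this file does not override: BUGFLAG_TAINT() used above, WARN() and the __WARN() that __WARN_TAINT() backs, and no-op C fallbacks for when CONFIG_BUG is off (the empty EMIT_BUG_ENTRY/_EMIT_BUG_ENTRY stubs above keep the asm call sites compiling in that case). For this era the taint is packed into the upper byte of the entry's flags field, roughly as below (quoted from memory, so treat it as a sketch and check the tree you build against):

	/* Sketch of the flag encoding from include/asm-generic/bug.h. */
	#define BUGFLAG_WARNING		(1 << 0)
	#define BUGFLAG_TAINT(taint)	(BUGFLAG_WARNING | ((taint) << 8))
	#define BUG_GET_TAINT(bug)	((bug)->flags >> 8)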
#ifndef __ASSEMBLY__
struct pt_regs;
extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long);
extern void bad_page_fault(struct pt_regs *, unsigned long, int);
extern void _exception(int, struct pt_regs *, int, unsigned long);
extern void die(const char *, struct pt_regs *, long);
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_BUG_H */
Overall Contributors

Person                 | Tokens | Token % | Commits | Commit %
-----------------------|--------|---------|---------|---------
David Woodhouse        |     99 |  34.14% |       1 |    5.88%
David Howells          |     69 |  23.79% |       1 |    5.88%
Benjamin Herrenschmidt |     29 |  10.00% |       1 |    5.88%
Anton Blanchard        |     23 |   7.93% |       3 |   17.65%
Jeremy Fitzhardinge    |     19 |   6.55% |       1 |    5.88%
Matt Mackall           |     18 |   6.21% |       2 |   11.76%
Paul Mackerras         |     10 |   3.45% |       1 |    5.88%
Ben Hutchings          |      6 |   2.07% |       1 |    5.88%
Arnd Bergmann          |      6 |   2.07% |       1 |    5.88%
David Gibson           |      3 |   1.03% |       1 |    5.88%
Becky Bruce            |      3 |   1.03% |       1 |    5.88%
Michael Ellerman       |      3 |   1.03% |       1 |    5.88%
Al Viro                |      1 |   0.34% |       1 |    5.88%
David Daney            |      1 |   0.34% |       1 |    5.88%
Total                  |    290 | 100.00% |      17 |  100.00%