- From: Matthias Schiffer <mschiffer@universe-factory.net>
- Date: Sun, 8 May 2016 22:06:51 +0200
- Subject: tools: mkimage: sync include/linux/compiler*.h with u-boot master
- Fixes build with GCC 6.
- Signed-off-by: Matthias Schiffer <mschiffer@universe-factory.net>
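
For context on the "Fixes build with GCC 6" line: the headers replaced below dispatched to a per-version file via `#include gcc_header(__GNUC__)`, and only compiler-gcc3/4/5.h existed, so GCC 6 had nothing to include; the synced compiler-gcc.h keys everything off a single numeric `GCC_VERSION` instead. A minimal sketch, not part of the commit (the `main()` wrapper is illustrative only):

```c
/* Sketch: the GCC_VERSION scheme used by the synced header, vs. the removed
 * per-version include that broke on GCC 6:
 *
 *   #define __gcc_header(x) #x
 *   #define _gcc_header(x)  __gcc_header(linux/compiler-gcc##x.h)
 *   #define gcc_header(x)   _gcc_header(x)
 *   #include gcc_header(__GNUC__)   // GCC 6 -> "linux/compiler-gcc6.h", which did not exist
 */
#include <stdio.h>

#define GCC_VERSION (__GNUC__ * 10000		\
		     + __GNUC_MINOR__ * 100	\
		     + __GNUC_PATCHLEVEL__)

int main(void)
{
	/* GCC 6.3.0 evaluates to 60300, so checks such as GCC_VERSION >= 40500
	 * keep working on newer compilers without adding a new header file. */
	printf("GCC_VERSION = %d\n", GCC_VERSION);
#if GCC_VERSION >= 40500
	puts("unreachable() can use __builtin_unreachable()");
#endif
	return 0;
}
```
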
- diff --git a/tools/mkimage/patches/200-compiler-support.patch b/tools/mkimage/patches/200-compiler-support.patch
- new file mode 100644
- index 0000000..ca9c5b5
- --- /dev/null
- +++ b/tools/mkimage/patches/200-compiler-support.patch
- @@ -0,0 +1,702 @@
- +diff --git b/include/linux/compiler-gcc.h a/include/linux/compiler-gcc.h
- +index e057bd2..22ab246 100644
- +--- b/include/linux/compiler-gcc.h
- ++++ a/include/linux/compiler-gcc.h
- +@@ -5,14 +5,28 @@
- + /*
- + * Common definitions for all gcc versions go here.
- + */
- +-#define GCC_VERSION (__GNUC__ * 10000 \
- +- + __GNUC_MINOR__ * 100 \
- +- + __GNUC_PATCHLEVEL__)
- +-
- ++#define GCC_VERSION (__GNUC__ * 10000 \
- ++ + __GNUC_MINOR__ * 100 \
- ++ + __GNUC_PATCHLEVEL__)
- +
- + /* Optimization barrier */
- ++
- + /* The "volatile" is due to gcc bugs */
- + #define barrier() __asm__ __volatile__("": : :"memory")
- ++/*
- ++ * This version is i.e. to prevent dead stores elimination on @ptr
- ++ * where gcc and llvm may behave differently when otherwise using
- ++ * normal barrier(): while gcc behavior gets along with a normal
- ++ * barrier(), llvm needs an explicit input variable to be assumed
- ++ * clobbered. The issue is as follows: while the inline asm might
- ++ * access any memory it wants, the compiler could have fit all of
- ++ * @ptr into memory registers instead, and since @ptr never escaped
- ++ * from that, it proofed that the inline asm wasn't touching any of
- ++ * it. This version works well with both compilers, i.e. we're telling
- ++ * the compiler that the inline asm absolutely may see the contents
- ++ * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495
- ++ */
- ++#define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
- +
- + /*
- + * This macro obfuscates arithmetic on a variable address so that gcc
- +@@ -32,58 +46,63 @@
- + * the inline assembly constraint from =g to =r, in this particular
- + * case either is valid.
- + */
- +-#define RELOC_HIDE(ptr, off) \
- +- ({ unsigned long __ptr; \
- +- __asm__ ("" : "=r"(__ptr) : "0"(ptr)); \
- +- (typeof(ptr)) (__ptr + (off)); })
- ++#define RELOC_HIDE(ptr, off) \
- ++({ \
- ++ unsigned long __ptr; \
- ++ __asm__ ("" : "=r"(__ptr) : "0"(ptr)); \
- ++ (typeof(ptr)) (__ptr + (off)); \
- ++})
- +
- + /* Make the optimizer believe the variable can be manipulated arbitrarily. */
- +-#define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var))
- ++#define OPTIMIZER_HIDE_VAR(var) \
- ++ __asm__ ("" : "=r" (var) : "0" (var))
- +
- + #ifdef __CHECKER__
- +-#define __must_be_array(arr) 0
- ++#define __must_be_array(a) 0
- + #else
- + /* &a[0] degrades to a pointer: a different type from an array */
- +-#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
- ++#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
- + #endif
- +
- + /*
- + * Force always-inline if the user requests it so via the .config,
- + * or if gcc is too old:
- + */
- +-#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
- ++#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
- + !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
- +-# define inline inline __attribute__((always_inline)) notrace
- +-# define __inline__ __inline__ __attribute__((always_inline)) notrace
- +-# define __inline __inline __attribute__((always_inline)) notrace
- ++#define inline inline __attribute__((always_inline)) notrace
- ++#define __inline__ __inline__ __attribute__((always_inline)) notrace
- ++#define __inline __inline __attribute__((always_inline)) notrace
- + #else
- + /* A lot of inline functions can cause havoc with function tracing */
- +-# define inline inline notrace
- +-# define __inline__ __inline__ notrace
- +-# define __inline __inline notrace
- ++#define inline inline notrace
- ++#define __inline__ __inline__ notrace
- ++#define __inline __inline notrace
- + #endif
- +
- +-#define __deprecated __attribute__((deprecated))
- +-#ifndef __packed
- +-#define __packed __attribute__((packed))
- +-#endif
- +-#ifndef __weak
- +-#define __weak __attribute__((weak))
- +-#endif
- ++#define __always_inline inline __attribute__((always_inline))
- ++#define noinline __attribute__((noinline))
- ++
- ++#define __deprecated __attribute__((deprecated))
- ++#define __packed __attribute__((packed))
- ++#define __weak __attribute__((weak))
- ++#define __alias(symbol) __attribute__((alias(#symbol)))
- +
- + /*
- +- * it doesn't make sense on ARM (currently the only user of __naked) to trace
- +- * naked functions because then mcount is called without stack and frame pointer
- +- * being set up and there is no chance to restore the lr register to the value
- +- * before mcount was called.
- ++ * it doesn't make sense on ARM (currently the only user of __naked)
- ++ * to trace naked functions because then mcount is called without
- ++ * stack and frame pointer being set up and there is no chance to
- ++ * restore the lr register to the value before mcount was called.
- ++ *
- ++ * The asm() bodies of naked functions often depend on standard calling
- ++ * conventions, therefore they must be noinline and noclone.
- + *
- +- * The asm() bodies of naked functions often depend on standard calling conventions,
- +- * therefore they must be noinline and noclone. GCC 4.[56] currently fail to enforce
- +- * this, so we must do so ourselves. See GCC PR44290.
- ++ * GCC 4.[56] currently fail to enforce this, so we must do so ourselves.
- ++ * See GCC PR44290.
- + */
- +-#define __naked __attribute__((naked)) noinline __noclone notrace
- ++#define __naked __attribute__((naked)) noinline __noclone notrace
- +
- +-#define __noreturn __attribute__((noreturn))
- ++#define __noreturn __attribute__((noreturn))
- +
- + /*
- + * From the GCC manual:
- +@@ -95,34 +114,170 @@
- + * would be.
- + * [...]
- + */
- +-#ifndef __pure
- +-#define __pure __attribute__((pure))
- ++#define __pure __attribute__((pure))
- ++#define __aligned(x) __attribute__((aligned(x)))
- ++#define __printf(a, b) __attribute__((format(printf, a, b)))
- ++#define __scanf(a, b) __attribute__((format(scanf, a, b)))
- ++#define __attribute_const__ __attribute__((__const__))
- ++#define __maybe_unused __attribute__((unused))
- ++#define __always_unused __attribute__((unused))
- ++
- ++/* gcc version specific checks */
- ++
- ++#if GCC_VERSION < 30200
- ++# error Sorry, your compiler is too old - please upgrade it.
- ++#endif
- ++
- ++#if GCC_VERSION < 30300
- ++# define __used __attribute__((__unused__))
- ++#else
- ++# define __used __attribute__((__used__))
- ++#endif
- ++
- ++#ifdef CONFIG_GCOV_KERNEL
- ++# if GCC_VERSION < 30400
- ++# error "GCOV profiling support for gcc versions below 3.4 not included"
- ++# endif /* __GNUC_MINOR__ */
- ++#endif /* CONFIG_GCOV_KERNEL */
- ++
- ++#if GCC_VERSION >= 30400
- ++#define __must_check __attribute__((warn_unused_result))
- ++#endif
- ++
- ++#if GCC_VERSION >= 40000
- ++
- ++/* GCC 4.1.[01] miscompiles __weak */
- ++#ifdef __KERNEL__
- ++# if GCC_VERSION >= 40100 && GCC_VERSION <= 40101
- ++# error Your version of gcc miscompiles the __weak directive
- ++# endif
- ++#endif
- ++
- ++#define __used __attribute__((__used__))
- ++#define __compiler_offsetof(a, b) \
- ++ __builtin_offsetof(a, b)
- ++
- ++#if GCC_VERSION >= 40100 && GCC_VERSION < 40600
- ++# define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
- ++#endif
- ++
- ++#if GCC_VERSION >= 40300
- ++/* Mark functions as cold. gcc will assume any path leading to a call
- ++ * to them will be unlikely. This means a lot of manual unlikely()s
- ++ * are unnecessary now for any paths leading to the usual suspects
- ++ * like BUG(), printk(), panic() etc. [but let's keep them for now for
- ++ * older compilers]
- ++ *
- ++ * Early snapshots of gcc 4.3 don't support this and we can't detect this
- ++ * in the preprocessor, but we can live with this because they're unreleased.
- ++ * Maketime probing would be overkill here.
- ++ *
- ++ * gcc also has a __attribute__((__hot__)) to move hot functions into
- ++ * a special section, but I don't see any sense in this right now in
- ++ * the kernel context
- ++ */
- ++#define __cold __attribute__((__cold__))
- ++
- ++#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
- ++
- ++#ifndef __CHECKER__
- ++# define __compiletime_warning(message) __attribute__((warning(message)))
- ++# define __compiletime_error(message) __attribute__((error(message)))
- ++#endif /* __CHECKER__ */
- ++#endif /* GCC_VERSION >= 40300 */
- ++
- ++#if GCC_VERSION >= 40500
- ++/*
- ++ * Mark a position in code as unreachable. This can be used to
- ++ * suppress control flow warnings after asm blocks that transfer
- ++ * control elsewhere.
- ++ *
- ++ * Early snapshots of gcc 4.5 don't support this and we can't detect
- ++ * this in the preprocessor, but we can live with this because they're
- ++ * unreleased. Really, we need to have autoconf for the kernel.
- ++ */
- ++#define unreachable() __builtin_unreachable()
- ++
- ++/* Mark a function definition as prohibited from being cloned. */
- ++#define __noclone __attribute__((__noclone__))
- ++
- ++#endif /* GCC_VERSION >= 40500 */
- ++
- ++#if GCC_VERSION >= 40600
- ++/*
- ++ * When used with Link Time Optimization, gcc can optimize away C functions or
- ++ * variables which are referenced only from assembly code. __visible tells the
- ++ * optimizer that something else uses this function or variable, thus preventing
- ++ * this.
- ++ */
- ++#define __visible __attribute__((externally_visible))
- + #endif
- +-#ifndef __aligned
- +-#define __aligned(x) __attribute__((aligned(x)))
- ++
- ++
- ++#if GCC_VERSION >= 40900 && !defined(__CHECKER__)
- ++/*
- ++ * __assume_aligned(n, k): Tell the optimizer that the returned
- ++ * pointer can be assumed to be k modulo n. The second argument is
- ++ * optional (default 0), so we use a variadic macro to make the
- ++ * shorthand.
- ++ *
- ++ * Beware: Do not apply this to functions which may return
- ++ * ERR_PTRs. Also, it is probably unwise to apply it to functions
- ++ * returning extra information in the low bits (but in that case the
- ++ * compiler should see some alignment anyway, when the return value is
- ++ * massaged by 'flags = ptr & 3; ptr &= ~3;').
- ++ */
- ++#define __assume_aligned(a, ...) __attribute__((__assume_aligned__(a, ## __VA_ARGS__)))
- + #endif
- +-#define __printf(a, b) __attribute__((format(printf, a, b)))
- +-#define __scanf(a, b) __attribute__((format(scanf, a, b)))
- +-#define noinline __attribute__((noinline))
- +-#define __attribute_const__ __attribute__((__const__))
- +-#define __maybe_unused __attribute__((unused))
- +-#define __always_unused __attribute__((unused))
- +
- +-#define __gcc_header(x) #x
- +-#define _gcc_header(x) __gcc_header(linux/compiler-gcc##x.h)
- +-#define gcc_header(x) _gcc_header(x)
- +-#include gcc_header(__GNUC__)
- ++/*
- ++ * GCC 'asm goto' miscompiles certain code sequences:
- ++ *
- ++ * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
- ++ *
- ++ * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
- ++ *
- ++ * (asm goto is automatically volatile - the naming reflects this.)
- ++ */
- ++#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
- ++
- ++#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
- ++#if GCC_VERSION >= 40400
- ++#define __HAVE_BUILTIN_BSWAP32__
- ++#define __HAVE_BUILTIN_BSWAP64__
- ++#endif
- ++#if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600)
- ++#define __HAVE_BUILTIN_BSWAP16__
- ++#endif
- ++#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
- ++
- ++#if GCC_VERSION >= 50000
- ++#define KASAN_ABI_VERSION 4
- ++#elif GCC_VERSION >= 40902
- ++#define KASAN_ABI_VERSION 3
- ++#endif
- ++
- ++#if GCC_VERSION >= 40902
- ++/*
- ++ * Tell the compiler that address safety instrumentation (KASAN)
- ++ * should not be applied to that function.
- ++ * Conflicts with inlining: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
- ++ */
- ++#define __no_sanitize_address __attribute__((no_sanitize_address))
- ++#endif
- ++
- ++#endif /* gcc version >= 40000 specific checks */
- +
- + #if !defined(__noclone)
- + #define __noclone /* not needed */
- + #endif
- +
- ++#if !defined(__no_sanitize_address)
- ++#define __no_sanitize_address
- ++#endif
- ++
- + /*
- + * A trick to suppress uninitialized variable warning without generating any
- + * code
- + */
- + #define uninitialized_var(x) x = x
- +-
- +-#ifndef __always_inline
- +-#define __always_inline inline __attribute__((always_inline))
- +-#endif
- +diff --git b/include/linux/compiler-gcc3.h a/include/linux/compiler-gcc3.h
- +deleted file mode 100644
- +index 7d89feb..0000000
- +--- b/include/linux/compiler-gcc3.h
- ++++ /dev/null
- +@@ -1,23 +0,0 @@
- +-#ifndef __LINUX_COMPILER_H
- +-#error "Please don't include <linux/compiler-gcc3.h> directly, include <linux/compiler.h> instead."
- +-#endif
- +-
- +-#if GCC_VERSION < 30200
- +-# error Sorry, your compiler is too old - please upgrade it.
- +-#endif
- +-
- +-#if GCC_VERSION >= 30300
- +-# define __used __attribute__((__used__))
- +-#else
- +-# define __used __attribute__((__unused__))
- +-#endif
- +-
- +-#if GCC_VERSION >= 30400
- +-#define __must_check __attribute__((warn_unused_result))
- +-#endif
- +-
- +-#ifdef CONFIG_GCOV_KERNEL
- +-# if GCC_VERSION < 30400
- +-# error "GCOV profiling support for gcc versions below 3.4 not included"
- +-# endif /* __GNUC_MINOR__ */
- +-#endif /* CONFIG_GCOV_KERNEL */
- +diff --git b/include/linux/compiler-gcc4.h a/include/linux/compiler-gcc4.h
- +deleted file mode 100644
- +index c982a09..0000000
- +--- b/include/linux/compiler-gcc4.h
- ++++ /dev/null
- +@@ -1,81 +0,0 @@
- +-#ifndef __LINUX_COMPILER_H
- +-#error "Please don't include <linux/compiler-gcc4.h> directly, include <linux/compiler.h> instead."
- +-#endif
- +-
- +-#define __used __attribute__((__used__))
- +-#define __must_check __attribute__((warn_unused_result))
- +-#define __compiler_offsetof(a,b) __builtin_offsetof(a,b)
- +-
- +-#if GCC_VERSION >= 40100 && GCC_VERSION < 40600
- +-# define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
- +-#endif
- +-
- +-#if GCC_VERSION >= 40300
- +-/* Mark functions as cold. gcc will assume any path leading to a call
- +- to them will be unlikely. This means a lot of manual unlikely()s
- +- are unnecessary now for any paths leading to the usual suspects
- +- like BUG(), printk(), panic() etc. [but let's keep them for now for
- +- older compilers]
- +-
- +- Early snapshots of gcc 4.3 don't support this and we can't detect this
- +- in the preprocessor, but we can live with this because they're unreleased.
- +- Maketime probing would be overkill here.
- +-
- +- gcc also has a __attribute__((__hot__)) to move hot functions into
- +- a special section, but I don't see any sense in this right now in
- +- the kernel context */
- +-#define __cold __attribute__((__cold__))
- +-
- +-#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
- +-
- +-#ifndef __CHECKER__
- +-# define __compiletime_warning(message) __attribute__((warning(message)))
- +-# define __compiletime_error(message) __attribute__((error(message)))
- +-#endif /* __CHECKER__ */
- +-#endif /* GCC_VERSION >= 40300 */
- +-
- +-#if GCC_VERSION >= 40500
- +-/*
- +- * Mark a position in code as unreachable. This can be used to
- +- * suppress control flow warnings after asm blocks that transfer
- +- * control elsewhere.
- +- *
- +- * Early snapshots of gcc 4.5 don't support this and we can't detect
- +- * this in the preprocessor, but we can live with this because they're
- +- * unreleased. Really, we need to have autoconf for the kernel.
- +- */
- +-#define unreachable() __builtin_unreachable()
- +-
- +-/* Mark a function definition as prohibited from being cloned. */
- +-#define __noclone __attribute__((__noclone__))
- +-
- +-#endif /* GCC_VERSION >= 40500 */
- +-
- +-#if GCC_VERSION >= 40600
- +-/*
- +- * Tell the optimizer that something else uses this function or variable.
- +- */
- +-#define __visible __attribute__((externally_visible))
- +-#endif
- +-
- +-/*
- +- * GCC 'asm goto' miscompiles certain code sequences:
- +- *
- +- * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
- +- *
- +- * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
- +- * Fixed in GCC 4.8.2 and later versions.
- +- *
- +- * (asm goto is automatically volatile - the naming reflects this.)
- +- */
- +-#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
- +-
- +-#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
- +-#if GCC_VERSION >= 40400
- +-#define __HAVE_BUILTIN_BSWAP32__
- +-#define __HAVE_BUILTIN_BSWAP64__
- +-#endif
- +-#if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600)
- +-#define __HAVE_BUILTIN_BSWAP16__
- +-#endif
- +-#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
- +diff --git b/include/linux/compiler-intel.h a/include/linux/compiler-intel.h
- +index ba147a1..d4c7113 100644
- +--- b/include/linux/compiler-intel.h
- ++++ a/include/linux/compiler-intel.h
- +@@ -13,9 +13,14 @@
- + /* Intel ECC compiler doesn't support gcc specific asm stmts.
- + * It uses intrinsics to do the equivalent things.
- + */
- ++#undef barrier
- ++#undef barrier_data
- + #undef RELOC_HIDE
- + #undef OPTIMIZER_HIDE_VAR
- +
- ++#define barrier() __memory_barrier()
- ++#define barrier_data(ptr) barrier()
- ++
- + #define RELOC_HIDE(ptr, off) \
- + ({ unsigned long __ptr; \
- + __ptr = (unsigned long) (ptr); \
- +diff --git b/include/linux/compiler.h a/include/linux/compiler.h
- +index d5ad7b1..020ad16 100644
- +--- b/include/linux/compiler.h
- ++++ a/include/linux/compiler.h
- +@@ -17,6 +17,7 @@
- + # define __release(x) __context__(x,-1)
- + # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
- + # define __percpu __attribute__((noderef, address_space(3)))
- ++# define __pmem __attribute__((noderef, address_space(5)))
- + #ifdef CONFIG_SPARSE_RCU_POINTER
- + # define __rcu __attribute__((noderef, address_space(4)))
- + #else
- +@@ -42,6 +43,7 @@ extern void __chk_io_ptr(const volatile void __iomem *);
- + # define __cond_lock(x,c) (c)
- + # define __percpu
- + # define __rcu
- ++# define __pmem
- + #endif
- +
- + /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
- +@@ -54,7 +56,11 @@ extern void __chk_io_ptr(const volatile void __iomem *);
- + #include <linux/compiler-gcc.h>
- + #endif
- +
- ++#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__)
- ++#define notrace __attribute__((hotpatch(0,0)))
- ++#else
- + #define notrace __attribute__((no_instrument_function))
- ++#endif
- +
- + /* Intel compiler defines __GNUC__. So we will overwrite implementations
- + * coming from above header files here
- +@@ -138,7 +144,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
- + */
- + #define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
- + #define __trace_if(cond) \
- +- if (__builtin_constant_p((cond)) ? !!(cond) : \
- ++ if (__builtin_constant_p(!!(cond)) ? !!(cond) : \
- + ({ \
- + int ______r; \
- + static struct ftrace_branch_data \
- +@@ -165,6 +171,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
- + # define barrier() __memory_barrier()
- + #endif
- +
- ++#ifndef barrier_data
- ++# define barrier_data(ptr) barrier()
- ++#endif
- ++
- + /* Unreachable code */
- + #ifndef unreachable
- + # define unreachable() do { } while (1)
- +@@ -186,6 +196,126 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
- + # define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
- + #endif
- +
- ++#include <linux/types.h>
- ++
- ++#define __READ_ONCE_SIZE \
- ++({ \
- ++ switch (size) { \
- ++ case 1: *(__u8 *)res = *(volatile __u8 *)p; break; \
- ++ case 2: *(__u16 *)res = *(volatile __u16 *)p; break; \
- ++ case 4: *(__u32 *)res = *(volatile __u32 *)p; break; \
- ++ case 8: *(__u64 *)res = *(volatile __u64 *)p; break; \
- ++ default: \
- ++ barrier(); \
- ++ __builtin_memcpy((void *)res, (const void *)p, size); \
- ++ barrier(); \
- ++ } \
- ++})
- ++
- ++static __always_inline
- ++void __read_once_size(const volatile void *p, void *res, int size)
- ++{
- ++ __READ_ONCE_SIZE;
- ++}
- ++
- ++#ifdef CONFIG_KASAN
- ++/*
- ++ * This function is not 'inline' because __no_sanitize_address confilcts
- ++ * with inlining. Attempt to inline it may cause a build failure.
- ++ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
- ++ * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
- ++ */
- ++static __no_sanitize_address __maybe_unused
- ++void __read_once_size_nocheck(const volatile void *p, void *res, int size)
- ++{
- ++ __READ_ONCE_SIZE;
- ++}
- ++#else
- ++static __always_inline
- ++void __read_once_size_nocheck(const volatile void *p, void *res, int size)
- ++{
- ++ __READ_ONCE_SIZE;
- ++}
- ++#endif
- ++
- ++static __always_inline void __write_once_size(volatile void *p, void *res, int size)
- ++{
- ++ switch (size) {
- ++ case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
- ++ case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
- ++ case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
- ++ case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
- ++ default:
- ++ barrier();
- ++ __builtin_memcpy((void *)p, (const void *)res, size);
- ++ barrier();
- ++ }
- ++}
- ++
- ++/*
- ++ * Prevent the compiler from merging or refetching reads or writes. The
- ++ * compiler is also forbidden from reordering successive instances of
- ++ * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
- ++ * compiler is aware of some particular ordering. One way to make the
- ++ * compiler aware of ordering is to put the two invocations of READ_ONCE,
- ++ * WRITE_ONCE or ACCESS_ONCE() in different C statements.
- ++ *
- ++ * In contrast to ACCESS_ONCE these two macros will also work on aggregate
- ++ * data types like structs or unions. If the size of the accessed data
- ++ * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
- ++ * READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a
- ++ * compile-time warning.
- ++ *
- ++ * Their two major use cases are: (1) Mediating communication between
- ++ * process-level code and irq/NMI handlers, all running on the same CPU,
- ++ * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
- ++ * mutilate accesses that either do not require ordering or that interact
- ++ * with an explicit memory barrier or atomic instruction that provides the
- ++ * required ordering.
- ++ */
- ++
- ++#define __READ_ONCE(x, check) \
- ++({ \
- ++ union { typeof(x) __val; char __c[1]; } __u; \
- ++ if (check) \
- ++ __read_once_size(&(x), __u.__c, sizeof(x)); \
- ++ else \
- ++ __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \
- ++ __u.__val; \
- ++})
- ++#define READ_ONCE(x) __READ_ONCE(x, 1)
- ++
- ++/*
- ++ * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
- ++ * to hide memory access from KASAN.
- ++ */
- ++#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)
- ++
- ++#define WRITE_ONCE(x, val) \
- ++({ \
- ++ union { typeof(x) __val; char __c[1]; } __u = \
- ++ { .__val = (__force typeof(x)) (val) }; \
- ++ __write_once_size(&(x), __u.__c, sizeof(x)); \
- ++ __u.__val; \
- ++})
- ++
- ++/**
- ++ * smp_cond_acquire() - Spin wait for cond with ACQUIRE ordering
- ++ * @cond: boolean expression to wait for
- ++ *
- ++ * Equivalent to using smp_load_acquire() on the condition variable but employs
- ++ * the control dependency of the wait to reduce the barrier on many platforms.
- ++ *
- ++ * The control dependency provides a LOAD->STORE order, the additional RMB
- ++ * provides LOAD->LOAD order, together they provide LOAD->{LOAD,STORE} order,
- ++ * aka. ACQUIRE.
- ++ */
- ++#define smp_cond_acquire(cond) do { \
- ++ while (!(cond)) \
- ++ cpu_relax(); \
- ++ smp_rmb(); /* ctrl + rmb := acquire */ \
- ++} while (0)
- ++
- + #endif /* __KERNEL__ */
- +
- + #endif /* __ASSEMBLY__ */
- +@@ -304,6 +434,14 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
- + #define __visible
- + #endif
- +
- ++/*
- ++ * Assume alignment of return value.
- ++ */
- ++#ifndef __assume_aligned
- ++#define __assume_aligned(a, ...)
- ++#endif
- ++
- ++
- + /* Are two types/vars the same type (ignoring qualifiers)? */
- + #ifndef __same_type
- + # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
- +@@ -311,7 +449,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
- +
- + /* Is this type a native word size -- useful for atomic operations */
- + #ifndef __native_word
- +-# define __native_word(t) (sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
- ++# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
- + #endif
- +
- + /* Compile time object size, -1 for unknown */
- +@@ -373,12 +511,38 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
- + * to make the compiler aware of ordering is to put the two invocations of
- + * ACCESS_ONCE() in different C statements.
- + *
- +- * This macro does absolutely -nothing- to prevent the CPU from reordering,
- +- * merging, or refetching absolutely anything at any time. Its main intended
- +- * use is to mediate communication between process-level code and irq/NMI
- +- * handlers, all running on the same CPU.
- ++ * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
- ++ * on a union member will work as long as the size of the member matches the
- ++ * size of the union and the size is smaller than word size.
- ++ *
- ++ * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
- ++ * between process-level code and irq/NMI handlers, all running on the same CPU,
- ++ * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
- ++ * mutilate accesses that either do not require ordering or that interact
- ++ * with an explicit memory barrier or atomic instruction that provides the
- ++ * required ordering.
- ++ *
- ++ * If possible use READ_ONCE()/WRITE_ONCE() instead.
- ++ */
- ++#define __ACCESS_ONCE(x) ({ \
- ++ __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
- ++ (volatile typeof(x) *)&(x); })
- ++#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
- ++
- ++/**
- ++ * lockless_dereference() - safely load a pointer for later dereference
- ++ * @p: The pointer to load
- ++ *
- ++ * Similar to rcu_dereference(), but for situations where the pointed-to
- ++ * object's lifetime is managed by something other than RCU. That
- ++ * "something other" might be reference counting or simple immortality.
- + */
- +-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
- ++#define lockless_dereference(p) \
- ++({ \
- ++ typeof(p) _________p1 = READ_ONCE(p); \
- ++ smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
- ++ (_________p1); \
- ++})
- +
- + /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
- + #ifdef CONFIG_KPROBES
- diff --git a/tools/mkimage/patches/200-gcc5_compat.patch b/tools/mkimage/patches/200-gcc5_compat.patch
- deleted file mode 100644
- index 4d55f00..0000000
- --- a/tools/mkimage/patches/200-gcc5_compat.patch
- +++ /dev/null
- @@ -1,93 +0,0 @@
- -From 478b02f1a7043b673565075ea5016376f3293b23 Mon Sep 17 00:00:00 2001
- -From: Hans de Goede <hdegoede@redhat.com>
- -Date: Sat, 7 Feb 2015 22:52:40 +0100
- -Subject: [PATCH] Add linux/compiler-gcc5.h to fix builds with gcc5
- -
- -Add linux/compiler-gcc5/h from the kernel sources at:
- -
- -commit 5631b8fba640a4ab2f8a954f63a603fa34eda96b
- -Author: Steven Noonan <steven@uplinklabs.net>
- -Date: Sat Oct 25 15:09:42 2014 -0700
- -
- - compiler/gcc4+: Remove inaccurate comment about 'asm goto' miscompiles
- -
- -Signed-off-by: Hans de Goede <hdegoede@redhat.com>
- ----
- - include/linux/compiler-gcc5.h | 65 +++++++++++++++++++++++++++++++++++++++++
- - 1 file changed, 65 insertions(+)
- - create mode 100644 include/linux/compiler-gcc5.h
- -
- -diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h
- -new file mode 100644
- -index 0000000..c8c5659
- ---- /dev/null
- -+++ b/include/linux/compiler-gcc5.h
- -@@ -0,0 +1,65 @@
- -+#ifndef __LINUX_COMPILER_H
- -+#error "Please don't include <linux/compiler-gcc5.h> directly, include <linux/compiler.h> instead."
- -+#endif
- -+
- -+#define __used __attribute__((__used__))
- -+#define __must_check __attribute__((warn_unused_result))
- -+#define __compiler_offsetof(a, b) __builtin_offsetof(a, b)
- -+
- -+/* Mark functions as cold. gcc will assume any path leading to a call
- -+ to them will be unlikely. This means a lot of manual unlikely()s
- -+ are unnecessary now for any paths leading to the usual suspects
- -+ like BUG(), printk(), panic() etc. [but let's keep them for now for
- -+ older compilers]
- -+
- -+ Early snapshots of gcc 4.3 don't support this and we can't detect this
- -+ in the preprocessor, but we can live with this because they're unreleased.
- -+ Maketime probing would be overkill here.
- -+
- -+ gcc also has a __attribute__((__hot__)) to move hot functions into
- -+ a special section, but I don't see any sense in this right now in
- -+ the kernel context */
- -+#define __cold __attribute__((__cold__))
- -+
- -+#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
- -+
- -+#ifndef __CHECKER__
- -+# define __compiletime_warning(message) __attribute__((warning(message)))
- -+# define __compiletime_error(message) __attribute__((error(message)))
- -+#endif /* __CHECKER__ */
- -+
- -+/*
- -+ * Mark a position in code as unreachable. This can be used to
- -+ * suppress control flow warnings after asm blocks that transfer
- -+ * control elsewhere.
- -+ *
- -+ * Early snapshots of gcc 4.5 don't support this and we can't detect
- -+ * this in the preprocessor, but we can live with this because they're
- -+ * unreleased. Really, we need to have autoconf for the kernel.
- -+ */
- -+#define unreachable() __builtin_unreachable()
- -+
- -+/* Mark a function definition as prohibited from being cloned. */
- -+#define __noclone __attribute__((__noclone__))
- -+
- -+/*
- -+ * Tell the optimizer that something else uses this function or variable.
- -+ */
- -+#define __visible __attribute__((externally_visible))
- -+
- -+/*
- -+ * GCC 'asm goto' miscompiles certain code sequences:
- -+ *
- -+ * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
- -+ *
- -+ * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
- -+ *
- -+ * (asm goto is automatically volatile - the naming reflects this.)
- -+ */
- -+#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
- -+
- -+#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
- -+#define __HAVE_BUILTIN_BSWAP32__
- -+#define __HAVE_BUILTIN_BSWAP64__
- -+#define __HAVE_BUILTIN_BSWAP16__
- -+#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
- ---
- -1.7.10.4
- -
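
As a closing illustration of the `barrier_data()` macro introduced at the top of the synced compiler-gcc.h: its comment explains that naming the pointer as an asm input keeps otherwise-dead stores alive on both gcc and clang. A hedged sketch under those assumptions (`handle_secret()` and the buffer are made up for the example; only the two macros come from the patch):

```c
/* barrier() vs. barrier_data(): clearing a local buffer is a dead store the
 * optimizer may drop; passing the pointer to the asm tells the compiler the
 * asm may read it, so the memset must actually be performed. */
#include <string.h>

#define barrier()         __asm__ __volatile__("" : : : "memory")
#define barrier_data(ptr) __asm__ __volatile__("" : : "r"(ptr) : "memory")

static void handle_secret(void)
{
	char key[32];

	/* ... fill and use key ... */
	memset(key, 0, sizeof(key));	/* without help, this can be elided */
	barrier_data(key);		/* keeps the stores: the asm "may see" key[] */
}

int main(void)
{
	handle_secret();
	return 0;
}
```
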