0024-tools-mkimage-sync-include-linux-compiler-.h-with-u-boot-master.patch 29 KB

From: Matthias Schiffer <mschiffer@universe-factory.net>
Date: Sun, 8 May 2016 22:06:51 +0200
Subject: tools: mkimage: sync include/linux/compiler*.h with u-boot master

Fixes build with GCC 6.
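
For context, the GCC 6 breakage comes from the per-version header dispatch in the old include/linux/compiler-gcc.h (quoted below from the lines this sync removes): the stringified include resolves to "linux/compiler-gcc6.h", which is not shipped with the mkimage headers, so the preprocessor fails.

    #define __gcc_header(x) #x
    #define _gcc_header(x) __gcc_header(linux/compiler-gcc##x.h)
    #define gcc_header(x) _gcc_header(x)
    #include gcc_header(__GNUC__)  /* "linux/compiler-gcc6.h" when __GNUC__ == 6 */

The synced headers fold the version checks into compiler-gcc.h itself, so no compiler-gccN.h is needed per major release and the local 200-gcc5_compat.patch can be dropped.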
Signed-off-by: Matthias Schiffer <mschiffer@universe-factory.net>

diff --git a/tools/mkimage/patches/200-compiler-support.patch b/tools/mkimage/patches/200-compiler-support.patch
new file mode 100644
index 0000000..ca9c5b5
--- /dev/null
+++ b/tools/mkimage/patches/200-compiler-support.patch
@@ -0,0 +1,702 @@
+diff --git b/include/linux/compiler-gcc.h a/include/linux/compiler-gcc.h
+index e057bd2..22ab246 100644
+--- b/include/linux/compiler-gcc.h
++++ a/include/linux/compiler-gcc.h
+@@ -5,14 +5,28 @@
+ /*
+ * Common definitions for all gcc versions go here.
+ */
+-#define GCC_VERSION (__GNUC__ * 10000 \
+- + __GNUC_MINOR__ * 100 \
+- + __GNUC_PATCHLEVEL__)
+-
++#define GCC_VERSION (__GNUC__ * 10000 \
++ + __GNUC_MINOR__ * 100 \
++ + __GNUC_PATCHLEVEL__)
+
+ /* Optimization barrier */
++
+ /* The "volatile" is due to gcc bugs */
+ #define barrier() __asm__ __volatile__("": : :"memory")
++/*
++ * This version is i.e. to prevent dead stores elimination on @ptr
++ * where gcc and llvm may behave differently when otherwise using
++ * normal barrier(): while gcc behavior gets along with a normal
++ * barrier(), llvm needs an explicit input variable to be assumed
++ * clobbered. The issue is as follows: while the inline asm might
++ * access any memory it wants, the compiler could have fit all of
++ * @ptr into memory registers instead, and since @ptr never escaped
++ * from that, it proofed that the inline asm wasn't touching any of
++ * it. This version works well with both compilers, i.e. we're telling
++ * the compiler that the inline asm absolutely may see the contents
++ * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495
++ */
++#define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
+
+ /*
+ * This macro obfuscates arithmetic on a variable address so that gcc
+@@ -32,58 +46,63 @@
+ * the inline assembly constraint from =g to =r, in this particular
+ * case either is valid.
+ */
+-#define RELOC_HIDE(ptr, off) \
+- ({ unsigned long __ptr; \
+- __asm__ ("" : "=r"(__ptr) : "0"(ptr)); \
+- (typeof(ptr)) (__ptr + (off)); })
++#define RELOC_HIDE(ptr, off) \
++({ \
++ unsigned long __ptr; \
++ __asm__ ("" : "=r"(__ptr) : "0"(ptr)); \
++ (typeof(ptr)) (__ptr + (off)); \
++})
+
+ /* Make the optimizer believe the variable can be manipulated arbitrarily. */
+-#define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var))
++#define OPTIMIZER_HIDE_VAR(var) \
++ __asm__ ("" : "=r" (var) : "0" (var))
+
+ #ifdef __CHECKER__
+-#define __must_be_array(arr) 0
++#define __must_be_array(a) 0
+ #else
+ /* &a[0] degrades to a pointer: a different type from an array */
+-#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
++#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
+ #endif
+
+ /*
+ * Force always-inline if the user requests it so via the .config,
+ * or if gcc is too old:
+ */
+-#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
++#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
+ !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
+-# define inline inline __attribute__((always_inline)) notrace
+-# define __inline__ __inline__ __attribute__((always_inline)) notrace
+-# define __inline __inline __attribute__((always_inline)) notrace
++#define inline inline __attribute__((always_inline)) notrace
++#define __inline__ __inline__ __attribute__((always_inline)) notrace
++#define __inline __inline __attribute__((always_inline)) notrace
+ #else
+ /* A lot of inline functions can cause havoc with function tracing */
+-# define inline inline notrace
+-# define __inline__ __inline__ notrace
+-# define __inline __inline notrace
++#define inline inline notrace
++#define __inline__ __inline__ notrace
++#define __inline __inline notrace
+ #endif
+
+-#define __deprecated __attribute__((deprecated))
+-#ifndef __packed
+-#define __packed __attribute__((packed))
+-#endif
+-#ifndef __weak
+-#define __weak __attribute__((weak))
+-#endif
++#define __always_inline inline __attribute__((always_inline))
++#define noinline __attribute__((noinline))
++
++#define __deprecated __attribute__((deprecated))
++#define __packed __attribute__((packed))
++#define __weak __attribute__((weak))
++#define __alias(symbol) __attribute__((alias(#symbol)))
+
+ /*
+- * it doesn't make sense on ARM (currently the only user of __naked) to trace
+- * naked functions because then mcount is called without stack and frame pointer
+- * being set up and there is no chance to restore the lr register to the value
+- * before mcount was called.
++ * it doesn't make sense on ARM (currently the only user of __naked)
++ * to trace naked functions because then mcount is called without
++ * stack and frame pointer being set up and there is no chance to
++ * restore the lr register to the value before mcount was called.
++ *
++ * The asm() bodies of naked functions often depend on standard calling
++ * conventions, therefore they must be noinline and noclone.
+ *
+- * The asm() bodies of naked functions often depend on standard calling conventions,
+- * therefore they must be noinline and noclone. GCC 4.[56] currently fail to enforce
+- * this, so we must do so ourselves. See GCC PR44290.
++ * GCC 4.[56] currently fail to enforce this, so we must do so ourselves.
++ * See GCC PR44290.
+ */
+-#define __naked __attribute__((naked)) noinline __noclone notrace
++#define __naked __attribute__((naked)) noinline __noclone notrace
+
+-#define __noreturn __attribute__((noreturn))
++#define __noreturn __attribute__((noreturn))
+
+ /*
+ * From the GCC manual:
+@@ -95,34 +114,170 @@
+ * would be.
+ * [...]
+ */
+-#ifndef __pure
+-#define __pure __attribute__((pure))
++#define __pure __attribute__((pure))
++#define __aligned(x) __attribute__((aligned(x)))
++#define __printf(a, b) __attribute__((format(printf, a, b)))
++#define __scanf(a, b) __attribute__((format(scanf, a, b)))
++#define __attribute_const__ __attribute__((__const__))
++#define __maybe_unused __attribute__((unused))
++#define __always_unused __attribute__((unused))
++
++/* gcc version specific checks */
++
++#if GCC_VERSION < 30200
++# error Sorry, your compiler is too old - please upgrade it.
++#endif
++
++#if GCC_VERSION < 30300
++# define __used __attribute__((__unused__))
++#else
++# define __used __attribute__((__used__))
++#endif
++
++#ifdef CONFIG_GCOV_KERNEL
++# if GCC_VERSION < 30400
++# error "GCOV profiling support for gcc versions below 3.4 not included"
++# endif /* __GNUC_MINOR__ */
++#endif /* CONFIG_GCOV_KERNEL */
++
++#if GCC_VERSION >= 30400
++#define __must_check __attribute__((warn_unused_result))
++#endif
++
++#if GCC_VERSION >= 40000
++
++/* GCC 4.1.[01] miscompiles __weak */
++#ifdef __KERNEL__
++# if GCC_VERSION >= 40100 && GCC_VERSION <= 40101
++# error Your version of gcc miscompiles the __weak directive
++# endif
++#endif
++
++#define __used __attribute__((__used__))
++#define __compiler_offsetof(a, b) \
++ __builtin_offsetof(a, b)
++
++#if GCC_VERSION >= 40100 && GCC_VERSION < 40600
++# define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
++#endif
++
++#if GCC_VERSION >= 40300
++/* Mark functions as cold. gcc will assume any path leading to a call
++ * to them will be unlikely. This means a lot of manual unlikely()s
++ * are unnecessary now for any paths leading to the usual suspects
++ * like BUG(), printk(), panic() etc. [but let's keep them for now for
++ * older compilers]
++ *
++ * Early snapshots of gcc 4.3 don't support this and we can't detect this
++ * in the preprocessor, but we can live with this because they're unreleased.
++ * Maketime probing would be overkill here.
++ *
++ * gcc also has a __attribute__((__hot__)) to move hot functions into
++ * a special section, but I don't see any sense in this right now in
++ * the kernel context
++ */
++#define __cold __attribute__((__cold__))
++
++#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
++
++#ifndef __CHECKER__
++# define __compiletime_warning(message) __attribute__((warning(message)))
++# define __compiletime_error(message) __attribute__((error(message)))
++#endif /* __CHECKER__ */
++#endif /* GCC_VERSION >= 40300 */
++
++#if GCC_VERSION >= 40500
++/*
++ * Mark a position in code as unreachable. This can be used to
++ * suppress control flow warnings after asm blocks that transfer
++ * control elsewhere.
++ *
++ * Early snapshots of gcc 4.5 don't support this and we can't detect
++ * this in the preprocessor, but we can live with this because they're
++ * unreleased. Really, we need to have autoconf for the kernel.
++ */
++#define unreachable() __builtin_unreachable()
++
++/* Mark a function definition as prohibited from being cloned. */
++#define __noclone __attribute__((__noclone__))
++
++#endif /* GCC_VERSION >= 40500 */
++
++#if GCC_VERSION >= 40600
++/*
++ * When used with Link Time Optimization, gcc can optimize away C functions or
++ * variables which are referenced only from assembly code. __visible tells the
++ * optimizer that something else uses this function or variable, thus preventing
++ * this.
++ */
++#define __visible __attribute__((externally_visible))
+ #endif
+-#ifndef __aligned
+-#define __aligned(x) __attribute__((aligned(x)))
++
++
++#if GCC_VERSION >= 40900 && !defined(__CHECKER__)
++/*
++ * __assume_aligned(n, k): Tell the optimizer that the returned
++ * pointer can be assumed to be k modulo n. The second argument is
++ * optional (default 0), so we use a variadic macro to make the
++ * shorthand.
++ *
++ * Beware: Do not apply this to functions which may return
++ * ERR_PTRs. Also, it is probably unwise to apply it to functions
++ * returning extra information in the low bits (but in that case the
++ * compiler should see some alignment anyway, when the return value is
++ * massaged by 'flags = ptr & 3; ptr &= ~3;').
++ */
++#define __assume_aligned(a, ...) __attribute__((__assume_aligned__(a, ## __VA_ARGS__)))
+ #endif
+-#define __printf(a, b) __attribute__((format(printf, a, b)))
+-#define __scanf(a, b) __attribute__((format(scanf, a, b)))
+-#define noinline __attribute__((noinline))
+-#define __attribute_const__ __attribute__((__const__))
+-#define __maybe_unused __attribute__((unused))
+-#define __always_unused __attribute__((unused))
+
+-#define __gcc_header(x) #x
+-#define _gcc_header(x) __gcc_header(linux/compiler-gcc##x.h)
+-#define gcc_header(x) _gcc_header(x)
+-#include gcc_header(__GNUC__)
++/*
++ * GCC 'asm goto' miscompiles certain code sequences:
++ *
++ * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
++ *
++ * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
++ *
++ * (asm goto is automatically volatile - the naming reflects this.)
++ */
++#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
++
++#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
++#if GCC_VERSION >= 40400
++#define __HAVE_BUILTIN_BSWAP32__
++#define __HAVE_BUILTIN_BSWAP64__
++#endif
++#if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600)
++#define __HAVE_BUILTIN_BSWAP16__
++#endif
++#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
++
++#if GCC_VERSION >= 50000
++#define KASAN_ABI_VERSION 4
++#elif GCC_VERSION >= 40902
++#define KASAN_ABI_VERSION 3
++#endif
++
++#if GCC_VERSION >= 40902
++/*
++ * Tell the compiler that address safety instrumentation (KASAN)
++ * should not be applied to that function.
++ * Conflicts with inlining: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
++ */
++#define __no_sanitize_address __attribute__((no_sanitize_address))
++#endif
++
++#endif /* gcc version >= 40000 specific checks */
+
+ #if !defined(__noclone)
+ #define __noclone /* not needed */
+ #endif
+
++#if !defined(__no_sanitize_address)
++#define __no_sanitize_address
++#endif
++
+ /*
+ * A trick to suppress uninitialized variable warning without generating any
+ * code
+ */
+ #define uninitialized_var(x) x = x
+-
+-#ifndef __always_inline
+-#define __always_inline inline __attribute__((always_inline))
+-#endif
+diff --git b/include/linux/compiler-gcc3.h a/include/linux/compiler-gcc3.h
+deleted file mode 100644
+index 7d89feb..0000000
+--- b/include/linux/compiler-gcc3.h
++++ /dev/null
+@@ -1,23 +0,0 @@
+-#ifndef __LINUX_COMPILER_H
+-#error "Please don't include <linux/compiler-gcc3.h> directly, include <linux/compiler.h> instead."
+-#endif
+-
+-#if GCC_VERSION < 30200
+-# error Sorry, your compiler is too old - please upgrade it.
+-#endif
+-
+-#if GCC_VERSION >= 30300
+-# define __used __attribute__((__used__))
+-#else
+-# define __used __attribute__((__unused__))
+-#endif
+-
+-#if GCC_VERSION >= 30400
+-#define __must_check __attribute__((warn_unused_result))
+-#endif
+-
+-#ifdef CONFIG_GCOV_KERNEL
+-# if GCC_VERSION < 30400
+-# error "GCOV profiling support for gcc versions below 3.4 not included"
+-# endif /* __GNUC_MINOR__ */
+-#endif /* CONFIG_GCOV_KERNEL */
+diff --git b/include/linux/compiler-gcc4.h a/include/linux/compiler-gcc4.h
+deleted file mode 100644
+index c982a09..0000000
+--- b/include/linux/compiler-gcc4.h
++++ /dev/null
+@@ -1,81 +0,0 @@
+-#ifndef __LINUX_COMPILER_H
+-#error "Please don't include <linux/compiler-gcc4.h> directly, include <linux/compiler.h> instead."
+-#endif
+-
+-#define __used __attribute__((__used__))
+-#define __must_check __attribute__((warn_unused_result))
+-#define __compiler_offsetof(a,b) __builtin_offsetof(a,b)
+-
+-#if GCC_VERSION >= 40100 && GCC_VERSION < 40600
+-# define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
+-#endif
+-
+-#if GCC_VERSION >= 40300
+-/* Mark functions as cold. gcc will assume any path leading to a call
+- to them will be unlikely. This means a lot of manual unlikely()s
+- are unnecessary now for any paths leading to the usual suspects
+- like BUG(), printk(), panic() etc. [but let's keep them for now for
+- older compilers]
+-
+- Early snapshots of gcc 4.3 don't support this and we can't detect this
+- in the preprocessor, but we can live with this because they're unreleased.
+- Maketime probing would be overkill here.
+-
+- gcc also has a __attribute__((__hot__)) to move hot functions into
+- a special section, but I don't see any sense in this right now in
+- the kernel context */
+-#define __cold __attribute__((__cold__))
+-
+-#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
+-
+-#ifndef __CHECKER__
+-# define __compiletime_warning(message) __attribute__((warning(message)))
+-# define __compiletime_error(message) __attribute__((error(message)))
+-#endif /* __CHECKER__ */
+-#endif /* GCC_VERSION >= 40300 */
+-
+-#if GCC_VERSION >= 40500
+-/*
+- * Mark a position in code as unreachable. This can be used to
+- * suppress control flow warnings after asm blocks that transfer
+- * control elsewhere.
+- *
+- * Early snapshots of gcc 4.5 don't support this and we can't detect
+- * this in the preprocessor, but we can live with this because they're
+- * unreleased. Really, we need to have autoconf for the kernel.
+- */
+-#define unreachable() __builtin_unreachable()
+-
+-/* Mark a function definition as prohibited from being cloned. */
+-#define __noclone __attribute__((__noclone__))
+-
+-#endif /* GCC_VERSION >= 40500 */
+-
+-#if GCC_VERSION >= 40600
+-/*
+- * Tell the optimizer that something else uses this function or variable.
+- */
+-#define __visible __attribute__((externally_visible))
+-#endif
+-
+-/*
+- * GCC 'asm goto' miscompiles certain code sequences:
+- *
+- * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
+- *
+- * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
+- * Fixed in GCC 4.8.2 and later versions.
+- *
+- * (asm goto is automatically volatile - the naming reflects this.)
+- */
+-#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
+-
+-#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
+-#if GCC_VERSION >= 40400
+-#define __HAVE_BUILTIN_BSWAP32__
+-#define __HAVE_BUILTIN_BSWAP64__
+-#endif
+-#if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600)
+-#define __HAVE_BUILTIN_BSWAP16__
+-#endif
+-#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
+diff --git b/include/linux/compiler-intel.h a/include/linux/compiler-intel.h
+index ba147a1..d4c7113 100644
+--- b/include/linux/compiler-intel.h
++++ a/include/linux/compiler-intel.h
+@@ -13,9 +13,14 @@
+ /* Intel ECC compiler doesn't support gcc specific asm stmts.
+ * It uses intrinsics to do the equivalent things.
+ */
++#undef barrier
++#undef barrier_data
+ #undef RELOC_HIDE
+ #undef OPTIMIZER_HIDE_VAR
+
++#define barrier() __memory_barrier()
++#define barrier_data(ptr) barrier()
++
+ #define RELOC_HIDE(ptr, off) \
+ ({ unsigned long __ptr; \
+ __ptr = (unsigned long) (ptr); \
+diff --git b/include/linux/compiler.h a/include/linux/compiler.h
+index d5ad7b1..020ad16 100644
+--- b/include/linux/compiler.h
++++ a/include/linux/compiler.h
+@@ -17,6 +17,7 @@
+ # define __release(x) __context__(x,-1)
+ # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
+ # define __percpu __attribute__((noderef, address_space(3)))
++# define __pmem __attribute__((noderef, address_space(5)))
+ #ifdef CONFIG_SPARSE_RCU_POINTER
+ # define __rcu __attribute__((noderef, address_space(4)))
+ #else
+@@ -42,6 +43,7 @@ extern void __chk_io_ptr(const volatile void __iomem *);
+ # define __cond_lock(x,c) (c)
+ # define __percpu
+ # define __rcu
++# define __pmem
+ #endif
+
+ /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
+@@ -54,7 +56,11 @@ extern void __chk_io_ptr(const volatile void __iomem *);
+ #include <linux/compiler-gcc.h>
+ #endif
+
++#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__)
++#define notrace __attribute__((hotpatch(0,0)))
++#else
+ #define notrace __attribute__((no_instrument_function))
++#endif
+
+ /* Intel compiler defines __GNUC__. So we will overwrite implementations
+ * coming from above header files here
+@@ -138,7 +144,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+ */
+ #define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
+ #define __trace_if(cond) \
+- if (__builtin_constant_p((cond)) ? !!(cond) : \
++ if (__builtin_constant_p(!!(cond)) ? !!(cond) : \
+ ({ \
+ int ______r; \
+ static struct ftrace_branch_data \
+@@ -165,6 +171,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+ # define barrier() __memory_barrier()
+ #endif
+
++#ifndef barrier_data
++# define barrier_data(ptr) barrier()
++#endif
++
+ /* Unreachable code */
+ #ifndef unreachable
+ # define unreachable() do { } while (1)
+@@ -186,6 +196,126 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+ # define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
+ #endif
+
++#include <linux/types.h>
++
++#define __READ_ONCE_SIZE \
++({ \
++ switch (size) { \
++ case 1: *(__u8 *)res = *(volatile __u8 *)p; break; \
++ case 2: *(__u16 *)res = *(volatile __u16 *)p; break; \
++ case 4: *(__u32 *)res = *(volatile __u32 *)p; break; \
++ case 8: *(__u64 *)res = *(volatile __u64 *)p; break; \
++ default: \
++ barrier(); \
++ __builtin_memcpy((void *)res, (const void *)p, size); \
++ barrier(); \
++ } \
++})
++
++static __always_inline
++void __read_once_size(const volatile void *p, void *res, int size)
++{
++ __READ_ONCE_SIZE;
++}
++
++#ifdef CONFIG_KASAN
++/*
++ * This function is not 'inline' because __no_sanitize_address confilcts
++ * with inlining. Attempt to inline it may cause a build failure.
++ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
++ * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
++ */
++static __no_sanitize_address __maybe_unused
++void __read_once_size_nocheck(const volatile void *p, void *res, int size)
++{
++ __READ_ONCE_SIZE;
++}
++#else
++static __always_inline
++void __read_once_size_nocheck(const volatile void *p, void *res, int size)
++{
++ __READ_ONCE_SIZE;
++}
++#endif
++
++static __always_inline void __write_once_size(volatile void *p, void *res, int size)
++{
++ switch (size) {
++ case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
++ case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
++ case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
++ case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
++ default:
++ barrier();
++ __builtin_memcpy((void *)p, (const void *)res, size);
++ barrier();
++ }
++}
++
++/*
++ * Prevent the compiler from merging or refetching reads or writes. The
++ * compiler is also forbidden from reordering successive instances of
++ * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
++ * compiler is aware of some particular ordering. One way to make the
++ * compiler aware of ordering is to put the two invocations of READ_ONCE,
++ * WRITE_ONCE or ACCESS_ONCE() in different C statements.
++ *
++ * In contrast to ACCESS_ONCE these two macros will also work on aggregate
++ * data types like structs or unions. If the size of the accessed data
++ * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
++ * READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a
++ * compile-time warning.
++ *
++ * Their two major use cases are: (1) Mediating communication between
++ * process-level code and irq/NMI handlers, all running on the same CPU,
++ * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
++ * mutilate accesses that either do not require ordering or that interact
++ * with an explicit memory barrier or atomic instruction that provides the
++ * required ordering.
++ */
++
++#define __READ_ONCE(x, check) \
++({ \
++ union { typeof(x) __val; char __c[1]; } __u; \
++ if (check) \
++ __read_once_size(&(x), __u.__c, sizeof(x)); \
++ else \
++ __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \
++ __u.__val; \
++})
++#define READ_ONCE(x) __READ_ONCE(x, 1)
++
++/*
++ * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
++ * to hide memory access from KASAN.
++ */
++#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)
++
++#define WRITE_ONCE(x, val) \
++({ \
++ union { typeof(x) __val; char __c[1]; } __u = \
++ { .__val = (__force typeof(x)) (val) }; \
++ __write_once_size(&(x), __u.__c, sizeof(x)); \
++ __u.__val; \
++})
++
++/**
++ * smp_cond_acquire() - Spin wait for cond with ACQUIRE ordering
++ * @cond: boolean expression to wait for
++ *
++ * Equivalent to using smp_load_acquire() on the condition variable but employs
++ * the control dependency of the wait to reduce the barrier on many platforms.
++ *
++ * The control dependency provides a LOAD->STORE order, the additional RMB
++ * provides LOAD->LOAD order, together they provide LOAD->{LOAD,STORE} order,
++ * aka. ACQUIRE.
++ */
++#define smp_cond_acquire(cond) do { \
++ while (!(cond)) \
++ cpu_relax(); \
++ smp_rmb(); /* ctrl + rmb := acquire */ \
++} while (0)
++
+ #endif /* __KERNEL__ */
+
+ #endif /* __ASSEMBLY__ */
+@@ -304,6 +434,14 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+ #define __visible
+ #endif
+
++/*
++ * Assume alignment of return value.
++ */
++#ifndef __assume_aligned
++#define __assume_aligned(a, ...)
++#endif
++
++
+ /* Are two types/vars the same type (ignoring qualifiers)? */
+ #ifndef __same_type
+ # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
+@@ -311,7 +449,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+
+ /* Is this type a native word size -- useful for atomic operations */
+ #ifndef __native_word
+-# define __native_word(t) (sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
++# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
+ #endif
+
+ /* Compile time object size, -1 for unknown */
+@@ -373,12 +511,38 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+ * to make the compiler aware of ordering is to put the two invocations of
+ * ACCESS_ONCE() in different C statements.
+ *
+- * This macro does absolutely -nothing- to prevent the CPU from reordering,
+- * merging, or refetching absolutely anything at any time. Its main intended
+- * use is to mediate communication between process-level code and irq/NMI
+- * handlers, all running on the same CPU.
++ * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
++ * on a union member will work as long as the size of the member matches the
++ * size of the union and the size is smaller than word size.
++ *
++ * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
++ * between process-level code and irq/NMI handlers, all running on the same CPU,
++ * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
++ * mutilate accesses that either do not require ordering or that interact
++ * with an explicit memory barrier or atomic instruction that provides the
++ * required ordering.
++ *
++ * If possible use READ_ONCE()/WRITE_ONCE() instead.
++ */
++#define __ACCESS_ONCE(x) ({ \
++ __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
++ (volatile typeof(x) *)&(x); })
++#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
++
++/**
++ * lockless_dereference() - safely load a pointer for later dereference
++ * @p: The pointer to load
++ *
++ * Similar to rcu_dereference(), but for situations where the pointed-to
++ * object's lifetime is managed by something other than RCU. That
++ * "something other" might be reference counting or simple immortality.
+ */
+-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
++#define lockless_dereference(p) \
++({ \
++ typeof(p) _________p1 = READ_ONCE(p); \
++ smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
++ (_________p1); \
++})
+
+ /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
+ #ifdef CONFIG_KPROBES
diff --git a/tools/mkimage/patches/200-gcc5_compat.patch b/tools/mkimage/patches/200-gcc5_compat.patch
deleted file mode 100644
index 4d55f00..0000000
--- a/tools/mkimage/patches/200-gcc5_compat.patch
+++ /dev/null
@@ -1,93 +0,0 @@
-From 478b02f1a7043b673565075ea5016376f3293b23 Mon Sep 17 00:00:00 2001
-From: Hans de Goede <hdegoede@redhat.com>
-Date: Sat, 7 Feb 2015 22:52:40 +0100
-Subject: [PATCH] Add linux/compiler-gcc5.h to fix builds with gcc5
-
-Add linux/compiler-gcc5/h from the kernel sources at:
-
-commit 5631b8fba640a4ab2f8a954f63a603fa34eda96b
-Author: Steven Noonan <steven@uplinklabs.net>
-Date: Sat Oct 25 15:09:42 2014 -0700
-
- compiler/gcc4+: Remove inaccurate comment about 'asm goto' miscompiles
-
-Signed-off-by: Hans de Goede <hdegoede@redhat.com>
----
- include/linux/compiler-gcc5.h | 65 +++++++++++++++++++++++++++++++++++++++++
- 1 file changed, 65 insertions(+)
- create mode 100644 include/linux/compiler-gcc5.h
-
-diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h
-new file mode 100644
-index 0000000..c8c5659
---- /dev/null
-+++ b/include/linux/compiler-gcc5.h
-@@ -0,0 +1,65 @@
-+#ifndef __LINUX_COMPILER_H
-+#error "Please don't include <linux/compiler-gcc5.h> directly, include <linux/compiler.h> instead."
-+#endif
-+
-+#define __used __attribute__((__used__))
-+#define __must_check __attribute__((warn_unused_result))
-+#define __compiler_offsetof(a, b) __builtin_offsetof(a, b)
-+
-+/* Mark functions as cold. gcc will assume any path leading to a call
-+ to them will be unlikely. This means a lot of manual unlikely()s
-+ are unnecessary now for any paths leading to the usual suspects
-+ like BUG(), printk(), panic() etc. [but let's keep them for now for
-+ older compilers]
-+
-+ Early snapshots of gcc 4.3 don't support this and we can't detect this
-+ in the preprocessor, but we can live with this because they're unreleased.
-+ Maketime probing would be overkill here.
-+
-+ gcc also has a __attribute__((__hot__)) to move hot functions into
-+ a special section, but I don't see any sense in this right now in
-+ the kernel context */
-+#define __cold __attribute__((__cold__))
-+
-+#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
-+
-+#ifndef __CHECKER__
-+# define __compiletime_warning(message) __attribute__((warning(message)))
-+# define __compiletime_error(message) __attribute__((error(message)))
-+#endif /* __CHECKER__ */
-+
-+/*
-+ * Mark a position in code as unreachable. This can be used to
-+ * suppress control flow warnings after asm blocks that transfer
-+ * control elsewhere.
-+ *
-+ * Early snapshots of gcc 4.5 don't support this and we can't detect
-+ * this in the preprocessor, but we can live with this because they're
-+ * unreleased. Really, we need to have autoconf for the kernel.
-+ */
-+#define unreachable() __builtin_unreachable()
-+
-+/* Mark a function definition as prohibited from being cloned. */
-+#define __noclone __attribute__((__noclone__))
-+
-+/*
-+ * Tell the optimizer that something else uses this function or variable.
-+ */
-+#define __visible __attribute__((externally_visible))
-+
-+/*
-+ * GCC 'asm goto' miscompiles certain code sequences:
-+ *
-+ * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
-+ *
-+ * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
-+ *
-+ * (asm goto is automatically volatile - the naming reflects this.)
-+ */
-+#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
-+
-+#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
-+#define __HAVE_BUILTIN_BSWAP32__
-+#define __HAVE_BUILTIN_BSWAP64__
-+#define __HAVE_BUILTIN_BSWAP16__
-+#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
---
-1.7.10.4
-