/*
 * intrinsics.h
 *
 * Compiler intrinsics for ARMv7-M core.
 *
 * Written & released by Keir Fraser <keir.xen@gmail.com>
 *
 * This is free and unencumbered software released into the public domain.
 * See the file COPYING for more details, or visit <http://unlicense.org>.
 */

struct exception_frame {
    uint32_t r0, r1, r2, r3, r12, lr, pc, psr;
};

#define _STR(x) #x
#define STR(x) _STR(x)

/* Force a compilation error if condition is true */
#define BUILD_BUG_ON(cond) ({ _Static_assert(!(cond), "!(" #cond ")"); })

#define __aligned(x) __attribute__((aligned(x)))
#define __packed __attribute__((packed))

#define always_inline __inline__ __attribute__((always_inline))
#define noinline __attribute__((noinline))

#define likely(x) __builtin_expect(!!(x),1)
#define unlikely(x) __builtin_expect(!!(x),0)

#define illegal() asm volatile (".short 0xde00");

#define barrier() asm volatile ("" ::: "memory")
#define cpu_sync() asm volatile ("dsb; isb" ::: "memory")
#define cpu_relax() asm volatile ("nop" ::: "memory")

#define sv_call(imm) asm volatile ( "svc %0" : : "i" (imm) )

#define read_special(reg) ({                        \
    uint32_t __x;                                   \
    asm volatile ("mrs %0,"#reg : "=r" (__x) ::);   \
    __x;                                            \
})

#define write_special(reg,val) ({                   \
    uint32_t __x = (uint32_t)(val);                 \
    asm volatile ("msr "#reg",%0" :: "r" (__x) :);  \
})

/* CONTROL[1] == 0 => running on Master Stack (Exception Handler mode). */
#define CONTROL_SPSEL 2
#define in_exception() (!(read_special(control) & CONTROL_SPSEL))

#define global_disable_exceptions() \
    asm volatile ("cpsid f; cpsid i" ::: "memory")
#define global_enable_exceptions() \
    asm volatile ("cpsie f; cpsie i" ::: "memory")

/* NB. IRQ disable via CPSID/MSR is self-synchronising. No barrier needed. */
#define IRQ_global_disable() asm volatile ("cpsid i" ::: "memory")
#define IRQ_global_enable() asm volatile ("cpsie i" ::: "memory")

/* Save/restore IRQ priority levels.
 * NB. IRQ disable via MSR is self-synchronising. I have confirmed this on
 * Cortex-M3: any pending IRQs are handled before they are disabled by
 * a BASEPRI update. Hence no barrier is needed here. */
#define IRQ_save(newpri) ({                         \
    uint8_t __newpri = (newpri)<<4;                 \
    uint8_t __oldpri = read_special(basepri);       \
    if (!__oldpri || (__oldpri > __newpri))         \
        write_special(basepri, __newpri);           \
    __oldpri; })

/* NB. Same as CPSIE, any pending IRQ enabled by this BASEPRI update may
 * execute a couple of instructions after the MSR instruction. This has been
 * confirmed on Cortex-M3. */
#define IRQ_restore(oldpri) write_special(basepri, (oldpri))
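
/*
 * Illustrative sketch (not from the original header): the usual pattern for
 * the IRQ_save()/IRQ_restore() pair above is to bracket a short critical
 * section. The priority level 14 and the shared counter are assumptions
 * made for this example only.
 */
#if 0 /* example only */
static volatile uint32_t example_shared_count;
static void example_critical_section(void)
{
    uint32_t oldpri = IRQ_save(14); /* mask IRQs with priority value >= 14 */
    example_shared_count++;         /* update protected from those IRQs */
    IRQ_restore(oldpri);            /* restore the previous BASEPRI level */
}
#endif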

static inline uint16_t _rev16(uint16_t x)
{
    uint16_t result;
    asm volatile ("rev16 %0,%1" : "=r" (result) : "r" (x));
    return result;
}

static inline uint32_t _rev32(uint32_t x)
{
    uint32_t result;
    asm volatile ("rev %0,%1" : "=r" (result) : "r" (x));
    return result;
}

static inline uint32_t _rbit32(uint32_t x)
{
    uint32_t result;
    asm volatile ("rbit %0,%1" : "=r" (result) : "r" (x));
    return result;
}

extern void __bad_cmpxchg(volatile void *ptr, int size);

static always_inline unsigned long __cmpxchg(
    volatile void *ptr, unsigned long old, unsigned long new, int size)
{
    unsigned long oldval, res;

    switch (size) {
    case 1:
        do {
            asm volatile("    ldrexb   %1,[%2]    \n"
                         "    movs     %0,#0      \n"
                         "    cmp      %1,%3      \n"
                         "    it       eq         \n"
                         "    strexbeq %0,%4,[%2] \n"
                         : "=&r" (res), "=&r" (oldval)
                         : "r" (ptr), "Ir" (old), "r" (new)
                         : "memory", "cc");
        } while (res);
        break;
    case 2:
        do {
            asm volatile("    ldrexh   %1,[%2]    \n"
                         "    movs     %0,#0      \n"
                         "    cmp      %1,%3      \n"
                         "    it       eq         \n"
                         "    strexheq %0,%4,[%2] \n"
                         : "=&r" (res), "=&r" (oldval)
                         : "r" (ptr), "Ir" (old), "r" (new)
                         : "memory", "cc");
        } while (res);
        break;
    case 4:
        do {
            asm volatile("    ldrex    %1,[%2]    \n"
                         "    movs     %0,#0      \n"
                         "    cmp      %1,%3      \n"
                         "    it       eq         \n"
                         "    strexeq  %0,%4,[%2] \n"
                         : "=&r" (res), "=&r" (oldval)
                         : "r" (ptr), "Ir" (old), "r" (new)
                         : "memory", "cc");
        } while (res);
        break;
    default:
        __bad_cmpxchg(ptr, size);
        oldval = 0;
    }

    return oldval;
}

#define cmpxchg(ptr,o,n)                              \
    ((__typeof__(*(ptr)))__cmpxchg((ptr),             \
                                   (unsigned long)(o), \
                                   (unsigned long)(n), \
                                   sizeof(*(ptr))))
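
/*
 * Illustrative sketch (not from the original header): cmpxchg() returns the
 * value previously held at *ptr, so a lock-free update retries until the
 * compare-and-swap observes the value it expected. The counter below is an
 * assumption for the example only.
 */
#if 0 /* example only */
static volatile uint32_t example_counter;
static void example_atomic_increment(void)
{
    uint32_t old, seen;
    do {
        old = example_counter;
        seen = cmpxchg(&example_counter, old, old + 1);
    } while (seen != old); /* another context changed it: retry */
}
#endif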

/*
 * Local variables:
 * mode: C
 * c-file-style: "Linux"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */