asm_arm.h

/********************************************************************
 *                                                                  *
 * THIS FILE IS PART OF THE OggVorbis 'TREMOR' CODEC SOURCE CODE.  *
 *                                                                  *
 * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS     *
 * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
 * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING.       *
 *                                                                  *
 * THE OggVorbis 'TREMOR' SOURCE CODE IS (C) COPYRIGHT 1994-2002    *
 * BY THE Xiph.Org FOUNDATION http://www.xiph.org/                  *
 *                                                                  *
 ********************************************************************

 function: arm7 and later wide math functions

 ********************************************************************/

#ifdef _ARM_ASSEM_

#if !defined(_V_WIDE_MATH) && !defined(_LOW_ACCURACY_)
#define _V_WIDE_MATH

static inline ogg_int32_t MULT32(ogg_int32_t x, ogg_int32_t y) {
  int lo,hi;
  asm volatile("smull\t%0, %1, %2, %3"
               : "=&r"(lo),"=&r"(hi)
               : "%r"(x),"r"(y)
               : "cc");
  return(hi);
}
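
/* Reference note (not part of the original file): on a compiler with 64-bit
   arithmetic, MULT32() is roughly equivalent to
     (ogg_int32_t)(((ogg_int64_t)x * y) >> 32);
   i.e. it returns the high word of the signed 64-bit product from smull. */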

static inline ogg_int32_t MULT31(ogg_int32_t x, ogg_int32_t y) {
  return MULT32(x,y)<<1;
}
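
/* Reference note (not part of the original file): MULT31() multiplies two
   Q31 fixed-point values, roughly (ogg_int32_t)(((ogg_int64_t)x * y) >> 31);
   because it shifts the high word left once, the lowest result bit is zero. */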

static inline ogg_int32_t MULT31_SHIFT15(ogg_int32_t x, ogg_int32_t y) {
  int lo,hi;
  asm volatile("smull %0, %1, %2, %3\n\t"
               "movs  %0, %0, lsr #15\n\t"
               "adc   %1, %0, %1, lsl #17\n\t"
               : "=&r"(lo),"=&r"(hi)
               : "%r"(x),"r"(y)
               : "cc");
  return(hi);
}
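
/* Reference note (not part of the original file): roughly
     (ogg_int32_t)(((ogg_int64_t)x * y) >> 15)
   where the movs/adc pair rounds in the dropped bit 14 via the carry flag. */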

#define MB() asm volatile ("" : : : "memory")
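
/* Note (not in the original file): MB() is a compiler-level memory barrier;
   the "memory" clobber keeps the compiler from reordering the two result
   stores in the XPROD/XNPROD helpers below across it. */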

static inline void XPROD32(ogg_int32_t  a, ogg_int32_t  b,
                           ogg_int32_t  t, ogg_int32_t  v,
                           ogg_int32_t *x, ogg_int32_t *y)
{
  int x1, y1, l;
  asm( "smull %0, %1, %4, %6\n\t"
       "smlal %0, %1, %5, %7\n\t"
       "rsb   %3, %4, #0\n\t"
       "smull %0, %2, %5, %6\n\t"
       "smlal %0, %2, %3, %7"
       : "=&r" (l), "=&r" (x1), "=&r" (y1), "=r" (a)
       : "3" (a), "r" (b), "r" (t), "r" (v)
       : "cc" );
  *x = x1;
  MB();
  *y = y1;
}
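
/* Reference sketch (not part of the original file): XPROD32 computes, roughly,
     *x = (ogg_int32_t)(((ogg_int64_t)a*t + (ogg_int64_t)b*v) >> 32);
     *y = (ogg_int32_t)(((ogg_int64_t)b*t - (ogg_int64_t)a*v) >> 32);
   i.e. a rotation-style cross product kept in the high word of each sum. */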

static inline void XPROD31(ogg_int32_t  a, ogg_int32_t  b,
                           ogg_int32_t  t, ogg_int32_t  v,
                           ogg_int32_t *x, ogg_int32_t *y)
{
  int x1, y1, l;
  asm( "smull %0, %1, %4, %6\n\t"
       "smlal %0, %1, %5, %7\n\t"
       "rsb   %3, %4, #0\n\t"
       "smull %0, %2, %5, %6\n\t"
       "smlal %0, %2, %3, %7"
       : "=&r" (l), "=&r" (x1), "=&r" (y1), "=r" (a)
       : "3" (a), "r" (b), "r" (t), "r" (v)
       : "cc" );
  *x = x1 << 1;
  MB();
  *y = y1 << 1;
}
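
/* Reference sketch (not part of the original file): the same cross product as
   XPROD32, scaled for Q31 operands; roughly
     *x = MULT31(a,t) + MULT31(b,v);
     *y = MULT31(b,t) - MULT31(a,v);
   (the asm keeps full 64-bit sums before shifting, so the last bit may differ). */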

static inline void XNPROD31(ogg_int32_t  a, ogg_int32_t  b,
                            ogg_int32_t  t, ogg_int32_t  v,
                            ogg_int32_t *x, ogg_int32_t *y)
{
  int x1, y1, l;
  asm( "rsb   %2, %4, #0\n\t"
       "smull %0, %1, %3, %5\n\t"
       "smlal %0, %1, %2, %6\n\t"
       "smull %0, %2, %4, %5\n\t"
       "smlal %0, %2, %3, %6"
       : "=&r" (l), "=&r" (x1), "=&r" (y1)
       : "r" (a), "r" (b), "r" (t), "r" (v)
       : "cc" );
  *x = x1 << 1;
  MB();
  *y = y1 << 1;
}
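
/* Reference sketch (not part of the original file): the negated variant, roughly
     *x = MULT31(a,t) - MULT31(b,v);
     *y = MULT31(b,t) + MULT31(a,v);  */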

#endif

#ifndef _V_CLIP_MATH
#define _V_CLIP_MATH

static inline ogg_int32_t CLIP_TO_15(ogg_int32_t x) {
  int tmp;
  asm volatile("subs  %1, %0, #32768\n\t"
               "movpl %0, #0x7f00\n\t"
               "orrpl %0, %0, #0xff\n"
               "adds  %1, %0, #32768\n\t"
               "movmi %0, #0x8000"
               : "+r"(x),"=r"(tmp)
               :
               : "cc");
  return(x);
}
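
/* Reference sketch (not part of the original file): CLIP_TO_15 clamps x to
   the 16-bit range, roughly
     if (x >  32767) x = 32767;
     if (x < -32768) x = 0x8000;
   where 0x8000 is the 16-bit two's-complement pattern for -32768, matching
   the movpl/orrpl and movmi paths above. */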

#endif

#ifndef _V_LSP_MATH_ASM
#define _V_LSP_MATH_ASM

static inline void lsp_loop_asm(ogg_uint32_t *qip,ogg_uint32_t *pip,
                                ogg_int32_t *qexpp,
                                ogg_int32_t *ilsp,ogg_int32_t wi,
                                ogg_int32_t m){

  ogg_uint32_t qi=*qip,pi=*pip;
  ogg_int32_t qexp=*qexpp;

  asm("mov     r0,%3;"
      "mov     r1,%5,asr#1;"
      "add     r0,r0,r1,lsl#3;"
      "1:"

      "ldmdb   r0!,{r1,r3};"
      "subs    r1,r1,%4;"          //ilsp[j]-wi
      "rsbmi   r1,r1,#0;"          //labs(ilsp[j]-wi)
      "umull   %0,r2,r1,%0;"       //qi*=labs(ilsp[j]-wi)

      "subs    r1,r3,%4;"          //ilsp[j+1]-wi
      "rsbmi   r1,r1,#0;"          //labs(ilsp[j+1]-wi)
      "umull   %1,r3,r1,%1;"       //pi*=labs(ilsp[j+1]-wi)

      "cmn     r2,r3;"             // shift down 16?
      "beq     0f;"
      "add     %2,%2,#16;"
      "mov     %0,%0,lsr #16;"
      "orr     %0,%0,r2,lsl #16;"
      "mov     %1,%1,lsr #16;"
      "orr     %1,%1,r3,lsl #16;"
      "0:"
      "cmp     r0,%3;\n"
      "bhi     1b;\n"

      // odd filter asymmetry
  137. "ands r0,%5,#1;\n"
  138. "beq 2f;\n"
  139. "add r0,%3,%5,lsl#2;\n"
  140. "ldr r1,[r0,#-4];\n"
  141. "mov r0,#0x4000;\n"
  142. "subs r1,r1,%4;\n" //ilsp[j]-wi
  143. "rsbmi r1,r1,#0;\n" //labs(ilsp[j]-wi)
  144. "umull %0,r2,r1,%0;\n" //qi*=labs(ilsp[j]-wi)
  145. "umull %1,r3,r0,%1;\n" //pi*=labs(ilsp[j+1]-wi)
  146. "cmn r2,r3;\n" // shift down 16?
  147. "beq 2f;\n"
  148. "add %2,%2,#16;\n"
  149. "mov %0,%0,lsr #16;\n"
  150. "orr %0,%0,r2,lsl #16;\n"
  151. "mov %1,%1,lsr #16;\n"
  152. "orr %1,%1,r3,lsl #16;\n"
  153. //qi=(pi>>shift)*labs(ilsp[j]-wi);
  154. //pi=(qi>>shift)*labs(ilsp[j+1]-wi);
  155. //qexp+=shift;
  156. //}
  157. /* normalize to max 16 sig figs */
  158. "2:"
  159. "mov r2,#0;"
  160. "orr r1,%0,%1;"
  161. "tst r1,#0xff000000;"
  162. "addne r2,r2,#8;"
  163. "movne r1,r1,lsr #8;"
  164. "tst r1,#0x00f00000;"
  165. "addne r2,r2,#4;"
  166. "movne r1,r1,lsr #4;"
  167. "tst r1,#0x000c0000;"
  168. "addne r2,r2,#2;"
  169. "movne r1,r1,lsr #2;"
  170. "tst r1,#0x00020000;"
  171. "addne r2,r2,#1;"
  172. "movne r1,r1,lsr #1;"
  173. "tst r1,#0x00010000;"
  174. "addne r2,r2,#1;"
  175. "mov %0,%0,lsr r2;"
  176. "mov %1,%1,lsr r2;"
  177. "add %2,%2,r2;"
  178. : "+r"(qi),"+r"(pi),"+r"(qexp)
  179. : "r"(ilsp),"r"(wi),"r"(m)
  180. : "r0","r1","r2","r3","cc");
  181. *qip=qi;
  182. *pip=pi;
  183. *qexpp=qexp;
  184. }
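
/* Reference sketch (not part of the original file): ignoring the 64-bit carry
   handling done with umull, the loop above behaves roughly like
     for (j = 0; j + 1 < m; j += 2) {
       qi *= labs(ilsp[j]   - wi);   // even-index roots accumulate into qi
       pi *= labs(ilsp[j+1] - wi);   // odd-index roots accumulate into pi
       // whenever either product spills into the high word, both are shifted
       // down by 16 and qexp is increased by 16
     }
   then, when m is odd, qi picks up one more factor labs(ilsp[m-1]-wi) while pi
   picks up the constant 0x4000, and finally qi and pi are normalized to at
   most 16 significant bits with the shift count added to qexp. */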

static inline void lsp_norm_asm(ogg_uint32_t *qip,ogg_int32_t *qexpp){

  ogg_uint32_t qi=*qip;
  ogg_int32_t qexp=*qexpp;

  asm("tst     %0,#0x0000ff00;"
      "moveq   %0,%0,lsl #8;"
      "subeq   %1,%1,#8;"
      "tst     %0,#0x0000f000;"
      "moveq   %0,%0,lsl #4;"
      "subeq   %1,%1,#4;"
      "tst     %0,#0x0000c000;"
      "moveq   %0,%0,lsl #2;"
      "subeq   %1,%1,#2;"
      "tst     %0,#0x00008000;"
      "moveq   %0,%0,lsl #1;"
      "subeq   %1,%1,#1;"
      : "+r"(qi),"+r"(qexp)
      :
      : "cc");

  *qip=qi;
  *qexpp=qexp;
}
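
/* Reference sketch (not part of the original file): assuming qi already fits
   in 16 bits, this is roughly
     while (qi && !(qi & 0x8000)) { qi <<= 1; qexp--; }
   i.e. left-justify qi within 16 bits and adjust the exponent to match. */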

#endif

#endif