misc.h

/********************************************************************
 *                                                                  *
 * THIS FILE IS PART OF THE OggVorbis 'TREMOR' CODEC SOURCE CODE.   *
 *                                                                  *
 * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS     *
 * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
 * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING.       *
 *                                                                  *
 * THE OggVorbis 'TREMOR' SOURCE CODE IS (C) COPYRIGHT 1994-2003    *
 * BY THE Xiph.Org FOUNDATION http://www.xiph.org/                  *
 *                                                                  *
 ********************************************************************

 function: miscellaneous math and prototypes

 ********************************************************************/
#ifndef _V_RANDOM_H_
#define _V_RANDOM_H_
#include "ivorbiscodec.h"
#include "os_types.h"

/*#define _VDBG_GRAPHFILE "_0.m"*/

#ifdef _VDBG_GRAPHFILE
extern void *_VDBG_malloc(void *ptr,long bytes,char *file,long line);
extern void  _VDBG_free(void *ptr,char *file,long line);

#undef _ogg_malloc
#undef _ogg_calloc
#undef _ogg_realloc
#undef _ogg_free

#define _ogg_malloc(x) _VDBG_malloc(NULL,(x),__FILE__,__LINE__)
#define _ogg_calloc(x,y) _VDBG_malloc(NULL,(x)*(y),__FILE__,__LINE__)
#define _ogg_realloc(x,y) _VDBG_malloc((x),(y),__FILE__,__LINE__)
#define _ogg_free(x) _VDBG_free((x),__FILE__,__LINE__)
#endif

#include "asm_arm.h"

#ifndef _V_WIDE_MATH
#define _V_WIDE_MATH

#ifndef _LOW_ACCURACY_
/* 64 bit multiply */

#include <sys/types.h>

#if BYTE_ORDER==LITTLE_ENDIAN
union magic {
  struct {
    ogg_int32_t lo;
    ogg_int32_t hi;
  } halves;
  ogg_int64_t whole;
};
#else
union magic {
  struct {
    ogg_int32_t hi;
    ogg_int32_t lo;
  } halves;
  ogg_int64_t whole;
};
#endif

static inline ogg_int32_t MULT32(ogg_int32_t x, ogg_int32_t y) {
  union magic magic;
  magic.whole = (ogg_int64_t)x * y;
  return magic.halves.hi;
}

static inline ogg_int32_t MULT31(ogg_int32_t x, ogg_int32_t y) {
  return MULT32(x,y)<<1;
}

static inline ogg_int32_t MULT31_SHIFT15(ogg_int32_t x, ogg_int32_t y) {
  union magic magic;
  magic.whole = (ogg_int64_t)x * y;
  return ((ogg_uint32_t)(magic.halves.lo)>>15) | ((magic.halves.hi)<<17);
}
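
/*
 * Illustrative worked example: MULT32 returns the upper 32 bits of the
 * signed 64-bit product, i.e. a Q31 x Q31 -> Q30 fixed-point multiply,
 * and MULT31 shifts the result up one bit to stay in Q31.  Assuming 0.5
 * is represented as 0x40000000 in Q31:
 *
 *   MULT32(0x40000000, 0x40000000) == 0x10000000   (0.25 in Q30)
 *   MULT31(0x40000000, 0x40000000) == 0x20000000   (0.25 in Q31)
 *
 * MULT31_SHIFT15 similarly returns the low 32 bits of the full 64-bit
 * product shifted right by 15.
 */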
#else
/* 32 bit multiply, more portable but less accurate */

/*
 * Note: Precision is biased towards the first argument, so ordering
 * is important.  Shift values were chosen for the best sound quality
 * after many listening tests.
 */

/*
 * For MULT32 and MULT31: The second argument is always a lookup table
 * value already preshifted from 31 to 8 bits.  We therefore take the
 * opportunity to save on text space and use unsigned char for those
 * tables in this case.
 */

static inline ogg_int32_t MULT32(ogg_int32_t x, ogg_int32_t y) {
  return (x >> 9) * y;  /* y preshifted >>23 */
}

static inline ogg_int32_t MULT31(ogg_int32_t x, ogg_int32_t y) {
  return (x >> 8) * y;  /* y preshifted >>23 */
}

static inline ogg_int32_t MULT31_SHIFT15(ogg_int32_t x, ogg_int32_t y) {
  return (x >> 6) * y;  /* y preshifted >>9 */
}
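
/*
 * Illustrative note: with y already preshifted right by 23 bits, the
 * total shift in MULT32 is 9 + 23 = 32, so (x >> 9) * y approximates the
 * high half of the full 64-bit product at reduced precision.  MULT31
 * (8 + 23 = 31) and MULT31_SHIFT15 (6 + 9 = 15) follow the same pattern
 * for the other two shift amounts used by the wide-math versions above.
 */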
#endif

/*
 * This should be used as a memory barrier, forcing all cached values in
 * registers to be written back to memory.  Might or might not be
 * beneficial depending on the architecture and compiler.
 */
#define MB()
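
/*
 * Sketch (an assumption, not something this header defines): on
 * GCC-compatible compilers the empty MB() above could be replaced by a
 * pure compiler barrier, e.g.
 *
 *   #define MB() __asm__ __volatile__("" ::: "memory")
 *
 * which forces register-cached values back to memory without emitting
 * any instructions.
 */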
/*
 * The XPROD functions are meant to optimize the cross products found all
 * over the place in mdct.c by forcing memory operation ordering to avoid
 * unnecessary register reloads as soon as memory is being written to.
 * However this is only beneficial on CPUs with a sane number of general
 * purpose registers, which excludes the Intel x86.  On Intel it is better
 * to let the compiler reload registers directly from the original memory
 * by using macros.
 */

#ifdef __i386__

#define XPROD32(_a, _b, _t, _v, _x, _y)   \
  { *(_x)=MULT32(_a,_t)+MULT32(_b,_v);    \
    *(_y)=MULT32(_b,_t)-MULT32(_a,_v); }
#define XPROD31(_a, _b, _t, _v, _x, _y)   \
  { *(_x)=MULT31(_a,_t)+MULT31(_b,_v);    \
    *(_y)=MULT31(_b,_t)-MULT31(_a,_v); }
#define XNPROD31(_a, _b, _t, _v, _x, _y)  \
  { *(_x)=MULT31(_a,_t)-MULT31(_b,_v);    \
    *(_y)=MULT31(_b,_t)+MULT31(_a,_v); }

#else

static inline void XPROD32(ogg_int32_t  a, ogg_int32_t  b,
                           ogg_int32_t  t, ogg_int32_t  v,
                           ogg_int32_t *x, ogg_int32_t *y)
{
  *x = MULT32(a, t) + MULT32(b, v);
  *y = MULT32(b, t) - MULT32(a, v);
}

static inline void XPROD31(ogg_int32_t  a, ogg_int32_t  b,
                           ogg_int32_t  t, ogg_int32_t  v,
                           ogg_int32_t *x, ogg_int32_t *y)
{
  *x = MULT31(a, t) + MULT31(b, v);
  *y = MULT31(b, t) - MULT31(a, v);
}

static inline void XNPROD31(ogg_int32_t  a, ogg_int32_t  b,
                            ogg_int32_t  t, ogg_int32_t  v,
                            ogg_int32_t *x, ogg_int32_t *y)
{
  *x = MULT31(a, t) - MULT31(b, v);
  *y = MULT31(b, t) + MULT31(a, v);
}

#endif
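
/*
 * Illustrative interpretation: treating (a, b) and (t, v) as real and
 * imaginary parts of two complex numbers, XPROD31 computes
 * (a + jb) * (t - jv) and XNPROD31 computes (a + jb) * (t + jv), i.e.
 * the fixed-point rotations behind the cross products in mdct.c:
 *
 *   XPROD31(a, b, t, v, &x, &y):  x = a*t + b*v,  y = b*t - a*v
 *   XNPROD31(a, b, t, v, &x, &y): x = a*t - b*v,  y = b*t + a*v
 */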
#endif

#ifndef _V_CLIP_MATH
#define _V_CLIP_MATH

static inline ogg_int32_t CLIP_TO_15(ogg_int32_t x) {
  int ret=x;
  ret-= ((x<=32767)-1)&(x-32767);
  ret-= ((x>=-32768)-1)&(x+32768);
  return(ret);
}
#endif

#endif