1 Index: codecs/gsm/src/k6opt.s
2 ===================================================================
3 --- codecs/gsm/src/k6opt.s (revision 49815)
4 +++ codecs/gsm/src/k6opt.s (working copy)
27 +/* load a mask into a mmx register ... use the stack to avoid TEXTRELS */
28 +.macro load_immq mask reg
33 +.macro cleanup_immq num
37 +/* some coefficients for us to play with */
38 +.set h_coefs1, 0xFE8AFF7A /* -134 -374 */
39 +.set l_coefs1, 0x08060000 /* 0 2054 */
40 +.set h_coefs2, 0x2000166D /* 5741 8192 */
41 +.set l_coefs2, 0x0806166D /* 5741 2054 */
42 +.set h_coefs3, 0xFE8A0000 /* 0 -374 */
43 +.set l_coefs3, 0x0000FF7A /* -134 0 */
47 /* void Weighting_filter (const short *e, short *x) */
48 @@ -34,9 +35,10 @@ Weighting_filter:
51 movl $0x1000,%eax; movd %eax,%mm5 /* for rounding */
55 + load_immq coefs1, %mm1
56 + load_immq coefs2, %mm2
57 + load_immq coefs3, %mm3
62 @@ -229,6 +231,8 @@ k6iprod:
65 /* void k6vsraw P3((short *p, int n, int bits) */
66 +.set h_ones, 0x00010001
67 +.set l_ones, 0x00010001
69 .type k6vsraw,@function
71 @@ -242,7 +246,8 @@ k6vsraw:
72 leal -16(%esi,%eax,2),%edx /* edx = top - 16 */
76 + load_immq ones, %mm2
78 psllw %mm3,%mm2; psrlw $1,%mm2
79 cmpl %edx,%esi; ja .L306
81 @@ -362,22 +367,10 @@ k6vsllw:
82 .size k6vsllw,.Lfe5-k6vsllw
87 - .type extremes,@object
100 /* long k6maxmin (const short *p, int n, short *out) */
101 +.set extremes_1, 0x80008000
102 +.set extremes_2, 0x7fff7fff
104 .type k6maxmin,@function
106 @@ -391,8 +384,10 @@ k6maxmin:
111 - movd extremes+4,%mm1
112 + mov $extremes_1, %ecx
114 + mov $extremes_2, %ecx