source: trunk/lib/sse_simd.h @ 87

Last changed in r87, checked in by cameron: Adaptations for MSVC

/*  Idealized SIMD Operations with SSE versions
    Copyright (C) 2006, 2007, 2008, Robert D. Cameron
    Licensed to the public under the Open Software License 3.0.
    Licensed to International Characters Inc.
       under the Academic Free License version 3.0.
*/
#ifndef SSE_SIMD_H
#define SSE_SIMD_H

/*------------------------------------------------------------*/
#include <stdio.h>   /* for printf in print_bit_block below */
#include <stdint.h>
#include <limits.h>
#ifndef LONG_BIT
#define LONG_BIT (8 * sizeof(unsigned long))
#endif
#include <emmintrin.h>
#ifdef USE_LDDQU
#include <pmmintrin.h>
#endif
typedef __m128i SIMD_type;
/*------------------------------------------------------------*/
/* I. SIMD bitwise logical operations */

#define simd_or(b1, b2) _mm_or_si128(b1, b2)
#define simd_and(b1, b2) _mm_and_si128(b1, b2)
#define simd_xor(b1, b2) _mm_xor_si128(b1, b2)
/* simd_andc(b1, b2) = b1 & ~b2; _mm_andnot_si128 complements its
   first argument, hence the swapped operands. */
#define simd_andc(b1, b2) _mm_andnot_si128(b2, b1)
/* Bitwise select: take bits of then_val where cond is 1 and bits of
   else_val where cond is 0. */
#define simd_if(cond, then_val, else_val) \
  simd_or(simd_and(then_val, cond), simd_andc(else_val, cond))
#define simd_not(b) (simd_xor(b, _mm_set1_epi32(0xFFFFFFFF)))
#define simd_nor(a,b) (simd_not(simd_or(a,b)))

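/* Illustrative sketch (not part of the original header): with a
   byte-granular mask from _mm_cmpeq_epi8, simd_if acts as a
   branchless per-byte conditional.  The function name is
   hypothetical. */
static inline SIMD_type simd_select_eq_8_sketch(SIMD_type x, SIMD_type y,
                                                SIMD_type then_v, SIMD_type else_v)
{
  SIMD_type mask = _mm_cmpeq_epi8(x, y);  /* 0xFF where x == y, else 0x00 */
  return simd_if(mask, then_v, else_v);
}
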
/*  Constants selecting the high half of each n-bit field. */
#define simd_himask_2 _mm_set1_epi8(0xAA)
#define simd_himask_4 _mm_set1_epi8(0xCC)
#define simd_himask_8 _mm_set1_epi8(0xF0)
/* Little-endian */
#define simd_himask_16 _mm_set1_epi16(0xFF00)
#define simd_himask_32 _mm_set1_epi32(0xFFFF0000)
#define simd_himask_64 _mm_set_epi32(-1,0,-1,0)
#define simd_himask_128 _mm_set_epi32(-1,-1,0,0)

/* Idealized operations with direct implementation by built-in
   operations for various target architectures. */

#define simd_add_8(a, b) _mm_add_epi8(a, b)
#define simd_add_16(a, b) _mm_add_epi16(a, b)
#define simd_add_32(a, b) _mm_add_epi32(a, b)
#define simd_add_64(a, b) _mm_add_epi64(a, b)
#define simd_sub_8(a, b) _mm_sub_epi8(a, b)
#define simd_sub_16(a, b) _mm_sub_epi16(a, b)
#define simd_sub_32(a, b) _mm_sub_epi32(a, b)
#define simd_sub_64(a, b) _mm_sub_epi64(a, b)
#define simd_mult_16(a, b) _mm_mullo_epi16(a, b)
#define simd_slli_16(r, shft) _mm_slli_epi16(r, shft)
#define simd_srli_16(r, shft) _mm_srli_epi16(r, shft)
#define simd_srai_16(r, shft) _mm_srai_epi16(r, shft)
#define simd_slli_32(r, shft) _mm_slli_epi32(r, shft)
#define simd_srli_32(r, shft) _mm_srli_epi32(r, shft)
#define simd_srai_32(r, shft) _mm_srai_epi32(r, shft)
#define simd_slli_64(r, shft) _mm_slli_epi64(r, shft)
#define simd_srli_64(r, shft) _mm_srli_epi64(r, shft)
#define simd_sll_64(r, shft_reg) _mm_sll_epi64(r, shft_reg)
#define simd_srl_64(r, shft_reg) _mm_srl_epi64(r, shft_reg)
/* Pack: truncate each 16-bit field to its low byte; fields of a end
   up in the high half of the result, fields of b in the low half. */
#define simd_pack_16(a, b) \
  _mm_packus_epi16(simd_andc(b, simd_himask_16), simd_andc(a, simd_himask_16))
/* Merges interleave the high (mergeh) or low (mergel) fields of a
   and b; the operands are swapped so that the field from a occupies
   the more significant position of each pair. */
#define simd_mergeh_8(a, b) _mm_unpackhi_epi8(b, a)
#define simd_mergeh_16(a, b) _mm_unpackhi_epi16(b, a)
#define simd_mergeh_32(a, b) _mm_unpackhi_epi32(b, a)
#define simd_mergeh_64(a, b) _mm_unpackhi_epi64(b, a)
#define simd_mergel_8(a, b) _mm_unpacklo_epi8(b, a)
#define simd_mergel_16(a, b) _mm_unpacklo_epi16(b, a)
#define simd_mergel_32(a, b) _mm_unpacklo_epi32(b, a)
#define simd_mergel_64(a, b) _mm_unpacklo_epi64(b, a)
#define simd_eq_8(a, b) _mm_cmpeq_epi8(a, b)
#define simd_eq_16(a, b) _mm_cmpeq_epi16(a, b)
#define simd_eq_32(a, b) _mm_cmpeq_epi32(a, b)

#define simd_max_8(a, b) _mm_max_epu8(a, b)

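/* Illustrative sketch (not part of the original header): with this
   operand order, merging zeroes with x zero-extends its bytes to
   16-bit fields.  The function name is hypothetical. */
static inline SIMD_type simd_zext_bytes_lo_sketch(SIMD_type x)
{
  /* the result's 16-bit fields hold the low 8 bytes of x,
     zero-extended; simd_mergeh_8 would yield the high 8 bytes */
  return simd_mergel_8(_mm_setzero_si128(), x);
}
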
/* 128-bit shifts by an immediate, synthesized from the byte-granular
   _mm_slli_si128/_mm_srli_si128 and the 64-bit bit shifts: a
   multiple-of-8 shift is a single byte shift; a shift of 64 or more
   moves one lane across first; otherwise both lanes are shifted and
   the bits crossing the lane boundary are ORed back in. */
#define simd_slli_128(r, shft) \
  ((shft) % 8 == 0 ? _mm_slli_si128(r, (shft)/8) : \
   (shft) >= 64 ? simd_slli_64(_mm_slli_si128(r, 8), (shft) - 64) : \
   simd_or(simd_slli_64(r, shft), _mm_slli_si128(simd_srli_64(r, 64-(shft)), 8)))

#define simd_srli_128(r, shft) \
  ((shft) % 8 == 0 ? _mm_srli_si128(r, (shft)/8) : \
   (shft) >= 64 ? simd_srli_64(_mm_srli_si128(r, 8), (shft) - 64) : \
   simd_or(simd_srli_64(r, shft), _mm_srli_si128(simd_slli_64(r, 64-(shft)), 8)))

/* 128-bit shifts by an amount held in a register (see sisd_from_int
   below).  The 64-bit shift intrinsics return zero for counts of 64
   or more, so the terms that do not apply to the actual shift amount
   vanish and the OR of the three terms gives the result. */
#define simd_sll_128(r, shft) \
   simd_or(simd_sll_64(r, shft), \
           simd_or(_mm_slli_si128(simd_sll_64(r, simd_sub_32(shft, sisd_from_int(64))), 8), \
                   _mm_slli_si128(simd_srl_64(r, simd_sub_32(sisd_from_int(64), shft)), 8)))

#define simd_srl_128(r, shft) \
   simd_or(simd_srl_64(r, shft), \
           simd_or(_mm_srli_si128(simd_srl_64(r, simd_sub_32(shft, sisd_from_int(64))), 8), \
                   _mm_srli_si128(simd_sll_64(r, simd_sub_32(sisd_from_int(64), shft)), 8)))

#define sisd_sll(r, shft) simd_sll_128(r, shft)
#define sisd_srl(r, shft) simd_srl_128(r, shft)
#define sisd_slli(r, shft) simd_slli_128(r, shft)
#define sisd_srli(r, shft) simd_srli_128(r, shft)
#define sisd_add(a, b) simd_add_128(a, b)
#define sisd_sub(a, b) simd_sub_128(a, b)

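/* Note: simd_add_128 and simd_sub_128 (referenced by sisd_add and
   sisd_sub above, and by the half-operand macros below) are not
   defined in this file; they are presumably supplied elsewhere in
   the library.  A minimal fallback sketch of the add, under that
   assumption: compose two 64-bit adds and propagate the carry out
   of the low lane. */
#ifndef simd_add_128
static inline SIMD_type simd_add_128_sketch(SIMD_type a, SIMD_type b)
{
  SIMD_type partial = simd_add_64(a, b);
  /* per-bit carry-out of a + b is (a & b) | ((a | b) & ~sum);
     bit 63 of the low lane is the carry out of that lane */
  SIMD_type carry_out = simd_or(simd_and(a, b),
                                simd_andc(simd_or(a, b), partial));
  /* move that carry into bit 0 of the high lane and add it in */
  SIMD_type carry_in = _mm_slli_si128(simd_srli_64(carry_out, 63), 8);
  return simd_add_64(partial, carry_in);
}
#define simd_add_128(a, b) simd_add_128_sketch(a, b)
#endif
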
#define sisd_store_aligned(r, addr) _mm_store_si128(addr, r)
#define sisd_store_unaligned(r, addr) _mm_storeu_si128(addr, r)
#define sisd_load_aligned(addr) _mm_load_si128(addr)
#ifdef USE_LDDQU
#define sisd_load_unaligned(addr) _mm_lddqu_si128(addr)
#else
#define sisd_load_unaligned(addr) _mm_loadu_si128(addr)
#endif


#define simd_const_32(n) _mm_set1_epi32(n)
#define simd_const_16(n) _mm_set1_epi16(n)
#define simd_const_8(n) _mm_set1_epi8(n)
#define simd_const_4(n) _mm_set1_epi8(((n)<<4)|(n))
#define simd_const_2(n) simd_const_4(((n)<<2)|(n))
#define simd_const_1(n) \
  ((n)==0 ? simd_const_8(0) : simd_const_8(-1))

#define simd_pack_16_ll(a, b) simd_pack_16(a, b)
#define simd_pack_16_hh(a, b) \
  simd_pack_16(simd_srli_16(a, 8), simd_srli_16(b, 8))


/* Additions at sub-byte widths are synthesized.  A 2-bit add is a
   half-adder network: c1 is the carry-free sum, the carry (a & b)
   is shifted into the next bit position, and simd_if selects the
   carried version for the high bit of each field and the carry-free
   version for the low bit. */
static inline
SIMD_type simd_add_2(SIMD_type a, SIMD_type b)
{
         SIMD_type c1 = simd_xor(a, b);
         SIMD_type carry = simd_and(a, b);
         SIMD_type c2 = simd_xor(c1, sisd_slli(carry, 1));
         return simd_if(simd_himask_2, c2, c1);
}
/* A 4-bit add is two masked 8-bit adds, one on the high nybbles and
   one on the low nybbles, recombined with simd_if. */
#define simd_add_4(a, b)\
        simd_if(simd_himask_8, simd_add_8(simd_and(a,simd_himask_8),simd_and(b,simd_himask_8))\
        ,simd_add_8(simd_andc(a,simd_himask_8),simd_andc(b,simd_himask_8)))

/* Shifts on sub-byte fields: shift the whole register with a 32-bit
   shift, then mask away the bits that crossed a field boundary. */
#define simd_srli_2(r, sh)\
         simd_and(simd_srli_32(r,sh),simd_const_2(3>>(sh)))

#define simd_srli_4(r, sh)\
         simd_and(simd_srli_32(r,sh),simd_const_4(15>>(sh)))
#define simd_srli_8(r, sh)\
         simd_and(simd_srli_32(r,sh),simd_const_8(255>>(sh)))

#define simd_slli_2(r, sh)\
         simd_and(simd_slli_32(r,sh),simd_const_2((3<<(sh))&3))

#define simd_slli_4(r, sh)\
         simd_and(simd_slli_32(r,sh),simd_const_4((15<<(sh))&15))
#define simd_slli_8(r, sh)\
         simd_and(simd_slli_32(r,sh),simd_const_8((255<<(sh))&255))


/* Merges at 4-, 2- and 1-bit widths are synthesized recursively:
   fields of a and b are repositioned with simd_if so that the merge
   at twice the width interleaves the narrower fields correctly. */
#define simd_mergeh_4(a,b)\
        simd_mergeh_8(simd_if(simd_himask_8,a,simd_srli_8(b,4)),\
        simd_if(simd_himask_8,simd_slli_8(a,4),b))
#define simd_mergel_4(a,b)\
        simd_mergel_8(simd_if(simd_himask_8,a,simd_srli_8(b,4)),\
        simd_if(simd_himask_8,simd_slli_8(a,4),b))
#define simd_mergeh_2(a,b)\
        simd_mergeh_4(simd_if(simd_himask_4,a,simd_srli_4(b,2)),\
        simd_if(simd_himask_4,simd_slli_4(a,2),b))
#define simd_mergel_2(a,b)\
        simd_mergel_4(simd_if(simd_himask_4,a,simd_srli_4(b,2)),\
        simd_if(simd_himask_4,simd_slli_4(a,2),b))
#define simd_mergeh_1(a,b)\
        simd_mergeh_2(simd_if(simd_himask_2,a,simd_srli_2(b,1)),\
        simd_if(simd_himask_2,simd_slli_2(a,1),b))
#define simd_mergel_1(a,b)\
        simd_mergel_2(simd_if(simd_himask_2,a,simd_srli_2(b,1)),\
        simd_if(simd_himask_2,simd_slli_2(a,1),b))

/* Move between an int and the low 32 bits of a register. */
#define sisd_to_int(x) _mm_cvtsi128_si32(x)

#define sisd_from_int(n) _mm_cvtsi32_si128(n)

/* Predicates over the 16 bytes of a block, via the byte sign-bit
   mask.  The *_true forms expect mask vectors (0xFF/0x00 bytes). */
static inline int simd_all_true_8(SIMD_type v) {
  return _mm_movemask_epi8(v) == 0xFFFF;
}

static inline int simd_any_true_8(SIMD_type v) {
  return _mm_movemask_epi8(v) != 0;
}

static inline int simd_any_sign_bit_8(SIMD_type v) {
  return _mm_movemask_epi8(v) != 0;
}

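/* Illustrative sketch (not part of the original header): a block is
   pure ASCII iff no byte has its sign bit set. */
static inline int is_ascii_block_sketch(SIMD_type v)
{
  return !simd_any_sign_bit_8(v);
}
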
#define simd_all_eq_8(v1, v2) simd_all_true_8(_mm_cmpeq_epi8(v1, v2))
/* All bytes of v1 <= v2 (unsigned) iff max(v1, v2) == v2. */
#define simd_all_le_8(v1, v2) \
  simd_all_eq_8(simd_max_8(v1, v2), v2)

#define simd_all_signed_gt_8(v1, v2) simd_all_true_8(_mm_cmpgt_epi8(v1, v2))

static inline int bitblock_has_bit(SIMD_type v) {
  return !simd_all_true_8(simd_eq_8(v, simd_const_8(0)));
}


/* BLOCKSIZE is the bit width of a block; this header does not define
   it, so a default of 128 (the width of SIMD_type) is supplied here
   for self-containment. */
#ifndef BLOCKSIZE
#define BLOCKSIZE 128
#endif
#define bitblock_test_bit(blk, n) \
   sisd_to_int(sisd_srli(sisd_slli(blk, ((BLOCKSIZE-1)-(n))), BLOCKSIZE-1))

/* Packs below 16 bits truncate each field to its low half.  They
   recur on the next wider pack, first repositioning bits with
   simd_if so that the wider pack keeps the correct bits. */
#define simd_pack_2(a,b)\
        simd_pack_4(simd_if(simd_himask_2,sisd_srli(a,1),a),\
        simd_if(simd_himask_2,sisd_srli(b,1),b))
#define simd_pack_4(a,b)\
        simd_pack_8(simd_if(simd_himask_4,sisd_srli(a,2),a),\
        simd_if(simd_himask_4,sisd_srli(b,2),b))
#define simd_pack_8(a,b)\
        simd_pack_16(simd_if(simd_himask_8,sisd_srli(a,4),a),\
        simd_if(simd_himask_8,sisd_srli(b,4),b))

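/* Half-operand modification macros.  The two-letter suffix states
   how each operand is reduced to the low half of its fields before
   the operation: x = use the operand as-is, l = mask to the low half
   of each field, h = shift the high half of each field down.  For
   example, simd_add_8_hl(v1, v2) adds the high nybbles of v1
   (shifted down) to the low nybbles of v2.  The #ifndef guards allow
   alternative (e.g. architecture-specific) definitions to be
   supplied first.  Where both operands are reduced to half-width
   values the sum cannot overflow the field, so a plain wider add
   suffices (e.g. simd_add_2_ll uses simd_add_8); similarly the pack
   variants can use the cheap 16-bit shift and plain packs, because
   packing discards the bits that a too-wide shift contaminates. */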
#ifndef simd_add_2_xx
#define simd_add_2_xx(v1, v2) simd_add_2(v1, v2)
#endif

#ifndef simd_add_2_xl
#define simd_add_2_xl(v1, v2) simd_add_2(v1, simd_andc(v2, simd_himask_2))
#endif

#ifndef simd_add_2_xh
#define simd_add_2_xh(v1, v2) simd_add_2(v1, simd_srli_2(v2, 1))
#endif

#ifndef simd_add_2_lx
#define simd_add_2_lx(v1, v2) simd_add_2(simd_andc(v1, simd_himask_2), v2)
#endif

#ifndef simd_add_2_ll
#define simd_add_2_ll(v1, v2) simd_add_8(simd_andc(v1, simd_himask_2), simd_andc(v2, simd_himask_2))
#endif

#ifndef simd_add_2_lh
#define simd_add_2_lh(v1, v2) simd_add_8(simd_andc(v1, simd_himask_2), simd_srli_2(v2, 1))
#endif

#ifndef simd_add_2_hx
#define simd_add_2_hx(v1, v2) simd_add_2(simd_srli_2(v1, 1), v2)
#endif

#ifndef simd_add_2_hl
#define simd_add_2_hl(v1, v2) simd_add_8(simd_srli_2(v1, 1), simd_andc(v2, simd_himask_2))
#endif

#ifndef simd_add_2_hh
#define simd_add_2_hh(v1, v2) simd_add_8(simd_srli_2(v1, 1), simd_srli_2(v2, 1))
#endif

#ifndef simd_add_4_xx
#define simd_add_4_xx(v1, v2) simd_add_4(v1, v2)
#endif

#ifndef simd_add_4_xl
#define simd_add_4_xl(v1, v2) simd_add_4(v1, simd_andc(v2, simd_himask_4))
#endif

#ifndef simd_add_4_xh
#define simd_add_4_xh(v1, v2) simd_add_4(v1, simd_srli_4(v2, 2))
#endif

#ifndef simd_add_4_lx
#define simd_add_4_lx(v1, v2) simd_add_4(simd_andc(v1, simd_himask_4), v2)
#endif

#ifndef simd_add_4_ll
#define simd_add_4_ll(v1, v2) simd_add_8(simd_andc(v1, simd_himask_4), simd_andc(v2, simd_himask_4))
#endif

#ifndef simd_add_4_lh
#define simd_add_4_lh(v1, v2) simd_add_8(simd_andc(v1, simd_himask_4), simd_srli_4(v2, 2))
#endif

#ifndef simd_add_4_hx
#define simd_add_4_hx(v1, v2) simd_add_4(simd_srli_4(v1, 2), v2)
#endif

#ifndef simd_add_4_hl
#define simd_add_4_hl(v1, v2) simd_add_8(simd_srli_4(v1, 2), simd_andc(v2, simd_himask_4))
#endif

#ifndef simd_add_4_hh
#define simd_add_4_hh(v1, v2) simd_add_8(simd_srli_4(v1, 2), simd_srli_4(v2, 2))
#endif

#ifndef simd_add_8_xx
#define simd_add_8_xx(v1, v2) simd_add_8(v1, v2)
#endif

#ifndef simd_add_8_xl
#define simd_add_8_xl(v1, v2) simd_add_8(v1, simd_andc(v2, simd_himask_8))
#endif

#ifndef simd_add_8_xh
#define simd_add_8_xh(v1, v2) simd_add_8(v1, simd_srli_8(v2, 4))
#endif

#ifndef simd_add_8_lx
#define simd_add_8_lx(v1, v2) simd_add_8(simd_andc(v1, simd_himask_8), v2)
#endif

#ifndef simd_add_8_ll
#define simd_add_8_ll(v1, v2) simd_add_8(simd_andc(v1, simd_himask_8), simd_andc(v2, simd_himask_8))
#endif

#ifndef simd_add_8_lh
#define simd_add_8_lh(v1, v2) simd_add_8(simd_andc(v1, simd_himask_8), simd_srli_8(v2, 4))
#endif

#ifndef simd_add_8_hx
#define simd_add_8_hx(v1, v2) simd_add_8(simd_srli_8(v1, 4), v2)
#endif

#ifndef simd_add_8_hl
#define simd_add_8_hl(v1, v2) simd_add_8(simd_srli_8(v1, 4), simd_andc(v2, simd_himask_8))
#endif

#ifndef simd_add_8_hh
#define simd_add_8_hh(v1, v2) simd_add_8(simd_srli_8(v1, 4), simd_srli_8(v2, 4))
#endif

#ifndef simd_add_16_xx
#define simd_add_16_xx(v1, v2) simd_add_16(v1, v2)
#endif

#ifndef simd_add_16_xl
#define simd_add_16_xl(v1, v2) simd_add_16(v1, simd_andc(v2, simd_himask_16))
#endif

#ifndef simd_add_16_xh
#define simd_add_16_xh(v1, v2) simd_add_16(v1, simd_srli_16(v2, 8))
#endif

#ifndef simd_add_16_lx
#define simd_add_16_lx(v1, v2) simd_add_16(simd_andc(v1, simd_himask_16), v2)
#endif

#ifndef simd_add_16_ll
#define simd_add_16_ll(v1, v2) simd_add_16(simd_andc(v1, simd_himask_16), simd_andc(v2, simd_himask_16))
#endif

#ifndef simd_add_16_lh
#define simd_add_16_lh(v1, v2) simd_add_16(simd_andc(v1, simd_himask_16), simd_srli_16(v2, 8))
#endif

#ifndef simd_add_16_hx
#define simd_add_16_hx(v1, v2) simd_add_16(simd_srli_16(v1, 8), v2)
#endif

#ifndef simd_add_16_hl
#define simd_add_16_hl(v1, v2) simd_add_16(simd_srli_16(v1, 8), simd_andc(v2, simd_himask_16))
#endif

#ifndef simd_add_16_hh
#define simd_add_16_hh(v1, v2) simd_add_16(simd_srli_16(v1, 8), simd_srli_16(v2, 8))
#endif

#ifndef simd_add_32_xx
#define simd_add_32_xx(v1, v2) simd_add_32(v1, v2)
#endif

#ifndef simd_add_32_xl
#define simd_add_32_xl(v1, v2) simd_add_32(v1, simd_andc(v2, simd_himask_32))
#endif

#ifndef simd_add_32_xh
#define simd_add_32_xh(v1, v2) simd_add_32(v1, simd_srli_32(v2, 16))
#endif

#ifndef simd_add_32_lx
#define simd_add_32_lx(v1, v2) simd_add_32(simd_andc(v1, simd_himask_32), v2)
#endif

#ifndef simd_add_32_ll
#define simd_add_32_ll(v1, v2) simd_add_32(simd_andc(v1, simd_himask_32), simd_andc(v2, simd_himask_32))
#endif

#ifndef simd_add_32_lh
#define simd_add_32_lh(v1, v2) simd_add_32(simd_andc(v1, simd_himask_32), simd_srli_32(v2, 16))
#endif

#ifndef simd_add_32_hx
#define simd_add_32_hx(v1, v2) simd_add_32(simd_srli_32(v1, 16), v2)
#endif

#ifndef simd_add_32_hl
#define simd_add_32_hl(v1, v2) simd_add_32(simd_srli_32(v1, 16), simd_andc(v2, simd_himask_32))
#endif

#ifndef simd_add_32_hh
#define simd_add_32_hh(v1, v2) simd_add_32(simd_srli_32(v1, 16), simd_srli_32(v2, 16))
#endif

#ifndef simd_add_64_xx
#define simd_add_64_xx(v1, v2) simd_add_64(v1, v2)
#endif

#ifndef simd_add_64_xl
#define simd_add_64_xl(v1, v2) simd_add_64(v1, simd_andc(v2, simd_himask_64))
#endif

#ifndef simd_add_64_xh
#define simd_add_64_xh(v1, v2) simd_add_64(v1, simd_srli_64(v2, 32))
#endif

#ifndef simd_add_64_lx
#define simd_add_64_lx(v1, v2) simd_add_64(simd_andc(v1, simd_himask_64), v2)
#endif

#ifndef simd_add_64_ll
#define simd_add_64_ll(v1, v2) simd_add_64(simd_andc(v1, simd_himask_64), simd_andc(v2, simd_himask_64))
#endif

#ifndef simd_add_64_lh
#define simd_add_64_lh(v1, v2) simd_add_64(simd_andc(v1, simd_himask_64), simd_srli_64(v2, 32))
#endif

#ifndef simd_add_64_hx
#define simd_add_64_hx(v1, v2) simd_add_64(simd_srli_64(v1, 32), v2)
#endif

#ifndef simd_add_64_hl
#define simd_add_64_hl(v1, v2) simd_add_64(simd_srli_64(v1, 32), simd_andc(v2, simd_himask_64))
#endif

#ifndef simd_add_64_hh
#define simd_add_64_hh(v1, v2) simd_add_64(simd_srli_64(v1, 32), simd_srli_64(v2, 32))
#endif

#ifndef simd_add_128_xx
#define simd_add_128_xx(v1, v2) simd_add_128(v1, v2)
#endif

#ifndef simd_add_128_xl
#define simd_add_128_xl(v1, v2) simd_add_128(v1, simd_andc(v2, simd_himask_128))
#endif

#ifndef simd_add_128_xh
#define simd_add_128_xh(v1, v2) simd_add_128(v1, simd_srli_128(v2, 64))
#endif

#ifndef simd_add_128_lx
#define simd_add_128_lx(v1, v2) simd_add_128(simd_andc(v1, simd_himask_128), v2)
#endif

#ifndef simd_add_128_ll
#define simd_add_128_ll(v1, v2) simd_add_128(simd_andc(v1, simd_himask_128), simd_andc(v2, simd_himask_128))
#endif

#ifndef simd_add_128_lh
#define simd_add_128_lh(v1, v2) simd_add_128(simd_andc(v1, simd_himask_128), simd_srli_128(v2, 64))
#endif

#ifndef simd_add_128_hx
#define simd_add_128_hx(v1, v2) simd_add_128(simd_srli_128(v1, 64), v2)
#endif

#ifndef simd_add_128_hl
#define simd_add_128_hl(v1, v2) simd_add_128(simd_srli_128(v1, 64), simd_andc(v2, simd_himask_128))
#endif

#ifndef simd_add_128_hh
#define simd_add_128_hh(v1, v2) simd_add_128(simd_srli_128(v1, 64), simd_srli_128(v2, 64))
#endif

#ifndef simd_pack_2_xx
#define simd_pack_2_xx(v1, v2) simd_pack_2(v1, v2)
#endif

#ifndef simd_pack_2_xl
#define simd_pack_2_xl(v1, v2) simd_pack_2(v1, v2)
#endif

#ifndef simd_pack_2_xh
#define simd_pack_2_xh(v1, v2) simd_pack_2(v1, simd_srli_16(v2, 1))
#endif

#ifndef simd_pack_2_lx
#define simd_pack_2_lx(v1, v2) simd_pack_2(v1, v2)
#endif

#ifndef simd_pack_2_ll
#define simd_pack_2_ll(v1, v2) simd_pack_2(v1, v2)
#endif

#ifndef simd_pack_2_lh
#define simd_pack_2_lh(v1, v2) simd_pack_2(v1, simd_srli_16(v2, 1))
#endif

#ifndef simd_pack_2_hx
#define simd_pack_2_hx(v1, v2) simd_pack_2(simd_srli_16(v1, 1), v2)
#endif

#ifndef simd_pack_2_hl
#define simd_pack_2_hl(v1, v2) simd_pack_2(simd_srli_16(v1, 1), v2)
#endif

#ifndef simd_pack_2_hh
#define simd_pack_2_hh(v1, v2) simd_pack_2(simd_srli_16(v1, 1), simd_srli_16(v2, 1))
#endif

#ifndef simd_pack_4_xx
#define simd_pack_4_xx(v1, v2) simd_pack_4(v1, v2)
#endif

#ifndef simd_pack_4_xl
#define simd_pack_4_xl(v1, v2) simd_pack_4(v1, v2)
#endif

#ifndef simd_pack_4_xh
#define simd_pack_4_xh(v1, v2) simd_pack_4(v1, simd_srli_16(v2, 2))
#endif

#ifndef simd_pack_4_lx
#define simd_pack_4_lx(v1, v2) simd_pack_4(v1, v2)
#endif

#ifndef simd_pack_4_ll
#define simd_pack_4_ll(v1, v2) simd_pack_4(v1, v2)
#endif

#ifndef simd_pack_4_lh
#define simd_pack_4_lh(v1, v2) simd_pack_4(v1, simd_srli_16(v2, 2))
#endif

#ifndef simd_pack_4_hx
#define simd_pack_4_hx(v1, v2) simd_pack_4(simd_srli_16(v1, 2), v2)
#endif

#ifndef simd_pack_4_hl
#define simd_pack_4_hl(v1, v2) simd_pack_4(simd_srli_16(v1, 2), v2)
#endif

#ifndef simd_pack_4_hh
#define simd_pack_4_hh(v1, v2) simd_pack_4(simd_srli_16(v1, 2), simd_srli_16(v2, 2))
#endif

#ifndef simd_pack_8_xx
#define simd_pack_8_xx(v1, v2) simd_pack_8(v1, v2)
#endif

#ifndef simd_pack_8_xl
#define simd_pack_8_xl(v1, v2) simd_pack_8(v1, v2)
#endif

#ifndef simd_pack_8_xh
#define simd_pack_8_xh(v1, v2) simd_pack_8(v1, simd_srli_16(v2, 4))
#endif

#ifndef simd_pack_8_lx
#define simd_pack_8_lx(v1, v2) simd_pack_8(v1, v2)
#endif

#ifndef simd_pack_8_ll
#define simd_pack_8_ll(v1, v2) simd_pack_8(v1, v2)
#endif

#ifndef simd_pack_8_lh
#define simd_pack_8_lh(v1, v2) simd_pack_8(v1, simd_srli_16(v2, 4))
#endif

#ifndef simd_pack_8_hx
#define simd_pack_8_hx(v1, v2) simd_pack_8(simd_srli_16(v1, 4), v2)
#endif

#ifndef simd_pack_8_hl
#define simd_pack_8_hl(v1, v2) simd_pack_8(simd_srli_16(v1, 4), v2)
#endif

#ifndef simd_pack_8_hh
#define simd_pack_8_hh(v1, v2) simd_pack_8(simd_srli_16(v1, 4), simd_srli_16(v2, 4))
#endif

#ifndef simd_pack_16_xx
#define simd_pack_16_xx(v1, v2) simd_pack_16(v1, v2)
#endif

#ifndef simd_pack_16_xl
#define simd_pack_16_xl(v1, v2) simd_pack_16(v1, v2)
#endif

#ifndef simd_pack_16_xh
#define simd_pack_16_xh(v1, v2) simd_pack_16(v1, simd_srli_16(v2, 8))
#endif

#ifndef simd_pack_16_lx
#define simd_pack_16_lx(v1, v2) simd_pack_16(v1, v2)
#endif

#ifndef simd_pack_16_ll
#define simd_pack_16_ll(v1, v2) simd_pack_16(v1, v2)
#endif

#ifndef simd_pack_16_lh
#define simd_pack_16_lh(v1, v2) simd_pack_16(v1, simd_srli_16(v2, 8))
#endif

#ifndef simd_pack_16_hx
#define simd_pack_16_hx(v1, v2) simd_pack_16(simd_srli_16(v1, 8), v2)
#endif

#ifndef simd_pack_16_hl
#define simd_pack_16_hl(v1, v2) simd_pack_16(simd_srli_16(v1, 8), v2)
#endif

#ifndef simd_pack_16_hh
#define simd_pack_16_hh(v1, v2) simd_pack_16(simd_srli_16(v1, 8), simd_srli_16(v2, 8))
#endif
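
/* Illustrative sketch (not part of the original header): the hh/ll
   pack pair splits the sixteen 16-bit fields of a and b into their
   high-byte and low-byte streams, a step usable in serial-to-parallel
   transposition.  Names are hypothetical. */
static inline void simd_split_bytes_sketch(SIMD_type a, SIMD_type b,
                                           SIMD_type * hi, SIMD_type * lo)
{
  *hi = simd_pack_16_hh(a, b);  /* high byte of each 16-bit field */
  *lo = simd_pack_16_ll(a, b);  /* low byte of each 16-bit field */
}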


// Splat the first 16-bit int into all positions.
static inline SIMD_type simd_splat_16(SIMD_type x) {
  SIMD_type t = _mm_shufflelo_epi16(x,0);
  return _mm_shuffle_epi32(t,0);
}

// Splat the first 32-bit int into all positions.
static inline SIMD_type simd_splat_32(SIMD_type x) {
  return _mm_shuffle_epi32(x,0);
}
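
/* Illustrative sketch (not part of the original header): broadcast a
   scalar to all 16-bit fields by combining sisd_from_int with
   simd_splat_16.  The function name is hypothetical. */
static inline SIMD_type simd_fill_16_sketch(int n)
{
  return simd_splat_16(sisd_from_int(n));
}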


/* Debugging aid: print the name and the 16 bytes of a block in hex,
   low byte first.  (Made static inline so the header can be included
   in several translation units; the union is sized to the full block
   to avoid overrunning it.) */
static inline void print_bit_block(char * var_name, SIMD_type v) {
  union {SIMD_type vec; unsigned char elems[sizeof(SIMD_type)];} x;
  int i;
  x.vec = v;
  printf("%20s = ", var_name);
  for (i = 0; i < (int) sizeof(SIMD_type); i++) {
    printf("%02X ", x.elems[i]);
  }
  printf("\n");
}

/* Population count of the 128-bit block: half-operand adds widen
   1-bit counts into 2-, 4- and 8-bit field counts; _mm_sad_epu8 then
   sums the bytes within each 64-bit lane, and the two lane totals
   are added together. */
static inline int bitblock_bit_count(SIMD_type v) {
  SIMD_type cts_2 = simd_add_2_lh(v, v);
  SIMD_type cts_4 = simd_add_4_lh(cts_2, cts_2);
  SIMD_type cts_8 = simd_add_8_lh(cts_4, cts_4);
  SIMD_type cts_64 = _mm_sad_epu8(cts_8, simd_const_8(0));
  SIMD_type cts_128 = simd_add_64(cts_64, sisd_srli(cts_64, 64));
  return (int) sisd_to_int(cts_128);
}
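
/* Illustrative sketch (not part of the original header): count the
   one bits in 16 bytes of memory.  The buffer name is hypothetical. */
static inline int buffer_bit_count_sketch(unsigned char * buf)
{
  SIMD_type blk = sisd_load_unaligned((SIMD_type *) buf);
  return bitblock_bit_count(blk);
}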

#endif