source: trunk/lib/bitblock256.hpp @ 2200

Last change on this file was r2200, checked in by cameron, 7 years ago

Custom USE_S2P_AVX transposition; ADCMAGIC variations

File size: 8.7 KB
#ifndef BITBLOCK256_HPP_
#define BITBLOCK256_HPP_

/*=============================================================================
    bitblock256 - Specific 256 bit IDISA implementations.

    Idealized SIMD Operations with SSE versions
    Copyright (C) 2011, Robert D. Cameron, Kenneth S. Herdy, Hua Huang and Nigel Medforth.
    Licensed to the public under the Open Software License 3.0.
    Licensed to International Characters Inc.
       under the Academic Free License version 3.0.

=============================================================================*/

#include "idisa128.hpp"
#include "idisa256.hpp"
#include "builtins.hpp"

/* A 256-bit block, accessible as fields of various widths. */
union ubitblock {
        bitblock256_t _256;
        bitblock128_t _128[sizeof(bitblock256_t)/sizeof(bitblock128_t)];
        uint64_t _64[sizeof(bitblock256_t)/sizeof(uint64_t)];
        uint32_t _32[sizeof(bitblock256_t)/sizeof(uint32_t)];
        uint16_t _16[sizeof(bitblock256_t)/sizeof(uint16_t)];
        uint8_t _8[sizeof(bitblock256_t)/sizeof(uint8_t)];
};

/* The type used to store a carry bit. */
typedef bitblock256_t carry_t;

static IDISA_ALWAYS_INLINE bitblock256_t carry2bitblock(carry_t carry);
static IDISA_ALWAYS_INLINE carry_t bitblock2carry(bitblock256_t carry);

static IDISA_ALWAYS_INLINE void adc(bitblock256_t x, bitblock256_t y, carry_t & carry, bitblock256_t & sum);
static IDISA_ALWAYS_INLINE void sbb(bitblock256_t x, bitblock256_t y, carry_t & borrow, bitblock256_t & difference);
static IDISA_ALWAYS_INLINE void advance_with_carry(bitblock256_t cursor, carry_t & carry, bitblock256_t & rslt);

static IDISA_ALWAYS_INLINE void adc(bitblock256_t x, bitblock256_t y, carry_t carry_in, carry_t & carry_out, bitblock256_t & sum);
static IDISA_ALWAYS_INLINE void sbb(bitblock256_t x, bitblock256_t y, carry_t borrow_in, carry_t & borrow_out, bitblock256_t & difference);
static IDISA_ALWAYS_INLINE void advance_with_carry(bitblock256_t cursor, carry_t carry_in, carry_t & carry_out, bitblock256_t & rslt);

static IDISA_ALWAYS_INLINE bitblock256_t convert (uint64_t s);
static IDISA_ALWAYS_INLINE uint64_t convert (bitblock256_t v);

static IDISA_ALWAYS_INLINE bitblock256_t carry2bitblock(carry_t carry) {  return carry;}
static IDISA_ALWAYS_INLINE carry_t bitblock2carry(bitblock256_t carry) {  return carry;}
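
/* Illustrative usage sketch (not part of the original interface): the
   carry-in/carry-out form of adc declared above is used by adding two bit
   streams block by block, threading the carry from one call into the next.
   The function and parameter names here are hypothetical. */
IDISA_ALWAYS_INLINE void add_streams_sketch(bitblock256_t * x, bitblock256_t * y,
                                            bitblock256_t * sum, unsigned nblocks)
{
  carry_t carry = convert((uint64_t) 0);      // start with no carry pending
  for (unsigned i = 0; i < nblocks; i++) {
    adc(x[i], y[i], carry, carry, sum[i]);    // carry out of block i feeds block i+1
  }
}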


/* avx_select_lo128 and avx_select_hi128 extract the low and high 128-bit lanes
   of a 256-bit value; avx_general_combine256(x, y) forms a 256-bit value with
   y in the low lane and x in the high lane. */
#define avx_select_lo128(x) \
        ((__m128i) _mm256_castps256_ps128(x))

#define avx_select_hi128(x) \
        ((__m128i)(_mm256_extractf128_ps(x, 1)))

#define avx_general_combine256(x, y) \
   (_mm256_insertf128_ps(_mm256_castps128_ps256((__m128) y), (__m128) x, 1))

/* 128-bit add-with-carry built from two 64-bit field adds: gen marks bit
   positions that generate a carry, prop marks positions that propagate one;
   c1 injects the carry out of the low 64-bit field into the high field, and
   carry_out holds the single bit carried out of bit 127. */
IDISA_ALWAYS_INLINE void adc128(bitblock128_t x, bitblock128_t y, bitblock128_t carry_in, bitblock128_t & carry_out, bitblock128_t & sum)
{
        bitblock128_t gen = simd_and(x, y);
        bitblock128_t prop = simd_or(x, y);
        bitblock128_t partial = simd128<64>::add(simd128<64>::add(x, y), carry_in);
        bitblock128_t c1 = simd128<128>::slli<64>(simd128<64>::srli<63>(simd_or(gen, simd_andc(prop, partial))));
        sum = simd128<64>::add(c1, partial);
        carry_out = simd128<128>::srli<127>(simd_or(gen, simd_andc(prop, sum)));
}

#ifndef ADCMAGIC
IDISA_ALWAYS_INLINE void adc(bitblock256_t x, bitblock256_t y, carry_t carry_in, carry_t & carry_out, bitblock256_t & sum)
{
//  Really Slow!
//        bitblock256_t gen = simd_and(x, y);
//        bitblock256_t prop = simd_or(x, y);
//        sum = simd256<256>::add(simd256<256>::add(x, y), carry2bitblock(carry_in));
//        carry_out = bitblock2carry(simd256<256>::srli<255>(simd_or(gen, simd_andc(prop, sum))));

  bitblock128_t x0 = avx_select_lo128(x);
  bitblock128_t x1 = avx_select_hi128(x);
  bitblock128_t y0 = avx_select_lo128(y);
  bitblock128_t y1 = avx_select_hi128(y);
  bitblock128_t c0 = avx_select_lo128(carry2bitblock(carry_in));
  bitblock128_t s0, s1, c1, c2;
  adc128(x0, y0, c0, c1, s0);
  adc128(x1, y1, c1, c2, s1);
  sum = avx_general_combine256(s1, s0);
  carry_out = _mm256_castps128_ps256((__m128) c2);
}
#endif

#ifdef ADCMAGIC
/* ADCMAGIC variant: compute the four 64-bit partial sums directly, then
   resolve the carries between fields with a small amount of scalar bit
   arithmetic instead of a second vector pass. */
static inline void adc(bitblock256_t x, bitblock256_t y, carry_t carry_in, carry_t & carry_out, bitblock256_t & sum) {
        bitblock128_t all_ones = simd128<1>::constant<1>();
        //bitblock256_t gen = simd_and(x, y);
        //bitblock256_t prop = simd_xor(x, y);
        bitblock128_t x0 = avx_select_lo128(x);
        bitblock128_t x1 = avx_select_hi128(x);
        bitblock128_t y0 = avx_select_lo128(y);
        bitblock128_t y1 = avx_select_hi128(y);
        bitblock128_t sum0 = simd128<64>::add(x0, y0);
        bitblock128_t sum1 = simd128<64>::add(x1, y1);
        //bitblock256_t icarry = simd_or(gen, simd_andc(prop, avx_general_combine256(sum1, sum0)));
        bitblock128_t icarry0 = simd_or(simd_and(x0, y0), simd_andc(simd_or(x0, y0), sum0));
        bitblock128_t icarry1 = simd_or(simd_and(x1, y1), simd_andc(simd_or(x1, y1), sum1));
        // A carry may bubble through a field if it is all ones.
        bitblock128_t bubble0 = simd128<64>::eq(sum0, all_ones);
        bitblock128_t bubble1 = simd128<64>::eq(sum1, all_ones);
        //bitblock128_t bubble = hsimd128<64>::packss(bubble1, bubble0);
        bitblock256_t bubble = avx_general_combine256(bubble1, bubble0);
        //uint64_t carry_mask = _mm256_movemask_pd((__m256d) icarry) * 2 + convert(carry_in);
        // Bit i of carry_mask is the carry arriving at 64-bit field i (bit 0 is carry_in);
        // bit i of bubble_mask marks a field whose partial sum is all ones.
        uint64_t carry_mask = hsimd128<64>::signmask(icarry1) * 8 + hsimd128<64>::signmask(icarry0) * 2 + convert(carry_in);
        uint64_t bubble_mask = _mm256_movemask_pd((__m256d) bubble);
        //uint64_t bubble_mask = hsimd128<64>::signmask(bubble1) * 4 + hsimd128<64>::signmask(bubble0);
        //uint64_t bubble_mask = hsimd128<32>::signmask(bubble);
        uint64_t carry_scan_thru_bubbles = (carry_mask + bubble_mask) &~ bubble_mask;
        uint64_t increments = carry_scan_thru_bubbles | (carry_scan_thru_bubbles - carry_mask);
        carry_out = convert(increments >> 4);
        // Spread bits 0..3 of increments to the low bit of each 16-bit field, then widen to 64-bit fields.
        uint64_t spread = 0x0000200040008001 * increments & 0x0001000100010001;
        bitblock128_t inc_32 = _mm_cvtepu16_epi32(_mm_cvtsi64_si128(spread));
        bitblock128_t inc_64_0 = esimd128<32>::mergel(simd128<1>::constant<0>(), inc_32);
        bitblock128_t inc_64_1 = esimd128<32>::mergeh(simd128<1>::constant<0>(), inc_32);
        sum = avx_general_combine256(simd128<64>::add(sum1, inc_64_1), simd128<64>::add(sum0, inc_64_0));
}
#endif
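
/* Illustrative scalar model (an assumption, not in the original source) of the
   carry-scan-thru-bubbles step above: carry_mask bit i is a carry arriving at
   field i, bubble_mask bit i marks a field whose partial sum is all ones.
   Example: carry_mask = 0b0010 and bubble_mask = 0b0010 give scan = 0b0100, so
   the carry ripples through field 1 and emerges into field 2, and the result
   0b0110 says both fields must be incremented by one. */
static inline uint64_t scan_carries_thru_bubbles(uint64_t carry_mask, uint64_t bubble_mask)
{
        uint64_t scan = (carry_mask + bubble_mask) & ~bubble_mask;   // carries after crossing runs of all-ones fields
        return scan | (scan - carry_mask);                           // every field that must be incremented by one
}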

/* 128-bit subtract-with-borrow: the borrow analogue of adc128, with gen
   marking positions that generate a borrow and prop those that propagate one. */
IDISA_ALWAYS_INLINE void sbb128(bitblock128_t x, bitblock128_t y, bitblock128_t borrow_in, bitblock128_t & borrow_out, bitblock128_t & difference)
{
        bitblock128_t gen = simd_andc(y, x);
        bitblock128_t prop = simd_not(simd_xor(x, y));
        bitblock128_t partial = simd128<64>::sub(simd128<64>::sub(x, y), borrow_in);
        bitblock128_t b1 = simd128<128>::slli<64>(simd128<64>::srli<63>(simd_or(gen, simd_and(prop, partial))));
        difference = simd128<64>::sub(partial, b1);
        borrow_out = simd128<128>::srli<127>(simd_or(gen, simd_and(prop, difference)));
}

IDISA_ALWAYS_INLINE void sbb(bitblock256_t x, bitblock256_t y, carry_t borrow_in, carry_t & borrow_out, bitblock256_t & difference)
{
//        bitblock256_t gen = simd_andc(y, x);
//        bitblock256_t prop = simd_not(simd_xor(x, y));
//        difference = simd256<256>::sub(simd256<256>::sub(x, y), carry2bitblock(borrow_in));
//        borrow_out = bitblock2carry(simd256<256>::srli<255>(simd_or(gen, simd_and(prop, difference))));
  bitblock128_t x0 = avx_select_lo128(x);
  bitblock128_t x1 = avx_select_hi128(x);
  bitblock128_t y0 = avx_select_lo128(y);
  bitblock128_t y1 = avx_select_hi128(y);
  bitblock128_t b0 = avx_select_lo128(carry2bitblock(borrow_in));
  bitblock128_t d0, d1, b1, b2;
  sbb128(x0, y0, b0, b1, d0);
  sbb128(x1, y1, b1, b2, d1);
  difference = avx_general_combine256(d1, d0);
  borrow_out = _mm256_castps128_ps256((__m128) b2);
}

/* Advance (shift left by one) a 128-bit block: each 64-bit field is doubled,
   bit 0 of the low field is filled from carry_in, bit 0 of the high field from
   the bit shifted out of the low field, and carry_out receives old bit 127. */
IDISA_ALWAYS_INLINE void advance_with_carry128(bitblock128_t cursor, bitblock128_t carry_in, bitblock128_t & carry_out, bitblock128_t & rslt)
{
        bitblock128_t shift_out = simd128<64>::srli<63>(cursor);
        bitblock128_t low_bits = esimd128<64>::mergel(shift_out, carry_in);
        carry_out = simd128<128>::srli<64>(shift_out);
        rslt = simd_or(simd128<64>::add(cursor, cursor), low_bits);
}

IDISA_ALWAYS_INLINE void advance_with_carry(bitblock256_t cursor, carry_t carry_in, carry_t & carry_out, bitblock256_t & rslt)
{
  bitblock128_t cursor0 = avx_select_lo128(cursor);
  bitblock128_t cursor1 = avx_select_hi128(cursor);
  bitblock128_t  carry0 = avx_select_lo128(carry_in);
  bitblock128_t  carry1, carry2, rslt0, rslt1;
  advance_with_carry128(cursor0, carry0, carry1, rslt0);
  advance_with_carry128(cursor1, carry1, carry2, rslt1);
  rslt = avx_general_combine256(rslt1, rslt0);
  carry_out = _mm256_castps128_ps256((__m128)carry2);
}
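
/* Illustrative usage sketch (not part of the original interface): advancing a
   marker stream by one position across successive blocks, with the carry
   holding the bit that crosses each block boundary.  Names are hypothetical. */
IDISA_ALWAYS_INLINE void advance_stream_sketch(bitblock256_t * markers, bitblock256_t * advanced, unsigned nblocks)
{
  carry_t carry = convert((uint64_t) 0);                          // no bit pending from a previous block
  for (unsigned i = 0; i < nblocks; i++) {
    advance_with_carry(markers[i], carry, carry, advanced[i]);    // bit 255 of block i becomes bit 0 of block i+1
  }
}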

IDISA_ALWAYS_INLINE bitblock256_t convert(uint64_t s)
{
        ubitblock b = {b._256 = simd256<128>::constant<0>()}; // = {0};
        b._64[0] = s;
        return b._256;
}

IDISA_ALWAYS_INLINE uint64_t convert (bitblock256_t v)
{
        return (uint64_t) mvmd256<64>::extract<0>(v);
}


#endif // BITBLOCK256_HPP_