source: proto/Compiler/CCGO_HMCPS.py @ 2798

Last change on this file since 2798 was 2798, checked in by cameron, 6 years ago

Alignment block size parameters; use carry 'pack' consistently.

File size: 21.8 KB
#
# CCGO_HMCPS.py
#
# Carry Code Generator Object using Hierarchical Merging Carry Pack Strategy
#
# Robert D. Cameron
# November 26, 2012
# Licensed under Open Software License 3.0
#
import ast
import CCGO

#
# Helper functions
#
def TestHelper_Bitblock_Or(testExpr, bitBlockExpr):
    if isinstance(testExpr, ast.Call):
      assert isinstance(testExpr.func, ast.Name)
      assert testExpr.func.id == 'bitblock::any'
      testExpr.args[0] = make_call('simd_or', [bitBlockExpr, testExpr.args[0]])
      return testExpr
    else:
      return ast.BinOp(testExpr, ast.BitOr(), make_call('bitblock::any', [bitBlockExpr]))

def TestHelper_Integer_Or(testExpr, intExpr):
    return ast.BinOp(testExpr, ast.BitOr(), intExpr)

def mk_var(var, mode=ast.Load()):
  if isinstance(var, str):
        var = ast.Name(var, mode)
  return var

def make_assign(var, expr):
   if isinstance(var, str):
        var = ast.Name(var, ast.Store())
   return ast.Assign([var], expr)

def make_index(var, num, mode=ast.Load()):
  if isinstance(var, str):
        var = ast.Name(var, ast.Load())
  return ast.Subscript(var, ast.Index(ast.Num(num)), mode)

def make_index_store(var, num):
  if isinstance(var, str):
        var = ast.Name(var, ast.Load())
  return ast.Subscript(var, ast.Index(ast.Num(num)), ast.Store())

def make_att(var, att, mode=ast.Load()):
  if isinstance(var, str):
        var = ast.Name(var, ast.Load())
  return ast.Attribute(var, att, mode)

def make_att_store(var, att):
  if isinstance(var, str):
        var = ast.Name(var, ast.Load())
  return ast.Attribute(var, att, ast.Store())

def make_call(fn_name, args):
  if isinstance(fn_name, str):
        fn_name = ast.Name(fn_name, ast.Load())
  return ast.Call(fn_name, args, [], None, None)

def make_callStmt(fn_name, args):
  if isinstance(fn_name, str): fn_name = ast.Name(fn_name, ast.Load())
  return ast.Expr(ast.Call(fn_name, args, [], None, None))

def make_mergeh(fw, x, y):
  return make_call("esimd<%i>::mergeh" % fw, [mk_var(x), mk_var(y)])

def make_zero(fw):
  return make_call("simd<%i>::constant<0>" % fw, [])

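# Illustrative example (assumption: the ASTs built here are unparsed to C++
# text elsewhere in the compiler): make_mergeh(32, 'c1', 'c0') builds the AST
# for the call esimd<32>::mergeh(c1, c0), and make_zero(32) builds the AST for
# simd<32>::constant<0>().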

#
#
# Carry Pack Assignment Strategy
#
# The hierarchical merging carry pack strategy packs carries
# into packs of 2, 4, 8 and 16.  For example, to pack
# 4 carries c0, c1, c2, and c3 into the 32-bit fields of
# a 128-bit register, the following operations are used.
#
# c0 = pablo.SomeCarryGeneratingFn(...)
# c1 = pablo.SomeCarryGeneratingFn(...)
# c1_0 = esimd<32>::mergeh(c1, c0)
# c2 = pablo.SomeCarryGeneratingFn(...)
# c3 = pablo.SomeCarryGeneratingFn(...)
# c3_2 = esimd<32>::mergeh(c3, c2)
# c3_0 = esimd<64>::mergeh(c3_2, c1_0)
#
#
# Packing operations are generated sequentially when
# the appropriate individual carries or subpacks become
# available.
#
# Generate the packing operations, assuming that carry number
# carry_num has just been generated.
#

def pow2ceil(n):
   c = 1
   while c < n: c *= 2
   return c

def pow2floor(n):
   c = 1
   while c <= n: c *= 2
   return c/2

def low_bit(n):
   return n - (n & (n-1))

def align(n, align_base):
  return ((n + align_base - 1) / align_base) * align_base

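# Worked examples (illustrative): pow2ceil(5) == 8; pow2floor(5) == 4;
# low_bit(12) == 4, since 12 = 0b1100; align(5, 4) == 8, rounding 5 up to
# the next multiple of 4.
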
def determine_aligned_block_sizes(pack_size, cis, max_whiles_per_pack = 1, min_block_size = 1):
  aligned_size = {}
  for i in range(cis.block_count): aligned_size[i] = 0
  seen = []
  for i in range(cis.block_count):
    # Work backwards to process all child blocks before the parent
    # so that the parent incorporates the updated child counts.
    b = cis.block_count - i - 1
    b_carries = 0
    op = cis.block_first_op[b]
    while op < cis.block_first_op[b] + cis.block_op_count[b]:
      sb = cis.containing_block[op]
      if sb == b:
        if op not in cis.advance_amount.keys(): b_carries += 1
        elif cis.advance_amount[op] == 1: b_carries += 1
        op += 1
      else:
        align_base = aligned_size[sb]
        if align_base > pack_size: align_base = pack_size
        b_carries = align(b_carries, align_base)
        b_carries += aligned_size[sb]
        op += cis.block_op_count[sb]
    #
    # Align to min block size
    aligned_size[b] = align(b_carries, min_block_size)
    # Force whiles to use full packs; this possibly can be relaxed.
    if cis.whileblock[b]:
      aligned_size[b] = align(aligned_size[b], pack_size/max_whiles_per_pack)
    if aligned_size[b] > pack_size:
      aligned_size[b] = align(aligned_size[b], pack_size)
    else:
      aligned_size[b] = pow2ceil(aligned_size[b])
  return aligned_size
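
# Example (illustrative): with pack_size == 16, a block containing 5 simple
# carries and no sub-blocks gets aligned_size 8 (pow2ceil(5)); a block that
# needs 20 carry positions exceeds the pack size and is rounded up to 32.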

MAX_LINE_LENGTH = 80

def BitBlock_decls_from_vars(varlist):
  global MAX_LINE_LENGTH
  decls = ""
  if not len(varlist) == 0:
          decls = "             BitBlock"
          pending = ""
          linelgth = 10
          for v in varlist:
            if linelgth + len(v) + 2 <= MAX_LINE_LENGTH:
              decls += pending + " " + v
              linelgth += len(pending + v) + 1
            else:
              decls += ";\n             BitBlock " + v
              linelgth = 11 + len(v)
            pending = ","
          decls += ";"
  return decls
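# Example (illustrative): BitBlock_decls_from_vars(['__c0', '__c1']) yields an
# indented declaration line of the form "BitBlock __c0, __c1;", wrapping onto
# additional lines once MAX_LINE_LENGTH is reached.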

def block_contains(b0, b1, parent_block_map):
  if b0 == b1: return True
  elif b1 == 0: return False
  else: return block_contains(b0, parent_block_map[b1], parent_block_map)

class HMCPS_CCGO(CCGO.CCGO):
    def __init__(self, fw, carryInfoSet, carryPackVarName='carryG', temp_prefix='__c'):
        self.fw = fw
        self.field_count = 128/fw
        self.carryInfoSet = carryInfoSet
        self.carryPackVar = carryPackVarName
        self.temp_prefix = temp_prefix
        self.aligned_size = determine_aligned_block_sizes(self.field_count, carryInfoSet)
        self.carryPack_count = (self.aligned_size[0] + self.field_count - 1) / self.field_count
        self.totalPack_count = self.carryPack_count + carryInfoSet.adv_n_count
        self.alloc_map = {}
        self.alloc_map[0] = 0
        self.adv_n_map = {}
        self.block_base = {}
        self.allocate_ops()
        # carry_offset is used within the inner body of while loops to access local carries.
        # The calculated (ub, rp) value is reduced by this amount for the local carry pack(s).
        self.carry_offset = 0
#
# Carry Storage/Access
#
# Carries are stored in one or more ubitblocks as byte values.
# For each block, the carry count is rounded up to a power-of-2 ceiling P,
# so that the carry test for that block is accessible as a single value of P bytes.
# Packs of 1, 2, 4 or 8 carries are respectively represented
# as one or more _8, _16, _32 or _64 values (members of the ubitblock union).
#
# The allocation phase determines the ubitblock number and count for each block.
#
# Carry-in access is a byte load: carryG[packno]._8[offset].
# Carry-out stores go to a local pack variable until the final byte of a pack is reached.
#
# If-test: let P be the pack size in {1, 2, 4, 8, ...}.
#   If P <= 8, use an integer test expression: carryG[packno]._{8*P}[block_offset].
#
# While-tests are similar:
#   local while declaration: use a copy of the carry pack;
#   while finalization carry combine: round up and |= into the structure.
#
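# Example (illustrative, assuming fw == 8 so that field_count == 16): the
# carry-in load for the carry allocated at position 19 is
# convert(carryG[1]._8[3]); the if-test contribution for a block of 4 packed
# carries starting at position 4 of pack 0 is the integer carryG[0]._32[1],
# which is OR'ed (|) into the if condition.
#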
    def carry_pack_full(self, ub, v = None, mode = ast.Load()):
       if v == None: v = self.carryPackVar
       return make_att(make_index(v, ub), '_128', mode)

    def carry_pack_index(self, fw, ub, rp, mode = ast.Load()):
       return make_index(make_att(make_index(self.carryPackVar, ub), '_%i' % fw), rp, mode)

    def local_pack_full(self, ub, mode = ast.Load()):
       return self.carry_pack_full(ub, "sub" + self.carryPackVar, mode)

    def cg_temp(self, hi_carry, lo_carry = None):
      if lo_carry == None or hi_carry == lo_carry: return "%s%i" % (self.temp_prefix, hi_carry)
      else: return "%s%i_%i" % (self.temp_prefix, hi_carry, lo_carry)

    def local_temp(self, hi_carry, lo_carry = None):
      if lo_carry == None or hi_carry == lo_carry: return "sub%s%i" % (self.temp_prefix, hi_carry)
      else: return "sub%s%i_%i" % (self.temp_prefix, hi_carry, lo_carry)

    def gen_merges(self, carry_last, carry_base):
      size = carry_last - carry_base + 1
      if carry_last & size:
        v1 = mk_var(self.cg_temp(carry_last, carry_base))
        v0 = mk_var(self.cg_temp(carry_last - size, carry_base - size))
        v2 = mk_var(self.cg_temp(carry_last, carry_base - size), ast.Store())
        return [make_assign(v2, make_mergeh(self.fw * size, v1, v0))] + self.gen_merges(carry_last, carry_base - size)
      else: return []
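
    # Example (illustrative, fw == 32, temp_prefix == '__c'): gen_merges(3, 3)
    # emits the assignments
    #   __c3_2 = esimd<32>::mergeh(__c3, __c2)
    #   __c3_0 = esimd<64>::mergeh(__c3_2, __c1_0)
    # matching the hierarchical packing example in the header comment.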

    #
    #  Given that carry_num carries have been generated and packed,
    #  add zero_count additional carry zero values and pack.
    #  Use shifts to introduce multiple zeroes, where possible.
    #
    def gen_multiple_carry_zero_then_pack(self, carry_num, zero_count):
      if zero_count == 0: return []
      pending_carry_pack_size = low_bit(carry_num)
      pending_carry_base = carry_num - pending_carry_pack_size
      # We may be able to fill zeroes by shifting.
      # But the shift is limited by any further pending carry pack and
      # the constraint that the result must produce a well-formed pack
      # having a power-of-2 number of entries.
      #
      final_num = carry_num + zero_count
      pack_size2 = low_bit(pending_carry_base)
      if pending_carry_base == 0:
        shift = pow2floor(final_num) - pending_carry_pack_size
      else:
        shift = min(low_bit(pending_carry_base), low_bit(final_num)) - pending_carry_pack_size
      if pending_carry_pack_size == 0 or shift == 0:
        # There is either no pending pack or we are not generating enough
        # carry zeroes to combine into the pending pack, so we can only add new
        # packs.
        #
        if zero_count == 1:  return [make_assign(self.cg_temp(carry_num), make_zero(self.fw))]
        else:
          zero_count_floor = pow2floor(zero_count)
          hi_num = carry_num + zero_count_floor
          a1 = make_assign(self.cg_temp(hi_num - 1, carry_num), make_zero(self.fw))
          remaining_zeroes = zero_count - zero_count_floor
          return [a1] + self.gen_multiple_carry_zero_then_pack(hi_num, remaining_zeroes)
      #
      shift_result = self.cg_temp(carry_num + shift - 1, pending_carry_base)
      pending = self.cg_temp(carry_num - 1, pending_carry_base)
      #print shift_result, " by shift ", pending, shift
      a1 = make_assign(shift_result, make_call('bitblock::srli<%i>' % (self.fw * shift), [mk_var(pending)]))
      # Do any necessary merges
      m = self.gen_merges(carry_num + shift - 1, pending_carry_base)
      return [a1] + m + self.gen_multiple_carry_zero_then_pack(carry_num + shift, zero_count - shift)
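
    # Example (illustrative, fw == 32): gen_multiple_carry_zero_then_pack(3, 1)
    # fills the one missing carry position by shifting the pending pack:
    #   __c3_2 = bitblock::srli<32>(__c2)
    #   __c3_0 = esimd<64>::mergeh(__c3_2, __c1_0)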


    def allocate_ops(self):
      carry_count = 0
      adv_n_count = 0
      for op in range(self.carryInfoSet.operation_count):
        b = self.carryInfoSet.containing_block[op]
        if op != 0:
          # If we've just left a block, ensure that we are aligned.
          b_last = self.carryInfoSet.containing_block[op-1]
          if not block_contains(b_last, b, self.carryInfoSet.parent_block):
            # Find the max-sized block just exited.
            while not block_contains(self.carryInfoSet.parent_block[b_last], b, self.carryInfoSet.parent_block):
              b_last = self.carryInfoSet.parent_block[b_last]
            align_base = self.aligned_size[b_last]
            if align_base > self.field_count: align_base = self.field_count
            carry_count = align(carry_count, align_base)
        if self.carryInfoSet.block_first_op[b] == op:
          # If we're just entering a block, ensure that we are aligned.
          align_base = self.aligned_size[b]
          if align_base > self.field_count: align_base = self.field_count
          carry_count = align(carry_count, align_base)
        if op not in self.carryInfoSet.advance_amount.keys():
          self.alloc_map[op] = carry_count
          carry_count += 1
        elif self.carryInfoSet.advance_amount[op] == 1:
          self.alloc_map[op] = carry_count
          carry_count += 1
        else:
          # Advance_n op, carry_count does not change.
          self.alloc_map[op] = carry_count
          self.adv_n_map[op] = adv_n_count
          adv_n_count += 1
      # When processing the last operation, make sure that the "next" operation
      # appears to start a new pack.
      self.alloc_map[self.carryInfoSet.operation_count] = align(carry_count, self.field_count)
      for b in range(self.carryInfoSet.block_count):
         self.block_base[b] = self.alloc_map[self.carryInfoSet.block_first_op[b]]

    def GenerateCarryDecls(self):
        return "  ubitblock %s [%i];\n" % (self.carryPackVar, self.totalPack_count)
    def GenerateInitializations(self):
        v = self.carryPackVar
        inits = ""
        for i in range(0, self.totalPack_count):
          inits += "%s[%i]._128 = simd<%i>::constant<0>();\n" % (v, i, self.fw)
        for op_no in range(self.carryInfoSet.block_op_count[0]):
          if op_no in self.carryInfoSet.init_one_list:
            posn = self.alloc_map[op_no]
            ub = posn/self.field_count
            rp = posn%self.field_count
            inits += "%s[%i]._%i[%i] = 1;\n" % (self.carryPackVar, ub, self.fw, rp)
        return inits
    def GenerateStreamFunctionDecls(self):
        f = self.field_count
        s = 1
        decls = []
        while f > 0:
          decls += [self.cg_temp(s*(i+1)-1, s*i) for i in range(f)]
          f = f/2
          s = s * 2
        return BitBlock_decls_from_vars(decls)
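    # Example (illustrative, fw == 32 so field_count == 4): the stream function
    # declares the carry pack temporaries
    #   BitBlock __c0, __c1, __c2, __c3, __c1_0, __c3_2, __c3_0;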

    def GenerateCarryInAccess(self, operation_no):
        block_no = self.carryInfoSet.containing_block[operation_no]
        posn = self.alloc_map[operation_no] - self.carry_offset
        ub = posn/self.field_count
        rp = posn%self.field_count
        return make_call("convert", [self.carry_pack_index(self.fw, ub, rp)])
    def GenerateCarryOutStore(self, operation_no, carry_out_expr):
        block_no = self.carryInfoSet.containing_block[operation_no]
        posn = self.alloc_map[operation_no] - self.carry_offset
        ub = posn/self.field_count
        rp = posn%self.field_count
        # Save the carry in the carry temp variable and then merge
        # pending carry temps as far as possible.
        assigs = [make_assign(self.temp_prefix + repr(rp), carry_out_expr)]
        assigs += self.gen_merges(rp, rp)
        # Only generate an actual store for the last carryout in a pack.
        next_op = operation_no + 1
        while self.adv_n_map.has_key(next_op): next_op += 1
        next_posn = self.alloc_map[next_op] - self.carry_offset
        skip = next_posn - posn - 1
        if skip > 0:
          assigs += self.gen_multiple_carry_zero_then_pack(rp+1, skip)
        #print (posn, skip)
        if next_posn % self.field_count == 0:
          shift_op = "simd<%i>::srli<%i>" % (self.fw, self.fw-1)
          storable_carry_in_form = make_call(shift_op, [mk_var(self.cg_temp(self.field_count - 1, 0))])
          assigs.append(make_assign(self.carry_pack_full(ub, mode = ast.Store()), storable_carry_in_form))
        return assigs
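    # Example (illustrative, fw == 32 so field_count == 4): for the operation
    # whose carry lands in the last position (rp == 3) of pack 0, with no
    # skipped positions, the generated statements are
    #   __c3 = <carry_out_expr>
    #   __c3_2 = esimd<32>::mergeh(__c3, __c2)
    #   __c3_0 = esimd<64>::mergeh(__c3_2, __c1_0)
    #   carryG[0]._128 = simd<32>::srli<31>(__c3_0)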
    def GenerateAdvanceInAccess(self, operation_no):
        return self.carry_pack_full(self.carryPack_count + self.adv_n_map[operation_no])
    def GenerateAdvanceOutStore(self, operation_no, adv_out_expr):
        return [ast.Assign([self.carry_pack_full(self.carryPack_count + self.adv_n_map[operation_no], mode=ast.Store())],
                           make_call("bitblock::srli<64>", [adv_out_expr]))]
    def GenerateTestAll(self, instance_name):
        if self.totalPack_count == 0: return ast.Num(0)
        else:
            v = make_att(instance_name, self.carryPackVar)
            t = self.carry_pack_full(0, v)
            for i in range(1, self.totalPack_count):
              t2 = self.carry_pack_full(i, v)
              t = make_call('simd_or', [t, t2])
            return make_call('bitblock::any', [t])
    def GenerateTest(self, block_no, testExpr):
        posn = self.block_base[block_no] - self.carry_offset
        ub = posn/self.field_count
        rp = posn%self.field_count
        count = self.aligned_size[block_no]
        width = count * self.fw
        if count < self.field_count:
            t = self.carry_pack_index(width, ub, rp/count)
            return TestHelper_Integer_Or(testExpr, t)
        else:
            t = self.carry_pack_full(ub)
            for i in range(1, count/self.field_count):
              v2 = self.carry_pack_full(ub + i)
              t = make_call('simd_or', [t, v2])
            return TestHelper_Bitblock_Or(testExpr, t)
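    # Example (illustrative, fw == 8 so field_count == 16): for a block whose
    # aligned_size spans two full packs starting at pack 2, the carries are
    # combined as simd_or(carryG[2]._128, carryG[3]._128) and folded into the
    # block's test expression via bitblock::any.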
    def GenerateCarryIfTest(self, block_no, ifTest):
        return self.GenerateTest(block_no, ifTest)

    def GenerateCarryElseFinalization(self, block_no):
        # If the block consists of full carry packs, then
        # no action need be taken: the corresponding carry-in packs
        # must already be zero, or the then branch would have been taken.
        count = self.aligned_size[block_no]
        if count % self.field_count == 0: return []
        # The block has half a carry-pack or less.
        assigs = []
        posn = self.block_base[block_no] - self.carry_offset
        ub = posn / self.field_count
        rp = posn % self.field_count
        next_op = self.carryInfoSet.block_first_op[block_no] + self.carryInfoSet.block_op_count[block_no]
        end_pos = (self.alloc_map[next_op] - self.carry_offset - 1) % self.field_count
        #print rp, next_op, self.alloc_map[next_op]
        #assigs = [make_assign(self.cg_temp(end_pos, rp), make_zero(self.fw))]
        assigs = self.gen_multiple_carry_zero_then_pack(rp, end_pos - rp + 1)
        if (end_pos + 1) % self.field_count == 0:
          shift_op = "simd<%i>::srli<%i>" % (self.fw, self.fw-1)
          storable_carry_in_form = make_call(shift_op, [mk_var(self.cg_temp(self.field_count - 1, 0))])
          assigs.append(make_assign(self.carry_pack_full(ub, mode = ast.Store()), storable_carry_in_form))
        return assigs

    def GenerateLocalDeclare(self, block_no):
        if self.carryInfoSet.block_op_count[block_no] == 0: return []
        count = self.aligned_size[block_no]
        if count >= self.field_count:
          ub_count = count / self.field_count
          decls = [make_callStmt('ubitblock_declare', [mk_var('sub' + self.carryPackVar), ast.Num(ub_count)])]
          count = self.field_count
        else: decls = []
        # Generate carry pack temps.
        f = count
        s = 1
        temps = []
        while f > 0:
          temps += [self.local_temp(s*(i+1)-1, s*i) for i in range(f)]
          f = f/2
          s = s * 2
        #return BitBlock_decls_from_vars(decls)
        return decls + [make_callStmt('BitBlock_declare', [mk_var(t)]) for t in temps]

    def GenerateCarryWhileTest(self, block_no, testExpr):
        return self.GenerateTest(block_no, testExpr)

    def EnterLocalWhileBlock(self, operation_offset):
        self.carryPackVar = "sub" + self.carryPackVar
        self.temp_prefix = "sub" + self.temp_prefix
        self.carry_offset = self.alloc_map[operation_offset]
        #print "self.carry_offset = %i" % self.carry_offset
    def ExitLocalWhileBlock(self):
        self.carryPackVar = self.carryPackVar[3:]
        self.temp_prefix = self.temp_prefix[3:]
        self.carry_offset = 0

    def GenerateCarryWhileFinalization(self, block_no):
        posn = self.block_base[block_no]
        ub = posn/self.field_count
        rp = posn%self.field_count
        count = self.aligned_size[block_no]
        if count < self.field_count:
          v0 = self.cg_temp(rp + count - 1, rp)
          lv0 = self.local_temp(count - 1, 0)
          return [make_assign(v0, make_call('simd_or', [mk_var(v0), mk_var(lv0)]))]
        n = (count+self.field_count-1)/self.field_count
        assigs = []
        for i in range(n):
          assigs.append(make_assign(self.carry_pack_full(ub + i, mode = ast.Store()), make_call('simd_or', [self.carry_pack_full(ub + i), self.local_pack_full(i)])))
        return assigs
    def GenerateStreamFunctionFinalization(self):
        return []

#
#  A version of HMCPS_CCGO eliminating use of "convert"
#
class HMCPS_CCGO2(HMCPS_CCGO):

    def GenerateCarryInAccess(self, operation_no):
        block_no = self.carryInfoSet.containing_block[operation_no]
        posn = self.alloc_map[operation_no] - self.carry_offset
        ub = posn/self.field_count
        rp = posn%self.field_count
        #return make_call("convert", [self.carry_pack_index(self.fw, ub, rp)])
        if rp == 0: e = self.carry_pack_full(ub)
        else: e = make_call("mvmd<%i>::srli<%i>" % (self.fw, rp), [self.carry_pack_full(ub)])
        if rp == self.field_count - 1:
          return e
        else: return make_call('simd_and', [e, mk_var("simd_const_1")])
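
    # Example (illustrative, fw == 8): the carry-in at position rp == 3 of
    # pack 1 is read without convert as
    #   simd_and(mvmd<8>::srli<3>(carryG[1]._128), simd_const_1)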

#
#  Eliminating ubitblock
#
class HMCPS_CCGO3(HMCPS_CCGO2):

    def carry_pack_full(self, ub, v = None, mode = ast.Load()):
       if v == None: v = self.carryPackVar
       return make_index(v, ub, mode)

    def carry_pack_index(self, fw, ub, rp, mode = ast.Load()):
       return make_call("mvmd<%i>::extract<%i>" % (fw, rp), [self.carry_pack_full(ub)])

    def GenerateCarryDecls(self):
        return "  BitBlock %s [%i];\n" % (self.carryPackVar, self.totalPack_count)

    def GenerateInitializations(self):
        v = self.carryPackVar
        inits = ""
        for i in range(0, self.totalPack_count):
          inits += "%s[%i] = simd<%i>::constant<0>();\n" % (v, i, self.fw)
        for op_no in range(self.carryInfoSet.block_op_count[0]):
          if op_no in self.carryInfoSet.init_one_list:
            posn = self.alloc_map[op_no]
            ub = posn/self.field_count
            rp = posn%self.field_count
            v = "%s[%i]" % (self.carryPackVar, ub)
            inits += "%s = simd_or(%s, mvmd<%i>::slli<%i>(simd_const_1));\n" % (v, v, self.fw, rp)
        return inits

    def GenerateLocalDeclare(self, block_no):
        if self.carryInfoSet.block_op_count[block_no] == 0: return []
        count = self.aligned_size[block_no]
        if count >= self.field_count:
          ub_count = count / self.field_count
          decls = [make_callStmt('BitBlock_declare', [self.local_pack_full(ub_count)])]
          decls += [make_assign(self.local_pack_full(i, ast.Store()), make_zero(self.fw)) for i in range(ub_count)]
          count = self.field_count
        else: decls = []
        # Generate carry pack temps.
        f = count
        s = 1
        temps = []
        while f > 0:
          temps += [self.local_temp(s*(i+1)-1, s*i) for i in range(f)]
          f = f/2
          s = s * 2
        #return BitBlock_decls_from_vars(decls)
        return decls + [make_callStmt('BitBlock_declare', [mk_var(t)]) for t in temps]