source: proto/Compiler/CCGO_HMCPS.py @ 2791

Last change on this file since 2791 was 2791, checked in by cameron, 6 years ago

Resolve advance_n bugs for experimental compiler mode.

File size: 21.7 KB
Line 
1#
2# CCGO_HMCPS.py
3#
4# Carry Code Generator Object using Hierarchical Merging Carry Pack Strategy
5#
6# Robert D. Cameron
7# November 26, 2012
8# Licensed under Open Software License 3.0
9#
10import ast
11import CCGO
12
13#
14# Helper functions
15#
def TestHelper_Bitblock_Or(testExpr, bitBlockExpr):
    """Fold bitBlockExpr into a bitblock test expression.

    If testExpr is already a bitblock::any(...) call, OR the new block into
    its argument with simd_or (mutating testExpr in place); otherwise build
    testExpr | bitblock::any(bitBlockExpr).
    """
    if not isinstance(testExpr, ast.Call):
        any_call = make_call('bitblock::any', [bitBlockExpr])
        return ast.BinOp(testExpr, ast.BitOr(), any_call)
    # Existing bitblock::any call: merge the new expression into its argument.
    assert isinstance(testExpr.func, ast.Name)
    assert testExpr.func.id == 'bitblock::any'
    merged = make_call('simd_or', [bitBlockExpr, testExpr.args[0]])
    testExpr.args[0] = merged
    return testExpr
24
def TestHelper_Integer_Or(testExpr, intExpr):
    """Return an AST node computing the bitwise OR of two integer expressions."""
    combined = ast.BinOp(testExpr, ast.BitOr(), intExpr)
    return combined
27
def mk_var(var, mode=ast.Load()):
    """Coerce a string into an ast.Name with the given context; pass AST nodes through."""
    if not isinstance(var, str):
        return var
    return ast.Name(var, mode)
32 
def make_assign(var, expr):
    """Build an ast.Assign of expr to var; string targets become Store-context names."""
    target = ast.Name(var, ast.Store()) if isinstance(var, str) else var
    return ast.Assign([target], expr)
37
def make_index(var, num, mode=ast.Load()):
    """Build the subscript expression var[num]; mode is the Subscript's context."""
    base = ast.Name(var, ast.Load()) if isinstance(var, str) else var
    return ast.Subscript(base, ast.Index(ast.Num(num)), mode)
42
def make_index_store(var, num):
    """Build the subscript expression var[num] in Store context (assignment target)."""
    base = ast.Name(var, ast.Load()) if isinstance(var, str) else var
    return ast.Subscript(base, ast.Index(ast.Num(num)), ast.Store())
47
def make_att(var, att, mode=ast.Load()):
    """Build the attribute expression var.att; mode is the Attribute's context."""
    base = ast.Name(var, ast.Load()) if isinstance(var, str) else var
    return ast.Attribute(base, att, mode)
52
def make_att_store(var, att):
    """Build the attribute expression var.att in Store context (assignment target)."""
    base = ast.Name(var, ast.Load()) if isinstance(var, str) else var
    return ast.Attribute(base, att, ast.Store())
57
def make_call(fn_name, args):
    """Build an ast.Call invoking fn_name on args, with no keyword arguments."""
    fn = ast.Name(fn_name, ast.Load()) if isinstance(fn_name, str) else fn_name
    # Python 2 Call signature: (func, args, keywords, starargs, kwargs).
    return ast.Call(fn, args, [], None, None)
62
def make_callStmt(fn_name, args):
    """Build an expression statement calling fn_name on args (call for side effect)."""
    fn = ast.Name(fn_name, ast.Load()) if isinstance(fn_name, str) else fn_name
    # Python 2 Call signature: (func, args, keywords, starargs, kwargs).
    call = ast.Call(fn, args, [], None, None)
    return ast.Expr(call)
66
def make_mergeh(fw, x, y):
    """Build a call to esimd<fw>::mergeh(x, y), the high-half merge at field width fw."""
    fn_name = "esimd<%i>::mergeh" % fw
    return make_call(fn_name, [mk_var(x), mk_var(y)])
69
def make_zero(fw):
    """Build a call producing an all-zero SIMD constant at field width fw."""
    fn_name = "simd<%i>::constant<0>" % fw
    return make_call(fn_name, [])
72
73 
74#
75#
76# Carry Pack Assignment Strategy
77#
78# The hierarchical merging carry pack strategy packs carries
79# into groups of 2, 4, 8 and 16.   For example, to pack
80# 4 carries c0, c1, c2, and c3 into the 32-bit fields of
81# a 128-bit register, the following operations are used.
82#
83# c0 = pablo.SomeCarryGeneratingFn(...)
84# c1 = pablo.SomeCarryGeneratingFn(...)
85# c1_0 = esimd::mergeh<32>(c1, c0)
86# c2 = pablo.SomeCarryGeneratingFn(...)
87# c3 = pablo.SomeCarryGeneratingFn(...)
88# c3_2 = esimd::mergeh<32>(c3, c2)
89# c3_0 = esimd::mergeh<64>(c3_2, c1_0)
90#
91#
92# Packing operations are generated sequentially when
93# the appropriate individual carries or subpacks become
94# available.   
95#
96# Generate the packing operations assuming that the
97# carry_num carry has just been generated.
98#
99
def pow2ceil(n):
    """Smallest power of 2 that is >= n (returns 1 for n <= 1)."""
    c = 1
    while c < n:
        c += c
    return c
104
def pow2floor(n):
    """Largest power of 2 that is <= n (assumes n >= 1).

    Uses floor division (//) so the result stays an int: identical to the
    old / under Python 2, but avoids a float result under Python 3.
    """
    c = 1
    while c <= n:
        c *= 2
    return c // 2
109
def low_bit(n):
    """Isolate the lowest set bit of n (0 if n == 0).

    n & -n is the classic two's-complement identity, equivalent to
    n - (n & (n - 1)).
    """
    return n & -n
112   
def align(n, align_base):
    """Round n up to the next multiple of align_base (align_base >= 1).

    Uses floor division (//): identical to the old / for the non-negative
    ints used here under Python 2, but avoids a float result under Python 3.
    """
    return ((n + align_base - 1) // align_base) * align_base
115
def determine_aligned_block_sizes(pack_size, cis):
    """Compute the aligned carry-slot count for every block in carry-info set cis.

    pack_size is the number of carry fields per pack (e.g. 128/fw).  cis is a
    carry-info set providing block_count, block_first_op, block_op_count,
    containing_block, advance_amount and whileblock — assumed per CCGO's
    conventions; verify against the CCGO module.  Returns a dict mapping each
    block number to its carry count, rounded up so each block's carries form a
    well-shaped (power-of-2 or pack-multiple) group.
    """
    aligned_size = {}
    for i in range(cis.block_count): aligned_size[i] = 0
    seen = []  # NOTE(review): never used after initialization
    for i in range(cis.block_count):
        # Work backwards to process all child blocks before the parent
        # so that the parent incorporates the updated child counts.
        b = cis.block_count - i - 1
        b_carries = 0
        op = cis.block_first_op[b]
        while op < cis.block_first_op[b] + cis.block_op_count[b]:
            sb = cis.containing_block[op]
            if sb == b:
                # Operation directly in this block: ordinary carries and
                # advance-by-1 ops each consume one carry slot; advance_n
                # ops (amount != 1) consume none.
                if op not in cis.advance_amount.keys(): b_carries += 1
                elif cis.advance_amount[op] == 1: b_carries += 1
                op += 1
            else:
                # Operation belongs to a nested block: align to the child's
                # size (capped at pack_size) and add its full allocation.
                align_base = aligned_size[sb]
                if align_base > pack_size: align_base = pack_size
                b_carries = align(b_carries, align_base)
                b_carries += aligned_size[sb]
                op += cis.block_op_count[sb]
        # Force whiles to use full blocks; this possibly can be relaxed.
        if cis.whileblock[b] or b_carries > pack_size:
            aligned_size[b] = align(b_carries, pack_size)
        else:
            aligned_size[b] = pow2ceil(b_carries)
    return aligned_size
144 
# Maximum rendered width of a generated declaration line before wrapping.
MAX_LINE_LENGTH = 80

def BitBlock_decls_from_vars(varlist):
    """Render a C declaration string declaring each name in varlist as a BitBlock.

    Names are comma-separated and wrapped onto fresh "BitBlock" declaration
    lines as the running width approaches MAX_LINE_LENGTH.  Returns "" for an
    empty varlist.

    Changes from the original: the unnecessary `global` statement is dropped
    (the constant is only read) and the emptiness test is idiomatic; the
    produced string is unchanged.
    """
    if not varlist:
        return ""
    decls = "             BitBlock"
    pending = ""
    linelgth = 10
    for v in varlist:
        if linelgth + len(v) + 2 <= MAX_LINE_LENGTH:
            decls += pending + " " + v
            linelgth += len(pending + v) + 1
        else:
            # Close the current line and open a new BitBlock declaration.
            decls += ";\n             BitBlock " + v
            linelgth = 11 + len(v)
        pending = ","
    decls += ";"
    return decls
164 
def block_contains(b0, b1, parent_block_map):
    """True iff block b0 is b1 itself or an ancestor of b1 under parent_block_map.

    Walks the parent chain iteratively; block 0 is the root, so reaching it
    without meeting b0 means b0 does not contain b1.
    """
    current = b1
    while current != b0:
        if current == 0:
            return False
        current = parent_block_map[current]
    return True
169 
class HMCPS_CCGO(CCGO.CCGO):
    """Carry code generator using the hierarchical merging carry pack strategy.

    Packs per-operation carries into groups of 2, 4, 8, ... fields of width fw
    within 128-bit ubitblock registers, generating esimd mergeh operations as
    carries become available (see the module header comment for an example).
    """

    def __init__(self, fw, carryInfoSet, carryGroupVarName='carryG', temp_prefix='__c'):
        """Set up allocation for carryInfoSet with fields of width fw bits.

        carryGroupVarName names the generated carry-group array variable;
        temp_prefix prefixes generated carry temp variable names.
        """
        self.fw = fw
        # Number of fw-bit fields in a 128-bit register.
        self.field_count = 128/fw
        self.carryInfoSet = carryInfoSet
        self.carryGroupVar = carryGroupVarName
        self.temp_prefix = temp_prefix
        self.aligned_size = determine_aligned_block_sizes(self.field_count, carryInfoSet)
        # Number of 128-bit ubitblocks needed for ordinary carries ...
        self.carryblock_count = (self.aligned_size[0] + self.field_count - 1) / self.field_count
        # ... plus one full block per advance_n operation.
        self.totalblock_count = self.carryblock_count + carryInfoSet.adv_n_count
        # alloc_map: operation number -> carry slot position.
        self.alloc_map = {}
        self.alloc_map[0] = 0
        # adv_n_map: advance_n operation number -> dedicated block index.
        self.adv_n_map = {}
        # block_base: block number -> carry slot position of its first op.
        self.block_base = {}
        self.allocate_ops()
        # carry_offset is used within the inner body of while loops to access local carries.
        # The calculated (ub, rp) value is reduced by this amount for the local carry group(s).
        self.carry_offset = 0
#
# Carry Storage/Access
#
# Carries are stored in one or more ubitblocks as byte values.
# For each block, the carry count is rounded up to the nearest power of 2 ceiling P,
# so that the carry test for that block is accessible as a single value of P bytes.
# Packs of 1, 2, 4 or 8 carries are respectively represented
# as one or more _8, _16, _32 or _64 values.  (Members of ubitblock union.)
#
#
# Allocation phase determines the ubitblock_no and count for each block.

#  carry-in access is a byte load  carryG[packno]._8[offset]
#  carryout store is to a local pack var until we get to the final byte of a pack
#
#  if-test: let P be pack_size in {1,2,4,8,...}
#    if P <= 8, use an integer test expression cG[packno]._%i % (P * 8)[block_offset]
#
#  while test similar
#    local while decl: use a copy of carryGroup
#    while finalize  carry combine:   round up and |= into structure
#
    def carry_pack_full(self, ub, v = None, mode = ast.Load()):
        """AST for the full 128-bit view of carry pack ub: v[ub]._128."""
        if v == None: v = self.carryGroupVar
        return make_att(make_index(v, ub), '_128', mode)

    def carry_pack_index(self, fw, ub, rp, mode = ast.Load()):
        """AST for field rp of carry pack ub at width fw: carryG[ub]._fw[rp]."""
        return make_index(make_att(make_index(self.carryGroupVar, ub), '_%i' % fw), rp, mode)

    def local_pack_full(self, ub, mode = ast.Load()):
        """AST for the local (while-loop) copy of carry pack ub: subcarryG[ub]._128."""
        return self.carry_pack_full(ub, "sub" + self.carryGroupVar, mode)

    def cg_temp(self, hi_carry, lo_carry = None):
        """Name of the temp holding carries hi_carry..lo_carry (single carry if omitted/equal)."""
        if lo_carry == None or hi_carry == lo_carry: return "%s%i" % (self.temp_prefix, hi_carry)
        else: return "%s%i_%i" % (self.temp_prefix, hi_carry, lo_carry)

    def local_temp(self, hi_carry, lo_carry = None):
        """Local (while-loop) counterpart of cg_temp, prefixed with 'sub'."""
        if lo_carry == None or hi_carry == lo_carry: return "sub%s%i" % (self.temp_prefix, hi_carry)
        else: return "sub%s%i_%i" % (self.temp_prefix, hi_carry, lo_carry)

    def gen_merges(self, carry_last, carry_base):
        """Generate mergeh assignments combining the pack ending at carry_last
        with its equally-sized left neighbor, repeating while the doubled pack
        remains properly positioned (the carry_last & size test)."""
        size = carry_last - carry_base + 1
        if carry_last & size:
            v1 = mk_var(self.cg_temp(carry_last, carry_base))
            v0 = mk_var(self.cg_temp(carry_last - size, carry_base - size))
            v2 = mk_var(self.cg_temp(carry_last, carry_base - size), ast.Store())
            return [make_assign(v2, make_mergeh(self.fw * size, v1, v0))] + self.gen_merges(carry_last, carry_base - size)
        else: return []

    #
    #  Given that carry_num carries have been generated and packed,
    #  add zero_count additional carry zero values and pack.
    #  Use shifts to introduce multiple zeroes, where possible.
    #
    def gen_multiple_carry_zero_then_pack(self, carry_num, zero_count):
        """Return assignments padding zero_count zero carries after carry_num and packing."""
        if zero_count == 0: return []
        pending_carry_pack_size = low_bit(carry_num)
        pending_carry_base = carry_num - pending_carry_pack_size
        # We may be able to fill zeroes by shifting.
        # But the shift is limited by any further pending carry pack and
        # the constraint that the result must produce a well-formed pack
        # having a power-of-2 entries.
        #
        final_num = carry_num + zero_count
        pack_size2 = low_bit(pending_carry_base)  # NOTE(review): unused
        if pending_carry_base == 0:
            shift = pow2floor(final_num) - pending_carry_pack_size
        else:
            shift = min(low_bit(pending_carry_base), low_bit(final_num)) - pending_carry_pack_size
        if pending_carry_pack_size == 0 or shift == 0:
            # There is either no pending pack or we are not generating enough
            # carry zeroes to combine into the pending pack, so we can only add new
            # packs.
            #
            if zero_count == 1:  return [make_assign(self.cg_temp(carry_num), make_zero(self.fw))]
            else:
                zero_count_floor = pow2floor(zero_count)
                hi_num = carry_num + zero_count_floor
                a1 = make_assign(self.cg_temp(hi_num - 1, carry_num), make_zero(self.fw))
                remaining_zeroes = zero_count - zero_count_floor
                return [a1] + self.gen_multiple_carry_zero_then_pack(hi_num, remaining_zeroes)
        #
        # Shift the pending pack left (srli on big-endian field order) to
        # introduce `shift` zero fields, then merge and recurse for the rest.
        shift_result = self.cg_temp(carry_num + shift - 1, pending_carry_base)
        pending = self.cg_temp(carry_num - 1, pending_carry_base)
        #print shift_result, " by shift ", pending, shift
        a1 = make_assign(shift_result, make_call('bitblock::srli<%i>' % (self.fw * shift), [mk_var(pending)]))
        # Do any necessary merges
        m = self.gen_merges(carry_num + shift - 1,  pending_carry_base)
        return [a1] + m + self.gen_multiple_carry_zero_then_pack(carry_num + shift, zero_count - shift)

    def allocate_ops(self):
        """Assign each operation a carry slot (alloc_map) and each advance_n op a
        dedicated block (adv_n_map), aligning at block entries/exits; also fills
        block_base with each block's starting slot."""
        carry_count = 0
        adv_n_count = 0
        for op in range(self.carryInfoSet.operation_count):
            b = self.carryInfoSet.containing_block[op]
            if op != 0:
                # If we've just left a block, ensure that we are aligned.
                b_last = self.carryInfoSet.containing_block[op-1]
                if not block_contains(b_last, b, self.carryInfoSet.parent_block):
                    # find the max-sized block just exited.
                    while not block_contains(self.carryInfoSet.parent_block[b_last], b, self.carryInfoSet.parent_block):
                        b_last = self.carryInfoSet.parent_block[b_last]
                    align_base = self.aligned_size[b_last]
                    if align_base > self.field_count: align_base = self.field_count
                    carry_count = align(carry_count, align_base)
            if self.carryInfoSet.block_first_op[b] == op:
                # If we're just entering a block, ensure that we are aligned.
                align_base = self.aligned_size[b]
                if align_base > self.field_count: align_base = self.field_count
                carry_count = align(carry_count, align_base)
            if op not in self.carryInfoSet.advance_amount.keys():
                self.alloc_map[op] = carry_count
                carry_count += 1
            elif self.carryInfoSet.advance_amount[op] == 1:
                self.alloc_map[op] = carry_count
                carry_count += 1
            else:
                # Advance_n op, carry_count does not change.
                self.alloc_map[op] = carry_count
                self.adv_n_map[op] = adv_n_count
                adv_n_count += 1
        # When processing the last operation, make sure that the "next" operation
        # appears to start a new pack.
        self.alloc_map[self.carryInfoSet.operation_count] = align(carry_count, self.field_count)
        for b in range(self.carryInfoSet.block_count):
            self.block_base[b] = self.alloc_map[self.carryInfoSet.block_first_op[b]]

    def GenerateCarryDecls(self):
        """C declaration for the carry-group ubitblock array."""
        return "  ubitblock %s [%i];\n" % (self.carryGroupVar, self.totalblock_count)

    def GenerateInitializations(self):
        """C code zeroing all carry packs and setting any init-to-one carries."""
        v = self.carryGroupVar
        inits = ""
        for i in range(0, self.totalblock_count):
            inits += "%s[%i]._128 = simd<%i>::constant<0>();\n" % (v, i, self.fw)
        for op_no in range(self.carryInfoSet.block_op_count[0]):
            if op_no in self.carryInfoSet.init_one_list:
                posn = self.alloc_map[op_no]
                ub = posn/self.field_count
                rp = posn%self.field_count
                inits += "%s[%i]._%i[%i] = 1;\n" % (self.carryGroupVar, ub, self.fw, rp)
        return inits

    def GenerateStreamFunctionDecls(self):
        """Declarations for all carry pack temps at every pack size (1, 2, 4, ...)."""
        f = self.field_count
        s = 1
        decls = []
        while f > 0:
            decls += [self.cg_temp(s*(i+1)-1, s*i) for i in range(f)]
            f = f/2
            s = s * 2
        return BitBlock_decls_from_vars(decls)

    def GenerateCarryInAccess(self, operation_no):
        """AST loading the carry-in value for operation_no as a bitblock."""
        block_no = self.carryInfoSet.containing_block[operation_no]
        posn = self.alloc_map[operation_no] - self.carry_offset
        ub = posn/self.field_count
        rp = posn%self.field_count
        return make_call("convert", [self.carry_pack_index(self.fw, ub, rp)])

    def GenerateCarryOutStore(self, operation_no, carry_out_expr):
        """Assignments storing carry_out_expr: save into the carry temp, merge
        pending temps, pad skipped slots with zeroes, and write the pack back
        only when this is the final carry of the pack."""
        block_no = self.carryInfoSet.containing_block[operation_no]
        posn = self.alloc_map[operation_no] - self.carry_offset
        ub = posn/self.field_count
        rp = posn%self.field_count
        # Save the carry in the carry temp variable and then merge
        # pending carry temps as far as possible.
        assigs = [make_assign(self.temp_prefix + repr(rp), carry_out_expr)]
        assigs += self.gen_merges(rp, rp)
        # Only generate an actual store for the last carryout in a pack.
        next_op = operation_no + 1
        # Skip over advance_n ops: they occupy no carry slot.
        while self.adv_n_map.has_key(next_op): next_op += 1
        next_posn = self.alloc_map[next_op] - self.carry_offset
        skip = next_posn - posn - 1
        if skip > 0:
            assigs += self.gen_multiple_carry_zero_then_pack(rp+1, skip)
        #print (posn, skip)
        if next_posn % self.field_count == 0:
            # Pack complete: shift carries into storable (low-bit) form and store.
            shift_op = "simd<%i>::srli<%i>" % (self.fw, self.fw-1)
            storable_carry_in_form = make_call(shift_op, [mk_var(self.cg_temp(self.field_count - 1, 0))])
            assigs.append(make_assign(self.carry_pack_full(ub, mode = ast.Store()), storable_carry_in_form))
        return assigs

    def GenerateAdvanceInAccess(self, operation_no):
        """AST loading the saved block for an advance_n operation."""
        return self.carry_pack_full(self.carryblock_count + self.adv_n_map[operation_no])

    def GenerateAdvanceOutStore(self, operation_no, adv_out_expr):
        """Assignment saving the high bits of adv_out_expr for an advance_n operation."""
        return [ast.Assign([self.carry_pack_full(self.carryblock_count + self.adv_n_map[operation_no], mode=ast.Store())],
                           make_call("bitblock::srli<64>", [adv_out_expr]))]

    def GenerateTestAll(self, instance_name):
        """AST testing whether any carry in any pack of the instance is set."""
        if self.totalblock_count == 0: return ast.Num(0)
        else:
            v = make_att(instance_name, self.carryGroupVar)
            t = self.carry_pack_full(0, v)
            for i in range(1, self.totalblock_count):
                t2 = self.carry_pack_full(i, v)
                t = make_call('simd_or', [t, t2])
            return make_call('bitblock::any', [t])

    def GenerateTest(self, block_no, testExpr):
        """Combine block_no's carry test into testExpr: an integer-field access
        when the block's carries fit within one pack, otherwise an OR of its
        full packs."""
        posn = self.block_base[block_no] - self.carry_offset
        ub = posn/self.field_count
        rp = posn%self.field_count
        count = self.aligned_size[block_no]
        width = count * self.fw
        if count < self.field_count:
            t = self.carry_pack_index(width, ub, rp/count)
            return TestHelper_Integer_Or(testExpr, t)
        else:
            t = self.carry_pack_full(ub)
            for i in range(1, count/self.field_count):
                v2 = self.carry_pack_full(ub + i)
                t = make_call('simd_or', [t, v2])
            return TestHelper_Bitblock_Or(testExpr, t)

    def GenerateCarryIfTest(self, block_no, ifTest):
        """Carry test for an if-block (same as GenerateTest)."""
        return self.GenerateTest(block_no, ifTest)

    def GenerateCarryElseFinalization(self, block_no):
        # if the block consists of full carry packs, then
        # no action need be taken: the corresponding carry-in packs
        # must already be zero, or the then branch would have been taken.
        count = self.aligned_size[block_no]
        if count % self.field_count == 0: return []
        # The block has half a carry-pack or less.
        assigs = []
        posn = self.block_base[block_no] - self.carry_offset
        ub = posn / self.field_count
        rp = posn % self.field_count
        next_op = self.carryInfoSet.block_first_op[block_no] + self.carryInfoSet.block_op_count[block_no]
        end_pos = (self.alloc_map[next_op]  - self.carry_offset - 1) % self.field_count
        #print rp, next_op,self.alloc_map[next_op]
        #assigs = [make_assign(self.cg_temp(end_pos, rp), make_zero(self.fw))]
        # Zero out the block's carry slots so subsequent packing stays well-formed.
        assigs = self.gen_multiple_carry_zero_then_pack(rp, end_pos - rp + 1)
        if (end_pos + 1) % self.field_count == 0:
            # Pack complete: store it back in carry-in form.
            shift_op = "simd<%i>::srli<%i>" % (self.fw, self.fw-1)
            storable_carry_in_form = make_call(shift_op, [mk_var(self.cg_temp(self.field_count - 1, 0))])
            assigs.append(make_assign(self.carry_pack_full(ub, mode = ast.Store()), storable_carry_in_form))
        return assigs

    def GenerateLocalDeclare(self, block_no):
        """Declarations for a while-block's local carry group and pack temps."""
        if self.carryInfoSet.block_op_count[block_no] == 0: return []
        count = self.aligned_size[block_no]
        if count >= self.field_count:
            ub_count = count / self.field_count
            decls = [make_callStmt('ubitblock_declare', [mk_var('sub' + self.carryGroupVar), ast.Num(ub_count)])]
            count = self.field_count
        else: decls = []
        # Generate carry pack temps.
        f = count
        s = 1
        temps = []
        while f > 0:
            temps += [self.local_temp(s*(i+1)-1, s*i) for i in range(f)]
            f = f/2
            s = s * 2
        #return BitBlock_decls_from_vars(decls)
        return decls + [make_callStmt('BitBlock_declare', [mk_var(t)]) for t in temps]

    def GenerateCarryWhileTest(self, block_no, testExpr):
        """Carry test for a while-block (same as GenerateTest)."""
        return self.GenerateTest(block_no, testExpr)

    def EnterLocalWhileBlock(self, operation_offset):
        """Switch naming/offsets to the local ('sub') carry group for a while body."""
        self.carryGroupVar = "sub" + self.carryGroupVar
        self.temp_prefix = "sub" + self.temp_prefix
        self.carry_offset = self.alloc_map[operation_offset]
        #print "self.carry_offset = %i" % self.carry_offset

    def ExitLocalWhileBlock(self):
        """Restore the outer carry group naming/offsets after a while body."""
        self.carryGroupVar = self.carryGroupVar[3:]
        self.temp_prefix = self.temp_prefix[3:]
        self.carry_offset = 0

    def GenerateCarryWhileFinalization(self, block_no):
        """Assignments OR-ing the local while-loop carries back into the main group."""
        posn = self.block_base[block_no]
        ub = posn/self.field_count
        rp = posn%self.field_count
        count = self.aligned_size[block_no]
        if count < self.field_count:
            v0 = self.cg_temp(rp + count - 1, rp)
            lv0 = self.local_temp(count - 1, 0)
            return [make_assign(v0, make_call('simd_or', [mk_var(v0), mk_var(lv0)]))]
        n = (count+self.field_count-1)/self.field_count
        assigs = []
        for i in range(n):
            assigs.append(make_assign(self.carry_pack_full(ub + i, mode = ast.Store()), make_call('simd_or', [self.carry_pack_full(ub + i), self.local_pack_full(i)])))
        return assigs

    def GenerateStreamFunctionFinalization(self):
        """No end-of-function work needed for this strategy."""
        return []
472
473#
474#  A version of HMCPS_CCGO eliminating ubitblocks
475#
class HMCPS_CCGO2(HMCPS_CCGO):
    """A version of HMCPS_CCGO that stores carries in plain BitBlocks rather
    than ubitblock unions, using mvmd extract/shift operations for field access."""

    def carry_pack_full(self, ub, v = None, mode = ast.Load()):
        """AST for carry pack ub as a plain BitBlock: v[ub]."""
        if v == None: v = self.carryGroupVar
        return make_index(v, ub, mode)

    def carry_pack_index(self, fw, ub, rp, mode = ast.Load()):
        """AST extracting field rp of carry pack ub at width fw.
        NOTE(review): the mode parameter is unused here (extraction is read-only)."""
        return make_call("mvmd<%i>::extract<%i>" % (fw, rp), [self.carry_pack_full(ub)])

    def GenerateCarryDecls(self):
        """C declarations for the carry-group BitBlock array plus the shared
        single-bit constant used for masking and initialization."""
        return "  BitBlock simd_const_1;\n    BitBlock %s [%i];\n" % (self.carryGroupVar, self.totalblock_count)

    def GenerateInitializations(self):
        """C code building simd_const_1, zeroing all packs and OR-ing in any
        init-to-one carries at their field positions."""
        v = self.carryGroupVar
        inits = "simd_const_1 = mvmd<16>::srli<7>(simd<16>::constant<1>());\n"
        for i in range(0, self.totalblock_count):
            inits += "%s[%i] = simd<%i>::constant<0>();\n" % (v, i, self.fw)
        for op_no in range(self.carryInfoSet.block_op_count[0]):
            if op_no in self.carryInfoSet.init_one_list:
                posn = self.alloc_map[op_no]
                ub = posn/self.field_count
                rp = posn%self.field_count
                v = "%s[%i]" % (self.carryGroupVar, ub)
                inits += "%s = simd_or(%s, mvmd<%i>::slli<%i>(simd_const_1)) ;\n" % (v, v, self.fw, rp)
        return inits

    def GenerateCarryInAccess(self, operation_no):
        """AST loading the carry-in for operation_no: shift its field down to
        position 0 and mask with simd_const_1 (no mask needed for the top field)."""
        block_no = self.carryInfoSet.containing_block[operation_no]
        posn = self.alloc_map[operation_no] - self.carry_offset
        ub = posn/self.field_count
        rp = posn%self.field_count
        #return make_call("convert", [self.carry_pack_index(self.fw, ub, rp)])
        if rp == 0: e = self.carry_pack_full(ub)
        else: e = make_call("mvmd<%i>::srli<%i>" %(self.fw, rp), [self.carry_pack_full(ub)])
        if rp == self.field_count - 1:
            return e
        else: return make_call('simd_and', [e, mk_var("simd_const_1")])

    def GenerateLocalDeclare(self, block_no):
        """Declarations for a while-block's local carry BitBlocks (zeroed) and pack temps."""
        if self.carryInfoSet.block_op_count[block_no] == 0: return []
        count = self.aligned_size[block_no]
        if count >= self.field_count:
            ub_count = count / self.field_count
            decls = [make_callStmt('BitBlock_declare', [self.local_pack_full(ub_count)])]
            decls += [make_assign(self.local_pack_full(i, ast.Store()), make_zero(self.fw)) for i in range(ub_count)]
            count = self.field_count
        else: decls = []
        # Generate carry pack temps.
        f = count
        s = 1
        temps = []
        while f > 0:
            temps += [self.local_temp(s*(i+1)-1, s*i) for i in range(f)]
            f = f/2
            s = s * 2
        #return BitBlock_decls_from_vars(decls)
        return decls + [make_callStmt('BitBlock_declare', [mk_var(t)]) for t in temps]
533   
534# Copyright 2012, Robert D. Cameron
Note: See TracBrowser for help on using the repository browser.