#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <[email protected]> for the OpenSSL
# project. Rights for redistribution and usage in source and binary
# forms are granted according to the OpenSSL license.
# ====================================================================
#
# 2.22x RC4 tune-up:-) It should be noted though that my hand [as in
# "hand-coded assembler"] doesn't account for the whole improvement
# coefficient. It turned out that eliminating RC4_CHAR from the config
# line results in ~40% improvement (yes, even for the C implementation).
# Presumably it has everything to do with AMD cache architecture and
# RAW or whatever penalties. Once again! The module *requires* a config
# line *without* RC4_CHAR! As for the coding "secret," I bet on partial
# register arithmetic. For example, instead of 'inc %r8; and $255,%r8'
# I simply use 'inc %r8b'. Even though the optimization manual
# discourages operating on partial registers, it turned out to be the
# best bet. At least for AMD... How IA32E would perform remains to be
# seen...
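#
# [A sketch in C terms, names mine: keeping the index in a byte-sized
# variable makes the mod-256 wrap free,
#
#	unsigned char x; x++;	/* same as x=(x+1)&255 on a full register */
#
# which is what 'inc %r8b' buys over the two-instruction sequence.]
#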
# As was shown by Marc Bevand, reordering a couple of load operations
# results in an even higher performance gain of 3.3x:-) At least on
# Opteron... For reference, 1x in this case is RC4_CHAR C code
# compiled with gcc 3.3.2, which performs at ~54MBps per 1GHz clock.
# The latter means that if you want to *estimate* what to expect from
# *your* Opteron, multiply 54 by 3.3 and by the clock frequency in GHz.
#
# The Intel P4 EM64T core was found to run the AMD64 code really
# slowly... The only way to achieve comparable performance on P4 was
# to keep RC4_CHAR. Kind of ironic, huh? As it's apparently impossible
# to compose blended code that would perform within even a 30% margin
# on both AMD and Intel platforms, I implement both cases. See
# rc4_skey.c for further details...
#
# The P4 EM64T core appears to be "allergic" to 64-bit inc/dec.
# Replacing those with add/sub results in a 50% performance
# improvement of the folded loop...
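#
# [Compare .Lcloop1 below, which steps with 'add', 'sub' and 'lea',
# against the AMD64 .Lloop1 path, which uses 64-bit 'inc' and 'dec'.]
#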
# As was shown by Zou Nanhai, loop unrolling can improve Intel EM64T
# performance by >30% [unlike the P4 32-bit case, that is]. But this
# is provided that loads are reordered even more aggressively! Both
# code paths, AMD64 and EM64T, reorder loads in essentially the same
# manner as my IA-64 implementation. On Opteron this resulted in a
# modest 5% improvement [I had to test it], while the final Intel P4
# performance now achieves a respectable 432MBps on a 2.8GHz
# processor. For reference: executed on Xeon, the current RC4_CHAR
# code-path is 2.7x faster than the RC4_INT one, while executed on
# Opteron it's only 25% slower than RC4_INT [meaning that if CPU
# µarch detection is not implemented, this final RC4_CHAR code-path
# should be preferred, as it provides better *all-round* performance].
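#
# For reference, one RC4 output byte in C terms [a sketch only, with
# variable names of my choosing; d[] is the 256-entry key schedule of
# RC4_INT or RC4_CHAR elements]:
#
#	x = (x+1)&255;  tx = d[x];
#	y = (y+tx)&255; ty = d[y];
#	d[y] = tx; d[x] = ty;
#	*out++ = d[(tx+ty)&255] ^ *inp++;
#
# The loops below are unrolled and load-reordered variants of this
# very step.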
$output=shift;
open STDOUT,"| $^X ../perlasm/x86_64-xlate.pl $output"
	or die "can't call ../perlasm/x86_64-xlate.pl: $!";
$dat="%rdi"; # arg1
$len="%rsi"; # arg2
$inp="%rdx"; # arg3
$out="%rcx"; # arg4
@XX=("%r8","%r10");
@TX=("%r9","%r11");
$YY="%r12";
$TY="%r13";
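
# Throughout the code $XX[0]#b, $YY#d and the like denote byte- and
# dword-sized views of the 64-bit registers above; the substitution at
# the bottom of this file turns e.g. "%r8#d" into "%r8d".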
$code=<<___;
.text
.globl RC4
.type RC4,\@function,4
.align 16
RC4: or $len,$len
jne .Lentry
ret
.Lentry:
push %r12
push %r13
add \$8,$dat
movl -8($dat),$XX[0]#d
movl -4($dat),$YY#d
cmpl \$-1,256($dat)
je .LRC4_CHAR
inc $XX[0]#b
movl ($dat,$XX[0],4),$TX[0]#d
test \$-8,$len
jz .Lloop1
jmp .Lloop8
.align 16
.Lloop8:
___
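# The 8x unrolled loop gathers one keystream byte per iteration into
# %al, rotating %rax right by 8 each time, so that after eight rounds
# %rax holds 8 keystream bytes to xor against a whole quadword of
# input at once. The @TX/@XX push/shift "rotation" below renames
# registers between iterations, so the load of the next $TX can be
# issued while the current iteration is still storing.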
for ($i=0;$i<8;$i++) {
$code.=<<___;
add $TX[0]#b,$YY#b
mov $XX[0],$XX[1]
movl ($dat,$YY,4),$TY#d
ror \$8,%rax # ror is redundant when $i=0
inc $XX[1]#b
movl ($dat,$XX[1],4),$TX[1]#d
cmp $XX[1],$YY
movl $TX[0]#d,($dat,$YY,4)
cmove $TX[0],$TX[1]
movl $TY#d,($dat,$XX[0],4)
add $TX[0]#b,$TY#b
movb ($dat,$TY,4),%al
___
push(@TX,shift(@TX)); push(@XX,shift(@XX)); # "rotate" registers
}
$code.=<<___;
ror \$8,%rax
sub \$8,$len
xor ($inp),%rax
add \$8,$inp
mov %rax,($out)
add \$8,$out
test \$-8,$len
jnz .Lloop8
cmp \$0,$len
jne .Lloop1
___
$code.=<<___;
.Lexit:
sub \$1,$XX[0]#b
movl $XX[0]#d,-8($dat)
movl $YY#d,-4($dat)
pop %r13
pop %r12
ret
.align 16
.Lloop1:
add $TX[0]#b,$YY#b
movl ($dat,$YY,4),$TY#d
movl $TX[0]#d,($dat,$YY,4)
movl $TY#d,($dat,$XX[0],4)
add $TY#b,$TX[0]#b
inc $XX[0]#b
movl ($dat,$TX[0],4),$TY#d
movl ($dat,$XX[0],4),$TX[0]#d
xorb ($inp),$TY#b
inc $inp
movb $TY#b,($out)
inc $out
dec $len
jnz .Lloop1
jmp .Lexit
.align 16
.LRC4_CHAR:
add \$1,$XX[0]#b
movzb ($dat,$XX[0]),$TX[0]#d
test \$-8,$len
jz .Lcloop1
push %rbx
jmp .Lcloop8
.align 16
.Lcloop8:
mov ($inp),%eax
mov 4($inp),%ebx
___
# unroll 2x4-wise, because 64-bit rotates kill Intel P4...
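# Two input dwords are preloaded into %eax/%ebx and the keystream is
# folded into them in-register, four bytes apiece: each byte is xor-ed
# into %al (resp. %bl) and the register rotated right by 8, so only
# 32-bit rotates are ever used.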
for ($i=0;$i<4;$i++) {
$code.=<<___;
add $TX[0]#b,$YY#b
lea 1($XX[0]),$XX[1]
movzb ($dat,$YY),$TY#d
movzb $XX[1]#b,$XX[1]#d
movzb ($dat,$XX[1]),$TX[1]#d
movb $TX[0]#b,($dat,$YY)
cmp $XX[1],$YY
movb $TY#b,($dat,$XX[0])
jne .Lcmov$i # Intel cmov is sloooow...
mov $TX[0],$TX[1]
.Lcmov$i:
add $TX[0]#b,$TY#b
xor ($dat,$TY),%al
ror \$8,%eax
___
push(@TX,shift(@TX)); push(@XX,shift(@XX)); # "rotate" registers
}
for ($i=4;$i<8;$i++) {
$code.=<<___;
add $TX[0]#b,$YY#b
lea 1($XX[0]),$XX[1]
movzb ($dat,$YY),$TY#d
movzb $XX[1]#b,$XX[1]#d
movzb ($dat,$XX[1]),$TX[1]#d
movb $TX[0]#b,($dat,$YY)
cmp $XX[1],$YY
movb $TY#b,($dat,$XX[0])
jne .Lcmov$i # Intel cmov is sloooow...
mov $TX[0],$TX[1]
.Lcmov$i:
add $TX[0]#b,$TY#b
xor ($dat,$TY),%bl
ror \$8,%ebx
___
push(@TX,shift(@TX)); push(@XX,shift(@XX)); # "rotate" registers
}
$code.=<<___;
lea -8($len),$len
mov %eax,($out)
lea 8($inp),$inp
mov %ebx,4($out)
lea 8($out),$out
test \$-8,$len
jnz .Lcloop8
pop %rbx
cmp \$0,$len
jne .Lcloop1
jmp .Lexit
___
$code.=<<___;
.align 16
.Lcloop1:
add $TX[0]#b,$YY#b
movzb ($dat,$YY),$TY#d
movb $TX[0]#b,($dat,$YY)
movb $TY#b,($dat,$XX[0])
add $TX[0]#b,$TY#b
add \$1,$XX[0]#b
movzb ($dat,$TY),$TY#d
movzb ($dat,$XX[0]),$TX[0]#d
xorb ($inp),$TY#b
lea 1($inp),$inp
movb $TY#b,($out)
lea 1($out),$out
sub \$1,$len
jnz .Lcloop1
jmp .Lexit
.size RC4,.-RC4
___
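# Expand the #b/#w/#d size markers into real sub-register names,
# e.g. "%r8#d" becomes "%r8d" and "%r9#b" becomes "%r9b".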
$code =~ s/#([bwd])/$1/gm;
print $code;
close STDOUT;