Diffstat (limited to 'arch/cris/arch-v32/lib/checksum.S')
-rw-r--r--  arch/cris/arch-v32/lib/checksum.S  72
1 file changed, 23 insertions, 49 deletions
diff --git a/arch/cris/arch-v32/lib/checksum.S b/arch/cris/arch-v32/lib/checksum.S
index 32e66181b826..87f3fd71ab10 100644
--- a/arch/cris/arch-v32/lib/checksum.S
+++ b/arch/cris/arch-v32/lib/checksum.S
@@ -1,6 +1,6 @@
 /*
  * A fast checksum routine using movem
- * Copyright (c) 1998-2001, 2003 Axis Communications AB
+ * Copyright (c) 1998-2007 Axis Communications AB
  *
  * csum_partial(const unsigned char * buff, int len, unsigned int sum)
  */
@@ -12,30 +12,23 @@ csum_partial:
 	;; r11 - length
 	;; r12 - checksum
 
-	;; check for breakeven length between movem and normal word looping versions
-	;; we also do _NOT_ want to compute a checksum over more than the
-	;; actual length when length < 40
-
-	cmpu.w	80,$r11
-	blo	_word_loop
-	nop
-
-	;; need to save the registers we use below in the movem loop
-	;; this overhead is why we have a check above for breakeven length
-	;; only r0 - r8 have to be saved, the other ones are clobber-able
-	;; according to the ABI
+	;; Optimized for large packets
+	subq	10*4, $r11
+	blt	_word_loop
+	move.d	$r11, $acr
 
 	subq	9*4,$sp
-	subq	10*4,$r11	; update length for the first loop
+	clearf	c
 	movem	$r8,[$sp]
 
 	;; do a movem checksum
 
 _mloop:	movem	[$r10+],$r9	; read 10 longwords
-
+	;; Loop count without touching the c flag.
+	addoq	-10*4, $acr, $acr
 	;; perform dword checksumming on the 10 longwords
 
-	add.d	$r0,$r12
+	addc	$r0,$r12
 	addc	$r1,$r12
 	addc	$r2,$r12
 	addc	$r3,$r12
@@ -46,60 +39,41 @@ _mloop:	movem	[$r10+],$r9	; read 10 longwords
 	addc	$r8,$r12
 	addc	$r9,$r12
 
-	;; fold the carry into the checksum, to avoid having to loop the carry
-	;; back into the top
-
-	addc	0,$r12
-	addc	0,$r12		; do it again, since we might have generated a carry
-
-	subq	10*4,$r11
-	bge	_mloop
-	nop
-
-	addq	10*4,$r11	; compensate for last loop underflowing length
+	;; test $acr without trashing carry.
+	move.d	$acr, $acr
+	bpl	_mloop
+	;; r11 <= acr  is not really needed in the mloop, just using the dslot
+	;; to prepare for what is needed after mloop.
+	move.d	$acr, $r11
 
+	;; fold the last carry into r12
+	addc	0, $r12
 	movem	[$sp+],$r8	; restore regs
 
 _word_loop:
-	;; only fold if there is anything to fold.
-
-	cmpq	0,$r12
-	beq	_no_fold
-
-	;; fold 32-bit checksum into a 16-bit checksum, to avoid carries below.
-	;; r9 and r13 can be used as temporaries.
+	addq	10*4,$r11	; compensate for last loop underflowing length
 
 	moveq	-1,$r9		; put 0xffff in r9, faster than move.d 0xffff,r9
 	lsrq	16,$r9
 
 	move.d	$r12,$r13
 	lsrq	16,$r13		; r13 = checksum >> 16
-	and.d	$r9,$r12		; checksum = checksum & 0xffff
-	add.d	$r13,$r12		; checksum += r13
-	move.d	$r12,$r13		; do the same again, maybe we got a carry last add
-	lsrq	16,$r13
-	and.d	$r9,$r12
-	add.d	$r13,$r12
+	and.d	$r9,$r12	; checksum = checksum & 0xffff
 
 _no_fold:
-	cmpq	2,$r11
+	subq	2,$r11
 	blt	_no_words
-	nop
+	add.d	$r13,$r12	; checksum += r13
 
 	;; checksum the rest of the words
-
-	subq	2,$r11
-
 _wloop:	subq	2,$r11
 	bge	_wloop
 	addu.w	[$r10+],$r12
 
-	addq	2,$r11
-
 _no_words:
+	addq	2,$r11
 	;; see if we have one odd byte more
-	cmpq	1,$r11
-	beq	_do_byte
+	bne	_do_byte
 	nop
 	ret
 	move.d	$r12,$r10
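
For reference, csum_partial produces the 32-bit partial ones' complement sum used by the IP checksum code; the patch above reworks the loop counting and carry handling, not the arithmetic itself. Below is a minimal C sketch of that arithmetic, not the kernel's generic lib/checksum.c and with a hypothetical name, showing the same steps: 32-bit adds with the carry folded back in, a leftover 16-bit word, a trailing odd byte, and a fold down to 16 bits. It assumes a little-endian host, as on CRIS.

#include <stdint.h>
#include <string.h>

/*
 * Illustrative only: csum_partial_sketch is a hypothetical name and this
 * is not the kernel's actual implementation.  Assumes a little-endian
 * host, as on CRIS.
 */
uint32_t csum_partial_sketch(const unsigned char *buff, int len, uint32_t sum)
{
	/* 32-bit adds with end-around carry, like the addc chain in _mloop */
	while (len >= 4) {
		uint32_t w;

		memcpy(&w, buff, 4);	/* unaligned-safe longword load */
		sum += w;
		if (sum < w)		/* addition wrapped: fold the carry back in */
			sum++;
		buff += 4;
		len -= 4;
	}

	/* leftover 16-bit word (at most one here), like addu.w in _wloop */
	while (len >= 2) {
		uint16_t h;

		memcpy(&h, buff, 2);
		sum += h;
		buff += 2;
		len -= 2;
	}

	/* one odd byte at the end, as handled after _no_words */
	if (len)
		sum += *buff;

	/* fold the 32-bit sum down to 16 bits */
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);

	return sum;
}

The fold is done twice in the sketch because the first (sum & 0xffff) + (sum >> 16) can itself carry into bit 16; the assembly above gets away with a single fold before _wloop because it keeps the full 32-bit accumulator and only needs headroom for the remaining 16-bit adds.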