author     Linus Torvalds <torvalds@linux-foundation.org>  2012-10-10 11:15:20 +0900
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-10-10 11:15:20 +0900
commit     f59b51fe3d3092c08d7d554ecb40db24011b2ebc (patch)
tree       f19a5cb72adda3963f83609dbe2314620a4cf63e /arch/x86/um/asm/checksum_32.h
parent     aac2b1f5747ea34696d0da5bdc4d8247aa6437af (diff)
parent     062d52672446014222942cae6934d97769b329f0 (diff)
download   linux-f59b51fe3d3092c08d7d554ecb40db24011b2ebc.tar.gz
Merge branch 'for-linus-37rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rw/uml
Pull UML changes from Richard Weinberger:
 "UML receives this time only cleanups.

  The most outstanding change is the 'include "foo.h"' do 'include
  <foo.h>' conversion done by Al Viro.

  It touches many files, that's why the diffstat is rather big."

* 'for-linus-37rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rw/uml:
  typo in UserModeLinux-HOWTO
  hppfs: fix the return value of get_inode()
  hostfs: drop vmtruncate
  um: get rid of pointless include "..." where include <...> will do
  um: move sysrq.h out of include/shared
  um/x86: merge 32 and 64 bit variants of ptrace.h
  um/x86: merge 32 and 64bit variants of checksum.h
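As a note on the conversion mentioned in the pull message: quote-style includes are searched relative to the including file first, while angle-bracket includes are resolved only through the compiler's configured include paths. A minimal sketch of the pattern, using the placeholder name foo.h from the message itself rather than any real header touched by the series:

/* Before: quoted form, searched relative to the including file first. */
#include "foo.h"

/* After: angle-bracket form, resolved via the -I include paths only. */
#include <foo.h>
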
Diffstat (limited to 'arch/x86/um/asm/checksum_32.h')
-rw-r--r--  arch/x86/um/asm/checksum_32.h | 140
1 file changed, 0 insertions, 140 deletions
diff --git a/arch/x86/um/asm/checksum_32.h b/arch/x86/um/asm/checksum_32.h
index caab74252e27..ab77b6f9a4bf 100644
--- a/arch/x86/um/asm/checksum_32.h
+++ b/arch/x86/um/asm/checksum_32.h
@@ -5,145 +5,6 @@
 #ifndef __UM_SYSDEP_CHECKSUM_H
 #define __UM_SYSDEP_CHECKSUM_H
 
-#include "linux/in6.h"
-#include "linux/string.h"
-
-/*
- * computes the checksum of a memory block at buff, length len,
- * and adds in "sum" (32-bit)
- *
- * returns a 32-bit number suitable for feeding into itself
- * or csum_tcpudp_magic
- *
- * this function must be called with even lengths, except
- * for the last fragment, which may be odd
- *
- * it's best to have buff aligned on a 32-bit boundary
- */
-__wsum csum_partial(const void *buff, int len, __wsum sum);
-
-/*
- *	Note: when you get a NULL pointer exception here this means someone
- *	passed in an incorrect kernel address to one of these functions.
- *
- *	If you use these functions directly please don't forget the
- *	access_ok().
- */
-
-static __inline__
-__wsum csum_partial_copy_nocheck(const void *src, void *dst,
-				       int len, __wsum sum)
-{
-	memcpy(dst, src, len);
-	return csum_partial(dst, len, sum);
-}
-
-/*
- * the same as csum_partial, but copies from src while it
- * checksums, and handles user-space pointer exceptions correctly, when needed.
- *
- * here even more important to align src and dst on a 32-bit (or even
- * better 64-bit) boundary
- */
-
-static __inline__
-__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
-					 int len, __wsum sum, int *err_ptr)
-{
-	if (copy_from_user(dst, src, len)) {
-		*err_ptr = -EFAULT;
-		return (__force __wsum)-1;
-	}
-
-	return csum_partial(dst, len, sum);
-}
-
-/*
- *	This is a version of ip_compute_csum() optimized for IP headers,
- *	which always checksum on 4 octet boundaries.
- *
- *	By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
- *	Arnt Gulbrandsen.
- */
-static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
-{
-	unsigned int sum;
-
-	__asm__ __volatile__(
-	    "movl (%1), %0	;\n"
-	    "subl $4, %2	;\n"
-	    "jbe 2f		;\n"
-	    "addl 4(%1), %0	;\n"
-	    "adcl 8(%1), %0	;\n"
-	    "adcl 12(%1), %0	;\n"
-"1:	    adcl 16(%1), %0	;\n"
-	    "lea 4(%1), %1	;\n"
-	    "decl %2		;\n"
-	    "jne 1b		;\n"
-	    "adcl $0, %0	;\n"
-	    "movl %0, %2	;\n"
-	    "shrl $16, %0	;\n"
-	    "addw %w2, %w0	;\n"
-	    "adcl $0, %0	;\n"
-	    "notl %0		;\n"
-"2:				;\n"
-	/* Since the input registers which are loaded with iph and ipl
-	   are modified, we must also specify them as outputs, or gcc
-	   will assume they contain their original values. */
-	: "=r" (sum), "=r" (iph), "=r" (ihl)
-	: "1" (iph), "2" (ihl)
-	: "memory");
-	return (__force __sum16)sum;
-}
-
-/*
- *	Fold a partial checksum
- */
-
-static inline __sum16 csum_fold(__wsum sum)
-{
-	__asm__(
-		"addl %1, %0		;\n"
-		"adcl $0xffff, %0	;\n"
-		: "=r" (sum)
-		: "r" ((__force u32)sum << 16),
-		  "0" ((__force u32)sum & 0xffff0000)
-	);
-	return (__force __sum16)(~(__force u32)sum >> 16);
-}
-
-static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
-						   unsigned short len,
-						   unsigned short proto,
-						   __wsum sum)
-{
-    __asm__(
-	"addl %1, %0	;\n"
-	"adcl %2, %0	;\n"
-	"adcl %3, %0	;\n"
-	"adcl $0, %0	;\n"
-	: "=r" (sum)
-	: "g" (daddr), "g"(saddr), "g"((len + proto) << 8), "0"(sum));
-    return sum;
-}
-
-/*
- * computes the checksum of the TCP/UDP pseudo-header
- * returns a 16-bit checksum, already complemented
- */
-static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
-						   unsigned short len,
-						   unsigned short proto,
-						   __wsum sum)
-{
-	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
-}
-
-/*
- * this routine is used for miscellaneous IP-like checksums, mainly
- * in icmp.c
- */
-
 static inline __sum16 ip_compute_csum(const void *buff, int len)
 {
     return csum_fold (csum_partial(buff, len, 0));
@@ -198,4 +59,3 @@ static __inline__ __wsum csum_and_copy_to_user(const void *src,
 }
 
 #endif
-
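
For reference, the helpers removed above (csum_partial, csum_fold, and the pseudo-header routines) all build on 16-bit one's-complement arithmetic; the inline assembly just performs the summation and the final fold using the carry flag. Below is a minimal, portable C sketch of that arithmetic, offered purely as an illustration of what these routines compute, not as the code that replaced this header:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit running sum into a 16-bit one's-complement checksum,
 * mirroring what the deleted csum_fold does with adcl. */
static uint16_t fold_csum(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* add carries back in */
	sum = (sum & 0xffff) + (sum >> 16);	/* at most one carry remains */
	return (uint16_t)~sum;
}

/* One's-complement sum over a buffer, read as big-endian 16-bit words;
 * this is the kind of partial sum csum_partial accumulates. */
static uint32_t partial_csum(const void *buff, size_t len)
{
	const uint8_t *p = buff;
	uint32_t sum = 0;

	while (len > 1) {
		sum += ((uint32_t)p[0] << 8) | p[1];
		p += 2;
		len -= 2;
	}
	if (len)				/* odd trailing byte */
		sum += (uint32_t)p[0] << 8;
	return sum;
}

int main(void)
{
	/* Classic 20-byte IPv4 header example with the checksum field zeroed;
	 * the computed checksum for this header is 0xb1e6. */
	static const uint8_t iph[20] = {
		0x45, 0x00, 0x00, 0x3c, 0x1c, 0x46, 0x40, 0x00,
		0x40, 0x06, 0x00, 0x00, 0xac, 0x10, 0x0a, 0x63,
		0xac, 0x10, 0x0a, 0x0c,
	};

	printf("checksum: 0x%04x\n", fold_csum(partial_csum(iph, sizeof(iph))));
	return 0;
}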