author:    dingtianhong <dingtianhong@huawei.com>  2014-03-25 17:00:09 +0800
committer: David S. Miller <davem@davemloft.net>   2014-03-26 16:40:24 -0400
commit:    054bb8801038c93c42cb6cde75141aa396afd065
tree:      825cf25616cca0b1959c2a81e58abdfdb20b8cb1 /drivers/net
parent:    7aceb47a9df3383b24824c3e4bd4f029e4598fda
download:  linux-054bb8801038c93c42cb6cde75141aa396afd065.tar.gz
bonding: slight optimization for bond xmit path
Add the unlikely() macro to the unlikely conditions in the bond
xmit path for a slight optimization.

Signed-off-by: Ding Tianhong <dingtianhong@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
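
For reference, unlikely() is purely a branch-prediction hint; it does not change the
logic of the conditions it wraps. A minimal sketch of how the hint macros are commonly
defined, modelled on the kernel's include/linux/compiler.h (the in-tree definitions
also carry branch-profiling variants), looks like this:

	/*
	 * Simplified sketch of the branch hint macros.
	 * !!(x) normalizes the condition to 0 or 1 before it is
	 * passed to GCC's __builtin_expect().
	 */
	#define likely(x)	__builtin_expect(!!(x), 1)
	#define unlikely(x)	__builtin_expect(!!(x), 0)

With the hint, the compiler can lay out the rare failure paths (a failed
pskb_may_pull() or a blocked netpoll transmit) out of the hot instruction stream,
so the common case falls straight through without a taken branch.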
Diffstat (limited to 'drivers/net')
 drivers/net/bonding/bond_main.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index e717db301d46..ee17c246326e 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2957,7 +2957,7 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
 	fk->ports = 0;
 	noff = skb_network_offset(skb);
 	if (skb->protocol == htons(ETH_P_IP)) {
-		if (!pskb_may_pull(skb, noff + sizeof(*iph)))
+		if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph))))
 			return false;
 		iph = ip_hdr(skb);
 		fk->src = iph->saddr;
@@ -2966,7 +2966,7 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
 		if (!ip_is_fragment(iph))
 			proto = iph->protocol;
 	} else if (skb->protocol == htons(ETH_P_IPV6)) {
-		if (!pskb_may_pull(skb, noff + sizeof(*iph6)))
+		if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph6))))
 			return false;
 		iph6 = ipv6_hdr(skb);
 		fk->src = (__force __be32)ipv6_addr_hash(&iph6->saddr);
@@ -3768,7 +3768,7 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * If we risk deadlock from transmitting this in the
 	 * netpoll path, tell netpoll to queue the frame for later tx
 	 */
-	if (is_netpoll_tx_blocked(dev))
+	if (unlikely(is_netpoll_tx_blocked(dev)))
 		return NETDEV_TX_BUSY;
 
 	rcu_read_lock();