author		David S. Miller <davem@davemloft.net>	2014-09-12 17:18:09 -0400
committer	David S. Miller <davem@davemloft.net>	2014-09-12 17:18:09 -0400
commit		dcbc0054d783d53cf0479c391334e9a478daa25f (patch)
tree		3af460f097438ad1de67bc6db1f4c8999e998541
parent		1d7efe9dfaa6025acd29a726315f6f7d30a9f1ca (diff)
parent		74dd40bca95f9968e368751ea81f3b98471109ce (diff)
download	linux-dcbc0054d783d53cf0479c391334e9a478daa25f.tar.gz
Merge branch 'arc_emac'
Beniamino Galvani says:

====================
net: arc_emac: fix tx issues

The patches below fix some issues found in the tx ring reclaim
strategy currently implemented in the arc_emac driver.

Without these patches, a simple outgoing UDP flow blocks almost
immediately with the socket send buffer full, and stays blocked until
new rx packets happen to trigger a clean of the tx ring.

Everything seems to work fine on a Radxa Rock with these fixes applied.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
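
For reference, the reclaim strategy revolves around a circular ring of
tx buffer descriptors: txbd_curr is the producer index advanced by the
xmit path, txbd_dirty the consumer index advanced by the clean path.
The arc_emac_tx_avail() helper introduced in the diff below counts
free slots with standard modular arithmetic. A minimal userspace
sketch of that formula (the names mirror the driver; the ring size and
sample indices here are made up for illustration):

	#include <stdio.h>

	#define TX_BD_NUM 128	/* illustrative ring size */

	/* One slot is deliberately kept unusable so that curr == dirty
	 * can only mean "ring empty", never "ring full" - hence the "- 1".
	 */
	static unsigned int tx_avail(unsigned int txbd_dirty,
				     unsigned int txbd_curr)
	{
		return (txbd_dirty + TX_BD_NUM - txbd_curr - 1) % TX_BD_NUM;
	}

	int main(void)
	{
		printf("%u\n", tx_avail(0, 0));	/* empty ring: 127 free */
		printf("%u\n", tx_avail(0, 1));	/* one queued: 126 free */
		printf("%u\n", tx_avail(5, 4));	/* wrapped to just behind
						 * the consumer: 0 free */
		return 0;
	}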
-rw-r--r--	drivers/net/ethernet/arc/emac_main.c	| 53
1 file changed, 37 insertions(+), 16 deletions(-)
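
The race the new barriers close is the classic lost wakeup between
ndo_start_xmit() (the producer) and tx_clean() (the consumer): the
producer can stop the queue just after the cleaner's last wake check,
leaving the queue stopped with no further tx activity to revive it.
Below is a self-contained C11 sketch of the stop/wake protocol the
patch adopts; atomic_thread_fence() stands in for the kernel's
smp_mb(), the queue_stopped flag for netif_{stop,wake}_queue(), and
all names and sizes are illustrative:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define TX_BD_NUM 8		/* small ring so the demo fills it fast */

	static atomic_uint txbd_curr;	/* producer index, advanced by xmit  */
	static atomic_uint txbd_dirty;	/* consumer index, advanced by clean */
	static atomic_bool queue_stopped;

	static unsigned int tx_avail(void)
	{
		return (atomic_load(&txbd_dirty) + TX_BD_NUM -
			atomic_load(&txbd_curr) - 1) % TX_BD_NUM;
	}

	/* Producer side: queue one descriptor, then stop the queue if the
	 * ring filled up. The re-check after the second fence undoes a
	 * stop that raced with a concurrent clean.
	 */
	static bool xmit_one(void)
	{
		if (!tx_avail())
			return false;	/* NETDEV_TX_BUSY in the driver */

		atomic_store(&txbd_curr,
			     (atomic_load(&txbd_curr) + 1) % TX_BD_NUM);
		atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */

		if (!tx_avail()) {
			atomic_store(&queue_stopped, true);
			atomic_thread_fence(memory_order_seq_cst);
			if (tx_avail())	/* clean ran in between: undo */
				atomic_store(&queue_stopped, false);
		}
		return true;
	}

	/* Consumer side: reclaim one descriptor, then wake the queue only
	 * once free room is actually visible.
	 */
	static void clean_one(void)
	{
		if (atomic_load(&txbd_dirty) == atomic_load(&txbd_curr))
			return;		/* nothing to reclaim */

		atomic_store(&txbd_dirty,
			     (atomic_load(&txbd_dirty) + 1) % TX_BD_NUM);
		atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */

		if (atomic_load(&queue_stopped) && tx_avail())
			atomic_store(&queue_stopped, false);
	}

	int main(void)
	{
		while (xmit_one())	/* fill the ring until it stops */
			;
		printf("after fill:  stopped=%d avail=%u\n",
		       atomic_load(&queue_stopped), tx_avail());
		clean_one();		/* one reclaim suffices to wake */
		printf("after clean: stopped=%d avail=%u\n",
		       atomic_load(&queue_stopped), tx_avail());
		return 0;
	}

Run single-threaded, the demo merely walks the state machine: it fills
the ring, observes the stop, reclaims one slot and observes the wake.
On SMP the two fences mark the points where each side must publish its
index update before testing the other's, which is what the paired
smp_mb() calls in the diff below guarantee.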
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index fe5cfeace6e3..5919394d9f58 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -30,6 +30,17 @@
 #define DRV_VERSION	"1.0"
 
 /**
+ * arc_emac_tx_avail - Return the number of available slots in the tx ring.
+ * @priv: Pointer to ARC EMAC private data structure.
+ *
+ * returns: the number of slots available for transmission in the tx ring.
+ */
+static inline int arc_emac_tx_avail(struct arc_emac_priv *priv)
+{
+	return (priv->txbd_dirty + TX_BD_NUM - priv->txbd_curr - 1) % TX_BD_NUM;
+}
+
+/**
  * arc_emac_adjust_link - Adjust the PHY link duplex.
  * @ndev:	Pointer to the net_device structure.
  *
@@ -180,10 +191,15 @@ static void arc_emac_tx_clean(struct net_device *ndev)
 		txbd->info = 0;
 
 		*txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
-
-		if (netif_queue_stopped(ndev))
-			netif_wake_queue(ndev);
 	}
+
+	/* Ensure that txbd_dirty is visible to tx() before checking
+	 * for queue stopped.
+	 */
+	smp_mb();
+
+	if (netif_queue_stopped(ndev) && arc_emac_tx_avail(priv))
+		netif_wake_queue(ndev);
 }
 
 /**
@@ -298,7 +314,7 @@ static int arc_emac_poll(struct napi_struct *napi, int budget)
 	work_done = arc_emac_rx(ndev, budget);
 	if (work_done < budget) {
 		napi_complete(napi);
-		arc_reg_or(priv, R_ENABLE, RXINT_MASK);
+		arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
 	}
 
 	return work_done;
@@ -327,9 +343,9 @@ static irqreturn_t arc_emac_intr(int irq, void *dev_instance)
 	/* Reset all flags except "MDIO complete" */
 	arc_reg_set(priv, R_STATUS, status);
 
-	if (status & RXINT_MASK) {
+	if (status & (RXINT_MASK | TXINT_MASK)) {
 		if (likely(napi_schedule_prep(&priv->napi))) {
-			arc_reg_clr(priv, R_ENABLE, RXINT_MASK);
+			arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
 			__napi_schedule(&priv->napi);
 		}
 	}
@@ -440,7 +456,7 @@ static int arc_emac_open(struct net_device *ndev)
 	arc_reg_set(priv, R_TX_RING, (unsigned int)priv->txbd_dma);
 
 	/* Enable interrupts */
-	arc_reg_set(priv, R_ENABLE, RXINT_MASK | ERR_MASK);
+	arc_reg_set(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
 
 	/* Set CONTROL */
 	arc_reg_set(priv, R_CTRL,
@@ -511,7 +527,7 @@ static int arc_emac_stop(struct net_device *ndev)
 	netif_stop_queue(ndev);
 
 	/* Disable interrupts */
-	arc_reg_clr(priv, R_ENABLE, RXINT_MASK | ERR_MASK);
+	arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
 
 	/* Disable EMAC */
 	arc_reg_clr(priv, R_CTRL, EN_MASK);
@@ -574,11 +590,9 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
 
 	len = max_t(unsigned int, ETH_ZLEN, skb->len);
 
-	/* EMAC still holds this buffer in its possession.
-	 * CPU must not modify this buffer descriptor
-	 */
-	if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC)) {
+	if (unlikely(!arc_emac_tx_avail(priv))) {
 		netif_stop_queue(ndev);
+		netdev_err(ndev, "BUG! Tx Ring full when queue awake!\n");
 		return NETDEV_TX_BUSY;
 	}
 
@@ -607,12 +621,19 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
 	/* Increment index to point to the next BD */
 	*txbd_curr = (*txbd_curr + 1) % TX_BD_NUM;
 
-	/* Get "info" of the next BD */
-	info = &priv->txbd[*txbd_curr].info;
+	/* Ensure that tx_clean() sees the new txbd_curr before
+	 * checking the queue status. This prevents an unneeded wake
+	 * of the queue in tx_clean().
+	 */
+	smp_mb();
 
-	/* Check if if Tx BD ring is full - next BD is still owned by EMAC */
-	if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC))
+	if (!arc_emac_tx_avail(priv)) {
 		netif_stop_queue(ndev);
+		/* Refresh tx_dirty */
+		smp_mb();
+		if (arc_emac_tx_avail(priv))
+			netif_start_queue(ndev);
+	}
 
 	arc_reg_set(priv, R_STATUS, TXPL_MASK);