Diffstat (limited to 'drivers/net')
-rw-r--r-- drivers/net/bonding/bond_alb.c | 54
-rw-r--r-- drivers/net/bonding/bond_main.c | 134
-rw-r--r-- drivers/net/bonding/bond_options.c | 1
-rw-r--r-- drivers/net/bonding/bonding.h | 1
-rw-r--r-- drivers/net/can/c_can/Kconfig | 7
-rw-r--r-- drivers/net/can/c_can/c_can.c | 36
-rw-r--r-- drivers/net/can/sja1000/peak_pci.c | 14
-rw-r--r-- drivers/net/ethernet/altera/Makefile | 1
-rw-r--r-- drivers/net/ethernet/altera/altera_msgdma.c | 103
-rw-r--r-- drivers/net/ethernet/altera/altera_msgdmahw.h | 13
-rw-r--r-- drivers/net/ethernet/altera/altera_sgdma.c | 179
-rw-r--r-- drivers/net/ethernet/altera/altera_sgdmahw.h | 26
-rw-r--r-- drivers/net/ethernet/altera/altera_tse.h | 47
-rw-r--r-- drivers/net/ethernet/altera/altera_tse_ethtool.c | 108
-rw-r--r-- drivers/net/ethernet/altera/altera_tse_main.c | 133
-rw-r--r-- drivers/net/ethernet/altera/altera_utils.c | 20
-rw-r--r-- drivers/net/ethernet/altera/altera_utils.h | 8
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 2
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_main.c | 6
-rw-r--r-- drivers/net/ethernet/jme.c | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/cmd.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4.h | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/qp.c | 35
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 54
-rw-r--r-- drivers/net/ethernet/sfc/nic.c | 14
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 4
-rw-r--r-- drivers/net/macvlan.c | 18
-rw-r--r-- drivers/net/phy/phy.c | 16
-rw-r--r-- drivers/net/phy/phy_device.c | 4
-rw-r--r-- drivers/net/wireless/ath/ath9k/htc_drv_main.c | 5
-rw-r--r-- drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c | 2
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/coex.c | 6
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h | 8
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/mac80211.c | 9
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/mvm.h | 3
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/rs.c | 2
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/scan.c | 55
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/utils.c | 19
-rw-r--r-- drivers/net/wireless/iwlwifi/pcie/trans.c | 10
-rw-r--r-- drivers/net/xen-netback/common.h | 2
-rw-r--r-- drivers/net/xen-netback/interface.c | 30
-rw-r--r-- drivers/net/xen-netback/netback.c | 102
43 files changed, 776 insertions(+), 537 deletions(-)
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 03e0bcade234..7bbbf1ca0887 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -82,7 +82,8 @@ static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb)
 }
 
 /* Forward declaration */
-static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]);
+static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
+				      bool strict_match);
 static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp);
 static void rlb_src_unlink(struct bonding *bond, u32 index);
 static void rlb_src_link(struct bonding *bond, u32 ip_src_hash,
@@ -459,7 +460,7 @@ static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[])
 
 	bond->alb_info.rlb_promisc_timeout_counter = 0;
 
-	alb_send_learning_packets(bond->curr_active_slave, addr);
+	alb_send_learning_packets(bond->curr_active_slave, addr, true);
 }
 
 /* slave being removed should not be active at this point
@@ -995,7 +996,7 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
 /*********************** tlb/rlb shared functions *********************/
 
 static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
-			    u16 vid)
+			    __be16 vlan_proto, u16 vid)
 {
 	struct learning_pkt pkt;
 	struct sk_buff *skb;
@@ -1021,7 +1022,7 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
 	skb->dev = slave->dev;
 
 	if (vid) {
-		skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vid);
+		skb = vlan_put_tag(skb, vlan_proto, vid);
 		if (!skb) {
 			pr_err("%s: Error: failed to insert VLAN tag\n",
 			       slave->bond->dev->name);
@@ -1032,22 +1033,32 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
 	dev_queue_xmit(skb);
 }
 
-
-static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
+static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
+				      bool strict_match)
 {
 	struct bonding *bond = bond_get_bond_by_slave(slave);
 	struct net_device *upper;
 	struct list_head *iter;
 
 	/* send untagged */
-	alb_send_lp_vid(slave, mac_addr, 0);
+	alb_send_lp_vid(slave, mac_addr, 0, 0);
 
 	/* loop through vlans and send one packet for each */
 	rcu_read_lock();
 	netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
-		if (upper->priv_flags & IFF_802_1Q_VLAN)
-			alb_send_lp_vid(slave, mac_addr,
-					vlan_dev_vlan_id(upper));
+		if (is_vlan_dev(upper) && vlan_get_encap_level(upper) == 0) {
+			if (strict_match &&
+			    ether_addr_equal_64bits(mac_addr,
+						    upper->dev_addr)) {
+				alb_send_lp_vid(slave, mac_addr,
+						vlan_dev_vlan_proto(upper),
+						vlan_dev_vlan_id(upper));
+			} else if (!strict_match) {
+				alb_send_lp_vid(slave, upper->dev_addr,
+						vlan_dev_vlan_proto(upper),
+						vlan_dev_vlan_id(upper));
+			}
+		}
 	}
 	rcu_read_unlock();
 }
@@ -1107,7 +1118,7 @@ static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
 
 	/* fasten the change in the switch */
 	if (bond_slave_can_tx(slave1)) {
-		alb_send_learning_packets(slave1, slave1->dev->dev_addr);
+		alb_send_learning_packets(slave1, slave1->dev->dev_addr, false);
 		if (bond->alb_info.rlb_enabled) {
 			/* inform the clients that the mac address
 			 * has changed
@@ -1119,7 +1130,7 @@ static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
 	}
 
 	if (bond_slave_can_tx(slave2)) {
-		alb_send_learning_packets(slave2, slave2->dev->dev_addr);
+		alb_send_learning_packets(slave2, slave2->dev->dev_addr, false);
 		if (bond->alb_info.rlb_enabled) {
 			/* inform the clients that the mac address
 			 * has changed
@@ -1536,6 +1547,8 @@ void bond_alb_monitor(struct work_struct *work)
 
 	/* send learning packets */
 	if (bond_info->lp_counter >= BOND_ALB_LP_TICKS(bond)) {
+		bool strict_match;
+
 		/* change of curr_active_slave involves swapping of mac addresses.
 		 * in order to avoid this swapping from happening while
 		 * sending the learning packets, the curr_slave_lock must be held for
@@ -1543,8 +1556,15 @@ void bond_alb_monitor(struct work_struct *work)
 		 */
 		read_lock(&bond->curr_slave_lock);
 
-		bond_for_each_slave_rcu(bond, slave, iter)
-			alb_send_learning_packets(slave, slave->dev->dev_addr);
+		bond_for_each_slave_rcu(bond, slave, iter) {
+			/* If updating current_active, use all currently
+			 * user mac addresses (!strict_match).  Otherwise, only
+			 * use mac of the slave device.
+			 */
+			strict_match = (slave != bond->curr_active_slave);
+			alb_send_learning_packets(slave, slave->dev->dev_addr,
+						  strict_match);
+		}
 
 		read_unlock(&bond->curr_slave_lock);
 
@@ -1767,7 +1787,8 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
 	} else {
 		/* set the new_slave to the bond mac address */
 		alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr);
-		alb_send_learning_packets(new_slave, bond->dev->dev_addr);
+		alb_send_learning_packets(new_slave, bond->dev->dev_addr,
+					  false);
 	}
 
 	write_lock_bh(&bond->curr_slave_lock);
@@ -1810,7 +1831,8 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
 		alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr);
 
 		read_lock(&bond->lock);
-		alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr);
+		alb_send_learning_packets(bond->curr_active_slave,
+					  bond_dev->dev_addr, false);
 		if (bond->alb_info.rlb_enabled) {
 			/* inform clients mac address has changed */
 			rlb_req_update_slave_clients(bond, bond->curr_active_slave);
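
The reworked alb_send_learning_packets() walks only first-level VLANs (vlan_get_encap_level() == 0) and uses strict_match to pick the source address: a non-active slave advertises mac_addr only when it matches the VLAN device's own address, while the current active slave (!strict_match) advertises each VLAN's own MAC. A minimal userspace sketch of that selection logic, with toy types standing in for net_device and the vlan_dev_*() helpers (illustrative only, not kernel code):

/* Sketch: vlan_dev and mac_eq() stand in for the kernel's net_device,
 * ether_addr_equal_64bits() and vlan_dev_*() helpers.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct vlan_dev {
	unsigned char dev_addr[6];
	unsigned short vlan_id;
	int encap_level;		/* 0 = first-level vlan */
};

static bool mac_eq(const unsigned char *a, const unsigned char *b)
{
	return memcmp(a, b, 6) == 0;
}

/* Return the source MAC to advertise for this vlan, or NULL to skip it */
static const unsigned char *
learning_src(const struct vlan_dev *v, const unsigned char *slave_mac,
	     bool strict_match)
{
	if (v->encap_level != 0)	/* only first-level vlans */
		return NULL;
	if (strict_match)		/* non-active slave: own MAC only */
		return mac_eq(slave_mac, v->dev_addr) ? slave_mac : NULL;
	return v->dev_addr;		/* active slave: the vlan's own MAC */
}

int main(void)
{
	unsigned char slave_mac[6] = { 0x02, 0, 0, 0, 0, 0x01 };
	struct vlan_dev v = {
		.dev_addr = { 0x02, 0, 0, 0, 0, 0x02 },
		.vlan_id = 10,
		.encap_level = 0,
	};

	printf("strict: %s, loose: %s\n",
	       learning_src(&v, slave_mac, true) ? "send" : "skip",
	       learning_src(&v, slave_mac, false) ? "send" : "skip");
	return 0;
}
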
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 499645b0925c..59a12c61ceb4 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2157,10 +2157,10 @@ static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
  */
 static void bond_arp_send(struct net_device *slave_dev, int arp_op,
 			  __be32 dest_ip, __be32 src_ip,
-			  struct bond_vlan_tag *inner,
-			  struct bond_vlan_tag *outer)
+			  struct bond_vlan_tag *tags)
 {
 	struct sk_buff *skb;
+	int i;
 
 	pr_debug("arp %d on slave %s: dst %pI4 src %pI4\n",
 		 arp_op, slave_dev->name, &dest_ip, &src_ip);
@@ -2172,21 +2172,26 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
 		net_err_ratelimited("ARP packet allocation failed\n");
 		return;
 	}
-	if (outer->vlan_id) {
-		if (inner->vlan_id) {
-			pr_debug("inner tag: proto %X vid %X\n",
-				 ntohs(inner->vlan_proto), inner->vlan_id);
-			skb = __vlan_put_tag(skb, inner->vlan_proto,
-					     inner->vlan_id);
-			if (!skb) {
-				net_err_ratelimited("failed to insert inner VLAN tag\n");
-				return;
-			}
-		}
 
-		pr_debug("outer reg: proto %X vid %X\n",
-			 ntohs(outer->vlan_proto), outer->vlan_id);
-		skb = vlan_put_tag(skb, outer->vlan_proto, outer->vlan_id);
+	/* Go through all the tags backwards and add them to the packet */
+	for (i = BOND_MAX_VLAN_ENCAP - 1; i > 0; i--) {
+		if (!tags[i].vlan_id)
+			continue;
+
+		pr_debug("inner tag: proto %X vid %X\n",
+			 ntohs(tags[i].vlan_proto), tags[i].vlan_id);
+		skb = __vlan_put_tag(skb, tags[i].vlan_proto,
+				     tags[i].vlan_id);
+		if (!skb) {
+			net_err_ratelimited("failed to insert inner VLAN tag\n");
+			return;
+		}
+	}
+	/* Set the outer tag */
+	if (tags[0].vlan_id) {
+		pr_debug("outer tag: proto %X vid %X\n",
+			 ntohs(tags[0].vlan_proto), tags[0].vlan_id);
+		skb = vlan_put_tag(skb, tags[0].vlan_proto, tags[0].vlan_id);
 		if (!skb) {
 			net_err_ratelimited("failed to insert outer VLAN tag\n");
 			return;
@@ -2195,22 +2200,52 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
 	arp_xmit(skb);
 }
 
+/* Validate the device path between the @start_dev and the @end_dev.
+ * The path is valid if the @end_dev is reachable through device
+ * stacking.
+ * When the path is validated, collect any vlan information in the
+ * path.
+ */
+static bool bond_verify_device_path(struct net_device *start_dev,
+				    struct net_device *end_dev,
+				    struct bond_vlan_tag *tags)
+{
+	struct net_device *upper;
+	struct list_head  *iter;
+	int  idx;
+
+	if (start_dev == end_dev)
+		return true;
+
+	netdev_for_each_upper_dev_rcu(start_dev, upper, iter) {
+		if (bond_verify_device_path(upper, end_dev, tags)) {
+			if (is_vlan_dev(upper)) {
+				idx = vlan_get_encap_level(upper);
+				if (idx >= BOND_MAX_VLAN_ENCAP)
+					return false;
+
+				tags[idx].vlan_proto =
+						    vlan_dev_vlan_proto(upper);
+				tags[idx].vlan_id = vlan_dev_vlan_id(upper);
+			}
+			return true;
+		}
+	}
+
+	return false;
+}
 
 static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
 {
-	struct net_device *upper, *vlan_upper;
-	struct list_head *iter, *vlan_iter;
 	struct rtable *rt;
-	struct bond_vlan_tag inner, outer;
+	struct bond_vlan_tag tags[BOND_MAX_VLAN_ENCAP];
 	__be32 *targets = bond->params.arp_targets, addr;
 	int i;
+	bool ret;
 
 	for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
 		pr_debug("basa: target %pI4\n", &targets[i]);
-		inner.vlan_proto = 0;
-		inner.vlan_id = 0;
-		outer.vlan_proto = 0;
-		outer.vlan_id = 0;
+		memset(tags, 0, sizeof(tags));
 
 		/* Find out through which dev should the packet go */
 		rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
@@ -2223,7 +2258,8 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
 				net_warn_ratelimited("%s: no route to arp_ip_target %pI4 and arp_validate is set\n",
 						     bond->dev->name,
 						     &targets[i]);
-			bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], 0, &inner, &outer);
+			bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
+				      0, tags);
 			continue;
 		}
 
@@ -2232,52 +2268,12 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
 			goto found;
 
 		rcu_read_lock();
-		/* first we search only for vlan devices. for every vlan
-		 * found we verify its upper dev list, searching for the
-		 * rt->dst.dev. If found we save the tag of the vlan and
-		 * proceed to send the packet.
-		 */
-		netdev_for_each_all_upper_dev_rcu(bond->dev, vlan_upper,
-						  vlan_iter) {
-			if (!is_vlan_dev(vlan_upper))
-				continue;
-
-			if (vlan_upper == rt->dst.dev) {
-				outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper);
-				outer.vlan_id = vlan_dev_vlan_id(vlan_upper);
-				rcu_read_unlock();
-				goto found;
-			}
-			netdev_for_each_all_upper_dev_rcu(vlan_upper, upper,
-							  iter) {
-				if (upper == rt->dst.dev) {
-					/* If the upper dev is a vlan dev too,
-					 *  set the vlan tag to inner tag.
-					 */
-					if (is_vlan_dev(upper)) {
-						inner.vlan_proto = vlan_dev_vlan_proto(upper);
-						inner.vlan_id = vlan_dev_vlan_id(upper);
-					}
-					outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper);
-					outer.vlan_id = vlan_dev_vlan_id(vlan_upper);
-					rcu_read_unlock();
-					goto found;
-				}
-			}
-		}
-
-		/* if the device we're looking for is not on top of any of
-		 * our upper vlans, then just search for any dev that
-		 * matches, and in case it's a vlan - save the id
-		 */
-		netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
-			if (upper == rt->dst.dev) {
-				rcu_read_unlock();
-				goto found;
-			}
-		}
+		ret = bond_verify_device_path(bond->dev, rt->dst.dev, tags);
 		rcu_read_unlock();
 
+		if (ret)
+			goto found;
+
 		/* Not our device - skip */
 		pr_debug("%s: no path to arp_ip_target %pI4 via rt.dev %s\n",
 			 bond->dev->name, &targets[i],
@@ -2290,7 +2286,7 @@ found:
 		addr = bond_confirm_addr(rt->dst.dev, targets[i], 0);
 		ip_rt_put(rt);
 		bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
-			      addr, &inner, &outer);
+			      addr, tags);
 	}
 }
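
bond_verify_device_path() recurses from the bond up through the upper-device list until it reaches rt->dst.dev; on the way back out of a successful recursion, every VLAN device on the path stores its tag at tags[vlan_get_encap_level(upper)], so index 0 always holds the outermost tag. bond_arp_send() then pushes indices BOND_MAX_VLAN_ENCAP-1 down to 1 first and applies tags[0] last, which leaves the outer tag outermost on the wire. A standalone sketch of the same walk over a toy device graph (toy types; only the recursion and the encap-level indexing mirror the patch):

#include <stdbool.h>
#include <stdio.h>

#define MAX_VLAN_ENCAP 2	/* mirrors BOND_MAX_VLAN_ENCAP */

struct tag { unsigned short proto, id; };

struct dev {
	const char *name;
	bool is_vlan;
	int encap_level;		/* 0 = outermost vlan */
	struct tag tag;
	struct dev *uppers[4];		/* NULL-terminated upper list */
};

static bool verify_path(struct dev *start, struct dev *end, struct tag *tags)
{
	if (start == end)
		return true;

	for (int i = 0; start->uppers[i]; i++) {
		struct dev *upper = start->uppers[i];

		if (verify_path(upper, end, tags)) {
			if (upper->is_vlan) {
				if (upper->encap_level >= MAX_VLAN_ENCAP)
					return false;
				tags[upper->encap_level] = upper->tag;
			}
			return true;
		}
	}
	return false;
}

int main(void)
{
	/* bond0 <- vlan10 (outer) <- vlan20 (inner, Q-in-Q) */
	struct dev inner = { "vlan20", true, 1, { 0x8100, 20 }, { 0 } };
	struct dev outer = { "vlan10", true, 0, { 0x8100, 10 }, { &inner } };
	struct dev bond  = { "bond0", false, 0, { 0, 0 }, { &outer } };
	struct tag tags[MAX_VLAN_ENCAP] = { { 0, 0 } };

	if (verify_path(&bond, &inner, tags)) {
		/* push order: inner tags first, outer tag last */
		for (int i = MAX_VLAN_ENCAP - 1; i >= 0; i--) {
			if (tags[i].id)
				printf("%s tag: vid %u\n",
				       i ? "inner" : "outer",
				       (unsigned)tags[i].id);
		}
	}
	return 0;
}
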
 
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 94094b3d5a3e..540e0167bf24 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -127,6 +127,7 @@ static const struct bond_opt_value bond_fail_over_mac_tbl[] = {
 static const struct bond_opt_value bond_intmax_tbl[] = {
 	{ "off",     0,       BOND_VALFLAG_DEFAULT},
 	{ "maxval",  INT_MAX, BOND_VALFLAG_MAX},
+	{ NULL,      -1,      0}
 };
 
 static const struct bond_opt_value bond_lacp_rate_tbl[] = {
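
The bond_options.c one-liner restores the NULL sentinel that every bond_opt_value table must end with: the option-parsing code walks a table until it finds an entry whose string is NULL, so a table without the terminator sends the walker past the end of the array. A minimal sketch of sentinel-terminated table lookup (a toy shape assumed for illustration, not the actual bonding API):

#include <limits.h>
#include <stdio.h>
#include <string.h>

struct opt_value {
	const char *string;	/* NULL string marks the end of the table */
	int value;
};

static const struct opt_value intmax_tbl[] = {
	{ "off",    0 },
	{ "maxval", INT_MAX },
	{ NULL,     -1 },	/* the sentinel the patch adds */
};

static const struct opt_value *opt_find(const struct opt_value *tbl,
					const char *s)
{
	for (; tbl->string; tbl++)	/* stops only at the sentinel */
		if (strcmp(tbl->string, s) == 0)
			return tbl;
	return NULL;
}

int main(void)
{
	const struct opt_value *v = opt_find(intmax_tbl, "maxval");

	printf("maxval = %d\n", v ? v->value : -1);
	return 0;
}
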
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index dfc37797df41..ea64aa2f8b95 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -36,6 +36,7 @@
 
 #define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"
 
+#define BOND_MAX_VLAN_ENCAP	2
 #define BOND_MAX_ARP_TARGETS	16
 
 #define BOND_DEFAULT_MIIMON	100
diff --git a/drivers/net/can/c_can/Kconfig b/drivers/net/can/c_can/Kconfig
index 8ab7103d4f44..61ffc12d8fd8 100644
--- a/drivers/net/can/c_can/Kconfig
+++ b/drivers/net/can/c_can/Kconfig
@@ -14,13 +14,6 @@ config CAN_C_CAN_PLATFORM
 	  SPEAr1310 and SPEAr320 evaluation boards & TI (www.ti.com)
 	  boards like am335x, dm814x, dm813x and dm811x.
 
-config CAN_C_CAN_STRICT_FRAME_ORDERING
-	bool "Force a strict RX CAN frame order (may cause frame loss)"
-	---help---
-	  The RX split buffer prevents packet reordering but can cause packet
-	  loss. Only enable this option when you accept to lose CAN frames
-	  in favour of getting the received CAN frames in the correct order.
-
 config CAN_C_CAN_PCI
 	tristate "Generic PCI Bus based C_CAN/D_CAN driver"
 	depends on PCI
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index e154b4cb0f1a..8e78bb48f5a4 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -727,26 +727,12 @@ static u32 c_can_adjust_pending(u32 pend)
 static inline void c_can_rx_object_get(struct net_device *dev,
 				       struct c_can_priv *priv, u32 obj)
 {
-#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
-	if (obj < C_CAN_MSG_RX_LOW_LAST)
-		c_can_object_get(dev, IF_RX, obj, IF_COMM_RCV_LOW);
-	else
-#endif
 		c_can_object_get(dev, IF_RX, obj, priv->comm_rcv_high);
 }
 
 static inline void c_can_rx_finalize(struct net_device *dev,
 				     struct c_can_priv *priv, u32 obj)
 {
-#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
-	if (obj < C_CAN_MSG_RX_LOW_LAST)
-		priv->rxmasked |= BIT(obj - 1);
-	else if (obj == C_CAN_MSG_RX_LOW_LAST) {
-		priv->rxmasked = 0;
-		/* activate all lower message objects */
-		c_can_activate_all_lower_rx_msg_obj(dev, IF_RX);
-	}
-#endif
 	if (priv->type != BOSCH_D_CAN)
 		c_can_object_get(dev, IF_RX, obj, IF_COMM_CLR_NEWDAT);
 }
@@ -794,9 +780,6 @@ static inline u32 c_can_get_pending(struct c_can_priv *priv)
 {
 	u32 pend = priv->read_reg(priv, C_CAN_NEWDAT1_REG);
 
-#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
-	pend &= ~priv->rxmasked;
-#endif
 	return pend;
 }
 
@@ -809,25 +792,6 @@ static inline u32 c_can_get_pending(struct c_can_priv *priv)
 * has arrived. To work around this issue, we keep two groups of message
  * objects whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT.
  *
- * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = y
- *
- * To ensure in-order frame reception we use the following
- * approach while re-activating a message object to receive further
- * frames:
- * - if the current message object number is lower than
- *   C_CAN_MSG_RX_LOW_LAST, do not clear the NEWDAT bit while clearing
- *   the INTPND bit.
- * - if the current message object number is equal to
- *   C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of all lower
- *   receive message objects.
- * - if the current message object number is greater than
- *   C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of
- *   only this message object.
- *
- * This can cause packet loss!
- *
- * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = n
- *
  * We clear the newdat bit right away.
  *
  * This can result in packet reordering when the readout is slow.
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index c540e3d12e3d..564933ae218c 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -551,7 +551,7 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	struct sja1000_priv *priv;
 	struct peak_pci_chan *chan;
-	struct net_device *dev;
+	struct net_device *dev, *prev_dev;
 	void __iomem *cfg_base, *reg_base;
 	u16 sub_sys_id, icr;
 	int i, err, channels;
@@ -688,11 +688,13 @@ failure_remove_channels:
 	writew(0x0, cfg_base + PITA_ICR + 2);
 
 	chan = NULL;
-	for (dev = pci_get_drvdata(pdev); dev; dev = chan->prev_dev) {
-		unregister_sja1000dev(dev);
-		free_sja1000dev(dev);
+	for (dev = pci_get_drvdata(pdev); dev; dev = prev_dev) {
 		priv = netdev_priv(dev);
 		chan = priv->priv;
+		prev_dev = chan->prev_dev;
+
+		unregister_sja1000dev(dev);
+		free_sja1000dev(dev);
 	}
 
 	/* free any PCIeC resources too */
@@ -726,10 +728,12 @@ static void peak_pci_remove(struct pci_dev *pdev)
 
 	/* Loop over all registered devices */
 	while (1) {
+		struct net_device *prev_dev = chan->prev_dev;
+
 		dev_info(&pdev->dev, "removing device %s\n", dev->name);
 		unregister_sja1000dev(dev);
 		free_sja1000dev(dev);
-		dev = chan->prev_dev;
+		dev = prev_dev;
 
 		if (!dev) {
 			/* do that only for first channel */
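
Both peak_pci loops had the same use-after-free: chan points into the netdev's private data, so chan->prev_dev was being read after unregister_sja1000dev()/free_sja1000dev() had torn that memory down. The fix is the standard save-the-link-before-freeing idiom for walking a list you are destroying; a standalone sketch:

#include <stdio.h>
#include <stdlib.h>

struct node {
	int id;
	struct node *prev_dev;	/* plays the role of chan->prev_dev */
};

int main(void)
{
	/* build a small chain: 2 -> 1 -> 0 */
	struct node *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		n->id = i;
		n->prev_dev = head;
		head = n;
	}

	/* WRONG: for (n = head; n; n = n->prev_dev) { free(n); }
	 * reads n->prev_dev after free(n).  Save the link first instead:
	 */
	for (struct node *n = head; n; ) {
		struct node *prev = n->prev_dev;	/* read before free */

		printf("removing node %d\n", n->id);
		free(n);
		n = prev;
	}
	return 0;
}
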
diff --git a/drivers/net/ethernet/altera/Makefile b/drivers/net/ethernet/altera/Makefile
index d4a187e45369..3eff2fd3997e 100644
--- a/drivers/net/ethernet/altera/Makefile
+++ b/drivers/net/ethernet/altera/Makefile
@@ -5,3 +5,4 @@
 obj-$(CONFIG_ALTERA_TSE) += altera_tse.o
 altera_tse-objs := altera_tse_main.o altera_tse_ethtool.o \
 altera_msgdma.o altera_sgdma.o altera_utils.o
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/ethernet/altera/altera_msgdma.c b/drivers/net/ethernet/altera/altera_msgdma.c
index 38c500f95b9e..0fb986ba3290 100644
--- a/drivers/net/ethernet/altera/altera_msgdma.c
+++ b/drivers/net/ethernet/altera/altera_msgdma.c
@@ -37,16 +37,16 @@ void msgdma_start_rxdma(struct altera_tse_private *priv)
 void msgdma_reset(struct altera_tse_private *priv)
 {
 	int counter;
-	struct msgdma_csr *txcsr = priv->tx_dma_csr;
-	struct msgdma_csr *rxcsr = priv->rx_dma_csr;
 
 	/* Reset Rx mSGDMA */
-	iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status);
-	iowrite32(MSGDMA_CSR_CTL_RESET, &rxcsr->control);
+	csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr,
+		msgdma_csroffs(status));
+	csrwr32(MSGDMA_CSR_CTL_RESET, priv->rx_dma_csr,
+		msgdma_csroffs(control));
 
 	counter = 0;
 	while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
-		if (tse_bit_is_clear(&rxcsr->status,
+		if (tse_bit_is_clear(priv->rx_dma_csr, msgdma_csroffs(status),
 				     MSGDMA_CSR_STAT_RESETTING))
 			break;
 		udelay(1);
@@ -57,15 +57,18 @@ void msgdma_reset(struct altera_tse_private *priv)
 			   "TSE Rx mSGDMA resetting bit never cleared!\n");
 
 	/* clear all status bits */
-	iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status);
+	csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr, msgdma_csroffs(status));
 
 	/* Reset Tx mSGDMA */
-	iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status);
-	iowrite32(MSGDMA_CSR_CTL_RESET, &txcsr->control);
+	csrwr32(MSGDMA_CSR_STAT_MASK, priv->tx_dma_csr,
+		msgdma_csroffs(status));
+
+	csrwr32(MSGDMA_CSR_CTL_RESET, priv->tx_dma_csr,
+		msgdma_csroffs(control));
 
 	counter = 0;
 	while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
-		if (tse_bit_is_clear(&txcsr->status,
+		if (tse_bit_is_clear(priv->tx_dma_csr, msgdma_csroffs(status),
 				     MSGDMA_CSR_STAT_RESETTING))
 			break;
 		udelay(1);
@@ -76,58 +79,58 @@ void msgdma_reset(struct altera_tse_private *priv)
 			   "TSE Tx mSGDMA resetting bit never cleared!\n");
 
 	/* clear all status bits */
-	iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status);
+	csrwr32(MSGDMA_CSR_STAT_MASK, priv->tx_dma_csr, msgdma_csroffs(status));
 }
 
 void msgdma_disable_rxirq(struct altera_tse_private *priv)
 {
-	struct msgdma_csr *csr = priv->rx_dma_csr;
-	tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+	tse_clear_bit(priv->rx_dma_csr, msgdma_csroffs(control),
+		      MSGDMA_CSR_CTL_GLOBAL_INTR);
 }
 
 void msgdma_enable_rxirq(struct altera_tse_private *priv)
 {
-	struct msgdma_csr *csr = priv->rx_dma_csr;
-	tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+	tse_set_bit(priv->rx_dma_csr, msgdma_csroffs(control),
+		    MSGDMA_CSR_CTL_GLOBAL_INTR);
 }
 
 void msgdma_disable_txirq(struct altera_tse_private *priv)
 {
-	struct msgdma_csr *csr = priv->tx_dma_csr;
-	tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+	tse_clear_bit(priv->tx_dma_csr, msgdma_csroffs(control),
+		      MSGDMA_CSR_CTL_GLOBAL_INTR);
 }
 
 void msgdma_enable_txirq(struct altera_tse_private *priv)
 {
-	struct msgdma_csr *csr = priv->tx_dma_csr;
-	tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+	tse_set_bit(priv->tx_dma_csr, msgdma_csroffs(control),
+		    MSGDMA_CSR_CTL_GLOBAL_INTR);
 }
 
 void msgdma_clear_rxirq(struct altera_tse_private *priv)
 {
-	struct msgdma_csr *csr = priv->rx_dma_csr;
-	iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status);
+	csrwr32(MSGDMA_CSR_STAT_IRQ, priv->rx_dma_csr, msgdma_csroffs(status));
 }
 
 void msgdma_clear_txirq(struct altera_tse_private *priv)
 {
-	struct msgdma_csr *csr = priv->tx_dma_csr;
-	iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status);
+	csrwr32(MSGDMA_CSR_STAT_IRQ, priv->tx_dma_csr, msgdma_csroffs(status));
 }
 
 /* return 0 to indicate transmit is pending */
 int msgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
 {
-	struct msgdma_extended_desc *desc = priv->tx_dma_desc;
-
-	iowrite32(lower_32_bits(buffer->dma_addr), &desc->read_addr_lo);
-	iowrite32(upper_32_bits(buffer->dma_addr), &desc->read_addr_hi);
-	iowrite32(0, &desc->write_addr_lo);
-	iowrite32(0, &desc->write_addr_hi);
-	iowrite32(buffer->len, &desc->len);
-	iowrite32(0, &desc->burst_seq_num);
-	iowrite32(MSGDMA_DESC_TX_STRIDE, &desc->stride);
-	iowrite32(MSGDMA_DESC_CTL_TX_SINGLE, &desc->control);
+	csrwr32(lower_32_bits(buffer->dma_addr), priv->tx_dma_desc,
+		msgdma_descroffs(read_addr_lo));
+	csrwr32(upper_32_bits(buffer->dma_addr), priv->tx_dma_desc,
+		msgdma_descroffs(read_addr_hi));
+	csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(write_addr_lo));
+	csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(write_addr_hi));
+	csrwr32(buffer->len, priv->tx_dma_desc, msgdma_descroffs(len));
+	csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(burst_seq_num));
+	csrwr32(MSGDMA_DESC_TX_STRIDE, priv->tx_dma_desc,
+		msgdma_descroffs(stride));
+	csrwr32(MSGDMA_DESC_CTL_TX_SINGLE, priv->tx_dma_desc,
+		msgdma_descroffs(control));
 	return 0;
 }
 
@@ -136,16 +139,16 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
 	u32 ready = 0;
 	u32 inuse;
 	u32 status;
-	struct msgdma_csr *txcsr = priv->tx_dma_csr;
 
 	/* Get number of sent descriptors */
-	inuse = ioread32(&txcsr->rw_fill_level) & 0xffff;
+	inuse = csrrd32(priv->tx_dma_csr, msgdma_csroffs(rw_fill_level))
+			& 0xffff;
 
 	if (inuse) { /* Tx FIFO is not empty */
 		ready = priv->tx_prod - priv->tx_cons - inuse - 1;
 	} else {
 		/* Check for buffered last packet */
-		status = ioread32(&txcsr->status);
+		status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status));
 		if (status & MSGDMA_CSR_STAT_BUSY)
 			ready = priv->tx_prod - priv->tx_cons - 1;
 		else
@@ -159,7 +162,6 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
 void msgdma_add_rx_desc(struct altera_tse_private *priv,
 			struct tse_buffer *rxbuffer)
 {
-	struct msgdma_extended_desc *desc = priv->rx_dma_desc;
 	u32 len = priv->rx_dma_buf_sz;
 	dma_addr_t dma_addr = rxbuffer->dma_addr;
 	u32 control = (MSGDMA_DESC_CTL_END_ON_EOP
@@ -169,14 +171,16 @@ void msgdma_add_rx_desc(struct altera_tse_private *priv,
 			| MSGDMA_DESC_CTL_TR_ERR_IRQ
 			| MSGDMA_DESC_CTL_GO);
 
-	iowrite32(0, &desc->read_addr_lo);
-	iowrite32(0, &desc->read_addr_hi);
-	iowrite32(lower_32_bits(dma_addr), &desc->write_addr_lo);
-	iowrite32(upper_32_bits(dma_addr), &desc->write_addr_hi);
-	iowrite32(len, &desc->len);
-	iowrite32(0, &desc->burst_seq_num);
-	iowrite32(0x00010001, &desc->stride);
-	iowrite32(control, &desc->control);
+	csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(read_addr_lo));
+	csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(read_addr_hi));
+	csrwr32(lower_32_bits(dma_addr), priv->rx_dma_desc,
+		msgdma_descroffs(write_addr_lo));
+	csrwr32(upper_32_bits(dma_addr), priv->rx_dma_desc,
+		msgdma_descroffs(write_addr_hi));
+	csrwr32(len, priv->rx_dma_desc, msgdma_descroffs(len));
+	csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(burst_seq_num));
+	csrwr32(0x00010001, priv->rx_dma_desc, msgdma_descroffs(stride));
+	csrwr32(control, priv->rx_dma_desc, msgdma_descroffs(control));
 }
 
 /* status is returned on upper 16 bits,
@@ -187,12 +191,13 @@ u32 msgdma_rx_status(struct altera_tse_private *priv)
 	u32 rxstatus = 0;
 	u32 pktlength;
 	u32 pktstatus;
-	struct msgdma_csr *rxcsr = priv->rx_dma_csr;
-	struct msgdma_response *rxresp = priv->rx_dma_resp;
 
-	if (ioread32(&rxcsr->resp_fill_level) & 0xffff) {
-		pktlength = ioread32(&rxresp->bytes_transferred);
-		pktstatus = ioread32(&rxresp->status);
+	if (csrrd32(priv->rx_dma_csr, msgdma_csroffs(resp_fill_level))
+	    & 0xffff) {
+		pktlength = csrrd32(priv->rx_dma_resp,
+				    msgdma_respoffs(bytes_transferred));
+		pktstatus = csrrd32(priv->rx_dma_resp,
+				    msgdma_respoffs(status));
 		rxstatus = pktstatus;
 		rxstatus = rxstatus << 16;
 		rxstatus |= (pktlength & 0xffff);
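
The Altera TSE rework replaces struct-pointer register access (iowrite32(val, &csr->status)) with base-plus-offset accessors (csrwr32(val, base, msgdma_csroffs(status))): the register block stays an opaque void __iomem * that sparse can type-check (hence -D__CHECK_ENDIAN__ in the Makefile), while offsetof() keeps the layout information the struct encodes. A userspace model of the pattern, with an ordinary byte buffer standing in for the mapped registers and memcpy standing in for readl()/writel():

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Register layout, used only for its offsets - mirrors struct msgdma_csr */
struct csr {
	uint32_t status;
	uint32_t control;
	uint32_t rw_fill_level;
};

#define csroffs(a) (offsetof(struct csr, a))

/* In the kernel these wrap readl()/writel() on a void __iomem * base;
 * here plain memcpy on a byte buffer models the access.
 */
static uint32_t csrrd32(const void *base, size_t offs)
{
	uint32_t v;

	memcpy(&v, (const char *)base + offs, sizeof(v));
	return v;
}

static void csrwr32(uint32_t val, void *base, size_t offs)
{
	memcpy((char *)base + offs, &val, sizeof(val));
}

int main(void)
{
	unsigned char regs[sizeof(struct csr)] = { 0 };	/* fake MMIO block */

	csrwr32(0x1f, regs, csroffs(status));		/* clear status bits */
	printf("status = 0x%x\n", csrrd32(regs, csroffs(status)));
	return 0;
}
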
diff --git a/drivers/net/ethernet/altera/altera_msgdmahw.h b/drivers/net/ethernet/altera/altera_msgdmahw.h
index d7b59ba4019c..e335626e1b6b 100644
--- a/drivers/net/ethernet/altera/altera_msgdmahw.h
+++ b/drivers/net/ethernet/altera/altera_msgdmahw.h
@@ -17,15 +17,6 @@
 #ifndef __ALTERA_MSGDMAHW_H__
 #define __ALTERA_MSGDMAHW_H__
 
-/* mSGDMA standard descriptor format
- */
-struct msgdma_desc {
-	u32 read_addr;	/* data buffer source address */
-	u32 write_addr;	/* data buffer destination address */
-	u32 len;	/* the number of bytes to transfer per descriptor */
-	u32 control;	/* characteristics of the transfer */
-};
-
 /* mSGDMA extended descriptor format
  */
 struct msgdma_extended_desc {
@@ -159,6 +150,10 @@ struct msgdma_response {
 	u32 status;
 };
 
+#define msgdma_respoffs(a) (offsetof(struct msgdma_response, a))
+#define msgdma_csroffs(a) (offsetof(struct msgdma_csr, a))
+#define msgdma_descroffs(a) (offsetof(struct msgdma_extended_desc, a))
+
 /* mSGDMA response register bit definitions
  */
 #define MSGDMA_RESP_EARLY_TERM	BIT(8)
diff --git a/drivers/net/ethernet/altera/altera_sgdma.c b/drivers/net/ethernet/altera/altera_sgdma.c
index dbd40e15b5cc..580553d42d34 100644
--- a/drivers/net/ethernet/altera/altera_sgdma.c
+++ b/drivers/net/ethernet/altera/altera_sgdma.c
@@ -20,8 +20,8 @@
 #include "altera_sgdmahw.h"
 #include "altera_sgdma.h"
 
-static void sgdma_setup_descrip(struct sgdma_descrip *desc,
-				struct sgdma_descrip *ndesc,
+static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
+				struct sgdma_descrip __iomem *ndesc,
 				dma_addr_t ndesc_phys,
 				dma_addr_t raddr,
 				dma_addr_t waddr,
@@ -31,17 +31,17 @@ static void sgdma_setup_descrip(struct sgdma_descrip *desc,
 				int wfixed);
 
 static int sgdma_async_write(struct altera_tse_private *priv,
-			      struct sgdma_descrip *desc);
+			      struct sgdma_descrip __iomem *desc);
 
 static int sgdma_async_read(struct altera_tse_private *priv);
 
 static dma_addr_t
 sgdma_txphysaddr(struct altera_tse_private *priv,
-		 struct sgdma_descrip *desc);
+		 struct sgdma_descrip __iomem *desc);
 
 static dma_addr_t
 sgdma_rxphysaddr(struct altera_tse_private *priv,
-		 struct sgdma_descrip *desc);
+		 struct sgdma_descrip __iomem *desc);
 
 static int sgdma_txbusy(struct altera_tse_private *priv);
 
@@ -79,7 +79,8 @@ int sgdma_initialize(struct altera_tse_private *priv)
 	priv->rxdescphys = (dma_addr_t) 0;
 	priv->txdescphys = (dma_addr_t) 0;
 
-	priv->rxdescphys = dma_map_single(priv->device, priv->rx_dma_desc,
+	priv->rxdescphys = dma_map_single(priv->device,
+					  (void __force *)priv->rx_dma_desc,
 					  priv->rxdescmem, DMA_BIDIRECTIONAL);
 
 	if (dma_mapping_error(priv->device, priv->rxdescphys)) {
@@ -88,7 +89,8 @@ int sgdma_initialize(struct altera_tse_private *priv)
 		return -EINVAL;
 	}
 
-	priv->txdescphys = dma_map_single(priv->device, priv->tx_dma_desc,
+	priv->txdescphys = dma_map_single(priv->device,
+					  (void __force *)priv->tx_dma_desc,
 					  priv->txdescmem, DMA_TO_DEVICE);
 
 	if (dma_mapping_error(priv->device, priv->txdescphys)) {
@@ -98,8 +100,8 @@ int sgdma_initialize(struct altera_tse_private *priv)
 	}
 
 	/* Initialize descriptor memory to all 0's, sync memory to cache */
-	memset(priv->tx_dma_desc, 0, priv->txdescmem);
-	memset(priv->rx_dma_desc, 0, priv->rxdescmem);
+	memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
+	memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);
 
 	dma_sync_single_for_device(priv->device, priv->txdescphys,
 				   priv->txdescmem, DMA_TO_DEVICE);
@@ -126,22 +128,15 @@ void sgdma_uninitialize(struct altera_tse_private *priv)
  */
 void sgdma_reset(struct altera_tse_private *priv)
 {
-	u32 *ptxdescripmem = priv->tx_dma_desc;
-	u32 txdescriplen   = priv->txdescmem;
-	u32 *prxdescripmem = priv->rx_dma_desc;
-	u32 rxdescriplen   = priv->rxdescmem;
-	struct sgdma_csr *ptxsgdma = priv->tx_dma_csr;
-	struct sgdma_csr *prxsgdma = priv->rx_dma_csr;
-
 	/* Initialize descriptor memory to 0 */
-	memset(ptxdescripmem, 0, txdescriplen);
-	memset(prxdescripmem, 0, rxdescriplen);
+	memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
+	memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);
 
-	iowrite32(SGDMA_CTRLREG_RESET, &ptxsgdma->control);
-	iowrite32(0, &ptxsgdma->control);
+	csrwr32(SGDMA_CTRLREG_RESET, priv->tx_dma_csr, sgdma_csroffs(control));
+	csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
 
-	iowrite32(SGDMA_CTRLREG_RESET, &prxsgdma->control);
-	iowrite32(0, &prxsgdma->control);
+	csrwr32(SGDMA_CTRLREG_RESET, priv->rx_dma_csr, sgdma_csroffs(control));
+	csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
 }
 
 /* For SGDMA, interrupts remain enabled after initially enabling,
@@ -167,14 +162,14 @@ void sgdma_disable_txirq(struct altera_tse_private *priv)
 
 void sgdma_clear_rxirq(struct altera_tse_private *priv)
 {
-	struct sgdma_csr *csr = priv->rx_dma_csr;
-	tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
+	tse_set_bit(priv->rx_dma_csr, sgdma_csroffs(control),
+		    SGDMA_CTRLREG_CLRINT);
 }
 
 void sgdma_clear_txirq(struct altera_tse_private *priv)
 {
-	struct sgdma_csr *csr = priv->tx_dma_csr;
-	tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
+	tse_set_bit(priv->tx_dma_csr, sgdma_csroffs(control),
+		    SGDMA_CTRLREG_CLRINT);
 }
 
 /* transmits buffer through SGDMA. Returns number of buffers
@@ -184,11 +179,11 @@ void sgdma_clear_txirq(struct altera_tse_private *priv)
  */
 int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
 {
-	int pktstx = 0;
-	struct sgdma_descrip *descbase = priv->tx_dma_desc;
+	struct sgdma_descrip __iomem *descbase =
+		(struct sgdma_descrip __iomem *)priv->tx_dma_desc;
 
-	struct sgdma_descrip *cdesc = &descbase[0];
-	struct sgdma_descrip *ndesc = &descbase[1];
+	struct sgdma_descrip __iomem *cdesc = &descbase[0];
+	struct sgdma_descrip __iomem *ndesc = &descbase[1];
 
 	/* wait 'til the tx sgdma is ready for the next transmit request */
 	if (sgdma_txbusy(priv))
@@ -204,7 +199,7 @@ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
 			    0,				/* read fixed */
 			    SGDMA_CONTROL_WR_FIXED);	/* Generate SOP */
 
-	pktstx = sgdma_async_write(priv, cdesc);
+	sgdma_async_write(priv, cdesc);
 
 	/* enqueue the request to the pending transmit queue */
 	queue_tx(priv, buffer);
@@ -218,10 +213,10 @@ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
 u32 sgdma_tx_completions(struct altera_tse_private *priv)
 {
 	u32 ready = 0;
-	struct sgdma_descrip *desc = priv->tx_dma_desc;
 
 	if (!sgdma_txbusy(priv) &&
-	    ((desc->control & SGDMA_CONTROL_HW_OWNED) == 0) &&
+	    ((csrrd8(priv->tx_dma_desc, sgdma_descroffs(control))
+	     & SGDMA_CONTROL_HW_OWNED) == 0) &&
 	    (dequeue_tx(priv))) {
 		ready = 1;
 	}
@@ -245,32 +240,31 @@ void sgdma_add_rx_desc(struct altera_tse_private *priv,
  */
 u32 sgdma_rx_status(struct altera_tse_private *priv)
 {
-	struct sgdma_csr *csr = priv->rx_dma_csr;
-	struct sgdma_descrip *base = priv->rx_dma_desc;
-	struct sgdma_descrip *desc = NULL;
-	int pktsrx;
-	unsigned int rxstatus = 0;
-	unsigned int pktlength = 0;
-	unsigned int pktstatus = 0;
+	struct sgdma_descrip __iomem *base =
+		(struct sgdma_descrip __iomem *)priv->rx_dma_desc;
+	struct sgdma_descrip __iomem *desc = NULL;
 	struct tse_buffer *rxbuffer = NULL;
+	unsigned int rxstatus = 0;
 
-	u32 sts = ioread32(&csr->status);
+	u32 sts = csrrd32(priv->rx_dma_csr, sgdma_csroffs(status));
 
 	desc = &base[0];
 	if (sts & SGDMA_STSREG_EOP) {
+		unsigned int pktlength = 0;
+		unsigned int pktstatus = 0;
 		dma_sync_single_for_cpu(priv->device,
 					priv->rxdescphys,
 					priv->sgdmadesclen,
 					DMA_FROM_DEVICE);
 
-		pktlength = desc->bytes_xferred;
-		pktstatus = desc->status & 0x3f;
-		rxstatus = pktstatus;
+		pktlength = csrrd16(desc, sgdma_descroffs(bytes_xferred));
+		pktstatus = csrrd8(desc, sgdma_descroffs(status));
+		rxstatus = pktstatus & ~SGDMA_STATUS_EOP;
 		rxstatus = rxstatus << 16;
 		rxstatus |= (pktlength & 0xffff);
 
 		if (rxstatus) {
-			desc->status = 0;
+			csrwr8(0, desc, sgdma_descroffs(status));
 
 			rxbuffer = dequeue_rx(priv);
 			if (rxbuffer == NULL)
@@ -278,12 +272,12 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
 					    "sgdma rx and rx queue empty!\n");
 
 			/* Clear control */
-			iowrite32(0, &csr->control);
+			csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
 			/* clear status */
-			iowrite32(0xf, &csr->status);
+			csrwr32(0xf, priv->rx_dma_csr, sgdma_csroffs(status));
 
 			/* kick the rx sgdma after reaping this descriptor */
-			pktsrx = sgdma_async_read(priv);
+			sgdma_async_read(priv);
 
 		} else {
 			/* If the SGDMA indicated an end of packet on recv,
@@ -297,10 +291,11 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
 			 */
 			netdev_err(priv->dev,
 				   "SGDMA RX Error Info: %x, %x, %x\n",
-				   sts, desc->status, rxstatus);
+				   sts, csrrd8(desc, sgdma_descroffs(status)),
+				   rxstatus);
 		}
 	} else if (sts == 0) {
-		pktsrx = sgdma_async_read(priv);
+		sgdma_async_read(priv);
 	}
 
 	return rxstatus;
@@ -308,8 +303,8 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
 
 
 /* Private functions */
-static void sgdma_setup_descrip(struct sgdma_descrip *desc,
-				struct sgdma_descrip *ndesc,
+static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
+				struct sgdma_descrip __iomem *ndesc,
 				dma_addr_t ndesc_phys,
 				dma_addr_t raddr,
 				dma_addr_t waddr,
@@ -319,27 +314,30 @@ static void sgdma_setup_descrip(struct sgdma_descrip *desc,
 				int wfixed)
 {
 	/* Clear the next descriptor as not owned by hardware */
-	u32 ctrl = ndesc->control;
+
+	u32 ctrl = csrrd8(ndesc, sgdma_descroffs(control));
 	ctrl &= ~SGDMA_CONTROL_HW_OWNED;
-	ndesc->control = ctrl;
+	csrwr8(ctrl, ndesc, sgdma_descroffs(control));
 
-	ctrl = 0;
 	ctrl = SGDMA_CONTROL_HW_OWNED;
 	ctrl |= generate_eop;
 	ctrl |= rfixed;
 	ctrl |= wfixed;
 
 	/* Channel is implicitly zero, initialized to 0 by default */
-
-	desc->raddr = raddr;
-	desc->waddr = waddr;
-	desc->next = lower_32_bits(ndesc_phys);
-	desc->control = ctrl;
-	desc->status = 0;
-	desc->rburst = 0;
-	desc->wburst = 0;
-	desc->bytes = length;
-	desc->bytes_xferred = 0;
+	csrwr32(lower_32_bits(raddr), desc, sgdma_descroffs(raddr));
+	csrwr32(lower_32_bits(waddr), desc, sgdma_descroffs(waddr));
+
+	csrwr32(0, desc, sgdma_descroffs(pad1));
+	csrwr32(0, desc, sgdma_descroffs(pad2));
+	csrwr32(lower_32_bits(ndesc_phys), desc, sgdma_descroffs(next));
+
+	csrwr8(ctrl, desc, sgdma_descroffs(control));
+	csrwr8(0, desc, sgdma_descroffs(status));
+	csrwr8(0, desc, sgdma_descroffs(wburst));
+	csrwr8(0, desc, sgdma_descroffs(rburst));
+	csrwr16(length, desc, sgdma_descroffs(bytes));
+	csrwr16(0, desc, sgdma_descroffs(bytes_xferred));
 }
 
 /* If hardware is busy, don't restart async read.
@@ -350,11 +348,11 @@ static void sgdma_setup_descrip(struct sgdma_descrip *desc,
  */
 static int sgdma_async_read(struct altera_tse_private *priv)
 {
-	struct sgdma_csr *csr = priv->rx_dma_csr;
-	struct sgdma_descrip *descbase = priv->rx_dma_desc;
-	struct sgdma_descrip *cdesc = &descbase[0];
-	struct sgdma_descrip *ndesc = &descbase[1];
+	struct sgdma_descrip __iomem *descbase =
+		(struct sgdma_descrip __iomem *)priv->rx_dma_desc;
 
+	struct sgdma_descrip __iomem *cdesc = &descbase[0];
+	struct sgdma_descrip __iomem *ndesc = &descbase[1];
 	struct tse_buffer *rxbuffer = NULL;
 
 	if (!sgdma_rxbusy(priv)) {
@@ -379,11 +377,13 @@ static int sgdma_async_read(struct altera_tse_private *priv)
 					   priv->sgdmadesclen,
 					   DMA_TO_DEVICE);
 
-		iowrite32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
-			  &csr->next_descrip);
+		csrwr32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
+			priv->rx_dma_csr,
+			sgdma_csroffs(next_descrip));
 
-		iowrite32((priv->rxctrlreg | SGDMA_CTRLREG_START),
-			  &csr->control);
+		csrwr32((priv->rxctrlreg | SGDMA_CTRLREG_START),
+			priv->rx_dma_csr,
+			sgdma_csroffs(control));
 
 		return 1;
 	}
@@ -392,32 +392,32 @@ static int sgdma_async_read(struct altera_tse_private *priv)
 }
 
 static int sgdma_async_write(struct altera_tse_private *priv,
-			     struct sgdma_descrip *desc)
+			     struct sgdma_descrip __iomem *desc)
 {
-	struct sgdma_csr *csr = priv->tx_dma_csr;
-
 	if (sgdma_txbusy(priv))
 		return 0;
 
 	/* clear control and status */
-	iowrite32(0, &csr->control);
-	iowrite32(0x1f, &csr->status);
+	csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
+	csrwr32(0x1f, priv->tx_dma_csr, sgdma_csroffs(status));
 
 	dma_sync_single_for_device(priv->device, priv->txdescphys,
 				   priv->sgdmadesclen, DMA_TO_DEVICE);
 
-	iowrite32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
-		  &csr->next_descrip);
+	csrwr32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
+		priv->tx_dma_csr,
+		sgdma_csroffs(next_descrip));
 
-	iowrite32((priv->txctrlreg | SGDMA_CTRLREG_START),
-		  &csr->control);
+	csrwr32((priv->txctrlreg | SGDMA_CTRLREG_START),
+		priv->tx_dma_csr,
+		sgdma_csroffs(control));
 
 	return 1;
 }
 
 static dma_addr_t
 sgdma_txphysaddr(struct altera_tse_private *priv,
-		 struct sgdma_descrip *desc)
+		 struct sgdma_descrip __iomem *desc)
 {
 	dma_addr_t paddr = priv->txdescmem_busaddr;
 	uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc;
@@ -426,7 +426,7 @@ sgdma_txphysaddr(struct altera_tse_private *priv,
 
 static dma_addr_t
 sgdma_rxphysaddr(struct altera_tse_private *priv,
-		 struct sgdma_descrip *desc)
+		 struct sgdma_descrip __iomem *desc)
 {
 	dma_addr_t paddr = priv->rxdescmem_busaddr;
 	uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc;
@@ -515,8 +515,8 @@ queue_rx_peekhead(struct altera_tse_private *priv)
  */
 static int sgdma_rxbusy(struct altera_tse_private *priv)
 {
-	struct sgdma_csr *csr = priv->rx_dma_csr;
-	return ioread32(&csr->status) & SGDMA_STSREG_BUSY;
+	return csrrd32(priv->rx_dma_csr, sgdma_csroffs(status))
+		       & SGDMA_STSREG_BUSY;
 }
 
 /* waits for the tx sgdma to finish its current operation, returns 0
@@ -525,13 +525,14 @@ static int sgdma_rxbusy(struct altera_tse_private *priv)
 static int sgdma_txbusy(struct altera_tse_private *priv)
 {
 	int delay = 0;
-	struct sgdma_csr *csr = priv->tx_dma_csr;
 
 	/* if DMA is busy, wait for current transaction to finish */
-	while ((ioread32(&csr->status) & SGDMA_STSREG_BUSY) && (delay++ < 100))
+	while ((csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
+		& SGDMA_STSREG_BUSY) && (delay++ < 100))
 		udelay(1);
 
-	if (ioread32(&csr->status) & SGDMA_STSREG_BUSY) {
+	if (csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
+	    & SGDMA_STSREG_BUSY) {
 		netdev_err(priv->dev, "timeout waiting for tx dma\n");
 		return 1;
 	}
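
sgdma_txbusy() is the driver's bounded-polling idiom: spin on a status bit with a one-microsecond delay per iteration, cap the loop at a fixed count, and report a timeout instead of hanging forever. A sketch of the same shape (usleep() standing in for udelay(), a plain flag for SGDMA_STSREG_BUSY):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static volatile bool hw_busy = true;	/* stands in for SGDMA_STSREG_BUSY */

/* Poll until the busy flag clears or ~100 iterations elapse.
 * Returns 0 when idle, 1 on timeout - same contract as sgdma_txbusy().
 */
static int wait_not_busy(void)
{
	int delay = 0;

	while (hw_busy && delay++ < 100)
		usleep(1);		/* udelay(1) in the driver */

	if (hw_busy) {
		fprintf(stderr, "timeout waiting for tx dma\n");
		return 1;
	}
	return 0;
}

int main(void)
{
	hw_busy = false;		/* pretend the hardware finished */
	printf("wait_not_busy() = %d\n", wait_not_busy());
	return 0;
}
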
diff --git a/drivers/net/ethernet/altera/altera_sgdmahw.h b/drivers/net/ethernet/altera/altera_sgdmahw.h
index ba3334f35383..85bc33b218d9 100644
--- a/drivers/net/ethernet/altera/altera_sgdmahw.h
+++ b/drivers/net/ethernet/altera/altera_sgdmahw.h
@@ -19,16 +19,16 @@
 
 /* SGDMA descriptor structure */
 struct sgdma_descrip {
-	unsigned int	raddr; /* address of data to be read */
-	unsigned int	pad1;
-	unsigned int	waddr;
-	unsigned int    pad2;
-	unsigned int	next;
-	unsigned int	pad3;
-	unsigned short  bytes;
-	unsigned char   rburst;
-	unsigned char	wburst;
-	unsigned short	bytes_xferred;	/* 16 bits, bytes xferred */
+	u32	raddr; /* address of data to be read */
+	u32	pad1;
+	u32	waddr;
+	u32	pad2;
+	u32	next;
+	u32	pad3;
+	u16	bytes;
+	u8	rburst;
+	u8	wburst;
+	u16	bytes_xferred;	/* 16 bits, bytes xferred */
 
 	/* bit 0: error
 	 * bit 1: length error
@@ -39,7 +39,7 @@ struct sgdma_descrip {
 	 * bit 6: reserved
 	 * bit 7: status eop for recv case
 	 */
-	unsigned char	status;
+	u8	status;
 
 	/* bit 0: eop
 	 * bit 1: read_fixed
@@ -47,7 +47,7 @@ struct sgdma_descrip {
 	 * bits 3,4,5,6: Channel (always 0)
 	 * bit 7: hardware owned
 	 */
-	unsigned char	control;
+	u8	control;
 } __packed;
 
 
@@ -101,6 +101,8 @@ struct sgdma_csr {
 	u32	pad3[3];
 };
 
+#define sgdma_csroffs(a) (offsetof(struct sgdma_csr, a))
+#define sgdma_descroffs(a) (offsetof(struct sgdma_descrip, a))
 
 #define SGDMA_STSREG_ERR	BIT(0) /* Error */
 #define SGDMA_STSREG_EOP	BIT(1) /* EOP */
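
Switching the packed descriptor to u32/u16/u8 pins every field to the width the DMA engine expects, independent of compiler and ABI; pairing that with a compile-time size check is a cheap way to lock the layout down. A sketch using userspace stdint types, a GCC/Clang packed attribute, and C11 _Static_assert (the 32-byte total follows from the fields shown above):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Same shape as struct sgdma_descrip after the patch */
struct descrip {
	uint32_t raddr;
	uint32_t pad1;
	uint32_t waddr;
	uint32_t pad2;
	uint32_t next;
	uint32_t pad3;
	uint16_t bytes;
	uint8_t  rburst;
	uint8_t  wburst;
	uint16_t bytes_xferred;
	uint8_t  status;
	uint8_t  control;
} __attribute__((packed));

/* Catch any layout drift at compile time */
_Static_assert(sizeof(struct descrip) == 32, "descriptor must be 32 bytes");
_Static_assert(offsetof(struct descrip, control) == 31,
	       "control must be the last byte");

int main(void)
{
	printf("descriptor size: %zu bytes\n", sizeof(struct descrip));
	return 0;
}
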
diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h
index 465c4aabebbd..2adb24d4523c 100644
--- a/drivers/net/ethernet/altera/altera_tse.h
+++ b/drivers/net/ethernet/altera/altera_tse.h
@@ -357,6 +357,8 @@ struct altera_tse_mac {
 	u32 reserved5[42];
 };
 
+#define tse_csroffs(a) (offsetof(struct altera_tse_mac, a))
+
 /* Transmit and Receive Command Registers Bit Definitions
  */
 #define ALTERA_TSE_TX_CMD_STAT_OMIT_CRC		BIT(17)
@@ -487,4 +489,49 @@ struct altera_tse_private {
  */
 void altera_tse_set_ethtool_ops(struct net_device *);
 
+static inline
+u32 csrrd32(void __iomem *mac, size_t offs)
+{
+	void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+	return readl(paddr);
+}
+
+static inline
+u16 csrrd16(void __iomem *mac, size_t offs)
+{
+	void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+	return readw(paddr);
+}
+
+static inline
+u8 csrrd8(void __iomem *mac, size_t offs)
+{
+	void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+	return readb(paddr);
+}
+
+static inline
+void csrwr32(u32 val, void __iomem *mac, size_t offs)
+{
+	void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+
+	writel(val, paddr);
+}
+
+static inline
+void csrwr16(u16 val, void __iomem *mac, size_t offs)
+{
+	void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+
+	writew(val, paddr);
+}
+
+static inline
+void csrwr8(u8 val, void __iomem *mac, size_t offs)
+{
+	void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+
+	writeb(val, paddr);
+}
+
 #endif /* __ALTERA_TSE_H__ */
diff --git a/drivers/net/ethernet/altera/altera_tse_ethtool.c b/drivers/net/ethernet/altera/altera_tse_ethtool.c
index d817e285b266..be72e1e64525 100644
--- a/drivers/net/ethernet/altera/altera_tse_ethtool.c
+++ b/drivers/net/ethernet/altera/altera_tse_ethtool.c
@@ -96,54 +96,89 @@ static void tse_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
 			   u64 *buf)
 {
 	struct altera_tse_private *priv = netdev_priv(dev);
-	struct altera_tse_mac *mac = priv->mac_dev;
 	u64 ext;
 
-	buf[0] = ioread32(&mac->frames_transmitted_ok);
-	buf[1] = ioread32(&mac->frames_received_ok);
-	buf[2] = ioread32(&mac->frames_check_sequence_errors);
-	buf[3] = ioread32(&mac->alignment_errors);
+	buf[0] = csrrd32(priv->mac_dev,
+			 tse_csroffs(frames_transmitted_ok));
+	buf[1] = csrrd32(priv->mac_dev,
+			 tse_csroffs(frames_received_ok));
+	buf[2] = csrrd32(priv->mac_dev,
+			 tse_csroffs(frames_check_sequence_errors));
+	buf[3] = csrrd32(priv->mac_dev,
+			 tse_csroffs(alignment_errors));
 
 	/* Extended aOctetsTransmittedOK counter */
-	ext = (u64) ioread32(&mac->msb_octets_transmitted_ok) << 32;
-	ext |= ioread32(&mac->octets_transmitted_ok);
+	ext = (u64) csrrd32(priv->mac_dev,
+			    tse_csroffs(msb_octets_transmitted_ok)) << 32;
+
+	ext |= csrrd32(priv->mac_dev,
+		       tse_csroffs(octets_transmitted_ok));
 	buf[4] = ext;
 
 	/* Extended aOctetsReceivedOK counter */
-	ext = (u64) ioread32(&mac->msb_octets_received_ok) << 32;
-	ext |= ioread32(&mac->octets_received_ok);
+	ext = (u64) csrrd32(priv->mac_dev,
+			    tse_csroffs(msb_octets_received_ok)) << 32;
+
+	ext |= csrrd32(priv->mac_dev,
+		       tse_csroffs(octets_received_ok));
 	buf[5] = ext;
 
-	buf[6] = ioread32(&mac->tx_pause_mac_ctrl_frames);
-	buf[7] = ioread32(&mac->rx_pause_mac_ctrl_frames);
-	buf[8] = ioread32(&mac->if_in_errors);
-	buf[9] = ioread32(&mac->if_out_errors);
-	buf[10] = ioread32(&mac->if_in_ucast_pkts);
-	buf[11] = ioread32(&mac->if_in_multicast_pkts);
-	buf[12] = ioread32(&mac->if_in_broadcast_pkts);
-	buf[13] = ioread32(&mac->if_out_discards);
-	buf[14] = ioread32(&mac->if_out_ucast_pkts);
-	buf[15] = ioread32(&mac->if_out_multicast_pkts);
-	buf[16] = ioread32(&mac->if_out_broadcast_pkts);
-	buf[17] = ioread32(&mac->ether_stats_drop_events);
+	buf[6] = csrrd32(priv->mac_dev,
+			 tse_csroffs(tx_pause_mac_ctrl_frames));
+	buf[7] = csrrd32(priv->mac_dev,
+			 tse_csroffs(rx_pause_mac_ctrl_frames));
+	buf[8] = csrrd32(priv->mac_dev,
+			 tse_csroffs(if_in_errors));
+	buf[9] = csrrd32(priv->mac_dev,
+			 tse_csroffs(if_out_errors));
+	buf[10] = csrrd32(priv->mac_dev,
+			  tse_csroffs(if_in_ucast_pkts));
+	buf[11] = csrrd32(priv->mac_dev,
+			  tse_csroffs(if_in_multicast_pkts));
+	buf[12] = csrrd32(priv->mac_dev,
+			  tse_csroffs(if_in_broadcast_pkts));
+	buf[13] = csrrd32(priv->mac_dev,
+			  tse_csroffs(if_out_discards));
+	buf[14] = csrrd32(priv->mac_dev,
+			  tse_csroffs(if_out_ucast_pkts));
+	buf[15] = csrrd32(priv->mac_dev,
+			  tse_csroffs(if_out_multicast_pkts));
+	buf[16] = csrrd32(priv->mac_dev,
+			  tse_csroffs(if_out_broadcast_pkts));
+	buf[17] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_drop_events));
 
 	/* Extended etherStatsOctets counter */
-	ext = (u64) ioread32(&mac->msb_ether_stats_octets) << 32;
-	ext |= ioread32(&mac->ether_stats_octets);
+	ext = (u64) csrrd32(priv->mac_dev,
+			    tse_csroffs(msb_ether_stats_octets)) << 32;
+	ext |= csrrd32(priv->mac_dev,
+		       tse_csroffs(ether_stats_octets));
 	buf[18] = ext;
 
-	buf[19] = ioread32(&mac->ether_stats_pkts);
-	buf[20] = ioread32(&mac->ether_stats_undersize_pkts);
-	buf[21] = ioread32(&mac->ether_stats_oversize_pkts);
-	buf[22] = ioread32(&mac->ether_stats_pkts_64_octets);
-	buf[23] = ioread32(&mac->ether_stats_pkts_65to127_octets);
-	buf[24] = ioread32(&mac->ether_stats_pkts_128to255_octets);
-	buf[25] = ioread32(&mac->ether_stats_pkts_256to511_octets);
-	buf[26] = ioread32(&mac->ether_stats_pkts_512to1023_octets);
-	buf[27] = ioread32(&mac->ether_stats_pkts_1024to1518_octets);
-	buf[28] = ioread32(&mac->ether_stats_pkts_1519tox_octets);
-	buf[29] = ioread32(&mac->ether_stats_jabbers);
-	buf[30] = ioread32(&mac->ether_stats_fragments);
+	buf[19] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_pkts));
+	buf[20] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_undersize_pkts));
+	buf[21] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_oversize_pkts));
+	buf[22] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_pkts_64_octets));
+	buf[23] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_pkts_65to127_octets));
+	buf[24] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_pkts_128to255_octets));
+	buf[25] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_pkts_256to511_octets));
+	buf[26] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_pkts_512to1023_octets));
+	buf[27] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_pkts_1024to1518_octets));
+	buf[28] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_pkts_1519tox_octets));
+	buf[29] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_jabbers));
+	buf[30] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_fragments));
 }
 
 static int tse_sset_count(struct net_device *dev, int sset)
@@ -178,7 +213,6 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 {
 	int i;
 	struct altera_tse_private *priv = netdev_priv(dev);
-	u32 *tse_mac_regs = (u32 *)priv->mac_dev;
 	u32 *buf = regbuf;
 
 	/* Set version to a known value, so ethtool knows
@@ -196,7 +230,7 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 	regs->version = 1;
 
 	for (i = 0; i < TSE_NUM_REGS; i++)
-		buf[i] = ioread32(&tse_mac_regs[i]);
+		buf[i] = csrrd32(priv->mac_dev, i * 4);
 }
 
 static int tse_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
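
The extended statistics counters are 64 bits wide but exposed as an msb/lsb pair of 32-bit registers, combined as (u64)msb << 32 | lsb. The cast before the shift matters: left-shifting a 32-bit value by 32 is undefined. A minimal sketch of the combination:

#include <stdint.h>
#include <stdio.h>

/* Combine an msb/lsb register pair into one 64-bit counter,
 * as tse_fill_stats() does for aOctetsTransmittedOK and friends.
 */
static uint64_t read_stat64(uint32_t msb, uint32_t lsb)
{
	uint64_t ext = (uint64_t)msb << 32;	/* widen before shifting */

	ext |= lsb;
	return ext;
}

int main(void)
{
	/* e.g. msb_octets_transmitted_ok = 1, octets_transmitted_ok = 42 */
	printf("octets = %llu\n",
	       (unsigned long long)read_stat64(1, 42));
	return 0;
}
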
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index e44a4aeb9701..7330681574d2 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -100,29 +100,30 @@ static inline u32 tse_tx_avail(struct altera_tse_private *priv)
  */
 static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
 {
-	struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv;
-	unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0;
-	u32 data;
+	struct net_device *ndev = bus->priv;
+	struct altera_tse_private *priv = netdev_priv(ndev);
 
 	/* set MDIO address */
-	iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr);
+	csrwr32((mii_id & 0x1f), priv->mac_dev,
+		tse_csroffs(mdio_phy0_addr));
 
 	/* get the data */
-	data = ioread32(&mdio_regs[regnum]) & 0xffff;
-	return data;
+	return csrrd32(priv->mac_dev,
+		       tse_csroffs(mdio_phy0) + regnum * 4) & 0xffff;
 }
 
 static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
 				 u16 value)
 {
-	struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv;
-	unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0;
+	struct net_device *ndev = bus->priv;
+	struct altera_tse_private *priv = netdev_priv(ndev);
 
 	/* set MDIO address */
-	iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr);
+	csrwr32((mii_id & 0x1f), priv->mac_dev,
+		tse_csroffs(mdio_phy0_addr));
 
 	/* write the data */
-	iowrite32((u32) value, &mdio_regs[regnum]);
+	csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy0) + regnum * 4);
 	return 0;
 }
 
@@ -168,7 +169,7 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
 	for (i = 0; i < PHY_MAX_ADDR; i++)
 		mdio->irq[i] = PHY_POLL;
 
-	mdio->priv = priv->mac_dev;
+	mdio->priv = dev;
 	mdio->parent = priv->device;
 
 	ret = of_mdiobus_register(mdio, mdio_node);
@@ -563,7 +564,6 @@ static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	unsigned int nopaged_len = skb_headlen(skb);
 	enum netdev_tx ret = NETDEV_TX_OK;
 	dma_addr_t dma_addr;
-	int txcomplete = 0;
 
 	spin_lock_bh(&priv->tx_lock);
 
@@ -599,7 +599,7 @@ static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	dma_sync_single_for_device(priv->device, buffer->dma_addr,
 				   buffer->len, DMA_TO_DEVICE);
 
-	txcomplete = priv->dmaops->tx_buffer(priv, buffer);
+	priv->dmaops->tx_buffer(priv, buffer);
 
 	skb_tx_timestamp(skb);
 
@@ -698,7 +698,6 @@ static struct phy_device *connect_local_phy(struct net_device *dev)
 	struct altera_tse_private *priv = netdev_priv(dev);
 	struct phy_device *phydev = NULL;
 	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
-	int ret;
 
 	if (priv->phy_addr != POLL_PHY) {
 		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
@@ -712,6 +711,7 @@ static struct phy_device *connect_local_phy(struct net_device *dev)
 			netdev_err(dev, "Could not attach to PHY\n");
 
 	} else {
+		int ret;
 		phydev = phy_find_first(priv->mdio);
 		if (phydev == NULL) {
 			netdev_err(dev, "No PHY found\n");
@@ -791,7 +791,6 @@ static int init_phy(struct net_device *dev)
 
 static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
 {
-	struct altera_tse_mac *mac = priv->mac_dev;
 	u32 msb;
 	u32 lsb;
 
@@ -799,8 +798,8 @@ static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
 	lsb = ((addr[5] << 8) | addr[4]) & 0xffff;
 
 	/* Set primary MAC address */
-	iowrite32(msb, &mac->mac_addr_0);
-	iowrite32(lsb, &mac->mac_addr_1);
+	csrwr32(msb, priv->mac_dev, tse_csroffs(mac_addr_0));
+	csrwr32(lsb, priv->mac_dev, tse_csroffs(mac_addr_1));
 }
 
 /* MAC software reset.
@@ -811,26 +810,26 @@ static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
  */
 static int reset_mac(struct altera_tse_private *priv)
 {
-	void __iomem *cmd_cfg_reg = &priv->mac_dev->command_config;
 	int counter;
 	u32 dat;
 
-	dat = ioread32(cmd_cfg_reg);
+	dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
 	dat &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
 	dat |= MAC_CMDCFG_SW_RESET | MAC_CMDCFG_CNT_RESET;
-	iowrite32(dat, cmd_cfg_reg);
+	csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
 
 	counter = 0;
 	while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
-		if (tse_bit_is_clear(cmd_cfg_reg, MAC_CMDCFG_SW_RESET))
+		if (tse_bit_is_clear(priv->mac_dev, tse_csroffs(command_config),
+				     MAC_CMDCFG_SW_RESET))
 			break;
 		udelay(1);
 	}
 
 	if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
-		dat = ioread32(cmd_cfg_reg);
+		dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
 		dat &= ~MAC_CMDCFG_SW_RESET;
-		iowrite32(dat, cmd_cfg_reg);
+		csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
 		return -1;
 	}
 	return 0;
@@ -840,41 +839,57 @@ static int reset_mac(struct altera_tse_private *priv)
 */
 static int init_mac(struct altera_tse_private *priv)
 {
-	struct altera_tse_mac *mac = priv->mac_dev;
 	unsigned int cmd = 0;
 	u32 frm_length;
 
 	/* Setup Rx FIFO */
-	iowrite32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY,
-		  &mac->rx_section_empty);
-	iowrite32(ALTERA_TSE_RX_SECTION_FULL, &mac->rx_section_full);
-	iowrite32(ALTERA_TSE_RX_ALMOST_EMPTY, &mac->rx_almost_empty);
-	iowrite32(ALTERA_TSE_RX_ALMOST_FULL, &mac->rx_almost_full);
+	csrwr32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY,
+		priv->mac_dev, tse_csroffs(rx_section_empty));
+
+	csrwr32(ALTERA_TSE_RX_SECTION_FULL, priv->mac_dev,
+		tse_csroffs(rx_section_full));
+
+	csrwr32(ALTERA_TSE_RX_ALMOST_EMPTY, priv->mac_dev,
+		tse_csroffs(rx_almost_empty));
+
+	csrwr32(ALTERA_TSE_RX_ALMOST_FULL, priv->mac_dev,
+		tse_csroffs(rx_almost_full));
 
 	/* Setup Tx FIFO */
-	iowrite32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY,
-		  &mac->tx_section_empty);
-	iowrite32(ALTERA_TSE_TX_SECTION_FULL, &mac->tx_section_full);
-	iowrite32(ALTERA_TSE_TX_ALMOST_EMPTY, &mac->tx_almost_empty);
-	iowrite32(ALTERA_TSE_TX_ALMOST_FULL, &mac->tx_almost_full);
+	csrwr32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY,
+		priv->mac_dev, tse_csroffs(tx_section_empty));
+
+	csrwr32(ALTERA_TSE_TX_SECTION_FULL, priv->mac_dev,
+		tse_csroffs(tx_section_full));
+
+	csrwr32(ALTERA_TSE_TX_ALMOST_EMPTY, priv->mac_dev,
+		tse_csroffs(tx_almost_empty));
+
+	csrwr32(ALTERA_TSE_TX_ALMOST_FULL, priv->mac_dev,
+		tse_csroffs(tx_almost_full));
 
 	/* MAC Address Configuration */
 	tse_update_mac_addr(priv, priv->dev->dev_addr);
 
 	/* MAC Function Configuration */
 	frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN;
-	iowrite32(frm_length, &mac->frm_length);
-	iowrite32(ALTERA_TSE_TX_IPG_LENGTH, &mac->tx_ipg_length);
+	csrwr32(frm_length, priv->mac_dev, tse_csroffs(frm_length));
+
+	csrwr32(ALTERA_TSE_TX_IPG_LENGTH, priv->mac_dev,
+		tse_csroffs(tx_ipg_length));
 
 	/* Disable RX/TX shift 16 for alignment of all received frames on 16-bit
 	 * start address
 	 */
-	tse_set_bit(&mac->rx_cmd_stat, ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
-	tse_clear_bit(&mac->tx_cmd_stat, ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
-					 ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);
+	tse_set_bit(priv->mac_dev, tse_csroffs(rx_cmd_stat),
+		    ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
+
+	tse_clear_bit(priv->mac_dev, tse_csroffs(tx_cmd_stat),
+		      ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
+		      ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);
 
 	/* Set the MAC options */
-	cmd = ioread32(&mac->command_config);
+	cmd = csrrd32(priv->mac_dev, tse_csroffs(command_config));
 	cmd &= ~MAC_CMDCFG_PAD_EN;	/* No padding Removal on Receive */
 	cmd &= ~MAC_CMDCFG_CRC_FWD;	/* CRC Removal */
 	cmd |= MAC_CMDCFG_RX_ERR_DISC;	/* Automatically discard frames
@@ -889,9 +904,10 @@ static int init_mac(struct altera_tse_private *priv)
 	cmd &= ~MAC_CMDCFG_ETH_SPEED;
 	cmd &= ~MAC_CMDCFG_ENA_10;
 
-	iowrite32(cmd, &mac->command_config);
+	csrwr32(cmd, priv->mac_dev, tse_csroffs(command_config));
 
-	iowrite32(ALTERA_TSE_PAUSE_QUANTA, &mac->pause_quanta);
+	csrwr32(ALTERA_TSE_PAUSE_QUANTA, priv->mac_dev,
+		tse_csroffs(pause_quanta));
 
 	if (netif_msg_hw(priv))
 		dev_dbg(priv->device,
@@ -904,15 +920,14 @@ static int init_mac(struct altera_tse_private *priv)
  */
 static void tse_set_mac(struct altera_tse_private *priv, bool enable)
 {
-	struct altera_tse_mac *mac = priv->mac_dev;
-	u32 value = ioread32(&mac->command_config);
+	u32 value = csrrd32(priv->mac_dev, tse_csroffs(command_config));
 
 	if (enable)
 		value |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA;
 	else
 		value &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
 
-	iowrite32(value, &mac->command_config);
+	csrwr32(value, priv->mac_dev, tse_csroffs(command_config));
 }
 
 /* Change the MTU
@@ -942,13 +957,12 @@ static int tse_change_mtu(struct net_device *dev, int new_mtu)
 static void altera_tse_set_mcfilter(struct net_device *dev)
 {
 	struct altera_tse_private *priv = netdev_priv(dev);
-	struct altera_tse_mac *mac = priv->mac_dev;
 	int i;
 	struct netdev_hw_addr *ha;
 
 	/* clear the hash filter */
 	for (i = 0; i < 64; i++)
-		iowrite32(0, &(mac->hash_table[i]));
+		csrwr32(0, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
 
 	netdev_for_each_mc_addr(ha, dev) {
 		unsigned int hash = 0;
@@ -964,7 +978,7 @@ static void altera_tse_set_mcfilter(struct net_device *dev)
 
 			hash = (hash << 1) | xor_bit;
 		}
-		iowrite32(1, &(mac->hash_table[hash]));
+		csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + hash * 4);
 	}
 }
 
@@ -972,12 +986,11 @@ static void altera_tse_set_mcfilter(struct net_device *dev)
 static void altera_tse_set_mcfilterall(struct net_device *dev)
 {
 	struct altera_tse_private *priv = netdev_priv(dev);
-	struct altera_tse_mac *mac = priv->mac_dev;
 	int i;
 
 	/* set the hash filter */
 	for (i = 0; i < 64; i++)
-		iowrite32(1, &(mac->hash_table[i]));
+		csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
 }
 
 /* Set or clear the multicast filter for this adaptor
@@ -985,12 +998,12 @@ static void altera_tse_set_mcfilterall(struct net_device *dev)
 static void tse_set_rx_mode_hashfilter(struct net_device *dev)
 {
 	struct altera_tse_private *priv = netdev_priv(dev);
-	struct altera_tse_mac *mac = priv->mac_dev;
 
 	spin_lock(&priv->mac_cfg_lock);
 
 	if (dev->flags & IFF_PROMISC)
-		tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
+		tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
+			    MAC_CMDCFG_PROMIS_EN);
 
 	if (dev->flags & IFF_ALLMULTI)
 		altera_tse_set_mcfilterall(dev);
@@ -1005,15 +1018,16 @@ static void tse_set_rx_mode_hashfilter(struct net_device *dev)
 static void tse_set_rx_mode(struct net_device *dev)
 {
 	struct altera_tse_private *priv = netdev_priv(dev);
-	struct altera_tse_mac *mac = priv->mac_dev;
 
 	spin_lock(&priv->mac_cfg_lock);
 
 	if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
 	    !netdev_mc_empty(dev) || !netdev_uc_empty(dev))
-		tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
+		tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
+			    MAC_CMDCFG_PROMIS_EN);
 	else
-		tse_clear_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
+		tse_clear_bit(priv->mac_dev, tse_csroffs(command_config),
+			      MAC_CMDCFG_PROMIS_EN);
 
 	spin_unlock(&priv->mac_cfg_lock);
 }
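
The multicast hash computed in altera_tse_set_mcfilter() above folds each destination MAC into one of the 64 hash_table buckets; the inner loop is elided by the hunk context, but assuming it reduces each of the six address octets to its bit parity (the usual TSE scheme), a standalone sketch would be:

/* Hypothetical standalone form of that hash, assuming the elided
 * inner loop reduces each of the six address octets to its bit
 * parity; the result indexes the 64-entry hash_table.
 */
static unsigned int tse_mc_hash(const unsigned char *addr)
{
	unsigned int hash = 0;
	int octet, bit;

	for (octet = 0; octet < 6; octet++) {
		unsigned int xor_bit = 0;

		for (bit = 0; bit < 8; bit++)
			xor_bit ^= (addr[octet] >> bit) & 1;

		hash = (hash << 1) | xor_bit;
	}
	return hash;
}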
@@ -1362,6 +1376,11 @@ static int altera_tse_probe(struct platform_device *pdev)
 		of_property_read_bool(pdev->dev.of_node,
 				      "altr,has-hash-multicast-filter");
 
+	/* Force the hash filter off for now, until the
+	 * multicast filter receive issue is debugged
+	 */
+	priv->hash_filter = 0;
+
 	/* get supplemental address settings for this instance */
 	priv->added_unicast =
 		of_property_read_bool(pdev->dev.of_node,
@@ -1493,7 +1512,7 @@ static int altera_tse_remove(struct platform_device *pdev)
 	return 0;
 }
 
-struct altera_dmaops altera_dtype_sgdma = {
+static const struct altera_dmaops altera_dtype_sgdma = {
 	.altera_dtype = ALTERA_DTYPE_SGDMA,
 	.dmamask = 32,
 	.reset_dma = sgdma_reset,
@@ -1512,7 +1531,7 @@ struct altera_dmaops altera_dtype_sgdma = {
 	.start_rxdma = sgdma_start_rxdma,
 };
 
-struct altera_dmaops altera_dtype_msgdma = {
+static const struct altera_dmaops altera_dtype_msgdma = {
 	.altera_dtype = ALTERA_DTYPE_MSGDMA,
 	.dmamask = 64,
 	.reset_dma = msgdma_reset,
diff --git a/drivers/net/ethernet/altera/altera_utils.c b/drivers/net/ethernet/altera/altera_utils.c
index 70fa13f486b2..d7eeb1713ad2 100644
--- a/drivers/net/ethernet/altera/altera_utils.c
+++ b/drivers/net/ethernet/altera/altera_utils.c
@@ -17,28 +17,28 @@
 #include "altera_tse.h"
 #include "altera_utils.h"
 
-void tse_set_bit(void __iomem *ioaddr, u32 bit_mask)
+void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask)
 {
-	u32 value = ioread32(ioaddr);
+	u32 value = csrrd32(ioaddr, offs);
 	value |= bit_mask;
-	iowrite32(value, ioaddr);
+	csrwr32(value, ioaddr, offs);
 }
 
-void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask)
+void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask)
 {
-	u32 value = ioread32(ioaddr);
+	u32 value = csrrd32(ioaddr, offs);
 	value &= ~bit_mask;
-	iowrite32(value, ioaddr);
+	csrwr32(value, ioaddr, offs);
 }
 
-int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask)
+int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask)
 {
-	u32 value = ioread32(ioaddr);
+	u32 value = csrrd32(ioaddr, offs);
 	return (value & bit_mask) ? 1 : 0;
 }
 
-int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask)
+int tse_bit_is_clear(void __iomem *ioaddr, size_t offs, u32 bit_mask)
 {
-	u32 value = ioread32(ioaddr);
+	u32 value = csrrd32(ioaddr, offs);
 	return (value & bit_mask) ? 0 : 1;
 }
diff --git a/drivers/net/ethernet/altera/altera_utils.h b/drivers/net/ethernet/altera/altera_utils.h
index ce1db36d3583..baf100ccf587 100644
--- a/drivers/net/ethernet/altera/altera_utils.h
+++ b/drivers/net/ethernet/altera/altera_utils.h
@@ -19,9 +19,9 @@
 #ifndef __ALTERA_UTILS_H__
 #define __ALTERA_UTILS_H__
 
-void tse_set_bit(void __iomem *ioaddr, u32 bit_mask);
-void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask);
-int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask);
-int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask);
+void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask);
+void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask);
+int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask);
+int tse_bit_is_clear(void __iomem *ioaddr, size_t offs, u32 bit_mask);
 
 #endif /* __ALTERA_UTILS_H__*/
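
The csrrd32()/csrwr32()/tse_csroffs() helpers that altera_utils.c now calls are defined in altera_tse.h (changed in this series but not shown in this section). A minimal sketch of what such offset-based accessors presumably look like, assuming they are thin readl()/writel() wrappers over a base pointer plus byte offset:

/* Sketch only, assuming the helpers are thin MMIO wrappers; the
 * authoritative definitions live in altera_tse.h.
 */
static inline u32 csrrd32(void __iomem *mac, size_t offs)
{
	return readl((u8 __iomem *)mac + offs);
}

static inline void csrwr32(u32 val, void __iomem *mac, size_t offs)
{
	writel(val, (u8 __iomem *)mac + offs);
}

/* Map a register name to its byte offset in the legacy layout. */
#define tse_csroffs(a) (offsetof(struct altera_tse_mac, a))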
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 8d0479d5be8e..02a23c2901c8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -2696,7 +2696,7 @@ out:
 		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
 	}
 
-	return 0;
+	return rc;
 }
 
 int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 0c067e8564dd..784c7155b98a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -747,7 +747,7 @@ int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
 out:
 	bnx2x_vfpf_finalize(bp, &req->first_tlv);
 
-	return 0;
+	return rc;
 }
 
 /* request pf to config rss table for vf queues*/
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 4693d004a223..e1d445dd8564 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4957,6 +4957,12 @@ static void be_eeh_resume(struct pci_dev *pdev)
 	if (status)
 		goto err;
 
+	/* On some BE3 FW versions, after a HW reset,
+	 * interrupts will remain disabled for each function.
+	 * So, explicitly enable interrupts
+	 */
+	be_intr_set(adapter, true);
+
 	/* tell fw we're ready to fire cmds */
 	status = be_cmd_fw_init(adapter);
 	if (status)
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 6e664d9038d6..b78378cea5e3 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -2027,14 +2027,14 @@ jme_fill_tx_map(struct pci_dev *pdev,
 	return 0;
 }
 
-static void jme_drop_tx_map(struct jme_adapter *jme, int startidx, int endidx)
+static void jme_drop_tx_map(struct jme_adapter *jme, int startidx, int count)
 {
 	struct jme_ring *txring = &(jme->txring[0]);
 	struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
 	int mask = jme->tx_ring_mask;
 	int j;
 
-	for (j = startidx ; j < endidx ; ++j) {
+	for (j = 0; j < count; j++) {
 		ctxbi = txbi + ((startidx + j + 2) & (mask));
 		pci_unmap_page(jme->pdev,
 				ctxbi->mapping,
@@ -2069,7 +2069,7 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
 				skb_frag_page(frag),
 				frag->page_offset, skb_frag_size(frag), hidma);
 		if (ret) {
-			jme_drop_tx_map(jme, idx, idx+i);
+			jme_drop_tx_map(jme, idx, i);
 			goto out;
 		}
 
@@ -2081,7 +2081,7 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
 	ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
 			offset_in_page(skb->data), len, hidma);
 	if (ret)
-		jme_drop_tx_map(jme, idx, idx+i);
+		jme_drop_tx_map(jme, idx, i);
 
 out:
 	return ret;
@@ -2269,7 +2269,7 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	}
 
 	if (jme_fill_tx_desc(jme, skb, idx))
-		return NETDEV_TX_BUSY;
+		return NETDEV_TX_OK;
 
 	jwrite32(jme, JME_TXCS, jme->reg_txcs |
 				TXCS_SELECT_QUEUE0 |
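
Two related fixes above: jme_drop_tx_map() now takes a count instead of an absolute end index, so the unmap loop stays correct when the ring wraps, and a failed jme_fill_tx_desc() returns NETDEV_TX_OK rather than NETDEV_TX_BUSY, since the skb's mappings have already been torn down and asking the stack to requeue it would be wrong. A self-contained sketch of the wraparound-safe walk (names hypothetical):

/* Hypothetical sketch: visit 'count' slots starting at 'start' in a
 * power-of-two ring of size mask + 1. Masking the running index
 * wraps correctly; looping from 'start' to an absolute end index
 * would miss slots once the indices wrap past the ring size.
 */
static void ring_for_each_wrapped(int mask, int start, int count,
				  void (*visit)(int idx))
{
	int j;

	for (j = 0; j < count; j++)
		visit((start + j) & mask);
}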
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index ca8e7cb5a8e4..a89e46430c74 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1262,12 +1262,12 @@ static struct mlx4_cmd_info cmd_info[] = {
 	},
 	{
 		.opcode = MLX4_CMD_UPDATE_QP,
-		.has_inbox = false,
+		.has_inbox = true,
 		.has_outbox = false,
 		.out_is_imm = false,
 		.encode_slave_id = false,
 		.verify = NULL,
-		.wrapper = mlx4_CMD_EPERM_wrapper
+		.wrapper = mlx4_UPDATE_QP_wrapper
 	},
 	{
 		.opcode = MLX4_CMD_GET_OP_REQ,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 52c1e7da74c4..9dd1b30ea757 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -1196,6 +1196,12 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
 			   struct mlx4_cmd_mailbox *outbox,
 			   struct mlx4_cmd_info *cmd);
 
+int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
+			   struct mlx4_vhcr *vhcr,
+			   struct mlx4_cmd_mailbox *inbox,
+			   struct mlx4_cmd_mailbox *outbox,
+			   struct mlx4_cmd_info *cmd);
+
 int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
 			 struct mlx4_vhcr *vhcr,
 			 struct mlx4_cmd_mailbox *inbox,
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 9bdb6aeb3721..1d3234a6744d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -389,6 +389,41 @@ err_icm:
 
 EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
 
+#define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC
+int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
+		   enum mlx4_update_qp_attr attr,
+		   struct mlx4_update_qp_params *params)
+{
+	struct mlx4_cmd_mailbox *mailbox;
+	struct mlx4_update_qp_context *cmd;
+	u64 pri_addr_path_mask = 0;
+	int err = 0;
+
+	if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
+		return -EINVAL;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+
+	cmd = (struct mlx4_update_qp_context *)mailbox->buf;
+
+	if (attr & MLX4_UPDATE_QP_SMAC) {
+		pri_addr_path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX;
+		cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
+	}
+
+	cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask);
+
+	err = mlx4_cmd(dev, mailbox->dma, qp->qpn & 0xffffff, 0,
+		       MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
+		       MLX4_CMD_NATIVE);
+
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_update_qp);
+
 void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
 {
 	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
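
A caller-side sketch of the new mlx4_update_qp() helper, updating only the source-MAC index of an existing QP; 'new_smac_index' is an assumed valid SMAC table entry for the QP's port:

/* Hypothetical caller: retarget only the QP's source-MAC index. */
static int retarget_qp_smac(struct mlx4_dev *dev, struct mlx4_qp *qp,
			    u8 new_smac_index)
{
	struct mlx4_update_qp_params params = {
		.smac_index = new_smac_index,
	};

	return mlx4_update_qp(dev, qp, MLX4_UPDATE_QP_SMAC, &params);
}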
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 4094e11f9d4d..dd821b363686 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -3895,6 +3895,60 @@ static int add_eth_header(struct mlx4_dev *dev, int slave,
 
 }
 
+#define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)
+int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
+			   struct mlx4_vhcr *vhcr,
+			   struct mlx4_cmd_mailbox *inbox,
+			   struct mlx4_cmd_mailbox *outbox,
+			   struct mlx4_cmd_info *cmd_info)
+{
+	int err;
+	u32 qpn = vhcr->in_modifier & 0xffffff;
+	struct res_qp *rqp;
+	u64 mac;
+	unsigned port;
+	u64 pri_addr_path_mask;
+	struct mlx4_update_qp_context *cmd;
+	int smac_index;
+
+	cmd = (struct mlx4_update_qp_context *)inbox->buf;
+
+	pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
+	if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
+	    (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
+		return -EPERM;
+
+	/* Just change the smac for the QP */
+	err = get_res(dev, slave, qpn, RES_QP, &rqp);
+	if (err) {
+		mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
+		return err;
+	}
+
+	port = (rqp->sched_queue >> 6 & 1) + 1;
+	smac_index = cmd->qp_context.pri_path.grh_mylmc;
+	err = mac_find_smac_ix_in_slave(dev, slave, port,
+					smac_index, &mac);
+	if (err) {
+		mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
+			 qpn, smac_index);
+		goto err_mac;
+	}
+
+	err = mlx4_cmd(dev, inbox->dma,
+		       vhcr->in_modifier, 0,
+		       MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
+		       MLX4_CMD_NATIVE);
+	if (err) {
+		mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n", qpn);
+		goto err_mac;
+	}
+
+err_mac:
+	put_res(dev, slave, qpn, RES_QP);
+	return err;
+}
+
 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
 					 struct mlx4_vhcr *vhcr,
 					 struct mlx4_cmd_mailbox *inbox,
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 32d969e857f7..89b83e59e1dc 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -156,13 +156,15 @@ void efx_nic_fini_interrupt(struct efx_nic *efx)
 	efx->net_dev->rx_cpu_rmap = NULL;
 #endif
 
-	/* Disable MSI/MSI-X interrupts */
-	efx_for_each_channel(channel, efx)
-		free_irq(channel->irq, &efx->msi_context[channel->channel]);
-
-	/* Disable legacy interrupt */
-	if (efx->legacy_irq)
+	if (EFX_INT_MODE_USE_MSI(efx)) {
+		/* Disable MSI/MSI-X interrupts */
+		efx_for_each_channel(channel, efx)
+			free_irq(channel->irq,
+				 &efx->msi_context[channel->channel]);
+	} else {
+		/* Disable legacy interrupt */
 		free_irq(efx->legacy_irq, efx);
+	}
 }
 
 /* Register dump */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 93cf4f63f426..110ca1c766d6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1704,7 +1704,7 @@ static int stmmac_open(struct net_device *dev)
 		if (ret) {
 			pr_err("%s: Cannot attach to PHY (error: %d)\n",
 			       __func__, ret);
-			goto phy_error;
+			return ret;
 		}
 	}
 
@@ -1779,8 +1779,6 @@ init_error:
 dma_desc_error:
 	if (priv->phydev)
 		phy_disconnect(priv->phydev);
-phy_error:
-	clk_disable_unprepare(priv->stmmac_clk);
 
 	return ret;
 }
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index f4701da19a02..a665e902b989 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -528,8 +528,10 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change)
 	struct macvlan_dev *vlan = netdev_priv(dev);
 	struct net_device *lowerdev = vlan->lowerdev;
 
-	if (change & IFF_ALLMULTI)
-		dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+	if (dev->flags & IFF_UP) {
+		if (change & IFF_ALLMULTI)
+			dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+	}
 }
 
 static void macvlan_set_mac_lists(struct net_device *dev)
@@ -585,6 +587,11 @@ static struct lock_class_key macvlan_netdev_addr_lock_key;
 #define MACVLAN_STATE_MASK \
 	((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
 
+static int macvlan_get_nest_level(struct net_device *dev)
+{
+	return ((struct macvlan_dev *)netdev_priv(dev))->nest_level;
+}
+
 static void macvlan_set_lockdep_class_one(struct net_device *dev,
 					  struct netdev_queue *txq,
 					  void *_unused)
@@ -595,8 +602,9 @@ static void macvlan_set_lockdep_class_one(struct net_device *dev,
 
 static void macvlan_set_lockdep_class(struct net_device *dev)
 {
-	lockdep_set_class(&dev->addr_list_lock,
-			  &macvlan_netdev_addr_lock_key);
+	lockdep_set_class_and_subclass(&dev->addr_list_lock,
+				       &macvlan_netdev_addr_lock_key,
+				       macvlan_get_nest_level(dev));
 	netdev_for_each_tx_queue(dev, macvlan_set_lockdep_class_one, NULL);
 }
 
@@ -790,6 +798,7 @@ static const struct net_device_ops macvlan_netdev_ops = {
 	.ndo_fdb_add		= macvlan_fdb_add,
 	.ndo_fdb_del		= macvlan_fdb_del,
 	.ndo_fdb_dump		= ndo_dflt_fdb_dump,
+	.ndo_get_lock_subclass  = macvlan_get_nest_level,
 };
 
 void macvlan_common_setup(struct net_device *dev)
@@ -922,6 +931,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
 	vlan->dev      = dev;
 	vlan->port     = port;
 	vlan->set_features = MACVLAN_FEATURES;
+	vlan->nest_level = dev_get_nest_level(lowerdev, netif_is_macvlan) + 1;
 
 	vlan->mode     = MACVLAN_MODE_VEPA;
 	if (data && data[IFLA_MACVLAN_MODE])
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index a972056b2249..3bc079a67a3d 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -715,7 +715,7 @@ void phy_state_machine(struct work_struct *work)
 	struct delayed_work *dwork = to_delayed_work(work);
 	struct phy_device *phydev =
 			container_of(dwork, struct phy_device, state_queue);
-	int needs_aneg = 0, do_suspend = 0;
+	bool needs_aneg = false, do_suspend = false, do_resume = false;
 	int err = 0;
 
 	mutex_lock(&phydev->lock);
@@ -727,7 +727,7 @@ void phy_state_machine(struct work_struct *work)
 	case PHY_PENDING:
 		break;
 	case PHY_UP:
-		needs_aneg = 1;
+		needs_aneg = true;
 
 		phydev->link_timeout = PHY_AN_TIMEOUT;
 
@@ -757,7 +757,7 @@ void phy_state_machine(struct work_struct *work)
 			phydev->adjust_link(phydev->attached_dev);
 
 		} else if (0 == phydev->link_timeout--)
-			needs_aneg = 1;
+			needs_aneg = true;
 		break;
 	case PHY_NOLINK:
 		err = phy_read_status(phydev);
@@ -791,7 +791,7 @@ void phy_state_machine(struct work_struct *work)
 			netif_carrier_on(phydev->attached_dev);
 		} else {
 			if (0 == phydev->link_timeout--)
-				needs_aneg = 1;
+				needs_aneg = true;
 		}
 
 		phydev->adjust_link(phydev->attached_dev);
@@ -827,7 +827,7 @@ void phy_state_machine(struct work_struct *work)
 			phydev->link = 0;
 			netif_carrier_off(phydev->attached_dev);
 			phydev->adjust_link(phydev->attached_dev);
-			do_suspend = 1;
+			do_suspend = true;
 		}
 		break;
 	case PHY_RESUMING:
@@ -876,6 +876,7 @@ void phy_state_machine(struct work_struct *work)
 			}
 			phydev->adjust_link(phydev->attached_dev);
 		}
+		do_resume = true;
 		break;
 	}
 
@@ -883,9 +884,10 @@ void phy_state_machine(struct work_struct *work)
 
 	if (needs_aneg)
 		err = phy_start_aneg(phydev);
-
-	if (do_suspend)
+	else if (do_suspend)
 		phy_suspend(phydev);
+	else if (do_resume)
+		phy_resume(phydev);
 
 	if (err < 0)
 		phy_error(phydev);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index eb3b946bd8c0..35d753d22f78 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -615,8 +615,8 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
 	err = phy_init_hw(phydev);
 	if (err)
 		phy_detach(phydev);
-
-	phy_resume(phydev);
+	else
+		phy_resume(phydev);
 
 	return err;
 }
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index f46cd0250e48..5627917c5ff7 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -95,8 +95,10 @@ static void ath9k_htc_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
 
 	if ((vif->type == NL80211_IFTYPE_AP ||
 	     vif->type == NL80211_IFTYPE_MESH_POINT) &&
-	    bss_conf->enable_beacon)
+	    bss_conf->enable_beacon) {
 		priv->reconfig_beacon = true;
+		priv->rearm_ani = true;
+	}
 
 	if (bss_conf->assoc) {
 		priv->rearm_ani = true;
@@ -257,6 +259,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
 
 	ath9k_htc_ps_wakeup(priv);
 
+	ath9k_htc_stop_ani(priv);
 	del_timer_sync(&priv->tx.cleanup_timer);
 	ath9k_htc_tx_drain(priv);
 
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index afb3d15e38ff..be1985296bdc 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -4948,7 +4948,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_if *ifp)
 	if (!err) {
 		/* only set 2G bandwidth using bw_cap command */
 		band_bwcap.band = cpu_to_le32(WLC_BAND_2G);
-		band_bwcap.bw_cap = cpu_to_le32(WLC_BW_40MHZ_BIT);
+		band_bwcap.bw_cap = cpu_to_le32(WLC_BW_CAP_40MHZ);
 		err = brcmf_fil_iovar_data_set(ifp, "bw_cap", &band_bwcap,
 					       sizeof(band_bwcap));
 	} else {
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c
index 8f4b03dbaf3f..4284672d0397 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex.c
@@ -611,14 +611,14 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
 		bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO);
 
 	if (IWL_MVM_BT_COEX_CORUNNING) {
-		bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_CORUN_LUT_20 |
-						    BT_VALID_CORUN_LUT_40);
+		bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_CORUN_LUT_20 |
+						     BT_VALID_CORUN_LUT_40);
 		bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING);
 	}
 
 	if (IWL_MVM_BT_COEX_MPLUT) {
 		bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT);
-		bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
+		bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
 	}
 
 	if (mvm->cfg->bt_shared_single_ant)
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 6174c027ff59..6959fda3fe09 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -187,9 +187,9 @@ enum iwl_scan_type {
  *	this number of packets were received (typically 1)
  * @passive2active: is auto switching from passive to active during scan allowed
  * @rxchain_sel_flags: RXON_RX_CHAIN_*
- * @max_out_time: in usecs, max out of serving channel time
+ * @max_out_time: in TUs, max out of serving channel time
  * @suspend_time: how long to pause scan when returning to service channel:
- *	bits 0-19: beacon interal in usecs (suspend before executing)
+ *	bits 0-19: beacon interval in TUs (suspend before executing)
  *	bits 20-23: reserved
  *	bits 24-31: number of beacons (suspend between channels)
  * @rxon_flags: RXON_FLG_*
@@ -387,8 +387,8 @@ enum scan_framework_client {
  * @quiet_plcp_th:	quiet channel num of packets threshold
  * @good_CRC_th:	passive to active promotion threshold
  * @rx_chain:		RXON rx chain.
- * @max_out_time:	max uSec to be out of assoceated channel
- * @suspend_time:	pause scan this long when returning to service channel
+ * @max_out_time:	max TUs to be out of associated channel
+ * @suspend_time:	pause scan this many TUs when returning to service channel
  * @flags:		RXON flags
  * @filter_flags:	RXONfilter
  * @tx_cmd:		tx command for active scan; for 2GHz and for 5GHz.
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 97c3deae6552..32682edfe5a4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -1005,7 +1005,7 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
 	memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
 	len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
 
-	ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_SYNC, len, cmd);
+	ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd);
 	if (ret)
 		IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
 }
@@ -1021,7 +1021,7 @@ static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm)
 	if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
 		return;
 
-	ieee80211_iterate_active_interfaces(
+	ieee80211_iterate_active_interfaces_atomic(
 		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
 		iwl_mvm_mc_iface_iterator, &iter_data);
 }
@@ -1814,6 +1814,11 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
 
 	mutex_lock(&mvm->mutex);
 
+	if (!iwl_mvm_is_idle(mvm)) {
+		ret = -EBUSY;
+		goto out;
+	}
+
 	switch (mvm->scan_status) {
 	case IWL_MVM_SCAN_OS:
 		IWL_DEBUG_SCAN(mvm, "Stopping previous scan for sched_scan\n");
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index 17c42da5f9f2..107d864b3c0e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -1015,6 +1015,9 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
 	return mvmvif->low_latency;
 }
 
+/* Assoc status */
+bool iwl_mvm_is_idle(struct iwl_mvm *mvm);
+
 /* Thermal management and CT-kill */
 void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
 void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index d44b2b33b5cc..857ddaf6f48c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -1031,7 +1031,7 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
 		return;
 	}
 
-#ifdef CPTCFG_MAC80211_DEBUGFS
+#ifdef CONFIG_MAC80211_DEBUGFS
 	/* Disable last tx check if we are debugging with fixed rate */
 	if (lq_sta->dbg_fixed_rate) {
 		IWL_DEBUG_RATE(mvm, "Fixed rate. avoid rate scaling\n");
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 63e7b16edb55..36ae01a18dee 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -277,51 +277,22 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
 					    IEEE80211_IFACE_ITER_NORMAL,
 					    iwl_mvm_scan_condition_iterator,
 					    &global_bound);
-	/*
-	 * Under low latency traffic passive scan is fragmented meaning
-	 * that dwell on a particular channel will be fragmented. Each fragment
-	 * dwell time is 20ms and fragments period is 105ms. Skipping to next
-	 * channel will be delayed by the same period - 105ms. So suspend_time
-	 * parameter describing both fragments and channels skipping periods is
-	 * set to 105ms. This value is chosen so that overall passive scan
-	 * duration will not be too long. Max_out_time in this case is set to
-	 * 70ms, so for active scanning operating channel will be left for 70ms
-	 * while for passive still for 20ms (fragment dwell).
-	 */
-	if (global_bound) {
-		if (!iwl_mvm_low_latency(mvm)) {
-			params->suspend_time = ieee80211_tu_to_usec(100);
-			params->max_out_time = ieee80211_tu_to_usec(600);
-		} else {
-			params->suspend_time = ieee80211_tu_to_usec(105);
-			/* P2P doesn't support fragmented passive scan, so
-			 * configure max_out_time to be at least longest dwell
-			 * time for passive scan.
-			 */
-			if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
-				params->max_out_time = ieee80211_tu_to_usec(70);
-				params->passive_fragmented = true;
-			} else {
-				u32 passive_dwell;
 
-				/*
-				 * Use band G so that passive channel dwell time
-				 * will be assigned with maximum value.
-				 */
-				band = IEEE80211_BAND_2GHZ;
-				passive_dwell = iwl_mvm_get_passive_dwell(band);
-				params->max_out_time =
-					ieee80211_tu_to_usec(passive_dwell);
-			}
-		}
+	if (!global_bound)
+		goto not_bound;
+
+	params->suspend_time = 100;
+	params->max_out_time = 600;
+
+	if (iwl_mvm_low_latency(mvm)) {
+		params->suspend_time = 250;
+		params->max_out_time = 250;
 	}
 
+not_bound:
+
 	for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
-		if (params->passive_fragmented)
-			params->dwell[band].passive = 20;
-		else
-			params->dwell[band].passive =
-				iwl_mvm_get_passive_dwell(band);
+		params->dwell[band].passive = iwl_mvm_get_passive_dwell(band);
 		params->dwell[band].active = iwl_mvm_get_active_dwell(band,
 								      n_ssids);
 	}
@@ -770,7 +741,7 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
 	int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels;
 	int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
 	int head = 0;
-	int tail = band_2ghz + band_5ghz;
+	int tail = band_2ghz + band_5ghz - 1;
 	u32 ssid_bitmap;
 	int cmd_len;
 	int ret;
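
The tail initialization fixed above is an inclusive-index off-by-one: with band_2ghz + band_5ghz channel slots, the last valid index is the count minus one. A trivial sketch of the intended indexing:

/* Illustration only: n slots means valid indices 0 .. n - 1; head
 * fills upward from 0, tail fills downward from the last valid slot.
 */
static void init_channel_indices(int band_2ghz, int band_5ghz,
				 int *head, int *tail)
{
	int n = band_2ghz + band_5ghz;

	*head = 0;
	*tail = n - 1;	/* 'n' itself would be one past the end */
}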
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index c5f4532cafa9..eb2ca64820fc 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -688,3 +688,22 @@ bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
 
 	return result;
 }
+
+static void iwl_mvm_idle_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
+{
+	bool *idle = _data;
+
+	if (!vif->bss_conf.idle)
+		*idle = false;
+}
+
+bool iwl_mvm_is_idle(struct iwl_mvm *mvm)
+{
+	bool idle = true;
+
+	ieee80211_iterate_active_interfaces_atomic(
+			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+			iwl_mvm_idle_iter, &idle);
+
+	return idle;
+}
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index f98ef1e62eb9..c76b148e1aba 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -1788,6 +1788,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	 * PCI Tx retries from interfering with C3 CPU state */
 	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
 
+	trans->dev = &pdev->dev;
+	trans_pcie->pci_dev = pdev;
+	iwl_disable_interrupts(trans);
+
 	err = pci_enable_msi(pdev);
 	if (err) {
 		dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
@@ -1799,8 +1803,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 		}
 	}
 
-	trans->dev = &pdev->dev;
-	trans_pcie->pci_dev = pdev;
 	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
 	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
 	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
@@ -1826,8 +1828,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 		goto out_pci_disable_msi;
 	}
 
-	trans_pcie->inta_mask = CSR_INI_SET_MASK;
-
 	if (iwl_pcie_alloc_ict(trans))
 		goto out_free_cmd_pool;
 
@@ -1839,6 +1839,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 		goto out_free_ict;
 	}
 
+	trans_pcie->inta_mask = CSR_INI_SET_MASK;
+
 	return trans;
 
 out_free_ict:
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 630a3fcf65bc..0d4a285cbd7e 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -226,7 +226,7 @@ int xenvif_map_frontend_rings(struct xenvif *vif,
 			      grant_ref_t rx_ring_ref);
 
 /* Check for SKBs from frontend and schedule backend processing */
-void xenvif_check_rx_xenvif(struct xenvif *vif);
+void xenvif_napi_schedule_or_enable_events(struct xenvif *vif);
 
 /* Prevent the device from generating any further traffic. */
 void xenvif_carrier_off(struct xenvif *vif);
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index a7557331699f..53cdcdf3dfa1 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -75,32 +75,8 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
 	work_done = xenvif_tx_action(vif, budget);
 
 	if (work_done < budget) {
-		int more_to_do = 0;
-		unsigned long flags;
-
-		/* It is necessary to disable IRQ before calling
-		 * RING_HAS_UNCONSUMED_REQUESTS. Otherwise we might
-		 * lose event from the frontend.
-		 *
-		 * Consider:
-		 *   RING_HAS_UNCONSUMED_REQUESTS
-		 *   <frontend generates event to trigger napi_schedule>
-		 *   __napi_complete
-		 *
-		 * This handler is still in scheduled state so the
-		 * event has no effect at all. After __napi_complete
-		 * this handler is descheduled and cannot get
-		 * scheduled again. We lose event in this case and the ring
-		 * will be completely stalled.
-		 */
-
-		local_irq_save(flags);
-
-		RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
-		if (!more_to_do)
-			__napi_complete(napi);
-
-		local_irq_restore(flags);
+		napi_complete(napi);
+		xenvif_napi_schedule_or_enable_events(vif);
 	}
 
 	return work_done;
@@ -194,7 +170,7 @@ static void xenvif_up(struct xenvif *vif)
 	enable_irq(vif->tx_irq);
 	if (vif->tx_irq != vif->rx_irq)
 		enable_irq(vif->rx_irq);
-	xenvif_check_rx_xenvif(vif);
+	xenvif_napi_schedule_or_enable_events(vif);
 }
 
 static void xenvif_down(struct xenvif *vif)
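
xenvif_poll() now completes NAPI first and then asks the renamed helper to re-check the ring, rescheduling if requests arrived in the window; this replaces the IRQ-disabled __napi_complete() dance removed above. The helper's full body is not shown in this section; given the ring macros it replaces, a plausible sketch is:

/* Hedged sketch, not the verbatim helper: re-arm frontend events via
 * RING_FINAL_CHECK_FOR_REQUESTS and reschedule NAPI if the check
 * reports outstanding requests, closing the complete-vs-event race.
 */
void xenvif_napi_schedule_or_enable_events(struct xenvif *vif)
{
	int more_to_do;

	RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);

	if (more_to_do)
		napi_schedule(&vif->napi);
}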
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 76665405c5aa..7367208ee8cd 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -104,7 +104,7 @@ static inline unsigned long idx_to_kaddr(struct xenvif *vif,
 
 /* Find the containing VIF's structure from a pointer in pending_tx_info array
  */
-static inline struct xenvif* ubuf_to_vif(struct ubuf_info *ubuf)
+static inline struct xenvif *ubuf_to_vif(const struct ubuf_info *ubuf)
 {
 	u16 pending_idx = ubuf->desc;
 	struct pending_tx_info *temp =
@@ -323,6 +323,35 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
 }
 
 /*
+ * Find the grant ref for a given frag in a chain of struct ubuf_info's
+ * skb: the skb itself
+ * i: the frag's number
+ * ubuf: a pointer to an element in the chain. It should not be NULL
+ *
+ * Returns a pointer to the element in the chain where the page was found. If
+ * not found, returns NULL.
+ * See the definition of callback_struct in common.h for more details about
+ * the chain.
+ */
+static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb,
+						const int i,
+						const struct ubuf_info *ubuf)
+{
+	struct xenvif *foreign_vif = ubuf_to_vif(ubuf);
+
+	do {
+		u16 pending_idx = ubuf->desc;
+
+		if (skb_shinfo(skb)->frags[i].page.p ==
+		    foreign_vif->mmap_pages[pending_idx])
+			break;
+		ubuf = (struct ubuf_info *) ubuf->ctx;
+	} while (ubuf);
+
+	return ubuf;
+}
+
+/*
  * Prepare an SKB to be transmitted to the frontend.
  *
  * This function is responsible for allocating grant operations, meta
@@ -346,9 +375,8 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 	int head = 1;
 	int old_meta_prod;
 	int gso_type;
-	struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
-	grant_ref_t foreign_grefs[MAX_SKB_FRAGS];
-	struct xenvif *foreign_vif = NULL;
+	const struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
+	const struct ubuf_info *const head_ubuf = ubuf;
 
 	old_meta_prod = npo->meta_prod;
 
@@ -386,19 +414,6 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 	npo->copy_off = 0;
 	npo->copy_gref = req->gref;
 
-	if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
-		 (ubuf->callback == &xenvif_zerocopy_callback)) {
-		int i = 0;
-		foreign_vif = ubuf_to_vif(ubuf);
-
-		do {
-			u16 pending_idx = ubuf->desc;
-			foreign_grefs[i++] =
-				foreign_vif->pending_tx_info[pending_idx].req.gref;
-			ubuf = (struct ubuf_info *) ubuf->ctx;
-		} while (ubuf);
-	}
-
 	data = skb->data;
 	while (data < skb_tail_pointer(skb)) {
 		unsigned int offset = offset_in_page(data);
@@ -415,13 +430,60 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 	}
 
 	for (i = 0; i < nr_frags; i++) {
+		/* This variable also signals whether foreign_gref has a real
+		 * value or not.
+		 */
+		struct xenvif *foreign_vif = NULL;
+		grant_ref_t foreign_gref;
+
+		if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
+			(ubuf->callback == &xenvif_zerocopy_callback)) {
+			const struct ubuf_info *const startpoint = ubuf;
+
+			/* Ideally ubuf points to the chain element which
+			 * belongs to this frag. Or if frags were removed from
+			 * the beginning, then shortly before it.
+			 */
+			ubuf = xenvif_find_gref(skb, i, ubuf);
+
+			/* Try again from the beginning of the list, if we
+			 * haven't tried from there. This only makes sense in
+			 * the unlikely event of reordering the original frags.
+			 * For injected local pages it's an unnecessary second
+			 * run.
+			 */
+			if (unlikely(!ubuf) && startpoint != head_ubuf)
+				ubuf = xenvif_find_gref(skb, i, head_ubuf);
+
+			if (likely(ubuf)) {
+				u16 pending_idx = ubuf->desc;
+
+				foreign_vif = ubuf_to_vif(ubuf);
+				foreign_gref = foreign_vif->pending_tx_info[pending_idx].req.gref;
+				/* Just a safety measure. If this was the last
+				 * element on the list, the for loop will
+				 * iterate again if a local page was added to
+				 * the end. Using head_ubuf here prevents the
+				 * second search on the chain. Or the original
+				 * frags changed order, but that's less likely.
+				 * In any case, ubuf shouldn't be NULL.
+				 */
+				ubuf = ubuf->ctx ?
+					(struct ubuf_info *) ubuf->ctx :
+					head_ubuf;
+			} else
+				/* This frag was a local page, added to the
+				 * array after the skb left netback.
+				 */
+				ubuf = head_ubuf;
+		}
 		xenvif_gop_frag_copy(vif, skb, npo,
 				     skb_frag_page(&skb_shinfo(skb)->frags[i]),
 				     skb_frag_size(&skb_shinfo(skb)->frags[i]),
 				     skb_shinfo(skb)->frags[i].page_offset,
 				     &head,
 				     foreign_vif,
-				     foreign_grefs[i]);
+				     foreign_vif ? foreign_gref : UINT_MAX);
 	}
 
 	return npo->meta_prod - old_meta_prod;
@@ -654,7 +716,7 @@ done:
 		notify_remote_via_irq(vif->rx_irq);
 }
 
-void xenvif_check_rx_xenvif(struct xenvif *vif)
+void xenvif_napi_schedule_or_enable_events(struct xenvif *vif)
 {
 	int more_to_do;
 
@@ -688,7 +750,7 @@ static void tx_credit_callback(unsigned long data)
 {
 	struct xenvif *vif = (struct xenvif *)data;
 	tx_add_credit(vif);
-	xenvif_check_rx_xenvif(vif);
+	xenvif_napi_schedule_or_enable_events(vif);
 }
 
 static void xenvif_tx_err(struct xenvif *vif,