 include/linux/pkt_sched.h |  50
 include/net/inet_ecn.h    |  28
 include/net/red.h         | 325
 net/sched/sch_gred.c      | 841
 net/sched/sch_red.c       | 418
 5 files changed, 891 insertions(+), 771 deletions(-)
diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h
index 60ffcb9c5791..e87b233615b3 100644
--- a/include/linux/pkt_sched.h
+++ b/include/linux/pkt_sched.h
@@ -93,6 +93,7 @@ struct tc_fifo_qopt
 /* PRIO section */
 
 #define TCQ_PRIO_BANDS	16
+#define TCQ_MIN_PRIO_BANDS 2
 
 struct tc_prio_qopt
 {
@@ -169,6 +170,7 @@ struct tc_red_qopt
 	unsigned char   Scell_log;	/* cell size for idle damping */
 	unsigned char	flags;
 #define TC_RED_ECN	1
+#define TC_RED_HARDDROP	2
 };
 
 struct tc_red_xstats
@@ -194,38 +196,34 @@ enum
 
 #define TCA_GRED_MAX (__TCA_GRED_MAX - 1)
 
-#define TCA_SET_OFF TCA_GRED_PARMS
 struct tc_gred_qopt
 {
-       __u32           limit;          /* HARD maximal queue length (bytes)    */
-       __u32           qth_min;        /* Min average length threshold (bytes) */
-       __u32           qth_max;        /* Max average length threshold (bytes) */
-       __u32           DP;             /* upto 2^32 DPs */
-       __u32           backlog;        
-       __u32           qave;   
-       __u32           forced; 
-       __u32           early;  
-       __u32           other;  
-       __u32           pdrop;  
-
-       unsigned char   Wlog;           /* log(W)               */
-       unsigned char   Plog;           /* log(P_max/(qth_max-qth_min)) */
-       unsigned char   Scell_log;      /* cell size for idle damping */
-       __u8            prio;		/* prio of this VQ */
-       __u32	packets;
-       __u32	bytesin;
+	__u32		limit;        /* HARD maximal queue length (bytes)    */
+	__u32		qth_min;      /* Min average length threshold (bytes) */
+	__u32		qth_max;      /* Max average length threshold (bytes) */
+	__u32		DP;           /* up to 2^32 DPs */
+	__u32		backlog;
+	__u32		qave;
+	__u32		forced;
+	__u32		early;
+	__u32		other;
+	__u32		pdrop;
+	__u8		Wlog;         /* log(W)               */
+	__u8		Plog;         /* log(P_max/(qth_max-qth_min)) */
+	__u8		Scell_log;    /* cell size for idle damping */
+	__u8		prio;         /* prio of this VQ */
+	__u32		packets;
+	__u32		bytesin;
 };
+
 /* gred setup */
 struct tc_gred_sopt
 {
-       __u32		DPs;
-       __u32		def_DP;
-       __u8		grio;
-       __u8		pad1;
-       __u16		pad2;
+	__u32		DPs;
+	__u32		def_DP;
+	__u8		grio;
+	__u8		flags;
+	__u16		pad1;
 };
 
 /* HTB section */
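
The new TC_RED_HARDDROP flag shares the flags byte with TC_RED_ECN, and tc_gred_sopt now carries the same flags for GRED. As a minimal sketch of how a userspace configurator could fill these structures (illustrative values only, not part of this patch; it reuses the Plog=22 example from the RED documentation):

	#include <linux/pkt_sched.h>
	#include <string.h>

	/* Hypothetical helper: request ECN marking, hard drop over qth_max. */
	static void example_red_opt(struct tc_red_qopt *opt)
	{
		memset(opt, 0, sizeof(*opt));
		opt->limit     = 256 * 1024;	/* HARD queue limit, bytes */
		opt->qth_min   = 32 * 1024;	/* min average threshold */
		opt->qth_max   = 128 * 1024;	/* max average threshold */
		opt->Wlog      = 9;		/* W = 2^-9 */
		opt->Plog      = 22;		/* max_P ~= 0.02, see red.h */
		opt->Scell_log = 10;
		opt->flags     = TC_RED_ECN | TC_RED_HARDDROP;
	}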
diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h
index f87845e2e965..b0c47e2eccf1 100644
--- a/include/net/inet_ecn.h
+++ b/include/net/inet_ecn.h
@@ -2,6 +2,7 @@
 #define _INET_ECN_H_
 
 #include <linux/ip.h>
+#include <linux/skbuff.h>
 #include <net/dsfield.h>
 
 enum {
@@ -48,7 +49,7 @@ static inline __u8 INET_ECN_encapsulate(__u8 outer, __u8 inner)
 		(label) |= __constant_htons(INET_ECN_ECT_0 << 4);	\
     } while (0)
 
-static inline void IP_ECN_set_ce(struct iphdr *iph)
+static inline int IP_ECN_set_ce(struct iphdr *iph)
 {
 	u32 check = iph->check;
 	u32 ecn = (iph->tos + 1) & INET_ECN_MASK;
@@ -61,7 +62,7 @@ static inline void IP_ECN_set_ce(struct iphdr *iph)
 	 * INET_ECN_CE      => 00
 	 */
 	if (!(ecn & 2))
-		return;
+		return !ecn;
 
 	/*
 	 * The following gives us:
@@ -72,6 +73,7 @@ static inline void IP_ECN_set_ce(struct iphdr *iph)
 
 	iph->check = check + (check>=0xFFFF);
 	iph->tos |= INET_ECN_CE;
+	return 1;
 }
 
 static inline void IP_ECN_clear(struct iphdr *iph)
@@ -87,11 +89,12 @@ static inline void ipv4_copy_dscp(struct iphdr *outer, struct iphdr *inner)
 
 struct ipv6hdr;
 
-static inline void IP6_ECN_set_ce(struct ipv6hdr *iph)
+static inline int IP6_ECN_set_ce(struct ipv6hdr *iph)
 {
 	if (INET_ECN_is_not_ect(ipv6_get_dsfield(iph)))
-		return;
+		return 0;
 	*(u32*)iph |= htonl(INET_ECN_CE << 20);
+	return 1;
 }
 
 static inline void IP6_ECN_clear(struct ipv6hdr *iph)
@@ -105,4 +108,21 @@ static inline void ipv6_copy_dscp(struct ipv6hdr *outer, struct ipv6hdr *inner)
 	ipv6_change_dsfield(inner, INET_ECN_MASK, dscp);
 }
 
+static inline int INET_ECN_set_ce(struct sk_buff *skb)
+{
+	switch (skb->protocol) {
+	case __constant_htons(ETH_P_IP):
+		if (skb->nh.raw + sizeof(struct iphdr) <= skb->tail)
+			return IP_ECN_set_ce(skb->nh.iph);
+		break;
+
+	case __constant_htons(ETH_P_IPV6):
+		if (skb->nh.raw + sizeof(struct ipv6hdr) <= skb->tail)
+			return IP6_ECN_set_ce(skb->nh.ipv6h);
+		break;
+	}
+
+	return 0;
+}
+
 #endif
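
The int-returning ECN helpers let callers distinguish "marked or already CE" from "not ECN-capable". The classification rests on the (tos + 1) & INET_ECN_MASK trick documented inline; a stand-alone sketch (illustrative only, not from the patch) walks all four codepoints:

	#include <stdio.h>

	/* ECN codepoints in the two low tos bits:
	 * Not-ECT=00, ECT(1)=01, ECT(0)=10, CE=11.
	 */
	int main(void)
	{
		static const char *name[] = { "Not-ECT", "ECT(1)", "ECT(0)", "CE" };
		unsigned int tos;

		for (tos = 0; tos < 4; tos++) {
			unsigned int ecn = (tos + 1) & 3;

			/* ecn & 2 set => ECT(0)/ECT(1), markable; otherwise
			 * !ecn separates CE (1, already marked) from Not-ECT
			 * (0, cannot mark), matching IP_ECN_set_ce()'s returns.
			 */
			printf("%-7s ecn=%u markable=%u\n",
			       name[tos], ecn, (ecn & 2) ? 1u : 0u);
		}
		return 0;
	}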
diff --git a/include/net/red.h b/include/net/red.h
new file mode 100644
index 000000000000..2ed4358e3295
--- /dev/null
+++ b/include/net/red.h
@@ -0,0 +1,325 @@
+#ifndef __NET_SCHED_RED_H
+#define __NET_SCHED_RED_H
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <net/pkt_sched.h>
+#include <net/inet_ecn.h>
+#include <net/dsfield.h>
+
+/*	Random Early Detection (RED) algorithm.
+	=======================================
+
+	Source: Sally Floyd and Van Jacobson, "Random Early Detection Gateways
+	for Congestion Avoidance", 1993, IEEE/ACM Transactions on Networking.
+
+	This file codes a "divisionless" version of the RED algorithm
+	as written down in Fig.17 of the paper.
+
+	Short description.
+	------------------
+
+	When a new packet arrives we calculate the average queue length:
+
+	avg = (1-W)*avg + W*current_queue_len,
+
+	W is the filter time constant (chosen as 2^(-Wlog)); it controls
+	the inertia of the algorithm. To allow larger bursts, W should be
+	decreased.
+
+	if (avg > th_max) -> packet marked (dropped).
+	if (avg < th_min) -> packet passes.
+	if (th_min < avg < th_max) we calculate probability:
+
+	Pb = max_P * (avg - th_min)/(th_max-th_min)
+
+	and mark (drop) packet with this probability.
+	Pb changes from 0 (at avg==th_min) to max_P (avg==th_max).
+	max_P should be small (not 1); usually 0.01..0.02 is a good value.
+
+	max_P is chosen as a number such that max_P/(th_max-th_min)
+	is a negative power of two, so that the arithmetic reduces to
+	shifts only.
+
+
+	Parameters, settable by user:
+	-----------------------------
+
+	qth_min		- bytes (should be < qth_max/2)
+	qth_max		- bytes (should be at least 2*qth_min and less than limit)
+	Wlog	       	- bits (<32) log(1/W).
+	Plog	       	- bits (<32)
+
+	Plog is related to max_P by formula:
+
+	max_P = (qth_max-qth_min)/2^Plog;
+
+	F.e. if qth_max=128K and qth_min=32K, then Plog=22
+	corresponds to max_P=0.02
+
+	Scell_log
+	Stab
+
+	Lookup table for log((1-W)^(t/t_ave)).
+
+
+	NOTES:
+
+	Upper bound on W.
+	-----------------
+
+	If you want to allow bursts of L packets of size S,
+	you should choose W:
+
+	L + 1 - th_min/S < (1-(1-W)^L)/W
+
+	th_min/S = 32         th_min/S = 4
+
+	log(W)	L
+	-1	33
+	-2	35
+	-3	39
+	-4	46
+	-5	57
+	-6	75
+	-7	101
+	-8	135
+	-9	190
+	etc.
+ */
+
+#define RED_STAB_SIZE	256
+#define RED_STAB_MASK	(RED_STAB_SIZE - 1)
+
+struct red_stats
+{
+	u32		prob_drop;	/* Early probability drops */
+	u32		prob_mark;	/* Early probability marks */
+	u32		forced_drop;	/* Forced drops, qavg > max_thresh */
+	u32		forced_mark;	/* Forced marks, qavg > max_thresh */
+	u32		pdrop;          /* Drops due to queue limits */
+	u32		other;          /* Drops due to drop() calls */
+	u32		backlog;
+};
+
+struct red_parms
+{
+	/* Parameters */
+	u32		qth_min;	/* Min avg length threshold: A scaled */
+	u32		qth_max;	/* Max avg length threshold: A scaled */
+	u32		Scell_max;
+	u32		Rmask;		/* Cached random mask, see red_rmask */
+	u8		Scell_log;
+	u8		Wlog;		/* log(W)		*/
+	u8		Plog;		/* random number bits	*/
+	u8		Stab[RED_STAB_SIZE];
+
+	/* Variables */
+	int		qcount;		/* Number of packets since last random
+					   number generation */
+	u32		qR;		/* Cached random number */
+
+	unsigned long	qavg;		/* Average queue length: A scaled */
+	psched_time_t	qidlestart;	/* Start of current idle period */
+};
+
+static inline u32 red_rmask(u8 Plog)
+{
+	return Plog < 32 ? ((1 << Plog) - 1) : ~0UL;
+}
+
+static inline void red_set_parms(struct red_parms *p,
+				 u32 qth_min, u32 qth_max, u8 Wlog, u8 Plog,
+				 u8 Scell_log, u8 *stab)
+{
+	/* Reset average queue length, the value is strictly bound
+	 * to the parameters below, resetting hurts a bit but leaving
+	 * it might result in an unreasonable qavg for a while. --TGR
+	 */
+	p->qavg		= 0;
+
+	p->qcount	= -1;
+	p->qth_min	= qth_min << Wlog;
+	p->qth_max	= qth_max << Wlog;
+	p->Wlog		= Wlog;
+	p->Plog		= Plog;
+	p->Rmask	= red_rmask(Plog);
+	p->Scell_log	= Scell_log;
+	p->Scell_max	= (255 << Scell_log);
+
+	memcpy(p->Stab, stab, sizeof(p->Stab));
+}
+
+static inline int red_is_idling(struct red_parms *p)
+{
+	return !PSCHED_IS_PASTPERFECT(p->qidlestart);
+}
+
+static inline void red_start_of_idle_period(struct red_parms *p)
+{
+	PSCHED_GET_TIME(p->qidlestart);
+}
+
+static inline void red_end_of_idle_period(struct red_parms *p)
+{
+	PSCHED_SET_PASTPERFECT(p->qidlestart);
+}
+
+static inline void red_restart(struct red_parms *p)
+{
+	red_end_of_idle_period(p);
+	p->qavg = 0;
+	p->qcount = -1;
+}
+
+static inline unsigned long red_calc_qavg_from_idle_time(struct red_parms *p)
+{
+	psched_time_t now;
+	long us_idle;
+	int  shift;
+
+	PSCHED_GET_TIME(now);
+	us_idle = PSCHED_TDIFF_SAFE(now, p->qidlestart, p->Scell_max);
+
+	/*
+	 * The problem: ideally, average queue length recalculation should
+	 * be done over constant clock intervals. This is too expensive, so
+	 * the calculation is driven by outgoing packets instead.
+	 * When the queue is idle we have to model this clock by hand.
+	 *
+	 * SF+VJ proposed to "generate":
+	 *
+	 *	m = idletime / (average_pkt_size / bandwidth)
+	 *
+	 * dummy packets as a burst after idle time, i.e.
+	 *
+	 * 	p->qavg *= (1-W)^m
+	 *
+	 * This is an apparently overcomplicated solution (f.e. we have to
+	 * precompute a table to make this calculation in reasonable time).
+	 * I believe that a simpler model may be used here,
+	 * but it is a field for experiments.
+	 */
+
+	shift = p->Stab[(us_idle >> p->Scell_log) & RED_STAB_MASK];
+
+	if (shift)
+		return p->qavg >> shift;
+	else {
+		/* Approximate initial part of exponent with linear function:
+		 *
+		 * 	(1-W)^m ~= 1-mW + ...
+		 *
+		 * This seems to be the best solution to the
+		 * problem of too coarse exponent tabulation.
+		 */
+		us_idle = (p->qavg * us_idle) >> p->Scell_log;
+
+		if (us_idle < (p->qavg >> 1))
+			return p->qavg - us_idle;
+		else
+			return p->qavg >> 1;
+	}
+}
+
+static inline unsigned long red_calc_qavg_no_idle_time(struct red_parms *p,
+						       unsigned int backlog)
+{
+	/*
+	 * NOTE: p->qavg is a fixed point number with the point at Wlog.
+	 * The formula below is equivalent to the floating point
+	 * version:
+	 *
+	 * 	qavg = qavg*(1-W) + backlog*W;
+	 *
+	 * --ANK (980924)
+	 */
+	return p->qavg + (backlog - (p->qavg >> p->Wlog));
+}
+
+static inline unsigned long red_calc_qavg(struct red_parms *p,
+					  unsigned int backlog)
+{
+	if (!red_is_idling(p))
+		return red_calc_qavg_no_idle_time(p, backlog);
+	else
+		return red_calc_qavg_from_idle_time(p);
+}
+
+static inline u32 red_random(struct red_parms *p)
+{
+	return net_random() & p->Rmask;
+}
+
+static inline int red_mark_probability(struct red_parms *p, unsigned long qavg)
+{
+	/* The formula used below causes questions.
+
+	   OK. qR is a random number in the interval 0..Rmask,
+	   i.e. 0..(2^Plog). If we used floating point
+	   arithmetic, it would be: (2^Plog)*rnd_num,
+	   where rnd_num is less than 1.
+
+	   Taking into account that qavg has a fixed
+	   point at Wlog, and Plog is related to max_P by
+	   max_P = (qth_max-qth_min)/2^Plog; two lines
+	   below have the following floating point equivalent:
+
+	   max_P*(qavg - qth_min)/(qth_max-qth_min) < rnd/qcount
+
+	   Any questions? --ANK (980924)
+	 */
+	return !(((qavg - p->qth_min) >> p->Wlog) * p->qcount < p->qR);
+}
+
+enum {
+	RED_BELOW_MIN_THRESH,
+	RED_BETWEEN_TRESH,
+	RED_ABOVE_MAX_TRESH,
+};
+
+static inline int red_cmp_thresh(struct red_parms *p, unsigned long qavg)
+{
+	if (qavg < p->qth_min)
+		return RED_BELOW_MIN_THRESH;
+	else if (qavg >= p->qth_max)
+		return RED_ABOVE_MAX_TRESH;
+	else
+		return RED_BETWEEN_TRESH;
+}
+
+enum {
+	RED_DONT_MARK,
+	RED_PROB_MARK,
+	RED_HARD_MARK,
+};
+
+static inline int red_action(struct red_parms *p, unsigned long qavg)
+{
+	switch (red_cmp_thresh(p, qavg)) {
+		case RED_BELOW_MIN_THRESH:
+			p->qcount = -1;
+			return RED_DONT_MARK;
+
+		case RED_BETWEEN_TRESH:
+			if (++p->qcount) {
+				if (red_mark_probability(p, qavg)) {
+					p->qcount = 0;
+					p->qR = red_random(p);
+					return RED_PROB_MARK;
+				}
+			} else
+				p->qR = red_random(p);
+
+			return RED_DONT_MARK;
+
+		case RED_ABOVE_MAX_TRESH:
+			p->qcount = -1;
+			return RED_HARD_MARK;
+	}
+
+	BUG();
+	return RED_DONT_MARK;
+}
+
+#endif
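
Everything above is fixed point with Wlog fractional bits: red_set_parms() pre-shifts the thresholds so they compare directly against the scaled qavg, and red_calc_qavg_no_idle_time() folds the whole EWMA into one add and one shift. A toy user-space harness (an assumption for illustration, not kernel code) shows the convergence:

	#include <stdio.h>

	int main(void)
	{
		const unsigned int Wlog = 9;	/* W = 2^-9 */
		unsigned long qavg = 0;		/* scaled: real avg << Wlog */
		const unsigned long backlog = 64 * 1024;
		int i;

		/* qavg += backlog - (qavg >> Wlog) is the shift-only form of
		 * qavg = qavg*(1-W) + backlog*W on the scaled value.
		 */
		for (i = 0; i < 8192; i++)
			qavg += backlog - (qavg >> Wlog);

		printf("average ~= %lu bytes\n", qavg >> Wlog);	/* -> ~65536 */
		return 0;
	}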
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 25c171c32715..29a2dd9f3029 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -15,247 +15,281 @@
  *		         from Ren Liu
  *		       - More error checks
  *
- *
- *
- *  For all the glorious comments look at Alexey's sch_red.c
+ *  For all the glorious comments look at include/net/red.h
  */
 
 #include <linux/config.h>
 #include <linux/module.h>
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <linux/bitops.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/socket.h>
-#include <linux/sockios.h>
-#include <linux/in.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/if_ether.h>
-#include <linux/inet.h>
 #include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/notifier.h>
-#include <net/ip.h>
-#include <net/route.h>
 #include <linux/skbuff.h>
-#include <net/sock.h>
 #include <net/pkt_sched.h>
+#include <net/red.h>
 
-#if 1 /* control */
-#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args)
-#else
-#define DPRINTK(format,args...)
-#endif
-
-#if 0 /* data */
-#define D2PRINTK(format,args...) printk(KERN_DEBUG format,##args)
-#else
-#define D2PRINTK(format,args...)
-#endif
+#define GRED_DEF_PRIO (MAX_DPs / 2)
+#define GRED_VQ_MASK (MAX_DPs - 1)
 
 struct gred_sched_data;
 struct gred_sched;
 
 struct gred_sched_data
 {
-/* Parameters */
 	u32		limit;		/* HARD maximal queue length	*/
-	u32		qth_min;	/* Min average length threshold: A scaled */
-	u32		qth_max;	/* Max average length threshold: A scaled */
 	u32      	DP;		/* the drop pramaters */
-	char		Wlog;		/* log(W)		*/
-	char		Plog;		/* random number bits	*/
-	u32		Scell_max;
-	u32		Rmask;
 	u32		bytesin;	/* bytes seen on virtualQ so far*/
 	u32		packetsin;	/* packets seen on virtualQ so far*/
 	u32		backlog;	/* bytes on the virtualQ */
-	u32		forced;	/* packets dropped for exceeding limits */
-	u32		early;	/* packets dropped as a warning */
-	u32		other;	/* packets dropped by invoking drop() */
-	u32		pdrop;	/* packets dropped because we exceeded physical queue limits */
-	char		Scell_log;
-	u8		Stab[256];
-	u8              prio;        /* the prio of this vq */
-
-/* Variables */
-	unsigned long	qave;		/* Average queue length: A scaled */
-	int		qcount;		/* Packets since last random number generation */
-	u32		qR;		/* Cached random number */
-
-	psched_time_t	qidlestart;	/* Start of idle period	*/
+	u8		prio;		/* the prio of this vq */
+
+	struct red_parms parms;
+	struct red_stats stats;
+};
+
+enum {
+	GRED_WRED_MODE = 1,
+	GRED_RIO_MODE,
 };
 
 struct gred_sched
 {
 	struct gred_sched_data *tab[MAX_DPs];
-	u32 		DPs;   
-	u32 		def; 
-	u8 		initd; 
-	u8 		grio; 
-	u8 		eqp; 
+	unsigned long	flags;
+	u32		red_flags;
+	u32 		DPs;
+	u32 		def;
+	struct red_parms wred_set;
 };
 
-static int
-gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+static inline int gred_wred_mode(struct gred_sched *table)
 {
-	psched_time_t now;
-	struct gred_sched_data *q=NULL;
-	struct gred_sched *t= qdisc_priv(sch);
-	unsigned long	qave=0;	
-	int i=0;
+	return test_bit(GRED_WRED_MODE, &table->flags);
+}
+
+static inline void gred_enable_wred_mode(struct gred_sched *table)
+{
+	__set_bit(GRED_WRED_MODE, &table->flags);
+}
+
+static inline void gred_disable_wred_mode(struct gred_sched *table)
+{
+	__clear_bit(GRED_WRED_MODE, &table->flags);
+}
+
+static inline int gred_rio_mode(struct gred_sched *table)
+{
+	return test_bit(GRED_RIO_MODE, &table->flags);
+}
+
+static inline void gred_enable_rio_mode(struct gred_sched *table)
+{
+	__set_bit(GRED_RIO_MODE, &table->flags);
+}
+
+static inline void gred_disable_rio_mode(struct gred_sched *table)
+{
+	__clear_bit(GRED_RIO_MODE, &table->flags);
+}
+
+static inline int gred_wred_mode_check(struct Qdisc *sch)
+{
+	struct gred_sched *table = qdisc_priv(sch);
+	int i;
 
-	if (!t->initd && skb_queue_len(&sch->q) < (sch->dev->tx_queue_len ? : 1)) {
-		D2PRINTK("NO GRED Queues setup yet! Enqueued anyway\n");
-		goto do_enqueue;
+	/* Really ugly O(n^2) but shouldn't need to run too frequently. */
+	for (i = 0; i < table->DPs; i++) {
+		struct gred_sched_data *q = table->tab[i];
+		int n;
+
+		if (q == NULL)
+			continue;
+
+		for (n = 0; n < table->DPs; n++)
+			if (table->tab[n] && table->tab[n] != q &&
+			    table->tab[n]->prio == q->prio)
+				return 1;
 	}
 
+	return 0;
+}
+
+static inline unsigned int gred_backlog(struct gred_sched *table,
+					struct gred_sched_data *q,
+					struct Qdisc *sch)
+{
+	if (gred_wred_mode(table))
+		return sch->qstats.backlog;
+	else
+		return q->backlog;
+}
+
+static inline u16 tc_index_to_dp(struct sk_buff *skb)
+{
+	return skb->tc_index & GRED_VQ_MASK;
+}
+
+static inline void gred_load_wred_set(struct gred_sched *table,
+				      struct gred_sched_data *q)
+{
+	q->parms.qavg = table->wred_set.qavg;
+	q->parms.qidlestart = table->wred_set.qidlestart;
+}
+
+static inline void gred_store_wred_set(struct gred_sched *table,
+				       struct gred_sched_data *q)
+{
+	table->wred_set.qavg = q->parms.qavg;
+}
+
+static inline int gred_use_ecn(struct gred_sched *t)
+{
+	return t->red_flags & TC_RED_ECN;
+}
 
-	if ( ((skb->tc_index&0xf) > (t->DPs -1)) || !(q=t->tab[skb->tc_index&0xf])) {
-		printk("GRED: setting to default (%d)\n ",t->def);
-		if (!(q=t->tab[t->def])) {
-			DPRINTK("GRED: setting to default FAILED! dropping!! "
-			    "(%d)\n ", t->def);
-			goto drop;
+static inline int gred_use_harddrop(struct gred_sched *t)
+{
+	return t->red_flags & TC_RED_HARDDROP;
+}
+
+static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+{
+	struct gred_sched_data *q=NULL;
+	struct gred_sched *t= qdisc_priv(sch);
+	unsigned long qavg = 0;
+	u16 dp = tc_index_to_dp(skb);
+
+	if (dp >= t->DPs  || (q = t->tab[dp]) == NULL) {
+		dp = t->def;
+
+		if ((q = t->tab[dp]) == NULL) {
+			/* Pass through packets not assigned to a DP
+			 * if no default DP has been configured. This
+			 * allows for DP flows to be left untouched.
+			 */
+			if (skb_queue_len(&sch->q) < sch->dev->tx_queue_len)
+				return qdisc_enqueue_tail(skb, sch);
+			else
+				goto drop;
 		}
+
 		/* fix tc_index? --could be controvesial but needed for
 		   requeueing */
-		skb->tc_index=(skb->tc_index&0xfffffff0) | t->def;
+		skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
 	}
 
-	D2PRINTK("gred_enqueue virtualQ 0x%x classid %x backlog %d "
-	    "general backlog %d\n",skb->tc_index&0xf,sch->handle,q->backlog,
-	    sch->qstats.backlog);
-	/* sum up all the qaves of prios <= to ours to get the new qave*/
-	if (!t->eqp && t->grio) {
-		for (i=0;i<t->DPs;i++) {
-			if ((!t->tab[i]) || (i==q->DP))	
-				continue; 
-				
-			if ((t->tab[i]->prio < q->prio) && (PSCHED_IS_PASTPERFECT(t->tab[i]->qidlestart)))
-				qave +=t->tab[i]->qave;
+	/* Sum up the qaves of all VQs with a lower prio to get the new qave */
+	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
+		int i;
+
+		for (i = 0; i < t->DPs; i++) {
+			if (t->tab[i] && t->tab[i]->prio < q->prio &&
+			    !red_is_idling(&t->tab[i]->parms))
+			    qavg += t->tab[i]->parms.qavg;
 		}
-			
+
 	}
 
 	q->packetsin++;
-	q->bytesin+=skb->len;
+	q->bytesin += skb->len;
 
-	if (t->eqp && t->grio) {
-		qave=0;
-		q->qave=t->tab[t->def]->qave;
-		q->qidlestart=t->tab[t->def]->qidlestart;
-	}
+	if (gred_wred_mode(t))
+		gred_load_wred_set(t, q);
 
-	if (!PSCHED_IS_PASTPERFECT(q->qidlestart)) {
-		long us_idle;
-		PSCHED_GET_TIME(now);
-		us_idle = PSCHED_TDIFF_SAFE(now, q->qidlestart, q->Scell_max);
-		PSCHED_SET_PASTPERFECT(q->qidlestart);
+	q->parms.qavg = red_calc_qavg(&q->parms, gred_backlog(t, q, sch));
 
-		q->qave >>= q->Stab[(us_idle>>q->Scell_log)&0xFF];
-	} else {
-		if (t->eqp) {
-			q->qave += sch->qstats.backlog - (q->qave >> q->Wlog);
-		} else {
-			q->qave += q->backlog - (q->qave >> q->Wlog);
-		}
+	if (red_is_idling(&q->parms))
+		red_end_of_idle_period(&q->parms);
 
-	}
-	
-
-	if (t->eqp && t->grio) 
-		t->tab[t->def]->qave=q->qave;
-
-	if ((q->qave+qave) < q->qth_min) {
-		q->qcount = -1;
-enqueue:
-		if (q->backlog + skb->len <= q->limit) {
-			q->backlog += skb->len;
-do_enqueue:
-			__skb_queue_tail(&sch->q, skb);
-			sch->qstats.backlog += skb->len;
-			sch->bstats.bytes += skb->len;
-			sch->bstats.packets++;
-			return 0;
-		} else {
-			q->pdrop++;
-		}
+	if (gred_wred_mode(t))
+		gred_store_wred_set(t, q);
 
-drop:
-		kfree_skb(skb);
-		sch->qstats.drops++;
-		return NET_XMIT_DROP;
-	}
-	if ((q->qave+qave) >= q->qth_max) {
-		q->qcount = -1;
-		sch->qstats.overlimits++;
-		q->forced++;
-		goto drop;
+	switch (red_action(&q->parms, q->parms.qavg + qavg)) {
+		case RED_DONT_MARK:
+			break;
+
+		case RED_PROB_MARK:
+			sch->qstats.overlimits++;
+			if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
+				q->stats.prob_drop++;
+				goto congestion_drop;
+			}
+
+			q->stats.prob_mark++;
+			break;
+
+		case RED_HARD_MARK:
+			sch->qstats.overlimits++;
+			if (gred_use_harddrop(t) || !gred_use_ecn(t) ||
+			    !INET_ECN_set_ce(skb)) {
+				q->stats.forced_drop++;
+				goto congestion_drop;
+			}
+			q->stats.forced_mark++;
+			break;
 	}
-	if (++q->qcount) {
-		if ((((qave+q->qave) - q->qth_min)>>q->Wlog)*q->qcount < q->qR)
-			goto enqueue;
-		q->qcount = 0;
-		q->qR = net_random()&q->Rmask;
-		sch->qstats.overlimits++;
-		q->early++;
-		goto drop;
+
+	if (q->backlog + skb->len <= q->limit) {
+		q->backlog += skb->len;
+		return qdisc_enqueue_tail(skb, sch);
 	}
-	q->qR = net_random()&q->Rmask;
-	goto enqueue;
+
+	q->stats.pdrop++;
+drop:
+	return qdisc_drop(skb, sch);
+
+congestion_drop:
+	qdisc_drop(skb, sch);
+	return NET_XMIT_CN;
 }
 
-static int
-gred_requeue(struct sk_buff *skb, struct Qdisc* sch)
+static int gred_requeue(struct sk_buff *skb, struct Qdisc* sch)
 {
+	struct gred_sched *t = qdisc_priv(sch);
 	struct gred_sched_data *q;
-	struct gred_sched *t= qdisc_priv(sch);
-	q= t->tab[(skb->tc_index&0xf)];
-/* error checking here -- probably unnecessary */
-	PSCHED_SET_PASTPERFECT(q->qidlestart);
-
-	__skb_queue_head(&sch->q, skb);
-	sch->qstats.backlog += skb->len;
-	sch->qstats.requeues++;
-	q->backlog += skb->len;
-	return 0;
+	u16 dp = tc_index_to_dp(skb);
+
+	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
+		if (net_ratelimit())
+			printk(KERN_WARNING "GRED: Unable to relocate VQ 0x%x "
+			       "for requeue, screwing up backlog.\n",
+			       tc_index_to_dp(skb));
+	} else {
+		if (red_is_idling(&q->parms))
+			red_end_of_idle_period(&q->parms);
+		q->backlog += skb->len;
+	}
+
+	return qdisc_requeue(skb, sch);
 }
 
-static struct sk_buff *
-gred_dequeue(struct Qdisc* sch)
+static struct sk_buff *gred_dequeue(struct Qdisc* sch)
 {
 	struct sk_buff *skb;
-	struct gred_sched_data *q;
-	struct gred_sched *t= qdisc_priv(sch);
+	struct gred_sched *t = qdisc_priv(sch);
+
+	skb = qdisc_dequeue_head(sch);
 
-	skb = __skb_dequeue(&sch->q);
 	if (skb) {
-		sch->qstats.backlog -= skb->len;
-		q= t->tab[(skb->tc_index&0xf)];
-		if (q) {
-			q->backlog -= skb->len;
-			if (!q->backlog && !t->eqp)
-				PSCHED_GET_TIME(q->qidlestart);
+		struct gred_sched_data *q;
+		u16 dp = tc_index_to_dp(skb);
+
+		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
+			if (net_ratelimit())
+				printk(KERN_WARNING "GRED: Unable to relocate "
+				       "VQ 0x%x after dequeue, screwing up "
+				       "backlog.\n", tc_index_to_dp(skb));
 		} else {
-			D2PRINTK("gred_dequeue: skb has bad tcindex %x\n",skb->tc_index&0xf); 
+			q->backlog -= skb->len;
+
+			if (!q->backlog && !gred_wred_mode(t))
+				red_start_of_idle_period(&q->parms);
 		}
+
 		return skb;
 	}
 
-	if (t->eqp) {
-			q= t->tab[t->def];
-			if (!q)	
-				D2PRINTK("no default VQ set: Results will be "
-				       "screwed up\n");
-			else
-				PSCHED_GET_TIME(q->qidlestart);
-	}
+	if (gred_wred_mode(t) && !red_is_idling(&t->wred_set))
+		red_start_of_idle_period(&t->wred_set);
 
 	return NULL;
 }
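
The classification at the top of gred_enqueue() replaces the old open-coded 0xf masks. Condensed into a stand-alone sketch (hypothetical types; MAX_DPs == 16 as in pkt_sched.h):

	#define MAX_DPs      16
	#define GRED_VQ_MASK (MAX_DPs - 1)

	struct vq;	/* stand-in for struct gred_sched_data */

	/* Low tc_index bits pick the DP; out-of-range or empty slots fall
	 * back to the default DP, which may itself be empty, in which
	 * case the packet is passed through untouched.
	 */
	static struct vq *classify_dp(struct vq *tab[], unsigned int DPs,
				      unsigned int def, unsigned int tc_index)
	{
		unsigned int dp = tc_index & GRED_VQ_MASK;

		if (dp >= DPs || tab[dp] == NULL)
			dp = def;

		return tab[dp];
	}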
@@ -263,36 +297,34 @@ gred_dequeue(struct Qdisc* sch)
 static unsigned int gred_drop(struct Qdisc* sch)
 {
 	struct sk_buff *skb;
+	struct gred_sched *t = qdisc_priv(sch);
 
-	struct gred_sched_data *q;
-	struct gred_sched *t= qdisc_priv(sch);
-
-	skb = __skb_dequeue_tail(&sch->q);
+	skb = qdisc_dequeue_tail(sch);
 	if (skb) {
 		unsigned int len = skb->len;
-		sch->qstats.backlog -= len;
-		sch->qstats.drops++;
-		q= t->tab[(skb->tc_index&0xf)];
-		if (q) {
-			q->backlog -= len;
-			q->other++;
-			if (!q->backlog && !t->eqp)
-				PSCHED_GET_TIME(q->qidlestart);
+		struct gred_sched_data *q;
+		u16 dp = tc_index_to_dp(skb);
+
+		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
+			if (net_ratelimit())
+				printk(KERN_WARNING "GRED: Unable to relocate "
+				       "VQ 0x%x while dropping, screwing up "
+				       "backlog.\n", tc_index_to_dp(skb));
 		} else {
-			D2PRINTK("gred_dequeue: skb has bad tcindex %x\n",skb->tc_index&0xf); 
+			q->backlog -= len;
+			q->stats.other++;
+
+			if (!q->backlog && !gred_wred_mode(t))
+				red_start_of_idle_period(&q->parms);
 		}
 
-		kfree_skb(skb);
+		qdisc_drop(skb, sch);
 		return len;
 	}
 
-	q=t->tab[t->def];
-	if (!q) {
-		D2PRINTK("no default VQ set: Results might be screwed up\n");
-		return 0;
-	}
+	if (gred_wred_mode(t) && !red_is_idling(&t->wred_set))
+		red_start_of_idle_period(&t->wred_set);
 
-	PSCHED_GET_TIME(q->qidlestart);
 	return 0;
 
 }
@@ -300,293 +332,241 @@ static unsigned int gred_drop(struct Qdisc* sch)
 static void gred_reset(struct Qdisc* sch)
 {
 	int i;
-	struct gred_sched_data *q;
-	struct gred_sched *t= qdisc_priv(sch);
+	struct gred_sched *t = qdisc_priv(sch);
+
+	qdisc_reset_queue(sch);
 
-	__skb_queue_purge(&sch->q);
+        for (i = 0; i < t->DPs; i++) {
+		struct gred_sched_data *q = t->tab[i];
 
-	sch->qstats.backlog = 0;
+		if (!q)
+			continue;
 
-        for (i=0;i<t->DPs;i++) {
-	        q= t->tab[i];
-		if (!q)	
-			continue; 
-		PSCHED_SET_PASTPERFECT(q->qidlestart);
-		q->qave = 0;
-		q->qcount = -1;
+		red_restart(&q->parms);
 		q->backlog = 0;
-		q->other=0;
-		q->forced=0;
-		q->pdrop=0;
-		q->early=0;
 	}
 }
 
-static int gred_change(struct Qdisc *sch, struct rtattr *opt)
+static inline void gred_destroy_vq(struct gred_sched_data *q)
+{
+	kfree(q);
+}
+
+static inline int gred_change_table_def(struct Qdisc *sch, struct rtattr *dps)
 {
 	struct gred_sched *table = qdisc_priv(sch);
-	struct gred_sched_data *q;
-	struct tc_gred_qopt *ctl;
 	struct tc_gred_sopt *sopt;
-	struct rtattr *tb[TCA_GRED_STAB];
-	struct rtattr *tb2[TCA_GRED_DPS];
 	int i;
 
-	if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_STAB, opt))
+	if (dps == NULL || RTA_PAYLOAD(dps) < sizeof(*sopt))
 		return -EINVAL;
 
-	if (tb[TCA_GRED_PARMS-1] == 0 && tb[TCA_GRED_STAB-1] == 0) {
-		rtattr_parse_nested(tb2, TCA_GRED_DPS, opt);
+	sopt = RTA_DATA(dps);
+
+	if (sopt->DPs > MAX_DPs || sopt->DPs == 0 || sopt->def_DP >= sopt->DPs)
+		return -EINVAL;
 
-	    if (tb2[TCA_GRED_DPS-1] == 0) 
-			return -EINVAL;
+	sch_tree_lock(sch);
+	table->DPs = sopt->DPs;
+	table->def = sopt->def_DP;
+	table->red_flags = sopt->flags;
+
+	/*
+	 * Every entry point to GRED is synchronized with the above code
+	 * and the DP is checked against DPs, i.e. shadowed VQs can no
+	 * longer be found so we can unlock right here.
+	 */
+	sch_tree_unlock(sch);
+
+	if (sopt->grio) {
+		gred_enable_rio_mode(table);
+		gred_disable_wred_mode(table);
+		if (gred_wred_mode_check(sch))
+			gred_enable_wred_mode(table);
+	} else {
+		gred_disable_rio_mode(table);
+		gred_disable_wred_mode(table);
+	}
 
-		sopt = RTA_DATA(tb2[TCA_GRED_DPS-1]);
-		table->DPs=sopt->DPs;   
-		table->def=sopt->def_DP; 
-		table->grio=sopt->grio; 
-		table->initd=0;
-		/* probably need to clear all the table DP entries as well */
-		return 0;
-	    }
+	for (i = table->DPs; i < MAX_DPs; i++) {
+		if (table->tab[i]) {
+			printk(KERN_WARNING "GRED: Warning: Destroying "
+			       "shadowed VQ 0x%x\n", i);
+			gred_destroy_vq(table->tab[i]);
+			table->tab[i] = NULL;
+  		}
+	}
 
+	return 0;
+}
 
-	if (!table->DPs || tb[TCA_GRED_PARMS-1] == 0 || tb[TCA_GRED_STAB-1] == 0 ||
-		RTA_PAYLOAD(tb[TCA_GRED_PARMS-1]) < sizeof(*ctl) ||
-		RTA_PAYLOAD(tb[TCA_GRED_STAB-1]) < 256)
-			return -EINVAL;
+static inline int gred_change_vq(struct Qdisc *sch, int dp,
+				 struct tc_gred_qopt *ctl, int prio, u8 *stab)
+{
+	struct gred_sched *table = qdisc_priv(sch);
+	struct gred_sched_data *q;
 
-	ctl = RTA_DATA(tb[TCA_GRED_PARMS-1]);
-	if (ctl->DP > MAX_DPs-1 ) {
-		/* misbehaving is punished! Put in the default drop probability */
-		DPRINTK("\nGRED: DP %u not in  the proper range fixed. New DP "
-			"set to default at %d\n",ctl->DP,table->def);
-		ctl->DP=table->def;
-	}
-	
-	if (table->tab[ctl->DP] == NULL) {
-		table->tab[ctl->DP]=kmalloc(sizeof(struct gred_sched_data),
-					    GFP_KERNEL);
-		if (NULL == table->tab[ctl->DP])
+	if (table->tab[dp] == NULL) {
+		table->tab[dp] = kmalloc(sizeof(*q), GFP_KERNEL);
+		if (table->tab[dp] == NULL)
 			return -ENOMEM;
-		memset(table->tab[ctl->DP], 0, (sizeof(struct gred_sched_data)));
-	}
-	q= table->tab[ctl->DP]; 
-
-	if (table->grio) {
-		if (ctl->prio <=0) {
-			if (table->def && table->tab[table->def]) {
-				DPRINTK("\nGRED: DP %u does not have a prio"
-					"setting default to %d\n",ctl->DP,
-					table->tab[table->def]->prio);
-				q->prio=table->tab[table->def]->prio;
-			} else { 
-				DPRINTK("\nGRED: DP %u does not have a prio"
-					" setting default to 8\n",ctl->DP);
-				q->prio=8;
-			}
-		} else {
-			q->prio=ctl->prio;
-		}
-	} else {
-		q->prio=8;
+		memset(table->tab[dp], 0, sizeof(*q));
 	}
 
-
-	q->DP=ctl->DP;
-	q->Wlog = ctl->Wlog;
-	q->Plog = ctl->Plog;
+	q = table->tab[dp];
+	q->DP = dp;
+	q->prio = prio;
 	q->limit = ctl->limit;
-	q->Scell_log = ctl->Scell_log;
-	q->Rmask = ctl->Plog < 32 ? ((1<<ctl->Plog) - 1) : ~0UL;
-	q->Scell_max = (255<<q->Scell_log);
-	q->qth_min = ctl->qth_min<<ctl->Wlog;
-	q->qth_max = ctl->qth_max<<ctl->Wlog;
-	q->qave=0;
-	q->backlog=0;
-	q->qcount = -1;
-	q->other=0;
-	q->forced=0;
-	q->pdrop=0;
-	q->early=0;
-
-	PSCHED_SET_PASTPERFECT(q->qidlestart);
-	memcpy(q->Stab, RTA_DATA(tb[TCA_GRED_STAB-1]), 256);
-
-	if ( table->initd && table->grio) {
-	/* this looks ugly but it's not in the fast path */
-		for (i=0;i<table->DPs;i++) {
-			if ((!table->tab[i]) || (i==q->DP) )    
-				continue; 
-			if (table->tab[i]->prio == q->prio ){
-				/* WRED mode detected */
-				table->eqp=1;
-				break;
-			}
-		}
-	}
 
-	if (!table->initd) {
-		table->initd=1;
-		/* 
-        	the first entry also goes into the default until
-        	over-written 
-		*/
-
-		if (table->tab[table->def] == NULL) {
-			table->tab[table->def]=
-				kmalloc(sizeof(struct gred_sched_data), GFP_KERNEL);
-			if (NULL == table->tab[table->def])
-				return -ENOMEM;
-
-			memset(table->tab[table->def], 0,
-			       (sizeof(struct gred_sched_data)));
-		}
-		q= table->tab[table->def]; 
-		q->DP=table->def;
-		q->Wlog = ctl->Wlog;
-		q->Plog = ctl->Plog;
-		q->limit = ctl->limit;
-		q->Scell_log = ctl->Scell_log;
-		q->Rmask = ctl->Plog < 32 ? ((1<<ctl->Plog) - 1) : ~0UL;
-		q->Scell_max = (255<<q->Scell_log);
-		q->qth_min = ctl->qth_min<<ctl->Wlog;
-		q->qth_max = ctl->qth_max<<ctl->Wlog;
-
-		if (table->grio)
-			q->prio=table->tab[ctl->DP]->prio;
-		else
-			q->prio=8;
-
-		q->qcount = -1;
-		PSCHED_SET_PASTPERFECT(q->qidlestart);
-		memcpy(q->Stab, RTA_DATA(tb[TCA_GRED_STAB-1]), 256);
-	}
-	return 0;
+	if (q->backlog == 0)
+		red_end_of_idle_period(&q->parms);
 
+	red_set_parms(&q->parms,
+		      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
+		      ctl->Scell_log, stab);
+
+	return 0;
 }
 
-static int gred_init(struct Qdisc *sch, struct rtattr *opt)
+static int gred_change(struct Qdisc *sch, struct rtattr *opt)
 {
 	struct gred_sched *table = qdisc_priv(sch);
-	struct tc_gred_sopt *sopt;
-	struct rtattr *tb[TCA_GRED_STAB];
-	struct rtattr *tb2[TCA_GRED_DPS];
+	struct tc_gred_qopt *ctl;
+	struct rtattr *tb[TCA_GRED_MAX];
+	int err = -EINVAL, prio = GRED_DEF_PRIO;
+	u8 *stab;
 
-	if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_STAB, opt))
+	if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_MAX, opt))
 		return -EINVAL;
 
-	if (tb[TCA_GRED_PARMS-1] == 0 && tb[TCA_GRED_STAB-1] == 0) {
-		rtattr_parse_nested(tb2, TCA_GRED_DPS, opt);
+	if (tb[TCA_GRED_PARMS-1] == NULL && tb[TCA_GRED_STAB-1] == NULL)
+		return gred_change_table_def(sch, opt);
+
+	if (tb[TCA_GRED_PARMS-1] == NULL ||
+	    RTA_PAYLOAD(tb[TCA_GRED_PARMS-1]) < sizeof(*ctl) ||
+	    tb[TCA_GRED_STAB-1] == NULL ||
+	    RTA_PAYLOAD(tb[TCA_GRED_STAB-1]) < 256)
+		return -EINVAL;
+
+	ctl = RTA_DATA(tb[TCA_GRED_PARMS-1]);
+	stab = RTA_DATA(tb[TCA_GRED_STAB-1]);
+
+	if (ctl->DP >= table->DPs)
+		goto errout;
 
-	    if (tb2[TCA_GRED_DPS-1] == 0) 
-			return -EINVAL;
+	if (gred_rio_mode(table)) {
+		if (ctl->prio == 0) {
+			int def_prio = GRED_DEF_PRIO;
 
-		sopt = RTA_DATA(tb2[TCA_GRED_DPS-1]);
-		table->DPs=sopt->DPs;   
-		table->def=sopt->def_DP; 
-		table->grio=sopt->grio; 
-		table->initd=0;
-		return 0;
+			if (table->tab[table->def])
+				def_prio = table->tab[table->def]->prio;
+
+			printk(KERN_DEBUG "GRED: DP %u does not have a prio, "
+			       "setting default to %d\n", ctl->DP, def_prio);
+
+			prio = def_prio;
+		} else
+			prio = ctl->prio;
+	}
+
+	sch_tree_lock(sch);
+
+	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab);
+	if (err < 0)
+		goto errout_locked;
+
+	if (gred_rio_mode(table)) {
+		gred_disable_wred_mode(table);
+		if (gred_wred_mode_check(sch))
+			gred_enable_wred_mode(table);
 	}
 
-	DPRINTK("\n GRED_INIT error!\n");
-	return -EINVAL;
+	err = 0;
+
+errout_locked:
+	sch_tree_unlock(sch);
+errout:
+	return err;
 }
 
-static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
+static int gred_init(struct Qdisc *sch, struct rtattr *opt)
 {
-	unsigned long qave;
-	struct rtattr *rta;
-	struct tc_gred_qopt *opt = NULL ;
-	struct tc_gred_qopt *dst;
-	struct gred_sched *table = qdisc_priv(sch);
-	struct gred_sched_data *q;
-	int i;
-	unsigned char	 *b = skb->tail;
+	struct rtattr *tb[TCA_GRED_MAX];
 
-	rta = (struct rtattr*)b;
-	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
+	if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_MAX, opt))
+		return -EINVAL;
 
-	opt=kmalloc(sizeof(struct tc_gred_qopt)*MAX_DPs, GFP_KERNEL);
+	if (tb[TCA_GRED_PARMS-1] || tb[TCA_GRED_STAB-1])
+		return -EINVAL;
 
-	if (opt  == NULL) {
-		DPRINTK("gred_dump:failed to malloc for %Zd\n",
-		    sizeof(struct tc_gred_qopt)*MAX_DPs);
-		goto rtattr_failure;
-	}
+	return gred_change_table_def(sch, tb[TCA_GRED_DPS-1]);
+}
 
-	memset(opt, 0, (sizeof(struct tc_gred_qopt))*table->DPs);
+static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+	struct gred_sched *table = qdisc_priv(sch);
+	struct rtattr *parms, *opts = NULL;
+	int i;
+	struct tc_gred_sopt sopt = {
+		.DPs	= table->DPs,
+		.def_DP	= table->def,
+		.grio	= gred_rio_mode(table),
+		.flags	= table->red_flags,
+	};
 
-	if (!table->initd) {
-		DPRINTK("NO GRED Queues setup!\n");
-	}
+	opts = RTA_NEST(skb, TCA_OPTIONS);
+	RTA_PUT(skb, TCA_GRED_DPS, sizeof(sopt), &sopt);
+	parms = RTA_NEST(skb, TCA_GRED_PARMS);
+
+	for (i = 0; i < MAX_DPs; i++) {
+		struct gred_sched_data *q = table->tab[i];
+		struct tc_gred_qopt opt;
 
-	for (i=0;i<MAX_DPs;i++) {
-		dst= &opt[i]; 
-		q= table->tab[i]; 
+		memset(&opt, 0, sizeof(opt));
 
 		if (!q) {
 			/* hack -- fix at some point with proper message
 			   This is how we indicate to tc that there is no VQ
 			   at this DP */
 
-			dst->DP=MAX_DPs+i;
-			continue;
+			opt.DP = MAX_DPs + i;
+			goto append_opt;
 		}
 
-		dst->limit=q->limit;
-		dst->qth_min=q->qth_min>>q->Wlog;
-		dst->qth_max=q->qth_max>>q->Wlog;
-		dst->DP=q->DP;
-		dst->backlog=q->backlog;
-		if (q->qave) {
-			if (table->eqp && table->grio) {
-				q->qidlestart=table->tab[table->def]->qidlestart;
-				q->qave=table->tab[table->def]->qave;
-			}
-			if (!PSCHED_IS_PASTPERFECT(q->qidlestart)) {
-				long idle;
-				psched_time_t now;
-				PSCHED_GET_TIME(now);
-				idle = PSCHED_TDIFF_SAFE(now, q->qidlestart, q->Scell_max);
-				qave  = q->qave >> q->Stab[(idle>>q->Scell_log)&0xFF];
-				dst->qave = qave >> q->Wlog;
-
-			} else {
-				dst->qave = q->qave >> q->Wlog;
-			}
-		} else {
-			dst->qave = 0;
+		opt.limit	= q->limit;
+		opt.DP		= q->DP;
+		opt.backlog	= q->backlog;
+		opt.prio	= q->prio;
+		opt.qth_min	= q->parms.qth_min >> q->parms.Wlog;
+		opt.qth_max	= q->parms.qth_max >> q->parms.Wlog;
+		opt.Wlog	= q->parms.Wlog;
+		opt.Plog	= q->parms.Plog;
+		opt.Scell_log	= q->parms.Scell_log;
+		opt.other	= q->stats.other;
+		opt.early	= q->stats.prob_drop;
+		opt.forced	= q->stats.forced_drop;
+		opt.pdrop	= q->stats.pdrop;
+		opt.packets	= q->packetsin;
+		opt.bytesin	= q->bytesin;
+
+		if (gred_wred_mode(table)) {
+			q->parms.qidlestart =
+				table->tab[table->def]->parms.qidlestart;
+			q->parms.qavg = table->tab[table->def]->parms.qavg;
 		}
-		
-
-		dst->Wlog = q->Wlog;
-		dst->Plog = q->Plog;
-		dst->Scell_log = q->Scell_log;
-		dst->other = q->other;
-		dst->forced = q->forced;
-		dst->early = q->early;
-		dst->pdrop = q->pdrop;
-		dst->prio = q->prio;
-		dst->packets=q->packetsin;
-		dst->bytesin=q->bytesin;
+
+		opt.qave = red_calc_qavg(&q->parms, q->parms.qavg);
+
+append_opt:
+		RTA_APPEND(skb, sizeof(opt), &opt);
 	}
 
-	RTA_PUT(skb, TCA_GRED_PARMS, sizeof(struct tc_gred_qopt)*MAX_DPs, opt);
-	rta->rta_len = skb->tail - b;
+	RTA_NEST_END(skb, parms);
 
-	kfree(opt);
-	return skb->len;
+	return RTA_NEST_END(skb, opts);
 
 rtattr_failure:
-	if (opt)
-		kfree(opt);
-	DPRINTK("gred_dump: FAILURE!!!!\n");
-
-/* also free the opt struct here */
-	skb_trim(skb, b - skb->data);
-	return -1;
+	return RTA_NEST_CANCEL(skb, opts);
 }
 
 static void gred_destroy(struct Qdisc *sch)
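
Note the sentinel in gred_dump(): empty DP slots are reported with opt.DP set to MAX_DPs + i. A reader on the netlink side could separate the two cases like this (illustrative sketch, not taken from iproute2; MAX_DPs comes from linux/pkt_sched.h):

	#include <linux/pkt_sched.h>

	static int vq_is_configured(const struct tc_gred_qopt *opt)
	{
		/* DP >= MAX_DPs is the "no VQ at this DP" marker */
		return opt->DP < MAX_DPs;
	}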
@@ -594,15 +574,13 @@ static void gred_destroy(struct Qdisc *sch)
 	struct gred_sched *table = qdisc_priv(sch);
 	int i;
 
-	for (i = 0;i < table->DPs; i++) {
+	for (i = 0; i < table->DPs; i++) {
 		if (table->tab[i])
-			kfree(table->tab[i]);
+			gred_destroy_vq(table->tab[i]);
 	}
 }
 
 static struct Qdisc_ops gred_qdisc_ops = {
-	.next		=	NULL,
-	.cl_ops		=	NULL,
 	.id		=	"gred",
 	.priv_size	=	sizeof(struct gred_sched),
 	.enqueue	=	gred_enqueue,
@@ -621,10 +599,13 @@ static int __init gred_module_init(void)
 {
 	return register_qdisc(&gred_qdisc_ops);
 }
-static void __exit gred_module_exit(void) 
+
+static void __exit gred_module_exit(void)
 {
 	unregister_qdisc(&gred_qdisc_ops);
 }
+
 module_init(gred_module_init)
 module_exit(gred_module_exit)
+
 MODULE_LICENSE("GPL");
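
Both gred_change_table_def() and gred_change_vq() re-run gred_wred_mode_check() so that WRED mode tracks whether any two virtual queues share a priority. Stripped of the qdisc plumbing, the scan amounts to a duplicate-prio test (sketch with hypothetical types):

	#include <stddef.h>

	struct vq { int prio; };

	/* Returns 1 when two non-empty slots share a prio, i.e. the
	 * table should operate in WRED mode with one shared average.
	 */
	static int has_duplicate_prio(struct vq *tab[], int n)
	{
		int i, j;

		for (i = 0; i < n; i++) {
			if (tab[i] == NULL)
				continue;
			for (j = i + 1; j < n; j++)
				if (tab[j] && tab[j]->prio == tab[i]->prio)
					return 1;
		}
		return 0;
	}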
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 7845d045eec4..dccfa44c2d71 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -9,76 +9,23 @@
  * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
  *
  * Changes:
- * J Hadi Salim <hadi@nortel.com> 980914:	computation fixes
+ * J Hadi Salim 980914:	computation fixes
  * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
- * J Hadi Salim <hadi@nortelnetworks.com> 980816:  ECN support	
+ * J Hadi Salim 980816:  ECN support
  */
 
 #include <linux/config.h>
 #include <linux/module.h>
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <linux/bitops.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/socket.h>
-#include <linux/sockios.h>
-#include <linux/in.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/if_ether.h>
-#include <linux/inet.h>
 #include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/notifier.h>
-#include <net/ip.h>
-#include <net/route.h>
 #include <linux/skbuff.h>
-#include <net/sock.h>
 #include <net/pkt_sched.h>
 #include <net/inet_ecn.h>
-#include <net/dsfield.h>
+#include <net/red.h>
 
 
-/*	Random Early Detection (RED) algorithm.
-	=======================================
-
-	Source: Sally Floyd and Van Jacobson, "Random Early Detection Gateways
-	for Congestion Avoidance", 1993, IEEE/ACM Transactions on Networking.
-
-	This file codes a "divisionless" version of RED algorithm
-	as written down in Fig.17 of the paper.
-
-Short description.
-------------------
-
-	When a new packet arrives we calculate the average queue length:
-
-	avg = (1-W)*avg + W*current_queue_len,
-
-	W is the filter time constant (chosen as 2^(-Wlog)), it controls
-	the inertia of the algorithm. To allow larger bursts, W should be
-	decreased.
-
-	if (avg > th_max) -> packet marked (dropped).
-	if (avg < th_min) -> packet passes.
-	if (th_min < avg < th_max) we calculate probability:
-
-	Pb = max_P * (avg - th_min)/(th_max-th_min)
-
-	and mark (drop) packet with this probability.
-	Pb changes from 0 (at avg==th_min) to max_P (avg==th_max).
-	max_P should be small (not 1), usually 0.01..0.02 is good value.
-
-	max_P is chosen as a number, so that max_P/(th_max-th_min)
-	is a negative power of two in order arithmetics to contain
-	only shifts.
-
-
-	Parameters, settable by user:
+/*	Parameters, settable by user:
 	-----------------------------
 
 	limit		- bytes (must be > qth_max + burst)
@@ -89,243 +36,93 @@ Short description.
 	arbitrarily high (well, less than ram size)
 	Really, this limit will never be reached
 	if RED works correctly.
-
-	qth_min		- bytes (should be < qth_max/2)
-	qth_max		- bytes (should be at least 2*qth_min and less limit)
-	Wlog	       	- bits (<32) log(1/W).
-	Plog	       	- bits (<32)
-
-	Plog is related to max_P by formula:
-
-	max_P = (qth_max-qth_min)/2^Plog;
-
-	F.e. if qth_max=128K and qth_min=32K, then Plog=22
-	corresponds to max_P=0.02
-
-	Scell_log
-	Stab
-
-	Lookup table for log((1-W)^(t/t_ave).
-
-
-NOTES:
-
-Upper bound on W.
------------------
-
-	If you want to allow bursts of L packets of size S,
-	you should choose W:
-
-	L + 1 - th_min/S < (1-(1-W)^L)/W
-
-	th_min/S = 32         th_min/S = 4
-			                       
-	log(W)	L
-	-1	33
-	-2	35
-	-3	39
-	-4	46
-	-5	57
-	-6	75
-	-7	101
-	-8	135
-	-9	190
-	etc.
  */
 
 struct red_sched_data
 {
-/* Parameters */
-	u32		limit;		/* HARD maximal queue length	*/
-	u32		qth_min;	/* Min average length threshold: A scaled */
-	u32		qth_max;	/* Max average length threshold: A scaled */
-	u32		Rmask;
-	u32		Scell_max;
-	unsigned char	flags;
-	char		Wlog;		/* log(W)		*/
-	char		Plog;		/* random number bits	*/
-	char		Scell_log;
-	u8		Stab[256];
-
-/* Variables */
-	unsigned long	qave;		/* Average queue length: A scaled */
-	int		qcount;		/* Packets since last random number generation */
-	u32		qR;		/* Cached random number */
-
-	psched_time_t	qidlestart;	/* Start of idle period		*/
-	struct tc_red_xstats st;
+	u32			limit;		/* HARD maximal queue length */
+	unsigned char		flags;
+	struct red_parms	parms;
+	struct red_stats	stats;
 };
 
-static int red_ecn_mark(struct sk_buff *skb)
+static inline int red_use_ecn(struct red_sched_data *q)
 {
-	if (skb->nh.raw + 20 > skb->tail)
-		return 0;
-
-	switch (skb->protocol) {
-	case __constant_htons(ETH_P_IP):
-		if (INET_ECN_is_not_ect(skb->nh.iph->tos))
-			return 0;
-		IP_ECN_set_ce(skb->nh.iph);
-		return 1;
-	case __constant_htons(ETH_P_IPV6):
-		if (INET_ECN_is_not_ect(ipv6_get_dsfield(skb->nh.ipv6h)))
-			return 0;
-		IP6_ECN_set_ce(skb->nh.ipv6h);
-		return 1;
-	default:
-		return 0;
-	}
+	return q->flags & TC_RED_ECN;
 }
 
-static int
-red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+static inline int red_use_harddrop(struct red_sched_data *q)
+{
+	return q->flags & TC_RED_HARDDROP;
+}
+
+static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
 
-	psched_time_t now;
+	q->parms.qavg = red_calc_qavg(&q->parms, sch->qstats.backlog);
 
-	if (!PSCHED_IS_PASTPERFECT(q->qidlestart)) {
-		long us_idle;
-		int  shift;
+	if (red_is_idling(&q->parms))
+		red_end_of_idle_period(&q->parms);
 
-		PSCHED_GET_TIME(now);
-		us_idle = PSCHED_TDIFF_SAFE(now, q->qidlestart, q->Scell_max);
-		PSCHED_SET_PASTPERFECT(q->qidlestart);
+	switch (red_action(&q->parms, q->parms.qavg)) {
+		case RED_DONT_MARK:
+			break;
 
-/*
-   The problem: ideally, average length queue recalcultion should
-   be done over constant clock intervals. This is too expensive, so that
-   the calculation is driven by outgoing packets.
-   When the queue is idle we have to model this clock by hand.
-
-   SF+VJ proposed to "generate" m = idletime/(average_pkt_size/bandwidth)
-   dummy packets as a burst after idle time, i.e.
-
-          q->qave *= (1-W)^m
-
-   This is an apparently overcomplicated solution (f.e. we have to precompute
-   a table to make this calculation in reasonable time)
-   I believe that a simpler model may be used here,
-   but it is field for experiments.
-*/
-		shift = q->Stab[us_idle>>q->Scell_log];
-
-		if (shift) {
-			q->qave >>= shift;
-		} else {
-			/* Approximate initial part of exponent
-			   with linear function:
-			   (1-W)^m ~= 1-mW + ...
-
-			   Seems, it is the best solution to
-			   problem of too coarce exponent tabulation.
-			 */
-
-			us_idle = (q->qave * us_idle)>>q->Scell_log;
-			if (us_idle < q->qave/2)
-				q->qave -= us_idle;
-			else
-				q->qave >>= 1;
-		}
-	} else {
-		q->qave += sch->qstats.backlog - (q->qave >> q->Wlog);
-		/* NOTE:
-		   q->qave is fixed point number with point at Wlog.
-		   The formulae above is equvalent to floating point
-		   version:
-
-		   qave = qave*(1-W) + sch->qstats.backlog*W;
-		                                           --ANK (980924)
-		 */
-	}
+		case RED_PROB_MARK:
+			sch->qstats.overlimits++;
+			if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
+				q->stats.prob_drop++;
+				goto congestion_drop;
+			}
 
-	if (q->qave < q->qth_min) {
-		q->qcount = -1;
-enqueue:
-		if (sch->qstats.backlog + skb->len <= q->limit) {
-			__skb_queue_tail(&sch->q, skb);
-			sch->qstats.backlog += skb->len;
-			sch->bstats.bytes += skb->len;
-			sch->bstats.packets++;
-			return NET_XMIT_SUCCESS;
-		} else {
-			q->st.pdrop++;
-		}
-		kfree_skb(skb);
-		sch->qstats.drops++;
-		return NET_XMIT_DROP;
-	}
-	if (q->qave >= q->qth_max) {
-		q->qcount = -1;
-		sch->qstats.overlimits++;
-mark:
-		if  (!(q->flags&TC_RED_ECN) || !red_ecn_mark(skb)) {
-			q->st.early++;
-			goto drop;
-		}
-		q->st.marked++;
-		goto enqueue;
-	}
+			q->stats.prob_mark++;
+			break;
+
+		case RED_HARD_MARK:
+			sch->qstats.overlimits++;
+			if (red_use_harddrop(q) || !red_use_ecn(q) ||
+			    !INET_ECN_set_ce(skb)) {
+				q->stats.forced_drop++;
+				goto congestion_drop;
+			}
 
-	if (++q->qcount) {
-		/* The formula used below causes questions.
-
-		   OK. qR is random number in the interval 0..Rmask
-		   i.e. 0..(2^Plog). If we used floating point
-		   arithmetics, it would be: (2^Plog)*rnd_num,
-		   where rnd_num is less 1.
-
-		   Taking into account, that qave have fixed
-		   point at Wlog, and Plog is related to max_P by
-		   max_P = (qth_max-qth_min)/2^Plog; two lines
-		   below have the following floating point equivalent:
-		   
-		   max_P*(qave - qth_min)/(qth_max-qth_min) < rnd/qcount
-
-		   Any questions? --ANK (980924)
-		 */
-		if (((q->qave - q->qth_min)>>q->Wlog)*q->qcount < q->qR)
-			goto enqueue;
-		q->qcount = 0;
-		q->qR = net_random()&q->Rmask;
-		sch->qstats.overlimits++;
-		goto mark;
+			q->stats.forced_mark++;
+			break;
 	}
-	q->qR = net_random()&q->Rmask;
-	goto enqueue;
 
-drop:
-	kfree_skb(skb);
-	sch->qstats.drops++;
+	if (sch->qstats.backlog + skb->len <= q->limit)
+		return qdisc_enqueue_tail(skb, sch);
+
+	q->stats.pdrop++;
+	return qdisc_drop(skb, sch);
+
+congestion_drop:
+	qdisc_drop(skb, sch);
 	return NET_XMIT_CN;
 }
 
-static int
-red_requeue(struct sk_buff *skb, struct Qdisc* sch)
+static int red_requeue(struct sk_buff *skb, struct Qdisc* sch)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
 
-	PSCHED_SET_PASTPERFECT(q->qidlestart);
+	if (red_is_idling(&q->parms))
+		red_end_of_idle_period(&q->parms);
 
-	__skb_queue_head(&sch->q, skb);
-	sch->qstats.backlog += skb->len;
-	sch->qstats.requeues++;
-	return 0;
+	return qdisc_requeue(skb, sch);
 }
 
-static struct sk_buff *
-red_dequeue(struct Qdisc* sch)
+static struct sk_buff * red_dequeue(struct Qdisc* sch)
 {
 	struct sk_buff *skb;
 	struct red_sched_data *q = qdisc_priv(sch);
 
-	skb = __skb_dequeue(&sch->q);
-	if (skb) {
-		sch->qstats.backlog -= skb->len;
-		return skb;
-	}
-	PSCHED_GET_TIME(q->qidlestart);
-	return NULL;
+	skb = qdisc_dequeue_head(sch);
+
+	if (skb == NULL && !red_is_idling(&q->parms))
+		red_start_of_idle_period(&q->parms);
+
+	return skb;
 }
 
 static unsigned int red_drop(struct Qdisc* sch)
@@ -333,16 +130,17 @@ static unsigned int red_drop(struct Qdisc* sch)
 	struct sk_buff *skb;
 	struct red_sched_data *q = qdisc_priv(sch);
 
-	skb = __skb_dequeue_tail(&sch->q);
+	skb = qdisc_dequeue_tail(sch);
 	if (skb) {
 		unsigned int len = skb->len;
-		sch->qstats.backlog -= len;
-		sch->qstats.drops++;
-		q->st.other++;
-		kfree_skb(skb);
+		q->stats.other++;
+		qdisc_drop(skb, sch);
 		return len;
 	}
-	PSCHED_GET_TIME(q->qidlestart);
+
+	if (!red_is_idling(&q->parms))
+		red_start_of_idle_period(&q->parms);
+
 	return 0;
 }
 
@@ -350,43 +148,38 @@ static void red_reset(struct Qdisc* sch)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
 
-	__skb_queue_purge(&sch->q);
-	sch->qstats.backlog = 0;
-	PSCHED_SET_PASTPERFECT(q->qidlestart);
-	q->qave = 0;
-	q->qcount = -1;
+	qdisc_reset_queue(sch);
+	red_restart(&q->parms);
 }
 
 static int red_change(struct Qdisc *sch, struct rtattr *opt)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
-	struct rtattr *tb[TCA_RED_STAB];
+	struct rtattr *tb[TCA_RED_MAX];
 	struct tc_red_qopt *ctl;
 
-	if (opt == NULL ||
-	    rtattr_parse_nested(tb, TCA_RED_STAB, opt) ||
-	    tb[TCA_RED_PARMS-1] == 0 || tb[TCA_RED_STAB-1] == 0 ||
+	if (opt == NULL || rtattr_parse_nested(tb, TCA_RED_MAX, opt))
+		return -EINVAL;
+
+	if (tb[TCA_RED_PARMS-1] == NULL ||
 	    RTA_PAYLOAD(tb[TCA_RED_PARMS-1]) < sizeof(*ctl) ||
-	    RTA_PAYLOAD(tb[TCA_RED_STAB-1]) < 256)
+	    tb[TCA_RED_STAB-1] == NULL ||
+	    RTA_PAYLOAD(tb[TCA_RED_STAB-1]) < RED_STAB_SIZE)
 		return -EINVAL;
 
 	ctl = RTA_DATA(tb[TCA_RED_PARMS-1]);
 
 	sch_tree_lock(sch);
 	q->flags = ctl->flags;
-	q->Wlog = ctl->Wlog;
-	q->Plog = ctl->Plog;
-	q->Rmask = ctl->Plog < 32 ? ((1<<ctl->Plog) - 1) : ~0UL;
-	q->Scell_log = ctl->Scell_log;
-	q->Scell_max = (255<<q->Scell_log);
-	q->qth_min = ctl->qth_min<<ctl->Wlog;
-	q->qth_max = ctl->qth_max<<ctl->Wlog;
 	q->limit = ctl->limit;
-	memcpy(q->Stab, RTA_DATA(tb[TCA_RED_STAB-1]), 256);
 
-	q->qcount = -1;
+	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
+				 ctl->Plog, ctl->Scell_log,
+				 RTA_DATA(tb[TCA_RED_STAB-1]));
+
 	if (skb_queue_empty(&sch->q))
-		PSCHED_SET_PASTPERFECT(q->qidlestart);
+		red_end_of_idle_period(&q->parms);
+
 	sch_tree_unlock(sch);
 	return 0;
 }
@@ -399,39 +192,39 @@ static int red_init(struct Qdisc* sch, struct rtattr *opt)
 static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
-	unsigned char	 *b = skb->tail;
-	struct rtattr *rta;
-	struct tc_red_qopt opt;
-
-	rta = (struct rtattr*)b;
-	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
-	opt.limit = q->limit;
-	opt.qth_min = q->qth_min>>q->Wlog;
-	opt.qth_max = q->qth_max>>q->Wlog;
-	opt.Wlog = q->Wlog;
-	opt.Plog = q->Plog;
-	opt.Scell_log = q->Scell_log;
-	opt.flags = q->flags;
+	struct rtattr *opts = NULL;
+	struct tc_red_qopt opt = {
+		.limit		= q->limit,
+		.flags		= q->flags,
+		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
+		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
+		.Wlog		= q->parms.Wlog,
+		.Plog		= q->parms.Plog,
+		.Scell_log	= q->parms.Scell_log,
+	};
+
+	opts = RTA_NEST(skb, TCA_OPTIONS);
 	RTA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt);
-	rta->rta_len = skb->tail - b;
-
-	return skb->len;
+	return RTA_NEST_END(skb, opts);
 
 rtattr_failure:
-	skb_trim(skb, b - skb->data);
-	return -1;
+	return RTA_NEST_CANCEL(skb, opts);
 }
 
 static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
-
-	return gnet_stats_copy_app(d, &q->st, sizeof(q->st));
+	struct tc_red_xstats st = {
+		.early	= q->stats.prob_drop + q->stats.forced_drop,
+		.pdrop	= q->stats.pdrop,
+		.other	= q->stats.other,
+		.marked	= q->stats.prob_mark + q->stats.forced_mark,
+	};
+
+	return gnet_stats_copy_app(d, &st, sizeof(st));
 }
 
 static struct Qdisc_ops red_qdisc_ops = {
-	.next		=	NULL,
-	.cl_ops		=	NULL,
 	.id		=	"red",
 	.priv_size	=	sizeof(struct red_sched_data),
 	.enqueue	=	red_enqueue,
@@ -450,10 +243,13 @@ static int __init red_module_init(void)
 {
 	return register_qdisc(&red_qdisc_ops);
 }
-static void __exit red_module_exit(void) 
+
+static void __exit red_module_exit(void)
 {
 	unregister_qdisc(&red_qdisc_ops);
 }
+
 module_init(red_module_init)
 module_exit(red_module_exit)
+
 MODULE_LICENSE("GPL");
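
With the algorithm hoisted into red.h, sch_red.c is reduced to queue plumbing around red_action(). A toy harness (an illustrative assumption, not kernel code) exercises the same three-way decision using the floating-point equivalent of the shift-based test:

	#include <stdio.h>
	#include <stdlib.h>

	enum { DONT_MARK, PROB_MARK, HARD_MARK };

	static int toy_red_action(unsigned long qavg, unsigned long qth_min,
				  unsigned long qth_max, double max_P)
	{
		double pb;

		if (qavg < qth_min)
			return DONT_MARK;	/* below min threshold: pass */
		if (qavg >= qth_max)
			return HARD_MARK;	/* above max threshold: mark/drop */

		/* Pb = max_P * (qavg - qth_min) / (qth_max - qth_min) */
		pb = max_P * (double)(qavg - qth_min) /
		     (double)(qth_max - qth_min);
		return drand48() < pb ? PROB_MARK : DONT_MARK;
	}

	int main(void)
	{
		unsigned long q;

		for (q = 0; q <= 160 * 1024; q += 32 * 1024)
			printf("qavg=%6lu -> %d\n", q,
			       toy_red_action(q, 32 * 1024, 128 * 1024, 0.02));
		return 0;
	}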