From 2ef7e8cef81e6a091de2aebd9d30c273edf6c13c Mon Sep 17 00:00:00 2001
From: Linus Torvalds <torvalds@athlon.transmeta.com>
Date: Mon, 4 Feb 2002 20:24:42 -0800
Subject: [PATCH] v2.4.12.4 -> v2.4.12.5

  - Greg KH: usbnet fix
  - Johannes Erdfelt: uhci.c bulk queueing fixes
---
 Makefile                       |   2 +-
 drivers/char/joystick/analog.c |   6 +-
 drivers/usb/uhci.c             | 232 ++++++++++++++++-----------------
 drivers/usb/uhci.h             |   1 +
 drivers/usb/usbnet.c           | 118 ++++++++++-------
 fs/buffer.c                    |  24 +---
 mm/page_alloc.c                |   2 +-
 mm/vmscan.c                    |  33 ++---
 8 files changed, 205 insertions(+), 213 deletions(-)
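
Note (kept below the "---" cut, so git-am drops it): the uhci.c changes
rework bulk queueing so that queued URBs are chained QH-to-QH rather than
TD-to-TD, and FSBR timeouts are tracked via the new fsbrtime field instead
of inserttime.  That queueing path is only taken for bulk URBs submitted
with USB_QUEUE_BULK set, which is what usbnet.c does.  A minimal sketch of
that submission pattern against the 2.4 URB API (<linux/usb.h>) follows;
queue_bulk_out, ep_out, buf, len, my_complete and ctx are placeholder
names for illustration, not identifiers from this patch.

	static int queue_bulk_out(struct usb_device *udev, int ep_out,
			void *buf, int len,
			void (*my_complete)(struct urb *), void *ctx)
	{
		struct urb *urb = usb_alloc_urb(0);	/* no iso packets */

		if (!urb)
			return -ENOMEM;
		FILL_BULK_URB(urb, udev, usb_sndbulkpipe(udev, ep_out),
			      buf, len, my_complete, ctx);
		/* queue behind bulk URBs already pending on this endpoint,
		 * and make unlinks asynchronous, as usbnet does below */
		urb->transfer_flags |= USB_QUEUE_BULK | USB_ASYNC_UNLINK;
		return usb_submit_urb(urb);
	}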

diff --git a/Makefile b/Makefile
index 282423721d6c..469ed2c44dfc 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 4
 SUBLEVEL = 13
-EXTRAVERSION =-pre4
+EXTRAVERSION =-pre5
 
 KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
 
diff --git a/drivers/char/joystick/analog.c b/drivers/char/joystick/analog.c
index d04cb55ea691..bbab2f946c30 100644
--- a/drivers/char/joystick/analog.c
+++ b/drivers/char/joystick/analog.c
@@ -138,7 +138,7 @@ struct analog_port {
 
 #ifdef __i386__
 #define TSC_PRESENT	(test_bit(X86_FEATURE_TSC, &boot_cpu_data.x86_capability))
-#define GET_TIME(x)	do { if (TSC_PRESENT) rdtscl(x); else outb(0, 0x43); x = inb(0x40); x |= inb(0x40) << 8; } while (0)
+#define GET_TIME(x)	do { if (TSC_PRESENT) rdtscl(x); else { outb(0, 0x43); x = inb(0x40); x |= inb(0x40) << 8; } } while (0)
 #define DELTA(x,y)	(TSC_PRESENT?((y)-(x)):((x)-(y)+((x)<(y)?1193180L/HZ:0)))
 #define TIME_NAME	(TSC_PRESENT?"TSC":"PIT")
 #elif __x86_64__
@@ -499,7 +499,9 @@ static void analog_init_device(struct analog_port *port, struct analog *analog,
 	else
 		printk(" [%s timer, %d %sHz clock, %d ns res]\n", TIME_NAME,
 		port->speed > 10000 ? (port->speed + 800) / 1000 : port->speed,
-		port->speed > 10000 ? "M" : "k", (port->loop * 1000000) / port->speed);
+		port->speed > 10000 ? "M" : "k",
+		port->speed > 10000 ? (port->loop * 1000) / (port->speed / 1000)
+				    : (port->loop * 1000000) / port->speed);
 }
 
 /*
diff --git a/drivers/usb/uhci.c b/drivers/usb/uhci.c
index 511df4453efe..af8f012665b6 100644
--- a/drivers/usb/uhci.c
+++ b/drivers/usb/uhci.c
@@ -113,44 +113,6 @@ static int uhci_alloc_dev(struct usb_device *dev)
 
 static int uhci_free_dev(struct usb_device *dev)
 {
-	struct uhci *uhci = (struct uhci *)dev->bus->hcpriv;
-	struct list_head list, *tmp, *head;
-	unsigned long flags;
-
-	/* Walk through the entire URB list and forcefully remove any */
-	/*  URBs that are still active for that device */
-
-	/* Two stage unlink so we don't deadlock on urb_list_lock */
-	INIT_LIST_HEAD(&list);
-
-	spin_lock_irqsave(&uhci->urb_list_lock, flags);
-	head = &uhci->urb_list;
-	tmp = head->next;
-	while (tmp != head) {
-		struct urb *urb = list_entry(tmp, struct urb, urb_list);
-
-		tmp = tmp->next;
-
-		if (urb->dev == dev) {
-			list_del(&urb->urb_list);
-			list_add(&urb->urb_list, &list);
-		}
-	}
-	spin_unlock_irqrestore(&uhci->urb_list_lock, flags);
-
-	head = &list;
-	tmp = head->next;
-	while (tmp != head) {
-		struct urb *urb = list_entry(tmp, struct urb, urb_list);
-		tmp = tmp->next;
-
-		/* Make sure we block waiting on these to die */
-		urb->transfer_flags &= ~USB_ASYNC_UNLINK;
-
-		/* uhci_unlink_urb will unlink from the temp list */
-		uhci_unlink_urb(urb);
-	}
-
 	return 0;
 }
 
@@ -396,50 +358,96 @@ static void uhci_free_qh(struct uhci *uhci, struct uhci_qh *qh)
 	pci_pool_free(uhci->qh_pool, qh, qh->dma_handle);
 }
 
-static void uhci_insert_qh(struct uhci *uhci, struct uhci_qh *skelqh, struct uhci_qh *qh)
+static void _uhci_insert_qh(struct uhci *uhci, struct uhci_qh *skelqh, struct urb *urb)
 {
+	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
+	struct list_head *head, *tmp;
 	struct uhci_qh *lqh;
-	unsigned long flags;
-
-	spin_lock_irqsave(&uhci->frame_list_lock, flags);
 
 	/* Grab the last QH */
 	lqh = list_entry(skelqh->list.prev, struct uhci_qh, list);
 
-	qh->link = lqh->link;
+	if (lqh->urbp) {
+		head = &lqh->urbp->queue_list;
+		tmp = head->next;
+		while (head != tmp) {
+			struct urb_priv *turbp =
+				list_entry(tmp, struct urb_priv, queue_list);
+
+			tmp = tmp->next;
+
+			turbp->qh->link = urbp->qh->dma_handle | UHCI_PTR_QH;
+		}
+	}
+
+	head = &urbp->queue_list;
+	tmp = head->next;
+	while (head != tmp) {
+		struct urb_priv *turbp =
+			list_entry(tmp, struct urb_priv, queue_list);
+
+		tmp = tmp->next;
+
+		turbp->qh->link = lqh->link;
+	}
+
+	urbp->qh->link = lqh->link;
 	mb();				/* Ordering is important */
-	lqh->link = qh->dma_handle | UHCI_PTR_QH;
+	lqh->link = urbp->qh->dma_handle | UHCI_PTR_QH;
+
+	list_add_tail(&urbp->qh->list, &skelqh->list);
+}
 
-	list_add_tail(&qh->list, &skelqh->list);
+static void uhci_insert_qh(struct uhci *uhci, struct uhci_qh *skelqh, struct urb *urb)
+{
+	unsigned long flags;
 
+	spin_lock_irqsave(&uhci->frame_list_lock, flags);
+	_uhci_insert_qh(uhci, skelqh, urb);
 	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
 }
 
-static void uhci_remove_qh(struct uhci *uhci, struct uhci_qh *qh)
+static void uhci_remove_qh(struct uhci *uhci, struct urb *urb)
 {
+	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
 	unsigned long flags;
-	struct uhci_qh *prevqh;
+	struct uhci_qh *qh = urbp->qh, *pqh;
+
+	if (!qh)
+		return;
 
 	/* Only go through the hoops if it's actually linked in */
-	if (list_empty(&qh->list)) {
-		goto list;
-	}
+	if (!list_empty(&qh->list)) {
+		qh->urbp = NULL;
 
-	qh->urbp = NULL;
+		spin_lock_irqsave(&uhci->frame_list_lock, flags);
 
-	spin_lock_irqsave(&uhci->frame_list_lock, flags);
+		pqh = list_entry(qh->list.prev, struct uhci_qh, list);
 
-	prevqh = list_entry(qh->list.prev, struct uhci_qh, list);
+		if (pqh->urbp) {
+			struct list_head *head, *tmp;
 
-	prevqh->link = qh->link;
-	mb();
-	qh->element = qh->link = UHCI_PTR_TERM;
+			head = &pqh->urbp->queue_list;
+			tmp = head->next;
+			while (head != tmp) {
+				struct urb_priv *turbp =
+					list_entry(tmp, struct urb_priv, queue_list);
 
-	list_del_init(&qh->list);
+				tmp = tmp->next;
 
-	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
+				turbp->qh->link = qh->link;
+			}
+		}
+
+		pqh->link = qh->link;
+		mb();
+		qh->element = qh->link = UHCI_PTR_TERM;
+
+		list_del_init(&qh->list);
+
+		spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
+	}
 
-list:
 	spin_lock_irqsave(&uhci->qh_remove_list_lock, flags);
 
 	/* Check to see if the remove list is empty. Set the IOC bit */
@@ -464,9 +472,10 @@ static int uhci_fixup_toggle(struct urb *urb, unsigned int toggle)
 
 		tmp = tmp->next;
 
-		td->info &= ~(1 << TD_TOKEN_TOGGLE);
 		if (toggle)
-			td->info |= (1 << TD_TOKEN_TOGGLE);
+			set_bit(TD_TOKEN_TOGGLE, &td->info);
+		else
+			clear_bit(TD_TOKEN_TOGGLE, &td->info);
 
 		toggle ^= 1;
 	}
@@ -481,7 +490,7 @@ static void uhci_append_queued_urb(struct uhci *uhci, struct urb *eurb, struct u
 {
 	struct urb_priv *eurbp, *urbp, *furbp, *lurbp;
 	struct list_head *tmp;
-	struct uhci_td *ftd, *lltd;
+	struct uhci_td *lltd;
 	unsigned long flags;
 
 	eurbp = eurb->hcpriv;
@@ -510,13 +519,15 @@ static void uhci_append_queued_urb(struct uhci *uhci, struct urb *eurb, struct u
 	lurbp = list_entry(furbp->queue_list.prev, struct urb_priv, queue_list);
 
 	lltd = list_entry(lurbp->td_list.prev, struct uhci_td, list);
-	ftd = list_entry(urbp->td_list.next, struct uhci_td, list);
 
 	uhci_fixup_toggle(urb, uhci_toggle(lltd->info) ^ 1);
 
+	/* All qh's in the queue need to link to the next queue */
+	urbp->qh->link = eurbp->qh->link;
+
 	mb();			/* Make sure we flush everything */
 	/* Only support bulk right now, so no depth */
-	lltd->link = ftd->dma_handle;
+	lltd->link = urbp->qh->dma_handle | UHCI_PTR_QH;
 
 	list_add_tail(&urbp->queue_list, &furbp->queue_list);
 
@@ -576,30 +587,9 @@ static void uhci_delete_queued_urb(struct uhci *uhci, struct urb *urb)
 		usb_pipeout(urb->pipe), toggle);
 
 	if (!urbp->queued) {
-		int status;
-
-		/*  The HC may continue using the current QH if it finished */
-		/* all of the TD's in this URB and may have started on the */
-		/* next URB's TD's already, so we'll take over ownership */
-		/* of this QH and use it instead. Don't forget to delete */
-		/* the old QH first */
-		uhci_free_qh(uhci, nurbp->qh);
-
-		nurbp->qh = urbp->qh;
-		nurbp->qh->urbp = nurbp;
-		urbp->qh = NULL;
-
-		/* If the last TD from the first (this) urb didn't */
-		/*  complete, reset qh->element to the first TD in the */
-		/*  next urb */
-		pltd = list_entry(urbp->td_list.prev, struct uhci_td, list);
-		status = uhci_status_bits(pltd->status);
-		if ((status & TD_CTRL_ACTIVE) || uhci_actual_length(pltd->status) < uhci_expected_length(pltd->info)) {
-			struct uhci_td *ftd = list_entry(nurbp->td_list.next, struct uhci_td, list);
-			nurbp->qh->element = ftd->dma_handle;
-		}
-
 		nurbp->queued = 0;
+
+		_uhci_insert_qh(uhci, uhci->skel_bulk_qh, nurbp->urb);
 	} else {
 		/* We're somewhere in the middle (or end). A bit trickier */
 		/*  than the head scenario */
@@ -607,15 +597,10 @@ static void uhci_delete_queued_urb(struct uhci *uhci, struct urb *urb)
 				queue_list);
 
 		pltd = list_entry(purbp->td_list.prev, struct uhci_td, list);
-		if (nurbp->queued) {
-			struct uhci_td *nftd;
-
-			/* Close the gap between the two */
-			nftd = list_entry(nurbp->td_list.next, struct uhci_td,
-					list);
-			pltd->link = nftd->dma_handle;
-		} else
-			/* The next URB happens to be the beggining, so */
+		if (nurbp->queued)
+			pltd->link = nurbp->qh->dma_handle | UHCI_PTR_QH;
+		else
+			/* The next URB happens to be the beginning, so */
 			/*  we're the last, end the chain */
 			pltd->link = UHCI_PTR_TERM;
 	}
@@ -639,6 +624,7 @@ static struct urb_priv *uhci_alloc_urb_priv(struct uhci *uhci, struct urb *urb)
 	memset((void *)urbp, 0, sizeof(*urbp));
 
 	urbp->inserttime = jiffies;
+	urbp->fsbrtime = jiffies;
 	urbp->urb = urb;
 	urbp->dev = urb->dev;
 	
@@ -900,19 +886,19 @@ static int uhci_submit_control(struct urb *urb)
 	if (!qh)
 		return -ENOMEM;
 
+	urbp->qh = qh;
+	qh->urbp = urbp;
+
 	/* Low speed or small transfers gets a different queue and treatment */
 	if (urb->pipe & TD_CTRL_LS) {
 		uhci_insert_tds_in_qh(qh, urb, 0);
-		uhci_insert_qh(uhci, uhci->skel_ls_control_qh, qh);
+		uhci_insert_qh(uhci, uhci->skel_ls_control_qh, urb);
 	} else {
 		uhci_insert_tds_in_qh(qh, urb, 1);
-		uhci_insert_qh(uhci, uhci->skel_hs_control_qh, qh);
+		uhci_insert_qh(uhci, uhci->skel_hs_control_qh, urb);
 		uhci_inc_fsbr(uhci, urb);
 	}
 
-	urbp->qh = qh;
-	qh->urbp = urbp;
-
 	return -EINPROGRESS;
 }
 
@@ -961,6 +947,7 @@ static int uhci_result_control(struct urb *urb)
 		    !(td->status & TD_CTRL_ACTIVE)) {
 			uhci_inc_fsbr(urb->dev->bus->hcpriv, urb);
 			urbp->fsbr_timeout = 0;
+			urbp->fsbrtime = jiffies;
 			clear_bit(TD_CTRL_IOC_BIT, &td->status);
 		}
 
@@ -1043,7 +1030,7 @@ static int usb_control_retrigger_status(struct urb *urb)
 	urbp->short_control_packet = 1;
 
 	/* Create a new QH to avoid pointer overwriting problems */
-	uhci_remove_qh(uhci, urbp->qh);
+	uhci_remove_qh(uhci, urb);
 
 	/* Delete all of the TD's except for the status TD at the end */
 	head = &urbp->td_list;
@@ -1071,9 +1058,9 @@ static int usb_control_retrigger_status(struct urb *urb)
 
 	/* Low speed or small transfers gets a different queue and treatment */
 	if (urb->pipe & TD_CTRL_LS)
-		uhci_insert_qh(uhci, uhci->skel_ls_control_qh, urbp->qh);
+		uhci_insert_qh(uhci, uhci->skel_ls_control_qh, urb);
 	else
-		uhci_insert_qh(uhci, uhci->skel_hs_control_qh, urbp->qh);
+		uhci_insert_qh(uhci, uhci->skel_hs_control_qh, urb);
 
 	return -EINPROGRESS;
 }
@@ -1134,6 +1121,7 @@ static int uhci_result_interrupt(struct urb *urb)
 		    !(td->status & TD_CTRL_ACTIVE)) {
 			uhci_inc_fsbr(urb->dev->bus->hcpriv, urb);
 			urbp->fsbr_timeout = 0;
+			urbp->fsbrtime = jiffies;
 			clear_bit(TD_CTRL_IOC_BIT, &td->status);
 		}
 
@@ -1147,10 +1135,6 @@ static int uhci_result_interrupt(struct urb *urb)
 			goto td_error;
 
 		if (uhci_actual_length(td->status) < uhci_expected_length(td->info)) {
-			usb_settoggle(urb->dev, uhci_endpoint(td->info),
-				uhci_packetout(td->info),
-				uhci_toggle(td->info) ^ 1);
-
 			if (urb->transfer_flags & USB_DISABLE_SPD) {
 				ret = -EREMOTEIO;
 				goto err;
@@ -1303,7 +1287,7 @@ static int uhci_submit_bulk(struct urb *urb, struct urb *eurb)
 	if (urb->transfer_flags & USB_QUEUE_BULK && eurb)
 		uhci_append_queued_urb(uhci, eurb, urb);
 	else
-		uhci_insert_qh(uhci, uhci->skel_bulk_qh, qh);
+		uhci_insert_qh(uhci, uhci->skel_bulk_qh, urb);
 
 	uhci_inc_fsbr(uhci, urb);
 
@@ -1681,6 +1665,7 @@ static void uhci_transfer_result(struct uhci *uhci, struct urb *urb)
 
 static void uhci_unlink_generic(struct uhci *uhci, struct urb *urb)
 {
+	struct list_head *head, *tmp;
 	struct urb_priv *urbp = urb->hcpriv;
 
 	/* We can get called when urbp allocation fails, so check */
@@ -1689,15 +1674,30 @@ static void uhci_unlink_generic(struct uhci *uhci, struct urb *urb)
 
 	uhci_dec_fsbr(uhci, urb);	/* Safe since it checks */
 
+	head = &urbp->td_list;
+	tmp = head->next;
+	while (tmp != head) {
+		struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
+
+		tmp = tmp->next;
+
+		/* Control and Isochronous ignore the toggle, so this */
+		/* is safe for all types */
+		if ((!(td->status & TD_CTRL_ACTIVE) &&
+		     uhci_actual_length(td->status) < uhci_expected_length(td->info)) ||
+		    tmp == head) {
+			usb_settoggle(urb->dev, uhci_endpoint(td->info),
+				uhci_packetout(td->info),
+				uhci_toggle(td->info) ^ 1);
+		}
+	}
+
 	uhci_delete_queued_urb(uhci, urb);
 
-	if (urbp->qh)
-		/* The interrupt loop will reclaim the QH's */
-		uhci_remove_qh(uhci, urbp->qh);
+	/* The interrupt loop will reclaim the QH's */
+	uhci_remove_qh(uhci, urb);
 }
 
-/* FIXME: If we forcefully unlink an urb, we should reset the toggle for */
-/*  that pipe to match what actually completed */
 static int uhci_unlink_urb(struct urb *urb)
 {
 	struct uhci *uhci;
@@ -1951,11 +1951,11 @@ static void rh_int_timer_do(unsigned long ptr)
 		tmp = tmp->next;
 
 		/* Check if the FSBR timed out */
-		if (urbp->fsbr && time_after_eq(jiffies, urbp->inserttime + IDLE_TIMEOUT))
+		if (urbp->fsbr && !urbp->fsbr_timeout && time_after_eq(jiffies, urbp->fsbrtime + IDLE_TIMEOUT))
 			uhci_fsbr_timeout(uhci, u);
 
 		/* Check if the URB timed out */
-		if (u->timeout && time_after_eq(jiffies, u->timeout)) {
+		if (u->timeout && time_after_eq(jiffies, urbp->inserttime + u->timeout)) {
 			list_del(&u->urb_list);
 			list_add_tail(&u->urb_list, &list);
 		}
diff --git a/drivers/usb/uhci.h b/drivers/usb/uhci.h
index d9e728b29963..c2b17f6705a1 100644
--- a/drivers/usb/uhci.h
+++ b/drivers/usb/uhci.h
@@ -345,6 +345,7 @@ struct urb_priv {
 	int status;			/* Final status */
 
 	unsigned long inserttime;	/* In jiffies */
+	unsigned long fsbrtime;		/* In jiffies */
 
 	struct list_head queue_list;
 	struct list_head complete_list;
diff --git a/drivers/usb/usbnet.c b/drivers/usb/usbnet.c
index 32d9f6936e2e..6981f543fe9c 100644
--- a/drivers/usb/usbnet.c
+++ b/drivers/usb/usbnet.c
@@ -15,7 +15,7 @@
  * support as appropriate.  Devices currently supported include:
  *
  *	- AnchorChip 2720
- *	- Belkin F5U104 (custom)
+ *	- Belkin, eTEK (interops with Win32 drivers)
  *	- "Linux Devices" (like iPaq and similar SA-1100 based PDAs)
  *	- NetChip 1080 (interoperates with NetChip Win32 drivers)
  *	- Prolific PL-2301/2302 (replaces "plusb" driver)
@@ -73,6 +73,9 @@
  *		Win32 Belkin driver; other cleanups (db).
  * 16-jul-2001	Bugfixes for uhci oops-on-unplug, Belkin support, various
  *		cleanups for problems not yet seen in the field. (db)
+ * 17-oct-2001	Handle "Advance USBNET" product, like Belkin/eTEK devices,
+ *		from Ioannis Mavroukakis <i.mavroukakis@btinternet.com>;
+ *		rx unlinks somehow weren't async; minor cleanup.
  *
  *-------------------------------------------------------------------------*/
 
@@ -97,7 +100,7 @@
 
 
 #define	CONFIG_USB_AN2720
-#define	CONFIG_USB_BELKIN_F5U104
+#define	CONFIG_USB_BELKIN
 #define	CONFIG_USB_LINUXDEV
 #define	CONFIG_USB_NET1080
 #define	CONFIG_USB_PL2301
@@ -119,7 +122,7 @@
 #endif
 
 // packets are always ethernet inside
-// ... except they can be bigger (up to 64K with this framing)
+// ... except they can be bigger (limit of 64K with NetChip framing)
 #define MIN_PACKET	sizeof(struct ethhdr)
 #define MAX_PACKET	32768
 
@@ -161,7 +164,7 @@ struct usbnet {
 	struct sk_buff_head	txq;
 	struct sk_buff_head	done;
 	struct tasklet_struct	bh;
-	struct tq_struct ctrl_task;
+	struct tq_struct	ctrl_task;
 };
 
 // device-specific info used by the driver
@@ -169,7 +172,8 @@ struct driver_info {
 	char		*description;
 
 	int		flags;
-#define FLAG_FRAMING	0x0001		/* guard against device dropouts */ 
+#define FLAG_FRAMING_NC	0x0001		/* guard against device dropouts */ 
+#define FLAG_NO_SETINT	0x0010		/* device can't set_interface() */
 
 	/* reset device ... can sleep */
 	int	(*reset)(struct usbnet *);
@@ -251,7 +255,7 @@ struct nc_trailer {
 	u16	packet_id;
 } __attribute__((__packed__));
 
-// packets may use FLAG_FRAMING and optional pad
+// packets may use FLAG_FRAMING_NC and optional pad
 #define FRAMED_SIZE(mtu) (sizeof (struct nc_header) \
 				+ sizeof (struct ethhdr) \
 				+ (mtu) \
@@ -288,22 +292,24 @@ static const struct driver_info	an2720_info = {
 
 
 
-#ifdef	CONFIG_USB_BELKIN_F5U104
+#ifdef	CONFIG_USB_BELKIN
 
 /*-------------------------------------------------------------------------
  *
  * Belkin F5U104 ... two NetChip 2280 devices + Atmel microcontroller
  *
+ * ... also two eTEK designs, including one sold as "Advance USBNET"
+ *
  *-------------------------------------------------------------------------*/
 
 static const struct driver_info	belkin_info = {
-	description:	"Belkin USB Direct Connect (F5U104)",
+	description:	"Belkin, eTEK, or compatible",
 
 	in: 1, out: 1,		// direction distinguishes these
 	epsize:	64,
 };
 
-#endif	/* CONFIG_USB_BELKIN_F5U104 */
+#endif	/* CONFIG_USB_BELKIN */
 
 
 
@@ -632,7 +638,7 @@ static int net1080_check_connect (struct usbnet *dev)
 
 static const struct driver_info	net1080_info = {
 	description:	"NetChip TurboCONNECT",
-	flags:		FLAG_FRAMING,
+	flags:		FLAG_FRAMING_NC,
 	reset:		net1080_reset,
 	check_connect:	net1080_check_connect,
 
@@ -729,9 +735,9 @@ static int usbnet_change_mtu (struct net_device *net, int new_mtu)
 {
 	struct usbnet	*dev = (struct usbnet *) net->priv;
 
-	if (new_mtu <= sizeof (struct ethhdr) || new_mtu > MAX_PACKET)
+	if (new_mtu <= MIN_PACKET || new_mtu > MAX_PACKET)
 		return -EINVAL;
-	if (((dev->driver_info->flags) & FLAG_FRAMING)) {
+	if (((dev->driver_info->flags) & FLAG_FRAMING_NC)) {
 		if (FRAMED_SIZE (new_mtu) > MAX_PACKET)
 			return -EINVAL;
 	// no second zero-length packet read wanted after mtu-sized packets
@@ -779,9 +785,11 @@ static void rx_submit (struct usbnet *dev, struct urb *urb, int flags)
 	unsigned long		lockflags;
 	size_t			size;
 
-	size = (dev->driver_info->flags & FLAG_FRAMING)
-			? FRAMED_SIZE (dev->net.mtu)
-			: (sizeof (struct ethhdr) + dev->net.mtu);
+	if (dev->driver_info->flags & FLAG_FRAMING_NC)
+		size = FRAMED_SIZE (dev->net.mtu);
+	else
+		size = (sizeof (struct ethhdr) + dev->net.mtu);
+
 	if ((skb = alloc_skb (size, flags)) == 0) {
 		dbg ("no rx skb");
 		tasklet_schedule (&dev->bh);
@@ -798,9 +806,15 @@ static void rx_submit (struct usbnet *dev, struct urb *urb, int flags)
 	FILL_BULK_URB (urb, dev->udev,
 		usb_rcvbulkpipe (dev->udev, dev->driver_info->in),
 		skb->data, size, rx_complete, skb);
+	urb->transfer_flags |= USB_ASYNC_UNLINK;
 #ifdef	REALLY_QUEUE
 	urb->transfer_flags |= USB_QUEUE_BULK;
 #endif
+#if 0
+	// Idle-but-posted reads with UHCI really chew up
+	// PCI bandwidth unless FSBR is disabled
+	urb->transfer_flags |= USB_NO_FSBR;
+#endif
 
 	spin_lock_irqsave (&dev->rxq.lock, lockflags);
 
@@ -827,7 +841,7 @@ static void rx_submit (struct usbnet *dev, struct urb *urb, int flags)
 
 static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
 {
-	if (dev->driver_info->flags & FLAG_FRAMING) {
+	if (dev->driver_info->flags & FLAG_FRAMING_NC) {
 		struct nc_header	*header;
 		struct nc_trailer	*trailer;
 
@@ -1083,9 +1097,11 @@ static int usbnet_open (struct net_device *net)
 	}
 
 	netif_start_queue (net);
-	devdbg (dev, "open: enable queueing (rx %d, tx %d) mtu %d %sframed",
+	devdbg (dev, "open: enable queueing (rx %d, tx %d) mtu %d %s framing",
 		RX_QLEN, TX_QLEN, dev->net.mtu,
-		(info->flags & FLAG_FRAMING) ? "" : "un"
+		(info->flags & FLAG_FRAMING_NC)
+		    ? "NetChip"
+		    : "raw"
 		);
 
 	// delay posting reads until we're fully open
@@ -1194,7 +1210,7 @@ static int usbnet_start_xmit (struct sk_buff *skb, struct net_device *net)
 
 	flags = in_interrupt () ? GFP_ATOMIC : GFP_KERNEL;
 
-	if (info->flags & FLAG_FRAMING) {
+	if (info->flags & FLAG_FRAMING_NC) {
 		struct sk_buff	*skb2;
 		skb2 = fixup_skb (skb, flags);
 		if (!skb2) {
@@ -1215,7 +1231,7 @@ static int usbnet_start_xmit (struct sk_buff *skb, struct net_device *net)
 	entry->state = tx_start;
 	entry->length = length;
 
-	if (info->flags & FLAG_FRAMING) {
+	if (info->flags & FLAG_FRAMING_NC) {
 		header = (struct nc_header *) skb_push (skb, sizeof *header);
 		header->hdr_len = cpu_to_le16 (sizeof (*header));
 		header->packet_len = cpu_to_le16 (length);
@@ -1223,31 +1239,31 @@ static int usbnet_start_xmit (struct sk_buff *skb, struct net_device *net)
 			*skb_put (skb, 1) = PAD_BYTE;
 		trailer = (struct nc_trailer *) skb_put (skb, sizeof *trailer);
 	} else if ((length % EP_SIZE (dev)) == 0) {
-			if (skb_shared (skb)) {
-				struct sk_buff *skb2;
-				skb2 = skb_unshare (skb, flags);
-				if (!skb2) {
-					dbg ("can't unshare skb");
-					goto drop;
-				}
-				skb = skb2;
+		// not all hardware behaves well with USB_ZERO_PACKET,
+		// so we add an extra one-byte packet
+		if (skb_shared (skb)) {
+			struct sk_buff *skb2;
+			skb2 = skb_unshare (skb, flags);
+			if (!skb2) {
+				dbg ("can't unshare skb");
+				goto drop;
 			}
-			skb->len++;
+			skb = skb2;
 		}
+		skb->len++;
+	}
 
 	FILL_BULK_URB (urb, dev->udev,
 			usb_sndbulkpipe (dev->udev, info->out),
 			skb->data, skb->len, tx_complete, skb);
-	// Idle-but-posted reads with UHCI really chew up
-	// PCI bandwidth unless FSBR is disabled
-	urb->transfer_flags |= USB_ASYNC_UNLINK | USB_NO_FSBR;
+	urb->transfer_flags |= USB_ASYNC_UNLINK;
 #ifdef	REALLY_QUEUE
 	urb->transfer_flags |= USB_QUEUE_BULK;
 #endif
 	// FIXME urb->timeout = ... jiffies ... ;
 
 	spin_lock_irqsave (&dev->txq.lock, flags);
-	if (info->flags & FLAG_FRAMING) {
+	if (info->flags & FLAG_FRAMING_NC) {
 		header->packet_id = cpu_to_le16 (dev->packet_id++);
 		put_unaligned (header->packet_id, &trailer->packet_id);
 #if 0
@@ -1408,9 +1424,12 @@ usbnet_probe (struct usb_device *udev, unsigned ifnum,
 		return 0;
 	}
 
-	if (usb_set_interface (udev, ifnum, altnum) < 0) {
-		err ("set_interface failed");
-		return 0;
+	// more sanity (unless the device is broken)
+	if (!(info->flags & FLAG_NO_SETINT)) {
+		if (usb_set_interface (udev, ifnum, altnum) < 0) {
+			err ("set_interface failed");
+			return 0;
+		}
 	}
 
 	// set up our own records
@@ -1484,6 +1503,18 @@ static const struct usb_device_id	products [] = {
 },
 #endif
 
+#ifdef	CONFIG_USB_BELKIN
+{
+	USB_DEVICE (0x050d, 0x0004),	// Belkin
+	driver_info:	(unsigned long) &belkin_info,
+}, {
+	USB_DEVICE (0x056c, 0x8100),	// eTEK
+	driver_info:	(unsigned long) &belkin_info,
+}, {
+	USB_DEVICE (0x0525, 0x9901),	// Advance USBNET (eTEK)
+	driver_info:	(unsigned long) &belkin_info,
+},
+#endif
 
 // GeneSys GL620USB (www.genesyslogic.com.tw)
 // (patch exists against an older driver version)
@@ -1508,16 +1539,6 @@ static const struct usb_device_id	products [] = {
 },
 #endif
 
-#ifdef	CONFIG_USB_BELKIN_F5U104
-{
-	USB_DEVICE (0x050d, 0x0004),	// Belkin
-	driver_info:	(unsigned long) &belkin_info,
-}, {
-	USB_DEVICE (0x056c, 0x8100),	// eTEK
-	driver_info:	(unsigned long) &belkin_info,
-},
-#endif
-
 #ifdef CONFIG_USB_PL2301
 {
 	USB_DEVICE (0x067b, 0x0000),	// PL-2301
@@ -1563,6 +1584,7 @@ static void __exit usbnet_exit (void)
 }
 module_exit (usbnet_exit);
 
+EXPORT_NO_SYMBOLS;
 MODULE_AUTHOR ("David Brownell <dbrownell@users.sourceforge.net>");
-MODULE_DESCRIPTION ("USB Host-to-Host Link Drivers (Belkin, Linux, NetChip, Prolific, ...)");
-MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION ("USB Host-to-Host Link Drivers (numerous vendors)");
+MODULE_LICENSE ("GPL");
diff --git a/fs/buffer.c b/fs/buffer.c
index 42ba99f03e3c..46e878ff906c 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1131,31 +1131,13 @@ void __brelse(struct buffer_head * buf)
 }
 
 /*
- * bforget() is like brelse(), except it might discard any
+ * bforget() is like brelse(), except it discards any
  * potentially dirty data.
  */
 void __bforget(struct buffer_head * buf)
 {
-	/* grab the lru lock here so that "b_count" is stable */
-	spin_lock(&lru_list_lock);
-	write_lock(&hash_table_lock);
-	if (!atomic_dec_and_test(&buf->b_count) || buffer_locked(buf))
-		goto in_use;
-
-	/* Mark it clean */
-	clear_bit(BH_Dirty, &buf->b_state);
-	write_unlock(&hash_table_lock);
-
-	/* After which we can remove it from all queues */
-	remove_inode_queue(buf);
-	__remove_from_lru_list(buf);
-	buf->b_list = BUF_CLEAN;
-	spin_unlock(&lru_list_lock);
-	return;
-
-in_use:
-	write_unlock(&hash_table_lock);
-	spin_unlock(&lru_list_lock);
+	mark_buffer_clean(buf);
+	__brelse(buf);
 }
 
 /**
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f92e13e8d821..d1d69e6976b0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -394,7 +394,7 @@ struct page * __alloc_pages(unsigned int gfp_mask, unsigned int order, zonelist_
 	}
 
 	/* Don't let big-order allocations loop */
-	if (order)
+	if (order > 1)
 		return NULL;
 
 	/* Yield for kswapd, and try again */
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 41d651f6720f..412a2f7694d7 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -346,11 +346,7 @@ static int shrink_cache(int nr_pages, int max_scan, unsigned int gfp_mask)
 
 		page = list_entry(entry, struct page, lru);
 
-		if (unlikely(!PageInactive(page) && !PageActive(page)))
-			BUG();
-
-		/* Mapping-less page on LRU-list? */
-		if (unlikely(!page->mapping))
+		if (unlikely(!PageInactive(page)))
 			BUG();
 
 		list_del(entry);
@@ -363,31 +359,17 @@ static int shrink_cache(int nr_pages, int max_scan, unsigned int gfp_mask)
 			continue;
 
 		/* Racy check to avoid trylocking when not worthwhile */
-		if (!is_page_cache_freeable(page))
-			continue;
-
-		if (unlikely(TryLockPage(page))) {
-			if (gfp_mask & __GFP_FS) {
-				page_cache_get(page);
-				spin_unlock(&pagemap_lru_lock);
-				wait_on_page(page);
-				page_cache_release(page);
-				spin_lock(&pagemap_lru_lock);
-			}
+		if (!page->buffers && page_count(page) != 1)
 			continue;
-		}
 
 		/*
-		 * Still strictly racy - we don't own the pagecache lock,
-		 * so somebody might look up the page while we do this.
-		 * It's just a heuristic, though.
+		 * The page is locked. IO in progress?
+		 * Move it to the back of the list.
 		 */
-		if (!is_page_cache_freeable(page)) {
-			UnlockPage(page);
+		if (unlikely(TryLockPage(page)))
 			continue;
-		}
 
-		if (PageDirty(page)) {
+		if (PageDirty(page) && is_page_cache_freeable(page) && page->mapping) {
 			/*
 			 * It is not critical here to write it only if
 			 * the page is unmapped beause any direct writer
@@ -461,6 +443,9 @@ static int shrink_cache(int nr_pages, int max_scan, unsigned int gfp_mask)
 			}
 		}
 
+		if (unlikely(!page->mapping))
+			BUG();
+
 		if (unlikely(!spin_trylock(&pagecache_lock))) {
 			/* we hold the page lock so the page cannot go away from under us */
 			spin_unlock(&pagemap_lru_lock);
-- 
2.30.9