Commit f477fdbe authored by Paul Mackerras

PPP updates and fixes. This fixes the various SMP races, deadlocks and scheduling-in-interrupt problems we had, and also makes it much faster when handling large numbers (100s or more) of PPP units.
parent 22e962f9
drivers/net/ppp_async.c:

@@ -17,7 +17,7 @@
  * PPP driver, written by Michael Callahan and Al Longyear, and
  * subsequently hacked by Paul Mackerras.
  *
- * ==FILEVERSION 20000227==
+ * ==FILEVERSION 20020125==
  */

 #include <linux/module.h>
@@ -33,7 +33,7 @@
 #include <linux/init.h>
 #include <asm/uaccess.h>

-#define PPP_VERSION "2.4.1"
+#define PPP_VERSION "2.4.2"

 #define OBUFSIZE 256
@@ -62,6 +62,8 @@ struct asyncppp {
	struct sk_buff *rpkt;
	int lcp_fcs;
+	atomic_t refcnt;
+	struct semaphore dead_sem;
	struct ppp_channel chan;	/* interface to generic ppp layer */
	unsigned char obuf[OBUFSIZE];
 };
@@ -107,6 +109,35 @@ static struct ppp_channel_ops async_ops = {
  * Routines implementing the PPP line discipline.
  */
+
+/*
+ * We have a potential race on dereferencing tty->disc_data,
+ * because the tty layer provides no locking at all - thus one
+ * cpu could be running ppp_asynctty_receive while another
+ * calls ppp_asynctty_close, which zeroes tty->disc_data and
+ * frees the memory that ppp_asynctty_receive is using. The best
+ * way to fix this is to use a rwlock in the tty struct, but for now
+ * we use a single global rwlock for all ttys in ppp line discipline.
+ */
+static rwlock_t disc_data_lock = RW_LOCK_UNLOCKED;
+
+static struct asyncppp *ap_get(struct tty_struct *tty)
+{
+	struct asyncppp *ap;
+
+	read_lock(&disc_data_lock);
+	ap = tty->disc_data;
+	if (ap != NULL)
+		atomic_inc(&ap->refcnt);
+	read_unlock(&disc_data_lock);
+	return ap;
+}
+
+static void ap_put(struct asyncppp *ap)
+{
+	if (atomic_dec_and_test(&ap->refcnt))
+		up(&ap->dead_sem);
+}
+
 /*
  * Called when a tty is put into PPP line discipline.
  */
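The helpers above establish the calling convention used through the rest of this file: every dereference of tty->disc_data is bracketed by ap_get()/ap_put(). A sketch of a caller (the function name here is hypothetical; the real entry points appear in the hunks below):

	/* Hypothetical caller: take a counted reference before touching
	 * the state, drop it afterwards. The atomic_inc in ap_get()
	 * happens while disc_data_lock is held for reading, so the
	 * refcount cannot reach zero (and the struct cannot be freed)
	 * between fetching the pointer and taking the reference. */
	static void ppp_asynctty_example(struct tty_struct *tty)
	{
		struct asyncppp *ap = ap_get(tty);

		if (ap == 0)
			return;		/* ldisc already closed under us */
		/* ... use *ap safely here ... */
		ap_put(ap);		/* may wake a close sleeping on dead_sem */
	}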
@@ -135,6 +166,9 @@ ppp_asynctty_open(struct tty_struct *tty)
	ap->olim = ap->obuf;
	ap->lcp_fcs = -1;
+	atomic_set(&ap->refcnt, 1);
+	init_MUTEX_LOCKED(&ap->dead_sem);
+
	ap->chan.private = ap;
	ap->chan.ops = &async_ops;
	ap->chan.mtu = PPP_MRU;
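The initial values are significant: the refcount starts at 1 to stand for the tty's own reference (dropped in close), and init_MUTEX_LOCKED leaves dead_sem with a count of zero, so the first down() on it blocks. A minimal userspace analogue of the same setup, assuming POSIX semaphores and C11 atomics (not code from the patch):

	#include <semaphore.h>
	#include <stdatomic.h>

	static atomic_int refcnt;
	static sem_t dead_sem;

	static void chan_init(void)
	{
		atomic_store(&refcnt, 1);	/* the owner's reference */
		sem_init(&dead_sem, 0, 0);	/* count 0: first wait blocks,
						   like init_MUTEX_LOCKED */
	}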
@@ -155,19 +189,34 @@ ppp_asynctty_open(struct tty_struct *tty)
 /*
  * Called when the tty is put into another line discipline
- * or it hangs up.
- * We assume that while we are in this routine, the tty layer
- * won't call any of the other line discipline entries for the
- * same tty.
+ * or it hangs up. We have to wait for any cpu currently
+ * executing in any of the other ppp_asynctty_* routines to
+ * finish before we can call ppp_unregister_channel and free
+ * the asyncppp struct. This routine must be called from
+ * process context, not interrupt or softirq context.
  */
 static void
 ppp_asynctty_close(struct tty_struct *tty)
 {
-	struct asyncppp *ap = tty->disc_data;
+	struct asyncppp *ap;

+	write_lock(&disc_data_lock);
+	ap = tty->disc_data;
+	tty->disc_data = 0;
+	write_unlock(&disc_data_lock);
	if (ap == 0)
		return;
-	tty->disc_data = 0;
+
+	/*
+	 * We have now ensured that nobody can start using ap from now
+	 * on, but we have to wait for all existing users to finish.
+	 * Note that ppp_unregister_channel ensures that no calls to
+	 * our channel ops (i.e. ppp_async_send/ioctl) are in progress
+	 * by the time it returns.
+	 */
+	if (!atomic_dec_and_test(&ap->refcnt))
+		down(&ap->dead_sem);
+
	ppp_unregister_channel(&ap->chan);
	if (ap->rpkt != 0)
		kfree_skb(ap->rpkt);
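Taken together, close retracts the pointer under the write lock, drops the initial reference, and, if any other cpu still holds a reference, sleeps on dead_sem until the last ap_put() posts it. A runnable userspace analogue of the whole handshake may help; it assumes POSIX threads and semaphores, and the names are illustrative rather than taken from the kernel:

	#include <pthread.h>
	#include <semaphore.h>
	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>

	static pthread_rwlock_t disc_data_lock = PTHREAD_RWLOCK_INITIALIZER;
	static atomic_int refcnt;
	static sem_t dead_sem;
	static int *disc_data;			/* stands in for tty->disc_data */

	static int *ap_get(void)
	{
		int *ap;

		pthread_rwlock_rdlock(&disc_data_lock);
		ap = disc_data;
		if (ap != NULL)
			atomic_fetch_add(&refcnt, 1);
		pthread_rwlock_unlock(&disc_data_lock);
		return ap;
	}

	static void ap_put(void)
	{
		if (atomic_fetch_sub(&refcnt, 1) == 1)
			sem_post(&dead_sem);	/* last user wakes the closer */
	}

	static void *user(void *arg)
	{
		int *ap = ap_get();

		if (ap != NULL) {		/* NULL: already closed */
			printf("using %d\n", *ap);
			ap_put();
		}
		return NULL;
	}

	int main(void)
	{
		pthread_t t;
		int *ap;

		disc_data = malloc(sizeof *disc_data);
		*disc_data = 42;
		atomic_store(&refcnt, 1);	/* the closer's reference */
		sem_init(&dead_sem, 0, 0);
		pthread_create(&t, NULL, user, NULL);

		/* close: retract the pointer, then wait out existing users */
		pthread_rwlock_wrlock(&disc_data_lock);
		ap = disc_data;
		disc_data = NULL;
		pthread_rwlock_unlock(&disc_data_lock);
		if (atomic_fetch_sub(&refcnt, 1) != 1)
			sem_wait(&dead_sem);	/* sleep until the last ap_put() */
		free(ap);			/* provably the sole owner now */
		pthread_join(t, NULL);
		return 0;
	}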
@@ -203,9 +252,11 @@ static int
 ppp_asynctty_ioctl(struct tty_struct *tty, struct file *file,
		    unsigned int cmd, unsigned long arg)
 {
-	struct asyncppp *ap = tty->disc_data;
+	struct asyncppp *ap = ap_get(tty);
	int err, val;

+	if (ap == 0)
+		return -ENXIO;
	err = -EFAULT;
	switch (cmd) {
	case PPPIOCGCHAN:
@@ -251,6 +302,7 @@ ppp_asynctty_ioctl(struct tty_struct *tty, struct file *file,
		err = -ENOIOCTLCMD;
	}
+	ap_put(ap);
	return err;
 }
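One user-visible effect of the ap_get() conversion: if the line discipline has already been shut down, the ioctl now fails cleanly with ENXIO instead of racing with the close path. From userspace, a sketch of what a pppd-like caller does (PPPIOCGCHAN is the real ioctl; the helper function is illustrative):

	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/if_ppp.h>

	int get_ppp_channel(int ttyfd)
	{
		int chindex;

		if (ioctl(ttyfd, PPPIOCGCHAN, &chindex) < 0) {
			perror("ioctl(PPPIOCGCHAN)");	/* ENXIO: ldisc gone */
			return -1;
		}
		return chindex;
	}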
@@ -271,13 +323,14 @@ static void
 ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
		      char *flags, int count)
 {
-	struct asyncppp *ap = tty->disc_data;
+	struct asyncppp *ap = ap_get(tty);

	if (ap == 0)
		return;
	spin_lock_bh(&ap->recv_lock);
	ppp_async_input(ap, buf, flags, count);
	spin_unlock_bh(&ap->recv_lock);
+	ap_put(ap);
	if (test_and_clear_bit(TTY_THROTTLED, &tty->flags)
	    && tty->driver.unthrottle)
		tty->driver.unthrottle(tty);
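recv_lock is taken with the _bh variants because the receive state is also reachable from softirq context. The general rule, sketched in isolation (not code from the patch): process-context users of such a lock must disable local bottom halves while holding it, otherwise a softirq arriving on the same cpu could spin forever on a lock its own cpu already holds.

	static spinlock_t recv_lock = SPIN_LOCK_UNLOCKED;

	static void process_context_user(void)
	{
		spin_lock_bh(&recv_lock);	/* lock + disable local BHs */
		/* ... touch state shared with softirq-context code ... */
		spin_unlock_bh(&recv_lock);
	}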
@@ -286,13 +339,14 @@ ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
 static void
 ppp_asynctty_wakeup(struct tty_struct *tty)
 {
-	struct asyncppp *ap = tty->disc_data;
+	struct asyncppp *ap = ap_get(tty);

	clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
	if (ap == 0)
		return;
	if (ppp_async_push(ap))
		ppp_output_wakeup(&ap->chan);
+	ap_put(ap);
 }
drivers/net/ppp_deflate.c:
@@ -35,16 +35,12 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/init.h>
-#include <linux/smp_lock.h>
 #include <linux/ppp_defs.h>
 #include <linux/ppp-comp.h>
 #include <linux/zlib.h>

-static spinlock_t comp_free_list_lock = SPIN_LOCK_UNLOCKED;
-static LIST_HEAD(comp_free_list);
-
 /*
  * State for a Deflate (de)compressor.
  */
@@ -56,7 +52,6 @@ struct ppp_deflate_state {
	int		debug;
	z_stream	strm;
	struct compstat stats;
-	struct list_head list;
 };

 #define DEFLATE_OVHD	2	/* Deflate overhead/packet */
@@ -81,27 +76,6 @@ static void z_comp_reset __P((void *state));
 static void z_decomp_reset __P((void *state));
 static void z_comp_stats __P((void *state, struct compstat *stats));

-static void z_comp_delayedfree(void *arg)
-{
-	struct ppp_deflate_state *state;
-
-	spin_lock_bh(&comp_free_list_lock);
-	while(!list_empty(&comp_free_list)) {
-		state = list_entry(comp_free_list.next, struct ppp_deflate_state, list);
-		list_del(&state->list);
-		spin_unlock_bh(&comp_free_list_lock);
-		if (state->strm.workspace)
-			vfree(state->strm.workspace);
-		kfree(state);
-		spin_lock_bh(&comp_free_list_lock);
-	}
-	spin_unlock_bh(&comp_free_list_lock);
-}
-
-static struct tq_struct z_comp_task = {
-	routine: z_comp_delayedfree
-};
-
 static void
 z_comp_free(arg)
     void *arg;
@@ -110,12 +84,9 @@ z_comp_free(arg)
	if (state) {
		zlib_deflateEnd(&state->strm);
-		spin_lock_bh(&comp_free_list_lock);
-		list_add(&state->list, &comp_free_list);
-		spin_unlock_bh(&comp_free_list_lock);
-		schedule_task(&z_comp_task);
+		if (state->strm.workspace)
+			vfree(state->strm.workspace);
+		kfree(state);
		MOD_DEC_USE_COUNT;
	}
 }
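The deleted machinery deferred the actual freeing to a task queue, apparently because z_comp_free() could be reached from softirq context, where vfree() is not allowed. With the generic-layer rework in this patch (the collapsed ppp_generic.c diff), compressor state is only torn down from process context, so it can be freed inline. The function as it reads after the patch, assembled from the hunk above (the cast/declaration line is implied context):

	static void
	z_comp_free(arg)
	    void *arg;
	{
		struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;

		if (state) {
			zlib_deflateEnd(&state->strm);
			if (state->strm.workspace)
				vfree(state->strm.workspace);	/* process context: ok */
			kfree(state);
			MOD_DEC_USE_COUNT;
		}
	}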
@@ -616,8 +587,6 @@ void __exit deflate_cleanup(void)
 {
	ppp_unregister_compressor(&ppp_deflate);
	ppp_unregister_compressor(&ppp_deflate_draft);
-	/* Ensure that any deflate state pending free is actually freed */
-	flush_scheduled_tasks();
 }

 module_init(deflate_init);
drivers/net/ppp_generic.c: (this diff is collapsed in the web view)

drivers/net/ppp_synctty.c:
@@ -25,11 +25,11 @@
  * the generic PPP layer to give it frames to send and to process
  * received frames. It implements the PPP line discipline.
  *
- * Part of the code in this driver was inspired by the old sync-only
+ * Part of the code in this driver was inspired by the old async-only
  * PPP driver, written by Michael Callahan and Al Longyear, and
  * subsequently hacked by Paul Mackerras.
  *
- * ==FILEVERSION 20000322==
+ * ==FILEVERSION 20020125==
  */

 #include <linux/module.h>
@@ -41,10 +41,12 @@
 #include <linux/ppp_defs.h>
 #include <linux/if_ppp.h>
 #include <linux/ppp_channel.h>
+#include <linux/spinlock.h>
 #include <linux/init.h>
 #include <asm/uaccess.h>
+#include <asm/semaphore.h>

-#define PPP_VERSION "2.4.1"
+#define PPP_VERSION "2.4.2"

 /* Structure for storing local state. */
 struct syncppp {
@@ -65,6 +67,8 @@ struct syncppp {
	struct sk_buff *rpkt;
+	atomic_t refcnt;
+	struct semaphore dead_sem;
	struct ppp_channel chan;	/* interface to generic ppp layer */
 };
@@ -161,7 +165,36 @@ ppp_print_buffer (const char *name, const __u8 *buf, int count)
  */

 /*
- * Called when a tty is put into line discipline.
+ * We have a potential race on dereferencing tty->disc_data,
+ * because the tty layer provides no locking at all - thus one
+ * cpu could be running ppp_synctty_receive while another
+ * calls ppp_synctty_close, which zeroes tty->disc_data and
+ * frees the memory that ppp_synctty_receive is using. The best
+ * way to fix this is to use a rwlock in the tty struct, but for now
+ * we use a single global rwlock for all ttys in ppp line discipline.
+ */
+static rwlock_t disc_data_lock = RW_LOCK_UNLOCKED;
+
+static struct syncppp *sp_get(struct tty_struct *tty)
+{
+	struct syncppp *ap;
+
+	read_lock(&disc_data_lock);
+	ap = tty->disc_data;
+	if (ap != NULL)
+		atomic_inc(&ap->refcnt);
+	read_unlock(&disc_data_lock);
+	return ap;
+}
+
+static void sp_put(struct syncppp *ap)
+{
+	if (atomic_dec_and_test(&ap->refcnt))
+		up(&ap->dead_sem);
+}
+
+/*
+ * Called when a tty is put into sync-PPP line discipline.
  */
 static int
 ppp_sync_open(struct tty_struct *tty)
@@ -185,6 +218,9 @@ ppp_sync_open(struct tty_struct *tty)
	ap->xaccm[3] = 0x60000000U;
	ap->raccm = ~0U;
+	atomic_set(&ap->refcnt, 1);
+	init_MUTEX_LOCKED(&ap->dead_sem);
+
	ap->chan.private = ap;
	ap->chan.ops = &sync_ops;
	ap->chan.mtu = PPP_MRU;
@@ -206,16 +242,34 @@ ppp_sync_open(struct tty_struct *tty)
 /*
  * Called when the tty is put into another line discipline
- * (or it hangs up).
+ * or it hangs up. We have to wait for any cpu currently
+ * executing in any of the other ppp_synctty_* routines to
+ * finish before we can call ppp_unregister_channel and free
+ * the syncppp struct. This routine must be called from
+ * process context, not interrupt or softirq context.
  */
 static void
 ppp_sync_close(struct tty_struct *tty)
 {
-	struct syncppp *ap = tty->disc_data;
+	struct syncppp *ap;

+	write_lock(&disc_data_lock);
+	ap = tty->disc_data;
+	tty->disc_data = 0;
+	write_unlock(&disc_data_lock);
	if (ap == 0)
		return;
-	tty->disc_data = 0;
+
+	/*
+	 * We have now ensured that nobody can start using ap from now
+	 * on, but we have to wait for all existing users to finish.
+	 * Note that ppp_unregister_channel ensures that no calls to
+	 * our channel ops (i.e. ppp_sync_send/ioctl) are in progress
+	 * by the time it returns.
+	 */
+	if (!atomic_dec_and_test(&ap->refcnt))
+		down(&ap->dead_sem);
+
	ppp_unregister_channel(&ap->chan);
	if (ap->rpkt != 0)
		kfree_skb(ap->rpkt);
@@ -251,9 +305,11 @@ static int
 ppp_synctty_ioctl(struct tty_struct *tty, struct file *file,
		   unsigned int cmd, unsigned long arg)
 {
-	struct syncppp *ap = tty->disc_data;
+	struct syncppp *ap = sp_get(tty);
	int err, val;

+	if (ap == 0)
+		return -ENXIO;
	err = -EFAULT;
	switch (cmd) {
	case PPPIOCGCHAN:
@@ -299,6 +355,7 @@ ppp_synctty_ioctl(struct tty_struct *tty, struct file *file,
		err = -ENOIOCTLCMD;
	}
+	sp_put(ap);
	return err;
 }
@@ -319,13 +376,14 @@ static void
 ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
		  char *flags, int count)
 {
-	struct syncppp *ap = tty->disc_data;
+	struct syncppp *ap = sp_get(tty);

	if (ap == 0)
		return;
	spin_lock_bh(&ap->recv_lock);
	ppp_sync_input(ap, buf, flags, count);
	spin_unlock_bh(&ap->recv_lock);
+	sp_put(ap);
	if (test_and_clear_bit(TTY_THROTTLED, &tty->flags)
	    && tty->driver.unthrottle)
		tty->driver.unthrottle(tty);
@@ -334,13 +392,14 @@ ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
 static void
 ppp_sync_wakeup(struct tty_struct *tty)
 {
-	struct syncppp *ap = tty->disc_data;
+	struct syncppp *ap = sp_get(tty);

	clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
	if (ap == 0)
		return;
	if (ppp_sync_push(ap))
		ppp_output_wakeup(&ap->chan);
+	sp_put(ap);
 }
drivers/net/pppoe.c:
@@ -5,7 +5,7 @@
  * PPPoE --- PPP over Ethernet (RFC 2516)
  *
  *
- * Version:	0.6.9
+ * Version:	0.6.10
  *
  * 220102 :	Fix module use count on failure in pppoe_create, pppox_sk -acme
  * 030700 :	Fixed connect logic to allow for disconnect.
@@ -32,6 +32,9 @@
  *		a memory leak.
  * 081001 :	Misc. cleanup (licence string, non-blocking, prevent
  *		reference of device on close).
+ * 121301 :	New ppp channels interface; cannot unregister a channel
+ *		from interrupts. Thus, we mark the socket as a ZOMBIE
+ *		and do the unregistration later.
  *
  * Author:	Michal Ostrowski <mostrows@speakeasy.net>
  * Contributors:
@@ -87,11 +90,11 @@ struct proto_ops pppoe_ops;
 #if 0
-#define CHECKPTR(x,y) { if (!(x) && pppoe_debug &7 ){ printk(KERN_CRIT "PPPoE Invalid pointer : %s , %p\n",#x,(x)); error=-EINVAL; goto y; }}
-#define DEBUG(s,args...) if( pppoe_debug & (s) ) printk(KERN_CRIT args );
+#define CHECKPTR(x,y) do { if (!(x) && pppoe_debug &7 ){ printk(KERN_CRIT "PPPoE Invalid pointer : %s , %p\n",#x,(x)); error=-EINVAL; goto y; }} while (0)
+#define DEBUG(s,args...) do { if( pppoe_debug & (s) ) printk(KERN_CRIT args ); } while (0)
 #else
-#define CHECKPTR(x,y) do {} while (0);
-#define DEBUG(s,args...) do { } while (0);
+#define CHECKPTR(x,y) do { } while (0)
+#define DEBUG(s,args...) do { } while (0)
 #endif
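The macro changes apply the classic do { ... } while (0) idiom. A function-like macro should expand to exactly one statement: the old bare-block and bare-if versions, and the versions with a trailing semicolon, all misbehave when the macro is the body of an if that has an else. A small standalone demonstration (not from the patch):

	#define BAD(s)  do { } while (0);	/* trailing ';': two statements */
	#define GOOD(s) do { } while (0)	/* behaves as one statement */

	void f(int verbose)
	{
		if (verbose)
			GOOD("x");	/* with BAD("x") the following 'else'  */
		else			/* no longer parses: the macro's extra */
			GOOD("y");	/* ';' terminates the 'if' early       */
	}

The old non-empty DEBUG was riskier still: a bare if (pppoe_debug & (s)) printk(...) silently captures a following else (the dangling-else problem) instead of failing to compile.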
@@ -275,10 +278,10 @@ static void pppoe_flush_dev(struct net_device *dev)
		lock_sock(sk);

-		if (sk->state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
+		if (sk->state & (PPPOX_CONNECTED|PPPOX_BOUND)){
			pppox_unbind_sock(sk);
			dev_put(dev);
-			sk->state = PPPOX_DEAD;
+			sk->state = PPPOX_ZOMBIE;
			sk->state_change(sk);
		}
@@ -441,8 +444,10 @@ static int pppoe_disc_rcv(struct sk_buff *skb,
	 * one socket family type, we cannot (easily) distinguish
	 * what kind of SKB it is during backlog rcv.
	 */
-	if (sk->lock.users == 0)
+	if (sk->lock.users == 0) {
+		sk->state = PPPOX_ZOMBIE;
		pppox_unbind_sock(sk);
+	}

	bh_unlock_sock(sk);
	sock_put(sk);
@@ -719,7 +724,7 @@ int pppoe_ioctl(struct socket *sock, unsigned int cmd,
		struct pppox_opt *relay_po;

		err = -EBUSY;
-		if (sk->state & PPPOX_BOUND)
+		if (sk->state & (PPPOX_BOUND|PPPOX_ZOMBIE|PPPOX_DEAD))
			break;

		err = -ENOTCONN;
include/linux/if_pppox.h:
@@ -147,13 +147,14 @@ extern void pppox_unbind_sock(struct sock *sk);/* delete ppp-channel binding */
 extern int pppox_channel_ioctl(struct ppp_channel *pc, unsigned int cmd,
			       unsigned long arg);

-/* PPPoE socket states */
+/* PPPoX socket states */
 enum {
    PPPOX_NONE		= 0,	/* initial state */
    PPPOX_CONNECTED	= 1,	/* connection established ==TCP_ESTABLISHED */
    PPPOX_BOUND		= 2,	/* bound to ppp device */
    PPPOX_RELAY		= 4,	/* forwarding is enabled */
-    PPPOX_DEAD		= 8
+    PPPOX_ZOMBIE	= 8,	/* dead, but still bound to ppp device */
+    PPPOX_DEAD		= 16	/* dead, useless, please clean me up! */
 };

 extern struct ppp_channel_ops pppoe_chan_ops;
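The states are powers of two so they compose as a bitmask; that is what lets the PPPIOCSFWD check in pppoe.c above reject bound, zombie and dead sockets with a single test. PPPOX_ZOMBIE records "connection dead but channel still registered", which matters because unregistering the channel must wait for process context. A standalone demonstration of the mask test:

	#include <stdio.h>

	enum {
		PPPOX_NONE	= 0,	/* initial state */
		PPPOX_CONNECTED	= 1,
		PPPOX_BOUND	= 2,
		PPPOX_RELAY	= 4,
		PPPOX_ZOMBIE	= 8,	/* dead, but still bound */
		PPPOX_DEAD	= 16
	};

	int main(void)
	{
		int state = PPPOX_ZOMBIE;

		/* one test covers three distinct unusable states */
		if (state & (PPPOX_BOUND | PPPOX_ZOMBIE | PPPOX_DEAD))
			printf("socket not usable as a relay target\n");
		return 0;
	}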