Commit f2a5155d authored by Paul Mackerras

Merge samba.org:/home/paulus/kernel/linux-2.5

into samba.org:/home/paulus/kernel/for-linus-ppc
parents 81803bc1 9325c684
......@@ -230,6 +230,7 @@ static unsigned long set_mtrr_state(u32 deftype_lo, u32 deftype_hi)
static u32 cr4 = 0;
static u32 deftype_lo, deftype_hi;
static spinlock_t set_atomicity_lock = SPIN_LOCK_UNLOCKED;
static void prepare_set(void)
{
......@@ -238,6 +239,7 @@ static void prepare_set(void)
/* Note that this is not ideal, since the cache is only flushed/disabled
for this CPU while the MTRRs are changed, but changing this requires
more invasive changes to the way the kernel boots */
spin_lock(&set_atomicity_lock);
/* Save value of CR4 and clear Page Global Enable (bit 7) */
if ( cpu_has_pge ) {
......@@ -273,7 +275,7 @@ static void post_set(void)
/* Restore value of CR4 */
if ( cpu_has_pge )
write_cr4(cr4);
spin_unlock(&set_atomicity_lock);
}
static void generic_set_all(void)
......
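The new set_atomicity_lock serializes the whole prepare_set()/post_set() window, since the statics hoisted above it (cr4, deftype_lo, deftype_hi) are shared by every CPU that rewrites the MTRRs. A minimal sketch of the resulting bracket, assuming a hypothetical caller set_mtrr_one() that is not part of this patch:

static void set_mtrr_one(unsigned int reg, unsigned long base,
			 unsigned long size, unsigned int type)
{
	prepare_set();	/* takes set_atomicity_lock, flushes/disables cache, clears PGE */
	/* ... write the MTRR base/mask MSRs for 'reg' here ... */
	post_set();	/* restores deftype and CR4, drops the lock */
}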
......@@ -608,16 +608,17 @@ static inline void free_vm86_irq(int irqnumber)
static inline int task_valid(struct task_struct *tsk)
{
struct task_struct *p;
struct task_struct *g, *p;
int ret = 0;
read_lock(&tasklist_lock);
for_each_task(p) {
do_each_thread(g, p)
if ((p == tsk) && (p->sig)) {
ret = 1;
break;
}
goto out;
}
while_each_thread(g, p);
out:
read_unlock(&tasklist_lock);
return ret;
}
......
......@@ -286,7 +286,7 @@ int __init hvc_init(void)
panic("Couldn't register hvc console driver\n");
if (hvc_driver.num > 0)
kernel_thread(khvcd, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGNAL);
kernel_thread(khvcd, NULL, CLONE_KERNEL);
return 0;
}
......
......@@ -299,7 +299,7 @@ static void send_sig_all(int sig)
{
struct task_struct *p;
for_each_task(p) {
for_each_process(p) {
if (p->mm && p->pid != 1)
/* Not swapper, init nor kernel thread */
force_sig(sig, p);
......
......@@ -496,7 +496,7 @@ void do_tty_hangup(void *data)
}
read_lock(&tasklist_lock);
for_each_task(p) {
for_each_process(p) {
if ((tty->session > 0) && (p->session == tty->session) &&
p->leader) {
send_sig(SIGHUP,p,1);
......@@ -598,7 +598,7 @@ void disassociate_ctty(int on_exit)
tty->pgrp = -1;
read_lock(&tasklist_lock);
for_each_task(p)
for_each_process(p)
if (p->session == current->session)
p->tty = NULL;
read_unlock(&tasklist_lock);
......@@ -1223,7 +1223,7 @@ static void release_dev(struct file * filp)
struct task_struct *p;
read_lock(&tasklist_lock);
for_each_task(p) {
for_each_process(p) {
if (p->tty == tty || (o_tty && p->tty == o_tty))
p->tty = NULL;
}
......@@ -1561,7 +1561,7 @@ static int tiocsctty(struct tty_struct *tty, int arg)
struct task_struct *p;
read_lock(&tasklist_lock);
for_each_task(p)
for_each_process(p)
if (p->tty == tty)
p->tty = NULL;
read_unlock(&tasklist_lock);
......@@ -1834,7 +1834,7 @@ static void __do_SAK(void *arg)
if (tty->driver.flush_buffer)
tty->driver.flush_buffer(tty);
read_lock(&tasklist_lock);
for_each_task(p) {
for_each_process(p) {
if ((p->tty == tty) ||
((session > 0) && (p->session == session))) {
printk(KERN_NOTICE "SAK: killed process %d"
......
......@@ -1299,7 +1299,7 @@ static int __init pnpbios_thread_init(void)
{
#ifdef CONFIG_HOTPLUG
init_completion(&unload_sem);
if(kernel_thread(pnp_dock_thread, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGNAL)>0)
if (kernel_thread(pnp_dock_thread, NULL, CLONE_KERNEL) > 0)
unloading = 0;
#endif
return 0;
......
......@@ -40,6 +40,7 @@
#define __NO_VERSION__
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/proc_fs.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
......@@ -493,50 +494,149 @@ static int exec_mmap(struct mm_struct *mm)
return 0;
}
static struct dentry *clean_proc_dentry(struct task_struct *p)
{
struct dentry *proc_dentry = p->proc_dentry;
if (proc_dentry) {
spin_lock(&dcache_lock);
if (!list_empty(&proc_dentry->d_hash)) {
dget_locked(proc_dentry);
list_del_init(&proc_dentry->d_hash);
} else
proc_dentry = NULL;
spin_unlock(&dcache_lock);
}
return proc_dentry;
}
static inline void put_proc_dentry(struct dentry *dentry)
{
if (dentry) {
shrink_dcache_parent(dentry);
dput(dentry);
}
}
/*
* This function makes sure the current process has its own signal table,
* so that flush_signal_handlers can later reset the handlers without
* disturbing other processes. (Other processes might share the signal
* table via the CLONE_SIGNAL option to clone().)
* table via the CLONE_SIGHAND option to clone().)
*/
static inline int make_private_signals(void)
static inline int de_thread(struct signal_struct *oldsig)
{
struct signal_struct * newsig;
remove_thread_group(current, current->sig);
struct signal_struct *newsig;
int count;
if (atomic_read(&current->sig->count) <= 1)
return 0;
newsig = kmem_cache_alloc(sigact_cachep, GFP_KERNEL);
if (newsig == NULL)
if (!newsig)
return -ENOMEM;
if (list_empty(&current->thread_group))
goto out;
/*
* Kill all other threads in the thread group:
*/
spin_lock_irq(&oldsig->siglock);
if (oldsig->group_exit) {
/*
* Another group action in progress, just
* return so that the signal is processed.
*/
spin_unlock_irq(&oldsig->siglock);
kmem_cache_free(sigact_cachep, newsig);
return -EAGAIN;
}
oldsig->group_exit = 1;
__broadcast_thread_group(current, SIGKILL);
/*
* Account for the thread group leader hanging around:
*/
count = 2;
if (current->pid == current->tgid)
count = 1;
while (atomic_read(&oldsig->count) > count) {
oldsig->group_exit_task = current;
current->state = TASK_UNINTERRUPTIBLE;
spin_unlock_irq(&oldsig->siglock);
schedule();
spin_lock_irq(&oldsig->siglock);
if (oldsig->group_exit_task)
BUG();
}
spin_unlock_irq(&oldsig->siglock);
/*
* At this point all other threads have exited, all we have to
* do is to wait for the thread group leader to become inactive,
* and to assume its PID:
*/
if (current->pid != current->tgid) {
struct task_struct *leader = current->group_leader;
struct dentry *proc_dentry1, *proc_dentry2;
unsigned long state;
wait_task_inactive(leader);
write_lock_irq(&tasklist_lock);
proc_dentry1 = clean_proc_dentry(current);
proc_dentry2 = clean_proc_dentry(leader);
if (leader->tgid != current->tgid)
BUG();
if (current->pid == current->tgid)
BUG();
/*
* An exec() starts a new thread group with the
* TGID of the previous thread group. Rehash the
* two threads with a switched PID, and release
* the former thread group leader:
*/
unhash_pid(current);
unhash_pid(leader);
leader->pid = leader->tgid = current->pid;
current->pid = current->tgid;
hash_pid(current);
hash_pid(leader);
list_add_tail(&current->tasks, &init_task.tasks);
state = leader->state;
write_unlock_irq(&tasklist_lock);
if (state == TASK_ZOMBIE)
release_task(leader);
put_proc_dentry(proc_dentry1);
put_proc_dentry(proc_dentry2);
}
out:
spin_lock_init(&newsig->siglock);
atomic_set(&newsig->count, 1);
newsig->group_exit = 0;
newsig->group_exit_code = 0;
newsig->group_exit_task = NULL;
memcpy(newsig->action, current->sig->action, sizeof(newsig->action));
init_sigpending(&newsig->shared_pending);
remove_thread_group(current, current->sig);
spin_lock_irq(&current->sigmask_lock);
current->sig = newsig;
spin_unlock_irq(&current->sigmask_lock);
return 0;
}
/*
* If make_private_signals() made a copy of the signal table, decrement the
* refcount of the original table, and free it if necessary.
* We don't do that in make_private_signals() so that we can back off
* in flush_old_exec() if an error occurs after calling make_private_signals().
*/
static inline void release_old_signals(struct signal_struct * oldsig)
{
if (current->sig == oldsig)
return;
if (atomic_dec_and_test(&oldsig->count))
kmem_cache_free(sigact_cachep, oldsig);
if (!list_empty(&current->thread_group))
BUG();
if (current->tgid != current->pid)
BUG();
return 0;
}
/*
......@@ -572,44 +672,27 @@ static inline void flush_old_files(struct files_struct * files)
write_unlock(&files->file_lock);
}
/*
* An execve() will automatically "de-thread" the process.
* - if a master thread (PID==TGID) is doing this, then all subsidiary threads
* will be killed (otherwise there will end up being two independent thread
* groups with the same TGID).
* - if a subsidiary thread is doing this, then it just leaves the thread group
*/
static void de_thread(struct task_struct *tsk)
{
if (!list_empty(&tsk->thread_group))
BUG();
/* An exec() starts a new thread group: */
tsk->tgid = tsk->pid;
}
int flush_old_exec(struct linux_binprm * bprm)
{
char * name;
int i, ch, retval;
struct signal_struct * oldsig;
/*
* Make sure we have a private signal table
*/
oldsig = current->sig;
retval = make_private_signals();
if (retval) goto flush_failed;
struct signal_struct * oldsig = current->sig;
/*
* Release all of the old mmap stuff
*/
retval = exec_mmap(bprm->mm);
if (retval) goto mmap_failed;
if (retval)
goto mmap_failed;
/*
* Make sure we have a private signal table and that
* we are unassociated from the previous thread group.
*/
retval = de_thread(oldsig);
if (retval)
goto flush_failed;
/* This is the point of no return */
de_thread(current);
release_old_signals(oldsig);
current->sas_ss_sp = current->sas_ss_size = 0;
......
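Because the hunk above interleaves removed and added lines, a hedged reconstruction of the new flush_old_exec() opening may help: exec_mmap() now runs first, and de_thread() replaces the old make_private_signals()/release_old_signals() pair:

int flush_old_exec(struct linux_binprm * bprm)
{
	char * name;
	int i, ch, retval;
	struct signal_struct * oldsig = current->sig;

	/* Release all of the old mmap stuff */
	retval = exec_mmap(bprm->mm);
	if (retval)
		goto mmap_failed;

	/*
	 * Make sure we have a private signal table and that
	 * we are unassociated from the previous thread group.
	 */
	retval = de_thread(oldsig);
	if (retval)
		goto flush_failed;

	/* This is the point of no return */
	/* ... remainder of flush_old_exec() unchanged ... */
}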
......@@ -493,7 +493,7 @@ void send_sigio(struct fown_struct *fown, int fd, int band)
send_sigio_to_task(p, fown, fd, band);
goto out_unlock_task;
}
for_each_task(p) {
for_each_process(p) {
int match = p->pid;
if (pid < 0)
match = -p->pgrp;
......@@ -531,7 +531,7 @@ int send_sigurg(struct fown_struct *fown)
send_sigurg_to_task(p, fown);
goto out_unlock_task;
}
for_each_task(p) {
for_each_process(p) {
int match = p->pid;
if (pid < 0)
match = -p->pgrp;
......
......@@ -188,7 +188,7 @@ nlmclnt_recovery(struct nlm_host *host, u32 newstate)
nlmclnt_prepare_reclaim(host, newstate);
nlm_get_host(host);
MOD_INC_USE_COUNT;
kernel_thread(reclaimer, host, CLONE_SIGNAL);
kernel_thread(reclaimer, host, CLONE_KERNEL);
}
}
......
......@@ -1588,7 +1588,7 @@ int fcntl_setlk64(struct file *filp, unsigned int cmd, struct flock64 *l)
for (;;) {
error = posix_lock_file(filp, file_lock);
if ((error != -EAGAIN) || (cmd == F_SETLK))
if ((error != -EAGAIN) || (cmd == F_SETLK64))
break;
error = wait_event_interruptible(file_lock->fl_wait,
!file_lock->fl_next);
......
......@@ -883,11 +883,11 @@ asmlinkage long sys_mount(char * dev_name, char * dir_name, char * type,
static void chroot_fs_refs(struct nameidata *old_nd, struct nameidata *new_nd)
{
struct task_struct *p;
struct task_struct *g, *p;
struct fs_struct *fs;
read_lock(&tasklist_lock);
for_each_task(p) {
do_each_thread(g, p) {
task_lock(p);
fs = p->fs;
if (fs) {
......@@ -900,7 +900,7 @@ static void chroot_fs_refs(struct nameidata *old_nd, struct nameidata *new_nd)
put_fs_struct(fs);
} else
task_unlock(p);
}
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
}
......@@ -1012,7 +1012,7 @@ static void __init init_mount_tree(void)
{
struct vfsmount *mnt;
struct namespace *namespace;
struct task_struct *p;
struct task_struct *g, *p;
mnt = do_kern_mount("rootfs", 0, "rootfs", NULL);
if (IS_ERR(mnt))
......@@ -1028,10 +1028,10 @@ static void __init init_mount_tree(void)
init_task.namespace = namespace;
read_lock(&tasklist_lock);
for_each_task(p) {
do_each_thread(g, p) {
get_namespace(namespace);
p->namespace = namespace;
}
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
set_fs_pwd(current->fs, namespace->root, namespace->root->mnt_root);
......
......@@ -251,7 +251,7 @@ static void check_partition(struct gendisk *hd, struct block_device *bdev)
p[j-1].start_sect = state->parts[j].from;
p[j-1].nr_sects = state->parts[j].size;
#if CONFIG_BLK_DEV_MD
if (!state->parts[j-1].flags)
if (!state->parts[j].flags)
continue;
md_autodetect_dev(bdev->bd_dev+j);
#endif
......
......@@ -1136,7 +1136,7 @@ static int get_pid_list(int index, unsigned int *pids)
index--;
read_lock(&tasklist_lock);
for_each_task(p) {
for_each_process(p) {
int pid = p->pid;
if (!pid)
continue;
......
......@@ -235,7 +235,9 @@ int proc_fill_super(struct super_block *s, void *data, int silent)
* Fixup the root inode's nlink value
*/
read_lock(&tasklist_lock);
for_each_task(p) if (p->pid) root_inode->i_nlink++;
for_each_process(p)
if (p->pid)
root_inode->i_nlink++;
read_unlock(&tasklist_lock);
s->s_root = d_alloc_root(root_inode);
if (!s->s_root)
......
......@@ -51,7 +51,11 @@ struct exec_domain;
#define CLONE_CLEARTID 0x00200000 /* clear the userspace TID */
#define CLONE_DETACHED 0x00400000 /* parent wants no child-exit signal */
#define CLONE_SIGNAL (CLONE_SIGHAND | CLONE_THREAD)
/*
* List of flags we want to share for kernel threads,
* if only because they are not used by them anyway.
*/
#define CLONE_KERNEL (CLONE_FS | CLONE_FILES | CLONE_SIGHAND)
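With the helper in place, every kernel-thread spawn in this commit collapses to one flag set; kswapd below is one of the real call sites converted:

/* before: kernel_thread(kswapd, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGNAL); */
kernel_thread(kswapd, NULL, CLONE_KERNEL);

Note that CLONE_SIGNAL expands to CLONE_SIGHAND | CLONE_THREAD, so the conversion also stops kernel threads from joining the spawning task's thread group; only the signal handler table is still shared.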
/*
* These are the constants used to fake the fixed-point load-average
......@@ -222,6 +226,8 @@ struct signal_struct {
/* thread group exit support */
int group_exit;
int group_exit_code;
struct task_struct *group_exit_task;
};
/*
......@@ -552,6 +558,7 @@ extern int dequeue_signal(struct sigpending *pending, sigset_t *mask, siginfo_t
extern void block_all_signals(int (*notifier)(void *priv), void *priv,
sigset_t *mask);
extern void unblock_all_signals(void);
extern void release_task(struct task_struct * p);
extern int send_sig_info(int, struct siginfo *, struct task_struct *);
extern int force_sig_info(int, struct siginfo *, struct task_struct *);
extern int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp);
......@@ -761,11 +768,13 @@ static inline void remove_wait_queue_locked(wait_queue_head_t *q,
#define add_parent(p, parent) list_add_tail(&(p)->sibling,&(parent)->children)
#define REMOVE_LINKS(p) do { \
if (thread_group_leader(p)) \
list_del_init(&(p)->tasks); \
remove_parent(p); \
} while (0)
#define SET_LINKS(p) do { \
if (thread_group_leader(p)) \
list_add_tail(&(p)->tasks,&init_task.tasks); \
add_parent(p, (p)->parent); \
} while (0)
......@@ -797,11 +806,18 @@ static inline struct task_struct *younger_sibling(struct task_struct *p)
#define next_task(p) list_entry((p)->tasks.next, struct task_struct, tasks)
#define prev_task(p) list_entry((p)->tasks.prev, struct task_struct, tasks)
#define for_each_task(p) \
#define for_each_process(p) \
for (p = &init_task ; (p = next_task(p)) != &init_task ; )
#define for_each_thread(task) \
for (task = next_thread(current) ; task != current ; task = next_thread(task))
/*
* Careful: do_each_thread/while_each_thread is a double loop so
* 'break' will not work as expected - use goto instead.
*/
#define do_each_thread(g, t) \
for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
#define while_each_thread(g, t) \
while ((t = next_thread(t)) != g)
static inline task_t *next_thread(task_t *p)
{
......
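A usage sketch of the pitfall the comment warns about, modeled on the task_valid() conversion earlier in this commit ('target' is a hypothetical task pointer):

struct task_struct *g, *p;
int found = 0;

read_lock(&tasklist_lock);
do_each_thread(g, p) {
	if (p == target) {	/* 'break' here would only leave the inner while */
		found = 1;
		goto out;
	}
} while_each_thread(g, p);
out:
read_unlock(&tasklist_lock);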
......@@ -126,23 +126,10 @@ struct llc_conn_ev_pdu_if {
u8 reason;
};
/* Event interface for timer-generated events */
struct llc_conn_ev_tmr_if {
struct sock *sk;
u32 component_handle;
void *timer_specific;
};
struct llc_conn_ev_rpt_sts_if {
u8 status;
};
union llc_conn_ev_if {
struct llc_conn_ev_simple_if a; /* 'a' for simple, easy ... */
struct llc_conn_ev_prim_if prim;
struct llc_conn_ev_pdu_if pdu;
struct llc_conn_ev_tmr_if tmr;
struct llc_conn_ev_rpt_sts_if rsts; /* report status */
};
struct llc_conn_state_ev {
......
......@@ -17,7 +17,6 @@
struct llc_timer {
struct timer_list timer;
u8 running; /* timer is running or not */
u16 expire; /* timer expire time */
};
......
......@@ -43,23 +43,12 @@ struct llc_stat_ev_prim_if {
struct llc_stat_ev_pdu_if {
u8 reason;
struct sk_buff *skb;
};
struct llc_stat_ev_tmr_if {
void *timer_specific;
};
struct llc_stat_ev_rpt_sts_if {
u8 status;
};
union llc_stat_ev_if {
struct llc_stat_ev_simple_if a; /* 'a' for simple, easy ... */
struct llc_stat_ev_prim_if prim;
struct llc_stat_ev_pdu_if pdu;
struct llc_stat_ev_tmr_if tmr;
struct llc_stat_ev_rpt_sts_if rsts; /* report status */
};
struct llc_station_state_ev {
......
......@@ -122,24 +122,12 @@ extern int llc_establish_connection(struct sock *sk, u8 *lmac,
extern int llc_build_and_send_pkt(struct sock *sk, struct sk_buff *skb);
extern void llc_build_and_send_ui_pkt(struct llc_sap *sap,
struct sk_buff *skb,
struct sockaddr_llc *addr);
u8 *dmac, u8 dsap);
extern void llc_build_and_send_xid_pkt(struct llc_sap *sap,
struct sk_buff *skb,
struct sockaddr_llc *addr);
u8 *dmac, u8 dsap);
extern void llc_build_and_send_test_pkt(struct llc_sap *sap,
struct sk_buff *skb,
struct sockaddr_llc *addr);
u8 *dmac, u8 dsap);
extern int llc_send_disc(struct sock *sk);
/**
* llc_proto_type - return eth protocol for ARP header type
* @arphrd: ARP header type.
*
* Given an ARP header type return the corresponding ethernet protocol.
*/
static __inline__ u16 llc_proto_type(u16 arphrd)
{
return arphrd == ARPHRD_IEEE802_TR ?
htons(ETH_P_TR_802_2) : htons(ETH_P_802_2);
}
#endif /* LLC_IF_H */
......@@ -30,7 +30,6 @@
*
* @state - state of station
* @xid_r_count - XID response PDU counter
* @ack_tmr_running - 1 or 0
* @mac_sa - MAC source address
* @sap_list - list of related SAPs
* @ev_q - events entering state mach.
......@@ -40,7 +39,6 @@ struct llc_station {
u8 state;
u8 xid_r_count;
struct timer_list ack_timer;
u8 ack_tmr_running;
u8 retry_count;
u8 maximum_retry;
u8 mac_sa[6];
......
......@@ -52,20 +52,10 @@ struct llc_sap_ev_pdu_if {
u8 reason;
};
struct llc_sap_ev_tmr_if {
void *timer_specific;
};
struct llc_sap_ev_rpt_sts_if {
u8 status;
};
union llc_sap_ev_if {
struct llc_sap_ev_simple_if a; /* 'a' for simple, easy ... */
struct llc_sap_ev_prim_if prim;
struct llc_sap_ev_pdu_if pdu;
struct llc_sap_ev_tmr_if tmr;
struct llc_sap_ev_rpt_sts_if rsts; /* report status */
};
struct llc_prim_if_block;
......
......@@ -17,7 +17,6 @@
*
* @p_bit - only lowest-order bit used
* @f_bit - only lowest-order bit used
* @req - provided by LLC layer
* @ind - provided by network layer
* @conf - provided by network layer
* @laddr - SAP value in this 'lsap'
......@@ -30,7 +29,6 @@ struct llc_sap {
u8 state;
u8 p_bit;
u8 f_bit;
llc_prim_call_t req;
llc_prim_call_t ind;
llc_prim_call_t conf;
struct llc_prim_if_block llc_ind_prim, llc_cfm_prim;
......
......@@ -371,7 +371,7 @@ static void __init smp_init(void)
static void rest_init(void)
{
kernel_thread(init, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGNAL);
kernel_thread(init, NULL, CLONE_KERNEL);
unlock_kernel();
cpu_idle();
}
......
......@@ -83,13 +83,13 @@ static inline void cap_set_pg(int pgrp, kernel_cap_t *effective,
kernel_cap_t *inheritable,
kernel_cap_t *permitted)
{
task_t *target;
task_t *g, *target;
for_each_task(target) {
do_each_thread(g, target) {
if (target->pgrp != pgrp)
continue;
security_ops->capset_set(target, effective, inheritable, permitted);
}
} while_each_thread(g, target);
}
/*
......@@ -100,13 +100,13 @@ static inline void cap_set_all(kernel_cap_t *effective,
kernel_cap_t *inheritable,
kernel_cap_t *permitted)
{
task_t *target;
task_t *g, *target;
for_each_task(target) {
do_each_thread(g, target) {
if (target == current || target->pid == 1)
continue;
security_ops->capset_set(target, effective, inheritable, permitted);
}
} while_each_thread(g, target);
}
/*
......
......@@ -49,7 +49,7 @@ static struct dentry * __unhash_process(struct task_struct *p)
return proc_dentry;
}
static void release_task(struct task_struct * p)
void release_task(struct task_struct * p)
{
struct dentry *proc_dentry;
......@@ -71,19 +71,19 @@ static void release_task(struct task_struct * p)
write_lock_irq(&tasklist_lock);
__exit_sighand(p);
proc_dentry = __unhash_process(p);
p->parent->cutime += p->utime + p->cutime;
p->parent->cstime += p->stime + p->cstime;
p->parent->cmin_flt += p->min_flt + p->cmin_flt;
p->parent->cmaj_flt += p->maj_flt + p->cmaj_flt;
p->parent->cnswap += p->nswap + p->cnswap;
sched_exit(p);
write_unlock_irq(&tasklist_lock);
if (unlikely(proc_dentry != NULL)) {
shrink_dcache_parent(proc_dentry);
dput(proc_dentry);
}
release_thread(p);
if (p != current) {
current->cmin_flt += p->min_flt + p->cmin_flt;
current->cmaj_flt += p->maj_flt + p->cmaj_flt;
current->cnswap += p->nswap + p->cnswap;
sched_exit(p);
}
put_task_struct(p);
}
......@@ -115,7 +115,7 @@ int session_of_pgrp(int pgrp)
fallback = -1;
read_lock(&tasklist_lock);
for_each_task(p) {
for_each_process(p) {
if (p->session <= 0)
continue;
if (p->pgrp == pgrp) {
......@@ -141,7 +141,7 @@ static int __will_become_orphaned_pgrp(int pgrp, struct task_struct * ignored_ta
{
struct task_struct *p;
for_each_task(p) {
for_each_process(p) {
if ((p == ignored_task) || (p->pgrp != pgrp) ||
(p->state == TASK_ZOMBIE) ||
(p->parent->pid == 1))
......@@ -175,7 +175,7 @@ static inline int __has_stopped_jobs(int pgrp)
int retval = 0;
struct task_struct * p;
for_each_task(p) {
for_each_process(p) {
if (p->pgrp != pgrp)
continue;
if (p->state != TASK_STOPPED)
......@@ -447,11 +447,7 @@ static inline void forget_original_parent(struct task_struct * father)
struct task_struct *p, *reaper = father;
struct list_head *_p;
if (father->exit_signal != -1)
reaper = prev_thread(reaper);
else
reaper = child_reaper;
reaper = father->group_leader;
if (reaper == father)
reaper = child_reaper;
......@@ -681,6 +677,9 @@ asmlinkage long sys_exit(int error_code)
*/
asmlinkage long sys_exit_group(int error_code)
{
unsigned int exit_code = (error_code & 0xff) << 8;
if (!list_empty(&current->thread_group)) {
struct signal_struct *sig = current->sig;
spin_lock_irq(&sig->siglock);
......@@ -691,11 +690,12 @@ asmlinkage long sys_exit_group(int error_code)
do_exit(sig->group_exit_code);
}
sig->group_exit = 1;
sig->group_exit_code = (error_code & 0xff) << 8;
sig->group_exit_code = exit_code;
__broadcast_thread_group(current, SIGKILL);
spin_unlock_irq(&sig->siglock);
}
do_exit(sig->group_exit_code);
do_exit(exit_code);
}
static int eligible_child(pid_t pid, int options, task_t *p)
......@@ -731,7 +731,7 @@ static int eligible_child(pid_t pid, int options, task_t *p)
* in a non-empty thread group:
*/
if (current->tgid != p->tgid && delay_group_leader(p))
return 0;
return 2;
if (security_ops->task_wait(p))
return 0;
......@@ -757,11 +757,16 @@ asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struc
do {
struct task_struct *p;
struct list_head *_p;
int ret;
list_for_each(_p,&tsk->children) {
p = list_entry(_p,struct task_struct,sibling);
if (!eligible_child(pid, options, p))
ret = eligible_child(pid, options, p);
if (!ret)
continue;
flag = 1;
switch (p->state) {
case TASK_STOPPED:
if (!p->exit_code)
......@@ -784,8 +789,11 @@ asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struc
}
goto end_wait4;
case TASK_ZOMBIE:
current->cutime += p->utime + p->cutime;
current->cstime += p->stime + p->cstime;
/*
* Eligible but we cannot release it yet:
*/
if (ret == 2)
continue;
read_unlock(&tasklist_lock);
retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
if (!retval && stat_addr) {
......
......@@ -161,7 +161,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
static int get_pid(unsigned long flags)
{
struct task_struct *p;
struct task_struct *g, *p;
int pid;
if (flags & CLONE_IDLETASK)
......@@ -178,7 +178,7 @@ static int get_pid(unsigned long flags)
next_safe = pid_max;
read_lock(&tasklist_lock);
repeat:
for_each_task(p) {
do_each_thread(g, p) {
if (p->pid == last_pid ||
p->pgrp == last_pid ||
p->session == last_pid) {
......@@ -195,7 +195,8 @@ static int get_pid(unsigned long flags)
next_safe = p->pgrp;
if (p->session > last_pid && next_safe > p->session)
next_safe = p->session;
}
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
}
pid = last_pid;
......@@ -632,6 +633,7 @@ static inline int copy_sighand(unsigned long clone_flags, struct task_struct * t
atomic_set(&sig->count, 1);
sig->group_exit = 0;
sig->group_exit_code = 0;
sig->group_exit_task = NULL;
memcpy(sig->action, current->sig->action, sizeof(sig->action));
sig->curr_target = NULL;
init_sigpending(&sig->shared_pending);
......@@ -671,16 +673,13 @@ static struct task_struct *copy_process(unsigned long clone_flags,
return ERR_PTR(-EINVAL);
/*
* Thread groups must share signals as well:
*/
if (clone_flags & CLONE_THREAD)
clone_flags |= CLONE_SIGHAND;
/*
* Detached threads can only be started up within the thread
* group.
* Thread groups must share signals as well, and detached threads
* can only be started up within the thread group.
*/
if (clone_flags & CLONE_DETACHED)
clone_flags |= CLONE_THREAD;
if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
return ERR_PTR(-EINVAL);
if ((clone_flags & CLONE_DETACHED) && !(clone_flags & CLONE_THREAD))
return ERR_PTR(-EINVAL);
retval = security_ops->task_create(clone_flags);
if (retval)
......
......@@ -479,17 +479,17 @@ void sched_exit(task_t * p)
{
local_irq_disable();
if (p->first_time_slice) {
current->time_slice += p->time_slice;
if (unlikely(current->time_slice > MAX_TIMESLICE))
current->time_slice = MAX_TIMESLICE;
p->parent->time_slice += p->time_slice;
if (unlikely(p->parent->time_slice > MAX_TIMESLICE))
p->parent->time_slice = MAX_TIMESLICE;
}
local_irq_enable();
/*
* If the child was a (relative-) CPU hog then decrease
* the sleep_avg of the parent as well.
*/
if (p->sleep_avg < current->sleep_avg)
current->sleep_avg = (current->sleep_avg * EXIT_WEIGHT +
if (p->sleep_avg < p->parent->sleep_avg)
p->parent->sleep_avg = (p->parent->sleep_avg * EXIT_WEIGHT +
p->sleep_avg) / (EXIT_WEIGHT + 1);
}
......@@ -1838,7 +1838,7 @@ char * render_sigset_t(sigset_t *set, char *buffer)
void show_state(void)
{
task_t *p;
task_t *g, *p;
#if (BITS_PER_LONG == 32)
printk("\n"
......@@ -1850,14 +1850,15 @@ void show_state(void)
printk(" task PC stack pid father child younger older\n");
#endif
read_lock(&tasklist_lock);
for_each_task(p) {
do_each_thread(g, p) {
/*
* reset the NMI-timeout, listing all files on a slow
* console might take a lot of time:
*/
touch_nmi_watchdog();
show_task(p);
}
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
}
......@@ -2054,8 +2055,7 @@ static int migration_call(struct notifier_block *nfb,
case CPU_ONLINE:
printk("Starting migration thread for cpu %li\n",
(long)hcpu);
kernel_thread(migration_thread, hcpu,
CLONE_FS | CLONE_FILES | CLONE_SIGNAL);
kernel_thread(migration_thread, hcpu, CLONE_KERNEL);
while (!cpu_rq((long)hcpu)->migration_thread)
yield();
break;
......
......@@ -118,14 +118,18 @@ int max_queued_signals = 1024;
#define T(sig, mask) \
((1UL << (sig)) & mask)
#define sig_user_specific(sig) T(sig, SIG_USER_SPECIFIC_MASK)
#define sig_user_specific(sig) \
(((sig) < SIGRTMIN) && T(sig, SIG_USER_SPECIFIC_MASK))
#define sig_user_load_balance(sig) \
(T(sig, SIG_USER_LOAD_BALANCE_MASK) || ((sig) >= SIGRTMIN))
#define sig_kernel_specific(sig) T(sig, SIG_KERNEL_SPECIFIC_MASK)
(((sig) >= SIGRTMIN) || T(sig, SIG_USER_LOAD_BALANCE_MASK))
#define sig_kernel_specific(sig) \
(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_SPECIFIC_MASK))
#define sig_kernel_broadcast(sig) \
(T(sig, SIG_KERNEL_BROADCAST_MASK) || ((sig) >= SIGRTMIN))
#define sig_kernel_only(sig) T(sig, SIG_KERNEL_ONLY_MASK)
#define sig_kernel_coredump(sig) T(sig, SIG_KERNEL_COREDUMP_MASK)
(((sig) >= SIGRTMIN) || T(sig, SIG_KERNEL_BROADCAST_MASK))
#define sig_kernel_only(sig) \
(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_ONLY_MASK))
#define sig_kernel_coredump(sig) \
(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_COREDUMP_MASK))
#define sig_user_defined(t, sig) \
(((t)->sig->action[(sig)-1].sa.sa_handler != SIG_DFL) && \
......@@ -269,6 +273,15 @@ void __exit_sighand(struct task_struct *tsk)
kmem_cache_free(sigact_cachep, sig);
} else {
struct task_struct *leader = tsk->group_leader;
/*
* If there is any task waiting for the group exit
* then notify it:
*/
if (sig->group_exit_task && atomic_read(&sig->count) <= 2) {
wake_up_process(sig->group_exit_task);
sig->group_exit_task = NULL;
}
/*
* If we are the last non-leader member of the thread
* group, and the leader is zombie, then notify the
......@@ -279,12 +292,15 @@ void __exit_sighand(struct task_struct *tsk)
*/
if (atomic_read(&sig->count) == 1 &&
leader->state == TASK_ZOMBIE) {
__remove_thread_group(tsk, sig);
spin_unlock(&sig->siglock);
do_notify_parent(leader, leader->exit_signal);
} else
} else {
__remove_thread_group(tsk, sig);
spin_unlock(&sig->siglock);
}
}
clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
flush_sigqueue(&tsk->pending);
......@@ -932,8 +948,8 @@ int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
struct task_struct *p;
retval = -ESRCH;
for_each_task(p) {
if (p->pgrp == pgrp && thread_group_leader(p)) {
for_each_process(p) {
if (p->pgrp == pgrp) {
int err = send_sig_info(sig, info, p);
if (retval)
retval = err;
......@@ -970,7 +986,7 @@ kill_sl_info(int sig, struct siginfo *info, pid_t sess)
retval = -ESRCH;
read_lock(&tasklist_lock);
for_each_task(p) {
for_each_process(p) {
if (p->leader && p->session == sess) {
int err = send_sig_info(sig, info, p);
if (retval)
......@@ -1014,8 +1030,8 @@ static int kill_something_info(int sig, struct siginfo *info, int pid)
struct task_struct * p;
read_lock(&tasklist_lock);
for_each_task(p) {
if (p->pid > 1 && p != current && thread_group_leader(p)) {
for_each_process(p) {
if (p->pid > 1 && p != current) {
int err = send_sig_info(sig, info, p);
++count;
if (err != -EPERM)
......@@ -1099,7 +1115,7 @@ void do_notify_parent(struct task_struct *tsk, int sig)
struct siginfo info;
int why, status;
if (delay_group_leader(tsk))
if (!tsk->ptrace && delay_group_leader(tsk))
return;
if (sig == -1)
BUG();
......
......@@ -395,8 +395,7 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
int hotcpu = (unsigned long)hcpu;
if (action == CPU_ONLINE) {
if (kernel_thread(ksoftirqd, hcpu,
CLONE_FS | CLONE_FILES | CLONE_SIGNAL) < 0) {
if (kernel_thread(ksoftirqd, hcpu, CLONE_KERNEL) < 0) {
printk("ksoftirqd for %i failed\n", hotcpu);
return NOTIFY_BAD;
}
......
......@@ -204,14 +204,14 @@ void refrigerator(unsigned long flag)
int freeze_processes(void)
{
int todo, start_time;
struct task_struct *p;
struct task_struct *g, *p;
printk( "Stopping tasks: " );
start_time = jiffies;
do {
todo = 0;
read_lock(&tasklist_lock);
for_each_task(p) {
do_each_thread(g, p) {
unsigned long flags;
INTERESTING(p);
if (p->flags & PF_FROZEN)
......@@ -224,7 +224,7 @@ int freeze_processes(void)
signal_wake_up(p);
spin_unlock_irqrestore(&p->sigmask_lock, flags);
todo++;
}
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
yield();
if (time_after(jiffies, start_time + TIMEOUT)) {
......@@ -240,18 +240,19 @@ int freeze_processes(void)
void thaw_processes(void)
{
struct task_struct *p;
struct task_struct *g, *p;
printk( "Restarting tasks..." );
read_lock(&tasklist_lock);
for_each_task(p) {
do_each_thread(g, p) {
INTERESTING(p);
if (p->flags & PF_FROZEN) p->flags &= ~PF_FROZEN;
else
printk(KERN_INFO " Strange, %s not stopped\n", p->comm );
wake_up_process(p);
}
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
printk( " done\n" );
MDELAY(500);
......
......@@ -227,7 +227,7 @@ static int proc_sel(struct task_struct *p, int which, int who)
asmlinkage long sys_setpriority(int which, int who, int niceval)
{
struct task_struct *p;
struct task_struct *g, *p;
int error;
if (which > 2 || which < 0)
......@@ -241,7 +241,7 @@ asmlinkage long sys_setpriority(int which, int who, int niceval)
niceval = 19;
read_lock(&tasklist_lock);
for_each_task(p) {
do_each_thread(g, p) {
int no_nice;
if (!proc_sel(p, which, who))
continue;
......@@ -262,8 +262,8 @@ asmlinkage long sys_setpriority(int which, int who, int niceval)
continue;
}
set_user_nice(p, niceval);
} while_each_thread(g, p);
}
read_unlock(&tasklist_lock);
return error;
......@@ -277,21 +277,21 @@ asmlinkage long sys_setpriority(int which, int who, int niceval)
*/
asmlinkage long sys_getpriority(int which, int who)
{
struct task_struct *p;
struct task_struct *g, *p;
long retval = -ESRCH;
if (which > 2 || which < 0)
return -EINVAL;
read_lock(&tasklist_lock);
for_each_task (p) {
do_each_thread(g, p) {
long niceval;
if (!proc_sel(p, which, who))
continue;
niceval = 20 - task_nice(p);
if (niceval > retval)
retval = niceval;
}
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
return retval;
......@@ -882,12 +882,12 @@ asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
if (p->leader)
goto out;
if (pgid != pid) {
struct task_struct * tmp;
for_each_task (tmp) {
struct task_struct *g, *tmp;
do_each_thread(g, tmp) {
if (tmp->pgrp == pgid &&
tmp->session == current->session)
goto ok_pgid;
}
} while_each_thread(g, tmp);
goto out;
}
......@@ -956,14 +956,14 @@ asmlinkage long sys_getsid(pid_t pid)
asmlinkage long sys_setsid(void)
{
struct task_struct * p;
struct task_struct *g, *p;
int err = -EPERM;
read_lock(&tasklist_lock);
for_each_task(p) {
do_each_thread(g, p)
if (p->pgrp == current->pid)
goto out;
}
while_each_thread(g, p);
current->leader = 1;
current->session = current->pgrp = current->pid;
......
......@@ -116,10 +116,10 @@ static int badness(struct task_struct *p)
static struct task_struct * select_bad_process(void)
{
int maxpoints = 0;
struct task_struct *p = NULL;
struct task_struct *g, *p;
struct task_struct *chosen = NULL;
for_each_task(p) {
do_each_thread(g, p)
if (p->pid) {
int points = badness(p);
if (points > maxpoints) {
......@@ -127,7 +127,7 @@ static struct task_struct * select_bad_process(void)
maxpoints = points;
}
}
}
while_each_thread(g, p);
return chosen;
}
......@@ -166,7 +166,7 @@ void oom_kill_task(struct task_struct *p)
*/
static void oom_kill(void)
{
struct task_struct *p, *q;
struct task_struct *g, *p, *q;
read_lock(&tasklist_lock);
p = select_bad_process();
......@@ -176,9 +176,11 @@ static void oom_kill(void)
panic("Out of memory and no killable processes...\n");
/* kill all processes that share the ->mm (i.e. all threads) */
for_each_task(q) {
if(q->mm == p->mm) oom_kill_task(q);
}
do_each_thread(g, q)
if (q->mm == p->mm)
oom_kill_task(q);
while_each_thread(g, q);
read_unlock(&tasklist_lock);
/*
......
......@@ -202,8 +202,7 @@ int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0)
static void start_one_pdflush_thread(void)
{
kernel_thread(pdflush, NULL,
CLONE_FS | CLONE_FILES | CLONE_SIGNAL);
kernel_thread(pdflush, NULL, CLONE_KERNEL);
}
static int __init pdflush_init(void)
......
......@@ -705,7 +705,7 @@ static int __init kswapd_init(void)
{
printk("Starting kswapd\n");
swap_setup();
kernel_thread(kswapd, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGNAL);
kernel_thread(kswapd, NULL, CLONE_KERNEL);
return 0;
}
......
......@@ -28,18 +28,8 @@
static int p8022_request(struct datalink_proto *dl, struct sk_buff *skb,
unsigned char *dest)
{
union llc_u_prim_data prim_data;
struct llc_prim_if_block prim;
prim.data = &prim_data;
prim.sap = dl->sap;
prim.prim = LLC_DATAUNIT_PRIM;
prim_data.test.skb = skb;
prim_data.test.saddr.lsap = dl->sap->laddr.lsap;
prim_data.test.daddr.lsap = dl->sap->laddr.lsap;
memcpy(prim_data.test.saddr.mac, skb->dev->dev_addr, IFHWADDRLEN);
memcpy(prim_data.test.daddr.mac, dest, IFHWADDRLEN);
return dl->sap->req(&prim);
llc_build_and_send_ui_pkt(dl->sap, skb, dest, dl->sap->laddr.lsap);
return 0;
}
struct datalink_proto *register_8022_client(unsigned char type,
......
......@@ -86,19 +86,9 @@ static int snap_indicate(struct llc_prim_if_block *prim)
static int snap_request(struct datalink_proto *dl,
struct sk_buff *skb, u8 *dest)
{
union llc_u_prim_data prim_data;
struct llc_prim_if_block prim;
memcpy(skb_push(skb, 5), dl->type, 5);
prim.data = &prim_data;
prim.sap = snap_sap;
prim.prim = LLC_DATAUNIT_PRIM;
prim_data.test.skb = skb;
prim_data.test.saddr.lsap = snap_sap->laddr.lsap;
prim_data.test.daddr.lsap = snap_sap->laddr.lsap;
memcpy(prim_data.test.saddr.mac, skb->dev->dev_addr, IFHWADDRLEN);
memcpy(prim_data.test.daddr.mac, dest, IFHWADDRLEN);
return snap_sap->req(&prim);
llc_build_and_send_ui_pkt(snap_sap, skb, dest, snap_sap->laddr.lsap);
return 0;
}
/*
......
......@@ -14,12 +14,12 @@
static int
match_comm(const struct sk_buff *skb, const char *comm)
{
struct task_struct *p;
struct task_struct *g, *p;
struct files_struct *files;
int i;
read_lock(&tasklist_lock);
for_each_task(p) {
do_each_thread(g, p) {
if(strncmp(p->comm, comm, sizeof(p->comm)))
continue;
......@@ -38,7 +38,7 @@ match_comm(const struct sk_buff *skb, const char *comm)
read_unlock(&files->file_lock);
}
task_unlock(p);
}
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
return 0;
}
......@@ -77,12 +77,12 @@ match_pid(const struct sk_buff *skb, pid_t pid)
static int
match_sid(const struct sk_buff *skb, pid_t sid)
{
struct task_struct *p;
struct task_struct *g, *p;
struct file *file = skb->sk->socket->file;
int i, found=0;
read_lock(&tasklist_lock);
for_each_task(p) {
do_each_thread(g, p) {
struct files_struct *files;
if (p->session != sid)
continue;
......@@ -100,9 +100,10 @@ match_sid(const struct sk_buff *skb, pid_t sid)
read_unlock(&files->file_lock);
}
task_unlock(p);
if(found)
break;
}
if (found)
goto out;
} while_each_thread(g, p);
out:
read_unlock(&tasklist_lock);
return found;
......
......@@ -49,12 +49,12 @@ match_pid(const struct sk_buff *skb, pid_t pid)
static int
match_sid(const struct sk_buff *skb, pid_t sid)
{
struct task_struct *p;
struct task_struct *g, *p;
struct file *file = skb->sk->socket->file;
int i, found=0;
read_lock(&tasklist_lock);
for_each_task(p) {
do_each_thread(g, p) {
struct files_struct *files;
if (p->session != sid)
continue;
......@@ -72,9 +72,10 @@ match_sid(const struct sk_buff *skb, pid_t sid)
read_unlock(&files->file_lock);
}
task_unlock(p);
if(found)
break;
}
if (found)
goto out;
} while_each_thread(g, p);
out:
read_unlock(&tasklist_lock);
return found;
......
......@@ -34,7 +34,6 @@ int llc_station_ac_start_ack_timer(struct llc_station *station,
station->ack_timer.data = (unsigned long)station;
station->ack_timer.function = llc_station_ack_tmr_callback;
add_timer(&station->ack_timer);
station->ack_tmr_running = 1;
return 0;
}
......@@ -136,12 +135,10 @@ static void llc_station_ack_tmr_callback(unsigned long timeout_data)
struct llc_station *station = (struct llc_station *)timeout_data;
struct sk_buff *skb = alloc_skb(0, GFP_ATOMIC);
station->ack_tmr_running = 0;
if (skb) {
struct llc_station_state_ev *ev = llc_station_ev(skb);
ev->type = LLC_STATION_EV_TYPE_ACK_TMR;
ev->data.tmr.timer_specific = NULL;
llc_station_state_process(station, skb);
}
}
......@@ -48,7 +48,6 @@ int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb)
llc->remote_busy_flag = 0;
del_timer(&llc->busy_state_timer.timer);
llc->busy_state_timer.running = 0;
nr = LLC_I_GET_NR(pdu);
llc_conn_resend_i_pdu_as_cmd(sk, nr, 0);
}
......@@ -252,10 +251,8 @@ int llc_conn_ac_stop_rej_tmr_if_data_flag_eq_2(struct sock *sk,
{
struct llc_opt *llc = llc_sk(sk);
if (llc->data_flag == 2) {
if (llc->data_flag == 2)
del_timer(&llc->rej_sent_timer.timer);
llc->rej_sent_timer.running = 0;
}
return 0;
}
......@@ -672,7 +669,6 @@ int llc_conn_ac_set_remote_busy(struct sock *sk, struct sk_buff *skb)
llc->busy_state_timer.timer.data = (unsigned long)sk;
llc->busy_state_timer.timer.function = llc_conn_busy_tmr_cb;
add_timer(&llc->busy_state_timer.timer);
llc->busy_state_timer.running = 1;
}
return 0;
}
......@@ -915,7 +911,6 @@ int llc_conn_ac_start_p_timer(struct sock *sk, struct sk_buff *skb)
llc->pf_cycle_timer.timer.data = (unsigned long)sk;
llc->pf_cycle_timer.timer.function = llc_conn_pf_cycle_tmr_cb;
add_timer(&llc->pf_cycle_timer.timer);
llc->pf_cycle_timer.running = 1;
return 0;
}
......@@ -1162,13 +1157,9 @@ int llc_conn_ac_stop_all_timers(struct sock *sk, struct sk_buff *skb)
struct llc_opt *llc = llc_sk(sk);
del_timer(&llc->pf_cycle_timer.timer);
llc->pf_cycle_timer.running = 0;
del_timer(&llc->ack_timer.timer);
llc->ack_timer.running = 0;
del_timer(&llc->rej_sent_timer.timer);
llc->rej_sent_timer.running = 0;
del_timer(&llc->busy_state_timer.timer);
llc->busy_state_timer.running = 0;
llc->ack_must_be_send = 0;
llc->ack_pf = 0;
return 0;
......@@ -1179,11 +1170,8 @@ int llc_conn_ac_stop_other_timers(struct sock *sk, struct sk_buff *skb)
struct llc_opt *llc = llc_sk(sk);
del_timer(&llc->rej_sent_timer.timer);
llc->rej_sent_timer.running = 0;
del_timer(&llc->pf_cycle_timer.timer);
llc->pf_cycle_timer.running = 0;
del_timer(&llc->busy_state_timer.timer);
llc->busy_state_timer.running = 0;
llc->ack_must_be_send = 0;
llc->ack_pf = 0;
return 0;
......@@ -1198,7 +1186,6 @@ int llc_conn_ac_start_ack_timer(struct sock *sk, struct sk_buff *skb)
llc->ack_timer.timer.data = (unsigned long)sk;
llc->ack_timer.timer.function = llc_conn_ack_tmr_cb;
add_timer(&llc->ack_timer.timer);
llc->ack_timer.running = 1;
return 0;
}
......@@ -1212,7 +1199,6 @@ int llc_conn_ac_start_rej_timer(struct sock *sk, struct sk_buff *skb)
llc->rej_sent_timer.timer.data = (unsigned long)sk;
llc->rej_sent_timer.timer.function = llc_conn_rej_tmr_cb;
add_timer(&llc->rej_sent_timer.timer);
llc->rej_sent_timer.running = 1;
return 0;
}
......@@ -1221,13 +1207,12 @@ int llc_conn_ac_start_ack_tmr_if_not_running(struct sock *sk,
{
struct llc_opt *llc = llc_sk(sk);
if (!llc->ack_timer.running) {
if (!timer_pending(&llc->ack_timer.timer)) {
llc->ack_timer.timer.expires = jiffies +
llc->ack_timer.expire * HZ;
llc->ack_timer.timer.data = (unsigned long)sk;
llc->ack_timer.timer.function = llc_conn_ack_tmr_cb;
add_timer(&llc->ack_timer.timer);
llc->ack_timer.running = 1;
}
return 0;
}
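The 'running' flags removed throughout this file duplicated state the timer core already tracks: timer_pending() reads the timer's own list linkage, so it cannot drift out of sync with add_timer()/del_timer(). The replacement idiom, as in the hunk above:

/* before: if (!llc->ack_timer.running) { ...; llc->ack_timer.running = 1; } */
if (!timer_pending(&llc->ack_timer.timer))
	add_timer(&llc->ack_timer.timer);	/* expires/data/function set up as above */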
......@@ -1235,7 +1220,6 @@ int llc_conn_ac_start_ack_tmr_if_not_running(struct sock *sk,
int llc_conn_ac_stop_ack_timer(struct sock *sk, struct sk_buff *skb)
{
del_timer(&llc_sk(sk)->ack_timer.timer);
llc_sk(sk)->ack_timer.running = 0;
return 0;
}
......@@ -1244,7 +1228,6 @@ int llc_conn_ac_stop_p_timer(struct sock *sk, struct sk_buff *skb)
struct llc_opt *llc = llc_sk(sk);
del_timer(&llc->pf_cycle_timer.timer);
llc->pf_cycle_timer.running = 0;
llc->p_flag = 0;
return 0;
}
......@@ -1252,7 +1235,6 @@ int llc_conn_ac_stop_p_timer(struct sock *sk, struct sk_buff *skb)
int llc_conn_ac_stop_rej_timer(struct sock *sk, struct sk_buff *skb)
{
del_timer(&llc_sk(sk)->rej_sent_timer.timer);
llc_sk(sk)->rej_sent_timer.running = 0;
return 0;
}
......@@ -1270,7 +1252,6 @@ int llc_conn_ac_upd_nr_received(struct sock *sk, struct sk_buff *skb)
if (acked > 0 || (llc->dev->flags & IFF_LOOPBACK)) {
llc->retry_count = 0;
del_timer(&llc->ack_timer.timer);
llc->ack_timer.running = 0;
if (llc->failed_data_req) {
/* already, we did not accept data from upper layer
* (tx_window full or unacceptable state). Now, we
......@@ -1285,7 +1266,6 @@ int llc_conn_ac_upd_nr_received(struct sock *sk, struct sk_buff *skb)
llc->ack_timer.timer.data = (unsigned long)sk;
llc->ack_timer.timer.function = llc_conn_ack_tmr_cb;
add_timer(&llc->ack_timer.timer);
llc->ack_timer.running = 1;
}
} else if (llc->failed_data_req) {
llc_pdu_decode_pf_bit(skb, &fbit);
......@@ -1423,13 +1403,11 @@ void llc_conn_pf_cycle_tmr_cb(unsigned long timeout_data)
struct sk_buff *skb = alloc_skb(0, GFP_ATOMIC);
bh_lock_sock(sk);
llc_sk(sk)->pf_cycle_timer.running = 0;
if (skb) {
struct llc_conn_state_ev *ev = llc_conn_ev(skb);
skb->sk = sk;
ev->type = LLC_CONN_EV_TYPE_P_TMR;
ev->data.tmr.timer_specific = NULL;
llc_process_tmr_ev(sk, skb);
}
bh_unlock_sock(sk);
......@@ -1441,13 +1419,11 @@ static void llc_conn_busy_tmr_cb(unsigned long timeout_data)
struct sk_buff *skb = alloc_skb(0, GFP_ATOMIC);
bh_lock_sock(sk);
llc_sk(sk)->busy_state_timer.running = 0;
if (skb) {
struct llc_conn_state_ev *ev = llc_conn_ev(skb);
skb->sk = sk;
ev->type = LLC_CONN_EV_TYPE_BUSY_TMR;
ev->data.tmr.timer_specific = NULL;
llc_process_tmr_ev(sk, skb);
}
bh_unlock_sock(sk);
......@@ -1459,13 +1435,11 @@ void llc_conn_ack_tmr_cb(unsigned long timeout_data)
struct sk_buff *skb = alloc_skb(0, GFP_ATOMIC);
bh_lock_sock(sk);
llc_sk(sk)->ack_timer.running = 0;
if (skb) {
struct llc_conn_state_ev *ev = llc_conn_ev(skb);
skb->sk = sk;
ev->type = LLC_CONN_EV_TYPE_ACK_TMR;
ev->data.tmr.timer_specific = NULL;
llc_process_tmr_ev(sk, skb);
}
bh_unlock_sock(sk);
......@@ -1477,13 +1451,11 @@ static void llc_conn_rej_tmr_cb(unsigned long timeout_data)
struct sk_buff *skb = alloc_skb(0, GFP_ATOMIC);
bh_lock_sock(sk);
llc_sk(sk)->rej_sent_timer.running = 0;
if (skb) {
struct llc_conn_state_ev *ev = llc_conn_ev(skb);
skb->sk = sk;
ev->type = LLC_CONN_EV_TYPE_REJ_TMR;
ev->data.tmr.timer_specific = NULL;
llc_process_tmr_ev(sk, skb);
}
bh_unlock_sock(sk);
......
......@@ -86,7 +86,8 @@ void llc_sap_close(struct llc_sap *sap)
* llc_build_and_send_ui_pkt - unitdata request interface for upper layers
* @sap: sap to use
* @skb: packet to send
* @addr: destination address
* @dmac: destination mac address
* @dsap: destination sap
*
* Upper layers call this function when they want to send data
* using connection-less mode communication (UI pdu).
......@@ -95,25 +96,22 @@ void llc_sap_close(struct llc_sap *sap)
* less mode communication; timeout/retries handled by network layer;
* package primitive as an event and send to SAP event handler
*/
void llc_build_and_send_ui_pkt(struct llc_sap *sap,
struct sk_buff *skb,
struct sockaddr_llc *addr)
void llc_build_and_send_ui_pkt(struct llc_sap *sap, struct sk_buff *skb,
u8 *dmac, u8 dsap)
{
union llc_u_prim_data prim_data;
struct llc_prim_if_block prim;
struct llc_sap_state_ev *ev = llc_sap_ev(skb);
skb->protocol = llc_proto_type(addr->sllc_arphrd);
prim.data = &prim_data;
prim.sap = sap;
prim.prim = LLC_DATAUNIT_PRIM;
prim_data.udata.skb = skb;
prim_data.udata.saddr.lsap = sap->laddr.lsap;
prim_data.udata.daddr.lsap = addr->sllc_dsap;
prim_data.udata.daddr.lsap = dsap;
memcpy(prim_data.udata.saddr.mac, skb->dev->dev_addr, IFHWADDRLEN);
memcpy(prim_data.udata.daddr.mac, addr->sllc_dmac, IFHWADDRLEN);
memcpy(prim_data.udata.daddr.mac, dmac, IFHWADDRLEN);
ev->type = LLC_SAP_EV_TYPE_PRIM;
ev->data.prim.prim = LLC_DATAUNIT_PRIM;
......@@ -126,30 +124,28 @@ void llc_build_and_send_ui_pkt(struct llc_sap *sap,
* llc_build_and_send_test_pkt - TEST interface for upper layers.
* @sap: sap to use
* @skb: packet to send
* @addr: destination address
* @dmac: destination mac address
* @dsap: destination sap
*
* This function is called when upper layer wants to send a TEST pdu.
* Returns 0 for success, 1 otherwise.
*/
void llc_build_and_send_test_pkt(struct llc_sap *sap,
struct sk_buff *skb,
struct sockaddr_llc *addr)
void llc_build_and_send_test_pkt(struct llc_sap *sap, struct sk_buff *skb,
u8 *dmac, u8 dsap)
{
union llc_u_prim_data prim_data;
struct llc_prim_if_block prim;
struct llc_sap_state_ev *ev = llc_sap_ev(skb);
skb->protocol = llc_proto_type(addr->sllc_arphrd);
prim.data = &prim_data;
prim.sap = sap;
prim.prim = LLC_TEST_PRIM;
prim_data.test.skb = skb;
prim_data.test.saddr.lsap = sap->laddr.lsap;
prim_data.test.daddr.lsap = addr->sllc_dsap;
prim_data.test.daddr.lsap = dsap;
memcpy(prim_data.test.saddr.mac, skb->dev->dev_addr, IFHWADDRLEN);
memcpy(prim_data.test.daddr.mac, addr->sllc_dmac, IFHWADDRLEN);
memcpy(prim_data.test.daddr.mac, dmac, IFHWADDRLEN);
ev->type = LLC_SAP_EV_TYPE_PRIM;
ev->data.prim.prim = LLC_TEST_PRIM;
......@@ -162,30 +158,28 @@ void llc_build_and_send_test_pkt(struct llc_sap *sap,
* llc_build_and_send_xid_pkt - XID interface for upper layers
* @sap: sap to use
* @skb: packet to send
* @addr: destination address
* @dmac: destination mac address
* @dsap: destination sap
*
* This function is called when upper layer wants to send a XID pdu.
* Returns 0 for success, 1 otherwise.
*/
void llc_build_and_send_xid_pkt(struct llc_sap *sap,
struct sk_buff *skb,
struct sockaddr_llc *addr)
void llc_build_and_send_xid_pkt(struct llc_sap *sap, struct sk_buff *skb,
u8 *dmac, u8 dsap)
{
union llc_u_prim_data prim_data;
struct llc_prim_if_block prim;
struct llc_sap_state_ev *ev = llc_sap_ev(skb);
skb->protocol = llc_proto_type(addr->sllc_arphrd);
prim.data = &prim_data;
prim.sap = sap;
prim.prim = LLC_XID_PRIM;
prim_data.xid.skb = skb;
prim_data.xid.saddr.lsap = sap->laddr.lsap;
prim_data.xid.daddr.lsap = addr->sllc_dsap;
prim_data.xid.daddr.lsap = dsap;
memcpy(prim_data.xid.saddr.mac, skb->dev->dev_addr, IFHWADDRLEN);
memcpy(prim_data.xid.daddr.mac, addr->sllc_dmac, IFHWADDRLEN);
memcpy(prim_data.xid.daddr.mac, dmac, IFHWADDRLEN);
ev->type = LLC_SAP_EV_TYPE_PRIM;
ev->data.prim.prim = LLC_XID_PRIM;
......@@ -196,7 +190,8 @@ void llc_build_and_send_xid_pkt(struct llc_sap *sap,
/**
* llc_build_and_send_pkt - Connection data sending for upper layers.
* @prim: pointer to structure that contains service parameters
* @sk: connection
* @skb: packet to send
*
* This function is called when upper layer wants to send data using
* connection oriented communication mode. During sending data, connection
......@@ -352,3 +347,4 @@ int llc_build_and_send_reset_pkt(struct sock *sk,
EXPORT_SYMBOL(llc_sap_open);
EXPORT_SYMBOL(llc_sap_close);
EXPORT_SYMBOL(llc_build_and_send_ui_pkt);
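With the sockaddr_llc parameter gone, datalink clients now pass the destination MAC and SAP directly. The converted p8022_request() earlier in this commit shows the resulting call shape:

static int p8022_request(struct datalink_proto *dl, struct sk_buff *skb,
			 unsigned char *dest)
{
	llc_build_and_send_ui_pkt(dl->sap, skb, dest, dl->sap->laddr.lsap);
	return 0;
}

Setting skb->protocol via llc_proto_type() is now the socket layer's job (see the llc_ui_sendmsg() hunk below).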
......@@ -78,9 +78,6 @@ void llc_sap_state_process(struct llc_sap *sap, struct sk_buff *skb)
sap->ind(ev->prim);
else if (ev->type == LLC_SAP_EV_TYPE_PDU)
kfree_skb(skb);
else
printk(KERN_INFO ":%s !kfree_skb & it is %s in a list\n",
__FUNCTION__, skb->list ? "" : "NOT");
}
/**
......
......@@ -77,6 +77,18 @@ static __inline__ u16 llc_ui_next_link_no(int sap)
return llc_ui_sap_link_no_max[sap]++;
}
/**
* llc_proto_type - return eth protocol for ARP header type
* @arphrd: ARP header type.
*
* Given an ARP header type return the corresponding ethernet protocol.
*/
static __inline__ u16 llc_proto_type(u16 arphrd)
{
return arphrd == ARPHRD_IEEE802_TR ?
htons(ETH_P_TR_802_2) : htons(ETH_P_802_2);
}
/**
* llc_ui_addr_null - determines if an address structure is null
* @addr: Address to test if null.
......@@ -117,13 +129,11 @@ static __inline__ u8 llc_ui_header_len(struct sock *sk,
* Send data via reliable llc2 connection.
* Returns 0 upon success, non-zero if action did not succeed.
*/
static int llc_ui_send_data(struct sock* sk, struct sk_buff *skb,
struct sockaddr_llc *addr, int noblock)
static int llc_ui_send_data(struct sock* sk, struct sk_buff *skb, int noblock)
{
struct llc_opt* llc = llc_sk(sk);
int rc = 0;
skb->protocol = llc_proto_type(addr->sllc_arphrd);
if (llc_data_accept_state(llc->state) || llc->p_flag) {
int timeout = sock_sndtimeo(sk, noblock);
......@@ -942,26 +952,30 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, int len,
goto release;
skb->sk = sk;
skb->dev = dev;
skb->protocol = llc_proto_type(addr->sllc_arphrd);
skb_reserve(skb, dev->hard_header_len + llc_ui_header_len(sk, addr));
rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
if (rc)
goto out;
if (addr->sllc_test) {
llc_build_and_send_test_pkt(llc->sap, skb, addr);
llc_build_and_send_test_pkt(llc->sap, skb, addr->sllc_dmac,
addr->sllc_dsap);
goto out;
}
if (addr->sllc_xid) {
llc_build_and_send_xid_pkt(llc->sap, skb, addr);
llc_build_and_send_xid_pkt(llc->sap, skb, addr->sllc_dmac,
addr->sllc_dsap);
goto out;
}
if (sk->type == SOCK_DGRAM || addr->sllc_ua) {
llc_build_and_send_ui_pkt(llc->sap, skb, addr);
llc_build_and_send_ui_pkt(llc->sap, skb, addr->sllc_dmac,
addr->sllc_dsap);
goto out;
}
rc = -ENOPROTOOPT;
if (!(sk->type == SOCK_STREAM && !addr->sllc_ua))
goto out;
rc = llc_ui_send_data(sk, skb, addr, noblock);
rc = llc_ui_send_data(sk, skb, noblock);
if (rc)
dprintk("%s: llc_ui_send_data failed: %d\n", __FUNCTION__, rc);
out:
......