Commit 6f3d20c9 authored by Linus Torvalds's avatar Linus Torvalds

Merge bk://kernel.bkbits.net/davem/sparc-2.5

into penguin.transmeta.com:/home/penguin/torvalds/repositories/kernel/linux
parents a67f5a41 84e7eb7a
......@@ -1836,6 +1836,8 @@ static void __do_SAK(void *arg)
#else
struct tty_struct *tty = arg;
struct task_struct *p;
struct list_head *l;
struct pid *pid;
int session;
int i;
struct file *filp;
......@@ -1848,9 +1850,8 @@ static void __do_SAK(void *arg)
if (tty->driver.flush_buffer)
tty->driver.flush_buffer(tty);
read_lock(&tasklist_lock);
for_each_process(p) {
if ((p->tty == tty) ||
((session > 0) && (p->session == session))) {
for_each_task_pid(session, PIDTYPE_SID, p, l, pid) {
if (p->tty == tty || session > 0) {
printk(KERN_NOTICE "SAK: killed process %d"
" (%s): p->session==tty->session\n",
p->pid, p->comm);
......
......@@ -417,7 +417,7 @@ struct inode *ext3_new_inode(handle_t *handle, struct inode * dir, int mode)
struct buffer_head *bitmap_bh = NULL;
struct buffer_head *bh2;
int group;
ino_t ino;
unsigned long ino;
struct inode * inode;
struct ext3_group_desc * gdp;
struct ext3_super_block * es;
......@@ -463,7 +463,7 @@ struct inode *ext3_new_inode(handle_t *handle, struct inode * dir, int mode)
BUFFER_TRACE(bitmap_bh, "get_write_access");
err = ext3_journal_get_write_access(handle, bitmap_bh);
if (err) goto fail;
if (ext3_set_bit(ino, bitmap_bh->b_data)) {
ext3_error (sb, "ext3_new_inode",
"bit already set for inode %lu", ino);
......@@ -619,19 +619,18 @@ struct inode *ext3_new_inode(handle_t *handle, struct inode * dir, int mode)
}
/* Verify that we are loading a valid orphan from disk */
struct inode *ext3_orphan_get (struct super_block * sb, ino_t ino)
struct inode *ext3_orphan_get(struct super_block *sb, unsigned long ino)
{
ino_t max_ino = le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count);
unsigned long max_ino = le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count);
unsigned long block_group;
int bit;
struct buffer_head *bitmap_bh = NULL;
struct inode *inode = NULL;
/* Error cases - e2fsck has already cleaned up for us */
if (ino > max_ino) {
ext3_warning(sb, __FUNCTION__,
"bad orphan ino %lu! e2fsck was run?\n",
(unsigned long) ino);
"bad orphan ino %lu! e2fsck was run?\n", ino);
goto out;
}
......@@ -640,8 +639,7 @@ struct inode *ext3_orphan_get (struct super_block * sb, ino_t ino)
bitmap_bh = read_inode_bitmap(sb, block_group);
if (!bitmap_bh) {
ext3_warning(sb, __FUNCTION__,
"inode bitmap error for orphan %lu\n",
(unsigned long) ino);
"inode bitmap error for orphan %lu\n", ino);
goto out;
}
......@@ -653,19 +651,17 @@ struct inode *ext3_orphan_get (struct super_block * sb, ino_t ino)
!(inode = iget(sb, ino)) || is_bad_inode(inode) ||
NEXT_ORPHAN(inode) > max_ino) {
ext3_warning(sb, __FUNCTION__,
"bad orphan inode %lu! e2fsck was run?\n", (unsigned long)ino);
"bad orphan inode %lu! e2fsck was run?\n", ino);
printk(KERN_NOTICE "ext3_test_bit(bit=%d, block=%llu) = %d\n",
bit,
(unsigned long long)bitmap_bh->b_blocknr,
ext3_test_bit(bit, bitmap_bh->b_data));
bit, (unsigned long long)bitmap_bh->b_blocknr,
ext3_test_bit(bit, bitmap_bh->b_data));
printk(KERN_NOTICE "inode=%p\n", inode);
if (inode) {
printk(KERN_NOTICE "is_bad_inode(inode)=%d\n",
is_bad_inode(inode));
printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%d\n",
printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n",
NEXT_ORPHAN(inode));
printk(KERN_NOTICE "max_ino=%lu\n",
(unsigned long) max_ino);
printk(KERN_NOTICE "max_ino=%lu\n", max_ino);
}
/* Avoid freeing blocks if we got a bad deleted inode */
if (inode && inode->i_nlink == 0)
......
......@@ -1098,20 +1098,14 @@ static int ext3_prepare_write(struct file *file, struct page *page,
return ret;
}
/*
 * Mark a data buffer dirty for ordered (synchronous) journalling.
 *
 * Thin adapter so walk_page_buffers() can file write() data on the
 * transaction's sync data list; the final argument 0 selects the
 * non-async (BJ_SyncData) path of ext3_journal_dirty_data().
 * Returns 0 on success or a journal error code.
 */
static int journal_dirty_sync_data(handle_t *handle, struct buffer_head *bh)
{
	int ret;

	ret = ext3_journal_dirty_data(handle, bh, 0);
	return ret;
}
/*
* For ext3_writepage(). We also brelse() the buffer to account for
* the bget() which ext3_writepage() performs.
*/
static int journal_dirty_async_data(handle_t *handle, struct buffer_head *bh)
static int
ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
{
int ret = ext3_journal_dirty_data(handle, bh, 1);
__brelse(bh);
return ret;
int err = journal_dirty_data(handle, bh);
if (err)
ext3_journal_abort_handle(__FUNCTION__, __FUNCTION__,
bh, handle,err);
return err;
}
/* For commit_write() in data=journal mode */
......@@ -1154,7 +1148,7 @@ static int ext3_commit_write(struct file *file, struct page *page,
} else {
if (ext3_should_order_data(inode)) {
ret = walk_page_buffers(handle, page_buffers(page),
from, to, NULL, journal_dirty_sync_data);
from, to, NULL, ext3_journal_dirty_data);
}
/* Be careful here if generic_commit_write becomes a
* required invocation after block_prepare_write. */
......@@ -1228,7 +1222,13 @@ static sector_t ext3_bmap(struct address_space *mapping, sector_t block)
static int bget_one(handle_t *handle, struct buffer_head *bh)
{
atomic_inc(&bh->b_count);
get_bh(bh);
return 0;
}
/*
 * Drop one reference on a buffer_head; walk_page_buffers() callback.
 * Pairs with bget_one() above.  The handle argument is unused but
 * required by the callback signature.  Always returns 0.
 */
static int bput_one(handle_t *handle, struct buffer_head *bh)
{
	put_bh(bh);
	return 0;
}
......@@ -1348,7 +1348,9 @@ static int ext3_writepage(struct page *page, struct writeback_control *wbc)
/* And attach them to the current transaction */
if (order_data) {
err = walk_page_buffers(handle, page_bufs,
0, PAGE_CACHE_SIZE, NULL, journal_dirty_async_data);
0, PAGE_CACHE_SIZE, NULL, ext3_journal_dirty_data);
walk_page_buffers(handle, page_bufs, 0,
PAGE_CACHE_SIZE, NULL, bput_one);
if (!ret)
ret = err;
}
......@@ -1587,7 +1589,7 @@ static int ext3_block_truncate_page(handle_t *handle,
err = ext3_journal_dirty_metadata(handle, bh);
} else {
if (ext3_should_order_data(inode))
err = ext3_journal_dirty_data(handle, bh, 0);
err = ext3_journal_dirty_data(handle, bh);
mark_buffer_dirty(bh);
}
......
......@@ -1871,10 +1871,10 @@ int ext3_orphan_del(handle_t *handle, struct inode *inode)
struct list_head *prev;
struct ext3_inode_info *ei = EXT3_I(inode);
struct ext3_sb_info *sbi;
ino_t ino_next;
unsigned long ino_next;
struct ext3_iloc iloc;
int err = 0;
lock_super(inode->i_sb);
if (list_empty(&ei->i_orphan)) {
unlock_super(inode->i_sb);
......@@ -1885,7 +1885,7 @@ int ext3_orphan_del(handle_t *handle, struct inode *inode)
prev = ei->i_orphan.prev;
sbi = EXT3_SB(inode->i_sb);
jbd_debug(4, "remove inode %ld from orphan list\n", inode->i_ino);
jbd_debug(4, "remove inode %lu from orphan list\n", inode->i_ino);
list_del_init(&ei->i_orphan);
......@@ -1895,13 +1895,13 @@ int ext3_orphan_del(handle_t *handle, struct inode *inode)
* list in memory. */
if (!handle)
goto out;
err = ext3_reserve_inode_write(handle, inode, &iloc);
if (err)
goto out_err;
if (prev == &sbi->s_orphan) {
jbd_debug(4, "superblock will point to %ld\n", ino_next);
jbd_debug(4, "superblock will point to %lu\n", ino_next);
BUFFER_TRACE(sbi->s_sbh, "get_write_access");
err = ext3_journal_get_write_access(handle, sbi->s_sbh);
if (err)
......@@ -1912,8 +1912,8 @@ int ext3_orphan_del(handle_t *handle, struct inode *inode)
struct ext3_iloc iloc2;
struct inode *i_prev =
&list_entry(prev, struct ext3_inode_info, i_orphan)->vfs_inode;
jbd_debug(4, "orphan inode %ld will point to %ld\n",
jbd_debug(4, "orphan inode %lu will point to %lu\n",
i_prev->i_ino, ino_next);
err = ext3_reserve_inode_write(handle, i_prev, &iloc2);
if (err)
......@@ -1928,7 +1928,7 @@ int ext3_orphan_del(handle_t *handle, struct inode *inode)
if (err)
goto out_brelse;
out_err:
out_err:
ext3_std_error(inode->i_sb, err);
out:
unlock_super(inode->i_sb);
......
......@@ -457,11 +457,8 @@ static struct address_space_operations hugetlbfs_aops = {
};
struct file_operations hugetlbfs_file_operations = {
.read = generic_file_read,
.write = generic_file_write,
.mmap = hugetlbfs_file_mmap,
.fsync = simple_sync_file,
.sendfile = generic_file_sendfile,
};
static struct inode_operations hugetlbfs_dir_inode_operations = {
......
......@@ -585,7 +585,6 @@ void __journal_drop_transaction(journal_t *journal, transaction_t *transaction)
J_ASSERT (transaction->t_ilist == NULL);
J_ASSERT (transaction->t_buffers == NULL);
J_ASSERT (transaction->t_sync_datalist == NULL);
J_ASSERT (transaction->t_async_datalist == NULL);
J_ASSERT (transaction->t_forget == NULL);
J_ASSERT (transaction->t_iobuf_list == NULL);
J_ASSERT (transaction->t_shadow_list == NULL);
......
......@@ -264,37 +264,6 @@ void journal_commit_transaction(journal_t *journal)
goto write_out_data_locked;
sync_datalist_empty:
/*
* Wait for all the async writepage data. As they become unlocked
* in end_buffer_async_write(), the only place where they can be
* reaped is in try_to_free_buffers(), and we're locked against
* that.
*/
while ((jh = commit_transaction->t_async_datalist)) {
struct buffer_head *bh = jh2bh(jh);
if (buffer_locked(bh)) {
spin_unlock(&journal_datalist_lock);
unlock_journal(journal);
wait_on_buffer(bh);
lock_journal(journal);
spin_lock(&journal_datalist_lock);
continue; /* List may have changed */
}
if (jh->b_next_transaction) {
/*
* For writepage() buffers in journalled data mode: a
* later transaction may want the buffer for "metadata"
*/
__journal_refile_buffer(jh);
} else {
BUFFER_TRACE(bh, "finished async writeout: unfile");
__journal_unfile_buffer(jh);
jh->b_transaction = NULL;
__journal_remove_journal_head(bh);
BUFFER_TRACE(bh, "finished async writeout: refile");
__brelse(bh);
}
}
spin_unlock(&journal_datalist_lock);
/*
......@@ -304,7 +273,6 @@ void journal_commit_transaction(journal_t *journal)
* clean by now, so check that it is in fact empty.
*/
J_ASSERT (commit_transaction->t_sync_datalist == NULL);
J_ASSERT (commit_transaction->t_async_datalist == NULL);
jbd_debug (3, "JBD: commit phase 3\n");
......@@ -629,7 +597,6 @@ void journal_commit_transaction(journal_t *journal)
jbd_debug(3, "JBD: commit phase 7\n");
J_ASSERT(commit_transaction->t_sync_datalist == NULL);
J_ASSERT(commit_transaction->t_async_datalist == NULL);
J_ASSERT(commit_transaction->t_buffers == NULL);
J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
J_ASSERT(commit_transaction->t_iobuf_list == NULL);
......
......@@ -578,9 +578,6 @@ static void jbd_unexpected_dirty_buffer(struct journal_head *jh)
* part of the transaction, that is).
*
* Returns an error code or 0 on success.
*
* In full data journalling mode the buffer may be of type BJ_AsyncData,
* because we're write()ing a buffer which is also part of a shared mapping.
*/
static int
......@@ -949,26 +946,16 @@ int journal_get_undo_access (handle_t *handle, struct buffer_head *bh)
* The buffer is placed on the transaction's data list and is marked as
* belonging to the transaction.
*
* If `async' is set then the writeback will be initiated by the caller
* using submit_bh -> end_buffer_async_write. We put the buffer onto
* t_async_datalist.
*
* Returns error number or 0 on success.
*
* journal_dirty_data() can be called via page_launder->ext3_writepage
* by kswapd. So it cannot block. Happily, there's nothing here
* which needs lock_journal if `async' is set.
*
* When the buffer is on the current transaction we freely move it
* between BJ_AsyncData and BJ_SyncData according to who tried to
* change its state last.
*/
int journal_dirty_data (handle_t *handle, struct buffer_head *bh, int async)
int journal_dirty_data (handle_t *handle, struct buffer_head *bh)
{
journal_t *journal = handle->h_transaction->t_journal;
int need_brelse = 0;
int wanted_jlist = async ? BJ_AsyncData : BJ_SyncData;
struct journal_head *jh;
if (is_handle_aborted(handle))
......@@ -1046,8 +1033,7 @@ int journal_dirty_data (handle_t *handle, struct buffer_head *bh, int async)
* the write() data.
*/
if (jh->b_jlist != BJ_None &&
jh->b_jlist != BJ_SyncData &&
jh->b_jlist != BJ_AsyncData) {
jh->b_jlist != BJ_SyncData) {
JBUFFER_TRACE(jh, "Not stealing");
goto no_journal;
}
......@@ -1058,7 +1044,7 @@ int journal_dirty_data (handle_t *handle, struct buffer_head *bh, int async)
* again because that can cause the write-out loop in
* commit to never terminate.
*/
if (!async && buffer_dirty(bh)) {
if (buffer_dirty(bh)) {
atomic_inc(&bh->b_count);
spin_unlock(&journal_datalist_lock);
need_brelse = 1;
......@@ -1084,18 +1070,18 @@ int journal_dirty_data (handle_t *handle, struct buffer_head *bh, int async)
* committing transaction, so might still be left on that
* transaction's metadata lists.
*/
if (jh->b_jlist != wanted_jlist) {
if (jh->b_jlist != BJ_SyncData) {
JBUFFER_TRACE(jh, "not on correct data list: unfile");
J_ASSERT_JH(jh, jh->b_jlist != BJ_Shadow);
__journal_unfile_buffer(jh);
jh->b_transaction = NULL;
JBUFFER_TRACE(jh, "file as data");
__journal_file_buffer(jh, handle->h_transaction,
wanted_jlist);
BJ_SyncData);
}
} else {
JBUFFER_TRACE(jh, "not on a transaction");
__journal_file_buffer(jh, handle->h_transaction, wanted_jlist);
__journal_file_buffer(jh, handle->h_transaction, BJ_SyncData);
}
no_journal:
spin_unlock(&journal_datalist_lock);
......@@ -1559,12 +1545,12 @@ __blist_del_buffer(struct journal_head **list, struct journal_head *jh)
* Remove a buffer from the appropriate transaction list.
*
* Note that this function can *change* the value of
* bh->b_transaction->t_sync_datalist, t_async_datalist, t_buffers, t_forget,
* bh->b_transaction->t_sync_datalist, t_buffers, t_forget,
* t_iobuf_list, t_shadow_list, t_log_list or t_reserved_list. If the caller
* is holding onto a copy of one of these pointers, it could go bad.
* Generally the caller needs to re-read the pointer from the transaction_t.
*
* If bh->b_jlist is BJ_SyncData or BJ_AsyncData then we may have been called
* If bh->b_jlist is BJ_SyncData then we may have been called
* via journal_try_to_free_buffer() or journal_clean_data_list(). In that
* case, journal_datalist_lock will be held, and the journal may not be locked.
*/
......@@ -1590,9 +1576,6 @@ void __journal_unfile_buffer(struct journal_head *jh)
case BJ_SyncData:
list = &transaction->t_sync_datalist;
break;
case BJ_AsyncData:
list = &transaction->t_async_datalist;
break;
case BJ_Metadata:
transaction->t_nr_buffers--;
J_ASSERT_JH(jh, transaction->t_nr_buffers >= 0);
......@@ -1658,7 +1641,7 @@ static inline int __journal_try_to_free_buffer(struct buffer_head *bh)
goto out;
if (jh->b_transaction != 0 && jh->b_cp_transaction == 0) {
if (jh->b_jlist == BJ_SyncData || jh->b_jlist==BJ_AsyncData) {
if (jh->b_jlist == BJ_SyncData) {
/* A written-back ordered data buffer */
JBUFFER_TRACE(jh, "release data");
__journal_unfile_buffer(jh);
......@@ -1994,9 +1977,6 @@ void __journal_file_buffer(struct journal_head *jh,
case BJ_SyncData:
list = &transaction->t_sync_datalist;
break;
case BJ_AsyncData:
list = &transaction->t_async_datalist;
break;
case BJ_Metadata:
transaction->t_nr_buffers++;
list = &transaction->t_buffers;
......
......@@ -221,7 +221,6 @@ printk("proc_iget: using deleted entry %s, count=%d\n", de->name, atomic_read(&d
int proc_fill_super(struct super_block *s, void *data, int silent)
{
struct inode * root_inode;
struct task_struct *p;
s->s_blocksize = 1024;
s->s_blocksize_bits = 10;
......@@ -234,11 +233,7 @@ int proc_fill_super(struct super_block *s, void *data, int silent)
/*
* Fixup the root inode's nlink value
*/
read_lock(&tasklist_lock);
for_each_process(p)
if (p->pid)
root_inode->i_nlink++;
read_unlock(&tasklist_lock);
root_inode->i_nlink += nr_processes();
s->s_root = d_alloc_root(root_inode);
if (!s->s_root)
goto out_no_root;
......
......@@ -140,9 +140,10 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
struct page_state ps;
unsigned long inactive;
unsigned long active;
unsigned long free;
get_page_state(&ps);
get_zone_counts(&active, &inactive);
get_zone_counts(&active, &inactive, &free);
/*
* display in kilobytes.
......
......@@ -707,7 +707,7 @@ extern int ext3fs_dirhash(const char *name, int len, struct
/* ialloc.c */
extern struct inode * ext3_new_inode (handle_t *, struct inode *, int);
extern void ext3_free_inode (handle_t *, struct inode *);
extern struct inode * ext3_orphan_get (struct super_block *, ino_t);
extern struct inode * ext3_orphan_get (struct super_block *, unsigned long);
extern unsigned long ext3_count_free_inodes (struct super_block *);
extern unsigned long ext3_count_dirs (struct super_block *);
extern void ext3_check_inodes_bitmap (struct super_block *);
......
......@@ -132,16 +132,6 @@ __ext3_journal_get_write_access(const char *where,
return err;
}
/*
 * File a data buffer on the running transaction's data list, aborting
 * the handle (with `where' identifying the caller) if journalling
 * fails.  `async' selects async vs. sync data journalling in
 * journal_dirty_data().  Returns the journal error code, or 0.
 */
static inline int
__ext3_journal_dirty_data(const char *where,
			handle_t *handle, struct buffer_head *bh, int async)
{
	int ret = journal_dirty_data(handle, bh, async);

	if (ret != 0)
		ext3_journal_abort_handle(where, __FUNCTION__, bh, handle, ret);
	return ret;
}
static inline void
ext3_journal_forget(handle_t *handle, struct buffer_head *bh)
{
......@@ -183,8 +173,6 @@ __ext3_journal_dirty_metadata(const char *where,
__ext3_journal_get_undo_access(__FUNCTION__, (handle), (bh))
#define ext3_journal_get_write_access(handle, bh) \
__ext3_journal_get_write_access(__FUNCTION__, (handle), (bh))
#define ext3_journal_dirty_data(handle, bh, async) \
__ext3_journal_dirty_data(__FUNCTION__, (handle), (bh), (async))
#define ext3_journal_revoke(handle, blocknr, bh) \
__ext3_journal_revoke(__FUNCTION__, (handle), (blocknr), (bh))
#define ext3_journal_get_create_access(handle, bh) \
......
......@@ -360,13 +360,6 @@ struct transaction_s
*/
struct journal_head * t_sync_datalist;
/*
* Doubly-linked circular list of all writepage data buffers
* still to be written before this transaction can be committed.
* Protected by journal_datalist_lock.
*/
struct journal_head * t_async_datalist;
/* Doubly-linked circular list of all forget buffers (superseded
buffers which we can un-checkpoint once this transaction
commits) */
......@@ -654,8 +647,7 @@ extern int journal_extend (handle_t *, int nblocks);
extern int journal_get_write_access (handle_t *, struct buffer_head *);
extern int journal_get_create_access (handle_t *, struct buffer_head *);
extern int journal_get_undo_access (handle_t *, struct buffer_head *);
extern int journal_dirty_data (handle_t *,
struct buffer_head *, int async);
extern int journal_dirty_data (handle_t *, struct buffer_head *);
extern int journal_dirty_metadata (handle_t *, struct buffer_head *);
extern void journal_release_buffer (handle_t *, struct buffer_head *);
extern void journal_forget (handle_t *, struct buffer_head *);
......@@ -806,14 +798,13 @@ extern int journal_blocks_per_page(struct inode *inode);
/* journaling buffer types */
#define BJ_None 0 /* Not journaled */
#define BJ_SyncData 1 /* Normal data: flush before commit */
#define BJ_AsyncData 2 /* writepage data: wait on it before commit */
#define BJ_Metadata 3 /* Normal journaled metadata */
#define BJ_Forget 4 /* Buffer superseded by this transaction */
#define BJ_IO 5 /* Buffer is for temporary IO use */
#define BJ_Shadow 6 /* Buffer contents being shadowed to the log */
#define BJ_LogCtl 7 /* Buffer contains log descriptors */
#define BJ_Reserved 8 /* Buffer is reserved for access by journal */
#define BJ_Types 9
#define BJ_Metadata 2 /* Normal journaled metadata */
#define BJ_Forget 3 /* Buffer superseded by this transaction */
#define BJ_IO 4 /* Buffer is for temporary IO use */
#define BJ_Shadow 5 /* Buffer contents being shadowed to the log */
#define BJ_LogCtl 6 /* Buffer contains log descriptors */
#define BJ_Reserved 7 /* Buffer is reserved for access by journal */
#define BJ_Types 8
extern int jbd_blocks_per_page(struct inode *inode);
......@@ -860,8 +851,7 @@ static inline int buffer_jdirty(struct buffer_head *bh)
static inline int buffer_jbd_data(struct buffer_head *bh)
{
return SPLICE_LOCK(buffer_jbd(bh),
bh2jh(bh)->b_jlist == BJ_SyncData ||
bh2jh(bh)->b_jlist == BJ_AsyncData);
bh2jh(bh)->b_jlist == BJ_SyncData);
}
#ifdef CONFIG_SMP
......
......@@ -193,7 +193,8 @@ typedef struct pglist_data {
extern int numnodes;
extern struct pglist_data *pgdat_list;
void get_zone_counts(unsigned long *active, unsigned long *inactive);
void get_zone_counts(unsigned long *active, unsigned long *inactive,
unsigned long *free);
void build_all_zonelists(void);
void wakeup_kswapd(struct zone *zone);
......
......@@ -27,6 +27,7 @@
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/pid.h>
#include <linux/percpu.h>
struct exec_domain;
......@@ -87,6 +88,8 @@ extern unsigned long avenrun[]; /* Load averages */
extern int nr_threads;
extern int last_pid;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern unsigned long nr_uninterruptible(void);
extern unsigned long nr_iowait(void);
......
......@@ -41,6 +41,8 @@ static struct dentry * __unhash_process(struct task_struct *p)
if (thread_group_leader(p)) {
detach_pid(p, PIDTYPE_PGID);
detach_pid(p, PIDTYPE_SID);
if (p->pid)
per_cpu(process_counts, smp_processor_id())--;
}
REMOVE_LINKS(p);
......@@ -191,7 +193,7 @@ int is_orphaned_pgrp(int pgrp)
return will_become_orphaned_pgrp(pgrp, 0);
}
static inline int __has_stopped_jobs(int pgrp)
static inline int has_stopped_jobs(int pgrp)
{
int retval = 0;
struct task_struct *p;
......@@ -207,17 +209,6 @@ static inline int __has_stopped_jobs(int pgrp)
return retval;
}
static inline int has_stopped_jobs(int pgrp)
{
int retval;
read_lock(&tasklist_lock);
retval = __has_stopped_jobs(pgrp);
read_unlock(&tasklist_lock);
return retval;
}
/**
* reparent_to_init() - Reparent the calling kernel thread to the init task.
*
......@@ -504,7 +495,7 @@ static inline void reparent_thread(task_t *p, task_t *father, int traced)
(p->session == father->session)) {
int pgrp = p->pgrp;
if (__will_become_orphaned_pgrp(pgrp, 0) && __has_stopped_jobs(pgrp)) {
if (__will_become_orphaned_pgrp(pgrp, 0) && has_stopped_jobs(pgrp)) {
__kill_pg_info(SIGHUP, (void *)1, pgrp);
__kill_pg_info(SIGCONT, (void *)1, pgrp);
}
......@@ -589,7 +580,7 @@ static void exit_notify(void)
if ((t->pgrp != current->pgrp) &&
(t->session == current->session) &&
__will_become_orphaned_pgrp(current->pgrp, current) &&
__has_stopped_jobs(current->pgrp)) {
has_stopped_jobs(current->pgrp)) {
__kill_pg_info(SIGHUP, (void *)1, current->pgrp);
__kill_pg_info(SIGCONT, (void *)1, current->pgrp);
}
......
......@@ -48,6 +48,8 @@ int nr_threads;
int max_threads;
unsigned long total_forks; /* Handle normal Linux uptimes. */
DEFINE_PER_CPU(unsigned long, process_counts) = 0;
rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED; /* outer */
/*
......@@ -57,6 +59,18 @@ rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED; /* outer */
*/
static task_t *task_cache[NR_CPUS] __cacheline_aligned;
/*
 * Sum the per-CPU process counts over all online CPUs to give the
 * total number of processes in the system.  The result is approximate:
 * no lock is taken, so counts may change while we sum them.
 */
int nr_processes(void)
{
	int total = 0;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_online(cpu))
			total += per_cpu(process_counts, cpu);

	return total;
}
void __put_task_struct(struct task_struct *tsk)
{
if (tsk != current) {
......@@ -931,6 +945,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
attach_pid(p, PIDTYPE_TGID, p->tgid);
attach_pid(p, PIDTYPE_PGID, p->pgrp);
attach_pid(p, PIDTYPE_SID, p->session);
if (p->pid)
per_cpu(process_counts, smp_processor_id())++;
} else
link_pid(p, p->pids + PIDTYPE_TGID, &p->group_leader->pids[PIDTYPE_TGID].pid);
......
......@@ -776,15 +776,18 @@ void get_full_page_state(struct page_state *ret)
__get_page_state(ret, sizeof(*ret) / sizeof(unsigned long));
}
void get_zone_counts(unsigned long *active, unsigned long *inactive)
void get_zone_counts(unsigned long *active,
unsigned long *inactive, unsigned long *free)
{
struct zone *zone;
*active = 0;
*inactive = 0;
*free = 0;
for_each_zone(zone) {
*active += zone->nr_active;
*inactive += zone->nr_inactive;
*free += zone->free_pages;
}
}
......@@ -838,6 +841,7 @@ void show_free_areas(void)
int cpu, temperature;
unsigned long active;
unsigned long inactive;
unsigned long free;
struct zone *zone;
for_each_zone(zone) {
......@@ -863,7 +867,7 @@ void show_free_areas(void)
}
get_page_state(&ps);
get_zone_counts(&active, &inactive);
get_zone_counts(&active, &inactive, &free);
printk("\nFree pages: %11ukB (%ukB HighMem)\n",
K(nr_free_pages()),
......
......@@ -477,7 +477,8 @@ unsigned long max_sane_readahead(unsigned long nr)
{
unsigned long active;
unsigned long inactive;
unsigned long free;
get_zone_counts(&active, &inactive);
return min(nr, inactive / 2);
get_zone_counts(&active, &inactive, &free);
return min(nr, (inactive + free) / 2);
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment