Commit ea5faa9a authored by Linus Torvalds

Import 2.3.7pre7

parent 353ca85a
@@ -98,6 +98,8 @@
  * (micz). From Kim.Berts@fisub.mail.abb.com
  * 11.05.99 0.22 Implemented the IMIX call to mute recording monitor.
  * Guenter Geiger <geiger@epy.co.at>
+ * 15.06.99 0.23 Fix bad allocation bug.
+ * Thanks to Deti Fliegl <fliegl@in.tum.de>
  *
  * some important things missing in Ensoniq documentation:
  *
@@ -531,8 +533,9 @@ static int prog_dmabuf(struct es1370_state *s, struct dmabuf *db, unsigned rate,
 	db->hwptr = db->swptr = db->total_bytes = db->count = db->error = db->endcleared = 0;
 	if (!db->rawbuf) {
 		db->ready = db->mapped = 0;
-		for (order = DMABUF_DEFAULTORDER; order >= DMABUF_MINORDER && !db->rawbuf; order--)
-			db->rawbuf = (void *)__get_free_pages(GFP_KERNEL, order);
+		for (order = DMABUF_DEFAULTORDER; order >= DMABUF_MINORDER; order--)
+			if ((db->rawbuf = (void *)__get_free_pages(GFP_KERNEL, order)))
+				break;
 		if (!db->rawbuf)
 			return -ENOMEM;
 		db->buforder = order;
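
The change above is the whole "bad allocation bug" fix, and the es1371 and sonicvibes hunks below apply the same pattern. In the old loop the post-iteration order-- still executed after a successful allocation, before the !db->rawbuf test could stop the loop, so the loop exited with order one below the order that had actually been allocated and db->buforder was recorded one too small; the explicit break leaves order untouched. A minimal userland sketch of the difference (fake_alloc and the constants are hypothetical stand-ins, not driver code):

#include <stdio.h>

static void *fake_alloc(int order)
{
	return order <= 4 ? (void *)1 : NULL;	/* pretend order 4 is the first success */
}

int main(void)
{
	void *buf = NULL;
	int order;

	/* old shape: order-- runs once more after the successful allocation */
	for (order = 8; order >= 0 && !buf; order--)
		buf = fake_alloc(order);
	printf("old loop leaves order = %d (allocated at order 4)\n", order);

	/* new shape: break exits with order still naming the allocated size */
	buf = NULL;
	for (order = 8; order >= 0; order--)
		if ((buf = fake_alloc(order)))
			break;
	printf("new loop leaves order = %d\n", order);
	return 0;
}
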
@@ -2317,7 +2320,7 @@ __initfunc(int init_es1370(void))
 	if (!pci_present())   /* No PCI bus in this machine! */
 		return -ENODEV;
-	printk(KERN_INFO "es1370: version v0.22 time " __TIME__ " " __DATE__ "\n");
+	printk(KERN_INFO "es1370: version v0.23 time " __TIME__ " " __DATE__ "\n");
 	while (index < NR_DEVICE &&
 	       (pcidev = pci_find_device(PCI_VENDOR_ID_ENSONIQ, PCI_DEVICE_ID_ENSONIQ_ES1370, pcidev))) {
 		if (pcidev->base_address[0] == 0 ||
@@ -65,6 +65,8 @@
  * reported by "Ivan N. Kokshaysky" <ink@jurassic.park.msu.ru>
  * Note: joystick address handling might still be wrong on archs
  * other than i386
+ * 15.06.99 0.12 Fix bad allocation bug.
+ * Thanks to Deti Fliegl <fliegl@in.tum.de>
  *
  */
@@ -759,8 +761,9 @@ static int prog_dmabuf(struct es1371_state *s, struct dmabuf *db, unsigned rate,
 	db->hwptr = db->swptr = db->total_bytes = db->count = db->error = db->endcleared = 0;
 	if (!db->rawbuf) {
 		db->ready = db->mapped = 0;
-		for (order = DMABUF_DEFAULTORDER; order >= DMABUF_MINORDER && !db->rawbuf; order--)
-			db->rawbuf = (void *)__get_free_pages(GFP_KERNEL, order);
+		for (order = DMABUF_DEFAULTORDER; order >= DMABUF_MINORDER; order--)
+			if ((db->rawbuf = (void *)__get_free_pages(GFP_KERNEL, order)))
+				break;
 		if (!db->rawbuf)
 			return -ENOMEM;
 		db->buforder = order;
@@ -2732,7 +2735,7 @@ __initfunc(int init_es1371(void))
 	if (!pci_present())   /* No PCI bus in this machine! */
 		return -ENODEV;
-	printk(KERN_INFO "es1371: version v0.11 time " __TIME__ " " __DATE__ "\n");
+	printk(KERN_INFO "es1371: version v0.12 time " __TIME__ " " __DATE__ "\n");
 	while (index < NR_DEVICE &&
 	       (pcidev = pci_find_device(PCI_VENDOR_ID_ENSONIQ, PCI_DEVICE_ID_ENSONIQ_ES1371, pcidev))) {
 		if (pcidev->base_address[0] == 0 ||
@@ -68,6 +68,8 @@
  * SOUND_PCM_READ_CHANNELS, SOUND_PCM_READ_BITS;
  * Alpha fixes reported by Peter Jones <pjones@redhat.com>
  * Note: dmaio hack might still be wrong on archs other than i386
+ * 15.06.99 0.15 Fix bad allocation bug.
+ * Thanks to Deti Fliegl <fliegl@in.tum.de>
  *
  */
@@ -699,8 +701,9 @@ static int prog_dmabuf(struct sv_state *s, unsigned rec)
 	db->hwptr = db->swptr = db->total_bytes = db->count = db->error = db->endcleared = 0;
 	if (!db->rawbuf) {
 		db->ready = db->mapped = 0;
-		for (order = DMABUF_DEFAULTORDER; order >= DMABUF_MINORDER && !db->rawbuf; order--)
-			db->rawbuf = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA, order);
+		for (order = DMABUF_DEFAULTORDER; order >= DMABUF_MINORDER; order--)
+			if ((db->rawbuf = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA, order)))
+				break;
 		if (!db->rawbuf)
 			return -ENOMEM;
 		db->buforder = order;
@@ -2321,7 +2324,7 @@ __initfunc(int init_sonicvibes(void))
 	if (!pci_present())   /* No PCI bus in this machine! */
 		return -ENODEV;
-	printk(KERN_INFO "sv: version v0.14 time " __TIME__ " " __DATE__ "\n");
+	printk(KERN_INFO "sv: version v0.15 time " __TIME__ " " __DATE__ "\n");
 #if 0
 	if (!(wavetable_mem = __get_free_pages(GFP_KERNEL, 20-PAGE_SHIFT)))
 		printk(KERN_INFO "sv: cannot allocate 1MB of contiguous nonpageable memory for wavetable data\n");
@@ -59,11 +59,7 @@ static void set_brk(unsigned long start, unsigned long end)
 static int dump_write(struct file *file, const void *addr, int nr)
 {
-	int r;
-	down(&file->f_dentry->d_inode->i_sem);
-	r = file->f_op->write(file, addr, nr, &file->f_pos) == nr;
-	up(&file->f_dentry->d_inode->i_sem);
-	return r;
+	return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
 }

 #define DUMP_WRITE(addr, nr) \
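
Here, in the matching binfmt_elf hunk below, and in the kernel/acct.c hunk further down, the down()/up() on i_sem around ->write() is simply dropped. The likely reason (an inference, not stated in the patch) is that the write path now acquires i_sem itself, so taking it at the call site first would self-deadlock on a non-recursive semaphore. A small userland sketch of that failure mode, with an error-checking pthread mutex standing in for i_sem so the program reports the deadlock instead of hanging (all names illustrative):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t i_sem;

static int write_path(void)
{
	/* stand-in for a ->write() that now takes the inode lock itself */
	int rc = pthread_mutex_lock(&i_sem);

	if (rc)
		return rc;	/* EDEADLK with an error-checking mutex */
	pthread_mutex_unlock(&i_sem);
	return 0;
}

int main(void)
{
	pthread_mutexattr_t attr;
	int rc;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
	pthread_mutex_init(&i_sem, &attr);

	pthread_mutex_lock(&i_sem);	/* what the old dump_write did first */
	rc = write_path();		/* relock attempt from the same thread */
	printf("write_path: %s\n", rc ? strerror(rc) : "ok");
	pthread_mutex_unlock(&i_sem);
	return 0;
}
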
@@ -918,11 +918,7 @@ static int load_elf_library(int fd)
  */
 static int dump_write(struct file *file, const void *addr, int nr)
 {
-	int r;
-	down(&file->f_dentry->d_inode->i_sem);
-	r = file->f_op->write(file, addr, nr, &file->f_pos) == nr;
-	up(&file->f_dentry->d_inode->i_sem);
-	return r;
+	return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
 }

 static int dump_seek(struct file *file, off_t off)
@@ -35,35 +35,12 @@
 #define blocksize (EXT2_BLOCK_SIZE(inode->i_sb))
 #define addr_per_block (EXT2_ADDR_PER_BLOCK(inode->i_sb))

-static int sync_block (struct inode * inode, u32 * block, int wait)
+static int sync_indirect(struct inode * inode, u32 * block, int wait)
 {
 	struct buffer_head * bh;

-	if (!*block)
-		return 0;
-	bh = get_hash_table (inode->i_dev, *block, blocksize);
-	if (!bh)
-		return 0;
-	if (wait && buffer_req(bh) && !buffer_uptodate(bh)) {
-		brelse (bh);
-		return -1;
-	}
-	if (wait || !buffer_uptodate(bh) || !buffer_dirty(bh)) {
-		brelse (bh);
-		return 0;
-	}
-	ll_rw_block (WRITE, 1, &bh);
-	bh->b_count--;
-	return 0;
-}
-
-#ifndef __LITTLE_ENDIAN
-static int sync_block_swab32 (struct inode * inode, u32 * block, int wait)
-{
-	struct buffer_head * bh;
-
 	if (!le32_to_cpu(*block))
 		return 0;
 	bh = get_hash_table (inode->i_dev, le32_to_cpu(*block), blocksize);
 	if (!bh)
 		return 0;
@@ -79,40 +56,17 @@ static int sync_block_swab32 (struct inode * inode, u32 * block, int wait)
 	bh->b_count--;
 	return 0;
 }
-#else
-#define sync_block_swab32 sync_block
-#endif

 static int sync_iblock (struct inode * inode, u32 * iblock,
 			struct buffer_head ** bh, int wait)
 {
 	int rc, tmp;

 	*bh = NULL;
-	tmp = *iblock;
-	if (!tmp)
-		return 0;
-	rc = sync_block (inode, iblock, wait);
-	if (rc)
-		return rc;
-	*bh = bread (inode->i_dev, tmp, blocksize);
-	if (!*bh)
-		return -1;
-	return 0;
-}
-
-#ifndef __LITTLE_ENDIAN
-static int sync_iblock_swab32 (struct inode * inode, u32 * iblock,
-			struct buffer_head ** bh, int wait)
-{
-	int rc, tmp;
-
-	*bh = NULL;
 	tmp = le32_to_cpu(*iblock);
 	if (!tmp)
 		return 0;
-	rc = sync_block_swab32 (inode, iblock, wait);
+	rc = sync_indirect(inode, iblock, wait);
 	if (rc)
 		return rc;
 	*bh = bread (inode->i_dev, tmp, blocksize);
@@ -120,68 +74,6 @@ static int sync_iblock_swab32 (struct inode * inode, u32 * iblock,
 		return -1;
 	return 0;
 }
-#else
-#define sync_iblock_swab32 sync_iblock
-#endif
-
-static int sync_direct (struct inode * inode, int wait)
-{
-	int i;
-	int rc, err = 0;
-
-	for (i = 0; i < EXT2_NDIR_BLOCKS; i++) {
-		rc = sync_block (inode, inode->u.ext2_i.i_data + i, wait);
-		if (rc)
-			err = rc;
-	}
-	return err;
-}
-
-static int sync_indirect (struct inode * inode, u32 * iblock, int wait)
-{
-	int i;
-	struct buffer_head * ind_bh;
-	int rc, err = 0;
-
-	rc = sync_iblock (inode, iblock, &ind_bh, wait);
-	if (rc || !ind_bh)
-		return rc;
-
-	for (i = 0; i < addr_per_block; i++) {
-		rc = sync_block_swab32 (inode,
-					((u32 *) ind_bh->b_data) + i,
-					wait);
-		if (rc)
-			err = rc;
-	}
-	brelse (ind_bh);
-	return err;
-}
-
-#ifndef __LITTLE_ENDIAN
-static __inline__ int sync_indirect_swab32 (struct inode * inode, u32 * iblock, int wait)
-{
-	int i;
-	struct buffer_head * ind_bh;
-	int rc, err = 0;
-
-	rc = sync_iblock_swab32 (inode, iblock, &ind_bh, wait);
-	if (rc || !ind_bh)
-		return rc;
-
-	for (i = 0; i < addr_per_block; i++) {
-		rc = sync_block_swab32 (inode,
-					((u32 *) ind_bh->b_data) + i,
-					wait);
-		if (rc)
-			err = rc;
-	}
-	brelse (ind_bh);
-	return err;
-}
-#else
-#define sync_indirect_swab32 sync_indirect
-#endif

 static int sync_dindirect (struct inode * inode, u32 * diblock, int wait)
 {
@@ -194,9 +86,9 @@ static int sync_dindirect (struct inode * inode, u32 * diblock, int wait)
 		return rc;

 	for (i = 0; i < addr_per_block; i++) {
-		rc = sync_indirect_swab32 (inode,
-					   ((u32 *) dind_bh->b_data) + i,
-					   wait);
+		rc = sync_indirect(inode,
+				   ((u32 *) dind_bh->b_data) + i,
+				   wait);
 		if (rc)
 			err = rc;
 	}
@@ -204,31 +96,6 @@ static int sync_dindirect (struct inode * inode, u32 * diblock, int wait)
 	return err;
 }
-
-#ifndef __LITTLE_ENDIAN
-static __inline__ int sync_dindirect_swab32 (struct inode * inode, u32 * diblock, int wait)
-{
-	int i;
-	struct buffer_head * dind_bh;
-	int rc, err = 0;
-
-	rc = sync_iblock_swab32 (inode, diblock, &dind_bh, wait);
-	if (rc || !dind_bh)
-		return rc;
-
-	for (i = 0; i < addr_per_block; i++) {
-		rc = sync_indirect_swab32 (inode,
-					   ((u32 *) dind_bh->b_data) + i,
-					   wait);
-		if (rc)
-			err = rc;
-	}
-	brelse (dind_bh);
-	return err;
-}
-#else
-#define sync_dindirect_swab32 sync_dindirect
-#endif

 static int sync_tindirect (struct inode * inode, u32 * tiblock, int wait)
 {
 	int i;
@@ -240,9 +107,9 @@ static int sync_tindirect (struct inode * inode, u32 * tiblock, int wait)
 		return rc;

 	for (i = 0; i < addr_per_block; i++) {
-		rc = sync_dindirect_swab32 (inode,
-					    ((u32 *) tind_bh->b_data) + i,
-					    wait);
+		rc = sync_dindirect(inode,
+				    ((u32 *) tind_bh->b_data) + i,
+				    wait);
 		if (rc)
 			err = rc;
 	}
@@ -266,9 +133,10 @@ int ext2_sync_file(struct file * file, struct dentry *dentry)
 	 */
 		goto skip;

+	err = generic_buffer_fdatasync(inode, 0, ~0UL);
+
 	for (wait=0; wait<=1; wait++)
 	{
-		err |= sync_direct (inode, wait);
 		err |= sync_indirect (inode,
 				      inode->u.ext2_i.i_data+EXT2_IND_BLOCK,
 				      wait);
@@ -59,7 +59,7 @@ void ext2_delete_inode (struct inode * inode)
 	ext2_free_inode (inode);
 }

-#define inode_bmap(inode, nr) ((inode)->u.ext2_i.i_data[(nr)])
+#define inode_bmap(inode, nr) (le32_to_cpu((inode)->u.ext2_i.i_data[(nr)]))

 static inline int block_bmap (struct buffer_head * bh, int nr)
 {
@@ -252,11 +252,11 @@ static struct buffer_head * inode_getblk (struct inode * inode, int nr,
 	p = inode->u.ext2_i.i_data + nr;
 repeat:
-	tmp = *p;
+	tmp = le32_to_cpu(*p);
 	if (tmp) {
 		if (metadata) {
 			struct buffer_head * result = getblk (inode->i_dev, tmp, inode->i_sb->s_blocksize);
-			if (tmp == *p)
+			if (tmp == le32_to_cpu(*p))
 				return result;
 			brelse (result);
 			goto repeat;
@@ -291,7 +291,7 @@ static struct buffer_head * inode_getblk (struct inode * inode, int nr,
 	if (!goal) {
 		for (tmp = nr - 1; tmp >= 0; tmp--) {
 			if (inode->u.ext2_i.i_data[tmp]) {
-				goal = inode->u.ext2_i.i_data[tmp];
+				goal = le32_to_cpu(inode->u.ext2_i.i_data[tmp]);
 				break;
 			}
 		}
@@ -326,7 +326,7 @@ static struct buffer_head * inode_getblk (struct inode * inode, int nr,
 		*err = 0;
 		*created = 1;
 	}
-	*p = tmp;
+	*p = cpu_to_le32(tmp);

 	inode->u.ext2_i.i_next_alloc_block = new_block;
 	inode->u.ext2_i.i_next_alloc_goal = tmp;
@@ -699,11 +699,14 @@ void ext2_read_inode (struct inode * inode)
 	if (inode->u.ext2_i.i_prealloc_count)
 		ext2_error (inode->i_sb, "ext2_read_inode",
 			    "New inode has non-zero prealloc count!");
-	if (S_ISLNK(inode->i_mode) && !inode->i_blocks)
-		for (block = 0; block < EXT2_N_BLOCKS; block++)
-			inode->u.ext2_i.i_data[block] = raw_inode->i_block[block];
-	else for (block = 0; block < EXT2_N_BLOCKS; block++)
-		inode->u.ext2_i.i_data[block] = le32_to_cpu(raw_inode->i_block[block]);
+
+	/*
+	 * NOTE! The in-memory inode i_blocks array is in little-endian order
+	 * even on big-endian machines: we do NOT byteswap the block numbers!
+	 */
+	for (block = 0; block < EXT2_N_BLOCKS; block++)
+		inode->u.ext2_i.i_data[block] = raw_inode->i_block[block];
+
 	if (inode->i_ino == EXT2_ACL_IDX_INO ||
 	    inode->i_ino == EXT2_ACL_DATA_INO)
 		/* Nothing to do */ ;
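
The NOTE above names the invariant behind this whole run of ext2 hunks: i_data now stays little-endian in memory, exactly as on disk, and le32_to_cpu()/cpu_to_le32() are applied only at the point of use (inode_bmap, inode_getblk, the truncate paths); it is also why the *_swab32 duplicates in fsync.c could be deleted. A self-contained sketch of the convention, with a hand-rolled byte swap standing in for the conversion macros so the effect is visible even on a little-endian build host (values are hypothetical):

#include <stdint.h>
#include <stdio.h>

static uint32_t swab32(uint32_t x)
{
	return (x >> 24) | ((x >> 8) & 0xff00) |
	       ((x << 8) & 0xff0000) | (x << 24);
}

/* pretend the CPU is big-endian so the conversions actually swap */
#define le32_to_cpu(x) swab32(x)
#define cpu_to_le32(x) swab32(x)

int main(void)
{
	uint32_t i_data[12];		/* in-memory copy: kept little-endian */
	uint32_t block = 1234;		/* a block number in CPU order */

	i_data[0] = cpu_to_le32(block);	/* store: one conversion at the use site */
	printf("raw in memory: 0x%08x\n", i_data[0]);
	printf("inode_bmap-style read: %u\n", le32_to_cpu(i_data[0]));
	return 0;
}
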
@@ -819,11 +822,8 @@ static int ext2_update_inode(struct inode * inode, int do_sync)
 	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
 		raw_inode->i_block[0] = cpu_to_le32(kdev_t_to_nr(inode->i_rdev));
-	else if (S_ISLNK(inode->i_mode) && !inode->i_blocks)
-		for (block = 0; block < EXT2_N_BLOCKS; block++)
-			raw_inode->i_block[block] = inode->u.ext2_i.i_data[block];
 	else for (block = 0; block < EXT2_N_BLOCKS; block++)
-		raw_inode->i_block[block] = cpu_to_le32(inode->u.ext2_i.i_data[block]);
+		raw_inode->i_block[block] = inode->u.ext2_i.i_data[block];
 	mark_buffer_dirty(bh, 1);
 	if (do_sync) {
 		ll_rw_block (WRITE, 1, &bh);
@@ -131,10 +131,7 @@ static int check_block_empty(struct inode *inode, struct buffer_head *bh,
 	if (bh->b_count == 1) {
 		int tmp;
-		if (ind_bh)
-			tmp = le32_to_cpu(*p);
-		else
-			tmp = *p;
+		tmp = le32_to_cpu(*p);
 		*p = 0;
 		inode->i_blocks -= (inode->i_sb->s_blocksize / 512);
 		mark_inode_dirty(inode);
@@ -173,7 +170,7 @@ static int trunc_direct (struct inode * inode)
 	for (i = direct_block ; i < EXT2_NDIR_BLOCKS ; i++) {
 		u32 * p = inode->u.ext2_i.i_data + i;
-		int tmp = *p;
+		int tmp = le32_to_cpu(*p);

 		if (!tmp)
 			continue;
@@ -218,11 +215,11 @@ static int trunc_indirect (struct inode * inode, int offset, u32 * p,
 	unsigned long block_to_free = 0, free_count = 0;
 	int indirect_block, addr_per_block, blocks;

-	tmp = dind_bh ? le32_to_cpu(*p) : *p;
+	tmp = le32_to_cpu(*p);
 	if (!tmp)
 		return 0;
 	ind_bh = bread (inode->i_dev, tmp, inode->i_sb->s_blocksize);
-	if (tmp != (dind_bh ? le32_to_cpu(*p) : *p)) {
+	if (tmp != le32_to_cpu(*p)) {
 		brelse (ind_bh);
 		return 1;
 	}
@@ -300,11 +297,11 @@ static int trunc_dindirect (struct inode * inode, int offset, u32 * p,
 	int i, tmp, retry = 0;
 	int dindirect_block, addr_per_block;

-	tmp = tind_bh ? le32_to_cpu(*p) : *p;
+	tmp = le32_to_cpu(*p);
 	if (!tmp)
 		return 0;
 	dind_bh = bread (inode->i_dev, tmp, inode->i_sb->s_blocksize);
-	if (tmp != (tind_bh ? le32_to_cpu(*p) : *p)) {
+	if (tmp != le32_to_cpu(*p)) {
 		brelse (dind_bh);
 		return 1;
 	}
@@ -347,10 +344,11 @@ static int trunc_tindirect (struct inode * inode)
 	int i, tmp, retry = 0;
 	int tindirect_block, addr_per_block, offset;

-	if (!(tmp = *p))
+	tmp = le32_to_cpu(*p);
+	if (!tmp)
 		return 0;
 	tind_bh = bread (inode->i_dev, tmp, inode->i_sb->s_blocksize);
-	if (tmp != *p) {
+	if (tmp != le32_to_cpu(*p)) {
 		brelse (tind_bh);
 		return 1;
 	}
@@ -900,6 +900,7 @@ extern ssize_t block_write(struct file *, const char *, size_t, loff_t *);
 extern int block_fsync(struct file *, struct dentry *);
 extern int file_fsync(struct file *, struct dentry *);
+extern int generic_buffer_fdatasync(struct inode *inode, unsigned long start, unsigned long end);
 extern int inode_change_ok(struct inode *, struct iattr *);
 extern void inode_setattr(struct inode *, struct iattr *);
@@ -333,10 +333,8 @@ static int do_acct_process(long exitcode, struct file *file)
 	fs = get_fs();
 	set_fs(KERNEL_DS);
 	inode = file->f_dentry->d_inode;
-	down(&inode->i_sem);
 	file->f_op->write(file, (char *)&ac,
 			  sizeof(struct acct), &file->f_pos);
-	up(&inode->i_sem);
 	set_fs(fs);
 	fput(file);
 	return 0;
@@ -347,6 +347,90 @@ static inline struct page * __find_page_nolock(struct inode * inode, unsigned lo
 	return page;
 }

+/*
+ * By the time this is called, the page is locked and
+ * we don't have to worry about any races any more.
+ *
+ * Start the IO..
+ */
+static int writeout_one_page(struct page *page)
+{
+	struct buffer_head *bh, *head = page->buffers;
+
+	bh = head;
+	do {
+		if (buffer_locked(bh) || !buffer_dirty(bh))
+			continue;
+		bh->b_flushtime = 0;
+		ll_rw_block(WRITE, 1, &bh);
+	} while ((bh = bh->b_this_page) != head);
+	return 0;
+}
+
+static int waitfor_one_page(struct page *page)
+{
+	int error = 0;
+	struct buffer_head *bh, *head = page->buffers;
+
+	bh = head;
+	do {
+		wait_on_buffer(bh);
+		if (!buffer_uptodate(bh))
+			error = -EIO;
+	} while ((bh = bh->b_this_page) != head);
+	return error;
+}
+
+static int do_buffer_fdatasync(struct inode *inode, unsigned long start, unsigned long end, int (*fn)(struct page *))
+{
+	struct page *next;
+	int retval = 0;
+
+	start &= PAGE_MASK;
+	spin_lock(&pagecache_lock);
+	next = inode->i_pages;
+	while (next) {
+		struct page *page = next;
+
+		next = page->next;
+		if (!page->buffers)
+			continue;
+		if (page->offset >= end)
+			continue;
+		if (page->offset < start)
+			continue;
+
+		get_page(page);
+		spin_unlock(&pagecache_lock);
+		lock_page(page);
+
+		/* The buffers could have been free'd while we waited for the page lock */
+		if (page->buffers)
+			retval |= fn(page);
+
+		UnlockPage(page);
+		spin_lock(&pagecache_lock);
+		next = page->next;
+		page_cache_release(page);
+	}
+	spin_unlock(&pagecache_lock);
+
+	return retval;
+}
+
+/*
+ * Two-stage data sync: first start the IO, then go back and
+ * collect the information..
+ */
+int generic_buffer_fdatasync(struct inode *inode, unsigned long start, unsigned long end)
+{
+	int retval;
+
+	retval = do_buffer_fdatasync(inode, start, end, writeout_one_page);
+	retval |= do_buffer_fdatasync(inode, start, end, waitfor_one_page);
+
+	return retval;
+}
+
 /*
  * This adds a page to the page cache, starting out as locked,
  * owned by us, referenced, but not uptodate and with no errors.
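
The two comments above describe the design: a single list walker, parameterized by a callback, runs twice, so every dirty buffer's write has been issued before anything sleeps in wait_on_buffer(), letting the device work on all the IO at once instead of write-wait-write-wait. A compact userland sketch of the same two-pass shape (page_stub, start_io, and wait_io are stand-ins, not kernel API):

#include <stdio.h>

struct page_stub { int dirty; int error; };

static int start_io(struct page_stub *p)
{
	if (p->dirty) {
		printf("queue write\n");	/* ll_rw_block(WRITE, ...) stand-in */
		p->dirty = 0;
	}
	return 0;
}

static int wait_io(struct page_stub *p)
{
	printf("wait for completion\n");	/* wait_on_buffer() stand-in */
	return p->error;
}

static int walk(struct page_stub *pages, int n, int (*fn)(struct page_stub *))
{
	int i, retval = 0;

	for (i = 0; i < n; i++)
		retval |= fn(&pages[i]);
	return retval;
}

int main(void)
{
	struct page_stub pages[3] = { { 1, 0 }, { 1, 0 }, { 0, 0 } };
	int err;

	err = walk(pages, 3, start_io);		/* pass 1: start all the IO */
	err |= walk(pages, 3, wait_io);		/* pass 2: collect the results */
	return err;
}
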