Commit b585ca3e authored by Ingo Molnar

- make i_mmap and i_mmap_shared list.h lists

- make vma->vm_next_share and vma->vm_pprev_share a proper list.h list as well
parent 6d22c10f
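In other words: the hand-rolled share chain (a vm_next_share forward pointer plus a vm_pprev_share back-pointer into the previous link) is replaced by the generic doubly linked list.h machinery, with two list heads in the address_space and one embedded node per vm_area_struct. A simplified sketch of the layout change (the _old/_new names are illustrative only; list_t is this kernel series' typedef for struct list_head):

    /* list_t is the typedef this series uses for struct list_head */
    typedef struct list_head {
            struct list_head *next, *prev;
    } list_t;

    /* Before: a hand-rolled chain threaded through the vmas. */
    struct vm_area_struct_old;
    struct address_space_old {
            struct vm_area_struct_old  *i_mmap;          /* first private mapping, or NULL */
            struct vm_area_struct_old  *i_mmap_shared;   /* first shared mapping, or NULL */
    };
    struct vm_area_struct_old {
            struct vm_area_struct_old  *vm_next_share;   /* next vma on the same chain */
            struct vm_area_struct_old **vm_pprev_share;  /* points at whatever points at us */
    };

    /* After: generic list heads plus one embedded node per vma. */
    struct address_space_new {
            list_t i_mmap;          /* all private mappings */
            list_t i_mmap_shared;   /* all shared mappings */
    };
    struct vm_area_struct_new {
            list_t shared;          /* links this vma into one of the heads above */
    };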
@@ -989,8 +989,8 @@ static int __init init_blkmtd(void)
         rawdevice->as.nrpages = 0;
         rawdevice->as.a_ops = &blkmtd_aops;
         rawdevice->as.host = inode;
-        rawdevice->as.i_mmap = NULL;
-        rawdevice->as.i_mmap_shared = NULL;
+        INIT_LIST_HEAD(&rawdevice->as.i_mmap);
+        INIT_LIST_HEAD(&rawdevice->as.i_mmap_shared);
         spin_lock_init(&rawdevice->as.i_shared_lock);
         rawdevice->as.gfp_mask = GFP_KERNEL;
         rawdevice->file = file;

@@ -144,6 +144,8 @@ void inode_init_once(struct inode *inode)
         INIT_LIST_HEAD(&inode->i_devices);
         sema_init(&inode->i_sem, 1);
         spin_lock_init(&inode->i_data.i_shared_lock);
+        INIT_LIST_HEAD(&inode->i_data.i_mmap);
+        INIT_LIST_HEAD(&inode->i_data.i_mmap_shared);
 }
 
 static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)

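Note that the NULL initializations cannot simply stay: an empty list.h list is not a NULL pointer but a head that points at itself, which is exactly what the list_empty() checks introduced elsewhere in this patch test for. Roughly, paraphrasing include/linux/list.h of this era:

    #define INIT_LIST_HEAD(ptr) do { \
            (ptr)->next = (ptr); (ptr)->prev = (ptr); \
    } while (0)

    /* true when the head points back at itself, i.e. the list has no members */
    static inline int list_empty(list_t *head)
    {
            return head->next == head;
    }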
@@ -1459,7 +1459,7 @@ int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l)
             (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID) {
                 struct address_space *mapping = inode->i_mapping;
 
-                if (mapping->i_mmap_shared != NULL) {
+                if (!list_empty(&mapping->i_mmap_shared)) {
                         error = -EAGAIN;
                         goto out_putf;
                 }

@@ -1615,7 +1615,7 @@ int fcntl_setlk64(unsigned int fd, unsigned int cmd, struct flock64 *l)
             (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID) {
                 struct address_space *mapping = inode->i_mapping;
 
-                if (mapping->i_mmap_shared != NULL) {
+                if (!list_empty(&mapping->i_mmap_shared)) {
                         error = -EAGAIN;
                         goto out_putf;
                 }

@@ -377,8 +377,8 @@ struct address_space {
         unsigned long           nrpages;        /* number of total pages */
         struct address_space_operations *a_ops; /* methods */
         struct inode            *host;          /* owner: inode, block_device */
-        struct vm_area_struct   *i_mmap;        /* list of private mappings */
-        struct vm_area_struct   *i_mmap_shared; /* list of shared mappings */
+        list_t                  i_mmap;         /* list of private mappings */
+        list_t                  i_mmap_shared;  /* list of shared mappings */
         spinlock_t              i_shared_lock;  /* and spinlock protecting it */
         int                     gfp_mask;       /* how to allocate the pages */
 };

@@ -61,8 +61,7 @@ struct vm_area_struct {
          * one of the address_space->i_mmap{,shared} lists,
          * for shm areas, the list of attaches, otherwise unused.
          */
-        struct vm_area_struct *vm_next_share;
-        struct vm_area_struct **vm_pprev_share;
+        list_t shared;
 
         /* Function pointers to deal with this struct. */
         struct vm_operations_struct * vm_ops;

@@ -219,11 +219,7 @@ static inline int dup_mmap(struct mm_struct * mm)
                         /* insert tmp into the share list, just after mpnt */
                         spin_lock(&inode->i_mapping->i_shared_lock);
-                        if((tmp->vm_next_share = mpnt->vm_next_share) != NULL)
-                                mpnt->vm_next_share->vm_pprev_share =
-                                                        &tmp->vm_next_share;
-                        mpnt->vm_next_share = tmp;
-                        tmp->vm_pprev_share = &mpnt->vm_next_share;
+                        list_add_tail(&tmp->shared, &mpnt->shared);
                         spin_unlock(&inode->i_mapping->i_shared_lock);
                 }

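For reference, list_add_tail() links the new node in immediately before whatever node is passed as the second argument (paraphrasing include/linux/list.h). Anchored at &mpnt->shared this puts tmp just before mpnt on the circular list rather than just after it, as the old code did; since the i_mmap lists are walked as unordered sets, either position should be equivalent here.

    static inline void __list_add(list_t *new, list_t *prev, list_t *next)
    {
            next->prev = new;
            new->next  = next;
            new->prev  = prev;
            prev->next = new;
    }

    /* insert 'new' just before 'head' (i.e. at the tail if 'head' is a list head) */
    static inline void list_add_tail(list_t *new, list_t *head)
    {
            __list_add(new, head->prev, head);
    }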
@@ -1399,7 +1399,7 @@ void do_generic_file_read(struct file * filp, loff_t *ppos, read_descriptor_t *
                  * virtual addresses, take care about potential aliasing
                  * before reading the page on the kernel side.
                  */
-                if (mapping->i_mmap_shared != NULL)
+                if (!list_empty(&mapping->i_mmap_shared))
                         flush_dcache_page(page);
 
                 /*

@@ -1032,31 +1032,35 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
         return -1;
 }
 
-static void vmtruncate_list(struct vm_area_struct *mpnt, unsigned long pgoff)
+static void vmtruncate_list(list_t *head, unsigned long pgoff)
 {
-        do {
-                unsigned long start = mpnt->vm_start;
-                unsigned long end = mpnt->vm_end;
-                unsigned long len = end - start;
-                unsigned long diff;
+        unsigned long start, end, len, diff;
+        struct vm_area_struct *vma;
+        list_t *curr;
+
+        list_for_each(curr, head) {
+                vma = list_entry(curr, struct vm_area_struct, shared);
+                start = vma->vm_start;
+                end = vma->vm_end;
+                len = end - start;
 
                 /* mapping wholly truncated? */
-                if (mpnt->vm_pgoff >= pgoff) {
-                        zap_page_range(mpnt, start, len);
+                if (vma->vm_pgoff >= pgoff) {
+                        zap_page_range(vma, start, len);
                         continue;
                 }
 
                 /* mapping wholly unaffected? */
                 len = len >> PAGE_SHIFT;
-                diff = pgoff - mpnt->vm_pgoff;
+                diff = pgoff - vma->vm_pgoff;
                 if (diff >= len)
                         continue;
 
                 /* Ok, partially affected.. */
                 start += diff << PAGE_SHIFT;
                 len = (len - diff) << PAGE_SHIFT;
-                zap_page_range(mpnt, start, len);
-        } while ((mpnt = mpnt->vm_next_share) != NULL);
+                zap_page_range(vma, start, len);
+        }
 }
 
 /*

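The do/while walk over vm_next_share becomes the standard list.h traversal: list_for_each() steps through the nodes until the cursor wraps back to the head, and list_entry() converts a pointer to the embedded 'shared' node back into the enclosing vm_area_struct by subtracting the member offset. Simplified (the real 2.5-era macros also add prefetch hints):

    #define list_for_each(pos, head) \
            for (pos = (head)->next; pos != (head); pos = pos->next)

    /* recover the enclosing structure from a pointer to its embedded member */
    #define list_entry(ptr, type, member) \
            ((type *)((char *)(ptr) - (unsigned long)(&((type *)0)->member)))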
@@ -1077,14 +1081,14 @@ int vmtruncate(struct inode * inode, loff_t offset)
                 goto do_expand;
         inode->i_size = offset;
         spin_lock(&mapping->i_shared_lock);
-        if (!mapping->i_mmap && !mapping->i_mmap_shared)
+        if (list_empty(&mapping->i_mmap) && list_empty(&mapping->i_mmap_shared))
                 goto out_unlock;
 
         pgoff = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-        if (mapping->i_mmap != NULL)
-                vmtruncate_list(mapping->i_mmap, pgoff);
-        if (mapping->i_mmap_shared != NULL)
-                vmtruncate_list(mapping->i_mmap_shared, pgoff);
+        if (!list_empty(&mapping->i_mmap))
+                vmtruncate_list(&mapping->i_mmap, pgoff);
+        if (!list_empty(&mapping->i_mmap_shared))
+                vmtruncate_list(&mapping->i_mmap_shared, pgoff);
 
 out_unlock:
         spin_unlock(&mapping->i_shared_lock);

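vmtruncate_list() now takes the address of the list head rather than the first element, and the list_empty() guards look like a short-cut past the function call rather than a correctness requirement: iterating an empty, self-pointing head performs zero passes anyway, as this hypothetical standalone snippet illustrates.

    list_t empty;
    list_t *curr;

    INIT_LIST_HEAD(&empty);              /* head points at itself */
    list_for_each(curr, &empty) {
            /* never executed: the walk stops as soon as curr == &empty */
    }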
@@ -101,9 +101,7 @@ static inline void __remove_shared_vm_struct(struct vm_area_struct *vma)
                 struct inode *inode = file->f_dentry->d_inode;
                 if (vma->vm_flags & VM_DENYWRITE)
                         atomic_inc(&inode->i_writecount);
-                if(vma->vm_next_share)
-                        vma->vm_next_share->vm_pprev_share = vma->vm_pprev_share;
-                *vma->vm_pprev_share = vma->vm_next_share;
+                list_del_init(&vma->shared);
         }
 }

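list_del_init() both unlinks the node and re-initializes it to point at itself, so a vma taken off its i_mmap list always carries a valid, empty 'shared' node and can safely be tested or unlinked again; plain list_del() in this era would leave it pointing into the old list, much as the removed code left stale vm_next_share/vm_pprev_share values behind. Roughly, paraphrasing include/linux/list.h:

    static inline void __list_del(list_t *prev, list_t *next)
    {
            next->prev = prev;
            prev->next = next;
    }

    static inline void list_del_init(list_t *entry)
    {
            __list_del(entry->prev, entry->next);   /* unlink from the old list */
            INIT_LIST_HEAD(entry);                  /* leave the node self-pointing */
    }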
@@ -308,20 +306,14 @@ static inline void __vma_link_file(struct vm_area_struct * vma)
         if (file) {
                 struct inode * inode = file->f_dentry->d_inode;
                 struct address_space *mapping = inode->i_mapping;
-                struct vm_area_struct **head;
 
                 if (vma->vm_flags & VM_DENYWRITE)
                         atomic_dec(&inode->i_writecount);
 
-                head = &mapping->i_mmap;
                 if (vma->vm_flags & VM_SHARED)
-                        head = &mapping->i_mmap_shared;
-
-                /* insert vma into inode's share list */
-                if((vma->vm_next_share = *head) != NULL)
-                        (*head)->vm_pprev_share = &vma->vm_next_share;
-                *head = vma;
-                vma->vm_pprev_share = head;
+                        list_add_tail(&vma->shared, &mapping->i_mmap_shared);
+                else
+                        list_add_tail(&vma->shared, &mapping->i_mmap);
         }
 }

@@ -904,7 +904,7 @@ static void do_shmem_file_read(struct file * filp, loff_t *ppos, read_descriptor
                 if ((desc->error = shmem_getpage(inode, index, &page)))
                         break;
 
-                if (mapping->i_mmap_shared != NULL)
+                if (!list_empty(&mapping->i_mmap_shared))
                         flush_dcache_page(page);
 
                 /*
