/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	akpm@zip.com.au
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   block_invalidatepage */


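/*
 * Ask the filesystem to invalidate its private data in @page from @offset
 * onwards, via the address_space's ->invalidatepage() method.  Filesystems
 * which do not supply one get block_invalidatepage() by default.
 */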
static int do_invalidatepage(struct page *page, unsigned long offset)
{
	int (*invalidatepage)(struct page *, unsigned long);
	invalidatepage = page->mapping->a_ops->invalidatepage;
	if (invalidatepage == NULL)
		invalidatepage = block_invalidatepage;
	return (*invalidatepage)(page, offset);
}

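/*
 * Zero the tail of a page which straddles the new end-of-file, from @partial
 * to the end of the page, and give the filesystem a chance to invalidate any
 * private data beyond that point.
 */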
static inline void truncate_partial_page(struct page *page, unsigned partial)
{
	memclear_highpage_flush(page, partial, PAGE_CACHE_SIZE-partial);
	if (PagePrivate(page))
		do_invalidatepage(page, partial);
}

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes anonymous.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_nopage().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_inode_pages got there first, and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static void
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return;

	if (PagePrivate(page))
		do_invalidatepage(page, 0);

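	/*
	 * Wipe the dirty, uptodate and mapped-to-disk state before dropping
	 * the page from the pagecache, so stale contents are never written
	 * back or reused.
	 */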
	clear_page_dirty(page);
	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);
	remove_from_page_cache(page);
	page_cache_release(page);	/* pagecache ref */
}

/*
 * This is for invalidate_inode_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too.  So we re-check the dirtiness inside
 * ->page_lock.  That provides exclusion against the __set_page_dirty
 * functions.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (PagePrivate(page) && !try_to_release_page(page, 0))
		return 0;

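	/*
	 * Recheck dirtiness under mapping->page_lock: this excludes the
	 * __set_page_dirty functions, which also take that lock.
	 */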
	spin_lock(&mapping->page_lock);
	if (PageDirty(page)) {
		spin_unlock(&mapping->page_lock);
		return 0;
	}
	__remove_from_page_cache(page);
	spin_unlock(&mapping->page_lock);
	ClearPageUptodate(page);
	page_cache_release(page);	/* pagecache ref */
	return 1;
}

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Truncate the page cache at a set offset, removing the pages that are beyond
 * that offset (and zeroing out partial pages).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This keeps IO against the affected region to a minimum.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * When looking at page->index outside the page lock we need to be careful to
 * copy it into a local to avoid races (it could change at any time).
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 *
 * Called under (and serialised by) inode->i_sem.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t next;
	int i;

	if (mapping->nrpages == 0)
		return;

	pagevec_init(&pvec, 0);
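	/*
	 * First pass: do not block.  Locked pages and pages under writeback
	 * are skipped here and picked up by the second pass below.
	 */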
	next = start;
	while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
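			/*
			 * We don't hold the page lock yet, so page->index can
			 * change under us: copy it into a local first.
			 */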
			pgoff_t page_index = page->index;

			if (page_index > next)
				next = page_index;
			next++;
			if (TestSetPageLocked(page))
				continue;
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_complete_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}

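	/* Zero the now-unused tail of the page which straddles the new EOF */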
	if (partial) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			wait_on_page_writeback(page);
			truncate_partial_page(page, partial);
			unlock_page(page);
			page_cache_release(page);
		}
	}

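	/*
	 * Second pass: block on page locks and writeback, and keep rescanning
	 * from 'start' until a full scan finds no pages left to remove.
	 */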
	next = start;
	for ( ; ; ) {
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
			if (next == start)
				break;
			next = start;
			continue;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			lock_page(page);
			wait_on_page_writeback(page);
			if (page->index > next)
				next = page->index;
			next++;
			truncate_complete_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
	}
}
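
/*
 * Example caller (illustrative sketch only, not part of this file; the helper
 * name is hypothetical): a filesystem truncate path, already holding i_sem,
 * would be expected to update i_size and then trim the pagecache:
 *
 *	static void example_truncate(struct inode *inode, loff_t newsize)
 *	{
 *		inode->i_size = newsize;
 *		truncate_inode_pages(inode->i_mapping, newsize);
 *	}
 */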

/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to remove
 * all of the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity. It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
				pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next = start;
	unsigned long ret = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (next <= end &&
			pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			if (TestSetPageLocked(page)) {
				next++;
				continue;
			}
			if (page->index > next)
				next = page->index;
			next++;
			if (PageDirty(page) || PageWriteback(page))
				goto unlock;
			if (page_mapped(page))
				goto unlock;
			ret += invalidate_complete_page(mapping, page);
unlock:
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return ret;
}

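/*
 * Convenience wrapper: try to invalidate every unlocked, clean, unmapped
 * page in @mapping.
 */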
unsigned long invalidate_inode_pages(struct address_space *mapping)
{
	return invalidate_mapping_pages(mapping, 0, ~0UL);
}
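
/*
 * Example (illustrative only): a filesystem which suspects its cached data
 * is stale, e.g. after revalidating an inode against the server or device,
 * might drop whatever clean pagecache it can with:
 *
 *	invalidate_inode_pages(inode->i_mapping);
 *
 * Dirty, locked, mapped or writeback pages are simply left alone.
 */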

/**
 * invalidate_inode_pages2 - remove all unmapped pages from an address_space
 * @mapping: the address_space
 *
 * invalidate_inode_pages2() is like truncate_inode_pages(), except for the case
 * where the page is seen to be mapped into process pagetables.  In that case,
 * the page is marked clean but is left attached to its address_space.
 *
 * FIXME: invalidate_inode_pages2() is probably trivially livelockable.
 */
void invalidate_inode_pages2(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t next = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			lock_page(page);
			if (page->mapping == mapping) {	/* truncate race? */
				wait_on_page_writeback(page);
				next = page->index + 1;
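				/*
				 * Mapped pages cannot be removed here: just
				 * mark them clean and leave them attached to
				 * the mapping.
				 */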
				if (page_mapped(page))
					clear_page_dirty(page);
				else
					invalidate_complete_page(mapping, page);
			}
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}