// SPDX-License-Identifier: GPL-2.0-only
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/list_bl.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/mbcache.h>

/*
 * Mbcache is a simple key-value store. Keys need not be unique; however,
 * key-value pairs are expected to be unique (we use this fact in
 * mb_cache_entry_delete_or_get()).
 *
 * Ext2 and ext4 use this cache for deduplication of extended attribute blocks.
 * Ext4 also uses it for deduplication of xattr values stored in inodes.
 * They use a hash of the data as a key and provide a value that may represent
 * a block or inode number. That's why keys need not be unique (the hash of
 * different data may be the same). However, a user-provided value always
 * uniquely identifies a cache entry.
 *
 * We provide functions for creation and removal of entries, search by key,
 * and a special "delete entry with given key-value pair" operation. A fixed
 * size hash table is used for fast key lookups.
 */
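
/*
 * A minimal usage sketch (illustrative only): a filesystem typically creates
 * one cache when it is mounted and destroys it when it is unmounted. The
 * bucket_bits value of 10 below (i.e. 2^10 hash buckets) is an arbitrary
 * example, not a recommendation.
 *
 *	struct mb_cache *cache = mb_cache_create(10);
 *
 *	if (!cache)
 *		return -ENOMEM;
 *	...create, look up and delete entries...
 *	mb_cache_destroy(cache);
 */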

struct mb_cache {
	/* Hash table of entries */
	struct hlist_bl_head	*c_hash;
	/* log2 of hash table size */
	int			c_bucket_bits;
	/* Maximum entries in cache to avoid degrading hash too much */
	unsigned long		c_max_entries;
	/* Protects c_list, c_entry_count */
	spinlock_t		c_list_lock;
	struct list_head	c_list;
	/* Number of entries in cache */
	unsigned long		c_entry_count;
	struct shrinker		c_shrink;
	/* Work for shrinking when the cache has too many entries */
	struct work_struct	c_shrink_work;
};

static struct kmem_cache *mb_entry_cache;

static unsigned long mb_cache_shrink(struct mb_cache *cache,
				     unsigned long nr_to_scan);

static inline struct hlist_bl_head *mb_cache_entry_head(struct mb_cache *cache,
							u32 key)
{
	return &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
}

/*
 * Number of entries to reclaim synchronously when there are too many entries
 * in cache
 */
#define SYNC_SHRINK_BATCH 64

/*
 * mb_cache_entry_create - create entry in cache
 * @cache - cache where the entry should be created
 * @mask - gfp mask with which the entry should be allocated
 * @key - key of the entry
 * @value - value of the entry
 * @reusable - is the entry reusable by others?
 *
 * Creates entry in @cache with key @key and value @value. The function returns
 * -EBUSY if an entry with the same key and value already exists in the cache.
 * Otherwise 0 is returned.
 */
int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
			  u64 value, bool reusable)
{
	struct mb_cache_entry *entry, *dup;
	struct hlist_bl_node *dup_node;
	struct hlist_bl_head *head;

	/* Schedule background reclaim if there are too many entries */
	if (cache->c_entry_count >= cache->c_max_entries)
		schedule_work(&cache->c_shrink_work);
	/* Do some sync reclaim if background reclaim cannot keep up */
	if (cache->c_entry_count >= 2*cache->c_max_entries)
		mb_cache_shrink(cache, SYNC_SHRINK_BATCH);

	entry = kmem_cache_alloc(mb_entry_cache, mask);
	if (!entry)
		return -ENOMEM;

	INIT_LIST_HEAD(&entry->e_list);
	/* One ref for hash, one ref returned */
	atomic_set(&entry->e_refcnt, 1);
	entry->e_key = key;
	entry->e_value = value;
	entry->e_reusable = reusable;
	entry->e_referenced = 0;
	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
		if (dup->e_key == key && dup->e_value == value) {
			hlist_bl_unlock(head);
			kmem_cache_free(mb_entry_cache, entry);
			return -EBUSY;
		}
	}
	hlist_bl_add_head(&entry->e_hash_list, head);
	hlist_bl_unlock(head);

	spin_lock(&cache->c_list_lock);
	list_add_tail(&entry->e_list, &cache->c_list);
	/* Grab ref for LRU list */
	atomic_inc(&entry->e_refcnt);
	cache->c_entry_count++;
	spin_unlock(&cache->c_list_lock);

	return 0;
}
EXPORT_SYMBOL(mb_cache_entry_create);
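
/*
 * Illustrative call sequence (not taken from any particular caller): an
 * ext2/ext4 style user keys entries by a hash of the xattr block contents and
 * stores the block number as the value. The names xattr_hash and blocknr are
 * placeholders for whatever the caller has at hand.
 *
 *	err = mb_cache_entry_create(cache, GFP_NOFS, xattr_hash, blocknr, true);
 *	if (err == -EBUSY)
 *		err = 0;
 *
 * -EBUSY only means the key-value pair is already cached, so callers commonly
 * treat it as success.
 */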

void __mb_cache_entry_free(struct mb_cache_entry *entry)
{
	kmem_cache_free(mb_entry_cache, entry);
}
EXPORT_SYMBOL(__mb_cache_entry_free);

/*
 * mb_cache_entry_wait_unused - wait to be the last user of the entry
 *
 * @entry - entry to work on
 *
 * Wait to be the last user of the entry.
 */
void mb_cache_entry_wait_unused(struct mb_cache_entry *entry)
{
	/*
	 * A live entry holds one reference for the hash list and one for the
	 * LRU list, and the caller of this function holds a third one. A
	 * refcount above three therefore means some other user still has the
	 * entry pinned.
	 */
	wait_var_event(&entry->e_refcnt, atomic_read(&entry->e_refcnt) <= 3);
}
EXPORT_SYMBOL(mb_cache_entry_wait_unused);

static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
					   struct mb_cache_entry *entry,
					   u32 key)
{
	struct mb_cache_entry *old_entry = entry;
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	if (entry && !hlist_bl_unhashed(&entry->e_hash_list))
		node = entry->e_hash_list.next;
	else
		node = hlist_bl_first(head);
	while (node) {
		entry = hlist_bl_entry(node, struct mb_cache_entry,
				       e_hash_list);
		if (entry->e_key == key && entry->e_reusable) {
			atomic_inc(&entry->e_refcnt);
			goto out;
		}
		node = node->next;
	}
	entry = NULL;
out:
	hlist_bl_unlock(head);
	if (old_entry)
		mb_cache_entry_put(cache, old_entry);

	return entry;
}

/*
 * mb_cache_entry_find_first - find the first reusable entry with the given key
 * @cache: cache where we should search
 * @key: key to look for
 *
 * Search in @cache for a reusable entry with key @key. Grabs a reference to
 * the first reusable entry found and returns it.
 */
struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
						 u32 key)
{
	return __entry_find(cache, NULL, key);
}
EXPORT_SYMBOL(mb_cache_entry_find_first);

/*
 * mb_cache_entry_find_next - find next reusable entry with the same key
 * @cache: cache where we should search
 * @entry: entry to start search from
 *
 * Finds next reusable entry in the hash chain which has the same key as @entry.
 * If @entry is unhashed (which can happen when deletion of entry races with the
 * search), finds the first reusable entry in the hash chain. The function drops
 * reference to @entry and returns with a reference to the found entry.
 */
struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache,
						struct mb_cache_entry *entry)
{
	return __entry_find(cache, entry, entry->e_key);
}
EXPORT_SYMBOL(mb_cache_entry_find_next);
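
/*
 * A sketch of the intended search pattern (illustrative only; the helper
 * candidate_matches() is a placeholder for caller-specific checks): walk all
 * reusable entries with a given key and stop at the first suitable one. The
 * reference returned by find_first/find_next must be dropped with
 * mb_cache_entry_put() unless it is handed back to find_next, which drops it
 * internally.
 *
 *	struct mb_cache_entry *ce;
 *
 *	ce = mb_cache_entry_find_first(cache, key);
 *	while (ce) {
 *		if (candidate_matches(ce->e_value))
 *			break;
 *		ce = mb_cache_entry_find_next(cache, ce);
 *	}
 *	if (ce)
 *		mb_cache_entry_put(cache, ce);
 */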

/*
 * mb_cache_entry_get - get a cache entry by value (and key)
 * @cache - cache we work with
 * @key - key
 * @value - value
 */
struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
					  u64 value)
{
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;
	struct mb_cache_entry *entry;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
		if (entry->e_key == key && entry->e_value == value) {
			atomic_inc(&entry->e_refcnt);
			goto out;
		}
	}
	entry = NULL;
out:
	hlist_bl_unlock(head);
	return entry;
}
EXPORT_SYMBOL(mb_cache_entry_get);
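
/*
 * Illustrative use (names are placeholders): a caller that knows both the key
 * and the value can check whether the pair is still cached and mark it as
 * recently used so that it survives longer under shrinker pressure.
 *
 *	struct mb_cache_entry *ce;
 *
 *	ce = mb_cache_entry_get(cache, xattr_hash, blocknr);
 *	if (ce) {
 *		mb_cache_entry_touch(cache, ce);
 *		mb_cache_entry_put(cache, ce);
 *	}
 */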

/* mb_cache_entry_delete_or_get - remove a cache entry if it has no users
 * @cache - cache we work with
 * @key - key
 * @value - value
 *
 * Remove the entry from cache @cache with key @key and value @value. The
 * removal happens only if the entry is unused. The function returns NULL if
 * the entry was successfully removed or if there is no such entry in the
 * cache. Otherwise the function grabs a reference to the entry that it failed
 * to delete because the entry still has users, and returns it.
 */
struct mb_cache_entry *mb_cache_entry_delete_or_get(struct mb_cache *cache,
						    u32 key, u64 value)
{
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;
	struct mb_cache_entry *entry;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
		if (entry->e_key == key && entry->e_value == value) {
			if (atomic_read(&entry->e_refcnt) > 2) {
				atomic_inc(&entry->e_refcnt);
				hlist_bl_unlock(head);
				return entry;
			}
			/* We keep hash list reference to keep entry alive */
			hlist_bl_del_init(&entry->e_hash_list);
			hlist_bl_unlock(head);
			spin_lock(&cache->c_list_lock);
			if (!list_empty(&entry->e_list)) {
				list_del_init(&entry->e_list);
				if (!WARN_ONCE(cache->c_entry_count == 0,
		"mbcache: attempt to decrement c_entry_count past zero"))
					cache->c_entry_count--;
				atomic_dec(&entry->e_refcnt);
			}
			spin_unlock(&cache->c_list_lock);
			mb_cache_entry_put(cache, entry);
			return NULL;
		}
	}
	hlist_bl_unlock(head);

	return NULL;
}
EXPORT_SYMBOL(mb_cache_entry_delete_or_get);
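
/*
 * Illustrative teardown sequence (not taken from any particular caller): when
 * the block behind a cached pair is about to be freed or reused, the owner
 * first tries to remove the pair and, if somebody still holds a reference,
 * waits for that user to finish before going ahead.
 *
 *	struct mb_cache_entry *ce;
 *
 *	ce = mb_cache_entry_delete_or_get(cache, xattr_hash, blocknr);
 *	if (ce) {
 *		mb_cache_entry_wait_unused(ce);
 *		mb_cache_entry_put(cache, ce);
 *	}
 */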

/* mb_cache_entry_touch - cache entry got used
 * @cache - cache the entry belongs to
 * @entry - entry that got used
 *
 * Marks entry as used to give it higher chances of surviving in the cache.
 */
void mb_cache_entry_touch(struct mb_cache *cache,
			  struct mb_cache_entry *entry)
{
	entry->e_referenced = 1;
}
EXPORT_SYMBOL(mb_cache_entry_touch);

static unsigned long mb_cache_count(struct shrinker *shrink,
				    struct shrink_control *sc)
{
	struct mb_cache *cache = container_of(shrink, struct mb_cache,
					      c_shrink);

	return cache->c_entry_count;
}

/* Shrink number of entries in cache */
static unsigned long mb_cache_shrink(struct mb_cache *cache,
				     unsigned long nr_to_scan)
{
	struct mb_cache_entry *entry;
	struct hlist_bl_head *head;
	unsigned long shrunk = 0;

	spin_lock(&cache->c_list_lock);
	while (nr_to_scan-- && !list_empty(&cache->c_list)) {
		entry = list_first_entry(&cache->c_list,
					 struct mb_cache_entry, e_list);
		if (entry->e_referenced || atomic_read(&entry->e_refcnt) > 2) {
			entry->e_referenced = 0;
			list_move_tail(&entry->e_list, &cache->c_list);
			continue;
		}
		list_del_init(&entry->e_list);
		cache->c_entry_count--;
		/*
		 * We keep LRU list reference so that entry doesn't go away
		 * from under us.
		 */
		spin_unlock(&cache->c_list_lock);
		head = mb_cache_entry_head(cache, entry->e_key);
		hlist_bl_lock(head);
		/* Now a reliable check if the entry didn't get used... */
		if (atomic_read(&entry->e_refcnt) > 2) {
			hlist_bl_unlock(head);
			spin_lock(&cache->c_list_lock);
			list_add_tail(&entry->e_list, &cache->c_list);
			cache->c_entry_count++;
			continue;
		}
		if (!hlist_bl_unhashed(&entry->e_hash_list)) {
			hlist_bl_del_init(&entry->e_hash_list);
			atomic_dec(&entry->e_refcnt);
		}
		hlist_bl_unlock(head);
		if (mb_cache_entry_put(cache, entry))
			shrunk++;
		cond_resched();
		spin_lock(&cache->c_list_lock);
	}
	spin_unlock(&cache->c_list_lock);

	return shrunk;
}

static unsigned long mb_cache_scan(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	struct mb_cache *cache = container_of(shrink, struct mb_cache,
					      c_shrink);
	return mb_cache_shrink(cache, sc->nr_to_scan);
}

/* We shrink 1/X of the cache when we have too many entries in it */
#define SHRINK_DIVISOR 16

static void mb_cache_shrink_worker(struct work_struct *work)
{
	struct mb_cache *cache = container_of(work, struct mb_cache,
					      c_shrink_work);
	mb_cache_shrink(cache, cache->c_max_entries / SHRINK_DIVISOR);
}

/*
 * mb_cache_create - create cache
 * @bucket_bits: log2 of the hash table size
 *
 * Create cache for keys with 2^bucket_bits hash entries.
 */
struct mb_cache *mb_cache_create(int bucket_bits)
{
	struct mb_cache *cache;
	unsigned long bucket_count = 1UL << bucket_bits;
	unsigned long i;

	cache = kzalloc(sizeof(struct mb_cache), GFP_KERNEL);
	if (!cache)
		goto err_out;
	cache->c_bucket_bits = bucket_bits;
	cache->c_max_entries = bucket_count << 4;
	INIT_LIST_HEAD(&cache->c_list);
	spin_lock_init(&cache->c_list_lock);
	cache->c_hash = kmalloc_array(bucket_count,
				      sizeof(struct hlist_bl_head),
				      GFP_KERNEL);
	if (!cache->c_hash) {
		kfree(cache);
		goto err_out;
	}
	for (i = 0; i < bucket_count; i++)
		INIT_HLIST_BL_HEAD(&cache->c_hash[i]);

	cache->c_shrink.count_objects = mb_cache_count;
	cache->c_shrink.scan_objects = mb_cache_scan;
	cache->c_shrink.seeks = DEFAULT_SEEKS;
	if (register_shrinker(&cache->c_shrink)) {
		kfree(cache->c_hash);
		kfree(cache);
		goto err_out;
	}

	INIT_WORK(&cache->c_shrink_work, mb_cache_shrink_worker);

	return cache;

err_out:
	return NULL;
}
EXPORT_SYMBOL(mb_cache_create);

/*
 * mb_cache_destroy - destroy cache
 * @cache: the cache to destroy
 *
 * Free all entries in cache and cache itself. Caller must make sure nobody
 * (except shrinker) can reach @cache when calling this.
 */
void mb_cache_destroy(struct mb_cache *cache)
{
	struct mb_cache_entry *entry, *next;

	unregister_shrinker(&cache->c_shrink);

	/*
	 * We don't bother with any locking. Cache must not be used at this
	 * point.
	 */
	list_for_each_entry_safe(entry, next, &cache->c_list, e_list) {
		if (!hlist_bl_unhashed(&entry->e_hash_list)) {
			hlist_bl_del_init(&entry->e_hash_list);
			atomic_dec(&entry->e_refcnt);
		} else
			WARN_ON(1);
		list_del(&entry->e_list);
		WARN_ON(atomic_read(&entry->e_refcnt) != 1);
		mb_cache_entry_put(cache, entry);
	}
	kfree(cache->c_hash);
	kfree(cache);
}
EXPORT_SYMBOL(mb_cache_destroy);

static int __init mbcache_init(void)
{
	mb_entry_cache = kmem_cache_create("mbcache",
				sizeof(struct mb_cache_entry), 0,
				SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
	if (!mb_entry_cache)
		return -ENOMEM;
	return 0;
}

static void __exit mbcache_exit(void)
{
	kmem_cache_destroy(mb_entry_cache);
}

module_init(mbcache_init)
module_exit(mbcache_exit)

MODULE_AUTHOR("Jan Kara <jack@suse.cz>");
MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
MODULE_LICENSE("GPL");