Commit 3d875182 authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "6 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  sh: add copy_user_page() alias for __copy_user()
  lib/Kconfig: ZLIB_DEFLATE must select BITREVERSE
  mm, dax: fix DAX deadlocks
  memcg: convert threshold to bytes
  builddeb: remove debian/files before build
  mm, fs: obey gfp_mapping for add_to_page_cache()
parents 69984b64 934ed25e
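
Most of the page-cache changes in this series apply one pattern: instead of passing a bare GFP_KERNEL into add_to_page_cache(), callers mask it with the mapping's own allocation constraints so the filesystem's restrictions are obeyed. The DAX changes, by contrast, switch __dax_fault()/__dax_pmd_fault() from i_mmap_lock_write() to i_mmap_lock_read() so faults serialise against truncate without deadlocking. A minimal, hedged sketch of the gfp-masking pattern follows; the function name readpages_add_example is illustrative only and is not part of the commit:

/*
 * Sketch only: combine the caller's default flags with the
 * restrictions the filesystem recorded on the mapping.
 */
#include <linux/pagemap.h>
#include <linux/gfp.h>

static int readpages_add_example(struct address_space *mapping,
                                 struct page *page, pgoff_t index)
{
        gfp_t gfp = GFP_KERNEL & mapping_gfp_mask(mapping);

        /* The page (and its radix-tree node) is inserted with a mask
         * the filesystem can tolerate, not raw GFP_KERNEL. */
        return add_to_page_cache_lru(page, mapping, index, gfp);
}
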
@@ -59,6 +59,7 @@ pages_do_alias(unsigned long addr1, unsigned long addr2)
 #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
 extern void copy_page(void *to, void *from);
+#define copy_user_page(to, from, vaddr, pg)  __copy_user(to, from, PAGE_SIZE)
 struct page;
 struct vm_area_struct;
...
@@ -224,7 +224,7 @@ static int ll_dir_filler(void *_hash, struct page *page0)
 	prefetchw(&page->flags);
 	ret = add_to_page_cache_lru(page, inode->i_mapping, offset,
-				    GFP_KERNEL);
+				    GFP_NOFS);
 	if (ret == 0) {
 		unlock_page(page);
 	} else {
...
@@ -3380,6 +3380,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
 	struct page *page, *tpage;
 	unsigned int expected_index;
 	int rc;
+	gfp_t gfp = GFP_KERNEL & mapping_gfp_mask(mapping);
 	INIT_LIST_HEAD(tmplist);
@@ -3392,7 +3393,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
 	 */
 	__set_page_locked(page);
 	rc = add_to_page_cache_locked(page, mapping,
-				      page->index, GFP_KERNEL);
+				      page->index, gfp);
 	/* give up if we can't stick it in the cache */
 	if (rc) {
@@ -3418,8 +3419,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
 			break;
 		__set_page_locked(page);
-		if (add_to_page_cache_locked(page, mapping, page->index,
-					     GFP_KERNEL)) {
+		if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
 			__clear_page_locked(page);
 			break;
 		}
...
@@ -285,6 +285,7 @@ static int copy_user_bh(struct page *to, struct buffer_head *bh,
 static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
 			struct vm_area_struct *vma, struct vm_fault *vmf)
 {
+	struct address_space *mapping = inode->i_mapping;
 	sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);
 	unsigned long vaddr = (unsigned long)vmf->virtual_address;
 	void __pmem *addr;
@@ -292,6 +293,8 @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
 	pgoff_t size;
 	int error;
+	i_mmap_lock_read(mapping);
+
 	/*
 	 * Check truncate didn't happen while we were allocating a block.
 	 * If it did, this block may or may not be still allocated to the
@@ -321,6 +324,8 @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
 	error = vm_insert_mixed(vma, vaddr, pfn);
  out:
+	i_mmap_unlock_read(mapping);
+
 	return error;
 }
@@ -382,17 +387,15 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 			 * from a read fault and we've raced with a truncate
 			 */
 			error = -EIO;
-			goto unlock;
+			goto unlock_page;
 		}
-	} else {
-		i_mmap_lock_write(mapping);
 	}
 	error = get_block(inode, block, &bh, 0);
 	if (!error && (bh.b_size < PAGE_SIZE))
 		error = -EIO;		/* fs corruption? */
 	if (error)
-		goto unlock;
+		goto unlock_page;
 	if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) {
 		if (vmf->flags & FAULT_FLAG_WRITE) {
@@ -403,9 +406,8 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 			if (!error && (bh.b_size < PAGE_SIZE))
 				error = -EIO;
 			if (error)
-				goto unlock;
+				goto unlock_page;
 		} else {
-			i_mmap_unlock_write(mapping);
 			return dax_load_hole(mapping, page, vmf);
 		}
 	}
@@ -417,15 +419,17 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 		else
 			clear_user_highpage(new_page, vaddr);
 		if (error)
-			goto unlock;
+			goto unlock_page;
 		vmf->page = page;
 		if (!page) {
+			i_mmap_lock_read(mapping);
 			/* Check we didn't race with truncate */
 			size = (i_size_read(inode) + PAGE_SIZE - 1) >>
 								PAGE_SHIFT;
 			if (vmf->pgoff >= size) {
+				i_mmap_unlock_read(mapping);
 				error = -EIO;
-				goto unlock;
+				goto out;
 			}
 		}
 		return VM_FAULT_LOCKED;
@@ -461,8 +465,6 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 		WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE));
 	}
-	if (!page)
-		i_mmap_unlock_write(mapping);
  out:
 	if (error == -ENOMEM)
 		return VM_FAULT_OOM | major;
@@ -471,14 +473,11 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 		return VM_FAULT_SIGBUS | major;
 	return VM_FAULT_NOPAGE | major;
- unlock:
+ unlock_page:
 	if (page) {
 		unlock_page(page);
 		page_cache_release(page);
-	} else {
-		i_mmap_unlock_write(mapping);
 	}
 	goto out;
 }
 EXPORT_SYMBOL(__dax_fault);
@@ -556,10 +555,10 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
 	block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);
 	bh.b_size = PMD_SIZE;
-	i_mmap_lock_write(mapping);
 	length = get_block(inode, block, &bh, write);
 	if (length)
 		return VM_FAULT_SIGBUS;
+	i_mmap_lock_read(mapping);
 	/*
 	 * If the filesystem isn't willing to tell us the length of a hole,
@@ -569,36 +568,14 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
 	if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE)
 		goto fallback;
-	sector = bh.b_blocknr << (blkbits - 9);
-	if (buffer_unwritten(&bh) || buffer_new(&bh)) {
-		int i;
-		length = bdev_direct_access(bh.b_bdev, sector, &kaddr, &pfn,
-						bh.b_size);
-		if (length < 0) {
-			result = VM_FAULT_SIGBUS;
-			goto out;
-		}
-		if ((length < PMD_SIZE) || (pfn & PG_PMD_COLOUR))
-			goto fallback;
-		for (i = 0; i < PTRS_PER_PMD; i++)
-			clear_pmem(kaddr + i * PAGE_SIZE, PAGE_SIZE);
-		wmb_pmem();
-		count_vm_event(PGMAJFAULT);
-		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
-		result |= VM_FAULT_MAJOR;
-	}
 	/*
 	 * If we allocated new storage, make sure no process has any
 	 * zero pages covering this hole
 	 */
 	if (buffer_new(&bh)) {
-		i_mmap_unlock_write(mapping);
+		i_mmap_unlock_read(mapping);
 		unmap_mapping_range(mapping, pgoff << PAGE_SHIFT, PMD_SIZE, 0);
-		i_mmap_lock_write(mapping);
+		i_mmap_lock_read(mapping);
 	}
 	/*
@@ -635,6 +612,7 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
 		result = VM_FAULT_NOPAGE;
 		spin_unlock(ptl);
 	} else {
+		sector = bh.b_blocknr << (blkbits - 9);
 		length = bdev_direct_access(bh.b_bdev, sector, &kaddr, &pfn,
 						bh.b_size);
 		if (length < 0) {
@@ -644,15 +622,25 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
 		if ((length < PMD_SIZE) || (pfn & PG_PMD_COLOUR))
 			goto fallback;
+		if (buffer_unwritten(&bh) || buffer_new(&bh)) {
+			int i;
+			for (i = 0; i < PTRS_PER_PMD; i++)
+				clear_pmem(kaddr + i * PAGE_SIZE, PAGE_SIZE);
+			wmb_pmem();
+			count_vm_event(PGMAJFAULT);
+			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
+			result |= VM_FAULT_MAJOR;
+		}
 		result |= vmf_insert_pfn_pmd(vma, address, pmd, pfn, write);
 	}
  out:
+	i_mmap_unlock_read(mapping);
+
 	if (buffer_unwritten(&bh))
 		complete_unwritten(&bh, !(result & VM_FAULT_ERROR));
-	i_mmap_unlock_write(mapping);
 	return result;
 fallback:
...
@@ -165,8 +165,8 @@ int ext4_mpage_readpages(struct address_space *mapping,
 		if (pages) {
 			page = list_entry(pages->prev, struct page, lru);
 			list_del(&page->lru);
-			if (add_to_page_cache_lru(page, mapping,
-						  page->index, GFP_KERNEL))
+			if (add_to_page_cache_lru(page, mapping, page->index,
-+				  GFP_KERNEL & mapping_gfp_mask(mapping)))
 				goto next_page;
 		}
...
@@ -139,7 +139,8 @@ map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
 static struct bio *
 do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
 		sector_t *last_block_in_bio, struct buffer_head *map_bh,
-		unsigned long *first_logical_block, get_block_t get_block)
+		unsigned long *first_logical_block, get_block_t get_block,
+		gfp_t gfp)
 {
 	struct inode *inode = page->mapping->host;
 	const unsigned blkbits = inode->i_blkbits;
@@ -277,8 +278,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
 			goto out;
 		}
 		bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
-				min_t(int, nr_pages, BIO_MAX_PAGES),
-				GFP_KERNEL);
+				min_t(int, nr_pages, BIO_MAX_PAGES), gfp);
 		if (bio == NULL)
 			goto confused;
 	}
@@ -361,6 +361,7 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
 	sector_t last_block_in_bio = 0;
 	struct buffer_head map_bh;
 	unsigned long first_logical_block = 0;
+	gfp_t gfp = GFP_KERNEL & mapping_gfp_mask(mapping);
 	map_bh.b_state = 0;
 	map_bh.b_size = 0;
@@ -370,12 +371,13 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
 		prefetchw(&page->flags);
 		list_del(&page->lru);
 		if (!add_to_page_cache_lru(page, mapping,
-					page->index, GFP_KERNEL)) {
+					page->index,
+					gfp)) {
 			bio = do_mpage_readpage(bio, page,
 					nr_pages - page_idx,
 					&last_block_in_bio, &map_bh,
 					&first_logical_block,
-					get_block);
+					get_block, gfp);
 		}
 		page_cache_release(page);
 	}
@@ -395,11 +397,12 @@ int mpage_readpage(struct page *page, get_block_t get_block)
 	sector_t last_block_in_bio = 0;
 	struct buffer_head map_bh;
 	unsigned long first_logical_block = 0;
+	gfp_t gfp = GFP_KERNEL & mapping_gfp_mask(page->mapping);
 	map_bh.b_state = 0;
 	map_bh.b_size = 0;
 	bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio,
-			&map_bh, &first_logical_block, get_block);
+			&map_bh, &first_logical_block, get_block, gfp);
 	if (bio)
 		mpage_bio_submit(READ, bio);
 	return 0;
...
@@ -70,6 +70,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
 	unsigned order;
 	void *data;
 	int ret;
+	gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
 	/* make various checks */
 	order = get_order(newsize);
@@ -84,7 +85,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
 	/* allocate enough contiguous pages to be able to satisfy the
 	 * request */
-	pages = alloc_pages(mapping_gfp_mask(inode->i_mapping), order);
+	pages = alloc_pages(gfp, order);
 	if (!pages)
 		return -ENOMEM;
@@ -108,7 +109,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
 		struct page *page = pages + loop;
 		ret = add_to_page_cache_lru(page, inode->i_mapping, loop,
-					GFP_KERNEL);
+					gfp);
 		if (ret < 0)
 			goto add_error;
...
@@ -220,6 +220,7 @@ config ZLIB_INFLATE
 config ZLIB_DEFLATE
 	tristate
+	select BITREVERSE
 config LZO_COMPRESS
 	tristate
...
@@ -3387,6 +3387,7 @@ static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
 	ret = page_counter_memparse(args, "-1", &threshold);
 	if (ret)
 		return ret;
+	threshold <<= PAGE_SHIFT;
 	mutex_lock(&memcg->thresholds_lock);
...
@@ -2426,6 +2426,8 @@ void unmap_mapping_range(struct address_space *mapping,
 	if (details.last_index < details.first_index)
 		details.last_index = ULONG_MAX;
+	/* DAX uses i_mmap_lock to serialise file truncate vs page fault */
 	i_mmap_lock_write(mapping);
 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
 		unmap_mapping_range_tree(&mapping->i_mmap, &details);
...
@@ -89,8 +89,8 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages,
 	while (!list_empty(pages)) {
 		page = list_to_page(pages);
 		list_del(&page->lru);
-		if (add_to_page_cache_lru(page, mapping,
-					page->index, GFP_KERNEL)) {
+		if (add_to_page_cache_lru(page, mapping, page->index,
+				GFP_KERNEL & mapping_gfp_mask(mapping))) {
 			read_cache_pages_invalidate_page(mapping, page);
 			continue;
 		}
@@ -127,8 +127,8 @@ static int read_pages(struct address_space *mapping, struct file *filp,
 	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
 		struct page *page = list_to_page(pages);
 		list_del(&page->lru);
-		if (!add_to_page_cache_lru(page, mapping,
-					page->index, GFP_KERNEL)) {
+		if (!add_to_page_cache_lru(page, mapping, page->index,
+				GFP_KERNEL & mapping_gfp_mask(mapping))) {
 			mapping->a_ops->readpage(filp, page);
 		}
 		page_cache_release(page);
...
@@ -115,7 +115,7 @@ esac
 BUILD_DEBUG="$(grep -s '^CONFIG_DEBUG_INFO=y' $KCONFIG_CONFIG || true)"
 # Setup the directory structure
-rm -rf "$tmpdir" "$fwdir" "$kernel_headers_dir" "$libc_headers_dir" "$dbg_dir"
+rm -rf "$tmpdir" "$fwdir" "$kernel_headers_dir" "$libc_headers_dir" "$dbg_dir" $objtree/debian/files
 mkdir -m 755 -p "$tmpdir/DEBIAN"
 mkdir -p "$tmpdir/lib" "$tmpdir/boot"
 mkdir -p "$fwdir/lib/firmware/$version/"
@@ -408,7 +408,7 @@ binary-arch:
 	\$(MAKE) KDEB_SOURCENAME=${sourcename} KDEB_PKGVERSION=${packageversion} bindeb-pkg
 clean:
-	rm -rf debian/*tmp
+	rm -rf debian/*tmp debian/files
 	mv debian/ debian.backup # debian/ might be cleaned away
 	\$(MAKE) clean
 	mv debian.backup debian
...