Commit 337ccfce authored by Takashi Iwai's avatar Takashi Iwai

Merge branch 'for-linus' into for-next

parents b0e159fe 6b7e95d1
...@@ -12,7 +12,7 @@ DOCBOOKS := z8530book.xml \ ...@@ -12,7 +12,7 @@ DOCBOOKS := z8530book.xml \
kernel-api.xml filesystems.xml lsm.xml kgdb.xml \ kernel-api.xml filesystems.xml lsm.xml kgdb.xml \
gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml \ gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml \
genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \ genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
80211.xml sh.xml regulator.xml w1.xml \ sh.xml regulator.xml w1.xml \
writing_musb_glue_layer.xml iio.xml writing_musb_glue_layer.xml iio.xml
ifeq ($(DOCBOOKS),) ifeq ($(DOCBOOKS),)
......
...@@ -151,7 +151,7 @@ bool ether_addr_equal(const u8 *addr1, const u8 *addr2) ...@@ -151,7 +151,7 @@ bool ether_addr_equal(const u8 *addr1, const u8 *addr2)
#else #else
const u16 *a = (const u16 *)addr1; const u16 *a = (const u16 *)addr1;
const u16 *b = (const u16 *)addr2; const u16 *b = (const u16 *)addr2;
return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0; return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) == 0;
#endif #endif
} }
......
VERSION = 4 VERSION = 4
PATCHLEVEL = 10 PATCHLEVEL = 10
SUBLEVEL = 0 SUBLEVEL = 0
EXTRAVERSION = -rc1 EXTRAVERSION = -rc2
NAME = Roaring Lionus NAME = Roaring Lionus
# *DOCUMENTATION* # *DOCUMENTATION*
......
...@@ -139,6 +139,19 @@ static __always_inline void __clear_bit(long nr, volatile unsigned long *addr) ...@@ -139,6 +139,19 @@ static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
asm volatile("btr %1,%0" : ADDR : "Ir" (nr)); asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
} }
/*
 * clear_bit_unlock_is_negative_byte - atomically clear a bit with unlock
 * (release) semantics and report whether the byte holding it is negative
 * afterwards.
 * @nr:   bit to clear; NOTE(review): the shift "(char) ~(1 << nr)" only
 *        makes sense for nr within the low byte — callers presumably pass
 *        a small bit number (e.g. PG_locked == 0); confirm at call sites.
 * @addr: address of the word containing the bit
 *
 * A single locked "andb" both clears the bit and sets the CPU sign flag
 * from the resulting byte; CC_SET(s)/CC_OUT(s) capture SF into @negative,
 * so the clear and the sign test happen in one atomic operation.  The
 * "memory" clobber orders this against surrounding accesses.
 */
static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
{
	bool negative;
	asm volatile(LOCK_PREFIX "andb %2,%1\n\t"
		CC_SET(s)
		: CC_OUT(s) (negative), ADDR
		: "ir" ((char) ~(1 << nr)) : "memory");
	return negative;
}

/* Let everybody know we have it */
#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
/* /*
* __clear_bit_unlock - Clears a bit in memory * __clear_bit_unlock - Clears a bit in memory
* @nr: Bit to clear * @nr: Bit to clear
......
...@@ -1461,16 +1461,25 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate, ...@@ -1461,16 +1461,25 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
for (i = 0; i < ctcount; i++) { for (i = 0; i < ctcount; i++) {
unsigned int dlen = COMP_BUF_SIZE; unsigned int dlen = COMP_BUF_SIZE;
int ilen = ctemplate[i].inlen; int ilen = ctemplate[i].inlen;
void *input_vec;
input_vec = kmalloc(ilen, GFP_KERNEL);
if (!input_vec) {
ret = -ENOMEM;
goto out;
}
memcpy(input_vec, ctemplate[i].input, ilen);
memset(output, 0, dlen); memset(output, 0, dlen);
init_completion(&result.completion); init_completion(&result.completion);
sg_init_one(&src, ctemplate[i].input, ilen); sg_init_one(&src, input_vec, ilen);
sg_init_one(&dst, output, dlen); sg_init_one(&dst, output, dlen);
req = acomp_request_alloc(tfm); req = acomp_request_alloc(tfm);
if (!req) { if (!req) {
pr_err("alg: acomp: request alloc failed for %s\n", pr_err("alg: acomp: request alloc failed for %s\n",
algo); algo);
kfree(input_vec);
ret = -ENOMEM; ret = -ENOMEM;
goto out; goto out;
} }
...@@ -1483,6 +1492,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate, ...@@ -1483,6 +1492,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
if (ret) { if (ret) {
pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n", pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
i + 1, algo, -ret); i + 1, algo, -ret);
kfree(input_vec);
acomp_request_free(req); acomp_request_free(req);
goto out; goto out;
} }
...@@ -1491,6 +1501,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate, ...@@ -1491,6 +1501,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
pr_err("alg: acomp: Compression test %d failed for %s: output len = %d\n", pr_err("alg: acomp: Compression test %d failed for %s: output len = %d\n",
i + 1, algo, req->dlen); i + 1, algo, req->dlen);
ret = -EINVAL; ret = -EINVAL;
kfree(input_vec);
acomp_request_free(req); acomp_request_free(req);
goto out; goto out;
} }
...@@ -1500,26 +1511,37 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate, ...@@ -1500,26 +1511,37 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
i + 1, algo); i + 1, algo);
hexdump(output, req->dlen); hexdump(output, req->dlen);
ret = -EINVAL; ret = -EINVAL;
kfree(input_vec);
acomp_request_free(req); acomp_request_free(req);
goto out; goto out;
} }
kfree(input_vec);
acomp_request_free(req); acomp_request_free(req);
} }
for (i = 0; i < dtcount; i++) { for (i = 0; i < dtcount; i++) {
unsigned int dlen = COMP_BUF_SIZE; unsigned int dlen = COMP_BUF_SIZE;
int ilen = dtemplate[i].inlen; int ilen = dtemplate[i].inlen;
void *input_vec;
input_vec = kmalloc(ilen, GFP_KERNEL);
if (!input_vec) {
ret = -ENOMEM;
goto out;
}
memcpy(input_vec, dtemplate[i].input, ilen);
memset(output, 0, dlen); memset(output, 0, dlen);
init_completion(&result.completion); init_completion(&result.completion);
sg_init_one(&src, dtemplate[i].input, ilen); sg_init_one(&src, input_vec, ilen);
sg_init_one(&dst, output, dlen); sg_init_one(&dst, output, dlen);
req = acomp_request_alloc(tfm); req = acomp_request_alloc(tfm);
if (!req) { if (!req) {
pr_err("alg: acomp: request alloc failed for %s\n", pr_err("alg: acomp: request alloc failed for %s\n",
algo); algo);
kfree(input_vec);
ret = -ENOMEM; ret = -ENOMEM;
goto out; goto out;
} }
...@@ -1532,6 +1554,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate, ...@@ -1532,6 +1554,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
if (ret) { if (ret) {
pr_err("alg: acomp: decompression failed on test %d for %s: ret=%d\n", pr_err("alg: acomp: decompression failed on test %d for %s: ret=%d\n",
i + 1, algo, -ret); i + 1, algo, -ret);
kfree(input_vec);
acomp_request_free(req); acomp_request_free(req);
goto out; goto out;
} }
...@@ -1540,6 +1563,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate, ...@@ -1540,6 +1563,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
pr_err("alg: acomp: Decompression test %d failed for %s: output len = %d\n", pr_err("alg: acomp: Decompression test %d failed for %s: output len = %d\n",
i + 1, algo, req->dlen); i + 1, algo, req->dlen);
ret = -EINVAL; ret = -EINVAL;
kfree(input_vec);
acomp_request_free(req); acomp_request_free(req);
goto out; goto out;
} }
...@@ -1549,10 +1573,12 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate, ...@@ -1549,10 +1573,12 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
i + 1, algo); i + 1, algo);
hexdump(output, req->dlen); hexdump(output, req->dlen);
ret = -EINVAL; ret = -EINVAL;
kfree(input_vec);
acomp_request_free(req); acomp_request_free(req);
goto out; goto out;
} }
kfree(input_vec);
acomp_request_free(req); acomp_request_free(req);
} }
......
This diff is collapsed.
...@@ -751,9 +751,8 @@ static int ext2_get_blocks(struct inode *inode, ...@@ -751,9 +751,8 @@ static int ext2_get_blocks(struct inode *inode,
mutex_unlock(&ei->truncate_mutex); mutex_unlock(&ei->truncate_mutex);
goto cleanup; goto cleanup;
} }
} else {
*new = true;
} }
*new = true;
ext2_splice_branch(inode, iblock, partial, indirect_blks, count); ext2_splice_branch(inode, iblock, partial, indirect_blks, count);
mutex_unlock(&ei->truncate_mutex); mutex_unlock(&ei->truncate_mutex);
......
...@@ -258,7 +258,6 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from) ...@@ -258,7 +258,6 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf) static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{ {
int result; int result;
handle_t *handle = NULL;
struct inode *inode = file_inode(vma->vm_file); struct inode *inode = file_inode(vma->vm_file);
struct super_block *sb = inode->i_sb; struct super_block *sb = inode->i_sb;
bool write = vmf->flags & FAULT_FLAG_WRITE; bool write = vmf->flags & FAULT_FLAG_WRITE;
...@@ -266,24 +265,12 @@ static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ...@@ -266,24 +265,12 @@ static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (write) { if (write) {
sb_start_pagefault(sb); sb_start_pagefault(sb);
file_update_time(vma->vm_file); file_update_time(vma->vm_file);
down_read(&EXT4_I(inode)->i_mmap_sem); }
handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE, down_read(&EXT4_I(inode)->i_mmap_sem);
EXT4_DATA_TRANS_BLOCKS(sb)); result = dax_iomap_fault(vma, vmf, &ext4_iomap_ops);
} else up_read(&EXT4_I(inode)->i_mmap_sem);
down_read(&EXT4_I(inode)->i_mmap_sem); if (write)
if (IS_ERR(handle))
result = VM_FAULT_SIGBUS;
else
result = dax_iomap_fault(vma, vmf, &ext4_iomap_ops);
if (write) {
if (!IS_ERR(handle))
ext4_journal_stop(handle);
up_read(&EXT4_I(inode)->i_mmap_sem);
sb_end_pagefault(sb); sb_end_pagefault(sb);
} else
up_read(&EXT4_I(inode)->i_mmap_sem);
return result; return result;
} }
...@@ -292,7 +279,6 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr, ...@@ -292,7 +279,6 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmd, unsigned int flags) pmd_t *pmd, unsigned int flags)
{ {
int result; int result;
handle_t *handle = NULL;
struct inode *inode = file_inode(vma->vm_file); struct inode *inode = file_inode(vma->vm_file);
struct super_block *sb = inode->i_sb; struct super_block *sb = inode->i_sb;
bool write = flags & FAULT_FLAG_WRITE; bool write = flags & FAULT_FLAG_WRITE;
...@@ -300,27 +286,13 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr, ...@@ -300,27 +286,13 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
if (write) { if (write) {
sb_start_pagefault(sb); sb_start_pagefault(sb);
file_update_time(vma->vm_file); file_update_time(vma->vm_file);
down_read(&EXT4_I(inode)->i_mmap_sem);
handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
ext4_chunk_trans_blocks(inode,
PMD_SIZE / PAGE_SIZE));
} else
down_read(&EXT4_I(inode)->i_mmap_sem);
if (IS_ERR(handle))
result = VM_FAULT_SIGBUS;
else {
result = dax_iomap_pmd_fault(vma, addr, pmd, flags,
&ext4_iomap_ops);
} }
down_read(&EXT4_I(inode)->i_mmap_sem);
if (write) { result = dax_iomap_pmd_fault(vma, addr, pmd, flags,
if (!IS_ERR(handle)) &ext4_iomap_ops);
ext4_journal_stop(handle); up_read(&EXT4_I(inode)->i_mmap_sem);
up_read(&EXT4_I(inode)->i_mmap_sem); if (write)
sb_end_pagefault(sb); sb_end_pagefault(sb);
} else
up_read(&EXT4_I(inode)->i_mmap_sem);
return result; return result;
} }
......
...@@ -41,6 +41,9 @@ ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, ...@@ -41,6 +41,9 @@ ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf, int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
struct iomap_ops *ops); struct iomap_ops *ops);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index); int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
pgoff_t index);
void dax_wake_mapping_entry_waiter(struct address_space *mapping, void dax_wake_mapping_entry_waiter(struct address_space *mapping,
pgoff_t index, void *entry, bool wake_all); pgoff_t index, void *entry, bool wake_all);
......
...@@ -73,13 +73,13 @@ ...@@ -73,13 +73,13 @@
*/ */
enum pageflags { enum pageflags {
PG_locked, /* Page is locked. Don't touch. */ PG_locked, /* Page is locked. Don't touch. */
PG_waiters, /* Page has waiters, check its waitqueue */
PG_error, PG_error,
PG_referenced, PG_referenced,
PG_uptodate, PG_uptodate,
PG_dirty, PG_dirty,
PG_lru, PG_lru,
PG_active, PG_active,
PG_waiters, /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
PG_slab, PG_slab,
PG_owner_priv_1, /* Owner use. If pagecache, fs may use*/ PG_owner_priv_1, /* Owner use. If pagecache, fs may use*/
PG_arch_1, PG_arch_1,
......
...@@ -912,6 +912,29 @@ void add_page_wait_queue(struct page *page, wait_queue_t *waiter) ...@@ -912,6 +912,29 @@ void add_page_wait_queue(struct page *page, wait_queue_t *waiter)
} }
EXPORT_SYMBOL_GPL(add_page_wait_queue); EXPORT_SYMBOL_GPL(add_page_wait_queue);
#ifndef clear_bit_unlock_is_negative_byte

/*
 * PG_waiters is the high bit in the same byte as PG_locked.
 *
 * On x86 (and on many other architectures), we can clear PG_locked and
 * test the sign bit at the same time. But if the architecture does
 * not support that special operation, we just do this all by hand
 * instead.
 *
 * The read of PG_waiters has to be after (or concurrently with) PG_locked
 * being cleared, but a memory barrier should be unnecessary since it is
 * in the same byte as PG_locked.
 */
static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem)
{
	/* Release-ordered clear; the bit goes down before waiters are read. */
	clear_bit_unlock(nr, mem);
	/* smp_mb__after_atomic(); */
	return test_bit(PG_waiters, mem);
}

#endif
/** /**
* unlock_page - unlock a locked page * unlock_page - unlock a locked page
* @page: the page * @page: the page
...@@ -921,16 +944,19 @@ EXPORT_SYMBOL_GPL(add_page_wait_queue); ...@@ -921,16 +944,19 @@ EXPORT_SYMBOL_GPL(add_page_wait_queue);
* mechanism between PageLocked pages and PageWriteback pages is shared. * mechanism between PageLocked pages and PageWriteback pages is shared.
* But that's OK - sleepers in wait_on_page_writeback() just go back to sleep. * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
* *
* The mb is necessary to enforce ordering between the clear_bit and the read * Note that this depends on PG_waiters being the sign bit in the byte
* of the waitqueue (to avoid SMP races with a parallel wait_on_page_locked()). * that contains PG_locked - thus the BUILD_BUG_ON(). That allows us to
* clear the PG_locked bit and test PG_waiters at the same time fairly
* portably (architectures that do LL/SC can test any bit, while x86 can
* test the sign bit).
*/ */
void unlock_page(struct page *page) void unlock_page(struct page *page)
{ {
BUILD_BUG_ON(PG_waiters != 7);
page = compound_head(page); page = compound_head(page);
VM_BUG_ON_PAGE(!PageLocked(page), page); VM_BUG_ON_PAGE(!PageLocked(page), page);
clear_bit_unlock(PG_locked, &page->flags); if (clear_bit_unlock_is_negative_byte(PG_locked, &page->flags))
smp_mb__after_atomic(); wake_up_page_bit(page, PG_locked);
wake_up_page(page, PG_locked);
} }
EXPORT_SYMBOL(unlock_page); EXPORT_SYMBOL(unlock_page);
......
...@@ -24,20 +24,12 @@ ...@@ -24,20 +24,12 @@
#include <linux/rmap.h> #include <linux/rmap.h>
#include "internal.h" #include "internal.h"
static void clear_exceptional_entry(struct address_space *mapping, static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
pgoff_t index, void *entry) void *entry)
{ {
struct radix_tree_node *node; struct radix_tree_node *node;
void **slot; void **slot;
/* Handled by shmem itself */
if (shmem_mapping(mapping))
return;
if (dax_mapping(mapping)) {
dax_delete_mapping_entry(mapping, index);
return;
}
spin_lock_irq(&mapping->tree_lock); spin_lock_irq(&mapping->tree_lock);
/* /*
* Regular page slots are stabilized by the page lock even * Regular page slots are stabilized by the page lock even
...@@ -55,6 +47,56 @@ static void clear_exceptional_entry(struct address_space *mapping, ...@@ -55,6 +47,56 @@ static void clear_exceptional_entry(struct address_space *mapping,
spin_unlock_irq(&mapping->tree_lock); spin_unlock_irq(&mapping->tree_lock);
} }
/*
 * Unconditionally remove an exceptional entry. Usually called from the
 * truncate path.
 */
static void truncate_exceptional_entry(struct address_space *mapping,
				       pgoff_t index, void *entry)
{
	/* shmem manages its own exceptional entries */
	if (shmem_mapping(mapping))
		return;

	if (dax_mapping(mapping))
		dax_delete_mapping_entry(mapping, index);
	else
		clear_shadow_entry(mapping, index, entry);
}
/*
 * Invalidate an exceptional entry if easily possible. Backs
 * invalidate_inode_pages(), so for DAX only unlocked, clean entries
 * are evicted.
 */
static int invalidate_exceptional_entry(struct address_space *mapping,
					pgoff_t index, void *entry)
{
	/* shmem manages its own exceptional entries */
	if (shmem_mapping(mapping))
		return 1;

	if (!dax_mapping(mapping)) {
		clear_shadow_entry(mapping, index, entry);
		return 1;
	}

	return dax_invalidate_mapping_entry(mapping, index);
}
/*
 * Invalidate an exceptional entry only if it is clean. Backs
 * invalidate_inode_pages2(), so for DAX only clean entries are evicted.
 */
static int invalidate_exceptional_entry2(struct address_space *mapping,
					 pgoff_t index, void *entry)
{
	/* shmem manages its own exceptional entries */
	if (shmem_mapping(mapping))
		return 1;

	if (!dax_mapping(mapping)) {
		clear_shadow_entry(mapping, index, entry);
		return 1;
	}

	return dax_invalidate_mapping_entry_sync(mapping, index);
}
/** /**
* do_invalidatepage - invalidate part or all of a page * do_invalidatepage - invalidate part or all of a page
* @page: the page which is affected * @page: the page which is affected
...@@ -262,7 +304,8 @@ void truncate_inode_pages_range(struct address_space *mapping, ...@@ -262,7 +304,8 @@ void truncate_inode_pages_range(struct address_space *mapping,
break; break;
if (radix_tree_exceptional_entry(page)) { if (radix_tree_exceptional_entry(page)) {
clear_exceptional_entry(mapping, index, page); truncate_exceptional_entry(mapping, index,
page);
continue; continue;
} }
...@@ -351,7 +394,8 @@ void truncate_inode_pages_range(struct address_space *mapping, ...@@ -351,7 +394,8 @@ void truncate_inode_pages_range(struct address_space *mapping,
} }
if (radix_tree_exceptional_entry(page)) { if (radix_tree_exceptional_entry(page)) {
clear_exceptional_entry(mapping, index, page); truncate_exceptional_entry(mapping, index,
page);
continue; continue;
} }
...@@ -470,7 +514,8 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping, ...@@ -470,7 +514,8 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
break; break;
if (radix_tree_exceptional_entry(page)) { if (radix_tree_exceptional_entry(page)) {
clear_exceptional_entry(mapping, index, page); invalidate_exceptional_entry(mapping, index,
page);
continue; continue;
} }
...@@ -592,7 +637,9 @@ int invalidate_inode_pages2_range(struct address_space *mapping, ...@@ -592,7 +637,9 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
break; break;
if (radix_tree_exceptional_entry(page)) { if (radix_tree_exceptional_entry(page)) {
clear_exceptional_entry(mapping, index, page); if (!invalidate_exceptional_entry2(mapping,
index, page))
ret = -EBUSY;
continue; continue;
} }
......
...@@ -69,7 +69,7 @@ static void pcm_period_tasklet(unsigned long data); ...@@ -69,7 +69,7 @@ static void pcm_period_tasklet(unsigned long data);
* @protocol_size: the size to allocate newly for protocol * @protocol_size: the size to allocate newly for protocol
*/ */
int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit, int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
enum amdtp_stream_direction dir, enum cip_flags flags, enum amdtp_stream_direction dir, int flags,
unsigned int fmt, unsigned int fmt,
amdtp_stream_process_data_blocks_t process_data_blocks, amdtp_stream_process_data_blocks_t process_data_blocks,
unsigned int protocol_size) unsigned int protocol_size)
......
...@@ -93,7 +93,7 @@ typedef unsigned int (*amdtp_stream_process_data_blocks_t)( ...@@ -93,7 +93,7 @@ typedef unsigned int (*amdtp_stream_process_data_blocks_t)(
unsigned int *syt); unsigned int *syt);
struct amdtp_stream { struct amdtp_stream {
struct fw_unit *unit; struct fw_unit *unit;
enum cip_flags flags; int flags;
enum amdtp_stream_direction direction; enum amdtp_stream_direction direction;
struct mutex mutex; struct mutex mutex;
...@@ -137,7 +137,7 @@ struct amdtp_stream { ...@@ -137,7 +137,7 @@ struct amdtp_stream {
}; };
int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit, int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
enum amdtp_stream_direction dir, enum cip_flags flags, enum amdtp_stream_direction dir, int flags,
unsigned int fmt, unsigned int fmt,
amdtp_stream_process_data_blocks_t process_data_blocks, amdtp_stream_process_data_blocks_t process_data_blocks,
unsigned int protocol_size); unsigned int protocol_size);
......
...@@ -117,7 +117,7 @@ destroy_stream(struct snd_efw *efw, struct amdtp_stream *stream) ...@@ -117,7 +117,7 @@ destroy_stream(struct snd_efw *efw, struct amdtp_stream *stream)
conn = &efw->in_conn; conn = &efw->in_conn;
amdtp_stream_destroy(stream); amdtp_stream_destroy(stream);
cmp_connection_destroy(&efw->out_conn); cmp_connection_destroy(conn);
} }
static int static int
......
...@@ -343,7 +343,7 @@ int snd_tscm_stream_init_duplex(struct snd_tscm *tscm) ...@@ -343,7 +343,7 @@ int snd_tscm_stream_init_duplex(struct snd_tscm *tscm)
if (err < 0) if (err < 0)
amdtp_stream_destroy(&tscm->rx_stream); amdtp_stream_destroy(&tscm->rx_stream);
return 0; return err;
} }
/* At bus reset, streaming is stopped and some registers are clear. */ /* At bus reset, streaming is stopped and some registers are clear. */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment