Commit 8b12e250 authored by Linus Torvalds

Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  lockdep: Select frame pointers on x86
  dma-debug: be more careful when building reference entries
  dma-debug: check for sg_call_ents in best-fit algorithm too
parents 41331844 00540e5d
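
The dma-debug changes below revolve around a best-fit lookup: when several tracked mappings share a device address, hash_bucket_find() gives a candidate entry one point per field that agrees with the reference entry, returns immediately on a full score, and otherwise keeps the highest-scoring candidate. With this merge, sg_call_ents takes part in that score as well. A minimal standalone sketch of the idea (illustrative names and a simplified list walk, not the kernel code itself):

/* Illustrative sketch only: simplified stand-ins for the kernel's
 * struct dma_debug_entry and hash_bucket_find(). */
#include <stddef.h>

struct dbg_entry {
	unsigned long long dev_addr;
	unsigned long long size;
	int type;
	int direction;
	int sg_call_ents;
	struct dbg_entry *next;
};

static struct dbg_entry *best_fit(struct dbg_entry *head,
				  const struct dbg_entry *ref)
{
	struct dbg_entry *best = NULL;
	struct dbg_entry *e;
	int best_lvl = 0;

	for (e = head; e; e = e->next) {
		int lvl = 0;

		if (e->dev_addr != ref->dev_addr)
			continue;	/* the address itself must match */

		lvl += (e->size == ref->size);
		lvl += (e->type == ref->type);
		lvl += (e->direction == ref->direction);
		lvl += (e->sg_call_ents == ref->sg_call_ents);

		if (lvl == 4)		/* perfect fit: all fields agree */
			return e;
		if (lvl > best_lvl) {	/* otherwise remember the best so far */
			best_lvl = lvl;
			best = e;
		}
	}
	return best;
}

With sg_call_ents counted, a reference entry built for a scatterlist operation prefers the tracked entry that was recorded with the same entry count instead of silently settling for another mapping at the same address.
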
lib/Kconfig.debug
@@ -472,7 +472,7 @@ config LOCKDEP
 	bool
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
 	select STACKTRACE
-	select FRAME_POINTER if !X86 && !MIPS && !PPC && !ARM_UNWIND && !S390
+	select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390
 	select KALLSYMS
 	select KALLSYMS_ALL
lib/dma-debug.c
@@ -262,11 +262,12 @@ static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
 		 */
 		matches += 1;
 		match_lvl = 0;
-		entry->size == ref->size ? ++match_lvl : match_lvl;
-		entry->type == ref->type ? ++match_lvl : match_lvl;
-		entry->direction == ref->direction ? ++match_lvl : match_lvl;
+		entry->size == ref->size ? ++match_lvl : 0;
+		entry->type == ref->type ? ++match_lvl : 0;
+		entry->direction == ref->direction ? ++match_lvl : 0;
+		entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;
 
-		if (match_lvl == 3) {
+		if (match_lvl == 4) {
 			/* perfect-fit - return the result */
 			return entry;
 		} else if (match_lvl > last_lvl) {
@@ -873,72 +874,68 @@ static void check_for_illegal_area(struct device *dev, void *addr, u64 size)
 			"[addr=%p] [size=%llu]\n", addr, size);
 }
 
-static void check_sync(struct device *dev, dma_addr_t addr,
-		       u64 size, u64 offset, int direction, bool to_cpu)
+static void check_sync(struct device *dev,
+		       struct dma_debug_entry *ref,
+		       bool to_cpu)
 {
-	struct dma_debug_entry ref = {
-		.dev       = dev,
-		.dev_addr  = addr,
-		.size      = size,
-		.direction = direction,
-	};
 	struct dma_debug_entry *entry;
 	struct hash_bucket *bucket;
 	unsigned long flags;
 
-	bucket = get_hash_bucket(&ref, &flags);
+	bucket = get_hash_bucket(ref, &flags);
 
-	entry = hash_bucket_find(bucket, &ref);
+	entry = hash_bucket_find(bucket, ref);
 
 	if (!entry) {
 		err_printk(dev, NULL, "DMA-API: device driver tries "
 			   "to sync DMA memory it has not allocated "
 			   "[device address=0x%016llx] [size=%llu bytes]\n",
-			   (unsigned long long)addr, size);
+			   (unsigned long long)ref->dev_addr, ref->size);
 		goto out;
 	}
 
-	if ((offset + size) > entry->size) {
+	if (ref->size > entry->size) {
 		err_printk(dev, entry, "DMA-API: device driver syncs"
 			   " DMA memory outside allocated range "
 			   "[device address=0x%016llx] "
-			   "[allocation size=%llu bytes] [sync offset=%llu] "
-			   "[sync size=%llu]\n", entry->dev_addr, entry->size,
-			   offset, size);
+			   "[allocation size=%llu bytes] "
+			   "[sync offset+size=%llu]\n",
+			   entry->dev_addr, entry->size,
+			   ref->size);
 	}
 
-	if (direction != entry->direction) {
+	if (ref->direction != entry->direction) {
 		err_printk(dev, entry, "DMA-API: device driver syncs "
 			   "DMA memory with different direction "
 			   "[device address=0x%016llx] [size=%llu bytes] "
 			   "[mapped with %s] [synced with %s]\n",
-			   (unsigned long long)addr, entry->size,
+			   (unsigned long long)ref->dev_addr, entry->size,
 			   dir2name[entry->direction],
-			   dir2name[direction]);
+			   dir2name[ref->direction]);
 	}
 
 	if (entry->direction == DMA_BIDIRECTIONAL)
 		goto out;
 
 	if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
-		      !(direction == DMA_TO_DEVICE))
+		      !(ref->direction == DMA_TO_DEVICE))
 		err_printk(dev, entry, "DMA-API: device driver syncs "
 			   "device read-only DMA memory for cpu "
 			   "[device address=0x%016llx] [size=%llu bytes] "
 			   "[mapped with %s] [synced with %s]\n",
-			   (unsigned long long)addr, entry->size,
+			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
-			   dir2name[direction]);
+			   dir2name[ref->direction]);
 
 	if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
-		       !(direction == DMA_FROM_DEVICE))
+		       !(ref->direction == DMA_FROM_DEVICE))
 		err_printk(dev, entry, "DMA-API: device driver syncs "
 			   "device write-only DMA memory to device "
 			   "[device address=0x%016llx] [size=%llu bytes] "
 			   "[mapped with %s] [synced with %s]\n",
-			   (unsigned long long)addr, entry->size,
+			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
-			   dir2name[direction]);
+			   dir2name[ref->direction]);
 
 out:
 	put_hash_bucket(bucket, &flags);
@@ -1036,19 +1033,16 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
 }
 EXPORT_SYMBOL(debug_dma_map_sg);
 
-static int get_nr_mapped_entries(struct device *dev, struct scatterlist *s)
+static int get_nr_mapped_entries(struct device *dev,
+				 struct dma_debug_entry *ref)
 {
-	struct dma_debug_entry *entry, ref;
+	struct dma_debug_entry *entry;
 	struct hash_bucket *bucket;
 	unsigned long flags;
 	int mapped_ents;
 
-	ref.dev      = dev;
-	ref.dev_addr = sg_dma_address(s);
-	ref.size     = sg_dma_len(s),
-
-	bucket = get_hash_bucket(&ref, &flags);
-	entry  = hash_bucket_find(bucket, &ref);
+	bucket = get_hash_bucket(ref, &flags);
+	entry  = hash_bucket_find(bucket, ref);
 	mapped_ents = 0;
 
 	if (entry)
@@ -1076,16 +1070,14 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 			.dev_addr     = sg_dma_address(s),
 			.size         = sg_dma_len(s),
 			.direction    = dir,
-			.sg_call_ents = 0,
+			.sg_call_ents = nelems,
 		};
 
 		if (mapped_ents && i >= mapped_ents)
 			break;
 
-		if (!i) {
-			ref.sg_call_ents = nelems;
-			mapped_ents = get_nr_mapped_entries(dev, s);
-		}
+		if (!i)
+			mapped_ents = get_nr_mapped_entries(dev, &ref);
 
 		check_unmap(&ref);
 	}
@@ -1140,10 +1132,19 @@ EXPORT_SYMBOL(debug_dma_free_coherent);
 void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
 				   size_t size, int direction)
 {
+	struct dma_debug_entry ref;
+
 	if (unlikely(global_disable))
 		return;
 
-	check_sync(dev, dma_handle, size, 0, direction, true);
+	ref.type         = dma_debug_single;
+	ref.dev          = dev;
+	ref.dev_addr     = dma_handle;
+	ref.size         = size;
+	ref.direction    = direction;
+	ref.sg_call_ents = 0;
+
+	check_sync(dev, &ref, true);
 }
 EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);
@@ -1151,10 +1152,19 @@ void debug_dma_sync_single_for_device(struct device *dev,
 				      dma_addr_t dma_handle, size_t size,
 				      int direction)
 {
+	struct dma_debug_entry ref;
+
 	if (unlikely(global_disable))
 		return;
 
-	check_sync(dev, dma_handle, size, 0, direction, false);
+	ref.type         = dma_debug_single;
+	ref.dev          = dev;
+	ref.dev_addr     = dma_handle;
+	ref.size         = size;
+	ref.direction    = direction;
+	ref.sg_call_ents = 0;
+
+	check_sync(dev, &ref, false);
 }
 EXPORT_SYMBOL(debug_dma_sync_single_for_device);
@@ -1163,10 +1173,19 @@ void debug_dma_sync_single_range_for_cpu(struct device *dev,
 					 unsigned long offset, size_t size,
 					 int direction)
 {
+	struct dma_debug_entry ref;
+
 	if (unlikely(global_disable))
 		return;
 
-	check_sync(dev, dma_handle, size, offset, direction, true);
+	ref.type         = dma_debug_single;
+	ref.dev          = dev;
+	ref.dev_addr     = dma_handle;
+	ref.size         = offset + size;
+	ref.direction    = direction;
+	ref.sg_call_ents = 0;
+
+	check_sync(dev, &ref, true);
 }
 EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);
@@ -1175,10 +1194,19 @@ void debug_dma_sync_single_range_for_device(struct device *dev,
 					    unsigned long offset,
 					    size_t size, int direction)
 {
+	struct dma_debug_entry ref;
+
 	if (unlikely(global_disable))
 		return;
 
-	check_sync(dev, dma_handle, size, offset, direction, false);
+	ref.type         = dma_debug_single;
+	ref.dev          = dev;
+	ref.dev_addr     = dma_handle;
+	ref.size         = offset + size;
+	ref.direction    = direction;
+	ref.sg_call_ents = 0;
+
+	check_sync(dev, &ref, false);
 }
 EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);
@@ -1192,14 +1220,24 @@ void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 		return;
 
 	for_each_sg(sg, s, nelems, i) {
+		struct dma_debug_entry ref = {
+			.type         = dma_debug_sg,
+			.dev          = dev,
+			.paddr        = sg_phys(s),
+			.dev_addr     = sg_dma_address(s),
+			.size         = sg_dma_len(s),
+			.direction    = direction,
+			.sg_call_ents = nelems,
+		};
+
 		if (!i)
-			mapped_ents = get_nr_mapped_entries(dev, s);
+			mapped_ents = get_nr_mapped_entries(dev, &ref);
 
 		if (i >= mapped_ents)
 			break;
 
-		check_sync(dev, sg_dma_address(s), sg_dma_len(s), 0,
-			   direction, true);
+		check_sync(dev, &ref, true);
 	}
 }
 EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);
@@ -1214,14 +1252,23 @@ void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 		return;
 
 	for_each_sg(sg, s, nelems, i) {
+		struct dma_debug_entry ref = {
+			.type         = dma_debug_sg,
+			.dev          = dev,
+			.paddr        = sg_phys(s),
+			.dev_addr     = sg_dma_address(s),
+			.size         = sg_dma_len(s),
+			.direction    = direction,
+			.sg_call_ents = nelems,
+		};
+
 		if (!i)
-			mapped_ents = get_nr_mapped_entries(dev, s);
+			mapped_ents = get_nr_mapped_entries(dev, &ref);
 
 		if (i >= mapped_ents)
 			break;
 
-		check_sync(dev, sg_dma_address(s), sg_dma_len(s), 0,
-			   direction, false);
+		check_sync(dev, &ref, false);
 	}
 }
 EXPORT_SYMBOL(debug_dma_sync_sg_for_device);
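
For the driver-side view of what sg_call_ents tracks, a hedged sketch follows (the device, scatterlist and error handling are placeholder assumptions, not part of this commit): the DMA API requires that dma_sync_sg_for_cpu()/dma_sync_sg_for_device() and dma_unmap_sg() be called with the same entry count that was passed to dma_map_sg(), not the possibly smaller count dma_map_sg() returned, and that original count is what dma-debug records as sg_call_ents.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/errno.h>

/* Hypothetical helper: 'dev', 'sgl' and 'nents' come from the caller. */
static int example_sg_io(struct device *dev, struct scatterlist *sgl, int nents)
{
	int mapped;

	mapped = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
	if (!mapped)
		return -ENOMEM;

	/* ... hand the 'mapped' entries to the device, wait for completion ... */

	/* Both calls take the original nents, not 'mapped'; dma-debug stores
	 * nents as sg_call_ents in its reference entries and now also uses it
	 * in the best-fit lookup shown earlier. */
	dma_sync_sg_for_cpu(dev, sgl, nents, DMA_FROM_DEVICE);
	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);

	return 0;
}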