Commit 9e1a27ea authored by Alexander Duyck's avatar Alexander Duyck Committed by Rusty Russell

virtio_ring: Update weak barriers to use dma_wmb/rmb

This change makes it so that instead of using smp_wmb/rmb which varies
depending on the kernel configuration we can use dma_wmb/rmb which for
most architectures should be equal to or slightly more strict than
smp_wmb/rmb.

The advantage to this is that these barriers are available to uniprocessor
builds as well so the performance should improve under such a
configuration.
Signed-off-by: Alexander Duyck <alexander.h.duyck@redhat.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
parent a8557d32
...@@ -21,19 +21,20 @@ ...@@ -21,19 +21,20 @@
 * actually quite cheap.
 */
#ifdef CONFIG_SMP
static inline void virtio_mb(bool weak_barriers) static inline void virtio_mb(bool weak_barriers)
{ {
#ifdef CONFIG_SMP
if (weak_barriers) if (weak_barriers)
smp_mb(); smp_mb();
else else
#endif
mb(); mb();
} }
/*
 * virtio_rmb - read memory barrier guarding virtio ring reads.
 * @weak_barriers: when true use dma_rmb(), which (per this commit's
 *	rationale) is available on uniprocessor builds too and is at
 *	least as strict as smp_rmb() on most architectures; otherwise
 *	fall back to a mandatory rmb().
 */
static inline void virtio_rmb(bool weak_barriers)
{
	if (weak_barriers)
		dma_rmb();
	else
		rmb();
}
...@@ -41,26 +42,10 @@ static inline void virtio_rmb(bool weak_barriers) ...@@ -41,26 +42,10 @@ static inline void virtio_rmb(bool weak_barriers)
/*
 * virtio_wmb - write memory barrier guarding virtio ring writes.
 * @weak_barriers: when true use dma_wmb(), which (per this commit's
 *	rationale) is available on uniprocessor builds too and is at
 *	least as strict as smp_wmb() on most architectures; otherwise
 *	fall back to a mandatory wmb().
 */
static inline void virtio_wmb(bool weak_barriers)
{
	if (weak_barriers)
		dma_wmb();
	else
		wmb();
}
#else
/*
 * Pre-patch !CONFIG_SMP fallbacks, deleted by this commit: on UP
 * kernels every virtio barrier degraded to its mandatory variant,
 * ignoring @weak_barriers entirely.  The dma_*mb() variants above
 * make these separate definitions unnecessary.
 */
static inline void virtio_mb(bool weak_barriers)
{
/* unconditional full barrier; weak_barriers is ignored on UP */
mb();
}
static inline void virtio_rmb(bool weak_barriers)
{
/* unconditional read barrier; weak_barriers is ignored on UP */
rmb();
}
static inline void virtio_wmb(bool weak_barriers)
{
/* unconditional write barrier; weak_barriers is ignored on UP */
wmb();
}
#endif
struct virtio_device;
struct virtqueue;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment