Commit cc2552df authored by Jesse Barnes's avatar Jesse Barnes Committed by David Mosberger

[PATCH] ia64: remove stale mmiob function

The consensus on lkml was that devices should do reads from safe
registers to ensure PIO write ordering, which means we no longer need
mmiob.  This patch removes the mmiob entries from the machine vector
headers and io.h and updates the documentation about PIO ordering.
parent 7cdc6a8e
On some platforms, so-called memory-mapped I/O is weakly ordered. On such
platforms, driver writers are responsible for ensuring that I/O writes to
memory-mapped addresses on their device arrive in the order intended. This is
typically done by reading a 'safe' device or bridge register: the I/O
chipset must flush all pending posted writes to the device before it can complete the read. A
driver would usually use this technique immediately prior to the exit of a
critical section of code protected by spinlocks. This would ensure that
subsequent writes to I/O space arrived only after all prior writes (much like a
memory barrier op, mb(), only with respect to I/O).
A more concrete example from a hypothetical device driver:
...
CPU A: spin_lock_irqsave(&dev_lock, flags)
CPU A: val = readl(my_status);
CPU A: ...
CPU A: writel(newval, ring_ptr);
CPU A: spin_unlock_irqrestore(&dev_lock, flags)
...
CPU B: spin_lock_irqsave(&dev_lock, flags)
CPU B: val = readl(my_status);
CPU B: ...
CPU B: writel(newval2, ring_ptr);
CPU B: spin_unlock_irqrestore(&dev_lock, flags)
...
In the case above, the device may receive newval2 before it receives newval,
which could cause problems. Fixing it is easy enough though:
...
CPU A: spin_lock_irqsave(&dev_lock, flags)
CPU A: val = readl(my_status);
CPU A: ...
CPU A: writel(newval, ring_ptr);
CPU A: (void)readl(safe_register); /* maybe a config register? */
CPU A: spin_unlock_irqrestore(&dev_lock, flags)
...
CPU B: spin_lock_irqsave(&dev_lock, flags)
CPU B: val = readl(my_status);
CPU B: ...
CPU B: writel(newval2, ring_ptr);
CPU B: (void)readl(safe_register); /* maybe a config register? */
CPU B: spin_unlock_irqrestore(&dev_lock, flags)
Here, the reads from safe_register will cause the I/O chipset to flush any
pending writes before actually posting the read to the chipset, preventing
possible data corruption.
......@@ -69,22 +69,6 @@ phys_to_virt (unsigned long address)
*/
#define __ia64_mf_a() __asm__ __volatile__ ("mf.a" ::: "memory")
/**
* __ia64_mmiob - I/O space memory barrier
*
* Acts as a memory mapped I/O barrier for platforms that queue writes to
* I/O space. This ensures that subsequent writes to I/O space arrive after
* all previous writes. For most ia64 platforms, this is a simple
* 'mf.a' instruction, so the address is ignored. For other platforms,
* the address may be required to ensure proper ordering of writes to I/O space
* since a 'dummy' read might be necessary to barrier the write operation.
*/
/*
 * __ia64_mmiob - memory-mapped I/O write barrier
 *
 * Forces all outstanding memory accesses (including uncacheable/MMIO
 * writes) to be accepted by the platform before execution continues.
 * This is the open-coded equivalent of the __ia64_mf_a() macro defined
 * earlier in this file; the address being written is irrelevant on
 * generic ia64, so the function takes no arguments.
 */
static inline void
__ia64_mmiob (void)
{
	__asm__ __volatile__ ("mf.a" ::: "memory");
}
static inline const unsigned long
__ia64_get_io_port_base (void)
{
......@@ -287,7 +271,6 @@ __outsl (unsigned long port, void *src, unsigned long count)
#define __outb platform_outb
#define __outw platform_outw
#define __outl platform_outl
#define __mmiob platform_mmiob
#define inb(p) __inb(p)
#define inw(p) __inw(p)
......@@ -301,7 +284,6 @@ __outsl (unsigned long port, void *src, unsigned long count)
#define outsb(p,s,c) __outsb(p,s,c)
#define outsw(p,s,c) __outsw(p,s,c)
#define outsl(p,s,c) __outsl(p,s,c)
#define mmiob() __mmiob()
/*
* The address passed to these functions are ioremap()ped already.
......
......@@ -61,7 +61,6 @@ typedef unsigned int ia64_mv_inl_t (unsigned long);
typedef void ia64_mv_outb_t (unsigned char, unsigned long);
typedef void ia64_mv_outw_t (unsigned short, unsigned long);
typedef void ia64_mv_outl_t (unsigned int, unsigned long);
typedef void ia64_mv_mmiob_t (void);
extern void machvec_noop (void);
......@@ -110,7 +109,6 @@ extern void machvec_noop (void);
# define platform_outb ia64_mv.outb
# define platform_outw ia64_mv.outw
# define platform_outl ia64_mv.outl
# define platofrm_mmiob ia64_mv.mmiob
# endif
/* __attribute__((__aligned__(16))) is required to make size of the
......@@ -149,7 +147,6 @@ struct ia64_machine_vector {
ia64_mv_outb_t *outb;
ia64_mv_outw_t *outw;
ia64_mv_outl_t *outl;
ia64_mv_mmiob_t *mmiob;
} __attribute__((__aligned__(16)));
#define MACHVEC_INIT(name) \
......@@ -184,7 +181,6 @@ struct ia64_machine_vector {
platform_outb, \
platform_outw, \
platform_outl, \
platform_mmiob \
}
extern struct ia64_machine_vector ia64_mv;
......@@ -300,8 +296,5 @@ extern ia64_mv_pci_dma_supported swiotlb_pci_dma_supported;
#ifndef platform_outl
# define platform_outl __ia64_outl
#endif
#ifndef platform_mmiob
# define platform_mmiob __ia64_mmiob
#endif
#endif /* _ASM_IA64_MACHVEC_H */
......@@ -16,7 +16,6 @@ extern ia64_mv_inl_t __ia64_inl;
extern ia64_mv_outb_t __ia64_outb;
extern ia64_mv_outw_t __ia64_outw;
extern ia64_mv_outl_t __ia64_outl;
extern ia64_mv_mmiob_t __ia64_mmiob;
#define MACHVEC_HELPER(name) \
struct ia64_machine_vector machvec_##name __attribute__ ((unused, __section__ (".machvec"))) \
......
......@@ -44,7 +44,6 @@ extern ia64_mv_inl_t sn1_inl;
extern ia64_mv_outb_t sn1_outb;
extern ia64_mv_outw_t sn1_outw;
extern ia64_mv_outl_t sn1_outl;
extern ia64_mv_mmiob_t sn_mmiob;
extern ia64_mv_pci_alloc_consistent sn1_pci_alloc_consistent;
extern ia64_mv_pci_free_consistent sn1_pci_free_consistent;
extern ia64_mv_pci_map_single sn1_pci_map_single;
......@@ -74,7 +73,6 @@ extern ia64_mv_pci_dma_address sn1_dma_address;
#define platform_outb sn1_outb
#define platform_outw sn1_outw
#define platform_outl sn1_outl
#define platform_mmiob sn_mmiob
#define platform_pci_dma_init machvec_noop
#define platform_pci_alloc_consistent sn1_pci_alloc_consistent
#define platform_pci_free_consistent sn1_pci_free_consistent
......
......@@ -47,7 +47,6 @@ extern ia64_mv_inl_t sn_inl;
extern ia64_mv_outb_t sn_outb;
extern ia64_mv_outw_t sn_outw;
extern ia64_mv_outl_t sn_outl;
extern ia64_mv_mmiob_t sn2_mmiob;
extern ia64_mv_pci_alloc_consistent sn_pci_alloc_consistent;
extern ia64_mv_pci_free_consistent sn_pci_free_consistent;
extern ia64_mv_pci_map_single sn_pci_map_single;
......@@ -78,7 +77,6 @@ extern ia64_mv_pci_dma_supported sn_pci_dma_supported;
#define platform_outb sn_outb
#define platform_outw sn_outw
#define platform_outl sn_outl
#define platform_mmiob sn2_mmiob
#define platform_irq_desc sn_irq_desc
#define platform_irq_to_vector sn_irq_to_vector
#define platform_local_vector_to_irq sn_local_vector_to_irq
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment