Commit d284142c
Authored Feb 08, 2008 by David S. Miller
Committed by David S. Miller, Feb 09, 2008
[SPARC64]: IOMMU allocations using iommu-helper layer.
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 19814ea2
Showing 5 changed files with 112 additions and 110 deletions.
arch/sparc64/Kconfig                +4   -0
arch/sparc64/kernel/iommu.c         +81  -44
arch/sparc64/kernel/iommu_common.h  +8   -0
arch/sparc64/kernel/pci_sun4v.c     +18  -66
include/asm-sparc64/iommu.h         +1   -0
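For reference, these are the iommu-helper entry points that iommu.c and pci_sun4v.c are converted to below. The declarations are quoted from the 2.6.25-era linux/iommu-helper.h as best recalled, so treat the exact signatures as approximate:

/* include/linux/iommu-helper.h (approximate, 2.6.25 era) */
extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
				      unsigned long start, unsigned int nr,
				      unsigned long shift,
				      unsigned long boundary_size,
				      unsigned long align_mask);
extern void iommu_area_free(unsigned long *map, unsigned long start,
			    unsigned int nr);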
arch/sparc64/Kconfig
@@ -40,6 +40,10 @@ config MMU
 	bool
 	default y
 
+config IOMMU_HELPER
+	bool
+	default y
+
 config QUICKLIST
 	bool
 	default y
arch/sparc64/kernel/iommu.c
 /* iommu.c: Generic sparc64 IOMMU support.
  *
- * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
  * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
  */
 
@@ -10,6 +10,7 @@
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
 #include <linux/errno.h>
+#include <linux/iommu-helper.h>
 
 #ifdef CONFIG_PCI
 #include <linux/pci.h>
@@ -41,7 +42,7 @@
 	       "i" (ASI_PHYS_BYPASS_EC_E))
 
 /* Must be invoked under the IOMMU lock. */
-static void __iommu_flushall(struct iommu *iommu)
+static void iommu_flushall(struct iommu *iommu)
 {
 	if (iommu->iommu_flushinv) {
 		iommu_write(iommu->iommu_flushinv, ~(u64)0);
@@ -83,54 +84,91 @@ static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
 	iopte_val(*iopte) = val;
 }
 
-/* Based largely upon the ppc64 iommu allocator. */
-static long arena_alloc(struct iommu *iommu, unsigned long npages)
+/* Based almost entirely upon the ppc64 iommu allocator.  If you use the 'handle'
+ * facility it must all be done in one pass while under the iommu lock.
+ *
+ * On sun4u platforms, we only flush the IOMMU once every time we've passed
+ * over the entire page table doing allocations.  Therefore we only ever advance
+ * the hint and cannot backtrack it.
+ */
+unsigned long iommu_range_alloc(struct device *dev,
+				struct iommu *iommu,
+				unsigned long npages,
+				unsigned long *handle)
 {
-	unsigned long n, i, start, end, limit;
-	int pass;
+	unsigned long n, end, start, limit, boundary_size;
+	struct iommu_arena *arena = &iommu->arena;
+	int pass = 0;
+
+	/* This allocator was derived from x86_64's bit string search */
+
+	/* Sanity check */
+	if (unlikely(npages == 0)) {
+		if (printk_ratelimit())
+			WARN_ON(1);
+		return DMA_ERROR_CODE;
+	}
+
+	if (handle && *handle)
+		start = *handle;
+	else
+		start = arena->hint;
 
 	limit = arena->limit;
-	start = arena->hint;
-	pass = 0;
 
-again:
-	n = find_next_zero_bit(arena->map, limit, start);
-	end = n + npages;
-	if (unlikely(end >= limit)) {
+	/* The case below can happen if we have a small segment appended
+	 * to a large, or when the previous alloc was at the very end of
+	 * the available space. If so, go back to the beginning and flush.
+	 */
+	if (start >= limit) {
+		start = 0;
+		if (iommu->flush_all)
+			iommu->flush_all(iommu);
+	}
+
+ again:
+	if (dev)
+		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+				      1 << IO_PAGE_SHIFT);
+	else
+		boundary_size = ALIGN(1UL << 32, 1 << IO_PAGE_SHIFT);
+
+	n = iommu_area_alloc(arena->map, limit, start, npages, 0,
+			     boundary_size >> IO_PAGE_SHIFT, 0);
+	if (n == -1) {
 		if (likely(pass < 1)) {
-			limit = start;
+			/* First failure, rescan from the beginning.  */
 			start = 0;
-			__iommu_flushall(iommu);
+			if (iommu->flush_all)
+				iommu->flush_all(iommu);
 			pass++;
 			goto again;
 		} else {
-			/* Scanned the whole thing, give up. */
-			return -1;
+			/* Second failure, give up */
+			return DMA_ERROR_CODE;
 		}
 	}
 
-	for (i = n; i < end; i++) {
-		if (test_bit(i, arena->map)) {
-			start = i + 1;
-			goto again;
-		}
-	}
-
-	for (i = n; i < end; i++)
-		__set_bit(i, arena->map);
+	end = n + npages;
 
 	arena->hint = end;
 
+	/* Update handle for SG allocations */
+	if (handle)
+		*handle = end;
+
 	return n;
 }
 
-static void arena_free(struct iommu_arena *arena, unsigned long base,
-		       unsigned long npages)
+void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr,
+		      unsigned long npages)
 {
-	unsigned long i;
+	struct iommu_arena *arena = &iommu->arena;
+	unsigned long entry;
 
-	for (i = base; i < (base + npages); i++)
-		__clear_bit(i, arena->map);
+	entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
+
+	iommu_area_free(arena->map, entry, npages);
 }
 
 int iommu_table_init(struct iommu *iommu, int tsbsize,
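The allocation policy above is easiest to see outside the kernel. On the boundary arithmetic first: with sparc64's 8 KB IO pages (IO_PAGE_SHIFT = 13) and no device, boundary_size = ALIGN(1UL << 32, 1 << 13) = 1UL << 32, so iommu_area_alloc() is told to keep each allocation inside a window of 1 << 19 page-table entries. Below is a minimal, runnable userspace model of just the two-pass hint/flush policy; the naive bitmap scan stands in for iommu_area_alloc() and every name here is invented for the sketch:

/* model_alloc.c -- userspace illustration only, not kernel code */
#include <stdio.h>

#define MAP_BITS 64			/* model a tiny 64-entry arena */

static unsigned char map[MAP_BITS];	/* 1 = IOMMU entry in use */
static unsigned long hint;		/* next search start; only advances */

static void model_flush(void)
{
	printf("flush IOMMU\n");	/* stands in for iommu->flush_all() */
}

/* Find npages consecutive zero bits at or after start; -1 if none. */
static long scan(unsigned long start, unsigned long npages)
{
	unsigned long i, run = 0;

	for (i = start; i < MAP_BITS; i++) {
		run = map[i] ? 0 : run + 1;
		if (run == npages)
			return (long)(i + 1 - npages);
	}
	return -1;
}

static long model_range_alloc(unsigned long npages)
{
	unsigned long start = hint, i;
	int pass = 0;
	long n;

	if (start >= MAP_BITS) {	/* hint ran off the end: wrap + flush */
		start = 0;
		model_flush();
	}
again:
	n = scan(start, npages);
	if (n == -1) {
		if (pass++ < 1) {	/* first failure: rescan from zero */
			start = 0;
			model_flush();
			goto again;
		}
		return -1;		/* second failure: give up */
	}
	for (i = n; i < n + npages; i++)
		map[i] = 1;
	hint = n + npages;		/* advance the hint, never backtrack */
	return n;
}

int main(void)
{
	printf("alloc 40 -> %ld\n", model_range_alloc(40));	/* 0 */
	printf("alloc 40 -> %ld\n", model_range_alloc(40));	/* -1, after one flush+rescan */
	printf("alloc 16 -> %ld\n", model_range_alloc(16));	/* 40 */
	return 0;
}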
@@ -156,6 +194,9 @@ int iommu_table_init(struct iommu *iommu, int tsbsize,
 	}
 
 	iommu->arena.limit = num_tsb_entries;
 
+	if (tlb_type != hypervisor)
+		iommu->flush_all = iommu_flushall;
+
 	/* Allocate and initialize the dummy page which we
 	 * set inactive IO PTEs to point to.
 	 */
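The flush_all hook installed here is what lets the shared allocator stay platform neutral: sun4u hardware has a flush register that iommu_flushall() writes, while on sun4v the IOMMU is managed through hypervisor calls and no hook is installed. In sketch form:

/* sun4u (this hunk): a real flush is installed at table-init time. */
if (tlb_type != hypervisor)
	iommu->flush_all = iommu_flushall;

/* sun4v (pci_sun4v.c): nothing assigns the hook, so iommu->flush_all
 * stays NULL and the "if (iommu->flush_all)" guards in
 * iommu_range_alloc() skip the flush entirely. */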
@@ -192,22 +233,18 @@ int iommu_table_init(struct iommu *iommu, int tsbsize,
 	return -ENOMEM;
 }
 
-static inline iopte_t *alloc_npages(struct iommu *iommu, unsigned long npages)
+static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu,
+				    unsigned long npages)
 {
-	long entry;
+	unsigned long entry;
 
-	entry = arena_alloc(iommu, npages);
-	if (unlikely(entry < 0))
+	entry = iommu_range_alloc(dev, iommu, npages, NULL);
+	if (unlikely(entry == DMA_ERROR_CODE))
 		return NULL;
 
 	return iommu->page_table + entry;
 }
 
-static inline void free_npages(struct iommu *iommu, dma_addr_t base,
-			       unsigned long npages)
-{
-	arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages);
-}
-
 static int iommu_alloc_ctx(struct iommu *iommu)
 {
 	int lowest = iommu->ctx_lowest_free;
@@ -258,7 +295,7 @@ static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
 	iommu = dev->archdata.iommu;
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT);
+	iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
 	if (unlikely(iopte == NULL)) {
@@ -296,7 +333,7 @@ static void dma_4u_free_coherent(struct device *dev, size_t size,
 	spin_lock_irqsave(&iommu->lock, flags);
 
-	free_npages(iommu, dvma - iommu->page_table_map_base, npages);
+	iommu_range_free(iommu, dvma, npages);
 
 	spin_unlock_irqrestore(&iommu->lock, flags);
@@ -327,7 +364,7 @@ static dma_addr_t dma_4u_map_single(struct device *dev, void *ptr, size_t sz,
 	npages >>= IO_PAGE_SHIFT;
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	base = alloc_npages(iommu, npages);
+	base = alloc_npages(dev, iommu, npages);
 	ctx = 0;
 	if (iommu->iommu_ctxflush)
 		ctx = iommu_alloc_ctx(iommu);
@@ -465,7 +502,7 @@ static void dma_4u_unmap_single(struct device *dev, dma_addr_t bus_addr,
 	for (i = 0; i < npages; i++)
 		iopte_make_dummy(iommu, base + i);
 
-	free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);
+	iommu_range_free(iommu, bus_addr, npages);
 
 	iommu_free_ctx(iommu, ctx);
@@ -503,7 +540,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
 	spin_lock_irqsave(&iommu->lock, flags);
 
-	base = alloc_npages(iommu, npages);
+	base = alloc_npages(dev, iommu, npages);
 	ctx = 0;
 	if (iommu->iommu_ctxflush)
 		ctx = iommu_alloc_ctx(iommu);
@@ -592,7 +629,7 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
 	for (i = 0; i < npages; i++)
 		iopte_make_dummy(iommu, base + i);
 
-	free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);
+	iommu_range_free(iommu, bus_addr, npages);
 
 	iommu_free_ctx(iommu, ctx);
arch/sparc64/kernel/iommu_common.h
@@ -58,4 +58,12 @@ static inline unsigned long calc_npages(struct scatterlist *sglist, int nelems)
 	return npages;
 }
 
+extern unsigned long iommu_range_alloc(struct device *dev,
+				       struct iommu *iommu,
+				       unsigned long npages,
+				       unsigned long *handle);
+extern void iommu_range_free(struct iommu *iommu,
+			     dma_addr_t dma_addr,
+			     unsigned long npages);
+
 #endif /* _IOMMU_COMMON_H */
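Nothing in this commit passes a non-NULL handle yet; per the comment in iommu_range_alloc(), a handle-based caller must do all of its allocations in one pass while holding the iommu lock, since the handle caches where the next search should resume. A hypothetical scatterlist mapper might look like the following sketch (npages_for() and the IOPTE programming step are stand-ins, not functions from this patch):

	unsigned long handle = 0;
	unsigned long flags, entry;
	struct scatterlist *sg;
	int i;

	spin_lock_irqsave(&iommu->lock, flags);
	for_each_sg(sglist, sg, nelems, i) {
		/* the handle makes each search resume where the last ended */
		entry = iommu_range_alloc(dev, iommu, npages_for(sg), &handle);
		if (unlikely(entry == DMA_ERROR_CODE))
			goto unwind;	/* iommu_range_free() what was mapped */
		/* ... program the IOPTEs for this segment ... */
	}
	spin_unlock_irqrestore(&iommu->lock, flags);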
arch/sparc64/kernel/pci_sun4v.c
 /* pci_sun4v.c: SUN4V specific PCI controller support.
  *
- * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
  */
 
 #include <linux/kernel.h>
@@ -113,54 +113,6 @@ static inline long iommu_batch_end(void)
 	return iommu_batch_flush(p);
 }
 
-static long arena_alloc(struct iommu_arena *arena, unsigned long npages)
-{
-	unsigned long n, i, start, end, limit;
-	int pass;
-
-	limit = arena->limit;
-	start = arena->hint;
-	pass = 0;
-
-again:
-	n = find_next_zero_bit(arena->map, limit, start);
-	end = n + npages;
-	if (unlikely(end >= limit)) {
-		if (likely(pass < 1)) {
-			limit = start;
-			start = 0;
-			pass++;
-			goto again;
-		} else {
-			/* Scanned the whole thing, give up. */
-			return -1;
-		}
-	}
-
-	for (i = n; i < end; i++) {
-		if (test_bit(i, arena->map)) {
-			start = i + 1;
-			goto again;
-		}
-	}
-
-	for (i = n; i < end; i++)
-		__set_bit(i, arena->map);
-
-	arena->hint = end;
-
-	return n;
-}
-
-static void arena_free(struct iommu_arena *arena, unsigned long base,
-		       unsigned long npages)
-{
-	unsigned long i;
-
-	for (i = base; i < (base + npages); i++)
-		__clear_bit(i, arena->map);
-}
-
 static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
 				   dma_addr_t *dma_addrp, gfp_t gfp)
 {
@@ -185,11 +137,11 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
 	iommu = dev->archdata.iommu;
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	entry = arena_alloc(&iommu->arena, npages);
+	entry = iommu_range_alloc(dev, iommu, npages, NULL);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
-	if (unlikely(entry < 0L))
-		goto arena_alloc_fail;
+	if (unlikely(entry == DMA_ERROR_CODE))
+		goto range_alloc_fail;
 
 	*dma_addrp = (iommu->page_table_map_base +
 		      (entry << IO_PAGE_SHIFT));
@@ -219,10 +171,10 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
 iommu_map_fail:
 	/* Interrupts are disabled.  */
 	spin_lock(&iommu->lock);
-	arena_free(&iommu->arena, entry, npages);
+	iommu_range_free(iommu, *dma_addrp, npages);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
-arena_alloc_fail:
+range_alloc_fail:
 	free_pages(first_page, order);
 	return NULL;
 }
@@ -243,7 +195,7 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
 	spin_lock_irqsave(&iommu->lock, flags);
 
-	arena_free(&iommu->arena, entry, npages);
+	iommu_range_free(iommu, dvma, npages);
 
 	do {
 		unsigned long num;
@@ -281,10 +233,10 @@ static dma_addr_t dma_4v_map_single(struct device *dev, void *ptr, size_t sz,
 	npages >>= IO_PAGE_SHIFT;
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	entry = arena_alloc(&iommu->arena, npages);
+	entry = iommu_range_alloc(dev, iommu, npages, NULL);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
-	if (unlikely(entry < 0L))
+	if (unlikely(entry == DMA_ERROR_CODE))
 		goto bad;
 
 	bus_addr = (iommu->page_table_map_base +
@@ -319,7 +271,7 @@ static dma_addr_t dma_4v_map_single(struct device *dev, void *ptr, size_t sz,
 iommu_map_fail:
 	/* Interrupts are disabled.  */
 	spin_lock(&iommu->lock);
-	arena_free(&iommu->arena, entry, npages);
+	iommu_range_free(iommu, bus_addr, npages);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
 	return DMA_ERROR_CODE;
@@ -350,9 +302,9 @@ static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr,
 	spin_lock_irqsave(&iommu->lock, flags);
 
-	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
-	arena_free(&iommu->arena, entry, npages);
+	iommu_range_free(iommu, bus_addr, npages);
+	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
 
 	do {
 		unsigned long num;
@@ -369,10 +321,10 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 			 int nelems, enum dma_data_direction direction)
 {
 	unsigned long flags, npages, i, prot;
+	u32 dma_base, orig_dma_base;
 	struct scatterlist *sg;
 	struct iommu *iommu;
 	long entry, err;
-	u32 dma_base;
 
 	/* Fast path single entry scatterlists.  */
 	if (nelems == 1) {
@@ -393,13 +345,13 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 	npages = calc_npages(sglist, nelems);
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	entry = arena_alloc(&iommu->arena, npages);
+	entry = iommu_range_alloc(dev, iommu, npages, NULL);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
-	if (unlikely(entry < 0L))
+	if (unlikely(entry == DMA_ERROR_CODE))
 		goto bad;
 
-	dma_base = iommu->page_table_map_base +
+	orig_dma_base = dma_base = iommu->page_table_map_base +
 		(entry << IO_PAGE_SHIFT);
 
 	prot = HV_PCI_MAP_ATTR_READ;
@@ -449,7 +401,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 iommu_map_failed:
 	spin_lock_irqsave(&iommu->lock, flags);
-	arena_free(&iommu->arena, entry, npages);
+	iommu_range_free(iommu, orig_dma_base, npages);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
 	return 0;
@@ -481,7 +433,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
 	spin_lock_irqsave(&iommu->lock, flags);
 
-	arena_free(&iommu->arena, entry, npages);
+	iommu_range_free(iommu, bus_addr, npages);
 
 	do {
 		unsigned long num;
include/asm-sparc64/iommu.h
@@ -26,6 +26,7 @@ struct iommu_arena {
 struct iommu {
 	spinlock_t		lock;
 	struct iommu_arena	arena;
+	void			(*flush_all)(struct iommu *);
 	iopte_t			*page_table;
 	u32			page_table_map_base;
 	unsigned long		iommu_control;