Commit 88cb6a74
authored Jun 28, 2009 by David Woodhouse

intel-iommu: Change aligned_size() to aligned_nrpages()

Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>

parent b536d24d
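For orientation, the rename is also a semantic change: the old aligned_size() returned a page-aligned byte count that every caller then shifted down to pages, while the new aligned_nrpages() returns the page count directly. Below is a minimal userspace sketch contrasting the two, using simplified stand-ins for the kernel macros and assuming 4KiB pages (PAGE_SHIFT == VTD_PAGE_SHIFT, as on x86):

/* Standalone sketch of the old and new helpers. PAGE_SIZE, PAGE_MASK,
 * PAGE_ALIGN and VTD_PAGE_SHIFT are simplified stand-ins for the kernel
 * macros so this runs outside the kernel; not the kernel's code verbatim. */
#include <stdio.h>
#include <stddef.h>

#define VTD_PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << VTD_PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(a)	(((a) + PAGE_SIZE - 1) & PAGE_MASK)

/* Old helper: a page-aligned size in bytes; callers shifted the result
 * right by VTD_PAGE_SHIFT to get pages. */
static unsigned long aligned_size(unsigned long host_addr, size_t size)
{
	return PAGE_ALIGN((host_addr & ~PAGE_MASK) + size);
}

/* New helper: the number of pages directly, folding the shift in. */
static unsigned long aligned_nrpages(unsigned long host_addr, size_t size)
{
	host_addr &= ~PAGE_MASK;
	host_addr += size + PAGE_SIZE - 1;
	return host_addr >> VTD_PAGE_SHIFT;
}

int main(void)
{
	/* A 0x2000-byte buffer starting 0x100 bytes into a page spans
	 * three pages; both formulations agree. */
	printf("old: %lu pages\n",
	       aligned_size(0x1100, 0x2000) >> VTD_PAGE_SHIFT);	/* 3 */
	printf("new: %lu pages\n",
	       aligned_nrpages(0x1100, 0x2000));		/* 3 */
	return 0;
}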
Showing 1 changed file with 12 additions and 9 deletions.

drivers/pci/intel-iommu.c
@@ -2320,11 +2320,13 @@ int __init init_dmars(void)
 	return ret;
 }
 
-static inline u64 aligned_size(u64 host_addr, size_t size)
+static inline unsigned long aligned_nrpages(unsigned long host_addr,
+					    size_t size)
 {
-	u64 addr;
-	addr = (host_addr & (~PAGE_MASK)) + size;
-	return PAGE_ALIGN(addr);
+	host_addr &= ~PAGE_MASK;
+	host_addr += size + PAGE_SIZE - 1;
+	return host_addr >> VTD_PAGE_SHIFT;
 }
 
 struct iova *
@@ -2466,7 +2468,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 		return 0;
 
 	iommu = domain_get_iommu(domain);
-	size = aligned_size(paddr, size) >> VTD_PAGE_SHIFT;
+	size = aligned_nrpages(paddr, size);
 
 	iova = __intel_alloc_iova(hwdev, domain, size << VTD_PAGE_SHIFT, pdev->dma_mask);
 	if (!iova)
@@ -2757,9 +2759,10 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
 	iommu = domain_get_iommu(domain);
 
 	for_each_sg(sglist, sg, nelems, i)
-		size += aligned_size(sg->offset, sg->length);
+		size += aligned_nrpages(sg->offset, sg->length);
 
-	iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
+	iova = __intel_alloc_iova(hwdev, domain, size << VTD_PAGE_SHIFT,
+				  pdev->dma_mask);
 	if (!iova) {
 		sglist->dma_length = 0;
 		return 0;
@@ -2778,7 +2781,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
 	start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
 	offset_pfn = 0;
 	for_each_sg(sglist, sg, nelems, i) {
-		int nr_pages = aligned_size(sg->offset, sg->length) >> VTD_PAGE_SHIFT;
+		int nr_pages = aligned_nrpages(sg->offset, sg->length);
 		ret = domain_pfn_mapping(domain, start_vpfn + offset_pfn,
 					 page_to_dma_pfn(sg_page(sg)),
 					 nr_pages, prot);
@@ -3502,7 +3505,7 @@ static int intel_iommu_map_range(struct iommu_domain *domain,
 	}
 	/* Round up size to next multiple of PAGE_SIZE, if it and
 	   the low bits of hpa would take us onto the next page */
-	size = aligned_size(hpa, size) >> VTD_PAGE_SHIFT;
+	size = aligned_nrpages(hpa, size);
 	ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
 				 hpa >> VTD_PAGE_SHIFT, size, prot);
 	return ret;
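One subtlety in the intel_map_sg() hunk: size now accumulates pages rather than bytes, so the __intel_alloc_iova() call is changed to pass size << VTD_PAGE_SHIFT, keeping its size argument in bytes as before (as the unchanged __intel_map_single() call site already does). A hypothetical sketch of that unit bookkeeping, where alloc_iova_bytes() is an illustrative stand-in for __intel_alloc_iova(), not the kernel function:

/* Illustrative only: why the intel_map_sg() call site gains a
 * "<< VTD_PAGE_SHIFT". alloc_iova_bytes() is a hypothetical stand-in
 * for __intel_alloc_iova(), whose size argument is in bytes. */
#include <stdio.h>

#define VTD_PAGE_SHIFT 12

static void alloc_iova_bytes(unsigned long nbytes)
{
	printf("reserving %lu bytes (%lu pages)\n",
	       nbytes, nbytes >> VTD_PAGE_SHIFT);
}

int main(void)
{
	/* After the patch, this is a page count: the sum of
	 * aligned_nrpages() over the scatterlist. */
	unsigned long size = 3;

	/* Before the patch, size accumulated bytes and was passed
	 * straight through; now it must be scaled back up to bytes. */
	alloc_iova_bytes(size << VTD_PAGE_SHIFT);	/* 12288 bytes, 3 pages */
	return 0;
}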