Commit 9231da70 authored by Chris Wilson

drm/i915: Remove kmap/kunmap wrappers

As these are now both plain and simple kmap_atomic/kunmap_atomic pairs,
we can remove the wrappers for a small gain of clarity (in particular,
not hiding the atomic critical sections!).
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170215084357.19977-7-chris@chris-wilson.co.uk
parent 8448661d
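
For orientation, a minimal sketch of the pattern this commit removes and what replaces it. The struct and function names below (page_dma, fill_page) are hypothetical stand-ins, not taken from the i915 sources; the real code operates on struct i915_page_dma, while kmap_atomic()/kunmap_atomic() are the standard kernel highmem API.

/* Sketch only: "page_dma" and "fill_page" are illustrative names. */
#include <linux/highmem.h>
#include <linux/mm_types.h>
#include <linux/types.h>

struct page_dma {
	struct page *page;
};

/* Before: trivial wrappers that hid the atomic critical section. */
static void *kmap_page_dma(struct page_dma *p)
{
	return kmap_atomic(p->page);
}

static void kunmap_page_dma(void *vaddr)
{
	kunmap_atomic(vaddr);
}

/* After: call kmap_atomic()/kunmap_atomic() directly, so it is obvious
 * at the call site that the code between them must not sleep.
 */
static void fill_page(struct page_dma *p, u64 val)
{
	u64 *vaddr = kmap_atomic(p->page);
	int i;

	for (i = 0; i < 512; i++)
		vaddr[i] = val;

	kunmap_atomic(vaddr);
}

The gain is visibility rather than behaviour: with the wrappers gone, every mapping site spells out the atomic section, which is also what the kmap_atomic_px() macro introduced in the diff below expands to.
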
@@ -411,21 +411,7 @@ static void cleanup_page_dma(struct i915_address_space *vm,
 	vm_free_page(vm, p->page);
 }
 
-static void *kmap_page_dma(struct i915_page_dma *p)
-{
-	return kmap_atomic(p->page);
-}
-
-/* We use the flushing unmap only with ppgtt structures:
- * page directories, page tables and scratch pages.
- */
-static void kunmap_page_dma(void *vaddr)
-{
-	kunmap_atomic(vaddr);
-}
-
-#define kmap_px(px) kmap_page_dma(px_base(px))
-#define kunmap_px(vaddr) kunmap_page_dma((vaddr))
+#define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)
 
 #define setup_px(vm, px) setup_page_dma((vm), px_base(px))
 #define cleanup_px(vm, px) cleanup_page_dma((vm), px_base(px))
@@ -436,13 +422,13 @@ static void fill_page_dma(struct i915_address_space *vm,
 			  struct i915_page_dma *p,
 			  const u64 val)
 {
-	u64 * const vaddr = kmap_page_dma(p);
+	u64 * const vaddr = kmap_atomic(p->page);
 	int i;
 
 	for (i = 0; i < 512; i++)
 		vaddr[i] = val;
 
-	kunmap_page_dma(vaddr);
+	kunmap_atomic(vaddr);
 }
 
 static void fill_page_dma_32(struct i915_address_space *vm,
@@ -675,9 +661,9 @@ gen8_setup_pdpe(struct i915_hw_ppgtt *ppgtt,
 	if (!USES_FULL_48BIT_PPGTT(to_i915(ppgtt->base.dev)))
 		return;
 
-	page_directorypo = kmap_px(pdp);
+	page_directorypo = kmap_atomic_px(pdp);
 	page_directorypo[index] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
-	kunmap_px(page_directorypo);
+	kunmap_atomic(page_directorypo);
 }
 
 static void
@@ -685,10 +671,10 @@ gen8_setup_pml4e(struct i915_pml4 *pml4,
 		 struct i915_page_directory_pointer *pdp,
 		 int index)
 {
-	gen8_ppgtt_pml4e_t *pagemap = kmap_px(pml4);
+	gen8_ppgtt_pml4e_t *pagemap = kmap_atomic_px(pml4);
 
 	pagemap[index] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
-	kunmap_px(pagemap);
+	kunmap_atomic(pagemap);
 }
 
 /* Broadwell Page Directory Pointer Descriptors */
@@ -774,10 +760,10 @@ static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
 		return true;
 	}
 
-	vaddr = kmap_px(pt);
+	vaddr = kmap_atomic_px(pt);
 	while (pte < pte_end)
 		vaddr[pte++] = scratch_pte;
-	kunmap_px(vaddr);
+	kunmap_atomic(vaddr);
 
 	return false;
 }
@@ -802,9 +788,9 @@ static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
 		if (gen8_ppgtt_clear_pt(vm, pt, start, length)) {
 			__clear_bit(pde, pd->used_pdes);
 
-			pde_vaddr = kmap_px(pd);
+			pde_vaddr = kmap_atomic_px(pd);
 			pde_vaddr[pde] = scratch_pde;
-			kunmap_px(pde_vaddr);
+			kunmap_atomic(pde_vaddr);
 			free_pt(vm, pt);
 		}
 	}
@@ -904,7 +890,7 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
 	bool ret;
 
 	pd = pdp->page_directory[pdpe];
-	vaddr = kmap_px(pd->page_table[pde]);
+	vaddr = kmap_atomic_px(pd->page_table[pde]);
 	do {
 		vaddr[pte] = pte_encode | iter->dma;
 		iter->dma += PAGE_SIZE;
@@ -932,12 +918,12 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
 				pde = 0;
 			}
 
-			kunmap_px(vaddr);
-			vaddr = kmap_px(pd->page_table[pde]);
+			kunmap_atomic(vaddr);
+			vaddr = kmap_atomic_px(pd->page_table[pde]);
 			pte = 0;
 		}
 	} while (1);
-	kunmap_px(vaddr);
+	kunmap_atomic(vaddr);
 
 	return ret;
 }
@@ -1370,7 +1356,7 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
 	/* Allocations have completed successfully, so set the bitmaps, and do
 	 * the mappings. */
 	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
-		gen8_pde_t *const page_directory = kmap_px(pd);
+		gen8_pde_t *const page_directory = kmap_atomic_px(pd);
 		struct i915_page_table *pt;
 		uint64_t pd_len = length;
 		uint64_t pd_start = start;
@@ -1405,7 +1391,7 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
 			 * point we're still relying on insert_entries() */
 		}
 
-		kunmap_px(page_directory);
+		kunmap_atomic(page_directory);
 		__set_bit(pdpe, pdp->used_pdpes);
 		gen8_setup_pdpe(ppgtt, pdp, pd, pdpe);
 	}
@@ -1512,7 +1498,7 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
 			if (!test_bit(pde, pd->used_pdes))
 				continue;
 
-			pt_vaddr = kmap_px(pt);
+			pt_vaddr = kmap_atomic_px(pt);
 			for (pte = 0; pte < GEN8_PTES; pte += 4) {
 				uint64_t va =
 					(pdpe << GEN8_PDPE_SHIFT) |
@@ -1536,9 +1522,6 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
 				}
 				seq_puts(m, "\n");
 			}
-			/* don't use kunmap_px, it could trigger
-			 * an unnecessary flush.
-			 */
 			kunmap_atomic(pt_vaddr);
 		}
 	}
@@ -1693,7 +1676,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
 				   expected);
 		seq_printf(m, "\tPDE: %x\n", pd_entry);
 
-		pt_vaddr = kmap_px(ppgtt->pd.page_table[pde]);
+		pt_vaddr = kmap_atomic_px(ppgtt->pd.page_table[pde]);
 
 		for (pte = 0; pte < GEN6_PTES; pte+=4) {
 			unsigned long va =
@@ -1716,7 +1699,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
 			}
 			seq_puts(m, "\n");
 		}
-		kunmap_px(pt_vaddr);
+		kunmap_atomic(pt_vaddr);
 	}
 }
 
@@ -1908,12 +1891,12 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
 		if (last_pte > GEN6_PTES)
 			last_pte = GEN6_PTES;
 
-		pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
+		pt_vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]);
 
 		for (i = first_pte; i < last_pte; i++)
 			pt_vaddr[i] = scratch_pte;
 
-		kunmap_px(pt_vaddr);
+		kunmap_atomic(pt_vaddr);
 
 		num_entries -= last_pte - first_pte;
 		first_pte = 0;
@@ -1934,7 +1917,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
 	struct sgt_dma iter;
 	gen6_pte_t *vaddr;
 
-	vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
+	vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]);
 	iter.sg = pages->sgl;
 	iter.dma = sg_dma_address(iter.sg);
 	iter.max = iter.dma + iter.sg->length;
@@ -1952,12 +1935,12 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
 		}
 
 		if (++act_pte == GEN6_PTES) {
-			kunmap_px(vaddr);
-			vaddr = kmap_px(ppgtt->pd.page_table[++act_pt]);
+			kunmap_atomic(vaddr);
+			vaddr = kmap_atomic_px(ppgtt->pd.page_table[++act_pt]);
 			act_pte = 0;
 		}
 	} while (1);
-	kunmap_px(vaddr);
+	kunmap_atomic(vaddr);
 }
 
 static int gen6_alloc_va_range(struct i915_address_space *vm,
...