Context                      | Check   | Description
-----------------------------+---------+--------------------------------------------------------------------------------
snowpatch_ozlabs/apply_patch | warning | Failed to apply on branch powerpc/merge (09a0972ac14f67d600aa3c80035367a8074e90eb)
snowpatch_ozlabs/apply_patch | warning | Failed to apply on branch powerpc/next (3cea11cd5e3b00d91caf0b4730194039b45c5891)
snowpatch_ozlabs/apply_patch | warning | Failed to apply on branch linus/master (b7cbaf59f62f8ab8f157698f9e31642bff525bd0)
snowpatch_ozlabs/apply_patch | warning | Failed to apply on branch powerpc/fixes (99f070b62322a4b8c1252952735806d09eb44b68)
snowpatch_ozlabs/apply_patch | warning | Failed to apply on branch linux-next (c50585161e791d5ddb924da98741fdf413a0480a)
snowpatch_ozlabs/apply_patch | fail    | Failed to apply to any branch
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -1081,7 +1081,7 @@ static void reloc_cache_reset(struct rel
 		struct i915_ggtt *ggtt = cache_to_ggtt(cache);
 
 		intel_gt_flush_ggtt_writes(ggtt->vm.gt);
-		io_mapping_unmap_atomic((void __iomem *)vaddr);
+		io_mapping_unmap_local((void __iomem *)vaddr);
 
 		if (drm_mm_node_allocated(&cache->node)) {
 			ggtt->vm.clear_range(&ggtt->vm,
@@ -1147,7 +1147,7 @@ static void *reloc_iomap(struct drm_i915
 
 	if (cache->vaddr) {
 		intel_gt_flush_ggtt_writes(ggtt->vm.gt);
-		io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
+		io_mapping_unmap_local((void __force __iomem *) unmask_page(cache->vaddr));
 	} else {
 		struct i915_vma *vma;
 		int err;
@@ -1195,8 +1195,7 @@ static void *reloc_iomap(struct drm_i915
 		offset += page << PAGE_SHIFT;
 	}
 
-	vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap,
-							  offset);
+	vaddr = (void __force *)io_mapping_map_local_wc(&ggtt->iomap, offset);
 	cache->page = page;
 	cache->vaddr = (unsigned long)vaddr;
 
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -379,22 +379,15 @@ gtt_user_read(struct io_mapping *mapping
 	      char __user *user_data, int length)
 {
 	void __iomem *vaddr;
-	unsigned long unwritten;
+	bool fail = false;
 
 	/* We can use the cpu mem copy function because this is X86. */
-	vaddr = io_mapping_map_atomic_wc(mapping, base);
-	unwritten = __copy_to_user_inatomic(user_data,
-					    (void __force *)vaddr + offset,
-					    length);
-	io_mapping_unmap_atomic(vaddr);
-	if (unwritten) {
-		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
-		unwritten = copy_to_user(user_data,
-					 (void __force *)vaddr + offset,
-					 length);
-		io_mapping_unmap(vaddr);
-	}
-	return unwritten;
+	vaddr = io_mapping_map_local_wc(mapping, base);
+	if (copy_to_user(user_data, (void __force *)vaddr + offset, length))
+		fail = true;
+	io_mapping_unmap_local(vaddr);
+
+	return fail;
 }
 
 static int
@@ -557,21 +550,14 @@ ggtt_write(struct io_mapping *mapping,
 	   char __user *user_data, int length)
 {
 	void __iomem *vaddr;
-	unsigned long unwritten;
+	bool fail = false;
 
 	/* We can use the cpu mem copy function because this is X86. */
-	vaddr = io_mapping_map_atomic_wc(mapping, base);
-	unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
-						      user_data, length);
-	io_mapping_unmap_atomic(vaddr);
-	if (unwritten) {
-		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
-		unwritten = copy_from_user((void __force *)vaddr + offset,
-					   user_data, length);
-		io_mapping_unmap(vaddr);
-	}
-
-	return unwritten;
+	vaddr = io_mapping_map_local_wc(mapping, base);
+	if (copy_from_user((void __force *)vaddr + offset, user_data, length))
+		fail = true;
+	io_mapping_unmap_local(vaddr);
+	return fail;
 }
 
 /**
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -57,12 +57,12 @@ static void trash_stolen(struct drm_i915
 
 		ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);
 
-		s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
+		s = io_mapping_map_local_wc(&ggtt->iomap, slot);
 		for (x = 0; x < PAGE_SIZE / sizeof(u32); x++) {
 			prng = next_pseudo_random32(prng);
 			iowrite32(prng, &s[x]);
 		}
-		io_mapping_unmap_atomic(s);
+		io_mapping_unmap_local(s);
 	}
 
 	ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -1200,9 +1200,9 @@ static int igt_ggtt_page(void *arg)
 		u64 offset = tmp.start + order[n] * PAGE_SIZE;
 		u32 __iomem *vaddr;
 
-		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
+		vaddr = io_mapping_map_local_wc(&ggtt->iomap, offset);
 		iowrite32(n, vaddr + n);
-		io_mapping_unmap_atomic(vaddr);
+		io_mapping_unmap_local(vaddr);
 	}
 	intel_gt_flush_ggtt_writes(ggtt->vm.gt);
 
@@ -1212,9 +1212,9 @@ static int igt_ggtt_page(void *arg)
 		u32 __iomem *vaddr;
 		u32 val;
 
-		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
+		vaddr = io_mapping_map_local_wc(&ggtt->iomap, offset);
 		val = ioread32(vaddr + n);
-		io_mapping_unmap_atomic(vaddr);
+		io_mapping_unmap_local(vaddr);
 
 		if (val != n) {
 			pr_err("insert page failed: found %d, expected %d\n",
None of these mappings requires the side effect of disabling pagefaults
and preemption. Use io_mapping_map_local_wc() instead, and clean up
gtt_user_read() and ggtt_write() to use a plain copy_to_user() and
copy_from_user(), as the local maps do not disable pagefaults.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: David Airlie <airlied@linux.ie>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: intel-gfx@lists.freedesktop.org
Cc: dri-devel@lists.freedesktop.org
---
V3: New patch
---
 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c |  7 +---
 drivers/gpu/drm/i915/i915_gem.c                | 40 ++++++++-----------------
 drivers/gpu/drm/i915/selftests/i915_gem.c      |  4 +-
 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c  |  8 ++---
 4 files changed, 22 insertions(+), 37 deletions(-)
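For readers unfamiliar with the new API, the conversion pattern used
throughout this patch can be condensed into a small sketch. This is
illustrative only and not part of the patch: it assumes the
io_mapping_map_local_wc()/io_mapping_unmap_local() helpers introduced
earlier in this series, and the function name gtt_read_sketch is made
up. The point is that the local mapping, unlike the atomic one, leaves
pagefaults and preemption enabled, so a plain faulting copy_to_user()
works directly and the old __copy_to_user_inatomic() fast path with its
io_mapping_map_wc() fallback becomes unnecessary:

/*
 * Illustrative sketch, not part of the patch. Assumes the
 * io_mapping_map_local_wc()/io_mapping_unmap_local() helpers from
 * earlier in this series; "gtt_read_sketch" is a hypothetical name.
 */
#include <linux/io-mapping.h>
#include <linux/uaccess.h>

static int gtt_read_sketch(struct io_mapping *mapping,
			   loff_t base, int offset,
			   char __user *user_data, int length)
{
	void __iomem *vaddr;
	bool fail = false;

	/*
	 * The local map must only be used by this context, but it does
	 * not disable pagefaults or preemption, so the plain (possibly
	 * faulting) copy_to_user() is legal here.
	 */
	vaddr = io_mapping_map_local_wc(mapping, base);
	if (copy_to_user(user_data, (void __force *)vaddr + offset, length))
		fail = true;
	io_mapping_unmap_local(vaddr);

	return fail ? -EFAULT : 0;
}

The only behavioral caveat relative to the atomic variant is that the
mapping must not be handed to another context; in the helpers touched
by this patch the pointer never escapes the function, which is why the
one-for-one replacement is safe.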