workload->status = ret;
if (!IS_ERR_OR_NULL(rq))
- i915_add_request_no_flush(rq);
+ i915_add_request(rq);
mutex_unlock(&dev_priv->drm.struct_mutex);
return ret;
}
}
ret = i915_switch_context(req);
- i915_add_request_no_flush(req);
+ i915_add_request(req);
if (ret)
return ret;
}
void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches);
#define i915_add_request(req) \
- __i915_add_request(req, true)
-#define i915_add_request_no_flush(req) \
__i915_add_request(req, false)
void __i915_gem_request_submit(struct drm_i915_gem_request *request);
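
Net effect of this hunk, reconstructed from the context lines above as a sketch rather than an extra change: i915_add_request() becomes the non-flushing variant, and any caller that still wants the cache flush (presumably only the execbuffer path) has to call __i915_add_request() with flush_caches set to true directly.

	void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches);
	#define i915_add_request(req) \
		__i915_add_request(req, false)
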
intel_mark_page_flip_active(intel_crtc, work);
work->flip_queued_req = i915_gem_request_get(request);
- i915_add_request_no_flush(request);
+ i915_add_request(request);
}
i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
return 0;
cleanup_request:
- i915_add_request_no_flush(request);
+ i915_add_request(request);
cleanup_unpin:
to_intel_plane_state(primary->state)->vma = work->old_vma;
intel_unpin_fb_vma(vma);
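
The two page-flip hunks above follow the same pattern: the request is added on both outcomes, after stashing a reference in work->flip_queued_req when the flip is queued, and under the cleanup_request: label when setup fails, and neither spot needs a cache flush, so the plain i915_add_request() suffices. A minimal sketch of the queued-flip ordering, assuming this is the intel_crtc_page_flip() path quoted above:

	intel_mark_page_flip_active(intel_crtc, work);
	work->flip_queued_req = i915_gem_request_get(request);	/* hold a reference until the flip completes */
	i915_add_request(request);				/* then submit; no cache flush required here */
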
cs = intel_ring_begin(req, 4);
if (IS_ERR(cs)) {
- i915_add_request_no_flush(req);
+ i915_add_request(req);
return PTR_ERR(cs);
}
cs = intel_ring_begin(req, 2);
if (IS_ERR(cs)) {
- i915_add_request_no_flush(req);
+ i915_add_request(req);
return PTR_ERR(cs);
}
cs = intel_ring_begin(req, 6);
if (IS_ERR(cs)) {
- i915_add_request_no_flush(req);
+ i915_add_request(req);
return PTR_ERR(cs);
}
cs = intel_ring_begin(req, 2);
if (IS_ERR(cs)) {
- i915_add_request_no_flush(req);
+ i915_add_request(req);
return PTR_ERR(cs);
}
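
The four hunks above all share one error-path idiom; a sketch of that shape, assuming req was obtained earlier from i915_gem_request_alloc(): if intel_ring_begin() cannot reserve ring space, the already-allocated request is still added, now via the non-flushing i915_add_request(), so it gets submitted and retired rather than leaked, and the intel_ring_begin() error is propagated to the caller.

	cs = intel_ring_begin(req, 4);
	if (IS_ERR(cs)) {
		i915_add_request(req);	/* retire the empty request instead of leaking it */
		return PTR_ERR(cs);
	}
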
rcs->init_context(req);
/* Mark the device busy, calling intel_enable_gt_powersave() */
- i915_add_request_no_flush(req);
+ i915_add_request(req);
unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);