It doesn't make sense to never again schedule the work, since by the
time we might want to re-enable psr the world might have changed and
we can do it again.
The only exception is when we shut down the pipe, but that's an
entirely different thing and needs to be handled in psr_disable.
Note that a later patch will again split psr_exit into psr_invalidate
and psr_flush. But the split is different and this simplification
helps with the transition.
v2: Improve the commit message a bit.
Cc: Rodrigo Vivi <[email protected]>
Reviewed-by: Rodrigo Vivi <[email protected]>
Signed-off-by: Daniel Vetter <[email protected]>
goto unlock;
}
- intel_edp_psr_exit(dev, true);
+ intel_edp_psr_exit(dev);
/* Try to flush the object off the GPU without holding the lock.
* We will repeat the flush holding the lock in the normal manner
if (ret)
return ret;
- intel_edp_psr_exit(dev, true);
+ intel_edp_psr_exit(dev);
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (&obj->base == NULL) {
if (ret)
return ret;
- intel_edp_psr_exit(dev, true);
+ intel_edp_psr_exit(dev);
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (&obj->base == NULL) {
struct drm_device *dev = obj->base.dev;
struct drm_crtc *crtc;
- intel_edp_psr_exit(dev, true);
+ intel_edp_psr_exit(dev);
if (!i915.powersave)
return;
return -ENOMEM;
/* Exit PSR early in page flip */
- intel_edp_psr_exit(dev, true);
+ intel_edp_psr_exit(dev);
work->event = event;
work->crtc = crtc;
& ~EDP_PSR_ENABLE);
}
-void intel_edp_psr_exit(struct drm_device *dev, bool schedule_back)
+void intel_edp_psr_exit(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->psr.active)
intel_edp_psr_inactivate(dev);
- if (schedule_back)
- schedule_delayed_work(&dev_priv->psr.work,
- msecs_to_jiffies(100));
+ schedule_delayed_work(&dev_priv->psr.work,
+ msecs_to_jiffies(100));
}
void intel_edp_psr_init(struct drm_device *dev)
void intel_edp_psr_enable(struct intel_dp *intel_dp);
void intel_edp_psr_disable(struct intel_dp *intel_dp);
void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate);
-void intel_edp_psr_exit(struct drm_device *dev, bool schedule_back);
+void intel_edp_psr_exit(struct drm_device *dev);
void intel_edp_psr_init(struct drm_device *dev);
mutex_unlock(&dev->struct_mutex);
}
- intel_edp_psr_exit(dev, true);
+ intel_edp_psr_exit(dev);
return 0;
}