Diffstat (limited to 'target/linux/bcm27xx/patches-5.15/950-0018-drm-vc4-Increase-the-core-clock-based-on-HVS-load.patch')
-rw-r--r-- | target/linux/bcm27xx/patches-5.15/950-0018-drm-vc4-Increase-the-core-clock-based-on-HVS-load.patch | 242
1 file changed, 242 insertions, 0 deletions
diff --git a/target/linux/bcm27xx/patches-5.15/950-0018-drm-vc4-Increase-the-core-clock-based-on-HVS-load.patch b/target/linux/bcm27xx/patches-5.15/950-0018-drm-vc4-Increase-the-core-clock-based-on-HVS-load.patch
new file mode 100644
index 0000000000..f8475861fe
--- /dev/null
+++ b/target/linux/bcm27xx/patches-5.15/950-0018-drm-vc4-Increase-the-core-clock-based-on-HVS-load.patch
@@ -0,0 +1,242 @@
+From 99c821dc4cd65ff067e2dfff4a47ceb5aa61ad0c Mon Sep 17 00:00:00 2001
+From: Maxime Ripard <maxime@cerno.tech>
+Date: Wed, 26 May 2021 16:13:02 +0200
+Subject: [PATCH] drm/vc4: Increase the core clock based on HVS load
+
+Depending on a given HVS output (HVS to PixelValves) and input (planes
+attached to a channel) load, the HVS needs for the core clock to be
+raised above its boot time default.
+
+Failing to do so will result in a vblank timeout and a stalled display
+pipeline.
+
+Signed-off-by: Maxime Ripard <maxime@cerno.tech>
+---
+ drivers/gpu/drm/vc4/vc4_crtc.c |  15 +++++
+ drivers/gpu/drm/vc4/vc4_drv.h  |   2 +
+ drivers/gpu/drm/vc4/vc4_kms.c  | 112 ++++++++++++++++++++++++++++++---
+ 3 files changed, 119 insertions(+), 10 deletions(-)
+
+--- a/drivers/gpu/drm/vc4/vc4_crtc.c
++++ b/drivers/gpu/drm/vc4/vc4_crtc.c
+@@ -661,12 +661,27 @@ static int vc4_crtc_atomic_check(struct
+ 	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
+ 	struct drm_connector *conn;
+ 	struct drm_connector_state *conn_state;
++	struct drm_encoder *encoder;
+ 	int ret, i;
+
+ 	ret = vc4_hvs_atomic_check(crtc, state);
+ 	if (ret)
+ 		return ret;
+
++	encoder = vc4_get_crtc_encoder(crtc, crtc_state);
++	if (encoder) {
++		const struct drm_display_mode *mode = &crtc_state->adjusted_mode;
++		struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
++
++		mode = &crtc_state->adjusted_mode;
++		if (vc4_encoder->type == VC4_ENCODER_TYPE_HDMI0) {
++			vc4_state->hvs_load = max(mode->clock * mode->hdisplay / mode->htotal + 1000,
++						  mode->clock * 9 / 10) * 1000;
++		} else {
++			vc4_state->hvs_load = mode->clock * 1000;
++		}
++	}
++
+ 	for_each_new_connector_in_state(state, conn, conn_state,
+ 					i) {
+ 		if (conn_state->crtc != crtc)
+--- a/drivers/gpu/drm/vc4/vc4_drv.h
++++ b/drivers/gpu/drm/vc4/vc4_drv.h
+@@ -558,6 +558,8 @@ struct vc4_crtc_state {
+ 		unsigned int bottom;
+ 	} margins;
+
++	unsigned long hvs_load;
++
+ 	/* Transitional state below, only valid during atomic commits */
+ 	bool update_muxing;
+ };
+--- a/drivers/gpu/drm/vc4/vc4_kms.c
++++ b/drivers/gpu/drm/vc4/vc4_kms.c
+@@ -39,9 +39,11 @@ static struct vc4_ctm_state *to_vc4_ctm_
+
+ struct vc4_hvs_state {
+ 	struct drm_private_state base;
++	unsigned long core_clock_rate;
+
+ 	struct {
+ 		unsigned in_use: 1;
++		unsigned long fifo_load;
+ 		struct drm_crtc_commit *pending_commit;
+ 	} fifo_state[HVS_NUM_CHANNELS];
+ };
+@@ -339,11 +341,20 @@ static void vc4_atomic_commit_tail(struc
+ 	struct vc4_dev *vc4 = to_vc4_dev(dev);
+ 	struct vc4_hvs *hvs = vc4->hvs;
+ 	struct drm_crtc_state *new_crtc_state;
++	struct vc4_hvs_state *new_hvs_state;
+ 	struct drm_crtc *crtc;
+ 	struct vc4_hvs_state *old_hvs_state;
+ 	unsigned int channel;
+ 	int i;
+
++	old_hvs_state = vc4_hvs_get_old_global_state(state);
++	if (WARN_ON(!old_hvs_state))
++		return;
++
++	new_hvs_state = vc4_hvs_get_new_global_state(state);
++	if (WARN_ON(!new_hvs_state))
++		return;
++
+ 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ 		struct vc4_crtc_state *vc4_crtc_state;
+@@ -354,10 +365,6 @@ static void vc4_atomic_commit_tail(struc
+ 		vc4_hvs_mask_underrun(dev, vc4_crtc_state->assigned_channel);
+ 	}
+
+-	old_hvs_state = vc4_hvs_get_old_global_state(state);
+-	if (IS_ERR(old_hvs_state))
+-		return;
+-
+ 	for (channel = 0; channel < HVS_NUM_CHANNELS; channel++) {
+ 		struct drm_crtc_commit *commit;
+ 		int ret;
+@@ -377,8 +384,13 @@ static void vc4_atomic_commit_tail(struc
+ 		old_hvs_state->fifo_state[channel].pending_commit = NULL;
+ 	}
+
+-	if (vc4->hvs->hvs5)
+-		clk_set_min_rate(hvs->core_clk, 500000000);
++	if (vc4->hvs->hvs5) {
++		unsigned long core_rate = max_t(unsigned long,
++						500000000,
++						new_hvs_state->core_clock_rate);
++
++		clk_set_min_rate(hvs->core_clk, core_rate);
++	}
+
+ 	drm_atomic_helper_commit_modeset_disables(dev, state);
+
+@@ -401,8 +413,12 @@ static void vc4_atomic_commit_tail(struc
+
+ 	drm_atomic_helper_cleanup_planes(dev, state);
+
+-	if (vc4->hvs->hvs5)
+-		clk_set_min_rate(hvs->core_clk, 0);
++	if (vc4->hvs->hvs5) {
++		drm_dbg(dev, "Running the core clock at %lu Hz\n",
++			new_hvs_state->core_clock_rate);
++
++		clk_set_min_rate(hvs->core_clk, new_hvs_state->core_clock_rate);
++	}
+ }
+
+ static int vc4_atomic_commit_setup(struct drm_atomic_state *state)
+@@ -659,11 +675,13 @@ vc4_hvs_channels_duplicate_state(struct
+
+ 	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
+
+-
+ 	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
+ 		state->fifo_state[i].in_use = old_state->fifo_state[i].in_use;
++		state->fifo_state[i].fifo_load = old_state->fifo_state[i].fifo_load;
+ 	}
+
++	state->core_clock_rate = old_state->core_clock_rate;
++
+ 	return &state->base;
+ }
+
+@@ -819,6 +837,76 @@ static int vc4_pv_muxing_atomic_check(st
+ }
+
+ static int
++vc4_core_clock_atomic_check(struct drm_atomic_state *state)
++{
++	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
++	struct drm_private_state *priv_state;
++	struct vc4_hvs_state *hvs_new_state;
++	struct vc4_load_tracker_state *load_state;
++	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
++	struct drm_crtc *crtc;
++	unsigned int num_outputs;
++	unsigned long pixel_rate;
++	unsigned long cob_rate;
++	unsigned int i;
++
++	priv_state = drm_atomic_get_private_obj_state(state,
++						      &vc4->load_tracker);
++	if (IS_ERR(priv_state))
++		return PTR_ERR(priv_state);
++
++	load_state = to_vc4_load_tracker_state(priv_state);
++
++	hvs_new_state = vc4_hvs_get_global_state(state);
++	if (!hvs_new_state)
++		return -EINVAL;
++
++	for_each_oldnew_crtc_in_state(state, crtc,
++				      old_crtc_state,
++				      new_crtc_state,
++				      i) {
++		if (old_crtc_state->active) {
++			struct vc4_crtc_state *old_vc4_state =
++				to_vc4_crtc_state(old_crtc_state);
++			unsigned int channel = old_vc4_state->assigned_channel;
++
++			hvs_new_state->fifo_state[channel].fifo_load = 0;
++		}
++
++		if (new_crtc_state->active) {
++			struct vc4_crtc_state *new_vc4_state =
++				to_vc4_crtc_state(new_crtc_state);
++			unsigned int channel = new_vc4_state->assigned_channel;
++
++			hvs_new_state->fifo_state[channel].fifo_load =
++				new_vc4_state->hvs_load;
++		}
++	}
++
++	cob_rate = 0;
++	num_outputs = 0;
++	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
++		if (!hvs_new_state->fifo_state[i].in_use)
++			continue;
++
++		num_outputs++;
++		cob_rate += hvs_new_state->fifo_state[i].fifo_load;
++	}
++
++	pixel_rate = load_state->hvs_load;
++	if (num_outputs > 1) {
++		pixel_rate = (pixel_rate * 40) / 100;
++	} else {
++		pixel_rate = (pixel_rate * 60) / 100;
++	}
++
++	hvs_new_state->core_clock_rate = max(cob_rate, pixel_rate);
++
++	return 0;
++}
++
++
++static int
+ vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
+ {
+ 	int ret;
+@@ -835,7 +923,11 @@ vc4_atomic_check(struct drm_device *dev,
+ 	if (ret)
+ 		return ret;
+
+-	return vc4_load_tracker_atomic_check(state);
++	ret = vc4_load_tracker_atomic_check(state);
++	if (ret)
++		return ret;
++
++	return vc4_core_clock_atomic_check(state);
+ }
+
+ static struct drm_mode_config_helper_funcs vc4_mode_config_helpers = {
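For reference, the HDMI branch added to vc4_crtc_atomic_check() derives the per-CRTC hvs_load from the mode's average active-pixel rate plus 1 MHz of margin, floored at 90% of the pixel clock. Below is a minimal standalone C sketch of that arithmetic, assuming a 1920x1080@60 mode (pixel clock 148500 kHz, hdisplay 1920, htotal 2200); the mode values are illustrative and not taken from the patch.

/* Standalone illustration of the hvs_load formula from the patch above;
 * not kernel code. Build with: cc -o hvs_load hvs_load.c */
#include <stdio.h>

#define max(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	/* Assumed 1920x1080@60 timings, in the units drm_display_mode uses. */
	unsigned long clock = 148500;   /* pixel clock in kHz */
	unsigned long hdisplay = 1920;  /* active pixels per line */
	unsigned long htotal = 2200;    /* total pixels per line */

	/* HDMI0 branch: average active-pixel rate plus 1 MHz, but at least
	 * 90% of the pixel clock, converted from kHz to Hz. */
	unsigned long hvs_load = max(clock * hdisplay / htotal + 1000,
				     clock * 9 / 10) * 1000;

	printf("hvs_load = %lu Hz\n", hvs_load); /* prints 133650000 Hz */
	return 0;
}

With a single active output, vc4_core_clock_atomic_check() then sums this per-channel fifo_load into cob_rate and requests the larger of that and 60% of the load tracker's hvs_load (40% when more than one output is active) as the minimum core clock.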