[PATCH v6 3/8] drm: zynqmp_dp: Don't retrain the link in our IRQ
Sean Anderson
sean.anderson at linux.dev
Fri Aug 9 12:35:55 PDT 2024
Retraining the link can take a while, and might involve waiting for
DPCD reads/writes to complete. In preparation for unthreading the IRQ
handler, move this into its own work function.
Signed-off-by: Sean Anderson <sean.anderson@linux.dev>
---
(no changes since v2)
Changes in v2:
- Document hpd_irq_work
- Split this off from the locking changes
drivers/gpu/drm/xlnx/zynqmp_dp.c | 45 ++++++++++++++++++++------------
1 file changed, 29 insertions(+), 16 deletions(-)
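
For readers less familiar with the pattern, here is a minimal, generic sketch of
the hard-IRQ-to-workqueue deferral this patch applies; the foo_* names below are
illustrative placeholders, not actual driver symbols:

/*
 * Illustrative sketch only (not driver code): defer slow, sleepable work
 * from a hard IRQ handler to process context via a work_struct.
 */
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct foo_device {
	struct mutex lock;
	struct work_struct slow_work;	/* deferred, sleepable handling */
};

static void foo_slow_work_func(struct work_struct *work)
{
	struct foo_device *foo = container_of(work, struct foo_device,
					      slow_work);

	/* Process context: sleeping is allowed (AUX reads, retraining, ...). */
	mutex_lock(&foo->lock);
	/* ... long-running handling goes here ... */
	mutex_unlock(&foo->lock);
}

static irqreturn_t foo_irq_handler(int irq, void *data)
{
	struct foo_device *foo = data;

	/* Hard IRQ context: never sleep here; just hand off to the worker. */
	schedule_work(&foo->slow_work);

	return IRQ_HANDLED;
}

/*
 * At probe: INIT_WORK(&foo->slow_work, foo_slow_work_func);
 * At remove: disable the interrupt, then cancel_work_sync(&foo->slow_work)
 * so no worker is left running against freed state.
 */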
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c
index abfccd8bb5a7..cec5711c7026 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c
@@ -289,6 +289,7 @@ struct zynqmp_dp_config {
  * @phy: PHY handles for DP lanes
  * @num_lanes: number of enabled phy lanes
  * @hpd_work: hot plug detection worker
+ * @hpd_irq_work: hot plug detection IRQ worker
  * @status: connection status
  * @enabled: flag to indicate if the device is enabled
  * @dpcd: DP configuration data from currently connected sink device
@@ -304,6 +305,7 @@ struct zynqmp_dp {
 	struct drm_dp_aux aux;
 	struct drm_bridge bridge;
 	struct work_struct hpd_work;
+	struct work_struct hpd_irq_work;
 	struct mutex lock;
 	struct drm_bridge *next_bridge;
@@ -1671,6 +1673,29 @@ static void zynqmp_dp_hpd_work_func(struct work_struct *work)
 	drm_bridge_hpd_notify(&dp->bridge, status);
 }
 
+static void zynqmp_dp_hpd_irq_work_func(struct work_struct *work)
+{
+	struct zynqmp_dp *dp = container_of(work, struct zynqmp_dp,
+					    hpd_irq_work);
+	u8 status[DP_LINK_STATUS_SIZE + 2];
+	int err;
+
+	mutex_lock(&dp->lock);
+	err = drm_dp_dpcd_read(&dp->aux, DP_SINK_COUNT, status,
+			       DP_LINK_STATUS_SIZE + 2);
+	if (err < 0) {
+		dev_dbg_ratelimited(dp->dev,
+				    "could not read sink status: %d\n", err);
+	} else {
+		if (status[4] & DP_LINK_STATUS_UPDATED ||
+		    !drm_dp_clock_recovery_ok(&status[2], dp->mode.lane_cnt) ||
+		    !drm_dp_channel_eq_ok(&status[2], dp->mode.lane_cnt)) {
+			zynqmp_dp_train_loop(dp);
+		}
+	}
+	mutex_unlock(&dp->lock);
+}
+
 static irqreturn_t zynqmp_dp_irq_handler(int irq, void *data)
 {
 	struct zynqmp_dp *dp = (struct zynqmp_dp *)data;
@@ -1702,23 +1727,9 @@ static irqreturn_t zynqmp_dp_irq_handler(int irq, void *data)
 	if (status & ZYNQMP_DP_INT_HPD_EVENT)
 		schedule_work(&dp->hpd_work);
 
-	if (status & ZYNQMP_DP_INT_HPD_IRQ) {
-		int ret;
-		u8 status[DP_LINK_STATUS_SIZE + 2];
+	if (status & ZYNQMP_DP_INT_HPD_IRQ)
+		schedule_work(&dp->hpd_irq_work);
 
-		ret = drm_dp_dpcd_read(&dp->aux, DP_SINK_COUNT, status,
-				       DP_LINK_STATUS_SIZE + 2);
-		if (ret < 0)
-			goto handled;
-
-		if (status[4] & DP_LINK_STATUS_UPDATED ||
-		    !drm_dp_clock_recovery_ok(&status[2], dp->mode.lane_cnt) ||
-		    !drm_dp_channel_eq_ok(&status[2], dp->mode.lane_cnt)) {
-			zynqmp_dp_train_loop(dp);
-		}
-	}
-
-handled:
 	return IRQ_HANDLED;
 }
@@ -1744,6 +1755,7 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub)
 	mutex_init(&dp->lock);
 	INIT_WORK(&dp->hpd_work, zynqmp_dp_hpd_work_func);
+	INIT_WORK(&dp->hpd_irq_work, zynqmp_dp_hpd_irq_work_func);
 
 	/* Acquire all resources (IOMEM, IRQ and PHYs). */
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dp");
@@ -1848,6 +1860,7 @@ void zynqmp_dp_remove(struct zynqmp_dpsub *dpsub)
 	zynqmp_dp_write(dp, ZYNQMP_DP_INT_DS, ZYNQMP_DP_INT_ALL);
 	disable_irq(dp->irq);
 
+	cancel_work_sync(&dp->hpd_irq_work);
 	cancel_work_sync(&dp->hpd_work);
 
 	zynqmp_dp_write(dp, ZYNQMP_DP_TRANSMITTER_ENABLE, 0);
--
2.35.1.1320.gc452695387.dirty