From 59097a2a5ecadb0f025232c665fd11c8ae1e1f58 Mon Sep 17 00:00:00 2001
From: Konrad Dybcio
Date: Sat, 2 Mar 2024 03:22:49 +0100
Subject: [PATCH 1/2] interconnect: qcom: x1e80100: Remove inexistent ACV_PERF BCM

Booting the kernel on X1E results in a message like:

[    2.561524] qnoc-x1e80100 interconnect-0: ACV_PERF could not find RPMh address

And indeed, taking a look at cmd-db, no such BCM exists. Remove it.

Fixes: 9f196772841e ("interconnect: qcom: Add X1E80100 interconnect provider driver")
Signed-off-by: Konrad Dybcio
Reviewed-by: Mike Tipton
Link: https://lore.kernel.org/r/20240302-topic-faux_bcm_x1e-v1-1-c40fab7c4bc5@linaro.org
Signed-off-by: Georgi Djakov
---
 drivers/interconnect/qcom/x1e80100.c | 26 --------------------------
 1 file changed, 26 deletions(-)

diff --git a/drivers/interconnect/qcom/x1e80100.c b/drivers/interconnect/qcom/x1e80100.c
index cbaf4f9c41be..06f0a6d6cbbc 100644
--- a/drivers/interconnect/qcom/x1e80100.c
+++ b/drivers/interconnect/qcom/x1e80100.c
@@ -116,15 +116,6 @@ static struct qcom_icc_node xm_sdc2 = {
 	.links = { X1E80100_SLAVE_A2NOC_SNOC },
 };
 
-static struct qcom_icc_node ddr_perf_mode_master = {
-	.name = "ddr_perf_mode_master",
-	.id = X1E80100_MASTER_DDR_PERF_MODE,
-	.channels = 1,
-	.buswidth = 4,
-	.num_links = 1,
-	.links = { X1E80100_SLAVE_DDR_PERF_MODE },
-};
-
 static struct qcom_icc_node qup0_core_master = {
 	.name = "qup0_core_master",
 	.id = X1E80100_MASTER_QUP_CORE_0,
@@ -832,14 +823,6 @@ static struct qcom_icc_node qns_a2noc_snoc = {
 	.links = { X1E80100_MASTER_A2NOC_SNOC },
 };
 
-static struct qcom_icc_node ddr_perf_mode_slave = {
-	.name = "ddr_perf_mode_slave",
-	.id = X1E80100_SLAVE_DDR_PERF_MODE,
-	.channels = 1,
-	.buswidth = 4,
-	.num_links = 0,
-};
-
 static struct qcom_icc_node qup0_core_slave = {
 	.name = "qup0_core_slave",
 	.id = X1E80100_SLAVE_QUP_CORE_0,
@@ -1591,12 +1574,6 @@ static struct qcom_icc_bcm bcm_acv = {
 	.nodes = { &ebi },
 };
 
-static struct qcom_icc_bcm bcm_acv_perf = {
-	.name = "ACV_PERF",
-	.num_nodes = 1,
-	.nodes = { &ddr_perf_mode_slave },
-};
-
 static struct qcom_icc_bcm bcm_ce0 = {
 	.name = "CE0",
 	.num_nodes = 1,
@@ -1863,18 +1840,15 @@ static const struct qcom_icc_desc x1e80100_aggre2_noc = {
 };
 
 static struct qcom_icc_bcm * const clk_virt_bcms[] = {
-	&bcm_acv_perf,
 	&bcm_qup0,
 	&bcm_qup1,
 	&bcm_qup2,
 };
 
 static struct qcom_icc_node * const clk_virt_nodes[] = {
-	[MASTER_DDR_PERF_MODE] = &ddr_perf_mode_master,
 	[MASTER_QUP_CORE_0] = &qup0_core_master,
 	[MASTER_QUP_CORE_1] = &qup1_core_master,
 	[MASTER_QUP_CORE_2] = &qup2_core_master,
-	[SLAVE_DDR_PERF_MODE] = &ddr_perf_mode_slave,
 	[SLAVE_QUP_CORE_0] = &qup0_core_slave,
 	[SLAVE_QUP_CORE_1] = &qup1_core_slave,
 	[SLAVE_QUP_CORE_2] = &qup2_core_slave,

From de1bf25b6d771abdb52d43546cf57ad775fb68a1 Mon Sep 17 00:00:00 2001
From: Mike Tipton
Date: Tue, 5 Mar 2024 14:56:52 -0800
Subject: [PATCH 2/2] interconnect: Don't access req_list while it's being manipulated

The icc_lock mutex was split into separate icc_lock and icc_bw_lock
mutexes in [1] to avoid lockdep splats. However, this didn't adequately
protect access to icc_node::req_list.

The icc_set_bw() function will eventually iterate over req_list while
only holding icc_bw_lock, but req_list can be modified while only
holding icc_lock. This causes races between icc_set_bw(), of_icc_get(),
and icc_put().

Example A:

  CPU0                               CPU1
  ----                               ----
  icc_set_bw(path_a)
    mutex_lock(&icc_bw_lock);
                                     icc_put(path_b)
                                       mutex_lock(&icc_lock);
    aggregate_requests()
      hlist_for_each_entry(r, ...
                                       hlist_del(...
Example B:

  CPU0                               CPU1
  ----                               ----
  icc_set_bw(path_a)
    mutex_lock(&icc_bw_lock);
                                     path_b = of_icc_get()
                                       of_icc_get_by_index()
                                         mutex_lock(&icc_lock);
                                         path_find()
                                           path_init()
    aggregate_requests()
      hlist_for_each_entry(r, ...
                                           hlist_add_head(...

Fix this by ensuring icc_bw_lock is always held before manipulating
icc_node::req_list. The additional places icc_bw_lock is held don't
perform any memory allocations, so we should still be safe from the
original lockdep splats that motivated the separate locks.

[1] commit af42269c3523 ("interconnect: Fix locking for runpm vs reclaim")

Signed-off-by: Mike Tipton
Fixes: af42269c3523 ("interconnect: Fix locking for runpm vs reclaim")
Reviewed-by: Rob Clark
Link: https://lore.kernel.org/r/20240305225652.22872-1-quic_mdtipton@quicinc.com
Signed-off-by: Georgi Djakov
---
 drivers/interconnect/core.c | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index 50bac2d79d9b..68edb07d4443 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -176,6 +176,8 @@ static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
 
 	path->num_nodes = num_nodes;
 
+	mutex_lock(&icc_bw_lock);
+
 	for (i = num_nodes - 1; i >= 0; i--) {
 		node->provider->users++;
 		hlist_add_head(&path->reqs[i].req_node, &node->req_list);
@@ -186,6 +188,8 @@ static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
 		node = node->reverse;
 	}
 
+	mutex_unlock(&icc_bw_lock);
+
 	return path;
 }
 
@@ -792,12 +796,16 @@ void icc_put(struct icc_path *path)
 		pr_err("%s: error (%d)\n", __func__, ret);
 
 	mutex_lock(&icc_lock);
+	mutex_lock(&icc_bw_lock);
+
 	for (i = 0; i < path->num_nodes; i++) {
 		node = path->reqs[i].node;
 		hlist_del(&path->reqs[i].req_node);
 		if (!WARN_ON(!node->provider->users))
 			node->provider->users--;
 	}
+
+	mutex_unlock(&icc_bw_lock);
 	mutex_unlock(&icc_lock);
 
 	kfree_const(path->name);
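
Note for readers following the locking change above: the sketch below is a minimal
userspace model of the rule the patch enforces, not the kernel implementation. It
uses pthreads and simplified structures; the names (topology_lock, bw_lock, struct
req, add_request, aggregate) are illustrative stand-ins for icc_lock, icc_bw_lock
and the icc_node::req_list machinery. The point it demonstrates is that the request
list is only ever walked or modified under bw_lock, with bw_lock nesting inside
topology_lock on the writer side, mirroring icc_put() after the patch.

/*
 * Minimal userspace model (pthreads, not kernel code) of the locking rule:
 * every writer of a node's request list must hold bw_lock, because the
 * bandwidth-aggregation path walks that list under bw_lock alone.
 * topology_lock stands in for icc_lock and still serializes path
 * creation/teardown. All names here are illustrative only.
 */
#include <pthread.h>
#include <stdio.h>

struct req {
	int peak_bw;
	struct req *next;	/* stand-in for the hlist linkage */
};

struct node {
	struct req *reqs;	/* stand-in for icc_node::req_list */
};

static pthread_mutex_t topology_lock = PTHREAD_MUTEX_INITIALIZER;	/* ~ icc_lock */
static pthread_mutex_t bw_lock = PTHREAD_MUTEX_INITIALIZER;		/* ~ icc_bw_lock */

/* Reader side: aggregation holds only bw_lock while walking the list. */
static int aggregate(struct node *n)
{
	struct req *r;
	int sum = 0;

	pthread_mutex_lock(&bw_lock);
	for (r = n->reqs; r; r = r->next)
		sum += r->peak_bw;
	pthread_mutex_unlock(&bw_lock);

	return sum;
}

/* Writer side: list manipulation takes bw_lock *inside* topology_lock. */
static void add_request(struct node *n, struct req *r)
{
	pthread_mutex_lock(&topology_lock);
	pthread_mutex_lock(&bw_lock);	/* never mutate reqs without bw_lock */
	r->next = n->reqs;
	n->reqs = r;
	pthread_mutex_unlock(&bw_lock);
	pthread_mutex_unlock(&topology_lock);
}

int main(void)
{
	struct node n = { 0 };
	struct req r = { .peak_bw = 100, .next = NULL };

	add_request(&n, &r);
	printf("aggregated peak_bw: %d\n", aggregate(&n));
	return 0;
}

With this ordering, the races in Example A and Example B cannot occur in the model:
a concurrent aggregate() either sees the list before or after the update, never a
half-modified list, and the consistent lock order (bw_lock inside topology_lock)
avoids deadlock between the two paths.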