From: Junxian Huang <huangjunxian6@hisilicon.com>
Subject: [PATCH v2 for-rc 1/5] RDMA/hns: Fix an AEQE overflow error caused by untimely update of eq_db_ci
Date: Thu, 24 Oct 2024 20:39:56 +0800
Message-ID: <20241024124000.2931869-2-huangjunxian6@hisilicon.com>
In-Reply-To: <20241024124000.2931869-1-huangjunxian6@hisilicon.com>
References: <20241024124000.2931869-1-huangjunxian6@hisilicon.com>

From: wenglianfa

eq_db_ci is updated only after all AEQEs are processed in the AEQ
interrupt handler. This is not timely enough and may result in AEQ
overflow. Two optimizations are applied:

1. Set an upper limit for the number of AEQEs processed per interrupt.
2. Move time-consuming operations, such as printing, to the bottom
   half of the interrupt.

cmd events and flush_cqe events are still fully handled in the top
half to guarantee timely processing.
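As background for optimization 1, here is a minimal, stand-alone sketch of
budget-limited event-queue polling. This is editorial illustration, not
driver code: fake_eq, entry_pending(), ring_doorbell() and POLL_BUDGET are
hypothetical stand-ins for the driver's EQ ring, next_aeqe_sw_v2(),
update_eq_db() and HNS_AEQ_POLLING_BUDGET.

#include <stdbool.h>
#include <stdio.h>

#define EQ_DEPTH    256
#define POLL_BUDGET  64	/* must stay smaller than EQ_DEPTH */

struct fake_eq {
	int cons_index;	/* consumer index, published to HW via doorbell */
	int prod_index;	/* entries produced by the "hardware" */
};

static bool entry_pending(const struct fake_eq *eq)
{
	return eq->cons_index != eq->prod_index;
}

static void ring_doorbell(const struct fake_eq *eq)
{
	/* Stands in for update_eq_db(): publishing cons_index returns
	 * the consumed slots to hardware so the ring cannot overflow.
	 */
	printf("doorbell: ci = %d\n", eq->cons_index);
}

static void eq_poll(struct fake_eq *eq)
{
	int polled = 0;

	/* Stop after POLL_BUDGET entries even if more are pending, so
	 * the doorbell is rung well before hardware can wrap the ring.
	 * Expensive per-entry work (printing etc.) would be deferred to
	 * a bottom half here, keeping each iteration cheap.
	 */
	while (entry_pending(eq) && polled < POLL_BUDGET) {
		eq->cons_index++;
		polled++;
	}

	ring_doorbell(eq);
}

int main(void)
{
	struct fake_eq eq = { .cons_index = 0, .prod_index = 100 };

	eq_poll(&eq);	/* consumes 64 entries, rings the doorbell */
	eq_poll(&eq);	/* consumes the remaining 36 */
	return 0;
}

The design point is that the doorbell write is what hands consumed slots
back to hardware; bounding the loop guarantees it happens at least once
every POLL_BUDGET entries, so the ring cannot fill while the CPU is busy.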
Fixes: a5073d6054f7 ("RDMA/hns: Add eq support of hip08")
Signed-off-by: wenglianfa
Signed-off-by: Junxian Huang
---
 drivers/infiniband/hw/hns/hns_roce_device.h |  1 +
 drivers/infiniband/hw/hns/hns_roce_hw_v2.c  | 75 ++++++++++++++-------
 drivers/infiniband/hw/hns/hns_roce_hw_v2.h  |  5 ++
 drivers/infiniband/hw/hns/hns_roce_qp.c     | 54 +++++++++------
 4 files changed, 91 insertions(+), 44 deletions(-)

diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 0b1e21cb6d2d..73c78005901e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -1289,6 +1289,7 @@ void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
 void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
 void flush_cqe(struct hns_roce_dev *dev, struct hns_roce_qp *qp);
 void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
+void hns_roce_flush_cqe(struct hns_roce_dev *hr_dev, u32 qpn);
 void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type);
 void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev);
 int hns_roce_init(struct hns_roce_dev *hr_dev);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 24e906b9d3ae..e85c450e1809 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -5967,11 +5967,10 @@ static int hns_roce_v2_query_mpt(struct hns_roce_dev *hr_dev, u32 key,
 	return ret;
 }
 
-static void hns_roce_irq_work_handle(struct work_struct *work)
+static void dump_aeqe_log(struct hns_roce_work *irq_work)
 {
-	struct hns_roce_work *irq_work =
-		container_of(work, struct hns_roce_work, work);
-	struct ib_device *ibdev = &irq_work->hr_dev->ib_dev;
+	struct hns_roce_dev *hr_dev = irq_work->hr_dev;
+	struct ib_device *ibdev = &hr_dev->ib_dev;
 
 	switch (irq_work->event_type) {
 	case HNS_ROCE_EVENT_TYPE_PATH_MIG:
@@ -6015,6 +6014,8 @@ static void hns_roce_irq_work_handle(struct work_struct *work)
 	case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
 		ibdev_warn(ibdev, "DB overflow.\n");
 		break;
+	case HNS_ROCE_EVENT_TYPE_MB:
+		break;
 	case HNS_ROCE_EVENT_TYPE_FLR:
 		ibdev_warn(ibdev, "function level reset.\n");
 		break;
@@ -6025,8 +6026,46 @@ static void hns_roce_irq_work_handle(struct work_struct *work)
 		ibdev_err(ibdev, "invalid xrceth error.\n");
 		break;
 	default:
+		ibdev_info(ibdev, "Undefined event %d.\n",
+			   irq_work->event_type);
 		break;
 	}
+}
+
+static void hns_roce_irq_work_handle(struct work_struct *work)
+{
+	struct hns_roce_work *irq_work =
+		container_of(work, struct hns_roce_work, work);
+	struct hns_roce_dev *hr_dev = irq_work->hr_dev;
+	int event_type = irq_work->event_type;
+	u32 queue_num = irq_work->queue_num;
+
+	switch (event_type) {
+	case HNS_ROCE_EVENT_TYPE_PATH_MIG:
+	case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
+	case HNS_ROCE_EVENT_TYPE_COMM_EST:
+	case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
+	case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+	case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
+	case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
+	case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+	case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION:
+	case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH:
+		hns_roce_qp_event(hr_dev, queue_num, event_type);
+		break;
+	case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
+	case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
+		hns_roce_srq_event(hr_dev, queue_num, event_type);
+		break;
+	case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
+	case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
+		hns_roce_cq_event(hr_dev, queue_num, event_type);
+		break;
+	default:
+		break;
+	}
+
+	dump_aeqe_log(irq_work);
 
 	kfree(irq_work);
 }
@@ -6087,14 +6126,14 @@ static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
 static irqreturn_t hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
 				       struct hns_roce_eq *eq)
 {
-	struct device *dev = hr_dev->dev;
 	struct hns_roce_aeqe *aeqe = next_aeqe_sw_v2(eq);
 	irqreturn_t aeqe_found = IRQ_NONE;
+	int num_aeqes = 0;
 	int event_type;
 	u32 queue_num;
 	int sub_type;
 
-	while (aeqe) {
+	while (aeqe && num_aeqes < HNS_AEQ_POLLING_BUDGET) {
 		/* Make sure we read AEQ entry after we have checked the
 		 * ownership bit
 		 */
@@ -6105,25 +6144,12 @@ static irqreturn_t hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
 		queue_num = hr_reg_read(aeqe, AEQE_EVENT_QUEUE_NUM);
 
 		switch (event_type) {
-		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
-		case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
-		case HNS_ROCE_EVENT_TYPE_COMM_EST:
-		case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
 		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
-		case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
 		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
 		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
 		case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION:
 		case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH:
-			hns_roce_qp_event(hr_dev, queue_num, event_type);
-			break;
-		case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
-		case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
-			hns_roce_srq_event(hr_dev, queue_num, event_type);
-			break;
-		case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
-		case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
-			hns_roce_cq_event(hr_dev, queue_num, event_type);
+			hns_roce_flush_cqe(hr_dev, queue_num);
 			break;
 		case HNS_ROCE_EVENT_TYPE_MB:
 			hns_roce_cmd_event(hr_dev,
@@ -6131,12 +6157,7 @@ static irqreturn_t hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
 					   aeqe->event.cmd.status,
 					   le64_to_cpu(aeqe->event.cmd.out_param));
 			break;
-		case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
-		case HNS_ROCE_EVENT_TYPE_FLR:
-			break;
 		default:
-			dev_err(dev, "unhandled event %d on EQ %d at idx %u.\n",
-				event_type, eq->eqn, eq->cons_index);
 			break;
 		}
 
@@ -6150,6 +6171,7 @@ static irqreturn_t hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
 		hns_roce_v2_init_irq_work(hr_dev, eq, queue_num);
 
 		aeqe = next_aeqe_sw_v2(eq);
+		++num_aeqes;
 	}
 
 	update_eq_db(eq);
@@ -6699,6 +6721,9 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
 	int ret;
 	int i;
 
+	if (hr_dev->caps.aeqe_depth < HNS_AEQ_POLLING_BUDGET)
+		return -EINVAL;
+
 	other_num = hr_dev->caps.num_other_vectors;
 	comp_num = hr_dev->caps.num_comp_vectors;
 	aeq_num = hr_dev->caps.num_aeq_vectors;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index c65f68a14a26..3b3c6259ace0 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -85,6 +85,11 @@
 
 #define HNS_ROCE_V2_TABLE_CHUNK_SIZE	(1 << 18)
 
+/* budget must be smaller than aeqe_depth to guarantee that we update
+ * the ci before we poll all the entries in the EQ.
+ */
+#define HNS_AEQ_POLLING_BUDGET 64
+
 enum {
 	HNS_ROCE_CMD_FLAG_IN = BIT(0),
 	HNS_ROCE_CMD_FLAG_OUT = BIT(1),
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 6b03ba671ff8..dcaa370d4a26 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -39,6 +39,25 @@
 #include "hns_roce_device.h"
 #include "hns_roce_hem.h"
 
+static struct hns_roce_qp *hns_roce_qp_lookup(struct hns_roce_dev *hr_dev,
+					      u32 qpn)
+{
+	struct device *dev = hr_dev->dev;
+	struct hns_roce_qp *qp;
+	unsigned long flags;
+
+	xa_lock_irqsave(&hr_dev->qp_table_xa, flags);
+	qp = __hns_roce_qp_lookup(hr_dev, qpn);
+	if (qp)
+		refcount_inc(&qp->refcount);
+	xa_unlock_irqrestore(&hr_dev->qp_table_xa, flags);
+
+	if (!qp)
+		dev_warn(dev, "async event for bogus QP %08x\n", qpn);
+
+	return qp;
+}
+
 static void flush_work_handle(struct work_struct *work)
 {
 	struct hns_roce_work *flush_work = container_of(work,
@@ -95,31 +114,28 @@ void flush_cqe(struct hns_roce_dev *dev, struct hns_roce_qp *qp)
 
 void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
 {
-	struct device *dev = hr_dev->dev;
 	struct hns_roce_qp *qp;
 
-	xa_lock(&hr_dev->qp_table_xa);
-	qp = __hns_roce_qp_lookup(hr_dev, qpn);
-	if (qp)
-		refcount_inc(&qp->refcount);
-	xa_unlock(&hr_dev->qp_table_xa);
-
-	if (!qp) {
-		dev_warn(dev, "async event for bogus QP %08x\n", qpn);
+	qp = hns_roce_qp_lookup(hr_dev, qpn);
+	if (!qp)
 		return;
-	}
 
-	if (event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR ||
-	    event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR ||
-	    event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR ||
-	    event_type == HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION ||
-	    event_type == HNS_ROCE_EVENT_TYPE_INVALID_XRCETH) {
-		qp->state = IB_QPS_ERR;
+	qp->event(qp, (enum hns_roce_event)event_type);
 
-		flush_cqe(hr_dev, qp);
-	}
+	if (refcount_dec_and_test(&qp->refcount))
+		complete(&qp->free);
+}
 
-	qp->event(qp, (enum hns_roce_event)event_type);
+void hns_roce_flush_cqe(struct hns_roce_dev *hr_dev, u32 qpn)
+{
+	struct hns_roce_qp *qp;
+
+	qp = hns_roce_qp_lookup(hr_dev, qpn);
+	if (!qp)
+		return;
+
+	qp->state = IB_QPS_ERR;
+	flush_cqe(hr_dev, qp);
 
 	if (refcount_dec_and_test(&qp->refcount))
 		complete(&qp->free);
-- 
2.33.0
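
For completeness, a generic sketch of optimization 2, the top-half/bottom-half
split the patch relies on. This is not part of the patch: my_irq_work,
my_irq_work_handle() and defer_event() are hypothetical stand-ins for
struct hns_roce_work, hns_roce_irq_work_handle() and
hns_roce_v2_init_irq_work(); only the structure mirrors the driver.

#include <linux/container_of.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct my_irq_work {
	struct work_struct work;
	int event_type;
	u32 queue_num;
};

/* Bottom half: runs in process context, so slow work such as logging
 * is safe here and cannot delay the EQ doorbell update.
 */
static void my_irq_work_handle(struct work_struct *work)
{
	struct my_irq_work *w = container_of(work, struct my_irq_work, work);

	pr_info("deferred event %d on queue %u\n", w->event_type,
		w->queue_num);
	kfree(w);
}

/* Top half: called per AEQE in hard-IRQ context, so the allocation
 * must be GFP_ATOMIC and nothing here may block.
 */
static void defer_event(struct workqueue_struct *wq, int event_type,
			u32 queue_num)
{
	struct my_irq_work *w = kzalloc(sizeof(*w), GFP_ATOMIC);

	if (!w)
		return;

	INIT_WORK(&w->work, my_irq_work_handle);
	w->event_type = event_type;
	w->queue_num = queue_num;
	queue_work(wq, &w->work);
}

The split keeps each top-half iteration cheap, which is what lets the
bounded polling loop above update eq_db_ci quickly; only cmd events and
flush_cqe events, which must not wait, are handled fully in the top half.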