[RFC PATCH v3 2/2] drivers/perf: hisi: add driver for HNS3 PMU
huangguangbin (A)
huangguangbin2 at huawei.com
Mon May 24 19:17:31 PDT 2021
On 2021/5/11 0:33, Jonathan Cameron wrote:
> On Sat, 8 May 2021 17:44:56 +0800
> Guangbin Huang <huangguangbin2 at huawei.com> wrote:
>
>> HNS3 PMU End Point device supports to collect performance statistics
>
> support collection of performance...
>
Ok.
>> of bandwidth, latency, packet rate, interrupt rate in HiSilicon SoC
>> NIC.
>>
>> NIC of each IO DIE has one PMU device for it. Driver registers each
>
> There is one HNS3 PMU device per IO die. (perhaps?)
>
Yes, one HNS3 PMU device per IO die.
>> PMU device to perf, and exports information of supported events,
>> filter mode of each event, identifier and so on via sysfs.
>>
>> Each PMU device has its own control, counter and interrupt registers,
>> and supports up to 8 events by hardware.
>
> supports 8 hardware event counters.
>
Ok.
>>
>> Filter options contains:
>> event - select event
>> subevent - select subevent
>> port - select physical port of nic
>
> NIC
>
Ok.
>> tc - select tc(must be used with port)
>
> define TC
>
Ok, will define TC (traffic class) in next version.
>> func - select PF/VF
>> queue - select queue of PF/VF(must be used with func)
>> intr - select interrupt number(must be used with func)
>> global - select all functions of IO DIE
>>
>> Signed-off-by: Guangbin Huang <huangguangbin2 at huawei.com>
>
> Hi. A few general comments inline.
>
Ok.
>> ---
>> MAINTAINERS | 6 +
>> drivers/perf/pci/Kconfig | 8 +
>> drivers/perf/pci/hisilicon/Makefile | 1 +
>> drivers/perf/pci/hisilicon/hns3_pmu.c | 1586 +++++++++++++++++++++++++++++++++
>> include/linux/cpuhotplug.h | 1 +
>> 5 files changed, 1602 insertions(+)
>> create mode 100644 drivers/perf/pci/hisilicon/hns3_pmu.c
>>
>> diff --git a/MAINTAINERS b/MAINTAINERS
>> index 2beb4cb..da7354a 100644
>> --- a/MAINTAINERS
>> +++ b/MAINTAINERS
>> @@ -8194,6 +8194,12 @@ S: Maintained
>> F: Documentation/admin-guide/perf/hisi-pcie-pmu.rst
>> F: drivers/perf/pci/hisilicon/hisi_pcie_pmu.c
>>
>> +HISILICON HNS3 PMU DRIVER
>> +M: Guangbin Huang <huangguangbin2 at huawei.com>
>> +S: Supported
>> +F: Documentation/admin-guide/perf/hns3-pmu.rst
>> +F: drivers/perf/pci/hisilicon/hns3_pmu.c
>> +
>> HISILICON QM AND ZIP Controller DRIVER
>> M: Zhou Wang <wangzhou1 at hisilicon.com>
>> L: linux-crypto at vger.kernel.org
>> diff --git a/drivers/perf/pci/Kconfig b/drivers/perf/pci/Kconfig
>> index 9f30291..4f85afd 100644
>> --- a/drivers/perf/pci/Kconfig
>> +++ b/drivers/perf/pci/Kconfig
>> @@ -13,4 +13,12 @@ config HISI_PCIE_PMU
>> Adds the PCIe PMU into perf events system for monitoring latency,
>> bandwidth etc.
>>
>> +config HNS3_PMU
>> + tristate "HNS3 PERF PMU"
>> + depends on ARM64 && PCI
>> + help
>> + Provide support for HNS3 performance monitoring unit (PMU) IEP
>> + devices.
>> + Adds the HNS3 PMU into perf events system for monitoring latency,
>> + bandwidth etc.
>> endmenu
>> diff --git a/drivers/perf/pci/hisilicon/Makefile b/drivers/perf/pci/hisilicon/Makefile
>> index 65b0bd7..708326d0 100644
>> --- a/drivers/perf/pci/hisilicon/Makefile
>> +++ b/drivers/perf/pci/hisilicon/Makefile
>> @@ -1,3 +1,4 @@
>> # SPDX-License-Identifier: GPL-2.0-only
>>
>> obj-$(CONFIG_HISI_PCIE_PMU) += hisi_pcie_pmu.o
>> +obj-$(CONFIG_HNS3_PMU) += hns3_pmu.o
>> diff --git a/drivers/perf/pci/hisilicon/hns3_pmu.c b/drivers/perf/pci/hisilicon/hns3_pmu.c
>> new file mode 100644
>> index 0000000..eddd410
>> --- /dev/null
>> +++ b/drivers/perf/pci/hisilicon/hns3_pmu.c
>> @@ -0,0 +1,1586 @@
>> +// SPDX-License-Identifier: GPL-2.0-only
>> +/*
>> + * This driver adds support for HNS3 PMU iEP device. Related perf events are
>> + * bandwidth, latency, packet rate, interrupt rate etc.
>> + *
>> + * Copyright (C) 2021 HiSilicon Limited
>> + */
>> +#include <linux/bitfield.h>
>> +#include <linux/bitmap.h>
>> +#include <linux/bug.h>
>> +#include <linux/cpuhotplug.h>
>> +#include <linux/cpumask.h>
>> +#include <linux/delay.h>
>> +#include <linux/device.h>
>> +#include <linux/err.h>
>> +#include <linux/interrupt.h>
>> +#include <linux/iopoll.h>
>> +#include <linux/irq.h>
>> +#include <linux/kernel.h>
>> +#include <linux/list.h>
>> +#include <linux/module.h>
>> +#include <linux/pci.h>
>> +#include <linux/perf_event.h>
>> +#include <linux/smp.h>
>> +
>> +/* registers offset address */
>> +#define HNS3_PMU_REG_GLOBAL_CTRL 0x0000
>> +#define HNS3_PMU_REG_CLOCK_FREQ 0x0020
>> +#define HNS3_PMU_REG_BDF 0x0FE0
>> +#define HNS3_PMU_REG_VERSION 0x0FE4
>> +#define HNS3_PMU_REG_DEVICE_ID 0x0FE8
>> +
>> +#define HNS3_PMU_REG_EVENT_OFFSET 0x1000
>> +#define HNS3_PMU_REG_EVENT_SIZE 0x1000
>> +#define HNS3_PMU_REG_EVENT_CTRL_LOW 0x00
>> +#define HNS3_PMU_REG_EVENT_CTRL_HIGH 0x04
>> +#define HNS3_PMU_REG_EVENT_INTR_STATUS 0x08
>> +#define HNS3_PMU_REG_EVENT_INTR_MASK 0x0C
>> +#define HNS3_PMU_REG_EVENT_COUNTER 0x10
>> +#define HNS3_PMU_REG_EVENT_EXT_COUNTER 0x18
>> +#define HNS3_PMU_REG_EVENT_QID_CTRL 0x28
>> +#define HNS3_PMU_REG_EVENT_QID_PARA 0x2C
>> +
>> +#define HNS3_PMU_FILTER_SUPPORT_GLOBAL BIT(0)
>> +#define HNS3_PMU_FILTER_SUPPORT_PORT BIT(1)
>> +#define HNS3_PMU_FILTER_SUPPORT_PORT_TC BIT(2)
>> +#define HNS3_PMU_FILTER_SUPPORT_FUNC BIT(3)
>> +#define HNS3_PMU_FILTER_SUPPORT_FUNC_QUEUE BIT(4)
>> +#define HNS3_PMU_FILTER_SUPPORT_FUNC_INTR BIT(5)
>> +
>> +#define HNS3_PMU_FILTER_ALL_TC 0xF
>> +#define HNS3_PMU_FILTER_ALL_QUEUE 0xFFFF
>> +
>> +#define HNS3_PMU_CTRL_SUBEVENT_S 4
>> +#define HNS3_PMU_CTRL_FILTER_MODE_S 24
>> +
>> +#define HNS3_PMU_GLOBAL_START BIT(0)
>> +
>> +#define HNS3_PMU_EVENT_STATUS_RESET BIT(11)
>> +#define HNS3_PMU_EVENT_EN BIT(12)
>> +#define HNS3_PMU_EVENT_OVERFLOW_RESTART BIT(15)
>> +
>> +#define HNS3_PMU_QID_PARA_FUNC_S 0
>> +#define HNS3_PMU_QID_PARA_QUEUE_S 16
>> +
>> +#define HNS3_PMU_QID_CTRL_REQ_ENABLE BIT(0)
>> +#define HNS3_PMU_QID_CTRL_DONE BIT(1)
>> +#define HNS3_PMU_QID_CTRL_MISS BIT(2)
>> +
>> +#define HNS3_PMU_INTR_MASK_OVERFLOW BIT(1)
>> +
>> +#define HNS3_PMU_MAX_COUNTERS 8
>> +
>> +#define HNS3_PMU_INVALID_CPU_ID (-1)
>> +
>> +enum hns3_pmu_event_type {
>> + HNS3_PMU_EVENT_TYPE_BANDWIDTH,
>> + HNS3_PMU_EVENT_TYPE_PACKET_RATE,
>> + HNS3_PMU_EVENT_TYPE_LATENCY,
>> + HNS3_PMU_EVENT_TYPE_INTR_RATE,
>> +};
>> +
>> +enum hns3_pmu_bandwidth_subevent_code {
>> + HNS3_PMU_SUBEVT_BW_IGU_SSU = 0,
>> + HNS3_PMU_SUBEVT_BW_SSU_EGU = 1,
>> + HNS3_PMU_SUBEVT_BW_SSU_RPU = 2,
>> + HNS3_PMU_SUBEVT_BW_TPU_SSU = 5,
>> + HNS3_PMU_SUBEVT_BW_RPU_RCBRX = 6,
>> + HNS3_PMU_SUBEVT_BW_RCBTX_TXSCH = 8,
>> + HNS3_PMU_SUBEVT_BW_WR_FBD = 9,
>> + HNS3_PMU_SUBEVT_BW_WR_EBD = 10,
>> + HNS3_PMU_SUBEVT_BW_RD_FBD = 11,
>> + HNS3_PMU_SUBEVT_BW_RD_EBD = 12,
>> + HNS3_PMU_SUBEVT_BW_RD_PAY_M0 = 13,
>> + HNS3_PMU_SUBEVT_BW_RD_PAY_M1 = 14,
>> + HNS3_PMU_SUBEVT_BW_WR_PAY_M0 = 15,
>> + HNS3_PMU_SUBEVT_BW_WR_PAY_M1 = 16,
>> +};
>> +
>> +enum hns3_pmu_packet_rate_subevent_code {
>> + HNS3_PMU_SUBEVT_PPS_IGU_SSU = 0,
>> + HNS3_PMU_SUBEVT_PPS_SSU_EGU = 1,
>> + HNS3_PMU_SUBEVT_PPS_SSU_RPU = 2,
>> + HNS3_PMU_SUBEVT_PPS_TPU_SSU = 5,
>> + HNS3_PMU_SUBEVT_PPS_RPU_RCBRX = 6,
>> + HNS3_PMU_SUBEVT_PPS_RCBTX_TPU = 7,
>> + HNS3_PMU_SUBEVT_PPS_RCBTX_TXSCH = 8,
>> + HNS3_PMU_SUBEVT_PPS_WR_FBD = 9,
>> + HNS3_PMU_SUBEVT_PPS_WR_EBD = 10,
>> + HNS3_PMU_SUBEVT_PPS_RD_FBD = 11,
>> + HNS3_PMU_SUBEVT_PPS_RD_EBD = 12,
>> + HNS3_PMU_SUBEVT_PPS_RD_PAY_M0 = 13,
>> + HNS3_PMU_SUBEVT_PPS_RD_PAY_M1 = 14,
>> + HNS3_PMU_SUBEVT_PPS_WR_PAY_M0 = 15,
>> + HNS3_PMU_SUBEVT_PPS_WR_PAY_M1 = 16,
>> + HNS3_PMU_SUBEVT_PPS_NICROH_TX_PRE = 17,
>> + HNS3_PMU_SUBEVT_PPS_NICROH_RX_PRE = 18,
>> +};
>> +
>> +enum hns3_pmu_latency_subevent_code {
>> + HNS3_PMU_SUBEVT_DLY_RX_INTR = 0,
>> + HNS3_PMU_SUBEVT_DLY_TX_PUSH = 2,
>> + HNS3_PMU_SUBEVT_DLY_TX = 4,
>> + HNS3_PMU_SUBEVT_DLY_SSU_TX_NIC = 6,
>> + HNS3_PMU_SUBEVT_DLY_SSU_RX_NIC = 8,
>> + HNS3_PMU_SUBEVT_DLY_RPU = 14,
>> + HNS3_PMU_SUBEVT_DLY_TPU = 15,
>> + HNS3_PMU_SUBEVT_DLY_RPE = 16,
>> + HNS3_PMU_SUBEVT_DLY_TPE = 17,
>> + HNS3_PMU_SUBEVT_DLY_TPE_PUSH = 18,
>> + HNS3_PMU_SUBEVT_DLY_WR_FBD = 19,
>> + HNS3_PMU_SUBEVT_DLY_WR_EBD = 20,
>> + HNS3_PMU_SUBEVT_DLY_RD_FBD = 21,
>> + HNS3_PMU_SUBEVT_DLY_RD_EBD = 22,
>> + HNS3_PMU_SUBEVT_DLY_RD_PAY_M0 = 23,
>> + HNS3_PMU_SUBEVT_DLY_RD_PAY_M1 = 24,
>> + HNS3_PMU_SUBEVT_DLY_WR_PAY_M0 = 25,
>> + HNS3_PMU_SUBEVT_DLY_WR_PAY_M1 = 26,
>> + HNS3_PMU_SUBEVT_DLY_MSIX_WRITE = 28,
>> +};
>> +
>> +enum hns3_pmu_interrupt_rate_subevent_code {
>> + HNS3_PMU_SUBEVT_INTR_MSIX_NIC = 0,
>> + HNS3_PMU_SUBEVT_INTR_MERGE = 2,
>> +};
>> +
>> +/* filter mode supported by each bandwidth subevent code */
>> +#define HNS3_PMU_FILTER_BW_IGU_SSU 0x07
>> +#define HNS3_PMU_FILTER_BW_SSU_EGU 0x07
>> +#define HNS3_PMU_FILTER_BW_SSU_RPU 0x1F
>> +#define HNS3_PMU_FILTER_BW_TPU_SSU 0x1F
>> +#define HNS3_PMU_FILTER_BW_RPU_RCBRX 0x11
>> +#define HNS3_PMU_FILTER_BW_RCBTX_TXSCH 0x1B
>> +#define HNS3_PMU_FILTER_BW_WR_FBD 0x1B
>> +#define HNS3_PMU_FILTER_BW_WR_EBD 0x1B
>> +#define HNS3_PMU_FILTER_BW_RD_FBD 0x1B
>> +#define HNS3_PMU_FILTER_BW_RD_EBD 0x1B
>> +#define HNS3_PMU_FILTER_BW_RD_PAY_M0 0x01
>> +#define HNS3_PMU_FILTER_BW_RD_PAY_M1 0x01
>> +#define HNS3_PMU_FILTER_BW_WR_PAY_M0 0x01
>> +#define HNS3_PMU_FILTER_BW_WR_PAY_M1 0x01
>> +
>> +/* filter mode supported by each packet rate subevent code */
>> +#define HNS3_PMU_FILTER_PPS_IGU_SSU 0x07
>> +#define HNS3_PMU_FILTER_PPS_SSU_EGU 0x07
>> +#define HNS3_PMU_FILTER_PPS_SSU_RPU 0x1F
>> +#define HNS3_PMU_FILTER_PPS_TPU_SSU 0x1F
>> +#define HNS3_PMU_FILTER_PPS_RPU_RCBRX 0x11
>> +#define HNS3_PMU_FILTER_PPS_RCBTX_TPU 0x1F
>> +#define HNS3_PMU_FILTER_PPS_RCBTX_TXSCH 0x1B
>> +#define HNS3_PMU_FILTER_PPS_WR_FBD 0x1B
>> +#define HNS3_PMU_FILTER_PPS_WR_EBD 0x1B
>> +#define HNS3_PMU_FILTER_PPS_RD_FBD 0x1B
>> +#define HNS3_PMU_FILTER_PPS_RD_EBD 0x1B
>> +#define HNS3_PMU_FILTER_PPS_RD_PAY_M0 0x01
>> +#define HNS3_PMU_FILTER_PPS_RD_PAY_M1 0x01
>> +#define HNS3_PMU_FILTER_PPS_WR_PAY_M0 0x01
>> +#define HNS3_PMU_FILTER_PPS_WR_PAY_M1 0x01
>> +#define HNS3_PMU_FILTER_PPS_NICROH_TX_PRE 0x21
>> +#define HNS3_PMU_FILTER_PPS_NICROH_RX_PRE 0x21
>> +
>> +/* filter mode supported by each latency subevent code */
>> +#define HNS3_PMU_FILTER_DLY_RX_INTR 0x01
>> +#define HNS3_PMU_FILTER_DLY_TX_PUSH 0x01
>> +#define HNS3_PMU_FILTER_DLY_TX 0x01
>> +#define HNS3_PMU_FILTER_DLY_SSU_TX_NIC 0x07
>> +#define HNS3_PMU_FILTER_DLY_SSU_RX_NIC 0x07
>> +#define HNS3_PMU_FILTER_DLY_RPU 0x11
>> +#define HNS3_PMU_FILTER_DLY_TPU 0x1F
>> +#define HNS3_PMU_FILTER_DLY_RPE 0x01
>> +#define HNS3_PMU_FILTER_DLY_TPE 0x1B
>> +#define HNS3_PMU_FILTER_DLY_TPE_PUSH 0x1B
>> +#define HNS3_PMU_FILTER_DLY_WR_FBD 0x1B
>> +#define HNS3_PMU_FILTER_DLY_WR_EBD 0x11
>> +#define HNS3_PMU_FILTER_DLY_RD_FBD 0x1B
>> +#define HNS3_PMU_FILTER_DLY_RD_EBD 0x1B
>> +#define HNS3_PMU_FILTER_DLY_RD_PAY_M0 0x01
>> +#define HNS3_PMU_FILTER_DLY_RD_PAY_M1 0x01
>> +#define HNS3_PMU_FILTER_DLY_WR_PAY_M0 0x01
>> +#define HNS3_PMU_FILTER_DLY_WR_PAY_M1 0x01
>> +#define HNS3_PMU_FILTER_DLY_MSIX_WRITE 0x01
>> +
>> +/* filter mode supported by each interrupt rate subevent code */
>> +#define HNS3_PMU_FILTER_INTR_MSIX_NIC 0x01
>> +#define HNS3_PMU_FILTER_INTR_MERGE 0x01
>> +
>> +enum hns3_pmu_hw_filter_mode {
>> + HNS3_PMU_HW_FILTER_GLOBAL,
>> + HNS3_PMU_HW_FILTER_PORT,
>> + HNS3_PMU_HW_FILTER_PORT_TC,
>> + HNS3_PMU_HW_FILTER_FUNC,
>> + HNS3_PMU_HW_FILTER_FUNC_QUEUE,
>> + HNS3_PMU_HW_FILTER_FUNC_INTR,
>> +};
>> +
>> +struct hns3_pmu_event_attr {
>> + u8 event;
>> + u8 subevent;
>> + u16 filter_support;
>> +};
>> +
>> +struct hns3_pmu {
>> + struct perf_event *hw_events[HNS3_PMU_MAX_COUNTERS];
>> + struct hlist_node node;
>> + struct pci_dev *pdev;
>> + struct pmu pmu;
>> + void __iomem *base;
>> + int irq;
>> + int on_cpu;
>> + u32 identifier;
>> + u32 hw_clk_freq; /* hardware clock frequency of PMU */
>> + /* maximum and minimum bdf allowed by PMU */
>> + u16 bdf_min;
>> + u16 bdf_max;
>> +};
>> +
>> +#define to_hns3_pmu(p) (container_of((p), struct hns3_pmu, pmu))
>> +#define attr_to_dattr(a) (container_of((a), struct device_attribute, attr))
>> +#define dattr_to_eattr(d) (container_of((d), struct dev_ext_attribute, attr))
>> +
>> +#define GET_PCI_DEVFN(bdf) ((bdf) & 0xFF)
>> +
>> +#define FILTER_CONDITION_PORT(port) ((1 << (port)) & 0xFF)
>> +#define FILTER_CONDITION_PORT_TC(port, tc) (((port) << 3) | ((tc) & 0x07))
>> +#define FILTER_CONDITION_FUNC_INTR(func, intr) (((intr) << 8) | (func))
>> +
>> +#define BYTES_TO_BITS(bytes) ((bytes) * 8)
>> +
>> +#define HNS3_PMU_FILTER_ATTR(_name, _config, _start, _end) \
>> + static inline u64 hns3_get_##_name(struct perf_event *event) \
>> + { \
>> + return FIELD_GET(GENMASK_ULL(_end, _start), \
>> + event->attr._config); \
>> + }
>> +
>> +HNS3_PMU_FILTER_ATTR(event, config, 0, 7);
>> +HNS3_PMU_FILTER_ATTR(subevent, config, 8, 15);
>> +HNS3_PMU_FILTER_ATTR(port, config1, 0, 3);
>> +HNS3_PMU_FILTER_ATTR(tc, config1, 4, 7);
>> +HNS3_PMU_FILTER_ATTR(bdf, config1, 8, 23);
>> +HNS3_PMU_FILTER_ATTR(queue, config1, 24, 39);
>> +HNS3_PMU_FILTER_ATTR(intr, config1, 40, 51);
>> +HNS3_PMU_FILTER_ATTR(global, config1, 52, 52);
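
(For reference, each of these invocations generates a small accessor that
extracts one bitfield from the user-supplied config/config1 words. For
example, HNS3_PMU_FILTER_ATTR(subevent, config, 8, 15) expands to:

static inline u64 hns3_get_subevent(struct perf_event *event)
{
	/* subevent id lives in bits 15..8 of perf_event_attr::config */
	return FIELD_GET(GENMASK_ULL(15, 8), event->attr.config);
}

so, per the layout above, an event opened with config = 0x0102 decodes as
event = 0x02 and subevent = 0x01.)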
>> +
>> +#define HNS3_CONSTRUCT_BW_EVENT(_name) \
>> + (&(struct hns3_pmu_event_attr) {HNS3_PMU_EVENT_TYPE_BANDWIDTH, \
>> + HNS3_PMU_SUBEVT_BW_##_name, \
>> + HNS3_PMU_FILTER_BW_##_name})
>> +#define HNS3_CONSTRUCT_PPS_EVENT(_name) \
>> + (&(struct hns3_pmu_event_attr) {HNS3_PMU_EVENT_TYPE_PACKET_RATE, \
>> + HNS3_PMU_SUBEVT_PPS_##_name, \
>> + HNS3_PMU_FILTER_PPS_##_name})
>> +#define HNS3_CONSTRUCT_DLY_EVENT(_name) \
>> + (&(struct hns3_pmu_event_attr) {HNS3_PMU_EVENT_TYPE_LATENCY, \
>> + HNS3_PMU_SUBEVT_DLY_##_name, \
>> + HNS3_PMU_FILTER_DLY_##_name})
>> +#define HNS3_CONSTRUCT_INTR_EVENT(_name) \
>> + (&(struct hns3_pmu_event_attr) {HNS3_PMU_EVENT_TYPE_INTR_RATE, \
>> + HNS3_PMU_SUBEVT_INTR_##_name, \
>> + HNS3_PMU_FILTER_INTR_##_name})
>> +
>> +static ssize_t hns3_pmu_format_show(struct device *dev,
>> + struct device_attribute *attr, char *buf)
>> +{
>> + struct dev_ext_attribute *eattr;
>> +
>> + eattr = container_of(attr, struct dev_ext_attribute, attr);
>> +
>> + return sysfs_emit(buf, "%s\n", (char *)eattr->var);
>> +}
>> +
>> +static ssize_t hns3_pmu_event_show(struct device *dev,
>> + struct device_attribute *attr, char *buf)
>> +{
>> + struct hns3_pmu_event_attr *event;
>> + struct dev_ext_attribute *eattr;
>> + u16 config;
>> +
>> + eattr = container_of(attr, struct dev_ext_attribute, attr);
>> + event = (struct hns3_pmu_event_attr *)eattr->var;
>> + config = event->event << 8 | event->subevent;
>> +
>> + return sysfs_emit(buf, "config=0x%04x\n", config);
>> +}
>> +
>> +static ssize_t hns3_pmu_filter_mode_show(struct device *dev,
>> + struct device_attribute *attr,
>> + char *buf)
>> +{
>> + struct hns3_pmu_event_attr *event;
>> + struct dev_ext_attribute *eattr;
>> + int len;
>> +
>> + eattr = container_of(attr, struct dev_ext_attribute, attr);
>> + event = (struct hns3_pmu_event_attr *)eattr->var;
>> +
>> + len = sysfs_emit_at(buf, 0, "filter mode supported: ");
>
> As mentioned in the documentation review, normally sysfs files
> don't have such and intro string. Is this based on an existing
> driver?
>
No, but I need to show which filter modes are supported for each event.
Is there another way I could expose this to the user?
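For reference, with the code as posted, an event whose filter_support is
0x07 (bw_igu_ssu, say) would read back as the single line:

	filter mode supported: global/port/port-tc/

under /sys/bus/event_source/devices/hns3_pmu_<id>/filtermode/ (path
illustrative, based on the "hns3_pmu_%u" naming used at probe time).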
>> + if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_GLOBAL)
>> + len += sysfs_emit_at(buf, len, "global/");
>> + if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT)
>> + len += sysfs_emit_at(buf, len, "port/");
>> + if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT_TC)
>> + len += sysfs_emit_at(buf, len, "port-tc/");
>> + if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC)
>> + len += sysfs_emit_at(buf, len, "func/");
>> + if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_QUEUE)
>> + len += sysfs_emit_at(buf, len, "func-queue/");
>> + if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_INTR)
>> + len += sysfs_emit_at(buf, len, "func-intr/");
>> +
>> + len += sysfs_emit_at(buf, len, "\n");
>> +
>> + return len;
>> +}
>> +
>> +#define HNS3_PMU_ATTR(_name, _func, _config) \
>> + (&((struct dev_ext_attribute[]) { \
>> + { __ATTR(_name, 0444, _func, NULL), (void *)_config } \
>> + })[0].attr.attr)
>> +
>> +#define HNS3_PMU_FORMAT_ATTR(_name, _format) \
>> + HNS3_PMU_ATTR(_name, hns3_pmu_format_show, (void *)_format)
>> +#define HNS3_PMU_EVENT_ATTR(_name, _event) \
>> + HNS3_PMU_ATTR(_name, hns3_pmu_event_show, (void *)_event)
>> +
>> +static u8 hns3_pmu_hw_filter_modes[] = {
>> + HNS3_PMU_HW_FILTER_GLOBAL,
>> + HNS3_PMU_HW_FILTER_PORT,
>> + HNS3_PMU_HW_FILTER_PORT_TC,
>> + HNS3_PMU_HW_FILTER_FUNC,
>> + HNS3_PMU_HW_FILTER_FUNC_QUEUE,
>> + HNS3_PMU_HW_FILTER_FUNC_INTR
>> +};
>> +
>> +#define HNS3_PMU_SET_HW_FILTER(_hwc, _mode) \
>> + ((_hwc)->addr_filters = (void *)&hns3_pmu_hw_filter_modes[(_mode)])
>> +
>> +static ssize_t identifier_show(struct device *dev,
>> + struct device_attribute *attr, char *buf)
>> +{
>> + struct hns3_pmu *hns3_pmu;
>> +
>> + hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev));
>> +
>> + return sysfs_emit(buf, "0x%x\n", hns3_pmu->identifier);
>> +}
>> +static DEVICE_ATTR_RO(identifier);
>> +
>> +static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr,
>> + char *buf)
>> +{
>> + struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev));
>> +
>> + return sysfs_emit(buf, "%d\n", hns3_pmu->on_cpu);
>> +}
>> +static DEVICE_ATTR_RO(cpumask);
>> +
>> +static struct attribute *hns3_pmu_events_attr[] = {
>> + /* bandwidth events */
>> + HNS3_PMU_EVENT_ATTR(bw_igu_ssu,
>> + HNS3_CONSTRUCT_BW_EVENT(IGU_SSU)),
>
> I'd drop the CONSTRUCT part of this as it's fairly obvious and then
> each of these will easily fit on oneline making this both more
> readable and shorter.
>
Ok.
>> + HNS3_PMU_EVENT_ATTR(bw_ssu_egu,
>> + HNS3_CONSTRUCT_BW_EVENT(SSU_EGU)),
>> + HNS3_PMU_EVENT_ATTR(bw_ssu_rpu,
>> + HNS3_CONSTRUCT_BW_EVENT(SSU_RPU)),
>> + HNS3_PMU_EVENT_ATTR(bw_tpu_ssu,
>> + HNS3_CONSTRUCT_BW_EVENT(TPU_SSU)),
>> + HNS3_PMU_EVENT_ATTR(bw_rpu_rcbrx,
>> + HNS3_CONSTRUCT_BW_EVENT(RPU_RCBRX)),
>> + HNS3_PMU_EVENT_ATTR(bw_rcbtx_txsch,
>> + HNS3_CONSTRUCT_BW_EVENT(RCBTX_TXSCH)),
>> + HNS3_PMU_EVENT_ATTR(bw_wr_fbd,
>> + HNS3_CONSTRUCT_BW_EVENT(WR_FBD)),
>> + HNS3_PMU_EVENT_ATTR(bw_wr_ebd,
>> + HNS3_CONSTRUCT_BW_EVENT(WR_EBD)),
>> + HNS3_PMU_EVENT_ATTR(bw_rd_fbd,
>> + HNS3_CONSTRUCT_BW_EVENT(RD_FBD)),
>> + HNS3_PMU_EVENT_ATTR(bw_rd_ebd,
>> + HNS3_CONSTRUCT_BW_EVENT(RD_EBD)),
>> + HNS3_PMU_EVENT_ATTR(bw_rd_pay_m0,
>> + HNS3_CONSTRUCT_BW_EVENT(RD_PAY_M0)),
>> + HNS3_PMU_EVENT_ATTR(bw_rd_pay_m1,
>> + HNS3_CONSTRUCT_BW_EVENT(RD_PAY_M1)),
>> + HNS3_PMU_EVENT_ATTR(bw_wr_pay_m0,
>> + HNS3_CONSTRUCT_BW_EVENT(WR_PAY_M0)),
>> + HNS3_PMU_EVENT_ATTR(bw_wr_pay_m1,
>> + HNS3_CONSTRUCT_BW_EVENT(WR_PAY_M1)),
>> +
>> + /* packet rate events */
>> + HNS3_PMU_EVENT_ATTR(pps_igu_ssu,
>> + HNS3_CONSTRUCT_PPS_EVENT(IGU_SSU)),
>> + HNS3_PMU_EVENT_ATTR(pps_ssu_egu,
>> + HNS3_CONSTRUCT_PPS_EVENT(SSU_EGU)),
>> + HNS3_PMU_EVENT_ATTR(pps_ssu_rpu,
>> + HNS3_CONSTRUCT_PPS_EVENT(SSU_RPU)),
>> + HNS3_PMU_EVENT_ATTR(pps_tpu_ssu,
>> + HNS3_CONSTRUCT_PPS_EVENT(TPU_SSU)),
>> + HNS3_PMU_EVENT_ATTR(pps_rpu_rcbrx,
>> + HNS3_CONSTRUCT_PPS_EVENT(RPU_RCBRX)),
>> + HNS3_PMU_EVENT_ATTR(pps_rcbtx_tpu,
>> + HNS3_CONSTRUCT_PPS_EVENT(RCBTX_TPU)),
>> + HNS3_PMU_EVENT_ATTR(pps_rcbtx_txsch,
>> + HNS3_CONSTRUCT_PPS_EVENT(RCBTX_TXSCH)),
>> + HNS3_PMU_EVENT_ATTR(pps_wr_fbd,
>> + HNS3_CONSTRUCT_PPS_EVENT(WR_FBD)),
>> + HNS3_PMU_EVENT_ATTR(pps_wr_ebd,
>> + HNS3_CONSTRUCT_PPS_EVENT(WR_EBD)),
>> + HNS3_PMU_EVENT_ATTR(pps_rd_fbd,
>> + HNS3_CONSTRUCT_PPS_EVENT(RD_FBD)),
>> + HNS3_PMU_EVENT_ATTR(pps_rd_ebd,
>> + HNS3_CONSTRUCT_PPS_EVENT(RD_EBD)),
>> + HNS3_PMU_EVENT_ATTR(pps_rd_pay_m0,
>> + HNS3_CONSTRUCT_PPS_EVENT(RD_PAY_M0)),
>> + HNS3_PMU_EVENT_ATTR(pps_rd_pay_m1,
>> + HNS3_CONSTRUCT_PPS_EVENT(RD_PAY_M1)),
>> + HNS3_PMU_EVENT_ATTR(pps_wr_pay_m0,
>> + HNS3_CONSTRUCT_PPS_EVENT(WR_PAY_M0)),
>> + HNS3_PMU_EVENT_ATTR(pps_wr_pay_m1,
>> + HNS3_CONSTRUCT_PPS_EVENT(WR_PAY_M1)),
>> + HNS3_PMU_EVENT_ATTR(pps_intr_nicroh_tx_pre,
>> + HNS3_CONSTRUCT_PPS_EVENT(NICROH_TX_PRE)),
>> + HNS3_PMU_EVENT_ATTR(pps_intr_nicroh_rx_pre,
>> + HNS3_CONSTRUCT_PPS_EVENT(NICROH_RX_PRE)),
>> +
>> + /* latency events */
>> + HNS3_PMU_EVENT_ATTR(dly_rx_mac_to_intr,
>> + HNS3_CONSTRUCT_DLY_EVENT(RX_INTR)),
>> + HNS3_PMU_EVENT_ATTR(dly_tx_push_to_mac,
>> + HNS3_CONSTRUCT_DLY_EVENT(TX_PUSH)),
>> + HNS3_PMU_EVENT_ATTR(dly_tx_normal_to_mac,
>> + HNS3_CONSTRUCT_DLY_EVENT(TX)),
>> + HNS3_PMU_EVENT_ATTR(dly_ssu_tx_th_nic,
>> + HNS3_CONSTRUCT_DLY_EVENT(SSU_TX_NIC)),
>> + HNS3_PMU_EVENT_ATTR(dly_ssu_rx_th_nic,
>> + HNS3_CONSTRUCT_DLY_EVENT(SSU_RX_NIC)),
>> + HNS3_PMU_EVENT_ATTR(dly_rpu,
>> + HNS3_CONSTRUCT_DLY_EVENT(RPU)),
>> + HNS3_PMU_EVENT_ATTR(dly_tpu,
>> + HNS3_CONSTRUCT_DLY_EVENT(TPU)),
>> + HNS3_PMU_EVENT_ATTR(dly_rpe,
>> + HNS3_CONSTRUCT_DLY_EVENT(RPE)),
>> + HNS3_PMU_EVENT_ATTR(dly_tpe_normal,
>> + HNS3_CONSTRUCT_DLY_EVENT(TPE)),
>> + HNS3_PMU_EVENT_ATTR(dly_tpe_push,
>> + HNS3_CONSTRUCT_DLY_EVENT(TPE_PUSH)),
>> + HNS3_PMU_EVENT_ATTR(dly_wr_fbd,
>> + HNS3_CONSTRUCT_DLY_EVENT(WR_FBD)),
>> + HNS3_PMU_EVENT_ATTR(dly_wr_ebd,
>> + HNS3_CONSTRUCT_DLY_EVENT(WR_EBD)),
>> + HNS3_PMU_EVENT_ATTR(dly_rd_fbd,
>> + HNS3_CONSTRUCT_DLY_EVENT(RD_FBD)),
>> + HNS3_PMU_EVENT_ATTR(dly_rd_ebd,
>> + HNS3_CONSTRUCT_DLY_EVENT(RD_EBD)),
>> + HNS3_PMU_EVENT_ATTR(dly_rd_pay_m0,
>> + HNS3_CONSTRUCT_DLY_EVENT(RD_PAY_M0)),
>> + HNS3_PMU_EVENT_ATTR(dly_rd_pay_m1,
>> + HNS3_CONSTRUCT_DLY_EVENT(RD_PAY_M1)),
>> + HNS3_PMU_EVENT_ATTR(dly_wr_pay_m0,
>> + HNS3_CONSTRUCT_DLY_EVENT(WR_PAY_M0)),
>> + HNS3_PMU_EVENT_ATTR(dly_wr_pay_m1,
>> + HNS3_CONSTRUCT_DLY_EVENT(WR_PAY_M1)),
>> + HNS3_PMU_EVENT_ATTR(dly_msix_write,
>> + HNS3_CONSTRUCT_DLY_EVENT(MSIX_WRITE)),
>> +
>> + /* interrupt rate events */
>> + HNS3_PMU_EVENT_ATTR(pps_intr_msix_nic,
>> + HNS3_CONSTRUCT_INTR_EVENT(MSIX_NIC)),
>> + HNS3_PMU_EVENT_ATTR(pps_intr_merge_nicroh,
>> + HNS3_CONSTRUCT_INTR_EVENT(MERGE)),
>> + NULL
>> +};
>> +
>> +static struct attribute_group hns3_pmu_events_group = {
>> + .name = "events",
>> + .attrs = hns3_pmu_events_attr,
>> +};
>> +
>> +static struct attribute_group hns3_pmu_filter_mode_group = {
>> + .name = "filtermode",
>> +};
>> +
>> +static struct attribute *hns3_pmu_format_attr[] = {
>> + HNS3_PMU_FORMAT_ATTR(event, "config:0-7"),
>> + HNS3_PMU_FORMAT_ATTR(subevent, "config:8-15"),
>> + HNS3_PMU_FORMAT_ATTR(port, "config1:0-3"),
>> + HNS3_PMU_FORMAT_ATTR(tc, "config1:4-7"),
>> + HNS3_PMU_FORMAT_ATTR(bdf, "config1:8-23"),
>> + HNS3_PMU_FORMAT_ATTR(queue, "config1:24-39"),
>> + HNS3_PMU_FORMAT_ATTR(intr, "config1:40-51"),
>> + HNS3_PMU_FORMAT_ATTR(global, "config1:52-52"),
>> + NULL
>> +};
>> +
>> +static struct attribute_group hns3_pmu_format_group = {
>> + .name = "format",
>> + .attrs = hns3_pmu_format_attr,
>> +};
>> +
>> +static struct attribute *hns3_pmu_cpumask_attrs[] = {
>> + &dev_attr_cpumask.attr,
>> + NULL
>> +};
>> +
>> +static struct attribute_group hns3_pmu_cpumask_attr_group = {
>> + .attrs = hns3_pmu_cpumask_attrs,
>> +};
>> +
>> +static struct attribute *hns3_pmu_identifier_attrs[] = {
>> + &dev_attr_identifier.attr,
>> + NULL
>> +};
>> +
>> +static struct attribute_group hns3_pmu_identifier_attr_group = {
>> + .attrs = hns3_pmu_identifier_attrs,
>> +};
>> +
>> +static const struct attribute_group *hns3_pmu_attr_groups[] = {
>> + &hns3_pmu_events_group,
>> + &hns3_pmu_filter_mode_group,
>> + &hns3_pmu_format_group,
>> + &hns3_pmu_cpumask_attr_group,
>> + &hns3_pmu_identifier_attr_group,
>> + NULL
>> +};
>> +
>> +static int hns3_pmu_init_filter_mode_group(void)
>
> I'm not sure I'm convinced doing this dynamically is a good idea.
> Given it generates the same thing every time, is it really worth
> taking this away from the static data we'd normally see.
>
If I don't do it this way, I need to define a second array as large as
hns3_pmu_events_attr[], and every time a new event is added, both arrays
have to be modified, so I think building the data dynamically is better.
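For comparison, a minimal sketch of the static alternative being suggested,
reusing the existing HNS3_PMU_ATTR() helper and event descriptors (the
HNS3_PMU_FLT_MODE_ATTR name here is illustrative):

#define HNS3_PMU_FLT_MODE_ATTR(_name, _event) \
	HNS3_PMU_ATTR(_name, hns3_pmu_filter_mode_show, (void *)_event)

static struct attribute *hns3_pmu_filter_mode_attr[] = {
	HNS3_PMU_FLT_MODE_ATTR(bw_igu_ssu, HNS3_CONSTRUCT_BW_EVENT(IGU_SSU)),
	HNS3_PMU_FLT_MODE_ATTR(bw_ssu_egu, HNS3_CONSTRUCT_BW_EVENT(SSU_EGU)),
	/* ... one entry per event, mirroring hns3_pmu_events_attr[] ... */
	NULL
};

which makes the trade-off concrete: the static version drops the runtime
allocation but requires every new event to be added in both arrays.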
>> +{
>> + u32 group_size = ARRAY_SIZE(hns3_pmu_events_attr);
>> + struct dev_ext_attribute *eattr_filter;
>> + struct dev_ext_attribute *eattr_event;
>> + struct device_attribute *dattr_event;
>> + struct attribute **attr_filter;
>> + struct attribute *attr_event;
>> + u32 i;
>> +
>> + attr_filter = kcalloc(group_size, sizeof(struct attribute *),
>> + GFP_KERNEL);
>> + if (!attr_filter)
>> + return -ENOMEM;
>> +
>> + eattr_filter = kcalloc(group_size - 1, sizeof(struct dev_ext_attribute),
>> + GFP_KERNEL);
>> + if (!eattr_filter) {
>> + kfree(attr_filter);
>> + return -ENOMEM;
>> + }
>> +
>> + for (i = 0; i < group_size - 1; i++) {
>> + attr_event = hns3_pmu_events_attr[i];
>> + dattr_event = attr_to_dattr(attr_event);
>> + eattr_event = dattr_to_eattr(dattr_event);
>> +
>> + memcpy(&eattr_filter[i], eattr_event,
>> + sizeof(struct dev_ext_attribute));
>> + eattr_filter[i].attr.show = hns3_pmu_filter_mode_show;
>> +
>> + attr_filter[i] = &eattr_filter[i].attr.attr;
>> + }
>> +
>> + hns3_pmu_filter_mode_group.attrs = attr_filter;
>> +
>> + return 0;
>> +}
>> +
>> +static void hns3_pmu_uinit_filter_mode_group(void)
>> +{
>> + struct dev_ext_attribute *eattr;
>> + struct device_attribute *dattr;
>> + struct attribute **attrs;
>> +
>> + attrs = hns3_pmu_filter_mode_group.attrs;
>> + dattr = attr_to_dattr(attrs[0]);
>> + eattr = dattr_to_eattr(dattr);
>> +
>> + kfree(eattr);
>> + kfree(attrs);
>> +
>> + hns3_pmu_filter_mode_group.attrs = NULL;
>> +}
>> +
>> +static u32 hns3_pmu_get_offset(u32 offset, u32 idx)
>> +{
>> + return offset + HNS3_PMU_REG_EVENT_OFFSET +
>> + HNS3_PMU_REG_EVENT_SIZE * idx;
>> +}
>> +
>> +static u32 hns3_pmu_readl(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx)
>> +{
>> + u32 offset = hns3_pmu_get_offset(reg_offset, idx);
>> +
>> + return readl(hns3_pmu->base + offset);
>> +}
>> +
>> +static void hns3_pmu_writel(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx,
>> + u32 val)
>> +{
>> + u32 offset = hns3_pmu_get_offset(reg_offset, idx);
>> +
>> + writel(val, hns3_pmu->base + offset);
>> +}
>> +
>> +static u64 hns3_pmu_readq(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx)
>> +{
>> + u32 offset = hns3_pmu_get_offset(reg_offset, idx);
>> +
>> + return readq(hns3_pmu->base + offset);
>> +}
>> +
>> +static void hns3_pmu_writeq(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx,
>> + u64 val)
>> +{
>> + u32 offset = hns3_pmu_get_offset(reg_offset, idx);
>> +
>> + writeq(val, hns3_pmu->base + offset);
>> +}
>> +
>> +static int hns3_pmu_get_event_idx(struct hns3_pmu *hns3_pmu)
>> +{
>> + int idx;
>> +
>> + for (idx = 0; idx < HNS3_PMU_MAX_COUNTERS; idx++)
>> + if (!hns3_pmu->hw_events[idx])
>> + return idx;
>> +
>> + return -EBUSY;
>> +}
>> +
>> +static bool hns3_pmu_valid_bdf(struct hns3_pmu *hns3_pmu, u16 bdf)
>> +{
>> + struct pci_dev *pdev;
>> +
>> + if (bdf < hns3_pmu->bdf_min || bdf > hns3_pmu->bdf_max) {
>> + pci_err(hns3_pmu->pdev, "Invalid EP device: %#x!\n", bdf);
>> + return false;
>> + }
>> +
>> + pdev = pci_get_domain_bus_and_slot(pci_domain_nr(hns3_pmu->pdev->bus),
>> + PCI_BUS_NUM(bdf),
>> + GET_PCI_DEVFN(bdf));
>> + if (!pdev) {
>> + pci_err(hns3_pmu->pdev, "Nonexistent EP device: %#x!\n", bdf);
>> + return false;
>> + }
>> +
>> + pci_dev_put(pdev);
>> + return true;
>> +}
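
(The bdf here is the standard PCI bus/device/function encoding: bus number
in bits 15..8, devfn in bits 7..0, hence the PCI_BUS_NUM()/GET_PCI_DEVFN()
split above. E.g. bdf = 0x3500 names bus 0x35, device 0, function 0.)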
>> +
>> +static void hns3_pmu_set_qid_para(struct hns3_pmu *hns3_pmu, u32 idx, u16 bdf,
>> + u16 queue)
>> +{
>> + u32 val;
>> +
>> + val = GET_PCI_DEVFN(bdf);
>> + val |= (u32)queue << HNS3_PMU_QID_PARA_QUEUE_S;
>> + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_QID_PARA, idx, val);
>> +}
>> +
>> +static bool hns3_pmu_qid_req_start(struct hns3_pmu *hns3_pmu, u32 idx)
>> +{
>> + bool queue_id_valid = false;
>> + u32 reg_qid_ctrl, val;
>> + int err;
>> +
>> + /* enable queue id request */
>> + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_QID_CTRL, idx,
>> + HNS3_PMU_QID_CTRL_REQ_ENABLE);
>> +
>> + reg_qid_ctrl = hns3_pmu_get_offset(HNS3_PMU_REG_EVENT_QID_CTRL, idx);
>> + err = readl_poll_timeout(hns3_pmu->base + reg_qid_ctrl, val,
>> + val & HNS3_PMU_QID_CTRL_DONE, 1, 1000);
>> + if (err == -ETIMEDOUT) {
>> + pci_err(hns3_pmu->pdev, "QID request timeout!\n");
>> + goto out;
>> + }
>> +
>> + queue_id_valid = (val & HNS3_PMU_QID_CTRL_MISS) == 0;
>> +
>> +out:
>> + /* disable qid request and clear status */
>> + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_QID_CTRL, idx, 0);
>> +
>> + return queue_id_valid;
>> +}
>> +
>> +static bool hns3_pmu_valid_queue(struct hns3_pmu *hns3_pmu, u32 idx, u16 bdf,
>> + u16 queue)
>> +{
>> + hns3_pmu_set_qid_para(hns3_pmu, idx, bdf, queue);
>> +
>> + return hns3_pmu_qid_req_start(hns3_pmu, idx);
>> +}
>> +
>> +static struct hns3_pmu_event_attr *hns3_pmu_get_pmu_event(u8 event, u8 subevent)
>> +{
>> + struct hns3_pmu_event_attr *pmu_event;
>> + struct dev_ext_attribute *eattr;
>> + struct device_attribute *dattr;
>> + struct attribute *attr;
>> + u32 i;
>> +
>> + for (i = 0; i < ARRAY_SIZE(hns3_pmu_events_attr) - 1; i++) {
>> + attr = hns3_pmu_events_attr[i];
>> + dattr = container_of(attr, struct device_attribute, attr);
>> + eattr = container_of(dattr, struct dev_ext_attribute, attr);
>> + pmu_event = (struct hns3_pmu_event_attr *)eattr->var;
>
> var is a void * so there should be no need for an explicit cast.
>
Ok, thanks.
>> +
>> + if (event == pmu_event->event &&
>> + subevent == pmu_event->subevent)
>> + return pmu_event;
>> + }
>> +
>> + return NULL;
>> +}
>> +
>> +static int hns3_pmu_set_func_mode(struct perf_event *event,
>> + struct hns3_pmu *hns3_pmu)
>> +{
>> + struct hw_perf_event *hwc = &event->hw;
>> + u16 bdf = hns3_get_bdf(event);
>> +
>> + if (!hns3_pmu_valid_bdf(hns3_pmu, bdf))
>> + return -ENOENT;
>> +
>> + HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_FUNC);
>> +
>> + return 0;
>> +}
>> +
>> +static int hns3_pmu_set_func_queue_mode(struct perf_event *event,
>> + struct hns3_pmu *hns3_pmu)
>> +{
>> + struct hw_perf_event *hwc = &event->hw;
>> + u16 queue_id = hns3_get_queue(event);
>> + u16 bdf = hns3_get_bdf(event);
>> +
>> + if (!hns3_pmu_valid_bdf(hns3_pmu, bdf))
>> + return -ENOENT;
>> +
>> + if (!hns3_pmu_valid_queue(hns3_pmu, hwc->idx, bdf, queue_id)) {
>> + pci_err(hns3_pmu->pdev, "Invalid queue: %u\n", queue_id);
>> + return -ENOENT;
>> + }
>> +
>> + HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_FUNC_QUEUE);
>> +
>> + return 0;
>> +}
>> +
>> +static bool
>> +hns3_pmu_is_enabled_global_mode(struct perf_event *event,
>> + struct hns3_pmu_event_attr *pmu_event)
>> +{
>> + u8 global = hns3_get_global(event);
>> +
>> + return pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_GLOBAL &&
>> + !!global;
>> +}
>> +
>> +static bool hns3_pmu_is_enabled_func_mode(struct perf_event *event,
>> + struct hns3_pmu_event_attr *pmu_event)
>> +{
>> + u16 queue_id = hns3_get_queue(event);
>> + u16 bdf = hns3_get_bdf(event);
>> +
>> + return pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC &&
>> + queue_id == HNS3_PMU_FILTER_ALL_QUEUE && !!bdf;
>> +}
>> +
>> +static bool
>> +hns3_pmu_is_enabled_func_queue_mode(struct perf_event *event,
>> + struct hns3_pmu_event_attr *pmu_event)
>> +{
>> + u16 queue_id = hns3_get_queue(event);
>> + u16 bdf = hns3_get_bdf(event);
>> +
>> + return pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_QUEUE &&
>> + queue_id != HNS3_PMU_FILTER_ALL_QUEUE && !!bdf;
>> +}
>> +
>> +static bool hns3_pmu_is_enabled_port_mode(struct perf_event *event,
>> + struct hns3_pmu_event_attr *pmu_event)
>> +{
>> + u8 tc_id = hns3_get_tc(event);
>> +
>> + return pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT &&
>> + tc_id == HNS3_PMU_FILTER_ALL_TC;
>> +}
>> +
>> +static bool
>> +hns3_pmu_is_enabled_port_tc_mode(struct perf_event *event,
>> + struct hns3_pmu_event_attr *pmu_event)
>> +{
>> + u8 tc_id = hns3_get_tc(event);
>> +
>> + return pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT_TC &&
>> + tc_id != HNS3_PMU_FILTER_ALL_TC;
>> +}
>> +
>> +static bool
>> +hns3_pmu_is_enabled_func_intr_mode(struct perf_event *event,
>> + struct hns3_pmu *hns3_pmu,
>> + struct hns3_pmu_event_attr *pmu_event)
>> +{
>> + u16 bdf = hns3_get_bdf(event);
>> +
>> + return pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_INTR &&
>> + hns3_pmu_valid_bdf(hns3_pmu, bdf);
>> +}
>> +
>> +static int hns3_pmu_select_filter_mode(struct perf_event *event,
>> + struct hns3_pmu *hns3_pmu)
>> +{
>> + struct hns3_pmu_event_attr *pmu_event;
>> + struct hw_perf_event *hwc = &event->hw;
>> + u8 subevent_id = hns3_get_subevent(event);
>> + u8 event_id = hns3_get_event(event);
>> +
>> + pmu_event = hns3_pmu_get_pmu_event(event_id, subevent_id);
>> + if (!pmu_event) {
>> + pci_err(hns3_pmu->pdev, "Invalid pmu event\n");
>> + return -ENOENT;
>> + }
>> +
>> + if (hns3_pmu_is_enabled_global_mode(event, pmu_event)) {
>> + HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_GLOBAL);
>> + return 0;
>> + }
>> +
>> + if (hns3_pmu_is_enabled_func_mode(event, pmu_event))
>> + return hns3_pmu_set_func_mode(event, hns3_pmu);
>> +
>> + if (hns3_pmu_is_enabled_func_queue_mode(event, pmu_event))
>> + return hns3_pmu_set_func_queue_mode(event, hns3_pmu);
>> +
>> + if (hns3_pmu_is_enabled_port_mode(event, pmu_event)) {
>> + HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_PORT);
>> + return 0;
>> + }
>> +
>> + if (hns3_pmu_is_enabled_port_tc_mode(event, pmu_event)) {
>> + HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_PORT_TC);
>> + return 0;
>> + }
>> +
>> + if (hns3_pmu_is_enabled_func_intr_mode(event, hns3_pmu, pmu_event)) {
>> + HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_FUNC_INTR);
>> + return 0;
>> + }
>> +
>> + return -ENOENT;
>> +}
>> +
>> +static bool hns3_pmu_validate_event_group(struct perf_event *event)
>> +{
>> + struct perf_event *leader = event->group_leader;
>> + struct perf_event *sibling;
>> + int counters = 1;
>> +
>> + if (!is_software_event(leader)) {
>> + /*
>> + * We must NOT create groups containing mixed PMUs, although
>> + * software events are acceptable
>> + */
>> + if (leader->pmu != event->pmu)
>> + return false;
>> +
>> + /* Increase counter for the leader */
>> + if (leader != event)
>> + counters++;
>> + }
>> +
>> + for_each_sibling_event(sibling, event->group_leader) {
>> + if (is_software_event(sibling))
>> + continue;
>> +
>> + if (sibling->pmu != event->pmu)
>> + return false;
>> +
>> + /* Increase counter for each sibling */
>> + counters++;
>> + }
>> +
>> + return counters <= HNS3_PMU_MAX_COUNTERS;
>> +}
>> +
>> +static u32 hns3_pmu_get_filter_condition(struct perf_event *event)
>> +{
>> + struct hw_perf_event *hwc = &event->hw;
>> + u16 intr_id = hns3_get_intr(event);
>> + u8 port_id = hns3_get_port(event);
>> + u16 bdf = hns3_get_bdf(event);
>> + u8 tc_id = hns3_get_tc(event);
>> + u8 filter_mode;
>> + u32 filter = 0;
>> +
>> + filter_mode = *(u8 *)hwc->addr_filters;
>> + switch (filter_mode) {
>> + case HNS3_PMU_HW_FILTER_PORT:
>> + filter = FILTER_CONDITION_PORT(port_id);
>> + break;
>> + case HNS3_PMU_HW_FILTER_PORT_TC:
>> + filter = FILTER_CONDITION_PORT_TC(port_id, tc_id);
>> + break;
>> + case HNS3_PMU_HW_FILTER_FUNC:
>> + case HNS3_PMU_HW_FILTER_FUNC_QUEUE:
>> + filter = GET_PCI_DEVFN(bdf);
>> + break;
>> + case HNS3_PMU_HW_FILTER_FUNC_INTR:
>> + filter = FILTER_CONDITION_FUNC_INTR(GET_PCI_DEVFN(bdf),
>> + intr_id);
>> + break;
>> + default:
>> + break;
>> + }
>> +
>> + return filter;
>> +}
>> +
>> +static void hns3_pmu_config_filter(struct perf_event *event)
>> +{
>> + struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
>> + struct hw_perf_event *hwc = &event->hw;
>> + u8 filter_mode = *(u8 *)hwc->addr_filters;
>> + u8 subevent_id = hns3_get_subevent(event);
>> + u16 queue_id = hns3_get_queue(event);
>> + u8 event_id = hns3_get_event(event);
>> + u16 bdf = hns3_get_bdf(event);
>> + u32 idx = hwc->idx;
>> + u32 val;
>> +
>> + val = event_id;
>> + val |= subevent_id << HNS3_PMU_CTRL_SUBEVENT_S;
>> + val |= filter_mode << HNS3_PMU_CTRL_FILTER_MODE_S;
>> + val |= HNS3_PMU_EVENT_OVERFLOW_RESTART;
>> + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val);
>> +
>> + val = hns3_pmu_get_filter_condition(event);
>> + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_HIGH, idx, val);
>> +
>> + if (filter_mode == HNS3_PMU_HW_FILTER_FUNC_QUEUE)
>> + hns3_pmu_set_qid_para(hns3_pmu, idx, bdf, queue_id);
>> +}
>> +
>> +static void hns3_pmu_enable_counter(struct hns3_pmu *hns3_pmu,
>> + struct hw_perf_event *hwc)
>> +{
>> + u32 idx = hwc->idx;
>> + u32 val;
>> +
>> + val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx);
>> + val |= HNS3_PMU_EVENT_EN;
>> + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val);
>> +}
>> +
>> +static void hns3_pmu_disable_counter(struct hns3_pmu *hns3_pmu,
>> + struct hw_perf_event *hwc)
>> +{
>> + u32 idx = hwc->idx;
>> + u32 val;
>> +
>> + val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx);
>> + val &= ~HNS3_PMU_EVENT_EN;
>> + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val);
>> +}
>> +
>> +static void hns3_pmu_enable_intr(struct hns3_pmu *hns3_pmu,
>> + struct hw_perf_event *hwc)
>> +{
>> + u32 idx = hwc->idx;
>> + u32 val;
>> +
>> + val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx);
>> + val &= ~HNS3_PMU_INTR_MASK_OVERFLOW;
>> + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx, val);
>> +}
>> +
>> +static void hns3_pmu_disable_intr(struct hns3_pmu *hns3_pmu,
>> + struct hw_perf_event *hwc)
>> +{
>> + u32 idx = hwc->idx;
>> + u32 val;
>> +
>> + val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx);
>> + val |= HNS3_PMU_INTR_MASK_OVERFLOW;
>> + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx, val);
>> +}
>> +
>> +static void hns3_pmu_clear_intr_status(struct hns3_pmu *hns3_pmu, u32 idx)
>> +{
>> + u32 val;
>> +
>> + val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx);
>> + val |= HNS3_PMU_EVENT_STATUS_RESET;
>> + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val);
>> +
>> + val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx);
>> + val &= ~HNS3_PMU_EVENT_STATUS_RESET;
>> + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val);
>> +}
>> +
>> +static void hns3_pmu_read_counter(struct perf_event *event, u64 *cnt,
>> + u64 *cnt_ext)
>> +{
>> + struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
>> + u32 idx = event->hw.idx;
>> +
>> + *cnt = hns3_pmu_readq(hns3_pmu, HNS3_PMU_REG_EVENT_COUNTER, idx);
>> + *cnt_ext = hns3_pmu_readq(hns3_pmu, HNS3_PMU_REG_EVENT_EXT_COUNTER,
>> + idx);
>> +}
>> +
>> +static void hns3_pmu_write_counter(struct perf_event *event, u64 cnt,
>> + u64 cnt_ext)
>> +{
>> + struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
>> + u32 idx = event->hw.idx;
>> +
>> + hns3_pmu_writeq(hns3_pmu, HNS3_PMU_REG_EVENT_COUNTER, idx, cnt);
>> + hns3_pmu_writeq(hns3_pmu, HNS3_PMU_REG_EVENT_EXT_COUNTER, idx, cnt_ext);
>> +}
>> +
>> +static void hns3_pmu_init_counter(struct perf_event *event)
>> +{
>> + struct hw_perf_event *hwc = &event->hw;
>> + struct hw_perf_event_extra *hwc_ext;
>> +
>> + hwc_ext = &hwc->extra_reg;
>> + local64_set(&hwc->prev_count, 0);
>> + hwc_ext->config = 0;
>> + hns3_pmu_write_counter(event, 0, 0);
>> +}
>> +
>> +static u64 hns3_pmu_tick_to_ms(u32 hw_clk_freq, u64 ticks)
>> +{
>> + u64 time_ms = ticks / (hw_clk_freq / 1000);
>> +
>> + if (!time_ms)
>> + time_ms = 1;
>
> Add comment to say why here. If it actually happens we are going
> to get some missleading results.
>
Ok. Normally it should not be 0, as the minimum period used by perf is 1 ms.
The clamp is there to stop the value truncating to 0 when the measured
interval is just under 1 ms.
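To make that concrete with illustrative numbers: at hw_clk_freq = 250 MHz
there are 250000 ticks per ms, so a sample of 249999 ticks (just under 1 ms)
would truncate to 0 in the integer division above, and the clamp keeps the
divisor at 1 ms rather than allowing a divide-by-zero in the callers.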
>> +
>> + return time_ms;
>> +}
>> +
>> +/*
>> + * The performance calculation formula of all types of event is
>> + * performance = data / data_ext.
>> + * The arguments data and data_ext have different meanings for different types
>> + * of events as follow:
>> + * event_type data data_ext
>
> Format this so it's obvious there is a separation between this first line (titles)
> and the data.
>
Ok.
>> + * bandwidth byte number cycles number
>> + * packet rate packet number cycles number
>> + * interrupt rate interrupt number cycles number
>> + * latency cycles number packet number
>> + */
>> +static u64 hns3_pmu_get_performance(u8 event_type, u32 hw_clk_freq, u64 data,
>> + u64 data_ext)
>> +{
>> + u64 result;
>> +
>> + if (!hw_clk_freq || !data || !data_ext)
>> + return 0;
>> +
>> + switch (event_type) {
>> + case HNS3_PMU_EVENT_TYPE_BANDWIDTH:
>> + /* process data to set unit of bandwidth as "KBits/s" */
>> + result = data / hns3_pmu_tick_to_ms(hw_clk_freq, data_ext);
>> + result = BYTES_TO_BITS(result);
>> + break;
>> +
>> + case HNS3_PMU_EVENT_TYPE_PACKET_RATE:
>> + case HNS3_PMU_EVENT_TYPE_INTR_RATE:
>> + /* process data to set unit as "pps" */
>> + result = data * 1000 /
>> + hns3_pmu_tick_to_ms(hw_clk_freq, data_ext);
>> + break;
>> +
>> + case HNS3_PMU_EVENT_TYPE_LATENCY:
>> + /* process data to set unit of latency as "ns" */
>> + result = data / data_ext;
>> + result = result * 1000000000 / hw_clk_freq;
>> + break;
>> +
>> + default:
>> + result = data / data_ext;
>> + break;
>> + }
>> +
>> + return result;
>> +}
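
(A worked example of the bandwidth branch, with illustrative numbers:
hw_clk_freq = 250 MHz, data = 1000000 bytes counted over data_ext = 500000
cycles. hns3_pmu_tick_to_ms() gives 500000 / 250000 = 2 ms, so
result = 1000000 / 2 = 500000 bytes per ms, and BYTES_TO_BITS() yields
4000000. Since bits-per-ms is numerically equal to Kbits-per-second, that
reports 4000000 KBits/s, i.e. 4 Gbit/s for 1 MB transferred in 2 ms.)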
>> +
>> +static int hns3_pmu_event_init(struct perf_event *event)
>> +{
>> + struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
>> + struct hw_perf_event *hwc = &event->hw;
>> + int idx;
>> + int ret;
>> +
>> + if (event->attr.type != event->pmu->type)
>> + return -ENOENT;
>> +
>> + /* Sampling is not supported */
>> + if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
>> + return -EOPNOTSUPP;
>> +
>> + event->cpu = hns3_pmu->on_cpu;
>> +
>> + idx = hns3_pmu_get_event_idx(hns3_pmu);
>> + if (idx < 0) {
>> + pci_err(hns3_pmu->pdev, "Up to %u events are supported!\n",
>> + HNS3_PMU_MAX_COUNTERS);
>> + return -EBUSY;
>> + }
>> +
>> + hwc->idx = idx;
>> +
>> + ret = hns3_pmu_select_filter_mode(event, hns3_pmu);
>> + if (ret) {
>> + pci_err(hns3_pmu->pdev, "Invalid filter, ret = %d.\n", ret);
>> + return ret;
>> + }
>> +
>> + if (!hns3_pmu_validate_event_group(event)) {
>> + pci_err(hns3_pmu->pdev, "Invalid event group.\n");
>> + return -EINVAL;
>> + }
>> +
>> + return 0;
>> +}
>> +
>> +static void hns3_pmu_read(struct perf_event *event)
>> +{
>> + struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
>> + struct hw_perf_event *hwc = &event->hw;
>> + struct hw_perf_event_extra *hwc_ext;
>> + u64 new_cnt_ext, prev_cnt_ext;
>> + u64 new_cnt, prev_cnt, delta;
>> +
>> + hwc_ext = &hwc->extra_reg;
>> + do {
>> + prev_cnt = local64_read(&hwc->prev_count);
>> + prev_cnt_ext = hwc_ext->config;
>> + hns3_pmu_read_counter(event, &new_cnt, &new_cnt_ext);
>> + } while (local64_cmpxchg(&hwc->prev_count, prev_cnt, new_cnt) !=
>> + prev_cnt);
>> +
>> + hwc_ext->config = new_cnt_ext;
>> +
>> + delta = hns3_pmu_get_performance(hns3_get_event(event),
>> + hns3_pmu->hw_clk_freq,
>> + new_cnt - prev_cnt,
>> + new_cnt_ext - prev_cnt_ext);
>> +
>> + local64_add(delta, &event->count);
>> +}
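
(Note that the read path converts the raw counter deltas into the derived
metric via hns3_pmu_get_performance(), so event->count accumulates the
computed bandwidth/rate/latency value rather than a raw hardware count.)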
>> +
>> +static void hns3_pmu_start(struct perf_event *event, int flags)
>> +{
>> + struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
>> + struct hw_perf_event *hwc = &event->hw;
>> +
>> + if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
>> + return;
>> +
>> + WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
>> + hwc->state = 0;
>> +
>> + hns3_pmu_config_filter(event);
>> + hns3_pmu_init_counter(event);
>> + hns3_pmu_enable_intr(hns3_pmu, hwc);
>> + hns3_pmu_enable_counter(hns3_pmu, hwc);
>> +
>> + perf_event_update_userpage(event);
>> +}
>> +
>> +static void hns3_pmu_stop(struct perf_event *event, int flags)
>> +{
>> + struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
>> + struct hw_perf_event *hwc = &event->hw;
>> +
>> + hns3_pmu_disable_counter(hns3_pmu, hwc);
>> + hns3_pmu_disable_intr(hns3_pmu, hwc);
>> +
>> + WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
>> + hwc->state |= PERF_HES_STOPPED;
>> +
>> + if (hwc->state & PERF_HES_UPTODATE)
>> + return;
>> +
>> + /* Read hardware counter and update the perf counter statistics */
>> + hns3_pmu_read(event);
>> + hwc->state |= PERF_HES_UPTODATE;
>> +}
>> +
>> +static int hns3_pmu_add(struct perf_event *event, int flags)
>> +{
>> + struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
>> + struct hw_perf_event *hwc = &event->hw;
>> + int idx;
>> +
>> + hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
>> +
>> + /* Get an available counter index for counting */
>> + idx = hns3_pmu_get_event_idx(hns3_pmu);
>> + if (idx < 0)
>> + return idx;
>> +
>> + hwc->idx = idx;
>> + hns3_pmu->hw_events[idx] = event;
>> +
>> + if (flags & PERF_EF_START)
>> + hns3_pmu_start(event, PERF_EF_RELOAD);
>> +
>> + return 0;
>> +}
>> +
>> +static void hns3_pmu_del(struct perf_event *event, int flags)
>> +{
>> + struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
>> + struct hw_perf_event *hwc = &event->hw;
>> +
>> + hns3_pmu_stop(event, PERF_EF_UPDATE);
>> + hns3_pmu->hw_events[hwc->idx] = NULL;
>> + perf_event_update_userpage(event);
>> +}
>> +
>> +static void hns3_pmu_enable(struct pmu *pmu)
>> +{
>> + struct hns3_pmu *hns3_pmu = to_hns3_pmu(pmu);
>> + u32 val;
>> +
>> + val = readl(hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL);
>> + val |= HNS3_PMU_GLOBAL_START;
>> + writel(val, hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL);
>> +}
>> +
>> +static void hns3_pmu_disable(struct pmu *pmu)
>> +{
>> + struct hns3_pmu *hns3_pmu = to_hns3_pmu(pmu);
>> + u32 val;
>> +
>> + val = readl(hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL);
>> + val &= ~HNS3_PMU_GLOBAL_START;
>> + writel(val, hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL);
>> +}
>> +
>> +static int hns3_pmu_alloc_pmu(struct pci_dev *pdev, struct hns3_pmu *hns3_pmu)
>> +{
>> + u16 device_id;
>> + char *name;
>> + u32 val;
>> + int ret;
>> +
>> + hns3_pmu->base = pci_ioremap_bar(pdev, 2);
>> + if (!hns3_pmu->base) {
>> + pci_err(pdev, "ioremap failed for hns3_pmu resource\n");
>> + ret = -ENOMEM;
>> + goto err_ioremap_bar;
>
> Why is there any cleanup to do if we fail here?
>
Yes, this does seem to be a problem, thank you. I will fix it.
>> + }
>> +
>> + hns3_pmu->hw_clk_freq = readl(hns3_pmu->base + HNS3_PMU_REG_CLOCK_FREQ);
>> +
>> + val = readl(hns3_pmu->base + HNS3_PMU_REG_BDF);
>> + hns3_pmu->bdf_min = val & 0xffff;
>> + hns3_pmu->bdf_max = val >> 16;
>> +
>> + val = readl(hns3_pmu->base + HNS3_PMU_REG_DEVICE_ID);
>> + device_id = val & 0xffff;
>> + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hns3_pmu_%u", device_id);
>> + if (!name) {
>> + pci_err(pdev, "failed to allocate name for HNS3 PMU.\n");
>> + ret = -ENOMEM;
>> + goto err_alloc_dev_name;
>> + }
>> +
>> + hns3_pmu->pdev = pdev;
>> + hns3_pmu->on_cpu = HNS3_PMU_INVALID_CPU_ID;
>> + hns3_pmu->identifier = readl(hns3_pmu->base + HNS3_PMU_REG_VERSION);
>> + hns3_pmu->pmu = (struct pmu) {
>> + .name = name,
>> + .module = THIS_MODULE,
>> + .event_init = hns3_pmu_event_init,
>> + .pmu_enable = hns3_pmu_enable,
>> + .pmu_disable = hns3_pmu_disable,
>> + .add = hns3_pmu_add,
>> + .del = hns3_pmu_del,
>> + .start = hns3_pmu_start,
>> + .stop = hns3_pmu_stop,
>> + .read = hns3_pmu_read,
>> + .task_ctx_nr = perf_invalid_context,
>> + .attr_groups = hns3_pmu_attr_groups,
>> + .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
>> + };
>> +
>> + return 0;
>> +
>> +err_alloc_dev_name:
>> + iounmap(hns3_pmu->base);
>> +err_ioremap_bar:
>> + pci_release_regions(pdev);
>
> Where is matching pci_request_regions?
>
As above, this is a problem, thank you.
>> +
>> + return ret;
>> +}
>> +
>> +static irqreturn_t hns3_pmu_irq(int irq, void *data)
>> +{
>> + struct hns3_pmu *hns3_pmu = data;
>> + u32 intr_status, idx;
>> +
>> + for (idx = 0; idx < HNS3_PMU_MAX_COUNTERS; idx++) {
>> + intr_status = hns3_pmu_readl(hns3_pmu,
>> + HNS3_PMU_REG_EVENT_INTR_STATUS,
>> + idx);
>> +
>> + /*
>> + * As each counter restarts from 0 when it overflows, no extra
>> + * processing is needed, just clear the interrupt status.
>> + */
>> + if (intr_status)
>> + hns3_pmu_clear_intr_status(hns3_pmu, idx);
>> + }
>> +
>> + return IRQ_HANDLED;
>> +}
>> +
>> +static int hns3_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
>> +{
>> + struct hns3_pmu *hns3_pmu;
>> +
>> + hns3_pmu = hlist_entry_safe(node, struct hns3_pmu, node);
>> + if (!hns3_pmu)
>> + return -ENODEV;
>> +
>> + if (hns3_pmu->on_cpu == HNS3_PMU_INVALID_CPU_ID) {
>> + hns3_pmu->on_cpu = cpu;
>> + irq_set_affinity_hint(hns3_pmu->irq, cpumask_of(cpu));
>> + }
>> +
>> + return 0;
>> +}
>> +
>> +static int hns3_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
>> +{
>> + struct hns3_pmu *hns3_pmu;
>> + unsigned int target;
>> +
>> + hns3_pmu = hlist_entry_safe(node, struct hns3_pmu, node);
>> + if (!hns3_pmu)
>> + return -ENODEV;
>> +
>> + /* Nothing to do if this CPU doesn't own the PMU */
>> + if (hns3_pmu->on_cpu != cpu)
>> + return 0;
>> +
>> + /* Choose a new CPU from all online cpus */
>> + target = cpumask_any_but(cpu_online_mask, cpu);
>> + if (target >= nr_cpu_ids)
>> + return 0;
>> +
>> + perf_pmu_migrate_context(&hns3_pmu->pmu, cpu, target);
>> + hns3_pmu->on_cpu = target;
>> + irq_set_affinity_hint(hns3_pmu->irq, cpumask_of(target));
>> +
>> + return 0;
>> +}
>> +
>> +static int hns3_pmu_irq_register(struct pci_dev *pdev,
>> + struct hns3_pmu *hns3_pmu)
>> +{
>> + int irq, ret;
>> +
>> + ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
>> + if (ret < 0) {
>> + pci_err(pdev, "failed to enable MSI vectors, ret = %d.\n", ret);
>> + return ret;
>> + }
>> +
>> + irq = pci_irq_vector(pdev, 0);
>> + ret = request_irq(irq, hns3_pmu_irq, 0, hns3_pmu->pmu.name, hns3_pmu);
>> + if (ret) {
>> + pci_err(pdev, "failed to register irq, ret = %d.\n", ret);
>> + pci_free_irq_vectors(pdev);
>> + return ret;
>> + }
>> +
>> + hns3_pmu->irq = irq;
>> +
>> + return 0;
>> +}
>> +
>> +static void hns3_pmu_irq_unregister(struct pci_dev *pdev,
>> + struct hns3_pmu *hns3_pmu)
>> +{
>> + free_irq(hns3_pmu->irq, hns3_pmu);
>> + pci_free_irq_vectors(pdev);
>> +}
>> +
>> +static int hns3_pmu_init(struct pci_dev *pdev, struct hns3_pmu *hns3_pmu)
>> +{
>> + int ret;
>> +
>> + ret = hns3_pmu_alloc_pmu(pdev, hns3_pmu);
>> + if (ret)
>> + return ret;
>> +
>> + ret = hns3_pmu_irq_register(pdev, hns3_pmu);
>> + if (ret)
>> + goto err_irq_register;
>> +
>> + ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
>> + &hns3_pmu->node);
>> + if (ret) {
>> + pci_err(pdev, "failed to register hotplug, ret = %d.\n", ret);
>> + goto err_register_hotplug;
>> + }
>> +
>> + ret = perf_pmu_register(&hns3_pmu->pmu, hns3_pmu->pmu.name, -1);
>> + if (ret) {
>> + pci_err(pdev, "failed to register HNS3 PMU, ret = %d.\n", ret);
>> + goto err_register_pmu;
>> + }
>> +
>> + return ret;
>> +
>> +err_register_pmu:
>> + cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
>> + &hns3_pmu->node);
>> + irq_set_affinity_hint(hns3_pmu->irq, NULL);
>> +
>> +err_register_hotplug:
>> + hns3_pmu_irq_unregister(pdev, hns3_pmu);
>> +
>> +err_irq_register:
>> + iounmap(hns3_pmu->base);
>> +
>> + return ret;
>> +}
>> +
>> +static void hns3_pmu_uninit(struct pci_dev *pdev)
>> +{
>> + struct hns3_pmu *hns3_pmu = (struct hns3_pmu *)pci_get_drvdata(pdev);
>
> pci_get_drvdata() returns a void * and the c specification allows implicit
> casting to any other pointer type.
>
> struct hns3_pmu *hns3_pmu = pci_get_drvdata(pdev);
>
Ok, thanks.
>
>
>> +
>> + perf_pmu_unregister(&hns3_pmu->pmu);
>> + cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
>> + &hns3_pmu->node);
>> + irq_set_affinity_hint(hns3_pmu->irq, NULL);
>> + hns3_pmu_irq_unregister(pdev, hns3_pmu);
>> + iounmap(hns3_pmu->base);
>> +}
>> +
>> +static int hns3_pmu_init_dev(struct pci_dev *pdev)
>> +{
>> + int ret;
>> +
>> + ret = pci_enable_device(pdev);
>> + if (ret) {
>> + pci_err(pdev, "failed to enable pci device, ret = %d.\n", ret);
>> + return ret;
>> + }
>> +
>> + ret = pci_request_mem_regions(pdev, "hns3_pmu");
>> + if (ret < 0) {
>> + pci_err(pdev, "failed to request pci mem regions, ret = %d.\n",
>> + ret);
>> + pci_disable_device(pdev);
>> + return ret;
>> + }
>> +
>> + pci_set_master(pdev);
>> +
>> + return 0;
>> +}
>> +
>> +static void hns3_pmu_uninit_dev(struct pci_dev *pdev)
>> +{
>> + pci_clear_master(pdev);
>> + pci_release_mem_regions(pdev);
>> + pci_disable_device(pdev);
>> +}
>> +
>> +static int hns3_pmu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
>> +{
>> + struct hns3_pmu *hns3_pmu;
>> + int ret;
>> +
>> + hns3_pmu = devm_kzalloc(&pdev->dev, sizeof(*hns3_pmu), GFP_KERNEL);
>> + if (!hns3_pmu)
>> + return -ENOMEM;
>> +
>> + ret = hns3_pmu_init_dev(pdev);
>> + if (ret)
>> + return ret;
>> +
>> + ret = hns3_pmu_init(pdev, hns3_pmu);
>> + if (ret) {
>> + hns3_pmu_uninit_dev(pdev);
>> + return ret;
>> + }
>> +
>> + pci_set_drvdata(pdev, hns3_pmu);
>> +
>> + return ret;
>> +}
>> +
>> +static void hns3_pmu_remove(struct pci_dev *pdev)
>> +{
>> + hns3_pmu_uninit(pdev);
>> + hns3_pmu_uninit_dev(pdev);
>> + pci_set_drvdata(pdev, NULL);
>
> Fairly sure that there are not paths in which you need this to be set to
> NULL before the driver core will do it anyway.
>
Ok, thanks.
>> +}
>> +
>> +static const struct pci_device_id hns3_pmu_ids[] = {
>> + { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, 0xA22B) },
>> + { 0, }
>> +};
>> +MODULE_DEVICE_TABLE(pci, hns3_pmu_ids);
>> +
>> +static struct pci_driver hns3_pmu_driver = {
>> + .name = "hns3_pmu",
>> + .id_table = hns3_pmu_ids,
>> + .probe = hns3_pmu_probe,
>> + .remove = hns3_pmu_remove,
>> +};
>> +
>> +static int __init hns3_pmu_module_init(void)
>> +{
>> + int ret;
>> +
>> + ret = hns3_pmu_init_filter_mode_group();
>> + if (ret)
>> + return ret;
>> +
>> + ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
>> + "AP_PERF_ARM_HNS3_PMU_ONLINE",
>> + hns3_pmu_online_cpu,
>> + hns3_pmu_offline_cpu);
>> + if (ret) {
>> + pr_err("failed to setup HNS3 PMU hotplug, ret = %d.\n", ret);
>> + goto err_init;
>> + }
>> +
>> + ret = pci_register_driver(&hns3_pmu_driver);
>> + if (ret) {
>> + pr_err("failed to register pci driver, ret = %d.\n", ret);
>> + goto err_hp;
>> + }
>> +
>> + return 0;
>> +
>> +err_hp:
>> + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE);
>> +err_init:
>> + hns3_pmu_uinit_filter_mode_group();
>> + return ret;
>> +}
>> +module_init(hns3_pmu_module_init);
>> +
>> +static void __exit hns3_pmu_module_exit(void)
>> +{
>> + pci_unregister_driver(&hns3_pmu_driver);
>> + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE);
>> + hns3_pmu_uinit_filter_mode_group();
>> +}
>> +module_exit(hns3_pmu_module_exit);
>> +
>> +MODULE_DESCRIPTION("HNS3 PMU driver");
>> +MODULE_LICENSE("GPL v2");
>> diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
>> index 2b73114..2e788f9 100644
>> --- a/include/linux/cpuhotplug.h
>> +++ b/include/linux/cpuhotplug.h
>> @@ -180,6 +180,7 @@ enum cpuhp_state {
>> CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
>> CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
>> CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE,
>> + CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
>> CPUHP_AP_PERF_ARM_L2X0_ONLINE,
>> CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
>> CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE,
>
> .
>