[PATCH] Reallocate the RX DMA buffer to avoid skb conflicts; handle all RX packets in the DMA ring buffer
YanBo
dreamfly281 at gmail.com
Sat May 25 12:38:46 EDT 2013
Resending this patch with the correct format and a fix for the memory-error
return value, for review.
This patch reallocates the skb buffer in the RX chain to avoid a HW/SW
memory access race.
It also handles all RX packets in a single interrupt trigger.
BR /Yanbo
On Sun, May 26, 2013 at 12:34 AM, Yanbo Li <dreamfly281 at gmail.com> wrote:
> Signed-off-by: Yanbo Li <yanbol at qti.qualcomm.com>
> ---
> dxe.c | 170 +++++++++++++++++++++++++++++++++-------------------------------
> dxe.h | 28 +++++++++--
> txrx.c | 19 +++-----
> 3 files changed, 121 insertions(+), 96 deletions(-)
>
> diff --git a/dxe.c b/dxe.c
> index 4ee25f4..83c98ef 100644
> --- a/dxe.c
> +++ b/dxe.c
> @@ -205,22 +205,33 @@ static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
> (int)reg_data);
> return 0;
> }
> +
> +static int wcn36xx_dxe_fill_skb(struct wcn36xx_dxe_ctl *cur_dxe_ctl)
> +{
> + struct wcn36xx_dxe_desc *cur_dxe_desc = cur_dxe_ctl->desc;
> + struct sk_buff *skb;
> +
> + skb = alloc_skb(WCN36XX_PKT_SIZE, GFP_ATOMIC);
> + if (skb == NULL) {
> + return -ENOMEM;
> + }
> + cur_dxe_desc->desc.dst_addr_l = dma_map_single(NULL,
> + skb_tail_pointer(skb),
> + WCN36XX_PKT_SIZE,
> + DMA_FROM_DEVICE);
> + cur_dxe_ctl->skb = skb;
> + return 0;
> +}
> +
> +
> static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *wcn_ch)
> {
> int i;
> struct wcn36xx_dxe_ctl *cur_dxe_ctl = NULL;
> cur_dxe_ctl = wcn_ch->head_blk_ctl;
>
> - for ( i = 0; i < wcn_ch->desc_num; i++)
> - {
> - cur_dxe_ctl->skb = alloc_skb(WCN36XX_PKT_SIZE, GFP_ATOMIC);
> - skb_reserve(cur_dxe_ctl->skb, WCN36XX_PKT_SIZE);
> - skb_headroom(cur_dxe_ctl->skb);
> - skb_push(cur_dxe_ctl->skb, WCN36XX_PKT_SIZE);
> - cur_dxe_ctl->desc->desc.dst_addr_l = dma_map_single(NULL,
> - cur_dxe_ctl->skb->data,
> - cur_dxe_ctl->skb->len,
> - DMA_FROM_DEVICE);
> + for ( i = 0; i < wcn_ch->desc_num; i++){
> + wcn36xx_dxe_fill_skb(cur_dxe_ctl);
> cur_dxe_ctl = cur_dxe_ctl->next;
> }
> return 0;
> @@ -270,96 +281,93 @@ out_err:
> return -ret;
>
> }
> -void wcn36xx_rx_ready_work(struct work_struct *work)
> -{
> - struct wcn36xx *wcn =
> - container_of(work, struct wcn36xx, rx_ready_work);
> - struct wcn36xx_dxe_desc *cur_dxe_desc = NULL;
> - struct wcn36xx_dxe_ctl *cur_dxe_ctl = NULL;
> - int intSrc;
> - int int_reason;
>
> - // TODO read which channel generated INT by checking mask
> - wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &intSrc);
> -
> - wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe rx ready channel %x %s%s",
> - intSrc,
> - intSrc & WCN36XX_INT_MASK_CHAN_RX_H ? "high " : "",
> - intSrc & WCN36XX_INT_MASK_CHAN_RX_L ? "low " : "");
> +int wcn36xx_rx_handle_packets(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
> +{
> + struct wcn36xx_dxe_ctl *cur_dxe_ctl = ch->head_blk_ctl;
> + struct wcn36xx_dxe_desc *cur_dxe_desc = cur_dxe_ctl->desc;
> + dma_addr_t dma_addr;
> + struct sk_buff *skb;
>
> - // check if this channel is High or Low. Assume high
> - if (intSrc & WCN36XX_INT_MASK_CHAN_RX_H) {
> - /* Read Channel Status Register to know why INT Happen */
> - wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_H, &int_reason);
> - // TODO if status says erro handle that
> + while (!(cur_dxe_desc->desc_ctl.ctrl & WCN36XX_DXE_CTRL_VALID_MASK)) {
> + skb = cur_dxe_ctl->skb;
> + dma_addr = cur_dxe_desc->desc.dst_addr_l;
> + wcn36xx_dxe_fill_skb(cur_dxe_ctl);
>
> - /* Clean up all the INT within this channel */
> - wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_INT_CLR_ADDR , WCN36XX_INT_MASK_CHAN_RX_H);
> -
> - /* Clean up ED INT Bit */
> - wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_INT_END_CLR_ADDR, WCN36XX_INT_MASK_CHAN_RX_H);
> + switch (ch->ch_type) {
> + case WCN36XX_DXE_CH_RX_L:
> + cur_dxe_desc->desc_ctl.ctrl = WCN36XX_DXE_CTRL_RX_L;
> + cur_dxe_desc->desc.src_addr_l = WCN36XX_DXE_WQ_RX_L;
> + break;
> + case WCN36XX_DXE_CH_RX_H:
> + cur_dxe_desc->desc_ctl.ctrl = WCN36XX_DXE_CTRL_RX_H;
> + cur_dxe_desc->desc.src_addr_l = WCN36XX_DXE_WQ_RX_H;
> + break;
> + default:
> + wcn36xx_warn("Unknow received channel");
> + }
>
> - cur_dxe_ctl = wcn->dxe_rx_h_ch.head_blk_ctl;
> + dma_unmap_single(NULL, dma_addr, WCN36XX_PKT_SIZE, DMA_FROM_DEVICE);
> + wcn36xx_rx_skb(wcn, skb);
> + cur_dxe_ctl = cur_dxe_ctl->next;
> cur_dxe_desc = cur_dxe_ctl->desc;
> + }
> + ch->head_blk_ctl = cur_dxe_ctl;
>
> - wcn36xx_dbg(WCN36XX_DBG_DXE,
> - "dxe rx ready order %d ctl %x",
> - cur_dxe_ctl->ctl_blk_order,
> - cur_dxe_desc->desc_ctl.ctrl);
> -
> - dma_unmap_single( NULL,
> - (dma_addr_t)cur_dxe_desc->desc.dst_addr_l,
> - cur_dxe_ctl->skb->len,
> - DMA_FROM_DEVICE );
> - wcn36xx_rx_skb(wcn, cur_dxe_ctl->skb);
> + return 0;
> +}
>
> - // Release RX descriptor
> - cur_dxe_desc->desc_ctl.ctrl = WCN36XX_DXE_CTRL_RX_H;
> +void wcn36xx_rx_ready_work(struct work_struct *work)
> +{
> + struct wcn36xx *wcn =
> + container_of(work, struct wcn36xx, rx_ready_work);
> + int int_src;
> + int int_reason;
>
> - wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR, WCN36XX_INT_MASK_CHAN_RX_H);
> + wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);
>
> - // Reenable RX Low not high
> - wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CTL_RX_L, WCN36XX_DXE_CH_DEFAULT_CTL_RX_L);
> + /* TX_LOW_PRI */
> + if (int_src & WCN36XX_DXE_INT_CH0_MASK) {
> + wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_0_CH0_STATUS,
> + &int_reason);
> + }
>
> - wcn->dxe_rx_h_ch.head_blk_ctl = cur_dxe_ctl->next;
> - } else if (intSrc & WCN36XX_INT_MASK_CHAN_RX_L) {
> - /* Read Channel Status Register to know why INT Happen */
> - wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_L, &int_reason);
> - // TODO if status says erro handle that
> + /* RX_LOW_PRI */
> + if (int_src & WCN36XX_DXE_INT_CH1_MASK) {
> + wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_0_CH1_STATUS,
> + &int_reason);
> + wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
> + WCN36XX_DXE_INT_CH1_MASK);
> + wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_l_ch));
> + }
>
> + /* RX_HIGH_PRI */
> + if (int_src & WCN36XX_DXE_INT_CH3_MASK) {
> + wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_0_CH3_STATUS,
> + &int_reason);
> /* Clean up all the INT within this channel */
> - wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_INT_CLR_ADDR , WCN36XX_INT_MASK_CHAN_RX_L);
> -
> - /* Clean up ED INT Bit */
> - wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_INT_END_CLR_ADDR, WCN36XX_INT_MASK_CHAN_RX_L);
> -
> - cur_dxe_ctl = wcn->dxe_rx_l_ch.head_blk_ctl;
> - cur_dxe_desc = cur_dxe_ctl->desc;
> -
> - wcn36xx_dbg(WCN36XX_DBG_DXE,
> - "dxe rx ready order %d ctl %x",
> - cur_dxe_ctl->ctl_blk_order,
> - cur_dxe_desc->desc_ctl.ctrl);
> + wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
> + WCN36XX_DXE_INT_CH3_MASK);
> + wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_0_CH0_STATUS,
> + &int_reason);
>
> - dma_unmap_single( NULL,
> - (dma_addr_t)cur_dxe_desc->desc.dst_addr_l,
> - cur_dxe_ctl->skb->len,
> - DMA_FROM_DEVICE );
> - wcn36xx_rx_skb(wcn, cur_dxe_ctl->skb);
> -
> - // Release RX descriptor
> - cur_dxe_desc->desc_ctl.ctrl = WCN36XX_DXE_CTRL_RX_L;
> -
> - wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR, WCN36XX_INT_MASK_CHAN_RX_L);
> + wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_h_ch));
> + }
>
> - // Reenable RX Low not high
> - wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CTL_RX_H, WCN36XX_DXE_CH_DEFAULT_CTL_RX_H);
> + /* TX_HIGH_PRI */
> + if (int_src & WCN36XX_DXE_INT_CH4_MASK) {
> + /* Clean up all the INT within this channel */
> + wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
> + WCN36XX_DXE_INT_CH4_MASK);
> + }
>
> - wcn->dxe_rx_l_ch.head_blk_ctl = cur_dxe_ctl->next;
> + if (!int_src) {
> + wcn36xx_warn("None DXE interrupt triggerd");
> }
>
> enable_irq(wcn->rx_irq);
> }
> +
> int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
> {
> size_t s;
> diff --git a/dxe.h b/dxe.h
> index 7761d7b..e9b9e0e 100644
> --- a/dxe.h
> +++ b/dxe.h
> @@ -23,6 +23,7 @@ TX_LOW = DMA0
> TX_HIGH = DMA4
> RX_LOW = DMA1
> RX_HIGH = DMA3
> +H2H_TEST_RX_TX = DMA2
> */
>
> // DXE registers
> @@ -48,6 +49,9 @@ RX_HIGH = DMA3
> #define WCN36XX_DXE_WQ_RX_L 0xB
> #define WCN36XX_DXE_WQ_RX_H 0x4
>
> +/* DXE descriptor control filed */
> +#define WCN36XX_DXE_CTRL_VALID_MASK (0x00000001)
> +
> // TODO This must calculated properly but not hardcoded
> // DXE default control register values
> #define WCN36XX_DXE_CH_DEFAULT_CTL_RX_L 0x847EAD2F
> @@ -57,14 +61,30 @@ RX_HIGH = DMA3
>
> // Common DXE registers
> #define WCN36XX_DXE_MEM_CSR WCN36XX_DXE_MEM_REG + 0x00
> +#define WCN36XX_DXE_REG_CSR_RESET WCN36XX_DXE_MEM_REG + 0x00
> #define WCN36XX_DXE_ENCH_ADDR WCN36XX_DXE_MEM_REG + 0x04
> #define WCN36XX_DXE_REG_CH_EN WCN36XX_DXE_MEM_REG + 0x08
> +#define WCN36XX_DXE_REG_CH_DONE WCN36XX_DXE_MEM_REG + 0x0C
> +#define WCN36XX_DXE_REG_CH_ERR WCN36XX_DXE_MEM_REG + 0x10
> #define WCN36XX_DXE_INT_MASK_REG WCN36XX_DXE_MEM_REG + 0x18
> #define WCN36XX_DXE_INT_SRC_RAW_REG WCN36XX_DXE_MEM_REG + 0x20
> -#define WCN36XX_DXE_INT_CLR_ADDR WCN36XX_DXE_MEM_REG + 0x30
> -#define WCN36XX_DXE_INT_END_CLR_ADDR WCN36XX_DXE_MEM_REG + 0x34
> -
> -#define WCN36XX_DXE_REG_CSR_RESET WCN36XX_DXE_MEM_REG+0x00
> + #define WCN36XX_DXE_INT_CH6_MASK (0x00000040)
> + #define WCN36XX_DXE_INT_CH5_MASK (0x00000020)
> + #define WCN36XX_DXE_INT_CH4_MASK (0x00000010)
> + #define WCN36XX_DXE_INT_CH3_MASK (0x00000008)
> + #define WCN36XX_DXE_INT_CH2_MASK (0x00000004)
> + #define WCN36XX_DXE_INT_CH1_MASK (0x00000002)
> + #define WCN36XX_DXE_INT_CH0_MASK (0x00000001)
> +#define WCN36XX_DXE_0_INT_CLR (WCN36XX_DXE_MEM_REG + 0x30)
> +#define WCN36XX_DXE_0_INT_ED_CLR (WCN36XX_DXE_MEM_REG + 0x34)
> +#define WCN36XX_DXE_0_INT_DONE_CLR (WCN36XX_DXE_MEM_REG + 0x38)
> +#define WCN36XX_DXE_0_INT_ERR_CLR (WCN36XX_DXE_MEM_REG + 0x3C)
> +
> +#define WCN36XX_DXE_0_CH0_STATUS (WCN36XX_DXE_MEM_REG + 0x404)
> +#define WCN36XX_DXE_0_CH1_STATUS (WCN36XX_DXE_MEM_REG + 0x444)
> +#define WCN36XX_DXE_0_CH2_STATUS (WCN36XX_DXE_MEM_REG + 0x484)
> +#define WCN36XX_DXE_0_CH3_STATUS (WCN36XX_DXE_MEM_REG + 0x4C4)
> +#define WCN36XX_DXE_0_CH4_STATUS (WCN36XX_DXE_MEM_REG + 0x504)
>
> #define WCN36XX_DXE_REG_RESET 0x5c89
>
> diff --git a/txrx.c b/txrx.c
> index c469e68..40590ac 100644
> --- a/txrx.c
> +++ b/txrx.c
> @@ -21,18 +21,16 @@
>
> int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
> {
> - struct sk_buff *skb2 ;
> struct ieee80211_rx_status status;
> struct ieee80211_hdr *hdr;
> struct wcn36xx_rx_bd * bd;
>
> - skb2 = skb_clone(skb, GFP_ATOMIC);
> - bd = (struct wcn36xx_rx_bd *)skb2->data;
> + bd = (struct wcn36xx_rx_bd *)skb->data;
> buff_to_be((u32*)bd, sizeof(*bd)/sizeof(u32));
>
> - skb_pull(skb2, bd->pdu.mpdu_header_off);
> + skb_put(skb, bd->pdu.mpdu_header_off + bd->pdu.mpdu_len);
> + skb_pull(skb, bd->pdu.mpdu_header_off);
>
> - skb_trim(skb2, bd->pdu.mpdu_len);
> status.mactime = 10;
> status.freq = wcn->current_channel->center_freq;
> status.band = wcn->current_channel->band;
> @@ -41,17 +39,16 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
> status.rate_idx = 1;
> status.flag = 0;
> status.rx_flags = 0;
> - memcpy(skb2->cb, &status, sizeof(struct ieee80211_rx_status));
> -
> - hdr = (struct ieee80211_hdr *) skb2->data;
> + memcpy(skb->cb, &status, sizeof(struct ieee80211_rx_status));
> + hdr = (struct ieee80211_hdr *) skb->data;
>
> wcn36xx_dbg(WCN36XX_DBG_RX, "rx skb %p len %d fc %02x",
> - skb2, skb2->len, __le16_to_cpu(hdr->frame_control));
> + skb, skb->len, __le16_to_cpu(hdr->frame_control));
>
> wcn36xx_dbg_dump(WCN36XX_DBG_RX_DUMP, "SKB <<< ",
> - (char*)skb2->data, skb2->len);
> + (char*)skb->data, skb->len);
>
> - ieee80211_rx_ni(wcn->hw, skb2);
> + ieee80211_rx_ni(wcn->hw, skb);
>
> return 0;
> }
> --
> 1.7.9.5
>
More information about the wcn36xx
mailing list