[PATCH] Reinitialize the RX DMA buffer to avoid skb conflicts; handle all RX packets in the DMA ring buffer

YanBo dreamfly281 at gmail.com
Thu May 23 14:03:27 EDT 2013


Hi,

This patch reallocates the RX skb buffer instead of cloning it. If the
skb is merely cloned, the hardware may refill its buffer while the
upper layer is still using it. Moreover, that buffer is still
DMA-mapped at that point, and it must not be accessed by the CPU
before it is unmapped.
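
In driver terms the ownership rule is: attach a fresh skb to the
descriptor, unmap the old buffer, and only then pass it up the stack.
A minimal sketch of that completion step (wcn36xx_dxe_fill_skb is the
helper this patch adds; rx_complete_one is an illustrative name, and
error handling is omitted):

    static void rx_complete_one(struct wcn36xx *wcn,
                                struct wcn36xx_dxe_ctl *ctl)
    {
            struct sk_buff *skb = ctl->skb;
            dma_addr_t dma_addr = ctl->desc->desc.dst_addr_l;

            /* Attach a fresh buffer before the HW can reuse the slot. */
            wcn36xx_dxe_fill_skb(ctl);

            /* The old buffer may be touched by the CPU only after the
             * unmap; only then is it safe to hand it up. */
            dma_unmap_single(NULL, dma_addr, WCN36XX_PKT_SIZE,
                             DMA_FROM_DEVICE);
            wcn36xx_rx_skb(wcn, skb);
    }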

The other change in this patch is that it checks every filled RX
descriptor in the ring buffer and handles them all in one pass: the
hardware may fill several RX descriptors while raising only a single
interrupt, in order to reduce the CPU interrupt load.
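
The handler therefore walks the ring until it reaches a descriptor
the hardware still owns (its VALID bit is set), instead of servicing
a single descriptor per interrupt. Roughly, for the low-priority
channel (a sketch of the loop in wcn36xx_rx_handle_packets below;
rx_drain_ring is an illustrative name and rx_complete_one is the
sketch above):

    static void rx_drain_ring(struct wcn36xx *wcn,
                              struct wcn36xx_dxe_ch *ch)
    {
            struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl;

            /* The HW clears VALID on completion; stop at the first
             * descriptor it has not filled yet. */
            while (!(ctl->desc->desc_ctl.ctrl &
                     WCN36XX_DXE_CTRL_VALID_MASK)) {
                    rx_complete_one(wcn, ctl);
                    /* Re-arm the slot for the DMA engine. */
                    ctl->desc->desc_ctl.ctrl = WCN36XX_DXE_CTRL_RX_L;
                    ctl->desc->desc.src_addr_l = WCN36XX_DXE_WQ_RX_L;
                    ctl = ctl->next;
            }
            ch->head_blk_ctl = ctl;
    }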

(This patch is posted for preliminary review, since it was generated
right after rebasing onto the newest wcn36xx repo. I have tested it
against the previous wcn36xx repo, but have not yet verified it after
the rebase due to lack of time. Feedback is welcome.)


BR /Yanbo


Signed-off-by: Yanbo Li <yanbol at qti.qualcomm.com>
---
 dxe.c  |  171 +++++++++++++++++++++++++++++++++++-----------------------------
 dxe.h  |   28 +++++++++--
 txrx.c |   18 +++----
 3 files changed, 125 insertions(+), 92 deletions(-)

diff --git a/dxe.c b/dxe.c
index d972a56..dd79485 100644
--- a/dxe.c
+++ b/dxe.c
@@ -227,6 +227,26 @@ static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
  (int)reg_data);
  return 0;
 }
+
+static int wcn36xx_dxe_fill_skb(struct wcn36xx_dxe_ctl *cur_dxe_ctl)
+{
+ struct wcn36xx_dxe_desc *cur_dxe_desc = cur_dxe_ctl->desc;
+ struct sk_buff *skb;
+
+ skb = alloc_skb(WCN36XX_PKT_SIZE, GFP_ATOMIC);
+ if (skb == NULL) {
+ wcn36xx_error("DMA buffer allocation failed");
+ return -1;
+ }
+ cur_dxe_desc->desc.dst_addr_l = dma_map_single(NULL,
+       skb_tail_pointer(skb),
+       WCN36XX_PKT_SIZE,
+       DMA_FROM_DEVICE);
+ cur_dxe_ctl->skb = skb;
+ return 0;
+}
+
+
 static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *wcn_ch)
 {
  int i;
@@ -235,14 +255,7 @@ static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *

  for ( i = 0; i < wcn_ch->desc_num; i++)
  {
- cur_dxe_ctl->skb = alloc_skb(WCN36XX_PKT_SIZE, GFP_ATOMIC);
- skb_reserve(cur_dxe_ctl->skb, WCN36XX_PKT_SIZE);
- skb_headroom(cur_dxe_ctl->skb);
- skb_push(cur_dxe_ctl->skb, WCN36XX_PKT_SIZE);
- cur_dxe_ctl->desc->desc.dst_addr_l = dma_map_single(NULL,
- cur_dxe_ctl->skb->data,
- cur_dxe_ctl->skb->len,
- DMA_FROM_DEVICE);
+ wcn36xx_dxe_fill_skb(cur_dxe_ctl);
  cur_dxe_ctl = cur_dxe_ctl->next;
  }
  return 0;
@@ -292,92 +305,94 @@ out_err:
  return -ret;

 }
-void wcn36xx_rx_ready_work(struct work_struct *work)
-{
- struct wcn36xx *wcn =
- container_of(work, struct wcn36xx, rx_ready_work);
- struct wcn36xx_dxe_desc *cur_dxe_desc = NULL;
- struct wcn36xx_dxe_ctl *cur_dxe_ctl = NULL;
- int intSrc;
- int int_reason;

- // TODO read which channel generated INT by checking mask
- wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &intSrc);
-
- wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe rx ready channel %x %s%s",
-    intSrc,
-    intSrc & WCN36XX_INT_MASK_CHAN_RX_H ? "high " : "",
-    intSrc & WCN36XX_INT_MASK_CHAN_RX_L ? "low " : "");
-
- // check if this channel is High or Low. Assume high
- if (intSrc & WCN36XX_INT_MASK_CHAN_RX_H) {
- /* Read Channel Status Register to know why INT Happen */
- wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_H, &int_reason);
- // TODO if status says erro handle that
+int wcn36xx_rx_handle_packets(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
+{
+ struct wcn36xx_dxe_ctl *cur_dxe_ctl = ch->head_blk_ctl;
+ struct wcn36xx_dxe_desc *cur_dxe_desc = cur_dxe_ctl->desc;
+ dma_addr_t  dma_addr;
+ struct sk_buff *skb;

- /* Clean up all the INT within this channel */
- wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_INT_CLR_ADDR, WCN36XX_INT_MASK_CHAN_RX_H);
+ while (!(cur_dxe_desc->desc_ctl.ctrl & WCN36XX_DXE_CTRL_VALID_MASK)) {
+ skb = cur_dxe_ctl->skb;
+ dma_addr = cur_dxe_desc->desc.dst_addr_l;
+ wcn36xx_dxe_fill_skb(cur_dxe_ctl);

- /* Clean up ED INT Bit */
- wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_INT_END_CLR_ADDR, WCN36XX_INT_MASK_CHAN_RX_H);
+ switch (ch->ch_type) {
+ case WCN36XX_DXE_CH_RX_L:
+ cur_dxe_desc->desc_ctl.ctrl = WCN36XX_DXE_CTRL_RX_L;
+ cur_dxe_desc->desc.src_addr_l = WCN36XX_DXE_WQ_RX_L;
+ break;
+ case WCN36XX_DXE_CH_RX_H:
+ cur_dxe_desc->desc_ctl.ctrl = WCN36XX_DXE_CTRL_RX_H;
+ cur_dxe_desc->desc.src_addr_l = WCN36XX_DXE_WQ_RX_H;
+ break;
+ default:
+ wcn36xx_warn("Unknow received channel");
+ }

- cur_dxe_ctl = wcn->dxe_rx_h_ch.head_blk_ctl;
+ dma_unmap_single(NULL, dma_addr, WCN36XX_PKT_SIZE, DMA_FROM_DEVICE);
+ wcn36xx_rx_skb(wcn, skb);
+ cur_dxe_ctl = cur_dxe_ctl->next;
  cur_dxe_desc = cur_dxe_ctl->desc;
+ }
+ ch->head_blk_ctl = cur_dxe_ctl;

- wcn36xx_dbg(WCN36XX_DBG_DXE,
-    "dxe rx ready order %d ctl %x",
-    cur_dxe_ctl->ctl_blk_order,
-    cur_dxe_desc->desc_ctl.ctrl);
+ return 0;
+}

- dma_unmap_single( NULL,
- (dma_addr_t)cur_dxe_desc->desc.dst_addr_l,
- cur_dxe_ctl->skb->len,
- DMA_FROM_DEVICE );
- wcn36xx_rx_skb(wcn, cur_dxe_ctl->skb);
+void wcn36xx_rx_ready_work(struct work_struct *work)
+{
+ struct wcn36xx *wcn =
+ container_of(work, struct wcn36xx, rx_ready_work);
+ int int_src;
+ int int_reason;

- // Release RX descriptor
- cur_dxe_desc->desc_ctl.ctrl = WCN36XX_DXE_CTRL_RX_H;

- wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR, WCN36XX_INT_MASK_CHAN_RX_H);
+ wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);

- // Reenable RX Low not high
- wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CTL_RX_L, WCN36XX_DXE_CH_DEFAULT_CTL_RX_L);
+ /* TX_LOW_PRI */
+ if (int_src & WCN36XX_DXE_INT_CH0_MASK) {
+ wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_0_CH0_STATUS,
+  &int_reason);
+ }

- wcn->dxe_rx_h_ch.head_blk_ctl = cur_dxe_ctl->next;
- } else if (intSrc & WCN36XX_INT_MASK_CHAN_RX_L) {
- /* Read Channel Status Register to know why INT Happen */
- wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_L, &int_reason);
- // TODO if status says erro handle that
+ /* RX_LOW_PRI */
+ if (int_src & WCN36XX_DXE_INT_CH1_MASK) {
+ wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_0_CH1_STATUS,
+  &int_reason);
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
+   WCN36XX_DXE_INT_CH1_MASK);
+ wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_l_ch));
+ }

+ /* RX_HIGH_PRI */
+ if (int_src & WCN36XX_DXE_INT_CH3_MASK) {
+ wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_0_CH3_STATUS,
+  &int_reason);
  /* Clean up all the INT within this channel */
- wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_INT_CLR_ADDR, WCN36XX_INT_MASK_CHAN_RX_L);
-
- /* Clean up ED INT Bit */
- wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_INT_END_CLR_ADDR, WCN36XX_INT_MASK_CHAN_RX_L);
-
- cur_dxe_ctl = wcn->dxe_rx_l_ch.head_blk_ctl;
- cur_dxe_desc = cur_dxe_ctl->desc;
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
+   WCN36XX_DXE_INT_CH3_MASK);

- wcn36xx_dbg(WCN36XX_DBG_DXE,
-    "dxe rx ready order %d ctl %x",
-    cur_dxe_ctl->ctl_blk_order,
-    cur_dxe_desc->desc_ctl.ctrl);
-
- dma_unmap_single( NULL,
- (dma_addr_t)cur_dxe_desc->desc.dst_addr_l,
- cur_dxe_ctl->skb->len,
- DMA_FROM_DEVICE );
- wcn36xx_rx_skb(wcn, cur_dxe_ctl->skb);
-
- // Release RX descriptor
- cur_dxe_desc->desc_ctl.ctrl = WCN36XX_DXE_CTRL_RX_L;
-
- wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR, WCN36XX_INT_MASK_CHAN_RX_L);
+ wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_h_ch));
+ }

- // Reenable RX Low not high
- wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CTL_RX_H, WCN36XX_DXE_CH_DEFAULT_CTL_RX_H);
+ /* TX_HIGH_PRI */
+ if (int_src & WCN36XX_DXE_INT_CH4_MASK) {
+ int_reason = 0;
+ wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_ERR,
+  &int_reason);
+ if (int_reason & WCN36XX_DXE_INT_CH4_MASK)
+ wcn36xx_info("TX high pri interrupt error");
+ /* Clean up all the INT within this channel */
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
+   WCN36XX_DXE_INT_CH4_MASK);
+ }

- wcn->dxe_rx_l_ch.head_blk_ctl = cur_dxe_ctl->next;
+ if (!int_src) {
+ wcn36xx_warn("No DXE interrupt triggered");
  }

  enable_irq(wcn->rx_irq);
diff --git a/dxe.h b/dxe.h
index f67a89d..0778be8 100644
--- a/dxe.h
+++ b/dxe.h
@@ -23,6 +23,7 @@ TX_LOW = DMA0
 TX_HIGH = DMA4
 RX_LOW = DMA1
 RX_HIGH = DMA3
+H2H_TEST_RX_TX = DMA2
 */

 // DXE registers
@@ -48,6 +49,9 @@ RX_HIGH = DMA3
 #define WCN36XX_DXE_WQ_RX_L 0xB
 #define WCN36XX_DXE_WQ_RX_H 0x4

+/* DXE descriptor control field */
+#define WCN36XX_DXE_CTRL_VALID_MASK (0x00000001)
+
 // TODO This must calculated properly but not hardcoded
 // DXE default control register values
 #define WCN36XX_DXE_CH_DEFAULT_CTL_RX_L 0x847EAD2F
@@ -57,14 +61,30 @@ RX_HIGH = DMA3

 // Common DXE registers
 #define WCN36XX_DXE_MEM_CSR           WCN36XX_DXE_MEM_REG + 0x00
+#define WCN36XX_DXE_REG_CSR_RESET             WCN36XX_DXE_MEM_REG + 0x00
 #define WCN36XX_DXE_ENCH_ADDR           WCN36XX_DXE_MEM_REG + 0x04
 #define WCN36XX_DXE_REG_CH_EN             WCN36XX_DXE_MEM_REG + 0x08
+#define WCN36XX_DXE_REG_CH_DONE             WCN36XX_DXE_MEM_REG + 0x0C
+#define WCN36XX_DXE_REG_CH_ERR             WCN36XX_DXE_MEM_REG + 0x10
 #define WCN36XX_DXE_INT_MASK_REG             WCN36XX_DXE_MEM_REG + 0x18
 #define WCN36XX_DXE_INT_SRC_RAW_REG             WCN36XX_DXE_MEM_REG + 0x20
-#define WCN36XX_DXE_INT_CLR_ADDR             WCN36XX_DXE_MEM_REG + 0x30
-#define WCN36XX_DXE_INT_END_CLR_ADDR         WCN36XX_DXE_MEM_REG + 0x34
-
-#define WCN36XX_DXE_REG_CSR_RESET             WCN36XX_DXE_MEM_REG+0x00
+ #define WCN36XX_DXE_INT_CH6_MASK (0x00000040)
+ #define WCN36XX_DXE_INT_CH5_MASK (0x00000020)
+ #define WCN36XX_DXE_INT_CH4_MASK (0x00000010)
+ #define WCN36XX_DXE_INT_CH3_MASK (0x00000008)
+ #define WCN36XX_DXE_INT_CH2_MASK (0x00000004)
+ #define WCN36XX_DXE_INT_CH1_MASK (0x00000002)
+ #define WCN36XX_DXE_INT_CH0_MASK (0x00000001)
+#define WCN36XX_DXE_0_INT_CLR (WCN36XX_DXE_MEM_REG + 0x30)
+#define WCN36XX_DXE_0_INT_ED_CLR (WCN36XX_DXE_MEM_REG + 0x34)
+#define WCN36XX_DXE_0_INT_DONE_CLR (WCN36XX_DXE_MEM_REG + 0x38)
+#define WCN36XX_DXE_0_INT_ERR_CLR (WCN36XX_DXE_MEM_REG + 0x3C)
+
+#define WCN36XX_DXE_0_CH0_STATUS (WCN36XX_DXE_MEM_REG + 0x404)
+#define WCN36XX_DXE_0_CH1_STATUS (WCN36XX_DXE_MEM_REG + 0x444)
+#define WCN36XX_DXE_0_CH2_STATUS (WCN36XX_DXE_MEM_REG + 0x484)
+#define WCN36XX_DXE_0_CH3_STATUS (WCN36XX_DXE_MEM_REG + 0x4C4)
+#define WCN36XX_DXE_0_CH4_STATUS (WCN36XX_DXE_MEM_REG + 0x504)

 #define WCN36XX_DXE_REG_RESET 0x5c89

diff --git a/txrx.c b/txrx.c
index 873cc6a..1d33a18 100644
--- a/txrx.c
+++ b/txrx.c
@@ -18,18 +18,16 @@
 #include <linux/ieee80211.h>
 int  wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
 {
- struct sk_buff *skb2 ;
  struct ieee80211_rx_status status;
  struct ieee80211_hdr *hdr;
  struct wcn36xx_rx_bd * bd;

- skb2 = skb_clone(skb, GFP_ATOMIC);
- bd = (struct wcn36xx_rx_bd *)skb2->data;
+ bd = (struct wcn36xx_rx_bd *)skb->data;
  buff_to_be((u32*)bd, sizeof(*bd)/sizeof(u32));

- skb_pull(skb2, bd->pdu.mpdu_header_off);
+ skb_put(skb, bd->pdu.mpdu_header_off + bd->pdu.mpdu_len);
+ skb_pull(skb, bd->pdu.mpdu_header_off);

- skb_trim(skb2, bd->pdu.mpdu_len);
  status.mactime = 10;
  status.freq = wcn->current_channel->center_freq;
  status.band = wcn->current_channel->band;
@@ -38,17 +36,17 @@ int  wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
  status.rate_idx = 1;
  status.flag = 0;
  status.rx_flags = 0;
- memcpy(skb2->cb, &status, sizeof(struct ieee80211_rx_status));
+ memcpy(skb->cb, &status, sizeof(struct ieee80211_rx_status));

- hdr = (struct ieee80211_hdr *) skb2->data;
+ hdr = (struct ieee80211_hdr *) skb->data;

  wcn36xx_dbg(WCN36XX_DBG_RX, "rx skb %p len %d fc %02x",
-    skb2, skb2->len, __le16_to_cpu(hdr->frame_control));
+    skb, skb->len, __le16_to_cpu(hdr->frame_control));

  wcn36xx_dbg_dump(WCN36XX_DBG_RX_DUMP, "SKB <<< ",
- (char*)skb2->data, skb2->len);
+ (char*)skb->data, skb->len);

- ieee80211_rx_ni(wcn->hw, skb2);
+ ieee80211_rx_ni(wcn->hw, skb);

  return 0;
 }
--
1.7.9.5


