[PATCH] Handle the RX skb data to avoid missing packet or conflict
Yanbo Li
dreamfly281 at gmail.com
Tue May 28 04:11:20 EDT 2013
Reallocate the RX DMA buffer to avoid skb conflict
Handle all RX packets in the DMA ring buffer
Signed-off-by: Yanbo Li <yanbol at qti.qualcomm.com>
---
dxe.c | 164 +++++++++++++++++++++++++++++++---------------------------------
dxe.h | 38 +++++++++++----
txrx.c | 21 ++++-----
3 files changed, 117 insertions(+), 106 deletions(-)
diff --git a/dxe.c b/dxe.c
index dc88f9e..dce2d69 100644
--- a/dxe.c
+++ b/dxe.c
@@ -205,22 +205,32 @@ static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
(int)reg_data);
return 0;
}
-static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *wcn_ch)
+
+static int wcn36xx_dxe_fill_skb(struct wcn36xx_dxe_ctl *cur_dxe_ctl)
+{
+ struct wcn36xx_dxe_desc *cur_dxe_desc = cur_dxe_ctl->desc;
+ struct sk_buff *skb;
+
+ skb = alloc_skb(WCN36XX_PKT_SIZE, GFP_ATOMIC);
+ if (skb == NULL)
+ return -ENOMEM;
+ cur_dxe_desc->desc.dst_addr_l = dma_map_single(NULL,
+ skb_tail_pointer(skb),
+ WCN36XX_PKT_SIZE,
+ DMA_FROM_DEVICE);
+ cur_dxe_ctl->skb = skb;
+ return 0;
+}
+
+static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn,
+ struct wcn36xx_dxe_ch *wcn_ch)
{
int i;
struct wcn36xx_dxe_ctl *cur_dxe_ctl = NULL;
cur_dxe_ctl = wcn_ch->head_blk_ctl;
- for ( i = 0; i < wcn_ch->desc_num; i++)
- {
- cur_dxe_ctl->skb = alloc_skb(WCN36XX_PKT_SIZE, GFP_KERNEL);
- skb_reserve(cur_dxe_ctl->skb, WCN36XX_PKT_SIZE);
- skb_headroom(cur_dxe_ctl->skb);
- skb_push(cur_dxe_ctl->skb, WCN36XX_PKT_SIZE);
- cur_dxe_ctl->desc->desc.dst_addr_l = dma_map_single(NULL,
- cur_dxe_ctl->skb->data,
- cur_dxe_ctl->skb->len,
- DMA_FROM_DEVICE);
+ for (i = 0; i < wcn_ch->desc_num; i++) {
+ wcn36xx_dxe_fill_skb(cur_dxe_ctl);
cur_dxe_ctl = cur_dxe_ctl->next;
}
return 0;
@@ -282,96 +292,80 @@ out_err:
return -ret;
}
-void wcn36xx_rx_ready_work(struct work_struct *work)
-{
- struct wcn36xx *wcn =
- container_of(work, struct wcn36xx, rx_ready_work);
- struct wcn36xx_dxe_desc *cur_dxe_desc = NULL;
- struct wcn36xx_dxe_ctl *cur_dxe_ctl = NULL;
- int intSrc;
- int int_reason;
- // TODO read which channel generated INT by checking mask
- wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &intSrc);
-
- wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe rx ready channel %x %s%s",
- intSrc,
- intSrc & WCN36XX_INT_MASK_CHAN_RX_H ? "high " : "",
- intSrc & WCN36XX_INT_MASK_CHAN_RX_L ? "low " : "");
+int wcn36xx_rx_handle_packets(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
+{
+ struct wcn36xx_dxe_ctl *cur_dxe_ctl = ch->head_blk_ctl;
+ struct wcn36xx_dxe_desc *cur_dxe_desc = cur_dxe_ctl->desc;
+ dma_addr_t dma_addr;
+ struct sk_buff *skb;
- // check if this channel is High or Low. Assume high
- if (intSrc & WCN36XX_INT_MASK_CHAN_RX_H) {
- /* Read Channel Status Register to know why INT Happen */
- wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_H, &int_reason);
- // TODO if status says erro handle that
+ while (!(cur_dxe_desc->desc_ctl.ctrl & WCN36XX_DXE_CTRL_VALID_MASK)) {
+ skb = cur_dxe_ctl->skb;
+ dma_addr = cur_dxe_desc->desc.dst_addr_l;
+ wcn36xx_dxe_fill_skb(cur_dxe_ctl);
- /* Clean up all the INT within this channel */
- wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_INT_CLR_ADDR , WCN36XX_INT_MASK_CHAN_RX_H);
-
- /* Clean up ED INT Bit */
- wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_INT_END_CLR_ADDR, WCN36XX_INT_MASK_CHAN_RX_H);
+ switch (ch->ch_type) {
+ case WCN36XX_DXE_CH_RX_L:
+ cur_dxe_desc->desc_ctl.ctrl = WCN36XX_DXE_CTRL_RX_L;
+ cur_dxe_desc->desc.src_addr_l = WCN36XX_DXE_WQ_RX_L;
+ break;
+ case WCN36XX_DXE_CH_RX_H:
+ cur_dxe_desc->desc_ctl.ctrl = WCN36XX_DXE_CTRL_RX_H;
+ cur_dxe_desc->desc.src_addr_l = WCN36XX_DXE_WQ_RX_H;
+ break;
+ default:
+ wcn36xx_warn("Unknown received channel");
+ }
- cur_dxe_ctl = wcn->dxe_rx_h_ch.head_blk_ctl;
+ dma_unmap_single(NULL, dma_addr, WCN36XX_PKT_SIZE,
+ DMA_FROM_DEVICE);
+ wcn36xx_rx_skb(wcn, skb);
+ cur_dxe_ctl = cur_dxe_ctl->next;
cur_dxe_desc = cur_dxe_ctl->desc;
+ }
+ ch->head_blk_ctl = cur_dxe_ctl;
- wcn36xx_dbg(WCN36XX_DBG_DXE,
- "dxe rx ready order %d ctl %x",
- cur_dxe_ctl->ctl_blk_order,
- cur_dxe_desc->desc_ctl.ctrl);
-
- dma_unmap_single( NULL,
- (dma_addr_t)cur_dxe_desc->desc.dst_addr_l,
- cur_dxe_ctl->skb->len,
- DMA_FROM_DEVICE );
- wcn36xx_rx_skb(wcn, cur_dxe_ctl->skb);
-
- // Release RX descriptor
- cur_dxe_desc->desc_ctl.ctrl = WCN36XX_DXE_CTRL_RX_H;
+ return 0;
+}
- wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR, WCN36XX_INT_MASK_CHAN_RX_H);
+void wcn36xx_rx_ready_work(struct work_struct *work)
+{
+ struct wcn36xx *wcn =
+ container_of(work, struct wcn36xx, rx_ready_work);
+ int int_src;
+ int int_reason;
- // Reenable RX Low not high
- wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CTL_RX_L, WCN36XX_DXE_CH_DEFAULT_CTL_RX_L);
+ wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);
- wcn->dxe_rx_h_ch.head_blk_ctl = cur_dxe_ctl->next;
- } else if (intSrc & WCN36XX_INT_MASK_CHAN_RX_L) {
- /* Read Channel Status Register to know why INT Happen */
- wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_L, &int_reason);
- // TODO if status says erro handle that
+ /* RX_LOW_PRI */
+ if (int_src & WCN36XX_DXE_INT_CH1_MASK) {
+ wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_0_CH1_STATUS,
+ &int_reason);
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
+ WCN36XX_DXE_INT_CH1_MASK);
+ wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_l_ch));
+ }
+ /* RX_HIGH_PRI */
+ if (int_src & WCN36XX_DXE_INT_CH3_MASK) {
+ wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_0_CH3_STATUS,
+ &int_reason);
/* Clean up all the INT within this channel */
- wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_INT_CLR_ADDR , WCN36XX_INT_MASK_CHAN_RX_L);
-
- /* Clean up ED INT Bit */
- wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_INT_END_CLR_ADDR, WCN36XX_INT_MASK_CHAN_RX_L);
-
- cur_dxe_ctl = wcn->dxe_rx_l_ch.head_blk_ctl;
- cur_dxe_desc = cur_dxe_ctl->desc;
-
- wcn36xx_dbg(WCN36XX_DBG_DXE,
- "dxe rx ready order %d ctl %x",
- cur_dxe_ctl->ctl_blk_order,
- cur_dxe_desc->desc_ctl.ctrl);
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
+ WCN36XX_DXE_INT_CH3_MASK);
+ wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_0_CH0_STATUS,
+ &int_reason);
- dma_unmap_single( NULL,
- (dma_addr_t)cur_dxe_desc->desc.dst_addr_l,
- cur_dxe_ctl->skb->len,
- DMA_FROM_DEVICE );
- wcn36xx_rx_skb(wcn, cur_dxe_ctl->skb);
-
- // Release RX descriptor
- cur_dxe_desc->desc_ctl.ctrl = WCN36XX_DXE_CTRL_RX_L;
-
- wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR, WCN36XX_INT_MASK_CHAN_RX_L);
-
- // Reenable RX Low not high
- wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CTL_RX_H, WCN36XX_DXE_CH_DEFAULT_CTL_RX_H);
-
- wcn->dxe_rx_l_ch.head_blk_ctl = cur_dxe_ctl->next;
+ wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_h_ch));
}
+ if (!int_src)
+ wcn36xx_warn("No DXE interrupt triggered");
+
enable_irq(wcn->rx_irq);
}
+
int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
{
size_t s;
diff --git a/dxe.h b/dxe.h
index 7761d7b..d8ccb10 100644
--- a/dxe.h
+++ b/dxe.h
@@ -23,6 +23,7 @@ TX_LOW = DMA0
TX_HIGH = DMA4
RX_LOW = DMA1
RX_HIGH = DMA3
+H2H_TEST_RX_TX = DMA2
*/
// DXE registers
@@ -48,6 +49,9 @@ RX_HIGH = DMA3
#define WCN36XX_DXE_WQ_RX_L 0xB
#define WCN36XX_DXE_WQ_RX_H 0x4
+/* DXE descriptor control field */
+#define WCN36XX_DXE_CTRL_VALID_MASK (0x00000001)
+
// TODO This must calculated properly but not hardcoded
// DXE default control register values
#define WCN36XX_DXE_CH_DEFAULT_CTL_RX_L 0x847EAD2F
@@ -56,15 +60,31 @@ RX_HIGH = DMA3
#define WCN36XX_DXE_CH_DEFAULT_CTL_TX_L 0x843e8b4d
// Common DXE registers
-#define WCN36XX_DXE_MEM_CSR WCN36XX_DXE_MEM_REG + 0x00
-#define WCN36XX_DXE_ENCH_ADDR WCN36XX_DXE_MEM_REG + 0x04
-#define WCN36XX_DXE_REG_CH_EN WCN36XX_DXE_MEM_REG + 0x08
-#define WCN36XX_DXE_INT_MASK_REG WCN36XX_DXE_MEM_REG + 0x18
-#define WCN36XX_DXE_INT_SRC_RAW_REG WCN36XX_DXE_MEM_REG + 0x20
-#define WCN36XX_DXE_INT_CLR_ADDR WCN36XX_DXE_MEM_REG + 0x30
-#define WCN36XX_DXE_INT_END_CLR_ADDR WCN36XX_DXE_MEM_REG + 0x34
-
-#define WCN36XX_DXE_REG_CSR_RESET WCN36XX_DXE_MEM_REG+0x00
+#define WCN36XX_DXE_MEM_CSR (WCN36XX_DXE_MEM_REG + 0x00)
+#define WCN36XX_DXE_REG_CSR_RESET (WCN36XX_DXE_MEM_REG + 0x00)
+#define WCN36XX_DXE_ENCH_ADDR (WCN36XX_DXE_MEM_REG + 0x04)
+#define WCN36XX_DXE_REG_CH_EN (WCN36XX_DXE_MEM_REG + 0x08)
+#define WCN36XX_DXE_REG_CH_DONE (WCN36XX_DXE_MEM_REG + 0x0C)
+#define WCN36XX_DXE_REG_CH_ERR (WCN36XX_DXE_MEM_REG + 0x10)
+#define WCN36XX_DXE_INT_MASK_REG (WCN36XX_DXE_MEM_REG + 0x18)
+#define WCN36XX_DXE_INT_SRC_RAW_REG (WCN36XX_DXE_MEM_REG + 0x20)
+ #define WCN36XX_DXE_INT_CH6_MASK 0x00000040
+ #define WCN36XX_DXE_INT_CH5_MASK 0x00000020
+ #define WCN36XX_DXE_INT_CH4_MASK 0x00000010
+ #define WCN36XX_DXE_INT_CH3_MASK 0x00000008
+ #define WCN36XX_DXE_INT_CH2_MASK 0x00000004
+ #define WCN36XX_DXE_INT_CH1_MASK 0x00000002
+ #define WCN36XX_DXE_INT_CH0_MASK 0x00000001
+#define WCN36XX_DXE_0_INT_CLR (WCN36XX_DXE_MEM_REG + 0x30)
+#define WCN36XX_DXE_0_INT_ED_CLR (WCN36XX_DXE_MEM_REG + 0x34)
+#define WCN36XX_DXE_0_INT_DONE_CLR (WCN36XX_DXE_MEM_REG + 0x38)
+#define WCN36XX_DXE_0_INT_ERR_CLR (WCN36XX_DXE_MEM_REG + 0x3C)
+
+#define WCN36XX_DXE_0_CH0_STATUS (WCN36XX_DXE_MEM_REG + 0x404)
+#define WCN36XX_DXE_0_CH1_STATUS (WCN36XX_DXE_MEM_REG + 0x444)
+#define WCN36XX_DXE_0_CH2_STATUS (WCN36XX_DXE_MEM_REG + 0x484)
+#define WCN36XX_DXE_0_CH3_STATUS (WCN36XX_DXE_MEM_REG + 0x4C4)
+#define WCN36XX_DXE_0_CH4_STATUS (WCN36XX_DXE_MEM_REG + 0x504)
#define WCN36XX_DXE_REG_RESET 0x5c89
diff --git a/txrx.c b/txrx.c
index 97bc6d0..75c5a2a 100644
--- a/txrx.c
+++ b/txrx.c
@@ -19,20 +19,18 @@
#define RSSI0(x) (100 - ((x->phy_stat0 >> 24) & 0xff))
-int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
+int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
{
- struct sk_buff *skb2 ;
struct ieee80211_rx_status status;
struct ieee80211_hdr *hdr;
struct wcn36xx_rx_bd * bd;
- skb2 = skb_clone(skb, GFP_KERNEL);
- bd = (struct wcn36xx_rx_bd *)skb2->data;
+ bd = (struct wcn36xx_rx_bd *)skb->data;
buff_to_be((u32*)bd, sizeof(*bd)/sizeof(u32));
- skb_pull(skb2, bd->pdu.mpdu_header_off);
+ skb_put(skb, bd->pdu.mpdu_header_off + bd->pdu.mpdu_len);
+ skb_pull(skb, bd->pdu.mpdu_header_off);
- skb_trim(skb2, bd->pdu.mpdu_len);
status.mactime = 10;
status.freq = wcn->current_channel->center_freq;
status.band = wcn->current_channel->band;
@@ -41,17 +39,16 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
status.rate_idx = 1;
status.flag = 0;
status.rx_flags = 0;
- memcpy(skb2->cb, &status, sizeof(struct ieee80211_rx_status));
-
- hdr = (struct ieee80211_hdr *) skb2->data;
+ memcpy(skb->cb, &status, sizeof(struct ieee80211_rx_status));
+ hdr = (struct ieee80211_hdr *) skb->data;
wcn36xx_dbg(WCN36XX_DBG_RX, "rx skb %p len %d fc %02x",
- skb2, skb2->len, __le16_to_cpu(hdr->frame_control));
+ skb, skb->len, __le16_to_cpu(hdr->frame_control));
wcn36xx_dbg_dump(WCN36XX_DBG_RX_DUMP, "SKB <<< ",
- (char*)skb2->data, skb2->len);
+ (char *)skb->data, skb->len);
- ieee80211_rx_ni(wcn->hw, skb2);
+ ieee80211_rx_ni(wcn->hw, skb);
return 0;
}
--
1.7.9.5
More information about the wcn36xx
mailing list