[PATCH] Back off if ring buffer is full
Eugene Krasnikov
k.eugene.e at gmail.com
Tue Jul 16 09:20:52 EDT 2013
This is a patch for bug https://github.com/KrasnikovEugene/wcn36xx/issues/83.
Pull request: https://github.com/KrasnikovEugene/wcn36xx/pull/85
2013/7/16 Eugene Krasnikov <k.eugene.e at gmail.com>:
> If the ring is full, stop the mac80211 queues so upper layers will not try
> to send any frames until the bottom half has capacity to send them again.
>
> Signed-off-by: Eugene Krasnikov <k.eugene.e at gmail.com>
> ---
> dxe.c | 22 ++++++++++++++++++++++
> dxe.h | 1 +
> wcn36xx.h | 1 +
> 3 files changed, 24 insertions(+)
>
> diff --git a/dxe.c b/dxe.c
> index fd6cbae..502e0df 100644
> --- a/dxe.c
> +++ b/dxe.c
> @@ -319,6 +319,7 @@ static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
> {
> struct wcn36xx_dxe_ctl *ctl = ch->tail_blk_ctl;
> struct ieee80211_tx_info *info;
> + unsigned long flags;
>
> while (ctl != ch->head_blk_ctl &&
> !(ctl->desc->ctrl & WCN36XX_DXE_CTRL_VALID_MASK)) {
> @@ -330,6 +331,13 @@ static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
> /* Keep frame until TX status comes */
> ieee80211_free_txskb(wcn->hw, ctl->skb);
> }
> + spin_lock_irqsave(&ctl->skb_lock, flags);
> + if (wcn->queues_stopped) {
> + wcn->queues_stopped = false;
> + ieee80211_wake_queues(wcn->hw);
> + }
> + spin_unlock_irqrestore(&ctl->skb_lock, flags);
> +
> ctl->skb = NULL;
> }
> ctl = ctl->next;
> @@ -539,10 +547,24 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
> struct wcn36xx_dxe_ctl *ctl = NULL;
> struct wcn36xx_dxe_desc *desc = NULL;
> struct wcn36xx_dxe_ch *ch = NULL;
> + unsigned long flags;
>
> ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch;
>
> ctl = ch->head_blk_ctl;
> + spin_lock_irqsave(&ctl->next->skb_lock, flags);
> +       /*
> +        * If skb is not NULL, we have reached the tail of the ring and the
> +        * ring is therefore full. Stop the queues to let mac80211 back off
> +        * until the ring has an empty slot again.
> +        */
> + if (NULL != ctl->next->skb) {
> + ieee80211_stop_queues(wcn->hw);
> + wcn->queues_stopped = true;
> + spin_unlock_irqrestore(&ctl->next->skb_lock, flags);
> + return -EBUSY;
> + }
> + spin_unlock_irqrestore(&ctl->next->skb_lock, flags);
> ctl->skb = NULL;
> desc = ctl->desc;
>
> diff --git a/dxe.h b/dxe.h
> index 0b154bb..16f0b12 100644
> --- a/dxe.h
> +++ b/dxe.h
> @@ -195,6 +195,7 @@ struct wcn36xx_dxe_ctl {
> unsigned int desc_phy_addr;
> int ctl_blk_order;
> struct sk_buff *skb;
> + spinlock_t skb_lock;
> void *bd_cpu_addr;
> dma_addr_t bd_phy_addr;
> };
> diff --git a/wcn36xx.h b/wcn36xx.h
> index 743325e..ff349ea 100644
> --- a/wcn36xx.h
> +++ b/wcn36xx.h
> @@ -166,6 +166,7 @@ struct wcn36xx {
>
> /* For synchronization of DXE resources from BH, IRQ and WQ contexts */
> spinlock_t dxe_lock;
> + bool queues_stopped;
>
> /* Memory pools */
> struct wcn36xx_dxe_mem_pool mgmt_mem_pool;
> --
> 1.7.11.3
>
--
Best regards,
Eugene
More information about the wcn36xx mailing list