net: ethernet: ti: davinci_cpdma: add dma mapped submit
[linux-2.6-microblaze.git] / drivers / net / ethernet / ti / davinci_cpdma.c
index 5cf1758..a65edd2 100644
@@ -138,7 +138,8 @@ struct submit_info {
        struct cpdma_chan *chan;
        int directed;
        void *token;
-       void *data;
+       void *data_virt;
+       dma_addr_t data_dma;
        int len;
 };
 
@@ -184,6 +185,8 @@ static struct cpdma_control_info controls[] = {
                                 (directed << CPDMA_TO_PORT_SHIFT));    \
        } while (0)
 
+#define CPDMA_DMA_EXT_MAP              BIT(16)
+
 static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr)
 {
        struct cpdma_desc_pool *pool = ctlr->pool;
@@ -1015,6 +1018,7 @@ static int cpdma_chan_submit_si(struct submit_info *si)
        struct cpdma_chan               *chan = si->chan;
        struct cpdma_ctlr               *ctlr = chan->ctlr;
        int                             len = si->len;
+       int                             swlen = len;
        struct cpdma_desc __iomem       *desc;
        dma_addr_t                      buffer;
        u32                             mode;
@@ -1036,16 +1040,22 @@ static int cpdma_chan_submit_si(struct submit_info *si)
                chan->stats.runt_transmit_buff++;
        }
 
-       buffer = dma_map_single(ctlr->dev, si->data, len, chan->dir);
-       ret = dma_mapping_error(ctlr->dev, buffer);
-       if (ret) {
-               cpdma_desc_free(ctlr->pool, desc, 1);
-               return -EINVAL;
-       }
-
        mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
        cpdma_desc_to_port(chan, mode, si->directed);
 
+       if (si->data_dma) {
+               buffer = si->data_dma;
+               dma_sync_single_for_device(ctlr->dev, buffer, len, chan->dir);
+               swlen |= CPDMA_DMA_EXT_MAP;
+       } else {
+               buffer = dma_map_single(ctlr->dev, si->data_virt, len, chan->dir);
+               ret = dma_mapping_error(ctlr->dev, buffer);
+               if (ret) {
+                       cpdma_desc_free(ctlr->pool, desc, 1);
+                       return -EINVAL;
+               }
+       }
+
        /* Relaxed IO accessors can be used here as there is read barrier
         * at the end of write sequence.
         */
@@ -1055,7 +1065,7 @@ static int cpdma_chan_submit_si(struct submit_info *si)
        writel_relaxed(mode | len, &desc->hw_mode);
        writel_relaxed((uintptr_t)si->token, &desc->sw_token);
        writel_relaxed(buffer, &desc->sw_buffer);
-       writel_relaxed(len, &desc->sw_len);
+       writel_relaxed(swlen, &desc->sw_len);
        desc_read(desc, sw_len);
 
        __cpdma_chan_submit(chan, desc);
@@ -1076,7 +1086,33 @@ int cpdma_chan_idle_submit(struct cpdma_chan *chan, void *token, void *data,
 
        si.chan = chan;
        si.token = token;
-       si.data = data;
+       si.data_virt = data;
+       si.data_dma = 0;
+       si.len = len;
+       si.directed = directed;
+
+       spin_lock_irqsave(&chan->lock, flags);
+       if (chan->state == CPDMA_STATE_TEARDOWN) {
+               spin_unlock_irqrestore(&chan->lock, flags);
+               return -EINVAL;
+       }
+
+       ret = cpdma_chan_submit_si(&si);
+       spin_unlock_irqrestore(&chan->lock, flags);
+       return ret;
+}
+
+int cpdma_chan_idle_submit_mapped(struct cpdma_chan *chan, void *token,
+                                 dma_addr_t data, int len, int directed)
+{
+       struct submit_info si;
+       unsigned long flags;
+       int ret;
+
+       si.chan = chan;
+       si.token = token;
+       si.data_virt = NULL;
+       si.data_dma = data;
        si.len = len;
        si.directed = directed;
 
@@ -1100,7 +1136,33 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
 
        si.chan = chan;
        si.token = token;
-       si.data = data;
+       si.data_virt = data;
+       si.data_dma = 0;
+       si.len = len;
+       si.directed = directed;
+
+       spin_lock_irqsave(&chan->lock, flags);
+       if (chan->state != CPDMA_STATE_ACTIVE) {
+               spin_unlock_irqrestore(&chan->lock, flags);
+               return -EINVAL;
+       }
+
+       ret = cpdma_chan_submit_si(&si);
+       spin_unlock_irqrestore(&chan->lock, flags);
+       return ret;
+}
+
+int cpdma_chan_submit_mapped(struct cpdma_chan *chan, void *token,
+                            dma_addr_t data, int len, int directed)
+{
+       struct submit_info si;
+       unsigned long flags;
+       int ret;
+
+       si.chan = chan;
+       si.token = token;
+       si.data_virt = NULL;
+       si.data_dma = data;
        si.len = len;
        si.directed = directed;
 
@@ -1140,10 +1202,17 @@ static void __cpdma_chan_free(struct cpdma_chan *chan,
        uintptr_t                       token;
 
        token      = desc_read(desc, sw_token);
-       buff_dma   = desc_read(desc, sw_buffer);
        origlen    = desc_read(desc, sw_len);
 
-       dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
+       buff_dma   = desc_read(desc, sw_buffer);
+       if (origlen & CPDMA_DMA_EXT_MAP) {
+               origlen &= ~CPDMA_DMA_EXT_MAP;
+               dma_sync_single_for_cpu(ctlr->dev, buff_dma, origlen,
+                                       chan->dir);
+       } else {
+               dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
+       }
+
        cpdma_desc_free(pool, desc, 1);
        (*chan->handler)((void *)token, outlen, status);
 }
@@ -1354,8 +1423,23 @@ int cpdma_get_num_tx_descs(struct cpdma_ctlr *ctlr)
        return ctlr->num_tx_desc;
 }
 
-void cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc)
+int cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc)
 {
+       unsigned long flags;
+       int temp, ret;
+
+       spin_lock_irqsave(&ctlr->lock, flags);
+
+       temp = ctlr->num_rx_desc;
        ctlr->num_rx_desc = num_rx_desc;
        ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc;
+       ret = cpdma_chan_split_pool(ctlr);
+       if (ret) {
+               ctlr->num_rx_desc = temp;
+               ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc;
+       }
+
+       spin_unlock_irqrestore(&ctlr->lock, flags);
+
+       return ret;
 }