diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 35bf14d..a65edd2 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -134,6 +134,15 @@ struct cpdma_control_info {
 #define ACCESS_RW      (ACCESS_RO | ACCESS_WO)
 };
 
+struct submit_info {
+       struct cpdma_chan *chan;
+       int directed;
+       void *token;
+       void *data_virt;
+       dma_addr_t data_dma;
+       int len;
+};
+
 static struct cpdma_control_info controls[] = {
        [CPDMA_TX_RLIM]           = {CPDMA_DMACONTROL,  8,  0xffff, ACCESS_RW},
        [CPDMA_CMD_IDLE]          = {CPDMA_DMACONTROL,  3,  1,      ACCESS_WO},
@@ -176,6 +185,8 @@ static struct cpdma_control_info controls[] = {
                                 (directed << CPDMA_TO_PORT_SHIFT));    \
        } while (0)
 
+#define CPDMA_DMA_EXT_MAP              BIT(16)
+
 static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr)
 {
        struct cpdma_desc_pool *pool = ctlr->pool;
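
The new CPDMA_DMA_EXT_MAP bit piggybacks on the descriptor's sw_len word: CPDMA packet lengths fit in the low 16 bits, so bit 16 is free to record that the buffer was DMA mapped by the caller rather than by cpdma itself. The completion path later uses it to choose between dma_sync_single_for_cpu() and dma_unmap_single(). A minimal sketch of the encoding, assuming kernel types (the helper names are hypothetical and not part of the patch):

	/* Sketch only: how the length and the "externally mapped" flag
	 * share the descriptor's sw_len word.
	 */
	#define CPDMA_DMA_EXT_MAP	BIT(16)

	static inline u32 swlen_encode(u32 len, bool ext_mapped)
	{
		return ext_mapped ? (len | CPDMA_DMA_EXT_MAP) : len;
	}

	static inline bool swlen_is_ext_mapped(u32 swlen)
	{
		return swlen & CPDMA_DMA_EXT_MAP;
	}

	static inline u32 swlen_to_len(u32 swlen)
	{
		return swlen & ~CPDMA_DMA_EXT_MAP;
	}
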
@@ -1002,34 +1013,26 @@ static void __cpdma_chan_submit(struct cpdma_chan *chan,
        }
 }
 
-int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
-                     int len, int directed)
+static int cpdma_chan_submit_si(struct submit_info *si)
 {
+       struct cpdma_chan               *chan = si->chan;
        struct cpdma_ctlr               *ctlr = chan->ctlr;
+       int                             len = si->len;
+       int                             swlen = len;
        struct cpdma_desc __iomem       *desc;
        dma_addr_t                      buffer;
-       unsigned long                   flags;
        u32                             mode;
-       int                             ret = 0;
-
-       spin_lock_irqsave(&chan->lock, flags);
-
-       if (chan->state == CPDMA_STATE_TEARDOWN) {
-               ret = -EINVAL;
-               goto unlock_ret;
-       }
+       int                             ret;
 
        if (chan->count >= chan->desc_num)      {
                chan->stats.desc_alloc_fail++;
-               ret = -ENOMEM;
-               goto unlock_ret;
+               return -ENOMEM;
        }
 
        desc = cpdma_desc_alloc(ctlr->pool);
        if (!desc) {
                chan->stats.desc_alloc_fail++;
-               ret = -ENOMEM;
-               goto unlock_ret;
+               return -ENOMEM;
        }
 
        if (len < ctlr->params.min_packet_size) {
@@ -1037,16 +1040,21 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
                chan->stats.runt_transmit_buff++;
        }
 
-       buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
-       ret = dma_mapping_error(ctlr->dev, buffer);
-       if (ret) {
-               cpdma_desc_free(ctlr->pool, desc, 1);
-               ret = -EINVAL;
-               goto unlock_ret;
-       }
-
        mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
-       cpdma_desc_to_port(chan, mode, directed);
+       cpdma_desc_to_port(chan, mode, si->directed);
+
+       if (si->data_dma) {
+               buffer = si->data_dma;
+               dma_sync_single_for_device(ctlr->dev, buffer, len, chan->dir);
+               swlen |= CPDMA_DMA_EXT_MAP;
+       } else {
+               buffer = dma_map_single(ctlr->dev, si->data_virt, len, chan->dir);
+               ret = dma_mapping_error(ctlr->dev, buffer);
+               if (ret) {
+                       cpdma_desc_free(ctlr->pool, desc, 1);
+                       return -EINVAL;
+               }
+       }
 
        /* Relaxed IO accessors can be used here as there is read barrier
         * at the end of write sequence.
@@ -1055,9 +1063,9 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
        writel_relaxed(buffer, &desc->hw_buffer);
        writel_relaxed(len, &desc->hw_len);
        writel_relaxed(mode | len, &desc->hw_mode);
-       writel_relaxed((uintptr_t)token, &desc->sw_token);
+       writel_relaxed((uintptr_t)si->token, &desc->sw_token);
        writel_relaxed(buffer, &desc->sw_buffer);
-       writel_relaxed(len, &desc->sw_len);
+       writel_relaxed(swlen, &desc->sw_len);
        desc_read(desc, sw_len);
 
        __cpdma_chan_submit(chan, desc);
@@ -1066,8 +1074,105 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
                chan_write(chan, rxfree, 1);
 
        chan->count++;
+       return 0;
+}
 
-unlock_ret:
+int cpdma_chan_idle_submit(struct cpdma_chan *chan, void *token, void *data,
+                          int len, int directed)
+{
+       struct submit_info si;
+       unsigned long flags;
+       int ret;
+
+       si.chan = chan;
+       si.token = token;
+       si.data_virt = data;
+       si.data_dma = 0;
+       si.len = len;
+       si.directed = directed;
+
+       spin_lock_irqsave(&chan->lock, flags);
+       if (chan->state == CPDMA_STATE_TEARDOWN) {
+               spin_unlock_irqrestore(&chan->lock, flags);
+               return -EINVAL;
+       }
+
+       ret = cpdma_chan_submit_si(&si);
+       spin_unlock_irqrestore(&chan->lock, flags);
+       return ret;
+}
+
+int cpdma_chan_idle_submit_mapped(struct cpdma_chan *chan, void *token,
+                                 dma_addr_t data, int len, int directed)
+{
+       struct submit_info si;
+       unsigned long flags;
+       int ret;
+
+       si.chan = chan;
+       si.token = token;
+       si.data_virt = NULL;
+       si.data_dma = data;
+       si.len = len;
+       si.directed = directed;
+
+       spin_lock_irqsave(&chan->lock, flags);
+       if (chan->state == CPDMA_STATE_TEARDOWN) {
+               spin_unlock_irqrestore(&chan->lock, flags);
+               return -EINVAL;
+       }
+
+       ret = cpdma_chan_submit_si(&si);
+       spin_unlock_irqrestore(&chan->lock, flags);
+       return ret;
+}
+
+int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
+                     int len, int directed)
+{
+       struct submit_info si;
+       unsigned long flags;
+       int ret;
+
+       si.chan = chan;
+       si.token = token;
+       si.data_virt = data;
+       si.data_dma = 0;
+       si.len = len;
+       si.directed = directed;
+
+       spin_lock_irqsave(&chan->lock, flags);
+       if (chan->state != CPDMA_STATE_ACTIVE) {
+               spin_unlock_irqrestore(&chan->lock, flags);
+               return -EINVAL;
+       }
+
+       ret = cpdma_chan_submit_si(&si);
+       spin_unlock_irqrestore(&chan->lock, flags);
+       return ret;
+}
+
+int cpdma_chan_submit_mapped(struct cpdma_chan *chan, void *token,
+                            dma_addr_t data, int len, int directed)
+{
+       struct submit_info si;
+       unsigned long flags;
+       int ret;
+
+       si.chan = chan;
+       si.token = token;
+       si.data_virt = NULL;
+       si.data_dma = data;
+       si.len = len;
+       si.directed = directed;
+
+       spin_lock_irqsave(&chan->lock, flags);
+       if (chan->state != CPDMA_STATE_ACTIVE) {
+               spin_unlock_irqrestore(&chan->lock, flags);
+               return -EINVAL;
+       }
+
+       ret = cpdma_chan_submit_si(&si);
        spin_unlock_irqrestore(&chan->lock, flags);
        return ret;
 }
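
The four exported entry points above differ only in the channel state they accept (cpdma_chan_submit*() requires CPDMA_STATE_ACTIVE, while the *_idle_* variants only reject CPDMA_STATE_TEARDOWN, so buffers can be queued before the channel is started) and in whether the buffer is already DMA mapped. A caller-side sketch, assuming a hypothetical driver private struct (my_priv, rxch and rx_running are not from this patch) that manages its own RX buffer mappings:

	/* Sketch only: queue a buffer the caller has already DMA mapped,
	 * so cpdma will only dma_sync it and never unmap it on completion.
	 */
	static int my_rx_queue_buf(struct my_priv *priv, void *token,
				   dma_addr_t dma, int len)
	{
		/* Channel not started yet: only the *_idle_* variant is accepted. */
		if (!priv->rx_running)
			return cpdma_chan_idle_submit_mapped(priv->rxch, token,
							     dma, len, 0);

		return cpdma_chan_submit_mapped(priv->rxch, token, dma, len, 0);
	}
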
@@ -1097,10 +1202,17 @@ static void __cpdma_chan_free(struct cpdma_chan *chan,
        uintptr_t                       token;
 
        token      = desc_read(desc, sw_token);
-       buff_dma   = desc_read(desc, sw_buffer);
        origlen    = desc_read(desc, sw_len);
 
-       dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
+       buff_dma   = desc_read(desc, sw_buffer);
+       if (origlen & CPDMA_DMA_EXT_MAP) {
+               origlen &= ~CPDMA_DMA_EXT_MAP;
+               dma_sync_single_for_cpu(ctlr->dev, buff_dma, origlen,
+                                       chan->dir);
+       } else {
+               dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
+       }
+
        cpdma_desc_free(pool, desc, 1);
        (*chan->handler)((void *)token, outlen, status);
 }
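
On completion, __cpdma_chan_free() reads the flag back out of sw_len: externally mapped buffers are only synced for the CPU, while buffers mapped inside cpdma_chan_submit_si() are unmapped as before. With the *_mapped submit variants the mapping therefore still belongs to the caller, which has to unmap or recycle it from its completion handler. A hedged sketch of such a handler (struct my_tx_buf and my_free_tx_buf() are hypothetical):

	/* Sketch: completion handler for buffers submitted with
	 * cpdma_chan_submit_mapped(); cpdma only synced the buffer, so the
	 * caller still owns the dma_map_single() it made earlier.
	 */
	struct my_tx_buf {
		struct device *dev;
		dma_addr_t dma;
		int maplen;
	};

	static void my_tx_handler(void *token, int len, int status)
	{
		struct my_tx_buf *buf = token;

		dma_unmap_single(buf->dev, buf->dma, buf->maplen, DMA_TO_DEVICE);
		my_free_tx_buf(buf);		/* hypothetical */
	}
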
@@ -1311,8 +1423,23 @@ int cpdma_get_num_tx_descs(struct cpdma_ctlr *ctlr)
        return ctlr->num_tx_desc;
 }
 
-void cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc)
+int cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc)
 {
+       unsigned long flags;
+       int temp, ret;
+
+       spin_lock_irqsave(&ctlr->lock, flags);
+
+       temp = ctlr->num_rx_desc;
        ctlr->num_rx_desc = num_rx_desc;
        ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc;
+       ret = cpdma_chan_split_pool(ctlr);
+       if (ret) {
+               ctlr->num_rx_desc = temp;
+               ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc;
+       }
+
+       spin_unlock_irqrestore(&ctlr->lock, flags);
+
+       return ret;
 }
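
cpdma_set_num_rx_descs() now returns an error code: under the controller lock it applies the new RX/TX split, re-splits the descriptor pool across channels, and rolls the counters back if cpdma_chan_split_pool() fails. Callers that previously ignored the void return should propagate the error. A caller-side sketch (my_priv and my_set_ringparam are hypothetical, e.g. an ethtool .set_ringparam path):

	/* Sketch: propagate the new error return of cpdma_set_num_rx_descs(). */
	static int my_set_ringparam(struct my_priv *priv, int rx_descs)
	{
		int ret;

		ret = cpdma_set_num_rx_descs(priv->dma, rx_descs);
		if (ret)	/* pool split failed; counters were rolled back */
			return ret;

		priv->rx_descs = rx_descs;
		return 0;
	}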