From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Fri, 20 May 2022 20:11:30 +0200
Subject: [PATCH] net: ethernet: mtk_eth_soc: rely on txd_size in
 mtk_desc_to_tx_buf

This is a preliminary patch to add mt7986 ethernet support.

Tested-by: Sam Shih <sam.shih@mediatek.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -901,10 +901,11 @@ static inline void *mtk_qdma_phys_to_vir
 	return ret + (desc - ring->phys);
 }
 
-static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
-						    struct mtk_tx_dma *txd)
+static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
+					     struct mtk_tx_dma *txd,
+					     u32 txd_size)
 {
-	int idx = txd - ring->dma;
+	int idx = ((void *)txd - (void *)ring->dma) / txd_size;
 
 	return &ring->buf[idx];
 }
@@ -1026,6 +1027,7 @@ static int mtk_tx_map(struct sk_buff *sk
 	};
 	struct mtk_mac *mac = netdev_priv(dev);
 	struct mtk_eth *eth = mac->hw;
+	const struct mtk_soc_data *soc = eth->soc;
 	struct mtk_tx_dma *itxd, *txd;
 	struct mtk_tx_dma *itxd_pdma, *txd_pdma;
 	struct mtk_tx_buf *itx_buf, *tx_buf;
@@ -1037,7 +1039,7 @@ static int mtk_tx_map(struct sk_buff *sk
 	if (itxd == ring->last_free)
 		return -ENOMEM;
 
-	itx_buf = mtk_desc_to_tx_buf(ring, itxd);
+	itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
 	memset(itx_buf, 0, sizeof(*itx_buf));
 
 	txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
@@ -1065,7 +1067,7 @@ static int mtk_tx_map(struct sk_buff *sk
 	while (frag_size) {
 		bool new_desc = true;
 
-		if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA) ||
+		if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
 		    (i & 0x1)) {
 			txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
 			txd_pdma = qdma_to_pdma(ring, txd);
@@ -1089,7 +1091,8 @@ static int mtk_tx_map(struct sk_buff *sk
 
 		mtk_tx_set_dma_desc(dev, txd, &txd_info);
 
-		tx_buf = mtk_desc_to_tx_buf(ring, txd);
+		tx_buf = mtk_desc_to_tx_buf(ring, txd,
+					    soc->txrx.txd_size);
 		if (new_desc)
 			memset(tx_buf, 0, sizeof(*tx_buf));
 		tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
@@ -1108,7 +1111,7 @@ static int mtk_tx_map(struct sk_buff *sk
 	/* store skb to cleanup */
 	itx_buf->skb = skb;
 
-	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
 		if (k & 0x1)
 			txd_pdma->txd2 |= TX_DMA_LS0;
 		else
@@ -1126,7 +1129,7 @@ static int mtk_tx_map(struct sk_buff *sk
 	 */
 	wmb();
 
-	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
 		if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
 		    !netdev_xmit_more())
 			mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
@@ -1140,13 +1143,13 @@ static int mtk_tx_map(struct sk_buff *sk
 
 err_dma:
 	do {
-		tx_buf = mtk_desc_to_tx_buf(ring, itxd);
+		tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
 
 		/* unmap dma */
 		mtk_tx_unmap(eth, tx_buf, false);
 
 		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
-		if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+		if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
 			itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
 
 		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
@@ -1460,7 +1463,8 @@ static int mtk_poll_tx_qdma(struct mtk_e
 		if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
 			break;
 
-		tx_buf = mtk_desc_to_tx_buf(ring, desc);
+		tx_buf = mtk_desc_to_tx_buf(ring, desc,
+					    eth->soc->txrx.txd_size);
 		if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
 			mac = 1;
 