sonic-buildimage/platform/broadcom/saibcm-modules/sdklt/linux/knet/ngknet_buff.c
vmittal-msft 04b9ce8e32
[BCMSAI] Update BCMSAI debian to 6.0.0.10 with 6.5.23 SDK, and opennsl module to 6.5.23 (#9046)
Manual verification on switch (TH3 device)
admin@str2-xxxxx-01:~$ bcmcmd bsv
bsv
BRCM SAI ver: [6.0.0.10], OCP SAI ver: [1.9.1], SDK ver: [sdk-6.5.23]
drivshell>

admin@str2-xxxxx-01:~$ bcmcmd version
version
Broadcom Command Monitor: Copyright (c) 1998-2021 Broadcom
Release: sdk-6.5.23 built 20211020 (Wed Oct 20 06:52:58 2021)
From root@fedbbfdbee81:/__w/2/s/output/x86-xgsall-deb/hsdk
Platform: X86
OS: Unix (Posix)
Chips:
    BCM56640_A0, BCM56850_A0, BCM56340_A0, BCM56960_A0, BCM56860_A0,
    BCM56970_A0, BCM56870_A0, BCM56980_A0, BCM56980_B0, BCM56370_A0,
    BCM56275_A0, BCM56770_A0, BCM56780_A0, BCM56782_A0, BCM56784_A0,
    BCM56785_A0, BCM56786_A0, BCM56787_A0, BCM56788_A0, BCM56789_A0,
    BCM56880_A0, BCM56880_B0, BCM56881_A0, BCM56881_B0, BCM56883_A0,
    BCM56883_B0, BCM56990_A0, BCM56990_B0, BCM56991_B0, BCM56992_B0,
    BCM56996_A0, BCM56996_B0, BCM56997_A0, BCM56997_B0

Variant drivers:
BCM56780_A0_CNA_1_2_10, BCM56780_A0_DNA_2_7_6_0, BCM56880_A0_CNA_1_2_9, BCM56880_A0_DNA_4_9_5_0
PHYs: BCM5400, BCM54182, BCM54185, BCM54180,
BCM54140, BCM54192, BCM54195, BCM54190,
BCM54194, BCM54210, BCM54220, BCM54280,
BCM54282, BCM54240, BCM54285, BCM5428X,
BCM54290, BCM54292, BCM54294, BCM54295,
BCM54296, BCM56160-GPHY, BCM53540-GPHY, BCM56275-GPHY,
BCM8750, BCM8752, BCM8754, BCM84740,
BCM84164, BCM84758, BCM84780, BCM84784,
BCM84318, BCM84328, Sesto, BCM82780,
copper sfp

drivshell>
2021-10-28 00:12:32 -07:00

/*! \file ngknet_buff.c
 *
 * Utility routines for NGKNET packet buffer management in Linux kernel mode.
 *
 */
/*
 * $Copyright: Copyright 2018-2021 Broadcom. All rights reserved.
 * The term 'Broadcom' refers to Broadcom Inc. and/or its subsidiaries.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * A copy of the GNU General Public License version 2 (GPLv2) can
 * be found in the LICENSES folder.$
 */

#include <bcmcnet/bcmcnet_core.h>
#include <bcmcnet/bcmcnet_dev.h>
#include <bcmcnet/bcmcnet_rxtx.h>
#include "ngknet_main.h"
#include "ngknet_buff.h"
/*!
 * Allocate coherent memory
 */
static void *
bcmcnet_ring_buf_alloc(struct pdma_dev *dev, uint32_t size, dma_addr_t *dma)
{
    struct ngknet_dev *kdev = (struct ngknet_dev *)dev->priv;

    return dma_alloc_coherent(kdev->dev, size, dma, GFP_KERNEL);
}

/*!
 * Free coherent memory
 */
static void
bcmcnet_ring_buf_free(struct pdma_dev *dev, uint32_t size, void *addr, dma_addr_t dma)
{
    struct ngknet_dev *kdev = (struct ngknet_dev *)dev->priv;

    dma_free_coherent(kdev->dev, size, addr, dma);
}

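/*
 * Note: the descriptor rings use coherent DMA memory because the CPU and
 * the DMA engine access the descriptors concurrently; coherent mappings
 * avoid the explicit dma_sync_*() calls that the streaming mappings used
 * for the packet buffers below require.
 */
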
/*!
 * Allocate Rx buffer
 */
static int
bcmcnet_rx_buf_alloc(struct pdma_dev *dev, struct pdma_rx_queue *rxq,
                     struct pdma_rx_buf *pbuf)
{
    struct ngknet_dev *kdev = (struct ngknet_dev *)dev->priv;
    dma_addr_t dma;
    struct page *page;
    struct sk_buff *skb;

    if (rxq->mode == PDMA_BUF_MODE_PAGE) {
        /* Page mode: map a whole page; its halves are handed out alternately. */
        page = kal_dev_alloc_page();
        if (unlikely(!page)) {
            return SHR_E_MEMORY;
        }
        dma = dma_map_page(kdev->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(kdev->dev, dma))) {
            __free_page(page);
            return SHR_E_MEMORY;
        }
        pbuf->dma = dma;
        pbuf->page = page;
        pbuf->page_offset = 0;
    } else {
        /* SKB mode: reserve room for the packet header and alignment slack. */
        skb = netdev_alloc_skb(kdev->net_dev, PDMA_RXB_RESV + pbuf->adj + rxq->buf_size);
        if (unlikely(!skb)) {
            return SHR_E_MEMORY;
        }
        /* Align skb->data to a PDMA_RXB_ALIGN boundary. */
        skb_reserve(skb, PDMA_RXB_ALIGN - (((unsigned long)skb->data) & (PDMA_RXB_ALIGN - 1)));
        pbuf->skb = skb;
        pbuf->pkb = (struct pkt_buf *)skb->data;
        dma = dma_map_single(kdev->dev, &pbuf->pkb->data + pbuf->adj, rxq->buf_size, DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(kdev->dev, dma))) {
            dev_kfree_skb_any(skb);
            return SHR_E_MEMORY;
        }
        pbuf->dma = dma;
    }

    return SHR_E_NONE;
}

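/*
 * Rough SKB-mode buffer layout implied by the code above (the macros are
 * the driver's own; the exact split inside PDMA_RXB_RESV is an assumption
 * here, not taken from the headers):
 *
 *   skb->data -> +---------------------------+
 *                | struct pkt_buf header     |  pkh + metadata
 *                +---------------------------+
 *                | adj bytes (Rx adjustment) |  pbuf->adj, optional
 *                +---------------------------+
 *                | packet data               |  rxq->buf_size, DMA target
 *                +---------------------------+
 */
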
/*!
 * Get Rx buffer DMA address
 */
static void
bcmcnet_rx_buf_dma(struct pdma_dev *dev, struct pdma_rx_queue *rxq,
                   struct pdma_rx_buf *pbuf, dma_addr_t *addr)
{
    if (rxq->mode == PDMA_BUF_MODE_PAGE) {
        /* Point past the reserved header area of the active half page. */
        *addr = pbuf->dma + pbuf->page_offset + PDMA_RXB_RESV + pbuf->adj;
    } else {
        *addr = pbuf->dma;
    }
}

/*!
 * Check whether an Rx buffer is available
 */
static bool
bcmcnet_rx_buf_avail(struct pdma_dev *dev, struct pdma_rx_queue *rxq,
                     struct pdma_rx_buf *pbuf)
{
    if (rxq->mode == PDMA_BUF_MODE_PAGE) {
        /* Force a fresh skb to be built around the page on the next get. */
        pbuf->skb = NULL;
    }

    return (pbuf->dma != 0);
}

/*!
 * Get Rx buffer
 */
static struct pkt_hdr *
bcmcnet_rx_buf_get(struct pdma_dev *dev, struct pdma_rx_queue *rxq,
                   struct pdma_rx_buf *pbuf, int len)
{
    struct ngknet_dev *kdev = (struct ngknet_dev *)dev->priv;
    struct sk_buff *skb;

    if (rxq->mode == PDMA_BUF_MODE_PAGE) {
        if (pbuf->skb) {
            /* An skb has already been built around this buffer. */
            return &pbuf->pkb->pkh;
        }
        skb = kal_build_skb(page_address(pbuf->page) + pbuf->page_offset,
                            PDMA_SKB_RESV + pbuf->adj + rxq->buf_size);
        if (unlikely(!skb)) {
            return NULL;
        }
        skb_reserve(skb, PDMA_RXB_ALIGN);
        dma_sync_single_range_for_cpu(kdev->dev, pbuf->dma, pbuf->page_offset,
                                      PDMA_PAGE_BUF_MAX, DMA_FROM_DEVICE);
        pbuf->skb = skb;
        pbuf->pkb = (struct pkt_buf *)skb->data;
        /* Try to reuse this page */
        if (unlikely(page_count(pbuf->page) != 1)) {
            /* Someone else still holds the page; surrender it to the stack. */
            dma_unmap_page(kdev->dev, pbuf->dma, PAGE_SIZE, DMA_FROM_DEVICE);
            pbuf->dma = 0;
        } else {
            /* Flip to the other half page and hand it back to the device. */
            pbuf->page_offset ^= PDMA_PAGE_BUF_MAX;
            page_ref_inc(pbuf->page);
            dma_sync_single_range_for_device(kdev->dev, pbuf->dma, pbuf->page_offset,
                                             PDMA_PAGE_BUF_MAX, DMA_FROM_DEVICE);
        }
    } else {
        if (!pbuf->dma) {
            /* Already unmapped; nothing more to do. */
            return &pbuf->pkb->pkh;
        }
        skb = pbuf->skb;
        dma_unmap_single(kdev->dev, pbuf->dma, rxq->buf_size, DMA_FROM_DEVICE);
        pbuf->dma = 0;
    }

    skb_put(skb, PKT_HDR_SIZE + pbuf->adj + len);

    return &pbuf->pkb->pkh;
}

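/*
 * Page recycling walkthrough (as implemented above): a freshly allocated
 * page serves its first packet from offset 0.  If the driver is the only
 * page_count() holder when the packet arrives, it bumps the refcount, flips
 * page_offset to the other PDMA_PAGE_BUF_MAX half, and re-arms the same DMA
 * mapping, so one page can bounce between two in-flight buffers
 * indefinitely.  Only when the stack keeps a reference (an skb still queued
 * somewhere, for instance) is the page unmapped and a new one allocated by
 * bcmcnet_rx_buf_alloc().
 */
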
/*!
 * Put Rx buffer back for reuse
 */
static int
bcmcnet_rx_buf_put(struct pdma_dev *dev, struct pdma_rx_queue *rxq,
                   struct pdma_rx_buf *pbuf, int len)
{
    struct ngknet_dev *kdev = (struct ngknet_dev *)dev->priv;
    dma_addr_t dma;
    struct sk_buff *skb;

    if (rxq->mode == PDMA_BUF_MODE_PAGE) {
        /* Drop the skb shell; the underlying page stays with the queue. */
        dev_kfree_skb_any(pbuf->skb);
    } else {
        skb = pbuf->skb;
        if (pbuf->pkb != (struct pkt_buf *)skb->data) {
            /* The skb head has moved; it can no longer be recycled. */
            dev_kfree_skb_any(skb);
            return SHR_E_NONE;
        }
        dma = dma_map_single(kdev->dev, &pbuf->pkb->data + pbuf->adj,
                             rxq->buf_size, DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(kdev->dev, dma))) {
            dev_kfree_skb_any(skb);
            pbuf->dma = 0;
            return SHR_E_MEMORY;
        }
        pbuf->dma = dma;
        /* Undo the skb_put() done when the packet was received. */
        skb_trim(skb, skb->len - (PKT_HDR_SIZE + pbuf->adj + len));
    }

    return SHR_E_NONE;
}

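/*
 * Note: put() is the undo path for get(), presumably taken when a received
 * packet is not consumed (dropped by a filter, for instance).  In SKB mode
 * the very same skb is remapped and trimmed back to zero payload, so the
 * drop path allocates nothing; in page mode only the skb shell built by
 * kal_build_skb() is released, since the half page was already re-armed in
 * bcmcnet_rx_buf_get().
 */
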
/*!
 * Free Rx buffer
 */
static void
bcmcnet_rx_buf_free(struct pdma_dev *dev, struct pdma_rx_queue *rxq,
                    struct pdma_rx_buf *pbuf)
{
    struct ngknet_dev *kdev = (struct ngknet_dev *)dev->priv;

    if (rxq->mode == PDMA_BUF_MODE_PAGE) {
        /* Pair with the dma_map_page() in bcmcnet_rx_buf_alloc(). */
        dma_unmap_page(kdev->dev, pbuf->dma, PAGE_SIZE, DMA_FROM_DEVICE);
        __free_page(pbuf->page);
    } else {
        dma_unmap_single(kdev->dev, pbuf->dma, rxq->buf_size, DMA_FROM_DEVICE);
        dev_kfree_skb_any(pbuf->skb);
    }

    pbuf->dma = 0;
    pbuf->page = NULL;
    pbuf->page_offset = 0;
    pbuf->skb = NULL;
    pbuf->pkb = NULL;
    pbuf->adj = 0;
}

/*!
 * Select Rx buffer mode
 */
static enum buf_mode
bcmcnet_rx_buf_mode(struct pdma_dev *dev, struct pdma_rx_queue *rxq)
{
    uint32_t len;

    /* Reserve meta space unless the device prepends a packet header. */
    len = dev->rx_ph_size ? rxq->buf_size : rxq->buf_size + PDMA_RXB_META;
    if (PDMA_RXB_SIZE(len) <= PDMA_PAGE_BUF_MAX && PAGE_SIZE < 8192 &&
        kal_support_paged_skb()) {
        return PDMA_BUF_MODE_PAGE;
    }

    return PDMA_BUF_MODE_SKB;
}

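/*
 * Page mode is only worthwhile when a complete buffer fits in half a page
 * (PDMA_RXB_SIZE(len) <= PDMA_PAGE_BUF_MAX) and the page itself is small
 * enough (< 8 KiB) for half-page recycling to pay off;
 * kal_support_paged_skb() presumably gates this on the kernel offering
 * build_skb()-style construction.  Everything else falls back to plain skbs.
 */
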
/*!
 * Get Tx buffer
 */
static struct pkt_hdr *
bcmcnet_tx_buf_get(struct pdma_dev *dev, struct pdma_tx_queue *txq,
                   struct pdma_tx_buf *pbuf, void *buf)
{
    struct ngknet_dev *kdev = (struct ngknet_dev *)dev->priv;
    struct sk_buff *skb = (struct sk_buff *)buf;
    struct pkt_buf *pkb = (struct pkt_buf *)skb->data;
    dma_addr_t dma;

    /* Include the Tx metadata in the mapping only when adj is set. */
    pbuf->len = pkb->pkh.data_len + (pbuf->adj ? pkb->pkh.meta_len : 0);
    dma = dma_map_single(kdev->dev, &pkb->data + (pbuf->adj ? 0 : pkb->pkh.meta_len),
                         pbuf->len, DMA_TO_DEVICE);
    if (unlikely(dma_mapping_error(kdev->dev, dma))) {
        dev_kfree_skb_any(skb);
        return NULL;
    }
    pbuf->dma = dma;
    pbuf->skb = skb;
    pbuf->pkb = pkb;

    return &pkb->pkh;
}

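/*
 * Reading of the adj flag above: when pbuf->adj is set, the DMA mapping
 * starts at the Tx metadata and covers meta_len + data_len bytes, so the
 * device consumes the metadata in-band; otherwise the mapping skips
 * meta_len bytes and covers only the packet data.  This follows from the
 * offset arithmetic; the authoritative meaning of adj is defined by the
 * PDMA core, not here.
 */
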
/*!
 * Get Tx buffer DMA address
 */
static void
bcmcnet_tx_buf_dma(struct pdma_dev *dev, struct pdma_tx_queue *txq,
                   struct pdma_tx_buf *pbuf, dma_addr_t *addr)
{
    *addr = pbuf->dma;
}

/*!
 * Free Tx buffer
 */
static void
bcmcnet_tx_buf_free(struct pdma_dev *dev, struct pdma_tx_queue *txq,
                    struct pdma_tx_buf *pbuf)
{
    struct ngknet_dev *kdev = (struct ngknet_dev *)dev->priv;

    dma_unmap_single(kdev->dev, pbuf->dma, pbuf->len, DMA_TO_DEVICE);
    if (skb_shinfo(pbuf->skb)->tx_flags & SKBTX_IN_PROGRESS) {
        /* Hardware timestamp pending: defer the skb to the PTP worker. */
        skb_queue_tail(&kdev->ptp_tx_queue, pbuf->skb);
        schedule_work(&kdev->ptp_tx_work);
    } else {
        dev_kfree_skb_any(pbuf->skb);
    }

    pbuf->dma = 0;
    pbuf->len = 0;
    pbuf->skb = NULL;
    pbuf->pkb = NULL;
    pbuf->adj = 0;
}

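/*
 * SKBTX_IN_PROGRESS is the kernel's marker that a hardware Tx timestamp was
 * requested and is still outstanding, so such skbs must not be freed on Tx
 * completion; the ptp_tx_work handler (elsewhere in the driver, presumably
 * ngknet_main.c) retrieves the timestamp and releases the skb.
 */
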
static const struct pdma_buf_mngr buf_mngr = {
    .ring_buf_alloc = bcmcnet_ring_buf_alloc,
    .ring_buf_free  = bcmcnet_ring_buf_free,
    .rx_buf_alloc   = bcmcnet_rx_buf_alloc,
    .rx_buf_dma     = bcmcnet_rx_buf_dma,
    .rx_buf_avail   = bcmcnet_rx_buf_avail,
    .rx_buf_get     = bcmcnet_rx_buf_get,
    .rx_buf_put     = bcmcnet_rx_buf_put,
    .rx_buf_free    = bcmcnet_rx_buf_free,
    .rx_buf_mode    = bcmcnet_rx_buf_mode,
    .tx_buf_get     = bcmcnet_tx_buf_get,
    .tx_buf_dma     = bcmcnet_tx_buf_dma,
    .tx_buf_free    = bcmcnet_tx_buf_free,
};

/*!
 * Initialize buffer manager
 */
void
bcmcnet_buf_mngr_init(struct pdma_dev *dev)
{
    dev->ctrl.buf_mngr = (struct pdma_buf_mngr *)&buf_mngr;
}

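/*
 * Usage note: the PDMA core invokes all of the hooks above indirectly
 * through dev->ctrl.buf_mngr, so bcmcnet_buf_mngr_init() must run during
 * device bring-up (presumably from ngknet_main.c), before any ring or
 * packet buffer operation is attempted.  The cast discards the const
 * qualifier because the pdma_dev field is declared non-const.
 */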