Add pensando platform (#15978)

This commit adds support for pensando asic called ELBA. ELBA is used in pci based cards and in smartswitches.

#### Why I did it
This commit introduces pensando platform which is based on ELBA ASIC.
##### Work item tracking
- Microsoft ADO **(number only)**:

#### How I did it
Created platform/pensando folder and created makefiles specific to pensando.
This mainly creates the pensando docker (which OEMs need to download before building an image), which contains all the userspace software needed to initialize and use the DPU (ELBA ASIC).
Output of the build process creates two images which can be used from ONIE and goldfw.
The recommendation is to use ONIE.
#### How to verify it
Load the SONiC image via ONIE or goldfw and make sure the interfaces are UP.

##### Description for the changelog
Add pensando platform support.
This commit is contained in:
Ashwin Hiranniah 2023-12-04 22:41:52 +00:00 committed by GitHub
parent ed8fa6a47e
commit ada7c6a72e
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
163 changed files with 43782 additions and 19 deletions

3
.gitignore vendored
View File

@ -41,6 +41,9 @@ installer/x86_64/platforms/
# Misc. files
asic_config_checksum
files/Aboot/boot0
files/dsc/MANIFEST
files/dsc/install_debian
files/dsc/fs.zip
files/initramfs-tools/arista-convertfs
files/initramfs-tools/union-mount

View File

@ -50,8 +50,8 @@ TRUSTED_GPG_DIR=$BUILD_TOOL_PATH/trusted.gpg.d
echo "Error: Invalid ONIE_IMAGE_PART_SIZE in onie image config file"
exit 1
}
[ -n "$ONIE_INSTALLER_PAYLOAD" ] || {
echo "Error: Invalid ONIE_INSTALLER_PAYLOAD in onie image config file"
[ -n "$INSTALLER_PAYLOAD" ] || {
echo "Error: Invalid INSTALLER_PAYLOAD in onie image config file"
exit 1
}
[ -n "$FILESYSTEM_SQUASHFS" ] || {
@ -753,16 +753,29 @@ sudo chroot $FILESYSTEM_ROOT update-initramfs -u
## Convert initrd image to u-boot format
if [[ $TARGET_BOOTLOADER == uboot ]]; then
INITRD_FILE=initrd.img-${LINUX_KERNEL_VERSION}-${CONFIGURED_ARCH}
KERNEL_FILE=vmlinuz-${LINUX_KERNEL_VERSION}-${CONFIGURED_ARCH}
if [[ $CONFIGURED_ARCH == armhf ]]; then
INITRD_FILE=initrd.img-${LINUX_KERNEL_VERSION}-armmp
sudo LANG=C chroot $FILESYSTEM_ROOT mkimage -A arm -O linux -T ramdisk -C gzip -d /boot/$INITRD_FILE /boot/u${INITRD_FILE}
## Overwriting the initrd image with uInitrd
sudo LANG=C chroot $FILESYSTEM_ROOT mv /boot/u${INITRD_FILE} /boot/$INITRD_FILE
elif [[ $CONFIGURED_ARCH == arm64 ]]; then
if [[ $CONFIGURED_PLATFORM == pensando ]]; then
## copy device tree file into boot (XXX: need to compile dtb from dts)
sudo cp -v $PLATFORM_DIR/pensando/elba-asic-psci.dtb $FILESYSTEM_ROOT/boot/
## make kernel as gzip file
sudo LANG=C chroot $FILESYSTEM_ROOT gzip /boot/${KERNEL_FILE}
sudo LANG=C chroot $FILESYSTEM_ROOT mv /boot/${KERNEL_FILE}.gz /boot/${KERNEL_FILE}
## Convert initrd image to u-boot format
sudo LANG=C chroot $FILESYSTEM_ROOT mkimage -A arm64 -O linux -T ramdisk -C gzip -d /boot/$INITRD_FILE /boot/u${INITRD_FILE}
## Overwriting the initrd image with uInitrd
sudo LANG=C chroot $FILESYSTEM_ROOT mv /boot/u${INITRD_FILE} /boot/$INITRD_FILE
else
sudo cp -v $PLATFORM_DIR/${sonic_asic_platform}-${CONFIGURED_ARCH}/sonic_fit.its $FILESYSTEM_ROOT/boot/
sudo LANG=C chroot $FILESYSTEM_ROOT mkimage -f /boot/sonic_fit.its /boot/sonic_${CONFIGURED_ARCH}.fit
fi
fi
fi
# Collect host image version files before cleanup
SONIC_VERSION_CACHE=${SONIC_VERSION_CACHE} \
@ -811,7 +824,7 @@ if [[ "$CHANGE_DEFAULT_PASSWORD" == "y" ]]; then
fi
## Compress most file system into squashfs file
sudo rm -f $ONIE_INSTALLER_PAYLOAD $FILESYSTEM_SQUASHFS
sudo rm -f $INSTALLER_PAYLOAD $FILESYSTEM_SQUASHFS
## Output the file system total size for diag purpose
## Note: -x to skip directories on different file systems, such as /proc
sudo du -hsx $FILESYSTEM_ROOT
@ -856,5 +869,5 @@ fi
pushd $FILESYSTEM_ROOT && sudo tar -I $GZ_COMPRESS_PROGRAM -cf $OLDPWD/$FILESYSTEM_DOCKERFS -C ${DOCKERFS_PATH}var/lib/docker .; popd
## Compress together with /boot, /var/lib/docker and $PLATFORM_DIR as an installer payload zip file
pushd $FILESYSTEM_ROOT && sudo tar -I $GZ_COMPRESS_PROGRAM -cf platform.tar.gz -C $PLATFORM_DIR . && sudo zip -n .gz $OLDPWD/$ONIE_INSTALLER_PAYLOAD -r boot/ platform.tar.gz; popd
sudo zip -g -n .squashfs:.gz $ONIE_INSTALLER_PAYLOAD $FILESYSTEM_SQUASHFS $FILESYSTEM_DOCKERFS
pushd $FILESYSTEM_ROOT && sudo tar -I $GZ_COMPRESS_PROGRAM -cf platform.tar.gz -C $PLATFORM_DIR . && sudo zip -n .gz $OLDPWD/$INSTALLER_PAYLOAD -r boot/ platform.tar.gz; popd
sudo zip -g -n .squashfs:.gz $INSTALLER_PAYLOAD $FILESYSTEM_SQUASHFS $FILESYSTEM_DOCKERFS

View File

@ -18,8 +18,8 @@ fi
echo "Error: Invalid ONIE_IMAGE_PART_SIZE in onie image config file"
exit 1
}
[ -n "$ONIE_INSTALLER_PAYLOAD" ] || {
echo "Error: Invalid ONIE_INSTALLER_PAYLOAD in onie image config file"
[ -n "$INSTALLER_PAYLOAD" ] || {
echo "Error: Invalid INSTALLER_PAYLOAD in onie image config file"
exit 1
}
@ -86,7 +86,7 @@ generate_onie_installer_image()
## Note: Don't leave blank between lines. It is single line command.
./onie-mk-demo.sh $CONFIGURED_ARCH $TARGET_MACHINE $TARGET_PLATFORM-$TARGET_MACHINE-$ONIEIMAGE_VERSION \
installer platform/$TARGET_MACHINE/platform.conf $output_file OS $IMAGE_VERSION $ONIE_IMAGE_PART_SIZE \
$ONIE_INSTALLER_PAYLOAD $SECURE_UPGRADE_SIGNING_CERT $SECURE_UPGRADE_DEV_SIGNING_KEY
$INSTALLER_PAYLOAD $SECURE_UPGRADE_SIGNING_CERT $SECURE_UPGRADE_DEV_SIGNING_KEY
}
# Generate asic-specific device list
@ -175,7 +175,7 @@ elif [ "$IMAGE_TYPE" = "aboot" ]; then
sudo rm -f $OUTPUT_ABOOT_IMAGE
sudo rm -f $ABOOT_BOOT_IMAGE
## Add main payload
cp $ONIE_INSTALLER_PAYLOAD $OUTPUT_ABOOT_IMAGE
cp $INSTALLER_PAYLOAD $OUTPUT_ABOOT_IMAGE
## Add Aboot boot0 file
j2 -f env files/Aboot/boot0.j2 ./onie-image.conf > files/Aboot/boot0
sed -i -e "s/%%IMAGE_VERSION%%/$IMAGE_VERSION/g" files/Aboot/boot0
@ -213,6 +213,38 @@ elif [ "$IMAGE_TYPE" = "aboot" ]; then
[ -f "$CA_CERT" ] && cp "$CA_CERT" "$TARGET_CA_CERT"
./scripts/sign_image.sh -i "$OUTPUT_ABOOT_IMAGE" -k "$SIGNING_KEY" -c "$SIGNING_CERT" -a "$TARGET_CA_CERT"
fi
elif [ "$IMAGE_TYPE" = "dsc" ]; then
echo "Build DSC installer"
dsc_installer_dir=files/dsc
dsc_installer=$dsc_installer_dir/install_debian
dsc_installer_manifest=$dsc_installer_dir/MANIFEST
mkdir -p `dirname $OUTPUT_DSC_IMAGE`
sudo rm -f $OUTPUT_DSC_IMAGE
source ./onie-image.conf
j2 $dsc_installer.j2 > $dsc_installer
export installer_sha=$(sha512sum "$dsc_installer" | awk '{print $1}')
export build_date=$(date -u)
export build_user=$(id -un)
export installer_payload_sha=$(sha512sum "$INSTALLER_PAYLOAD" | awk '{print $1}')
j2 $dsc_installer_manifest.j2 > $dsc_installer_manifest
cp $INSTALLER_PAYLOAD $dsc_installer_dir
tar cf $OUTPUT_DSC_IMAGE -C files/dsc $(basename $dsc_installer_manifest) $INSTALLER_PAYLOAD $(basename $dsc_installer)
echo "Build ONIE installer"
mkdir -p `dirname $OUTPUT_ONIE_IMAGE`
sudo rm -f $OUTPUT_ONIE_IMAGE
generate_device_list "./installer/platforms_asic"
generate_onie_installer_image
else
echo "Error: Non supported image type $IMAGE_TYPE"
exit 1

View File

@ -0,0 +1,3 @@
# name lanes alias speed autoneg fec
Ethernet1 0,1,2,3 Ethernet1 100000 on rs
Ethernet2 4,5,6,7 Ethernet2 100000 on rs

View File

@ -0,0 +1 @@
Pensando-elba t1

View File

@ -0,0 +1 @@
pensando

View File

@ -0,0 +1,100 @@
#
# ssd_generic.py
#
# Generic implementation of the SSD health API
# SSD models supported:
# - InnoDisk
# - StorFly
# - Virtium
try:
import re
import subprocess
from sonic_platform_base.sonic_ssd.ssd_base import SsdBase
except ImportError as e:
raise ImportError (str(e) + "- required module not found")
NOT_AVAILABLE = "N/A"
MMC_DATA_PATH = "/sys/class/mmc_host/mmc0/mmc0:0001/{}"
class SsdUtil(SsdBase):
    """
    Disk health API implementation for the Pensando Elba platform.

    Despite the "Ssd" name, the boot device here is an eMMC part: all
    attributes are read from the mmc sysfs tree (MMC_DATA_PATH) rather than
    queried via smartctl.  Any attribute that cannot be read is reported as
    NOT_AVAILABLE.
    """

    # Class-level defaults; overwritten per instance when sysfs reads succeed.
    model = NOT_AVAILABLE
    serial = NOT_AVAILABLE
    firmware = NOT_AVAILABLE
    # Temperature is not exposed by this sysfs interface, so it stays N/A.
    temperature = NOT_AVAILABLE
    health = NOT_AVAILABLE
    ssd_info = NOT_AVAILABLE
    vendor_ssd_info = NOT_AVAILABLE

    def __init__(self, diskdev):
        """
        Args:
            diskdev: block device path (e.g. '/dev/mmcblk0').  Kept for API
                compatibility; data is actually read from MMC_DATA_PATH sysfs.
        """
        self.dev = diskdev
        try:
            self.model = "emmc {}".format(self._read_mmc_attr("name"))
            self.serial = self._read_mmc_attr("serial")
            self.firmware = self._read_mmc_attr("fwrev")
            # life_time exposes two hex lifetime estimates (eMMC "type A" and
            # "type B" areas); the worse (larger) of the two is used.
            value = self._read_mmc_attr("life_time")
            lifetime_a, lifetime_b = (int(val, 16) for val in value.split())
            lifetime = max(lifetime_a, lifetime_b)
            # Each life_time step represents 10% of rated device life used.
            self.health = float(100 - (lifetime * 10))
        except Exception:
            # Best effort: leave unreadable attributes at NOT_AVAILABLE.
            # (Was a bare `except:`, which would also swallow
            # KeyboardInterrupt/SystemExit.)
            pass

    @staticmethod
    def _read_mmc_attr(attr):
        """Read one mmc sysfs attribute, newline-stripped.  May raise OSError.

        Fixes a resource leak in the original code, which used bare
        open(...).read() without ever closing the file handles.
        """
        with open(MMC_DATA_PATH.format(attr)) as f:
            return f.read().replace("\n", "")

    def get_health(self):
        """
        Retrieves current disk health in percentages

        Returns:
            A float number of current ssd health
            e.g. 83.5
        """
        return self.health

    def get_temperature(self):
        """
        Retrieves current disk temperature in Celsius

        Returns:
            A float number of current temperature in Celsius
            e.g. 40.1
        """
        return self.temperature

    def get_model(self):
        """
        Retrieves model for the given disk device

        Returns:
            A string holding disk model as provided by the manufacturer
        """
        return self.model

    def get_firmware(self):
        """
        Retrieves firmware version for the given disk device

        Returns:
            A string holding disk firmware version as provided by the manufacturer
        """
        return self.firmware

    def get_serial(self):
        """
        Retrieves serial number for the given disk device

        Returns:
            A string holding disk serial number as provided by the manufacturer
        """
        return self.serial

    def get_vendor_output(self):
        """
        Retrieves vendor specific data for the given disk device

        Returns:
            A string holding some vendor specific disk information
        """
        return self.vendor_ssd_info

View File

@ -0,0 +1,10 @@
{
"skip_thermalctld": true,
"skip_fancontrol": true,
"skip_ledd": true,
"skip_psud": true,
"skip_syseepromd": false,
"skip_xcvrd": true,
"skip_chassis_db_init": false,
"skip_pcied": true
}

View File

@ -64,6 +64,9 @@ elif [ "$platform" == "mellanox" ]; then
ORCHAGENT_ARGS+=""
elif [ "$platform" == "innovium" ]; then
ORCHAGENT_ARGS+="-m $MAC_ADDRESS"
elif [ "$platform" == "pensando" ]; then
MAC_ADDRESS=$(ip link property add dev oob_mnic0 altname eth0; ip link show oob_mnic0 | grep ether | awk '{print $2}')
ORCHAGENT_ARGS+="-m $MAC_ADDRESS"
else
# Should we use the fallback MAC in case it is not found in Device.Metadata
ORCHAGENT_ARGS+="-m $MAC_ADDRESS"

View File

@ -31,6 +31,7 @@ IMAGE_DISTRO=$3
set -x -e
CONFIGURED_ARCH=$([ -f .arch ] && cat .arch || echo amd64)
CONFIGURED_PLATFORM=$([ -f .platform ] && cat .platform || echo generic)
. functions.sh
BUILD_SCRIPTS_DIR=files/build_scripts
@ -762,6 +763,14 @@ sudo LANG=C DOCKER_HOST="$DOCKER_HOST" chroot $FILESYSTEM_ROOT docker tag {{imag
fi
{% endfor %}
if [[ $CONFIGURED_PLATFORM == pensando ]]; then
#Disable rc.local
sudo LANG=C chroot $FILESYSTEM_ROOT chmod -x /etc/rc.local
sudo cp files/dsc/dpu.service $FILESYSTEM_ROOT_USR_LIB_SYSTEMD_SYSTEM/
sudo cp files/dsc/dpu.init $FILESYSTEM_ROOT/etc/init.d/dpu
sudo LANG=C chroot $FILESYSTEM_ROOT systemctl enable dpu.service
fi
SONIC_PACKAGE_MANAGER_FOLDER="/var/lib/sonic-package-manager/"
sudo mkdir -p $FILESYSTEM_ROOT/$SONIC_PACKAGE_MANAGER_FOLDER
target_machine="$TARGET_MACHINE" j2 $BUILD_TEMPLATES/packages.json.j2 | sudo tee $FILESYSTEM_ROOT/$SONIC_PACKAGE_MANAGER_FOLDER/packages.json

20
files/dsc/MANIFEST.j2 Normal file
View File

@ -0,0 +1,20 @@
{
"metadata_version": 1,
"package_version": 2,
"asic_compat": "elba",
"build_date": "{{ build_date }}",
"build_user": "{{ build_user }}",
"installer": {
"name": "install_debian",
"verify": {
"algorithm": "sha512",
"hash": "{{ installer_sha }}"
}
},
"shas": {
"fs.zip": "{{ installer_payload_sha }}"
},
"package_compat": {
"board_policy": "accept"
}
}

85
files/dsc/dpu.init Executable file
View File

@ -0,0 +1,85 @@
#!/bin/bash
# {C} Copyright 2023 AMD Systems Inc. All rights reserved
# This script starts/stops dpu sw
### BEGIN INIT INFO
# Provides: load-dpu
# Required-Start:
# Required-Stop:
# Should-Start:
# Should-Stop:
# Default-Start: S
# Default-Stop: 0 6
# Short-Description: Load dpu sw
### END INIT INFO
# File written by the firmware recording which NIC image (/boot/nicA or
# /boot/nicB) is currently active.
ACTIVE_FILE="/boot/active.txt"
NIC_MOUNT=""
LOG_FILE="/tmp/active_nic"
# Tag of the docker-dpu image to run.
TAG="latest"
# Host-side directory whose subdirectories are bind-mounted into the container.
HOST_DIR=/host/dpu
# Load the Pensando kernel modules, prepare host directories and tmpfs
# mounts, work out which NIC firmware tree is active, then (re)start the
# "dpu" container with the required bind mounts.
function start_dpu()
{
# Kernel modules providing the management NIC and mnet/uio devices.
modprobe ionic_mnic
modprobe mnet_uio_pdrv_genirq
modprobe mdev
# Persistent host directories exposed to the DPU container.
mkdir -p $HOST_DIR/update
mkdir -p $HOST_DIR/sysconfig/config0
mkdir -p $HOST_DIR/sysconfig/config1
mkdir -p $HOST_DIR/obfl
mkdir -p $HOST_DIR/data
mkdir -p $HOST_DIR/tmpfsshare
mkdir -p $HOST_DIR/runfs
mkdir -p $HOST_DIR/logfs
# tmpfs mounts (20 MB each) for shared-memory, runtime and log data.
mount -t tmpfs -o size=20M,mode=1777 tmpfs $HOST_DIR/tmpfsshare
mount -t tmpfs -o size=20M,mode=0755 runs $HOST_DIR/runfs
mount -t tmpfs -o size=20M,mode=0755 logfs $HOST_DIR/logfs
# NOTE(review): assumes the active NIC path is word 8 onward of active.txt —
# confirm against whatever writes that file.
if [ -f "$ACTIVE_FILE" ]; then
ACTIVE_CONTENTS=$(cat "$ACTIVE_FILE")
ACTIVE_NIC=$(echo "$ACTIVE_CONTENTS" | cut -d " " -f 8-)
if [ "$ACTIVE_NIC" = "/boot/nicA" ]; then
NIC_MOUNT="-v /dev/shm:/dev/shm -v /boot/nicA/nic_core:/nic -v /boot/nicA/shared/conf/gen:/nic/conf/gen"
elif [ "$ACTIVE_NIC" = "/boot/nicB" ]; then
NIC_MOUNT="-v /dev/shm:/dev/shm -v /boot/nicB/nic_core:/nic -v /boot/nicB/shared/conf/gen:/nic/conf/gen"
fi
else
echo "/boot/active.txt not present" > $LOG_FILE
fi
echo "Active Nic: $ACTIVE_NIC" >> $LOG_FILE
echo "NIC_MOUNT: $NIC_MOUNT" >> $LOG_FILE
# Remove any stale dpu container, then start a fresh privileged one with the
# host directories, /sys and the active NIC tree bind-mounted.
docker ps -a --format "{{.ID}}\t{{.Image}}" | grep "docker-dpu:latest" | awk '{print $1}' | xargs -I {} docker rm {}
docker run -v $HOST_DIR/update:/update -v $HOST_DIR/sysconfig/config0:/sysconfig/config0 -v $HOST_DIR/sysconfig/config1:/sysconfig/config1 -v $HOST_DIR/obfl:/obfl -v $HOST_DIR/data:/data -v $HOST_DIR/tmpfsshare:/tmp -v $HOST_DIR/runfs:/run -v $HOST_DIR/logfs:/var/log -v /sys:/sys $NIC_MOUNT --net=host --name=dpu --privileged docker-dpu:$TAG
}
# Only "start" is implemented; stop/restart are intentionally unsupported.
case "$1" in
start)
echo -n "Start dpu... "
start_dpu
echo "done."
;;
stop)
echo "Not supported"
;;
force-reload|restart)
echo "Not supported"
;;
*)
echo "Usage: /etc/init.d/dpu.init {start}"
exit 1
;;
esac
exit 0

17
files/dsc/dpu.service Normal file
View File

@ -0,0 +1,17 @@
# systemd unit that drives the Pensando DPU container via /etc/init.d/dpu.
# Oneshot + RemainAfterExit: the unit stays "active" after the start script
# returns, while the container itself keeps running under docker.
[Unit]
Description=dpu sw
Requires=docker.service
After=docker.service
Requires=local-fs.target
After=local-fs.target
Requires=ionic-modules.service
After=ionic-modules.service
[Service]
Type=oneshot
ExecStart=/etc/init.d/dpu start
ExecStop=/etc/init.d/dpu stop
RemainAfterExit=yes
[Install]
WantedBy=multi-user.target

331
files/dsc/install_debian.j2 Executable file
View File

@ -0,0 +1,331 @@
#!/bin/sh
# Pensando DSC (ELBA) SONiC installer.  Runs from goldfw and installs the
# SONiC root filesystem onto /dev/mmcblk0.  This file is a j2 template;
# SONIC_IMAGE_VERSION is substituted at build time.
R=""
export LD_LIBRARY_PATH=/platform/lib:/nic/lib:$LD_LIBRARY_PATH
export PATH=/platform/bin:$PATH
# When not running on the DSC (non-aarch64), operate on a local ./data tree
# for testing instead of the real filesystem root.
if [ $(arch) != aarch64 ]; then
export PATH=$(pwd)/bin:$PATH
R=$(pwd)/data
fi
root_mnt=$R/mnt
image_version={{ SONIC_IMAGE_VERSION }}
image_dir=image-$image_version
# Names of artifacts inside the installer package tarball.
INSTALLER_PAYLOAD=fs.zip
DOCKERFS_DIR=docker
FILESYSTEM_DOCKERFS=dockerfs.tar.gz
BL_CONF=boot.conf
# Well-known partition GUIDs used to recognize existing SONiC partitions.
DATA_PARTUUID=6ED62003-DD8D-44B8-9538-0A2B7C7E628F
ROOT_PARTUUID=C7F48DD2-C265-404B-959D-C64D21D49168
ROOT_PARTSIZE=24G
# Re-attach stdio to the console tty.
exec 0< /dev/tty 1> /dev/tty 2> /dev/tty
PKG=""
ACTION=""
root_pn=0
data_pn=0
REPART_NEEDED=0
set -e
# Report an unrecoverable error on stderr and abort the installer.
fatal()
{
	printf 'FATAL: %s\n' "$1" >&2
	exit 1
}
# Emit only the SHA-512 digest (first field of sha512sum output) of "$1".
hash()
{
	sha512sum "$1" | cut -d ' ' -f 1
}
# Refuse to run unless the kernel command line reports FW_NAME=goldfw;
# installing from any other firmware is unsupported.
check_running_goldfw()
{
local fw=$(expr "$(cat $R/proc/cmdline)" : '.*FW_NAME=\([a-z]\+\)')
if [ "$fw" != "goldfw" ]; then
fatal "Installer can only be run from goldfw"
fi
}
# Verify the installer package: every file listed under .shas in MANIFEST
# must match its recorded SHA-512 when streamed out of $PKG.
check_package()
{
local f want_hash got_hash
echo "==> Checking package"
# The keys of the .shas object are the package members to verify.
for f in $(jq -r ".shas | keys | join(\" \") | tostring" MANIFEST); do
echo -n "${f}..."
want_hash=$(jq -r .shas.\"$f\" MANIFEST)
# Stream the member out of the tarball and hash it without touching disk.
got_hash=$(tar xfO $PKG $f | hash /dev/stdin)
if [ "$got_hash" != "$want_hash" ]; then
echo " BAD"
echo "WANT: $want_hash"
echo "GOT: $got_hash"
fatal "Package file error"
else
echo " OK"
fi
done
}
# Interactively prompt for the install mode and set $install_mode to one of
# FULL_DISK, DATA_LOSE or DATA_KEEP; any other selection aborts.
# (Currently bypassed: main() hard-codes install_mode=DATA_KEEP.)
get_install_mode()
{
local r
while [ -z "$install_mode" ]; do
echo "### Select install mode:"
echo "### 1. Whole disk (wipe all Pensando filesystems)"
echo "### 2. Half of /data (lose /data contents)"
echo "### 3. Half of /data (save/restore /data contents)"
read -p '### Selection: ' r
case "X-$r" in
X-1) install_mode=FULL_DISK; ;;
X-2) install_mode=DATA_LOSE; ;;
X-3) install_mode=DATA_KEEP; ;;
*) fatal "ABORTED"; ;;
esac
done
}
# Detect pre-existing SONiC root/data partitions on /dev/mmcblk0 (matched by
# their well-known partition GUIDs) and decide whether repartitioning is
# required.  Sets: root_pn, data_pn, REPART_NEEDED.
check_existing_parts()
{
	local nparts i partuuid boot_partsize boot_lastsec data_firstsec

	nparts=$(sgdisk -p /dev/mmcblk0 | grep '^[ ]*[1-9]' | wc -l)
	for i in $(seq $nparts); do
		partuuid=$(sgdisk -i $i /dev/mmcblk0 | awk '/Partition unique GUID/ { print $NF }')
		case "$partuuid" in
		$DATA_PARTUUID) data_pn=$i; ;;
		$ROOT_PARTUUID) root_pn=$i; ;;
		esac
	done

	if [ $install_mode != FULL_DISK ]; then
		if [ $root_pn -ne 0 ]; then
			boot_partsize=$(sgdisk -i $root_pn /dev/mmcblk0 | awk -F '[( ]' '/Partition size/ {print int($6)}')
			boot_lastsec=$(sgdisk -i $root_pn /dev/mmcblk0 | awk '/Last sector/ {print $3}')
			if [ ${boot_partsize}G = $ROOT_PARTSIZE ]; then
				echo "SONiC root partitions already present with requested size. No repartition, only formatting"
			else
				# BUG FIX: message previously interpolated an undefined
				# ${partsize}; report the size actually computed above.
				echo "SONiC root partitions already present with mismatch size ${boot_partsize}G. Repartition needed"
				REPART_NEEDED=1
			fi
		fi
		if [ $data_pn -eq 0 ]; then
			echo "Data partition not found; Repartition needed"
			REPART_NEEDED=1
		elif [ $data_pn -ne $nparts ]; then
			# fatal() already writes to stderr; the old ">&2" here was redundant.
			fatal "Data partition is not the last partition; exiting."
		else
			data_firstsec=$(sgdisk -i $data_pn /dev/mmcblk0 | awk '/First sector/ {print $3}')
			if [ $data_firstsec -ne $((boot_lastsec+1)) ]; then
				echo "Data partition not contiguous with boot partition. Repartition needed"
				REPART_NEEDED=1
			fi
		fi
	fi
}
# Snapshot the contents of the existing /data partition into $R/root/data.tar
# before repartitioning (paired with restore_data()).
save_data()
{
echo "==> Saving /data"
mount /dev/mmcblk0p$data_pn $R/mnt
tar cvf $R/root/data.tar -C $R/mnt .
umount $R/mnt
}
# Whole-disk install: wipe the GPT on /dev/mmcblk0, create a single SONiC
# root partition of $ROOT_PARTSIZE, and format it.
setup_partitions_full()
{
local i
echo "==> Setting up partitions..."
root_pn=1
set +e
sgdisk -Z /dev/mmcblk0 >/dev/null 2>&1
partprobe >/dev/null 2>&1
sgdisk \
-n $root_pn:+0:+$ROOT_PARTSIZE -t $root_pn:8300 \
-u $root_pn:$ROOT_PARTUUID -c $root_pn:"SONiC Root Filesystem" \
/dev/mmcblk0 >/dev/null
# Wait for the kernel to drop the old partition device nodes.
# NOTE(review): polls specifically for mmcblk0p3 to disappear — presumably
# the last partition of the factory layout; confirm.
while true; do
partprobe
if [ ! -e $R/dev/mmcblk0p3 ]; then
break
fi
sleep 1
done
echo "==> Creating filesystems"
for i in $root_pn; do
mkfs.ext4 -F -q /dev/mmcblk0p$i >/dev/null
done
set -e
}
# Shared-disk install: preserve non-SONiC partitions.  If the existing layout
# is usable (REPART_NEEDED=0), just reformat the root partition; otherwise
# delete any old SONiC root/data partitions, recreate root ($ROOT_PARTSIZE)
# plus a data partition filling the rest of the disk, and format both.
setup_partitions_multi()
{
echo "==> Setting up partitions..."
set +e
if [ $REPART_NEEDED -eq 0 ]; then
mkfs.ext4 -F -q /dev/mmcblk0p$root_pn >/dev/null
else
if [ $root_pn -ne 0 ]; then
sgdisk -d $root_pn /dev/mmcblk0 >/dev/null
fi
[ $data_pn -ne 0 ] && sgdisk -d $data_pn /dev/mmcblk0 >/dev/null
# Default partition numbers when no SONiC root existed before.
# NOTE(review): partition 10 is also hard-coded in the bootloader config
# (root=/dev/mmcblk0p10 in create_bootloader_conf) — keep them in sync.
if [ $root_pn -eq 0 ]; then
root_pn=10
data_pn=$(($root_pn + 1))
fi
if [ $data_pn -eq 0 ]; then
data_pn=$(($root_pn + 1))
fi
sgdisk \
-n $root_pn:+0:+$ROOT_PARTSIZE -t $root_pn:8300 \
-u $root_pn:$ROOT_PARTUUID -c $root_pn:"SONiC Root Filesystem" \
-n $data_pn:+0:0 -t $data_pn:8300 -u $data_pn:$DATA_PARTUUID \
-c $data_pn:"Data Filesystem" \
/dev/mmcblk0 >/dev/null
# Randomize the disk GUID after rewriting the table.
sgdisk -U R /dev/mmcblk0 >/dev/null
# Wait until the kernel exposes the new data partition node.
while true; do
partprobe
if [ -e $R/dev/mmcblk0p$data_pn ]; then
break
fi
sleep 1
done
echo "==> Creating filesystems"
for i in $root_pn $data_pn; do
mkfs.ext4 -F -q /dev/mmcblk0p$i >/dev/null
done
fi
set -e
}
# Dispatch to the appropriate partitioning strategy based on $install_mode.
# BUG FIX: get_install_mode() sets install_mode to FULL_DISK (never
# INSTALL_FULL), so the old comparison could never match and the whole-disk
# path (setup_partitions_full) was unreachable.
setup_partitions()
{
	if [ $install_mode = FULL_DISK ]; then
		setup_partitions_full
	else
		setup_partitions_multi
	fi
}
# Restore the /data contents saved by save_data(), if a snapshot exists.
restore_data()
{
echo "==> Restoring /data"
if [ -f $R/root/data.tar ]; then
mount /dev/mmcblk0p$data_pn $R/mnt
tar xpvf $R/root/data.tar -C $R/mnt
umount $R/mnt
fi
}
# Write the sysboot-style bootloader entry for the installed image.
# NOTE(review): three things to confirm with the bootloader owner:
#   - ">>" appends, so repeated installs accumulate duplicate entries;
#   - the kernel/initrd versions are hard-coded (6.1.0-11-2-arm64);
#   - the stray "}" before EOF is emitted verbatim into boot.conf.
create_bootloader_conf()
{
echo "==> Create bootloader config"
cat <<EOF >> $root_mnt/$BL_CONF
default main
label main
kernel /$image_dir/boot/vmlinuz-6.1.0-11-2-arm64
initrd /$image_dir/boot/initrd.img-6.1.0-11-2-arm64
devicetree /$image_dir/boot/elba-asic-psci.dtb
append softdog.soft_panic=1 FW_NAME=mainfwa root=/dev/mmcblk0p10 rw rootwait rootfstype=ext4 loopfstype=squashfs loop=/$image_dir/fs.squashfs
}
EOF
}
# Mount the SONiC root partition and unpack the fs.zip payload into
# /image-<version>.  The dockerfs tarball is expanded separately unless
# docker_inram keeps it compressed for initrd-time expansion.
# NOTE(review): $docker_inram is never set in this script — presumably
# inherited from the sourced environment; confirm.
install_root_filesystem()
{
echo "==> Installing root filesystem"
mount /dev/mmcblk0p$root_pn $root_mnt
mkdir -p $root_mnt/$image_dir
# Decompress the file for the file system directly to the partition
if [ x"$docker_inram" = x"on" ]; then
# when disk is small, keep dockerfs.tar.gz in disk, expand it into ramfs during initrd
tar xfO $PKG $INSTALLER_PAYLOAD | unzip -o - -x "platform.tar.gz" -d $root_mnt/$image_dir
else
tar xfO $PKG $INSTALLER_PAYLOAD | unzip -o - -x "$FILESYSTEM_DOCKERFS" "platform.tar.gz" -d $root_mnt/$image_dir
TAR_EXTRA_OPTION="--numeric-owner"
mkdir -p $root_mnt/$image_dir/$DOCKERFS_DIR
# Stream dockerfs.tar.gz out of the zip payload and unpack it in place.
tar xfO $PKG $INSTALLER_PAYLOAD | unzip -op - "$FILESYSTEM_DOCKERFS" | tar xz $TAR_EXTRA_OPTION -f - -C $root_mnt/$image_dir/$DOCKERFS_DIR
fi
create_bootloader_conf
umount $root_mnt
}
# Select the "mainfwa" firmware slot in the fwsel MTD partition and point the
# u-boot environment at the installed root partition's boot.conf via sysboot.
set_boot_command()
{
local pn
#set to mainfwa where sonic is installed
mtd=/dev/$(grep fwsel /proc/mtd | sed -e 's/:.*//')
flash_erase -q $mtd 0 1
echo -n mainfwa | dd of=$mtd status=none
echo "==> Setting u-boot environment for Debian Boot"
# sysboot takes the partition number in hex.
pn=$(printf "%x" $root_pn)
fwenv -E \
-s kernel_comp_addr_r 88000000 \
-s kernel_comp_size 8000000 \
-s kernel_addr_r a0000000 \
-s fdt_addr_r bb100000 \
-s ramdisk_addr_r a4000000 \
-s bootcmd "sysboot mmc 0:$pn any bf000000 /$BL_CONF"
}
# Entry point: parse -p <package> -i, verify environment and package,
# partition the disk (preserving /data when possible), install the root
# filesystem and set the boot environment.
main()
{
while getopts ":p:i:" opt; do
case "$opt" in
p) PKG=$OPTARG; ;;
i) ACTION=INSTALL; ;;
*) true; ;;
esac
done
if [ "$PKG" = "" -o "$ACTION" != "INSTALL" ]; then
fatal "Needs -p filename -i all"
fi
# Extract only the MANIFEST so the package can be verified first.
tar xf $PKG MANIFEST
check_running_goldfw
check_package
# get_install_mode
# Interactive mode selection is disabled; always preserve /data.
install_mode=DATA_KEEP
check_existing_parts
if [ $install_mode = DATA_KEEP -a $REPART_NEEDED -eq 1 -a $data_pn -ne 0 ]; then
save_data
fi
setup_partitions
if [ $install_mode = DATA_KEEP -a $REPART_NEEDED -eq 1 ]; then
restore_data
fi
install_root_filesystem
set_boot_command
echo "==> Installation complete"
}
main "$@"

View File

@ -218,9 +218,9 @@ fi
# Decompress the file for the file system directly to the partition
if [ x"$docker_inram" = x"on" ]; then
# when disk is small, keep dockerfs.tar.gz in disk, expand it into ramfs during initrd
unzip -o $ONIE_INSTALLER_PAYLOAD -x "platform.tar.gz" -d $demo_mnt/$image_dir
unzip -o $INSTALLER_PAYLOAD -x "platform.tar.gz" -d $demo_mnt/$image_dir
else
unzip -o $ONIE_INSTALLER_PAYLOAD -x "$FILESYSTEM_DOCKERFS" "platform.tar.gz" -d $demo_mnt/$image_dir
unzip -o $INSTALLER_PAYLOAD -x "$FILESYSTEM_DOCKERFS" "platform.tar.gz" -d $demo_mnt/$image_dir
if [ "$install_env" = "onie" ]; then
TAR_EXTRA_OPTION="--numeric-owner"
@ -228,11 +228,11 @@ else
TAR_EXTRA_OPTION="--numeric-owner --warning=no-timestamp"
fi
mkdir -p $demo_mnt/$image_dir/$DOCKERFS_DIR
unzip -op $ONIE_INSTALLER_PAYLOAD "$FILESYSTEM_DOCKERFS" | tar xz $TAR_EXTRA_OPTION -f - -C $demo_mnt/$image_dir/$DOCKERFS_DIR
unzip -op $INSTALLER_PAYLOAD "$FILESYSTEM_DOCKERFS" | tar xz $TAR_EXTRA_OPTION -f - -C $demo_mnt/$image_dir/$DOCKERFS_DIR
fi
mkdir -p $demo_mnt/$image_dir/platform
unzip -op $ONIE_INSTALLER_PAYLOAD "platform.tar.gz" | tar xz $TAR_EXTRA_OPTION -f - -C $demo_mnt/$image_dir/platform
unzip -op $INSTALLER_PAYLOAD "platform.tar.gz" | tar xz $TAR_EXTRA_OPTION -f - -C $demo_mnt/$image_dir/platform
if [ "$install_env" = "onie" ]; then
# Store machine description in target file system

View File

@ -19,7 +19,7 @@ FILESYSTEM_ROOT=./fsroot-${TARGET_MACHINE}
FILESYSTEM_SQUASHFS=fs.squashfs
## Filename for onie installer payload, will be the main part of onie installer
ONIE_INSTALLER_PAYLOAD=fs.zip
INSTALLER_PAYLOAD=fs.zip
## Filename for docker file system
FILESYSTEM_DOCKERFS=dockerfs.tar.gz
@ -50,3 +50,6 @@ OUTPUT_ABOOT_IMAGE=target/sonic-aboot-$TARGET_MACHINE.swi
## Aboot boot image name
ABOOT_BOOT_IMAGE=.sonic-boot.swi
## Output file name for dsc installer
OUTPUT_DSC_IMAGE=target/sonic-$TARGET_MACHINE.tar

View File

@ -19,7 +19,7 @@ FILESYSTEM_ROOT=./fsroot-${TARGET_MACHINE}
FILESYSTEM_SQUASHFS=fs.squashfs
## Filename for onie installer payload, will be the main part of onie installer
ONIE_INSTALLER_PAYLOAD=fs.zip
INSTALLER_PAYLOAD=fs.zip
## Filename for docker file system
FILESYSTEM_DOCKERFS=dockerfs.tar.gz

View File

@ -19,7 +19,7 @@ FILESYSTEM_ROOT=./fsroot-${TARGET_MACHINE}
FILESYSTEM_SQUASHFS=fs.squashfs
## Filename for onie installer payload, will be the main part of onie installer
ONIE_INSTALLER_PAYLOAD=fs.zip
INSTALLER_PAYLOAD=fs.zip
## Filename for docker file system
FILESYSTEM_DOCKERFS=dockerfs.tar.gz
@ -56,3 +56,6 @@ OUTPUT_KVM_4ASIC_IMAGE=target/sonic-4asic-$TARGET_MACHINE.img
### Output file name for 6-asic kvm image
OUTPUT_KVM_6ASIC_IMAGE=target/sonic-6asic-$TARGET_MACHINE.img
## Output file name for dsc installer
OUTPUT_DSC_IMAGE=target/sonic-$TARGET_MACHINE.tar

View File

@ -0,0 +1,4 @@
# Dependency metadata for the pre-built docker-dpu-base image: rebuild when
# these rule files change; never use the shared build cache for it.
DEP_FILES := rules/docker-dpu-base.dep rules/docker-dpu-base.mk
$(DOCKER_DPU_BASE)_CACHE_MODE := none
$(DOCKER_DPU_BASE)_DEP_FILES := $(DEP_FILES)

View File

@ -0,0 +1,10 @@
# docker dpu image for load
# Pre-built base image fetched from the Pensando artifacts repository rather
# than built from source; registered as a downloaded docker image.
DOCKER_DPU_BASE_STEM = docker-dpu-base
DOCKER_DPU_BASE = $(DOCKER_DPU_BASE_STEM).gz
$(DOCKER_DPU_BASE)_URL = https://github.com/pensando/dsc-artifacts/blob/main/docker-dpu-base.gz?raw=true
DOWNLOADED_DOCKER_IMAGES += $(DOCKER_DPU_BASE)

View File

@ -0,0 +1,7 @@
# Dependency metadata for the docker-dpu image: rebuild when these platform
# rule files change; never use the shared build cache for it.
DOCKER_DPU_STEM = docker-dpu
DOCKER_DPU = $(DOCKER_DPU_STEM).gz
# NOTE(review): DPATH is computed but not referenced in this file — confirm
# whether it is consumed elsewhere or is dead.
DPATH := $($(DOCKER_DPU)_PATH)
DEP_FILES := platform/pensando/docker-dpu.dep platform/pensando/docker-dpu.mk
$(DOCKER_DPU)_CACHE_MODE := none
$(DOCKER_DPU)_DEP_FILES := $(DEP_FILES)

View File

@ -0,0 +1,20 @@
# docker dpu image for load
# Builds the "dpu" container on top of docker-dpu-base plus the bullseye
# config-engine image, and installs it into the SONiC image.
DOCKER_DPU_STEM = docker-dpu
DOCKER_DPU = $(DOCKER_DPU_STEM).gz
$(DOCKER_DPU)_SQUASH = n
$(DOCKER_DPU)_PATH = $(PLATFORM_PATH)/$(DOCKER_DPU_STEM)
$(DOCKER_DPU)_LOAD_DOCKERS = $(DOCKER_DPU_BASE)
SONIC_DOCKER_IMAGES += $(DOCKER_DPU)
$(DOCKER_DPU)_LOAD_DOCKERS += $(DOCKER_CONFIG_ENGINE_BULLSEYE)
$(DOCKER_DPU)_PACKAGE_NAME = dpu
$(DOCKER_DPU)_CONTAINER_NAME = dpu
$(DOCKER_DPU)_VERSION = 1.0.0
SONIC_BULLSEYE_DOCKERS += $(DOCKER_DPU)
SONIC_INSTALL_DOCKER_IMAGES += $(DOCKER_DPU)

View File

@ -0,0 +1,2 @@
FROM docker-dpu-base
# SKIP_HOOK

View File

@ -0,0 +1,20 @@
# docker image for pensando syncd
# (comment fixed: this file was copy-pasted from the centec platform rules)
DOCKER_SYNCD_PLATFORM_CODE = pensando
include $(PLATFORM_PATH)/../template/docker-syncd-bullseye.mk
$(DOCKER_SYNCD_BASE)_DEPENDS += $(SYNCD)
$(DOCKER_SYNCD_BASE)_DBG_DEPENDS += $(SYNCD_DBG) \
$(LIBSWSSCOMMON_DBG) \
$(LIBSAIMETADATA_DBG) \
$(LIBSAIREDIS_DBG)
$(DOCKER_SYNCD_BASE)_VERSION = 1.0.0
$(DOCKER_SYNCD_BASE)_PACKAGE_NAME = syncd
# Run privileged with warmboot state, machine.conf, the syncd socket
# directory and /etc/sonic (read-only) mounted from the host.
$(DOCKER_SYNCD_BASE)_RUN_OPT += --privileged -t
$(DOCKER_SYNCD_BASE)_RUN_OPT += -v /host/warmboot:/var/warmboot
$(DOCKER_SYNCD_BASE)_RUN_OPT += -v /host/machine.conf:/etc/machine.conf
$(DOCKER_SYNCD_BASE)_RUN_OPT += -v /var/run/docker-syncd:/var/run/sswsyncd
$(DOCKER_SYNCD_BASE)_RUN_OPT += -v /etc/sonic:/etc/sonic:ro

View File

@ -0,0 +1,34 @@
# Syncd container for the Pensando platform, built on the bullseye
# config-engine base image.  This is a j2 template: the list of syncd debs
# (docker_syncd_pensando_debs) is supplied at build time.
FROM docker-config-engine-bullseye-{{DOCKER_USERNAME}}:{{DOCKER_USERTAG}}
ARG docker_container_name
## Make apt-get non-interactive
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update
RUN apt-get install -y protobuf-compiler libprotobuf-dev libgrpc++-dev
# Copy and install the platform syncd packages produced by the build.
COPY \
{% for deb in docker_syncd_pensando_debs.split(' ') -%}
debs/{{ deb }}{{' '}}
{%- endfor -%}
debs/
RUN dpkg -i \
{% for deb in docker_syncd_pensando_debs.split(' ') -%}
debs/{{ deb }}{{' '}}
{%- endfor %}
## TODO: add kmod into Depends
RUN apt-get install -yf kmod
COPY ["supervisord.conf", "/etc/supervisor/conf.d/"]
COPY ["files/supervisor-proc-exit-listener", "/usr/bin/"]
COPY ["critical_processes", "/etc/supervisor/"]
## Clean up
RUN apt-get clean -y; apt-get autoclean -y; apt-get autoremove -y
RUN rm -rf /debs
ENTRYPOINT ["/usr/local/bin/supervisord"]

View File

@ -0,0 +1 @@
program:syncd

View File

@ -0,0 +1,39 @@
[supervisord]
logfile_maxbytes=1MB
logfile_backups=2
nodaemon=true
[eventlistener:dependent-startup]
command=python3 -m supervisord_dependent_startup
autostart=true
autorestart=unexpected
startretries=0
exitcodes=0,3
events=PROCESS_STATE
buffer_size=1024
[eventlistener:supervisor-proc-exit-listener]
command=python3 /usr/bin/supervisor-proc-exit-listener --container-name syncd
events=PROCESS_STATE_EXITED,PROCESS_STATE_RUNNING
autostart=true
autorestart=unexpected
buffer_size=1024
[program:rsyslogd]
command=/usr/sbin/rsyslogd -n -iNONE
priority=1
autostart=false
autorestart=unexpected
stdout_logfile=syslog
stderr_logfile=syslog
dependent_startup=true
[program:syncd]
command=/usr/bin/syncd_start.sh
priority=2
autostart=false
autorestart=false
stdout_logfile=syslog
stderr_logfile=syslog
dependent_startup=true
dependent_startup_wait_for=rsyslogd:running

View File

@ -0,0 +1,9 @@
# Pensando Elba kernel modules
IONIC_MODULE_VERSION = 22.11.1-001
IONIC_MODULE = ionic-modules_$(IONIC_MODULE_VERSION)_arm64.deb
$(IONIC_MODULE)_SRC_PATH = $(PLATFORM_PATH)/dsc-drivers
$(IONIC_MODULE)_DEPENDS += $(LINUX_HEADERS) $(LINUX_HEADERS_COMMON)
$(IONIC_MODULE)_MACHINE = pensando
SONIC_DPKG_DEBS += $(IONIC_MODULE)

View File

@ -0,0 +1,5 @@
ionic (22.11.1-001) unstable; urgency=medium
* Initial packaging
-- Guohan Lu <lguohan@gmail.com> Tue, 19 APR 2022 12:30:00 +0000

View File

@ -0,0 +1 @@
10

View File

@ -0,0 +1,14 @@
Source: ionic
Section: main
Priority: extra
Maintainer: Shantanu Shrivastava <shanshri@amd.com>
Build-Depends: debhelper (>= 8.0.0), bzip2
Standards-Version: 3.9.3
#Vcs-Git: git://git.debian.org/collab-maint/bcmsdk.git
#Vcs-Browser: http://git.debian.org/?p=collab-maint/bcmsdk.git;a=summary
Package: ionic-modules
Architecture: arm64
Section: main
Depends: linux-image-6.1.0-11-2-arm64-unsigned
Description: kernel modules for pensando elba

View File

@ -0,0 +1,4 @@
src/drivers/linux/build/mdev.ko lib/modules/6.1.0-11-2-arm64/extra
src/drivers/linux/build/mnet_uio_pdrv_genirq.ko lib/modules/6.1.0-11-2-arm64/extra
src/drivers/linux/build/ionic_mnic.ko lib/modules/6.1.0-11-2-arm64/extra
systemd/ionic-modules.service lib/systemd/system

View File

@ -0,0 +1,65 @@
#!/usr/bin/make -f
# -*- makefile -*-
# Sample debian/rules that uses debhelper.
# This file was originally written by Joey Hess and Craig Small.
# As a special exception, when this file is copied by dh-make into a
# dh-make output file, you may use that output file without restriction.
# This special exception was added by Craig Small in version 0.37 of dh-make.
include /usr/share/dpkg/pkg-info.mk
# Uncomment this to turn on verbose mode.
#export DH_VERBOSE=1
export INSTALL_MOD_DIR:=extra
PACKAGE_PRE_NAME := ionic-modules
KVERSION ?= $(shell uname -r)
KERNEL_SRC := /lib/modules/$(KVERSION)
MOD_SRC_DIR:= $(shell pwd)
%:
dh $@
clean:
dh_testdir
dh_testroot
dh_clean
ARCH=aarch64 KSRC=/lib/modules/$(KVERSION)/build KMOD_OUT_DIR=$(CURDIR)/src/drivers/linux/build KMOD_SRC_DIR=$(CURDIR)/src/drivers/linux make -C $(CURDIR)/src/drivers/linux clean
build:
ARCH=aarch64 KSRC=/lib/modules/$(KVERSION)/build KMOD_OUT_DIR=$(CURDIR)/src/drivers/linux/build KMOD_SRC_DIR=$(CURDIR)/src/drivers/linux make -C $(CURDIR)/src/drivers/linux
binary: binary-arch binary-indep
# Nothing to do
binary-arch:
# Nothing to do
#install: build
#dh_testdir
#dh_testroot
#dh_clean -k
#dh_installdirs
binary-indep:
dh_testdir
dh_installdirs
# Resuming debhelper scripts
dh_testroot
dh_install
dh_installchangelogs
dh_installdocs
dh_systemd_enable
dh_installinit
dh_systemd_start
dh_link
dh_fixperms
dh_compress
dh_strip
dh_installdeb
dh_gencontrol
dh_md5sums
dh_builddeb
.PHONY: build binary binary-arch binary-indep clean

View File

@ -0,0 +1,167 @@
# dsc-drivers
## Overview
This directory holds the three drivers that are used for device support
inside the Pensando DSC: ionic/ionic_mnic, mnet, and mnet_uio_pdrv_genirq.
These are all out-of-tree drivers, not used in the standard kernel tree.
However, a variant of the ionic driver is in the upstream kernel, but does
not support the internal DSC platform.
When building for the host, only the "ionic" driver is built,
and uses ionic_bus_pci.c. In tandem with the kconfig files, this
driver can be built on a number of different Linux distributions and
kernel versions. When building for the DSC, "ionic_mnic" is built, with
ionic_bus_platform.c, along with mnet and mnet_uio_pdrv_genirq drivers.
The mnet and mnet_uio_pdrv_genirq drivers are only built on the DSC
Linux kernel, so don't make use of the kcompat facilities.
In the DSC build the driver .ko files are found in /platform/drivers,
and are loaded by /nic/tools/sysinit.sh. Later, the nicmgr process reads
its device description file, e.g. /platform/etc/nicmgrd/device.json,
to determine what network interface ports are to be created. It then uses
ioctl() calls into the mnet driver to instantiate those network interfaces.
## Drivers
drivers/common:
API description files for communication between drivers
and the DSC.
drivers/linux/eth/ionic:
Driver that supports standard network interface ports.
drivers/linux/mnet:
Driver that listens for ioctl() commands from userland to start
and stop the network interface ports.
drivers/linux/mnet_uio_pdrv_genirq:
UIO interface driver for supporting userspace I/O platform drivers.
## Building
The Makefile in drivers/linux will build all three drivers when
ARCH=aarch64, else will build the host version of ionic. Simply cd to
the drivers/linux directory and type 'make'.
Well, okay maybe not that simple any more - it should be, but some things
changed in the makefiles internally, and it's a little more complex. Also,
we wanted to keep this archive closer to what is used internally.
If the headers for your current Linux kernel are findable under
/lib/modules with kernel config values defined, this should work:
make M=`pwd` KCFLAGS="-Werror -Ddrv_ver=\\\"1.15.9.7\\\"" modules
If the kernel config file doesn't have the Pensando configuration strings
set in it, you can add them in the make line.
For Naples drivers:
make M=`pwd` KCFLAGS="-Werror -Ddrv_ver=\\\"1.15.9.7\\\"" CONFIG_IONIC_MNIC=m CONFIG_MDEV=m CONFIG_MNET_UIO_PDRV_GENIRQ=m modules
For the host driver:
make M=`pwd` KCFLAGS="-Werror -Ddrv_ver=\\\"1.15.9.7\\\"" CONFIG_IONIC=m modules
As usual, if the Linux headers are elsewhere, add the appropriate -C magic:
make -C <kernel-header-path> M=`pwd` ...
## History
2020-07-07 - initial drivers using 1.8.0-E-48
2021-01-08 - driver updates to 1.15.3-C-14
- FW update fixes
- Makefile cleanups
- Add support for choosing individual Tx and Rx interrupts rather than paired
- Fix memory leaks and timing issues
- Kcompat fixes for newer upstream and Red Hat kernels
- Add interrupt affinity option for mnic_ionic use
- Other optimizations and stability fixes
2021-02-02 - driver updates to 1.15.4-C-8
- Added support for PTP
- Dropped support for macvlan offload
- Cleaned some 'sparse' complaints
- Add support for devlink firmware update
- Dynamic interrupt coalescing
- Add support for separate Tx interrupts
- Rework queue reconfiguration for better memory handling
- Reorder some configuration steps to remove race conditions
- Changes to napi handling for better performance
2021-02-24 - driver updates to 1.15.5-C-4
- Add weak links for PTP api for compile and load on DSC kernel without PTP support
- Don't set up PTP in ionic_mnic if PTP bar is not available
- Closed a small window to prevent starting queues when in FW reset
- Other small bug fixes to PTP support
- Compat fixes for compiling on Linux v5.11
- Guard against adminq use after free
2021-03-29 - driver updates to 1.15.6-C-8
- better error case handling
- bug fixes for PTP support and error handling
- clean up mnet code to upstream code format standards
- updates for compiling under v5.10
2021-04-30 - driver updates to 1.15.7-C-3
- Copyright updates
- Minor code cleanups to better match upstream drivers
- Renamed mnet to mdev to be more generic
- Added support in mdev for future mcrypt devices
2021-05-19 - driver updates to 1.15.8-C-12
- added support for cmb-rings - Tx/Rx descriptor rings allocated in
DSC Controller Memory Buffers rather than on host
- rx_mode locking to block thread race
- struct ionic_lif rework for better cache line layout
2021-06-30 - driver updates for 1.15.9-C-7
- monitoring fw status generation for fw restart hints
- catch lack of PTP support earlier in service routine
- makefile fixes for sles 15 sp3
- lower page splitting limit to better account for headers
- VF stats area fix for PF
- better thread-safe rx_mode work
drivers: updates for 1.15.9.21
2021-08-04 - driver updates for 1.15.9-C-21
- Added watchdog to platform for closer tracking of FW updates
and crash recycle
- Fixed dynamic interrupt management accounting
- Fixes for mac filter management
2021-08-16 - driver updates for 1.15.9-C-26
- Add work-around for Elba doorbell issue
2021-08-19 - driver updates for 1.15.9-C-28
- Additional queue config locking for stress timing issue
- Suppressed unnecessary log message
2021-08-25 - driver update for 1.15.9-C-32
- added use of reserved memory region for dma
2022-02-02 - driver update for 1.15.9-C-64
- Remove an unnecessary kcompat macro
2022-02-03 - driver update for 1.15.9-C-65
- add vlan filter management to mac filter management
- update filter management for handling overflow
- updates for recent upstream kernels and distros
- better handling of various FW recovery scenarios
2022-06-20 - driver update for 1.15.9-C-100
- various code cleanups
- add debugfs support to count number of Tx/Rx allocations
- better memory handling
- minor bug fixes
2022-12-05 - driver update for 22.11.1-001
- update ionic drivers to 22.11.1-001; version numbers now follow
the driver release numbers rather than the DSC firmware release version
- enable tunnel offloads
- support for changes in MTU, queue count, and ring length while CMB is active
- set random VF mac addresses by default
- better oprom debugging support
- Rx/Tx performance tuning
- fixes imported from upstream driver
- bug fixes

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,145 @@
/* SPDX-License-Identifier: (GPL-2.0 OR Linux-OpenIB) OR BSD-2-Clause */
/* Copyright (c) 2018 - 2021 Pensando Systems, Inc. All rights reserved. */
#ifndef IONIC_REGS_H
#define IONIC_REGS_H
#include <linux/io.h>
/** struct ionic_intr - interrupt control register set.
* @coal_init: coalesce timer initial value.
* @mask: interrupt mask value.
* @credits: interrupt credit count and return.
* @mask_assert: interrupt mask value on assert.
* @coal: coalesce timer time remaining.
*/
struct ionic_intr {
u32 coal_init;
u32 mask;
u32 credits;
u32 mask_assert;
u32 coal;
u32 rsvd[3];
};
/** enum ionic_intr_mask_vals - valid values for mask and mask_assert.
* @IONIC_INTR_MASK_CLEAR: unmask interrupt.
* @IONIC_INTR_MASK_SET: mask interrupt.
*/
enum ionic_intr_mask_vals {
IONIC_INTR_MASK_CLEAR = 0,
IONIC_INTR_MASK_SET = 1,
};
/** enum ionic_intr_credits_bits - bitwise composition of credits values.
* @IONIC_INTR_CRED_COUNT: bit mask of credit count, no shift needed.
* @IONIC_INTR_CRED_COUNT_SIGNED: bit mask of credit count, including sign bit.
* @IONIC_INTR_CRED_UNMASK: unmask the interrupt.
* @IONIC_INTR_CRED_RESET_COALESCE: reset the coalesce timer.
* @IONIC_INTR_CRED_REARM: unmask the interrupt and reset the coalesce timer.
*/
enum ionic_intr_credits_bits {
IONIC_INTR_CRED_COUNT = 0x7fffu,
IONIC_INTR_CRED_COUNT_SIGNED = 0xffffu,
IONIC_INTR_CRED_UNMASK = 0x10000u,
IONIC_INTR_CRED_RESET_COALESCE = 0x20000u,
IONIC_INTR_CRED_REARM = (IONIC_INTR_CRED_UNMASK |
IONIC_INTR_CRED_RESET_COALESCE),
};
/* Load the coalesce timer for interrupt @intr_idx with its initial value. */
static inline void ionic_intr_coal_init(struct ionic_intr __iomem *intr_ctrl,
					int intr_idx, u32 coal)
{
	iowrite32(coal, &intr_ctrl[intr_idx].coal_init);
}

/* Write the mask register: IONIC_INTR_MASK_SET masks the interrupt,
 * IONIC_INTR_MASK_CLEAR unmasks it.
 */
static inline void ionic_intr_mask(struct ionic_intr __iomem *intr_ctrl,
				   int intr_idx, u32 mask)
{
	iowrite32(mask, &intr_ctrl[intr_idx].mask);
}

/* Return @cred credits to interrupt @intr_idx, OR'd with optional @flags
 * (unmask / reset-coalesce).  If @cred exceeds the 15-bit credit field,
 * warn once and substitute the device's current credit count so the
 * write stays within range.
 */
static inline void ionic_intr_credits(struct ionic_intr __iomem *intr_ctrl,
				      int intr_idx, u32 cred, u32 flags)
{
	if (WARN_ON_ONCE(cred > IONIC_INTR_CRED_COUNT)) {
		cred = ioread32(&intr_ctrl[intr_idx].credits);
		cred &= IONIC_INTR_CRED_COUNT_SIGNED;
	}

	iowrite32(cred | flags, &intr_ctrl[intr_idx].credits);
}

/* Read the current credit count and write it back together with @flags
 * (a read-modify-write of the credits register).
 */
static inline void ionic_intr_clean_flags(struct ionic_intr __iomem *intr_ctrl,
					  int intr_idx, u32 flags)
{
	u32 cred;

	cred = ioread32(&intr_ctrl[intr_idx].credits);
	cred &= IONIC_INTR_CRED_COUNT_SIGNED;
	cred |= flags;
	iowrite32(cred, &intr_ctrl[intr_idx].credits);
}

/* Return all outstanding credits and reset the coalesce timer, without
 * setting the unmask flag.
 */
static inline void ionic_intr_clean(struct ionic_intr __iomem *intr_ctrl,
				    int intr_idx)
{
	ionic_intr_clean_flags(intr_ctrl, intr_idx,
			       IONIC_INTR_CRED_RESET_COALESCE);
}

/* Set the mask value the device applies when it asserts the interrupt. */
static inline void ionic_intr_mask_assert(struct ionic_intr __iomem *intr_ctrl,
					  int intr_idx, u32 mask)
{
	iowrite32(mask, &intr_ctrl[intr_idx].mask_assert);
}
/** enum ionic_dbell_bits - bitwise composition of dbell values.
*
* @IONIC_DBELL_QID_MASK: unshifted mask of valid queue id bits.
* @IONIC_DBELL_QID_SHIFT: queue id shift amount in dbell value.
* @IONIC_DBELL_QID: macro to build QID component of dbell value.
*
* @IONIC_DBELL_RING_MASK: unshifted mask of valid ring bits.
* @IONIC_DBELL_RING_SHIFT: ring shift amount in dbell value.
* @IONIC_DBELL_RING: macro to build ring component of dbell value.
*
* @IONIC_DBELL_RING_0: ring zero dbell component value.
* @IONIC_DBELL_RING_1: ring one dbell component value.
* @IONIC_DBELL_RING_2: ring two dbell component value.
* @IONIC_DBELL_RING_3: ring three dbell component value.
*
* @IONIC_DBELL_INDEX_MASK: bit mask of valid index bits, no shift needed.
*/
enum ionic_dbell_bits {
IONIC_DBELL_QID_MASK = 0xffffff,
IONIC_DBELL_QID_SHIFT = 24,
#define IONIC_DBELL_QID(n) \
(((u64)(n) & IONIC_DBELL_QID_MASK) << IONIC_DBELL_QID_SHIFT)
IONIC_DBELL_RING_MASK = 0x7,
IONIC_DBELL_RING_SHIFT = 16,
#define IONIC_DBELL_RING(n) \
(((u64)(n) & IONIC_DBELL_RING_MASK) << IONIC_DBELL_RING_SHIFT)
IONIC_DBELL_RING_0 = 0,
IONIC_DBELL_RING_1 = IONIC_DBELL_RING(1),
IONIC_DBELL_RING_2 = IONIC_DBELL_RING(2),
IONIC_DBELL_RING_3 = IONIC_DBELL_RING(3),
IONIC_DBELL_INDEX_MASK = 0xffff,
};
/* Ring a doorbell: write the composed (qid | ring | index) value @val to
 * the doorbell slot for queue type @qtype.
 *
 * On the DSC-internal (CONFIG_IONIC_MNIC) build an explicit wmb() plus
 * writeq_relaxed() is used instead of writeq().
 * NOTE(review): presumably related to the "Elba doorbell issue"
 * work-around mentioned in the driver history -- confirm the required
 * ordering semantics before changing this sequence.
 */
static inline void ionic_dbell_ring(u64 __iomem *db_page, int qtype, u64 val)
{
#if defined(CONFIG_IONIC_MNIC)
	wmb();
	writeq_relaxed(val, &db_page[qtype]);
#else
	writeq(val, &db_page[qtype]);
#endif
}
#endif /* IONIC_REGS_H */

View File

@ -0,0 +1,116 @@
# Makefile for the Pensando ionic out-of-tree drivers.
#
# Invoked two ways:
#  - by kbuild (KERNELRELEASE set): only declare which objdirs to descend into
#  - directly by a user or build script: discover the kernel source tree and
#    configuration, then re-invoke kbuild for each requested driver.
ifneq ($(KERNELRELEASE),)

obj-$(CONFIG_IONIC) += eth/ionic/
obj-$(CONFIG_IONIC_MNIC) += eth/ionic/
obj-$(CONFIG_MDEV) += mdev/
obj-$(CONFIG_MNET_UIO_PDRV_GENIRQ) += mnet_uio_pdrv_genirq/

else

IONIC_ETH_SRC = $(CURDIR)/eth/ionic

#KOPT += V=1		# verbose build
#KOPT += W=1		# extra warnings
#KOPT += C=1		# static analysis
#KOPT += CHECK=sparse	# static analysis tool
#KOPT += CHECK=scripts/coccicheck

default: all

# Discover kernel configuration.
#
# Override running kernel with
# `make KSRC=/path/to/your/sources` or
# `export KSRC=/path/to/your/sources`
#

ifeq ($(ARCH),aarch64)

# Ionic mnic and mdev for drivers ARM
KSRC ?= ${NICDIR}/buildroot/output/${ASIC}/linux-headers
KMOD_OUT_DIR ?= ${BLD_OUT_DIR}/drivers_submake
KMOD_SRC_DIR ?= ${TOPDIR}/platform/drivers/linux-ionic
ETH_KOPT += CONFIG_IONIC_MNIC=m
ETH_KOPT += CONFIG_MDEV=m
ETH_KOPT += CONFIG_MNET_UIO_PDRV_GENIRQ=m
KOPT += ARCH=arm64
KCFLAGS += -DCONFIG_IONIC_MNIC
KCFLAGS += -DCONFIG_MDEV
KCFLAGS += -DCONFIG_MNET_UIO_PDRV_GENIRQ
ALL = mnic
ALL += mnet_uio_pdrv_genirq
ALL += mdev
export PATH := $(PATH):$(TOOLCHAIN_DIR)/bin
KSYMS_MNIC = "KBUILD_EXTRA_SYMBOLS=${KMOD_OUT_DIR}/Module.symvers.mnic"
# NOTE(review): this expands to "<KSYMS_MNIC> <path>", i.e. the second
# symvers file is not part of a KBUILD_EXTRA_SYMBOLS assignment as
# presumably intended -- verify against the mdev link step.
KSYMS = "${KSYMS_MNIC} ${KMOD_OUT_DIR}/Module.symvers.uio"

else

DVER = $(shell git describe --tags 2>/dev/null)

# Ionic driver for host
include linux_ver.mk

KSRC ?= /lib/modules/$(shell uname -r)/build
ETH_KOPT += CONFIG_IONIC=m
ETH_KOPT += CONFIG_IONIC_MNIC=_
ETH_KOPT += CONFIG_MDEV=_
ETH_KOPT += CONFIG_MNET_UIO_PDRV_GENIRQ=_
# Initialize the flag list first, then append the config define.  The
# previous order ("KCFLAGS += -DCONFIG_IONIC" followed by
# "KCFLAGS = -Werror") silently discarded -DCONFIG_IONIC, since a plain
# "=" assignment replaces the variable's whole value.
KCFLAGS = -Werror
KCFLAGS += -DCONFIG_IONIC
KCFLAGS += $(EXTRA_CFLAGS)
ALL = eth

endif

ifeq ($(DVER),)
DVER = "22.11.1-001"
endif
KCFLAGS += -Ddrv_ver=\\\"$(DVER)\\\"
KOPT += KCFLAGS="$(KCFLAGS)"

all: $(ALL)

KBUILD_RULE = $(MAKE) -C $(KSRC) $(KOPT) M=$(CURDIR)

mnic: KOPT+=$(ETH_KOPT)
mnic:
	@echo "===> Building MNIC driver "
	touch $(KMOD_OUT_DIR)/Makefile || true
	$(MAKE) -C $(KSRC) V=1 M=$(KMOD_OUT_DIR) src=$(KMOD_SRC_DIR)/eth/ionic $(KOPT)
	mv ${KMOD_OUT_DIR}/Module.symvers ${KMOD_OUT_DIR}/Module.symvers.mnic

mnet_uio_pdrv_genirq: KOPT+=$(ETH_KOPT)
mnet_uio_pdrv_genirq:
	@echo "===> Building MNET_UIO driver "
	$(MAKE) -C $(KSRC) V=1 M=$(KMOD_OUT_DIR) src=$(KMOD_SRC_DIR)/mnet_uio_pdrv_genirq $(KOPT)
	mv ${KMOD_OUT_DIR}/Module.symvers ${KMOD_OUT_DIR}/Module.symvers.uio

mdev: KOPT+=$(ETH_KOPT)
mdev:
	@echo "===> Building MDEV driver "
	$(MAKE) -C $(KSRC) $(KSYMS) V=1 M=$(KMOD_OUT_DIR) src=$(KMOD_SRC_DIR)/mdev $(KOPT)

eth: KOPT+=$(ETH_KOPT)
eth:
	$(KBUILD_RULE)

clean: KOPT+=$(ETH_KOPT)
clean:
	$(KBUILD_RULE) clean

install: modules_install
modules_install: KOPT+=$(ETH_KOPT)
modules_install:
	$(KBUILD_RULE) modules_install

cscope:
	find $(IONIC_ETH_SRC) -name '*.[ch]' > cscope.files
	cscope -bkq

.PHONY: default all mnic mdev mnet_uio_pdrv_genirq eth clean install modules_install cscope

endif

View File

@ -0,0 +1,34 @@
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2019 - 2020 Pensando Systems, Inc
#
# Pensando device configuration
#
config NET_VENDOR_PENSANDO
bool "Pensando devices"
default y
help
If you have a Distributed Services Card (DSC) belonging to this
class, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
the questions about Pensando cards. If you say Y, you will be asked
for your specific card in the following questions.
if NET_VENDOR_PENSANDO
config IONIC
tristate "Pensando DSC Ethernet Support"
depends on 64BIT && PCI
select NET_DEVLINK
help
This enables Ethernet support for the Pensando family of Distributed
Services Cards (DSCs). More specific information on this driver can
be found in
<file:Documentation/networking/device_drivers/pensando/ionic.rst>.
To compile this driver as a module, choose M here. The module
will be called ionic.
endif # NET_VENDOR_PENSANDO

View File

@ -0,0 +1,19 @@
# SPDX-License-Identifier: GPL-2.0
# Copyright(c) 2017 - 2019 Pensando Systems, Inc
#
# Kbuild makefile for the ionic driver.  The same sources build either the
# host PCI driver (CONFIG_IONIC -> ionic.o, with ionic_bus_pci.o) or the
# DSC-internal platform driver (CONFIG_IONIC_MNIC -> ionic_mnic.o, with
# ionic_bus_platform.o).

obj-$(CONFIG_IONIC) := ionic.o
obj-$(CONFIG_IONIC_MNIC) := ionic_mnic.o

ccflags-y := -g -I$(KMOD_SRC_DIR)/../common

ionic-y := ionic_main.o ionic_bus_pci.o ionic_dev.o ionic_ethtool.o \
	ionic_lif.o ionic_rx_filter.o ionic_txrx.o ionic_debugfs.o \
	ionic_api.o ionic_stats.o ionic_devlink.o kcompat.o ionic_fw.o \
	dim.o net_dim.o
# PHC (PTP hardware clock) objects only when PTP support is configured
ionic-$(CONFIG_PTP_1588_CLOCK) += ionic_phc.o

ionic_mnic-y := ionic_main.o ionic_bus_platform.o ionic_dev.o ionic_ethtool.o \
	ionic_lif.o ionic_rx_filter.o ionic_txrx.o ionic_debugfs.o \
	ionic_api.o ionic_stats.o ionic_devlink.o kcompat.o ionic_fw.o \
	dim.o net_dim.o
ionic_mnic-$(CONFIG_PTP_1588_CLOCK) += ionic_phc.o ionic_phc_weak.o

View File

@ -0,0 +1,85 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* Copyright (c) 2019, Mellanox Technologies inc. All rights reserved.
*/
#ifndef CONFIG_DIMLIB
#include "dim.h"
/* dim_on_top - decide whether the current profile is a good parking spot.
 *
 * True when already parked, or when the step history shows the classic
 * local-optimum signature: exactly one step in the current direction
 * after more than one step back the other way.
 */
bool dim_on_top(struct dim *dim)
{
	if (dim->tune_state == DIM_PARKING_ON_TOP ||
	    dim->tune_state == DIM_PARKING_TIRED)
		return true;

	if (dim->tune_state == DIM_GOING_RIGHT)
		return (dim->steps_left > 1) && (dim->steps_right == 1);

	/* DIM_GOING_LEFT */
	return (dim->steps_right > 1) && (dim->steps_left == 1);
}
//EXPORT_SYMBOL(dim_on_top);
/* dim_turn - reverse the direction of profile movement.
 *
 * GOING_RIGHT becomes GOING_LEFT and vice versa, zeroing the step count
 * for the new direction.  Parked states are left untouched.
 */
void dim_turn(struct dim *dim)
{
	if (dim->tune_state == DIM_GOING_RIGHT) {
		dim->tune_state = DIM_GOING_LEFT;
		dim->steps_left = 0;
	} else if (dim->tune_state == DIM_GOING_LEFT) {
		dim->tune_state = DIM_GOING_RIGHT;
		dim->steps_right = 0;
	}
}
//EXPORT_SYMBOL(dim_turn);
/* Enter the ON_TOP parking state: clear all movement history, including
 * the tired counter, so the next search starts fresh.
 */
void dim_park_on_top(struct dim *dim)
{
	dim->steps_right = 0;
	dim->steps_left = 0;
	dim->tired = 0;
	dim->tune_state = DIM_PARKING_ON_TOP;
}
//EXPORT_SYMBOL(dim_park_on_top);
/* Enter the TIRED parking state: clear the step counters but, unlike
 * dim_park_on_top(), keep the tired counter; per the dim.h contract this
 * reduces the frequency of DIM checks while tired.
 */
void dim_park_tired(struct dim *dim)
{
	dim->steps_right = 0;
	dim->steps_left = 0;
	dim->tune_state = DIM_PARKING_TIRED;
}
//EXPORT_SYMBOL(dim_park_tired);
/* Compute per-millisecond rates (packets, bytes, events, completions)
 * for the interval between @start and @end, plus the completions/events
 * ratio.  Counter deltas use BIT_GAP() so u32/u16 wrap-around is handled.
 * If no time elapsed, @curr_stats is left untouched.
 */
void dim_calc_stats(struct dim_sample *start, struct dim_sample *end,
		    struct dim_stats *curr_stats)
{
	/* u32 holds up to 71 minutes, should be enough */
	u32 delta_us = ktime_us_delta(end->time, start->time);
	u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr);
	u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr,
			     start->byte_ctr);
	u32 ncomps = BIT_GAP(BITS_PER_TYPE(u32), end->comp_ctr,
			     start->comp_ctr);

	if (!delta_us)
		return;

	curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us);
	curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us);
	/* events per interval are fixed at DIM_NEVENTS by the caller's cadence */
	curr_stats->epms = DIV_ROUND_UP(DIM_NEVENTS * USEC_PER_MSEC,
					delta_us);
	curr_stats->cpms = DIV_ROUND_UP(ncomps * USEC_PER_MSEC, delta_us);
	if (curr_stats->epms != 0)
		curr_stats->cpe_ratio = DIV_ROUND_DOWN_ULL(
				curr_stats->cpms * 100, curr_stats->epms);
	else
		curr_stats->cpe_ratio = 0;
}
//EXPORT_SYMBOL(dim_calc_stats);
#endif

View File

@ -0,0 +1,344 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2019 Mellanox Technologies. */
#ifndef DIM_H
#define DIM_H
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
#ifndef DIV_ROUND_DOWN_ULL
#define DIV_ROUND_DOWN_ULL(ll, d) \
({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; })
#endif
/*
* Number of events between DIM iterations.
* Causes a moderation of the algorithm run.
*/
#define DIM_NEVENTS 64
/*
* Whether a difference between values justifies taking an action.
* We consider 10% difference as significant.
*/
#define IS_SIGNIFICANT_DIFF(val, ref) \
(((100UL * abs((val) - (ref))) / (ref)) > 10)
/*
* Calculate the gap between two values.
* Take wrap-around and variable size into consideration.
*/
#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) \
& (BIT_ULL(bits) - 1))
/**
* struct dim_cq_moder - Structure for CQ moderation values.
* Used for communications between DIM and its consumer.
*
* @usec: CQ timer suggestion (by DIM)
* @pkts: CQ packet counter suggestion (by DIM)
* @comps: Completion counter
* @cq_period_mode: CQ period count mode (from CQE/EQE)
*/
struct dim_cq_moder {
u16 usec;
u16 pkts;
u16 comps;
u8 cq_period_mode;
};
/**
* struct dim_sample - Structure for DIM sample data.
* Used for communications between DIM and its consumer.
*
* @time: Sample timestamp
* @pkt_ctr: Number of packets
* @byte_ctr: Number of bytes
* @event_ctr: Number of events
* @comp_ctr: Current completion counter
*/
struct dim_sample {
ktime_t time;
u32 pkt_ctr;
u32 byte_ctr;
u16 event_ctr;
u32 comp_ctr;
};
/**
* struct dim_stats - Structure for DIM stats.
* Used for holding current measured rates.
*
* @ppms: Packets per msec
* @bpms: Bytes per msec
* @epms: Events per msec
* @cpms: Completions per msec
* @cpe_ratio: Ratio of completions to events
*/
struct dim_stats {
int ppms; /* packets per msec */
int bpms; /* bytes per msec */
int epms; /* events per msec */
int cpms; /* completions per msec */
int cpe_ratio; /* ratio of completions to events */
};
/**
* struct dim - Main structure for dynamic interrupt moderation (DIM).
* Used for holding all information about a specific DIM instance.
*
* @state: Algorithm state (see below)
* @prev_stats: Measured rates from previous iteration (for comparison)
* @start_sample: Sampled data at start of current iteration
* @measuring_sample: A &dim_sample that is used to update the current events
* @work: Work to perform on action required
* @priv: A pointer to the struct that points to dim
* @profile_ix: Current moderation profile
* @mode: CQ period count mode
* @tune_state: Algorithm tuning state (see below)
* @steps_right: Number of steps taken towards higher moderation
* @steps_left: Number of steps taken towards lower moderation
* @tired: Parking depth counter
*/
struct dim {
u8 state;
struct dim_stats prev_stats;
struct dim_sample start_sample;
struct dim_sample measuring_sample;
struct work_struct work;
void *priv;
u8 profile_ix;
u8 mode;
u8 tune_state;
u8 steps_right;
u8 steps_left;
u8 tired;
};
/**
* enum dim_cq_period_mode - Modes for CQ period count
*
* @DIM_CQ_PERIOD_MODE_START_FROM_EQE: Start counting from EQE
* @DIM_CQ_PERIOD_MODE_START_FROM_CQE: Start counting from CQE (implies timer reset)
* @DIM_CQ_PERIOD_NUM_MODES: Number of modes
*/
enum dim_cq_period_mode {
DIM_CQ_PERIOD_MODE_START_FROM_EQE = 0x0,
DIM_CQ_PERIOD_MODE_START_FROM_CQE = 0x1,
DIM_CQ_PERIOD_NUM_MODES
};
/**
* enum dim_state - DIM algorithm states
*
* These will determine if the algorithm is in a valid state to start an iteration.
*
* @DIM_START_MEASURE: This is the first iteration (also after applying a new profile)
* @DIM_MEASURE_IN_PROGRESS: Algorithm is already in progress - check if
* need to perform an action
* @DIM_APPLY_NEW_PROFILE: DIM consumer is currently applying a profile - no need to measure
*/
enum dim_state {
DIM_START_MEASURE,
DIM_MEASURE_IN_PROGRESS,
DIM_APPLY_NEW_PROFILE,
};
/**
* enum dim_tune_state - DIM algorithm tune states
*
* These will determine which action the algorithm should perform.
*
* @DIM_PARKING_ON_TOP: Algorithm found a local top point - exit on significant difference
* @DIM_PARKING_TIRED: Algorithm found a deep top point - don't exit if tired > 0
* @DIM_GOING_RIGHT: Algorithm is currently trying higher moderation levels
* @DIM_GOING_LEFT: Algorithm is currently trying lower moderation levels
*/
enum dim_tune_state {
DIM_PARKING_ON_TOP,
DIM_PARKING_TIRED,
DIM_GOING_RIGHT,
DIM_GOING_LEFT,
};
/**
* enum dim_stats_state - DIM algorithm statistics states
*
* These will determine the verdict of current iteration.
*
* @DIM_STATS_WORSE: Current iteration shows worse performance than before
* @DIM_STATS_SAME: Current iteration shows the same performance as before
* @DIM_STATS_BETTER: Current iteration shows better performance than before
*/
enum dim_stats_state {
DIM_STATS_WORSE,
DIM_STATS_SAME,
DIM_STATS_BETTER,
};
/**
* enum dim_step_result - DIM algorithm step results
*
* These describe the result of a step.
*
* @DIM_STEPPED: Performed a regular step
* @DIM_TOO_TIRED: Same kind of step was done multiple times - should go to
* tired parking
* @DIM_ON_EDGE: Stepped to the most left/right profile
*/
enum dim_step_result {
DIM_STEPPED,
DIM_TOO_TIRED,
DIM_ON_EDGE,
};
/**
* dim_on_top - check if current state is a good place to stop (top location)
* @dim: DIM context
*
* Check if current profile is a good place to park at.
* This will result in reducing the DIM checks frequency as we assume we
* shouldn't probably change profiles, unless traffic pattern wasn't changed.
*/
bool dim_on_top(struct dim *dim);
/**
* dim_turn - change profile altering direction
* @dim: DIM context
*
* Go left if we were going right and vice-versa.
* Do nothing if currently parking.
*/
void dim_turn(struct dim *dim);
/**
* dim_park_on_top - enter a parking state on a top location
* @dim: DIM context
*
* Enter parking state.
* Clear all movement history.
*/
void dim_park_on_top(struct dim *dim);
/**
* dim_park_tired - enter a tired parking state
* @dim: DIM context
*
* Enter parking state.
* Clear all movement history and cause DIM checks frequency to reduce.
*/
void dim_park_tired(struct dim *dim);
/**
* dim_calc_stats - calculate the difference between two samples
* @start: start sample
* @end: end sample
* @curr_stats: delta between samples
*
* Calculate the delta between two samples (in data rates).
* Takes into consideration counter wrap-around.
*/
void dim_calc_stats(struct dim_sample *start, struct dim_sample *end,
struct dim_stats *curr_stats);
/**
* dim_update_sample - set a sample's fields with given values
* @event_ctr: number of events to set
* @packets: number of packets to set
* @bytes: number of bytes to set
* @s: DIM sample
*/
static inline void
dim_update_sample(u16 event_ctr, u64 packets, u64 bytes, struct dim_sample *s)
{
	/* timestamp plus counter snapshot; the u64 packet/byte counts are
	 * truncated into the u32 fields of struct dim_sample -- deltas are
	 * later taken with wrap-aware BIT_GAP() math (see dim_calc_stats).
	 */
	s->time = ktime_get();
	s->pkt_ctr = packets;
	s->byte_ctr = bytes;
	s->event_ctr = event_ctr;
}
/**
* dim_update_sample_with_comps - set a sample's fields with given
* values including the completion parameter
* @event_ctr: number of events to set
* @packets: number of packets to set
* @bytes: number of bytes to set
* @comps: number of completions to set
* @s: DIM sample
*/
static inline void
dim_update_sample_with_comps(u16 event_ctr, u64 packets, u64 bytes, u64 comps,
			     struct dim_sample *s)
{
	/* same as dim_update_sample(), additionally recording the
	 * completion counter (truncated u64 -> u32)
	 */
	dim_update_sample(event_ctr, packets, bytes, s);
	s->comp_ctr = comps;
}
/* Net DIM */
/**
* net_dim_get_rx_moderation - provide a CQ moderation object for the given RX profile
* @cq_period_mode: CQ period mode
* @ix: Profile index
*/
struct dim_cq_moder net_dim_get_rx_moderation(u8 cq_period_mode, int ix);
/**
* net_dim_get_def_rx_moderation - provide the default RX moderation
* @cq_period_mode: CQ period mode
*/
struct dim_cq_moder net_dim_get_def_rx_moderation(u8 cq_period_mode);
/**
* net_dim_get_tx_moderation - provide a CQ moderation object for the given TX profile
* @cq_period_mode: CQ period mode
* @ix: Profile index
*/
struct dim_cq_moder net_dim_get_tx_moderation(u8 cq_period_mode, int ix);
/**
* net_dim_get_def_tx_moderation - provide the default TX moderation
* @cq_period_mode: CQ period mode
*/
struct dim_cq_moder net_dim_get_def_tx_moderation(u8 cq_period_mode);
/**
* net_dim - main DIM algorithm entry point
* @dim: DIM instance information
* @end_sample: Current data measurement
*
* Called by the consumer.
* This is the main logic of the algorithm, where data is processed in order
* to decide on next required action.
*/
void net_dim(struct dim *dim, struct dim_sample end_sample);
/* RDMA DIM */
/*
* RDMA DIM profile:
* profile size must be of RDMA_DIM_PARAMS_NUM_PROFILES.
*/
#define RDMA_DIM_PARAMS_NUM_PROFILES 9
#define RDMA_DIM_START_PROFILE 0
/**
* rdma_dim - Runs the adaptive moderation.
* @dim: The moderation struct.
* @completions: The number of completions collected in this round.
*
* Each call to rdma_dim takes the latest amount of completions that
* have been collected and counts them as a new event.
* Once enough events have been collected the algorithm decides a new
* moderation level.
*/
void rdma_dim(struct dim *dim, u64 completions);
#endif /* DIM_H */

View File

@ -0,0 +1,116 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2017 - 2022 Pensando Systems, Inc */
#ifndef _IONIC_H_
#define _IONIC_H_
struct ionic_lif;
#include "kcompat.h"
#include "ionic_if.h"
#include "ionic_dev.h"
#include "ionic_devlink.h"
#define IONIC_DRV_NAME "ionic"
#define IONIC_DRV_DESCRIPTION "Pensando Ethernet NIC Driver"
#define IONIC_DRV_VERSION drv_ver
#define PCI_VENDOR_ID_PENSANDO 0x1dd8
#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_PF 0x1002
#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_VF 0x1003
#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_MGMT 0x1004
#define DEVCMD_TIMEOUT 5
#define SHORT_TIMEOUT 1
#define IONIC_ADMINQ_TIME_SLICE msecs_to_jiffies(100)
#define MAX_ETH_EQS 64
#define IONIC_PHC_UPDATE_NS 10000000000L /* 10s in nanoseconds */
#define NORMAL_PPB 1000000000 /* one billion parts per billion */
#define SCALED_PPM (1000000ull << 16) /* 2^16 million parts per 2^16 million */
extern bool port_init_up;
extern unsigned int rx_copybreak;
extern unsigned int rx_fill_threshold;
extern unsigned int tx_budget;
extern unsigned int devcmd_timeout;
extern unsigned long affinity_mask_override;
/* Per-VF state kept by the PF.
 * NOTE(review): field semantics appear to mirror the VF attribute
 * commands (mac, vlan, rate, spoofchk, trust, linkstate) -- confirm
 * against ionic_if.h and ionic_vf_attr_to_str().
 */
struct ionic_vf {
	u16 index;			/* VF number under this PF */
	u8 macaddr[6];
	__le32 maxrate;			/* little-endian, device byte order */
	__le16 vlanid;
	u8 spoofchk;
	u8 trusted;
	u8 linkstate;
	dma_addr_t stats_pa;		/* DMA address of the stats buffer */
	struct ionic_lif_stats stats;
};
struct ionic {
struct pci_dev *pdev;
struct platform_device *pfdev;
struct device *dev;
struct ionic_dev idev;
struct mutex dev_cmd_lock; /* lock for dev_cmd operations */
struct dentry *dentry;
struct ionic_dev_bar bars[IONIC_BARS_MAX];
unsigned int num_bars;
struct ionic_identity ident;
bool is_mgmt_nic;
struct ionic_lif *lif;
struct ionic_eq **eqs;
unsigned int nnqs_per_lif;
unsigned int nrdma_eqs_per_lif;
unsigned int ntxqs_per_lif;
unsigned int nrxqs_per_lif;
unsigned int nlifs;
unsigned int neth_eqs;
DECLARE_BITMAP(lifbits, IONIC_LIFS_MAX);
DECLARE_BITMAP(ethbits, IONIC_LIFS_MAX);
unsigned int nintrs;
DECLARE_BITMAP(intrs, IONIC_INTR_CTRL_REGS_MAX);
#ifndef HAVE_PCI_IRQ_API
struct msix_entry *msix;
#endif
struct work_struct nb_work;
struct notifier_block nb;
#ifdef IONIC_DEVLINK
struct devlink_port dl_port;
#endif
struct rw_semaphore vf_op_lock; /* lock for VF operations */
struct ionic_vf *vfs;
int num_vfs;
struct timer_list watchdog_timer;
int watchdog_period;
};
int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx);
int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx,
const int err, const bool do_msg);
int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx);
int ionic_adminq_post_wait_nomsg(struct ionic_lif *lif, struct ionic_admin_ctx *ctx);
void ionic_adminq_netdev_err_print(struct ionic_lif *lif, u8 opcode,
u8 status, int err);
int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_wait);
int ionic_dev_cmd_wait_nomsg(struct ionic *ionic, unsigned long max_wait);
void ionic_dev_cmd_dev_err_print(struct ionic *ionic, u8 opcode, u8 status,
int err);
int ionic_set_dma_mask(struct ionic *ionic);
int ionic_setup(struct ionic *ionic);
int ionic_identify(struct ionic *ionic);
int ionic_init(struct ionic *ionic);
int ionic_reset(struct ionic *ionic);
int ionic_port_identify(struct ionic *ionic);
int ionic_port_init(struct ionic *ionic);
int ionic_port_reset(struct ionic *ionic);
const char *ionic_vf_attr_to_str(enum ionic_vf_attr attr);
#endif /* _IONIC_H_ */

View File

@ -0,0 +1,266 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2022 Pensando Systems, Inc. All rights reserved. */
#include <linux/kernel.h>
#include "ionic.h"
#include "ionic_bus.h"
#include "ionic_dev.h"
#include "ionic_lif.h"
#include "ionic_txrx.h"
/**
 * ionic_get_handle_from_netdev - look up the lif handle behind a netdev
 * @netdev:      candidate net device
 * @api_version: caller's IONIC_API_VERSION string; must match exactly
 * @prsn:        personality the caller intends to register
 *
 * Return: opaque lif handle for use with the other ionic_api_* calls,
 * or an ERR_PTR: -EINVAL on API version mismatch, -ENXIO when the
 * netdev is not an ionic lif or has no RDMA EQs, -EBUSY when a
 * different personality has already claimed the lif.
 */
void *ionic_get_handle_from_netdev(struct net_device *netdev,
				   const char *api_version,
				   enum ionic_api_prsn prsn)
{
	struct ionic_lif *lif;

	if (strcmp(api_version, IONIC_API_VERSION))
		return ERR_PTR(-EINVAL);

	lif = ionic_netdev_lif(netdev);
	if (!lif || !lif->nrdma_eqs)
		return ERR_PTR(-ENXIO);

	/* TODO: Rework if supporting more than one child */
	if (lif->child_lif_cfg.prsn != IONIC_PRSN_NONE &&
	    lif->child_lif_cfg.prsn != prsn)
		return ERR_PTR(-EBUSY);

	return lif;
}
EXPORT_SYMBOL_GPL(ionic_get_handle_from_netdev);
/* Whether a child driver should stay registered across an eth driver
 * reset.  Currently always false; see the TODO below.
 */
bool ionic_api_stay_registered(void *handle)
{
	/* TODO: Implement when eth driver reset is implemented */
	return false;
}
EXPORT_SYMBOL_GPL(ionic_api_stay_registered);
/**
 * ionic_api_request_reset - ask the device to reset the child (RDMA) lif
 * @handle: lif handle from ionic_get_handle_from_netdev()
 *
 * Issues IONIC_CMD_RDMA_RESET_LIF under dev_cmd_lock; on success,
 * notifies the child through its registered reset callback, if any.
 * On failure only a warning is logged and the callback is skipped.
 */
void ionic_api_request_reset(void *handle)
{
	struct ionic_lif *lif = handle;
	struct ionic *ionic;
	int err;

	union ionic_dev_cmd cmd = {
		.cmd.opcode = IONIC_CMD_RDMA_RESET_LIF,
		.cmd.lif_index = cpu_to_le16(lif->child_lif_cfg.index),
	};

	ionic = lif->ionic;

	/* dev_cmd_lock serializes use of the shared dev_cmd registers */
	mutex_lock(&ionic->dev_cmd_lock);
	ionic_dev_cmd_go(&ionic->idev, &cmd);
	err = ionic_dev_cmd_wait(ionic, devcmd_timeout);
	mutex_unlock(&ionic->dev_cmd_lock);

	if (err) {
		netdev_warn(lif->netdev, "request_reset: error %d\n", err);
		return;
	}

	if (lif->child_lif_cfg.priv &&
	    lif->child_lif_cfg.reset_cb)
		(*lif->child_lif_cfg.reset_cb)(lif->child_lif_cfg.priv);
}
EXPORT_SYMBOL_GPL(ionic_api_request_reset);
/* Return the child's private context when @prsn matches the personality
 * registered on this lif, otherwise NULL.
 */
void *ionic_api_get_private(void *handle, enum ionic_api_prsn prsn)
{
	struct ionic_lif *lif = handle;

	return (lif->child_lif_cfg.prsn == prsn) ? lif->child_lif_cfg.priv
						 : NULL;
}
EXPORT_SYMBOL_GPL(ionic_api_get_private);
/* Register (or, with priv == NULL, clear) the child's private context,
 * personality, and reset callback on this lif.  Refuses with -EBUSY if a
 * non-NULL context is already registered and a new one is being set.
 */
int ionic_api_set_private(void *handle, void *priv,
			  void (*reset_cb)(void *priv),
			  enum ionic_api_prsn prsn)
{
	struct ionic_lif *lif = handle;

	if (priv && lif->child_lif_cfg.priv)
		return -EBUSY;

	lif->child_lif_cfg.priv = priv;
	lif->child_lif_cfg.prsn = prsn;
	lif->child_lif_cfg.reset_cb = reset_cb;

	return 0;
}
EXPORT_SYMBOL_GPL(ionic_api_set_private);
/* The lif's underlying struct device is the parent of its netdev. */
struct device *ionic_api_get_device(void *handle)
{
	return ((struct ionic_lif *)handle)->netdev->dev.parent;
}
EXPORT_SYMBOL_GPL(ionic_api_get_device);
/* Device information (asic type/rev, fw version, serial) for this lif. */
const struct ionic_devinfo *ionic_api_get_devinfo(void *handle)
{
	return &((struct ionic_lif *)handle)->ionic->idev.dev_info;
}
EXPORT_SYMBOL_GPL(ionic_api_get_devinfo);
/* The lif's debugfs directory, so child drivers can nest their entries. */
struct dentry *ionic_api_get_debug_ctx(void *handle)
{
	return ((struct ionic_lif *)handle)->dentry;
}
EXPORT_SYMBOL_GPL(ionic_api_get_debug_ctx);
/* Return the lif identity data from device identification, and
 * optionally (when @lif_index is non-NULL) this lif's index.
 */
const union ionic_lif_identity *ionic_api_get_identity(void *handle,
						       int *lif_index)
{
	struct ionic_lif *lif = handle;

	if (lif_index)
		*lif_index = lif->child_lif_cfg.index;

	/* TODO: Do all LIFs have the same ident? */
	return &lif->ionic->ident.lif;
}
EXPORT_SYMBOL_GPL(ionic_api_get_identity);
int ionic_api_get_intr(void *handle, int *irq)
{
struct ionic_lif *lif = handle;
struct ionic_intr_info *intr_obj;
int err;
if (!lif->nrdma_eqs_avail)
return -ENOSPC;
intr_obj = kzalloc(sizeof(*intr_obj), GFP_KERNEL);
if (!intr_obj)
return -ENOSPC;
err = ionic_intr_alloc(lif->ionic, intr_obj);
if (err)
goto done;
err = ionic_bus_get_irq(lif->ionic, intr_obj->index);
if (err < 0) {
ionic_intr_free(lif->ionic, intr_obj->index);
goto done;
}
lif->nrdma_eqs_avail--;
*irq = err;
err = intr_obj->index;
done:
kfree(intr_obj);
return err;
}
EXPORT_SYMBOL_GPL(ionic_api_get_intr);
/* Release a device interrupt index and return its slot to the rdma
 * event-queue budget.
 */
void ionic_api_put_intr(void *handle, int intr)
{
	struct ionic_lif *lif = handle;

	ionic_intr_free(lif->ionic, intr);
	lif->nrdma_eqs_avail++;
}
EXPORT_SYMBOL_GPL(ionic_api_put_intr);
/* Reserve 2^order contiguous controller-memory pages; thin wrapper
 * over the lif-level allocator.  Returns zero or negative errno.
 */
int ionic_api_get_cmb(void *handle, u32 *pgid, phys_addr_t *pgaddr, int order)
{
	return ionic_get_cmb((struct ionic_lif *)handle, pgid, pgaddr, order);
}
EXPORT_SYMBOL_GPL(ionic_api_get_cmb);
/* Release controller-memory pages taken with ionic_api_get_cmb(). */
void ionic_api_put_cmb(void *handle, u32 pgid, int order)
{
	ionic_put_cmb((struct ionic_lif *)handle, pgid, order);
}
EXPORT_SYMBOL_GPL(ionic_api_put_cmb);
/* Hand out the lif's kernel-space doorbell id and premapped doorbell
 * page, along with the mapped interrupt control registers.
 */
void ionic_api_kernel_dbpage(void *handle,
			     struct ionic_intr __iomem **intr_ctrl,
			     u32 *dbid, u64 __iomem **dbpage)
{
	struct ionic_lif *lif = handle;

	*intr_ctrl = lif->ionic->idev.intr_ctrl;

	*dbid = lif->kern_pid;
	*dbpage = lif->kern_dbpage;
}
EXPORT_SYMBOL_GPL(ionic_api_kernel_dbpage);
/* Reserve a doorbell id, e.g. for a user-space mapping.
 *
 * On buses with a doorbell page per pid (PCI), a free id is claimed from
 * the dbid_inuse bitmap under dbid_inuse_lock; on mnic there is a single
 * shared page, so id 0 is always handed out.
 *
 * Return: 0 on success, -EINVAL if the bitmap is gone (lif torn down),
 *         -ENOMEM if all ids are in use.
 */
int ionic_api_get_dbid(void *handle, u32 *dbid, phys_addr_t *addr)
{
	struct ionic_lif *lif = handle;
	int id, dbpage_num;

	if (ionic_bus_dbpage_per_pid(lif->ionic)) {
		mutex_lock(&lif->dbid_inuse_lock);

		if (!lif->dbid_inuse) {
			mutex_unlock(&lif->dbid_inuse_lock);
			return -EINVAL;
		}

		/* find-and-set under the lock, so ids are claimed atomically */
		id = find_first_zero_bit(lif->dbid_inuse, lif->dbid_count);
		if (id == lif->dbid_count) {
			mutex_unlock(&lif->dbid_inuse_lock);
			return -ENOMEM;
		}

		set_bit(id, lif->dbid_inuse);

		mutex_unlock(&lif->dbid_inuse_lock);

		dbpage_num = ionic_db_page_num(lif, id);
	} else {
		id = 0;
		dbpage_num = 0;
	}

	*dbid = id;
	*addr = ionic_bus_phys_dbpage(lif->ionic, dbpage_num);

	return 0;
}
EXPORT_SYMBOL_GPL(ionic_api_get_dbid);
/* Return a doorbell id to the pool so it can be reserved again.
 * A no-op on buses with a single shared doorbell page (mnic).
 */
void ionic_api_put_dbid(void *handle, int dbid)
{
	struct ionic_lif *lif = handle;

	if (!ionic_bus_dbpage_per_pid(lif->ionic))
		return;

	mutex_lock(&lif->dbid_inuse_lock);
	/* the bitmap may already be gone during lif teardown */
	if (lif->dbid_inuse)
		clear_bit(dbid, lif->dbid_inuse);
	mutex_unlock(&lif->dbid_inuse_lock);
}
EXPORT_SYMBOL_GPL(ionic_api_put_dbid);
/* Post an admin command on behalf of the child driver; the completion
 * is reported later through ctx->work.  Returns zero or negative errno.
 */
int ionic_api_adminq_post(void *handle, struct ionic_admin_ctx *ctx)
{
	return ionic_adminq_post((struct ionic_lif *)handle, ctx);
}
EXPORT_SYMBOL_GPL(ionic_api_adminq_post);

View File

@ -0,0 +1,275 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2017 - 2022 Pensando Systems, Inc. All rights reserved. */
#ifndef IONIC_API_H
#define IONIC_API_H
#include <linux/completion.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include "ionic_if.h"
#include "ionic_regs.h"
/**
* IONIC_API_VERSION - Version number of this interface
*
* Any interface changes to this interface must also change the version.
*
* If netdev and other (eg, rdma) drivers are compiled from different sources,
* they are compatible only if IONIC_API_VERSION is statically the same in both
* sources. Drivers must have matching values of IONIC_API_VERSION at compile
* time, to be considered compatible at run time.
*/
#define IONIC_API_VERSION "8"
struct dentry;
/**
* struct ionic_devinfo - device information
* @asic_type: Device ASIC type code
* @asic_rev: Device ASIC revision code
* @fw_version: Device firmware version, as a string
* @serial_num: Device serial number, as a string
*/
struct ionic_devinfo {
u8 asic_type;
u8 asic_rev;
char fw_version[IONIC_DEVINFO_FWVERS_BUFLEN + 1];
char serial_num[IONIC_DEVINFO_SERIAL_BUFLEN + 1];
};
/**
* enum ionic_api_prsn - personalities that can be applied to a lif
* @IONIC_PRSN_NONE: No personality assigned
* @IONIC_PRSN_ETH: Ethernet NIC personality assigned
* @IONIC_PRSN_RDMA: RDMA HCA personality assigned
*/
enum ionic_api_prsn {
IONIC_PRSN_NONE = 0,
IONIC_PRSN_ETH,
IONIC_PRSN_RDMA,
};
/**
* ionic_get_handle_from_netdev() - Get a handle if the netdev is ionic
* @netdev: Net device to check
* @api_version: IONIC_API_VERSION
* @prsn: Personality to apply
*
* This returns an opaque handle if and only if the netdev was created
* by the ionic driver and the api version matches as described
* above for IONIC_API_VERSION.
*
* Return: Handle, if netdev is a compatible ionic device, or ERR_PTR(error)
*/
void *ionic_get_handle_from_netdev(struct net_device *netdev,
const char *api_version,
enum ionic_api_prsn prsn);
/**
* ionic_api_stay_registered() - stay registered through net interface changes
* @handle: Handle to lif
*
* Return: true if the child device should ignore net deregistration events
*/
bool ionic_api_stay_registered(void *handle);
/**
* ionic_api_request_reset() - request reset or disable the device or lif
* @handle: Handle to lif
*
* The reset will be carried out asynchronously. If it succeeds, then the
* callback specified in ionic_api_set_private() will be called.
*/
void ionic_api_request_reset(void *handle);
/**
* ionic_api_get_private() - Get private data associated with the lif
* @handle: Handle to lif
* @prsn: Personality to which the private data applies
*
* Get the private data of some kind. The private data may be, for example, an
* instance of an rdma device for this lif.
*
* Return: private data or NULL
*/
void *ionic_api_get_private(void *handle, enum ionic_api_prsn prsn);
/**
* ionic_api_set_private() - Set private data associated with the lif
* @handle: Handle to lif
* @priv: Private data or NULL
* @reset_cb: Callback if device has been disabled or reset
* @prsn: Personality to which the private data applies
*
* Set the private data of some kind. The private data may be, for example, an
* instance of an rdma device for this lif.
*
* This will fail if private data is already set for that personality.
*
* Return: zero or negative error status
*/
int ionic_api_set_private(void *handle, void *priv,
void (*reset_cb)(void *priv),
enum ionic_api_prsn prsn);
/**
* ionic_api_clear_private() - Clear private data associated with the lif
* @handle: Handle to lif
*/
static inline void ionic_api_clear_private(void *handle)
{
	/* drops priv and reset_cb, and resets the personality to NONE */
	(void)ionic_api_set_private(handle, NULL, NULL, IONIC_PRSN_NONE);
}
/**
* ionic_api_get_device() - Get the underlying device
* @handle: Handle to lif
*
* Return: pointer to underlying OS struct device associated with the lif
*/
struct device *ionic_api_get_device(void *handle);
/**
* ionic_api_get_devinfo() - Get device information
* @handle: Handle to lif
*
* Return: pointer to device information
*/
const struct ionic_devinfo *ionic_api_get_devinfo(void *handle);
/**
* ionic_api_get_debug_ctx() - Get the debug context (if any) for the lif
* @handle: Handle to lif
*
* This is the directory entry of the LIF in debugfs.
*
* Return: debug context for the lif or NULL
*/
struct dentry *ionic_api_get_debug_ctx(void *handle);
/**
* ionic_api_get_identity() - Get result of device identification
* @handle: Handle to lif
* @lif_index: This lif index
*
* Return: pointer to result of identification
*/
const union ionic_lif_identity *ionic_api_get_identity(void *handle,
int *lif_index);
/**
* ionic_api_get_intr() - Reserve a device interrupt index
* @handle: Handle to lif
* @irq: OS interrupt number returned
*
* Reserve an interrupt index, and indicate the irq number for that index.
*
* Return: interrupt index or negative error status
*/
int ionic_api_get_intr(void *handle, int *irq);
/**
* ionic_api_put_intr() - Release a device interrupt index
* @handle: Handle to lif
* @intr: Interrupt index
*
* Mark the interrupt index unused so that it can be reserved again.
*/
void ionic_api_put_intr(void *handle, int intr);
/**
* ionic_api_get_cmb() - Reserve cmb pages
* @handle: Handle to lif
* @pgid: First page index
* @pgaddr: First page bus addr (contiguous)
* @order: Log base two number of pages (PAGE_SIZE)
*
* Return: zero or negative error status
*/
int ionic_api_get_cmb(void *handle, u32 *pgid, phys_addr_t *pgaddr, int order);
/**
* ionic_api_put_cmb() - Release cmb pages
* @handle: Handle to lif
* @pgid: First page index
* @order: Log base two number of pages (PAGE_SIZE)
*/
void ionic_api_put_cmb(void *handle, u32 pgid, int order);
/**
* ionic_api_kernel_dbpage() - Get mapped doorbell page for use in kernel space
* @handle: Handle to lif
* @intr_ctrl: Interrupt control registers
* @dbid: Doorbell id for use in kernel space
* @dbpage: One ioremapped doorbell page for use in kernel space
*
* This also provides mapped interrupt control registers.
*
* The id and page returned here refer to the doorbell page reserved for use in
* kernel space for this lif. For user space, use ionic_api_get_dbid to
* allocate a doorbell id for exclusive use by a process.
*/
void ionic_api_kernel_dbpage(void *handle,
struct ionic_intr __iomem **intr_ctrl,
u32 *dbid, u64 __iomem **dbpage);
/**
* ionic_api_get_dbid() - Reserve a doorbell id
* @handle: Handle to lif
* @dbid: Doorbell id
* @addr: Phys address of doorbell page
*
* Reserve a doorbell id. This corresponds with exactly one doorbell page at
* an offset from the doorbell page base address, that can be mapped into a
* user space process.
*
* Return: zero on success or negative error status
*/
int ionic_api_get_dbid(void *handle, u32 *dbid, phys_addr_t *addr);
/**
* ionic_api_put_dbid() - Release a doorbell id
* @handle: Handle to lif
* @dbid: Doorbell id
*
* Mark the doorbell id unused, so that it can be reserved again.
*/
void ionic_api_put_dbid(void *handle, int dbid);
/**
* struct ionic_admin_ctx - Admin command context
* @work: Work completion wait queue element
* @cmd: Admin command (64B) to be copied to the queue
* @comp: Admin completion (16B) copied from the queue
*/
struct ionic_admin_ctx {
struct completion work;
union ionic_adminq_cmd cmd;
union ionic_adminq_comp comp;
};
/**
* ionic_api_adminq_post() - Post an admin command
* @handle: Handle to lif
* @ctx: API admin command context
*
* Post the command to an admin queue in the ethernet driver. If this command
* succeeds, then the command has been posted, but that does not indicate a
* completion. If this command returns success, then the completion callback
* will eventually be called.
*
* Return: zero or negative error status
*/
int ionic_api_adminq_post(void *handle, struct ionic_admin_ctx *ctx);
/**
* ionic_error_to_errno() - Transform ionic_if errors to os errno
* @code: Ionic error number
*
* Return: Negative OS error number or zero
*/
int ionic_error_to_errno(enum ionic_status_code code);
#endif /* IONIC_API_H */

View File

@ -0,0 +1,23 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2017 - 2022 Pensando Systems, Inc */
#ifndef _IONIC_BUS_H_
#define _IONIC_BUS_H_
int ionic_bus_get_irq(struct ionic *ionic, unsigned int num);
const char *ionic_bus_info(struct ionic *ionic);
int ionic_bus_alloc_irq_vectors(struct ionic *ionic, unsigned int nintrs);
void ionic_bus_free_irq_vectors(struct ionic *ionic);
int ionic_bus_register_driver(void);
void ionic_bus_unregister_driver(void);
struct net_device *ionic_alloc_netdev(struct ionic *ionic);
void __iomem *ionic_bus_map_dbpage(struct ionic *ionic, int page_num);
void ionic_bus_unmap_dbpage(struct ionic *ionic, void __iomem *page);
phys_addr_t ionic_bus_phys_dbpage(struct ionic *ionic, int page_num);
static inline bool ionic_bus_dbpage_per_pid(struct ionic *ionic)
{
return ionic->pdev;
}
#endif /* _IONIC_BUS_H_ */

View File

@ -0,0 +1,484 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2022 Pensando Systems, Inc */
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include "ionic.h"
#include "ionic_bus.h"
#include "ionic_lif.h"
#include "ionic_debugfs.h"
/* Supported devices */
/* Supported devices: the ethernet PF, VF, and management NIC functions */
static const struct pci_device_id ionic_id_table[] = {
	{ PCI_VDEVICE(PENSANDO, PCI_DEVICE_ID_PENSANDO_IONIC_ETH_PF) },
	{ PCI_VDEVICE(PENSANDO, PCI_DEVICE_ID_PENSANDO_IONIC_ETH_VF) },
	{ PCI_VDEVICE(PENSANDO, PCI_DEVICE_ID_PENSANDO_IONIC_ETH_MGMT) },
	{ 0, }	/* end of table */
};
MODULE_DEVICE_TABLE(pci, ionic_id_table);
/* Map device interrupt index @num to its Linux irq number. */
int ionic_bus_get_irq(struct ionic *ionic, unsigned int num)
{
#ifdef HAVE_PCI_IRQ_API
	return pci_irq_vector(ionic->pdev, num);
#else
	/* legacy kernels: look up the vector in our own msix entry table */
	return ionic->msix[num].vector;
#endif
}
/* Bus-level identifier string for this device (the PCI slot name). */
const char *ionic_bus_info(struct ionic *ionic)
{
	return pci_name(ionic->pdev);
}
/* Allocate exactly @nintrs MSI-X vectors.
 *
 * Return: @nintrs on success, negative errno on failure
 *         (-EBUSY if the legacy msix table was already allocated).
 */
int ionic_bus_alloc_irq_vectors(struct ionic *ionic, unsigned int nintrs)
{
#ifdef HAVE_PCI_IRQ_API
	return pci_alloc_irq_vectors(ionic->pdev, nintrs, nintrs,
				     PCI_IRQ_MSIX);
#else
	/* legacy path: manage the msix entry table ourselves */
	int err;
	int i;

	if (ionic->msix)
		return -EBUSY;

	ionic->msix = devm_kzalloc(ionic->dev,
				   sizeof(*ionic->msix) * nintrs, GFP_KERNEL);
	if (!ionic->msix)
		return -ENOMEM;
	for (i = 0; i < nintrs; i++)
		ionic->msix[i].entry = i;
	err = pci_enable_msix_exact(ionic->pdev, ionic->msix, nintrs);
	if (err < 0) {
		devm_kfree(ionic->dev, ionic->msix);
		ionic->msix = NULL;
		return err;
	}
	return nintrs;
#endif
}
/* Release the MSI-X vectors taken in ionic_bus_alloc_irq_vectors();
 * a no-op if none were ever allocated (nintrs == 0).
 */
void ionic_bus_free_irq_vectors(struct ionic *ionic)
{
	if (!ionic->nintrs)
		return;

#ifdef HAVE_PCI_IRQ_API
	pci_free_irq_vectors(ionic->pdev);
#else
	pci_disable_msix(ionic->pdev);
	devm_kfree(ionic->dev, ionic->msix);
	ionic->msix = NULL;
#endif
}
/* Allocate the multi-queue ethernet netdev whose private area is the lif. */
struct net_device *ionic_alloc_netdev(struct ionic *ionic)
{
	unsigned int nqs = ionic->ntxqs_per_lif;

	dev_dbg(ionic->dev, "nxqs=%d nlifs=%d nintrs=%d\n",
		nqs, ionic->nlifs, ionic->nintrs);

	return alloc_etherdev_mqs(sizeof(struct ionic_lif), nqs, nqs);
}
/* Record all PCI memory BARs in ionic->bars, and ioremap BAR 0.
 *
 * Only the first (device register) BAR is actually mapped here; for the
 * rest, just len/bus_addr/res_index are recorded and pages are mapped
 * later on demand (e.g. doorbell pages).
 *
 * Return: 0, or -ENODEV if BAR 0 cannot be mapped.
 */
static int ionic_map_bars(struct ionic *ionic)
{
	struct pci_dev *pdev = ionic->pdev;
	struct device *dev = ionic->dev;
	struct ionic_dev_bar *bars;
	unsigned int i, j;

	bars = ionic->bars;
	ionic->num_bars = 0;

	/* i walks the PCI resource indices, j the compacted bars[] array */
	for (i = 0, j = 0; i < IONIC_BARS_MAX; i++) {
		if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
			continue;
		bars[j].len = pci_resource_len(pdev, i);

		/* only map the whole bar 0 */
		if (j > 0) {
			bars[j].vaddr = NULL;
		} else {
			bars[j].vaddr = pci_iomap(pdev, i, bars[j].len);
			if (!bars[j].vaddr) {
				dev_err(dev,
					"Cannot memory-map BAR %d, aborting\n",
					i);
				return -ENODEV;
			}
		}

		bars[j].bus_addr = pci_resource_start(pdev, i);
		bars[j].res_index = i;
		ionic->num_bars++;
		j++;
	}

	ionic_debugfs_add_bars(ionic);

	return 0;
}
/* Undo ionic_map_bars(): drop any BAR mapping still held and clear the
 * bookkeeping so the entries read as unmapped.
 */
static void ionic_unmap_bars(struct ionic *ionic)
{
	unsigned int i;

	for (i = 0; i < IONIC_BARS_MAX; i++) {
		struct ionic_dev_bar *bar = &ionic->bars[i];

		if (!bar->vaddr)
			continue;

		iounmap(bar->vaddr);
		bar->bus_addr = 0;
		bar->vaddr = NULL;
		bar->len = 0;
	}
}
/* Map one doorbell page, @page_num pages into the doorbell BAR. */
void __iomem *ionic_bus_map_dbpage(struct ionic *ionic, int page_num)
{
#ifdef HAVE_PCI_IOMAP_RANGE
	return pci_iomap_range(ionic->pdev,
			       ionic->bars[IONIC_PCI_BAR_DBELL].res_index,
			       (u64)page_num << PAGE_SHIFT, PAGE_SIZE);
#else
	/* older kernels: raw ioremap of the page within the BAR */
	int bar = ionic->bars[IONIC_PCI_BAR_DBELL].res_index;
	phys_addr_t start = pci_resource_start(ionic->pdev, bar);
	phys_addr_t offset = start + ((phys_addr_t)page_num << PAGE_SHIFT);

	return ioremap(offset, PAGE_SIZE);
#endif /* HAVE_PCI_IOMAP_RANGE */
}
/* Unmap a doorbell page taken with ionic_bus_map_dbpage(). */
void ionic_bus_unmap_dbpage(struct ionic *ionic, void __iomem *page)
{
	iounmap(page);
}
/* Physical (bus) address of doorbell page @page_num within the BAR. */
phys_addr_t ionic_bus_phys_dbpage(struct ionic *ionic, int page_num)
{
	return ionic->bars[IONIC_PCI_BAR_DBELL].bus_addr +
	       ((phys_addr_t)page_num << PAGE_SHIFT);
}
/* Free all VF state; caller must hold vf_op_lock for writing.
 *
 * For each VF, tell the FW to stop DMAing stats (stats_pa = 0) before
 * unmapping that VF's stats buffer, then free the array itself.
 */
static void ionic_vf_dealloc_locked(struct ionic *ionic)
{
	struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_STATSADDR };
	struct ionic_vf *v;
	int i;

	if (!ionic->vfs)
		return;

	for (i = ionic->num_vfs - 1; i >= 0; i--) {
		v = &ionic->vfs[i];

		if (v->stats_pa) {
			vfc.stats_pa = 0;
			/* ignore failures; we're tearing down regardless */
			(void)ionic_set_vf_config(ionic, i, &vfc);
			dma_unmap_single(ionic->dev, v->stats_pa,
					 sizeof(v->stats), DMA_FROM_DEVICE);
			v->stats_pa = 0;
		}
	}

	kfree(ionic->vfs);
	ionic->vfs = NULL;
	ionic->num_vfs = 0;
}
/* Locked wrapper around ionic_vf_dealloc_locked(). */
static void ionic_vf_dealloc(struct ionic *ionic)
{
	down_write(&ionic->vf_op_lock);
	ionic_vf_dealloc_locked(ionic);
	up_write(&ionic->vf_op_lock);
}
/* Allocate per-VF state for @num_vfs VFs and point the FW at each VF's
 * stats buffer.  On any failure everything allocated so far is undone.
 *
 * Return: 0 or negative errno.
 */
static int ionic_vf_alloc(struct ionic *ionic, int num_vfs)
{
	struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_STATSADDR };
	struct ionic_vf *v;
	int err = 0;
	int i;

	down_write(&ionic->vf_op_lock);

	ionic->vfs = kcalloc(num_vfs, sizeof(struct ionic_vf), GFP_KERNEL);
	if (!ionic->vfs) {
		err = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_vfs; i++) {
		v = &ionic->vfs[i];
		v->stats_pa = dma_map_single(ionic->dev, &v->stats,
					     sizeof(v->stats), DMA_FROM_DEVICE);
		if (dma_mapping_error(ionic->dev, v->stats_pa)) {
			dev_err(ionic->dev, "DMA mapping failed for vf[%d] stats\n", i);
			v->stats_pa = 0;
			err = -ENODEV;
			goto out;
		}

		/* only count a VF once its stats buffer is mapped */
		ionic->num_vfs++;

		/* ignore failures from older FW, we just won't get stats */
		vfc.stats_pa = cpu_to_le64(v->stats_pa);
		(void)ionic_set_vf_config(ionic, i, &vfc);
	}

out:
	if (err)
		ionic_vf_dealloc_locked(ionic);
	up_write(&ionic->vf_op_lock);
	return err;
}
/* sysfs sriov_numvfs entry point: enable @num_vfs VFs, or disable all
 * when @num_vfs is 0.  Refused while a FW reset is in flight.
 *
 * Return: number of VFs enabled, 0 on disable, or negative errno.
 */
static int ionic_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct ionic *ionic = pci_get_drvdata(pdev);
	struct device *dev = ionic->dev;
	int ret = 0;

	if (ionic->lif &&
	    test_bit(IONIC_LIF_F_FW_RESET, ionic->lif->state))
		return -EBUSY;

	if (num_vfs > 0) {
		ret = pci_enable_sriov(pdev, num_vfs);
		if (ret) {
			dev_err(dev, "Cannot enable SRIOV: %d\n", ret);
			goto out;
		}

		ret = ionic_vf_alloc(ionic, num_vfs);
		if (ret) {
			dev_err(dev, "Cannot alloc VFs: %d\n", ret);
			pci_disable_sriov(pdev);
			goto out;
		}

		ret = num_vfs;
	} else {
		pci_disable_sriov(pdev);
		ionic_vf_dealloc(ionic);
	}

out:
	return ret;
}
/* PCI probe: bring up the device end to end.
 *
 * Order: devlink alloc, DMA mask, PCI enable/regions, BAR mapping,
 * device setup/identify/init, port identify/init, LIF size/alloc/init,
 * VF rediscovery, devlink and LIF registration, watchdog start.
 *
 * Note the split error ladder: failures after ionic_setup() deliberately
 * return 0 so the hw interface stays around for inspection; earlier
 * failures fully unwind and return the error.
 */
static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct ionic *ionic;
	int num_vfs;
	int err;

	ionic = ionic_devlink_alloc(dev);
	if (!ionic)
		return -ENOMEM;

	ionic->pdev = pdev;
	ionic->dev = dev;
	pci_set_drvdata(pdev, ionic);
	mutex_init(&ionic->dev_cmd_lock);
	/* the mgmt NIC function is identified purely by its PCI device id */
	ionic->is_mgmt_nic =
		ent->device == PCI_DEVICE_ID_PENSANDO_IONIC_ETH_MGMT;
	ionic->pfdev = NULL;

	err = ionic_set_dma_mask(ionic);
	if (err) {
		dev_err(dev, "Cannot set DMA mask: %d, aborting\n", err);
		goto err_out_clear_drvdata;
	}

	ionic_debugfs_add_dev(ionic);

	/* Setup PCI device */
	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(dev, "Cannot enable PCI device: %d, aborting\n", err);
		goto err_out_debugfs_del_dev;
	}

	err = pci_request_regions(pdev, IONIC_DRV_NAME);
	if (err) {
		dev_err(dev, "Cannot request PCI regions: %d, aborting\n", err);
		goto err_out_pci_disable_device;
	}

	pcie_print_link_status(pdev);

	err = ionic_map_bars(ionic);
	if (err)
		goto err_out_pci_release_regions;

	/* Configure the device */
	err = ionic_setup(ionic);
	if (err) {
		dev_err(dev, "Cannot setup device: %d, aborting\n", err);
		goto err_out_unmap_bars;
	}
	pci_set_master(pdev);

	err = ionic_identify(ionic);
	if (err) {
		dev_err(dev, "Cannot identify device: %d, aborting\n", err);
		goto err_out_teardown;
	}
	ionic_debugfs_add_ident(ionic);

	err = ionic_init(ionic);
	if (err) {
		dev_err(dev, "Cannot init device: %d, aborting\n", err);
		goto err_out_teardown;
	}

	/* Configure the ports */
	err = ionic_port_identify(ionic);
	if (err) {
		dev_err(dev, "Cannot identify port: %d, aborting\n", err);
		goto err_out_reset;
	}

	err = ionic_port_init(ionic);
	if (err) {
		dev_err(dev, "Cannot init port: %d, aborting\n", err);
		goto err_out_reset;
	}

	/* Allocate and init the LIF */
	err = ionic_lif_size(ionic);
	if (err) {
		dev_err(dev, "Cannot size LIF: %d, aborting\n", err);
		goto err_out_port_reset;
	}

	err = ionic_lif_alloc(ionic);
	if (err) {
		dev_err(dev, "Cannot allocate LIF: %d, aborting\n", err);
		goto err_out_free_irqs;
	}

	err = ionic_lif_init(ionic->lif);
	if (err) {
		dev_err(dev, "Cannot init LIF: %d, aborting\n", err);
		goto err_out_free_lifs;
	}

	init_rwsem(&ionic->vf_op_lock);
	/* pick up VFs that were left enabled by a previous driver instance */
	num_vfs = pci_num_vf(pdev);
	if (num_vfs) {
		dev_info(dev, "%d VFs found already enabled\n", num_vfs);
		err = ionic_vf_alloc(ionic, num_vfs);
		if (err)
			dev_err(dev, "Cannot enable existing VFs: %d\n", err);
	}

	err = ionic_devlink_register(ionic);
	if (err) {
		dev_err(dev, "Cannot register devlink: %d\n", err);
		goto err_out_deinit_lifs;
	}

	err = ionic_lif_register(ionic->lif);
	if (err) {
		dev_err(dev, "Cannot register LIF: %d, aborting\n", err);
		goto err_out_deregister_devlink;
	}

	mod_timer(&ionic->watchdog_timer,
		  round_jiffies(jiffies + ionic->watchdog_period));

	return 0;

err_out_deregister_devlink:
	ionic_devlink_unregister(ionic);
err_out_deinit_lifs:
	ionic_vf_dealloc(ionic);
	ionic_lif_deinit(ionic->lif);
err_out_free_lifs:
	ionic_lif_free(ionic->lif);
	ionic->lif = NULL;
err_out_free_irqs:
	ionic_bus_free_irq_vectors(ionic);
err_out_port_reset:
	ionic_port_reset(ionic);
err_out_reset:
	ionic_reset(ionic);
err_out_teardown:
	ionic_dev_teardown(ionic);
	pci_clear_master(pdev);
	/* Don't fail the probe for these errors, keep
	 * the hw interface around for inspection
	 */
	return 0;

err_out_unmap_bars:
	ionic_unmap_bars(ionic);
err_out_pci_release_regions:
	pci_release_regions(pdev);
err_out_pci_disable_device:
	pci_disable_device(pdev);
err_out_debugfs_del_dev:
	ionic_debugfs_del_dev(ionic);
err_out_clear_drvdata:
	mutex_destroy(&ionic->dev_cmd_lock);
	ionic_devlink_free(ionic);
	pci_set_drvdata(pdev, NULL);

	return err;
}
/* PCI remove: tear everything down in the reverse order of probe. */
static void ionic_remove(struct pci_dev *pdev)
{
	struct ionic *ionic = pci_get_drvdata(pdev);

	/* drvdata may be NULL if probe unwound via the full error path */
	if (!ionic)
		return;

	del_timer_sync(&ionic->watchdog_timer);

	if (ionic->lif) {
		ionic_lif_unregister(ionic->lif);
		ionic_devlink_unregister(ionic);
		ionic_lif_deinit(ionic->lif);
		ionic_lif_free(ionic->lif);
		ionic->lif = NULL;
		ionic_bus_free_irq_vectors(ionic);
	}

	ionic_port_reset(ionic);
	ionic_reset(ionic);
	ionic_dev_teardown(ionic);
	pci_clear_master(pdev);
	ionic_unmap_bars(ionic);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	ionic_debugfs_del_dev(ionic);
	mutex_destroy(&ionic->dev_cmd_lock);
	ionic_devlink_free(ionic);
}
/* PCI driver glue; sriov_configure backs sysfs sriov_numvfs */
static struct pci_driver ionic_driver = {
	.name = IONIC_DRV_NAME,
	.id_table = ionic_id_table,
	.probe = ionic_probe,
	.remove = ionic_remove,
	.sriov_configure = ionic_sriov_configure,
};
/* Register the PCI driver; called from module init. */
int ionic_bus_register_driver(void)
{
	return pci_register_driver(&ionic_driver);
}
/* Unregister the PCI driver; called from module exit. */
void ionic_bus_unregister_driver(void)
{
	pci_unregister_driver(&ionic_driver);
}

View File

@ -0,0 +1,472 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2022 Pensando Systems, Inc */
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_reserved_mem.h>
#include <linux/msi.h>
#include <linux/interrupt.h>
#include "ionic.h"
#include "ionic_bus.h"
#include "ionic_lif.h"
#include "ionic_debugfs.h"
#define IONIC_DEV_BAR 0
#define IONIC_INTR_CTRL_BAR 1
#define IONIC_MSIX_CFG_BAR 2
#define IONIC_DOORBELL_BAR 3
#define IONIC_TSTAMP_BAR 4
#define IONIC_REQUIRED_BARS 4
#define IONIC_NUM_OF_BAR 5
#define IONIC_INTR_MSIXCFG_STRIDE 0x10
/* Layout of one per-vector slot in the device's MSI-X config BAR */
struct ionic_intr_msixcfg {
	__le64 msgaddr;		/* MSI message address */
	__le32 msgdata;		/* MSI message data */
	__le32 vector_ctrl;	/* per-vector control (e.g. mask) */
};
/* Address of interrupt @intr's slot within the device's MSI-X config BAR.
 *
 * Fix vs original: the base-address print was dev_info(), which spammed
 * the console on every vector programming; demoted to dev_dbg().
 */
static void *ionic_intr_msixcfg_addr(struct device *mnic_dev, const int intr)
{
	struct ionic_dev *idev = (struct ionic_dev *) mnic_dev->platform_data;

	dev_dbg(mnic_dev, "msix_cfg_base: %p\n", idev->msix_cfg_base);
	return (idev->msix_cfg_base + (intr * IONIC_INTR_MSIXCFG_STRIDE));
}
static void ionic_intr_msixcfg(struct device *mnic_dev,
const int intr, const u64 msgaddr,
const u32 msgdata, const int vctrl)
{
void *pa = ionic_intr_msixcfg_addr(mnic_dev, intr);
writeq(msgaddr, (pa + offsetof(struct ionic_intr_msixcfg, msgaddr)));
writel(msgdata, (pa + offsetof(struct ionic_intr_msixcfg, msgdata)));
writel(vctrl, (pa + offsetof(struct ionic_intr_msixcfg, vector_ctrl)));
}
/* Resources can only be mapped once at a time. A second mapping will fail.
* For resources that are shared by multiple devices, we avoid using devm,
* because the mapping will not be used exclusively by one device, and if
* devices are unregistered in any order, the mapping must not be destroyed
* when the first device is unregistered, when other devices may still be using
* it. ionic_shared_resource just maintains a refcount for mapping a shared
* resource for use by multiple ionic devices.
*/
struct ionic_shared_resource {
	struct mutex lock;	/* serializes map/unmap; protects base and refs */
	void __iomem *base;	/* the single shared mapping, valid while refs > 0 */
	int refs;		/* number of devices currently sharing the mapping */
};
#define IONIC_SHARED_RESOURCE_INITIALIZER(shres) { .lock = __MUTEX_INITIALIZER(shres.lock) }
/* Map (or take another reference on) a resource shared between devices.
 *
 * Fixes vs original: a NULL return from ioremap() used to be passed back
 * as-is, which callers checking IS_ERR() would treat as success, and the
 * mem region claim was leaked in that case.  Now the region is released
 * and NULL is converted to IOMEM_ERR_PTR(-ENOMEM).
 *
 * Return: mapped base on success, IOMEM_ERR_PTR() on failure.
 */
static void __iomem *ionic_ioremap_shared_resource(struct ionic_shared_resource *shres,
						   struct resource *res)
{
	void __iomem *base;

	mutex_lock(&shres->lock);

	if (shres->refs) {
		/* already mapped on behalf of another device; share it */
		base = shres->base;
		++shres->refs;
	} else if (!request_mem_region(res->start, resource_size(res),
				       res->name ?: KBUILD_MODNAME)) {
		base = IOMEM_ERR_PTR(-EBUSY);
	} else {
		base = ioremap(res->start, resource_size(res));
		if (IS_ERR_OR_NULL(base)) {
			/* don't leak the region claim on a failed map */
			release_mem_region(res->start, resource_size(res));
			if (!base)
				base = IOMEM_ERR_PTR(-ENOMEM);
		} else {
			shres->base = base;
			++shres->refs;
		}
	}

	mutex_unlock(&shres->lock);

	return base;
}
/* Drop one reference on a shared mapping; the last put unmaps it and
 * releases the mem region.  A put with no outstanding refs is a bug
 * (WARN) and is otherwise ignored.
 */
static void ionic_iounmap_shared_resource(struct ionic_shared_resource *shres,
					  void __iomem *vaddr,
					  resource_size_t start,
					  resource_size_t n)
{
	mutex_lock(&shres->lock);
	if (WARN_ON(!shres->refs)) {
		mutex_unlock(&shres->lock);
		return;
	}
	--shres->refs;
	if (!shres->refs) {
		iounmap(vaddr);
		release_mem_region(start, n);
	}
	mutex_unlock(&shres->lock);
}
/* refcounted mapping of the timestamp BAR, shared by all mnic instances */
static struct ionic_shared_resource tstamp_res =
	IONIC_SHARED_RESOURCE_INITIALIZER(tstamp_res);
/* Map device interrupt index @num to its Linux irq number by walking the
 * platform MSI descriptors.
 *
 * Fix vs original: the per-lookup print was pr_info(), which fires for
 * every vector on every probe; demoted to pr_debug().
 *
 * Return: irq number, or -1 if @num exceeds the allocated vectors.
 */
int ionic_bus_get_irq(struct ionic *ionic, unsigned int num)
{
	struct msi_desc *desc;
	int i = 0;

	msi_for_each_desc(desc, ionic->dev, MSI_DESC_ALL) {
		if (i == num) {
			pr_debug("[i = %d] msi_entry: %d.%d\n",
				 i, desc->msi_index,
				 desc->irq);
			return desc->irq;
		}
		i++;
	}

	/* return error if user is asking more irqs than allocated */
	return -1;
}
/* Bus-level identifier string for this device (the platform device name). */
const char *ionic_bus_info(struct ionic *ionic)
{
	return ionic->pfdev->name;
}
/* platform-MSI write-msg callback: mirror the composed MSI message into
 * the device's MSI-X config slot for this vector.
 */
static void ionic_mnic_set_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	dev_dbg(desc->dev, "msi_index: [%d] (msi_addr hi_lo): %x_%x msi_data: %x\n",
		desc->msi_index, msg->address_hi,
		msg->address_lo, msg->data);

	ionic_intr_msixcfg(desc->dev, desc->msi_index,
			   (((u64)msg->address_hi << 32) | msg->address_lo),
			   msg->data, 0/*vctrl*/);
}
/* Allocate @nintrs platform MSI vectors, wiring each one up through
 * ionic_mnic_set_msi_msg().  Returns @nintrs or a negative errno.
 */
int ionic_bus_alloc_irq_vectors(struct ionic *ionic, unsigned int nintrs)
{
	int err;

	err = platform_msi_domain_alloc_irqs(ionic->dev, nintrs,
					     ionic_mnic_set_msi_msg);
	return err ? err : (int)nintrs;
}
/* Release all platform MSI vectors for this device. */
void ionic_bus_free_irq_vectors(struct ionic *ionic)
{
	platform_msi_domain_free_irqs(ionic->dev);
}
/* Allocate the mnic's multi-queue netdev, named after the platform device. */
struct net_device *ionic_alloc_netdev(struct ionic *ionic)
{
	unsigned int nqs = ionic->ntxqs_per_lif;
	struct net_device *netdev;
	struct ionic_lif *lif;

	netdev = alloc_netdev_mqs(sizeof(struct ionic_lif), ionic->pfdev->name,
				  NET_NAME_USER, ether_setup, nqs, nqs);
	if (!netdev)
		return NULL;

	/* lif name is used for naming the interrupt handler so better
	 * to name them differently for mnic
	 */
	lif = netdev_priv(netdev);
	snprintf(lif->name, sizeof(lif->name), "%s-", ionic->pfdev->name);

	return netdev;
}
/* Wire the mapped mnic BARs into the ionic_dev register pointers and
 * validate the device signature.
 *
 * Expects BARs in fixed order: dev regs, intr ctrl, msix cfg, doorbell,
 * and optionally the timestamp BAR.
 *
 * Return: 0, or -EFAULT if BARs are missing or the signature is wrong.
 */
static int ionic_mnic_dev_setup(struct ionic *ionic)
{
	unsigned int num_bars = ionic->num_bars;
	struct ionic_dev *idev = &ionic->idev;
	u32 sig;

	if (num_bars < IONIC_REQUIRED_BARS)
		return -EFAULT;

	idev->dev_info_regs = ionic->bars[IONIC_DEV_BAR].vaddr;
	idev->dev_cmd_regs = ionic->bars[IONIC_DEV_BAR].vaddr +
			     offsetof(union ionic_dev_regs, devcmd);
	idev->intr_ctrl = ionic->bars[IONIC_INTR_CTRL_BAR].vaddr;
	idev->msix_cfg_base = ionic->bars[IONIC_MSIX_CFG_BAR].vaddr;
	/* the timestamp BAR is optional */
	if (num_bars > IONIC_TSTAMP_BAR)
		idev->hwstamp_regs = ionic->bars[IONIC_TSTAMP_BAR].vaddr;
	else
		idev->hwstamp_regs = NULL;

	/* save the idev into dev->platform_data so we can use it later */
	ionic->dev->platform_data = idev;

	sig = ioread32(&idev->dev_info_regs->signature);
	if (sig != IONIC_DEV_INFO_SIGNATURE)
		return -EFAULT;

	ionic_init_devinfo(ionic);
	ionic_watchdog_init(ionic);

	idev->db_pages = ionic->bars[IONIC_DOORBELL_BAR].vaddr;
	idev->phy_db_pages = ionic->bars[IONIC_DOORBELL_BAR].bus_addr;

	ionic_debugfs_add_dev_cmd(ionic);

	return 0;
}
/* Map the platform MEM resources into ionic->bars.
 *
 * The timestamp BAR may be shared between mnic instances, so it goes
 * through the refcounted shared-resource mapper; all other BARs are
 * plain devm mappings.
 *
 * Fixes vs original: checks IS_ERR_OR_NULL() because ioremap() inside
 * the shared-resource path can return NULL, which a bare IS_ERR() would
 * treat as success; uses resource_size() instead of open-coded length.
 *
 * Return: 0, or -ENODEV on a mapping failure.
 */
static int ionic_map_bars(struct ionic *ionic)
{
	struct platform_device *pfdev = ionic->pfdev;
	struct ionic_dev_bar *bars = ionic->bars;
	struct device *dev = ionic->dev;
	struct resource *res;
	unsigned int i, j;
	void *base;

	ionic->num_bars = 0;

	/* i walks the platform resources, j the compacted bars[] array */
	for (i = 0, j = 0; i < IONIC_BARS_MAX; i++) {
		res = platform_get_resource(pfdev, IORESOURCE_MEM, i);
		if (!res)
			continue;
		if (i == IONIC_TSTAMP_BAR)
			base = ionic_ioremap_shared_resource(&tstamp_res, res);
		else
			base = devm_ioremap_resource(dev, res);
		if (IS_ERR_OR_NULL(base)) {
			dev_err(dev, "Cannot memory-map BAR %d, aborting\n", j);
			return -ENODEV;
		}
		bars[j].len = resource_size(res);
		bars[j].vaddr = base;
		bars[j].bus_addr = res->start;
		ionic->num_bars++;
		j++;
	}

	ionic_debugfs_add_bars(ionic);

	return 0;
}
/* Release every BAR mapping taken in ionic_map_bars(); the timestamp
 * BAR goes back through the shared-resource refcounting.
 *
 * NOTE(review): the comparison uses the bars[] array index against
 * IONIC_TSTAMP_BAR, which matches only while all lower-numbered
 * resources are present (as ionic_map_bars compacts indices) — confirm.
 */
static void ionic_unmap_bars(struct ionic *ionic)
{
	struct ionic_dev_bar *bars = ionic->bars;
	struct device *dev = ionic->dev;
	unsigned int i;

	for (i = 0; i < IONIC_BARS_MAX; i++) {
		if (!bars[i].vaddr)
			continue;

		dev_info(dev, "Unmapping BAR %d @%p, bus_addr: %llx\n",
			 i, bars[i].vaddr, bars[i].bus_addr);

		if (i == IONIC_TSTAMP_BAR) {
			ionic_iounmap_shared_resource(&tstamp_res, bars[i].vaddr,
						      bars[i].bus_addr, bars[i].len);
		} else {
			devm_iounmap(dev, bars[i].vaddr);
			devm_release_mem_region(dev, bars[i].bus_addr, bars[i].len);
		}
	}
}
/* mnic doorbell pages are premapped in dev setup; @page_num is unused here. */
void __iomem *ionic_bus_map_dbpage(struct ionic *ionic, int page_num)
{
	return ionic->idev.db_pages;
}
/* Nothing to do: the doorbell mapping belongs to the device, not the caller. */
void ionic_bus_unmap_dbpage(struct ionic *ionic, void __iomem *page)
{
}
/* Physical base of the doorbell pages; single region on mnic, so
 * @page_num is unused here.
 */
phys_addr_t ionic_bus_phys_dbpage(struct ionic *ionic, int page_num)
{
	return ionic->idev.phy_db_pages;
}
/* Platform (mnic) probe: bring the device up end to end.
 *
 * Order: reserved-mem + DMA mask, BAR mapping, mnic register setup,
 * identify/init, port identify/init, LIF size/alloc/init/register,
 * watchdog start.  Failures unwind via the error ladder below.
 *
 * Return: 0 or negative errno.
 */
int ionic_probe(struct platform_device *pfdev)
{
	struct device *dev = &pfdev->dev;
	struct device_node *np;
	struct ionic *ionic;
	int err;

	ionic = devm_kzalloc(dev, sizeof(*ionic), GFP_KERNEL);
	if (!ionic)
		return -ENOMEM;

	ionic->pfdev = pfdev;
	platform_set_drvdata(pfdev, ionic);
	ionic->dev = dev;
	mutex_init(&ionic->dev_cmd_lock);

	np = dev->of_node;
	if (!np) {
		dev_err(dev, "No device tree node\n");
		return -EINVAL;
	}

	/* optional carved-out DMA region; -ENODEV just means none configured */
	err = of_reserved_mem_device_init_by_idx(dev, np, 0);
	if (err != 0 && err != -ENODEV) {
		dev_err(dev, "Failed to init reserved memory region\n");
		return err;
	}

	err = ionic_set_dma_mask(ionic);
	if (err) {
		dev_err(dev, "Cannot set DMA mask, aborting\n");
		return err;
	}

	ionic_debugfs_add_dev(ionic);

	/* Setup platform device */
	err = ionic_map_bars(ionic);
	if (err)
		goto err_out_unmap_bars;

	/* Discover ionic dev resources */
	err = ionic_mnic_dev_setup(ionic);
	if (err) {
		dev_err(dev, "Cannot setup device, aborting\n");
		goto err_out_unmap_bars;
	}

	err = ionic_identify(ionic);
	if (err) {
		dev_err(dev, "Cannot identify device, aborting\n");
		goto err_out_unmap_bars;
	}
	ionic_debugfs_add_ident(ionic);

	err = ionic_init(ionic);
	if (err) {
		dev_err(dev, "Cannot init device, aborting\n");
		goto err_out_unmap_bars;
	}

	/* Configure the ports */
	err = ionic_port_identify(ionic);
	if (err) {
		dev_err(dev, "Cannot identify port: %d, aborting\n", err);
		goto err_out_unmap_bars;
	}

	/* mgmt NIC is detected from the port type, unlike PCI's device id */
	if (ionic->ident.port.type == IONIC_ETH_HOST_MGMT ||
	    ionic->ident.port.type == IONIC_ETH_MNIC_INTERNAL_MGMT)
		ionic->is_mgmt_nic = true;

	err = ionic_port_init(ionic);
	if (err) {
		dev_err(dev, "Cannot init port: %d, aborting\n", err);
		goto err_out_unmap_bars;
	}

	/* Allocate and init the LIF */
	err = ionic_lif_size(ionic);
	if (err) {
		dev_err(dev, "Cannot size LIF: %d, aborting\n", err);
		goto err_out_unmap_bars;
	}

	err = ionic_lif_alloc(ionic);
	if (err) {
		dev_err(dev, "Cannot allocate LIF: %d, aborting\n", err);
		goto err_out_free_irqs;
	}

	err = ionic_lif_init(ionic->lif);
	if (err) {
		dev_err(dev, "Cannot init LIF: %d, aborting\n", err);
		goto err_out_free_lifs;
	}

	err = ionic_lif_register(ionic->lif);
	if (err) {
		dev_err(dev, "Cannot register LIF: %d, aborting\n", err);
		goto err_out_deinit_lifs;
	}

	mod_timer(&ionic->watchdog_timer,
		  round_jiffies(jiffies + ionic->watchdog_period));

	return 0;

err_out_deinit_lifs:
	ionic_lif_deinit(ionic->lif);
err_out_free_lifs:
	ionic_lif_free(ionic->lif);
	ionic->lif = NULL;
err_out_free_irqs:
	ionic_bus_free_irq_vectors(ionic);
err_out_unmap_bars:
	ionic_unmap_bars(ionic);
	ionic_debugfs_del_dev(ionic);
	mutex_destroy(&ionic->dev_cmd_lock);
	platform_set_drvdata(pfdev, NULL);

	return err;
}
EXPORT_SYMBOL_GPL(ionic_probe);
/* Platform-device teardown: stop the watchdog, unwind the LIF, reset the
 * port and device, then release IRQ vectors, BAR mappings and debugfs.
 * The order mirrors the reverse of probe and must be preserved.
 */
int ionic_remove(struct platform_device *pfdev)
{
	struct ionic *ionic = platform_get_drvdata(pfdev);

	/* Nothing to do if probe never attached private data. */
	if (!ionic)
		return 0;

	del_timer_sync(&ionic->watchdog_timer);

	ionic_lif_unregister(ionic->lif);
	ionic_lif_deinit(ionic->lif);
	ionic_lif_free(ionic->lif);
	ionic->lif = NULL;

	ionic_port_reset(ionic);
	ionic_reset(ionic);

	ionic_bus_free_irq_vectors(ionic);
	ionic_unmap_bars(ionic);
	ionic_debugfs_del_dev(ionic);
	mutex_destroy(&ionic->dev_cmd_lock);
	dev_info(ionic->dev, "removed\n");

	return 0;
}
EXPORT_SYMBOL_GPL(ionic_remove);
/* Device-tree match table: binds this driver to "pensando,ionic-mnic" nodes. */
static const struct of_device_id mnic_of_match[] = {
	{.compatible = "pensando,ionic-mnic"},
	{/* end of table */}
};

/* Platform-bus glue for the management NIC exposed by the ELBA ASIC. */
static struct platform_driver ionic_driver = {
	.probe = ionic_probe,
	.remove = ionic_remove,
	.driver = {
		.name = "ionic-mnic",
		.owner = THIS_MODULE,
		.of_match_table = mnic_of_match,
	},
};

/* Register the platform driver; returns 0 on success or a negative errno. */
int ionic_bus_register_driver(void)
{
	return platform_driver_register(&ionic_driver);
}

/* Unregister the platform driver registered above. */
void ionic_bus_unregister_driver(void)
{
	platform_driver_unregister(&ionic_driver);
}

View File

@ -0,0 +1,584 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2022 Pensando Systems, Inc */
#include <linux/netdevice.h>
#include "ionic.h"
#include "ionic_bus.h"
#include "ionic_lif.h"
#include "ionic_ethtool.h"
#include "ionic_debugfs.h"
#include "kcompat.h"
#ifdef CONFIG_DEBUG_FS

/* Root debugfs directory shared by every ionic device (one per module). */
static struct dentry *ionic_dir;

/* Create the driver-level debugfs root; called once at module init. */
void ionic_debugfs_create(void)
{
	ionic_dir = debugfs_create_dir(IONIC_DRV_NAME, NULL);
}

/* Remove the driver-level debugfs root and everything beneath it. */
void ionic_debugfs_destroy(void)
{
	debugfs_remove_recursive(ionic_dir);
}

/* Create the per-device directory, named after the bus info string. */
void ionic_debugfs_add_dev(struct ionic *ionic)
{
	ionic->dentry = debugfs_create_dir(ionic_bus_info(ionic), ionic_dir);
}

/* Remove the per-device directory and clear the stale dentry handle. */
void ionic_debugfs_del_dev(struct ionic *ionic)
{
	debugfs_remove_recursive(ionic->dentry);
	ionic->dentry = NULL;
}
static int bars_show(struct seq_file *seq, void *v)
{
struct ionic *ionic = seq->private;
struct ionic_dev_bar *bars = ionic->bars;
unsigned int i;
for (i = 0; i < IONIC_BARS_MAX; i++)
if (bars[i].len)
seq_printf(seq, "BAR%d: res %d len 0x%08lx vaddr %pK bus_addr 0x%016llx\n",
i, bars[i].res_index, bars[i].len,
bars[i].vaddr, bars[i].bus_addr);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(bars);
/* Expose a read-only "bars" file listing this device's mapped BARs. */
void ionic_debugfs_add_bars(struct ionic *ionic)
{
	debugfs_create_file("bars", 0400, ionic->dentry, ionic, &bars_fops);
}
/* Register map of the dev_cmd region: doorbell, done flag, a 16-word
 * command and a 4-word completion, at 4-byte offsets.
 */
static const struct debugfs_reg32 dev_cmd_regs[] = {
	{ .name = "db", .offset = 0, },
	{ .name = "done", .offset = 4, },
	{ .name = "cmd.word[0]", .offset = 8, },
	{ .name = "cmd.word[1]", .offset = 12, },
	{ .name = "cmd.word[2]", .offset = 16, },
	{ .name = "cmd.word[3]", .offset = 20, },
	{ .name = "cmd.word[4]", .offset = 24, },
	{ .name = "cmd.word[5]", .offset = 28, },
	{ .name = "cmd.word[6]", .offset = 32, },
	{ .name = "cmd.word[7]", .offset = 36, },
	{ .name = "cmd.word[8]", .offset = 40, },
	{ .name = "cmd.word[9]", .offset = 44, },
	{ .name = "cmd.word[10]", .offset = 48, },
	{ .name = "cmd.word[11]", .offset = 52, },
	{ .name = "cmd.word[12]", .offset = 56, },
	{ .name = "cmd.word[13]", .offset = 60, },
	{ .name = "cmd.word[14]", .offset = 64, },
	{ .name = "cmd.word[15]", .offset = 68, },
	{ .name = "comp.word[0]", .offset = 72, },
	{ .name = "comp.word[1]", .offset = 76, },
	{ .name = "comp.word[2]", .offset = 80, },
	{ .name = "comp.word[3]", .offset = 84, },
};

/* Expose a "dev_cmd" regset file dumping the device command registers.
 * The regset is devm-allocated so it lives as long as the device.
 */
void ionic_debugfs_add_dev_cmd(struct ionic *ionic)
{
	struct debugfs_regset32 *dev_cmd_regset;
	struct device *dev = ionic->dev;

	dev_cmd_regset = devm_kzalloc(dev, sizeof(*dev_cmd_regset), GFP_KERNEL);
	if (!dev_cmd_regset)
		return;
	dev_cmd_regset->regs = dev_cmd_regs;
	dev_cmd_regset->nregs = ARRAY_SIZE(dev_cmd_regs);
	dev_cmd_regset->base = ionic->idev.dev_cmd_regs;

	debugfs_create_regset32("dev_cmd", 0400, ionic->dentry, dev_cmd_regset);
}
/* Print the three fields of a logical qtype under a common name prefix. */
static void identity_show_qtype(struct seq_file *seq, const char *name,
				struct ionic_lif_logical_qtype *qtype)
{
	seq_printf(seq, "%s_qtype:\t%d\n", name, qtype->qtype);
	seq_printf(seq, "%s_count:\t%d\n", name, qtype->qid_count);
	seq_printf(seq, "%s_base:\t%d\n", name, qtype->qid_base);
}

/* Dump the cached device/LIF identity plus live fw status registers.
 * NOTE(review): ident fields are printed without le*_to_cpu conversion —
 * presumably fine on the little-endian ELBA target; confirm if ported.
 */
static int identity_show(struct seq_file *seq, void *v)
{
	struct ionic *ionic = seq->private;
	struct ionic_identity *ident;
	struct ionic_dev *idev;

	ident = &ionic->ident;
	idev = &ionic->idev;

	seq_printf(seq, "asic_type: 0x%x\n", idev->dev_info.asic_type);
	seq_printf(seq, "asic_rev: 0x%x\n", idev->dev_info.asic_rev);
	seq_printf(seq, "serial_num: %s\n", idev->dev_info.serial_num);
	seq_printf(seq, "fw_version: %s\n", idev->dev_info.fw_version);
	/* these two are read from hardware on every show */
	seq_printf(seq, "fw_status: 0x%x\n",
		   ioread8(&idev->dev_info_regs->fw_status));
	seq_printf(seq, "fw_heartbeat: 0x%x\n",
		   ioread32(&idev->dev_info_regs->fw_heartbeat));
	seq_printf(seq, "cmb_pages: 0x%x\n", ionic_cmb_pages_in_use(ionic->lif));
	seq_printf(seq, "nlifs: %d\n", ident->dev.nlifs);
	seq_printf(seq, "nintrs: %d\n", ident->dev.nintrs);
	seq_printf(seq, "eth_eq_count: %d\n", ident->dev.eq_count);
	seq_printf(seq, "ndbpgs_per_lif: %d\n", ident->dev.ndbpgs_per_lif);
	seq_printf(seq, "intr_coal_mult: %d\n", ident->dev.intr_coal_mult);
	seq_printf(seq, "intr_coal_div: %d\n", ident->dev.intr_coal_div);
	seq_printf(seq, "max_ucast_filters: %d\n", ident->lif.eth.max_ucast_filters);
	seq_printf(seq, "max_mcast_filters: %d\n", ident->lif.eth.max_mcast_filters);
	seq_printf(seq, "rdma_qp_opcodes: %d\n", ident->lif.rdma.qp_opcodes);
	seq_printf(seq, "rdma_admin_opcodes: %d\n", ident->lif.rdma.admin_opcodes);
	seq_printf(seq, "rdma_max_stride: %d\n", ident->lif.rdma.max_stride);
	seq_printf(seq, "rdma_cl_stride: %d\n", ident->lif.rdma.cl_stride);
	seq_printf(seq, "rdma_pte_stride: %d\n", ident->lif.rdma.pte_stride);
	seq_printf(seq, "rdma_rrq_stride: %d\n", ident->lif.rdma.rrq_stride);
	seq_printf(seq, "rdma_rsq_stride: %d\n", ident->lif.rdma.rsq_stride);
	identity_show_qtype(seq, "rdma_aq", &ident->lif.rdma.aq_qtype);
	identity_show_qtype(seq, "rdma_sq", &ident->lif.rdma.sq_qtype);
	identity_show_qtype(seq, "rdma_rq", &ident->lif.rdma.rq_qtype);
	identity_show_qtype(seq, "rdma_cq", &ident->lif.rdma.cq_qtype);
	identity_show_qtype(seq, "rdma_eq", &ident->lif.rdma.eq_qtype);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(identity);

/* Expose the "identity" file under the per-device directory. */
void ionic_debugfs_add_ident(struct ionic *ionic)
{
	debugfs_create_file("identity", 0400, ionic->dentry,
			    ionic, &identity_fops);
}

/* Expose a few key sizing values as individual read-only u32 files. */
void ionic_debugfs_add_sizes(struct ionic *ionic)
{
	debugfs_create_u32("nlifs", 0400, ionic->dentry,
			   (u32 *)&ionic->ident.dev.nlifs);
	debugfs_create_u32("nintrs", 0400, ionic->dentry, &ionic->nintrs);
	debugfs_create_u32("ntxqs_per_lif", 0400, ionic->dentry,
			   (u32 *)&ionic->ident.lif.eth.config.queue_count[IONIC_QTYPE_TXQ]);
	debugfs_create_u32("nrxqs_per_lif", 0400, ionic->dentry,
			   (u32 *)&ionic->ident.lif.eth.config.queue_count[IONIC_QTYPE_RXQ]);
}
/* Show the current tail index of a queue. */
static int q_tail_show(struct seq_file *seq, void *v)
{
	struct ionic_queue *q = seq->private;

	seq_printf(seq, "%d\n", q->tail_idx);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(q_tail);

/* Show the current head index of a queue. */
static int q_head_show(struct seq_file *seq, void *v)
{
	struct ionic_queue *q = seq->private;

	seq_printf(seq, "%d\n", q->head_idx);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(q_head);

/* Show the current tail index of a completion queue. */
static int cq_tail_show(struct seq_file *seq, void *v)
{
	struct ionic_cq *cq = seq->private;

	seq_printf(seq, "%d\n", cq->tail_idx);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(cq_tail);

/* Register map of one interrupt control block, at 4-byte offsets. */
static const struct debugfs_reg32 intr_ctrl_regs[] = {
	{ .name = "coal_init", .offset = 0, },
	{ .name = "mask", .offset = 4, },
	{ .name = "credits", .offset = 8, },
	{ .name = "mask_on_assert", .offset = 12, },
	{ .name = "coal_timer", .offset = 16, },
};
/* Populate the per-qcq debugfs directory: queue/cq geometry and descriptor
 * blobs, per-queue tx/rx stats, interrupt control registers, and (for the
 * notifyq) the notify block status fields.
 *
 * Allocation failures and dentry errors cause an early return, leaving a
 * partially populated directory; debugfs files are best-effort only.
 */
void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct dentry *qcq_dentry, *q_dentry, *cq_dentry;
	struct dentry *intr_dentry, *stats_dentry;
	struct ionic_dev *idev = &lif->ionic->idev;
	struct debugfs_regset32 *intr_ctrl_regset;
	struct ionic_intr_info *intr = &qcq->intr;
	struct debugfs_blob_wrapper *desc_blob;
	struct device *dev = lif->ionic->dev;
	struct ionic_tx_stats *txqstats;
	struct ionic_rx_stats *rxqstats;
	struct ionic_queue *q = &qcq->q;
	struct ionic_cq *cq = &qcq->cq;

	qcq_dentry = debugfs_create_dir(q->name, lif->dentry);
	if (IS_ERR_OR_NULL(qcq_dentry))
		return;
	qcq->dentry = qcq_dentry;

	debugfs_create_x64("q_base_pa", 0400, qcq_dentry, &qcq->q_base_pa);
	debugfs_create_x32("q_size", 0400, qcq_dentry, &qcq->q_size);
	debugfs_create_x64("cq_base_pa", 0400, qcq_dentry, &qcq->cq_base_pa);
	debugfs_create_x32("cq_size", 0400, qcq_dentry, &qcq->cq_size);
	debugfs_create_x64("sg_base_pa", 0400, qcq_dentry, &qcq->sg_base_pa);
	debugfs_create_x32("sg_size", 0400, qcq_dentry, &qcq->sg_size);
	debugfs_create_x32("cmb_order", 0400, qcq_dentry, &qcq->cmb_order);
	debugfs_create_x32("cmb_pgid", 0400, qcq_dentry, &qcq->cmb_pgid);
#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_VERSION(7, 0) < RHEL_RELEASE_CODE))
	/* older RHEL debugfs_create_bool() took a u32/u8 pointer */
	debugfs_create_u8("armed", 0400, qcq_dentry, (u8 *)&qcq->armed);
#else
	debugfs_create_bool("armed", 0400, qcq_dentry, &qcq->armed);
#endif

	q_dentry = debugfs_create_dir("q", qcq->dentry);
	if (IS_ERR_OR_NULL(q_dentry))
		return;

	debugfs_create_u32("index", 0400, q_dentry, &q->index);
	debugfs_create_u32("num_descs", 0400, q_dentry, &q->num_descs);
	debugfs_create_u32("desc_size", 0400, q_dentry, &q->desc_size);
	debugfs_create_u32("pid", 0400, q_dentry, &q->pid);
	debugfs_create_u32("qid", 0400, q_dentry, &q->hw_index);
	debugfs_create_u32("qtype", 0400, q_dentry, &q->hw_type);
	debugfs_create_u64("drop", 0400, q_dentry, &q->drop);
	debugfs_create_u64("stop", 0400, q_dentry, &q->stop);
	debugfs_create_u64("wake", 0400, q_dentry, &q->wake);

	debugfs_create_file("tail", 0400, q_dentry, q, &q_tail_fops);
	debugfs_create_file("head", 0400, q_dentry, q, &q_head_fops);

	desc_blob = devm_kzalloc(dev, sizeof(*desc_blob), GFP_KERNEL);
	if (!desc_blob)
		return;
	desc_blob->data = q->base;
	desc_blob->size = (unsigned long)q->num_descs * q->desc_size;
	debugfs_create_blob("desc_blob", 0400, q_dentry, desc_blob);

	if (qcq->flags & IONIC_QCQ_F_SG) {
		desc_blob = devm_kzalloc(dev, sizeof(*desc_blob), GFP_KERNEL);
		if (!desc_blob)
			return;
		desc_blob->data = q->sg_base;
		desc_blob->size = (unsigned long)q->num_descs * q->sg_desc_size;
		debugfs_create_blob("sg_desc_blob", 0400, q_dentry,
				    desc_blob);
	}

	if (qcq->flags & IONIC_QCQ_F_TX_STATS) {
		stats_dentry = debugfs_create_dir("tx_stats", q_dentry);
		if (IS_ERR_OR_NULL(stats_dentry))
			return;
		txqstats = &lif->txqstats[q->index];

		/* BUGFIX: txqstats already points at this queue's stats
		 * entry; the old code indexed it again with q->index
		 * (txqstats[q->index]), reading past the array for any
		 * queue index > 0.
		 */
		debugfs_create_u64("dma_map_err", 0400, stats_dentry,
				   &txqstats->dma_map_err);
		debugfs_create_u64("pkts", 0400, stats_dentry,
				   &txqstats->pkts);
		debugfs_create_u64("bytes", 0400, stats_dentry,
				   &txqstats->bytes);
		debugfs_create_u64("clean", 0400, stats_dentry,
				   &txqstats->clean);
		debugfs_create_u64("linearize", 0400, stats_dentry,
				   &txqstats->linearize);
		debugfs_create_u64("csum_none", 0400, stats_dentry,
				   &txqstats->csum_none);
		debugfs_create_u64("csum", 0400, stats_dentry,
				   &txqstats->csum);
		debugfs_create_u64("crc32_csum", 0400, stats_dentry,
				   &txqstats->crc32_csum);
		debugfs_create_u64("tso", 0400, stats_dentry,
				   &txqstats->tso);
		debugfs_create_u64("frags", 0400, stats_dentry,
				   &txqstats->frags);
	}

	if (qcq->flags & IONIC_QCQ_F_RX_STATS) {
		stats_dentry = debugfs_create_dir("rx_stats", q_dentry);
		if (IS_ERR_OR_NULL(stats_dentry))
			return;
		rxqstats = &lif->rxqstats[q->index];

		/* BUGFIX: same double-indexing as the tx stats above */
		debugfs_create_u64("dma_map_err", 0400, stats_dentry,
				   &rxqstats->dma_map_err);
		debugfs_create_u64("alloc_err", 0400, stats_dentry,
				   &rxqstats->alloc_err);
		debugfs_create_u64("pkts", 0400, stats_dentry,
				   &rxqstats->pkts);
		debugfs_create_u64("bytes", 0400, stats_dentry,
				   &rxqstats->bytes);
		debugfs_create_u64("csum_none", 0400, stats_dentry,
				   &rxqstats->csum_none);
		debugfs_create_u64("csum_complete", 0400, stats_dentry,
				   &rxqstats->csum_complete);
		debugfs_create_u64("csum_error", 0400, stats_dentry,
				   &rxqstats->csum_error);
	}

	cq_dentry = debugfs_create_dir("cq", qcq->dentry);
	if (IS_ERR_OR_NULL(cq_dentry))
		return;

	debugfs_create_x64("base_pa", 0400, cq_dentry, &cq->base_pa);
	debugfs_create_u32("num_descs", 0400, cq_dentry, &cq->num_descs);
	debugfs_create_u32("desc_size", 0400, cq_dentry, &cq->desc_size);
#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_VERSION(7, 0) < RHEL_RELEASE_CODE))
	debugfs_create_u8("done_color", 0400, cq_dentry, (u8 *)&cq->done_color);
#else
	debugfs_create_bool("done_color", 0400, cq_dentry, &cq->done_color);
#endif

	debugfs_create_file("tail", 0400, cq_dentry, cq, &cq_tail_fops);

	desc_blob = devm_kzalloc(dev, sizeof(*desc_blob), GFP_KERNEL);
	if (!desc_blob)
		return;
	desc_blob->data = cq->base;
	desc_blob->size = (unsigned long)cq->num_descs * cq->desc_size;
	debugfs_create_blob("desc_blob", 0400, cq_dentry, desc_blob);

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		intr_dentry = debugfs_create_dir("intr", qcq->dentry);
		if (IS_ERR_OR_NULL(intr_dentry))
			return;

		debugfs_create_u32("index", 0400, intr_dentry,
				   &intr->index);
		debugfs_create_u32("vector", 0400, intr_dentry,
				   &intr->vector);
		debugfs_create_u32("dim_coal_hw", 0400, intr_dentry,
				   &intr->dim_coal_hw);

		intr_ctrl_regset = devm_kzalloc(dev, sizeof(*intr_ctrl_regset),
						GFP_KERNEL);
		if (!intr_ctrl_regset)
			return;
		intr_ctrl_regset->regs = intr_ctrl_regs;
		intr_ctrl_regset->nregs = ARRAY_SIZE(intr_ctrl_regs);
		intr_ctrl_regset->base = &idev->intr_ctrl[intr->index];

		debugfs_create_regset32("intr_ctrl", 0400, intr_dentry,
					intr_ctrl_regset);
	}

	if (qcq->flags & IONIC_QCQ_F_NOTIFYQ) {
		stats_dentry = debugfs_create_dir("notifyblock", qcq->dentry);
		if (IS_ERR_OR_NULL(stats_dentry))
			return;

		debugfs_create_u64("eid", 0400, stats_dentry,
				   (u64 *)&lif->info->status.eid);
		debugfs_create_u16("link_status", 0400, stats_dentry,
				   (u16 *)&lif->info->status.link_status);
		debugfs_create_u32("link_speed", 0400, stats_dentry,
				   (u32 *)&lif->info->status.link_speed);
		debugfs_create_u16("link_down_count", 0400, stats_dentry,
				   (u16 *)&lif->info->status.link_down_count);
	}
}
/* Show the name of the netdev bound to this LIF. */
static int netdev_show(struct seq_file *seq, void *v)
{
	struct net_device *netdev = seq->private;

	seq_printf(seq, "%s\n", netdev->name);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(netdev);

/* Dump the cached per-LIF identity (eth config plus rdma capabilities).
 * NOTE(review): fields are printed without le*_to_cpu conversion —
 * presumably fine on the little-endian target; confirm if ported.
 */
static int lif_identity_show(struct seq_file *seq, void *v)
{
	union ionic_lif_identity *lid = seq->private;

	seq_printf(seq, "capabilities: 0x%llx\n", lid->capabilities);
	seq_printf(seq, "eth-version: 0x%x\n", lid->eth.version);
	seq_printf(seq, "max_ucast_filters: %d\n", lid->eth.max_ucast_filters);
	seq_printf(seq, "max_mcast_filters: %d\n", lid->eth.max_mcast_filters);
	seq_printf(seq, "rss_ind_tbl_sz: %d\n", lid->eth.rss_ind_tbl_sz);
	seq_printf(seq, "min_frame_size: %d\n", lid->eth.min_frame_size);
	seq_printf(seq, "max_frame_size: %d\n", lid->eth.max_frame_size);
	seq_printf(seq, "state: %d\n", lid->eth.config.state);
	seq_printf(seq, "name: \"%s\"\n", lid->eth.config.name);
	seq_printf(seq, "mtu: %d\n", lid->eth.config.mtu);
	seq_printf(seq, "mac: %pM\n", lid->eth.config.mac);
	seq_printf(seq, "features: 0x%08llx\n",
		   lid->eth.config.features);
	seq_printf(seq, "adminq-count: %d\n",
		   lid->eth.config.queue_count[IONIC_QTYPE_ADMINQ]);
	seq_printf(seq, "notifyq-count: %d\n",
		   lid->eth.config.queue_count[IONIC_QTYPE_NOTIFYQ]);
	seq_printf(seq, "rxq-count: %d\n",
		   lid->eth.config.queue_count[IONIC_QTYPE_RXQ]);
	seq_printf(seq, "txq-count: %d\n",
		   lid->eth.config.queue_count[IONIC_QTYPE_TXQ]);
	seq_printf(seq, "eq-count: %d\n",
		   lid->eth.config.queue_count[IONIC_QTYPE_EQ]);
	seq_puts(seq, "\n");
	seq_printf(seq, "rdma_version: 0x%x\n", lid->rdma.version);
	seq_printf(seq, "rdma_qp_opcodes: %d\n", lid->rdma.qp_opcodes);
	seq_printf(seq, "rdma_admin_opcodes: %d\n", lid->rdma.admin_opcodes);
	seq_printf(seq, "rdma_npts_per_lif: %d\n", lid->rdma.npts_per_lif);
	seq_printf(seq, "rdma_nmrs_per_lif: %d\n", lid->rdma.nmrs_per_lif);
	seq_printf(seq, "rdma_nahs_per_lif: %d\n", lid->rdma.nahs_per_lif);
	seq_printf(seq, "rdma_max_stride: %d\n", lid->rdma.max_stride);
	seq_printf(seq, "rdma_cl_stride: %d\n", lid->rdma.cl_stride);
	seq_printf(seq, "rdma_pte_stride: %d\n", lid->rdma.pte_stride);
	seq_printf(seq, "rdma_rrq_stride: %d\n", lid->rdma.rrq_stride);
	seq_printf(seq, "rdma_rsq_stride: %d\n", lid->rdma.rsq_stride);
	seq_printf(seq, "rdma_dcqcn_profiles: %d\n", lid->rdma.dcqcn_profiles);
	identity_show_qtype(seq, "rdma_aq", &lid->rdma.aq_qtype);
	identity_show_qtype(seq, "rdma_sq", &lid->rdma.sq_qtype);
	identity_show_qtype(seq, "rdma_rq", &lid->rdma.rq_qtype);
	identity_show_qtype(seq, "rdma_cq", &lid->rdma.cq_qtype);
	identity_show_qtype(seq, "rdma_eq", &lid->rdma.eq_qtype);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(lif_identity);

/* Show the LIF state bitmap (first word only). */
static int lif_state_show(struct seq_file *seq, void *v)
{
	struct ionic_lif *lif = seq->private;

	seq_printf(seq, "0x%08lx\n", lif->state[0]);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(lif_state);

/* Walk the rx filter hash (under its lock) and print one line per
 * filter, formatted by match type.
 */
static int lif_filters_show(struct seq_file *seq, void *v)
{
	struct ionic_lif *lif = seq->private;
	struct ionic_rx_filter *f;
	struct hlist_head *head;
	struct hlist_node *tmp;
	unsigned int i;

	seq_puts(seq, "id      flow    state type  filter\n");
	spin_lock_bh(&lif->rx_filters.lock);
	for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
		head = &lif->rx_filters.by_id[i];
		hlist_for_each_entry_safe(f, tmp, head, by_id) {
			switch (le16_to_cpu(f->cmd.match)) {
			case IONIC_RX_FILTER_MATCH_VLAN:
				seq_printf(seq, "0x%04x  0x%08x  0x%02x  vlan  0x%04x\n",
					   f->filter_id, f->flow_id, f->state,
					   le16_to_cpu(f->cmd.vlan.vlan));
				break;
			case IONIC_RX_FILTER_MATCH_MAC:
				seq_printf(seq, "0x%04x  0x%08x  0x%02x  mac   %pM\n",
					   f->filter_id, f->flow_id, f->state,
					   f->cmd.mac.addr);
				break;
			case IONIC_RX_FILTER_MATCH_MAC_VLAN:
				seq_printf(seq, "0x%04x  0x%08x  0x%02x  macvl 0x%04x %pM\n",
					   f->filter_id, f->flow_id, f->state,
					   le16_to_cpu(f->cmd.vlan.vlan),
					   f->cmd.mac.addr);
				break;
			case IONIC_RX_FILTER_STEER_PKTCLASS:
				seq_printf(seq, "0x%04x  0x%08x  0x%02x  rxstr 0x%llx\n",
					   f->filter_id, f->flow_id, f->state,
					   le64_to_cpu(f->cmd.pkt_class));
				break;
			}
		}
	}
	spin_unlock_bh(&lif->rx_filters.lock);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(lif_filters);

/* Show the running count of tx/rx queue allocations for this LIF. */
static int lif_n_txrx_alloc_show(struct seq_file *seq, void *v)
{
	struct ionic_lif *lif = seq->private;

	seq_printf(seq, "%llu\n", lif->n_txrx_alloc);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(lif_n_txrx_alloc);
/* Create the per-LIF debugfs directory and its standard files. */
void ionic_debugfs_add_lif(struct ionic_lif *lif)
{
	struct dentry *lif_dentry;

	lif_dentry = debugfs_create_dir(lif->name, lif->ionic->dentry);
	if (IS_ERR_OR_NULL(lif_dentry))
		return;
	lif->dentry = lif_dentry;

	debugfs_create_file("netdev", 0400, lif->dentry,
			    lif->netdev, &netdev_fops);
	debugfs_create_file("identity", 0400, lif->dentry,
			    lif->identity, &lif_identity_fops);
	debugfs_create_file("state", 0400, lif->dentry,
			    lif, &lif_state_fops);
	debugfs_create_file("filters", 0400, lif->dentry,
			    lif, &lif_filters_fops);
	debugfs_create_file("txrx_alloc", 0400, lif->dentry,
			    lif, &lif_n_txrx_alloc_fops);
}

/* Remove the per-LIF directory and clear the stale dentry handle. */
void ionic_debugfs_del_lif(struct ionic_lif *lif)
{
	debugfs_remove_recursive(lif->dentry);
	lif->dentry = NULL;
}
/* Create the per-event-queue debugfs directory with blobs for both EQ
 * rings and a regset for its interrupt control block.
 *
 * BUGFIX: the devm_kzalloc() results were dereferenced without a NULL
 * check, an OOM would oops; now we bail out early like the rest of
 * this file does.
 */
void ionic_debugfs_add_eq(struct ionic_eq *eq)
{
	const int ring_bytes = sizeof(struct ionic_eq_comp) * IONIC_EQ_DEPTH;
	struct device *dev = eq->ionic->dev;
	struct debugfs_blob_wrapper *blob;
	struct debugfs_regset32 *regset;
	struct dentry *ent;
	char name[40];

	snprintf(name, sizeof(name), "eq%02u", eq->index);

	ent = debugfs_create_dir(name, eq->ionic->dentry);
	if (IS_ERR_OR_NULL(ent))
		return;

	blob = devm_kzalloc(dev, sizeof(*blob), GFP_KERNEL);
	if (!blob)
		return;
	blob->data = eq->ring[0].base;
	blob->size = ring_bytes;
	debugfs_create_blob("ring0", 0400, ent, blob);

	blob = devm_kzalloc(dev, sizeof(*blob), GFP_KERNEL);
	if (!blob)
		return;
	blob->data = eq->ring[1].base;
	blob->size = ring_bytes;
	debugfs_create_blob("ring1", 0400, ent, blob);

	regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
	if (!regset)
		return;
	regset->regs = intr_ctrl_regs;
	regset->nregs = ARRAY_SIZE(intr_ctrl_regs);
	regset->base = &eq->ionic->idev.intr_ctrl[eq->intr.index];
	debugfs_create_regset32("intr_ctrl", 0400, ent, regset);
}
/* Remove the per-qcq directory and clear the stale dentry handle. */
void ionic_debugfs_del_qcq(struct ionic_qcq *qcq)
{
	debugfs_remove_recursive(qcq->dentry);
	qcq->dentry = NULL;
}
#endif

View File

@ -0,0 +1,43 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2017 - 2022 Pensando Systems, Inc */

#ifndef _IONIC_DEBUGFS_H_
#define _IONIC_DEBUGFS_H_

#include <linux/debugfs.h>

struct ionic;
struct ionic_qcq;

/* Debugfs hooks for the ionic driver.  When CONFIG_DEBUG_FS is off every
 * entry point collapses to an empty inline stub so callers need no #ifdefs.
 */
#ifdef CONFIG_DEBUG_FS
void ionic_debugfs_create(void);
void ionic_debugfs_destroy(void);
void ionic_debugfs_add_dev(struct ionic *ionic);
void ionic_debugfs_del_dev(struct ionic *ionic);
void ionic_debugfs_add_bars(struct ionic *ionic);
void ionic_debugfs_add_dev_cmd(struct ionic *ionic);
void ionic_debugfs_add_ident(struct ionic *ionic);
void ionic_debugfs_add_sizes(struct ionic *ionic);
void ionic_debugfs_add_eq(struct ionic_eq *eq);
void ionic_debugfs_add_lif(struct ionic_lif *lif);
void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq);
void ionic_debugfs_del_lif(struct ionic_lif *lif);
void ionic_debugfs_del_qcq(struct ionic_qcq *qcq);
#else
static inline void ionic_debugfs_create(void) { }
static inline void ionic_debugfs_destroy(void) { }
static inline void ionic_debugfs_add_dev(struct ionic *ionic) { }
static inline void ionic_debugfs_del_dev(struct ionic *ionic) { }
static inline void ionic_debugfs_add_bars(struct ionic *ionic) { }
static inline void ionic_debugfs_add_dev_cmd(struct ionic *ionic) { }
static inline void ionic_debugfs_add_ident(struct ionic *ionic) { }
static inline void ionic_debugfs_add_sizes(struct ionic *ionic) { }
static inline void ionic_debugfs_add_eq(struct ionic_eq *eq) { }
static inline void ionic_debugfs_add_lif(struct ionic_lif *lif) { }
static inline void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq) { }
static inline void ionic_debugfs_del_lif(struct ionic_lif *lif) { }
static inline void ionic_debugfs_del_qcq(struct ionic_qcq *qcq) { }
#endif

#endif /* _IONIC_DEBUGFS_H_ */

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,437 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2017 - 2022 Pensando Systems, Inc */

#ifndef _IONIC_DEV_H_
#define _IONIC_DEV_H_

#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

#include "ionic_if.h"
#include "ionic_api.h"
#include "ionic_regs.h"

/* Descriptor-ring sizing limits and defaults */
#define IONIC_MAX_TX_DESC		8192
#define IONIC_MAX_RX_DESC		16384
#define IONIC_MIN_TXRX_DESC		64
#define IONIC_DEF_TXRX_DESC		4096
#define IONIC_RX_FILL_THRESHOLD		64
#define IONIC_RX_FILL_DIV		8
#define IONIC_LIFS_MAX			1024
/* Watchdog cadence: seconds on PCI, milliseconds on the platform bus */
#define IONIC_WATCHDOG_PCI_SECS		5
#define IONIC_WATCHDOG_PLAT_MSECS	100
#define IONIC_HEARTBEAT_SECS		1
#define IONIC_ITR_COAL_USEC_DEFAULT	8

#define IONIC_DEV_CMD_REG_VERSION	1
#define IONIC_DEV_INFO_REG_COUNT	32
#define IONIC_DEV_CMD_REG_COUNT		32

/* Doorbell-ringing deadlines, in jiffies */
#define IONIC_NAPI_DEADLINE		(HZ / 200)	/* 5ms */
#define IONIC_ADMIN_DOORBELL_DEADLINE	(HZ / 2)	/* 500ms */
#define IONIC_TX_DOORBELL_DEADLINE	(HZ / 100)	/* 10ms */
#define IONIC_RX_MIN_DOORBELL_DEADLINE	(HZ / 100)	/* 10ms */
#define IONIC_RX_MAX_DOORBELL_DEADLINE	(HZ * 5)	/* 5s */

/* One mapped BAR: virtual/bus addresses, length and resource index. */
struct ionic_dev_bar {
	void __iomem *vaddr;
	phys_addr_t bus_addr;
	unsigned long len;
	int res_index;
};
/* Compile-time checks that the host structures match the sizes the device
 * firmware expects on the wire; skipped under sparse (__CHECKER__).
 */
#ifndef __CHECKER__
/* Registers */
static_assert(sizeof(struct ionic_intr) == 32);
static_assert(sizeof(struct ionic_doorbell) == 8);
static_assert(sizeof(struct ionic_intr_ctrl) == 32);
static_assert(sizeof(struct ionic_intr_status) == 8);
static_assert(sizeof(union ionic_dev_regs) == 4096);
static_assert(sizeof(union ionic_dev_info_regs) == 2048);
static_assert(sizeof(union ionic_dev_cmd_regs) == 2048);
static_assert(sizeof(struct ionic_lif_stats) == 1024);

static_assert(sizeof(struct ionic_admin_cmd) == 64);
static_assert(sizeof(struct ionic_admin_comp) == 16);
static_assert(sizeof(struct ionic_nop_cmd) == 64);
static_assert(sizeof(struct ionic_nop_comp) == 16);

/* Device commands */
static_assert(sizeof(struct ionic_dev_identify_cmd) == 64);
static_assert(sizeof(struct ionic_dev_identify_comp) == 16);
static_assert(sizeof(struct ionic_dev_init_cmd) == 64);
static_assert(sizeof(struct ionic_dev_init_comp) == 16);
static_assert(sizeof(struct ionic_dev_reset_cmd) == 64);
static_assert(sizeof(struct ionic_dev_reset_comp) == 16);
static_assert(sizeof(struct ionic_dev_getattr_cmd) == 64);
static_assert(sizeof(struct ionic_dev_getattr_comp) == 16);
static_assert(sizeof(struct ionic_dev_setattr_cmd) == 64);
static_assert(sizeof(struct ionic_dev_setattr_comp) == 16);
static_assert(sizeof(struct ionic_hii_getattr_cmd) == 64);
static_assert(sizeof(struct ionic_hii_getattr_comp) == 16);

/* Port commands */
static_assert(sizeof(struct ionic_port_identify_cmd) == 64);
static_assert(sizeof(struct ionic_port_identify_comp) == 16);
static_assert(sizeof(struct ionic_port_init_cmd) == 64);
static_assert(sizeof(struct ionic_port_init_comp) == 16);
static_assert(sizeof(struct ionic_port_reset_cmd) == 64);
static_assert(sizeof(struct ionic_port_reset_comp) == 16);
static_assert(sizeof(struct ionic_port_getattr_cmd) == 64);
static_assert(sizeof(struct ionic_port_getattr_comp) == 16);
static_assert(sizeof(struct ionic_port_setattr_cmd) == 64);
static_assert(sizeof(struct ionic_port_setattr_comp) == 16);

/* LIF commands */
static_assert(sizeof(struct ionic_lif_init_cmd) == 64);
static_assert(sizeof(struct ionic_lif_init_comp) == 16);
static_assert(sizeof(struct ionic_lif_reset_cmd) == 64);
static_assert(sizeof(ionic_lif_reset_comp) == 16);
static_assert(sizeof(struct ionic_lif_getattr_cmd) == 64);
static_assert(sizeof(struct ionic_lif_getattr_comp) == 16);
static_assert(sizeof(struct ionic_lif_setattr_cmd) == 64);
static_assert(sizeof(struct ionic_lif_setattr_comp) == 16);
static_assert(sizeof(struct ionic_lif_setphc_cmd) == 64);

static_assert(sizeof(struct ionic_q_init_cmd) == 64);
static_assert(sizeof(struct ionic_q_init_comp) == 16);
static_assert(sizeof(struct ionic_q_control_cmd) == 64);
static_assert(sizeof(ionic_q_control_comp) == 16);
static_assert(sizeof(struct ionic_q_identify_cmd) == 64);
static_assert(sizeof(struct ionic_q_identify_comp) == 16);

static_assert(sizeof(struct ionic_rx_mode_set_cmd) == 64);
static_assert(sizeof(ionic_rx_mode_set_comp) == 16);
static_assert(sizeof(struct ionic_rx_filter_add_cmd) == 64);
static_assert(sizeof(struct ionic_rx_filter_add_comp) == 16);
static_assert(sizeof(struct ionic_rx_filter_del_cmd) == 64);
static_assert(sizeof(ionic_rx_filter_del_comp) == 16);

/* RDMA commands */
static_assert(sizeof(struct ionic_rdma_reset_cmd) == 64);
static_assert(sizeof(struct ionic_rdma_queue_cmd) == 64);

/* Events */
static_assert(sizeof(struct ionic_notifyq_cmd) == 4);
static_assert(sizeof(union ionic_notifyq_comp) == 64);
static_assert(sizeof(struct ionic_notifyq_event) == 64);
static_assert(sizeof(struct ionic_link_change_event) == 64);
static_assert(sizeof(struct ionic_reset_event) == 64);
static_assert(sizeof(struct ionic_heartbeat_event) == 64);
static_assert(sizeof(struct ionic_log_event) == 64);

/* I/O */
static_assert(sizeof(struct ionic_txq_desc) == 16);
static_assert(sizeof(struct ionic_txq_sg_desc) == 128);
static_assert(sizeof(struct ionic_txq_comp) == 16);
static_assert(sizeof(struct ionic_rxq_desc) == 16);
static_assert(sizeof(struct ionic_rxq_sg_desc) == 128);
static_assert(sizeof(struct ionic_rxq_comp) == 16);

/* SR/IOV */
static_assert(sizeof(struct ionic_vf_setattr_cmd) == 64);
static_assert(sizeof(struct ionic_vf_setattr_comp) == 16);
static_assert(sizeof(struct ionic_vf_getattr_cmd) == 64);
static_assert(sizeof(struct ionic_vf_getattr_comp) == 16);
#endif /* __CHECKER__ */
/* Per-device hardware state: register mappings, firmware liveness
 * tracking, doorbell/interrupt resources and CMB page accounting.
 */
struct ionic_dev {
	union ionic_dev_info_regs __iomem *dev_info_regs;
	union ionic_dev_cmd_regs __iomem *dev_cmd_regs;
	struct ionic_hwstamp_regs __iomem *hwstamp_regs;

	/* firmware heartbeat/status tracking (updated by the watchdog) */
	atomic_long_t last_check_time;
	unsigned long last_hb_time;
	u32 last_fw_hb;
	bool fw_hb_ready;
	bool fw_status_ready;
	u8 fw_generation;

	u64 __iomem *db_pages;
	dma_addr_t phy_db_pages;

	struct ionic_intr __iomem *intr_ctrl;
	u64 __iomem *intr_status;
	u8 *msix_cfg_base;

	struct mutex cmb_inuse_lock; /* for cmb_inuse */
	unsigned long *cmb_inuse;    /* bitmap of controller-memory pages */
	dma_addr_t phy_cmb_pages;
	u32 cmb_npages;

	u32 port_info_sz;
	struct ionic_port_info *port_info;
	dma_addr_t port_info_pa;

	struct ionic_devinfo dev_info;
};

/* One completion descriptor, viewed generically or per queue type. */
struct ionic_cq_info {
	union {
		void *cq_desc;
		struct ionic_admin_comp *admincq;
		struct ionic_notifyq_event *notifyq;
	};
};

struct ionic_queue;
struct ionic_qcq;
struct ionic_desc_info;

/* Per-descriptor completion callback, invoked when the descriptor's
 * completion is serviced.
 */
typedef void (*ionic_desc_cb)(struct ionic_queue *q,
			      struct ionic_desc_info *desc_info,
			      struct ionic_cq_info *cq_info, void *cb_arg);

/* Rx buffer page sizing: order-0 pages split into quarters for small MTUs */
#define IONIC_PAGE_ORDER	0
#define IONIC_PAGE_SIZE		(PAGE_SIZE << IONIC_PAGE_ORDER)
#define IONIC_PAGE_SPLIT_SZ	(PAGE_SIZE / 4)
#define IONIC_PAGE_GFP_MASK	(GFP_ATOMIC | __GFP_NOWARN |\
				 __GFP_COMP | __GFP_MEMALLOC)

/* One DMA-mapped buffer fragment within a descriptor. */
struct ionic_buf_info {
	struct page *page;
	dma_addr_t dma_addr;
	u32 page_offset;
	u32 len;
};

/* Fixed-size ring used to recycle rx pages; head==tail means empty. */
#define IONIC_PAGE_CACHE_SIZE	2048

struct ionic_page_cache {
	u32 head;
	u32 tail;
	struct ionic_buf_info ring[IONIC_PAGE_CACHE_SIZE];
} ____cacheline_aligned_in_smp;

#define IONIC_MAX_FRAGS		(1 + IONIC_TX_MAX_SG_ELEMS_V1)

/* Host-side bookkeeping for one descriptor: typed views of the desc and
 * its SG list, the mapped buffers, and the completion callback.
 */
struct ionic_desc_info {
	union {
		void *desc;
		struct ionic_txq_desc *txq_desc;
		struct ionic_rxq_desc *rxq_desc;
		struct ionic_admin_cmd *adminq_desc;
	};
	void __iomem *cmb_desc;
	union {
		void *sg_desc;
		struct ionic_txq_sg_desc *txq_sg_desc;
		struct ionic_rxq_sg_desc *rxq_sgl_desc;
	};
	unsigned int bytes;
	unsigned int nbufs;
	struct ionic_buf_info bufs[IONIC_MAX_FRAGS];
	ionic_desc_cb cb;
	void *cb_arg;
};
#define IONIC_QUEUE_NAME_MAX_SZ		32

/* One descriptor ring (tx, rx, or admin): indices, doorbell tracking,
 * typed views of the ring memory, and per-queue counters.
 */
struct ionic_queue {
	struct device *dev;
	struct ionic_lif *lif;
	struct ionic_desc_info *info;
	u64 dbval;
	unsigned long dbell_deadline;
	unsigned long dbell_jiffies;
	u16 head_idx;	/* next descriptor to post */
	u16 tail_idx;	/* next descriptor to complete */
	unsigned int index;
	unsigned int num_descs;
	unsigned int max_sg_elems;
	u64 dbell_count;
	u64 stop;
	u64 wake;
	u64 drop;
#ifdef IONIC_DEBUG_STATS
	u64 depth;
	u64 depth_max;
#endif
	u64 features;
	struct ionic_dev *idev;
	unsigned int type;
	unsigned int hw_index;
	unsigned int hw_type;
	union {
		void *base;
		struct ionic_txq_desc *txq;
		struct ionic_rxq_desc *rxq;
		struct ionic_admin_cmd *adminq;
	};
	void __iomem *cmb_base;
	union {
		void *sg_base;
		struct ionic_txq_sg_desc *txq_sgl;
		struct ionic_rxq_sg_desc *rxq_sgl;
	};
	dma_addr_t base_pa;	/* must be page aligned */
	dma_addr_t cmb_base_pa;
	dma_addr_t sg_base_pa;	/* must be page aligned */
	unsigned int desc_size;
	unsigned int sg_desc_size;
	unsigned int pid;
	struct ionic_page_cache page_cache;
	char name[IONIC_QUEUE_NAME_MAX_SZ];
} ____cacheline_aligned_in_smp;

#define IONIC_INTR_INDEX_NOT_ASSIGNED	-1
#define IONIC_INTR_NAME_MAX_SZ		32

/* One interrupt: hw index, OS vector, affinity and coalesce setting. */
struct ionic_intr_info {
	char name[IONIC_INTR_NAME_MAX_SZ];
	unsigned int index;
	unsigned int vector;
	u64 rearm_count;
	unsigned int cpu;
	cpumask_t affinity_mask;
	u32 dim_coal_hw;
};

/* One completion ring bound to a queue and (optionally) an interrupt. */
struct ionic_cq {
	struct ionic_lif *lif;
	struct ionic_cq_info *info;
	struct ionic_queue *bound_q;
	struct ionic_intr_info *bound_intr;
	u16 tail_idx;
	bool done_color;	/* expected color bit for valid completions */
	unsigned int num_descs;
	unsigned int desc_size;
#ifdef IONIC_DEBUG_STATS
	u64 compl_count;
#endif
	void *base;
	dma_addr_t base_pa;	/* must be page aligned */
} ____cacheline_aligned_in_smp;

/* One of the two alternating event-queue rings. */
struct ionic_eq_ring {
	struct ionic_eq_comp *base;
	dma_addr_t base_pa;

	int index;
	u8 gen_color;
};

/* An event queue: a pair of rings plus its interrupt. */
struct ionic_eq {
	struct ionic *ionic;
	struct ionic_eq_ring ring[2];
	struct ionic_intr_info intr;

	int index;
	int depth;

	bool is_init;
};

#define IONIC_EQ_DEPTH		0x1000
/* Clean the hw interrupt slot at @index, then bind it to @intr. */
static inline void ionic_intr_init(struct ionic_dev *idev,
				   struct ionic_intr_info *intr,
				   unsigned long index)
{
	ionic_intr_clean(idev->intr_ctrl, index);
	intr->index = index;
}
/* Number of free descriptors in the ring; one slot is always kept
 * unused so that head == tail unambiguously means "empty".
 */
static inline unsigned int ionic_q_space_avail(struct ionic_queue *q)
{
	unsigned int head = q->head_idx;
	unsigned int tail = q->tail_idx;

	if (head >= tail)
		return tail + q->num_descs - head - 1;

	return tail - head - 1;
}
/* True if at least @want descriptors are free in the ring. */
static inline bool ionic_q_has_space(struct ionic_queue *q, unsigned int want)
{
	return ionic_q_space_avail(q) >= want;
}
/* Device setup/teardown */
void ionic_init_devinfo(struct ionic *ionic);
int ionic_dev_setup(struct ionic *ionic);
void ionic_dev_teardown(struct ionic *ionic);

/* dev_cmd mailbox: post a command, poll done, read status/completion */
void ionic_dev_cmd_go(struct ionic_dev *idev, union ionic_dev_cmd *cmd);
u8 ionic_dev_cmd_status(struct ionic_dev *idev);
bool ionic_dev_cmd_done(struct ionic_dev *idev);
void ionic_dev_cmd_comp(struct ionic_dev *idev, union ionic_dev_cmd_comp *comp);

void ionic_dev_cmd_identify(struct ionic_dev *idev, u8 ver);
void ionic_dev_cmd_init(struct ionic_dev *idev);
void ionic_dev_cmd_reset(struct ionic_dev *idev);

/* Port commands */
void ionic_dev_cmd_port_identify(struct ionic_dev *idev);
void ionic_dev_cmd_port_init(struct ionic_dev *idev);
void ionic_dev_cmd_port_reset(struct ionic_dev *idev);
void ionic_dev_cmd_port_state(struct ionic_dev *idev, u8 state);
void ionic_dev_cmd_port_speed(struct ionic_dev *idev, u32 speed);
void ionic_dev_cmd_port_autoneg(struct ionic_dev *idev, u8 an_enable);
void ionic_dev_cmd_port_fec(struct ionic_dev *idev, u8 fec_type);
void ionic_dev_cmd_port_pause(struct ionic_dev *idev, u8 pause_type);

/* VF attribute get/set */
int ionic_set_vf_config(struct ionic *ionic, int vf,
			struct ionic_vf_setattr_cmd *vfc);
int ionic_dev_cmd_vf_getattr(struct ionic *ionic, int vf, u8 attr,
			     struct ionic_vf_getattr_comp *comp);
void ionic_vf_start(struct ionic *ionic, int vf);

/* Queue and LIF commands */
void ionic_dev_cmd_queue_identify(struct ionic_dev *idev,
				  u16 lif_type, u8 qtype, u8 qver);
void ionic_dev_cmd_lif_identify(struct ionic_dev *idev, u8 type, u8 ver);
void ionic_dev_cmd_lif_init(struct ionic_dev *idev, u16 lif_index,
			    dma_addr_t addr);
void ionic_dev_cmd_lif_reset(struct ionic_dev *idev, u16 lif_index);
void ionic_dev_cmd_adminq_init(struct ionic_dev *idev, struct ionic_qcq *qcq,
			       u16 lif_index, u16 intr_index);

/* Doorbell pages and controller-memory buffer pages */
int ionic_db_page_num(struct ionic_lif *lif, int pid);
int ionic_get_cmb(struct ionic_lif *lif, u32 *pgid, phys_addr_t *pgaddr, int order);
void ionic_put_cmb(struct ionic_lif *lif, u32 pgid, int order);

/* Event queues */
int ionic_eqs_alloc(struct ionic *ionic);
void ionic_eqs_free(struct ionic *ionic);
void ionic_eqs_deinit(struct ionic *ionic);
int ionic_eqs_init(struct ionic *ionic);

/* Completion queues */
int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq,
		  struct ionic_intr_info *intr,
		  unsigned int num_descs, size_t desc_size);
void ionic_cq_map(struct ionic_cq *cq, void *base, dma_addr_t base_pa);
void ionic_cq_bind(struct ionic_cq *cq, struct ionic_queue *q);
/* Per-completion service callback; returns true while work remains. */
typedef bool (*ionic_cq_cb)(struct ionic_cq *cq, struct ionic_cq_info *cq_info);
typedef void (*ionic_cq_done_cb)(void *done_arg);
unsigned int ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do,
ionic_cq_cb cb, ionic_cq_done_cb done_cb,
void *done_arg);
int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev,
struct ionic_queue *q, unsigned int index, const char *name,
unsigned int num_descs, size_t desc_size,
size_t sg_desc_size, unsigned int pid);
void ionic_q_map(struct ionic_queue *q, void *base, dma_addr_t base_pa);
void ionic_q_cmb_map(struct ionic_queue *q, void __iomem *base, dma_addr_t base_pa);
void ionic_q_sg_map(struct ionic_queue *q, void *base, dma_addr_t base_pa);
void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb,
void *cb_arg);
void ionic_q_rewind(struct ionic_queue *q, struct ionic_desc_info *start);
void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info,
unsigned int stop_index);
int ionic_heartbeat_check(struct ionic *ionic);
bool ionic_is_fw_running(struct ionic_dev *idev);
void ionic_watchdog_cb(struct timer_list *t);
void ionic_watchdog_init(struct ionic *ionic);
bool ionic_adminq_poke_doorbell(struct ionic_queue *q);
bool ionic_txq_poke_doorbell(struct ionic_queue *q);
bool ionic_rxq_poke_doorbell(struct ionic_queue *q);
#endif /* _IONIC_DEV_H_ */

View File

@ -0,0 +1,153 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2022 Pensando Systems, Inc */
#include <linux/module.h>
#include <linux/netdevice.h>
#include "ionic.h"
#include "ionic_bus.h"
#include "ionic_lif.h"
#include "ionic_devlink.h"
#ifdef IONIC_DEVLINK
#ifdef HAVE_DEVLINK_UPDATE_PARAMS
/* devlink .flash_update handler (newer kernel API with params struct).
 * Depending on kernel support, either the firmware blob has already been
 * fetched for us (params->fw) or we are given a file name to fetch.
 */
static int ionic_dl_flash_update(struct devlink *dl,
				 struct devlink_flash_update_params *params,
				 struct netlink_ext_ack *extack)
{
	struct ionic *ionic = devlink_priv(dl);

#ifdef HAVE_DEVLINK_PREFETCH_FW
	return ionic_firmware_update(ionic->lif, params->fw);
#else
	return ionic_firmware_fetch_and_update(ionic->lif, params->file_name);
#endif
}
#else
/* devlink .flash_update handler (older kernel API with bare file name). */
static int ionic_dl_flash_update(struct devlink *dl,
				 const char *fwname,
				 const char *component,
				 struct netlink_ext_ack *extack)
{
	struct ionic *ionic = devlink_priv(dl);

	/* Only whole-device flash is supported, not per-component update. */
	if (component)
		return -EOPNOTSUPP;

	return ionic_firmware_fetch_and_update(ionic->lif, fwname);
}
#endif /* HAVE_DEVLINK_UPDATE_PARAMS */
/* devlink .info_get handler: report driver name, running firmware
 * version/heartbeat/status, and the fixed ASIC id/rev plus serial number.
 */
static int ionic_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
			     struct netlink_ext_ack *extack)
{
	struct ionic *ionic = devlink_priv(dl);
	struct ionic_dev *idev = &ionic->idev;
	char buf[16];	/* big enough for "0x" + 8 hex digits + NUL */
	int err = 0;
	u32 val;

	err = devlink_info_driver_name_put(req, IONIC_DRV_NAME);
	if (err)
		return err;

	/* Running firmware version string cached from the device info region */
	err = devlink_info_version_running_put(req,
					       DEVLINK_INFO_VERSION_GENERIC_FW,
					       idev->dev_info.fw_version);
	if (err)
		return err;

	/* Live heartbeat counter, read straight from device registers */
	val = ioread32(&idev->dev_info_regs->fw_heartbeat);
	snprintf(buf, sizeof(buf), "0x%x", val);
	err = devlink_info_version_running_put(req, "fw.heartbeat", buf);
	if (err)
		return err;

	/* Current firmware status byte */
	val = ioread8(&idev->dev_info_regs->fw_status);
	snprintf(buf, sizeof(buf), "0x%x", val);
	err = devlink_info_version_running_put(req, "fw.status", buf);
	if (err)
		return err;

	/* ASIC type and revision never change at runtime: report as fixed */
	snprintf(buf, sizeof(buf), "0x%x", idev->dev_info.asic_type);
	err = devlink_info_version_fixed_put(req,
					     DEVLINK_INFO_VERSION_GENERIC_ASIC_ID,
					     buf);
	if (err)
		return err;

	snprintf(buf, sizeof(buf), "0x%x", idev->dev_info.asic_rev);
	err = devlink_info_version_fixed_put(req,
					     DEVLINK_INFO_VERSION_GENERIC_ASIC_REV,
					     buf);
	if (err)
		return err;

	err = devlink_info_serial_number_put(req, idev->dev_info.serial_num);

	return err;
}
/* devlink operations implemented by this driver */
static const struct devlink_ops ionic_dl_ops = {
	.info_get	= ionic_dl_info_get,
	.flash_update	= ionic_dl_flash_update,
};
struct ionic *ionic_devlink_alloc(struct device *dev)
{
struct devlink *dl;
dl = devlink_alloc(&ionic_dl_ops, sizeof(struct ionic), dev);
return devlink_priv(dl);
}
/* Release the devlink allocation that embeds this ionic instance. */
void ionic_devlink_free(struct ionic *ionic)
{
	devlink_free(priv_to_devlink(ionic));
}
/**
 * ionic_devlink_register() - register the devlink instance and its port.
 *
 * Two kernel API flavors: with HAVE_VOID_DEVLINK_REGISTER, the port is
 * registered first and devlink_register() (returning void) comes last;
 * otherwise devlink_register() can fail and must come first.
 *
 * Return: 0 on success or a negative errno.
 */
int ionic_devlink_register(struct ionic *ionic)
{
	struct devlink *dl = priv_to_devlink(ionic);
	int err;

#ifdef HAVE_VOID_DEVLINK_REGISTER
	err = devlink_port_register(dl, &ionic->dl_port, 0);
	if (err) {
		dev_err(ionic->dev, "devlink_port_register failed: %d\n", err);
		/* Fix: devlink_register() has not been called yet in this
		 * branch, so there is nothing to unregister on this path.
		 */
		return err;
	}

	devlink_port_type_eth_set(&ionic->dl_port, ionic->lif->netdev);
	devlink_register(dl);
#else
	err = devlink_register(dl, ionic->dev);
	if (err) {
		dev_warn(ionic->dev, "devlink_register failed: %d\n", err);
		return err;
	}

	err = devlink_port_register(dl, &ionic->dl_port, 0);
	if (err) {
		dev_err(ionic->dev, "devlink_port_register failed: %d\n", err);
		devlink_unregister(dl);
		return err;
	}

	devlink_port_type_eth_set(&ionic->dl_port, ionic->lif->netdev);
#endif

	return 0;
}
/* Tear down in reverse order of registration: port first, then devlink. */
void ionic_devlink_unregister(struct ionic *ionic)
{
	struct devlink *devlink = priv_to_devlink(ionic);

	devlink_port_unregister(&ionic->dl_port);
	devlink_unregister(devlink);
}
#endif /* IONIC_DEVLINK */

View File

@ -0,0 +1,40 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2017 - 2022 Pensando Systems, Inc */
#ifndef _IONIC_DEVLINK_H_
#define _IONIC_DEVLINK_H_
#include <linux/firmware.h>
#if IS_ENABLED(CONFIG_NET_DEVLINK)
#include <net/devlink.h>
#endif
int ionic_firmware_update(struct ionic_lif *lif, const struct firmware *fw);
int ionic_firmware_fetch_and_update(struct ionic_lif *lif, const char *fw_name);
/* make sure we've got a new-enough devlink support to use dev info */
#ifdef DEVLINK_INFO_VERSION_GENERIC_BOARD_ID
#define IONIC_DEVLINK
struct ionic *ionic_devlink_alloc(struct device *dev);
void ionic_devlink_free(struct ionic *ionic);
int ionic_devlink_register(struct ionic *ionic);
void ionic_devlink_unregister(struct ionic *ionic);
#else
/* No usable devlink: fall back to a plain managed allocation of struct
 * ionic, and make register/unregister harmless no-ops.
 */
#define ionic_devlink_alloc(dev)  devm_kzalloc(dev, sizeof(struct ionic), GFP_KERNEL)
#define ionic_devlink_free(i)     devm_kfree(i->dev, i)
#define ionic_devlink_register(x)    0
#define ionic_devlink_unregister(x)
#endif

#if !IS_ENABLED(CONFIG_NET_DEVLINK)
/* Stub out the devlink flash-notify helpers when devlink is compiled out */
#define priv_to_devlink(i)  0
#define devlink_flash_update_begin_notify(d)
#define devlink_flash_update_end_notify(d)
#define devlink_flash_update_status_notify(d, s, c, n, t)
#endif /* CONFIG_NET_DEVLINK */
#endif /* _IONIC_DEVLINK_H_ */

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2017 - 2022 Pensando Systems, Inc */
#ifndef _IONIC_ETHTOOL_H_
#define _IONIC_ETHTOOL_H_
int ionic_cmb_pages_in_use(struct ionic_lif *lif);
void ionic_ethtool_set_ops(struct net_device *netdev);
#endif /* _IONIC_ETHTOOL_H_ */

View File

@ -0,0 +1,206 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2020 - 2022 Pensando Systems, Inc */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include "ionic.h"
#include "ionic_dev.h"
#include "ionic_lif.h"
/* The worst case wait for the install activity is about 25 minutes when
* installing a new CPLD, which is very seldom. Normal is about 30-35
* seconds. Since the driver can't tell if a CPLD update will happen we
* set the timeout for the ugly case.
*/
#define IONIC_FW_INSTALL_TIMEOUT (25 * 60)
#define IONIC_FW_ACTIVATE_TIMEOUT 30
/* Number of periodic log updates during fw file download */
#define IONIC_FW_INTERVAL_FRACTION 32
/* Issue a FW_DOWNLOAD dev command: copy 'length' bytes of the image,
 * staged at device offset 'addr', into firmware offset 'offset'.
 */
static void ionic_dev_cmd_firmware_download(struct ionic_dev *idev, u64 addr,
					    u32 offset, u32 length)
{
	union ionic_dev_cmd cmd = {};

	cmd.fw_download.opcode = IONIC_CMD_FW_DOWNLOAD;
	cmd.fw_download.offset = cpu_to_le32(offset);
	cmd.fw_download.addr = cpu_to_le64(addr);
	cmd.fw_download.length = cpu_to_le32(length);

	ionic_dev_cmd_go(idev, &cmd);
}
/* Kick off an asynchronous firmware install on the device. */
static void ionic_dev_cmd_firmware_install(struct ionic_dev *idev)
{
	union ionic_dev_cmd cmd = {};

	cmd.fw_control.opcode = IONIC_CMD_FW_CONTROL;
	cmd.fw_control.oper = IONIC_FW_INSTALL_ASYNC;

	ionic_dev_cmd_go(idev, &cmd);
}
/* Poll the status of a previously started async firmware install. */
static void ionic_dev_cmd_firmware_install_status(struct ionic_dev *idev)
{
	union ionic_dev_cmd cmd = {};

	cmd.fw_control.opcode = IONIC_CMD_FW_CONTROL;
	cmd.fw_control.oper = IONIC_FW_INSTALL_STATUS;

	ionic_dev_cmd_go(idev, &cmd);
}
/* Kick off asynchronous activation of the firmware in 'slot'. */
static void ionic_dev_cmd_firmware_activate(struct ionic_dev *idev, u8 slot)
{
	union ionic_dev_cmd cmd = {};

	cmd.fw_control.opcode = IONIC_CMD_FW_CONTROL;
	cmd.fw_control.oper = IONIC_FW_ACTIVATE_ASYNC;
	cmd.fw_control.slot = slot;

	ionic_dev_cmd_go(idev, &cmd);
}
/* Poll the status of a previously started async firmware activate. */
static void ionic_dev_cmd_firmware_activate_status(struct ionic_dev *idev)
{
	union ionic_dev_cmd cmd = {};

	cmd.fw_control.opcode = IONIC_CMD_FW_CONTROL;
	cmd.fw_control.oper = IONIC_FW_ACTIVATE_STATUS;

	ionic_dev_cmd_go(idev, &cmd);
}
/**
 * ionic_firmware_update() - download, install, and activate a firmware image.
 * @lif: logical interface used for device access and logging
 * @fw:  firmware image already loaded into memory
 *
 * Flows through three phases, reporting progress via devlink notify:
 *  1) download the image in dev_cmd_regs->data sized chunks,
 *  2) install it (async start + long-timeout status wait),
 *  3) activate the slot the install reported (async start + status wait).
 * Each dev_cmd pair is issued under dev_cmd_lock.
 *
 * Return: 0 on success or a negative errno.
 */
int ionic_firmware_update(struct ionic_lif *lif, const struct firmware *fw)
{
	struct ionic_dev *idev = &lif->ionic->idev;
	struct net_device *netdev = lif->netdev;
	struct ionic *ionic = lif->ionic;
	union ionic_dev_cmd_comp comp;
	u32 buf_sz, copy_sz, offset;
	struct devlink *dl;
	int next_interval;
	int err = 0;
	u8 fw_slot;

	dl = priv_to_devlink(ionic);
	devlink_flash_update_status_notify(dl, "Preparing to flash", NULL, 0, 0);

	/* The image moves through the dev_cmd data window, one chunk at a time */
	buf_sz = sizeof(idev->dev_cmd_regs->data);

	netdev_dbg(netdev,
		   "downloading firmware - size %d part_sz %d nparts %lu\n",
		   (int)fw->size, buf_sz, DIV_ROUND_UP(fw->size, buf_sz));

	devlink_flash_update_status_notify(dl, "Downloading", NULL, 0, fw->size);
	offset = 0;
	/* Emit progress roughly IONIC_FW_INTERVAL_FRACTION times total */
	next_interval = fw->size / IONIC_FW_INTERVAL_FRACTION;
	while (offset < fw->size) {
		copy_sz = min_t(unsigned int, buf_sz, fw->size - offset);
		mutex_lock(&ionic->dev_cmd_lock);
		memcpy_toio(&idev->dev_cmd_regs->data, fw->data + offset, copy_sz);
		ionic_dev_cmd_firmware_download(idev,
						offsetof(union ionic_dev_cmd_regs, data),
						offset, copy_sz);
		err = ionic_dev_cmd_wait(ionic, devcmd_timeout);
		mutex_unlock(&ionic->dev_cmd_lock);
		if (err) {
			netdev_err(netdev,
				   "download failed offset 0x%x addr 0x%lx len 0x%x\n",
				   offset, offsetof(union ionic_dev_cmd_regs, data),
				   copy_sz);
			goto err_out;
		}
		offset += copy_sz;

		if (offset > next_interval) {
			devlink_flash_update_status_notify(dl, "Downloading",
							   NULL, offset, fw->size);
			next_interval = offset + (fw->size / IONIC_FW_INTERVAL_FRACTION);
		}
	}
	devlink_flash_update_status_notify(dl, "Downloading", NULL, 1, 1);

	netdev_info(netdev, "installing firmware\n");
	devlink_flash_update_status_notify(dl, "Installing", NULL, 0, 2);

	/* Start the install and capture the slot it is writing into;
	 * the completion must be read under the same lock hold.
	 */
	mutex_lock(&ionic->dev_cmd_lock);
	ionic_dev_cmd_firmware_install(idev);
	err = ionic_dev_cmd_wait(ionic, devcmd_timeout);
	ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
	fw_slot = comp.fw_control.slot;
	mutex_unlock(&ionic->dev_cmd_lock);
	if (err) {
		netdev_err(netdev, "failed to start firmware install\n");
		goto err_out;
	}
	devlink_flash_update_status_notify(dl, "Installing", NULL, 1, 2);

	/* Long timeout here: a CPLD update during install can take ~25 min */
	mutex_lock(&ionic->dev_cmd_lock);
	ionic_dev_cmd_firmware_install_status(idev);
	err = ionic_dev_cmd_wait(ionic, IONIC_FW_INSTALL_TIMEOUT);
	mutex_unlock(&ionic->dev_cmd_lock);
	if (err) {
		netdev_err(netdev, "firmware install failed\n");
		goto err_out;
	}
	devlink_flash_update_status_notify(dl, "Installing", NULL, 2, 2);

	netdev_info(netdev, "selecting firmware\n");
	devlink_flash_update_status_notify(dl, "Selecting", NULL, 0, 2);

	/* Activate the slot the install just populated */
	mutex_lock(&ionic->dev_cmd_lock);
	ionic_dev_cmd_firmware_activate(idev, fw_slot);
	err = ionic_dev_cmd_wait(ionic, devcmd_timeout);
	mutex_unlock(&ionic->dev_cmd_lock);
	if (err) {
		netdev_err(netdev, "failed to start firmware select\n");
		goto err_out;
	}
	devlink_flash_update_status_notify(dl, "Selecting", NULL, 1, 2);

	mutex_lock(&ionic->dev_cmd_lock);
	ionic_dev_cmd_firmware_activate_status(idev);
	err = ionic_dev_cmd_wait(ionic, IONIC_FW_ACTIVATE_TIMEOUT);
	mutex_unlock(&ionic->dev_cmd_lock);
	if (err) {
		netdev_err(netdev, "firmware select failed\n");
		goto err_out;
	}
	devlink_flash_update_status_notify(dl, "Selecting", NULL, 2, 2);

	netdev_info(netdev, "Firmware update completed\n");

err_out:
	if (err)
		devlink_flash_update_status_notify(dl, "Flash failed", NULL, 0, 0);
	return err;
}
/**
 * ionic_firmware_fetch_and_update() - fetch a firmware file and flash it.
 * @lif:     logical interface to flash
 * @fw_name: firmware file name passed to request_firmware()
 *
 * Wraps ionic_firmware_update() with the firmware-loader fetch and the
 * devlink begin/end flash notifications.  On request_firmware() failure
 * 'fw' is set to NULL by the loader, so the unconditional
 * release_firmware() below is safe.
 *
 * Return: 0 on success or a negative errno.
 */
int ionic_firmware_fetch_and_update(struct ionic_lif *lif, const char *fw_name)
{
	const struct firmware *fw;
	struct devlink *dl;
	int err;

	netdev_info(lif->netdev, "Installing firmware %s\n", fw_name);

	dl = priv_to_devlink(lif->ionic);
	devlink_flash_update_begin_notify(dl);

	err = request_firmware(&fw, fw_name, lif->ionic->dev);
	if (err)
		goto err_out;

	err = ionic_firmware_update(lif, fw);

err_out:
	devlink_flash_update_end_notify(dl);
	release_firmware(fw);

	return err;
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,498 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2017 - 2022 Pensando Systems, Inc */
#ifndef _IONIC_LIF_H_
#define _IONIC_LIF_H_
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
#ifdef CONFIG_DIMLIB
#include <linux/dim.h>
#else
#include "dim.h"
#endif
#include "ionic_rx_filter.h"
#define IONIC_ADMINQ_LENGTH 16 /* must be a power of two */
#define IONIC_NOTIFYQ_LENGTH 64 /* must be a power of two */
#ifdef IONIC_DEBUG_STATS
#define IONIC_MAX_NUM_NAPI_CNTR (NAPI_POLL_WEIGHT + 1)
#define IONIC_MAX_NUM_SG_CNTR (IONIC_TX_MAX_SG_ELEMS + 1)
#endif
#define ADD_ADDR true
#define DEL_ADDR false
#define CAN_SLEEP true
#define CAN_NOT_SLEEP false
/* Tunables */
#define IONIC_RX_COPYBREAK_DEFAULT 256
#define IONIC_TX_BUDGET_DEFAULT 256
/* Per-txq software counters. */
struct ionic_tx_stats {
	u64 pkts;		/* packets transmitted */
	u64 bytes;		/* bytes transmitted */
	u64 csum_none;		/* frames sent without hw checksum */
	u64 csum;		/* frames sent with hw checksum */
	u64 tso;		/* TSO super-frames */
	u64 tso_bytes;		/* bytes in TSO super-frames */
	u64 frags;		/* scatter-gather fragments used */
	u64 vlan_inserted;	/* frames with hw vlan insertion */
	u64 clean;		/* descriptors cleaned after completion */
	u64 linearize;		/* skbs linearized before send */
	u64 crc32_csum;		/* frames using crc32 checksum offload */
#ifdef IONIC_DEBUG_STATS
	u64 sg_cntr[IONIC_MAX_NUM_SG_CNTR];	/* histogram of SG elems per frame */
#endif
	u64 dma_map_err;	/* DMA mapping failures */
	u64 hwstamp_valid;	/* valid hw tx timestamps */
	u64 hwstamp_invalid;	/* invalid hw tx timestamps */
};
/* Per-rxq software counters, including page-cache buffer reuse stats. */
struct ionic_rx_stats {
	u64 pkts;		/* packets received */
	u64 bytes;		/* bytes received */
	u64 csum_none;		/* frames without hw checksum result */
	u64 csum_complete;	/* frames with CHECKSUM_COMPLETE */
#ifdef IONIC_DEBUG_STATS
	u64 buffers_posted;	/* rx buffers posted to the ring (debug) */
#endif
	u64 dropped;		/* frames dropped by the driver */
	u64 vlan_stripped;	/* frames with hw vlan strip */
	u64 csum_error;		/* frames with bad hw checksum */
	u64 dma_map_err;	/* DMA mapping failures */
	u64 alloc_err;		/* buffer allocation failures */
	u64 hwstamp_valid;	/* valid hw rx timestamps */
	u64 hwstamp_invalid;	/* invalid hw rx timestamps */
	/* NOTE(review): the cache_*/
	u64 cache_full;		/* buffer-cache put failed: cache full */
	u64 cache_empty;	/* buffer-cache get found it empty */
	u64 cache_busy;		/* buffer-cache entry still in use */
	u64 cache_get;		/* buffer-cache gets */
	u64 cache_put;		/* buffer-cache puts */
	u64 buf_reused;		/* rx buffers reused */
	u64 buf_exhausted;	/* rx buffers exhausted */
	u64 buf_not_reusable;	/* rx buffers not eligible for reuse */
};
#define IONIC_QCQ_F_INITED BIT(0)
#define IONIC_QCQ_F_SG BIT(1)
#define IONIC_QCQ_F_INTR BIT(2)
#define IONIC_QCQ_F_TX_STATS BIT(3)
#define IONIC_QCQ_F_RX_STATS BIT(4)
#define IONIC_QCQ_F_NOTIFYQ BIT(5)
#define IONIC_QCQ_F_CMB_RINGS BIT(6)
#ifdef IONIC_DEBUG_STATS
/* Debug-only napi poll statistics. */
struct ionic_napi_stats {
	u64 poll_count;		/* number of napi poll invocations */
	u64 work_done_cntr[IONIC_MAX_NUM_NAPI_CNTR];	/* histogram of work done per poll */
};
#endif
/* A queue/completion-queue pair with its interrupt, napi context, and
 * the DMA allocations backing the rings (optionally in controller
 * memory buffer, "CMB", space).
 */
struct ionic_qcq {
	void *q_base;			/* work queue ring memory */
	dma_addr_t q_base_pa;		/* might not be page aligned */
	u32 q_size;
	u32 cq_size;
	void *cq_base;			/* completion ring memory */
	dma_addr_t cq_base_pa;		/* might not be page aligned */
	void *sg_base;			/* scatter-gather descriptor memory */
	dma_addr_t sg_base_pa;		/* might not be page aligned */
	u32 sg_size;
	/* CMB variants: rings placed in device memory instead of host DMA */
	void __iomem *cmb_q_base;
	phys_addr_t cmb_q_base_pa;
	u32 cmb_q_size;
	u32 cmb_pgid;
	u32 cmb_order;
	bool armed;			/* interrupt currently armed */
	struct dim dim;			/* dynamic interrupt moderation state */
	struct ionic_queue q;
	struct ionic_cq cq;
	struct ionic_intr_info intr;
	struct timer_list napi_deadline;
	struct napi_struct napi;
#ifdef IONIC_DEBUG_STATS
	struct ionic_napi_stats napi_stats;
#endif
	unsigned int flags;		/* IONIC_QCQ_F_* bits */
	struct ionic_qcq *napi_qcq;	/* companion qcq serviced by same napi */
	struct dentry *dentry;		/* debugfs entry */
};
#define q_to_qcq(q) container_of(q, struct ionic_qcq, q)
#define q_to_tx_stats(q) (&(q)->lif->txqstats[(q)->index])
#define q_to_rx_stats(q) (&(q)->lif->rxqstats[(q)->index])
#define napi_to_qcq(napi) container_of(napi, struct ionic_qcq, napi)
#define napi_to_cq(napi) (&napi_to_qcq(napi)->cq)
/* Kinds of work items queued onto the lif's deferred work list. */
enum ionic_deferred_work_type {
	IONIC_DW_TYPE_RX_MODE,		/* apply rx mode changes */
	IONIC_DW_TYPE_LINK_STATUS,	/* re-check link status */
	IONIC_DW_TYPE_LIF_RESET,	/* reset the lif */
};
/* One deferred work item; the union payload depends on 'type'. */
struct ionic_deferred_work {
	struct list_head list;
	enum ionic_deferred_work_type type;
	union {
		u8 addr[ETH_ALEN];	/* mac address payload */
		u8 fw_status;		/* firmware status payload */
	};
};
/* Deferred work list drained from process context via 'work'. */
struct ionic_deferred {
	spinlock_t lock;		/* lock for deferred work list */
	struct list_head list;
	struct work_struct work;
};
/* Aggregated software stats for a lif; hw_* fields mirror device-
 * reported drop/error counters.
 */
struct ionic_lif_sw_stats {
	u64 tx_packets;
	u64 tx_bytes;
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_tso;
	u64 tx_tso_bytes;
	u64 tx_csum_none;
	u64 tx_csum;
	u64 rx_csum_none;
	u64 rx_csum_complete;
	u64 rx_csum_error;
	u64 tx_hwstamp_valid;
	u64 tx_hwstamp_invalid;
	u64 rx_hwstamp_valid;
	u64 rx_hwstamp_invalid;
	u64 hw_tx_dropped;
	u64 hw_rx_dropped;
	u64 hw_rx_over_errors;
	u64 hw_rx_missed_errors;
	u64 hw_tx_aborted_errors;
};
/* Bit positions for the lif 'state' bitmap. */
enum ionic_lif_state_flags {
	IONIC_LIF_F_INITED,		/* lif initialization completed */
	IONIC_LIF_F_SW_DEBUG_STATS,	/* expose software debug stats */
	IONIC_LIF_F_UP,			/* interface is up */
	IONIC_LIF_F_LINK_CHECK_REQUESTED,	/* link status re-check pending */
	IONIC_LIF_F_FILTER_SYNC_NEEDED,	/* rx filters need re-sync */
	IONIC_LIF_F_FW_RESET,		/* firmware reset in progress */
	IONIC_LIF_F_FW_STOPPING,	/* firmware going down */
	IONIC_LIF_F_RDMA_SNIFFER,	/* rdma sniffer mode enabled */
	IONIC_LIF_F_SPLIT_INTR,		/* separate tx and rx interrupts */
	IONIC_LIF_F_BROKEN,		/* lif unusable after an error */
	IONIC_LIF_F_TX_DIM_INTR,	/* dynamic moderation on tx intr */
	IONIC_LIF_F_RX_DIM_INTR,	/* dynamic moderation on rx intr */
	IONIC_LIF_F_CMB_RINGS,		/* rings placed in controller memory */

	/* leave this as last */
	IONIC_LIF_F_STATE_SIZE
};
/* Configuration for a child lif (e.g. an rdma client) attached to us. */
struct ionic_lif_cfg {
	int index;			/* child lif index */
	enum ionic_api_prsn prsn;	/* which personality claimed the child */
	void *priv;			/* opaque pointer owned by the child */
	void (*reset_cb)(void *priv);	/* called to notify the child of a reset */
};
/* Capabilities the device reports for one queue type. */
struct ionic_qtype_info {
	u8 version;		/* highest queue spec version supported */
	u8 supported;		/* nonzero if this queue type is available */
	u64 features;		/* IONIC_QIDENT_F_* feature bits */
	u16 desc_sz;		/* descriptor size in bytes */
	u16 comp_sz;		/* completion descriptor size in bytes */
	u16 sg_desc_sz;		/* scatter-gather descriptor size in bytes */
	u16 max_sg_elems;	/* max scatter-gather elements per frame */
	u16 sg_desc_stride;	/* SG elements per SG descriptor */
};
struct ionic_phc;
#define IONIC_LIF_NAME_MAX_SZ 32
/* Logical interface: the netdev-facing instance with all of its queues,
 * filters, RSS state, and device identity info.
 */
struct ionic_lif {
	struct net_device *netdev;
	DECLARE_BITMAP(state, IONIC_LIF_F_STATE_SIZE);	/* IONIC_LIF_F_* flags */
	struct ionic *ionic;		/* owning device */
	u64 __iomem *kern_dbpage;	/* kernel doorbell page */
	u32 rx_copybreak;		/* copy small rx frames below this size */

	/* Queues: per-queue qcq pointers and their stats arrays */
	unsigned int nxqs;
	struct ionic_qcq **txqcqs;
	struct ionic_tx_stats *txqstats;
	struct ionic_qcq **rxqcqs;
	struct ionic_rx_stats *rxqstats;
	struct ionic_qcq *hwstamp_txq;	/* dedicated tx timestamping queue */
	struct ionic_qcq *hwstamp_rxq;	/* dedicated rx timestamping queue */
	struct ionic_qcq *adminqcq;
	struct ionic_qcq *notifyqcq;

	struct mutex queue_lock;	/* lock for queue structures */
	struct mutex config_lock;	/* lock for config actions */
	spinlock_t adminq_lock;		/* lock for AdminQ operations */
	unsigned int kern_pid;		/* kernel process id for doorbells */
	struct work_struct tx_timeout_work;
	struct ionic_deferred deferred;	/* deferred work list */
	u64 last_eid;			/* last notifyq event id seen */
	unsigned int nrdma_eqs;		/* rdma event queues in use */
	unsigned int nrdma_eqs_avail;	/* rdma event queues available */
	unsigned int ntxq_descs;	/* descriptors per tx queue */
	unsigned int nrxq_descs;	/* descriptors per rx queue */
	u64 rxq_features;		/* extra rxq feature bits requested */
	u16 rx_mode;			/* current rx filter mode bits */
	bool registered;		/* netdev registered */
	u64 hw_features;		/* offload features negotiated with fw */
	unsigned int index;		/* lif index in the driver */
	unsigned int hw_index;		/* lif index on the device */

	/* RSS configuration */
	u8 rss_hash_key[IONIC_RSS_HASH_KEY_SIZE];
	u8 *rss_ind_tbl;
	dma_addr_t rss_ind_tbl_pa;
	u32 rss_ind_tbl_sz;
	u16 rss_types;

	u16 lif_type;			/* device lif type */
	unsigned int nmcast;		/* multicast filters in use */
	unsigned int nucast;		/* unicast filters in use */
	unsigned int nvlans;		/* vlan filters in use */
	unsigned int max_vlans;		/* vlan filter capacity */
	char name[IONIC_LIF_NAME_MAX_SZ];
	struct ionic_lif_info *info;	/* lif info region shared with fw */
	dma_addr_t info_pa;
	u32 info_sz;

	/* Doorbell id allocation */
	unsigned int dbid_count;
	struct mutex dbid_inuse_lock;	/* lock the dbid bit list */
	unsigned long *dbid_inuse;

	union ionic_lif_identity *identity;	/* identity data from fw */
	struct ionic_qtype_info qtype_info[IONIC_QTYPE_MAX];
	struct ionic_rx_filters rx_filters;

	/* Interrupt coalescing, user-requested vs device units */
	u32 rx_coalesce_usecs;		/* what the user asked for */
	u32 rx_coalesce_hw;		/* what the hw is using */
	u32 tx_coalesce_usecs;		/* what the user asked for */
	u32 tx_coalesce_hw;		/* what the hw is using */

	struct ionic_phc *phc;		/* PTP hardware clock state */

	/* TODO: Make this a list if more than one child is supported */
	struct ionic_lif_cfg child_lif_cfg;

	u64 n_txrx_alloc;		/* count of queue (re)allocations */
	struct dentry *dentry;		/* debugfs entry */
};
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
/* PTP hardware clock state: cyclecounter/timecounter over the device
 * clock, plus the current timestamping configuration.
 */
struct ionic_phc {
	spinlock_t lock;		/* lock for cc and tc */
	struct cyclecounter cc;
	struct timecounter tc;

	struct mutex config_lock;	/* lock for ts_config */
	struct hwtstamp_config ts_config;
	u64 ts_config_rx_filt;		/* device rx filter for current config */
	u32 ts_config_tx_mode;		/* device tx mode for current config */

	u32 init_cc_mult;		/* initial cyclecounter multiplier */
	long aux_work_delay;		/* delay between periodic clock updates */

	struct ptp_clock_info ptp_info;
	struct ptp_clock *ptp;
	struct ionic_lif *lif;
#ifndef HAVE_PTP_CLOCK_DO_AUX_WORK
	struct delayed_work dwork;	/* fallback for kernels without aux work */
#endif
};
#endif
/* Snapshot of a lif's queue configuration, used when reconfiguring. */
struct ionic_queue_params {
	unsigned int nxqs;		/* number of queue pairs */
	unsigned int ntxq_descs;	/* descriptors per tx queue */
	unsigned int nrxq_descs;	/* descriptors per rx queue */
	u64 rxq_features;		/* extra rxq feature bits */
	bool intr_split;		/* separate tx and rx interrupts */
	bool cmb_enabled;		/* rings in controller memory */
};
/* Snapshot the lif's current queue configuration into qparam. */
static inline void ionic_init_queue_params(struct ionic_lif *lif,
					   struct ionic_queue_params *qparam)
{
	qparam->rxq_features = lif->rxq_features;
	qparam->nrxq_descs = lif->nrxq_descs;
	qparam->ntxq_descs = lif->ntxq_descs;
	qparam->nxqs = lif->nxqs;
	qparam->intr_split = test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
	qparam->cmb_enabled = test_bit(IONIC_LIF_F_CMB_RINGS, lif->state);
}
/* Apply a saved queue configuration back onto the lif, mirroring the
 * boolean params into the lif state bits.
 */
static inline void ionic_set_queue_params(struct ionic_lif *lif,
					  struct ionic_queue_params *qparam)
{
	lif->rxq_features = qparam->rxq_features;
	lif->nrxq_descs = qparam->nrxq_descs;
	lif->ntxq_descs = qparam->ntxq_descs;
	lif->nxqs = qparam->nxqs;

	if (qparam->intr_split)
		set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
	else
		clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);

	if (qparam->cmb_enabled)
		set_bit(IONIC_LIF_F_CMB_RINGS, lif->state);
	else
		clear_bit(IONIC_LIF_F_CMB_RINGS, lif->state);
}
/* Convert a coalescing interval in microseconds into the device's own
 * units using the mult/div factors from the device identity.
 */
static inline u32 ionic_coal_usec_to_hw(struct ionic *ionic, u32 usecs)
{
	u32 mult = le32_to_cpu(ionic->ident.dev.intr_coal_mult);
	u32 div = le32_to_cpu(ionic->ident.dev.intr_coal_div);
	u32 rounded;

	/* Div-by-zero should never be an issue, but check anyway */
	if (!div || !mult)
		return 0;

	/* Round up in case usecs is close to the next hw unit */
	rounded = usecs + ((div / mult) >> 1);

	/* Convert from usecs to device units */
	return (rounded * mult) / div;
}
static inline bool ionic_is_pf(struct ionic *ionic)
{
return ionic->pdev &&
ionic->pdev->device == PCI_DEVICE_ID_PENSANDO_IONIC_ETH_PF;
}
static inline bool ionic_use_eqs(struct ionic_lif *lif)
{
return lif->ionic->neth_eqs &&
lif->qtype_info[IONIC_QTYPE_RXQ].features & IONIC_QIDENT_F_EQ;
}
void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
struct ionic_deferred_work *work);
void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep);
#ifdef HAVE_VOID_NDO_GET_STATS64
void ionic_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *ns);
#else
struct rtnl_link_stats64 *ionic_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *ns);
#endif
int ionic_lif_register(struct ionic_lif *lif);
void ionic_lif_unregister(struct ionic_lif *lif);
int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
union ionic_lif_identity *lif_ident);
int ionic_lif_size(struct ionic *ionic);
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
void ionic_lif_hwstamp_replay(struct ionic_lif *lif);
void ionic_lif_hwstamp_recreate_queues(struct ionic_lif *lif);
int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr);
int ionic_lif_hwstamp_get(struct ionic_lif *lif, struct ifreq *ifr);
ktime_t ionic_lif_phc_ktime(struct ionic_lif *lif, u64 counter);
void ionic_lif_register_phc(struct ionic_lif *lif);
void ionic_lif_unregister_phc(struct ionic_lif *lif);
void ionic_lif_alloc_phc(struct ionic_lif *lif);
void ionic_lif_free_phc(struct ionic_lif *lif);
#else
/* PTP disabled: hwstamp set/get report unsupported, everything else no-ops. */
static inline void ionic_lif_hwstamp_replay(struct ionic_lif *lif) {}
static inline void ionic_lif_hwstamp_recreate_queues(struct ionic_lif *lif) {}

static inline int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr)
{
	return -EOPNOTSUPP;
}

static inline int ionic_lif_hwstamp_get(struct ionic_lif *lif, struct ifreq *ifr)
{
	return -EOPNOTSUPP;
}

static inline ktime_t ionic_lif_phc_ktime(struct ionic_lif *lif, u64 counter)
{
	return ns_to_ktime(0);
}

static inline void ionic_lif_register_phc(struct ionic_lif *lif) {}
static inline void ionic_lif_unregister_phc(struct ionic_lif *lif) {}
static inline void ionic_lif_alloc_phc(struct ionic_lif *lif) {}
static inline void ionic_lif_free_phc(struct ionic_lif *lif) {}
#endif
int ionic_lif_create_hwstamp_txq(struct ionic_lif *lif);
int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif);
int ionic_lif_config_hwstamp_rxq_all(struct ionic_lif *lif, bool rx_all);
int ionic_lif_set_hwstamp_txmode(struct ionic_lif *lif, u16 txstamp_mode);
int ionic_lif_set_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class);
int ionic_lif_rss_config(struct ionic_lif *lif, u16 types,
const u8 *key, const u32 *indir);
int ionic_intr_alloc(struct ionic *ionic, struct ionic_intr_info *intr);
void ionic_intr_free(struct ionic *ionic, int index);
void ionic_lif_rx_mode(struct ionic_lif *lif);
int ionic_reconfigure_queues(struct ionic_lif *lif,
struct ionic_queue_params *qparam);
int ionic_lif_alloc(struct ionic *ionic);
int ionic_lif_init(struct ionic_lif *lif);
void ionic_lif_free(struct ionic_lif *lif);
void ionic_lif_deinit(struct ionic_lif *lif);
int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr);
int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr);
struct ionic_lif *ionic_netdev_lif(struct net_device *netdev);
void ionic_device_reset(struct ionic_lif *lif);
#ifdef IONIC_DEBUG_STATS
/* Debug stats: count doorbell rings and bucket the number of SG elements
 * used by the tx descriptor just posted at head.
 */
static inline void debug_stats_txq_post(struct ionic_queue *q, bool dbell)
{
	struct ionic_txq_desc *desc = &q->txq[q->head_idx];
	u8 nsge;

	if (dbell)
		q->dbell_count++;

	nsge = (le64_to_cpu(desc->cmd) >> IONIC_TXQ_DESC_NSGE_SHIFT) &
	       IONIC_TXQ_DESC_NSGE_MASK;

	/* Clamp into the last histogram bucket */
	if (nsge >= IONIC_MAX_NUM_SG_CNTR)
		nsge = IONIC_MAX_NUM_SG_CNTR - 1;

	q->lif->txqstats[q->index].sg_cntr[nsge]++;
}
/* Debug stats: count polls and histogram the work done per napi poll. */
static inline void debug_stats_napi_poll(struct ionic_qcq *qcq,
					 unsigned int work_done)
{
	unsigned int bucket = work_done;

	/* Clamp into the last histogram bucket */
	if (bucket >= IONIC_MAX_NUM_NAPI_CNTR)
		bucket = IONIC_MAX_NUM_NAPI_CNTR - 1;

	qcq->napi_stats.poll_count++;
	qcq->napi_stats.work_done_cntr[bucket]++;
}
#define DEBUG_STATS_CQE_CNT(cq) ((cq)->compl_count++)
#define DEBUG_STATS_RX_BUFF_CNT(q) ((q)->lif->rxqstats[q->index].buffers_posted++)
#define DEBUG_STATS_TXQ_POST(q, dbell) debug_stats_txq_post(q, dbell)
#define DEBUG_STATS_NAPI_POLL(qcq, work_done) \
debug_stats_napi_poll(qcq, work_done)
#else
#define DEBUG_STATS_CQE_CNT(cq)
#define DEBUG_STATS_RX_BUFF_CNT(q)
#define DEBUG_STATS_TXQ_POST(q, dbell)
#define DEBUG_STATS_NAPI_POLL(qcq, work_done)
#endif
#endif /* _IONIC_LIF_H_ */

View File

@ -0,0 +1,831 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2022 Pensando Systems, Inc */
#include <linux/module.h>
#include <linux/version.h>
#include <linux/netdevice.h>
#include <linux/dynamic_debug.h>
#include <linux/utsname.h>
#include <linux/ctype.h>
#include "ionic.h"
#include "ionic_bus.h"
#include "ionic_lif.h"
#include "ionic_debugfs.h"
bool port_init_up = 1;
module_param(port_init_up, bool, 0);
MODULE_PARM_DESC(max_slaves, "Set port to ADMIN_UP on init (default 1, 0 to disable)");
MODULE_DESCRIPTION(IONIC_DRV_DESCRIPTION);
MODULE_AUTHOR("Pensando Systems, Inc");
MODULE_LICENSE("GPL");
MODULE_VERSION(IONIC_DRV_VERSION);
MODULE_INFO(supported, "external");
unsigned int rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT;
module_param(rx_copybreak, uint, 0600);
MODULE_PARM_DESC(rx_copybreak, "Maximum size of packet that is copied to a bounce buffer on RX");
unsigned int rx_fill_threshold = IONIC_RX_FILL_THRESHOLD;
module_param(rx_fill_threshold, uint, 0600);
MODULE_PARM_DESC(rx_fill_threshold, "Minimum number of buffers to fill");
unsigned int tx_budget = IONIC_TX_BUDGET_DEFAULT;
module_param(tx_budget, uint, 0600);
MODULE_PARM_DESC(tx_budget, "Number of tx completions to process per NAPI poll");
unsigned int devcmd_timeout = DEVCMD_TIMEOUT;
module_param(devcmd_timeout, uint, 0600);
MODULE_PARM_DESC(devcmd_timeout, "Devcmd timeout in seconds (default 30 secs)");
unsigned long affinity_mask_override;
module_param(affinity_mask_override, ulong, 0600);
MODULE_PARM_DESC(affinity_mask_override, "IRQ affinity mask to override (max 64 bits)");
static const char *ionic_error_to_str(enum ionic_status_code code)
{
switch (code) {
case IONIC_RC_SUCCESS:
return "IONIC_RC_SUCCESS";
case IONIC_RC_EVERSION:
return "IONIC_RC_EVERSION";
case IONIC_RC_EOPCODE:
return "IONIC_RC_EOPCODE";
case IONIC_RC_EIO:
return "IONIC_RC_EIO";
case IONIC_RC_EPERM:
return "IONIC_RC_EPERM";
case IONIC_RC_EQID:
return "IONIC_RC_EQID";
case IONIC_RC_EQTYPE:
return "IONIC_RC_EQTYPE";
case IONIC_RC_ENOENT:
return "IONIC_RC_ENOENT";
case IONIC_RC_EINTR:
return "IONIC_RC_EINTR";
case IONIC_RC_EAGAIN:
return "IONIC_RC_EAGAIN";
case IONIC_RC_ENOMEM:
return "IONIC_RC_ENOMEM";
case IONIC_RC_EFAULT:
return "IONIC_RC_EFAULT";
case IONIC_RC_EBUSY:
return "IONIC_RC_EBUSY";
case IONIC_RC_EEXIST:
return "IONIC_RC_EEXIST";
case IONIC_RC_EINVAL:
return "IONIC_RC_EINVAL";
case IONIC_RC_ENOSPC:
return "IONIC_RC_ENOSPC";
case IONIC_RC_ERANGE:
return "IONIC_RC_ERANGE";
case IONIC_RC_BAD_ADDR:
return "IONIC_RC_BAD_ADDR";
case IONIC_RC_DEV_CMD:
return "IONIC_RC_DEV_CMD";
case IONIC_RC_ENOSUPP:
return "IONIC_RC_ENOSUPP";
case IONIC_RC_ERROR:
return "IONIC_RC_ERROR";
case IONIC_RC_ERDMA:
return "IONIC_RC_ERDMA";
case IONIC_RC_BAD_FW:
return "IONIC_RC_BAD_FW";
default:
return "IONIC_RC_UNKNOWN";
}
}
/* Map a device status code onto the closest negative errno.  Codes with
 * the same translation are grouped together; anything unrecognized, and
 * the generic error codes, collapse to -EIO.
 */
int ionic_error_to_errno(enum ionic_status_code code)
{
	switch (code) {
	case IONIC_RC_SUCCESS:
		return 0;
	case IONIC_RC_EVERSION:
	case IONIC_RC_EQTYPE:
	case IONIC_RC_EQID:
	case IONIC_RC_EINVAL:
	case IONIC_RC_ENOSUPP:
		return -EINVAL;
	case IONIC_RC_EPERM:
		return -EPERM;
	case IONIC_RC_ENOENT:
		return -ENOENT;
	case IONIC_RC_EAGAIN:
		return -EAGAIN;
	case IONIC_RC_ENOMEM:
		return -ENOMEM;
	case IONIC_RC_EFAULT:
	case IONIC_RC_BAD_ADDR:
		return -EFAULT;
	case IONIC_RC_EBUSY:
		return -EBUSY;
	case IONIC_RC_EEXIST:
		return -EEXIST;
	case IONIC_RC_ENOSPC:
		return -ENOSPC;
	case IONIC_RC_ERANGE:
		return -ERANGE;
	case IONIC_RC_BAD_FW:
		return -ENOEXEC;
	case IONIC_RC_EOPCODE:
	case IONIC_RC_EINTR:
	case IONIC_RC_DEV_CMD:
	case IONIC_RC_ERROR:
	case IONIC_RC_ERDMA:
	case IONIC_RC_EIO:
	default:
		return -EIO;
	}
}
EXPORT_SYMBOL_GPL(ionic_error_to_errno);
/* Return the printable name of a dev command opcode.  Each case returns
 * the stringified enumerator itself, so the labels cannot drift from
 * the enum names.
 */
static const char *ionic_opcode_to_str(enum ionic_cmd_opcode opcode)
{
#define IONIC_OPCODE_CASE(x) case x: return #x
	switch (opcode) {
	IONIC_OPCODE_CASE(IONIC_CMD_NOP);
	IONIC_OPCODE_CASE(IONIC_CMD_INIT);
	IONIC_OPCODE_CASE(IONIC_CMD_RESET);
	IONIC_OPCODE_CASE(IONIC_CMD_IDENTIFY);
	IONIC_OPCODE_CASE(IONIC_CMD_GETATTR);
	IONIC_OPCODE_CASE(IONIC_CMD_SETATTR);
	IONIC_OPCODE_CASE(IONIC_CMD_PORT_IDENTIFY);
	IONIC_OPCODE_CASE(IONIC_CMD_PORT_INIT);
	IONIC_OPCODE_CASE(IONIC_CMD_PORT_RESET);
	IONIC_OPCODE_CASE(IONIC_CMD_PORT_GETATTR);
	IONIC_OPCODE_CASE(IONIC_CMD_PORT_SETATTR);
	IONIC_OPCODE_CASE(IONIC_CMD_LIF_INIT);
	IONIC_OPCODE_CASE(IONIC_CMD_LIF_RESET);
	IONIC_OPCODE_CASE(IONIC_CMD_LIF_IDENTIFY);
	IONIC_OPCODE_CASE(IONIC_CMD_LIF_SETATTR);
	IONIC_OPCODE_CASE(IONIC_CMD_LIF_GETATTR);
	IONIC_OPCODE_CASE(IONIC_CMD_LIF_SETPHC);
	IONIC_OPCODE_CASE(IONIC_CMD_RX_MODE_SET);
	IONIC_OPCODE_CASE(IONIC_CMD_RX_FILTER_ADD);
	IONIC_OPCODE_CASE(IONIC_CMD_RX_FILTER_DEL);
	IONIC_OPCODE_CASE(IONIC_CMD_Q_IDENTIFY);
	IONIC_OPCODE_CASE(IONIC_CMD_Q_INIT);
	IONIC_OPCODE_CASE(IONIC_CMD_Q_CONTROL);
	IONIC_OPCODE_CASE(IONIC_CMD_RDMA_RESET_LIF);
	IONIC_OPCODE_CASE(IONIC_CMD_RDMA_CREATE_EQ);
	IONIC_OPCODE_CASE(IONIC_CMD_RDMA_CREATE_CQ);
	IONIC_OPCODE_CASE(IONIC_CMD_RDMA_CREATE_ADMINQ);
	IONIC_OPCODE_CASE(IONIC_CMD_FW_DOWNLOAD);
	IONIC_OPCODE_CASE(IONIC_CMD_FW_CONTROL);
	IONIC_OPCODE_CASE(IONIC_CMD_FW_DOWNLOAD_V1);
	IONIC_OPCODE_CASE(IONIC_CMD_FW_CONTROL_V1);
	IONIC_OPCODE_CASE(IONIC_CMD_VF_GETATTR);
	IONIC_OPCODE_CASE(IONIC_CMD_VF_SETATTR);
	default:
		return "DEVCMD_UNKNOWN";
	}
#undef IONIC_OPCODE_CASE
}
const char *ionic_vf_attr_to_str(enum ionic_vf_attr attr)
{
switch (attr) {
case IONIC_VF_ATTR_SPOOFCHK:
return "IONIC_VF_ATTR_SPOOFCHK";
case IONIC_VF_ATTR_TRUST:
return "IONIC_VF_ATTR_TRUST";
case IONIC_VF_ATTR_LINKSTATE:
return "IONIC_VF_ATTR_LINKSTATE";
case IONIC_VF_ATTR_MAC:
return "IONIC_VF_ATTR_MAC";
case IONIC_VF_ATTR_VLAN:
return "IONIC_VF_ATTR_VLAN";
case IONIC_VF_ATTR_RATE:
return "IONIC_VF_ATTR_RATE";
case IONIC_VF_ATTR_STATSADDR:
return "IONIC_VF_ATTR_STATSADDR";
default:
return "IONIC_VF_ATTR_UNKNOWN";
}
}
/* Drop every descriptor still pending on the AdminQ: clear the command
 * bytes and detach the completion callbacks so no stale completion can
 * fire for them.  Called under timeout handling; holds adminq_lock.
 */
static void ionic_adminq_flush(struct ionic_lif *lif)
{
    struct ionic_queue *adminq;
    unsigned long flags;

    spin_lock_irqsave(&lif->adminq_lock, flags);

    /* AdminQ may already be gone (e.g. during teardown) */
    if (!lif->adminqcq) {
        spin_unlock_irqrestore(&lif->adminq_lock, flags);
        return;
    }

    adminq = &lif->adminqcq->q;
    for (; adminq->tail_idx != adminq->head_idx;
         adminq->tail_idx = (adminq->tail_idx + 1) & (adminq->num_descs - 1)) {
        struct ionic_desc_info *di = &adminq->info[adminq->tail_idx];

        memset(di->desc, 0, sizeof(union ionic_adminq_cmd));
        di->cb = NULL;
        di->cb_arg = NULL;
    }

    spin_unlock_irqrestore(&lif->adminq_lock, flags);
}
/* Log an AdminQ command failure on the netdev, translating the opcode
 * and status to readable names.  A timeout (-ETIMEDOUT) is reported as
 * "TIMEOUT" since there is no device status to translate in that case.
 */
void ionic_adminq_netdev_err_print(struct ionic_lif *lif, u8 opcode,
                   u8 status, int err)
{
    const char *status_str = (err == -ETIMEDOUT) ? "TIMEOUT"
                             : ionic_error_to_str(status);

    netdev_err(lif->netdev, "%s (%d) failed: %s (%d)\n",
           ionic_opcode_to_str(opcode), opcode, status_str, err);
}
/* Convert an AdminQ completion status (or a wait timeout) to an errno.
 * On timeout the queue is flushed so stale descriptors can't complete
 * later against a caller that has given up.  Returns 0 on success.
 */
static int ionic_adminq_check_err(struct ionic_lif *lif,
                  struct ionic_admin_ctx *ctx,
                  const bool timeout,
                  const bool do_msg)
{
    int err;

    /* success: device reported no status and we didn't time out */
    if (!ctx->comp.comp.status && !timeout)
        return 0;

    err = timeout ? -ETIMEDOUT : ionic_error_to_errno(ctx->comp.comp.status);

    if (do_msg)
        ionic_adminq_netdev_err_print(lif, ctx->cmd.cmd.opcode,
                          ctx->comp.comp.status, err);

    if (timeout)
        ionic_adminq_flush(lif);

    return err;
}
/* AdminQ descriptor completion callback: copy the device's completion
 * into the poster's context and wake the waiter in ionic_adminq_wait().
 */
static void ionic_adminq_cb(struct ionic_queue *q,
                struct ionic_desc_info *desc_info,
                struct ionic_cq_info *cq_info, void *cb_arg)
{
    struct ionic_admin_ctx *ctx = cb_arg;
    struct ionic_admin_comp *comp;

    /* cb_arg can be NULL if the descriptor was flushed (see
     * ionic_adminq_flush(), which clears cb_arg) — nothing to report.
     */
    if (!ctx)
        return;

    comp = cq_info->cq_desc;

    /* note: copy size is sizeof(*comp), the wire completion struct */
    memcpy(&ctx->comp, comp, sizeof(*comp));

    dev_dbg(q->dev, "comp admin queue command:\n");
    dynamic_hex_dump("comp ", DUMP_PREFIX_OFFSET, 16, 1,
             &ctx->comp, sizeof(ctx->comp), true);

    /* complete_all(): safe even if the waiter already timed out */
    complete_all(&ctx->work);
}
/* Re-ring the AdminQ doorbell if work is still outstanding and the
 * doorbell deadline has passed.  Returns true when the queue still has
 * unserviced descriptors, false when it is empty.
 */
bool ionic_adminq_poke_doorbell(struct ionic_queue *q)
{
    struct ionic_lif *lif = q->lif;
    unsigned long flags;
    unsigned long now;

    spin_lock_irqsave(&lif->adminq_lock, flags);

    /* empty queue — no doorbell needed */
    if (q->tail_idx == q->head_idx) {
        spin_unlock_irqrestore(&lif->adminq_lock, flags);
        return false;
    }

    now = READ_ONCE(jiffies);
    if (now - q->dbell_jiffies > q->dbell_deadline) {
        ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
                 q->dbval | q->head_idx);
        q->dbell_jiffies = now;
    }

    spin_unlock_irqrestore(&lif->adminq_lock, flags);

    return true;
}
/* Post one admin command to the AdminQ (does not wait for completion;
 * pair with ionic_adminq_wait()).  Returns 0 on success, -EIO if the
 * AdminQ is gone, -ENOSPC if it is full, or a heartbeat-check error.
 */
int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
{
    struct ionic_desc_info *desc_info;
    struct ionic_queue *adminq;
    unsigned long flags;
    int err = 0;

    spin_lock_irqsave(&lif->adminq_lock, flags);

    if (!lif->adminqcq) {
        spin_unlock_irqrestore(&lif->adminq_lock, flags);
        return -EIO;
    }

    adminq = &lif->adminqcq->q;

    if (!ionic_q_has_space(adminq, 1)) {
        err = -ENOSPC;
        goto unlock_out;
    }

    /* don't bother posting if FW isn't responding */
    err = ionic_heartbeat_check(lif->ionic);
    if (err)
        goto unlock_out;

    desc_info = &adminq->info[adminq->head_idx];
    memcpy(desc_info->desc, &ctx->cmd, sizeof(ctx->cmd));

    dev_dbg(&lif->netdev->dev, "post admin queue command:\n");
    dynamic_hex_dump("cmd ", DUMP_PREFIX_OFFSET, 16, 1,
             &ctx->cmd, sizeof(ctx->cmd), true);

    ionic_q_post(adminq, true, ionic_adminq_cb, ctx);

unlock_out:
    spin_unlock_irqrestore(&lif->adminq_lock, flags);

    return err;
}
/* Wait for a previously posted AdminQ command to complete.
 *
 * @err is the result of the ionic_adminq_post() call: if posting already
 * failed there is nothing to wait for.  The wait is sliced into
 * IONIC_ADMINQ_TIME_SLICE chunks so we can poll FW health and bail out
 * early with -ENXIO if the FW goes into reset.  Otherwise the final
 * result comes from ionic_adminq_check_err() (0, errno, or -ETIMEDOUT).
 */
int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx,
              const int err, const bool do_msg)
{
    struct net_device *netdev = lif->netdev;
    unsigned long time_limit;
    unsigned long time_start;
    unsigned long time_done;
    unsigned long remaining;
    const char *name;

    name = ionic_opcode_to_str(ctx->cmd.cmd.opcode);

    /* posting failed — report (unless FW reset makes it expected noise)
     * and mark the completion status so callers see a failure
     */
    if (err) {
        if (do_msg && !test_bit(IONIC_LIF_F_FW_RESET, lif->state))
            netdev_err(netdev, "Posting of %s (%d) failed: %d\n",
                   name, ctx->cmd.cmd.opcode, err);
        ctx->comp.comp.status = IONIC_RC_ERROR;
        return err;
    }

    time_start = jiffies;
    time_limit = time_start + HZ * (ulong)devcmd_timeout;
    do {
        remaining = wait_for_completion_timeout(&ctx->work,
                            IONIC_ADMINQ_TIME_SLICE);

        /* check for done */
        if (remaining)
            break;

        /* force a check of FW status and break out if FW reset */
        (void) ionic_heartbeat_check(lif->ionic);
        if ((test_bit(IONIC_LIF_F_FW_RESET, lif->state) &&
             !lif->ionic->idev.fw_status_ready) ||
            test_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) {
            if (do_msg)
                netdev_warn(netdev, "%s (%d) interrupted, FW in reset\n",
                        name, ctx->cmd.cmd.opcode);
            ctx->comp.comp.status = IONIC_RC_ERROR;
            return -ENXIO;
        }

    } while (time_before(jiffies, time_limit));
    time_done = jiffies;

    dev_dbg(lif->ionic->dev, "%s: elapsed %d msecs\n",
        __func__, jiffies_to_msecs(time_done - time_start));

    /* timed out iff we fell out of the loop past time_limit */
    return ionic_adminq_check_err(lif, ctx,
                      time_after_eq(time_done, time_limit),
                      do_msg);
}
/* Post an admin command and wait for its completion in one call.
 * Shared implementation behind the msg/nomsg public wrappers.
 */
static int __ionic_adminq_post_wait(struct ionic_lif *lif,
                    struct ionic_admin_ctx *ctx,
                    const bool do_msg)
{
    int rc;

    /* if platform dev is resetting, don't bother with AdminQ, it's not there */
    if (lif->ionic->pfdev && test_bit(IONIC_LIF_F_FW_STOPPING, lif->state))
        return 0;

    rc = ionic_adminq_post(lif, ctx);

    return ionic_adminq_wait(lif, ctx, rc, do_msg);
}
/* Post an admin command and wait, logging any failure. */
int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
{
    return __ionic_adminq_post_wait(lif, ctx, true);
}
/* Post an admin command and wait, without logging failures (for callers
 * that expect and handle errors themselves).
 */
int ionic_adminq_post_wait_nomsg(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
{
    return __ionic_adminq_post_wait(lif, ctx, false);
}
/* Clear out an aborted/finished dev command: ring the doorbell register
 * back to 0 first, then wipe the command area.  Order matters — the
 * doorbell is cleared before the command bytes are zeroed.
 */
static void ionic_dev_cmd_clean(struct ionic *ionic)
{
    struct ionic_dev *idev = &ionic->idev;

    iowrite32(0, &idev->dev_cmd_regs->doorbell);
    memset_io(&idev->dev_cmd_regs->cmd, 0, sizeof(idev->dev_cmd_regs->cmd));
}
/* Log a DevCmd failure on the device, translating opcode and status to
 * readable names; timeouts are reported as "TIMEOUT".
 */
void ionic_dev_cmd_dev_err_print(struct ionic *ionic, u8 opcode, u8 status,
                 int err)
{
    const char *status_str = (err == -ETIMEDOUT) ? "TIMEOUT"
                             : ionic_error_to_str(status);

    dev_err(ionic->dev, "DEV_CMD %s (%d) error, %s (%d) failed\n",
        ionic_opcode_to_str(opcode), opcode, status_str, err);
}
/* Poll for completion of the DevCmd currently in the device registers.
 *
 * Spins (with usleep) until the done bit is set, the FW stops running,
 * or max_seconds elapses.  An IONIC_RC_EAGAIN status from the device
 * re-arms the doorbell and retries until the overall deadline.
 * Returns 0, a translated errno from the device status, -ENXIO if FW
 * went down, or -ETIMEDOUT.
 */
static int __ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds,
                const bool do_msg)
{
    struct ionic_dev *idev = &ionic->idev;
    unsigned long start_time;
    unsigned long max_wait;
    unsigned long duration;
    int done = 0;
    bool fw_up;
    int opcode;
    int err;

    /* Wait for dev cmd to complete, retrying if we get EAGAIN,
     * but don't wait any longer than max_seconds.
     */
    max_wait = jiffies + (max_seconds * HZ);

try_again:
    opcode = ioread8(&idev->dev_cmd_regs->cmd.cmd.opcode);
    start_time = jiffies;
    for (fw_up = ionic_is_fw_running(idev);
         !done && fw_up && time_before(jiffies, max_wait);
         fw_up = ionic_is_fw_running(idev)) {
        done = ionic_dev_cmd_done(idev);
        if (done)
            break;
        usleep_range(100, 200);
    }
    duration = jiffies - start_time;

    dev_dbg(ionic->dev, "DEVCMD %s (%d) done=%d took %ld secs (%ld jiffies)\n",
        ionic_opcode_to_str(opcode), opcode,
        done, duration / HZ, duration);

    /* FW went away mid-command: clean up and bail */
    if (!done && !fw_up) {
        ionic_dev_cmd_clean(ionic);
        dev_warn(ionic->dev, "DEVCMD %s (%d) interrupted - FW is down\n",
             ionic_opcode_to_str(opcode), opcode);
        return -ENXIO;
    }

    /* deadline passed without completion */
    if (!done && !time_before(jiffies, max_wait)) {
        ionic_dev_cmd_clean(ionic);
        dev_warn(ionic->dev, "DEVCMD %s (%d) timeout after %ld secs\n",
             ionic_opcode_to_str(opcode), opcode, max_seconds);
        return -ETIMEDOUT;
    }

    err = ionic_dev_cmd_status(&ionic->idev);
    if (err) {
        /* device asked us to retry — re-ring the doorbell after a
         * pause, as long as the overall deadline hasn't passed
         */
        if (err == IONIC_RC_EAGAIN && !time_after(jiffies, max_wait)) {
            dev_dbg(ionic->dev, "DEV_CMD %s (%d), %s (%d) retrying...\n",
                ionic_opcode_to_str(opcode), opcode,
                ionic_error_to_str(err), err);

            iowrite32(0, &idev->dev_cmd_regs->done);
            msleep(1000);
            iowrite32(1, &idev->dev_cmd_regs->doorbell);
            goto try_again;
        }

        if (do_msg)
            ionic_dev_cmd_dev_err_print(ionic, opcode, err,
                            ionic_error_to_errno(err));

        return ionic_error_to_errno(err);
    }

    ionic_dev_cmd_clean(ionic);

    return 0;
}
/* Wait for the pending DevCmd, logging any failure. */
int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds)
{
    return __ionic_dev_cmd_wait(ionic, max_seconds, true);
}
/* Wait for the pending DevCmd without logging failures. */
int ionic_dev_cmd_wait_nomsg(struct ionic *ionic, unsigned long max_seconds)
{
    return __ionic_dev_cmd_wait(ionic, max_seconds, false);
}
/* Configure the device's DMA addressing (IONIC_ADDR_LEN bits) for both
 * streaming and coherent allocations.  Returns 0 or a dma_set_* errno.
 */
int ionic_set_dma_mask(struct ionic *ionic)
{
    int err;

    /* Query system for DMA addressing limitation for the device. */
#ifdef CONFIG_PPC64
    ionic->pdev->no_64bit_msi = 1;
#endif
    err = dma_set_mask_and_coherent(ionic->dev, DMA_BIT_MASK(IONIC_ADDR_LEN));
    if (err)
        dev_err(ionic->dev, "Unable to obtain 64-bit DMA for consistent allocations, aborting. err=%d\n",
            err);

    return err;
}
/* One-time device setup: map/init the ionic_dev, add the dev_cmd
 * debugfs entry, and reset the device to a known state.
 */
int ionic_setup(struct ionic *ionic)
{
    int err = ionic_dev_setup(ionic);

    if (err)
        return err;

    ionic_debugfs_add_dev_cmd(ionic);
    ionic_reset(ionic);

    return 0;
}
/* Exchange identity information with the device.
 *
 * Fills in the driver's identity (OS type, kernel and driver versions),
 * pushes it through the dev_cmd data window, and reads back the device
 * identity.  Also logs the FW version and identifies the CLASSIC LIF.
 * Returns 0 or a dev_cmd / lif-identify errno.
 */
int ionic_identify(struct ionic *ionic)
{
    struct ionic_identity *ident = &ionic->ident;
    struct ionic_dev *idev = &ionic->idev;
    size_t sz;
    int err;

    memset(ident, 0, sizeof(*ident));

    ident->drv.os_type = cpu_to_le32(IONIC_OS_TYPE_LINUX);
    ident->drv.os_dist = 0;
    /* strncpy here is safe: the memset above guarantees NUL termination
     * since we copy at most sizeof - 1 bytes
     */
    strncpy(ident->drv.os_dist_str, utsname()->release,
        sizeof(ident->drv.os_dist_str) - 1);
    ident->drv.kernel_ver = cpu_to_le32(LINUX_VERSION_CODE);
    strncpy(ident->drv.kernel_ver_str, utsname()->version,
        sizeof(ident->drv.kernel_ver_str) - 1);
    strncpy(ident->drv.driver_ver_str, IONIC_DRV_VERSION,
        sizeof(ident->drv.driver_ver_str) - 1);

    mutex_lock(&ionic->dev_cmd_lock);

    /* copy the driver identity into the dev_cmd data window, bounded by
     * whichever side is smaller
     */
    sz = min(sizeof(ident->drv), sizeof(idev->dev_cmd_regs->data));
    memcpy_toio(&idev->dev_cmd_regs->data, &ident->drv, sz);

#if defined(IONIC_DEV_IDENTITY_VERSION_2)
    ionic_dev_cmd_identify(idev, IONIC_DEV_IDENTITY_VERSION_2);
#else
    ionic_dev_cmd_identify(idev, IONIC_IDENTITY_VERSION_1);
#endif
    err = ionic_dev_cmd_wait(ionic, devcmd_timeout);
    if (!err) {
        sz = min(sizeof(ident->dev), sizeof(idev->dev_cmd_regs->data));
        memcpy_fromio(&ident->dev, &idev->dev_cmd_regs->data, sz);
    }
    mutex_unlock(&ionic->dev_cmd_lock);

    if (err) {
        dev_err(ionic->dev, "Cannot identify ionic: %d\n", err);
        goto err_out;
    }

    /* only print the FW version string if it looks like text */
    if (isprint(idev->dev_info.fw_version[0]) &&
        isascii(idev->dev_info.fw_version[0]))
        dev_info(ionic->dev, "FW: %.*s\n",
             (int)(sizeof(idev->dev_info.fw_version) - 1),
             idev->dev_info.fw_version);
    else
        dev_info(ionic->dev, "FW: (invalid string) 0x%02x 0x%02x 0x%02x 0x%02x ...\n",
             (u8)idev->dev_info.fw_version[0],
             (u8)idev->dev_info.fw_version[1],
             (u8)idev->dev_info.fw_version[2],
             (u8)idev->dev_info.fw_version[3]);

    err = ionic_lif_identify(ionic, IONIC_LIF_TYPE_CLASSIC,
                 &ionic->ident.lif);
    if (err) {
        dev_err(ionic->dev, "Cannot identify LIFs: %d\n", err);
        goto err_out;
    }

    return 0;

err_out:
    return err;
}
/* Issue the device INIT command under the dev_cmd lock. */
int ionic_init(struct ionic *ionic)
{
    int err;

    mutex_lock(&ionic->dev_cmd_lock);
    ionic_dev_cmd_init(&ionic->idev);
    err = ionic_dev_cmd_wait(ionic, devcmd_timeout);
    mutex_unlock(&ionic->dev_cmd_lock);

    return err;
}
/* Issue the device RESET command under the dev_cmd lock.  Skipped (and
 * treated as success) when the FW is not running.
 */
int ionic_reset(struct ionic *ionic)
{
    int err;

    if (!ionic_is_fw_running(&ionic->idev))
        return 0;

    mutex_lock(&ionic->dev_cmd_lock);
    ionic_dev_cmd_reset(&ionic->idev);
    err = ionic_dev_cmd_wait(ionic, devcmd_timeout);
    mutex_unlock(&ionic->dev_cmd_lock);

    return err;
}
/* Read the port identity from the device into ionic->ident.port and
 * dump the resulting config to the debug log.  Returns the dev_cmd
 * result; on failure ident->port is left unchanged.
 */
int ionic_port_identify(struct ionic *ionic)
{
    struct ionic_identity *ident = &ionic->ident;
    struct ionic_dev *idev = &ionic->idev;
    struct device *dev = ionic->dev;
    size_t sz;
    int err;

    mutex_lock(&ionic->dev_cmd_lock);

    ionic_dev_cmd_port_identify(idev);
    err = ionic_dev_cmd_wait(ionic, devcmd_timeout);
    if (!err) {
        /* bounded copy out of the dev_cmd data window */
        sz = min(sizeof(ident->port), sizeof(idev->dev_cmd_regs->data));
        memcpy_fromio(&ident->port, &idev->dev_cmd_regs->data, sz);
    }

    mutex_unlock(&ionic->dev_cmd_lock);

    dev_dbg(dev, "type %d\n", ident->port.type);
    dev_dbg(dev, "speed %d\n", ident->port.config.speed);
    dev_dbg(dev, "mtu %d\n", ident->port.config.mtu);
    dev_dbg(dev, "state %d\n", ident->port.config.state);
    dev_dbg(dev, "an_enable %d\n", ident->port.config.an_enable);
    dev_dbg(dev, "fec_type %d\n", ident->port.config.fec_type);
    dev_dbg(dev, "pause_type %d\n", ident->port.config.pause_type);
    dev_dbg(dev, "loopback_mode %d\n", ident->port.config.loopback_mode);

    return err;
}
/* Initialize the device port: allocate the DMA-coherent port_info area
 * (once; reused across re-init), push the identified port config, and
 * optionally bring the port admin-UP when the port_init_up module
 * parameter is set.  The port-state result is deliberately ignored —
 * only the port_init result decides success.  On failure the port_info
 * buffer is freed again.
 */
int ionic_port_init(struct ionic *ionic)
{
    struct ionic_identity *ident = &ionic->ident;
    struct ionic_dev *idev = &ionic->idev;
    size_t sz;
    int err;

    /* allocate once; later calls reuse the existing buffer */
    if (!idev->port_info) {
        idev->port_info_sz = ALIGN(sizeof(*idev->port_info), PAGE_SIZE);
        idev->port_info = dma_alloc_coherent(ionic->dev,
                             idev->port_info_sz,
                             &idev->port_info_pa,
                             GFP_KERNEL);
        if (!idev->port_info)
            return -ENOMEM;
    }

    sz = min(sizeof(ident->port.config), sizeof(idev->dev_cmd_regs->data));

    mutex_lock(&ionic->dev_cmd_lock);

    memcpy_toio(&idev->dev_cmd_regs->data, &ident->port.config, sz);
    ionic_dev_cmd_port_init(idev);
    err = ionic_dev_cmd_wait(ionic, devcmd_timeout);

    /* best-effort admin-UP; its result does not affect err */
    if (port_init_up) {
        ionic_dev_cmd_port_state(&ionic->idev, IONIC_PORT_ADMIN_STATE_UP);
        (void)ionic_dev_cmd_wait(ionic, devcmd_timeout);
    }

    mutex_unlock(&ionic->dev_cmd_lock);
    if (err) {
        dev_err(ionic->dev, "Failed to init port\n");
        dma_free_coherent(ionic->dev, idev->port_info_sz,
                  idev->port_info, idev->port_info_pa);
        idev->port_info = NULL;
        idev->port_info_pa = 0;
    }

    return err;
}
/* Reset the device port and release the port_info DMA buffer.  The
 * reset command is only sent while FW is running; the buffer is freed
 * unconditionally afterwards so the next ionic_port_init() reallocates.
 */
int ionic_port_reset(struct ionic *ionic)
{
    struct ionic_dev *idev = &ionic->idev;
    int err = 0;

    /* nothing to do if the port was never initialized */
    if (!idev->port_info)
        return 0;

    if (ionic_is_fw_running(idev)) {
        mutex_lock(&ionic->dev_cmd_lock);
        ionic_dev_cmd_port_reset(idev);
        err = ionic_dev_cmd_wait(ionic, devcmd_timeout);
        mutex_unlock(&ionic->dev_cmd_lock);
    }

    dma_free_coherent(ionic->dev, idev->port_info_sz,
              idev->port_info, idev->port_info_pa);

    idev->port_info = NULL;
    idev->port_info_pa = 0;

    return err;
}
/* Module entry: set up debugfs, clamp the affinity_mask_override module
 * parameter to the CPUs actually present, and register the bus driver.
 */
static int __init ionic_init_module(void)
{
    unsigned long max_affinity =
        GENMASK_ULL((min(num_present_cpus(),
                 (unsigned int)(sizeof(unsigned long)*BITS_PER_BYTE))-1), 0);

    pr_info("%s %s, ver %s\n",
        IONIC_DRV_NAME, IONIC_DRV_DESCRIPTION, IONIC_DRV_VERSION);

    ionic_debugfs_create();

    if (affinity_mask_override) {
        /* limit affinity mask override to the available CPUs */
        if (affinity_mask_override > max_affinity) {
            affinity_mask_override &= max_affinity;
            pr_info("limiting affinity mask to: 0x%lx\n",
                affinity_mask_override);
        } else {
            pr_info("affinity_mask_override: %lx\n",
                affinity_mask_override);
        }
    }

    return ionic_bus_register_driver();
}
/* Module exit: shorten the devcmd timeout so a hung FW can't stall
 * unload, then unregister the bus driver and tear down debugfs.
 */
static void __exit ionic_cleanup_module(void)
{
    /* If there's a long devcmd_timeout set, don't let
     * hung FW slow us down when exiting
     */
    devcmd_timeout = min_t(int, devcmd_timeout, SHORT_TIMEOUT);

    ionic_bus_unregister_driver();
    ionic_debugfs_destroy();

    pr_info("%s removed\n", IONIC_DRV_NAME);
}
module_init(ionic_init_module);
module_exit(ionic_cleanup_module);

View File

@ -0,0 +1,711 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2022 Pensando Systems, Inc */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include "ionic.h"
#include "ionic_bus.h"
#include "ionic_lif.h"
#include "ionic_ethtool.h"
/* XXX not for upstream: kernel config is changed in kcompat */
/* normally this file will not be compiled if ptp is not enabled */
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
/* Translate a kernel HWTSTAMP_TX_* request into the device's
 * IONIC_TXSTAMP_* mode, or -ERANGE for an unsupported request.
 */
static int ionic_hwstamp_tx_mode(int config_tx_type)
{
    if (config_tx_type == HWTSTAMP_TX_OFF)
        return IONIC_TXSTAMP_OFF;
    if (config_tx_type == HWTSTAMP_TX_ON)
        return IONIC_TXSTAMP_ON;
    if (config_tx_type == HWTSTAMP_TX_ONESTEP_SYNC)
        return IONIC_TXSTAMP_ONESTEP_SYNC;
#ifdef HAVE_HWSTAMP_TX_ONESTEP_P2P
    if (config_tx_type == HWTSTAMP_TX_ONESTEP_P2P)
        return IONIC_TXSTAMP_ONESTEP_P2P;
#endif
    return -ERANGE;
}
/* Translate a kernel HWTSTAMP_FILTER_* request into the device's
 * packet-classifier bitmask; 0 means no direct mapping exists (caller
 * may fall back to timestamp-all).
 */
static u64 ionic_hwstamp_rx_filt(int config_rx_filter)
{
    if (config_rx_filter == HWTSTAMP_FILTER_PTP_V1_L4_EVENT)
        return IONIC_PKT_CLS_PTP1_ALL;
    if (config_rx_filter == HWTSTAMP_FILTER_PTP_V1_L4_SYNC)
        return IONIC_PKT_CLS_PTP1_SYNC;
    if (config_rx_filter == HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ)
        return IONIC_PKT_CLS_PTP1_SYNC | IONIC_PKT_CLS_PTP1_DREQ;
    if (config_rx_filter == HWTSTAMP_FILTER_PTP_V2_L4_EVENT)
        return IONIC_PKT_CLS_PTP2_L4_ALL;
    if (config_rx_filter == HWTSTAMP_FILTER_PTP_V2_L4_SYNC)
        return IONIC_PKT_CLS_PTP2_L4_SYNC;
    if (config_rx_filter == HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ)
        return IONIC_PKT_CLS_PTP2_L4_SYNC | IONIC_PKT_CLS_PTP2_L4_DREQ;
    if (config_rx_filter == HWTSTAMP_FILTER_PTP_V2_L2_EVENT)
        return IONIC_PKT_CLS_PTP2_L2_ALL;
    if (config_rx_filter == HWTSTAMP_FILTER_PTP_V2_L2_SYNC)
        return IONIC_PKT_CLS_PTP2_L2_SYNC;
    if (config_rx_filter == HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ)
        return IONIC_PKT_CLS_PTP2_L2_SYNC | IONIC_PKT_CLS_PTP2_L2_DREQ;
    if (config_rx_filter == HWTSTAMP_FILTER_PTP_V2_EVENT)
        return IONIC_PKT_CLS_PTP2_ALL;
    if (config_rx_filter == HWTSTAMP_FILTER_PTP_V2_SYNC)
        return IONIC_PKT_CLS_PTP2_SYNC;
    if (config_rx_filter == HWTSTAMP_FILTER_PTP_V2_DELAY_REQ)
        return IONIC_PKT_CLS_PTP2_SYNC | IONIC_PKT_CLS_PTP2_DREQ;
#ifdef HAVE_HWTSTAMP_FILTER_NTP_ALL
    if (config_rx_filter == HWTSTAMP_FILTER_NTP_ALL)
        return IONIC_PKT_CLS_NTP_ALL;
#endif
    return 0;
}
/* Apply a hardware timestamping configuration to the LIF.
 *
 * With new_ts == NULL, replays the previously saved configuration
 * (used for recovery after FW reset).  Validates the requested tx mode
 * and rx filter against the device's advertised capabilities, creates
 * the special timestamp queues as needed, and programs the device.
 * On mid-sequence failure, already-applied tx/rx settings are reverted;
 * the special queues stay allocated but unused.  Holds phc->config_lock
 * for the duration; callers hold queue_lock.
 */
static int ionic_lif_hwstamp_set_ts_config(struct ionic_lif *lif,
                       struct hwtstamp_config *new_ts)
{
    struct ionic *ionic = lif->ionic;
    struct hwtstamp_config *config;
    struct hwtstamp_config ts;
    int tx_mode = 0;
    u64 rx_filt = 0;
    int err, err2;
    bool rx_all;
    __le64 mask;

    if (!lif->phc || !lif->phc->ptp)
        return -EOPNOTSUPP;

    mutex_lock(&lif->phc->config_lock);

    if (new_ts) {
        config = new_ts;
    } else {
        /* If called with new_ts == NULL, replay the previous request
         * primarily for recovery after a FW_RESET.
         * We saved the previous configuration request info, so copy
         * the previous request for reference, clear the current state
         * to match the device's reset state, and run with it.
         */
        config = &ts;
        memcpy(config, &lif->phc->ts_config, sizeof(*config));
        memset(&lif->phc->ts_config, 0, sizeof(lif->phc->ts_config));
        lif->phc->ts_config_tx_mode = 0;
        lif->phc->ts_config_rx_filt = 0;
    }

    tx_mode = ionic_hwstamp_tx_mode(config->tx_type);
    if (tx_mode < 0) {
        err = tx_mode;
        goto err_queues;
    }

    /* reject tx modes the device does not advertise */
    mask = cpu_to_le64(BIT_ULL(tx_mode));
    if ((ionic->ident.lif.eth.hwstamp_tx_modes & mask) != mask) {
        err = -ERANGE;
        goto err_queues;
    }

    rx_filt = ionic_hwstamp_rx_filt(config->rx_filter);
    /* rx_all: a filter was requested but has no device mapping */
    rx_all = config->rx_filter != HWTSTAMP_FILTER_NONE && !rx_filt;

    /* fall back to timestamp-all if device lacks the exact filter */
    mask = cpu_to_le64(rx_filt);
    if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) != mask) {
        rx_filt = 0;
        rx_all = true;
        config->rx_filter = HWTSTAMP_FILTER_ALL;
    }

    dev_dbg(ionic->dev, "%s: config_rx_filter %d rx_filt %#llx rx_all %d\n",
        __func__, config->rx_filter, rx_filt, rx_all);

    if (tx_mode) {
        err = ionic_lif_create_hwstamp_txq(lif);
        if (err)
            goto err_queues;
    }

    if (rx_filt) {
        err = ionic_lif_create_hwstamp_rxq(lif);
        if (err)
            goto err_queues;
    }

    /* program the device only where the setting actually changed */
    if (tx_mode != lif->phc->ts_config_tx_mode) {
        err = ionic_lif_set_hwstamp_txmode(lif, tx_mode);
        if (err)
            goto err_txmode;
    }

    if (rx_filt != lif->phc->ts_config_rx_filt) {
        err = ionic_lif_set_hwstamp_rxfilt(lif, rx_filt);
        if (err)
            goto err_rxfilt;
    }

    if (rx_all != (lif->phc->ts_config.rx_filter == HWTSTAMP_FILTER_ALL)) {
        err = ionic_lif_config_hwstamp_rxq_all(lif, rx_all);
        if (err)
            goto err_rxall;
    }

    /* success: remember the applied configuration for get()/replay() */
    memcpy(&lif->phc->ts_config, config, sizeof(*config));
    lif->phc->ts_config_rx_filt = rx_filt;
    lif->phc->ts_config_tx_mode = tx_mode;

    mutex_unlock(&lif->phc->config_lock);

    return 0;

err_rxall:
    if (rx_filt != lif->phc->ts_config_rx_filt) {
        rx_filt = lif->phc->ts_config_rx_filt;
        err2 = ionic_lif_set_hwstamp_rxfilt(lif, rx_filt);
        if (err2)
            dev_err(ionic->dev,
                "Failed to revert rx timestamp filter: %d\n", err2);
    }
err_rxfilt:
    if (tx_mode != lif->phc->ts_config_tx_mode) {
        tx_mode = lif->phc->ts_config_tx_mode;
        err2 = ionic_lif_set_hwstamp_txmode(lif, tx_mode);
        if (err2)
            dev_err(ionic->dev,
                "Failed to revert tx timestamp mode: %d\n", err2);
    }
err_txmode:
    /* special queues remain allocated, just unused */
err_queues:
    mutex_unlock(&lif->phc->config_lock);
    return err;
}
int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr)
{
struct hwtstamp_config config;
int err;
if (!lif->phc || !lif->phc->ptp)
return -EOPNOTSUPP;
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
mutex_lock(&lif->queue_lock);
err = ionic_lif_hwstamp_set_ts_config(lif, &config);
mutex_unlock(&lif->queue_lock);
if (err) {
netdev_info(lif->netdev, "hwstamp set failed: %d\n", err);
return err;
}
if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
return -EFAULT;
return 0;
}
/* Re-apply the saved timestamping configuration (after FW reset). */
void ionic_lif_hwstamp_replay(struct ionic_lif *lif)
{
    int rc;

    if (!lif->phc || !lif->phc->ptp)
        return;

    mutex_lock(&lif->queue_lock);
    rc = ionic_lif_hwstamp_set_ts_config(lif, NULL);
    mutex_unlock(&lif->queue_lock);
    if (rc)
        netdev_info(lif->netdev, "hwstamp replay failed: %d\n", rc);
}
/* Recreate the special timestamp tx/rx queues if the saved config says
 * they were in use; failures are logged but otherwise non-fatal.
 */
void ionic_lif_hwstamp_recreate_queues(struct ionic_lif *lif)
{
    int rc;

    if (!lif->phc || !lif->phc->ptp)
        return;

    mutex_lock(&lif->phc->config_lock);

    if (lif->phc->ts_config_tx_mode) {
        rc = ionic_lif_create_hwstamp_txq(lif);
        if (rc)
            netdev_info(lif->netdev, "hwstamp recreate txq failed: %d\n", rc);
    }

    if (lif->phc->ts_config_rx_filt) {
        rc = ionic_lif_create_hwstamp_rxq(lif);
        if (rc)
            netdev_info(lif->netdev, "hwstamp recreate rxq failed: %d\n", rc);
    }

    mutex_unlock(&lif->phc->config_lock);
}
int ionic_lif_hwstamp_get(struct ionic_lif *lif, struct ifreq *ifr)
{
struct hwtstamp_config config;
if (!lif->phc || !lif->phc->ptp)
return -EOPNOTSUPP;
mutex_lock(&lif->phc->config_lock);
memcpy(&config, &lif->phc->ts_config, sizeof(config));
mutex_unlock(&lif->phc->config_lock);
if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
return -EFAULT;
return 0;
}
/* Read the device's 64-bit free-running timestamp counter from two
 * 32-bit MMIO registers, coping with the high word rolling over between
 * the two reads.  When PHC_GETTIMEX64 is available, sts brackets the
 * low-word read for precise system-time correlation.
 */
#ifdef HAVE_PHC_GETTIMEX64
static u64 ionic_hwstamp_read(struct ionic *ionic,
                  struct ptp_system_timestamp *sts)
#else
static u64 ionic_hwstamp_read(struct ionic *ionic)
#endif
{
    u32 tick_high_before, tick_high, tick_low;

    /* read and discard low part to defeat hw staging of high part */
    (void)ioread32(&ionic->idev.hwstamp_regs->tick_low);
    tick_high_before = ioread32(&ionic->idev.hwstamp_regs->tick_high);
#ifdef HAVE_PHC_GETTIMEX64
    ptp_read_system_prets(sts);
#endif
    tick_low = ioread32(&ionic->idev.hwstamp_regs->tick_low);
#ifdef HAVE_PHC_GETTIMEX64
    ptp_read_system_postts(sts);
#endif
    tick_high = ioread32(&ionic->idev.hwstamp_regs->tick_high);

    /* If tick_high changed, re-read tick_low once more. Assume tick_high
     * cannot change again so soon as in the span of re-reading tick_low.
     */
    if (tick_high != tick_high_before) {
#ifdef HAVE_PHC_GETTIMEX64
        ptp_read_system_prets(sts);
#endif
        tick_low = ioread32(&ionic->idev.hwstamp_regs->tick_low);
#ifdef HAVE_PHC_GETTIMEX64
        ptp_read_system_postts(sts);
#endif
    }

    return (u64)tick_low | ((u64)tick_high << 32);
}
/* cyclecounter read hook: fetch the raw device tick counter. */
static u64 ionic_cc_read(const struct cyclecounter *cc)
{
    struct ionic_phc *phc = container_of(cc, struct ionic_phc, cc);

#ifdef HAVE_PHC_GETTIMEX64
    /* no system-timestamp bracketing needed on this path */
    return ionic_hwstamp_read(phc->lif->ionic, NULL);
#else
    return ionic_hwstamp_read(phc->lif->ionic);
#endif
}
/* Build and post (without waiting) a LIF_SETPHC admin command carrying
 * a snapshot of the current timecounter/cyclecounter state, so the
 * device can timestamp packets in phc time.  Callers hold phc->lock so
 * the snapshot is consistent; they wait for completion after unlocking.
 */
static int ionic_setphc_cmd(struct ionic_phc *phc, struct ionic_admin_ctx *ctx)
{
    ctx->work = COMPLETION_INITIALIZER_ONSTACK(ctx->work);

    ctx->cmd.lif_setphc.opcode = IONIC_CMD_LIF_SETPHC;
    ctx->cmd.lif_setphc.lif_index = cpu_to_le16(phc->lif->index);

    ctx->cmd.lif_setphc.tick = cpu_to_le64(phc->tc.cycle_last);
    ctx->cmd.lif_setphc.nsec = cpu_to_le64(phc->tc.nsec);
    ctx->cmd.lif_setphc.frac = cpu_to_le64(phc->tc.frac);
    ctx->cmd.lif_setphc.mult = cpu_to_le32(phc->cc.mult);
    ctx->cmd.lif_setphc.shift = cpu_to_le32(phc->cc.shift);

    return ionic_adminq_post(phc->lif, ctx);
}
#ifdef HAVE_PTP_ADJFINE
/* PTP adjfine hook: apply a frequency adjustment given in scaled parts
 * per million (ppm << 16).  The adjustment is computed relative to the
 * initial multiplier to avoid cumulative drift, and the timecounter
 * basis is refreshed before changing the rate so past time stays
 * consistent.  The resulting state is pushed to the device via SETPHC.
 */
static int ionic_phc_adjfine(struct ptp_clock_info *info, long scaled_ppm)
{
    struct ionic_phc *phc = container_of(info, struct ionic_phc, ptp_info);
    struct ionic_admin_ctx ctx = {};
    unsigned long irqflags;
    s64 adj;
    int err;

    /* Reject phc adjustments during device upgrade */
    if (test_bit(IONIC_LIF_F_FW_RESET, phc->lif->state))
        return -EBUSY;

    /* Adjustment value scaled by 2^16 million */
    adj = (s64)scaled_ppm * phc->init_cc_mult;

    /* Adjustment value to scale */
    adj /= (s64)SCALED_PPM;

    /* Final adjusted multiplier */
    adj += phc->init_cc_mult;

    spin_lock_irqsave(&phc->lock, irqflags);

    /* update the point-in-time basis to now, before adjusting the rate */
    timecounter_read(&phc->tc);
    phc->cc.mult = adj;

    /* Setphc commands are posted in-order, sequenced by phc->lock. We
     * need to drop the lock before waiting for the command to complete.
     */
    err = ionic_setphc_cmd(phc, &ctx);
    spin_unlock_irqrestore(&phc->lock, irqflags);

    return ionic_adminq_wait(phc->lif, &ctx, err, true);
}
#endif
/* PTP adjtime hook: shift the phc time by delta nanoseconds and push
 * the new basis to the device.  Rejected with -EBUSY during FW reset.
 */
static int ionic_phc_adjtime(struct ptp_clock_info *info, s64 delta)
{
    struct ionic_phc *phc = container_of(info, struct ionic_phc, ptp_info);
    struct ionic_admin_ctx ctx = {};
    unsigned long flags;
    int err;

    /* Reject phc adjustments during device upgrade */
    if (test_bit(IONIC_LIF_F_FW_RESET, phc->lif->state))
        return -EBUSY;

    spin_lock_irqsave(&phc->lock, flags);
    timecounter_adjtime(&phc->tc, delta);
    /* Setphc commands are posted in-order, sequenced by phc->lock. We
     * need to drop the lock before waiting for the command to complete.
     */
    err = ionic_setphc_cmd(phc, &ctx);
    spin_unlock_irqrestore(&phc->lock, flags);

    return ionic_adminq_wait(phc->lif, &ctx, err, true);
}
/* PTP settime hook: restart the timecounter at the given wall time and
 * push the new basis to the device.  Rejected with -EBUSY during FW
 * reset.
 */
static int ionic_phc_settime64(struct ptp_clock_info *info,
                   const struct timespec64 *ts)
{
    struct ionic_phc *phc = container_of(info, struct ionic_phc, ptp_info);
    struct ionic_admin_ctx ctx = {};
    unsigned long flags;
    u64 nsecs;
    int err;

    /* Reject phc adjustments during device upgrade */
    if (test_bit(IONIC_LIF_F_FW_RESET, phc->lif->state))
        return -EBUSY;

    nsecs = timespec64_to_ns(ts);

    spin_lock_irqsave(&phc->lock, flags);
    timecounter_init(&phc->tc, &phc->cc, nsecs);
    /* Setphc commands are posted in-order, sequenced by phc->lock. We
     * need to drop the lock before waiting for the command to complete.
     */
    err = ionic_setphc_cmd(phc, &ctx);
    spin_unlock_irqrestore(&phc->lock, flags);

    return ionic_adminq_wait(phc->lif, &ctx, err, true);
}
/* PTP gettime hook: read the device tick counter, convert to phc time
 * via the timecounter, and return it as a timespec64.  The gettimex64
 * variant additionally captures bracketing system timestamps in sts.
 * Returns -EBUSY during FW reset rather than reading stale hardware.
 */
#ifdef HAVE_PHC_GETTIMEX64
static int ionic_phc_gettimex64(struct ptp_clock_info *info,
                struct timespec64 *ts,
                struct ptp_system_timestamp *sts)
#else
static int ionic_phc_gettime64(struct ptp_clock_info *info,
                   struct timespec64 *ts)
#endif
{
    struct ionic_phc *phc = container_of(info, struct ionic_phc, ptp_info);
    struct ionic *ionic = phc->lif->ionic;
    unsigned long irqflags;
    u64 tick, ns;

    /* Do not attempt to read device time during upgrade */
    if (test_bit(IONIC_LIF_F_FW_RESET, phc->lif->state))
        return -EBUSY;

    spin_lock_irqsave(&phc->lock, irqflags);

#ifdef HAVE_PHC_GETTIMEX64
    tick = ionic_hwstamp_read(ionic, sts);
#else
    tick = ionic_hwstamp_read(ionic);
#endif

    ns = timecounter_cyc2time(&phc->tc, tick);

    spin_unlock_irqrestore(&phc->lock, irqflags);

    *ts = ns_to_timespec64(ns);

    return 0;
}
/* Periodic PHC maintenance: refresh the timecounter's point-in-time
 * basis (before the tick counter can wrap past the safe window) and
 * mirror the new state to the device.  Returns the delay in jiffies
 * until the next invocation.
 */
static long ionic_phc_aux_work(struct ptp_clock_info *info)
{
    struct ionic_phc *phc = container_of(info, struct ionic_phc, ptp_info);
    struct ionic_admin_ctx ctx = {};
    unsigned long irqflags;
    int err;

    /* Do not update phc during device upgrade, but keep polling to resume
     * after upgrade. Since we don't update the point in time basis, there
     * is no expectation that we are maintaining the phc time during the
     * upgrade. After upgrade, it will need to be readjusted back to the
     * correct time by the ptp daemon.
     */
    if (test_bit(IONIC_LIF_F_FW_RESET, phc->lif->state))
        return phc->aux_work_delay;

    spin_lock_irqsave(&phc->lock, irqflags);

    /* update point-in-time basis to now */
    timecounter_read(&phc->tc);

    /* Setphc commands are posted in-order, sequenced by phc->lock. We
     * need to drop the lock before waiting for the command to complete.
     */
    err = ionic_setphc_cmd(phc, &ctx);
    spin_unlock_irqrestore(&phc->lock, irqflags);

    /* result intentionally ignored beyond the wait itself */
    ionic_adminq_wait(phc->lif, &ctx, err, true);

    return phc->aux_work_delay;
}
#ifndef HAVE_PTP_CLOCK_DO_AUX_WORK
/* Fallback for kernels without ptp do_aux_work: run the maintenance
 * from a self-rescheduling delayed work item.
 */
void ionic_phc_aux_work_helper(struct work_struct *work)
{
    struct ionic_phc *phc = container_of(work, struct ionic_phc, dwork.work);

    schedule_delayed_work(&phc->dwork, ionic_phc_aux_work(&phc->ptp_info));
}
#endif
/* Convert a raw device tick value to phc time as a ktime_t.
 * Returns 0 time when the LIF has no phc.
 */
ktime_t ionic_lif_phc_ktime(struct ionic_lif *lif, u64 tick)
{
    unsigned long flags;
    u64 nsecs;

    if (!lif->phc)
        return ktime_set(0, 0);

    spin_lock_irqsave(&lif->phc->lock, flags);
    nsecs = timecounter_cyc2time(&lif->phc->tc, tick);
    spin_unlock_irqrestore(&lif->phc->lock, flags);

    return ns_to_ktime(nsecs);
}
/* Template ptp_clock_info; copied into each phc at alloc time, where
 * max_adj is filled in (see ionic_lif_alloc_phc).  The #ifdefs select
 * the hook names available on the running kernel.
 */
static const struct ptp_clock_info ionic_ptp_info = {
    .owner = THIS_MODULE,
    .name = "ionic_ptp",
#ifdef HAVE_PTP_ADJFINE
    .adjfine = ionic_phc_adjfine,
#endif
    .adjtime = ionic_phc_adjtime,
#ifdef HAVE_PHC_GETTIMEX64
    .gettimex64 = ionic_phc_gettimex64,
#else
    .gettime64 = ionic_phc_gettime64,
#endif
    .settime64 = ionic_phc_settime64,
#ifdef HAVE_PTP_CLOCK_DO_AUX_WORK
    .do_aux_work = ionic_phc_aux_work,
#endif
};
/* Register the phc with the PTP subsystem and start the periodic
 * maintenance work.  Quietly does nothing if the LIF has no phc or the
 * device did not negotiate hardware timestamping; registration failure
 * is logged and leaves phc->ptp NULL.
 */
void ionic_lif_register_phc(struct ionic_lif *lif)
{
    if (!lif->phc || !(lif->hw_features & IONIC_ETH_HW_TIMESTAMP))
        return;

    lif->phc->ptp = ptp_clock_register(&lif->phc->ptp_info, lif->ionic->dev);

    if (IS_ERR(lif->phc->ptp)) {
        dev_warn(lif->ionic->dev, "Cannot register phc device: %ld\n",
             PTR_ERR(lif->phc->ptp));

        lif->phc->ptp = NULL;
    }

    /* braces are required here: the body of this 'if' is selected by a
     * preprocessor conditional, and an unbraced form is fragile against
     * either branch growing a second statement
     */
    if (lif->phc->ptp) {
#ifndef HAVE_PTP_CLOCK_DO_AUX_WORK
        schedule_delayed_work(&lif->phc->dwork, lif->phc->aux_work_delay);
#else
        ptp_schedule_worker(lif->phc->ptp, lif->phc->aux_work_delay);
#endif
    }
}
/* Stop the maintenance work and unregister the phc from the PTP
 * subsystem; phc->ptp is cleared so double-unregister is a no-op.
 */
void ionic_lif_unregister_phc(struct ionic_lif *lif)
{
    if (!lif->phc || !lif->phc->ptp)
        return;

#ifndef HAVE_PTP_CLOCK_DO_AUX_WORK
    cancel_delayed_work_sync(&lif->phc->dwork);
#endif
    ptp_clock_unregister(lif->phc->ptp);

    lif->phc->ptp = NULL;
}
/* Allocate and initialize the LIF's PTP hardware clock state.
 *
 * Reads the device-advertised cyclecounter parameters (mask/mult/shift),
 * derives a safe wrap window and update period, scales the multiplier up
 * for finer adjfine resolution, and seeds the timecounter from system
 * time.  Silently skips (lif->phc stays NULL) if the device has no
 * hwstamp registers or the timestamp feature is absent; allocation or
 * parameter failures likewise leave lif->phc NULL.
 */
void ionic_lif_alloc_phc(struct ionic_lif *lif)
{
    struct ionic *ionic = lif->ionic;
    struct ionic_phc *phc;
    u64 delay, diff, mult;
    u64 frac = 0;
    u64 features;
    u32 shift;

    if (!ionic->idev.hwstamp_regs)
        return;

    features = le64_to_cpu(ionic->ident.lif.eth.config.features);
    if (!(features & IONIC_ETH_HW_TIMESTAMP))
        return;

    phc = devm_kzalloc(ionic->dev, sizeof(*phc), GFP_KERNEL);
    if (!phc)
        return;

    phc->lif = lif;

#ifndef HAVE_PTP_CLOCK_DO_AUX_WORK
    INIT_DELAYED_WORK(&phc->dwork, ionic_phc_aux_work_helper);
#endif
    phc->cc.read = ionic_cc_read;
    phc->cc.mask = le64_to_cpu(ionic->ident.dev.hwstamp_mask);
    phc->cc.mult = le32_to_cpu(ionic->ident.dev.hwstamp_mult);
    phc->cc.shift = le32_to_cpu(ionic->ident.dev.hwstamp_shift);

    /* a zero multiplier would divide by zero below — bail out */
    if (!phc->cc.mult) {
        dev_err(lif->ionic->dev,
            "Invalid device PHC mask multiplier %u, disabling HW timestamp support\n",
            phc->cc.mult);
        devm_kfree(lif->ionic->dev, phc);
        lif->phc = NULL;
        return;
    }

    dev_dbg(lif->ionic->dev, "Device PHC mask %#llx mult %u shift %u\n",
        phc->cc.mask, phc->cc.mult, phc->cc.shift);

    spin_lock_init(&phc->lock);
    mutex_init(&phc->config_lock);

    /* max ticks is limited by the multiplier, or by the update period. */
    if (phc->cc.shift + 2 + ilog2(IONIC_PHC_UPDATE_NS) >= 64) {
        /* max ticks that do not overflow when multiplied by max
         * adjusted multiplier (twice the initial multiplier)
         */
        diff = U64_MAX / phc->cc.mult / 2;
    } else {
        /* approx ticks at four times the update period */
        diff = (u64)IONIC_PHC_UPDATE_NS << (phc->cc.shift + 2);
        diff = DIV_ROUND_UP(diff, phc->cc.mult);
    }

    /* transform to bitmask */
    diff |= diff >> 1;
    diff |= diff >> 2;
    diff |= diff >> 4;
    diff |= diff >> 8;
    diff |= diff >> 16;
    diff |= diff >> 32;

    /* constrain to the hardware bitmask, and use this as the bitmask */
    diff &= phc->cc.mask;
    phc->cc.mask = diff;

    /* the wrap period is now defined by diff (or phc->cc.mask)
     *
     * we will update the time basis at about 1/4 the wrap period, so
     * should not see a difference of more than +/- diff/4.
     *
     * this is sufficient not see a difference of more than +/- diff/2, as
     * required by timecounter_cyc2time, to detect an old time stamp.
     *
     * adjust the initial multiplier, being careful to avoid overflow:
     * - do not overflow 63 bits: init_cc_mult * SCALED_PPM
     * - do not overflow 64 bits: max_mult * (diff / 2)
     *
     * we want to increase the initial multiplier as much as possible, to
     * allow for more precise adjustment in ionic_phc_adjfine.
     *
     * only adjust the multiplier if we can double it or more.
     */
    mult = U64_MAX / 2 / max(diff / 2, SCALED_PPM);
    shift = mult / phc->cc.mult;
    if (shift >= 2) {
        /* initial multiplier will be 2^n of hardware cc.mult */
        shift = fls(shift);
        /* increase cc.mult and cc.shift by the same 2^n and n. */
        phc->cc.mult <<= shift;
        phc->cc.shift += shift;
    }

    dev_dbg(lif->ionic->dev, "Initial PHC mask %#llx mult %u shift %u\n",
        phc->cc.mask, phc->cc.mult, phc->cc.shift);

    /* frequency adjustments are relative to the initial multiplier */
    phc->init_cc_mult = phc->cc.mult;

    timecounter_init(&phc->tc, &phc->cc, ktime_get_real_ns());

    /* Update cycle_last at 1/4 the wrap period, or IONIC_PHC_UPDATE_NS */
    delay = min_t(u64, IONIC_PHC_UPDATE_NS,
              cyclecounter_cyc2ns(&phc->cc, diff / 4, 0, &frac));
    dev_dbg(lif->ionic->dev, "Work delay %llu ms\n", delay / NSEC_PER_MSEC);

    phc->aux_work_delay = nsecs_to_jiffies(delay);

    phc->ptp_info = ionic_ptp_info;

    /* We have allowed to adjust the multiplier up to +/- 1 part per 1.
     * Here expressed as NORMAL_PPB (1 billion parts per billion).
     */
    phc->ptp_info.max_adj = NORMAL_PPB;

    lif->phc = phc;
}
void ionic_lif_free_phc(struct ionic_lif *lif)
{
if (!lif->phc)
return;
mutex_destroy(&lif->phc->config_lock);
devm_kfree(lif->ionic->dev, lif->phc);
lif->phc = NULL;
}
#endif

View File

@ -0,0 +1,19 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2022 Pensando Systems, Inc */
#include <linux/errno.h>
#include <linux/kernel.h>
/* Minimal forward declarations so the stubs below compile without the
 * real PTP core headers.
 */
struct device;
struct ptp_clock;
struct ptp_clock_event;
struct ptp_clock_info;

/* Placeholder so stub signatures match the shape of the real PTP API */
enum ptp_pin_function { PTP_PIN_DUMMY };

/* Weak no-op stand-ins for the kernel PTP clock API.  Each reports
 * "no PTP support" (NULL / 0 / -1 / -EOPNOTSUPP) so callers degrade
 * gracefully when the real PTP implementation is absent; the __weak
 * linkage lets a real implementation override these at link time.
 */
__weak struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, struct device *parent) { return NULL; }
__weak int ptp_clock_unregister(struct ptp_clock *ptp) { return 0; }
__weak void ptp_clock_event(struct ptp_clock *ptp, struct ptp_clock_event *event) { }
__weak int ptp_clock_index(struct ptp_clock *ptp) { return -1; }
__weak int ptp_find_pin(struct ptp_clock *ptp, enum ptp_pin_function func, unsigned int chan) { return -1; }
__weak int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay) { return -EOPNOTSUPP; }
__weak void ptp_cancel_worker_sync(struct ptp_clock *ptp) { }

View File

@ -0,0 +1,619 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2022 Pensando Systems, Inc */
#include <linux/netdevice.h>
#include <linux/dynamic_debug.h>
#include <linux/etherdevice.h>
#include <linux/list.h>
#include "ionic.h"
#include "ionic_lif.h"
#include "ionic_rx_filter.h"
/* Unlink @f from both the by_id and by_hash lookup lists and release
 * its memory.  Callers hold lif->rx_filters.lock.
 */
void ionic_rx_filter_free(struct ionic_lif *lif, struct ionic_rx_filter *f)
{
	hlist_del(&f->by_id);
	hlist_del(&f->by_hash);
	devm_kfree(lif->ionic->dev, f);
}
/* Re-post every tracked rx filter to the FW, e.g. after a FW reset has
 * wiped the device's filter state.  Filters that fail to replay are
 * logged and dropped from local tracking; the survivors pick up the
 * new FW-assigned filter ids, and the by_id hash is rebuilt around
 * those new ids at the end.
 */
void ionic_rx_filter_replay(struct ionic_lif *lif)
{
	struct ionic_rx_filter_add_cmd *ac;
	struct hlist_head new_id_list;
	struct ionic_admin_ctx ctx;
	struct ionic_rx_filter *f;
	struct hlist_head *head;
	struct hlist_node *tmp;
	unsigned int key;
	unsigned int i;
	int err;

	INIT_HLIST_HEAD(&new_id_list);
	ac = &ctx.cmd.rx_filter_add;

	for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
		head = &lif->rx_filters.by_id[i];
		hlist_for_each_entry_safe(f, tmp, head, by_id) {
			/* rebuild the saved add command and post it */
			ctx.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work);
			memcpy(ac, &f->cmd, sizeof(f->cmd));
			dev_dbg(&lif->netdev->dev, "replay filter command:\n");
			dynamic_hex_dump("cmd ", DUMP_PREFIX_OFFSET, 16, 1,
					 &ctx.cmd, sizeof(ctx.cmd), true);

			err = ionic_adminq_post_wait(lif, &ctx);
			if (err) {
				/* replay failed: log by match type, then drop it */
				switch (le16_to_cpu(ac->match)) {
				case IONIC_RX_FILTER_MATCH_VLAN:
					netdev_info(lif->netdev, "Replay failed - %d: vlan %d\n",
						    err,
						    le16_to_cpu(ac->vlan.vlan));
					break;
				case IONIC_RX_FILTER_MATCH_MAC:
					netdev_info(lif->netdev, "Replay failed - %d: mac %pM\n",
						    err, ac->mac.addr);
					break;
				case IONIC_RX_FILTER_MATCH_MAC_VLAN:
					netdev_info(lif->netdev, "Replay failed - %d: vlan %d mac %pM\n",
						    err,
						    le16_to_cpu(ac->vlan.vlan),
						    ac->mac.addr);
					break;
				}
				spin_lock_bh(&lif->rx_filters.lock);
				ionic_rx_filter_free(lif, f);
				spin_unlock_bh(&lif->rx_filters.lock);

				continue;
			}

			/* remove from old id list, save new id in tmp list */
			spin_lock_bh(&lif->rx_filters.lock);
			hlist_del(&f->by_id);
			spin_unlock_bh(&lif->rx_filters.lock);
			f->filter_id = le32_to_cpu(ctx.comp.rx_filter_add.filter_id);
			hlist_add_head(&f->by_id, &new_id_list);
		}
	}

	/* rebuild the by_id hash lists with the new filter ids */
	spin_lock_bh(&lif->rx_filters.lock);
	hlist_for_each_entry_safe(f, tmp, &new_id_list, by_id) {
		key = f->filter_id & IONIC_RX_FILTER_HLISTS_MASK;
		head = &lif->rx_filters.by_id[key];
		hlist_add_head(&f->by_id, head);
	}
	spin_unlock_bh(&lif->rx_filters.lock);
}
int ionic_rx_filters_init(struct ionic_lif *lif)
{
unsigned int i;
spin_lock_init(&lif->rx_filters.lock);
spin_lock_bh(&lif->rx_filters.lock);
for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
INIT_HLIST_HEAD(&lif->rx_filters.by_hash[i]);
INIT_HLIST_HEAD(&lif->rx_filters.by_id[i]);
}
spin_unlock_bh(&lif->rx_filters.lock);
return 0;
}
/* Free every filter still tracked in the by_id table (freeing also
 * unlinks the by_hash side), leaving both tables empty.
 */
void ionic_rx_filters_deinit(struct ionic_lif *lif)
{
	struct ionic_rx_filter *f;
	struct hlist_node *tmp;
	unsigned int slot;

	spin_lock_bh(&lif->rx_filters.lock);
	for (slot = 0; slot < IONIC_RX_FILTER_HLISTS; slot++) {
		hlist_for_each_entry_safe(f, tmp,
					  &lif->rx_filters.by_id[slot], by_id)
			ionic_rx_filter_free(lif, f);
	}
	spin_unlock_bh(&lif->rx_filters.lock);
}
/* Save a filter add command in the local tracking lists, linked both
 * by a hash of its match value and by the FW-assigned filter id taken
 * from the completion in @ctx.
 *
 * If an equivalent filter already exists it is unlinked and refreshed
 * in place.  Called with lif->rx_filters.lock held (hence the
 * GFP_ATOMIC allocation).
 *
 * Returns 0 on success, -EINVAL for an unknown match type, -ENOMEM on
 * allocation failure.
 */
int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index,
			 u32 hash, struct ionic_admin_ctx *ctx,
			 enum ionic_filter_state state)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_rx_filter_add_cmd *ac;
	struct ionic_rx_filter *f = NULL;
	struct hlist_head *head;
	unsigned int key;

	ac = &ctx->cmd.rx_filter_add;

	/* choose the hash key, and look up any existing duplicate */
	switch (le16_to_cpu(ac->match)) {
	case IONIC_RX_FILTER_MATCH_VLAN:
		key = le16_to_cpu(ac->vlan.vlan);
		f = ionic_rx_filter_by_vlan(lif, le16_to_cpu(ac->vlan.vlan));
		break;
	case IONIC_RX_FILTER_MATCH_MAC:
		key = *(u32 *)ac->mac.addr; /* first 4 bytes of the MAC */
		f = ionic_rx_filter_by_addr(lif, ac->mac.addr);
		break;
	case IONIC_RX_FILTER_MATCH_MAC_VLAN:
		key = le16_to_cpu(ac->mac_vlan.vlan);
		break;
	case IONIC_RX_FILTER_STEER_PKTCLASS:
		key = 0;
		break;
	default:
		return -EINVAL;
	}

	if (f) {
		/* remove from current linking so we can refresh it */
		hlist_del(&f->by_id);
		hlist_del(&f->by_hash);
	} else {
		f = devm_kzalloc(dev, sizeof(*f), GFP_ATOMIC);
		if (!f)
			return -ENOMEM;
	}

	f->flow_id = flow_id;
	f->filter_id = le32_to_cpu(ctx->comp.rx_filter_add.filter_id);
	f->state = state;
	f->rxq_index = rxq_index;
	memcpy(&f->cmd, ac, sizeof(f->cmd)); /* keep the cmd for replay */
	netdev_dbg(lif->netdev, "rx_filter add filter_id %d\n", f->filter_id);

	INIT_HLIST_NODE(&f->by_hash);
	INIT_HLIST_NODE(&f->by_id);

	/* link by hash of the match value ... */
	key = hash_32(key, IONIC_RX_FILTER_HASH_BITS);
	head = &lif->rx_filters.by_hash[key];
	hlist_add_head(&f->by_hash, head);

	/* ... and by the FW filter id */
	key = f->filter_id & IONIC_RX_FILTER_HLISTS_MASK;
	head = &lif->rx_filters.by_id[key];
	hlist_add_head(&f->by_id, head);

	return 0;
}
/* Look up a tracked VLAN filter by vlan id; returns NULL if not found.
 * Callers hold lif->rx_filters.lock.
 */
struct ionic_rx_filter *ionic_rx_filter_by_vlan(struct ionic_lif *lif, u16 vid)
{
	struct ionic_rx_filter *cur;
	struct hlist_head *bucket;
	unsigned int slot;

	slot = hash_32(vid, IONIC_RX_FILTER_HASH_BITS);
	bucket = &lif->rx_filters.by_hash[slot];
	hlist_for_each_entry(cur, bucket, by_hash) {
		if (le16_to_cpu(cur->cmd.match) == IONIC_RX_FILTER_MATCH_VLAN &&
		    le16_to_cpu(cur->cmd.vlan.vlan) == vid)
			return cur;
	}

	return NULL;
}
/* Look up a tracked MAC filter by address; returns NULL if not found.
 * Only the first 4 bytes feed the hash, the full address is compared.
 * Callers hold lif->rx_filters.lock.
 */
struct ionic_rx_filter *ionic_rx_filter_by_addr(struct ionic_lif *lif,
						const u8 *addr)
{
	struct ionic_rx_filter *cur;
	struct hlist_head *bucket;
	unsigned int slot;

	slot = hash_32(*(u32 *)addr, IONIC_RX_FILTER_HASH_BITS);
	bucket = &lif->rx_filters.by_hash[slot];
	hlist_for_each_entry(cur, bucket, by_hash) {
		if (le16_to_cpu(cur->cmd.match) == IONIC_RX_FILTER_MATCH_MAC &&
		    !memcmp(addr, cur->cmd.mac.addr, ETH_ALEN))
			return cur;
	}

	return NULL;
}
/* Find the packet-class steering filter, if one is tracked.
 * Steering filters hash with key 0.  Returns NULL if none exists.
 */
struct ionic_rx_filter *ionic_rx_filter_rxsteer(struct ionic_lif *lif)
{
	struct ionic_rx_filter *cur;
	struct hlist_head *bucket;

	bucket = &lif->rx_filters.by_hash[hash_32(0, IONIC_RX_FILTER_HASH_BITS)];
	hlist_for_each_entry(cur, bucket, by_hash) {
		if (le16_to_cpu(cur->cmd.match) == IONIC_RX_FILTER_STEER_PKTCLASS)
			return cur;
	}

	return NULL;
}
static struct ionic_rx_filter *ionic_rx_filter_find(struct ionic_lif *lif,
struct ionic_rx_filter_add_cmd *ac)
{
switch (le16_to_cpu(ac->match)) {
case IONIC_RX_FILTER_MATCH_VLAN:
return ionic_rx_filter_by_vlan(lif, le16_to_cpu(ac->vlan.vlan));
case IONIC_RX_FILTER_MATCH_MAC:
return ionic_rx_filter_by_addr(lif, ac->mac.addr);
default:
netdev_err(lif->netdev, "unsupported filter match %d",
le16_to_cpu(ac->match));
return NULL;
}
}
/* Record a MAC address add (mode == ADD_ADDR) or delete (mode == DEL_ADDR)
 * in the local filter list only; the actual device update is deferred
 * by raising IONIC_LIF_F_FILTER_SYNC_NEEDED for the next sync pass.
 *
 * Returns 0 on success, -ENOENT when deleting an unknown address, or
 * a negative errno if a new entry couldn't be saved.
 */
int ionic_lif_list_addr(struct ionic_lif *lif, const u8 *addr, bool mode)
{
	struct ionic_rx_filter *f;
	int err;

	spin_lock_bh(&lif->rx_filters.lock);

	f = ionic_rx_filter_by_addr(lif, addr);
	if (mode == ADD_ADDR && !f) {
		/* not yet known: save in NEW state for the sync to push */
		struct ionic_admin_ctx ctx = {
			.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
			.cmd.rx_filter_add = {
				.opcode = IONIC_CMD_RX_FILTER_ADD,
				.lif_index = cpu_to_le16(lif->index),
				.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
			},
		};

		memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN);
		err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
					   IONIC_FILTER_STATE_NEW);
		if (err) {
			spin_unlock_bh(&lif->rx_filters.lock);
			return err;
		}

	} else if (mode == ADD_ADDR && f) {
		/* already known: if a delete was pending, just revive it */
		if (f->state == IONIC_FILTER_STATE_OLD)
			f->state = IONIC_FILTER_STATE_SYNCED;

	} else if (mode == DEL_ADDR && f) {
		/* a NEW filter never reached the FW, drop it locally;
		 * a SYNCED one is marked OLD so the sync deletes it
		 */
		if (f->state == IONIC_FILTER_STATE_NEW)
			ionic_rx_filter_free(lif, f);
		else if (f->state == IONIC_FILTER_STATE_SYNCED)
			f->state = IONIC_FILTER_STATE_OLD;

	} else if (mode == DEL_ADDR && !f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return -ENOENT;
	}

	spin_unlock_bh(&lif->rx_filters.lock);

	set_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state);

	return 0;
}
/* Add a filter to the FW and to our local tracking list.
 *
 * If the filter is already known and SYNCED there is nothing to do.
 * When the FW add fails with a recoverable error the filter is kept
 * locally in NEW state so a later sync attempt can retry it.  On
 * success the per-type counters (nvlans / nucast / nmcast) are bumped
 * and the filter is saved SYNCED (or OLD if a delete raced with us).
 *
 * Returns 0 on success or when the failure will be retried later,
 * otherwise a negative errno.
 */
static int ionic_lif_filter_add(struct ionic_lif *lif,
				struct ionic_rx_filter_add_cmd *ac)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
	};
	struct ionic_rx_filter *f;
	int nfilters;
	int err = 0;

	ctx.cmd.rx_filter_add = *ac;
	/* fix: these assignments previously ended in comma operators,
	 * silently chaining into the statements below; use proper
	 * semicolon-terminated statements
	 */
	ctx.cmd.rx_filter_add.opcode = IONIC_CMD_RX_FILTER_ADD;
	ctx.cmd.rx_filter_add.lif_index = cpu_to_le16(lif->index);

	spin_lock_bh(&lif->rx_filters.lock);
	f = ionic_rx_filter_find(lif, &ctx.cmd.rx_filter_add);
	if (f) {
		/* don't bother if we already have it and it is sync'd */
		if (f->state == IONIC_FILTER_STATE_SYNCED) {
			spin_unlock_bh(&lif->rx_filters.lock);
			return 0;
		}

		/* mark preemptively as sync'd to block any parallel attempts */
		f->state = IONIC_FILTER_STATE_SYNCED;
	} else {
		/* save as SYNCED to catch any DEL requests while processing */
		err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
					   IONIC_FILTER_STATE_SYNCED);
	}
	spin_unlock_bh(&lif->rx_filters.lock);
	if (err)
		return err;

	/* Don't bother with the write to FW if we know there's no room,
	 * we can try again on the next sync attempt.
	 * Since the FW doesn't have a way to tell us the vlan limit,
	 * we start max_vlans at 0 until we hit the ENOSPC error.
	 */
	switch (le16_to_cpu(ctx.cmd.rx_filter_add.match)) {
	case IONIC_RX_FILTER_MATCH_VLAN:
		netdev_dbg(lif->netdev, "%s: rx_filter add VLAN %d\n",
			   __func__, ctx.cmd.rx_filter_add.vlan.vlan);
		if (lif->max_vlans && lif->nvlans >= lif->max_vlans)
			err = -ENOSPC;
		break;
	case IONIC_RX_FILTER_MATCH_MAC:
		netdev_dbg(lif->netdev, "%s: rx_filter add ADDR %pM\n",
			   __func__, ctx.cmd.rx_filter_add.mac.addr);
		nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);
		if ((lif->nucast + lif->nmcast) >= nfilters)
			err = -ENOSPC;
		break;
	}

	if (err != -ENOSPC)
		err = ionic_adminq_post_wait_nomsg(lif, &ctx);

	spin_lock_bh(&lif->rx_filters.lock);

	if (err && err != -EEXIST) {
		/* set the state back to NEW so we can try again later */
		f = ionic_rx_filter_find(lif, &ctx.cmd.rx_filter_add);
		if (f && f->state == IONIC_FILTER_STATE_SYNCED) {
			f->state = IONIC_FILTER_STATE_NEW;

			/* If -ENOSPC we won't waste time trying to sync again
			 * until there is a delete that might make room
			 */
			if (err != -ENOSPC)
				set_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED,
					lif->state);
		}

		spin_unlock_bh(&lif->rx_filters.lock);

		/* store the max_vlans limit that we found */
		if (err == -ENOSPC &&
		    le16_to_cpu(ctx.cmd.rx_filter_add.match) == IONIC_RX_FILTER_MATCH_VLAN)
			lif->max_vlans = lif->nvlans;

		/* Prevent unnecessary error messages on recoverable
		 * errors as the filter will get retried on the next
		 * sync attempt.
		 */
		switch (err) {
		case -ENOSPC:
		case -ENXIO:
		case -ETIMEDOUT:
		case -EAGAIN:
		case -EBUSY:
			return 0;
		default:
			break;
		}

		ionic_adminq_netdev_err_print(lif, ctx.cmd.cmd.opcode,
					      ctx.comp.comp.status, err);
		switch (le16_to_cpu(ctx.cmd.rx_filter_add.match)) {
		case IONIC_RX_FILTER_MATCH_VLAN:
			netdev_info(lif->netdev, "rx_filter add failed: VLAN %d\n",
				    ctx.cmd.rx_filter_add.vlan.vlan);
			break;
		case IONIC_RX_FILTER_MATCH_MAC:
			netdev_info(lif->netdev, "rx_filter add failed: ADDR %pM\n",
				    ctx.cmd.rx_filter_add.mac.addr);
			break;
		}

		return err;
	}

	/* the FW accepted it: account for it by type */
	switch (le16_to_cpu(ctx.cmd.rx_filter_add.match)) {
	case IONIC_RX_FILTER_MATCH_VLAN:
		lif->nvlans++;
		break;
	case IONIC_RX_FILTER_MATCH_MAC:
		if (is_multicast_ether_addr(ctx.cmd.rx_filter_add.mac.addr))
			lif->nmcast++;
		else
			lif->nucast++;
		break;
	}

	f = ionic_rx_filter_find(lif, &ctx.cmd.rx_filter_add);
	if (f && f->state == IONIC_FILTER_STATE_OLD) {
		/* Someone requested a delete while we were adding
		 * so update the filter info with the results from the add
		 * and the data will be there for the delete on the next
		 * sync cycle.
		 */
		err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
					   IONIC_FILTER_STATE_OLD);
	} else {
		err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
					   IONIC_FILTER_STATE_SYNCED);
	}

	spin_unlock_bh(&lif->rx_filters.lock);

	return err;
}
/* Push a unicast/multicast MAC filter for @addr to the FW and track it. */
int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
{
	struct ionic_rx_filter_add_cmd ac = {
		.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
	};

	ether_addr_copy(ac.mac.addr, addr);

	return ionic_lif_filter_add(lif, &ac);
}
/* Push a VLAN filter for @vid to the FW and track it. */
int ionic_lif_vlan_add(struct ionic_lif *lif, const u16 vid)
{
	struct ionic_rx_filter_add_cmd ac = {
		.vlan.vlan = cpu_to_le16(vid),
		.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN),
	};

	return ionic_lif_filter_add(lif, &ac);
}
/* Remove a filter from local tracking and, unless it only ever existed
 * locally (state NEW), tell the FW to delete it too.  The per-type
 * counters are decremented on the way out.  Benign or transient FW
 * errors are ignored; anything else is logged and returned.
 *
 * Returns 0 on success or ignored error, -ENOENT if the filter is not
 * tracked, else a negative errno from the adminq post.
 */
static int ionic_lif_filter_del(struct ionic_lif *lif,
				struct ionic_rx_filter_add_cmd *ac)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_del = {
			.opcode = IONIC_CMD_RX_FILTER_DEL,
			.lif_index = cpu_to_le16(lif->index),
		},
	};
	struct ionic_rx_filter *f;
	int state;
	int err;

	spin_lock_bh(&lif->rx_filters.lock);
	f = ionic_rx_filter_find(lif, ac);
	if (!f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return -ENOENT;
	}

	switch (le16_to_cpu(ac->match)) {
	case IONIC_RX_FILTER_MATCH_VLAN:
		netdev_dbg(lif->netdev, "%s: rx_filter del VLAN %d id %d\n",
			   __func__, ac->vlan.vlan, f->filter_id);
		lif->nvlans--;
		break;
	case IONIC_RX_FILTER_MATCH_MAC:
		netdev_dbg(lif->netdev, "%s: rx_filter del ADDR %pM id %d\n",
			   __func__, ac->mac.addr, f->filter_id);
		/* the counter checks guard against underflow */
		if (is_multicast_ether_addr(ac->mac.addr) && lif->nmcast)
			lif->nmcast--;
		else if (!is_multicast_ether_addr(ac->mac.addr) && lif->nucast)
			lif->nucast--;
		break;
	}

	/* capture what the FW delete needs, then drop the local entry
	 * before posting so the lock isn't held across the wait
	 */
	state = f->state;
	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
	ionic_rx_filter_free(lif, f);
	spin_unlock_bh(&lif->rx_filters.lock);

	/* a NEW filter was never pushed to the FW: nothing to delete there */
	if (state != IONIC_FILTER_STATE_NEW) {
		err = ionic_adminq_post_wait_nomsg(lif, &ctx);

		switch (err) {
		/* ignore these errors */
		case -EEXIST:
		case -ENXIO:
		case -ETIMEDOUT:
		case -EAGAIN:
		case -EBUSY:
		case 0:
			break;
		default:
			ionic_adminq_netdev_err_print(lif, ctx.cmd.cmd.opcode,
						      ctx.comp.comp.status, err);
			return err;
		}
	}

	return 0;
}
/* Remove the MAC filter for @addr from the FW and local tracking. */
int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
{
	struct ionic_rx_filter_add_cmd ac = {
		.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
	};

	ether_addr_copy(ac.mac.addr, addr);

	return ionic_lif_filter_del(lif, &ac);
}
/* Remove the VLAN filter for @vid from the FW and local tracking. */
int ionic_lif_vlan_del(struct ionic_lif *lif, const u16 vid)
{
	struct ionic_rx_filter_add_cmd ac = {
		.vlan.vlan = cpu_to_le16(vid),
		.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN),
	};

	return ionic_lif_filter_del(lif, &ac);
}
/* Scratch copy of one filter, used by ionic_rx_filter_sync() so the
 * pending adds/deletes can be replayed without holding the
 * rx_filters spinlock.
 */
struct sync_item {
	struct list_head list;
	struct ionic_rx_filter f;
};
/* Bring the FW's filter set in line with the local list: filters in
 * NEW state get added to the FW, filters in OLD state get deleted.
 * Candidates are first copied into local lists under the spinlock,
 * then the adds/deletes are posted lock-free.  Entries whose post
 * fails stay NEW/OLD and are retried on the next sync.
 */
void ionic_rx_filter_sync(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	struct list_head sync_add_list;
	struct list_head sync_del_list;
	struct sync_item *sync_item;
	struct ionic_rx_filter *f;
	struct hlist_head *head;
	struct hlist_node *tmp;
	struct sync_item *spos;
	unsigned int i;

	INIT_LIST_HEAD(&sync_add_list);
	INIT_LIST_HEAD(&sync_del_list);

	clear_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state);

	/* Copy the filters to be added and deleted
	 * into a separate local list that needs no locking.
	 */
	spin_lock_bh(&lif->rx_filters.lock);
	for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
		head = &lif->rx_filters.by_id[i];
		hlist_for_each_entry_safe(f, tmp, head, by_id) {
			if (f->state == IONIC_FILTER_STATE_NEW ||
			    f->state == IONIC_FILTER_STATE_OLD) {
				sync_item = devm_kzalloc(dev, sizeof(*sync_item),
							 GFP_ATOMIC);
				if (!sync_item)
					/* OOM: sync what we gathered so far */
					goto loop_out;

				sync_item->f = *f;

				if (f->state == IONIC_FILTER_STATE_NEW)
					list_add(&sync_item->list, &sync_add_list);
				else
					list_add(&sync_item->list, &sync_del_list);
			}
		}
	}
loop_out:
	spin_unlock_bh(&lif->rx_filters.lock);

	/* If the add or delete fails, it won't get marked as sync'd
	 * and will be tried again in the next sync action.
	 * Do the deletes first in case we're in an overflow state and
	 * they can clear room for some new filters
	 */
	list_for_each_entry_safe(sync_item, spos, &sync_del_list, list) {
		(void)ionic_lif_filter_del(lif, &sync_item->f.cmd);

		list_del(&sync_item->list);
		devm_kfree(dev, sync_item);
	}

	list_for_each_entry_safe(sync_item, spos, &sync_add_list, list) {
		(void)ionic_lif_filter_add(lif, &sync_item->f.cmd);

		list_del(&sync_item->list);
		devm_kfree(dev, sync_item);
	}
}

View File

@ -0,0 +1,50 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2017 - 2022 Pensando Systems, Inc */

#ifndef _IONIC_RX_FILTER_H_
#define _IONIC_RX_FILTER_H_

/* "any queue" sentinel for rxq_index when a filter isn't queue-steered */
#define IONIC_RXQ_INDEX_ANY (0xFFFF)

/* Filter lifecycle relative to the FW:
 *   SYNCED - the FW knows about this filter
 *   NEW    - local only; to be added to the FW on the next sync
 *   OLD    - delete requested; to be removed from the FW on the next sync
 */
enum ionic_filter_state {
	IONIC_FILTER_STATE_SYNCED,
	IONIC_FILTER_STATE_NEW,
	IONIC_FILTER_STATE_OLD,
};

/* One tracked rx filter, linked into both rx_filters hash tables */
struct ionic_rx_filter {
	u32 flow_id;
	u32 filter_id;				/* id assigned by the FW */
	u16 rxq_index;
	enum ionic_filter_state state;
	struct ionic_rx_filter_add_cmd cmd;	/* saved add cmd, for replay */
	struct hlist_node by_hash;
	struct hlist_node by_id;
};

#define IONIC_RX_FILTER_HASH_BITS 10
#define IONIC_RX_FILTER_HLISTS BIT(IONIC_RX_FILTER_HASH_BITS)
#define IONIC_RX_FILTER_HLISTS_MASK (IONIC_RX_FILTER_HLISTS - 1)

struct ionic_rx_filters {
	spinlock_t lock; /* filter list lock */
	struct hlist_head by_hash[IONIC_RX_FILTER_HLISTS]; /* by skb hash */
	struct hlist_head by_id[IONIC_RX_FILTER_HLISTS]; /* by filter_id */
};

void ionic_rx_filter_free(struct ionic_lif *lif, struct ionic_rx_filter *f);
void ionic_rx_filter_replay(struct ionic_lif *lif);
int ionic_rx_filters_init(struct ionic_lif *lif);
void ionic_rx_filters_deinit(struct ionic_lif *lif);
int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index,
			 u32 hash, struct ionic_admin_ctx *ctx,
			 enum ionic_filter_state state);
struct ionic_rx_filter *ionic_rx_filter_by_vlan(struct ionic_lif *lif, u16 vid);
struct ionic_rx_filter *ionic_rx_filter_by_addr(struct ionic_lif *lif, const u8 *addr);
struct ionic_rx_filter *ionic_rx_filter_rxsteer(struct ionic_lif *lif);
void ionic_rx_filter_sync(struct ionic_lif *lif);
int ionic_lif_list_addr(struct ionic_lif *lif, const u8 *addr, bool mode);
int ionic_rx_filters_need_sync(struct ionic_lif *lif);
int ionic_lif_vlan_add(struct ionic_lif *lif, const u16 vid);
int ionic_lif_vlan_del(struct ionic_lif *lif, const u16 vid);
#endif /* _IONIC_RX_FILTER_H_ */

View File

@ -0,0 +1,573 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2022 Pensando Systems, Inc */
#include <linux/kernel.h>
#include <linux/mutex.h>
#include "ionic.h"
#include "ionic_lif.h"
#include "ionic_stats.h"
/* ethtool descriptors for the per-lif software stats
 * (struct ionic_lif_sw_stats)
 */
static const struct ionic_stat_desc ionic_lif_stats_desc[] = {
	IONIC_LIF_STAT_DESC(tx_packets),
	IONIC_LIF_STAT_DESC(tx_bytes),
	IONIC_LIF_STAT_DESC(rx_packets),
	IONIC_LIF_STAT_DESC(rx_bytes),
	IONIC_LIF_STAT_DESC(tx_tso),
	IONIC_LIF_STAT_DESC(tx_tso_bytes),
	IONIC_LIF_STAT_DESC(tx_csum_none),
	IONIC_LIF_STAT_DESC(tx_csum),
	IONIC_LIF_STAT_DESC(rx_csum_none),
	IONIC_LIF_STAT_DESC(rx_csum_complete),
	IONIC_LIF_STAT_DESC(rx_csum_error),
	IONIC_LIF_STAT_DESC(hw_tx_dropped),
	IONIC_LIF_STAT_DESC(hw_rx_dropped),
	IONIC_LIF_STAT_DESC(hw_rx_over_errors),
	IONIC_LIF_STAT_DESC(hw_rx_missed_errors),
	IONIC_LIF_STAT_DESC(hw_tx_aborted_errors),
};
/* ethtool descriptors for the device's physical port stats;
 * used for non-mgmt NICs (see ionic_sw_stats_get_count())
 */
static const struct ionic_stat_desc ionic_port_stats_desc[] = {
	IONIC_PORT_STAT_DESC(frames_rx_ok),
	IONIC_PORT_STAT_DESC(frames_rx_all),
	IONIC_PORT_STAT_DESC(frames_rx_bad_fcs),
	IONIC_PORT_STAT_DESC(frames_rx_bad_all),
	IONIC_PORT_STAT_DESC(octets_rx_ok),
	IONIC_PORT_STAT_DESC(octets_rx_all),
	IONIC_PORT_STAT_DESC(frames_rx_unicast),
	IONIC_PORT_STAT_DESC(frames_rx_multicast),
	IONIC_PORT_STAT_DESC(frames_rx_broadcast),
	IONIC_PORT_STAT_DESC(frames_rx_pause),
	IONIC_PORT_STAT_DESC(frames_rx_bad_length),
	IONIC_PORT_STAT_DESC(frames_rx_undersized),
	IONIC_PORT_STAT_DESC(frames_rx_oversized),
	IONIC_PORT_STAT_DESC(frames_rx_fragments),
	IONIC_PORT_STAT_DESC(frames_rx_jabber),
	IONIC_PORT_STAT_DESC(frames_rx_pripause),
	IONIC_PORT_STAT_DESC(frames_rx_stomped_crc),
	IONIC_PORT_STAT_DESC(frames_rx_too_long),
	IONIC_PORT_STAT_DESC(frames_rx_vlan_good),
	IONIC_PORT_STAT_DESC(frames_rx_dropped),
	IONIC_PORT_STAT_DESC(frames_rx_less_than_64b),
	IONIC_PORT_STAT_DESC(frames_rx_64b),
	IONIC_PORT_STAT_DESC(frames_rx_65b_127b),
	IONIC_PORT_STAT_DESC(frames_rx_128b_255b),
	IONIC_PORT_STAT_DESC(frames_rx_256b_511b),
	IONIC_PORT_STAT_DESC(frames_rx_512b_1023b),
	IONIC_PORT_STAT_DESC(frames_rx_1024b_1518b),
	IONIC_PORT_STAT_DESC(frames_rx_1519b_2047b),
	IONIC_PORT_STAT_DESC(frames_rx_2048b_4095b),
	IONIC_PORT_STAT_DESC(frames_rx_4096b_8191b),
	IONIC_PORT_STAT_DESC(frames_rx_8192b_9215b),
	IONIC_PORT_STAT_DESC(frames_rx_other),
	IONIC_PORT_STAT_DESC(frames_tx_ok),
	IONIC_PORT_STAT_DESC(frames_tx_all),
	IONIC_PORT_STAT_DESC(frames_tx_bad),
	IONIC_PORT_STAT_DESC(octets_tx_ok),
	IONIC_PORT_STAT_DESC(octets_tx_total),
	IONIC_PORT_STAT_DESC(frames_tx_unicast),
	IONIC_PORT_STAT_DESC(frames_tx_multicast),
	IONIC_PORT_STAT_DESC(frames_tx_broadcast),
	IONIC_PORT_STAT_DESC(frames_tx_pause),
	IONIC_PORT_STAT_DESC(frames_tx_pripause),
	IONIC_PORT_STAT_DESC(frames_tx_vlan),
	IONIC_PORT_STAT_DESC(frames_tx_less_than_64b),
	IONIC_PORT_STAT_DESC(frames_tx_64b),
	IONIC_PORT_STAT_DESC(frames_tx_65b_127b),
	IONIC_PORT_STAT_DESC(frames_tx_128b_255b),
	IONIC_PORT_STAT_DESC(frames_tx_256b_511b),
	IONIC_PORT_STAT_DESC(frames_tx_512b_1023b),
	IONIC_PORT_STAT_DESC(frames_tx_1024b_1518b),
	IONIC_PORT_STAT_DESC(frames_tx_1519b_2047b),
	IONIC_PORT_STAT_DESC(frames_tx_2048b_4095b),
	IONIC_PORT_STAT_DESC(frames_tx_4096b_8191b),
	IONIC_PORT_STAT_DESC(frames_tx_8192b_9215b),
	IONIC_PORT_STAT_DESC(frames_tx_other),
	IONIC_PORT_STAT_DESC(frames_tx_pri_0),
	IONIC_PORT_STAT_DESC(frames_tx_pri_1),
	IONIC_PORT_STAT_DESC(frames_tx_pri_2),
	IONIC_PORT_STAT_DESC(frames_tx_pri_3),
	IONIC_PORT_STAT_DESC(frames_tx_pri_4),
	IONIC_PORT_STAT_DESC(frames_tx_pri_5),
	IONIC_PORT_STAT_DESC(frames_tx_pri_6),
	IONIC_PORT_STAT_DESC(frames_tx_pri_7),
	IONIC_PORT_STAT_DESC(frames_rx_pri_0),
	IONIC_PORT_STAT_DESC(frames_rx_pri_1),
	IONIC_PORT_STAT_DESC(frames_rx_pri_2),
	IONIC_PORT_STAT_DESC(frames_rx_pri_3),
	IONIC_PORT_STAT_DESC(frames_rx_pri_4),
	IONIC_PORT_STAT_DESC(frames_rx_pri_5),
	IONIC_PORT_STAT_DESC(frames_rx_pri_6),
	IONIC_PORT_STAT_DESC(frames_rx_pri_7),
	IONIC_PORT_STAT_DESC(tx_pripause_0_1us_count),
	IONIC_PORT_STAT_DESC(tx_pripause_1_1us_count),
	IONIC_PORT_STAT_DESC(tx_pripause_2_1us_count),
	IONIC_PORT_STAT_DESC(tx_pripause_3_1us_count),
	IONIC_PORT_STAT_DESC(tx_pripause_4_1us_count),
	IONIC_PORT_STAT_DESC(tx_pripause_5_1us_count),
	IONIC_PORT_STAT_DESC(tx_pripause_6_1us_count),
	IONIC_PORT_STAT_DESC(tx_pripause_7_1us_count),
	IONIC_PORT_STAT_DESC(rx_pripause_0_1us_count),
	IONIC_PORT_STAT_DESC(rx_pripause_1_1us_count),
	IONIC_PORT_STAT_DESC(rx_pripause_2_1us_count),
	IONIC_PORT_STAT_DESC(rx_pripause_3_1us_count),
	IONIC_PORT_STAT_DESC(rx_pripause_4_1us_count),
	IONIC_PORT_STAT_DESC(rx_pripause_5_1us_count),
	IONIC_PORT_STAT_DESC(rx_pripause_6_1us_count),
	IONIC_PORT_STAT_DESC(rx_pripause_7_1us_count),
	IONIC_PORT_STAT_DESC(rx_pause_1us_count),
	IONIC_PORT_STAT_DESC(frames_tx_truncated),
};
/* ethtool descriptors for the mgmt NIC's port stats; used instead of
 * ionic_port_stats_desc when lif->ionic->is_mgmt_nic is set
 */
static const struct ionic_stat_desc ionic_mgmt_port_stats_desc[] = {
	IONIC_MGMT_PORT_STAT_DESC(frames_rx_ok),
	IONIC_MGMT_PORT_STAT_DESC(frames_rx_all),
	IONIC_MGMT_PORT_STAT_DESC(frames_rx_bad_fcs),
	IONIC_MGMT_PORT_STAT_DESC(frames_rx_bad_all),
	IONIC_MGMT_PORT_STAT_DESC(octets_rx_ok),
	IONIC_MGMT_PORT_STAT_DESC(octets_rx_all),
	IONIC_MGMT_PORT_STAT_DESC(frames_rx_unicast),
	IONIC_MGMT_PORT_STAT_DESC(frames_rx_multicast),
	IONIC_MGMT_PORT_STAT_DESC(frames_rx_broadcast),
	IONIC_MGMT_PORT_STAT_DESC(frames_rx_pause),
	IONIC_MGMT_PORT_STAT_DESC(frames_rx_bad_length),
	IONIC_MGMT_PORT_STAT_DESC(frames_rx_undersized),
	IONIC_MGMT_PORT_STAT_DESC(frames_rx_oversized),
	IONIC_MGMT_PORT_STAT_DESC(frames_rx_fragments),
	IONIC_MGMT_PORT_STAT_DESC(frames_rx_jabber),
	IONIC_MGMT_PORT_STAT_DESC(frames_rx_64b),
	IONIC_MGMT_PORT_STAT_DESC(frames_rx_65b_127b),
	IONIC_MGMT_PORT_STAT_DESC(frames_rx_128b_255b),
	IONIC_MGMT_PORT_STAT_DESC(frames_rx_256b_511b),
	IONIC_MGMT_PORT_STAT_DESC(frames_rx_512b_1023b),
	IONIC_MGMT_PORT_STAT_DESC(frames_rx_1024b_1518b),
	IONIC_MGMT_PORT_STAT_DESC(frames_rx_gt_1518b),
	IONIC_MGMT_PORT_STAT_DESC(frames_rx_fifo_full),
	IONIC_MGMT_PORT_STAT_DESC(frames_tx_ok),
	IONIC_MGMT_PORT_STAT_DESC(frames_tx_all),
	IONIC_MGMT_PORT_STAT_DESC(frames_tx_bad),
	IONIC_MGMT_PORT_STAT_DESC(octets_tx_ok),
	IONIC_MGMT_PORT_STAT_DESC(octets_tx_total),
	IONIC_MGMT_PORT_STAT_DESC(frames_tx_unicast),
	IONIC_MGMT_PORT_STAT_DESC(frames_tx_multicast),
	IONIC_MGMT_PORT_STAT_DESC(frames_tx_broadcast),
	IONIC_MGMT_PORT_STAT_DESC(frames_tx_pause),
};
/* ethtool descriptors for the per-txq software stats; the trailing
 * entries are only built when IONIC_DEBUG_STATS is enabled
 */
static const struct ionic_stat_desc ionic_tx_stats_desc[] = {
	IONIC_TX_STAT_DESC(pkts),
	IONIC_TX_STAT_DESC(bytes),
	IONIC_TX_STAT_DESC(clean),
	IONIC_TX_STAT_DESC(dma_map_err),
	IONIC_TX_STAT_DESC(linearize),
	IONIC_TX_STAT_DESC(tso),
	IONIC_TX_STAT_DESC(tso_bytes),
	IONIC_TX_STAT_DESC(hwstamp_valid),
	IONIC_TX_STAT_DESC(hwstamp_invalid),
#ifdef IONIC_DEBUG_STATS
	IONIC_TX_STAT_DESC(vlan_inserted),
	IONIC_TX_STAT_DESC(frags),
	IONIC_TX_STAT_DESC(csum),
	IONIC_TX_STAT_DESC(csum_none),
#endif
};
/* ethtool descriptors for the per-rxq software stats; the csum detail
 * entries are only built when IONIC_DEBUG_STATS is enabled
 */
static const struct ionic_stat_desc ionic_rx_stats_desc[] = {
	IONIC_RX_STAT_DESC(pkts),
	IONIC_RX_STAT_DESC(bytes),
	IONIC_RX_STAT_DESC(dma_map_err),
	IONIC_RX_STAT_DESC(alloc_err),
#ifdef IONIC_DEBUG_STATS
	IONIC_RX_STAT_DESC(vlan_stripped),
	IONIC_RX_STAT_DESC(csum_none),
	IONIC_RX_STAT_DESC(csum_complete),
#endif
	IONIC_RX_STAT_DESC(csum_error),
	IONIC_RX_STAT_DESC(hwstamp_valid),
	IONIC_RX_STAT_DESC(hwstamp_invalid),
	IONIC_RX_STAT_DESC(dropped),
	IONIC_RX_STAT_DESC(cache_full),
	IONIC_RX_STAT_DESC(cache_empty),
	IONIC_RX_STAT_DESC(cache_busy),
	IONIC_RX_STAT_DESC(cache_get),
	IONIC_RX_STAT_DESC(cache_put),
	IONIC_RX_STAT_DESC(buf_exhausted),
	IONIC_RX_STAT_DESC(buf_not_reusable),
	IONIC_RX_STAT_DESC(buf_reused),
};
/* descriptors for the optional debug stats (queue, cq, interrupt and
 * napi counters), compiled in only with IONIC_DEBUG_STATS
 */
#ifdef IONIC_DEBUG_STATS
static const struct ionic_stat_desc ionic_txq_stats_desc[] = {
	IONIC_TX_Q_STAT_DESC(stop),
	IONIC_TX_Q_STAT_DESC(wake),
	IONIC_TX_Q_STAT_DESC(drop),
	IONIC_TX_Q_STAT_DESC(dbell_count),
	IONIC_TX_Q_STAT_DESC(depth),
	IONIC_TX_Q_STAT_DESC(depth_max)
};
#endif

#ifdef IONIC_DEBUG_STATS
static const struct ionic_stat_desc ionic_dbg_cq_stats_desc[] = {
	IONIC_CQ_STAT_DESC(compl_count),
};
#define IONIC_NUM_DBG_CQ_STATS ARRAY_SIZE(ionic_dbg_cq_stats_desc)

static const struct ionic_stat_desc ionic_dbg_intr_stats_desc[] = {
	IONIC_INTR_STAT_DESC(rearm_count),
};
#define IONIC_NUM_DBG_INTR_STATS ARRAY_SIZE(ionic_dbg_intr_stats_desc)

static const struct ionic_stat_desc ionic_dbg_napi_stats_desc[] = {
	IONIC_NAPI_STAT_DESC(poll_count),
};
#define IONIC_NUM_DBG_NAPI_STATS ARRAY_SIZE(ionic_dbg_napi_stats_desc)

#define IONIC_NUM_TX_Q_STATS ARRAY_SIZE(ionic_txq_stats_desc)
#endif

/* table sizes, used by the count/strings/values routines below */
#define IONIC_NUM_LIF_STATS ARRAY_SIZE(ionic_lif_stats_desc)
#define IONIC_NUM_PORT_STATS ARRAY_SIZE(ionic_port_stats_desc)
#define IONIC_NUM_MGMT_PORT_STATS ARRAY_SIZE(ionic_mgmt_port_stats_desc)
#define IONIC_NUM_TX_STATS ARRAY_SIZE(ionic_tx_stats_desc)
#define IONIC_NUM_RX_STATS ARRAY_SIZE(ionic_rx_stats_desc)

/* number of regular (non-hwstamp) queues on the lif */
#define MAX_Q(lif) ((lif)->netdev->real_num_tx_queues)
static void ionic_add_lif_txq_stats(struct ionic_lif *lif, int q_num,
struct ionic_lif_sw_stats *stats)
{
struct ionic_tx_stats *txstats = &lif->txqstats[q_num];
stats->tx_packets += txstats->pkts;
stats->tx_bytes += txstats->bytes;
stats->tx_tso += txstats->tso;
stats->tx_tso_bytes += txstats->tso_bytes;
stats->tx_csum_none += txstats->csum_none;
stats->tx_csum += txstats->csum;
stats->tx_hwstamp_valid += txstats->hwstamp_valid;
stats->tx_hwstamp_invalid += txstats->hwstamp_invalid;
}
static void ionic_add_lif_rxq_stats(struct ionic_lif *lif, int q_num,
struct ionic_lif_sw_stats *stats)
{
struct ionic_rx_stats *rxstats = &lif->rxqstats[q_num];
stats->rx_packets += rxstats->pkts;
stats->rx_bytes += rxstats->bytes;
stats->rx_csum_none += rxstats->csum_none;
stats->rx_csum_complete += rxstats->csum_complete;
stats->rx_csum_error += rxstats->csum_error;
stats->rx_hwstamp_valid += rxstats->hwstamp_valid;
stats->rx_hwstamp_invalid += rxstats->hwstamp_invalid;
}
static void ionic_get_lif_stats(struct ionic_lif *lif,
struct ionic_lif_sw_stats *stats)
{
struct rtnl_link_stats64 ns;
int q_num;
memset(stats, 0, sizeof(*stats));
for (q_num = 0; q_num < MAX_Q(lif); q_num++) {
ionic_add_lif_txq_stats(lif, q_num, stats);
ionic_add_lif_rxq_stats(lif, q_num, stats);
}
if (lif->hwstamp_txq)
ionic_add_lif_txq_stats(lif, lif->hwstamp_txq->q.index, stats);
if (lif->hwstamp_rxq)
ionic_add_lif_rxq_stats(lif, lif->hwstamp_rxq->q.index, stats);
ionic_get_stats64(lif->netdev, &ns);
stats->hw_tx_dropped = ns.tx_dropped;
stats->hw_rx_dropped = ns.rx_dropped;
stats->hw_rx_over_errors = ns.rx_over_errors;
stats->hw_rx_missed_errors = ns.rx_missed_errors;
stats->hw_tx_aborted_errors = ns.tx_aborted_errors;
}
/* Return the number of u64 stat values (and matching name strings)
 * this lif reports to ethtool, accounting for the optional hwstamp
 * queues and, when debug stats are compiled in and enabled, the
 * per-queue debug counters.  Must agree with the strings/values
 * routines below.
 */
static u64 ionic_sw_stats_get_count(struct ionic_lif *lif)
{
	u64 total = 0, tx_queues = MAX_Q(lif), rx_queues = MAX_Q(lif);

	total += IONIC_NUM_LIF_STATS;

	/* mgmt NICs report the mgmt port stats instead of the port stats */
	if (lif->ionic->is_mgmt_nic)
		total += IONIC_NUM_MGMT_PORT_STATS;
	else
		total += IONIC_NUM_PORT_STATS;

	/* hwstamp queues are extra, beyond MAX_Q() */
	if (lif->hwstamp_txq)
		tx_queues += 1;
	if (lif->hwstamp_rxq)
		rx_queues += 1;

	total += tx_queues * IONIC_NUM_TX_STATS;
	total += rx_queues * IONIC_NUM_RX_STATS;

#ifdef IONIC_DEBUG_STATS
	if (test_bit(IONIC_LIF_F_UP, lif->state) &&
	    test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) {
		/* tx debug stats */
		total += tx_queues * (IONIC_NUM_DBG_CQ_STATS +
				      IONIC_NUM_TX_Q_STATS +
				      IONIC_NUM_DBG_INTR_STATS +
				      IONIC_NUM_DBG_NAPI_STATS +
				      IONIC_MAX_NUM_NAPI_CNTR +
				      IONIC_MAX_NUM_SG_CNTR);

		/* rx debug stats */
		total += rx_queues * (IONIC_NUM_DBG_CQ_STATS +
				      IONIC_NUM_DBG_INTR_STATS +
				      IONIC_NUM_DBG_NAPI_STATS +
				      IONIC_MAX_NUM_NAPI_CNTR);
	}
#endif

	return total;
}
/* Emit the ethtool stat name strings for tx queue @q_num, advancing
 * *buf; must stay in lockstep with ionic_sw_stats_get_txq_values().
 */
static void ionic_sw_stats_get_tx_strings(struct ionic_lif *lif, u8 **buf,
					  int q_num)
{
	int i;

	for (i = 0; i < IONIC_NUM_TX_STATS; i++)
		ethtool_sprintf(buf, "tx_%d_%s", q_num,
				ionic_tx_stats_desc[i].name);

#ifdef IONIC_DEBUG_STATS
	/* debug strings only when the lif is up and debug stats enabled */
	if (!test_bit(IONIC_LIF_F_UP, lif->state) ||
	    !test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state))
		return;

	for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++)
		ethtool_sprintf(buf, "txq_%d_%s", q_num,
				ionic_txq_stats_desc[i].name);
	for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++)
		ethtool_sprintf(buf, "txq_%d_cq_%s", q_num,
				ionic_dbg_cq_stats_desc[i].name);
	for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++)
		ethtool_sprintf(buf, "txq_%d_intr_%s", q_num,
				ionic_dbg_intr_stats_desc[i].name);
	for (i = 0; i < IONIC_NUM_DBG_NAPI_STATS; i++)
		ethtool_sprintf(buf, "txq_%d_napi_%s", q_num,
				ionic_dbg_napi_stats_desc[i].name);
	for (i = 0; i < IONIC_MAX_NUM_NAPI_CNTR; i++)
		ethtool_sprintf(buf, "txq_%d_napi_work_done_%d", q_num, i);
	for (i = 0; i < IONIC_MAX_NUM_SG_CNTR; i++)
		ethtool_sprintf(buf, "txq_%d_sg_cntr_%d", q_num, i);
#endif
}
/* Emit the ethtool stat name strings for rx queue @q_num, advancing
 * *buf; order must match the corresponding rx values routine.
 */
static void ionic_sw_stats_get_rx_strings(struct ionic_lif *lif, u8 **buf,
					  int q_num)
{
	int i;

	for (i = 0; i < IONIC_NUM_RX_STATS; i++)
		ethtool_sprintf(buf, "rx_%d_%s", q_num,
				ionic_rx_stats_desc[i].name);

#ifdef IONIC_DEBUG_STATS
	/* debug strings only when the lif is up and debug stats enabled */
	if (!test_bit(IONIC_LIF_F_UP, lif->state) ||
	    !test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state))
		return;

	for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++)
		ethtool_sprintf(buf, "rxq_%d_cq_%s", q_num,
				ionic_dbg_cq_stats_desc[i].name);
	for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++)
		ethtool_sprintf(buf, "rxq_%d_intr_%s", q_num,
				ionic_dbg_intr_stats_desc[i].name);
	for (i = 0; i < IONIC_NUM_DBG_NAPI_STATS; i++)
		ethtool_sprintf(buf, "rxq_%d_napi_%s", q_num,
				ionic_dbg_napi_stats_desc[i].name);
	for (i = 0; i < IONIC_MAX_NUM_NAPI_CNTR; i++)
		ethtool_sprintf(buf, "rxq_%d_napi_work_done_%d", q_num, i);
#endif
}
/* Emit all ethtool stat name strings for the lif, in the same order
 * the values routines fill in the numbers: lif stats, (mgmt) port
 * stats, then per-queue tx and rx strings with the hwstamp queues
 * appended after the regular ranges.
 */
static void ionic_sw_stats_get_strings(struct ionic_lif *lif, u8 **buf)
{
	int i, q_num;

	/* Pass the stat names through an explicit "%s" format instead of
	 * using them directly as the format string, so a stray '%' in a
	 * name can never be misinterpreted as a conversion specifier.
	 */
	for (i = 0; i < IONIC_NUM_LIF_STATS; i++)
		ethtool_sprintf(buf, "%s", ionic_lif_stats_desc[i].name);

	if (lif->ionic->is_mgmt_nic) {
		for (i = 0; i < IONIC_NUM_MGMT_PORT_STATS; i++)
			ethtool_sprintf(buf, "%s",
					ionic_mgmt_port_stats_desc[i].name);
	} else {
		for (i = 0; i < IONIC_NUM_PORT_STATS; i++)
			ethtool_sprintf(buf, "%s",
					ionic_port_stats_desc[i].name);
	}

	for (q_num = 0; q_num < MAX_Q(lif); q_num++)
		ionic_sw_stats_get_tx_strings(lif, buf, q_num);

	if (lif->hwstamp_txq)
		ionic_sw_stats_get_tx_strings(lif, buf, lif->hwstamp_txq->q.index);

	for (q_num = 0; q_num < MAX_Q(lif); q_num++)
		ionic_sw_stats_get_rx_strings(lif, buf, q_num);

	if (lif->hwstamp_rxq)
		ionic_sw_stats_get_rx_strings(lif, buf, lif->hwstamp_rxq->q.index);
}
/*
 * ionic_sw_stats_get_txq_values() - Copy one TX queue's stat values into the
 * ethtool stats buffer, advancing *buf one u64 slot per value written.
 * The value order here must match ionic_sw_stats_get_tx_strings() exactly,
 * including the early return for debug stats.
 */
static void ionic_sw_stats_get_txq_values(struct ionic_lif *lif, u64 **buf,
int q_num)
{
struct ionic_tx_stats *txstats;
#ifdef IONIC_DEBUG_STATS
struct ionic_qcq *txqcq;
#endif
int i;
/* base per-queue TX stats, read via byte-offset descriptors */
txstats = &lif->txqstats[q_num];
for (i = 0; i < IONIC_NUM_TX_STATS; i++) {
**buf = IONIC_READ_STAT64(txstats, &ionic_tx_stats_desc[i]);
(*buf)++;
}
#ifdef IONIC_DEBUG_STATS
/* debug stats exist only while the LIF is up with SW debug enabled;
 * this mirrors the same gate in the strings function, so the buffer
 * layout stays consistent
 */
if (!test_bit(IONIC_LIF_F_UP, lif->state) ||
!test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state))
return;
txqcq = lif->txqcqs[q_num];
for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) {
**buf = IONIC_READ_STAT64(&txqcq->q,
&ionic_txq_stats_desc[i]);
(*buf)++;
}
for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) {
**buf = IONIC_READ_STAT64(&txqcq->cq,
&ionic_dbg_cq_stats_desc[i]);
(*buf)++;
}
for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) {
**buf = IONIC_READ_STAT64(&txqcq->intr,
&ionic_dbg_intr_stats_desc[i]);
(*buf)++;
}
for (i = 0; i < IONIC_NUM_DBG_NAPI_STATS; i++) {
**buf = IONIC_READ_STAT64(&txqcq->napi_stats,
&ionic_dbg_napi_stats_desc[i]);
(*buf)++;
}
/* raw per-bucket counters, not descriptor-driven */
for (i = 0; i < IONIC_MAX_NUM_NAPI_CNTR; i++) {
**buf = txqcq->napi_stats.work_done_cntr[i];
(*buf)++;
}
for (i = 0; i < IONIC_MAX_NUM_SG_CNTR; i++) {
**buf = txstats->sg_cntr[i];
(*buf)++;
}
#endif
}
/*
 * ionic_sw_stats_get_rxq_values() - Copy one RX queue's stat values into the
 * ethtool stats buffer, advancing *buf one u64 slot per value written.
 * The value order here must match ionic_sw_stats_get_rx_strings() exactly,
 * including the early return for debug stats.
 */
static void ionic_sw_stats_get_rxq_values(struct ionic_lif *lif, u64 **buf,
int q_num)
{
struct ionic_rx_stats *rxstats;
#ifdef IONIC_DEBUG_STATS
struct ionic_qcq *rxqcq;
#endif
int i;
/* base per-queue RX stats, read via byte-offset descriptors */
rxstats = &lif->rxqstats[q_num];
for (i = 0; i < IONIC_NUM_RX_STATS; i++) {
**buf = IONIC_READ_STAT64(rxstats, &ionic_rx_stats_desc[i]);
(*buf)++;
}
#ifdef IONIC_DEBUG_STATS
/* debug stats exist only while the LIF is up with SW debug enabled */
if (!test_bit(IONIC_LIF_F_UP, lif->state) ||
!test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state))
return;
rxqcq = lif->rxqcqs[q_num];
for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) {
**buf = IONIC_READ_STAT64(&rxqcq->cq,
&ionic_dbg_cq_stats_desc[i]);
(*buf)++;
}
for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) {
**buf = IONIC_READ_STAT64(&rxqcq->intr,
&ionic_dbg_intr_stats_desc[i]);
(*buf)++;
}
for (i = 0; i < IONIC_NUM_DBG_NAPI_STATS; i++) {
**buf = IONIC_READ_STAT64(&rxqcq->napi_stats,
&ionic_dbg_napi_stats_desc[i]);
(*buf)++;
}
/* raw per-bucket napi work counters, not descriptor-driven */
for (i = 0; i < IONIC_MAX_NUM_NAPI_CNTR; i++) {
**buf = rxqcq->napi_stats.work_done_cntr[i];
(*buf)++;
}
#endif
}
/*
 * ionic_sw_stats_get_values() - Fill the ethtool stats buffer for the whole
 * LIF: LIF stats, then port (or mgmt-port) stats, then per-queue TX and RX
 * values including the optional hwstamp queues.  The order must match
 * ionic_sw_stats_get_strings() exactly.
 */
static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
{
struct ionic_mgmt_port_stats *mgmt_stats;
struct ionic_port_stats *port_stats;
struct ionic_lif_sw_stats lif_stats;
int i, q_num;
ionic_get_lif_stats(lif, &lif_stats);
for (i = 0; i < IONIC_NUM_LIF_STATS; i++) {
**buf = IONIC_READ_STAT64(&lif_stats, &ionic_lif_stats_desc[i]);
(*buf)++;
}
/* port stats come from device memory and are little-endian */
if (lif->ionic->is_mgmt_nic) {
mgmt_stats = &lif->ionic->idev.port_info->mgmt_stats;
for (i = 0; i < IONIC_NUM_MGMT_PORT_STATS; i++) {
**buf = IONIC_READ_STAT_LE64(mgmt_stats,
&ionic_mgmt_port_stats_desc[i]);
(*buf)++;
}
} else {
port_stats = &lif->ionic->idev.port_info->stats;
for (i = 0; i < IONIC_NUM_PORT_STATS; i++) {
**buf = IONIC_READ_STAT_LE64(port_stats,
&ionic_port_stats_desc[i]);
(*buf)++;
}
}
for (q_num = 0; q_num < MAX_Q(lif); q_num++)
ionic_sw_stats_get_txq_values(lif, buf, q_num);
if (lif->hwstamp_txq)
ionic_sw_stats_get_txq_values(lif, buf, lif->hwstamp_txq->q.index);
for (q_num = 0; q_num < MAX_Q(lif); q_num++)
ionic_sw_stats_get_rxq_values(lif, buf, q_num);
if (lif->hwstamp_rxq)
ionic_sw_stats_get_rxq_values(lif, buf, lif->hwstamp_rxq->q.index);
}
/* Registry of stat groups exposed through the ethtool -S interface;
 * each entry supplies matching strings/values/count callbacks.
 */
const struct ionic_stats_group_intf ionic_stats_groups[] = {
/* SW Stats group */
{
.get_strings = ionic_sw_stats_get_strings,
.get_values = ionic_sw_stats_get_values,
.get_count = ionic_sw_stats_get_count,
},
/* Add more stat groups here */
};
/* Number of entries in ionic_stats_groups[] */
const int ionic_num_stats_grps = ARRAY_SIZE(ionic_stats_groups);

View File

@ -0,0 +1,64 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2017 - 2022 Pensando Systems, Inc */
#ifndef _IONIC_STATS_H_
#define _IONIC_STATS_H_
/* A stat descriptor pairs an ethtool display name with the byte offset
 * of the corresponding u64 field inside its stats structure.
 */
#define IONIC_STAT_TO_OFFSET(type, stat_name) (offsetof(type, stat_name))
#define IONIC_STAT_DESC(type, stat_name) { \
.name = #stat_name, \
.offset = IONIC_STAT_TO_OFFSET(type, stat_name) \
}
/* Convenience wrappers binding IONIC_STAT_DESC to each stats struct */
#define IONIC_PORT_STAT_DESC(stat_name) \
IONIC_STAT_DESC(struct ionic_port_stats, stat_name)
#define IONIC_MGMT_PORT_STAT_DESC(stat_name) \
IONIC_STAT_DESC(struct ionic_mgmt_port_stats, stat_name)
#define IONIC_LIF_STAT_DESC(stat_name) \
IONIC_STAT_DESC(struct ionic_lif_sw_stats, stat_name)
#define IONIC_TX_STAT_DESC(stat_name) \
IONIC_STAT_DESC(struct ionic_tx_stats, stat_name)
#define IONIC_RX_STAT_DESC(stat_name) \
IONIC_STAT_DESC(struct ionic_rx_stats, stat_name)
#ifdef IONIC_DEBUG_STATS
#define IONIC_TX_Q_STAT_DESC(stat_name) \
IONIC_STAT_DESC(struct ionic_queue, stat_name)
#define IONIC_CQ_STAT_DESC(stat_name) \
IONIC_STAT_DESC(struct ionic_cq, stat_name)
#define IONIC_INTR_STAT_DESC(stat_name) \
IONIC_STAT_DESC(struct ionic_intr_info, stat_name)
#define IONIC_NAPI_STAT_DESC(stat_name) \
IONIC_STAT_DESC(struct ionic_napi_stats, stat_name)
#endif
/* Interface structure for a particular stats group */
struct ionic_stats_group_intf {
void (*get_strings)(struct ionic_lif *lif, u8 **buf);
void (*get_values)(struct ionic_lif *lif, u64 **buf);
u64 (*get_count)(struct ionic_lif *lif);
};
extern const struct ionic_stats_group_intf ionic_stats_groups[];
extern const int ionic_num_stats_grps;
/* Read a stat through its descriptor; the LE64 variant converts
 * little-endian device-memory stats to host order.
 */
#define IONIC_READ_STAT64(base_ptr, desc_ptr) \
(*((u64 *)(((u8 *)(base_ptr)) + (desc_ptr)->offset)))
#define IONIC_READ_STAT_LE64(base_ptr, desc_ptr) \
__le64_to_cpu(*((__le64 *)(((u8 *)(base_ptr)) + (desc_ptr)->offset)))
struct ionic_stat_desc {
char name[ETH_GSTRING_LEN];
u64 offset;
};
#endif /* _IONIC_STATS_H_ */

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,21 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2017 - 2022 Pensando Systems, Inc */
#ifndef _IONIC_TXRX_H_
#define _IONIC_TXRX_H_
/* completion-queue drain helpers */
void ionic_rx_flush(struct ionic_cq *cq);
void ionic_tx_flush(struct ionic_cq *cq);
/* descriptor ring fill/teardown */
void ionic_rx_fill(struct ionic_queue *q);
void ionic_rx_empty(struct ionic_queue *q);
void ionic_tx_empty(struct ionic_queue *q);
/* NAPI poll entry points (rx-only, tx-only, combined) */
int ionic_rx_napi(struct napi_struct *napi, int budget);
int ionic_tx_napi(struct napi_struct *napi, int budget);
int ionic_txrx_napi(struct napi_struct *napi, int budget);
/* netdev transmit entry point */
netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev);
/* per-completion service routines used by the NAPI pollers */
bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info);
bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info);
#endif /* _IONIC_TXRX_H_ */

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,319 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 1999 - 2019 Intel Corporation. */
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
#ifndef __LINUX_OVERFLOW_H
#define __LINUX_OVERFLOW_H
#include <linux/compiler.h>
/*
* In the fallback code below, we need to compute the minimum and
* maximum values representable in a given type. These macros may also
* be useful elsewhere, so we provide them outside the
* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW block.
*
* It would seem more obvious to do something like
*
* #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0)
* #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0)
*
* Unfortunately, the middle expressions, strictly speaking, have
* undefined behaviour, and at least some versions of gcc warn about
* the type_max expression (but not if -fsanitize=undefined is in
* effect; in that case, the warning is deferred to runtime...).
*
* The slightly excessive casting in type_min is to make sure the
* macros also produce sensible values for the exotic type _Bool. [The
* overflow checkers only almost work for _Bool, but that's
* a-feature-not-a-bug, since people shouldn't be doing arithmetic on
* _Bools. Besides, the gcc builtins don't allow _Bool* as third
* argument.]
*
* Idea stolen from
* https://mail-index.netbsd.org/tech-misc/2007/02/05/0000.html -
* credit to Christian Biere.
*/
/* The is_signed_type macro is redefined in a few places in various kernel
* headers. If this header is included at the same time as one of those, we
* will generate compilation warnings. Since we can't fix every old kernel,
* rename is_signed_type for this file to _kc_is_signed_type. This prevents
* the macro name collision, and should be safe since our drivers do not
* directly call the macro.
*/
#define _kc_is_signed_type(type) (((type)(-1)) < (type)1)
#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - _kc_is_signed_type(type)))
#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
#define type_min(T) ((T)((T)-type_max(T)-(T)1))
#ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
/*
* For simplicity and code hygiene, the fallback code below insists on
* a, b and *d having the same type (similar to the min() and max()
* macros), whereas gcc's type-generic overflow checkers accept
* different types. Hence we don't just make check_add_overflow an
* alias for __builtin_add_overflow, but add type checks similar to
* below.
*/
#define check_add_overflow(a, b, d) ({ \
typeof(a) __a = (a); \
typeof(b) __b = (b); \
typeof(d) __d = (d); \
(void) (&__a == &__b); \
(void) (&__a == __d); \
__builtin_add_overflow(__a, __b, __d); \
})
#define check_sub_overflow(a, b, d) ({ \
typeof(a) __a = (a); \
typeof(b) __b = (b); \
typeof(d) __d = (d); \
(void) (&__a == &__b); \
(void) (&__a == __d); \
__builtin_sub_overflow(__a, __b, __d); \
})
#define check_mul_overflow(a, b, d) ({ \
typeof(a) __a = (a); \
typeof(b) __b = (b); \
typeof(d) __d = (d); \
(void) (&__a == &__b); \
(void) (&__a == __d); \
__builtin_mul_overflow(__a, __b, __d); \
})
#else
/* Checking for unsigned overflow is relatively easy without causing UB. */
#define __unsigned_add_overflow(a, b, d) ({ \
typeof(a) __a = (a); \
typeof(b) __b = (b); \
typeof(d) __d = (d); \
(void) (&__a == &__b); \
(void) (&__a == __d); \
*__d = __a + __b; \
*__d < __a; \
})
#define __unsigned_sub_overflow(a, b, d) ({ \
typeof(a) __a = (a); \
typeof(b) __b = (b); \
typeof(d) __d = (d); \
(void) (&__a == &__b); \
(void) (&__a == __d); \
*__d = __a - __b; \
__a < __b; \
})
/*
* If one of a or b is a compile-time constant, this avoids a division.
*/
#define __unsigned_mul_overflow(a, b, d) ({ \
typeof(a) __a = (a); \
typeof(b) __b = (b); \
typeof(d) __d = (d); \
(void) (&__a == &__b); \
(void) (&__a == __d); \
*__d = __a * __b; \
__builtin_constant_p(__b) ? \
__b > 0 && __a > type_max(typeof(__a)) / __b : \
__a > 0 && __b > type_max(typeof(__b)) / __a; \
})
/*
* For signed types, detecting overflow is much harder, especially if
* we want to avoid UB. But the interface of these macros is such that
* we must provide a result in *d, and in fact we must produce the
* result promised by gcc's builtins, which is simply the possibly
* wrapped-around value. Fortunately, we can just formally do the
* operations in the widest relevant unsigned type (u64) and then
* truncate the result - gcc is smart enough to generate the same code
* with and without the (u64) casts.
*/
/*
* Adding two signed integers can overflow only if they have the same
* sign, and overflow has happened iff the result has the opposite
* sign.
*/
#define __signed_add_overflow(a, b, d) ({ \
typeof(a) __a = (a); \
typeof(b) __b = (b); \
typeof(d) __d = (d); \
(void) (&__a == &__b); \
(void) (&__a == __d); \
*__d = (u64)__a + (u64)__b; \
(((~(__a ^ __b)) & (*__d ^ __a)) \
& type_min(typeof(__a))) != 0; \
})
/*
* Subtraction is similar, except that overflow can now happen only
* when the signs are opposite. In this case, overflow has happened if
* the result has the opposite sign of a.
*/
#define __signed_sub_overflow(a, b, d) ({ \
typeof(a) __a = (a); \
typeof(b) __b = (b); \
typeof(d) __d = (d); \
(void) (&__a == &__b); \
(void) (&__a == __d); \
*__d = (u64)__a - (u64)__b; \
((((__a ^ __b)) & (*__d ^ __a)) \
& type_min(typeof(__a))) != 0; \
})
/*
* Signed multiplication is rather hard. gcc always follows C99, so
* division is truncated towards 0. This means that we can write the
* overflow check like this:
*
* (a > 0 && (b > MAX/a || b < MIN/a)) ||
* (a < -1 && (b > MIN/a || b < MAX/a) ||
* (a == -1 && b == MIN)
*
* The redundant casts of -1 are to silence an annoying -Wtype-limits
* (included in -Wextra) warning: When the type is u8 or u16, the
* __b_c_e in check_mul_overflow obviously selects
* __unsigned_mul_overflow, but unfortunately gcc still parses this
* code and warns about the limited range of __b.
*/
#define __signed_mul_overflow(a, b, d) ({ \
typeof(a) __a = (a); \
typeof(b) __b = (b); \
typeof(d) __d = (d); \
typeof(a) __tmax = type_max(typeof(a)); \
typeof(a) __tmin = type_min(typeof(a)); \
(void) (&__a == &__b); \
(void) (&__a == __d); \
*__d = (u64)__a * (u64)__b; \
(__b > 0 && (__a > __tmax/__b || __a < __tmin/__b)) || \
(__b < (typeof(__b))-1 && (__a > __tmin/__b || __a < __tmax/__b)) || \
(__b == (typeof(__b))-1 && __a == __tmin); \
})
#define check_add_overflow(a, b, d) \
__builtin_choose_expr(_kc_is_signed_type(typeof(a)), \
__signed_add_overflow(a, b, d), \
__unsigned_add_overflow(a, b, d))
#define check_sub_overflow(a, b, d) \
__builtin_choose_expr(_kc_is_signed_type(typeof(a)), \
__signed_sub_overflow(a, b, d), \
__unsigned_sub_overflow(a, b, d))
#define check_mul_overflow(a, b, d) \
__builtin_choose_expr(_kc_is_signed_type(typeof(a)), \
__signed_mul_overflow(a, b, d), \
__unsigned_mul_overflow(a, b, d))
#endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */
/** check_shl_overflow() - Calculate a left-shifted value and check overflow
 *
 * @a: Value to be shifted
 * @s: How many bits left to shift
 * @d: Pointer to where to store the result
 *
 * Computes *@d = (@a << @s)
 *
 * Returns true if '*d' cannot hold the result or when 'a << s' doesn't
 * make sense. Example conditions:
 * - 'a << s' causes bits to be lost when stored in *d.
 * - 's' is garbage (e.g. negative) or so large that the result of
 *   'a << s' is guaranteed to be 0.
 * - 'a' is negative.
 * - 'a << s' sets the sign bit, if any, in '*d'.
 *
 * '*d' will hold the results of the attempted shift, but is not
 * considered "safe for use" if false is returned.
 */
/* Macro arguments are parenthesized at their single evaluation points so
 * that comma-operator or low-precedence argument expressions expand safely.
 */
#define check_shl_overflow(a, s, d) ({					\
	typeof(a) _a = (a);						\
	typeof(s) _s = (s);						\
	typeof(d) _d = (d);						\
	u64 _a_full = _a;						\
	unsigned int _to_shift =					\
		_s >= 0 && _s < 8 * sizeof(*_d) ? _s : 0;		\
	*_d = (_a_full << _to_shift);					\
	(_to_shift != _s || *_d < 0 || _a < 0 ||			\
	 (*_d >> _to_shift) != _a);					\
})
/**
* array_size() - Calculate size of 2-dimensional array.
*
* @a: dimension one
* @b: dimension two
*
* Calculates size of 2-dimensional array: @a * @b.
*
* Returns: number of bytes needed to represent the array or SIZE_MAX on
* overflow.
*/
/**
 * array_size() - Calculate size of 2-dimensional array.
 *
 * @a: dimension one
 * @b: dimension two
 *
 * Calculates size of 2-dimensional array: @a * @b.
 *
 * Returns: number of bytes needed to represent the array or SIZE_MAX on
 * overflow.
 */
static inline __must_check size_t array_size(size_t a, size_t b)
{
	size_t prod;

	/* saturate to SIZE_MAX when the multiplication overflows */
	return check_mul_overflow(a, b, &prod) ? SIZE_MAX : prod;
}
/**
* array3_size() - Calculate size of 3-dimensional array.
*
* @a: dimension one
* @b: dimension two
* @c: dimension three
*
* Calculates size of 3-dimensional array: @a * @b * @c.
*
* Returns: number of bytes needed to represent the array or SIZE_MAX on
* overflow.
*/
/**
 * array3_size() - Calculate size of 3-dimensional array.
 *
 * @a: dimension one
 * @b: dimension two
 * @c: dimension three
 *
 * Calculates size of 3-dimensional array: @a * @b * @c.
 *
 * Returns: number of bytes needed to represent the array or SIZE_MAX on
 * overflow.
 */
static inline __must_check size_t array3_size(size_t a, size_t b, size_t c)
{
	size_t prod;

	/* chain the two multiplications, saturating on first overflow */
	if (check_mul_overflow(a, b, &prod) ||
	    check_mul_overflow(prod, c, &prod))
		return SIZE_MAX;

	return prod;
}
/* Compute n * size + c, returning SIZE_MAX if either the multiply or the
 * add overflows.  Backing helper for the struct_size() macro.
 */
static inline __must_check size_t __ab_c_size(size_t n, size_t size, size_t c)
{
	size_t total;

	if (check_mul_overflow(n, size, &total) ||
	    check_add_overflow(total, c, &total))
		return SIZE_MAX;

	return total;
}
/**
* struct_size() - Calculate size of structure with trailing array.
* @p: Pointer to the structure.
* @member: Name of the array member.
* @n: Number of elements in the array.
*
* Calculates size of memory needed for structure @p followed by an
* array of @n @member elements.
*
* Return: number of bytes needed or SIZE_MAX on overflow.
*/
#define struct_size(p, member, n) \
__ab_c_size(n, \
sizeof(*(p)->member) + __must_be_array((p)->member),\
sizeof(*(p)))
#endif /* __LINUX_OVERFLOW_H */

View File

@ -0,0 +1,248 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
*/
#ifndef CONFIG_DIMLIB
#include "dim.h"
/*
* Net DIM profiles:
* There are different set of profiles for each CQ period mode.
* There are different set of profiles for RX/TX CQs.
* Each profile size must be of NET_DIM_PARAMS_NUM_PROFILES
*/
#define NET_DIM_PARAMS_NUM_PROFILES 5
#define NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256
#define NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE 128
#define NET_DIM_DEF_PROFILE_CQE 1
#define NET_DIM_DEF_PROFILE_EQE 1
#define NET_DIM_RX_EQE_PROFILES { \
{1, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
{8, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
{64, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
{128, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
{256, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
}
#define NET_DIM_RX_CQE_PROFILES { \
{2, 256}, \
{8, 128}, \
{16, 64}, \
{32, 64}, \
{64, 64} \
}
#define NET_DIM_TX_EQE_PROFILES { \
{1, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
{8, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
{32, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
{64, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
{128, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE} \
}
#define NET_DIM_TX_CQE_PROFILES { \
{5, 128}, \
{8, 64}, \
{16, 32}, \
{32, 32}, \
{64, 32} \
}
static const struct dim_cq_moder
rx_profile[DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = {
NET_DIM_RX_EQE_PROFILES,
NET_DIM_RX_CQE_PROFILES,
};
static const struct dim_cq_moder
tx_profile[DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = {
NET_DIM_TX_EQE_PROFILES,
NET_DIM_TX_CQE_PROFILES,
};
/* Return the RX moderation profile for the given CQ period mode and
 * profile index, with the period mode recorded in the result.
 */
struct dim_cq_moder
net_dim_get_rx_moderation(u8 cq_period_mode, int ix)
{
	struct dim_cq_moder moder = rx_profile[cq_period_mode][ix];

	moder.cq_period_mode = cq_period_mode;
	return moder;
}
//EXPORT_SYMBOL(net_dim_get_rx_moderation);
/* Return the default RX moderation profile for the given CQ period mode. */
struct dim_cq_moder
net_dim_get_def_rx_moderation(u8 cq_period_mode)
{
	u8 ix;

	if (cq_period_mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE)
		ix = NET_DIM_DEF_PROFILE_CQE;
	else
		ix = NET_DIM_DEF_PROFILE_EQE;

	return net_dim_get_rx_moderation(cq_period_mode, ix);
}
//EXPORT_SYMBOL(net_dim_get_def_rx_moderation);
/* Return the TX moderation profile for the given CQ period mode and
 * profile index, with the period mode recorded in the result.
 */
struct dim_cq_moder
net_dim_get_tx_moderation(u8 cq_period_mode, int ix)
{
	struct dim_cq_moder moder = tx_profile[cq_period_mode][ix];

	moder.cq_period_mode = cq_period_mode;
	return moder;
}
//EXPORT_SYMBOL(net_dim_get_tx_moderation);
/* Return the default TX moderation profile for the given CQ period mode. */
struct dim_cq_moder
net_dim_get_def_tx_moderation(u8 cq_period_mode)
{
	u8 ix;

	if (cq_period_mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE)
		ix = NET_DIM_DEF_PROFILE_CQE;
	else
		ix = NET_DIM_DEF_PROFILE_EQE;

	return net_dim_get_tx_moderation(cq_period_mode, ix);
}
//EXPORT_SYMBOL(net_dim_get_def_tx_moderation);
/* Move the profile index one step in the current tuning direction.
 * Returns DIM_TOO_TIRED when the step budget is exhausted, DIM_ON_EDGE
 * when already at the end of the profile table, DIM_STEPPED otherwise.
 */
static int net_dim_step(struct dim *dim)
{
	if (dim->tired == (NET_DIM_PARAMS_NUM_PROFILES * 2))
		return DIM_TOO_TIRED;

	if (dim->tune_state == DIM_GOING_RIGHT) {
		if (dim->profile_ix == (NET_DIM_PARAMS_NUM_PROFILES - 1))
			return DIM_ON_EDGE;
		dim->profile_ix++;
		dim->steps_right++;
	} else if (dim->tune_state == DIM_GOING_LEFT) {
		if (dim->profile_ix == 0)
			return DIM_ON_EDGE;
		dim->profile_ix--;
		dim->steps_left++;
	}
	/* parking states take no step but still accrue tiredness */

	dim->tired++;
	return DIM_STEPPED;
}
/* Leave a parking state: resume tuning toward the nearer table edge
 * and immediately take one step in that direction.
 */
static void net_dim_exit_parking(struct dim *dim)
{
	if (dim->profile_ix)
		dim->tune_state = DIM_GOING_LEFT;
	else
		dim->tune_state = DIM_GOING_RIGHT;

	net_dim_step(dim);
}
/* Compare two stat snapshots, in priority order: bytes/ms, then
 * packets/ms, then events/ms.  Returns DIM_STATS_BETTER/WORSE/SAME
 * from the first metric that differs significantly.
 */
static int net_dim_stats_compare(struct dim_stats *curr,
				 struct dim_stats *prev)
{
	if (!prev->bpms)
		return curr->bpms ? DIM_STATS_BETTER : DIM_STATS_SAME;

	if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms)) {
		if (curr->bpms > prev->bpms)
			return DIM_STATS_BETTER;
		return DIM_STATS_WORSE;
	}

	if (!prev->ppms)
		return curr->ppms ? DIM_STATS_BETTER : DIM_STATS_SAME;

	if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms)) {
		if (curr->ppms > prev->ppms)
			return DIM_STATS_BETTER;
		return DIM_STATS_WORSE;
	}

	if (!prev->epms)
		return DIM_STATS_SAME;

	if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms)) {
		/* fewer events for the same traffic is the better outcome */
		if (curr->epms < prev->epms)
			return DIM_STATS_BETTER;
		return DIM_STATS_WORSE;
	}

	return DIM_STATS_SAME;
}
/*
 * net_dim_decision() - Run one iteration of the DIM tuning state machine
 * against a fresh stats sample.  Returns true when the profile index
 * changed and a new moderation profile should be applied.
 */
static bool net_dim_decision(struct dim_stats *curr_stats, struct dim *dim)
{
int prev_state = dim->tune_state;
int prev_ix = dim->profile_ix;
int stats_res;
int step_res;
switch (dim->tune_state) {
case DIM_PARKING_ON_TOP:
/* stay parked while the stats look the same as when we parked */
stats_res = net_dim_stats_compare(curr_stats,
&dim->prev_stats);
if (stats_res != DIM_STATS_SAME)
net_dim_exit_parking(dim);
break;
case DIM_PARKING_TIRED:
/* rest until the tiredness budget drains to zero */
dim->tired--;
if (!dim->tired)
net_dim_exit_parking(dim);
break;
case DIM_GOING_RIGHT:
case DIM_GOING_LEFT:
stats_res = net_dim_stats_compare(curr_stats,
&dim->prev_stats);
/* reverse direction if the last step didn't improve things */
if (stats_res != DIM_STATS_BETTER)
dim_turn(dim);
if (dim_on_top(dim)) {
dim_park_on_top(dim);
break;
}
step_res = net_dim_step(dim);
switch (step_res) {
case DIM_ON_EDGE:
dim_park_on_top(dim);
break;
case DIM_TOO_TIRED:
dim_park_tired(dim);
break;
}
break;
}
/* keep the parked-on-top baseline stable; otherwise remember this sample */
if (prev_state != DIM_PARKING_ON_TOP ||
dim->tune_state != DIM_PARKING_ON_TOP)
dim->prev_stats = *curr_stats;
return dim->profile_ix != prev_ix;
}
/*
 * net_dim() - Main DIM entry point, called per interrupt/napi sample.
 * Accumulates samples until DIM_NEVENTS events have passed, then decides
 * whether to schedule application of a new moderation profile.
 * NOTE: the fall-through from DIM_MEASURE_IN_PROGRESS into
 * DIM_START_MEASURE is intentional - it restarts the measurement window.
 */
void net_dim(struct dim *dim, struct dim_sample end_sample)
{
struct dim_stats curr_stats;
u16 nevents;
switch (dim->state) {
case DIM_MEASURE_IN_PROGRESS:
/* event counter is u16 and may have wrapped since start_sample */
nevents = BIT_GAP(BITS_PER_TYPE(u16),
end_sample.event_ctr,
dim->start_sample.event_ctr);
if (nevents < DIM_NEVENTS)
break;
dim_calc_stats(&dim->start_sample, &end_sample, &curr_stats);
if (net_dim_decision(&curr_stats, dim)) {
dim->state = DIM_APPLY_NEW_PROFILE;
schedule_work(&dim->work);
break;
}
/* fall through */
case DIM_START_MEASURE:
dim_update_sample(end_sample.event_ctr, end_sample.pkt_ctr,
end_sample.byte_ctr, &dim->start_sample);
dim->state = DIM_MEASURE_IN_PROGRESS;
break;
case DIM_APPLY_NEW_PROFILE:
/* worker has not applied the previous decision yet; drop sample */
break;
}
}
//EXPORT_SYMBOL(net_dim);
#endif

View File

@ -0,0 +1,211 @@
# SPDX-License-Identifier: GPL-2.0
# Copyright(c) 1999 - 2019 Intel Corporation.
# (thanks Intel!)
#
#####################
# Helpful functions #
#####################
# canonicalize a path, resolving symlinks
readlink = $(shell readlink -f ${1})
# helper functions for converting kernel version to version codes
get_kver = $(or $(word ${2},$(subst ., ,${1})),0)
# pack major/minor/patch (each 0..255) into a single LINUX_VERSION_CODE-style
# integer; expands empty if any component is out of range
get_kvercode = $(shell [ "${1}" -ge 0 -a "${1}" -le 255 2>/dev/null ] && \
[ "${2}" -ge 0 -a "${2}" -le 255 2>/dev/null ] && \
[ "${3}" -ge 0 -a "${3}" -le 255 2>/dev/null ] && \
printf %d $$(( ( ${1} << 16 ) + ( ${2} << 8 ) + ( ${3} ) )) )
################
# depmod Macro #
################
# regenerate module dependencies for ${KVER}, honoring an optional
# System.map (symbol checking) and INSTALL_MOD_PATH staging root
cmd_depmod = /sbin/depmod $(if ${SYSTEM_MAP_FILE},-e -F ${SYSTEM_MAP_FILE}) \
$(if $(strip ${INSTALL_MOD_PATH}),-b ${INSTALL_MOD_PATH}) \
-a ${KVER}
################
# dracut Macro #
################
# pick whichever initramfs regeneration tool this distro provides;
# expands empty when neither dracut nor update-initramfs is present
cmd_initrd := $(shell \
if which dracut > /dev/null 2>&1 ; then \
echo "dracut --force"; \
elif which update-initramfs > /dev/null 2>&1 ; then \
echo "update-initramfs -u"; \
fi )
#####################
# Environment tests #
#####################
DRIVER_UPPERCASE := $(shell echo ${DRIVER} | tr "[:lower:]" "[:upper:]")
ifeq (,${BUILD_KERNEL})
BUILD_KERNEL=$(shell uname -r)
endif
# Kernel Search Path
# All the places we look for kernel source
KSP := /lib/modules/${BUILD_KERNEL}/build \
/lib/modules/${BUILD_KERNEL}/source \
/usr/src/linux-${BUILD_KERNEL} \
/usr/src/linux-$(shell echo ${BUILD_KERNEL} | sed 's/-.*//') \
/usr/src/kernel-headers-${BUILD_KERNEL} \
/usr/src/kernel-source-${BUILD_KERNEL} \
/usr/src/linux-$(shell echo ${BUILD_KERNEL} | sed 's/\([0-9]*\.[0-9]*\)\..*/\1/') \
/usr/src/linux \
/usr/src/kernels/${BUILD_KERNEL} \
/usr/src/kernels
# prune the list down to only values that exist and have an include/linux
# sub-directory. We can't use include/config because some older kernels don't
# have this.
test_dir = $(shell [ -e ${dir}/include/linux -o -e ${dir}/include/generated ] && echo ${dir})
KSP := $(foreach dir, ${KSP}, ${test_dir})
# we will use this first valid entry in the search path
ifeq (,${KSRC})
KSRC := $(firstword ${KSP})
endif
ifeq (,${KSRC})
$(warning *** Kernel header files not in any of the expected locations.)
$(warning *** Install the appropriate kernel development package, e.g.)
$(error kernel-devel, for building kernel modules and try again)
else
ifeq (/lib/modules/${BUILD_KERNEL}/source, ${KSRC})
KOBJ := /lib/modules/${BUILD_KERNEL}/build
else
KOBJ := ${KSRC}
endif
endif
# Version file Search Path
VSP := ${KOBJ}/include/generated/utsrelease.h \
${KOBJ}/include/linux/utsrelease.h \
${KOBJ}/include/linux/version.h \
${KOBJ}/include/generated/uapi/linux/version.h \
/boot/vmlinuz.version.h
# Config file Search Path
CSP := ${KOBJ}/include/generated/autoconf.h \
${KOBJ}/include/linux/autoconf.h \
/boot/vmlinuz.autoconf.h
# System.map Search Path (for depmod)
MSP := ${KSRC}/System.map \
/boot/System.map-${BUILD_KERNEL}
# prune the lists down to only files that exist
test_file = $(shell [ -f ${file} ] && echo ${file})
VSP := $(foreach file, ${VSP}, ${test_file})
CSP := $(foreach file, ${CSP}, ${test_file})
MSP := $(foreach file, ${MSP}, ${test_file})
# and use the first valid entry in the Search Paths
ifeq (,${VERSION_FILE})
VERSION_FILE := $(firstword ${VSP})
endif
ifeq (,${CONFIG_FILE})
CONFIG_FILE := $(firstword ${CSP})
endif
ifeq (,${SYSTEM_MAP_FILE})
SYSTEM_MAP_FILE := $(firstword ${MSP})
endif
ifeq (,$(wildcard ${VERSION_FILE}))
$(error Linux kernel source not configured - missing version header file)
endif
ifeq (,$(wildcard ${CONFIG_FILE}))
$(error Linux kernel source not configured - missing autoconf.h)
endif
ifeq (,$(wildcard ${SYSTEM_MAP_FILE}))
$(warning Missing System.map file - depmod will not check for missing symbols)
endif
ifneq ($(words $(subst :, ,$(CURDIR))), 1)
$(error Sources directory '$(CURDIR)' cannot contain spaces nor colons. Rename directory or move sources to another path)
endif
#######################
# Linux Version Setup #
#######################
# The following command line parameter is intended for development of KCOMPAT
# against upstream kernels such as net-next which have broken or non-updated
# version codes in their Makefile. They are intended for debugging and
# development purpose only so that we can easily test new KCOMPAT early. If you
# don't know what this means, you do not need to set this flag. There is no
# arcane magic here.
# Convert LINUX_VERSION into LINUX_VERSION_CODE
ifneq (${LINUX_VERSION},)
LINUX_VERSION_CODE=$(call get_kvercode,$(call get_kver,${LINUX_VERSION},1),$(call get_kver,${LINUX_VERSION},2),$(call get_kver,${LINUX_VERSION},3))
endif
# Honor LINUX_VERSION_CODE
ifneq (${LINUX_VERSION_CODE},)
$(warning Forcing target kernel to build with LINUX_VERSION_CODE of ${LINUX_VERSION_CODE}$(if ${LINUX_VERSION}, from LINUX_VERSION=${LINUX_VERSION}). Do this at your own risk.)
KVER_CODE := ${LINUX_VERSION_CODE}
EXTRA_CFLAGS += -DLINUX_VERSION_CODE=${LINUX_VERSION_CODE}
endif
# Determine SLE_LOCALVERSION_CODE for SuSE SLE >= 11 (needed by kcompat)
# This assumes SuSE will continue setting CONFIG_LOCALVERSION to the string
# appended to the stable kernel version on which their kernel is based with
# additional versioning information (up to 3 numbers), a possible abbreviated
# git SHA1 commit id and a kernel type, e.g. CONFIG_LOCALVERSION=-1.2.3-default
# or CONFIG_LOCALVERSION=-999.gdeadbee-default
ifeq (1,$(shell ${CC} -E -dM ${CONFIG_FILE} 2> /dev/null |\
grep -m 1 CONFIG_SUSE_KERNEL | awk '{ print $$3 }'))
ifneq (10,$(shell ${CC} -E -dM ${CONFIG_FILE} 2> /dev/null |\
grep -m 1 CONFIG_SLE_VERSION | awk '{ print $$3 }'))
LOCALVERSION := $(shell ${CC} -E -dM ${CONFIG_FILE} 2> /dev/null |\
grep -m 1 CONFIG_LOCALVERSION | awk '{ print $$3 }' |\
cut -d'-' -f2 | sed 's/\.g[[:xdigit:]]\{7\}//')
LOCALVER_A := $(shell echo ${LOCALVERSION} | cut -d'.' -f1)
LOCALVER_B := $(shell echo ${LOCALVERSION} | cut -s -d'.' -f2)
LOCALVER_C := $(shell echo ${LOCALVERSION} | cut -s -d'.' -f3)
SLE_LOCALVERSION_CODE := $(shell expr ${LOCALVER_A} \* 65536 + \
0${LOCALVER_B} \* 256 + 0${LOCALVER_C})
EXTRA_CFLAGS += -DSLE_LOCALVERSION_CODE=${SLE_LOCALVERSION_CODE}
endif
endif
# get the kernel version - we use this to find the correct install path
KVER := $(shell ${CC} ${EXTRA_CFLAGS} -E -dM ${VERSION_FILE} | grep UTS_RELEASE | \
awk '{ print $$3 }' | sed 's/\"//g')
# assume source symlink is the same as build, otherwise adjust KOBJ
ifneq (,$(wildcard /lib/modules/${KVER}/build))
ifneq (${KSRC},$(call readlink,/lib/modules/${KVER}/build))
KOBJ=/lib/modules/${KVER}/build
endif
endif
ifeq (${KVER_CODE},)
KVER_CODE := $(shell ${CC} ${EXTRA_CFLAGS} -E -dM ${VSP} 2> /dev/null |\
grep -m 1 LINUX_VERSION_CODE | awk '{ print $$3 }' | sed 's/\"//g')
endif
# minimum_kver_check
#
# helper function to provide uniform output for different drivers to abort the
# build based on kernel version check. Usage: "$(call minimum_kver_check,2,6,XX)".
define _minimum_kver_check
ifeq (0,$(shell [ ${KVER_CODE} -lt $(call get_kvercode,${1},${2},${3}) ]; echo "$$?"))
$$(warning *** Aborting the build.)
$$(error This driver is not supported on kernel versions older than ${1}.${2}.${3})
endif
endef
# public wrapper: $(call minimum_kver_check,MAJ,MIN,PATCH) evals the check
# above so the ifeq/error expand as makefile syntax, not as recipe text
minimum_kver_check = $(eval $(call _minimum_kver_check,${1},${2},${3}))

View File

@ -0,0 +1,3 @@
# Kbuild fragment: build mdev.ko (from mdev_drv.c) when CONFIG_MDEV is set
obj-$(CONFIG_MDEV) := mdev.o
mdev-y := mdev_drv.o

View File

@ -0,0 +1,670 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2017-2021 Pensando Systems, Inc
* Copyright (C) 2008 Magnus Damm
*
* Based on uio_pdrv.c by Uwe Kleine-Koenig,
* Copyright (C) 2008 by Digi International Inc.
*/
#include <linux/module.h>
#include <linux/version.h>
#include <linux/netdevice.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/of.h>
#include <linux/ioctl.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/uio_driver.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/stringify.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include "mdev_drv.h"
/* Sizes of the device resource regions handled by this driver
 * (values in bytes; presumably fixed by the ELBA device layout -
 * TODO confirm against mdev_drv.h)
 */
#define DEVINFO_SIZE 0x1000
#define DRVCFG_SIZE 0x80
#define MSIXCFG_SIZE 0x40
#define DOORBELL_PG_SIZE 0x8
#define TSTAMP_SIZE 0x8
#define MDEV_NODE_NAME_LEN 0x8
/* Kinds of managed platform sub-devices */
typedef enum mdev_type {
MDEV_TYPE_MNET,
MDEV_TYPE_MCRYPT,
} mdev_type_t;
struct mdev_dev;
/* Per-type hooks: gather platform resources for a create request,
 * and attach/detach the resulting platform device to its driver.
 */
typedef int (*platform_rsrc_func_t)(struct mdev_dev *,
struct mdev_create_req *);
typedef int (*attach_func_t)(struct platform_device *);
typedef int (*detach_func_t)(struct platform_device *);
/* Book-keeping for one managed device, linked on mdev_list */
struct mdev_dev {
struct device_node *of_node;
struct platform_device *pdev;
struct list_head node;
mdev_type_t type;
platform_rsrc_func_t platform_rsrc;
attach_func_t attach;
detach_func_t detach;
};
/* Driver-global state: device list, char-dev plumbing, sysfs devices */
LIST_HEAD(mdev_list);
static struct class *mdev_class;
static dev_t mdev_dev;
struct device *mdev_device;
struct device *mnet_device;
static unsigned int mdev_major;
static struct cdev mdev_cdev;
/* Yuck: reach directly into the ionic driver's probe/remove instead of
 * going through a shared header
 */
extern int ionic_probe(struct platform_device *pfdev);
extern int ionic_remove(struct platform_device *pfdev);
/* Private data for each UIO device instance created by this driver */
struct uio_pdrv_genirq_platdata {
struct uio_info *uioinfo;
spinlock_t lock;
unsigned long flags;
struct platform_device *pdev;
};
/* Bits in uio_pdrv_genirq_platdata.flags */
enum {
UIO_IRQ_DISABLED = 0,
};
/* UIO open hook: no per-open state to set up */
static int uio_pdrv_genirq_open(struct uio_info *info, struct inode *inode)
{
return 0;
}
/* UIO release hook: nothing to tear down */
static int uio_pdrv_genirq_release(struct uio_info *info, struct inode *inode)
{
return 0;
}
/*
 * Hard-IRQ handler: mask the interrupt line and record that it is
 * disabled; userspace re-enables it via uio_pdrv_genirq_irqcontrol().
 */
static irqreturn_t uio_pdrv_genirq_handler(int irq, struct uio_info *dev_info)
{
struct uio_pdrv_genirq_platdata *priv = dev_info->priv;
/* Just disable the interrupt in the interrupt controller, and
* remember the state so we can allow user space to enable it later.
*/
spin_lock(&priv->lock);
if (!__test_and_set_bit(UIO_IRQ_DISABLED, &priv->flags))
disable_irq_nosync(irq);
spin_unlock(&priv->lock);
return IRQ_HANDLED;
}
/*
 * UIO irqcontrol hook: userspace writes 1/0 to enable/disable the IRQ.
 * The UIO_IRQ_DISABLED flag keeps enable/disable calls balanced so the
 * irq core's depth counter is never skewed.
 */
static int uio_pdrv_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on)
{
struct uio_pdrv_genirq_platdata *priv = dev_info->priv;
unsigned long flags;
/* Allow user space to enable and disable the interrupt
* in the interrupt controller, but keep track of the
* state to prevent per-irq depth damage.
*
* Serialize this operation to support multiple tasks and concurrency
* with irq handler on SMP systems.
*/
spin_lock_irqsave(&priv->lock, flags);
if (irq_on) {
if (__test_and_clear_bit(UIO_IRQ_DISABLED, &priv->flags))
enable_irq(dev_info->irq);
} else {
if (!__test_and_set_bit(UIO_IRQ_DISABLED, &priv->flags))
disable_irq_nosync(dev_info->irq);
}
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
/*
 * Attach callback used when userspace asks for a UIO-style device:
 * builds a uio_info from the platform device's MEM resources and
 * optional IRQ, then registers it with the UIO core.
 *
 * Returns 0 on success or a negative errno.
 */
static int mdev_uio_pdrv_genirq_probe(struct platform_device *pdev)
{
	struct uio_info *uioinfo = dev_get_platdata(&pdev->dev);
	struct uio_pdrv_genirq_platdata *priv;
	struct uio_mem *uiomem;
	int ret = -EINVAL;
	int i;

	if (pdev->dev.of_node) {
		/* alloc uioinfo for one device */
		uioinfo = devm_kzalloc(&pdev->dev, sizeof(*uioinfo),
				       GFP_KERNEL);
		if (!uioinfo) {
			dev_err(&pdev->dev, "unable to kmalloc\n");
			return -ENOMEM;
		}
		uioinfo->name = pdev->name;
		uioinfo->version = "devicetree";
		/* Multiple IRQs are not supported */
	}

	if (!uioinfo || !uioinfo->name || !uioinfo->version) {
		dev_err(&pdev->dev, "missing platform_data\n");
		return ret;
	}

	if (uioinfo->handler || uioinfo->irqcontrol ||
	    uioinfo->irq_flags & IRQF_SHARED) {
		dev_err(&pdev->dev, "interrupt configuration error\n");
		return ret;
	}

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(&pdev->dev, "unable to kmalloc\n");
		return -ENOMEM;
	}

	priv->uioinfo = uioinfo;
	spin_lock_init(&priv->lock);
	priv->flags = 0; /* interrupt is enabled to begin with */
	priv->pdev = pdev;

	if (!uioinfo->irq) {
		/* platform_get_irq() logs an error on failure on newer
		 * kernels; use the _optional variant there. */
#if (KERNEL_VERSION(5, 10, 0) > LINUX_VERSION_CODE)
		ret = platform_get_irq(pdev, 0);
#else
		ret = platform_get_irq_optional(pdev, 0);
#endif
		uioinfo->irq = ret;
		if (ret == -ENXIO && pdev->dev.of_node)
			uioinfo->irq = UIO_IRQ_NONE;	/* polled mode */
		else if (ret < 0) {
			dev_err(&pdev->dev, "failed to get IRQ: %d\n", ret);
			return ret;
		}
	}

	/* Map each MEM resource into a UIO mapping (page-aligned). */
	uiomem = &uioinfo->mem[0];
	for (i = 0; i < pdev->num_resources; ++i) {
		struct resource *r = &pdev->resource[i];

		if (r->flags != IORESOURCE_MEM)
			continue;

		if (uiomem >= &uioinfo->mem[MAX_UIO_MAPS]) {
			dev_warn(&pdev->dev, "device has more than "
				 __stringify(MAX_UIO_MAPS)
				 " I/O memory resources.\n");
			break;
		}

		uiomem->memtype = UIO_MEM_PHYS;
		uiomem->addr = r->start & PAGE_MASK;
		uiomem->offs = (r->start & (PAGE_SIZE - 1));
		uiomem->size = PAGE_ALIGN(resource_size(r));
		dev_info(&pdev->dev, "resource %d size %llu", i, uiomem->size);
		uiomem->name = r->name;
		++uiomem;
	}

	/* Zero-size the unused mappings. */
	while (uiomem < &uioinfo->mem[MAX_UIO_MAPS]) {
		uiomem->size = 0;
		++uiomem;
	}

	/* This driver requires no hardware specific kernel code to handle
	 * interrupts. Instead, the interrupt handler simply disables the
	 * interrupt in the interrupt controller. User space is responsible
	 * for performing hardware specific acknowledge and re-enabling of
	 * the interrupt in the interrupt controller.
	 *
	 * Interrupt sharing is not supported.
	 */
	uioinfo->handler = uio_pdrv_genirq_handler;
	uioinfo->irqcontrol = uio_pdrv_genirq_irqcontrol;
	uioinfo->open = uio_pdrv_genirq_open;
	uioinfo->release = uio_pdrv_genirq_release;
	uioinfo->priv = priv;

	ret = uio_register_device(&pdev->dev, priv->uioinfo);
	if (ret) {
		dev_err(&pdev->dev, "unable to register uio device\n");
		return ret;
	}

	platform_set_drvdata(pdev, priv);
	return 0;
}
/*
 * Detach callback for a UIO-style device: unregister from the UIO core,
 * then drop our callbacks so a stale uio_info cannot re-enter this module.
 */
static int mdev_uio_pdrv_genirq_remove(struct platform_device *pdev)
{
	struct uio_pdrv_genirq_platdata *plat = platform_get_drvdata(pdev);

	uio_unregister_device(plat->uioinfo);

	plat->uioinfo->handler = NULL;
	plat->uioinfo->irqcontrol = NULL;

	return 0;
}
/* Char-device open: stateless, all work happens in the ioctl handler. */
static int mdev_open(struct inode *inode, struct file *filep)
{
	return 0;
}

/* Char-device release: nothing to tear down. */
static int mdev_close(struct inode *i, struct file *f)
{
	return 0;
}
/*
 * Translate a userspace create request into the five MEM resources an
 * mnet device exposes, in the fixed order its consumers expect:
 * devinfo, drvcfg/intr_ctrl, msixcfg, doorbell, tstamp.
 */
static int mdev_get_mnet_platform_rsrc(struct mdev_dev *mdev,
				       struct mdev_create_req *req)
{
	struct resource rsrc[5] = { { 0 } };

	rsrc[0].flags = IORESOURCE_MEM;			/* devinfo */
	rsrc[0].start = req->regs_pa;
	rsrc[0].end   = req->regs_pa + DEVINFO_SIZE - 1;

	rsrc[1].flags = IORESOURCE_MEM;			/* drvcfg/intr_ctrl */
	rsrc[1].start = req->drvcfg_pa;
	rsrc[1].end   = req->drvcfg_pa + DRVCFG_SIZE - 1;

	rsrc[2].flags = IORESOURCE_MEM;			/* msixcfg */
	rsrc[2].start = req->msixcfg_pa;
	rsrc[2].end   = req->msixcfg_pa + MSIXCFG_SIZE - 1;

	rsrc[3].flags = IORESOURCE_MEM;			/* doorbell */
	rsrc[3].start = req->doorbell_pa;
	rsrc[3].end   = req->doorbell_pa + DOORBELL_PG_SIZE - 1;

	rsrc[4].flags = IORESOURCE_MEM;			/* tstamp */
	rsrc[4].start = req->tstamp_pa;
	rsrc[4].end   = req->tstamp_pa + TSTAMP_SIZE - 1;

	/* The platform core duplicates the array, so a local is fine. */
	return platform_device_add_resources(mdev->pdev, rsrc,
					     ARRAY_SIZE(rsrc));
}
/*
 * Translate a userspace create request into the four MEM resources an
 * mcrypt device exposes: devinfo, drvcfg/intr_ctrl, msixcfg, doorbell
 * (no timestamp region, unlike mnet).
 */
static int mdev_get_mcrypt_platform_rsrc(struct mdev_dev *mdev,
					 struct mdev_create_req *req)
{
	struct resource rsrc[4] = { { 0 } };

	rsrc[0].flags = IORESOURCE_MEM;			/* devinfo */
	rsrc[0].start = req->regs_pa;
	rsrc[0].end   = req->regs_pa + DEVINFO_SIZE - 1;

	rsrc[1].flags = IORESOURCE_MEM;			/* drvcfg/intr_ctrl */
	rsrc[1].start = req->drvcfg_pa;
	rsrc[1].end   = req->drvcfg_pa + DRVCFG_SIZE - 1;

	rsrc[2].flags = IORESOURCE_MEM;			/* msixcfg */
	rsrc[2].start = req->msixcfg_pa;
	rsrc[2].end   = req->msixcfg_pa + MSIXCFG_SIZE - 1;

	rsrc[3].flags = IORESOURCE_MEM;			/* doorbell */
	rsrc[3].start = req->doorbell_pa;
	rsrc[3].end   = req->doorbell_pa + DOORBELL_PG_SIZE - 1;

	/* The platform core duplicates the array, so a local is fine. */
	return platform_device_add_resources(mdev->pdev, rsrc,
					     ARRAY_SIZE(rsrc));
}
/*
 * Bind a slot to its platform device, attach the requested resources and
 * name, and call the slot's attach (probe) function.
 *
 * Returns 0 on success or a negative errno; on failure mdev->pdev is
 * reset to NULL so the slot can be reused.
 */
static int mdev_attach_one(struct mdev_dev *mdev,
			   struct mdev_create_req *req)
{
	char *mdev_name = NULL;
	int err = 0;

	mdev->pdev = of_find_device_by_node(mdev->of_node);
	if (!mdev->pdev) {
		dev_err(mdev_device, "Can't find device for of_node %s\n",
			mdev->of_node->name);
		err = -ENXIO;
		goto err;
	}

	err = (*mdev->platform_rsrc)(mdev, req);
	if (err) {
		dev_err(mdev_device, "Can't get platform resources\n");
		err = -ENOSPC;
		goto err_unset_pdev;
	}

	/* +1 and kzalloc guarantee NUL termination after the strncpy. */
	mdev_name = devm_kzalloc(mdev_device, MDEV_NAME_LEN + 1, GFP_KERNEL);
	if (!mdev_name) {
		dev_err(mdev_device, "Can't allocate memory for name\n");
		err = -ENOMEM;
		goto err_unset_pdev;
	}
	strncpy(mdev_name, req->name, MDEV_NAME_LEN);
	/* NOTE(review): this overwrites pdev->name without saving the
	 * original, so it cannot be restored on the error path. */
	mdev->pdev->name = mdev_name;

	/* call probe with this platform_device */
	err = (*mdev->attach)(mdev->pdev);
	if (err) {
		dev_err(mdev_device, "probe for %s failed: %d\n",
			mdev->pdev->name, err);
		goto err_free_name;
	}
	dev_info(mdev_device, "%s created successfully\n", mdev->pdev->name);
	return 0;

err_free_name:
	/* Freeing is intentionally disabled: pdev->name still points at the
	 * buffer, so freeing here would leave a dangling name.  The devm
	 * allocation is reclaimed when mdev_device goes away. */
	//devm_kfree(mdev_device, mdev->pdev->name);
	//mdev->pdev->name = NULL;
err_unset_pdev:
	mdev->pdev = NULL;
err:
	return err;
}
/*
 * Detach a slot's platform device by calling its detach (remove)
 * function.  A slot with no pdev is quietly treated as already detached.
 *
 * Returns 0 on success or the detach function's negative errno, in which
 * case the slot keeps its pdev.
 */
static int mdev_detach_one(struct mdev_dev *mdev)
{
	int err;

	if (!mdev->pdev)
		return 0;

	dev_info(mdev_device, "Removing interface %s\n", mdev->pdev->name);
	err = (*mdev->detach)(mdev->pdev);
	if (err) {
		dev_err(mdev_device, "Failed to remove %s\n",
			mdev->pdev->name);
		return err;
	}

	dev_info(mdev_device, "Successfully removed %s\n", mdev->pdev->name);
	/* Name buffer deliberately not freed here; see mdev_attach_one(). */
	//devm_kfree(mdev_device, mdev->pdev->name);
	mdev->pdev = NULL;
	return 0;
}
/* A create ioctl may only claim a slot of the matching device class. */
static inline bool mdev_ioctl_matches(struct mdev_dev *mdev, uint32_t cmd)
{
	return (cmd == MDEV_CREATE_MNET && mdev->type == MDEV_TYPE_MNET) ||
	       (cmd == MDEV_CREATE_MCRYPT && mdev->type == MDEV_TYPE_MCRYPT);
}
/*
 * Control-device ioctl handler.
 *
 * MDEV_CREATE_MNET / MDEV_CREATE_MCRYPT: copy in an mdev_create_req and
 * attach it to the first free slot of the matching type.  A request whose
 * name is already attached is quietly accepted (returns 0).
 * MDEV_DESTROY: copy in a device name and detach the matching slot.
 *
 * Returns 0 on success or a negative errno.  If no free/matching slot is
 * found the default -EDQUOT ("quota exceeded") is returned.
 */
static long mdev_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	char name[MDEV_NAME_LEN+1] = {0};
	struct mdev_create_req req;
	struct mdev_dev *mdev;
	int ret = -EDQUOT;

	switch (cmd) {
	case MDEV_CREATE_MNET:
	case MDEV_CREATE_MCRYPT:
		if (copy_from_user(&req, argp, sizeof(req))) {
			dev_err(mdev_device, "copy_from_user failed\n");
			ret = -EFAULT;
			break;
		}
		/* BUGFIX: req.name comes from userspace and need not be
		 * NUL-terminated; force termination before it is used as a
		 * C string (dev_info below, pdev name in mdev_attach_one).
		 */
		req.name[MDEV_NAME_LEN - 1] = '\0';

		dev_info(mdev_device, "Creating %s %s\n",
			 req.name, req.is_uio_dev ? "(UIO)" : "");

		/* scan the list to see if it already exists,
		 * and if so, quietly ignore this request
		 */
		list_for_each_entry(mdev, &mdev_list, node) {
			if (mdev->pdev &&
			    !strncmp(mdev->pdev->name, req.name, MDEV_NAME_LEN))
				return 0;
		}

		/* find the first useful empty slot */
		list_for_each_entry(mdev, &mdev_list, node) {
			if (mdev->pdev || !mdev_ioctl_matches(mdev, cmd))
				continue;

			if (req.is_uio_dev) {
				mdev->attach = mdev_uio_pdrv_genirq_probe;
				mdev->detach = mdev_uio_pdrv_genirq_remove;
			} else if (mdev->type == MDEV_TYPE_MNET) {
				/* non-UIO mnet devices go to ionic */
				mdev->attach = ionic_probe;
				mdev->detach = ionic_remove;
			} else {
				/* non-UIO mcrypt has no kernel driver */
				ret = -EINVAL;
				break;
			}

			ret = mdev_attach_one(mdev, &req);
			break;
		}
		break;

	case MDEV_DESTROY:
		/* name[] is one byte longer than the copy, so it is
		 * always NUL-terminated. */
		if (copy_from_user(name, argp, MDEV_NAME_LEN)) {
			dev_err(mdev_device, "copy_from_user failed\n");
			ret = -EFAULT;
			break;
		}
		dev_info(mdev_device, "Removing %s\n", name);

		list_for_each_entry(mdev, &mdev_list, node) {
			if (!mdev->pdev ||
			    strncmp(mdev->pdev->name, name, MDEV_NAME_LEN))
				continue;
			ret = mdev_detach_one(mdev);
			break;
		}
		break;

	default:
		dev_dbg(mdev_device, "Invalid ioctl %d\n", cmd);
		ret = -EINVAL;
		break;
	}

	return ret;
}
/* Platform-driver probe: the real work is driven by ioctl, nothing here. */
static int mdev_probe(struct platform_device *pfdev)
{
	return 0;
}

/* Platform-driver remove: detach and free every slot on the list. */
static int mdev_remove(struct platform_device *pfdev)
{
	struct mdev_dev *mdev, *tmp;

	list_for_each_entry_safe(mdev, tmp, &mdev_list, node) {
		/* best effort; a failed detach still gets unlinked/freed */
		(void)mdev_detach_one(mdev);
		list_del(&mdev->node);
		devm_kfree(mdev_device, mdev);
	}

	return 0;
}
/* Device-tree compatibles this driver binds to. */
static const struct of_device_id mdev_of_match[] = {
	{.compatible = "pensando,mnet"},
	{.compatible = "pensando,mcrypt"},
	{/* end of table */}
};

static struct platform_driver mdev_driver = {
	.probe = mdev_probe,
	.remove = mdev_remove,
	.driver = {
		.name = "pensando-mdev",
		.owner = THIS_MODULE,
		.of_match_table = mdev_of_match,
	},
};

/* File operations for the control char device (/dev node). */
static const struct file_operations mdev_fops = {
	.owner = THIS_MODULE,
	.open = mdev_open,
	.release = mdev_close,
	.unlocked_ioctl = mdev_ioctl,
};
/*
 * Scan the device tree for up to max_dev nodes named "<pfx>0".."<pfx>N-1"
 * and add a slot to mdev_list for each one found.  Missing nodes are
 * skipped silently.
 *
 * Returns 0 on success or -ENOMEM; partially built list entries are left
 * for the caller (mdev_init) to unwind.
 */
static int mdev_init_dev_list(uint32_t max_dev, const char *pfx,
			      platform_rsrc_func_t platform_rsrc)
{
	char of_node_name[MDEV_NODE_NAME_LEN + 1] = {0};
	struct mdev_dev *mdev;
	uint32_t i;

	for (i = 0; i < max_dev; i++) {
		mdev = devm_kzalloc(mdev_device, sizeof(*mdev), GFP_KERNEL);
		if (!mdev)
			return -ENOMEM;

		snprintf(of_node_name, sizeof(of_node_name), "%s%u",
			 pfx, i);
		mdev->of_node = of_find_node_by_name(NULL, of_node_name);

		/* skip any node not found in device tree */
		if (mdev->of_node == NULL) {
			devm_kfree(mdev_device, mdev);
			continue;
		}

		dev_info(mdev_device, "Found node %s\n", mdev->of_node->name);
		/* BUGFIX: record the slot's device class.  Previously
		 * mdev->type was left zeroed (== MDEV_TYPE_MNET) for every
		 * slot, so MDEV_CREATE_MCRYPT could never match a slot and
		 * mcrypt slots answered to mnet create requests.  Derive the
		 * type from the resource callback to keep the signature
		 * unchanged.
		 */
		mdev->type = (platform_rsrc == mdev_get_mcrypt_platform_rsrc)
				? MDEV_TYPE_MCRYPT : MDEV_TYPE_MNET;
		mdev->platform_rsrc = platform_rsrc;
		list_add_tail(&mdev->node, &mdev_list);
		// TODO: Should this put() happen when driver unloads?
		of_node_put(mdev->of_node);
	}

	return 0;
}
/*
 * Module init: create the device class and control node(s), register the
 * char device, scan the device tree for mnet/mcrypt slots, and register
 * the platform driver.  Unwinds fully on any failure.
 */
static int __init mdev_init(void)
{
	struct mdev_dev *mdev, *tmp;
	int ret;

	mdev_class = class_create(THIS_MODULE, DRV_NAME);
	if (IS_ERR(mdev_class)) {
		ret = PTR_ERR(mdev_class);
		goto error_out;
	}

	ret = alloc_chrdev_region(&mdev_dev, 0, NUM_MDEV_DEVICES,
				  MDEV_CHAR_DEV_NAME);
	if (ret < 0)
		goto error_destroy_class;

	mdev_major = MAJOR(mdev_dev);
	pr_info("Pensando mdev driver: mdev_major = %d\n", mdev_major);

	mdev_device = device_create(mdev_class, NULL,
				    MKDEV(mdev_major, 0), NULL, DRV_NAME);
	if (IS_ERR(mdev_device)) {
		pr_err("Failed to create device %s", DRV_NAME);
		/* BUGFIX: take the error from mdev_device; the old code
		 * used PTR_ERR(mdev_class), which is a valid pointer here
		 * and would yield a garbage "error" value. */
		ret = PTR_ERR(mdev_device);
		goto error_unregister_chrdev;
	}
	dev_info(mdev_device, "device %s created\n", DRV_NAME);

#ifndef MDEV_HACK
	mnet_device = device_create(mdev_class, NULL,
				    MKDEV(mdev_major, 1), NULL, DRV_NAME_ALT);
	if (IS_ERR(mnet_device)) {
		pr_err("Failed to create device %s", DRV_NAME_ALT);
		ret = PTR_ERR(mnet_device);	/* BUGFIX: was mdev_class */
		goto error_destroy_mdev;
	}
	dev_info(mdev_device, "device %s created\n", DRV_NAME_ALT);
#endif

	cdev_init(&mdev_cdev, &mdev_fops);
	mdev_cdev.owner = THIS_MODULE;
	ret = cdev_add(&mdev_cdev, mdev_dev, NUM_MDEV_DEVICES);
	if (ret) {
		dev_err(mdev_device, "Error adding character device %s\n",
			MDEV_CHAR_DEV_NAME);
		goto error_destroy_mnet;
	}

	ret = mdev_init_dev_list(MAX_MNET_DEVICES, "mnet",
				 mdev_get_mnet_platform_rsrc);
	if (ret)
		goto error_destroy_cdev;

	ret = mdev_init_dev_list(MAX_MCRYPT_DEVICES, "mcrypt",
				 mdev_get_mcrypt_platform_rsrc);
	if (ret)
		goto error_destroy_list;

	ret = platform_driver_register(&mdev_driver);
	if (ret)
		goto error_destroy_list;

	return 0;

error_destroy_list:
	list_for_each_entry_safe(mdev, tmp, &mdev_list, node) {
		list_del(&mdev->node);
		devm_kfree(mdev_device, mdev);
	}
error_destroy_cdev:
	cdev_del(&mdev_cdev);
error_destroy_mnet:
#ifndef MDEV_HACK
	device_destroy(mdev_class, MKDEV(mdev_major, 1));
error_destroy_mdev:
#endif
	device_destroy(mdev_class, MKDEV(mdev_major, 0));
error_unregister_chrdev:
	unregister_chrdev_region(mdev_dev, NUM_MDEV_DEVICES);
error_destroy_class:
	class_destroy(mdev_class);
error_out:
	return ret;
}
/* Module exit: tear everything down in reverse order of mdev_init(). */
static void __exit mdev_cleanup(void)
{
	/* mdev_remove() detaches and frees the slot list */
	platform_driver_unregister(&mdev_driver);
	cdev_del(&mdev_cdev);
#ifndef MDEV_HACK
	device_destroy(mdev_class, MKDEV(mdev_major, 1));
#endif
	device_destroy(mdev_class, MKDEV(mdev_major, 0));
	unregister_chrdev_region(mdev_dev, NUM_MDEV_DEVICES);
	class_destroy(mdev_class);
}
module_init(mdev_init);
module_exit(mdev_cleanup);
MODULE_AUTHOR("Pensando Systems");
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

View File

@ -0,0 +1,47 @@
#ifndef _MDEV_DRV_H
#define _MDEV_DRV_H

#include <linux/ioctl.h>

#define DRV_VERSION "0.1"
#define DRV_DESCRIPTION "Pensando mdev Driver"

/* XXX There is a bug in older versions of the mnet driver - it fails to call
 * cdev_del() on removal, leaving a landmine in the kobj_map. We can work around
 * the issue by making sure this module loads at the same point in the map.
 * Hence leaving the DRV_NAME as "mnet" and creating only a single device.
 *
 * Ideally this can be removed when we no longer support NDU from affected versions.
 */
#define MDEV_HACK

#ifdef MDEV_HACK
#define DRV_NAME "mnet"
#define MDEV_CHAR_DEV_NAME "pen-mnet"
#define NUM_MDEV_DEVICES 1 /* The parent device(s) */
#else
#define DRV_NAME "mdev"
#define DRV_NAME_ALT "mnet"
#define MDEV_CHAR_DEV_NAME "pen-mdev"
#define NUM_MDEV_DEVICES 2 /* The parent device(s) */
#endif

/* Upper bounds on device-tree slots scanned at module init. */
#define MAX_MNET_DEVICES 32
#define MAX_MCRYPT_DEVICES 32
#define MDEV_NAME_LEN 32

/* ioctl payload for the create commands: physical addresses of the
 * device's MMIO regions plus the child device name.  Shared with
 * userspace - layout must not change. */
struct mdev_create_req {
	uint64_t regs_pa;	/* devinfo registers */
	uint64_t drvcfg_pa;	/* drvcfg/intr_ctrl */
	uint64_t msixcfg_pa;	/* MSI-X config */
	uint64_t doorbell_pa;	/* doorbell page */
	uint64_t tstamp_pa;	/* timestamp (mnet only) */
	int is_uio_dev;		/* nonzero: expose via UIO, not ionic */
	char name[MDEV_NAME_LEN];
};

#define MDEV_CREATE_MNET	_IOWR('Q', 11, struct mdev_create_req)
#define MDEV_DESTROY		_IOW('Q', 12, const char*)
#define MDEV_CREATE_MCRYPT	_IOWR('Q', 13, struct mdev_create_req)

#endif /* _MDEV_DRV_H */

View File

@ -0,0 +1,3 @@
# Kbuild: build the mnet UIO module when CONFIG_MNET_UIO_PDRV_GENIRQ is set.
obj-$(CONFIG_MNET_UIO_PDRV_GENIRQ) := mnet_uio_pdrv_genirq.o
mnet_uio_pdrv_genirq-y := mnet_uio_pdrv_genirq_drv.o

View File

@ -0,0 +1,291 @@
/*
* drivers/uio/uio_pdrv_genirq.c
*
* Userspace I/O platform driver with generic IRQ handling code.
*
* Copyright (C) 2008 Magnus Damm
*
* Based on uio_pdrv.c by Uwe Kleine-Koenig,
* Copyright (C) 2008 by Digi International Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*/
#include <linux/platform_device.h>
#include <linux/uio_driver.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/stringify.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#define DRIVER_NAME "uio_pdrv_genirq"

/* Per-device state for the generic-irq UIO driver. */
struct uio_pdrv_genirq_platdata {
	struct uio_info *uioinfo;
	spinlock_t lock;	/* guards flags against the irq handler */
	unsigned long flags;
	struct platform_device *pdev;
};

/* Bits in uio_pdrv_genirq_platdata.flags */
enum {
	UIO_IRQ_DISABLED = 0,
};
/* UIO open hook; runtime-PM wake-up is intentionally disabled. */
static int uio_pdrv_genirq_open(struct uio_info *info, struct inode *inode)
{
	// struct uio_pdrv_genirq_platdata *priv = info->priv;

	/* Wait until the Runtime PM code has woken up the device */
	//pm_runtime_get_sync(&priv->pdev->dev);
	return 0;
}

/* UIO release hook; runtime-PM idle notification intentionally disabled. */
static int uio_pdrv_genirq_release(struct uio_info *info, struct inode *inode)
{
	// struct uio_pdrv_genirq_platdata *priv = info->priv;

	/* Tell the Runtime PM code that the device has become idle */
	//pm_runtime_put_sync(&priv->pdev->dev);
	return 0;
}
/*
 * Hard-irq handler: mask the line and remember that it is masked so
 * userspace can re-enable it via irqcontrol.
 */
static irqreturn_t uio_pdrv_genirq_handler(int irq, struct uio_info *dev_info)
{
	struct uio_pdrv_genirq_platdata *priv = dev_info->priv;

	/* Just disable the interrupt in the interrupt controller, and
	 * remember the state so we can allow user space to enable it later.
	 */
	spin_lock(&priv->lock);
	if (!__test_and_set_bit(UIO_IRQ_DISABLED, &priv->flags))
		disable_irq_nosync(irq);
	spin_unlock(&priv->lock);

	return IRQ_HANDLED;
}
/*
 * Userspace write()s a 0/1 to the UIO fd to disable/enable the interrupt;
 * the UIO_IRQ_DISABLED bit keeps the enable/disable depth balanced.
 */
static int uio_pdrv_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on)
{
	struct uio_pdrv_genirq_platdata *priv = dev_info->priv;
	unsigned long flags;

	/* Allow user space to enable and disable the interrupt
	 * in the interrupt controller, but keep track of the
	 * state to prevent per-irq depth damage.
	 *
	 * Serialize this operation to support multiple tasks and concurrency
	 * with irq handler on SMP systems.
	 */
	spin_lock_irqsave(&priv->lock, flags);
	if (irq_on) {
		if (__test_and_clear_bit(UIO_IRQ_DISABLED, &priv->flags))
			enable_irq(dev_info->irq);
	} else {
		if (!__test_and_set_bit(UIO_IRQ_DISABLED, &priv->flags))
			disable_irq_nosync(dev_info->irq);
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
/*
 * Probe entry, exported for use by the mdev driver: builds a uio_info from
 * the platform device's MEM resources and optional IRQ, then registers it
 * with the UIO core.  Returns 0 on success or a negative errno.
 */
int mnet_uio_pdrv_genirq_probe(struct platform_device *pdev)
{
	struct uio_info *uioinfo = dev_get_platdata(&pdev->dev);
	struct uio_pdrv_genirq_platdata *priv;
	struct uio_mem *uiomem;
	int ret = -EINVAL;
	int i;

	if (pdev->dev.of_node) {
		/* alloc uioinfo for one device */
		uioinfo = devm_kzalloc(&pdev->dev, sizeof(*uioinfo),
				       GFP_KERNEL);
		if (!uioinfo) {
			dev_err(&pdev->dev, "unable to kmalloc\n");
			return -ENOMEM;
		}
		uioinfo->name = pdev->name;
		uioinfo->version = "devicetree";
		/* Multiple IRQs are not supported */
	}

	if (!uioinfo || !uioinfo->name || !uioinfo->version) {
		dev_err(&pdev->dev, "missing platform_data\n");
		return ret;
	}

	if (uioinfo->handler || uioinfo->irqcontrol ||
	    uioinfo->irq_flags & IRQF_SHARED) {
		dev_err(&pdev->dev, "interrupt configuration error\n");
		return ret;
	}

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(&pdev->dev, "unable to kmalloc\n");
		return -ENOMEM;
	}

	priv->uioinfo = uioinfo;
	spin_lock_init(&priv->lock);
	priv->flags = 0; /* interrupt is enabled to begin with */
	priv->pdev = pdev;

	if (!uioinfo->irq) {
		ret = platform_get_irq(pdev, 0);
		uioinfo->irq = ret;
		if (ret == -ENXIO && pdev->dev.of_node)
			uioinfo->irq = UIO_IRQ_NONE;	/* polled mode */
		else if (ret < 0) {
			dev_err(&pdev->dev, "failed to get IRQ\n");
			return ret;
		}
	}

	/* Map each MEM resource into a UIO mapping (page-aligned). */
	uiomem = &uioinfo->mem[0];
	for (i = 0; i < pdev->num_resources; ++i) {
		struct resource *r = &pdev->resource[i];

		if (r->flags != IORESOURCE_MEM)
			continue;

		if (uiomem >= &uioinfo->mem[MAX_UIO_MAPS]) {
			dev_warn(&pdev->dev, "device has more than "
				 __stringify(MAX_UIO_MAPS)
				 " I/O memory resources.\n");
			break;
		}

		uiomem->memtype = UIO_MEM_PHYS;
		uiomem->addr = r->start & PAGE_MASK;
		uiomem->offs = (r->start & (PAGE_SIZE - 1));
		uiomem->size = PAGE_ALIGN(resource_size(r));
#if 1
		dev_info(&pdev->dev, "resource %d size %llu", i, uiomem->size);
		uiomem->name = r->name;
#endif
		++uiomem;
	}

	/* Zero-size the unused mappings. */
	while (uiomem < &uioinfo->mem[MAX_UIO_MAPS]) {
		uiomem->size = 0;
		++uiomem;
	}

	/* This driver requires no hardware specific kernel code to handle
	 * interrupts. Instead, the interrupt handler simply disables the
	 * interrupt in the interrupt controller. User space is responsible
	 * for performing hardware specific acknowledge and re-enabling of
	 * the interrupt in the interrupt controller.
	 *
	 * Interrupt sharing is not supported.
	 */
	uioinfo->handler = uio_pdrv_genirq_handler;
	uioinfo->irqcontrol = uio_pdrv_genirq_irqcontrol;
	uioinfo->open = uio_pdrv_genirq_open;
	uioinfo->release = uio_pdrv_genirq_release;
	uioinfo->priv = priv;

	/* Enable Runtime PM for this device:
	 * The device starts in suspended state to allow the hardware to be
	 * turned off by default. The Runtime PM bus code should power on the
	 * hardware and enable clocks at open().
	 */
	//pm_runtime_enable(&pdev->dev);

	ret = uio_register_device(&pdev->dev, priv->uioinfo);
	if (ret) {
		dev_err(&pdev->dev, "unable to register uio device\n");
		// pm_runtime_disable(&pdev->dev);
		return ret;
	}

	platform_set_drvdata(pdev, priv);
	return 0;
}
/*
 * Remove entry, exported for use by the mdev driver: unregister from the
 * UIO core, then clear our callbacks so a stale uio_info cannot re-enter
 * this module.  (Runtime PM teardown is intentionally disabled.)
 */
int mnet_uio_pdrv_genirq_remove(struct platform_device *pdev)
{
	struct uio_pdrv_genirq_platdata *plat = platform_get_drvdata(pdev);

	uio_unregister_device(plat->uioinfo);
	// pm_runtime_disable(&pdev->dev);

	plat->uioinfo->handler = NULL;
	plat->uioinfo->irqcontrol = NULL;

	return 0;
}
/* The standalone-driver scaffolding below (runtime-PM nops, OF match via
 * module parameter, platform_driver registration) is compiled out: this
 * module is driven directly through the exported probe/remove functions
 * instead of binding as its own platform driver. */
#if 0
static int uio_pdrv_genirq_runtime_nop(struct device *dev)
{
	/* Runtime PM callback shared between ->runtime_suspend()
	 * and ->runtime_resume(). Simply returns success.
	 *
	 * In this driver pm_runtime_get_sync() and pm_runtime_put_sync()
	 * are used at open() and release() time. This allows the
	 * Runtime PM code to turn off power to the device while the
	 * device is unused, ie before open() and after release().
	 *
	 * This Runtime PM callback does not need to save or restore
	 * any registers since user space is responsbile for hardware
	 * register reinitialization after open().
	 */
	return 0;
}

static const struct dev_pm_ops uio_pdrv_genirq_dev_pm_ops = {
	.runtime_suspend = uio_pdrv_genirq_runtime_nop,
	.runtime_resume = uio_pdrv_genirq_runtime_nop,
};

#ifdef CONFIG_OF
static struct of_device_id uio_of_genirq_match[] = {
	{ /* This is filled with module_parm */ },
	{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, uio_of_genirq_match);
module_param_string(of_id, uio_of_genirq_match[0].compatible, 128, 0);
MODULE_PARM_DESC(of_id, "Openfirmware id of the device to be handled by uio");
#endif

static struct platform_driver uio_pdrv_genirq = {
	.probe = uio_pdrv_genirq_probe,
	.remove = uio_pdrv_genirq_remove,
	.driver = {
		.name = DRIVER_NAME,
		.pm = &uio_pdrv_genirq_dev_pm_ops,
		.of_match_table = of_match_ptr(uio_of_genirq_match),
	},
};

EXPORT_SYMBOL(uio_pdrv_genirq_probe);
EXPORT_SYMBOL(uio_pdrv_genirq_remove);

module_platform_driver(uio_pdrv_genirq);
#endif

/* Exported so the mdev driver can attach/detach UIO devices directly. */
EXPORT_SYMBOL(mnet_uio_pdrv_genirq_probe);
EXPORT_SYMBOL(mnet_uio_pdrv_genirq_remove);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("Userspace I/O platform driver with generic IRQ handling");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);

View File

@ -0,0 +1,42 @@
#
# usage: make KDIR=/path/to/kernel/build/area
#
MODNAME = pciesvc

obj-m := $(MODNAME).o

# Stamp the build time into version.h at parse time.
$(shell echo '#define PCIESVC_VERSION "'`date`'"' >version.h)

kpci :=

# All pciesvc library sources are compiled into the module.
# ($(PWD) here comes from the environment; it is (re)assigned below.)
pciesvc-src := $(shell cd $(PWD) && ls pciesvc/src/*.c)
pciesvc-obj := $(patsubst %.c,%.o,$(pciesvc-src))
kpci += $(pciesvc-obj)

INCLUDES = -I$(PWD) \
	-I$(PWD)/pciesvc/include \
	-I$(PWD)/pciesvc/src

$(MODNAME)-y := $(kpci) kpcimgr_module.o kpcinterface.o kpci_entry.o \
	kpci_kexec.o kpci_test.o pciesvc_end.o

KDIR := /lib/modules/$(shell uname -r)/build
PWD := $(shell pwd)

# Kernel release string, extracted from the kernel build tree.
UTS := X$(shell grep UTS_RELEASE $(KDIR)/include/generated/utsrelease.h)
REL := $(shell echo $(UTS) | awk '{ print $$3 }' | sed -e 's/"//g')

# The module is relocated at runtime, so disable codegen features that
# produce position-dependent data (jump tables, section splitting, ...).
KCFLAGS = -fno-jump-tables -fno-stack-protector -fno-function-sections
KCFLAGS += -fno-data-sections -fno-store-merging -mstrict-align
KCFLAGS += $(INCLUDES) -DASIC_ELBA -DPCIESVC_SYSTEM_EXTERN
KOPT = KCFLAGS="$(KCFLAGS)"

all:
	$(MAKE) -C $(KDIR) M=$(PWD) $(KOPT) modules
	@mkdir -p $(REL)
	@mv $(patsubst %.o,%.ko,$(obj-m)) $(REL)
	@echo Checking for illegal relocations...
	tools/reloc_check $(REL)/$(MODNAME).ko

clean:
	$(MAKE) -C $(KDIR) M=$(PWD) clean

View File

@ -0,0 +1,22 @@
# pciesvc module
## Overview
This driver module is a companion to the kpcimgr driver. It provides
support for servicing the PCIe bus hardware "indirect" and "notify"
transaction interrupts. This driver runs on the Pensando ARM CPU.
The core of the driver is built using sources from the pciesvc library
with only a thin wrapper of code here to package the pciesvc core
and register with the kpcimgr driver by calling "kpcimgr_module_register".
## Building
The Makefile in this directory can be used to build the module.
If the kernel build support files are in /lib/modules then "make" will
find them. If kernel build support files are in another path then
specify on the make command line with "make KDIR=/path/to/kernel".
## History
2022-12-02 - initial version

View File

@ -0,0 +1,42 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2021, 2022, Oracle and/or its affiliates.
 */

/*
 * Layout of non-Linux Memory:
 * (base address provided in device tree and may change)
 * C500 0000 SHMEM segment (pciehw_shmem_t) [0x942440 bytes ~9.25Mb]
 * C5F0 0000 kpcimgr state (kstate_t) [3 * 64k]
 * C5F3 0000 relocated code [Allow 256k]
 * C5F7 0000 available for stack when in nommu mode (64k)
 * C5F8 0000 top of stack
 * C5FF FFFF end of 1M allotted range
 */
/* Offsets below are relative to the shmem base from the device tree. */
#define SHMEM_KSTATE_OFFSET 0xF00000
#define SHMEM_KSTATE_SIZE 0x30000
#define KSTATE_STACK_OFFSET 0x80000
#define KSTATE_CODE_OFFSET (SHMEM_KSTATE_OFFSET + SHMEM_KSTATE_SIZE)
#define KSTATE_CODE_SIZE (256 * 1024)
#define KSTATE_MAGIC 0x1743BA1F

/* size of trace data arrays */
#define DATA_SIZE 100
#define MSG_BUF_SIZE 32768

/* uart and time related constants */
#define PEN_UART 0x4800		/* physical base of the console UART */
#define UART_THR 0		/* transmit holding register offset */
#define UART_LSR 0x14		/* line status register offset */
#define DATA_READY 1
#define OK_TO_WRITE 0x20
#define UART_THRE_BIT 5		/* THR-empty bit in LSR */

/* phases */
#define NOMMU 0
#define NORMAL 1
#define NUM_PHASES 2

#define MSI_INDIRECT_IDX 0 /* indirect vector */
#define MSI_NOTIFY_IDX 1 /* notify vector */
#define MSI_NVECTORS 2

View File

@ -0,0 +1,364 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2021, 2022, Oracle and/or its affiliates.
*/
/*
* Low Level Functions for kpcimgr (a.k.a. pciesvc glue layer)
*
* Author: rob.gardner@oracle.com
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/sysreg.h>
#include "kpci_constants.h"
/* Calling conventions for printl: */
/* We use x12 as the branch link register and x13 as the first function arg */
#define return_addr x12
#define arg0 x13

/* defines for exception count and cpuid */
/* (EL0/EL1 thread-pointer registers are repurposed as scratch storage) */
#define ex_count tpidr_el0
#define cpuidreg tpidr_el1

/* macro to print a char given in x17 */
/* writes to THR, then busy-waits until the LSR THRE bit reports empty */
.macro putc0
	mov	x16, PEN_UART
	strb	w17, [x16, #UART_THR]
11:	ldrb	w17, [x16, #UART_LSR]
	tbz	x17, #UART_THRE_BIT, 11b
.endm

/* macro to print a given literal char */
.macro putc, c
	mov	x17, \c
	putc0
.endm

/* macro to print a literal string (NUL-terminated, emitted inline) */
.macro print, msg
	adr	x18, 77f
76:	ldrb	w17, [x18], #1
	cbz	x17, 78f
	putc0
	b	76b
77:	.asciz	"\msg"
	.align	2
78:
.endm

/* macro to print a literal string with added cr/lf */
.macro println, msg
	print	"\msg"
	print	"\r\n"
.endm

/* macro to print a system register (name, then hex value via printl) */
.macro printsr, reg
	print	"\reg"
	putc	':'
	mrs	arg0, \reg
	adr	return_addr, 99f
	b	printl
99:
.endm

/* print delineation marker (the given char, four times) */
.macro delineate, c
	mov	x15, #4
4:	putc	\c
	sub	x15, x15, 1
	cbnz	x15, 4b
.endm
/* macro to drop to exception level 1 */
/* installs exception vectors for EL2 and EL1, saves the cpu id and the
 * original spin-table address, then ERETs from EL2 down to EL1.
 * Progress characters ('2', '#', '1', '!', 'V', 'D') go to the UART. */
.macro drop_to_el1
	mrs	x29, CurrentEL
	asr	x29, x29, 2
	tbnz	x29, #0, 88f		/* what? already at EL1 */
	putc	'2'

	/* set up EL2 exception vectors */
	adr	x29, xcpt_vectors
	msr	vbar_el2, x29
	isb

	/* do the actual drop to EL1 */
	putc	'#'
	adr	x29, 88f
	msr	elr_el2, x29
	eret
88:
	putc	'1'
	msr	cpuidreg, x2		/* save cpu number */

	/* save original address of spin table */
	adr	x29, spin_table_start_addr
	str	x0, [x29]

	/* limit number of times the exception handler runs */
	mov	x16, 2
	msr	ex_count, x16
	putc	'!'

	/* set up EL1 exception vectors */
	adr	x29, xcpt_vectors
	msr	vbar_el1, x29
	isb
	putc	'V'

	/* unmask Serror */
	msr	daifclr, #(1|4|8)
	putc	'D'
.endm

/* macro to print the exception count value */
.macro print_ex_count
	putc	'('
	print	"ex_count:"
	mrs	x15, ex_count
	add	x17, x15, '0'
	putc0
	putc	')'
.endm

/* macro to print the cpu number */
.macro print_cpuid
	putc	'<'
	print	"CPU"
	mrs	x17, cpuidreg
	add	x17, x17, '0'
	putc0
	putc	'>'
.endm

/* macro to print exception level */
.macro print_el
	putc	'['
	print	"EL"
	mrs	x17, CurrentEL
	asr	x17, x17, 2
	add	x17, x17, '0'
	putc0
	putc	']'
.endm
/*
* This is the actual entry point for the first
* cpu to be hijacked. After dropping to EL1,
* we just need to set up a stack and we can
* jump to C code to do the real work.
*/
SYM_CODE_START(__kpcimgr_cpu_holding_pen)
	/* Entry for the first hijacked cpu: drop to EL1, point sp at the
	 * kstate stack area, and run the C polling loop. */
	delineate '>'
	drop_to_el1

	/* load kstate base and set initial stack pointer */
	adr	x0, kstate_paddr
	ldr	x0, [x0]
	add	x3, x0, KSTATE_STACK_OFFSET
	mov	sp, x3

	/* jump to the real holding pen */
	bl	kpcimgr_cpu_holding_pen

	/* when C returns control here, we're done */
	putc	'='

	/* trap to EL2 and return to spin table */
	mov	x0, #1
	hvc	#0

	/* we should never get here */
	putc	'Q'
	b	.exit
SYM_CODE_END(__kpcimgr_cpu_holding_pen)
/*
* This is the entry point for the second hijacked
* cpu. Its job is to run the serial thread, which
* can interact with a console user should the need
* arise. Similar to the holding pen thread, we
* drop to EL1, set up our own unique stack, and
* jump to C.
*/
SYM_CODE_START(__kpcimgr_serial_thread)
	/* Entry for the second hijacked cpu: same dance as the holding
	 * pen, but with its own stack 8k below the other thread's. */
	delineate ']'
	drop_to_el1
	putc	'\\'
	adr	x1, kstate_paddr
	ldr	x0, [x1]
	add	x3, x0, KSTATE_STACK_OFFSET
	sub	x3, x3, 0x2000	/* need a stack, different from other thread */
	mov	sp, x3
	bl	kpcimgr_serial_thread
	putc	'+'

	/* trap to EL2 and return to spin table */
	mov	x0, #1
	hvc	#0

	/* we should never get here */
	b	.exit
SYM_CODE_END(__kpcimgr_serial_thread)
/* C callable functions */
/* long read_el(void) */
SYM_CODE_START(read_el)
mrs x0, CurrentEL
lsr x0, x0, #2
ret
SYM_CODE_END(read_el)
/* int cpuid(void) */
SYM_CODE_START(cpuid)
mrs x0, cpuidreg
ret
SYM_CODE_END(cpuid)
/* int release(void) */
SYM_CODE_START(release)
adr x1, spin_table_start_addr
ldr x1, [x1]
ldr x0, [x1,#0x10]
ret
SYM_CODE_END(release)
/*
* printl, basically performs a printf("[%lx]")
*
* We use a few registers indiscriminately, but I am
* reasonably sure they are not used elsewhere
*/
#define shiftval x14
#define nchars x15
#define nibble x17

/* printl: print arg0 (x13) as "[%lx]" with leading zeros suppressed;
 * returns via return_addr (x12) rather than the normal link register */
SYM_CODE_START(printl)
	putc	'['
	mov	nchars, #0	/* number of characters actually printed */
	mov	shiftval, #64
.loop_top:
	sub	shiftval, shiftval, 4
	lsr	nibble, arg0, shiftval
	and	nibble, nibble, #0xf
	cbnz	nibble, .print	/* always print a non-zero nibble */
	cbz	shiftval, .print /* always print the last nibble, even if zero */
	cbz	nchars, .loop_bottom /* don't print leading zeros */
.print:
	add	nchars, nchars, 1
	/* convert nibble to ASCII hex ('0'-'9', 'A'-'F') */
	add	nibble, nibble, #'0'
	cmp	nibble, #'0'+0xA
	b.lt	1f
	add	nibble, nibble, #-0xA-'0'+'A'
1:	putc0
.loop_bottom:
	cbnz	shiftval, .loop_top
	putc	']'
	br	return_addr
SYM_CODE_END(printl)
/*
* Exception handler
*
* Mainly used to deal with Serror
*
* EL2 exceptions are fatal, but exceptions that arrive here
* at EL1 cause some useful output to the console, and return.
* The number of exceptions handled this way is limited to a few.
* The Serror exception is an exception to this rule.
*/
SYM_CODE_START(exception_handler)
	/* Dump exception state to the UART.  EL2 exceptions are fatal
	 * (fall through to .exit); EL1 exceptions print the syndrome
	 * registers and return, up to ex_count times. */
	print_el
	mrs	x29, CurrentEL
	cmp	x29, #8
	b.ne	1f

	/* EL2 (fatal) */
	printsr	elr_el2
	b	.exit

	/* EL1 */
1:	printsr	elr_el1
	printsr	far_el1
	printsr	spsr_el1
	printsr	esr_el1
	printsr	sctlr_el1
	print_ex_count

	/* limit number of times we go through this code */
	/* to avoid an infinite stream of exceptions */
	mrs	x15, ex_count
	cbz	x15, .exit
	sub	x15, x15, 1
	msr	ex_count, x15
	print	"\r\n"
	eret

	/*
	 * Finish by jumping back to the original
	 * spin table
	 */
.exit:
	print_el
	print_cpuid
	println	"done"
	adr	x29, spin_table_start_addr
	ldr	x0, [x29]
	br	x0
SYM_CODE_END(exception_handler)
/* vector-entry macros: each entry is 128 bytes (.align 7) */

/* EL2 entry: print a tag char and bail out to the spin table */
.macro hyper, c
	.align	7
	putc	\c
	b	.exit
.endm

/* EL1 entry: print a tag char and run the exception handler */
.macro exlog, c
	.align	7
	putc	\c
	print_el
	b	exception_handler
.endm

/* SError entry: reset ex_count so the handler always reports it */
.macro serror, c
	.align	7
	putc	\c
	mov	x16, #3
	msr	ex_count, x16
	b	exception_handler
.endm

	.align	3
spin_table_start_addr:
	/* NOTE(review): bare .dword with no operand - verify this actually
	 * reserves the 8 bytes drop_to_el1 stores into (".dword 0" would
	 * be unambiguous). */
	.dword

/* The actual Exception Vector table, used for both EL1 and EL2 */
	.align	11
xcpt_vectors:
	/* Current exception level with SP_EL0 */
	exlog	'A'	/* Sync */
	exlog	'B'	/* IRQ/vIRQ */
	exlog	'C'	/* FIQ/cFIQ */
	exlog	'D'	/* SError/vSError */
	/* Current exception level with SP_ELx, x>0 */
	hyper	'H'	/* Sync */
	exlog	'I'	/* IRQ/vIRQ */
	exlog	'Q'	/* FIQ/cFIQ */
	serror	'S'	/* SError/vSError */
View File

@ -0,0 +1,263 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2021, 2022, Oracle and/or its affiliates.
*/
/*
* Kernel PCIE Manager - kexec related code
*
* Author: rob.gardner@oracle.com
*/
#include "kpcimgr_api.h"
#include "pciesvc.h"
#include "pciesvc_system.h"
/* Timer tick conversions; the generic counter runs at 200 MHz on ELBA. */
#define TICKS_PER_US 200
#define TICKS_PER_MS (1000*TICKS_PER_US)
#define TICKS_PER_SEC (1000*TICKS_PER_MS)

int holding_pen_idx;

/* Physical address of the kstate area and the current mapping of it. */
unsigned long kstate_paddr;
kstate_t *kstate = NULL;
void set_kstate(kstate_t *ks)
{
kstate = ks;
kstate_paddr = ks->shmembase + SHMEM_KSTATE_OFFSET;
}
/*
 * Return nonzero when running with the MMU on (kstate is a virtual
 * address), zero when running in physical/nommu mode where the kstate
 * pointer equals its physical address.
 */
int virtual(void)
{
return (unsigned long)kstate != kstate_paddr;
}
/* called in physical mode */
/*
 * Single poll pass while in physical (nommu) mode; records the
 * timestamp of the last nommu poll in the trace data.
 */
void kpcimgr_nommu_poll(kstate_t *ks)
{
kpcimgr_poll(ks, 0, NOMMU);
ks->trace_data[NOMMU][LAST_CALL_TIME] = read_sysreg(cntvct_el0);
}
/*
 * Entry point for a secondary CPU parked during kexec: keep servicing
 * pciesvc in physical mode until release() indicates the new kernel
 * wants its CPUs back. Debug mode emits progress characters ('C' on
 * entry, 'S' per outer loop) directly on the UART.
 */
void kpcimgr_cpu_holding_pen(kstate_t *ks)
{
long npolls = 0;
int i;
set_kstate(ks);
ks->uart_addr = (void *) PEN_UART;
if (ks->debug)
_uart_write((void *) PEN_UART, 'C');
kpcimgr_init_poll(ks);
kpr_err("%s with EL%ld on cpu%d\n", __func__, read_el(), cpuid());
/* reset the per-copy entry-point counter (see kpcimgr_get_holding_pen) */
holding_pen_idx = 0;
kpr_err("going into poll loop...\n");
while (1) {
if (ks->debug)
_uart_write((void *) PEN_UART, 'S');
kpcimgr_nommu_poll(ks);
npolls++;
/* check release() every 1ms, polling again every 10ms */
for (i=0; i<10; i++) {
if (release()) {
kpcimgr_nommu_poll(ks);
kpr_err("poll loop done, returning after %ld polls.\n", npolls);
return;
}
kp_udelay(1*1000); /* 1ms */
}
}
}
/* Print the one-line help summary for the serial debug console. */
void serial_help(void)
{
kpr_err("Commands:\n");
kpr_err(" c Cpu id\n");
kpr_err(" e Event queue\n");
kpr_err(" f Show/set cfgval\n");
kpr_err(" h Show help message\n");
kpr_err(" m Memory ranges\n");
kpr_err(" q Quit serial thread\n");
kpr_err(" r Reboot\n");
kpr_err(" s Serror trigger\n");
kpr_err(" t Report Stats\n");
}
/*
 * Read a hex number from the serial console and store it in
 * ks->cfgval. Digits are echoed as they arrive; the first non-hex
 * character terminates input. If no hex digit was entered, cfgval
 * is left unchanged.
 */
void set_cfgval(kstate_t *ks)
{
    int newval = 0;
    int have_digits = 0;
    char ch;

    kpr_err("New cfgval: ");
    for (;;) {
        /* busy-wait for the next character, then echo it */
        do {
        } while (uart_read(ks, &ch) == 0);
        uart_write(ks, ch);

        if (ch >= '0' && ch <= '9')
            newval = (newval << 4) | (ch - '0');
        else if (ch >= 'a' && ch <= 'f')
            newval = (newval << 4) | (ch - 'a' + 10);
        else if (ch >= 'A' && ch <= 'F')
            newval = (newval << 4) | (ch - 'A' + 10);
        else
            break;
        have_digits = 1;
    }

    if (have_digits) {
        kpr_err("\r\ncfgval set to %x\n", newval);
        ks->cfgval = newval;
    } else {
        kpr_err("\r\ncfgval not modified\n");
    }
}
#define WDOG_REGS (void *)0x1400
#define WDOG_CONTROL_REG_OFFSET 0x00
#define WDOG_CONTROL_REG_WDT_EN_MASK 0x01
#define WDOG_CONTROL_REG_RESP_MODE_MASK 0x02
/*
 * Force a system reset by arming the watchdog in reset mode.
 * NOTE(review): WDOG_REGS is a fixed low physical address (0x1400);
 * presumably this runs only in nommu/identity-mapped context — confirm.
 */
void watchdog_reboot(void)
{
u32 val = readl(WDOG_REGS + WDOG_CONTROL_REG_OFFSET);
kpr_err("Rebooting...\n");
/* Disable interrupt mode; always perform system reset. */
val &= ~WDOG_CONTROL_REG_RESP_MODE_MASK;
/* Enable watchdog. */
val |= WDOG_CONTROL_REG_WDT_EN_MASK;
writel(val, WDOG_REGS + WDOG_CONTROL_REG_OFFSET);
}
/*
 * Dispatch a single character command from the serial debug console.
 * See serial_help() for the command list.
 */
void serial_input(char c)
{
kstate_t *ks = get_kstate();
int n;
switch (c) {
case 'c': case 'C':
kpr_err("serial thread running on cpu#%d\n", cpuid());
break;
case 'e': case 'E':
/* number of queued event records (circular queue) */
n = ks->evq_head - ks->evq_tail;
if (n < 0)
n += EVENT_QUEUE_LENGTH;
kpr_err("event queue contains %d records\n", n);
break;
case 'f': case 'F':
kpr_err("cfgval = %x\n", ks->cfgval);
set_cfgval(ks);
break;
case '?':
case 'h':
case 'H':
serial_help();
break;
case 'm': case 'M':
for (n=0; n<ks->nranges; n++) {
struct mem_range_t *mr = &ks->mem_ranges[n];
kpr_err("range [%lx..%lx] mapped at %lx\n",
mr->base, mr->end, mr->vaddr);
}
break;
case 'q': case 'Q':
/* hypervisor call: traps out of the serial loop entirely */
__asm__("hvc #0;" ::);
break;
case 'r': case 'R':
watchdog_reboot();
break;
case 's':
case 'S':
/* 0x100 selects the direct register-read SERR path */
trigger_serr(0x100);
break;
case 't':
case 'T':
kpcimgr_report_stats(ks, NOMMU, 1, 1);
break;
default:
kpr_err("'%c' unknown command\n", c);
break;
}
}
/*
 * Secondary-CPU thread that services the debug serial console until
 * release() indicates the CPU should be returned. Prints a hint once
 * if it has been running for more than ~2 seconds.
 */
void kpcimgr_serial_thread(kstate_t *ks)
{
unsigned long start = read_sysreg(cntvct_el0);
int warning_printed = 0;
ks->uart_addr = (void *) PEN_UART;
set_kstate(ks);
kpr_err("%s el%d on cpu%d\n", __func__, read_el(), cpuid());
while (!release()) {
char c;
if (uart_read(ks, &c))
serial_input(c);
if (!warning_printed && time_elapsed(start, 2*TICKS_PER_SEC)) {
kpr_err("Serial thread running for >2s, 'H' for help\n");
warning_printed = 1;
}
}
kpr_err("%s done\n", __func__);
}
/*
* Called from kpcimgr when the secondary CPUs are being taken
* offline. We return a physical address which the secondary CPU will
* jump to. The global 'holding_pen_idx' keeps a count of how many
* times we've been called so that we can return the appropriate
* function pointer for a given cpu. It would seem that there are some
* very dangerous race conditions here:
*
* 1. Can't this function be called concurrently on multiple CPUs?
* No, it cannot, because we are called by kpcimgr_get_entry(),
* which protects against this with a spinlock.
*
* 2. holding_pen_idx is reset to zero in kpcimgr_cpu_holding_pen(),
* and can't that execute on CPU1 while this function executes
* concurrently on CPU2?
* Good question! The answer is yes, they can execute
* simultaneously, but it is not a race because they will operate
* on different memory. When this function is called, it is in
* virtual mode, with the code and data in normal module_alloc'ed
* memory. But when kpcimgr_cpu_holding_pen() executes, it is
* running in physical mode from a copy of the code and data that
* has been relocated to persistent memory. Thus, references to
* 'holding_pen_idx' in these two functions refer to different
* memory locations.
*/
/*
 * Hand out a physical entry point for a secondary CPU being parked.
 * First caller gets the holding-pen poll loop, second gets the serial
 * thread, later CPUs (and cpu 0, or any invalid/non-running state)
 * keep their original spin-table entry. See the race-condition
 * discussion in the comment block above.
 */
unsigned long kpcimgr_get_holding_pen(unsigned long old_entry, unsigned int cpu)
{
kstate_t *ks = get_kstate();
unsigned long offset, entry;
extern void __kpcimgr_cpu_holding_pen(void);
extern void __kpcimgr_serial_thread(void);
if (ks == NULL || ks->valid != KSTATE_MAGIC || !ks->running || !ks->have_persistent_mem)
return old_entry;
if (cpu == 0)
return old_entry;
switch (holding_pen_idx) {
case 0:
offset = (unsigned long) __kpcimgr_cpu_holding_pen - (unsigned long) ks->code_base;
break;
case 1:
offset = (unsigned long) __kpcimgr_serial_thread - (unsigned long) ks->code_base;
break;
default:
return old_entry;
}
holding_pen_idx++;
/* translate the module VA into the relocated copy's physical address */
entry = ks->shmembase + KSTATE_CODE_OFFSET + offset;
kpr_err("%s(cpu%d) entry = %lx\n", __func__, cpu, entry);
return entry;
}

View File

@ -0,0 +1,201 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2021, 2022, Oracle and/or its affiliates.
*/
/*
* Kernel PCIE Manager - test/serial/debug code
*
* Author: rob.gardner@oracle.com
*/
#include "kpcimgr_api.h"
#include "pciesvc.h"
#include "pciesvc_system.h"
#define TICKS_PER_US 200
#define TICKS_PER_MS (1000*TICKS_PER_US)
#define TICKS_PER_SEC (1000*TICKS_PER_MS)
/*
* kp_udelay
*
* Like kernel udelay(), but avoids an external call.
*/
/*
 * Busy-wait for approximately 'us' microseconds using the generic
 * timer counter. Self-contained replacement for kernel udelay();
 * tolerates counter jitter by only accumulating forward progress.
 */
void kp_udelay(unsigned long us)
{
    unsigned long budget = us * TICKS_PER_US;
    unsigned long accumulated = 0;
    unsigned long prev = read_sysreg(cntvct_el0);
    unsigned long cur;

    while (accumulated < budget) {
        cur = read_sysreg(cntvct_el0);
        if (cur > prev)
            accumulated += cur - prev;
        prev = cur;
    }
}
/*
 * Return nonzero if at least 'elapsed' counter ticks have passed since
 * 'start'. The second test is a rough counter-wraparound heuristic
 * (as the original comment admits, "good enough", not exact).
 */
int time_elapsed(unsigned long start, unsigned long elapsed)
{
unsigned long now = read_sysreg(cntvct_el0);
if (now > start + elapsed)
return 1;
if (now < start && now > elapsed) /* good enough */
return 1;
return 0;
}
/*
* Very simple global spin lock:
* Not very well throught out or tested since it is
* not used for any important purpose. It is only
* used by the serial puts() function.
*/
/* one flag slot per CPU (up to 16); sum==1 means we hold the lock */
unsigned long lock_table[16];
/*
 * Acquire the global console lock. Declares intent, checks that no
 * other CPU declared intent, and backs off (with a per-CPU skewed
 * delay to break ties) otherwise. As noted above, this is a
 * best-effort lock used only for serial output.
 */
void kp_lock(void)
{
int i, cpu = cpuid();
unsigned long sum;
while (1) {
lock_table[cpu] = 1;
__asm__ __volatile__("dsb sy;" ::);
for (sum=0, i=0; i<16; i++)
sum += lock_table[i];
if (sum == 1) /* acquired lock */
return;
lock_table[cpu] = 0;
__asm__ __volatile__("dsb sy;" ::);
kp_udelay(1000+cpu*1000); /* a few ms */
}
}
/* Release the global console lock taken by kp_lock(). */
void kp_unlock(void)
{
lock_table[cpuid()] = 0;
__asm__ __volatile__("dsb sy;" ::);
}
/*
* Mini serial output driver
*
* We want to avoid a potential infinite loop if something
* goes wrong with the uart, so let's wait no more than 1ms
* for the transmitter shift register to become empty. The
* baud rate is 115200, so theoretically, the shift register
* should never take longer than 100us to become empty.
*/
/*
 * Write one character to the UART at 'reg', bounding each wait for
 * the transmitter to become ready to ~1ms (10 x 100us) so a wedged
 * UART cannot hang us. At 115200 baud the shift register should drain
 * in well under 100us.
 *
 * Fix: the UART registers must be accessed through a volatile pointer
 * (as uart_read() already does); otherwise the compiler is free to
 * hoist the LSR load out of the polling loop.
 */
void _uart_write(unsigned char *reg, char c)
{
    volatile unsigned char *r = reg;
    int i;

    /* wait for room in the transmitter */
    for (i = 0; i < 10; i++) {
        if (*(r + UART_LSR) & OK_TO_WRITE)
            break;
        kp_udelay(100);
    }
    *(r + UART_THR) = c;
    /* wait for the character to drain before returning */
    for (i = 0; i < 10; i++) {
        if (*(r + UART_LSR) & OK_TO_WRITE)
            break;
        kp_udelay(100);
    }
}
/* Write one character to the UART recorded in kstate. */
void uart_write(kstate_t *ks, char c)
{
_uart_write(ks->uart_addr, c);
}
/*
 * Non-blocking read of one character from the UART.
 * Returns 1 with *c filled in if a character was available, else 0.
 * (UART_THR is the same offset as the receive buffer register.)
 */
int uart_read(kstate_t *ks, char *c)
{
volatile unsigned char *reg = ks->uart_addr;
if (*(reg + UART_LSR) & DATA_READY) {
*c = *(reg + UART_THR);
return 1;
}
return 0;
}
/* Emit a single progress character, but only when debug is enabled. */
void uart_write_debug(kstate_t *ks, char c)
{
if (ks->debug)
_uart_write(ks->uart_addr, c);
}
/*
 * Write a NUL-terminated string to the UART under the console lock,
 * expanding '\n' to '\n\r'. No-op if no UART has been recorded yet.
 */
void kdbg_puts(const char *s)
{
kstate_t *ks = get_kstate();
if (ks->uart_addr == NULL)
return;
kp_lock();
for ( ; *s; s++) {
uart_write(ks, *s);
if (*s == '\n')
uart_write(ks, '\r');
}
kp_unlock();
}
/*
* For testing, this causes an SERR to be generated
*/
/*
 * Deliberately read a known-bad physical address to provoke an SERR,
 * for exception-path testing. val == 0x100 uses the direct register
 * read; any other value goes through the pciepreg (upcall) path.
 */
void trigger_serr(int val)
{
const uint64_t good_bad_pa = 0x20141000;
uint32_t dummy;
kdbg_puts("kpcimgr: triggering serr\n");
if (val == 0x100)
dummy = pciesvc_reg_rd32(good_bad_pa);
else
pciesvc_pciepreg_rd32(good_bad_pa, &dummy);
}
/*
 * Report deltas of the pciesvc indirect-transaction counters since the
 * last call, and cache the current totals in kstate. Unless 'always'
 * is set, reports are rate-limited to one per 5 seconds and suppressed
 * when nothing changed; output goes to the log only if 'rightnow' or
 * debug is set. Uses function-static last_call, so not reentrant.
 */
void kpcimgr_report_stats(kstate_t *ks, int phase, int always, int rightnow)
{
pciehw_shmem_t *pshmem = pciesvc_shmem_get();
unsigned long now = read_sysreg(cntvct_el0);
uint64_t cfgrd, cfgwr, memrd, memwr;
static unsigned long last_call = 0;
pciemgr_stats_t *s;
pciehw_port_t *p;
if (!always && (now - last_call) < 5 * TICKS_PER_SEC)
return;
p = &pshmem->port[0];
s = &p->stats;
/* deltas relative to the snapshot taken on the previous report */
cfgrd = s->ind_cfgrd - ks->ind_cfgrd;
cfgwr = s->ind_cfgwr - ks->ind_cfgwr;
memrd = s->ind_memrd - ks->ind_memrd;
memwr = s->ind_memwr - ks->ind_memwr;
if (!always && (cfgrd + cfgwr + memrd + memwr) == 0)
return;
if (rightnow || ks->debug) {
kpr_err("KPCIMGR: called %d times during %s phase: %lld cfgrd, %lld cfgwr, %lld memrd, %lld memwr\n",
ks->ncalls, (phase == NOMMU) ? "nommu" : "normal",
cfgrd, cfgwr, memrd, memwr);
kpr_err(" %d ind_intr, %d not_intr, %d event_intr\n", ks->ind_intr, ks->not_intr, ks->event_intr);
}
/* snapshot totals for the next delta computation */
ks->ind_cfgrd = s->ind_cfgrd;
ks->ind_cfgwr = s->ind_cfgwr;
ks->ind_memrd = s->ind_memrd;
ks->ind_memwr = s->ind_memwr;
last_call = read_sysreg(cntvct_el0);
}

View File

@ -0,0 +1,172 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2021, 2022, Oracle and/or its affiliates.
*/
#ifndef __KPCIMGR_API_H__
#define __KPCIMGR_API_H__
#ifdef __KERNEL__
#include <linux/miscdevice.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/reboot.h>
#include <linux/poll.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/mm.h>
#include <linux/kallsyms.h>
#include <linux/moduleloader.h>
#include <linux/set_memory.h>
#include <asm/insn.h>
#endif
#include "kpci_constants.h"
#define K_ENTRY_INIT_INTR 0
#define K_ENTRY_INIT_POLL 1
#define K_ENTRY_SHUT 2
#define K_ENTRY_POLL 3
#define K_ENTRY_HOLDING_PEN 4
#define K_ENTRY_INDIRECT_INTR 5
#define K_ENTRY_NOTIFY_INTR 6
#define K_ENTRY_INIT_FN 7
#define K_ENTRY_CMD_READ 8
#define K_ENTRY_CMD_WRITE 9
#define K_ENTRY_GET_VERSION 10
#define K_NUM_ENTRIES 16
/*
 * Registration record a pciesvc library module hands to the kpcimgr
 * driver: version handshake, the end of the relocatable code region,
 * and the table of K_ENTRY_* entry points.
 */
struct kpcimgr_entry_points_t {
int expected_mgr_version;
int lib_version_major;
int lib_version_minor;
void *code_end;
void *entry_point[K_NUM_ENTRIES];
};
/* upcalls */
#define WAKE_UP_EVENT_QUEUE 1
#define PRINT_LOG_MSG 2
#define PREG_READ 3
/* event queue sizing */
#define EVENT_QUEUE_LENGTH 1024
#define EVENT_SIZE 128
/* max command size for sysfs cmd node */
#define CMD_SIZE 4096
/* max number of memory ranges from device tree */
#define NUM_MEMRANGES 32
/*
 * Persistent kpcimgr state, shared between the normal (virtual) driver
 * and the relocated physical-mode copy across kexec. Lives in shared
 * memory at SHMEM_KSTATE_OFFSET; layout changes must preserve
 * compatibility (see unused1 below).
 */
struct kpcimgr_state_t {
/* essential state */
int valid;
int debug;
int running;
int active_port;
int have_persistent_mem;
int lib_version_major;
int lib_version_minor;
/* timestamps and general trace data */
long kexec_time;
long driver_start_time;
unsigned long trace_data[NUM_PHASES][DATA_SIZE];
/* virtual addresses */
void *uart_addr;
void *code_base;
void *persistent_base;
void *upcall;
void *pfdev;
void *shmemva;
unsigned long shmembase, shmem_size, code_size;
/* physical ranges (and their VA mappings) usable by the library */
struct mem_range_t {
unsigned long base, end;
void *vaddr;
} mem_ranges[NUM_MEMRANGES];
int nranges;
int hwmem_idx;
/* interrupt vectors */
struct msi_info {
unsigned long msgaddr;
unsigned int msgdata;
} msi[MSI_NVECTORS];
/* stats for work done */
int ind_cfgrd, ind_cfgwr;
int ind_memrd, ind_memwr;
int ncalls;
int ind_intr, not_intr, event_intr;
int unused1[7]; /* was version=2 code_offsets[], keep evq* compat */
/* Event queue handling */
int evq_head, evq_tail;
char evq[EVENT_QUEUE_LENGTH][EVENT_SIZE];
/* debugging */
void *mod;
int msg_idx;
int cfgval;
/* offsets into relocated library code */
int code_offsets[K_NUM_ENTRIES];
};
typedef struct kpcimgr_state_t kstate_t;
_Static_assert(sizeof(kstate_t) < SHMEM_KSTATE_SIZE,
"kstate size insufficient");
/* trace_data[] elements */
#define FIRST_CALL_TIME 0
#define FIRST_SEQNUM 1
#define LAST_SEQNUM 2
#define TAG 3
#define PA_BAD_CNT 4
#define NUM_CHECKS 5
#define NUM_CALLS 6
#define NUM_PENDINGS 7
#define LAST_CALL_TIME 8
#define EARLY_POLL 9
#define MAX_DATA 10
#define KPCIMGR_DEV "/dev/kpcimgr"
#define KPCIMGR_NAME "kpcimgr"
#define PFX KPCIMGR_NAME ": "
#define KPCIMGR_KERNEL_VERSION 3
#ifdef __KERNEL__
int kpcimgr_module_register(struct module *mod,
struct kpcimgr_entry_points_t *ep, int relocate);
void kpcimgr_start_running(void);
void kpcimgr_stop_running(void);
void kpcimgr_sysfs_setup(struct platform_device *pfdev);
void *kpci_memcpy(void *dst, const void *src, size_t n);
void wake_up_event_queue(void);
int aarch64_insn_read(void *addr, u32 *insnp);
extern spinlock_t kpcimgr_lock;
#define reset_stats(k) \
kpci_memset((void *)&(k)->trace_data[0][0], 0, sizeof((k)->trace_data))
/* Reset the normal-phase first-call timestamp and call counter. */
static inline void set_init_state(kstate_t *k)
{
k->trace_data[NORMAL][FIRST_CALL_TIME] = 0;
k->ncalls = 0;
}
/* Accessor for the global kstate pointer set via set_kstate(). */
static inline kstate_t *get_kstate(void)
{
extern kstate_t *kstate;
return kstate;
}
#endif
#endif

View File

@ -0,0 +1,90 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2021, 2022, Oracle and/or its affiliates.
*/
/*
* PCIESVC Library Loader
*
* Author: rob.gardner@oracle.com
*/
#include <linux/init.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/ctype.h>
MODULE_LICENSE("GPL");
#include "kpcimgr_api.h"
#include "version.h"
static int relocate = 0;
#ifdef DEBUG_KPCIMGR
module_param(relocate, int, 0600);
MODULE_PARM_DESC(relocate, "specifies whether or not to relocate module");
#endif
extern char pciesvc_end;
extern void kpcimgr_init_intr(void *);
extern void kpcimgr_init_fn(void *);
extern void kpcimgr_version_fn(char **);
extern void kpcimgr_init_poll(void *);
extern void pciesvc_shut(int);
extern void kpcimgr_poll(kstate_t *, int, int);
extern unsigned long kpcimgr_get_holding_pen(unsigned long, unsigned int);
extern int kpcimgr_ind_intr(void *, int);
extern int kpcimgr_not_intr(void *, int);
extern void kpcimgr_undefined_entry(void);
extern int pciesvc_sysfs_cmd_read(void *, char *, int *);
extern int pciesvc_sysfs_cmd_write(void *, char *, size_t, int *);
extern int pciesvc_version_major;
extern int pciesvc_version_minor;
/*
 * Module init: build the entry-point table for the pciesvc library and
 * register it with the kpcimgr driver, which copies the code to
 * persistent memory. Falls back to a v2 registration if built with
 * PEN_COMPAT_V2 and the v3 registration is rejected.
 */
static int __init pciesvc_dev_init(void)
{
struct kpcimgr_entry_points_t ep;
int i, ret = 0;
/* initialize entry_points struct via executable code so that
* PC relative relocations are generated */
ep.expected_mgr_version = 3;
ep.lib_version_major = pciesvc_version_major;
ep.lib_version_minor = pciesvc_version_minor;
ep.code_end = &pciesvc_end;
/* default every slot, then fill in the implemented entry points */
for (i=0; i<K_NUM_ENTRIES; i++)
ep.entry_point[i] = kpcimgr_undefined_entry;
ep.entry_point[K_ENTRY_INIT_INTR] = kpcimgr_init_intr;
ep.entry_point[K_ENTRY_INIT_POLL] = kpcimgr_init_poll;
ep.entry_point[K_ENTRY_SHUT] = pciesvc_shut;
ep.entry_point[K_ENTRY_POLL] = kpcimgr_poll;
ep.entry_point[K_ENTRY_HOLDING_PEN] = kpcimgr_get_holding_pen;
ep.entry_point[K_ENTRY_INDIRECT_INTR] = kpcimgr_ind_intr;
ep.entry_point[K_ENTRY_NOTIFY_INTR] = kpcimgr_not_intr;
ep.entry_point[K_ENTRY_INIT_FN] = kpcimgr_init_fn;
ep.entry_point[K_ENTRY_CMD_READ] = pciesvc_sysfs_cmd_read;
ep.entry_point[K_ENTRY_CMD_WRITE] = pciesvc_sysfs_cmd_write;
ep.entry_point[K_ENTRY_GET_VERSION] = kpcimgr_version_fn;
/* call to Pensando SOC driver to copy the code to persistent memory */
ret = kpcimgr_module_register(THIS_MODULE, &ep, relocate);
#ifdef PEN_COMPAT_V2
if (ret < 0) {
/* attempt compat registration, some entry_point[] unused */
ep.expected_mgr_version = 2;
ret = kpcimgr_module_register(THIS_MODULE, &ep, relocate);
}
#endif
return ret;
}
module_init(pciesvc_dev_init);
/* Module exit: nothing to undo; the relocated code copy persists. */
static void __exit pciesvc_dev_detach(void)
{
}
module_exit(pciesvc_dev_detach);

View File

@ -0,0 +1,567 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2021, 2022, Oracle and/or its affiliates.
*/
/*
* Kernel PCIE Manager "glue" code
*
* Author: rob.gardner@oracle.com
*/
#include "kpcimgr_api.h"
#include "pciesvc.h"
#include "pciesvc_system.h"
#include "version.h"
/*
* This file contains only functions essential to the
* operation of the pciesvc library code.
*/
/* K_ENTRY_INIT_FN: record the kstate pointer for this code copy. */
void kpcimgr_init_fn(kstate_t *ks)
{
set_kstate(ks);
}
/* K_ENTRY_GET_VERSION: return the library version string, if asked. */
void kpcimgr_version_fn(char **version)
{
if (version)
*version = PCIESVC_VERSION;
}
/*
* Dummy function called for undefined entry points
*/
/*
 * Dummy function called for undefined entry points
 */
void kpcimgr_undefined_entry(void)
{
pciesvc_log(KERN_INFO "undefined entry called\n");
}
/*
* Initialize pciesvc for interrupt based operation
*/
/*
 * K_ENTRY_INIT_INTR: initialize pciesvc for interrupt-driven operation
 * using the MSI vectors recorded in kstate, then run one poll pass to
 * drain anything that arrived before interrupts were wired up.
 */
void kpcimgr_init_intr(kstate_t *ks)
{
pciesvc_params_t p;
volatile struct msi_info *msi;
set_kstate(ks);
memset(&p, 0, sizeof(pciesvc_params_t));
p.version = 0;
p.params_v0.port = ks->active_port;
msi = &ks->msi[MSI_INDIRECT_IDX];
p.params_v0.ind_intr = 1;
p.params_v0.ind_msgaddr = msi->msgaddr;
p.params_v0.ind_msgdata = msi->msgdata;
msi = &ks->msi[MSI_NOTIFY_IDX];
p.params_v0.not_intr = 1;
p.params_v0.not_msgaddr = msi->msgaddr;
p.params_v0.not_msgdata = msi->msgdata;
if (pciesvc_init(&p))
kpr_err("%s: pciesvc_init failed\n", __func__);
/* clear out any pending transactions */
kpcimgr_poll(ks, 0, NORMAL);
}
/*
* Initialize pciesvc for polling based operation
*/
/*
 * K_ENTRY_INIT_POLL: initialize pciesvc for polling-based operation
 * (both the indirect and notify paths are polled, no interrupts).
 *
 * Fix: check the pciesvc_init() return value and log on failure, as
 * kpcimgr_init_intr() already does — a silent init failure here left
 * the poll loop running against an uninitialized library.
 */
void kpcimgr_init_poll(kstate_t *ks)
{
    pciesvc_params_t p;

    set_kstate(ks);
    memset(&p, 0, sizeof(pciesvc_params_t));

    p.version = 0;
    p.params_v0.port = ks->active_port;
    p.params_v0.ind_poll = 1;
    p.params_v0.not_poll = 1;

    if (pciesvc_init(&p))
        kpr_err("%s: pciesvc_init failed\n", __func__);
}
/*
* Main poll function
*
* Essentially a wrapper for pciesvc_poll() that
* updates statistics, does some error checking,
* and outputs some debugging information.
*/
/*
 * Main poll entry: wrap pciesvc_poll() with trace/statistics updates,
 * sanity checks on kstate, and optional debug UART breadcrumbs.
 * 'phase' is NOMMU or NORMAL; 'index' is currently unused here beyond
 * being part of the entry-point signature.
 */
void kpcimgr_poll(kstate_t *ks, int index, int phase)
{
int i, result;
long ts = read_sysreg(cntvct_el0);
set_kstate(ks);
ks->ncalls++;
/* first call in this phase: stamp it and report the other phase's stats */
if (ks->trace_data[phase][FIRST_CALL_TIME] == 0) {
uart_write_debug(ks, 'F');
ks->trace_data[phase][FIRST_CALL_TIME] = ts;
if (phase == NOMMU)
kpcimgr_report_stats(ks, NORMAL, 1, 0);
else
kpcimgr_report_stats(ks, NOMMU, 1, 0);
}
ks->trace_data[phase][NUM_CALLS]++;
if (phase == NOMMU)
uart_write_debug(ks, 'M');
/* bail out if kstate looks corrupt or we've been stopped */
if (ks->valid != KSTATE_MAGIC) {
uart_write_debug(ks, 'V');
return;
}
if (!ks->running) {
uart_write_debug(ks, 'P');
return;
}
ks->trace_data[phase][LAST_CALL_TIME] = ts;
ks->trace_data[phase][NUM_CHECKS]++;
/* debug bits 0x100/0x200 request a one-shot SERR injection */
if (ks->debug & 0x300) {
trigger_serr(ks->debug & 0x300);
ks->debug &= ~0x300;
}
/* service up to 10 pending transactions per call */
for (i=0; i<10; i++) {
result = pciesvc_poll(0);
/*
* return value:
* 1: valid pending and handled
* 0: nothing pending
*/
if (result == 0)
break;
if (result == -1) {
uart_write_debug(ks, '?');
break;
}
uart_write_debug(ks, 'h');
ks->trace_data[phase][NUM_PENDINGS]++;
}
kpcimgr_report_stats(ks, phase, 0, 0);
}
/*
* ISR for Indirect Interrupt
*/
/*
 * ISR for the indirect-transaction interrupt; also honors a pending
 * debug SERR-injection request (debug bits 0x100/0x200, one-shot).
 */
int kpcimgr_ind_intr(kstate_t *ks, int port)
{
int ret;
set_kstate(ks);
ret = pciesvc_indirect_intr(port);
if (ks->debug & 0x300) {
trigger_serr(ks->debug & 0x300);
ks->debug &= ~0x300;
}
return ret;
}
/*
* ISR for Notify Interrupt
*/
/* ISR for the notify interrupt: delegate to pciesvc. */
int kpcimgr_not_intr(kstate_t *ks, int port)
{
set_kstate(ks);
return pciesvc_notify_intr(port);
}
/*
* Return a VA from one of our known ranges
*
* If we're running with the MMU turned off, then just return the
* physical address.
*
*/
/*
 * Translate a physical address into a usable pointer. In nommu mode
 * the PA itself is returned; otherwise the PA is looked up in the
 * known memory ranges and mapped to its recorded VA. Asserts (and
 * returns NULL) on a PA outside every range. 'sz' is accepted but not
 * range-checked here.
 */
void *kpcimgr_va_get(unsigned long pa, unsigned long sz)
{
kstate_t *ks = get_kstate();
int i;
if (!virtual())
return (void *) pa;
for (i=0; i<ks->nranges; i++) {
struct mem_range_t *mr = &ks->mem_ranges[i];
if (pa >= mr->base && pa < mr->end)
return mr->vaddr + (pa - mr->base);
}
kpr_err("%s: bad pa 0x%lx\n", __func__, pa);
pciesvc_assert(0);
return NULL;
}
/*
* Reverse translation: return a physical address
* corresponding to some virtual address.
*/
/*
 * Reverse translation: VA -> PA via the known memory ranges.
 * A pointer that is already a physical address within a range is
 * returned unchanged. Returns 0 when no range matches.
 */
u64 pciesvc_vtop(const void *hwmemva)
{
kstate_t *ks = get_kstate();
u64 hwptr = (u64) hwmemva;
int i;
for (i=0; i<ks->nranges; i++) {
struct mem_range_t *mr = &ks->mem_ranges[i];
u64 size;
/* was a physical address passed in to us? */
if (hwptr >= mr->base && hwptr < mr->end)
return hwptr;
size = mr->end - mr->base;
if (hwmemva >= mr->vaddr &&
hwmemva < mr->vaddr + size)
return mr->base + (hwmemva - mr->vaddr);
}
return 0;
}
/*
* Up calls from pciesvc
*/
/*
 * Read a 32-bit register at physical address 'pa' (must be 4-byte
 * aligned); the isb/dsb ensures the read completes before returning.
 */
uint32_t
pciesvc_reg_rd32(const uint64_t pa)
{
u_int32_t val, *va = kpcimgr_va_get(pa, 4);
pciesvc_assert((pa & 0x3) == 0);
val = readl(va);
__asm__ __volatile__("isb; dsb sy;" ::);
return val;
}
/* Read 'nw' consecutive 32-bit registers starting at 'pa' into w[]. */
static inline void
pciesvc_reg_rd32w(const uint64_t pa, uint32_t *w, const uint32_t nw)
{
    uint32_t idx;

    for (idx = 0; idx < nw; idx++)
        w[idx] = pciesvc_reg_rd32(pa + idx * 4);
}
/*
 * Read a PCIe config/preg register. When running virtually with an
 * upcall available, the read is delegated to the kernel via the
 * PREG_READ upcall; otherwise fall back to a direct register read.
 */
void
pciesvc_pciepreg_rd32(const uint64_t pa, uint32_t *dest)
{
u_int32_t val, (*upcall)(int req, unsigned long pa);
kstate_t *ks = get_kstate();
pciesvc_assert((pa & 0x3) == 0);
upcall = ks->upcall;
if (upcall && virtual())
val = upcall(PREG_READ, pa);
else
val = pciesvc_reg_rd32(pa);
*dest = val;
}
/* Write a 32-bit register at physical address 'pa' (4-byte aligned). */
void
pciesvc_reg_wr32(const uint64_t pa, const uint32_t val)
{
u_int32_t *va = kpcimgr_va_get(pa, 4);
pciesvc_assert((pa & 0x3) == 0);
writel(val, va);
}
/* Write 'nw' consecutive 32-bit registers starting at 'pa' from w[]. */
static inline void
pciesvc_reg_wr32w(const uint64_t pa, const uint32_t *w, const uint32_t nw)
{
    uint32_t idx;

    for (idx = 0; idx < nw; idx++)
        pciesvc_reg_wr32(pa + idx * 4, w[idx]);
}
/*
* Similar calls implemented in terms of rd32/wr32.
*/
/* 32-bit word overlaid with its half-word and byte views, used to do
 * sub-word reads/writes via read-modify-write of the aligned word */
typedef union {
u_int32_t l;
u_int16_t h[2];
u_int8_t b[4];
} iodata_t;
/*
 * Read 'sz' bytes (1, 2, 4, or 8) at physical address 'pa' into buf.
 * Sub-word sizes read the enclosing aligned word and extract the
 * relevant byte/half-word. Returns 0 on success, -1 on bad size.
 * NOTE(review): 2/4/8-byte accesses appear to assume natural
 * alignment of 'pa' — confirm with callers.
 */
int
pciesvc_mem_rd(const uint64_t pa, void *buf, const size_t sz)
{
uint64_t pa_aligned;
uint8_t idx;
iodata_t v;
switch (sz) {
case 1:
pa_aligned = pa & ~0x3;
idx = pa & 0x3;
v.l = pciesvc_reg_rd32(pa_aligned);
*(uint8_t *)buf = v.b[idx];
break;
case 2:
pa_aligned = pa & ~0x3;
idx = (pa & 0x3) >> 1;
v.l = pciesvc_reg_rd32(pa_aligned);
*(uint16_t *)buf = v.h[idx];
break;
case 4:
case 8:
pciesvc_reg_rd32w(pa, (uint32_t *)buf, sz >> 2);
break;
default:
return -1;
}
return 0;
}
/*
 * Write 'sz' bytes (1, 2, 4, or 8) from buf to physical address 'pa'.
 * Sub-word sizes use read-modify-write of the enclosing aligned word.
 * Unsupported sizes are silently ignored.
 */
void
pciesvc_mem_wr(const uint64_t pa, const void *buf, const size_t sz)
{
uint64_t pa_aligned;
uint8_t idx;
iodata_t v;
switch (sz) {
case 1:
pa_aligned = pa & ~0x3;
idx = pa & 0x3;
v.l = pciesvc_reg_rd32(pa_aligned);
v.b[idx] = *(uint8_t *)buf;
pciesvc_reg_wr32(pa_aligned, v.l);
break;
case 2:
pa_aligned = pa & ~0x3;
idx = (pa & 0x3) >> 1;
v.l = pciesvc_reg_rd32(pa_aligned);
v.h[idx] = *(uint16_t *)buf;
pciesvc_reg_wr32(pa_aligned, v.l);
break;
case 4:
case 8:
pciesvc_reg_wr32w(pa, (uint32_t *)buf, sz >> 2);
break;
default:
break;
}
}
/* Full memory barrier for the pciesvc library. */
void
pciesvc_mem_barrier(void)
{
mb();
}
/*
* We need our own memset/memcpy functions because we
* cannot call any kernel functions. And even if we could,
* we need to avoid cache operations since "non-linux" memory
* is non-cached.
*/
/*
 * Self-contained memset replacement: kernel functions cannot be
 * called from the relocated copy, and accesses must be volatile
 * because "non-linux" memory is non-cached. Word-fills when both the
 * pointer and length are 4-byte aligned, else byte-fills.
 *
 * Fix: loop counters are size_t (matching 'n') instead of int, so
 * lengths beyond INT_MAX cannot overflow the index.
 *
 * Returns s, like memset(3).
 */
void *
pciesvc_memset(void *s, int c, size_t n)
{
    if (((uintptr_t)s & 0x3) == 0 && (n & 0x3) == 0) {
        volatile u_int32_t *p = s;
        size_t i;
        u_int32_t pat;

        /* replicate the fill byte into all four lanes */
        pat = (u_int32_t)(c & 0xff);
        pat |= (pat << 8) | (pat << 16) | (pat << 24);
        for (i = 0; i < (n >> 2); i++)
            p[i] = pat;
    } else {
        volatile u_int8_t *p = s;
        size_t i;

        for (i = 0; i < n; i++)
            p[i] = (u_int8_t)c;
    }
    return s;
}
/*
 * Self-contained byte-wise memcpy replacement (no kernel calls, and
 * volatile destination accesses for non-cached memory). Regions must
 * not overlap. Returns dst, like memcpy(3).
 *
 * Fix: loop counter is size_t (matching 'n') instead of int, so
 * lengths beyond INT_MAX cannot overflow the index.
 */
void *
pciesvc_memcpy(void *dst, const void *src, size_t n)
{
    volatile u_int8_t *d = dst;
    const u_int8_t *s = src;
    size_t i;

    for (i = 0; i < n; i++) {
        *d++ = *s++;
    }
    return dst;
}
/* I/O-memory copy: same as pciesvc_memcpy (already volatile/bytewise). */
void *
pciesvc_memcpy_toio(void *dsthw, const void *src, size_t n)
{
return pciesvc_memcpy(dsthw, src, n);
}
/* Return the shared-memory base: VA when the MMU is on, else PA. */
void *
pciesvc_shmem_get(void)
{
kstate_t *ks = get_kstate();
if (virtual())
return ks->shmemva;
else
return (void *) ks->shmembase;
}
/* Return the hw memory range base: VA when the MMU is on, else PA. */
void *pciesvc_hwmem_get(void)
{
kstate_t *ks = get_kstate();
if (virtual())
return ks->mem_ranges[ks->hwmem_idx].vaddr;
else
return (void *) ks->mem_ranges[ks->hwmem_idx].base;
}
/*
 * Log a message: via the PRINT_LOG_MSG upcall when running virtually
 * with an upcall installed, otherwise straight to the debug UART.
 */
void
pciesvc_log(const char *msg)
{
kstate_t *ks = get_kstate();
u64 (*upcall)(int req, char *msg);
upcall = ks->upcall;
if (upcall && virtual())
upcall(PRINT_LOG_MSG, (char *)msg);
else
kdbg_puts((char *)msg);
}
/*
 * Wake readers of the event queue via upcall; only possible (and only
 * meaningful) when running virtually with an upcall installed.
 */
void wakeup_event_queue(void)
{
kstate_t *ks = get_kstate();
u64 (*upcall)(int req);
upcall = ks->upcall;
if (upcall && virtual())
upcall(WAKE_UP_EVENT_QUEUE);
}
/*
* Event Queue Handler
*
* Event queue semantics:
* evq_head = index of slot used for next insertion
* evq_tail = index of slot used for next removal
* queue is empty when head == tail
* queue is full when (head + 1) % queue_size == tail
* queue is nearly full when (head + 2) % queue_size == tail
*
* Only head is modified here, and the read() function only
* modifies tail, so theoretically no race can exist. It is
* possible for the reader to see an empty queue momentarily
* or the handler to see a full queue momentarily, but these
* situations do not justify adding locks.
*/
/*
 * Append an event record to the circular queue (semantics documented
 * in the comment block above) and wake any reader. Returns 0 on
 * success, -1 when the record is the wrong size, the queue is full
 * (logged once per full episode), or the queue just became
 * almost-full (the record is still queued, tagged PCIESVC_EV_QFULL).
 */
int pciesvc_event_handler(pciesvc_eventdata_t *evdata, const size_t evsize)
{
kstate_t *ks = get_kstate();
int ret = 0;
static int was_full = 0;
if (evsize != sizeof(pciesvc_eventdata_t)) {
kpr_err("%s: evsize != sizeof(pciesvc_eventdata_t))\n", __func__);
return -1;
}
if ((ks->evq_head + 1) % EVENT_QUEUE_LENGTH == ks->evq_tail) {
if (!was_full)
pciesvc_log(KERN_INFO "pciesvc_event_handler: event queue full\n");
was_full = 1;
return -1;
}
was_full = 0;
if ((ks->evq_head + 2) % EVENT_QUEUE_LENGTH == ks->evq_tail) {
pciesvc_log(KERN_INFO "pciesvc_event_handler: event queue almost full\n");
evdata->evtype = PCIESVC_EV_QFULL;
ret = -1;
}
pciesvc_memcpy_toio((void *)ks->evq[ks->evq_head], evdata, sizeof(pciesvc_eventdata_t));
ks->evq_head = (ks->evq_head + 1) % EVENT_QUEUE_LENGTH;
wakeup_event_queue();
return ret;
}
/*
 * Debug command hook. 0x17 reports virtual/physical mode, 0x19
 * reports cfgval, 0x100/0x200 arm a one-shot SERR injection in the
 * poll/intr paths; any other nonzero value is treated as a delay in
 * microseconds. Results are returned in-place through *cmd.
 */
void pciesvc_debug_cmd(uint32_t *cmd)
{
kstate_t *ks = get_kstate();
uint32_t delayus;
switch (*cmd) {
case 0x17:
*cmd = virtual();
return;
case 0x19:
*cmd = ks->cfgval;
return;
case 0x100:
case 0x200:
ks->debug |= *cmd;
return;
default:
delayus = *cmd;
if (delayus) {
pciesvc_usleep(delayus);
}
break;
}
}
/*
* cmd read/write
*/
/*
 * sysfs "cmd" node read: sets *exists so the caller knows the entry
 * point is implemented, then delegates to pciesvc_cmd_read().
 * Returns bytes read or -EINVAL.
 */
int pciesvc_sysfs_cmd_read(kstate_t *ks, char *buf, loff_t off, size_t count, int *exists)
{
int ret;
if (exists)
*exists = 1;
ret = pciesvc_cmd_read(buf, off, count);
return ret < 0 ? -EINVAL : ret;
}
int pciesvc_sysfs_cmd_write(kstate_t *ks, char *buf, loff_t off, size_t count, int *exists)
{
int ret;
if (exists)
*exists = 1;
ret = pciesvc_cmd_write(buf, off, count);
return ret < 0 ? -EINVAL : ret;
}

View File

@ -0,0 +1,40 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2018, Pensando Systems Inc.
*/
#ifndef __INDIRECT_ENTRY_H__
#define __INDIRECT_ENTRY_H__
#include "tlpauxinfo.h"
/* indirect-transaction reason codes, generated from the
 * PCIEIND_REASON_DEF X-macro list in indirect_reason.h */
typedef enum {
#define PCIEIND_REASON_DEF(NAME, VAL) \
PCIEIND_REASON_##NAME = VAL,
#include "indirect_reason.h"
PCIEIND_REASON_MAX
} pcieind_reason_t;
/*
* Completion Status field values
* PCIe 4.0, Table 2-34.
*/
typedef enum {
PCIECPL_SC = 0x0, /* Successful Completion */
PCIECPL_UR = 0x1, /* Unsupported Request */
PCIECPL_CRS = 0x2, /* Config Retry Status */
PCIECPL_CA = 0x4, /* Completer Abort */
} pciecpl_t;
#define INDIRECT_TLPSZ 64
/* one indirect-transaction work item: the raw TLP, its decoded aux
 * info, and the completion to be returned */
typedef struct indirect_entry_s {
u_int32_t port;
pciecpl_t cpl; /* PCIECPL_* completion type */
u_int32_t completed:1; /* completion has been delivered */
u_int32_t data[4];
u_int8_t rtlp[INDIRECT_TLPSZ];
tlpauxinfo_t info;
} indirect_entry_t;
#endif /* __INDIRECT_ENTRY_H__ */

View File

@ -0,0 +1,29 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2018, Pensando Systems Inc.
*/
#ifndef PCIEIND_REASON_DEF
#define PCIEIND_REASON_DEF(NAME, VAL)
#endif
PCIEIND_REASON_DEF(RSRV0, 0)
PCIEIND_REASON_DEF(RSRV1, 1)
PCIEIND_REASON_DEF(MSG, 2)
PCIEIND_REASON_DEF(UNSUPPORTED, 3)
PCIEIND_REASON_DEF(PMV, 4)
PCIEIND_REASON_DEF(DBPMV, 5)
PCIEIND_REASON_DEF(ATOMIC, 6)
PCIEIND_REASON_DEF(PMTMISS, 7)
PCIEIND_REASON_DEF(PMRMISS, 8)
PCIEIND_REASON_DEF(PRTMISS, 9)
PCIEIND_REASON_DEF(DBF2VFIDMISS, 10)
PCIEIND_REASON_DEF(PRTOOR, 11)
PCIEIND_REASON_DEF(VFIDOOR, 12)
PCIEIND_REASON_DEF(BDFOOR, 13)
PCIEIND_REASON_DEF(PMRIND, 14)
PCIEIND_REASON_DEF(PRTIND, 15)
PCIEIND_REASON_DEF(PMRECC, 16)
PCIEIND_REASON_DEF(PRTECC, 17)
#undef PCIEIND_REASON_DEF

View File

@ -0,0 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2018,2021, Pensando Systems Inc.
*/
#ifndef __NOTIFY_ENTRY_H__
#define __NOTIFY_ENTRY_H__
#include "tlpauxinfo.h"
#define NOTIFY_TLPSZ 48
/* one notify-ring entry: raw TLP bytes plus decoded aux info */
typedef struct notify_entry_s {
uint8_t rtlp[NOTIFY_TLPSZ];
tlpauxinfo_t info;
} notify_entry_t;
#endif /* __NOTIFY_ENTRY_H__ */

View File

@ -0,0 +1,38 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2022, Pensando Systems Inc.
*/
#ifndef __PCIEHDEVICE_TYPES_H__
#define __PCIEHDEVICE_TYPES_H__
#ifdef __cplusplus
extern "C" {
#if 0
} /* close to calm emacs autoindent */
#endif
#endif
/* emulated PCIe device personalities presented to the host */
typedef enum pciehdevice_type_e {
PCIEHDEVICE_NONE,
PCIEHDEVICE_ETH,
PCIEHDEVICE_MGMTETH,
PCIEHDEVICE_ACCEL,
PCIEHDEVICE_NVME,
PCIEHDEVICE_VIRTIO,
PCIEHDEVICE_PCIESTRESS,
PCIEHDEVICE_DEBUG,
PCIEHDEVICE_RCDEV,
PCIEHDEVICE_CRYPT,
PCIEHDEVICE_UPT,
PCIEHDEVICE_SERIAL,
PCIEHDEVICE_CORE,
} pciehdevice_type_t;
#define PCIEHDEVICE_OVERRIDE_INTRGROUPS 8
#ifdef __cplusplus
}
#endif
#endif /* __PCIEHDEVICE_TYPES_H__ */

View File

@ -0,0 +1,29 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017-2018,2021, Pensando Systems Inc.
*/
#ifndef __PCIESVC_PCIEHW_H__
#define __PCIESVC_PCIEHW_H__
#ifdef __cplusplus
extern "C" {
#if 0
} /* close to calm emacs autoindent */
#endif
#endif
#define PCIEHW_NPORTS 8
#define PCIEHW_NDEVS 1024
#define PCIEHW_CFGSHIFT 11
#define PCIEHW_CFGSZ (1 << PCIEHW_CFGSHIFT)
#define PCIEHW_NROMSK 128
#define PCIEHW_NPMT PMT_COUNT
#define PCIEHW_NPRT PRT_COUNT
#define PCIEHW_NBAR 6 /* 6 cfgspace BARs */
#ifdef __cplusplus
}
#endif
#endif /* __PCIESVC_PCIEHW_H__ */

View File

@ -0,0 +1,36 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2021, Pensando Systems Inc.
*/
#ifndef __PCIEHWMEM_H__
#define __PCIEHWMEM_H__
#ifdef __cplusplus
extern "C" {
#if 0
} /* close to calm emacs autoindent */
#endif
#endif
#include "pciehw.h"
#define PCIEHW_NOTIFYSZ (1 * 1024 * 1024)
/* layout of the hw memory region shared with the PCIe hardware:
 * per-port notify rings, config-space backing pages, interrupt
 * destinations, and an initialization magic/version stamp */
typedef struct pciehw_mem_s {
u_int8_t notify_area[PCIEHW_NPORTS][PCIEHW_NOTIFYSZ]
__attribute__((aligned(PCIEHW_NOTIFYSZ)));
/* page of zeros to back cfgspace */
u_int8_t zeros[4096] __attribute__((aligned(4096)));
u_int8_t cfgcur[PCIEHW_NDEVS][PCIEHW_CFGSZ] __attribute__((aligned(4096)));
u_int32_t notify_intr_dest[PCIEHW_NPORTS]; /* notify intr dest */
u_int32_t indirect_intr_dest[PCIEHW_NPORTS]; /* indirect intr dest */
u_int32_t magic; /* PCIEHW_MAGIC when initialized */
u_int32_t version; /* PCIEHW_VERSION when initialized */
} pciehw_mem_t;
#ifdef __cplusplus
}
#endif
#endif /* __PCIEHWMEM_H__ */

View File

@ -0,0 +1,33 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2019, Pensando Systems Inc.
*/
#ifndef __PCIEMGR_STATS_H__
#define __PCIEMGR_STATS_H__
#ifdef __cplusplus
extern "C" {
#if 0
} /* close to calm emacs autoindent */
#endif
#endif
/* named counters generated from the PCIEMGR_STATS_DEF X-macro list,
 * padded to a fixed 64-slot union for forward compatibility */
typedef union pciemgr_stats {
struct {
#define PCIEMGR_STATS_DEF(S) \
uint64_t S;
#include "pciemgr_stats_defs.h"
};
/* pad to 64 entries, room to grow */
uint64_t _pad[64];
} pciemgr_stats_t;
#ifdef __cplusplus
}
#endif
#endif /* __PCIEMGR_STATS_H__ */

View File

@ -0,0 +1,56 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2019, Pensando Systems Inc.
*/
/*
 * X-macro list of pciemgr stat counters.
 *
 * Include this file with PCIEMGR_STATS_DEF(name) defined to generate
 * one entry per counter (pciemgr_stats.h expands each entry into a
 * uint64_t field of pciemgr_stats_t).  The no-op default below makes a
 * bare include harmless.
 *
 * NOTE(review): entry order determines the field order -- and thus the
 * shared-memory layout -- of pciemgr_stats_t.  Append new counters at
 * the end; never reorder or delete existing ones.
 */
#ifndef PCIEMGR_STATS_DEF
#define PCIEMGR_STATS_DEF(st)
#endif

/* not_* -- notify interrupt/ring counters */
PCIEMGR_STATS_DEF(not_intr)
PCIEMGR_STATS_DEF(not_spurious)
PCIEMGR_STATS_DEF(not_polled)
PCIEMGR_STATS_DEF(not_cnt)
PCIEMGR_STATS_DEF(not_max)

/* notify transaction-type counters */
PCIEMGR_STATS_DEF(not_cfgrd)
PCIEMGR_STATS_DEF(not_cfgwr)
PCIEMGR_STATS_DEF(not_memrd)
PCIEMGR_STATS_DEF(not_memwr)
PCIEMGR_STATS_DEF(not_iord)
PCIEMGR_STATS_DEF(not_iowr)
PCIEMGR_STATS_DEF(not_unknown)

/* alias marking the first of the notify-reason counters below */
#define notify_reason_stats not_rsrv0
PCIEMGR_STATS_DEF(not_rsrv0)
PCIEMGR_STATS_DEF(not_rsrv1)
PCIEMGR_STATS_DEF(not_msg)
PCIEMGR_STATS_DEF(not_unsupported)
PCIEMGR_STATS_DEF(not_pmv)
PCIEMGR_STATS_DEF(not_dbpmv)
PCIEMGR_STATS_DEF(not_atomic)
PCIEMGR_STATS_DEF(not_pmtmiss)
PCIEMGR_STATS_DEF(not_pmrmiss)
PCIEMGR_STATS_DEF(not_prtmiss)
PCIEMGR_STATS_DEF(not_bdf2vfidmiss)
PCIEMGR_STATS_DEF(not_prtoor)
PCIEMGR_STATS_DEF(not_vfidoor)
PCIEMGR_STATS_DEF(not_bdfoor)
PCIEMGR_STATS_DEF(not_pmrind)
PCIEMGR_STATS_DEF(not_prtind)
PCIEMGR_STATS_DEF(not_pmrecc)
PCIEMGR_STATS_DEF(not_prtecc)

/* ind_* -- indirect transaction counters */
PCIEMGR_STATS_DEF(ind_intr)
PCIEMGR_STATS_DEF(ind_spurious)
PCIEMGR_STATS_DEF(ind_polled)
PCIEMGR_STATS_DEF(ind_cfgrd)
PCIEMGR_STATS_DEF(ind_cfgwr)
PCIEMGR_STATS_DEF(ind_memrd)
PCIEMGR_STATS_DEF(ind_memwr)
PCIEMGR_STATS_DEF(ind_iord)
PCIEMGR_STATS_DEF(ind_iowr)
PCIEMGR_STATS_DEF(ind_unknown)

PCIEMGR_STATS_DEF(healthlog)

#undef PCIEMGR_STATS_DEF

View File

@ -0,0 +1,207 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017-2018,2021-2022, Pensando Systems Inc.
*/
#ifndef __PCIESHMEM_H__
#define __PCIESHMEM_H__
#ifdef __cplusplus
extern "C" {
#if 0
} /* close to calm emacs autoindent */
#endif
#endif
#include "pciehdevice_types.h"
#include "pciehw.h"
#include "pciemgr_stats.h"
#include "pmt.h"
#include "prt.h"
/*
 * Config-space write handler selector.  Identifies the special-case
 * handling (if any) applied when the host accesses a config register;
 * stored per register in pciehwdev_t.cfghnd[] (see below).
 */
enum pciehw_cfghnd_e {
    PCIEHW_CFGHND_NONE,
    PCIEHW_CFGHND_CMD,
    PCIEHW_CFGHND_DEV_BARS,
    PCIEHW_CFGHND_ROM_BAR,
    PCIEHW_CFGHND_BRIDGECTL,
    PCIEHW_CFGHND_MSIX,
    PCIEHW_CFGHND_VPD,
    PCIEHW_CFGHND_PCIE_DEVCTL,
    PCIEHW_CFGHND_SRIOV_CTRL,
    PCIEHW_CFGHND_SRIOV_BARS,
    PCIEHW_CFGHND_DBG_DELAY,
    PCIEHW_CFGHND_BRIDGE_BUS,
};
typedef enum pciehw_cfghnd_e pciehw_cfghnd_t;
/* PCIe BAR type, mirroring the BAR type bits of the config header. */
typedef enum pciehwbartype_e {
    PCIEHWBARTYPE_NONE,                 /* invalid bar type */
    PCIEHWBARTYPE_MEM,                  /* 32-bit memory bar */
    PCIEHWBARTYPE_MEM64,                /* 64-bit memory bar */
    PCIEHWBARTYPE_IO,                   /* 32-bit I/O bar */
} pciehwbartype_t;
/* Special-case BAR access handling (stored in pciehwbar_t.hnd). */
typedef enum pciehw_barhnd_e {
    PCIEHW_BARHND_NONE,
    PCIEHW_BARHND_SERIAL,
    PCIEHW_BARHND_VIRTIO,
} pciehw_barhnd_t;
/*
 * Per-BAR state for an emulated device.
 *
 * Lives in the persistent shared-memory region (pciehw_shmem_t), so
 * the union pads the struct to a fixed 64 bytes -- add new fields only
 * at the end of the struct and never reorder existing ones.
 */
typedef union pciehwbar_u {
    struct {
        u_int64_t size;                 /* total size of this bar */
        u_int32_t valid:1;              /* valid bar for this dev */
        u_int32_t loaded:1;             /* pmts loaded */
        u_int32_t ovrds:1;              /* override pmts chained on ovrd */
        pciehwbartype_t type;           /* PCIEHWBARTYPE_* */
        u_int8_t cfgidx;                /* config bars index (0-5) */
        u_int8_t hnd;                   /* indirect/notify handling */
        u_int16_t bdf;                  /* host bdf of bar owner */
        u_int32_t pmtb;                 /* pmt base for bar */
        u_int32_t pmtc;                 /* pmt count for bar */
        u_int16_t ovrd;                 /* override pmts */
        u_int16_t _unused;
        u_int64_t addr;                 /* addr of this bar */
    };
    u_int8_t _pad[64];
} pciehwbar_t;
/* device handle -- presumably an index into pciehw_shmem_t.dev[]; 0 used
 * as "no device" by the rooth/parenth/childh/peerh links -- TODO confirm */
typedef u_int32_t pciehwdevh_t;

/* per-register table sizes: one byte-entry per 32-bit config register */
#define PCIEHW_ROMSKSZ (PCIEHW_CFGSZ / sizeof (u_int32_t))
#define PCIEHW_CFGHNDSZ (PCIEHW_CFGSZ / sizeof (u_int32_t))

#define NOVRDINTR 8
/*
 * If PCIEHDEVICE_OVERRIDE_INTRGROUPS increases we'll have
 * to grow the shared memory region with special handling.
 */
#if NOVRDINTR < PCIEHDEVICE_OVERRIDE_INTRGROUPS
# error "NOVRDINTR < PCIEHDEVICE_OVERRIDE_INTRGROUPS"
#endif
/* one override interrupt resource range (base + count) */
typedef struct ovrdintr_s {
    u_int32_t intrb;                    /* ovrd intr base */
    u_int32_t intrc;                    /* ovrd intr count */
} ovrdintr_t;
/*
 * Per-device state for one emulated PCIe function (PF or VF).
 *
 * Devices form a tree via the parenth/childh/peerh handles.  The union
 * pads each entry to a fixed 4096 bytes because this lives in the
 * persistent shared-memory region (pciehw_shmem_t.dev[]) -- add new
 * fields only at the end of the struct, never reorder.
 */
typedef union pciehwdev_u {
    struct {
        char name[32];                  /* device name */
        int port;                       /* pcie port */
        u_int16_t pf:1;                 /* is pf */
        u_int16_t vf:1;                 /* is vf */
        u_int16_t flexvf:1;             /* is flexvf */
        u_int16_t totalvfs;             /* totalvfs provisioned */
        u_int16_t numvfs;               /* current numvfs */
        u_int16_t vfidx;                /* if is vf, vf position */
        u_int16_t bdf;                  /* bdf of this dev */
        u_int8_t type;                  /* PCIEHDEVICE_* */
        u_int8_t novrdintr;             /* number valid in ovrdintr[] */
        u_int32_t lifb;                 /* lif base for this dev */
        u_int32_t lifc;                 /* lif count for this dev */
        u_int32_t intrb;                /* intr resource base */
        u_int32_t intrc;                /* intr resource count */
        u_int32_t intrdmask:1;          /* reset val for drvcfg.mask */
        u_int32_t cfgloaded:1;          /* cfg pmt entries loaded */
        pciehwdevh_t parenth;           /* handle to parent */
        pciehwdevh_t childh;            /* handle to child */
        pciehwdevh_t peerh;             /* handle to peer */
        u_int8_t intpin;                /* legacy int pin */
        u_int8_t romsksel[PCIEHW_ROMSKSZ]; /* cfg read-only mask selectors */
        u_int8_t cfgpmtf[PCIEHW_CFGHNDSZ]; /* cfg pmt flags */
        u_int8_t cfghnd[PCIEHW_CFGHNDSZ]; /* cfg indirect/notify handlers */
        pciehwbar_t bar[PCIEHW_NBAR];   /* bar info */
        pciehwbar_t rombar;             /* option rom bar */
        u_int16_t sriovctrl;            /* current sriov ctrl reg */
        u_int16_t enabledvfs;           /* current numvfs enabled */
        pciehwdevh_t hwdevh;            /* handle to this dev */
        u_int32_t pmtb;                 /* pmt base for cfg */
        u_int32_t pmtc;                 /* pmt count for cfg */
        ovrdintr_t ovrdintr[NOVRDINTR]; /* override intr resources */
    };
    u_int8_t _pad[4096];
} pciehwdev_t;
/* per-port state and stat counters; padded to a fixed 1024 bytes */
typedef union pciehw_port_u {
    struct {
        u_int8_t secbus;                /* bridge secondary bus */
        pciemgr_stats_t stats;
    };
    u_int8_t _pad[1024];
} pciehw_port_t;
/* shadow PRT entry: software copy of a hardware PCIe Resource Table
 * entry plus chaining link; padded to a fixed 32 bytes */
typedef union pciehw_sprt_u {
    struct {
        prt_t prt;                      /* shadow copy of prt */
        u_int16_t next;                 /* next link for chained prts */
    };
    u_int8_t _pad[32];
} pciehw_sprt_t;
/*
 * Shadow PMT entry: software copy of a hardware PCIe Match Table entry
 * plus bookkeeping (ownership, sriov vf0 adjustment parameters, chain
 * link).  Padded to a fixed 128 bytes for the shared-memory layout.
 */
typedef union pciehw_spmt_u {
    struct {
        u_int64_t baroff;               /* bar addr offset */
        u_int64_t swrd;                 /* reads handled by sw (not/ind) */
        u_int64_t swwr;                 /* writes handled by sw (not/ind) */
        pciehwdevh_t owner;             /* current owner of this entry */
        u_int8_t loaded:1;              /* is loaded into hw */
        u_int8_t vf0:1;                 /* sriov vf0 apply enabledvfs limit */
        u_int8_t vf0stride:5;           /* sriov vf0 addr mask stride */
        u_int8_t chain:1;               /* chained pmts on next */
        u_int8_t cfgidx;                /* cfgidx for bar we belong to */
        pmt_t pmt;                      /* shadow copy of pmt */
        u_int64_t vf0base:52;           /* sriov vf0 resource base address */
        u_int64_t pmtstart:6;           /* sriov vf0 addr mask start */
        u_int16_t next;                 /* next link for chained pmts */
    };
    u_int8_t _pad[128];
} pciehw_spmt_t;
/* shared read-only-mask entry -- presumably a (selector, refcount) pair
 * referenced by pciehwdev_t.romsksel[]; TODO confirm against users */
typedef struct pciehw_sromsk_s {
    u_int32_t entry;
    u_int32_t count;
} pciehw_sromsk_t;

#define PCIEHW_MAGIC 0x706d656d         /* 'pmem' */
#define PCIEHW_VERSION 0x1              /* layout version of pciehw_shmem_t */
#define PCIEHW_VPDSZ 1024               /* per-device VPD data bytes */
#define PCIEHW_SERIALSZ 1024            /* per-port serial buffer bytes */
/*
 * Top-level persistent shared-memory layout for pciesvc state.
 *
 * Validated at attach time via magic/version.  This layout is shared
 * across components (and possibly across software versions), so fields
 * must not be reordered or resized; note the late additions
 * (freepmt_high .. freeprt_slab) are appended after the large arrays,
 * presumably to preserve the existing layout -- keep that convention.
 */
typedef struct pciehw_shmem_s {
    u_int32_t magic;                    /* PCIEHW_MAGIC when initialized */
    u_int32_t version;                  /* PCIEHW_VERSION when initialized */
    u_int32_t hwinit:1;                 /* hw is initialized */
    u_int32_t notify_verbose:1;         /* notify logs all */
    u_int32_t skip_notify:1;            /* notify skips if ring full */
    u_int32_t pmtpri:1;                 /* support pmt pri */
    u_int32_t evregistered:1;           /* event handler registered flag */
    u_int32_t allocdev;
    u_int32_t allocpmt_high;            /* high priority pmt free sequential */
    u_int32_t allocprt;                 /* prt free sequential */
    u_int32_t notify_ring_mask;
    pciehwdevh_t rooth[PCIEHW_NPORTS];
    pciehwdev_t dev[PCIEHW_NDEVS];
    pciehw_port_t port[PCIEHW_NPORTS];
    pciehw_sromsk_t sromsk[PCIEHW_NROMSK];
    pciehw_spmt_t spmt[PCIEHW_NPMT];
    pciehw_sprt_t sprt[PCIEHW_NPRT];
    u_int8_t cfgrst[PCIEHW_NDEVS][PCIEHW_CFGSZ];
    u_int8_t cfgmsk[PCIEHW_NDEVS][PCIEHW_CFGSZ];
    u_int8_t vpddata[PCIEHW_NDEVS][PCIEHW_VPDSZ];
    u_int8_t serial[PCIEHW_NPORTS][PCIEHW_SERIALSZ];
    u_int32_t freepmt_high;             /* high priority pmt free list */
    u_int32_t allocpmt_low;             /* low priority pmt free sequential */
    u_int32_t freepmt_low;              /* low priority pmt free list */
    u_int32_t allocpmt_vf0adj;          /* low pri vf0 adjust (never freed) */
    u_int32_t freeprt_slab;             /* prt free slab adjacent */
} pciehw_shmem_t;
#ifdef __cplusplus
}
#endif
#endif /* __PCIESHMEM_H__ */

View File

@ -0,0 +1,85 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2021, Pensando Systems Inc.
* Copyright (c) 2022, Advanced Micro Devices, Inc.
*/
#ifndef __PCIESVC_H__
#define __PCIESVC_H__
#ifdef __cplusplus
extern "C" {
#if 0
} /* close to calm emacs autoindent */
#endif
#endif
#include "pmt.h"
#include "prt.h"
#include "pciehwmem.h"
#include "pcieshmem.h"
#include "pciesvc_event.h"
#include "pciesvc_cmd.h"
/* pciesvc interface version advertised by this header */
#define PCIESVC_VERSION_MAJ 3
#define PCIESVC_VERSION_MIN 1

/*
 * Version-0 initialization parameters for pciesvc_init().
 *
 * For each of the indirect and notify transaction streams the caller
 * selects either polled or interrupt-driven operation; the msgaddr/
 * msgdata pairs are consumed only when the corresponding *_intr flag
 * is set.
 */
typedef struct pciesvc_params_v0_s {
    int port;                           /* port to config */
    uint32_t ind_poll:1;                /* indirect trans poll */
    uint32_t ind_intr:1;                /* indirect trans intr */
    uint32_t not_poll:1;                /* notify trans poll */
    uint32_t not_intr:1;                /* notify trans intr */
    uint32_t mac_poll:1;                /* mac poll */
    uint32_t mac_intr:1;                /* mac intr */
    uint64_t ind_msgaddr;               /* ind_intr=1: intr msg addr */
    uint32_t ind_msgdata;               /* ind_intr=1: intr msg data */
    uint64_t not_msgaddr;               /* not_intr=1: intr msg addr */
    uint32_t not_msgdata;               /* not_intr=1: intr msg data */
} pciesvc_params_v0_t;

/* versioned parameter envelope; version selects the union member */
typedef struct pciesvc_params_s {
    int version;
    union {
        pciesvc_params_v0_t params_v0;
    };
} pciesvc_params_t;
/* initialize service for the port described in params; nonzero on error */
int pciesvc_init(pciesvc_params_t *params);
/* shut down service for port */
void pciesvc_shut(const int port);

/*
 * Poll entry points.
 * Return value:
 *     <0 error
 *     =0 no work done
 *     >0 work done
 */
int pciesvc_poll(const int port);

/* indirect transaction handling: polled or interrupt-driven */
int pciesvc_indirect_poll_init(const int port);
int pciesvc_indirect_poll(const int port);
int pciesvc_indirect_intr_init(const int port,
                               u_int64_t msgaddr, u_int32_t msgdata);
int pciesvc_indirect_intr(const int port);

/* notify transaction handling: polled or interrupt-driven */
int pciesvc_notify_poll_init(const int port);
int pciesvc_notify_poll(const int port);
int pciesvc_notify_intr_init(const int port,
                             u_int64_t msgaddr, u_int32_t msgdata);
int pciesvc_notify_intr(const int port);

/* command channel (see pciesvc_cmd.h); returns bytes handled or <0 */
int pciesvc_cmd_read(char *buf, const long int off, const size_t count);
int pciesvc_cmd_write(const char *buf, const long int off, const size_t count);

/* version of the linked implementation (may differ from header macros) */
extern int pciesvc_version_major;
extern int pciesvc_version_minor;
void pciesvc_get_version(int *maj, int *min);

/* current log threshold; adjustable via PCIESVC_CMD_SET_LOG_LEVEL */
extern pciesvc_logpri_t pciesvc_log_level;
#ifdef __cplusplus
}
#endif
#endif /* __PCIESVC_H__ */

View File

@ -0,0 +1,62 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2022, Advanced Micro Devices, Inc.
*/
#ifndef __PCIESVC_CMD_H__
#define __PCIESVC_CMD_H__
#ifdef __cplusplus
extern "C" {
#if 0
} /* close to calm emacs autoindent */
#endif
#endif
/* command opcodes carried in pciesvc_cmd_t.cmd */
typedef enum pciesvc_cmdcode_e {
    PCIESVC_CMD_NOP = 0,
    PCIESVC_CMD_SET_LOG_LEVEL = 1,
} pciesvc_cmdcode_t;

/* command completion status carried in pciesvc_cmdres_t.status */
typedef enum pciesvc_cmdstatus_e {
    PCIESVC_CMDSTATUS_SUCCESS = 0,
    PCIESVC_CMDSTATUS_UNKNOWN_CMD = 1,
} pciesvc_cmdstatus_t;
/* PCIESVC_CMD_NOP request: opcode only, no arguments */
typedef struct pciesvc_cmd_nop_s {
    uint32_t cmd;
} pciesvc_cmd_nop_t;

/* PCIESVC_CMD_NOP result */
typedef struct pciesvc_cmdres_nop_s {
    uint32_t status;
} pciesvc_cmdres_nop_t;

/* PCIESVC_CMD_SET_LOG_LEVEL request: new pciesvc_logpri_t value */
typedef struct pciesvc_cmd_set_log_level_s {
    uint32_t cmd;
    uint32_t log_level;
} pciesvc_cmd_set_log_level_t;

/* PCIESVC_CMD_SET_LOG_LEVEL result: status plus the previous level */
typedef struct pciesvc_cmdres_set_log_level_s {
    uint32_t status;
    uint32_t old_level;
} pciesvc_cmdres_set_log_level_t;
/*
 * Command request buffer: fixed 64-byte (16-word) envelope overlaying
 * all per-command request layouts.  Every request starts with a 32-bit
 * opcode, so the uint8_t "cmd" member aliases its first byte.
 * NOTE(review): that byte-aliased view of a uint32_t opcode only yields
 * the opcode on little-endian targets -- confirm intended.
 */
typedef union pciesvc_cmd_u {
    uint32_t words[16];
    uint8_t cmd;
    pciesvc_cmd_nop_t nop;
    pciesvc_cmd_set_log_level_t set_log_level;
} pciesvc_cmd_t;

/* command result buffer, same 64-byte envelope; first field is status */
typedef union pciesvc_cmdres_u {
    uint32_t words[16];
    uint8_t status;
    pciesvc_cmdres_nop_t nop;
    pciesvc_cmdres_set_log_level_t set_log_level;
} pciesvc_cmdres_t;
#ifdef __cplusplus
}
#endif
#endif /* __PCIESVC_CMD_H__ */

View File

@ -0,0 +1,81 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2020, Pensando Systems Inc.
*/
#ifndef __PCIESVC_EVENT_H__
#define __PCIESVC_EVENT_H__
#ifdef __cplusplus
extern "C" {
#if 0
} /* close to calm emacs autoindent */
#endif
#endif
/* event types reported in pciesvc_eventdata_t.evtype */
typedef enum pciesvc_event_e {
    PCIESVC_EV_NONE,
    PCIESVC_EV_MEMRD_NOTIFY,            /* host memory read (notify) */
    PCIESVC_EV_MEMWR_NOTIFY,            /* host memory write (notify) */
    PCIESVC_EV_SRIOV_NUMVFS,            /* sriov numvfs changed */
    PCIESVC_EV_RESET,                   /* bus/FLR/VF reset */
    PCIESVC_EV_QFULL,
    PCIESVC_EV_MGMTCHG,
    PCIESVC_EV_LOGMSG,                  /* log message from the service */
} pciesvc_event_t;
/* payload for PCIESVC_EV_MEMRD_NOTIFY / PCIESVC_EV_MEMWR_NOTIFY */
typedef struct pciesvc_memrw_notify_s {
    u_int64_t baraddr;                  /* PCIe bar address */
    u_int64_t baroffset;                /* bar-local offset */
    u_int8_t cfgidx;                    /* bar cfgidx */
    u_int32_t size;                     /* i/o size */
    u_int64_t localpa;                  /* local physical address */
    u_int64_t data;                     /* data, if write */
} pciesvc_memrw_notify_t;

/* payload for PCIESVC_EV_SRIOV_NUMVFS */
typedef struct pciesvc_sriov_numvfs_s {
    u_int16_t numvfs;                   /* number of vfs enabled */
} pciesvc_sriov_numvfs_t;
/* kind of reset carried by a PCIESVC_EV_RESET event */
typedef enum pciesvc_rsttype_e {
    PCIESVC_RSTTYPE_NONE,
    PCIESVC_RSTTYPE_BUS,                /* bus reset */
    PCIESVC_RSTTYPE_FLR,                /* function level reset */
    PCIESVC_RSTTYPE_VF,                 /* vf reset from sriov ctrl vfe */
} pciesvc_rsttype_t;

/* payload for PCIESVC_EV_RESET: reset type and affected lif range */
typedef struct pciesvc_reset_s {
    pciesvc_rsttype_t rsttype;          /* RSTTYPE_* */
    u_int32_t lifb;                     /* lif base */
    u_int32_t lifc;                     /* lif count */
} pciesvc_reset_t;
/* log priorities, lowest to highest severity */
typedef enum pciesvc_logpri_e {
    PCIESVC_LOGPRI_DEBUG,
    PCIESVC_LOGPRI_INFO,
    PCIESVC_LOGPRI_WARN,
    PCIESVC_LOGPRI_ERROR,
} pciesvc_logpri_t;

/* payload for PCIESVC_EV_LOGMSG */
typedef struct pciesvc_logmsg_s {
    pciesvc_logpri_t pri;               /* log priority LOGPRI_ */
    char msg[80];                       /* log string, NULL-terminated */
} pciesvc_logmsg_t;
/*
 * Event envelope delivered to the registered event handler.
 * evtype selects the valid member of the anonymous union.
 */
typedef struct pciesvc_eventdata_s {
    pciesvc_event_t evtype;             /* PCIESVC_EV_* */
    u_int8_t port;                      /* PCIe port */
    u_int32_t lif;                      /* lif if event for lifs */
    union {
        pciesvc_memrw_notify_t memrw_notify; /* EV_MEMRD/WR_NOTIFY */
        pciesvc_sriov_numvfs_t sriov_numvfs; /* EV_SRIOV_NUMVFS */
        pciesvc_reset_t reset;               /* EV_RESET */
        pciesvc_logmsg_t logmsg;             /* EV_LOGMSG */
    };
} pciesvc_eventdata_t;
#ifdef __cplusplus
}
#endif
#endif /* __PCIESVC_EVENT_H__ */

View File

@ -0,0 +1,62 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2021-2022, Pensando Systems Inc.
*/
#ifndef __PCIESVC_LOCAL_H__
#define __PCIESVC_LOCAL_H__
#ifdef __cplusplus
extern "C" {
#if 0
} /* close to calm emacs autoindent */
#endif
#endif
/*
 * Internal (pciesvc-local) interfaces.  Types are forward-declared so
 * this header does not pull in the full shared-memory definitions.
 */
union pciehwdev_u; typedef union pciehwdev_u pciehwdev_t;
union pciehwbar_u; typedef union pciehwbar_u pciehwbar_t;
typedef u_int32_t pciehwdevh_t;

/* bar management */
u_int64_t pciehw_bar_getsize(pciehwbar_t *phwbar);
void pciehw_bar_setaddr(pciehwbar_t *phwbar, const u_int64_t addr);
void pciehw_bar_load_ovrds(pciehwbar_t *phwbar);
void pciehw_bar_unload_ovrds(pciehwbar_t *phwbar);
void pciehw_bar_load(pciehwdev_t *phwdev, pciehwbar_t *phwbar);
/* load the device's config-space pmt entries into hw */
void pciehw_cfg_load(pciehwdev_t *phwdev);
void pciehw_pmt_setaddr(pciehwbar_t *phwbar, const u_int64_t addr);
/* propagate a secondary-bus reset to devices below this one */
void pciehw_reset_bus(pciehwdev_t *phwdev, const u_int8_t bus);

/* VPD access for the device identified by handle hwdevh */
uint32_t pciehw_vpd_read(pciehwdevh_t hwdevh, const uint16_t addr);
void pciehw_vpd_write(pciehwdevh_t hwdevh,
                      const uint16_t addr, const uint32_t data);

u_int16_t pciehwdev_get_hostbdf(const pciehwdev_t *phwdev);
/* apply a host write of the sriov control register */
void pciehw_sriov_ctrl(pciehwdev_t *phwdev,
                       const u_int16_t ctrl, const u_int16_t numvfs);
/* PMT (PCIe Match Table) entry allocation and access */
struct pmt_s; typedef struct pmt_s pmt_t;
int pmt_reserve_vf0adj(const int n);
/* allocate n entries at priority pri; returns base index or <0 */
int pmt_alloc(const int n, const int pri);
void pmt_free(const int pmtb, const int pmtc);
void pmt_get(const int pmti, pmt_t *pmt);
void pmt_set(const int pmti, const pmt_t *pmt);
void pmt_bar_set_bdf(pmt_t *pmt, const u_int16_t bdf);
u_int64_t pmt_bar_getaddr(const pmt_t *pmt);
void pmt_bar_setaddr(pmt_t *pmt, const u_int64_t addr);

/* encode/decode between the packed hw entry and its data/mask form */
union pmt_entry_u; typedef union pmt_entry_u pmt_entry_t;
struct pmt_datamask_s; typedef struct pmt_datamask_s pmt_datamask_t;
void pmt_entry_enc(pmt_entry_t *pmte, const pmt_datamask_t *dm);
void pmt_entry_dec(const pmt_entry_t *pmte, pmt_datamask_t *dm);
/* PRT (PCIe Resource Table) entry allocation and access */
union prt_u; typedef union prt_u prt_t;
/* allocate n entries; returns base index or <0 */
int prt_alloc(const int n);
void prt_free(const int prtbase, const int prtcount);
void prt_get(const int prti, prt_t *prt);
void prt_set(const int prti, const prt_t *prt);
#ifdef __cplusplus
}
#endif
#endif /* __PCIESVC_LOCAL_H__ */

Some files were not shown because too many files have changed in this diff Show More